diff --git a/.github/workflows/cluster_endtoend_vreplication_migrate.yml b/.github/workflows/cluster_endtoend_vreplication_migrate.yml new file mode 100644 index 00000000000..8b7d9f69c8d --- /dev/null +++ b/.github/workflows/cluster_endtoend_vreplication_migrate.yml @@ -0,0 +1,40 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vreplication_migrate) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vreplication_migrate) + runs-on: ubuntu-latest + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.15 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vreplication_migrate diff --git a/Makefile b/Makefile index 6b0f8744cbf..f90016290ff 100644 --- a/Makefile +++ b/Makefile @@ -103,7 +103,7 @@ parser: make -C go/vt/sqlparser visitor: - go generate go/vt/sqlparser/rewriter.go + go run ./go/tools/asthelpergen -in ./go/vt/sqlparser -iface vitess.io/vitess/go/vt/sqlparser.SQLNode -except "*ColName" sizegen: go run go/tools/sizegen/sizegen.go \ diff --git a/docker/k8s/Dockerfile b/docker/k8s/Dockerfile index c6d28a79b6c..a8665ed148f 100644 --- a/docker/k8s/Dockerfile +++ b/docker/k8s/Dockerfile @@ -42,6 +42,7 @@ COPY --from=base /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificat # Copy binaries COPY --from=base /vt/bin/mysqlctld /vt/bin/ +COPY --from=base /vt/bin/mysqlctl /vt/bin/ COPY --from=base /vt/bin/vtctld /vt/bin/ COPY --from=base /vt/bin/vtctl /vt/bin/ COPY --from=base /vt/bin/vtctlclient /vt/bin/ diff --git a/docker/k8s/mysqlctl/Dockerfile b/docker/k8s/mysqlctl/Dockerfile new file mode 100644 index 00000000000..45abdfda5dc --- /dev/null +++ b/docker/k8s/mysqlctl/Dockerfile @@ -0,0 +1,42 @@ +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +ARG VT_BASE_VER=latest + +FROM vitess/k8s:${VT_BASE_VER} AS k8s + +FROM debian:buster-slim + +# Set up Vitess environment (just enough to run pre-built Go binaries) +ENV VTROOT /vt +ENV VTDATAROOT /vtdataroot + +# Prepare directory structure. 
+RUN mkdir -p /vt/bin && \ + mkdir -p /vt/config && mkdir -p /vtdataroot + +# Copy binaries +COPY --from=k8s /vt/bin/mysqlctl /vt/bin/ + +# Copy certs to allow https calls +COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt + +# copy vitess config +COPY --from=k8s /vt/config /vt/config + +# add vitess user/group and add permissions +RUN groupadd -r --gid 2000 vitess && \ + useradd -r -g vitess --uid 1000 vitess && \ + chown -R vitess:vitess /vt && \ + chown -R vitess:vitess /vtdataroot diff --git a/go/test/endtoend/vreplication/cluster.go b/go/test/endtoend/vreplication/cluster.go index 170236969bb..0d26fac77dd 100644 --- a/go/test/endtoend/vreplication/cluster.go +++ b/go/test/endtoend/vreplication/cluster.go @@ -23,37 +23,38 @@ import ( var ( debug = false // set to true to always use local env vtdataroot for local debugging - originalVtdataroot string - vtdataroot string + originalVtdataroot string + vtdataroot string + mainClusterConfig *ClusterConfig + externalClusterConfig *ClusterConfig ) -var globalConfig = struct { - hostname string - topoPort int - vtctldPort int - vtctldGrpcPort int - tmpDir string - vtgatePort int - vtgateGrpcPort int - vtgateMySQLPort int - tabletTypes string -}{"localhost", 2379, 15000, 15999, vtdataroot + "/tmp", - 15001, 15991, 15306, "MASTER,REPLICA"} - -var ( - tabletPortBase = 15000 - tabletGrpcPortBase = 20000 - tabletMysqlPortBase = 25000 -) +// ClusterConfig defines the parameters like ports, tmpDir, tablet types which uniquely define a vitess cluster +type ClusterConfig struct { + hostname string + topoPort int + vtctldPort int + vtctldGrpcPort int + vtdataroot string + tmpDir string + vtgatePort int + vtgateGrpcPort int + vtgateMySQLPort int + tabletTypes string + tabletPortBase int + tabletGrpcPortBase int + tabletMysqlPortBase int +} // VitessCluster represents all components within the test cluster type VitessCluster struct { - Name string - Cells map[string]*Cell - Topo *cluster.TopoProcess - Vtctld *cluster.VtctldProcess - Vtctl *cluster.VtctlProcess - VtctlClient *cluster.VtctlClientProcess + ClusterConfig *ClusterConfig + Name string + Cells map[string]*Cell + Topo *cluster.TopoProcess + Vtctld *cluster.VtctldProcess + Vtctl *cluster.VtctlProcess + VtctlClient *cluster.VtctlClientProcess } // Cell represents a Vitess cell within the test cluster @@ -85,37 +86,66 @@ type Tablet struct { DbServer *cluster.MysqlctlProcess } -func init() { - originalVtdataroot = os.Getenv("VTDATAROOT") -} - -func initGlobals() { - rand.Seed(time.Now().UTC().UnixNano()) +func setTempVtDataRoot() string { dirSuffix := 100000 + rand.Intn(999999-100000) // 6 digits if debug { vtdataroot = originalVtdataroot } else { vtdataroot = path.Join(originalVtdataroot, fmt.Sprintf("vreple2e_%d", dirSuffix)) } - globalConfig.tmpDir = vtdataroot + "/tmp" if _, err := os.Stat(vtdataroot); os.IsNotExist(err) { os.Mkdir(vtdataroot, 0700) } _ = os.Setenv("VTDATAROOT", vtdataroot) fmt.Printf("VTDATAROOT is %s\n", vtdataroot) + return vtdataroot } -// NewVitessCluster creates an entire VitessCluster for e2e testing -func NewVitessCluster(name string) (cluster *VitessCluster, err error) { - return &VitessCluster{Name: name, Cells: make(map[string]*Cell)}, nil +func getClusterConfig(idx int, dataRootDir string) *ClusterConfig { + basePort := 15000 + etcdPort := 2379 + + basePort += idx * 10000 + etcdPort += idx * 10000 + if _, err := os.Stat(dataRootDir); os.IsNotExist(err) { + os.Mkdir(dataRootDir, 0700) + } + + return &ClusterConfig{ + hostname: 
"localhost", + topoPort: etcdPort, + vtctldPort: basePort, + vtctldGrpcPort: basePort + 999, + tmpDir: dataRootDir + "/tmp", + vtgatePort: basePort + 1, + vtgateGrpcPort: basePort + 991, + vtgateMySQLPort: basePort + 306, + tabletTypes: "master", + vtdataroot: dataRootDir, + tabletPortBase: basePort + 1000, + tabletGrpcPortBase: basePort + 1991, + tabletMysqlPortBase: basePort + 1306, + } +} + +func init() { + rand.Seed(time.Now().UTC().UnixNano()) + originalVtdataroot = os.Getenv("VTDATAROOT") + var mainVtDataRoot string + if debug { + mainVtDataRoot = originalVtdataroot + } else { + mainVtDataRoot = setTempVtDataRoot() + } + mainClusterConfig = getClusterConfig(0, mainVtDataRoot) + externalClusterConfig = getClusterConfig(1, mainVtDataRoot+"/ext") } -// InitCluster creates the global processes needed for a cluster -func InitCluster(t *testing.T, cellNames []string) *VitessCluster { - initGlobals() - vc, _ := NewVitessCluster("Vdemo") +// NewVitessCluster starts a basic cluster with vtgate, vtctld and the topo +func NewVitessCluster(t *testing.T, name string, cellNames []string, clusterConfig *ClusterConfig) *VitessCluster { + vc := &VitessCluster{Name: name, Cells: make(map[string]*Cell), ClusterConfig: clusterConfig} require.NotNil(t, vc) - topo := cluster.TopoProcessInstance(globalConfig.topoPort, globalConfig.topoPort*10, globalConfig.hostname, "etcd2", "global") + topo := cluster.TopoProcessInstance(vc.ClusterConfig.topoPort, vc.ClusterConfig.topoPort+1, vc.ClusterConfig.hostname, "etcd2", "global") require.NotNil(t, topo) require.Nil(t, topo.Setup("etcd2", nil)) @@ -125,14 +155,14 @@ func InitCluster(t *testing.T, cellNames []string) *VitessCluster { topo.ManageTopoDir("mkdir", "/vitess/"+cellName) } - vtctld := cluster.VtctldProcessInstance(globalConfig.vtctldPort, globalConfig.vtctldGrpcPort, - globalConfig.topoPort, globalConfig.hostname, globalConfig.tmpDir) + vtctld := cluster.VtctldProcessInstance(vc.ClusterConfig.vtctldPort, vc.ClusterConfig.vtctldGrpcPort, + vc.ClusterConfig.topoPort, vc.ClusterConfig.hostname, vc.ClusterConfig.tmpDir) vc.Vtctld = vtctld require.NotNil(t, vc.Vtctld) // use first cell as `-cell` vc.Vtctld.Setup(cellNames[0]) - vc.Vtctl = cluster.VtctlProcessInstance(globalConfig.topoPort, globalConfig.hostname) + vc.Vtctl = cluster.VtctlProcessInstance(vc.ClusterConfig.topoPort, vc.ClusterConfig.hostname) require.NotNil(t, vc.Vtctl) for _, cellName := range cellNames { vc.Vtctl.AddCellInfo(cellName) @@ -141,7 +171,7 @@ func InitCluster(t *testing.T, cellNames []string) *VitessCluster { require.NotNil(t, cell) } - vc.VtctlClient = cluster.VtctlClientProcessInstance(globalConfig.hostname, vc.Vtctld.GrpcPort, globalConfig.tmpDir) + vc.VtctlClient = cluster.VtctlClientProcessInstance(vc.ClusterConfig.hostname, vc.Vtctld.GrpcPort, vc.ClusterConfig.tmpDir) require.NotNil(t, vc.VtctlClient) return vc @@ -194,17 +224,17 @@ func (vc *VitessCluster) AddTablet(t *testing.T, cell *Cell, keyspace *Keyspace, tablet := &Tablet{} vttablet := cluster.VttabletProcessInstance( - tabletPortBase+tabletID, - tabletGrpcPortBase+tabletID, + vc.ClusterConfig.tabletPortBase+tabletID, + vc.ClusterConfig.tabletGrpcPortBase+tabletID, tabletID, cell.Name, shard.Name, keyspace.Name, - globalConfig.vtctldPort, + vc.ClusterConfig.vtctldPort, tabletType, vc.Topo.Port, - globalConfig.hostname, - globalConfig.tmpDir, + vc.ClusterConfig.hostname, + vc.ClusterConfig.tmpDir, []string{ "-queryserver-config-schema-reload-time", "5", "-enable-lag-throttler", @@ -216,7 +246,7 @@ func (vc 
*VitessCluster) AddTablet(t *testing.T, cell *Cell, keyspace *Keyspace, require.NotNil(t, vttablet) vttablet.SupportsBackup = false - tablet.DbServer = cluster.MysqlCtlProcessInstance(tabletID, tabletMysqlPortBase+tabletID, globalConfig.tmpDir) + tablet.DbServer = cluster.MysqlCtlProcessInstance(tabletID, vc.ClusterConfig.tabletMysqlPortBase+tabletID, vc.ClusterConfig.tmpDir) require.NotNil(t, tablet.DbServer) tablet.DbServer.InitMysql = true proc, err := tablet.DbServer.StartProcess() @@ -331,15 +361,15 @@ func (vc *VitessCluster) DeleteShard(t *testing.T, cellName string, ksName strin // StartVtgate starts a vtgate process func (vc *VitessCluster) StartVtgate(t *testing.T, cell *Cell, cellsToWatch string) { vtgate := cluster.VtgateProcessInstance( - globalConfig.vtgatePort, - globalConfig.vtgateGrpcPort, - globalConfig.vtgateMySQLPort, + vc.ClusterConfig.vtgatePort, + vc.ClusterConfig.vtgateGrpcPort, + vc.ClusterConfig.vtgateMySQLPort, cell.Name, cellsToWatch, - globalConfig.hostname, - globalConfig.tabletTypes, - globalConfig.topoPort, - globalConfig.tmpDir, + vc.ClusterConfig.hostname, + vc.ClusterConfig.tabletTypes, + vc.ClusterConfig.topoPort, + vc.ClusterConfig.tmpDir, []string{"-tablet_refresh_interval", "10ms"}) require.NotNil(t, vtgate) if err := vtgate.Setup(); err != nil { diff --git a/go/test/endtoend/vreplication/config.go b/go/test/endtoend/vreplication/config.go index d937b7a4948..8bb6150c0c5 100644 --- a/go/test/endtoend/vreplication/config.go +++ b/go/test/endtoend/vreplication/config.go @@ -287,5 +287,18 @@ create table customer_seq2(id int, next_id bigint, cache bigint, primary key(id) "create_ddl": "create table rollup(rollupname varchar(100), kount int, primary key (rollupname))" }] } +` + initialExternalSchema = ` +create table review(rid int, pid int, review varbinary(128), primary key(rid)); +create table rating(gid int, pid int, rating int, primary key(gid)); +` + + initialExternalVSchema = ` +{ + "tables": { + "review": {}, + "rating": {} + } +} ` ) diff --git a/go/test/endtoend/vreplication/helper.go b/go/test/endtoend/vreplication/helper.go index c2d69d1afea..0fea60f0d83 100644 --- a/go/test/endtoend/vreplication/helper.go +++ b/go/test/endtoend/vreplication/helper.go @@ -33,9 +33,9 @@ func execQuery(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { return qr } -func getConnection(t *testing.T, port int) *mysql.Conn { +func getConnection(t *testing.T, hostname string, port int) *mysql.Conn { vtParams := mysql.ConnParams{ - Host: globalConfig.hostname, + Host: hostname, Port: port, Uname: "vt_dba", } diff --git a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go new file mode 100644 index 00000000000..d327f39788f --- /dev/null +++ b/go/test/endtoend/vreplication/migrate_test.go @@ -0,0 +1,163 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplication + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" +) + +func insertInitialDataIntoExternalCluster(t *testing.T, conn *mysql.Conn) { + t.Run("insertInitialData", func(t *testing.T) { + fmt.Printf("Inserting initial data\n") + execVtgateQuery(t, conn, "rating:0", "insert into review(rid, pid, review) values(1, 1, 'review1');") + execVtgateQuery(t, conn, "rating:0", "insert into review(rid, pid, review) values(2, 1, 'review2');") + execVtgateQuery(t, conn, "rating:0", "insert into review(rid, pid, review) values(3, 2, 'review3');") + execVtgateQuery(t, conn, "rating:0", "insert into rating(gid, pid, rating) values(1, 1, 4);") + execVtgateQuery(t, conn, "rating:0", "insert into rating(gid, pid, rating) values(2, 2, 5);") + }) +} + +// TestMigrate runs an e2e test for importing from an external cluster using the Mount and Migrate commands. +// We have an anti-pattern in Vitess: vt executables look for an environment variable VTDATAROOT for certain cluster parameters +// like the log directory when they are created. Until this test we just needed a single cluster for e2e tests. +// However now we need to create an external Vitess cluster. For this we need a different VTDATAROOT and +// hence the VTDATAROOT env variable gets overwritten. +// Each time we need to create vt processes in the "other" cluster we need to set the appropriate VTDATAROOT +func TestMigrate(t *testing.T) { + defaultCellName := "zone1" + cells := []string{"zone1"} + allCellNames = "zone1" + vc = NewVitessCluster(t, "TestMigrate", cells, mainClusterConfig) + + require.NotNil(t, vc) + defaultReplicas = 0 + defaultRdonly = 0 + defer vc.TearDown() + + defaultCell = vc.Cells[defaultCellName] + vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100) + vtgate = defaultCell.Vtgates[0] + require.NotNil(t, vtgate) + vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "product", "0"), 1) + + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + verifyClusterHealth(t, vc) + insertInitialData(t) + + // create external cluster + extCell := "extcell1" + extCells := []string{extCell} + extVc := NewVitessCluster(t, "TestMigrateExternal", extCells, externalClusterConfig) + require.NotNil(t, extVc) + defer extVc.TearDown() + + extCell2 := extVc.Cells[extCell] + extVc.AddKeyspace(t, []*Cell{extCell2}, "rating", "0", initialExternalVSchema, initialExternalSchema, 0, 0, 1000) + extVtgate := extCell2.Vtgates[0] + require.NotNil(t, extVtgate) + + extVtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "rating", "0"), 1) + verifyClusterHealth(t, extVc) + extVtgateConn := getConnection(t, extVc.ClusterConfig.hostname, extVc.ClusterConfig.vtgateMySQLPort) + insertInitialDataIntoExternalCluster(t, extVtgateConn) + + var err error + var output, expected string + ksWorkflow := "product.e1" + + t.Run("mount external cluster", func(t *testing.T) { + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "-type=vitess", "-topo_type=etcd2", + fmt.Sprintf("-topo_server=localhost:%d", extVc.ClusterConfig.topoPort), "-topo_root=/vitess/global", "ext1"); err != nil { + t.Fatalf("Mount command failed with %+v : %s\n", err, output) + } + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "-type=vitess", "-list"); err != nil { + t.Fatalf("Mount command failed with %+v : 
%s\n", err, output) + } + expected = "ext1\n" + require.Equal(t, expected, output) + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "-type=vitess", "-show", "ext1"); err != nil { + t.Fatalf("Mount command failed with %+v : %s\n", err, output) + } + expected = `{"ClusterName":"ext1","topo_config":{"topo_type":"etcd2","server":"localhost:12379","root":"/vitess/global"}}` + "\n" + require.Equal(t, expected, output) + }) + + t.Run("migrate from external cluster", func(t *testing.T) { + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Migrate", "-all", "-cells=extcell1", + "-source=ext1.rating", "create", ksWorkflow); err != nil { + t.Fatalf("Migrate command failed with %+v : %s\n", err, output) + } + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1) + validateCount(t, vtgateConn, "product:0", "rating", 2) + validateCount(t, vtgateConn, "product:0", "review", 3) + execVtgateQuery(t, extVtgateConn, "rating", "insert into review(rid, pid, review) values(4, 1, 'review4');") + execVtgateQuery(t, extVtgateConn, "rating", "insert into rating(gid, pid, rating) values(3, 1, 3);") + time.Sleep(1 * time.Second) // wait for stream to find row + validateCount(t, vtgateConn, "product:0", "rating", 3) + validateCount(t, vtgateConn, "product:0", "review", 4) + vdiff(t, ksWorkflow, "extcell1") + + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Migrate", "complete", ksWorkflow); err != nil { + t.Fatalf("Migrate command failed with %+v : %s\n", err, output) + } + + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 0) + }) + t.Run("cancel migrate workflow", func(t *testing.T) { + execVtgateQuery(t, vtgateConn, "product", "drop table review,rating") + + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Migrate", "-all", "-auto_start=false", "-cells=extcell1", + "-source=ext1.rating", "create", ksWorkflow); err != nil { + t.Fatalf("Migrate command failed with %+v : %s\n", err, output) + } + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1) + validateCount(t, vtgateConn, "product:0", "rating", 0) + validateCount(t, vtgateConn, "product:0", "review", 0) + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Migrate", "cancel", ksWorkflow); err != nil { + t.Fatalf("Migrate command failed with %+v : %s\n", err, output) + } + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 0) + var found bool + found, err = checkIfTableExists(t, vc, "zone1-100", "review") + require.NoError(t, err) + require.False(t, found) + found, err = checkIfTableExists(t, vc, "zone1-100", "rating") + require.NoError(t, err) + require.False(t, found) + }) + t.Run("unmount external cluster", func(t *testing.T) { + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "-type=vitess", "-unmount", "ext1"); err != nil { + t.Fatalf("Mount command failed with %+v : %s\n", err, output) + } + + if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "-type=vitess", "-list"); err != nil { + t.Fatalf("Mount command failed with %+v : %s\n", err, output) + } + expected = "\n" + require.Equal(t, expected, output) + + output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "-type=vitess", "-show", "ext1") + require.Errorf(t, err, "there is no vitess cluster named ext1") + }) +} diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index 9541c154730..cb189b8867c 100644 --- 
a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -65,7 +65,7 @@ func createReshardWorkflow(t *testing.T, sourceShards, targetShards string) erro time.Sleep(1 * time.Second) catchup(t, targetTab1, workflowName, "Reshard") catchup(t, targetTab2, workflowName, "Reshard") - vdiff(t, ksWorkflow) + vdiff(t, ksWorkflow, "") return nil } @@ -79,7 +79,7 @@ func createMoveTablesWorkflow(t *testing.T, tables string) error { catchup(t, targetTab1, workflowName, "MoveTables") catchup(t, targetTab2, workflowName, "MoveTables") time.Sleep(1 * time.Second) - vdiff(t, ksWorkflow) + vdiff(t, ksWorkflow, "") return nil } @@ -233,7 +233,7 @@ func getCurrentState(t *testing.T) string { func TestBasicV2Workflows(t *testing.T) { vc = setupCluster(t) defer vtgateConn.Close() - //defer vc.TearDown() + defer vc.TearDown() testMoveTablesV2Workflow(t) testReshardV2Workflow(t) @@ -387,7 +387,7 @@ func testRestOfWorkflow(t *testing.T) { func setupCluster(t *testing.T) *VitessCluster { cells := []string{"zone1", "zone2"} - vc = InitCluster(t, cells) + vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", cells, mainClusterConfig) require.NotNil(t, vc) defaultCellName := "zone1" allCellNames = defaultCellName @@ -403,8 +403,8 @@ func setupCluster(t *testing.T) *VitessCluster { vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "product", "0"), 1) vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2) - vtgateConn = getConnection(t, globalConfig.vtgateMySQLPort) - verifyClusterHealth(t) + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + verifyClusterHealth(t, vc) insertInitialData(t) sourceReplicaTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-101"].Vttablet @@ -463,7 +463,7 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias moveTables(t, sourceCellOrAlias, workflow, sourceKs, targetKs, tables) catchup(t, targetTab1, workflow, "MoveTables") catchup(t, targetTab2, workflow, "MoveTables") - vdiff(t, ksWorkflow) + vdiff(t, ksWorkflow, "") } var switchReadsFollowedBySwitchWrites = func() { diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index 722af7cb9c5..899273b9192 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -87,7 +87,7 @@ func TestBasicVreplicationWorkflow(t *testing.T) { defaultCellName := "zone1" allCells := []string{"zone1"} allCellNames = "zone1" - vc = InitCluster(t, allCells) + vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", allCells, mainClusterConfig) require.NotNil(t, vc) defaultReplicas = 0 // because of CI resource constraints we can only run this test with master tablets @@ -101,9 +101,9 @@ func TestBasicVreplicationWorkflow(t *testing.T) { require.NotNil(t, vtgate) vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "product", "0"), 1) - vtgateConn = getConnection(t, globalConfig.vtgateMySQLPort) + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - verifyClusterHealth(t) + verifyClusterHealth(t, vc) insertInitialData(t) materializeRollup(t) @@ -135,7 +135,7 @@ func TestMultiCellVreplicationWorkflow(t *testing.T) { cells := []string{"zone1", "zone2"} allCellNames = "zone1,zone2" - vc = InitCluster(t, cells) + vc = NewVitessCluster(t, 
"TestBasicVreplicationWorkflow", cells, mainClusterConfig) require.NotNil(t, vc) defaultCellName := "zone1" defaultCell = vc.Cells[defaultCellName] @@ -151,9 +151,9 @@ func TestMultiCellVreplicationWorkflow(t *testing.T) { vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "product", "0"), 1) vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2) - vtgateConn = getConnection(t, globalConfig.vtgateMySQLPort) + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - verifyClusterHealth(t) + verifyClusterHealth(t, vc) insertInitialData(t) shardCustomer(t, true, []*Cell{cell1, cell2}, cell2.Name) } @@ -161,7 +161,7 @@ func TestMultiCellVreplicationWorkflow(t *testing.T) { func TestCellAliasVreplicationWorkflow(t *testing.T) { cells := []string{"zone1", "zone2"} - vc = InitCluster(t, cells) + vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", cells, mainClusterConfig) require.NotNil(t, vc) allCellNames = "zone1,zone2" defaultCellName := "zone1" @@ -182,9 +182,9 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "product", "0"), 1) vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2) - vtgateConn = getConnection(t, globalConfig.vtgateMySQLPort) + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - verifyClusterHealth(t) + verifyClusterHealth(t, vc) insertInitialData(t) shardCustomer(t, true, []*Cell{cell1, cell2}, "alias") } @@ -267,7 +267,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl insertQuery1 := "insert into customer(cid, name) values(1001, 'tempCustomer1')" matchInsertQuery1 := "insert into customer(cid, `name`) values (:vtg1, :vtg2)" require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1)) - vdiff(t, ksWorkflow) + vdiff(t, ksWorkflow, "") switchReadsDryRun(t, allCellNames, ksWorkflow, dryRunResultsReadCustomerShard) switchReads(t, allCellNames, ksWorkflow) require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", query, query)) @@ -477,7 +477,7 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou continue } } - vdiff(t, ksWorkflow) + vdiff(t, ksWorkflow, "") switchReads(t, allCellNames, ksWorkflow) if dryRunResultSwitchWrites != nil { switchWritesDryRun(t, ksWorkflow, dryRunResultSwitchWrites) @@ -510,7 +510,7 @@ func shardOrders(t *testing.T) { customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet catchup(t, customerTab1, workflow, "MoveTables") catchup(t, customerTab2, workflow, "MoveTables") - vdiff(t, ksWorkflow) + vdiff(t, ksWorkflow, "") switchReads(t, allCellNames, ksWorkflow) switchWrites(t, ksWorkflow, false) dropSources(t, ksWorkflow) @@ -544,7 +544,7 @@ func shardMerchant(t *testing.T) { catchup(t, merchantTab1, workflow, "MoveTables") catchup(t, merchantTab2, workflow, "MoveTables") - vdiff(t, "merchant.p2m") + vdiff(t, "merchant.p2m", "") switchReads(t, allCellNames, ksWorkflow) switchWrites(t, ksWorkflow, false) dropSources(t, ksWorkflow) @@ -555,9 +555,9 @@ func shardMerchant(t *testing.T) { }) } -func vdiff(t *testing.T, workflow string) { +func vdiff(t *testing.T, workflow, cells string) { t.Run("vdiff", func(t *testing.T) { - output, err := vc.VtctlClient.ExecuteCommandWithOutput("VDiff", "-format", "json", workflow) + 
output, err := vc.VtctlClient.ExecuteCommandWithOutput("VDiff", "-tablet_types=master", "-source_cell="+cells, "-format", "json", workflow) fmt.Printf("vdiff err: %+v, output: %+v\n", err, output) require.Nil(t, err) require.NotNil(t, output) @@ -570,7 +570,7 @@ func vdiff(t *testing.T, workflow string) { require.True(t, len(diffReports) > 0) for key, diffReport := range diffReports { if diffReport.ProcessedRows != diffReport.MatchingRows { - t.Errorf("vdiff error for %d : %#v\n", key, diffReport) + require.Failf(t, "vdiff failed", "Table %d : %#v\n", key, diffReport) } } }) @@ -776,8 +776,8 @@ func checkTabletHealth(t *testing.T, tablet *Tablet) { } } -func iterateTablets(t *testing.T, f func(t *testing.T, tablet *Tablet)) { - for _, cell := range vc.Cells { +func iterateTablets(t *testing.T, cluster *VitessCluster, f func(t *testing.T, tablet *Tablet)) { + for _, cell := range cluster.Cells { for _, ks := range cell.Keyspaces { for _, shard := range ks.Shards { for _, tablet := range shard.Tablets { @@ -788,15 +788,15 @@ func iterateTablets(t *testing.T, f func(t *testing.T, tablet *Tablet)) { } } -func iterateCells(t *testing.T, f func(t *testing.T, cell *Cell)) { - for _, cell := range vc.Cells { +func iterateCells(t *testing.T, cluster *VitessCluster, f func(t *testing.T, cell *Cell)) { + for _, cell := range cluster.Cells { f(t, cell) } } -func verifyClusterHealth(t *testing.T) { - iterateCells(t, checkVtgateHealth) - iterateTablets(t, checkTabletHealth) +func verifyClusterHealth(t *testing.T, cluster *VitessCluster) { + iterateCells(t, cluster, checkVtgateHealth) + iterateTablets(t, cluster, checkTabletHealth) } func catchup(t *testing.T, vttablet *cluster.VttabletProcess, workflow, info string) { diff --git a/go/tools/asthelpergen/asthelpergen.go b/go/tools/asthelpergen/asthelpergen.go new file mode 100644 index 00000000000..0bf452fccaa --- /dev/null +++ b/go/tools/asthelpergen/asthelpergen.go @@ -0,0 +1,291 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/types" + "io/ioutil" + "log" + "path" + "strings" + + "github.com/dave/jennifer/jen" + "golang.org/x/tools/go/packages" +) + +const licenseFileHeader = `Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License.` + +type generator interface { + visitStruct(t types.Type, stroct *types.Struct) error + visitInterface(t types.Type, iface *types.Interface) error + visitSlice(t types.Type, slice *types.Slice) error + createFile(pkgName string) (string, *jen.File) +} + +// astHelperGen finds implementations of the given interface, +// and uses the supplied `generator`s to produce the output code +type astHelperGen struct { + DebugTypes bool + mod *packages.Module + sizes types.Sizes + namedIface *types.Named + iface *types.Interface + gens []generator +} + +func newGenerator(mod *packages.Module, sizes types.Sizes, named *types.Named, generators ...generator) *astHelperGen { + return &astHelperGen{ + DebugTypes: true, + mod: mod, + sizes: sizes, + namedIface: named, + iface: named.Underlying().(*types.Interface), + gens: generators, + } +} + +func findImplementations(scope *types.Scope, iff *types.Interface, impl func(types.Type) error) error { + for _, name := range scope.Names() { + obj := scope.Lookup(name) + if _, ok := obj.(*types.TypeName); !ok { + continue + } + baseType := obj.Type() + if types.Implements(baseType, iff) { + err := impl(baseType) + if err != nil { + return err + } + continue + } + pointerT := types.NewPointer(baseType) + if types.Implements(pointerT, iff) { + err := impl(pointerT) + if err != nil { + return err + } + continue + } + } + return nil +} + +func (gen *astHelperGen) visitStruct(t types.Type, stroct *types.Struct) error { + for _, g := range gen.gens { + err := g.visitStruct(t, stroct) + if err != nil { + return err + } + } + return nil +} + +func (gen *astHelperGen) visitSlice(t types.Type, slice *types.Slice) error { + for _, g := range gen.gens { + err := g.visitSlice(t, slice) + if err != nil { + return err + } + } + return nil +} + +func (gen *astHelperGen) visitInterface(t types.Type, iface *types.Interface) error { + for _, g := range gen.gens { + err := g.visitInterface(t, iface) + if err != nil { + return err + } + } + return nil +} + +// GenerateCode is the main loop where we build up the code per file. 
+func (gen *astHelperGen) GenerateCode() (map[string]*jen.File, error) { + pkg := gen.namedIface.Obj().Pkg() + iface, ok := gen.iface.Underlying().(*types.Interface) + if !ok { + return nil, fmt.Errorf("expected interface, but got %T", gen.iface) + } + + err := findImplementations(pkg.Scope(), iface, func(t types.Type) error { + switch n := t.Underlying().(type) { + case *types.Struct: + return gen.visitStruct(t, n) + case *types.Slice: + return gen.visitSlice(t, n) + case *types.Pointer: + strct, isStrct := n.Elem().Underlying().(*types.Struct) + if isStrct { + return gen.visitStruct(t, strct) + } + case *types.Interface: + return gen.visitInterface(t, n) + default: + // do nothing + } + return nil + }) + + if err != nil { + return nil, err + } + + result := map[string]*jen.File{} + for _, g := range gen.gens { + file, code := g.createFile(pkg.Name()) + fullPath := path.Join(gen.mod.Dir, strings.TrimPrefix(pkg.Path(), gen.mod.Path), file) + result[fullPath] = code + } + + return result, nil +} + +type typePaths []string + +func (t *typePaths) String() string { + return fmt.Sprintf("%v", *t) +} + +func (t *typePaths) Set(path string) error { + *t = append(*t, path) + return nil +} + +func main() { + var patterns typePaths + var generate, except string + var verify bool + + flag.Var(&patterns, "in", "Go packages to load the generator") + flag.StringVar(&generate, "iface", "", "Root interface generate rewriter for") + flag.BoolVar(&verify, "verify", false, "ensure that the generated files are correct") + flag.StringVar(&except, "except", "", "don't deep clone these types") + flag.Parse() + + result, err := GenerateASTHelpers(patterns, generate, except) + if err != nil { + log.Fatal(err) + } + + if verify { + for _, err := range VerifyFilesOnDisk(result) { + log.Fatal(err) + } + log.Printf("%d files OK", len(result)) + } else { + for fullPath, file := range result { + if err := file.Save(fullPath); err != nil { + log.Fatalf("failed to save file to '%s': %v", fullPath, err) + } + log.Printf("saved '%s'", fullPath) + } + } +} + +// VerifyFilesOnDisk compares the generated results from the codegen against the files that +// currently exist on disk and returns any mismatches +func VerifyFilesOnDisk(result map[string]*jen.File) (errors []error) { + for fullPath, file := range result { + existing, err := ioutil.ReadFile(fullPath) + if err != nil { + errors = append(errors, fmt.Errorf("missing file on disk: %s (%w)", fullPath, err)) + continue + } + + var buf bytes.Buffer + if err := file.Render(&buf); err != nil { + errors = append(errors, fmt.Errorf("render error for '%s': %w", fullPath, err)) + continue + } + + if !bytes.Equal(existing, buf.Bytes()) { + errors = append(errors, fmt.Errorf("'%s' has changed", fullPath)) + continue + } + } + return errors +} + +// GenerateASTHelpers loads the input code, constructs the necessary generators, +// and generates the rewriter and clone methods for the AST +func GenerateASTHelpers(packagePatterns []string, rootIface, exceptCloneType string) (map[string]*jen.File, error) { + loaded, err := packages.Load(&packages.Config{ + Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesSizes | packages.NeedTypesInfo | packages.NeedDeps | packages.NeedImports | packages.NeedModule, + Logf: log.Printf, + }, packagePatterns...) 
+ + if err != nil { + return nil, err + } + + scopes := make(map[string]*types.Scope) + for _, pkg := range loaded { + scopes[pkg.PkgPath] = pkg.Types.Scope() + } + + pos := strings.LastIndexByte(rootIface, '.') + if pos < 0 { + return nil, fmt.Errorf("unexpected input type: %s", rootIface) + } + + pkgname := rootIface[:pos] + typename := rootIface[pos+1:] + + scope := scopes[pkgname] + if scope == nil { + return nil, fmt.Errorf("no scope found for type '%s'", rootIface) + } + + tt := scope.Lookup(typename) + if tt == nil { + return nil, fmt.Errorf("no type called '%s' found in '%s'", typename, pkgname) + } + + nt := tt.Type().(*types.Named) + + iface := nt.Underlying().(*types.Interface) + + interestingType := func(t types.Type) bool { + return types.Implements(t, iface) + } + rewriter := newRewriterGen(interestingType, nt.Obj().Name()) + clone := newCloneGen(iface, scope, exceptCloneType) + + generator := newGenerator(loaded[0].Module, loaded[0].TypesSizes, nt, rewriter, clone) + it, err := generator.GenerateCode() + if err != nil { + return nil, err + } + + return it, nil +} diff --git a/go/tools/asthelpergen/asthelpergen_test.go b/go/tools/asthelpergen/asthelpergen_test.go new file mode 100644 index 00000000000..30dc2eb61a5 --- /dev/null +++ b/go/tools/asthelpergen/asthelpergen_test.go @@ -0,0 +1,43 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFullGeneration(t *testing.T) { + result, err := GenerateASTHelpers([]string{"./integration/..."}, "vitess.io/vitess/go/tools/asthelpergen/integration.AST", "*NoCloneType") + require.NoError(t, err) + + verifyErrors := VerifyFilesOnDisk(result) + require.Empty(t, verifyErrors) + + for _, file := range result { + contents := fmt.Sprintf("%#v", file) + require.Contains(t, contents, "http://www.apache.org/licenses/LICENSE-2.0") + applyIdx := strings.Index(contents, "func (a *application) apply(parent, node AST, replacer replacerFunc)") + cloneIdx := strings.Index(contents, "CloneAST(in AST) AST") + if applyIdx == 0 && cloneIdx == 0 { + t.Fatalf("file doesn't contain expected contents") + } + } +} diff --git a/go/tools/asthelpergen/clone_gen.go b/go/tools/asthelpergen/clone_gen.go new file mode 100644 index 00000000000..066a4c82923 --- /dev/null +++ b/go/tools/asthelpergen/clone_gen.go @@ -0,0 +1,360 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "fmt" + "go/types" + + "vitess.io/vitess/go/vt/log" + + "github.com/dave/jennifer/jen" +) + +// cloneGen creates the deep clone methods for the AST. It works by discovering the types that it needs to support, +// starting from a root interface type. While creating the clone method for this root interface, more types that need +// to be cloned are discovered. This continues type by type until all necessary types have been traversed. +type cloneGen struct { + methods []jen.Code + iface *types.Interface + scope *types.Scope + todo []types.Type + exceptType string +} + +var _ generator = (*cloneGen)(nil) + +func newCloneGen(iface *types.Interface, scope *types.Scope, exceptType string) *cloneGen { + return &cloneGen{ + iface: iface, + scope: scope, + exceptType: exceptType, + } +} + +func (c *cloneGen) visitStruct(types.Type, *types.Struct) error { + return nil +} + +func (c *cloneGen) visitSlice(types.Type, *types.Slice) error { + return nil +} + +func (c *cloneGen) visitInterface(t types.Type, _ *types.Interface) error { + c.todo = append(c.todo, t) + return nil +} + +const cloneName = "Clone" + +func (c *cloneGen) addFunc(name string, code jen.Code) { + c.methods = append(c.methods, jen.Comment(name+" creates a deep clone of the input."), code) +} + +// readValueOfType produces code to read the expression of type `t`, and adds the type to the todo-list +func (c *cloneGen) readValueOfType(t types.Type, expr jen.Code) jen.Code { + switch t.Underlying().(type) { + case *types.Basic: + return expr + case *types.Interface: + if types.TypeString(t, noQualifier) == "interface{}" { + // these fields have to be taken care of manually + return expr + } + } + c.todo = append(c.todo, t) + return jen.Id(cloneName + printableTypeName(t)).Call(expr) +} + +func (c *cloneGen) makeStructCloneMethod(t types.Type) error { + receiveType := types.TypeString(t, noQualifier) + funcName := "Clone" + printableTypeName(t) + c.addFunc(funcName, + jen.Func().Id(funcName).Call(jen.Id("n").Id(receiveType)).Id(receiveType).Block( + jen.Return(jen.Op("*").Add(c.readValueOfType(types.NewPointer(t), jen.Op("&").Id("n")))), + )) + return nil +} + +func (c *cloneGen) makeSliceCloneMethod(t types.Type, slice *types.Slice) error { + typeString := types.TypeString(t, noQualifier) + name := printableTypeName(t) + funcName := cloneName + name + + c.addFunc(funcName, + //func (n Bytes) Clone() Bytes { + jen.Func().Id(funcName).Call(jen.Id("n").Id(typeString)).Id(typeString).Block( + // res := make(Bytes, len(n)) + jen.Id("res").Op(":=").Id("make").Call(jen.Id(typeString), jen.Lit(0), jen.Id("len").Call(jen.Id("n"))), + c.copySliceElement(slice.Elem()), + // return res + jen.Return(jen.Id("res")), + )) + return nil +} + +func (c *cloneGen) copySliceElement(elType types.Type) jen.Code { + if isBasic(elType) { + // copy(res, n) + return jen.Id("copy").Call(jen.Id("res"), jen.Id("n")) + } + + //for _, x := range n { + // res = append(res, CloneAST(x)) + //} + c.todo = append(c.todo, elType) + return jen.For(jen.List(jen.Op("_"), jen.Id("x"))).Op(":=").Range().Id("n").Block( + jen.Id("res").Op("=").Id("append").Call(jen.Id("res"), c.readValueOfType(elType, jen.Id("x"))), + ) +} + +func (c *cloneGen) makeInterfaceCloneMethod(t types.Type, iface *types.Interface) error { + + //func CloneAST(in AST) AST { + // if in == nil { + // return nil + //} + // switch in := in.(type) { + //case *RefContainer: + // return in.CloneRefOfRefContainer() + //} + // // this should never happen + // return nil + //} 
+ + typeString := types.TypeString(t, noQualifier) + typeName := printableTypeName(t) + + stmts := []jen.Code{ifNilReturnNil("in")} + + var cases []jen.Code + _ = findImplementations(c.scope, iface, func(t types.Type) error { + typeString := types.TypeString(t, noQualifier) + + // case Type: return CloneType(in) + block := jen.Case(jen.Id(typeString)).Block(jen.Return(c.readValueOfType(t, jen.Id("in")))) + switch t := t.(type) { + case *types.Pointer: + _, isIface := t.Elem().(*types.Interface) + if !isIface { + cases = append(cases, block) + } + + case *types.Named: + _, isIface := t.Underlying().(*types.Interface) + if !isIface { + cases = append(cases, block) + } + + default: + log.Errorf("unexpected type encountered: %s", typeString) + } + + return nil + }) + + cases = append(cases, + jen.Default().Block( + jen.Comment("this should never happen"), + jen.Return(jen.Nil()), + )) + + // switch n := node.(type) { + stmts = append(stmts, jen.Switch(jen.Id("in").Op(":=").Id("in").Assert(jen.Id("type")).Block( + cases..., + ))) + + funcName := cloneName + typeName + funcDecl := jen.Func().Id(funcName).Call(jen.Id("in").Id(typeString)).Id(typeString).Block(stmts...) + c.addFunc(funcName, funcDecl) + return nil +} + +func (c *cloneGen) makePtrCloneMethod(t types.Type, ptr *types.Pointer) error { + receiveType := types.TypeString(t, noQualifier) + + funcName := "Clone" + printableTypeName(t) + c.addFunc(funcName, + jen.Func().Id(funcName).Call(jen.Id("n").Id(receiveType)).Id(receiveType).Block( + ifNilReturnNil("n"), + jen.Id("out").Op(":=").Add(c.readValueOfType(ptr.Elem(), jen.Op("*").Id("n"))), + jen.Return(jen.Op("&").Id("out")), + )) + + return nil +} + +func (c *cloneGen) createFile(pkgName string) (string, *jen.File) { + out := jen.NewFile(pkgName) + out.HeaderComment(licenseFileHeader) + out.HeaderComment("Code generated by ASTHelperGen. 
DO NOT EDIT.") + alreadyDone := map[string]bool{} + for len(c.todo) > 0 { + t := c.todo[0] + underlying := t.Underlying() + typeName := printableTypeName(t) + c.todo = c.todo[1:] + + if alreadyDone[typeName] { + continue + } + + if c.tryInterface(underlying, t) || + c.trySlice(underlying, t) || + c.tryStruct(underlying, t) || + c.tryPtr(underlying, t) { + alreadyDone[typeName] = true + continue + } + + log.Errorf("don't know how to handle %s %T", typeName, underlying) + } + + for _, method := range c.methods { + out.Add(method) + } + + return "clone.go", out +} + +func ifNilReturnNil(id string) *jen.Statement { + return jen.If(jen.Id(id).Op("==").Nil()).Block(jen.Return(jen.Nil())) +} + +func isBasic(t types.Type) bool { + _, x := t.Underlying().(*types.Basic) + return x +} + +func (c *cloneGen) tryStruct(underlying, t types.Type) bool { + _, ok := underlying.(*types.Struct) + if !ok { + return false + } + + err := c.makeStructCloneMethod(t) + if err != nil { + panic(err) // todo + } + return true +} +func (c *cloneGen) tryPtr(underlying, t types.Type) bool { + ptr, ok := underlying.(*types.Pointer) + if !ok { + return false + } + + if strct, isStruct := ptr.Elem().Underlying().(*types.Struct); isStruct { + c.makePtrToStructCloneMethod(t, strct) + return true + } + + err := c.makePtrCloneMethod(t, ptr) + if err != nil { + panic(err) // todo + } + return true +} + +func (c *cloneGen) makePtrToStructCloneMethod(t types.Type, strct *types.Struct) { + receiveType := types.TypeString(t, noQualifier) + funcName := "Clone" + printableTypeName(t) + + //func CloneRefOfType(n *Type) *Type + funcDeclaration := jen.Func().Id(funcName).Call(jen.Id("n").Id(receiveType)).Id(receiveType) + + if receiveType == c.exceptType { + c.addFunc(funcName, funcDeclaration.Block( + jen.Return(jen.Id("n")), + )) + return + } + + var fields []jen.Code + for i := 0; i < strct.NumFields(); i++ { + field := strct.Field(i) + if isBasic(field.Type()) || field.Name() == "_" { + continue + } + // out.Field = CloneType(n.Field) + fields = append(fields, + jen.Id("out").Dot(field.Name()).Op("=").Add(c.readValueOfType(field.Type(), jen.Id("n").Dot(field.Name())))) + } + + stmts := []jen.Code{ + // if n == nil { return nil } + ifNilReturnNil("n"), + // out := *n + jen.Id("out").Op(":=").Op("*").Id("n"), + } + + // handle all fields with CloneAble types + stmts = append(stmts, fields...) 
+ + stmts = append(stmts, + // return &out + jen.Return(jen.Op("&").Id("out")), + ) + + c.addFunc(funcName, + funcDeclaration.Block(stmts...), + ) +} + +func (c *cloneGen) tryInterface(underlying, t types.Type) bool { + iface, ok := underlying.(*types.Interface) + if !ok { + return false + } + + err := c.makeInterfaceCloneMethod(t, iface) + if err != nil { + panic(err) // todo + } + return true +} + +func (c *cloneGen) trySlice(underlying, t types.Type) bool { + slice, ok := underlying.(*types.Slice) + if !ok { + return false + } + + err := c.makeSliceCloneMethod(t, slice) + if err != nil { + panic(err) // todo + } + return true +} + +// printableTypeName returns a string that can be used as a valid golang identifier +func printableTypeName(t types.Type) string { + switch t := t.(type) { + case *types.Pointer: + return "RefOf" + printableTypeName(t.Elem()) + case *types.Slice: + return "SliceOf" + printableTypeName(t.Elem()) + case *types.Named: + return t.Obj().Name() + case *types.Basic: + return t.Name() + case *types.Interface: + return t.String() + default: + panic(fmt.Sprintf("unknown type %T %v", t, t)) + } +} diff --git a/go/tools/asthelpergen/integration/clone.go b/go/tools/asthelpergen/integration/clone.go new file mode 100644 index 00000000000..84d1c2b7537 --- /dev/null +++ b/go/tools/asthelpergen/integration/clone.go @@ -0,0 +1,213 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by ASTHelperGen. DO NOT EDIT. + +package integration + +// CloneAST creates a deep clone of the input. +func CloneAST(in AST) AST { + if in == nil { + return nil + } + switch in := in.(type) { + case BasicType: + return in + case Bytes: + return CloneBytes(in) + case InterfaceContainer: + return CloneInterfaceContainer(in) + case InterfaceSlice: + return CloneInterfaceSlice(in) + case *Leaf: + return CloneRefOfLeaf(in) + case LeafSlice: + return CloneLeafSlice(in) + case *NoCloneType: + return CloneRefOfNoCloneType(in) + case *RefContainer: + return CloneRefOfRefContainer(in) + case *RefSliceContainer: + return CloneRefOfRefSliceContainer(in) + case *SubImpl: + return CloneRefOfSubImpl(in) + case ValueContainer: + return CloneValueContainer(in) + case ValueSliceContainer: + return CloneValueSliceContainer(in) + default: + // this should never happen + return nil + } +} + +// CloneSubIface creates a deep clone of the input. +func CloneSubIface(in SubIface) SubIface { + if in == nil { + return nil + } + switch in := in.(type) { + case *SubImpl: + return CloneRefOfSubImpl(in) + default: + // this should never happen + return nil + } +} + +// CloneBytes creates a deep clone of the input. +func CloneBytes(n Bytes) Bytes { + res := make(Bytes, 0, len(n)) + copy(res, n) + return res +} + +// CloneInterfaceContainer creates a deep clone of the input. +func CloneInterfaceContainer(n InterfaceContainer) InterfaceContainer { + return *CloneRefOfInterfaceContainer(&n) +} + +// CloneInterfaceSlice creates a deep clone of the input. 
+func CloneInterfaceSlice(n InterfaceSlice) InterfaceSlice { + res := make(InterfaceSlice, 0, len(n)) + for _, x := range n { + res = append(res, CloneAST(x)) + } + return res +} + +// CloneRefOfLeaf creates a deep clone of the input. +func CloneRefOfLeaf(n *Leaf) *Leaf { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneLeafSlice creates a deep clone of the input. +func CloneLeafSlice(n LeafSlice) LeafSlice { + res := make(LeafSlice, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfLeaf(x)) + } + return res +} + +// CloneRefOfNoCloneType creates a deep clone of the input. +func CloneRefOfNoCloneType(n *NoCloneType) *NoCloneType { + return n +} + +// CloneRefOfRefContainer creates a deep clone of the input. +func CloneRefOfRefContainer(n *RefContainer) *RefContainer { + if n == nil { + return nil + } + out := *n + out.ASTType = CloneAST(n.ASTType) + out.ASTImplementationType = CloneRefOfLeaf(n.ASTImplementationType) + return &out +} + +// CloneRefOfRefSliceContainer creates a deep clone of the input. +func CloneRefOfRefSliceContainer(n *RefSliceContainer) *RefSliceContainer { + if n == nil { + return nil + } + out := *n + out.ASTElements = CloneSliceOfAST(n.ASTElements) + out.NotASTElements = CloneSliceOfint(n.NotASTElements) + out.ASTImplementationElements = CloneSliceOfRefOfLeaf(n.ASTImplementationElements) + return &out +} + +// CloneRefOfSubImpl creates a deep clone of the input. +func CloneRefOfSubImpl(n *SubImpl) *SubImpl { + if n == nil { + return nil + } + out := *n + out.inner = CloneSubIface(n.inner) + return &out +} + +// CloneValueContainer creates a deep clone of the input. +func CloneValueContainer(n ValueContainer) ValueContainer { + return *CloneRefOfValueContainer(&n) +} + +// CloneValueSliceContainer creates a deep clone of the input. +func CloneValueSliceContainer(n ValueSliceContainer) ValueSliceContainer { + return *CloneRefOfValueSliceContainer(&n) +} + +// CloneRefOfInterfaceContainer creates a deep clone of the input. +func CloneRefOfInterfaceContainer(n *InterfaceContainer) *InterfaceContainer { + if n == nil { + return nil + } + out := *n + out.v = n.v + return &out +} + +// CloneSliceOfAST creates a deep clone of the input. +func CloneSliceOfAST(n []AST) []AST { + res := make([]AST, 0, len(n)) + for _, x := range n { + res = append(res, CloneAST(x)) + } + return res +} + +// CloneSliceOfint creates a deep clone of the input. +func CloneSliceOfint(n []int) []int { + res := make([]int, 0, len(n)) + copy(res, n) + return res +} + +// CloneSliceOfRefOfLeaf creates a deep clone of the input. +func CloneSliceOfRefOfLeaf(n []*Leaf) []*Leaf { + res := make([]*Leaf, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfLeaf(x)) + } + return res +} + +// CloneRefOfValueContainer creates a deep clone of the input. +func CloneRefOfValueContainer(n *ValueContainer) *ValueContainer { + if n == nil { + return nil + } + out := *n + out.ASTType = CloneAST(n.ASTType) + out.ASTImplementationType = CloneRefOfLeaf(n.ASTImplementationType) + return &out +} + +// CloneRefOfValueSliceContainer creates a deep clone of the input. 
+func CloneRefOfValueSliceContainer(n *ValueSliceContainer) *ValueSliceContainer { + if n == nil { + return nil + } + out := *n + out.ASTElements = CloneSliceOfAST(n.ASTElements) + out.NotASTElements = CloneSliceOfint(n.NotASTElements) + out.ASTImplementationElements = CloneSliceOfRefOfLeaf(n.ASTImplementationElements) + return &out +} diff --git a/go/tools/asthelpergen/integration/integration_clone_test.go b/go/tools/asthelpergen/integration/integration_clone_test.go new file mode 100644 index 00000000000..f7adf9e7eef --- /dev/null +++ b/go/tools/asthelpergen/integration/integration_clone_test.go @@ -0,0 +1,66 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCloneLeaf(t *testing.T) { + leaf1 := &Leaf{1} + clone := CloneRefOfLeaf(leaf1) + assert.Equal(t, leaf1, clone) + leaf1.v = 5 + assert.NotEqual(t, leaf1, clone) +} + +func TestClone2(t *testing.T) { + container := &RefContainer{ + ASTType: &RefContainer{}, + NotASTType: 0, + ASTImplementationType: &Leaf{2}, + } + clone := CloneRefOfRefContainer(container) + assert.Equal(t, container, clone) + container.ASTImplementationType.v = 5 + assert.NotEqual(t, container, clone) +} + +func TestTypeException(t *testing.T) { + l1 := &Leaf{1} + nc := &NoCloneType{1} + + slice := InterfaceSlice{ + l1, + nc, + } + + clone := CloneAST(slice) + + // change the original values + l1.v = 99 + nc.v = 99 + + expected := InterfaceSlice{ + &Leaf{1}, // the change is not seen + &NoCloneType{99}, // since this type is not cloned, we do see the change + } + + assert.Equal(t, expected, clone) +} diff --git a/go/tools/asthelpergen/integration/integration_rewriter_test.go b/go/tools/asthelpergen/integration/integration_rewriter_test.go new file mode 100644 index 00000000000..1189974a79c --- /dev/null +++ b/go/tools/asthelpergen/integration/integration_rewriter_test.go @@ -0,0 +1,389 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestVisitRefContainer(t *testing.T) { + leaf1 := &Leaf{1} + leaf2 := &Leaf{2} + container := &RefContainer{ASTType: leaf1, ASTImplementationType: leaf2} + containerContainer := &RefContainer{ASTType: container} + + tv := &testVisitor{} + + Rewrite(containerContainer, tv.pre, tv.post) + + expected := []step{ + Pre{containerContainer}, + Pre{container}, + Pre{leaf1}, + Post{leaf1}, + Pre{leaf2}, + Post{leaf2}, + Post{container}, + Post{containerContainer}, + } + tv.assertEquals(t, expected) +} + +func TestVisitValueContainer(t *testing.T) { + leaf1 := &Leaf{1} + leaf2 := &Leaf{2} + container := ValueContainer{ASTType: leaf1, ASTImplementationType: leaf2} + containerContainer := ValueContainer{ASTType: container} + + tv := &testVisitor{} + + Rewrite(containerContainer, tv.pre, tv.post) + + expected := []step{ + Pre{containerContainer}, + Pre{container}, + Pre{leaf1}, + Post{leaf1}, + Pre{leaf2}, + Post{leaf2}, + Post{container}, + Post{containerContainer}, + } + tv.assertEquals(t, expected) +} + +func TestVisitRefSliceContainer(t *testing.T) { + leaf1 := &Leaf{1} + leaf2 := &Leaf{2} + leaf3 := &Leaf{3} + leaf4 := &Leaf{4} + container := &RefSliceContainer{ASTElements: []AST{leaf1, leaf2}, ASTImplementationElements: []*Leaf{leaf3, leaf4}} + containerContainer := &RefSliceContainer{ASTElements: []AST{container}} + + tv := &testVisitor{} + + Rewrite(containerContainer, tv.pre, tv.post) + + tv.assertEquals(t, []step{ + Pre{containerContainer}, + Pre{container}, + Pre{leaf1}, + Post{leaf1}, + Pre{leaf2}, + Post{leaf2}, + Pre{leaf3}, + Post{leaf3}, + Pre{leaf4}, + Post{leaf4}, + Post{container}, + Post{containerContainer}, + }) +} + +func TestVisitValueSliceContainer(t *testing.T) { + leaf1 := &Leaf{1} + leaf2 := &Leaf{2} + leaf3 := &Leaf{3} + leaf4 := &Leaf{4} + container := ValueSliceContainer{ASTElements: []AST{leaf1, leaf2}, ASTImplementationElements: []*Leaf{leaf3, leaf4}} + containerContainer := ValueSliceContainer{ASTElements: []AST{container}} + + tv := &testVisitor{} + + Rewrite(containerContainer, tv.pre, tv.post) + + tv.assertEquals(t, []step{ + Pre{containerContainer}, + Pre{container}, + Pre{leaf1}, + Post{leaf1}, + Pre{leaf2}, + Post{leaf2}, + Pre{leaf3}, + Post{leaf3}, + Pre{leaf4}, + Post{leaf4}, + Post{container}, + Post{containerContainer}, + }) +} + +func TestVisitInterfaceSlice(t *testing.T) { + leaf1 := &Leaf{2} + astType := &RefContainer{NotASTType: 12} + implementationType := &Leaf{2} + + leaf2 := &Leaf{3} + refContainer := &RefContainer{ + ASTType: astType, + ASTImplementationType: implementationType, + } + ast := InterfaceSlice{ + refContainer, + leaf1, + leaf2, + } + + tv := &testVisitor{} + + Rewrite(ast, tv.pre, tv.post) + + tv.assertEquals(t, []step{ + Pre{ast}, + Pre{refContainer}, + Pre{astType}, + Post{astType}, + Pre{implementationType}, + Post{implementationType}, + Post{refContainer}, + Pre{leaf1}, + Post{leaf1}, + Pre{leaf2}, + Post{leaf2}, + Post{ast}, + }) +} + +func TestVisitRefContainerReplace(t *testing.T) { + ast := &RefContainer{ + ASTType: &RefContainer{NotASTType: 12}, + ASTImplementationType: &Leaf{2}, + } + + // rewrite field of type AST + Rewrite(ast, func(cursor *Cursor) bool { + leaf, ok := cursor.node.(*RefContainer) + if ok && leaf.NotASTType == 12 { + cursor.Replace(&Leaf{99}) + } + return true + }, nil) + + assert.Equal(t, &RefContainer{ + ASTType: &Leaf{99}, + ASTImplementationType: &Leaf{2}, + }, ast) + + 
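+	// second pass: rewrite the concrete *Leaf field as well, via the rewriteLeaf helper defined further down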
Rewrite(ast, rewriteLeaf(2, 55), nil) + + assert.Equal(t, &RefContainer{ + ASTType: &Leaf{99}, + ASTImplementationType: &Leaf{55}, + }, ast) +} + +func TestVisitValueContainerReplace(t *testing.T) { + ast := ValueContainer{ + ASTType: ValueContainer{NotASTType: 12}, + ASTImplementationType: &Leaf{2}, + } + + defer func() { + if r := recover(); r != nil { + assert.Contains(t, r, "ValueContainer ASTType") + } + }() + + Rewrite(ast, func(cursor *Cursor) bool { + leaf, ok := cursor.node.(ValueContainer) + if ok && leaf.NotASTType == 12 { + cursor.Replace(&Leaf{99}) + } + return true + }, nil) + + t.Fatalf("should not get here") +} + +func TestVisitValueContainerReplace2(t *testing.T) { + ast := ValueContainer{ + ASTType: ValueContainer{NotASTType: 12}, + ASTImplementationType: &Leaf{2}, + } + + defer func() { + if r := recover(); r != nil { + assert.Contains(t, r, "ValueContainer ASTImplementationType") + } + }() + + Rewrite(ast, rewriteLeaf(2, 10), nil) + + t.Fatalf("should not get here") +} + +func rewriteLeaf(from, to int) func(*Cursor) bool { + return func(cursor *Cursor) bool { + leaf, ok := cursor.node.(*Leaf) + if ok && leaf.v == from { + cursor.Replace(&Leaf{to}) + } + return true + } +} + +func TestRefSliceContainerReplace(t *testing.T) { + ast := &RefSliceContainer{ + ASTElements: []AST{&Leaf{1}, &Leaf{2}}, + ASTImplementationElements: []*Leaf{{3}, {4}}, + } + + Rewrite(ast, rewriteLeaf(2, 42), nil) + + assert.Equal(t, &RefSliceContainer{ + ASTElements: []AST{&Leaf{1}, &Leaf{42}}, + ASTImplementationElements: []*Leaf{{3}, {4}}, + }, ast) + + Rewrite(ast, rewriteLeaf(3, 88), nil) + + assert.Equal(t, &RefSliceContainer{ + ASTElements: []AST{&Leaf{1}, &Leaf{42}}, + ASTImplementationElements: []*Leaf{{88}, {4}}, + }, ast) +} + +type step interface { + String() string +} +type Pre struct { + el AST +} + +func (r Pre) String() string { + return fmt.Sprintf("Pre(%s)", r.el.String()) +} +func (r Post) String() string { + return fmt.Sprintf("Pre(%s)", r.el.String()) +} + +type Post struct { + el AST +} + +type testVisitor struct { + walk []step +} + +func (tv *testVisitor) pre(cursor *Cursor) bool { + tv.walk = append(tv.walk, Pre{el: cursor.Node()}) + return true +} +func (tv *testVisitor) post(cursor *Cursor) bool { + tv.walk = append(tv.walk, Post{el: cursor.Node()}) + return true +} +func (tv *testVisitor) assertEquals(t *testing.T, expected []step) { + t.Helper() + var lines []string + error := false + expectedSize := len(expected) + for i, step := range tv.walk { + if expectedSize <= i { + t.Errorf("❌️ - Expected less elements %v", tv.walk[i:]) + break + } else { + e := expected[i] + if reflect.DeepEqual(e, step) { + a := "✔️ - " + e.String() + if error { + fmt.Println(a) + } else { + lines = append(lines, a) + } + } else { + if !error { + // first error we see. + error = true + for _, line := range lines { + fmt.Println(line) + } + } + t.Errorf("❌️ - Expected: %s Got: %s\n", e.String(), step.String()) + } + } + } + walkSize := len(tv.walk) + if expectedSize > walkSize { + t.Errorf("❌️ - Expected more elements %v", expected[walkSize:]) + } + +} + +// below follows two different ways of creating the replacement method for slices, and benchmark +// between them. 
Diff seems to be very small, so I'll use the most readable form +type replaceA int + +func (r *replaceA) replace(newNode, container AST) { + container.(InterfaceSlice)[int(*r)] = newNode.(AST) +} + +func (r *replaceA) inc() { + *r++ +} + +func replaceB(idx int) func(AST, AST) { + return func(newNode, container AST) { + container.(InterfaceSlice)[idx] = newNode.(AST) + } +} + +func BenchmarkSliceReplacerA(b *testing.B) { + islice := make(InterfaceSlice, 20) + for i := range islice { + islice[i] = &Leaf{i} + } + a := &application{ + pre: func(c *Cursor) bool { + return true + }, + post: nil, + cursor: Cursor{}, + } + + for i := 0; i < b.N; i++ { + replacer := replaceA(0) + for _, el := range islice { + a.apply(islice, el, replacer.replace) + replacer.inc() + } + } +} + +func BenchmarkSliceReplacerB(b *testing.B) { + islice := make(InterfaceSlice, 20) + for i := range islice { + islice[i] = &Leaf{i} + } + a := &application{ + pre: func(c *Cursor) bool { + return true + }, + post: nil, + cursor: Cursor{}, + } + + for i := 0; i < b.N; i++ { + for x, el := range islice { + a.apply(islice, el, replaceB(x)) + } + } +} diff --git a/go/tools/asthelpergen/integration/rewriter.go b/go/tools/asthelpergen/integration/rewriter.go new file mode 100644 index 00000000000..300ccef16ea --- /dev/null +++ b/go/tools/asthelpergen/integration/rewriter.go @@ -0,0 +1,102 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by ASTHelperGen. DO NOT EDIT. 
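+//
+// The generated apply function below switches on the dynamic type of the node and recurses
+// into every AST-typed field or slice element, handing each child a replacer closure (or a
+// replacePanic for value-typed containers) so that pre/post visitors can swap nodes in place.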
+ +package integration + +func (a *application) apply(parent, node AST, replacer replacerFunc) { + if node == nil || isNilValue(node) { + return + } + saved := a.cursor + a.cursor.replacer = replacer + a.cursor.node = node + a.cursor.parent = parent + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + switch n := node.(type) { + case Bytes: + case InterfaceContainer: + case InterfaceSlice: + for x, el := range n { + a.apply(node, el, func(idx int) func(AST, AST) { + return func(newNode, container AST) { + container.(InterfaceSlice)[idx] = newNode.(AST) + } + }(x)) + } + case *Leaf: + case LeafSlice: + for x, el := range n { + a.apply(node, el, func(idx int) func(AST, AST) { + return func(newNode, container AST) { + container.(LeafSlice)[idx] = newNode.(*Leaf) + } + }(x)) + } + case *NoCloneType: + case *RefContainer: + a.apply(node, n.ASTType, func(newNode, parent AST) { + parent.(*RefContainer).ASTType = newNode.(AST) + }) + a.apply(node, n.ASTImplementationType, func(newNode, parent AST) { + parent.(*RefContainer).ASTImplementationType = newNode.(*Leaf) + }) + case *RefSliceContainer: + for x, el := range n.ASTElements { + a.apply(node, el, func(idx int) func(AST, AST) { + return func(newNode, container AST) { + container.(*RefSliceContainer).ASTElements[idx] = newNode.(AST) + } + }(x)) + } + for x, el := range n.ASTImplementationElements { + a.apply(node, el, func(idx int) func(AST, AST) { + return func(newNode, container AST) { + container.(*RefSliceContainer).ASTImplementationElements[idx] = newNode.(*Leaf) + } + }(x)) + } + case *SubImpl: + a.apply(node, n.inner, func(newNode, parent AST) { + parent.(*SubImpl).inner = newNode.(SubIface) + }) + case ValueContainer: + a.apply(node, n.ASTType, replacePanic("ValueContainer ASTType")) + a.apply(node, n.ASTImplementationType, replacePanic("ValueContainer ASTImplementationType")) + case ValueSliceContainer: + for x, el := range n.ASTElements { + a.apply(node, el, func(idx int) func(AST, AST) { + return func(newNode, container AST) { + container.(ValueSliceContainer).ASTElements[idx] = newNode.(AST) + } + }(x)) + } + for x, el := range n.ASTImplementationElements { + a.apply(node, el, func(idx int) func(AST, AST) { + return func(newNode, container AST) { + container.(ValueSliceContainer).ASTImplementationElements[idx] = newNode.(*Leaf) + } + }(x)) + } + } + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + a.cursor = saved +} diff --git a/go/tools/asthelpergen/integration/test_helpers.go b/go/tools/asthelpergen/integration/test_helpers.go new file mode 100644 index 00000000000..3a2da19be80 --- /dev/null +++ b/go/tools/asthelpergen/integration/test_helpers.go @@ -0,0 +1,97 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "reflect" + "strings" +) + +// ast type helpers + +func sliceStringAST(els ...AST) string { + result := make([]string, len(els)) + for i, el := range els { + result[i] = el.String() + } + return strings.Join(result, ", ") +} +func sliceStringLeaf(els ...*Leaf) string { + result := make([]string, len(els)) + for i, el := range els { + result[i] = el.String() + } + return strings.Join(result, ", ") +} + +// the methods below are what the generated code expected to be there in the package + +type application struct { + pre, post ApplyFunc + cursor Cursor +} + +type ApplyFunc func(*Cursor) bool + +type Cursor struct { + parent AST + replacer replacerFunc + node AST +} + +// Node returns the current Node. +func (c *Cursor) Node() AST { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() AST { return c.parent } + +// Replace replaces the current node in the parent field with this new object. The use needs to make sure to not +// replace the object with something of the wrong type, or the visitor will panic. +func (c *Cursor) Replace(newNode AST) { + c.replacer(newNode, c.parent) + c.node = newNode +} + +type replacerFunc func(newNode, parent AST) + +func isNilValue(i interface{}) bool { + valueOf := reflect.ValueOf(i) + kind := valueOf.Kind() + isNullable := kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice + return isNullable && valueOf.IsNil() +} + +var abort = new(int) // singleton, to signal termination of Apply + +func Rewrite(node AST, pre, post ApplyFunc) (result AST) { + parent := &struct{ AST }{node} + + a := &application{ + pre: pre, + post: post, + cursor: Cursor{}, + } + + a.apply(parent.AST, node, nil) + return parent.AST +} + +func replacePanic(msg string) func(newNode, parent AST) { + return func(newNode, parent AST) { + panic("Tried replacing a field of a value type. This is not supported. " + msg) + } +} diff --git a/go/tools/asthelpergen/integration/types.go b/go/tools/asthelpergen/integration/types.go new file mode 100644 index 00000000000..1a89d78c3a2 --- /dev/null +++ b/go/tools/asthelpergen/integration/types.go @@ -0,0 +1,172 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//nolint +package integration + +import ( + "fmt" + "strings" +) + +/* +These types are used to test the rewriter generator against these types. 
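+They deliberately cover the shapes the generators have to handle: interface-typed fields,
+containers held by pointer and by value, slices of interface and of concrete pointer
+elements, and a NoCloneType that is not deep-cloned.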
+To recreate them, just run: + +go run go/tools/asthelpergen -in ./go/tools/asthelpergen/integration -iface vitess.io/vitess/go/tools/asthelpergen/integration.AST +*/ +// AST is the interface all interface types implement +type AST interface { + String() string +} + +// Empty struct impl of the iface +type Leaf struct { + v int +} + +func (l *Leaf) String() string { + if l == nil { + return "nil" + } + return fmt.Sprintf("Leaf(%d)", l.v) +} + +// Container implements the interface ByRef +type RefContainer struct { + ASTType AST + NotASTType int + ASTImplementationType *Leaf +} + +func (r *RefContainer) String() string { + if r == nil { + return "nil" + } + var astType = "" + if r.ASTType == nil { + astType = "nil" + } else { + astType = r.ASTType.String() + } + return fmt.Sprintf("RefContainer{%s, %d, %s}", astType, r.NotASTType, r.ASTImplementationType.String()) +} + +// Container implements the interface ByRef +type RefSliceContainer struct { + ASTElements []AST + NotASTElements []int + ASTImplementationElements []*Leaf +} + +func (r *RefSliceContainer) String() string { + return fmt.Sprintf("RefSliceContainer{%s, %s, %s}", sliceStringAST(r.ASTElements...), "r.NotASTType", sliceStringLeaf(r.ASTImplementationElements...)) +} + +// Container implements the interface ByValue +type ValueContainer struct { + ASTType AST + NotASTType int + ASTImplementationType *Leaf +} + +func (r ValueContainer) String() string { + return fmt.Sprintf("ValueContainer{%s, %d, %s}", r.ASTType.String(), r.NotASTType, r.ASTImplementationType.String()) +} + +// Container implements the interface ByValue +type ValueSliceContainer struct { + ASTElements []AST + NotASTElements []int + ASTImplementationElements []*Leaf +} + +func (r ValueSliceContainer) String() string { + return fmt.Sprintf("ValueSliceContainer{%s, %s, %s}", sliceStringAST(r.ASTElements...), "r.NotASTType", sliceStringLeaf(r.ASTImplementationElements...)) +} + +// We need to support these types - a slice of AST elements can implement the interface +type InterfaceSlice []AST + +func (r InterfaceSlice) String() string { + var elements []string + for _, el := range r { + elements = append(elements, el.String()) + } + + return "[" + strings.Join(elements, ", ") + "]" +} + +// We need to support these types - a slice of AST elements can implement the interface +type Bytes []byte + +func (r Bytes) String() string { + return string(r) +} + +type LeafSlice []*Leaf + +func (r LeafSlice) String() string { + var elements []string + for _, el := range r { + elements = append(elements, el.String()) + } + return strings.Join(elements, ", ") +} + +type BasicType int + +func (r BasicType) String() string { + return fmt.Sprintf("int(%d)", r) +} + +const ( + // these consts are here to try to trick the generator + thisIsNotAType BasicType = 1 + thisIsNotAType2 BasicType = 2 +) + +// We want to support all types that are used as field types, which can include interfaces. 
+// Example would be sqlparser.Expr that implements sqlparser.SQLNode +type SubIface interface { + AST + iface() +} + +type SubImpl struct { + inner SubIface +} + +func (r *SubImpl) String() string { + return "SubImpl" +} +func (r *SubImpl) iface() {} + +type InterfaceContainer struct { + v interface{} +} + +func (r InterfaceContainer) String() string { + return fmt.Sprintf("%v", r.v) +} + +type NoCloneType struct { + v int +} + +func (r *NoCloneType) String() string { + return fmt.Sprintf("NoClone(%d)", r.v) +} diff --git a/go/tools/asthelpergen/rewriter_gen.go b/go/tools/asthelpergen/rewriter_gen.go new file mode 100644 index 00000000000..54a0ca8e093 --- /dev/null +++ b/go/tools/asthelpergen/rewriter_gen.go @@ -0,0 +1,209 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "go/types" + + "github.com/dave/jennifer/jen" +) + +type rewriterGen struct { + cases []jen.Code + interestingType func(types.Type) bool + ifaceName string +} + +func newRewriterGen(f func(types.Type) bool, name string) *rewriterGen { + return &rewriterGen{interestingType: f, ifaceName: name} +} + +var noQualifier = func(p *types.Package) string { + return "" +} + +func (r *rewriterGen) visitStruct(t types.Type, stroct *types.Struct) error { + typeString := types.TypeString(t, noQualifier) + typeName := printableTypeName(t) + var caseStmts []jen.Code + for i := 0; i < stroct.NumFields(); i++ { + field := stroct.Field(i) + if r.interestingType(field.Type()) { + if _, ok := t.(*types.Pointer); ok { + function := r.createReplaceMethod(typeString, field) + caseStmts = append(caseStmts, caseStmtFor(field, function)) + } else { + caseStmts = append(caseStmts, casePanicStmtFor(field, typeName+" "+field.Name())) + } + } + sliceT, ok := field.Type().(*types.Slice) + if ok && r.interestingType(sliceT.Elem()) { // we have a field containing a slice of interesting elements + function := r.createReplacementMethod(t, sliceT.Elem(), jen.Dot(field.Name())) + caseStmts = append(caseStmts, caseStmtForSliceField(field, function)) + } + } + r.cases = append(r.cases, jen.Case(jen.Id(typeString)).Block(caseStmts...)) + return nil +} + +func (r *rewriterGen) visitInterface(types.Type, *types.Interface) error { + return nil // rewriter doesn't deal with interfaces +} + +func (r *rewriterGen) visitSlice(t types.Type, slice *types.Slice) error { + typeString := types.TypeString(t, noQualifier) + + var stmts []jen.Code + if r.interestingType(slice.Elem()) { + function := r.createReplacementMethod(t, slice.Elem(), jen.Empty()) + stmts = append(stmts, caseStmtForSlice(function)) + } + r.cases = append(r.cases, jen.Case(jen.Id(typeString)).Block(stmts...)) + return nil +} + +func caseStmtFor(field *types.Var, expr jen.Code) *jen.Statement { + // a.apply(node, node.Field, replacerMethod) + return jen.Id("a").Dot("apply").Call(jen.Id("node"), jen.Id("n").Dot(field.Name()), expr) +} + +func casePanicStmtFor(field *types.Var, name string) *jen.Statement { + return 
jen.Id("a").Dot("apply").Call(jen.Id("node"), jen.Id("n").Dot(field.Name()), jen.Id("replacePanic").Call(jen.Lit(name))) +} + +func caseStmtForSlice(function *jen.Statement) jen.Code { + return jen.For(jen.List(jen.Op("x"), jen.Id("el"))).Op(":=").Range().Id("n").Block( + jen.Id("a").Dot("apply").Call( + jen.Id("node"), + jen.Id("el"), + function, + ), + ) +} + +func caseStmtForSliceField(field *types.Var, function *jen.Statement) jen.Code { + //for x, el := range n { + return jen.For(jen.List(jen.Op("x"), jen.Id("el"))).Op(":=").Range().Id("n").Dot(field.Name()).Block( + jen.Id("a").Dot("apply").Call( + // a.apply(node, el, replaceInterfaceSlice(x)) + jen.Id("node"), + jen.Id("el"), + function, + ), + ) +} + +func (r *rewriterGen) structCase(name string, stroct *types.Struct) (jen.Code, error) { + var stmts []jen.Code + for i := 0; i < stroct.NumFields(); i++ { + field := stroct.Field(i) + if r.interestingType(field.Type()) { + stmts = append(stmts, jen.Id("a").Dot("apply").Call(jen.Id("node"), jen.Id("n").Dot(field.Name()), jen.Nil())) + } + } + return jen.Case(jen.Op("*").Id(name)).Block(stmts...), nil +} + +func (r *rewriterGen) createReplaceMethod(structType string, field *types.Var) jen.Code { + return jen.Func().Params( + jen.Id("newNode"), + jen.Id("parent").Id(r.ifaceName), + ).Block( + jen.Id("parent").Assert(jen.Id(structType)).Dot(field.Name()).Op("=").Id("newNode").Assert(jen.Id(types.TypeString(field.Type(), noQualifier))), + ) +} + +func (r *rewriterGen) createReplacementMethod(container, elem types.Type, x jen.Code) *jen.Statement { + /* + func replacer(idx int) func(AST, AST) { + return func(newnode, container AST) { + container.(InterfaceSlice)[idx] = newnode.(AST) + } + }(x) + */ + return jen.Func().Params(jen.Id("idx").Int()).Func().Params(jen.List(jen.Id(r.ifaceName), jen.Id(r.ifaceName))).Block( + jen.Return(jen.Func().Params(jen.List(jen.Id("newNode"), jen.Id("container")).Id(r.ifaceName))).Block( + jen.Id("container").Assert(jen.Id(types.TypeString(container, noQualifier))).Add(x).Index(jen.Id("idx")).Op("="). + Id("newNode").Assert(jen.Id(types.TypeString(elem, noQualifier))), + ), + ).Call(jen.Id("x")) +} + +func (r *rewriterGen) createFile(pkgName string) (string, *jen.File) { + out := jen.NewFile(pkgName) + out.HeaderComment(licenseFileHeader) + out.HeaderComment("Code generated by ASTHelperGen. DO NOT EDIT.") + + out.Add( + // func (a *application) apply(parent, node SQLNode, replacer replacerFunc) { + jen.Func().Params( + jen.Id("a").Op("*").Id("application"), + ).Id("apply").Params( + jen.Id("parent"), + jen.Id("node").Id(r.ifaceName), + jen.Id("replacer").Id("replacerFunc"), + ).Block( + /* + if node == nil || isNilValue(node) { + return + } + */ + jen.If( + jen.Id("node").Op("==").Nil().Op("||"). + Id("isNilValue").Call(jen.Id("node"))).Block( + jen.Return(), + ), + /* + saved := a.cursor + a.cursor.replacer = replacer + a.cursor.node = node + a.cursor.parent = parent + */ + jen.Id("saved").Op(":=").Id("a").Dot("cursor"), + jen.Id("a").Dot("cursor").Dot("replacer").Op("=").Id("replacer"), + jen.Id("a").Dot("cursor").Dot("node").Op("=").Id("node"), + jen.Id("a").Dot("cursor").Dot("parent").Op("=").Id("parent"), + jen.If( + jen.Id("a").Dot("pre").Op("!=").Nil().Op("&&"). 
+ Op("!").Id("a").Dot("pre").Call(jen.Op("&").Id("a").Dot("cursor"))).Block( + jen.Id("a").Dot("cursor").Op("=").Id("saved"), + jen.Return(), + ), + + // switch n := node.(type) { + jen.Switch(jen.Id("n").Op(":=").Id("node").Assert(jen.Id("type")).Block( + r.cases..., + )), + + /* + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + */ + jen.If( + jen.Id("a").Dot("post").Op("!=").Nil().Op("&&"). + Op("!").Id("a").Dot("post").Call(jen.Op("&").Id("a").Dot("cursor"))).Block( + jen.Id("panic").Call(jen.Id("abort")), + ), + + // a.cursor = saved + jen.Id("a").Dot("cursor").Op("=").Id("saved"), + ), + ) + + return "rewriter.go", out +} diff --git a/go/tools/sizegen/sizegen.go b/go/tools/sizegen/sizegen.go index 6281cd1485e..a02c2aa69a9 100644 --- a/go/tools/sizegen/sizegen.go +++ b/go/tools/sizegen/sizegen.go @@ -170,14 +170,6 @@ func findImplementations(scope *types.Scope, iff *types.Interface, impl func(typ } } -func (sizegen *sizegen) generateKnownInterface(pkg *types.Package, iff *types.Interface) { - findImplementations(pkg.Scope(), iff, func(tt types.Type) { - if named, ok := tt.(*types.Named); ok { - sizegen.generateKnownType(named) - } - }) -} - func (sizegen *sizegen) finalize() map[string]*jen.File { var complete bool diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go index 6243bbfd679..5d69cff8afd 100644 --- a/go/vt/proto/binlogdata/binlogdata.pb.go +++ b/go/vt/proto/binlogdata/binlogdata.pb.go @@ -834,7 +834,10 @@ type BinlogSource struct { ExternalMysql string `protobuf:"bytes,8,opt,name=external_mysql,json=externalMysql,proto3" json:"external_mysql,omitempty"` // StopAfterCopy specifies if vreplication should be stopped // after copying is done. - StopAfterCopy bool `protobuf:"varint,9,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` + StopAfterCopy bool `protobuf:"varint,9,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` + // ExternalCluster is the name of the mounted cluster which has the source keyspace/db for this workflow + // it is of the type + ExternalCluster string `protobuf:"bytes,10,opt,name=external_cluster,json=externalCluster,proto3" json:"external_cluster,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -936,6 +939,13 @@ func (m *BinlogSource) GetStopAfterCopy() bool { return false } +func (m *BinlogSource) GetExternalCluster() string { + if m != nil { + return m.ExternalCluster + } + return "" +} + // RowChange represents one row change. // If Before is set and not After, it's a delete. // If After is set and not Before, it's an insert. 
@@ -2242,128 +2252,129 @@ func init() { func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_5fd02bcb2e350dad) } var fileDescriptor_5fd02bcb2e350dad = []byte{ - // 1939 bytes of a gzipped FileDescriptorProto + // 1955 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4b, 0x6f, 0x23, 0x59, 0x15, 0xee, 0xf2, 0xdb, 0xa7, 0x1c, 0xa7, 0x72, 0xf3, 0xc0, 0xb4, 0x66, 0xa2, 0x4c, 0x89, 0x99, 0x0e, 0x91, 0x70, 0x06, 0xc3, 0x34, 0x42, 0x62, 0x66, 0xf0, 0xa3, 0x3a, 0xed, 0x8e, 0x1f, 0xe9, - 0xeb, 0xea, 0xf4, 0x68, 0x36, 0xa5, 0x8a, 0x7d, 0x93, 0x14, 0x29, 0xbb, 0xaa, 0xab, 0xae, 0x93, - 0xf1, 0x0f, 0x40, 0x62, 0xcf, 0x86, 0xbf, 0xc0, 0x9a, 0x25, 0xb0, 0x05, 0x96, 0xfc, 0x00, 0x16, - 0xa8, 0x11, 0x2b, 0x7e, 0x01, 0x3b, 0x74, 0x1f, 0xf5, 0x4a, 0xcf, 0x74, 0xd2, 0x23, 0xb1, 0x80, - 0x8d, 0x75, 0xef, 0xb9, 0xe7, 0x9c, 0x7b, 0x5e, 0xdf, 0xa9, 0xe3, 0x0b, 0xda, 0x99, 0xb3, 0x70, - 0xbd, 0x8b, 0x99, 0x4d, 0xed, 0xa6, 0x1f, 0x78, 0xd4, 0x43, 0x90, 0x50, 0x1e, 0xaa, 0xd7, 0x34, - 0xf0, 0xa7, 0xe2, 0xe0, 0xa1, 0xfa, 0x6a, 0x49, 0x82, 0x95, 0xdc, 0xd4, 0xa9, 0xe7, 0x7b, 0x89, - 0x94, 0x3e, 0x84, 0x72, 0xf7, 0xd2, 0x0e, 0x42, 0x42, 0xd1, 0x0e, 0x94, 0xa6, 0xae, 0x43, 0x16, - 0xb4, 0xa1, 0xec, 0x29, 0xfb, 0x45, 0x2c, 0x77, 0x08, 0x41, 0x61, 0xea, 0x2d, 0x16, 0x8d, 0x1c, - 0xa7, 0xf2, 0x35, 0xe3, 0x0d, 0x49, 0x70, 0x4d, 0x82, 0x46, 0x5e, 0xf0, 0x8a, 0x9d, 0xfe, 0xcf, - 0x3c, 0x6c, 0x74, 0xb8, 0x1d, 0x66, 0x60, 0x2f, 0x42, 0x7b, 0x4a, 0x1d, 0x6f, 0x81, 0x8e, 0x00, - 0x42, 0x6a, 0x53, 0x32, 0x27, 0x0b, 0x1a, 0x36, 0x94, 0xbd, 0xfc, 0xbe, 0xda, 0x7a, 0xd4, 0x4c, - 0x79, 0xf0, 0x86, 0x48, 0x73, 0x12, 0xf1, 0xe3, 0x94, 0x28, 0x6a, 0x81, 0x4a, 0xae, 0xc9, 0x82, - 0x5a, 0xd4, 0xbb, 0x22, 0x8b, 0x46, 0x61, 0x4f, 0xd9, 0x57, 0x5b, 0x1b, 0x4d, 0xe1, 0xa0, 0xc1, - 0x4e, 0x4c, 0x76, 0x80, 0x81, 0xc4, 0xeb, 0x87, 0x7f, 0xca, 0x41, 0x35, 0xd6, 0x86, 0x06, 0x50, - 0x99, 0xda, 0x94, 0x5c, 0x78, 0xc1, 0x8a, 0xbb, 0x59, 0x6f, 0x7d, 0x7c, 0x4f, 0x43, 0x9a, 0x5d, - 0x29, 0x87, 0x63, 0x0d, 0xe8, 0x07, 0x50, 0x9e, 0x8a, 0xe8, 0xf1, 0xe8, 0xa8, 0xad, 0xcd, 0xb4, - 0x32, 0x19, 0x58, 0x1c, 0xf1, 0x20, 0x0d, 0xf2, 0xe1, 0x2b, 0x97, 0x87, 0xac, 0x86, 0xd9, 0x52, - 0xff, 0xad, 0x02, 0x95, 0x48, 0x2f, 0xda, 0x84, 0xf5, 0xce, 0xc0, 0x7a, 0x31, 0xc2, 0x46, 0x77, - 0x7c, 0x34, 0xea, 0x7f, 0x69, 0xf4, 0xb4, 0x07, 0xa8, 0x06, 0x95, 0xce, 0xc0, 0xea, 0x18, 0x47, - 0xfd, 0x91, 0xa6, 0xa0, 0x35, 0xa8, 0x76, 0x06, 0x56, 0x77, 0x3c, 0x1c, 0xf6, 0x4d, 0x2d, 0x87, - 0xd6, 0x41, 0xed, 0x0c, 0x2c, 0x3c, 0x1e, 0x0c, 0x3a, 0xed, 0xee, 0xb1, 0x96, 0x47, 0xdb, 0xb0, - 0xd1, 0x19, 0x58, 0xbd, 0xe1, 0xc0, 0xea, 0x19, 0x27, 0xd8, 0xe8, 0xb6, 0x4d, 0xa3, 0xa7, 0x15, - 0x10, 0x40, 0x89, 0x91, 0x7b, 0x03, 0xad, 0x28, 0xd7, 0x13, 0xc3, 0xd4, 0x4a, 0x52, 0x5d, 0x7f, - 0x34, 0x31, 0xb0, 0xa9, 0x95, 0xe5, 0xf6, 0xc5, 0x49, 0xaf, 0x6d, 0x1a, 0x5a, 0x45, 0x6e, 0x7b, - 0xc6, 0xc0, 0x30, 0x0d, 0xad, 0xfa, 0xac, 0x50, 0xc9, 0x69, 0xf9, 0x67, 0x85, 0x4a, 0x5e, 0x2b, - 0xe8, 0xbf, 0x56, 0x60, 0x7b, 0x42, 0x03, 0x62, 0xcf, 0x8f, 0xc9, 0x0a, 0xdb, 0x8b, 0x0b, 0x82, - 0xc9, 0xab, 0x25, 0x09, 0x29, 0x7a, 0x08, 0x15, 0xdf, 0x0b, 0x1d, 0x16, 0x3b, 0x1e, 0xe0, 0x2a, - 0x8e, 0xf7, 0xe8, 0x10, 0xaa, 0x57, 0x64, 0x65, 0x05, 0x8c, 0x5f, 0x06, 0x0c, 0x35, 0xe3, 0x82, - 0x8c, 0x35, 0x55, 0xae, 0xe4, 0x2a, 0x1d, 0xdf, 0xfc, 0xdd, 0xf1, 0xd5, 0xcf, 0x61, 0xe7, 0xb6, - 0x51, 0xa1, 0xef, 0x2d, 0x42, 0x82, 0x06, 0x80, 0x84, 0xa0, 0x45, 0x93, 0xdc, 0x72, 0xfb, 0xd4, - 0xd6, 0xfb, 0x6f, 0x2d, 0x00, 0xbc, 0x71, 0x76, 
0x9b, 0xa4, 0x7f, 0x05, 0x9b, 0xe2, 0x1e, 0xd3, - 0x3e, 0x73, 0x49, 0x78, 0x1f, 0xd7, 0x77, 0xa0, 0x44, 0x39, 0x73, 0x23, 0xb7, 0x97, 0xdf, 0xaf, - 0x62, 0xb9, 0x7b, 0x57, 0x0f, 0x67, 0xb0, 0x95, 0xbd, 0xf9, 0xbf, 0xe2, 0xdf, 0x8f, 0xa1, 0x80, - 0x97, 0x2e, 0x41, 0x5b, 0x50, 0x9c, 0xdb, 0x74, 0x7a, 0x29, 0xbd, 0x11, 0x1b, 0xe6, 0xca, 0xb9, - 0xe3, 0x52, 0x12, 0xf0, 0x14, 0x56, 0xb1, 0xdc, 0xe9, 0xbf, 0x53, 0xa0, 0xf4, 0x84, 0x2f, 0xd1, - 0x47, 0x50, 0x0c, 0x96, 0xcc, 0x59, 0x81, 0x75, 0x2d, 0x6d, 0x01, 0xd3, 0x8c, 0xc5, 0x31, 0xea, - 0x43, 0xfd, 0xdc, 0x21, 0xee, 0x8c, 0x43, 0x77, 0xe8, 0xcd, 0x44, 0x55, 0xd4, 0x5b, 0x1f, 0xa4, - 0x05, 0x84, 0xce, 0xe6, 0x93, 0x0c, 0x23, 0xbe, 0x25, 0xa8, 0x3f, 0x86, 0x7a, 0x96, 0x83, 0xc1, - 0xc9, 0xc0, 0xd8, 0x1a, 0x8f, 0xac, 0x61, 0x7f, 0x32, 0x6c, 0x9b, 0xdd, 0xa7, 0xda, 0x03, 0x8e, - 0x18, 0x63, 0x62, 0x5a, 0xc6, 0x93, 0x27, 0x63, 0x6c, 0x6a, 0x8a, 0xfe, 0xaf, 0x1c, 0xd4, 0x44, - 0x50, 0x26, 0xde, 0x32, 0x98, 0x12, 0x96, 0xc5, 0x2b, 0xb2, 0x0a, 0x7d, 0x7b, 0x4a, 0xa2, 0x2c, - 0x46, 0x7b, 0x16, 0x90, 0xf0, 0xd2, 0x0e, 0x66, 0xd2, 0x73, 0xb1, 0x41, 0x9f, 0x80, 0xca, 0xb3, - 0x49, 0x2d, 0xba, 0xf2, 0x09, 0xcf, 0x63, 0xbd, 0xb5, 0x95, 0x14, 0x36, 0xcf, 0x15, 0x35, 0x57, - 0x3e, 0xc1, 0x40, 0xe3, 0x75, 0x16, 0x0d, 0x85, 0x7b, 0xa0, 0x21, 0xa9, 0xa1, 0x62, 0xa6, 0x86, - 0x0e, 0xe2, 0x84, 0x94, 0xa4, 0x96, 0x37, 0xa2, 0x17, 0x25, 0x09, 0x35, 0xa1, 0xe4, 0x2d, 0xac, - 0xd9, 0xcc, 0x6d, 0x94, 0xb9, 0x99, 0xdf, 0x49, 0xf3, 0x8e, 0x17, 0xbd, 0xde, 0xa0, 0x2d, 0xca, - 0xa2, 0xe8, 0x2d, 0x7a, 0x33, 0x17, 0x7d, 0x08, 0x75, 0xf2, 0x15, 0x25, 0xc1, 0xc2, 0x76, 0xad, - 0xf9, 0x8a, 0x75, 0xaf, 0x0a, 0x77, 0x7d, 0x2d, 0xa2, 0x0e, 0x19, 0x11, 0x7d, 0x04, 0xeb, 0x21, - 0xf5, 0x7c, 0xcb, 0x3e, 0xa7, 0x24, 0xb0, 0xa6, 0x9e, 0xbf, 0x6a, 0x54, 0xf7, 0x94, 0xfd, 0x0a, - 0x5e, 0x63, 0xe4, 0x36, 0xa3, 0x76, 0x3d, 0x7f, 0xa5, 0x3f, 0x87, 0x2a, 0xf6, 0x6e, 0xba, 0x97, - 0xdc, 0x1f, 0x1d, 0x4a, 0x67, 0xe4, 0xdc, 0x0b, 0x88, 0x2c, 0x54, 0x90, 0x8d, 0x1c, 0x7b, 0x37, - 0x58, 0x9e, 0xa0, 0x3d, 0x28, 0x72, 0x9d, 0xb2, 0x5d, 0xa4, 0x59, 0xc4, 0x81, 0x6e, 0x43, 0x05, - 0x7b, 0x37, 0x3c, 0xed, 0xe8, 0x7d, 0x10, 0x01, 0xb6, 0x16, 0xf6, 0x3c, 0xca, 0x5e, 0x95, 0x53, - 0x46, 0xf6, 0x9c, 0xa0, 0xc7, 0xa0, 0x06, 0xde, 0x8d, 0x35, 0xe5, 0xd7, 0x0b, 0x24, 0xaa, 0xad, - 0xed, 0x4c, 0x71, 0x46, 0xc6, 0x61, 0x08, 0xa2, 0x65, 0xa8, 0x3f, 0x07, 0x48, 0x6a, 0xeb, 0xae, - 0x4b, 0xbe, 0xc7, 0xb2, 0x41, 0xdc, 0x59, 0xa4, 0xbf, 0x26, 0x4d, 0xe6, 0x1a, 0xb0, 0x3c, 0xd3, - 0x7f, 0xa5, 0x40, 0x75, 0xc2, 0xaa, 0xe7, 0x88, 0x3a, 0xb3, 0x6f, 0x51, 0x73, 0x08, 0x0a, 0x17, - 0xd4, 0x99, 0xf1, 0x62, 0xab, 0x62, 0xbe, 0x46, 0x9f, 0x44, 0x86, 0xf9, 0xd6, 0x55, 0xd8, 0x28, - 0xf0, 0xdb, 0x33, 0xf9, 0xe5, 0x85, 0x38, 0xb0, 0x43, 0x7a, 0x72, 0x8c, 0x2b, 0x9c, 0xf5, 0xe4, - 0x38, 0xd4, 0x3f, 0x87, 0xe2, 0x29, 0xb7, 0xe2, 0x31, 0xa8, 0x5c, 0xb9, 0xc5, 0xb4, 0x45, 0xd8, - 0xcd, 0x84, 0x27, 0xb6, 0x18, 0x43, 0x18, 0x2d, 0x43, 0xbd, 0x0d, 0x6b, 0xc7, 0xd2, 0x5a, 0xce, - 0xf0, 0xee, 0xee, 0xe8, 0x7f, 0xc8, 0x41, 0xf9, 0x99, 0xb7, 0x64, 0x05, 0x85, 0xea, 0x90, 0x73, - 0x66, 0x5c, 0x2e, 0x8f, 0x73, 0xce, 0x0c, 0xfd, 0x1c, 0xea, 0x73, 0xe7, 0x22, 0xb0, 0x59, 0x59, - 0x0a, 0x84, 0x89, 0x26, 0xf1, 0xdd, 0xb4, 0x65, 0xc3, 0x88, 0x83, 0xc3, 0x6c, 0x6d, 0x9e, 0xde, - 0xa6, 0x80, 0x93, 0xcf, 0x00, 0xe7, 0x43, 0xa8, 0xbb, 0xde, 0xd4, 0x76, 0xad, 0xb8, 0x6d, 0x17, - 0x44, 0x71, 0x73, 0xea, 0x49, 0xd4, 0xbb, 0x6f, 0xc5, 0xa5, 0x78, 0xcf, 0xb8, 0xa0, 0x4f, 0xa1, - 0xe6, 0xdb, 0x01, 0x75, 0xa6, 0x8e, 0x6f, 0xb3, 0xc1, 0xa7, 0xc4, 0x05, 
0x33, 0x66, 0x67, 0xe2, - 0x86, 0x33, 0xec, 0xe8, 0xfb, 0xa0, 0x85, 0xbc, 0x25, 0x59, 0x37, 0x5e, 0x70, 0x75, 0xee, 0x7a, - 0x37, 0x61, 0xa3, 0xcc, 0xed, 0x5f, 0x17, 0xf4, 0x97, 0x11, 0x59, 0xff, 0x7d, 0x1e, 0x4a, 0xa7, - 0xa2, 0x3a, 0x0f, 0xa0, 0xc0, 0x63, 0x24, 0x86, 0x9b, 0x9d, 0xf4, 0x65, 0x82, 0x83, 0x07, 0x88, - 0xf3, 0xa0, 0xf7, 0xa0, 0x4a, 0x9d, 0x39, 0x09, 0xa9, 0x3d, 0xf7, 0x79, 0x50, 0xf3, 0x38, 0x21, - 0x7c, 0x6d, 0x89, 0xbd, 0x07, 0xd5, 0x78, 0x1c, 0x93, 0xc1, 0x4a, 0x08, 0xe8, 0x87, 0x50, 0x65, - 0xf8, 0xe2, 0xc3, 0x57, 0xa3, 0xc8, 0x01, 0xbb, 0x75, 0x0b, 0x5d, 0xdc, 0x04, 0x5c, 0x09, 0x22, - 0xc4, 0xfe, 0x04, 0x54, 0x8e, 0x08, 0x29, 0x24, 0x1a, 0xd8, 0x4e, 0xb6, 0x81, 0x45, 0xc8, 0xc3, - 0x90, 0xf4, 0x7c, 0xf4, 0x08, 0x8a, 0xd7, 0xdc, 0xbc, 0xb2, 0x1c, 0x02, 0xd3, 0x8e, 0xf2, 0x54, - 0x88, 0x73, 0xf6, 0x85, 0xfd, 0x85, 0xa8, 0x2c, 0xde, 0xba, 0x6e, 0x7d, 0x61, 0x65, 0xd1, 0xe1, - 0x88, 0x87, 0xcd, 0x68, 0xb3, 0xb9, 0xcb, 0xbb, 0x57, 0x15, 0xb3, 0x25, 0xfa, 0x00, 0x6a, 0xd3, - 0x65, 0x10, 0xf0, 0xb1, 0xd3, 0x99, 0x93, 0xc6, 0x16, 0x0f, 0x94, 0x2a, 0x69, 0xa6, 0x33, 0x27, - 0xe8, 0x67, 0x50, 0x77, 0xed, 0x90, 0x32, 0xe0, 0x49, 0x47, 0xb6, 0xf9, 0x55, 0x19, 0xf4, 0x09, - 0xe0, 0x09, 0x4f, 0x54, 0x37, 0xd9, 0xe8, 0x97, 0x50, 0x1b, 0x3a, 0x0b, 0x67, 0x6e, 0xbb, 0x1c, - 0xa0, 0x2c, 0xf0, 0xa9, 0xd6, 0xc2, 0xd7, 0xf7, 0xeb, 0x2a, 0x68, 0x17, 0x54, 0x66, 0xc2, 0xd4, - 0x73, 0x97, 0xf3, 0x85, 0xa8, 0xf6, 0x3c, 0xae, 0xfa, 0xc7, 0x5d, 0x41, 0x60, 0x48, 0x95, 0x37, - 0x4d, 0xa6, 0x97, 0x64, 0x6e, 0xa3, 0x8f, 0x63, 0x64, 0x08, 0xb4, 0x37, 0xb2, 0x98, 0x4a, 0x8c, - 0x8a, 0x30, 0xa3, 0xff, 0x39, 0x07, 0xf5, 0x53, 0x31, 0x83, 0x44, 0x73, 0xcf, 0xe7, 0xb0, 0x49, - 0xce, 0xcf, 0xc9, 0x94, 0x3a, 0xd7, 0xc4, 0x9a, 0xda, 0xae, 0x4b, 0x02, 0x4b, 0x22, 0x58, 0x6d, - 0xad, 0x37, 0xc5, 0x7f, 0x91, 0x2e, 0xa7, 0xf7, 0x7b, 0x78, 0x23, 0xe6, 0x95, 0xa4, 0x19, 0x32, - 0x60, 0xd3, 0x99, 0xcf, 0xc9, 0xcc, 0xb1, 0x69, 0x5a, 0x81, 0x68, 0xf9, 0xdb, 0xd2, 0xd3, 0x53, - 0xf3, 0xc8, 0xa6, 0x24, 0x51, 0x13, 0x4b, 0xc4, 0x6a, 0x3e, 0x64, 0xce, 0x04, 0x17, 0xf1, 0x28, - 0xb5, 0x26, 0x25, 0x4d, 0x4e, 0xc4, 0xf2, 0x30, 0x33, 0xa6, 0x15, 0x6e, 0x8d, 0x69, 0xc9, 0xa7, - 0xb4, 0x78, 0xe7, 0xa7, 0xf4, 0x33, 0x58, 0x17, 0xed, 0x36, 0x4a, 0x7d, 0x84, 0xf0, 0x6f, 0xec, - 0xb9, 0x35, 0x9a, 0x6c, 0x42, 0xfd, 0x53, 0x58, 0x8f, 0x03, 0x29, 0xc7, 0xb8, 0x03, 0x28, 0xf1, - 0xf2, 0x89, 0xd2, 0x81, 0xde, 0x84, 0x2f, 0x96, 0x1c, 0xfa, 0x2f, 0x73, 0x80, 0x22, 0x79, 0xef, - 0x26, 0xfc, 0x1f, 0x4d, 0xc6, 0x16, 0x14, 0x39, 0x5d, 0x66, 0x42, 0x6c, 0x58, 0x1c, 0x58, 0x50, - 0xfd, 0xab, 0x38, 0x0d, 0x42, 0xf8, 0x39, 0xfb, 0xc5, 0x24, 0x5c, 0xba, 0x14, 0x4b, 0x0e, 0xfd, - 0x8f, 0x0a, 0x6c, 0x66, 0xe2, 0x20, 0x63, 0x99, 0x20, 0x46, 0x79, 0x0b, 0x62, 0xf6, 0xa1, 0xe2, - 0x5f, 0xbd, 0x05, 0x59, 0xf1, 0xe9, 0xd7, 0xb6, 0xc3, 0x5d, 0x28, 0x04, 0xac, 0x2d, 0x8b, 0x6f, - 0x6d, 0x7a, 0x38, 0xe1, 0x74, 0x36, 0xe1, 0x64, 0xfc, 0xc8, 0x4c, 0x38, 0xd2, 0x7e, 0x07, 0xd4, + 0xeb, 0xea, 0xf4, 0x68, 0x36, 0xa5, 0x4a, 0xf9, 0x26, 0x29, 0x52, 0x76, 0xb9, 0xab, 0xae, 0x93, + 0xf1, 0x0f, 0x40, 0x62, 0x8f, 0x84, 0xf8, 0x0b, 0xac, 0x59, 0x02, 0x5b, 0x60, 0xc9, 0x0f, 0x60, + 0x81, 0x1a, 0xf1, 0x23, 0xd8, 0xa1, 0xfb, 0xa8, 0x57, 0x7a, 0xa6, 0x93, 0x1e, 0x89, 0x05, 0x6c, + 0xac, 0x7b, 0xcf, 0x3d, 0xe7, 0xdc, 0xf3, 0xfa, 0x4e, 0x1d, 0x5f, 0xd0, 0xce, 0xdc, 0xb9, 0xe7, + 0x5f, 0x4c, 0x6d, 0x6a, 0x37, 0x17, 0x81, 0x4f, 0x7d, 0x04, 0x09, 0xe5, 0xa1, 0x7a, 0x4d, 0x83, + 0x85, 0x23, 0x0e, 0x1e, 0xaa, 0xaf, 0x96, 0x24, 0x58, 0xc9, 0x4d, 0x9d, 0xfa, 0x0b, 0x3f, 0x91, + 
0xd2, 0x87, 0x50, 0xee, 0x5e, 0xda, 0x41, 0x48, 0x28, 0xda, 0x81, 0x92, 0xe3, 0xb9, 0x64, 0x4e, + 0x1b, 0xca, 0x9e, 0xb2, 0x5f, 0xc4, 0x72, 0x87, 0x10, 0x14, 0x1c, 0x7f, 0x3e, 0x6f, 0xe4, 0x38, + 0x95, 0xaf, 0x19, 0x6f, 0x48, 0x82, 0x6b, 0x12, 0x34, 0xf2, 0x82, 0x57, 0xec, 0xf4, 0x7f, 0xe5, + 0x61, 0xa3, 0xc3, 0xed, 0x30, 0x03, 0x7b, 0x1e, 0xda, 0x0e, 0x75, 0xfd, 0x39, 0x3a, 0x02, 0x08, + 0xa9, 0x4d, 0xc9, 0x8c, 0xcc, 0x69, 0xd8, 0x50, 0xf6, 0xf2, 0xfb, 0x6a, 0xeb, 0x51, 0x33, 0xe5, + 0xc1, 0x1b, 0x22, 0xcd, 0x49, 0xc4, 0x8f, 0x53, 0xa2, 0xa8, 0x05, 0x2a, 0xb9, 0x26, 0x73, 0x6a, + 0x51, 0xff, 0x8a, 0xcc, 0x1b, 0x85, 0x3d, 0x65, 0x5f, 0x6d, 0x6d, 0x34, 0x85, 0x83, 0x06, 0x3b, + 0x31, 0xd9, 0x01, 0x06, 0x12, 0xaf, 0x1f, 0xfe, 0x39, 0x07, 0xd5, 0x58, 0x1b, 0x1a, 0x40, 0xc5, + 0xb1, 0x29, 0xb9, 0xf0, 0x83, 0x15, 0x77, 0xb3, 0xde, 0xfa, 0xf8, 0x9e, 0x86, 0x34, 0xbb, 0x52, + 0x0e, 0xc7, 0x1a, 0xd0, 0x0f, 0xa0, 0xec, 0x88, 0xe8, 0xf1, 0xe8, 0xa8, 0xad, 0xcd, 0xb4, 0x32, + 0x19, 0x58, 0x1c, 0xf1, 0x20, 0x0d, 0xf2, 0xe1, 0x2b, 0x8f, 0x87, 0xac, 0x86, 0xd9, 0x52, 0xff, + 0x9d, 0x02, 0x95, 0x48, 0x2f, 0xda, 0x84, 0xf5, 0xce, 0xc0, 0x7a, 0x31, 0xc2, 0x46, 0x77, 0x7c, + 0x34, 0xea, 0x7f, 0x69, 0xf4, 0xb4, 0x07, 0xa8, 0x06, 0x95, 0xce, 0xc0, 0xea, 0x18, 0x47, 0xfd, + 0x91, 0xa6, 0xa0, 0x35, 0xa8, 0x76, 0x06, 0x56, 0x77, 0x3c, 0x1c, 0xf6, 0x4d, 0x2d, 0x87, 0xd6, + 0x41, 0xed, 0x0c, 0x2c, 0x3c, 0x1e, 0x0c, 0x3a, 0xed, 0xee, 0xb1, 0x96, 0x47, 0xdb, 0xb0, 0xd1, + 0x19, 0x58, 0xbd, 0xe1, 0xc0, 0xea, 0x19, 0x27, 0xd8, 0xe8, 0xb6, 0x4d, 0xa3, 0xa7, 0x15, 0x10, + 0x40, 0x89, 0x91, 0x7b, 0x03, 0xad, 0x28, 0xd7, 0x13, 0xc3, 0xd4, 0x4a, 0x52, 0x5d, 0x7f, 0x34, + 0x31, 0xb0, 0xa9, 0x95, 0xe5, 0xf6, 0xc5, 0x49, 0xaf, 0x6d, 0x1a, 0x5a, 0x45, 0x6e, 0x7b, 0xc6, + 0xc0, 0x30, 0x0d, 0xad, 0xfa, 0xac, 0x50, 0xc9, 0x69, 0xf9, 0x67, 0x85, 0x4a, 0x5e, 0x2b, 0xe8, + 0xbf, 0x56, 0x60, 0x7b, 0x42, 0x03, 0x62, 0xcf, 0x8e, 0xc9, 0x0a, 0xdb, 0xf3, 0x0b, 0x82, 0xc9, + 0xab, 0x25, 0x09, 0x29, 0x7a, 0x08, 0x95, 0x85, 0x1f, 0xba, 0x2c, 0x76, 0x3c, 0xc0, 0x55, 0x1c, + 0xef, 0xd1, 0x21, 0x54, 0xaf, 0xc8, 0xca, 0x0a, 0x18, 0xbf, 0x0c, 0x18, 0x6a, 0xc6, 0x05, 0x19, + 0x6b, 0xaa, 0x5c, 0xc9, 0x55, 0x3a, 0xbe, 0xf9, 0xbb, 0xe3, 0xab, 0x9f, 0xc3, 0xce, 0x6d, 0xa3, + 0xc2, 0x85, 0x3f, 0x0f, 0x09, 0x1a, 0x00, 0x12, 0x82, 0x16, 0x4d, 0x72, 0xcb, 0xed, 0x53, 0x5b, + 0xef, 0xbf, 0xb5, 0x00, 0xf0, 0xc6, 0xd9, 0x6d, 0x92, 0xfe, 0x15, 0x6c, 0x8a, 0x7b, 0x4c, 0xfb, + 0xcc, 0x23, 0xe1, 0x7d, 0x5c, 0xdf, 0x81, 0x12, 0xe5, 0xcc, 0x8d, 0xdc, 0x5e, 0x7e, 0xbf, 0x8a, + 0xe5, 0xee, 0x5d, 0x3d, 0x9c, 0xc2, 0x56, 0xf6, 0xe6, 0xff, 0x8a, 0x7f, 0x3f, 0x86, 0x02, 0x5e, + 0x7a, 0x04, 0x6d, 0x41, 0x71, 0x66, 0x53, 0xe7, 0x52, 0x7a, 0x23, 0x36, 0xcc, 0x95, 0x73, 0xd7, + 0xa3, 0x24, 0xe0, 0x29, 0xac, 0x62, 0xb9, 0xd3, 0x7f, 0xaf, 0x40, 0xe9, 0x09, 0x5f, 0xa2, 0x8f, + 0xa0, 0x18, 0x2c, 0x99, 0xb3, 0x02, 0xeb, 0x5a, 0xda, 0x02, 0xa6, 0x19, 0x8b, 0x63, 0xd4, 0x87, + 0xfa, 0xb9, 0x4b, 0xbc, 0x29, 0x87, 0xee, 0xd0, 0x9f, 0x8a, 0xaa, 0xa8, 0xb7, 0x3e, 0x48, 0x0b, + 0x08, 0x9d, 0xcd, 0x27, 0x19, 0x46, 0x7c, 0x4b, 0x50, 0x7f, 0x0c, 0xf5, 0x2c, 0x07, 0x83, 0x93, + 0x81, 0xb1, 0x35, 0x1e, 0x59, 0xc3, 0xfe, 0x64, 0xd8, 0x36, 0xbb, 0x4f, 0xb5, 0x07, 0x1c, 0x31, + 0xc6, 0xc4, 0xb4, 0x8c, 0x27, 0x4f, 0xc6, 0xd8, 0xd4, 0x14, 0xfd, 0x37, 0x79, 0xa8, 0x89, 0xa0, + 0x4c, 0xfc, 0x65, 0xe0, 0x10, 0x96, 0xc5, 0x2b, 0xb2, 0x0a, 0x17, 0xb6, 0x43, 0xa2, 0x2c, 0x46, + 0x7b, 0x16, 0x90, 0xf0, 0xd2, 0x0e, 0xa6, 0xd2, 0x73, 0xb1, 0x41, 0x9f, 0x80, 0xca, 0xb3, 0x49, + 0x2d, 0xba, 0x5a, 0x10, 
0x9e, 0xc7, 0x7a, 0x6b, 0x2b, 0x29, 0x6c, 0x9e, 0x2b, 0x6a, 0xae, 0x16, + 0x04, 0x03, 0x8d, 0xd7, 0x59, 0x34, 0x14, 0xee, 0x81, 0x86, 0xa4, 0x86, 0x8a, 0x99, 0x1a, 0x3a, + 0x88, 0x13, 0x52, 0x92, 0x5a, 0xde, 0x88, 0x5e, 0x94, 0x24, 0xd4, 0x84, 0x92, 0x3f, 0xb7, 0xa6, + 0x53, 0xaf, 0x51, 0xe6, 0x66, 0x7e, 0x27, 0xcd, 0x3b, 0x9e, 0xf7, 0x7a, 0x83, 0xb6, 0x28, 0x8b, + 0xa2, 0x3f, 0xef, 0x4d, 0x3d, 0xf4, 0x21, 0xd4, 0xc9, 0x57, 0x94, 0x04, 0x73, 0xdb, 0xb3, 0x66, + 0x2b, 0xd6, 0xbd, 0x2a, 0xdc, 0xf5, 0xb5, 0x88, 0x3a, 0x64, 0x44, 0xf4, 0x11, 0xac, 0x87, 0xd4, + 0x5f, 0x58, 0xf6, 0x39, 0x25, 0x81, 0xe5, 0xf8, 0x8b, 0x55, 0xa3, 0xba, 0xa7, 0xec, 0x57, 0xf0, + 0x1a, 0x23, 0xb7, 0x19, 0xb5, 0xeb, 0x2f, 0x56, 0xe8, 0xfb, 0xa0, 0xc5, 0xea, 0x1c, 0x6f, 0x19, + 0x32, 0xa3, 0x81, 0x2b, 0x5c, 0x8f, 0xe8, 0x5d, 0x41, 0xd6, 0x9f, 0x43, 0x15, 0xfb, 0x37, 0xdd, + 0x4b, 0xee, 0xba, 0x0e, 0xa5, 0x33, 0x72, 0xee, 0x07, 0x44, 0xd6, 0x34, 0xc8, 0x9e, 0x8f, 0xfd, + 0x1b, 0x2c, 0x4f, 0xd0, 0x1e, 0x14, 0xf9, 0xf5, 0xb2, 0xb3, 0xa4, 0x59, 0xc4, 0x81, 0x6e, 0x43, + 0x05, 0xfb, 0x37, 0xbc, 0x42, 0xd0, 0xfb, 0x20, 0x72, 0x61, 0xcd, 0xed, 0x59, 0x94, 0xe8, 0x2a, + 0xa7, 0x8c, 0xec, 0x19, 0x41, 0x8f, 0x41, 0x0d, 0xfc, 0x1b, 0xcb, 0xe1, 0xd7, 0x0b, 0xd0, 0xaa, + 0xad, 0xed, 0x4c, 0x1d, 0x47, 0xc6, 0x61, 0x08, 0xa2, 0x65, 0xa8, 0x3f, 0x07, 0x48, 0xca, 0xf0, + 0xae, 0x4b, 0xbe, 0xc7, 0x12, 0x47, 0xbc, 0x69, 0xa4, 0xbf, 0x26, 0x4d, 0xe6, 0x1a, 0xb0, 0x3c, + 0xd3, 0x7f, 0xa5, 0x40, 0x75, 0xc2, 0x0a, 0xed, 0x88, 0xba, 0xd3, 0x6f, 0x51, 0x9e, 0x08, 0x0a, + 0x17, 0xd4, 0x9d, 0xf2, 0xba, 0xac, 0x62, 0xbe, 0x46, 0x9f, 0x44, 0x86, 0x2d, 0xac, 0xab, 0xb0, + 0x51, 0xe0, 0xb7, 0x67, 0x4a, 0x81, 0xd7, 0xec, 0xc0, 0x0e, 0xe9, 0xc9, 0x31, 0xae, 0x70, 0xd6, + 0x93, 0xe3, 0x50, 0xff, 0x1c, 0x8a, 0xa7, 0xdc, 0x8a, 0xc7, 0xa0, 0x72, 0xe5, 0x16, 0xd3, 0x16, + 0xc1, 0x3c, 0x13, 0x9e, 0xd8, 0x62, 0x0c, 0x61, 0xb4, 0x0c, 0xf5, 0x36, 0xac, 0x1d, 0x4b, 0x6b, + 0x39, 0xc3, 0xbb, 0xbb, 0xa3, 0xff, 0x31, 0x07, 0xe5, 0x67, 0xfe, 0x92, 0x95, 0x0a, 0xaa, 0x43, + 0xce, 0x9d, 0x72, 0xb9, 0x3c, 0xce, 0xb9, 0x53, 0xf4, 0x73, 0xa8, 0xcf, 0xdc, 0x8b, 0xc0, 0x66, + 0x15, 0x2c, 0xc0, 0x28, 0xfa, 0xc9, 0x77, 0xd3, 0x96, 0x0d, 0x23, 0x0e, 0x8e, 0xc8, 0xb5, 0x59, + 0x7a, 0x9b, 0xc2, 0x58, 0x3e, 0x83, 0xb1, 0x0f, 0xa1, 0xee, 0xf9, 0x8e, 0xed, 0x59, 0x71, 0x87, + 0x2f, 0x08, 0x1c, 0x70, 0xea, 0x49, 0xd4, 0xe6, 0x6f, 0xc5, 0xa5, 0x78, 0xcf, 0xb8, 0xa0, 0x4f, + 0xa1, 0xb6, 0xb0, 0x03, 0xea, 0x3a, 0xee, 0xc2, 0x66, 0x33, 0x52, 0x89, 0x0b, 0x66, 0xcc, 0xce, + 0xc4, 0x0d, 0x67, 0xd8, 0x19, 0xac, 0x42, 0xde, 0xbd, 0xac, 0x1b, 0x3f, 0xb8, 0x3a, 0xf7, 0xfc, + 0x9b, 0xb0, 0x51, 0xe6, 0xf6, 0xaf, 0x0b, 0xfa, 0xcb, 0x88, 0xac, 0xff, 0x21, 0x0f, 0xa5, 0x53, + 0x51, 0x9d, 0x07, 0x50, 0xe0, 0x31, 0x12, 0x73, 0xd0, 0x4e, 0xfa, 0x32, 0xc1, 0xc1, 0x03, 0xc4, + 0x79, 0xd0, 0x7b, 0x50, 0xa5, 0xee, 0x8c, 0x84, 0xd4, 0x9e, 0x2d, 0x78, 0x50, 0xf3, 0x38, 0x21, + 0x7c, 0x6d, 0x89, 0xbd, 0x07, 0xd5, 0x78, 0x72, 0x93, 0xc1, 0x4a, 0x08, 0xe8, 0x87, 0x50, 0x65, + 0xf8, 0xe2, 0x73, 0x5a, 0xa3, 0xc8, 0x01, 0xbb, 0x75, 0x0b, 0x5d, 0xdc, 0x04, 0x5c, 0x09, 0x22, + 0xc4, 0xfe, 0x04, 0x54, 0x8e, 0x08, 0x29, 0x24, 0x7a, 0xdd, 0x4e, 0xb6, 0xd7, 0x45, 0xc8, 0xc3, + 0x90, 0x7c, 0x1e, 0xd0, 0x23, 0x28, 0x5e, 0x73, 0xf3, 0xca, 0x72, 0x5e, 0x4c, 0x3b, 0xca, 0x53, + 0x21, 0xce, 0xd9, 0xc7, 0xf8, 0x17, 0xa2, 0xb2, 0x78, 0x97, 0xbb, 0xf5, 0x31, 0x96, 0x45, 0x87, + 0x23, 0x1e, 0x36, 0xce, 0x4d, 0x67, 0x1e, 0x6f, 0x74, 0x55, 0xcc, 0x96, 0xe8, 0x03, 0xa8, 0x39, + 0xcb, 0x20, 0xe0, 0x13, 0xaa, 0x3b, 0x23, 0x8d, 
0x2d, 0x1e, 0x28, 0x55, 0xd2, 0x4c, 0x77, 0x46, + 0xd0, 0xcf, 0xa0, 0xee, 0xd9, 0x21, 0x65, 0xc0, 0x93, 0x8e, 0x6c, 0xf3, 0xab, 0x32, 0xe8, 0x13, + 0xc0, 0x13, 0x9e, 0xa8, 0x5e, 0xb2, 0xd1, 0x2f, 0xa1, 0x36, 0x74, 0xe7, 0xee, 0xcc, 0xf6, 0x38, + 0x40, 0x59, 0xe0, 0x53, 0xad, 0x85, 0xaf, 0xef, 0xd7, 0x55, 0xd0, 0x2e, 0xa8, 0xcc, 0x04, 0xc7, + 0xf7, 0x96, 0xb3, 0xb9, 0xa8, 0xf6, 0x3c, 0xae, 0x2e, 0x8e, 0xbb, 0x82, 0xc0, 0x90, 0x2a, 0x6f, + 0x9a, 0x38, 0x97, 0x64, 0x66, 0xa3, 0x8f, 0x63, 0x64, 0x08, 0xb4, 0x37, 0xb2, 0x98, 0x4a, 0x8c, + 0x8a, 0x30, 0xa3, 0xff, 0x25, 0x07, 0xf5, 0x53, 0x31, 0xae, 0x44, 0x23, 0xd2, 0xe7, 0xb0, 0x49, + 0xce, 0xcf, 0x89, 0x43, 0xdd, 0x6b, 0x62, 0x39, 0xb6, 0xe7, 0x91, 0xc0, 0x92, 0x08, 0x56, 0x5b, + 0xeb, 0x4d, 0xf1, 0xb7, 0xa5, 0xcb, 0xe9, 0xfd, 0x1e, 0xde, 0x88, 0x79, 0x25, 0x69, 0x8a, 0x0c, + 0xd8, 0x74, 0x67, 0x33, 0x32, 0x75, 0x6d, 0x9a, 0x56, 0x20, 0x5a, 0xfe, 0xb6, 0xf4, 0xf4, 0xd4, + 0x3c, 0xb2, 0x29, 0x49, 0xd4, 0xc4, 0x12, 0xb1, 0x9a, 0x0f, 0x99, 0x33, 0xc1, 0x45, 0x3c, 0x75, + 0xad, 0x49, 0x49, 0x93, 0x13, 0xb1, 0x3c, 0xcc, 0x4c, 0x74, 0x85, 0x5b, 0x13, 0x5d, 0xf2, 0xd5, + 0x2d, 0xde, 0xf9, 0xd5, 0xfd, 0x0c, 0xd6, 0x45, 0xbb, 0x8d, 0x52, 0x1f, 0x21, 0xfc, 0x1b, 0x7b, + 0x6e, 0x8d, 0x26, 0x9b, 0x50, 0xff, 0x14, 0xd6, 0xe3, 0x40, 0xca, 0x89, 0xef, 0x00, 0x4a, 0xbc, + 0x7c, 0xa2, 0x74, 0xa0, 0x37, 0xe1, 0x8b, 0x25, 0x87, 0xfe, 0xcb, 0x1c, 0xa0, 0x48, 0xde, 0xbf, + 0x09, 0xff, 0x47, 0x93, 0xb1, 0x05, 0x45, 0x4e, 0x97, 0x99, 0x10, 0x1b, 0x16, 0x07, 0x16, 0xd4, + 0xc5, 0x55, 0x9c, 0x06, 0x21, 0xfc, 0x9c, 0xfd, 0x62, 0x12, 0x2e, 0x3d, 0x8a, 0x25, 0x87, 0xfe, + 0x27, 0x05, 0x36, 0x33, 0x71, 0x90, 0xb1, 0x4c, 0x10, 0xa3, 0xbc, 0x05, 0x31, 0xfb, 0x50, 0x59, + 0x5c, 0xbd, 0x05, 0x59, 0xf1, 0xe9, 0xd7, 0xb6, 0xc3, 0x5d, 0x28, 0x04, 0xac, 0x2d, 0x8b, 0x6f, + 0x6d, 0x7a, 0x38, 0xe1, 0x74, 0x36, 0xe1, 0x64, 0xfc, 0xc8, 0x4c, 0x38, 0xd2, 0x7e, 0x17, 0xd4, 0x54, 0x67, 0x60, 0xad, 0x24, 0x5b, 0x55, 0x32, 0x75, 0xdf, 0x58, 0x54, 0x6a, 0xaa, 0xa8, 0x58, - 0x7f, 0x9e, 0x7a, 0x73, 0xdf, 0x25, 0x94, 0x88, 0x94, 0x55, 0x70, 0x42, 0xd0, 0xbf, 0x00, 0x35, - 0x25, 0x79, 0xd7, 0x20, 0x93, 0x24, 0x21, 0x7f, 0x67, 0x12, 0xfe, 0xa6, 0xc0, 0x76, 0x52, 0xcc, - 0x4b, 0x97, 0xfe, 0x5f, 0xd5, 0xa3, 0x1e, 0xc0, 0xce, 0x6d, 0xef, 0xde, 0xa9, 0xca, 0xbe, 0x45, - 0xed, 0x1c, 0x7c, 0x06, 0x6a, 0x6a, 0x1e, 0x67, 0x7f, 0xdb, 0xfb, 0x47, 0xa3, 0x31, 0x36, 0xb4, - 0x07, 0xa8, 0x02, 0x85, 0x89, 0x39, 0x3e, 0xd1, 0x14, 0xb6, 0x32, 0xbe, 0x30, 0xba, 0xe2, 0x29, + 0x7f, 0x76, 0xfc, 0xd9, 0xc2, 0x23, 0x94, 0x88, 0x94, 0x55, 0x70, 0x42, 0xd0, 0xbf, 0x00, 0x35, + 0x25, 0x79, 0xd7, 0x20, 0x93, 0x24, 0x21, 0x7f, 0x67, 0x12, 0xfe, 0xae, 0xc0, 0x76, 0x52, 0xcc, + 0x4b, 0x8f, 0xfe, 0x5f, 0xd5, 0xa3, 0x1e, 0xc0, 0xce, 0x6d, 0xef, 0xde, 0xa9, 0xca, 0xbe, 0x45, + 0xed, 0x1c, 0x7c, 0x06, 0x6a, 0x6a, 0x74, 0x67, 0xff, 0xf0, 0xfb, 0x47, 0xa3, 0x31, 0x36, 0xb4, + 0x07, 0xa8, 0x02, 0x85, 0x89, 0x39, 0x3e, 0xd1, 0x14, 0xb6, 0x32, 0xbe, 0x30, 0xba, 0xe2, 0xd5, 0x80, 0xad, 0x2c, 0xc9, 0x94, 0x3f, 0xf8, 0xb7, 0x02, 0x90, 0x7c, 0xf1, 0x91, 0x0a, 0xe5, 0x17, 0xa3, 0xe3, 0xd1, 0xf8, 0xe5, 0x48, 0x28, 0x38, 0x32, 0xfb, 0x3d, 0x4d, 0x41, 0x55, 0x28, 0x8a, - 0xb7, 0x85, 0x1c, 0xbb, 0x41, 0x3e, 0x2c, 0xe4, 0x51, 0x0d, 0x2a, 0xf1, 0xab, 0x42, 0x01, 0x95, - 0x21, 0x1f, 0xbf, 0x1d, 0xc8, 0xc7, 0x82, 0x12, 0x53, 0x88, 0x8d, 0x93, 0x41, 0xbb, 0x6b, 0x68, - 0x65, 0x76, 0x10, 0x3f, 0x1b, 0x00, 0x94, 0xa2, 0x37, 0x03, 0x26, 0x39, 0x31, 0x4c, 0x0d, 0xd8, + 0x67, 0x88, 0x1c, 0xbb, 0x41, 0xbe, 0x41, 0xe4, 0x51, 0x0d, 0x2a, 0xf1, 0x03, 
0x44, 0x01, 0x95, + 0x21, 0x1f, 0x3f, 0x33, 0xc8, 0x77, 0x85, 0x12, 0x53, 0x88, 0x8d, 0x93, 0x41, 0xbb, 0x6b, 0x68, + 0x65, 0x76, 0x10, 0xbf, 0x30, 0x00, 0x94, 0xa2, 0xe7, 0x05, 0x26, 0x39, 0x31, 0x4c, 0x0d, 0xd8, 0x3d, 0x63, 0xf3, 0xa9, 0x81, 0x35, 0x95, 0xd1, 0xf0, 0xf8, 0xa5, 0x56, 0x63, 0xb4, 0x27, 0x7d, 0x63, 0xd0, 0xd3, 0xd6, 0xd0, 0x1a, 0x54, 0x9f, 0x1a, 0x6d, 0x6c, 0x76, 0x8c, 0xb6, 0xa9, 0xd5, 0xd9, 0xc9, 0x29, 0x37, 0x70, 0x9d, 0x5d, 0xf3, 0x6c, 0xfc, 0x02, 0x8f, 0xda, 0x03, 0x4d, 0x63, 0x9b, 0x53, 0x03, 0x4f, 0xfa, 0xe3, 0x91, 0xb6, 0xc1, 0xee, 0x19, 0xb4, 0x27, 0xe6, 0xc9, 0xb1, 0x86, 0x98, 0xfc, 0xa4, 0x7d, 0x6a, 0x9c, 0x8c, 0xfb, 0x23, 0x53, 0xdb, 0x3c, 0x78, 0xc4, 0xbe, 0x73, 0xe9, 0x09, 0x10, 0xa0, 0x64, 0xb6, 0x3b, 0x03, 0x63, 0xa2, 0x3d, 0x60, 0xeb, 0xc9, 0xd3, - 0x36, 0xee, 0x4d, 0x34, 0xa5, 0xf3, 0xd3, 0xbf, 0xbc, 0xde, 0x55, 0xfe, 0xfa, 0x7a, 0x57, 0xf9, - 0xfb, 0xeb, 0x5d, 0xe5, 0x37, 0xff, 0xd8, 0x7d, 0xf0, 0xe5, 0xa3, 0x6b, 0x87, 0x92, 0x30, 0x6c, - 0x3a, 0xde, 0xa1, 0x58, 0x1d, 0x5e, 0x78, 0x87, 0xd7, 0xf4, 0x90, 0x3f, 0x97, 0x1d, 0x26, 0x18, - 0x3c, 0x2b, 0x71, 0xca, 0x8f, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x5f, 0xf7, 0xf9, 0x08, 0x8a, + 0x36, 0xee, 0x4d, 0x34, 0xa5, 0xf3, 0xd3, 0xbf, 0xbe, 0xde, 0x55, 0xfe, 0xf6, 0x7a, 0x57, 0xf9, + 0xc7, 0xeb, 0x5d, 0xe5, 0xb7, 0xff, 0xdc, 0x7d, 0xf0, 0xe5, 0xa3, 0x6b, 0x97, 0x92, 0x30, 0x6c, + 0xba, 0xfe, 0xa1, 0x58, 0x1d, 0x5e, 0xf8, 0x87, 0xd7, 0xf4, 0x90, 0xbf, 0xac, 0x1d, 0x26, 0x18, + 0x3c, 0x2b, 0x71, 0xca, 0x8f, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x22, 0xbd, 0x3f, 0x86, 0xb5, 0x13, 0x00, 0x00, } @@ -2815,6 +2826,13 @@ func (m *BinlogSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.ExternalCluster) > 0 { + i -= len(m.ExternalCluster) + copy(dAtA[i:], m.ExternalCluster) + i = encodeVarintBinlogdata(dAtA, i, uint64(len(m.ExternalCluster))) + i-- + dAtA[i] = 0x52 + } if m.StopAfterCopy { i-- if m.StopAfterCopy { @@ -4283,6 +4301,10 @@ func (m *BinlogSource) Size() (n int) { if m.StopAfterCopy { n += 2 } + l = len(m.ExternalCluster) + if l > 0 { + n += 1 + l + sovBinlogdata(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6176,6 +6198,38 @@ func (m *BinlogSource) Unmarshal(dAtA []byte) error { } } m.StopAfterCopy = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalCluster", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinlogdata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinlogdata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinlogdata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalCluster = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipBinlogdata(dAtA[iNdEx:]) diff --git a/go/vt/proto/topodata/topodata.pb.go b/go/vt/proto/topodata/topodata.pb.go index 3c2ef986095..84e06dc199a 100644 --- a/go/vt/proto/topodata/topodata.pb.go +++ b/go/vt/proto/topodata/topodata.pb.go @@ -1493,6 +1493,164 @@ func (m *CellsAlias) GetCells() []string { return nil } +type TopoConfig struct { + TopoType string `protobuf:"bytes,1,opt,name=topo_type,json=topoType,proto3" json:"topo_type,omitempty"` 
+ Server string `protobuf:"bytes,2,opt,name=server,proto3" json:"server,omitempty"` + Root string `protobuf:"bytes,3,opt,name=root,proto3" json:"root,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TopoConfig) Reset() { *m = TopoConfig{} } +func (m *TopoConfig) String() string { return proto.CompactTextString(m) } +func (*TopoConfig) ProtoMessage() {} +func (*TopoConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{11} +} +func (m *TopoConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopoConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TopoConfig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TopoConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopoConfig.Merge(m, src) +} +func (m *TopoConfig) XXX_Size() int { + return m.Size() +} +func (m *TopoConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TopoConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TopoConfig proto.InternalMessageInfo + +func (m *TopoConfig) GetTopoType() string { + if m != nil { + return m.TopoType + } + return "" +} + +func (m *TopoConfig) GetServer() string { + if m != nil { + return m.Server + } + return "" +} + +func (m *TopoConfig) GetRoot() string { + if m != nil { + return m.Root + } + return "" +} + +type ExternalVitessCluster struct { + TopoConfig *TopoConfig `protobuf:"bytes,1,opt,name=topo_config,json=topoConfig,proto3" json:"topo_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExternalVitessCluster) Reset() { *m = ExternalVitessCluster{} } +func (m *ExternalVitessCluster) String() string { return proto.CompactTextString(m) } +func (*ExternalVitessCluster) ProtoMessage() {} +func (*ExternalVitessCluster) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{12} +} +func (m *ExternalVitessCluster) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalVitessCluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExternalVitessCluster.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExternalVitessCluster) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalVitessCluster.Merge(m, src) +} +func (m *ExternalVitessCluster) XXX_Size() int { + return m.Size() +} +func (m *ExternalVitessCluster) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalVitessCluster.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalVitessCluster proto.InternalMessageInfo + +func (m *ExternalVitessCluster) GetTopoConfig() *TopoConfig { + if m != nil { + return m.TopoConfig + } + return nil +} + +// ExternalClusters +type ExternalClusters struct { + VitessCluster []*ExternalVitessCluster `protobuf:"bytes,1,rep,name=vitess_cluster,json=vitessCluster,proto3" json:"vitess_cluster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExternalClusters) Reset() { *m = ExternalClusters{} } +func (m *ExternalClusters) String() string { return proto.CompactTextString(m) } +func (*ExternalClusters) 
ProtoMessage() {} +func (*ExternalClusters) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{13} +} +func (m *ExternalClusters) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalClusters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExternalClusters.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExternalClusters) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalClusters.Merge(m, src) +} +func (m *ExternalClusters) XXX_Size() int { + return m.Size() +} +func (m *ExternalClusters) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalClusters.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalClusters proto.InternalMessageInfo + +func (m *ExternalClusters) GetVitessCluster() []*ExternalVitessCluster { + if m != nil { + return m.VitessCluster + } + return nil +} + func init() { proto.RegisterEnum("topodata.KeyspaceType", KeyspaceType_name, KeyspaceType_value) proto.RegisterEnum("topodata.KeyspaceIdType", KeyspaceIdType_name, KeyspaceIdType_value) @@ -1517,98 +1675,107 @@ func init() { proto.RegisterType((*SrvKeyspace_ServedFrom)(nil), "topodata.SrvKeyspace.ServedFrom") proto.RegisterType((*CellInfo)(nil), "topodata.CellInfo") proto.RegisterType((*CellsAlias)(nil), "topodata.CellsAlias") + proto.RegisterType((*TopoConfig)(nil), "topodata.TopoConfig") + proto.RegisterType((*ExternalVitessCluster)(nil), "topodata.ExternalVitessCluster") + proto.RegisterType((*ExternalClusters)(nil), "topodata.ExternalClusters") } func init() { proto.RegisterFile("topodata.proto", fileDescriptor_52c350cb619f972e) } var fileDescriptor_52c350cb619f972e = []byte{ - // 1372 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xdf, 0x6f, 0x1b, 0xc5, - 0x13, 0xef, 0xf9, 0x57, 0xce, 0xe3, 0xb3, 0x73, 0xdd, 0xa6, 0xd1, 0xc9, 0xdf, 0x6f, 0x43, 0x64, - 0x54, 0x11, 0x05, 0xe1, 0x40, 0xda, 0x42, 0x55, 0x84, 0x54, 0xd7, 0x71, 0x49, 0x9a, 0xc4, 0xb1, - 0xd6, 0x8e, 0xa0, 0xbc, 0x9c, 0x2e, 0xf6, 0x26, 0x3d, 0xc5, 0xbe, 0x73, 0x77, 0x37, 0x96, 0xcc, - 0xbf, 0xc0, 0x03, 0x3c, 0x22, 0xfe, 0x03, 0xfe, 0x13, 0x1e, 0x79, 0xe0, 0x91, 0x07, 0x08, 0xff, - 0x06, 0x0f, 0x68, 0x67, 0xef, 0xec, 0xb3, 0xdd, 0x94, 0x14, 0xe5, 0x6d, 0x66, 0x76, 0x66, 0x6e, - 0xe6, 0xb3, 0x9f, 0x99, 0xb5, 0xa1, 0x24, 0xc3, 0x61, 0xd8, 0xf3, 0xa4, 0x57, 0x1d, 0xf2, 0x50, - 0x86, 0xc4, 0x8c, 0xf5, 0xb2, 0x35, 0x92, 0xd2, 0x1f, 0x30, 0x6d, 0xaf, 0x6c, 0x83, 0xb9, 0xcf, - 0xc6, 0xd4, 0x0b, 0xce, 0x18, 0x59, 0x81, 0xac, 0x90, 0x1e, 0x97, 0x8e, 0xb1, 0x6e, 0x6c, 0x58, - 0x54, 0x2b, 0xc4, 0x86, 0x34, 0x0b, 0x7a, 0x4e, 0x0a, 0x6d, 0x4a, 0xac, 0x3c, 0x80, 0x42, 0xc7, - 0x3b, 0xe9, 0x33, 0x59, 0xeb, 0xfb, 0x9e, 0x20, 0x04, 0x32, 0x5d, 0xd6, 0xef, 0x63, 0x54, 0x9e, - 0xa2, 0xac, 0x82, 0x2e, 0x7c, 0x1d, 0x54, 0xa4, 0x4a, 0xac, 0xfc, 0x9d, 0x81, 0x9c, 0x8e, 0x22, - 0x1f, 0x42, 0xd6, 0x53, 0x91, 0x18, 0x51, 0xd8, 0xbe, 0x5b, 0x9d, 0xd4, 0x9a, 0x48, 0x4b, 0xb5, - 0x0f, 0x29, 0x83, 0xf9, 0x2a, 0x14, 0x32, 0xf0, 0x06, 0x0c, 0xd3, 0xe5, 0xe9, 0x44, 0x27, 0x8f, - 0xc1, 0x1c, 0x86, 0x5c, 0xba, 0x03, 0x6f, 0xe8, 0x64, 0xd6, 0xd3, 0x1b, 0x85, 0xed, 0x7b, 0xf3, - 0xb9, 0xaa, 0xad, 0x90, 0xcb, 0x43, 0x6f, 0xd8, 0x08, 0x24, 0x1f, 0xd3, 0xa5, 0xa1, 0xd6, 0x54, - 0xd6, 0x73, 0x36, 0x16, 0x43, 0xaf, 0xcb, 0x9c, 0xac, 0xce, 0x1a, 0xeb, 0x08, 0xc3, 0x2b, 0x8f, - 0xf7, 0x9c, 0x1c, 0x1e, 
0x68, 0x85, 0x6c, 0x41, 0xfe, 0x9c, 0x8d, 0x5d, 0xae, 0x90, 0x72, 0x96, - 0xb0, 0x70, 0x32, 0xfd, 0x58, 0x8c, 0x21, 0xa6, 0xd1, 0x68, 0x6e, 0x40, 0x46, 0x8e, 0x87, 0xcc, - 0x31, 0xd7, 0x8d, 0x8d, 0xd2, 0xf6, 0xca, 0x7c, 0x61, 0x9d, 0xf1, 0x90, 0x51, 0xf4, 0x20, 0x1b, - 0x60, 0xf7, 0x4e, 0x5c, 0xd5, 0x91, 0x1b, 0x8e, 0x18, 0xe7, 0x7e, 0x8f, 0x39, 0x79, 0xfc, 0x76, - 0xa9, 0x77, 0xd2, 0xf4, 0x06, 0xec, 0x28, 0xb2, 0x92, 0x2a, 0x64, 0xa4, 0x77, 0x26, 0x1c, 0xc0, - 0x66, 0xcb, 0x0b, 0xcd, 0x76, 0xbc, 0x33, 0xa1, 0x3b, 0x45, 0x3f, 0x72, 0x1f, 0x4a, 0x83, 0xb1, - 0x78, 0xdd, 0x77, 0x27, 0x10, 0x5a, 0x98, 0xb7, 0x88, 0xd6, 0xdd, 0x18, 0xc7, 0x7b, 0x00, 0xda, - 0x4d, 0xc1, 0xe3, 0x14, 0xd7, 0x8d, 0x8d, 0x2c, 0xcd, 0xa3, 0x45, 0xa1, 0x47, 0x6a, 0xb0, 0x3a, - 0xf0, 0x84, 0x64, 0xdc, 0x95, 0x8c, 0x0f, 0x5c, 0xa4, 0x85, 0xab, 0x38, 0xe4, 0x94, 0x10, 0x07, - 0xab, 0x1a, 0x51, 0xaa, 0xe3, 0x0f, 0x18, 0xbd, 0xa3, 0x7d, 0x3b, 0x8c, 0x0f, 0xda, 0xca, 0x53, - 0x19, 0xcb, 0x4f, 0xc0, 0x4a, 0x5e, 0x84, 0xe2, 0xc7, 0x39, 0x1b, 0x47, 0x94, 0x51, 0xa2, 0x42, - 0x7d, 0xe4, 0xf5, 0x2f, 0xf4, 0x25, 0x67, 0xa9, 0x56, 0x9e, 0xa4, 0x1e, 0x1b, 0xe5, 0xcf, 0x20, - 0x3f, 0xe9, 0xeb, 0xdf, 0x02, 0xf3, 0x89, 0xc0, 0x17, 0x19, 0x33, 0x6d, 0x67, 0x5e, 0x64, 0xcc, - 0x82, 0x6d, 0x55, 0x7e, 0xcb, 0x41, 0xb6, 0x8d, 0x17, 0xf9, 0x18, 0xac, 0xa8, 0x9b, 0x6b, 0x90, - 0xb0, 0xa0, 0x5d, 0x35, 0xd1, 0xaf, 0xc6, 0xc1, 0xbc, 0x26, 0x0e, 0xb3, 0x2c, 0x4a, 0x5d, 0x83, - 0x45, 0x5f, 0x80, 0x25, 0x18, 0x1f, 0xb1, 0x9e, 0xab, 0xa8, 0x22, 0x9c, 0xf4, 0xfc, 0xcd, 0x63, - 0x53, 0xd5, 0x36, 0xfa, 0x20, 0xa7, 0x0a, 0x62, 0x22, 0x0b, 0xf2, 0x14, 0x8a, 0x22, 0xbc, 0xe0, - 0x5d, 0xe6, 0x22, 0x8b, 0x45, 0x34, 0x26, 0xff, 0x5b, 0x88, 0x47, 0x27, 0x94, 0xa9, 0x25, 0xa6, - 0x8a, 0x20, 0xcf, 0x61, 0x59, 0x22, 0x20, 0x6e, 0x37, 0x0c, 0x24, 0x0f, 0xfb, 0xc2, 0xc9, 0xcd, - 0x8f, 0x9a, 0xce, 0xa1, 0x71, 0xab, 0x6b, 0x2f, 0x5a, 0x92, 0x49, 0x55, 0x90, 0x4d, 0xb8, 0xed, - 0x0b, 0x37, 0xc2, 0x4f, 0x95, 0xe8, 0x07, 0x67, 0x38, 0x47, 0x26, 0x5d, 0xf6, 0xc5, 0x21, 0xda, - 0xdb, 0xda, 0x5c, 0x7e, 0x09, 0x30, 0x6d, 0x88, 0x3c, 0x82, 0x42, 0x54, 0x01, 0xce, 0x93, 0xf1, - 0x96, 0x79, 0x02, 0x39, 0x91, 0x15, 0x2f, 0xd4, 0x2a, 0x12, 0x4e, 0x6a, 0x3d, 0xad, 0x78, 0x81, - 0x4a, 0xf9, 0x27, 0x03, 0x0a, 0x89, 0x66, 0xe3, 0x45, 0x65, 0x4c, 0x16, 0xd5, 0xcc, 0x6a, 0x48, - 0x5d, 0xb5, 0x1a, 0xd2, 0x57, 0xae, 0x86, 0xcc, 0x35, 0x2e, 0x75, 0x15, 0x72, 0x58, 0xa8, 0x70, - 0xb2, 0x58, 0x5b, 0xa4, 0x95, 0x7f, 0x36, 0xa0, 0x38, 0x83, 0xe2, 0x8d, 0xf6, 0x4e, 0x3e, 0x02, - 0x72, 0xd2, 0xf7, 0xba, 0xe7, 0x7d, 0x5f, 0x48, 0x45, 0x28, 0x5d, 0x42, 0x06, 0x5d, 0x6e, 0x27, - 0x4e, 0x30, 0xa9, 0x50, 0x55, 0x9e, 0xf2, 0xf0, 0x5b, 0x16, 0xe0, 0x86, 0x34, 0x69, 0xa4, 0x4d, - 0xc6, 0x2a, 0x6b, 0xe7, 0x2a, 0xbf, 0xa7, 0xf1, 0xfd, 0xd0, 0xe8, 0x7c, 0x0c, 0x2b, 0x08, 0x88, - 0x1f, 0x9c, 0xb9, 0xdd, 0xb0, 0x7f, 0x31, 0x08, 0x70, 0xa9, 0x45, 0xc3, 0x4a, 0xe2, 0xb3, 0x3a, - 0x1e, 0xa9, 0xbd, 0x46, 0x5e, 0x2c, 0x46, 0x60, 0x9f, 0x29, 0xec, 0xd3, 0x99, 0x01, 0x11, 0xbf, - 0xb1, 0xa7, 0x39, 0x3e, 0x97, 0x0b, 0x7b, 0x7e, 0x3a, 0x99, 0x94, 0x53, 0x1e, 0x0e, 0xc4, 0xe2, - 0x83, 0x10, 0xe7, 0x88, 0x86, 0xe5, 0x39, 0x0f, 0x07, 0xf1, 0xb0, 0x28, 0x59, 0x90, 0xcf, 0xa1, - 0x18, 0xdf, 0xb4, 0x2e, 0x23, 0x8b, 0x65, 0xac, 0x2e, 0xa6, 0xc0, 0x22, 0xac, 0xf3, 0x84, 0x46, - 0xde, 0x87, 0xe2, 0x89, 0x27, 0x98, 0x3b, 0xe1, 0x8e, 0x7e, 0x3d, 0x2c, 0x65, 0x9c, 0x20, 0xf4, - 0x09, 0x14, 0x45, 0xe0, 0x0d, 0xc5, 0xab, 0x30, 0x5a, 0x1c, 0x4b, 0x6f, 0x58, 0x1c, 0x56, 0xec, - 0x82, 0x9b, 0xf3, 0x22, 0x9e, 0x05, 0x55, 0xe3, 
0xcd, 0xf2, 0x21, 0xc9, 0xf4, 0xf4, 0x2c, 0xd3, - 0xf5, 0x25, 0x57, 0xbe, 0x33, 0xc0, 0xd6, 0x4b, 0x81, 0x0d, 0xfb, 0x7e, 0xd7, 0x93, 0x7e, 0x18, - 0x90, 0x47, 0x90, 0x0d, 0xc2, 0x1e, 0x53, 0x9b, 0x53, 0x21, 0xfc, 0xde, 0xdc, 0x1e, 0x48, 0xb8, - 0x56, 0x9b, 0x61, 0x8f, 0x51, 0xed, 0x5d, 0x7e, 0x0a, 0x19, 0xa5, 0xaa, 0xfd, 0x1b, 0xb5, 0x70, - 0x9d, 0xfd, 0x2b, 0xa7, 0x4a, 0xe5, 0x18, 0x4a, 0xd1, 0x17, 0x4e, 0x19, 0x67, 0x41, 0x97, 0xa9, - 0x9f, 0x1e, 0x09, 0x86, 0xa1, 0xfc, 0xce, 0x2b, 0xb6, 0xf2, 0xbd, 0x01, 0x04, 0xf3, 0xce, 0x8e, - 0xde, 0x4d, 0xe4, 0x26, 0x0f, 0x61, 0xf5, 0xf5, 0x05, 0xe3, 0x63, 0xbd, 0xf1, 0xba, 0xcc, 0xed, - 0xf9, 0x42, 0x7d, 0x45, 0x6f, 0x10, 0x93, 0xae, 0xe0, 0x69, 0x5b, 0x1f, 0xee, 0x44, 0x67, 0x95, - 0xcb, 0x0c, 0x14, 0xda, 0x7c, 0x34, 0xa1, 0xcd, 0x97, 0x00, 0x43, 0x8f, 0x4b, 0x5f, 0x61, 0x1a, - 0xc3, 0xfe, 0x41, 0x02, 0xf6, 0xa9, 0xeb, 0x84, 0xa1, 0xad, 0xd8, 0x9f, 0x26, 0x42, 0xaf, 0x9c, - 0xd0, 0xd4, 0x3b, 0x4f, 0x68, 0xfa, 0x3f, 0x4c, 0x68, 0x0d, 0x0a, 0x89, 0x09, 0x8d, 0x06, 0x74, - 0xfd, 0xcd, 0x7d, 0x24, 0x66, 0x14, 0xa6, 0x33, 0x5a, 0xfe, 0xd3, 0x80, 0xdb, 0x0b, 0x2d, 0xaa, - 0xa9, 0x48, 0x3c, 0x92, 0x6f, 0x9f, 0x8a, 0xe9, 0xeb, 0x48, 0xea, 0x60, 0x63, 0x95, 0x2e, 0x8f, - 0x09, 0xa5, 0x07, 0xa4, 0x90, 0xec, 0x6b, 0x96, 0x71, 0x74, 0x59, 0xcc, 0xe8, 0x82, 0xb4, 0xe0, - 0xae, 0x4e, 0x32, 0xff, 0x4a, 0xea, 0x97, 0xfa, 0xff, 0x73, 0x99, 0x66, 0x1f, 0xc9, 0x3b, 0x62, - 0xc1, 0x26, 0xca, 0xee, 0x4d, 0x4c, 0xfc, 0x5b, 0x5e, 0xb1, 0x68, 0x75, 0xef, 0x83, 0x59, 0x67, - 0xfd, 0xfe, 0x5e, 0x70, 0x1a, 0xaa, 0xdf, 0x89, 0x88, 0x0b, 0x77, 0xbd, 0x5e, 0x8f, 0x33, 0x21, - 0x22, 0xd6, 0x17, 0xb5, 0xb5, 0xa6, 0x8d, 0x6a, 0x24, 0x78, 0x18, 0xca, 0x28, 0x21, 0xca, 0xd1, - 0xa2, 0xa8, 0x00, 0xa8, 0x64, 0x42, 0xff, 0x50, 0x7a, 0xe3, 0xba, 0xd9, 0xdc, 0x00, 0x2b, 0xb9, - 0x3f, 0x09, 0x40, 0xae, 0x79, 0x44, 0x0f, 0x6b, 0x07, 0xf6, 0x2d, 0x62, 0x81, 0xd9, 0x6e, 0xd6, - 0x5a, 0xed, 0xdd, 0xa3, 0x8e, 0x6d, 0x6c, 0x6e, 0x43, 0x69, 0x96, 0x4e, 0x24, 0x0f, 0xd9, 0xe3, - 0x66, 0xbb, 0xd1, 0xb1, 0x6f, 0xa9, 0xb0, 0xe3, 0xbd, 0x66, 0xe7, 0xd3, 0x87, 0xb6, 0xa1, 0xcc, - 0xcf, 0x5e, 0x76, 0x1a, 0x6d, 0x3b, 0xb5, 0xf9, 0x83, 0x01, 0x30, 0xc5, 0x82, 0x14, 0x60, 0xe9, - 0xb8, 0xb9, 0xdf, 0x3c, 0xfa, 0xaa, 0xa9, 0x43, 0x0e, 0x6b, 0xed, 0x4e, 0x83, 0xda, 0x86, 0x3a, - 0xa0, 0x8d, 0xd6, 0xc1, 0x5e, 0xbd, 0x66, 0xa7, 0xd4, 0x01, 0xdd, 0x39, 0x6a, 0x1e, 0xbc, 0xb4, - 0xd3, 0x98, 0xab, 0xd6, 0xa9, 0xef, 0x6a, 0xb1, 0xdd, 0xaa, 0xd1, 0x86, 0x9d, 0x21, 0x36, 0x58, - 0x8d, 0xaf, 0x5b, 0x0d, 0xba, 0x77, 0xd8, 0x68, 0x76, 0x6a, 0x07, 0x76, 0x56, 0xc5, 0x3c, 0xab, - 0xd5, 0xf7, 0x8f, 0x5b, 0x76, 0x4e, 0x27, 0x6b, 0x77, 0x8e, 0x68, 0xc3, 0x5e, 0x52, 0xca, 0x0e, - 0xad, 0xed, 0x35, 0x1b, 0x3b, 0xb6, 0x59, 0x4e, 0xd9, 0xc6, 0xb3, 0xdd, 0x5f, 0x2e, 0xd7, 0x8c, - 0x5f, 0x2f, 0xd7, 0x8c, 0x3f, 0x2e, 0xd7, 0x8c, 0x1f, 0xff, 0x5a, 0xbb, 0x05, 0xcb, 0x7e, 0x58, - 0x1d, 0xf9, 0x92, 0x09, 0xa1, 0xff, 0x7e, 0x7d, 0x73, 0x3f, 0xd2, 0xfc, 0x70, 0x4b, 0x4b, 0x5b, - 0x67, 0xe1, 0xd6, 0x48, 0x6e, 0xe1, 0xe9, 0x56, 0x7c, 0xc9, 0x27, 0x39, 0xd4, 0x1f, 0xfc, 0x13, - 0x00, 0x00, 0xff, 0xff, 0x01, 0x7e, 0x35, 0xd8, 0xd6, 0x0d, 0x00, 0x00, + // 1470 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x4f, 0x6f, 0xdb, 0xc6, + 0x12, 0x0f, 0xf5, 0xcf, 0xd2, 0x88, 0x92, 0x99, 0x8d, 0x63, 0x10, 0xca, 0x8b, 0x9f, 0xa1, 0x87, + 0xe0, 0x19, 0x7e, 0x78, 0x72, 0xeb, 0x24, 0x6d, 0x90, 0xa2, 0x40, 0x14, 0x59, 0xa9, 0x1d, 0xdb, + 0xb2, 0xb0, 0x92, 0xdb, 0x26, 0x17, 0x82, 0x96, 
0xd6, 0x0e, 0x61, 0x8a, 0x54, 0xb8, 0x2b, 0xa1, + 0xea, 0x57, 0xe8, 0xa1, 0x3d, 0x16, 0xfd, 0x06, 0xfd, 0x26, 0x3d, 0xf6, 0xd0, 0x63, 0x0f, 0xad, + 0xfb, 0x35, 0x7a, 0x28, 0x76, 0x96, 0xa4, 0x28, 0xc9, 0x4e, 0x9d, 0xc2, 0xb7, 0x9d, 0xd9, 0x99, + 0xd9, 0x99, 0xdf, 0xfe, 0x66, 0x96, 0x84, 0xb2, 0xf0, 0x87, 0x7e, 0xdf, 0x16, 0x76, 0x6d, 0x18, + 0xf8, 0xc2, 0x27, 0xf9, 0x48, 0xae, 0xe8, 0x63, 0x21, 0x9c, 0x01, 0x53, 0xfa, 0xea, 0x36, 0xe4, + 0xf7, 0xd9, 0x84, 0xda, 0xde, 0x19, 0x23, 0x2b, 0x90, 0xe5, 0xc2, 0x0e, 0x84, 0xa9, 0xad, 0x6b, + 0x1b, 0x3a, 0x55, 0x02, 0x31, 0x20, 0xcd, 0xbc, 0xbe, 0x99, 0x42, 0x9d, 0x5c, 0x56, 0x1f, 0x42, + 0xb1, 0x6b, 0x9f, 0xb8, 0x4c, 0xd4, 0x5d, 0xc7, 0xe6, 0x84, 0x40, 0xa6, 0xc7, 0x5c, 0x17, 0xbd, + 0x0a, 0x14, 0xd7, 0xd2, 0x69, 0xe4, 0x28, 0xa7, 0x12, 0x95, 0xcb, 0xea, 0x9f, 0x19, 0xc8, 0x29, + 0x2f, 0xf2, 0x3f, 0xc8, 0xda, 0xd2, 0x13, 0x3d, 0x8a, 0xdb, 0x77, 0x6b, 0x71, 0xae, 0x89, 0xb0, + 0x54, 0xd9, 0x90, 0x0a, 0xe4, 0xdf, 0xf8, 0x5c, 0x78, 0xf6, 0x80, 0x61, 0xb8, 0x02, 0x8d, 0x65, + 0xf2, 0x04, 0xf2, 0x43, 0x3f, 0x10, 0xd6, 0xc0, 0x1e, 0x9a, 0x99, 0xf5, 0xf4, 0x46, 0x71, 0xfb, + 0xfe, 0x7c, 0xac, 0x5a, 0xdb, 0x0f, 0xc4, 0xa1, 0x3d, 0x6c, 0x7a, 0x22, 0x98, 0xd0, 0xa5, 0xa1, + 0x92, 0x64, 0xd4, 0x73, 0x36, 0xe1, 0x43, 0xbb, 0xc7, 0xcc, 0xac, 0x8a, 0x1a, 0xc9, 0x08, 0xc3, + 0x1b, 0x3b, 0xe8, 0x9b, 0x39, 0xdc, 0x50, 0x02, 0xd9, 0x82, 0xc2, 0x39, 0x9b, 0x58, 0x81, 0x44, + 0xca, 0x5c, 0xc2, 0xc4, 0xc9, 0xf4, 0xb0, 0x08, 0x43, 0x0c, 0xa3, 0xd0, 0xdc, 0x80, 0x8c, 0x98, + 0x0c, 0x99, 0x99, 0x5f, 0xd7, 0x36, 0xca, 0xdb, 0x2b, 0xf3, 0x89, 0x75, 0x27, 0x43, 0x46, 0xd1, + 0x82, 0x6c, 0x80, 0xd1, 0x3f, 0xb1, 0x64, 0x45, 0x96, 0x3f, 0x66, 0x41, 0xe0, 0xf4, 0x99, 0x59, + 0xc0, 0xb3, 0xcb, 0xfd, 0x93, 0x96, 0x3d, 0x60, 0x47, 0xa1, 0x96, 0xd4, 0x20, 0x23, 0xec, 0x33, + 0x6e, 0x02, 0x16, 0x5b, 0x59, 0x28, 0xb6, 0x6b, 0x9f, 0x71, 0x55, 0x29, 0xda, 0x91, 0x07, 0x50, + 0x1e, 0x4c, 0xf8, 0x5b, 0xd7, 0x8a, 0x21, 0xd4, 0x31, 0x6e, 0x09, 0xb5, 0xbb, 0x11, 0x8e, 0xf7, + 0x01, 0x94, 0x99, 0x84, 0xc7, 0x2c, 0xad, 0x6b, 0x1b, 0x59, 0x5a, 0x40, 0x8d, 0x44, 0x8f, 0xd4, + 0x61, 0x75, 0x60, 0x73, 0xc1, 0x02, 0x4b, 0xb0, 0x60, 0x60, 0x21, 0x2d, 0x2c, 0xc9, 0x21, 0xb3, + 0x8c, 0x38, 0xe8, 0xb5, 0x90, 0x52, 0x5d, 0x67, 0xc0, 0xe8, 0x1d, 0x65, 0xdb, 0x65, 0xc1, 0xa0, + 0x23, 0x2d, 0xa5, 0xb2, 0xf2, 0x14, 0xf4, 0xe4, 0x45, 0x48, 0x7e, 0x9c, 0xb3, 0x49, 0x48, 0x19, + 0xb9, 0x94, 0xa8, 0x8f, 0x6d, 0x77, 0xa4, 0x2e, 0x39, 0x4b, 0x95, 0xf0, 0x34, 0xf5, 0x44, 0xab, + 0x7c, 0x0c, 0x85, 0xb8, 0xae, 0xbf, 0x73, 0x2c, 0x24, 0x1c, 0x5f, 0x66, 0xf2, 0x69, 0x23, 0xf3, + 0x32, 0x93, 0x2f, 0x1a, 0x7a, 0xf5, 0x97, 0x1c, 0x64, 0x3b, 0x78, 0x91, 0x4f, 0x40, 0x0f, 0xab, + 0xb9, 0x06, 0x09, 0x8b, 0xca, 0x54, 0x11, 0xfd, 0x6a, 0x1c, 0xf2, 0xd7, 0xc4, 0x61, 0x96, 0x45, + 0xa9, 0x6b, 0xb0, 0xe8, 0x53, 0xd0, 0x39, 0x0b, 0xc6, 0xac, 0x6f, 0x49, 0xaa, 0x70, 0x33, 0x3d, + 0x7f, 0xf3, 0x58, 0x54, 0xad, 0x83, 0x36, 0xc8, 0xa9, 0x22, 0x8f, 0xd7, 0x9c, 0x3c, 0x83, 0x12, + 0xf7, 0x47, 0x41, 0x8f, 0x59, 0xc8, 0x62, 0x1e, 0xb6, 0xc9, 0xbd, 0x05, 0x7f, 0x34, 0xc2, 0x35, + 0xd5, 0xf9, 0x54, 0xe0, 0xe4, 0x05, 0x2c, 0x0b, 0x04, 0xc4, 0xea, 0xf9, 0x9e, 0x08, 0x7c, 0x97, + 0x9b, 0xb9, 0xf9, 0x56, 0x53, 0x31, 0x14, 0x6e, 0x0d, 0x65, 0x45, 0xcb, 0x22, 0x29, 0x72, 0xb2, + 0x09, 0xb7, 0x1d, 0x6e, 0x85, 0xf8, 0xc9, 0x14, 0x1d, 0xef, 0x0c, 0xfb, 0x28, 0x4f, 0x97, 0x1d, + 0x7e, 0x88, 0xfa, 0x8e, 0x52, 0x57, 0x5e, 0x01, 0x4c, 0x0b, 0x22, 0x8f, 0xa1, 0x18, 0x66, 0x80, + 0xfd, 0xa4, 0xbd, 0xa3, 0x9f, 0x40, 0xc4, 0x6b, 0xc9, 0x0b, 0x39, 0x8a, 
0xb8, 0x99, 0x5a, 0x4f, + 0x4b, 0x5e, 0xa0, 0x50, 0xf9, 0x41, 0x83, 0x62, 0xa2, 0xd8, 0x68, 0x50, 0x69, 0xf1, 0xa0, 0x9a, + 0x19, 0x0d, 0xa9, 0xab, 0x46, 0x43, 0xfa, 0xca, 0xd1, 0x90, 0xb9, 0xc6, 0xa5, 0xae, 0x42, 0x0e, + 0x13, 0xe5, 0x66, 0x16, 0x73, 0x0b, 0xa5, 0xca, 0x8f, 0x1a, 0x94, 0x66, 0x50, 0xbc, 0xd1, 0xda, + 0xc9, 0xff, 0x81, 0x9c, 0xb8, 0x76, 0xef, 0xdc, 0x75, 0xb8, 0x90, 0x84, 0x52, 0x29, 0x64, 0xd0, + 0xe4, 0x76, 0x62, 0x07, 0x83, 0x72, 0x99, 0xe5, 0x69, 0xe0, 0x7f, 0xcd, 0x3c, 0x9c, 0x90, 0x79, + 0x1a, 0x4a, 0x71, 0x5b, 0x65, 0x8d, 0x5c, 0xf5, 0xd7, 0x34, 0xbe, 0x1f, 0x0a, 0x9d, 0x0f, 0x60, + 0x05, 0x01, 0x71, 0xbc, 0x33, 0xab, 0xe7, 0xbb, 0xa3, 0x81, 0x87, 0x43, 0x2d, 0x6c, 0x56, 0x12, + 0xed, 0x35, 0x70, 0x4b, 0xce, 0x35, 0xf2, 0x72, 0xd1, 0x03, 0xeb, 0x4c, 0x61, 0x9d, 0xe6, 0x0c, + 0x88, 0x78, 0xc6, 0x9e, 0xe2, 0xf8, 0x5c, 0x2c, 0xac, 0xf9, 0x59, 0xdc, 0x29, 0xa7, 0x81, 0x3f, + 0xe0, 0x8b, 0x0f, 0x42, 0x14, 0x23, 0x6c, 0x96, 0x17, 0x81, 0x3f, 0x88, 0x9a, 0x45, 0xae, 0x39, + 0xf9, 0x04, 0x4a, 0xd1, 0x4d, 0xab, 0x34, 0xb2, 0x98, 0xc6, 0xea, 0x62, 0x08, 0x4c, 0x42, 0x3f, + 0x4f, 0x48, 0xe4, 0x3f, 0x50, 0x3a, 0xb1, 0x39, 0xb3, 0x62, 0xee, 0xa8, 0xd7, 0x43, 0x97, 0xca, + 0x18, 0xa1, 0x0f, 0xa1, 0xc4, 0x3d, 0x7b, 0xc8, 0xdf, 0xf8, 0xe1, 0xe0, 0x58, 0xba, 0x64, 0x70, + 0xe8, 0x91, 0x09, 0x4e, 0xce, 0x51, 0xd4, 0x0b, 0x32, 0xc7, 0x9b, 0xe5, 0x43, 0x92, 0xe9, 0xe9, + 0x59, 0xa6, 0xab, 0x4b, 0xae, 0x7e, 0xa3, 0x81, 0xa1, 0x86, 0x02, 0x1b, 0xba, 0x4e, 0xcf, 0x16, + 0x8e, 0xef, 0x91, 0xc7, 0x90, 0xf5, 0xfc, 0x3e, 0x93, 0x93, 0x53, 0x22, 0xfc, 0xef, 0xb9, 0x39, + 0x90, 0x30, 0xad, 0xb5, 0xfc, 0x3e, 0xa3, 0xca, 0xba, 0xf2, 0x0c, 0x32, 0x52, 0x94, 0xf3, 0x37, + 0x2c, 0xe1, 0x3a, 0xf3, 0x57, 0x4c, 0x85, 0xea, 0x31, 0x94, 0xc3, 0x13, 0x4e, 0x59, 0xc0, 0xbc, + 0x1e, 0x93, 0x9f, 0x1e, 0x09, 0x86, 0xe1, 0xfa, 0xbd, 0x47, 0x6c, 0xf5, 0x5b, 0x0d, 0x08, 0xc6, + 0x9d, 0x6d, 0xbd, 0x9b, 0x88, 0x4d, 0x1e, 0xc1, 0xea, 0xdb, 0x11, 0x0b, 0x26, 0x6a, 0xe2, 0xf5, + 0x98, 0xd5, 0x77, 0xb8, 0x3c, 0x45, 0x4d, 0x90, 0x3c, 0x5d, 0xc1, 0xdd, 0x8e, 0xda, 0xdc, 0x09, + 0xf7, 0xaa, 0x17, 0x19, 0x28, 0x76, 0x82, 0x71, 0x4c, 0x9b, 0xcf, 0x00, 0x86, 0x76, 0x20, 0x1c, + 0x89, 0x69, 0x04, 0xfb, 0x7f, 0x13, 0xb0, 0x4f, 0x4d, 0x63, 0x86, 0xb6, 0x23, 0x7b, 0x9a, 0x70, + 0xbd, 0xb2, 0x43, 0x53, 0xef, 0xdd, 0xa1, 0xe9, 0x7f, 0xd0, 0xa1, 0x75, 0x28, 0x26, 0x3a, 0x34, + 0x6c, 0xd0, 0xf5, 0xcb, 0xeb, 0x48, 0xf4, 0x28, 0x4c, 0x7b, 0xb4, 0xf2, 0xbb, 0x06, 0xb7, 0x17, + 0x4a, 0x94, 0x5d, 0x91, 0x78, 0x24, 0xdf, 0xdd, 0x15, 0xd3, 0xd7, 0x91, 0x34, 0xc0, 0xc0, 0x2c, + 0xad, 0x20, 0x22, 0x94, 0x6a, 0x90, 0x62, 0xb2, 0xae, 0x59, 0xc6, 0xd1, 0x65, 0x3e, 0x23, 0x73, + 0xd2, 0x86, 0xbb, 0x2a, 0xc8, 0xfc, 0x2b, 0xa9, 0x5e, 0xea, 0x7f, 0xcd, 0x45, 0x9a, 0x7d, 0x24, + 0xef, 0xf0, 0x05, 0x1d, 0xaf, 0x58, 0x37, 0xd1, 0xf1, 0xef, 0x78, 0xc5, 0xc2, 0xd1, 0xbd, 0x0f, + 0xf9, 0x06, 0x73, 0xdd, 0x3d, 0xef, 0xd4, 0x97, 0xdf, 0x89, 0x88, 0x4b, 0x60, 0xd9, 0xfd, 0x7e, + 0xc0, 0x38, 0x0f, 0x59, 0x5f, 0x52, 0xda, 0xba, 0x52, 0xca, 0x96, 0x08, 0x7c, 0x5f, 0x84, 0x01, + 0x71, 0x1d, 0x0e, 0x8a, 0x2a, 0x80, 0x0c, 0xc6, 0xd5, 0x87, 0xd2, 0xa5, 0xe3, 0xa6, 0x7a, 0x0c, + 0xd0, 0xf5, 0x87, 0x7e, 0xc3, 0xf7, 0x4e, 0x9d, 0x33, 0x72, 0x0f, 0x0a, 0xb2, 0x86, 0x69, 0x55, + 0x05, 0x8a, 0xff, 0x28, 0x98, 0xfd, 0x2a, 0xe4, 0xd4, 0xc9, 0xe1, 0x51, 0xa1, 0x14, 0x27, 0x90, + 0x9e, 0x26, 0x50, 0x6d, 0xc1, 0xdd, 0xe6, 0x57, 0x82, 0x05, 0x9e, 0xed, 0x7e, 0xee, 0x08, 0xc6, + 0x79, 0xc3, 0x1d, 0xc9, 0x8f, 0x09, 0x44, 0x4e, 0x9e, 0xd0, 0xc3, 0x03, 0xc3, 0x39, 0x93, 0x44, + 
0x2e, 0x4e, 0x86, 0x82, 0x88, 0xd7, 0xd5, 0xd7, 0x60, 0x44, 0xf1, 0xc2, 0x48, 0xf2, 0x23, 0xa8, + 0x3c, 0xc6, 0xd8, 0x56, 0x4f, 0xa9, 0x16, 0x67, 0xdf, 0xa5, 0x39, 0xd0, 0xd2, 0x38, 0x29, 0x6e, + 0x6e, 0x80, 0x9e, 0x7c, 0x42, 0x08, 0x40, 0xae, 0x75, 0x44, 0x0f, 0xeb, 0x07, 0xc6, 0x2d, 0xa2, + 0x43, 0xbe, 0xd3, 0xaa, 0xb7, 0x3b, 0xbb, 0x47, 0x5d, 0x43, 0xdb, 0xdc, 0x86, 0xf2, 0x6c, 0x47, + 0x91, 0x02, 0x64, 0x8f, 0x5b, 0x9d, 0x66, 0xd7, 0xb8, 0x25, 0xdd, 0x8e, 0xf7, 0x5a, 0xdd, 0x8f, + 0x1e, 0x19, 0x9a, 0x54, 0x3f, 0x7f, 0xd5, 0x6d, 0x76, 0x8c, 0xd4, 0xe6, 0x77, 0x1a, 0xc0, 0x94, + 0x0e, 0xa4, 0x08, 0x4b, 0xc7, 0xad, 0xfd, 0xd6, 0xd1, 0x17, 0x2d, 0xe5, 0x72, 0x58, 0xef, 0x74, + 0x9b, 0xd4, 0xd0, 0xe4, 0x06, 0x6d, 0xb6, 0x0f, 0xf6, 0x1a, 0x75, 0x23, 0x25, 0x37, 0xe8, 0xce, + 0x51, 0xeb, 0xe0, 0x95, 0x91, 0xc6, 0x58, 0xf5, 0x6e, 0x63, 0x57, 0x2d, 0x3b, 0xed, 0x3a, 0x6d, + 0x1a, 0x19, 0x62, 0x80, 0xde, 0xfc, 0xb2, 0xdd, 0xa4, 0x7b, 0x87, 0xcd, 0x56, 0xb7, 0x7e, 0x60, + 0x64, 0xa5, 0xcf, 0xf3, 0x7a, 0x63, 0xff, 0xb8, 0x6d, 0xe4, 0x54, 0xb0, 0x4e, 0xf7, 0x88, 0x36, + 0x8d, 0x25, 0x29, 0xec, 0xd0, 0xfa, 0x5e, 0xab, 0xb9, 0x63, 0xe4, 0x2b, 0x29, 0x43, 0x7b, 0xbe, + 0xfb, 0xd3, 0xc5, 0x9a, 0xf6, 0xf3, 0xc5, 0x9a, 0xf6, 0xdb, 0xc5, 0x9a, 0xf6, 0xfd, 0x1f, 0x6b, + 0xb7, 0x60, 0xd9, 0xf1, 0x6b, 0x0a, 0x14, 0xf5, 0x07, 0xfa, 0xfa, 0x41, 0x28, 0x39, 0xfe, 0x96, + 0x5a, 0x6d, 0x9d, 0xf9, 0x5b, 0x63, 0xb1, 0x85, 0xbb, 0x5b, 0x11, 0xbe, 0x27, 0x39, 0x94, 0x1f, + 0xfe, 0x15, 0x00, 0x00, 0xff, 0xff, 0x0c, 0xd6, 0x3a, 0x79, 0xd9, 0x0e, 0x00, 0x00, } func (m *KeyRange) Marshal() (dAtA []byte, err error) { @@ -2668,6 +2835,134 @@ func (m *CellsAlias) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TopoConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopoConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopoConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Root) > 0 { + i -= len(m.Root) + copy(dAtA[i:], m.Root) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Root))) + i-- + dAtA[i] = 0x1a + } + if len(m.Server) > 0 { + i -= len(m.Server) + copy(dAtA[i:], m.Server) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.Server))) + i-- + dAtA[i] = 0x12 + } + if len(m.TopoType) > 0 { + i -= len(m.TopoType) + copy(dAtA[i:], m.TopoType) + i = encodeVarintTopodata(dAtA, i, uint64(len(m.TopoType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExternalVitessCluster) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalVitessCluster) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalVitessCluster) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TopoConfig != nil { + { + size, err := m.TopoConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExternalClusters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalClusters) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalClusters) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.VitessCluster) > 0 { + for iNdEx := len(m.VitessCluster) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VitessCluster[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTopodata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func encodeVarintTopodata(dAtA []byte, offset int, v uint64) int { offset -= sovTopodata(v) base := offset @@ -3166,6 +3461,64 @@ func (m *CellsAlias) Size() (n int) { return n } +func (m *TopoConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TopoType) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + l = len(m.Server) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + l = len(m.Root) + if l > 0 { + n += 1 + l + sovTopodata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExternalVitessCluster) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TopoConfig != nil { + l = m.TopoConfig.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExternalClusters) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.VitessCluster) > 0 { + for _, e := range m.VitessCluster { + l = e.Size() + n += 1 + l + sovTopodata(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovTopodata(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -6178,6 +6531,334 @@ func (m *CellsAlias) Unmarshal(dAtA []byte) error { } return nil } +func (m *TopoConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopoConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopoConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopoType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopoType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Server = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Root = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalVitessCluster) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalVitessCluster: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalVitessCluster: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopoConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TopoConfig == nil { + m.TopoConfig = &TopoConfig{} + } + if err := m.TopoConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalClusters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalClusters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalClusters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalVitessCluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTopodata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTopodata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTopodata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VitessCluster = append(m.VitessCluster, &ExternalVitessCluster{}) + if err := m.VitessCluster[len(m.VitessCluster)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTopodata(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTopodata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipTopodata(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go index 0e090bb1d3b..f955b88275d 100644 --- a/go/vt/proto/vtctldata/vtctldata.pb.go +++ b/go/vt/proto/vtctldata/vtctldata.pb.go @@ -3300,8 +3300,11 @@ type MaterializeSettings struct { StopAfterCopy bool `protobuf:"varint,4,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` TableSettings []*TableMaterializeSettings `protobuf:"bytes,5,rep,name=table_settings,json=tableSettings,proto3" json:"table_settings,omitempty"` // optional parameters. 
- Cell string `protobuf:"bytes,6,opt,name=cell,proto3" json:"cell,omitempty"` - TabletTypes string `protobuf:"bytes,7,opt,name=tablet_types,json=tabletTypes,proto3" json:"tablet_types,omitempty"` + Cell string `protobuf:"bytes,6,opt,name=cell,proto3" json:"cell,omitempty"` + TabletTypes string `protobuf:"bytes,7,opt,name=tablet_types,json=tabletTypes,proto3" json:"tablet_types,omitempty"` + // ExternalCluster is the name of the mounted cluster which has the source keyspace/db for this workflow + // it is of the type + ExternalCluster string `protobuf:"bytes,8,opt,name=external_cluster,json=externalCluster,proto3" json:"external_cluster,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -3389,6 +3392,13 @@ func (m *MaterializeSettings) GetTabletTypes() string { return "" } +func (m *MaterializeSettings) GetExternalCluster() string { + if m != nil { + return m.ExternalCluster + } + return "" +} + func init() { proto.RegisterType((*ExecuteVtctlCommandRequest)(nil), "vtctldata.ExecuteVtctlCommandRequest") proto.RegisterType((*ExecuteVtctlCommandResponse)(nil), "vtctldata.ExecuteVtctlCommandResponse") @@ -3455,135 +3465,136 @@ func init() { func init() { proto.RegisterFile("vtctldata.proto", fileDescriptor_f41247b323a1ab2e) } var fileDescriptor_f41247b323a1ab2e = []byte{ - // 2045 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcb, 0x6f, 0xdb, 0xc8, - 0x19, 0x2f, 0xf5, 0xb2, 0xf4, 0xe9, 0xe5, 0xd0, 0xb2, 0xad, 0x68, 0x13, 0x37, 0x61, 0x1a, 0xaf, - 0x90, 0x76, 0xa5, 0x6c, 0x16, 0x0d, 0x82, 0x74, 0x5b, 0x24, 0xb1, 0x95, 0xc0, 0x9b, 0xad, 0xeb, - 0xd2, 0x46, 0x0a, 0xb4, 0x40, 0x09, 0x9a, 0x1c, 0x29, 0x44, 0x28, 0x92, 0xcb, 0x19, 0xc9, 0xe6, - 0xf6, 0xd0, 0x4b, 0x7b, 0x58, 0xa0, 0x40, 0xaf, 0x05, 0xf6, 0xd2, 0x53, 0x8f, 0x3d, 0xee, 0xa1, - 0x28, 0x7a, 0x2c, 0x7a, 0xec, 0x9f, 0x50, 0xa4, 0x7f, 0x46, 0x2f, 0xc5, 0xbc, 0x48, 0x8a, 0x7a, - 0xc4, 0x71, 0x02, 0x14, 0x3d, 0x59, 0xf3, 0x3d, 0xe6, 0xfb, 0x7d, 0x8f, 0xf9, 0xe6, 0x1b, 0x1a, - 0x9a, 0x53, 0x62, 0x11, 0xd7, 0x36, 0x89, 0xd9, 0x0b, 0x42, 0x9f, 0xf8, 0x6a, 0x25, 0x26, 0x74, - 0xea, 0xae, 0x3f, 0x9a, 0x10, 0xc7, 0xe5, 0x9c, 0x4e, 0x63, 0x1c, 0xe1, 0x2f, 0x5c, 0x8b, 0xc8, - 0xf5, 0x36, 0x31, 0x4f, 0x5d, 0x44, 0xc6, 0xa6, 0x67, 0x8e, 0x50, 0x98, 0x6c, 0xd1, 0x69, 0x10, - 0x3f, 0xf0, 0x53, 0xeb, 0xfa, 0x14, 0x5b, 0x2f, 0xd1, 0x58, 0x2e, 0x6b, 0x53, 0x42, 0x9c, 0x31, - 0xe2, 0x2b, 0xed, 0x67, 0xd0, 0x19, 0x9c, 0x23, 0x6b, 0x42, 0xd0, 0x0b, 0x6a, 0x78, 0xcf, 0x1f, - 0x8f, 0x4d, 0xcf, 0xd6, 0xd1, 0x17, 0x13, 0x84, 0x89, 0xaa, 0x42, 0xc1, 0x0c, 0x47, 0xb8, 0xad, - 0xdc, 0xc8, 0x77, 0x2b, 0x3a, 0xfb, 0xad, 0xde, 0x86, 0x86, 0x69, 0x11, 0xc7, 0xf7, 0x0c, 0xba, - 0x8d, 0x3f, 0x21, 0xed, 0xdc, 0x0d, 0xa5, 0x9b, 0xd7, 0xeb, 0x9c, 0x7a, 0xc2, 0x89, 0xda, 0x1e, - 0x7c, 0xb0, 0x70, 0x63, 0x1c, 0xf8, 0x1e, 0x46, 0xea, 0x77, 0xa0, 0x88, 0xa6, 0xc8, 0x23, 0x6d, - 0xe5, 0x86, 0xd2, 0xad, 0xde, 0x6b, 0xf4, 0xa4, 0xb3, 0x03, 0x4a, 0xd5, 0x39, 0x53, 0xfb, 0x5a, - 0x81, 0xed, 0xbd, 0x97, 0xa6, 0x37, 0x42, 0x27, 0xcc, 0xd9, 0x93, 0x28, 0x40, 0x12, 0xdb, 0x03, - 0xa8, 0xf1, 0x08, 0x18, 0xa6, 0xeb, 0x98, 0x58, 0x6c, 0xb4, 0xd9, 0x8b, 0xbd, 0xe7, 0x2a, 0x8f, - 0x29, 0x53, 0xaf, 0x92, 0x64, 0xa1, 0x7e, 0x04, 0x6b, 0xf6, 0xa9, 0x41, 0xa2, 0x00, 0x31, 0xe8, - 0x8d, 0x7b, 0xad, 0xac, 0x12, 0xb3, 0x53, 0xb2, 0x4f, 0xe9, 0x5f, 0x75, 0x1b, 0xd6, 0xec, 0x30, - 0x32, 0xc2, 0x89, 0xd7, 0xce, 0xdf, 0x50, 0xba, 0x65, 0xbd, 0x64, 0x87, 0x91, 0x3e, 
0xf1, 0xb4, - 0x3f, 0x29, 0xd0, 0x9e, 0x47, 0x27, 0x1c, 0xfc, 0x3e, 0xd4, 0x4f, 0xd1, 0xd0, 0x0f, 0x91, 0xc1, - 0x4d, 0x0b, 0x7c, 0xeb, 0x59, 0x53, 0x7a, 0x8d, 0x8b, 0xf1, 0x95, 0xfa, 0x09, 0xd4, 0xcc, 0x21, - 0x41, 0xa1, 0xd4, 0xca, 0x2d, 0xd1, 0xaa, 0x32, 0x29, 0xa1, 0xb4, 0x03, 0xd5, 0x33, 0x13, 0x1b, - 0xb3, 0x28, 0x2b, 0x67, 0x26, 0xde, 0xe7, 0x40, 0xbf, 0xc9, 0xc3, 0xe6, 0x5e, 0x88, 0x4c, 0x82, - 0x9e, 0xa3, 0x08, 0x07, 0xa6, 0x85, 0x52, 0x09, 0xf6, 0xcc, 0x31, 0x62, 0xe0, 0x2a, 0x3a, 0xfb, - 0xad, 0xb6, 0xa0, 0x38, 0xf4, 0x43, 0x8b, 0x07, 0xa7, 0xac, 0xf3, 0x85, 0xda, 0x87, 0x96, 0xe9, - 0xba, 0xfe, 0x99, 0x81, 0xc6, 0x01, 0x89, 0x8c, 0xa9, 0xc1, 0x8b, 0x4a, 0x18, 0xbb, 0xc2, 0x78, - 0x03, 0xca, 0x7a, 0x71, 0xcc, 0x18, 0xea, 0x5d, 0x68, 0xe1, 0x97, 0x66, 0x68, 0x3b, 0xde, 0xc8, - 0xb0, 0x7c, 0x77, 0x32, 0xf6, 0x0c, 0x66, 0xaa, 0xc0, 0x4c, 0xa9, 0x92, 0xb7, 0xc7, 0x58, 0x87, - 0xd4, 0xf0, 0x67, 0xf3, 0x1a, 0x2c, 0x49, 0x45, 0x96, 0xa4, 0x76, 0x12, 0x03, 0xe9, 0xc5, 0x81, - 0xcd, 0x42, 0x9e, 0xd9, 0x8b, 0x25, 0xed, 0x11, 0xd4, 0x30, 0x0a, 0xa7, 0xc8, 0x36, 0x86, 0xa1, - 0x3f, 0xc6, 0xed, 0xd2, 0x8d, 0x7c, 0xb7, 0x7a, 0xef, 0xfa, 0xfc, 0x1e, 0xbd, 0x63, 0x26, 0xf6, - 0x34, 0xf4, 0xc7, 0x7a, 0x15, 0xc7, 0xbf, 0xb1, 0x7a, 0x07, 0x0a, 0xcc, 0xfa, 0x1a, 0xb3, 0xbe, - 0x35, 0xaf, 0xc9, 0x6c, 0x33, 0x19, 0xf5, 0x16, 0xd4, 0x4f, 0x4d, 0x8c, 0x8c, 0x57, 0x82, 0xd5, - 0x2e, 0x33, 0x27, 0x6b, 0x94, 0x28, 0xc5, 0xd5, 0x8f, 0xa1, 0x8e, 0x3d, 0x33, 0xc0, 0x2f, 0x7d, - 0xc2, 0x8e, 0x4e, 0xbb, 0xc2, 0x72, 0x5b, 0xeb, 0x89, 0x03, 0x49, 0x4f, 0x8e, 0x5e, 0x93, 0x22, - 0x74, 0xa5, 0x1d, 0xc0, 0x56, 0x36, 0x6f, 0xa2, 0xbc, 0xfa, 0x50, 0x8e, 0x8d, 0xf1, 0xca, 0xda, - 0xe8, 0x25, 0xbd, 0x24, 0x16, 0x8f, 0x85, 0xb4, 0xdf, 0x29, 0xa0, 0xf2, 0xbd, 0x8e, 0x69, 0xb4, - 0x64, 0x01, 0x74, 0x32, 0xfb, 0x54, 0x12, 0x15, 0xf5, 0x3a, 0x00, 0x8b, 0x2c, 0xcf, 0x5b, 0x8e, - 0x71, 0x2b, 0x8c, 0x72, 0x38, 0x53, 0x27, 0xf9, 0x74, 0x9d, 0xdc, 0x86, 0x86, 0xe3, 0x59, 0xee, - 0xc4, 0x46, 0x46, 0x60, 0x86, 0xf4, 0x84, 0x17, 0x18, 0xbb, 0x2e, 0xa8, 0x47, 0x8c, 0xa8, 0xfd, - 0x51, 0x81, 0x8d, 0x19, 0x38, 0x97, 0xf4, 0x4b, 0xdd, 0x85, 0x22, 0x83, 0x14, 0x9f, 0x94, 0x44, - 0x9a, 0xef, 0xcc, 0xd9, 0x71, 0x39, 0x1a, 0xa6, 0x1b, 0x22, 0xd3, 0x8e, 0x0c, 0x74, 0xee, 0x60, - 0x82, 0x05, 0x78, 0x5e, 0x42, 0x8f, 0x39, 0x6b, 0xc0, 0x38, 0xda, 0x4f, 0x61, 0x73, 0x1f, 0xb9, - 0x68, 0xfe, 0xd0, 0xac, 0x8a, 0xd9, 0x35, 0xa8, 0x84, 0xc8, 0x9a, 0x84, 0xd8, 0x99, 0xca, 0x03, - 0x94, 0x10, 0xb4, 0x36, 0x6c, 0x65, 0xb7, 0xe4, 0x7e, 0x6b, 0xbf, 0x55, 0x60, 0x83, 0xb3, 0x18, - 0x6a, 0x2c, 0x6d, 0x75, 0xa1, 0xc4, 0xa0, 0xf1, 0x1e, 0xbc, 0xc8, 0x3f, 0xc1, 0x5f, 0x6d, 0x59, - 0xdd, 0x85, 0x26, 0x6d, 0xa9, 0x86, 0x33, 0x34, 0x68, 0x91, 0x3b, 0xde, 0x48, 0xe6, 0x85, 0x92, - 0x0f, 0x86, 0xc7, 0x9c, 0xa8, 0x6d, 0x41, 0x6b, 0x16, 0x86, 0xc0, 0x17, 0x49, 0x3a, 0x6f, 0x39, - 0x31, 0xbe, 0x4f, 0xa1, 0x91, 0xee, 0xc2, 0x48, 0xe2, 0x5c, 0xd2, 0x87, 0xeb, 0xa9, 0x3e, 0x8c, - 0x30, 0x3d, 0x37, 0xbc, 0xa9, 0x04, 0xa1, 0x33, 0x36, 0xc3, 0x48, 0xe0, 0xae, 0x31, 0xe2, 0x11, - 0xa7, 0x69, 0xdb, 0x32, 0x0f, 0xb1, 0x69, 0x81, 0xe9, 0xf7, 0x39, 0xb8, 0x3e, 0x18, 0xa3, 0x70, - 0x84, 0x3c, 0x2b, 0xd2, 0x11, 0x2f, 0xb7, 0x0b, 0x57, 0x77, 0x2b, 0x5d, 0x38, 0x15, 0x59, 0x26, - 0xf7, 0xa1, 0xea, 0xa1, 0x04, 0x4f, 0x7e, 0xd5, 0xa5, 0x02, 0x1e, 0x92, 0x20, 0xd5, 0x1f, 0x41, - 0xd3, 0x19, 0x79, 0xb4, 0xdd, 0x87, 0x28, 0x70, 0x1d, 0xcb, 0xc4, 0xed, 0xc2, 0xaa, 0x40, 0x34, - 0xb8, 0xb4, 0x2e, 0x84, 0xd5, 0x7d, 0xd8, 0x3c, 0x33, 0x1d, 0x12, 0x6b, 0xc7, 0x97, 0x6b, 0x31, - 0x2e, 0x6b, 
0xd6, 0x24, 0xf6, 0x27, 0xa1, 0x49, 0xaf, 0x59, 0x7d, 0x83, 0x8a, 0x4b, 0x75, 0x79, - 0xe9, 0xfe, 0x55, 0x81, 0x9d, 0x65, 0x11, 0x11, 0x07, 0xec, 0xed, 0x43, 0xf2, 0x08, 0xd6, 0x83, - 0xd0, 0x1f, 0xfb, 0x04, 0xd9, 0x17, 0x8b, 0x4b, 0x53, 0x8a, 0xcb, 0xe0, 0xec, 0x42, 0x89, 0xdd, - 0xe7, 0x32, 0x26, 0xd9, 0xdb, 0x5e, 0x70, 0xb5, 0x01, 0x5c, 0x79, 0x86, 0xc8, 0x13, 0xd3, 0x7a, - 0x35, 0x09, 0xf0, 0xa5, 0x73, 0xa8, 0xed, 0x83, 0x9a, 0xde, 0x46, 0x38, 0xde, 0x83, 0xb5, 0x53, - 0x4e, 0x12, 0x25, 0xda, 0xea, 0xc5, 0x13, 0x15, 0x97, 0x3d, 0xf0, 0x86, 0xbe, 0x2e, 0x85, 0xb4, - 0xab, 0xb0, 0xfd, 0x0c, 0x91, 0x3d, 0xe4, 0xba, 0x94, 0x4e, 0x3b, 0x9e, 0x84, 0xa4, 0xdd, 0x85, - 0xf6, 0x3c, 0x4b, 0x98, 0x69, 0x41, 0x91, 0xb6, 0x4b, 0x39, 0x33, 0xf1, 0x85, 0xd6, 0x65, 0x90, - 0xa4, 0x46, 0xea, 0xf6, 0xb5, 0x90, 0xeb, 0xca, 0xdb, 0x97, 0xfe, 0xd6, 0x9e, 0xc2, 0xc6, 0x8c, - 0x64, 0xdc, 0x17, 0x2b, 0x94, 0x6d, 0x38, 0xde, 0xd0, 0x17, 0x8d, 0x51, 0x4d, 0xa2, 0x1f, 0x8b, - 0x97, 0x2d, 0xf1, 0x8b, 0xb6, 0x1a, 0xb1, 0x0f, 0x16, 0xa7, 0x4d, 0xa2, 0xff, 0x46, 0x89, 0x3d, - 0x4b, 0x58, 0xc2, 0xcc, 0x01, 0xac, 0xcd, 0x9e, 0xe3, 0x7e, 0xaa, 0xdf, 0x2c, 0x51, 0xea, 0x89, - 0xf5, 0xc0, 0x23, 0x61, 0xa4, 0x4b, 0xfd, 0xce, 0x11, 0xd4, 0xd2, 0x0c, 0x75, 0x1d, 0xf2, 0xaf, - 0x50, 0x24, 0x7c, 0xa5, 0x3f, 0xd5, 0x3b, 0x50, 0x9c, 0x9a, 0xee, 0x04, 0x89, 0xd6, 0xdd, 0x9a, - 0xf5, 0x87, 0x9b, 0xd1, 0xb9, 0xc8, 0xc3, 0xdc, 0x03, 0x45, 0xdb, 0x64, 0xa1, 0x91, 0xad, 0x33, - 0xf6, 0xe7, 0x00, 0x5a, 0xb3, 0x64, 0xe1, 0xcb, 0xc7, 0x50, 0x91, 0x85, 0x22, 0xbd, 0x59, 0x78, - 0x97, 0x24, 0x52, 0xda, 0x5d, 0x96, 0xa6, 0xb7, 0xe8, 0xf7, 0x22, 0x5d, 0xef, 0x7e, 0x3d, 0xff, - 0x26, 0x07, 0xeb, 0xcf, 0x10, 0xe1, 0xb3, 0xd3, 0xbb, 0x8f, 0xb8, 0x5b, 0x50, 0x62, 0x4b, 0xdc, - 0xce, 0xb1, 0x32, 0x14, 0x2b, 0x7a, 0x3b, 0xa3, 0x73, 0x7e, 0x3b, 0x0b, 0x7e, 0x9e, 0xf1, 0xeb, - 0x82, 0x7a, 0xc2, 0xc5, 0x6e, 0x81, 0xbc, 0xae, 0x8d, 0xa9, 0x83, 0xce, 0xb0, 0xb8, 0x2b, 0x6a, - 0x82, 0xf8, 0x82, 0xd2, 0xd4, 0x2e, 0xac, 0xb3, 0x3d, 0xd8, 0x78, 0x80, 0x0d, 0xdf, 0x73, 0x23, - 0xd6, 0xad, 0xca, 0x3a, 0xbf, 0x12, 0xd8, 0xb9, 0xf8, 0x89, 0xe7, 0x46, 0x89, 0x24, 0x76, 0xbe, - 0x94, 0x92, 0xa5, 0x94, 0xe4, 0x31, 0x25, 0x53, 0x49, 0xed, 0x88, 0x75, 0x00, 0x19, 0x05, 0x11, - 0xcc, 0x1f, 0x40, 0x49, 0x0c, 0x9b, 0x3c, 0x00, 0xb7, 0x7a, 0xf3, 0x4f, 0x1f, 0xae, 0xb2, 0x8f, - 0x86, 0x8e, 0xe7, 0xb0, 0xfe, 0x28, 0x54, 0xb4, 0xcf, 0xa1, 0x49, 0x77, 0x7c, 0x3f, 0x33, 0x8f, - 0xf6, 0x90, 0x67, 0x69, 0xa6, 0xa3, 0xc6, 0x13, 0x88, 0xb2, 0x72, 0x02, 0xd1, 0xee, 0xb0, 0x3a, - 0x3d, 0x0e, 0xa7, 0x2f, 0x66, 0xb3, 0xbc, 0xa8, 0x0b, 0x1c, 0xc2, 0x66, 0x46, 0x36, 0x7e, 0x56, - 0xd4, 0x70, 0x38, 0x4d, 0xc6, 0xef, 0xb8, 0xb8, 0xc4, 0x1b, 0x2f, 0xa5, 0x02, 0x38, 0xfe, 0xad, - 0x7d, 0xce, 0x70, 0x8b, 0xb7, 0xc3, 0xbb, 0x56, 0x97, 0xf6, 0x43, 0x96, 0x25, 0xb9, 0x9b, 0x40, - 0xd6, 0x15, 0x25, 0xb7, 0xfc, 0xa5, 0x23, 0xf8, 0xda, 0x2f, 0x52, 0xea, 0x97, 0x6f, 0xf3, 0x94, - 0x4a, 0x63, 0x25, 0x4b, 0x98, 0x2f, 0xb4, 0x47, 0xec, 0x08, 0x67, 0x46, 0x05, 0xf5, 0x0e, 0xac, - 0x71, 0xe3, 0xc9, 0x1c, 0x95, 0x45, 0x27, 0x05, 0xb4, 0x3e, 0x83, 0x97, 0x49, 0xd2, 0xaa, 0x1e, - 0xf0, 0x84, 0x99, 0xcc, 0x66, 0xea, 0x7b, 0x50, 0xce, 0x64, 0xe9, 0x4a, 0x9c, 0xa5, 0xb8, 0x01, - 0xac, 0x4d, 0x45, 0x82, 0xfe, 0xa3, 0xc0, 0xf6, 0x81, 0xe7, 0xf0, 0xd2, 0x12, 0xf7, 0xe6, 0xe5, - 0x43, 0xa3, 0x43, 0x47, 0xdc, 0xd4, 0x06, 0x72, 0x91, 0x45, 0x8c, 0x99, 0x44, 0xaf, 0xbc, 0xbc, - 0xb7, 0x85, 0xe2, 0x80, 0xea, 0xa5, 0x18, 0xc9, 0xb8, 0x5f, 0x48, 0x8f, 0xfb, 0xef, 0x67, 0x6e, - 0x79, 0x02, 0xed, 0x79, 0xe7, 0xe3, 
0xe3, 0x25, 0x87, 0x07, 0x65, 0xe5, 0xf0, 0xf0, 0x55, 0x0e, - 0x3e, 0x38, 0x72, 0x4d, 0xcf, 0x43, 0xf6, 0xff, 0x78, 0x16, 0x7c, 0x08, 0x75, 0x73, 0xea, 0x3b, - 0xc9, 0xb4, 0x54, 0x58, 0xa5, 0x59, 0x63, 0xb2, 0x52, 0xf7, 0xfd, 0xc4, 0xf3, 0x2f, 0x0a, 0x5c, - 0x5b, 0x1c, 0x8b, 0xff, 0x83, 0x29, 0xf0, 0xd7, 0x70, 0x55, 0x47, 0x63, 0x7f, 0x1a, 0x3f, 0x92, - 0xe8, 0x34, 0x70, 0x91, 0x2c, 0xca, 0x46, 0x9a, 0x4b, 0x1a, 0xe9, 0x92, 0x47, 0xea, 0xcc, 0x5b, - 0xa9, 0x90, 0x7d, 0xa5, 0x5d, 0x83, 0xce, 0x22, 0x00, 0xe2, 0xd5, 0xf1, 0xb5, 0x02, 0x5b, 0x9c, - 0xcd, 0x42, 0x7a, 0x51, 0x70, 0x6f, 0x78, 0x4c, 0x4b, 0xec, 0xf9, 0x45, 0xd8, 0x0b, 0x4b, 0xb1, - 0x17, 0xb3, 0xd8, 0xaf, 0xc2, 0xf6, 0x1c, 0x38, 0x01, 0xfc, 0x29, 0x6c, 0xca, 0x62, 0x98, 0xbd, - 0x08, 0x3e, 0xca, 0x74, 0xee, 0x25, 0x09, 0x95, 0xed, 0xfb, 0x57, 0xd4, 0xff, 0xd9, 0x7d, 0x2e, - 0x5d, 0x55, 0x7d, 0x58, 0xbb, 0x50, 0x31, 0x49, 0x29, 0x4d, 0x87, 0x9b, 0x9c, 0x3e, 0x38, 0x27, - 0x28, 0xf4, 0x4c, 0xd7, 0x8d, 0xdf, 0x39, 0xc8, 0xbe, 0xa4, 0x43, 0x7f, 0x57, 0x40, 0x5b, 0xb5, - 0xe9, 0xa5, 0xbd, 0xbb, 0x6c, 0x03, 0xb9, 0x0f, 0x55, 0xdf, 0xbd, 0x60, 0xfb, 0x00, 0xdf, 0x95, - 0x27, 0x4c, 0x3b, 0x84, 0xf2, 0xf3, 0xd4, 0x61, 0x98, 0xfb, 0xb2, 0xd7, 0x4b, 0x79, 0x90, 0xcb, - 0xbe, 0x21, 0x16, 0x0c, 0xa5, 0x9f, 0xc2, 0xce, 0x53, 0xc7, 0xb3, 0x1f, 0xbb, 0x2e, 0xff, 0x1a, - 0x70, 0xe0, 0xbd, 0xcd, 0x68, 0xfc, 0x37, 0x05, 0xbe, 0xbd, 0x54, 0x5d, 0xc4, 0xf4, 0x30, 0xf3, - 0x79, 0xe3, 0x7e, 0x6a, 0x78, 0x7a, 0x83, 0x2e, 0x1f, 0xae, 0xc4, 0xab, 0x43, 0xec, 0xd2, 0x79, - 0x0e, 0xd5, 0x14, 0x79, 0xc1, 0x9b, 0x63, 0x77, 0xf6, 0xcd, 0xb1, 0x60, 0x58, 0x4b, 0xde, 0x1b, - 0xbf, 0x84, 0x22, 0xa3, 0xbd, 0xa9, 0xe9, 0xa4, 0x4e, 0x34, 0x8f, 0xf3, 0x6d, 0x59, 0x0d, 0x3c, - 0xe3, 0xcd, 0x24, 0xc8, 0x33, 0x03, 0xe1, 0x57, 0x0a, 0xb4, 0x59, 0x2a, 0x7f, 0x6c, 0x12, 0x14, - 0x3a, 0xa6, 0xeb, 0x7c, 0x89, 0x8e, 0x11, 0x21, 0x8e, 0x37, 0xc2, 0xea, 0x4d, 0x3a, 0x9d, 0x85, - 0x23, 0x24, 0xee, 0x6e, 0x61, 0xb7, 0xca, 0x69, 0x4c, 0x4b, 0xfd, 0x2e, 0x5c, 0xc1, 0xfe, 0x24, - 0xb4, 0x90, 0x81, 0xce, 0x83, 0x10, 0x61, 0xec, 0xf8, 0x9e, 0xc0, 0xb1, 0xce, 0x19, 0x83, 0x98, - 0x4e, 0xfb, 0x8f, 0xc5, 0xbe, 0xb7, 0x19, 0xb6, 0x2d, 0xdb, 0x4c, 0x85, 0x53, 0xf6, 0x6d, 0x57, - 0xfb, 0x73, 0x0e, 0x36, 0x16, 0xc1, 0xe8, 0x40, 0xf9, 0xcc, 0x0f, 0x5f, 0x0d, 0x5d, 0xff, 0x4c, - 0xba, 0x2e, 0xd7, 0xea, 0x87, 0xd0, 0x14, 0xf6, 0x67, 0xaa, 0xaa, 0xa2, 0x37, 0x38, 0x39, 0xae, - 0xc5, 0x0f, 0xa1, 0x29, 0x7c, 0x89, 0x05, 0x39, 0x80, 0x06, 0x27, 0x3f, 0x4f, 0x3e, 0xe6, 0x35, - 0x31, 0xf1, 0x03, 0x83, 0x7f, 0x02, 0xb7, 0xfc, 0x20, 0x92, 0x5f, 0xa9, 0x28, 0xf9, 0x31, 0xa5, - 0xee, 0xf9, 0x41, 0xa4, 0x7e, 0x26, 0xbe, 0x3a, 0x19, 0x58, 0xe0, 0x6c, 0x17, 0x59, 0xf9, 0xdc, - 0x4a, 0xa5, 0x73, 0x59, 0x64, 0xc5, 0x37, 0xa8, 0xd8, 0x43, 0xd9, 0x79, 0x4b, 0xa9, 0xce, 0x7b, - 0x33, 0x1e, 0x8d, 0x49, 0x14, 0x20, 0xcc, 0xbe, 0x01, 0x57, 0xe4, 0x0c, 0x7c, 0x42, 0x49, 0x4f, - 0x1e, 0xfc, 0xe3, 0xf5, 0x8e, 0xf2, 0xcf, 0xd7, 0x3b, 0xca, 0xbf, 0x5e, 0xef, 0x28, 0x7f, 0xf8, - 0xf7, 0xce, 0xb7, 0x7e, 0xbe, 0x3b, 0x75, 0x08, 0xc2, 0xb8, 0xe7, 0xf8, 0x7d, 0xfe, 0xab, 0x3f, - 0xf2, 0xfb, 0x53, 0xd2, 0x67, 0xff, 0x68, 0xe9, 0xc7, 0xc0, 0x4e, 0x4b, 0x8c, 0xf0, 0xc9, 0x7f, - 0x03, 0x00, 0x00, 0xff, 0xff, 0xca, 0xc4, 0x4c, 0xca, 0xfc, 0x19, 0x00, 0x00, + // 2063 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0x4d, 0x6f, 0xdb, 0xc8, + 0xb5, 0x94, 0x2c, 0x59, 0x7a, 0xfa, 0x72, 0x68, 0xd9, 0x56, 0xb4, 0x89, 0xeb, 0x30, 0x8d, 0x57, + 0x4d, 0xbb, 0x52, 0x36, 0x8b, 
0x06, 0x41, 0xba, 0x2d, 0x92, 0xd8, 0x4a, 0xe0, 0xcd, 0xd6, 0x75, + 0x69, 0x23, 0x05, 0x5a, 0xa0, 0x04, 0x4d, 0x8e, 0x14, 0x22, 0x14, 0xc9, 0xe5, 0x8c, 0x64, 0x73, + 0x7b, 0xe8, 0xa5, 0x3d, 0x2c, 0x50, 0xa0, 0xd7, 0x02, 0x7b, 0xe9, 0xa9, 0x3f, 0x21, 0x87, 0xa2, + 0xe8, 0xb1, 0xe8, 0xb1, 0xd7, 0xde, 0x8a, 0xf4, 0x67, 0xf4, 0x52, 0xcc, 0x17, 0x49, 0x51, 0x1f, + 0x71, 0x9c, 0x00, 0xc5, 0x9e, 0xc4, 0x79, 0x1f, 0xf3, 0x3e, 0xe7, 0xbd, 0x37, 0x23, 0x68, 0x4c, + 0x88, 0x45, 0x5c, 0xdb, 0x24, 0x66, 0x37, 0x08, 0x7d, 0xe2, 0xab, 0xe5, 0x18, 0xd0, 0xae, 0xb9, + 0xfe, 0x70, 0x4c, 0x1c, 0x97, 0x63, 0xda, 0xf5, 0x51, 0x84, 0xbf, 0x70, 0x2d, 0x22, 0xd7, 0x5b, + 0xc4, 0x3c, 0x75, 0x11, 0x19, 0x99, 0x9e, 0x39, 0x44, 0x61, 0xb2, 0x45, 0xbb, 0x4e, 0xfc, 0xc0, + 0x4f, 0xad, 0x6b, 0x13, 0x6c, 0xbd, 0x40, 0x23, 0xb9, 0xac, 0x4e, 0x08, 0x71, 0x46, 0x88, 0xaf, + 0xb4, 0x9f, 0x43, 0xbb, 0x7f, 0x8e, 0xac, 0x31, 0x41, 0xcf, 0xa9, 0xe0, 0x3d, 0x7f, 0x34, 0x32, + 0x3d, 0x5b, 0x47, 0x5f, 0x8c, 0x11, 0x26, 0xaa, 0x0a, 0x2b, 0x66, 0x38, 0xc4, 0x2d, 0x65, 0x27, + 0xdf, 0x29, 0xeb, 0xec, 0x5b, 0xbd, 0x05, 0x75, 0xd3, 0x22, 0x8e, 0xef, 0x19, 0x74, 0x1b, 0x7f, + 0x4c, 0x5a, 0xb9, 0x1d, 0xa5, 0x93, 0xd7, 0x6b, 0x1c, 0x7a, 0xc2, 0x81, 0xda, 0x1e, 0x7c, 0x30, + 0x77, 0x63, 0x1c, 0xf8, 0x1e, 0x46, 0xea, 0x77, 0xa0, 0x80, 0x26, 0xc8, 0x23, 0x2d, 0x65, 0x47, + 0xe9, 0x54, 0xee, 0xd6, 0xbb, 0xd2, 0xd8, 0x3e, 0x85, 0xea, 0x1c, 0xa9, 0x7d, 0xad, 0xc0, 0xd6, + 0xde, 0x0b, 0xd3, 0x1b, 0xa2, 0x13, 0x66, 0xec, 0x49, 0x14, 0x20, 0xa9, 0xdb, 0x7d, 0xa8, 0x72, + 0x0f, 0x18, 0xa6, 0xeb, 0x98, 0x58, 0x6c, 0xb4, 0xd1, 0x8d, 0xad, 0xe7, 0x2c, 0x8f, 0x28, 0x52, + 0xaf, 0x90, 0x64, 0xa1, 0x7e, 0x04, 0xab, 0xf6, 0xa9, 0x41, 0xa2, 0x00, 0x31, 0xd5, 0xeb, 0x77, + 0x9b, 0x59, 0x26, 0x26, 0xa7, 0x68, 0x9f, 0xd2, 0x5f, 0x75, 0x0b, 0x56, 0xed, 0x30, 0x32, 0xc2, + 0xb1, 0xd7, 0xca, 0xef, 0x28, 0x9d, 0x92, 0x5e, 0xb4, 0xc3, 0x48, 0x1f, 0x7b, 0xda, 0x9f, 0x15, + 0x68, 0xcd, 0x6a, 0x27, 0x0c, 0xfc, 0x01, 0xd4, 0x4e, 0xd1, 0xc0, 0x0f, 0x91, 0xc1, 0x45, 0x0b, + 0xfd, 0xd6, 0xb2, 0xa2, 0xf4, 0x2a, 0x27, 0xe3, 0x2b, 0xf5, 0x13, 0xa8, 0x9a, 0x03, 0x82, 0x42, + 0xc9, 0x95, 0x5b, 0xc0, 0x55, 0x61, 0x54, 0x82, 0x69, 0x1b, 0x2a, 0x67, 0x26, 0x36, 0xa6, 0xb5, + 0x2c, 0x9f, 0x99, 0x78, 0x9f, 0x2b, 0xfa, 0x2a, 0x0f, 0x1b, 0x7b, 0x21, 0x32, 0x09, 0x7a, 0x86, + 0x22, 0x1c, 0x98, 0x16, 0x4a, 0x05, 0xd8, 0x33, 0x47, 0x88, 0x29, 0x57, 0xd6, 0xd9, 0xb7, 0xda, + 0x84, 0xc2, 0xc0, 0x0f, 0x2d, 0xee, 0x9c, 0x92, 0xce, 0x17, 0x6a, 0x0f, 0x9a, 0xa6, 0xeb, 0xfa, + 0x67, 0x06, 0x1a, 0x05, 0x24, 0x32, 0x26, 0x06, 0x4f, 0x2a, 0x21, 0xec, 0x0a, 0xc3, 0xf5, 0x29, + 0xea, 0xf9, 0x31, 0x43, 0xa8, 0x77, 0xa0, 0x89, 0x5f, 0x98, 0xa1, 0xed, 0x78, 0x43, 0xc3, 0xf2, + 0xdd, 0xf1, 0xc8, 0x33, 0x98, 0xa8, 0x15, 0x26, 0x4a, 0x95, 0xb8, 0x3d, 0x86, 0x3a, 0xa4, 0x82, + 0x3f, 0x9b, 0xe5, 0x60, 0x41, 0x2a, 0xb0, 0x20, 0xb5, 0x12, 0x1f, 0x48, 0x2b, 0x0e, 0x6c, 0xe6, + 0xf2, 0xcc, 0x5e, 0x2c, 0x68, 0x0f, 0xa1, 0x8a, 0x51, 0x38, 0x41, 0xb6, 0x31, 0x08, 0xfd, 0x11, + 0x6e, 0x15, 0x77, 0xf2, 0x9d, 0xca, 0xdd, 0xeb, 0xb3, 0x7b, 0x74, 0x8f, 0x19, 0xd9, 0x93, 0xd0, + 0x1f, 0xe9, 0x15, 0x1c, 0x7f, 0x63, 0xf5, 0x36, 0xac, 0x30, 0xe9, 0xab, 0x4c, 0xfa, 0xe6, 0x2c, + 0x27, 0x93, 0xcd, 0x68, 0xd4, 0x9b, 0x50, 0x3b, 0x35, 0x31, 0x32, 0x5e, 0x0a, 0x54, 0xab, 0xc4, + 0x8c, 0xac, 0x52, 0xa0, 0x24, 0x57, 0x3f, 0x86, 0x1a, 0xf6, 0xcc, 0x00, 0xbf, 0xf0, 0x09, 0x3b, + 0x3a, 0xad, 0x32, 0x8b, 0x6d, 0xb5, 0x2b, 0x0e, 0x24, 0x3d, 0x39, 0x7a, 0x55, 0x92, 0xd0, 0x95, + 0x76, 0x00, 0x9b, 0xd9, 0xb8, 0x89, 0xf4, 0xea, 0x41, 
0x29, 0x16, 0xc6, 0x33, 0x6b, 0xbd, 0x9b, + 0xd4, 0x92, 0x98, 0x3c, 0x26, 0xd2, 0x7e, 0xaf, 0x80, 0xca, 0xf7, 0x3a, 0xa6, 0xde, 0x92, 0x09, + 0xd0, 0xce, 0xec, 0x53, 0x4e, 0x58, 0xd4, 0xeb, 0x00, 0xcc, 0xb3, 0x3c, 0x6e, 0x39, 0x86, 0x2d, + 0x33, 0xc8, 0xe1, 0x54, 0x9e, 0xe4, 0xd3, 0x79, 0x72, 0x0b, 0xea, 0x8e, 0x67, 0xb9, 0x63, 0x1b, + 0x19, 0x81, 0x19, 0xd2, 0x13, 0xbe, 0xc2, 0xd0, 0x35, 0x01, 0x3d, 0x62, 0x40, 0xed, 0x4f, 0x0a, + 0xac, 0x4f, 0xa9, 0x73, 0x49, 0xbb, 0xd4, 0x5d, 0x28, 0x30, 0x95, 0xe2, 0x93, 0x92, 0x50, 0xf3, + 0x9d, 0x39, 0x3a, 0x4e, 0x47, 0xc3, 0x74, 0x43, 0x64, 0xda, 0x91, 0x81, 0xce, 0x1d, 0x4c, 0xb0, + 0x50, 0x9e, 0xa7, 0xd0, 0x23, 0x8e, 0xea, 0x33, 0x8c, 0xf6, 0x33, 0xd8, 0xd8, 0x47, 0x2e, 0x9a, + 0x3d, 0x34, 0xcb, 0x7c, 0x76, 0x0d, 0xca, 0x21, 0xb2, 0xc6, 0x21, 0x76, 0x26, 0xf2, 0x00, 0x25, + 0x00, 0xad, 0x05, 0x9b, 0xd9, 0x2d, 0xb9, 0xdd, 0xda, 0xef, 0x14, 0x58, 0xe7, 0x28, 0xa6, 0x35, + 0x96, 0xb2, 0x3a, 0x50, 0x64, 0xaa, 0xf1, 0x1a, 0x3c, 0xcf, 0x3e, 0x81, 0x5f, 0x2e, 0x59, 0xdd, + 0x85, 0x06, 0x2d, 0xa9, 0x86, 0x33, 0x30, 0x68, 0x92, 0x3b, 0xde, 0x50, 0xc6, 0x85, 0x82, 0x0f, + 0x06, 0xc7, 0x1c, 0xa8, 0x6d, 0x42, 0x73, 0x5a, 0x0d, 0xa1, 0x5f, 0x24, 0xe1, 0xbc, 0xe4, 0xc4, + 0xfa, 0x7d, 0x0a, 0xf5, 0x74, 0x15, 0x46, 0x52, 0xcf, 0x05, 0x75, 0xb8, 0x96, 0xaa, 0xc3, 0x08, + 0xd3, 0x73, 0xc3, 0x8b, 0x4a, 0x10, 0x3a, 0x23, 0x33, 0x8c, 0x84, 0xde, 0x55, 0x06, 0x3c, 0xe2, + 0x30, 0x6d, 0x4b, 0xc6, 0x21, 0x16, 0x2d, 0x74, 0xfa, 0x43, 0x0e, 0xae, 0xf7, 0x47, 0x28, 0x1c, + 0x22, 0xcf, 0x8a, 0x74, 0xc4, 0xd3, 0xed, 0xc2, 0xd9, 0xdd, 0x4c, 0x27, 0x4e, 0x59, 0xa6, 0xc9, + 0x3d, 0xa8, 0x78, 0x28, 0xd1, 0x27, 0xbf, 0xac, 0xa9, 0x80, 0x87, 0xa4, 0x92, 0xea, 0x8f, 0xa1, + 0xe1, 0x0c, 0x3d, 0x5a, 0xee, 0x43, 0x14, 0xb8, 0x8e, 0x65, 0xe2, 0xd6, 0xca, 0x32, 0x47, 0xd4, + 0x39, 0xb5, 0x2e, 0x88, 0xd5, 0x7d, 0xd8, 0x38, 0x33, 0x1d, 0x12, 0x73, 0xc7, 0xcd, 0xb5, 0x10, + 0xa7, 0x35, 0x2b, 0x12, 0xfb, 0xe3, 0xd0, 0xa4, 0x6d, 0x56, 0x5f, 0xa7, 0xe4, 0x92, 0x5d, 0x36, + 0xdd, 0xbf, 0x2a, 0xb0, 0xbd, 0xc8, 0x23, 0xe2, 0x80, 0xbd, 0xbd, 0x4b, 0x1e, 0xc2, 0x5a, 0x10, + 0xfa, 0x23, 0x9f, 0x20, 0xfb, 0x62, 0x7e, 0x69, 0x48, 0x72, 0xe9, 0x9c, 0x5d, 0x28, 0xb2, 0x7e, + 0x2e, 0x7d, 0x92, 0xed, 0xf6, 0x02, 0xab, 0xf5, 0xe1, 0xca, 0x53, 0x44, 0x1e, 0x9b, 0xd6, 0xcb, + 0x71, 0x80, 0x2f, 0x1d, 0x43, 0x6d, 0x1f, 0xd4, 0xf4, 0x36, 0xc2, 0xf0, 0x2e, 0xac, 0x9e, 0x72, + 0x90, 0x48, 0xd1, 0x66, 0x37, 0x9e, 0xa8, 0x38, 0xed, 0x81, 0x37, 0xf0, 0x75, 0x49, 0xa4, 0x5d, + 0x85, 0xad, 0xa7, 0x88, 0xec, 0x21, 0xd7, 0xa5, 0x70, 0x5a, 0xf1, 0xa4, 0x4a, 0xda, 0x1d, 0x68, + 0xcd, 0xa2, 0x84, 0x98, 0x26, 0x14, 0x68, 0xb9, 0x94, 0x33, 0x13, 0x5f, 0x68, 0x1d, 0xa6, 0x92, + 0xe4, 0x48, 0x75, 0x5f, 0x0b, 0xb9, 0xae, 0xec, 0xbe, 0xf4, 0x5b, 0x7b, 0x02, 0xeb, 0x53, 0x94, + 0x71, 0x5d, 0x2c, 0x53, 0xb4, 0xe1, 0x78, 0x03, 0x5f, 0x14, 0x46, 0x35, 0xf1, 0x7e, 0x4c, 0x5e, + 0xb2, 0xc4, 0x17, 0x2d, 0x35, 0x62, 0x1f, 0x2c, 0x4e, 0x9b, 0xd4, 0xfe, 0x95, 0x12, 0x5b, 0x96, + 0xa0, 0x84, 0x98, 0x03, 0x58, 0x9d, 0x3e, 0xc7, 0xbd, 0x54, 0xbd, 0x59, 0xc0, 0xd4, 0x15, 0xeb, + 0xbe, 0x47, 0xc2, 0x48, 0x97, 0xfc, 0xed, 0x23, 0xa8, 0xa6, 0x11, 0xea, 0x1a, 0xe4, 0x5f, 0xa2, + 0x48, 0xd8, 0x4a, 0x3f, 0xd5, 0xdb, 0x50, 0x98, 0x98, 0xee, 0x18, 0x89, 0xd2, 0xdd, 0x9c, 0xb6, + 0x87, 0x8b, 0xd1, 0x39, 0xc9, 0x83, 0xdc, 0x7d, 0x45, 0xdb, 0x60, 0xae, 0x91, 0xa5, 0x33, 0xb6, + 0xe7, 0x00, 0x9a, 0xd3, 0x60, 0x61, 0xcb, 0xc7, 0x50, 0x96, 0x89, 0x22, 0xad, 0x99, 0xdb, 0x4b, + 0x12, 0x2a, 0xed, 0x0e, 0x0b, 0xd3, 0x5b, 0xd4, 0x7b, 0x11, 0xae, 0x77, 0x6f, 
0xcf, 0xbf, 0xcd, + 0xc1, 0xda, 0x53, 0x44, 0xf8, 0xec, 0xf4, 0xee, 0x23, 0xee, 0x26, 0x14, 0xd9, 0x12, 0xb7, 0x72, + 0x2c, 0x0d, 0xc5, 0x8a, 0x76, 0x67, 0x74, 0xce, 0xbb, 0xb3, 0xc0, 0xe7, 0x19, 0xbe, 0x26, 0xa0, + 0x27, 0x9c, 0xec, 0x26, 0xc8, 0x76, 0x6d, 0x4c, 0x1c, 0x74, 0x86, 0x45, 0xaf, 0xa8, 0x0a, 0xe0, + 0x73, 0x0a, 0x53, 0x3b, 0xb0, 0xc6, 0xf6, 0x60, 0xe3, 0x01, 0x36, 0x7c, 0xcf, 0x8d, 0x58, 0xb5, + 0x2a, 0xe9, 0xbc, 0x25, 0xb0, 0x73, 0xf1, 0x53, 0xcf, 0x8d, 0x12, 0x4a, 0xec, 0x7c, 0x29, 0x29, + 0x8b, 0x29, 0xca, 0x63, 0x0a, 0xa6, 0x94, 0xda, 0x11, 0xab, 0x00, 0xd2, 0x0b, 0xc2, 0x99, 0x3f, + 0x84, 0xa2, 0x18, 0x36, 0xb9, 0x03, 0x6e, 0x76, 0x67, 0xaf, 0x3e, 0x9c, 0x65, 0x1f, 0x0d, 0x1c, + 0xcf, 0x61, 0xf5, 0x51, 0xb0, 0x68, 0x9f, 0x43, 0x83, 0xee, 0xf8, 0x7e, 0x66, 0x1e, 0xed, 0x01, + 0x8f, 0xd2, 0x54, 0x45, 0x8d, 0x27, 0x10, 0x65, 0xe9, 0x04, 0xa2, 0xdd, 0x66, 0x79, 0x7a, 0x1c, + 0x4e, 0x9e, 0x4f, 0x47, 0x79, 0x5e, 0x15, 0x38, 0x84, 0x8d, 0x0c, 0x6d, 0x7c, 0xad, 0xa8, 0xe2, + 0x70, 0x92, 0x8c, 0xdf, 0x71, 0x72, 0x89, 0x3b, 0x5e, 0x8a, 0x05, 0x70, 0xfc, 0xad, 0x7d, 0xce, + 0xf4, 0x16, 0x77, 0x87, 0x77, 0xcd, 0x2e, 0xed, 0x47, 0x2c, 0x4a, 0x72, 0x37, 0xa1, 0x59, 0x47, + 0xa4, 0xdc, 0xe2, 0x9b, 0x8e, 0xc0, 0x6b, 0xbf, 0x4c, 0xb1, 0x5f, 0xbe, 0xcc, 0x53, 0x28, 0xf5, + 0x95, 0x4c, 0x61, 0xbe, 0xd0, 0x1e, 0xb2, 0x23, 0x9c, 0x19, 0x15, 0xd4, 0xdb, 0xb0, 0xca, 0x85, + 0x27, 0x73, 0x54, 0x56, 0x3b, 0x49, 0xa0, 0xf5, 0x98, 0x7a, 0x99, 0x20, 0x2d, 0xab, 0x01, 0x8f, + 0x99, 0xc8, 0x6c, 0xa4, 0xbe, 0x0f, 0xa5, 0x4c, 0x94, 0xae, 0xc4, 0x51, 0x8a, 0x0b, 0xc0, 0xea, + 0x44, 0x04, 0xe8, 0xbf, 0x0a, 0x6c, 0x1d, 0x78, 0x0e, 0x4f, 0x2d, 0xd1, 0x37, 0x2f, 0xef, 0x1a, + 0x1d, 0xda, 0xa2, 0x53, 0x1b, 0xc8, 0x45, 0x16, 0x31, 0xa6, 0x02, 0xbd, 0xb4, 0x79, 0x6f, 0x09, + 0xc6, 0x3e, 0xe5, 0x4b, 0x21, 0x92, 0x71, 0x7f, 0x25, 0x3d, 0xee, 0xbf, 0x9f, 0xb9, 0xe5, 0x31, + 0xb4, 0x66, 0x8d, 0x8f, 0x8f, 0x97, 0x1c, 0x1e, 0x94, 0xa5, 0xc3, 0xc3, 0x57, 0x39, 0xf8, 0xe0, + 0xc8, 0x35, 0x3d, 0x0f, 0xd9, 0xff, 0xe7, 0x59, 0xf0, 0x01, 0xd4, 0xcc, 0x89, 0xef, 0x24, 0xd3, + 0xd2, 0xca, 0x32, 0xce, 0x2a, 0xa3, 0x95, 0xbc, 0xef, 0xc7, 0x9f, 0x7f, 0x51, 0xe0, 0xda, 0x7c, + 0x5f, 0x7c, 0x03, 0xa6, 0xc0, 0xdf, 0xc0, 0x55, 0x1d, 0x8d, 0xfc, 0x49, 0x7c, 0x49, 0xa2, 0xd3, + 0xc0, 0x45, 0xa2, 0x28, 0x0b, 0x69, 0x2e, 0x29, 0xa4, 0x0b, 0x2e, 0xa9, 0x53, 0x77, 0xa5, 0x95, + 0xec, 0x2d, 0xed, 0x1a, 0xb4, 0xe7, 0x29, 0x20, 0x6e, 0x1d, 0x5f, 0x2b, 0xb0, 0xc9, 0xd1, 0xcc, + 0xa5, 0x17, 0x55, 0xee, 0x0d, 0x97, 0x69, 0xa9, 0x7b, 0x7e, 0x9e, 0xee, 0x2b, 0x0b, 0x75, 0x2f, + 0x64, 0x75, 0xbf, 0x0a, 0x5b, 0x33, 0xca, 0x09, 0xc5, 0x9f, 0xc0, 0x86, 0x4c, 0x86, 0xe9, 0x46, + 0xf0, 0x51, 0xa6, 0x72, 0x2f, 0x08, 0xa8, 0x2c, 0xdf, 0xbf, 0xa6, 0xf6, 0x4f, 0xef, 0x73, 0xe9, + 0xac, 0xea, 0xc1, 0xea, 0x85, 0x92, 0x49, 0x52, 0x69, 0x3a, 0xdc, 0xe0, 0xf0, 0xfe, 0x39, 0x41, + 0xa1, 0x67, 0xba, 0x6e, 0x7c, 0xcf, 0x41, 0xf6, 0x25, 0x0d, 0xfa, 0xbb, 0x02, 0xda, 0xb2, 0x4d, + 0x2f, 0x6d, 0xdd, 0x65, 0x0b, 0xc8, 0x3d, 0xa8, 0xf8, 0xee, 0x05, 0xcb, 0x07, 0xf8, 0xae, 0x3c, + 0x61, 0xda, 0x21, 0x94, 0x9e, 0xa5, 0x0e, 0xc3, 0xcc, 0xcb, 0x5e, 0x37, 0x65, 0x41, 0x2e, 0x7b, + 0x87, 0x98, 0x33, 0x94, 0x7e, 0x0a, 0xdb, 0x4f, 0x1c, 0xcf, 0x7e, 0xe4, 0xba, 0xfc, 0x35, 0xe0, + 0xc0, 0x7b, 0x9b, 0xd1, 0xf8, 0x6f, 0x0a, 0x7c, 0x7b, 0x21, 0xbb, 0xf0, 0xe9, 0x61, 0xe6, 0x79, + 0xe3, 0x5e, 0x6a, 0x78, 0x7a, 0x03, 0x2f, 0x1f, 0xae, 0xc4, 0xad, 0x43, 0xec, 0xd2, 0x7e, 0x06, + 0x95, 0x14, 0x78, 0xce, 0x9d, 0x63, 0x77, 0xfa, 0xce, 0x31, 0x67, 0x58, 0x4b, 0xee, 0x1b, 0xbf, + 0x82, 
0x02, 0x83, 0xbd, 0xa9, 0xe8, 0xa4, 0x4e, 0x34, 0xf7, 0xf3, 0x2d, 0x99, 0x0d, 0x3c, 0xe2, + 0x8d, 0xc4, 0xc9, 0x53, 0x03, 0xe1, 0x57, 0x0a, 0xb4, 0x58, 0x28, 0x7f, 0x62, 0x12, 0x14, 0x3a, + 0xa6, 0xeb, 0x7c, 0x89, 0x8e, 0x11, 0x21, 0x8e, 0x37, 0xc4, 0xea, 0x0d, 0x3a, 0x9d, 0x85, 0x43, + 0x24, 0x7a, 0xb7, 0x90, 0x5b, 0xe1, 0x30, 0xc6, 0xa5, 0x7e, 0x0f, 0xae, 0x60, 0x7f, 0x1c, 0x5a, + 0xc8, 0x40, 0xe7, 0x41, 0x88, 0x30, 0x76, 0x7c, 0x4f, 0xe8, 0xb1, 0xc6, 0x11, 0xfd, 0x18, 0x4e, + 0xeb, 0x8f, 0xc5, 0xde, 0xdb, 0x0c, 0xdb, 0x96, 0x65, 0xa6, 0xcc, 0x21, 0xfb, 0xb6, 0xab, 0xfd, + 0x2b, 0x07, 0xeb, 0xf3, 0xd4, 0x68, 0x43, 0xe9, 0xcc, 0x0f, 0x5f, 0x0e, 0x5c, 0xff, 0x4c, 0x9a, + 0x2e, 0xd7, 0xea, 0x87, 0xd0, 0x10, 0xf2, 0xa7, 0xb2, 0xaa, 0xac, 0xd7, 0x39, 0x38, 0xce, 0xc5, + 0x0f, 0xa1, 0x21, 0x6c, 0x89, 0x09, 0xb9, 0x02, 0x75, 0x0e, 0x7e, 0x96, 0x3c, 0xe6, 0x35, 0x30, + 0xf1, 0x03, 0x83, 0x3f, 0x81, 0x5b, 0x7e, 0x10, 0xc9, 0x57, 0x2a, 0x0a, 0x7e, 0x44, 0xa1, 0x7b, + 0x7e, 0x10, 0xa9, 0x9f, 0x89, 0x57, 0x27, 0x03, 0x0b, 0x3d, 0x5b, 0x05, 0x96, 0x3e, 0x37, 0x53, + 0xe1, 0x5c, 0xe4, 0x59, 0xf1, 0x06, 0x15, 0x5b, 0x28, 0x2b, 0x6f, 0x31, 0x55, 0x79, 0x6f, 0xc4, + 0xa3, 0x31, 0x89, 0x02, 0x84, 0xd9, 0x1b, 0x70, 0x59, 0xce, 0xc0, 0x27, 0x14, 0xa4, 0x7e, 0x17, + 0xd6, 0x90, 0xa8, 0x16, 0x86, 0xe5, 0x8e, 0x31, 0x41, 0xa1, 0x78, 0xf5, 0x6d, 0x48, 0xf8, 0x1e, + 0x07, 0x3f, 0xbe, 0xff, 0x8f, 0xd7, 0xdb, 0xca, 0x3f, 0x5f, 0x6f, 0x2b, 0xff, 0x7e, 0xbd, 0xad, + 0xfc, 0xf1, 0x3f, 0xdb, 0xdf, 0xfa, 0xc5, 0xee, 0xc4, 0x21, 0x08, 0xe3, 0xae, 0xe3, 0xf7, 0xf8, + 0x57, 0x6f, 0xe8, 0xf7, 0x26, 0xa4, 0xc7, 0xfe, 0x93, 0xe9, 0xc5, 0x36, 0x9c, 0x16, 0x19, 0xe0, + 0x93, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xf2, 0x55, 0x21, 0xc8, 0x27, 0x1a, 0x00, 0x00, } func (m *ExecuteVtctlCommandRequest) Marshal() (dAtA []byte, err error) { @@ -6286,6 +6297,13 @@ func (m *MaterializeSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.ExternalCluster) > 0 { + i -= len(m.ExternalCluster) + copy(dAtA[i:], m.ExternalCluster) + i = encodeVarintVtctldata(dAtA, i, uint64(len(m.ExternalCluster))) + i-- + dAtA[i] = 0x42 + } if len(m.TabletTypes) > 0 { i -= len(m.TabletTypes) copy(dAtA[i:], m.TabletTypes) @@ -7563,6 +7581,10 @@ func (m *MaterializeSettings) Size() (n int) { if l > 0 { n += 1 + l + sovVtctldata(uint64(l)) } + l = len(m.ExternalCluster) + if l > 0 { + n += 1 + l + sovVtctldata(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -14642,6 +14664,38 @@ func (m *MaterializeSettings) Unmarshal(dAtA []byte) error { } m.TabletTypes = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalCluster", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVtctldata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVtctldata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVtctldata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalCluster = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipVtctldata(dAtA[iNdEx:]) diff --git a/go/vt/sqlparser/ast.go b/go/vt/sqlparser/ast.go index 
d34548a5513..43e239c97e7 100644 --- a/go/vt/sqlparser/ast.go +++ b/go/vt/sqlparser/ast.go @@ -1466,7 +1466,7 @@ type ( func (*StarExpr) iSelectExpr() {} func (*AliasedExpr) iSelectExpr() {} -func (Nextval) iSelectExpr() {} +func (*Nextval) iSelectExpr() {} // Columns represents an insert column list. type Columns []ColIdent @@ -1579,7 +1579,6 @@ type ( Expr interface { iExpr() SQLNode - Clone() Expr } // AndExpr represents an AND expression. @@ -2639,7 +2638,7 @@ func (node *AliasedExpr) Format(buf *TrackedBuffer) { } // Format formats the node. -func (node Nextval) Format(buf *TrackedBuffer) { +func (node *Nextval) Format(buf *TrackedBuffer) { buf.astPrintf(node, "next %v values", node.Expr) } diff --git a/go/vt/sqlparser/ast_funcs.go b/go/vt/sqlparser/ast_funcs.go index 07fea6767d5..bd5773d438a 100644 --- a/go/vt/sqlparser/ast_funcs.go +++ b/go/vt/sqlparser/ast_funcs.go @@ -56,7 +56,10 @@ func Walk(visit Visit, nodes ...SQLNode) error { return err == nil // now we can abort the traversal if an error was found } - Rewrite(node, pre, post) + _, rewriterErr := Rewrite(node, pre, post) + if rewriterErr != nil { + return rewriterErr + } if err != nil { return err } @@ -391,10 +394,15 @@ func NewWhere(typ WhereType, expr Expr) *Where { // and replaces it with to. If from matches root, // then to is returned. func ReplaceExpr(root, from, to Expr) Expr { - tmp := Rewrite(root, replaceExpr(from, to), nil) + tmp, err := Rewrite(root, replaceExpr(from, to), nil) + if err != nil { + log.Errorf("Failed to rewrite expression. Rewriter returned an error: %s", err.Error()) + return from + } + expr, success := tmp.(Expr) if !success { - log.Errorf("Failed to rewrite expression. Rewriter returned a non-expression: " + String(tmp)) + log.Errorf("Failed to rewrite expression. 
Rewriter returned a non-expression: %s", String(tmp)) return from } @@ -1330,315 +1338,3 @@ const ( // DoubleAt represnts @@ DoubleAt ) - -func nilOrClone(in Expr) Expr { - if in == nil { - return nil - } - return in.Clone() -} - -// Clone implements the Expr interface -func (node *Subquery) Clone() Expr { - if node == nil { - return nil - } - panic("Subquery cloning not supported") -} - -// Clone implements the Expr interface -func (node *AndExpr) Clone() Expr { - if node == nil { - return nil - } - return &AndExpr{ - Left: nilOrClone(node.Left), - Right: nilOrClone(node.Right), - } -} - -// Clone implements the Expr interface -func (node *OrExpr) Clone() Expr { - if node == nil { - return nil - } - return &OrExpr{ - Left: nilOrClone(node.Left), - Right: nilOrClone(node.Right), - } -} - -// Clone implements the Expr interface -func (node *XorExpr) Clone() Expr { - if node == nil { - return nil - } - return &XorExpr{ - Left: nilOrClone(node.Left), - Right: nilOrClone(node.Right), - } -} - -// Clone implements the Expr interface -func (node *NotExpr) Clone() Expr { - if node == nil { - return nil - } - return &NotExpr{ - Expr: nilOrClone(node), - } -} - -// Clone implements the Expr interface -func (node *ComparisonExpr) Clone() Expr { - if node == nil { - return nil - } - return &ComparisonExpr{ - Operator: node.Operator, - Left: nilOrClone(node.Left), - Right: nilOrClone(node.Right), - Escape: nilOrClone(node.Escape), - } -} - -// Clone implements the Expr interface -func (node *RangeCond) Clone() Expr { - if node == nil { - return nil - } - return &RangeCond{ - Operator: node.Operator, - Left: nilOrClone(node.Left), - From: nilOrClone(node.From), - To: nilOrClone(node.To), - } -} - -// Clone implements the Expr interface -func (node *IsExpr) Clone() Expr { - if node == nil { - return nil - } - return &IsExpr{ - Operator: node.Operator, - Expr: nilOrClone(node.Expr), - } -} - -// Clone implements the Expr interface -func (node *ExistsExpr) Clone() Expr { - if node == nil { - return nil - } - return &ExistsExpr{ - Subquery: nilOrClone(node.Subquery).(*Subquery), - } -} - -// Clone implements the Expr interface -func (node *Literal) Clone() Expr { - if node == nil { - return nil - } - return &Literal{} -} - -// Clone implements the Expr interface -func (node Argument) Clone() Expr { - if node == nil { - return nil - } - cpy := make(Argument, len(node)) - copy(cpy, node) - return cpy -} - -// Clone implements the Expr interface -func (node *NullVal) Clone() Expr { - if node == nil { - return nil - } - return &NullVal{} -} - -// Clone implements the Expr interface -func (node BoolVal) Clone() Expr { - return node -} - -// Clone implements the Expr interface -func (node *ColName) Clone() Expr { - return node -} - -// Clone implements the Expr interface -func (node ValTuple) Clone() Expr { - if node == nil { - return nil - } - cpy := make(ValTuple, len(node)) - copy(cpy, node) - return cpy -} - -// Clone implements the Expr interface -func (node ListArg) Clone() Expr { - if node == nil { - return nil - } - cpy := make(ListArg, len(node)) - copy(cpy, node) - return cpy -} - -// Clone implements the Expr interface -func (node *BinaryExpr) Clone() Expr { - if node == nil { - return nil - } - return &BinaryExpr{ - Operator: node.Operator, - Left: nilOrClone(node.Left), - Right: nilOrClone(node.Right), - } -} - -// Clone implements the Expr interface -func (node *UnaryExpr) Clone() Expr { - if node == nil { - return nil - } - return &UnaryExpr{ - Operator: node.Operator, - Expr: 
nilOrClone(node.Expr), - } -} - -// Clone implements the Expr interface -func (node *IntervalExpr) Clone() Expr { - if node == nil { - return nil - } - return &IntervalExpr{ - Expr: nilOrClone(node.Expr), - Unit: node.Unit, - } -} - -// Clone implements the Expr interface -func (node *CollateExpr) Clone() Expr { - if node == nil { - return nil - } - return &CollateExpr{ - Expr: nilOrClone(node.Expr), - Charset: node.Charset, - } -} - -// Clone implements the Expr interface -func (node *FuncExpr) Clone() Expr { - if node == nil { - return nil - } - panic("FuncExpr cloning not supported") -} - -// Clone implements the Expr interface -func (node *TimestampFuncExpr) Clone() Expr { - if node == nil { - return nil - } - return &TimestampFuncExpr{ - Name: node.Name, - Expr1: nilOrClone(node.Expr1), - Expr2: nilOrClone(node.Expr2), - Unit: node.Unit, - } -} - -// Clone implements the Expr interface -func (node *CurTimeFuncExpr) Clone() Expr { - if node == nil { - return nil - } - return &CurTimeFuncExpr{ - Name: node.Name, - Fsp: nilOrClone(node.Fsp), - } -} - -// Clone implements the Expr interface -func (node *CaseExpr) Clone() Expr { - if node == nil { - return nil - } - panic("CaseExpr cloning not supported") -} - -// Clone implements the Expr interface -func (node *ValuesFuncExpr) Clone() Expr { - if node == nil { - return nil - } - return &ValuesFuncExpr{ - Name: nilOrClone(node.Name).(*ColName), - } -} - -// Clone implements the Expr interface -func (node *ConvertExpr) Clone() Expr { - if node == nil { - return nil - } - panic("ConvertExpr cloning not supported") -} - -// Clone implements the Expr interface -func (node *SubstrExpr) Clone() Expr { - if node == nil { - return nil - } - return &SubstrExpr{ - Name: node.Name, - StrVal: nilOrClone(node.StrVal).(*Literal), - From: nilOrClone(node.From), - To: nilOrClone(node.To), - } -} - -// Clone implements the Expr interface -func (node *ConvertUsingExpr) Clone() Expr { - if node == nil { - return nil - } - return &ConvertUsingExpr{ - Expr: nilOrClone(node.Expr), - Type: node.Type, - } -} - -// Clone implements the Expr interface -func (node *MatchExpr) Clone() Expr { - if node == nil { - return nil - } - panic("MatchExpr cloning not supported") -} - -// Clone implements the Expr interface -func (node *GroupConcatExpr) Clone() Expr { - if node == nil { - return nil - } - panic("GroupConcatExpr cloning not supported") -} - -// Clone implements the Expr interface -func (node *Default) Clone() Expr { - if node == nil { - return nil - } - return &Default{ColName: node.ColName} -} diff --git a/go/vt/sqlparser/ast_rewriting.go b/go/vt/sqlparser/ast_rewriting.go index 2494a39527f..6b075f731ab 100644 --- a/go/vt/sqlparser/ast_rewriting.go +++ b/go/vt/sqlparser/ast_rewriting.go @@ -35,7 +35,10 @@ type RewriteASTResult struct { // PrepareAST will normalize the query func PrepareAST(in Statement, bindVars map[string]*querypb.BindVariable, prefix string, parameterize bool, keyspace string) (*RewriteASTResult, error) { if parameterize { - Normalize(in, bindVars, prefix) + err := Normalize(in, bindVars, prefix) + if err != nil { + return nil, err + } } return RewriteAST(in, keyspace) } @@ -45,7 +48,11 @@ func RewriteAST(in Statement, keyspace string) (*RewriteASTResult, error) { er := newExpressionRewriter(keyspace) er.shouldRewriteDatabaseFunc = shouldRewriteDatabaseFunc(in) setRewriter := &setNormalizer{} - out, ok := Rewrite(in, er.rewrite, setRewriter.rewriteSetComingUp).(Statement) + result, err := Rewrite(in, er.rewrite, 
setRewriter.rewriteSetComingUp) + if err != nil { + return nil, err + } + out, ok := result.(Statement) if !ok { return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "statement rewriting returned a non statement: %s", String(out)) } @@ -114,7 +121,10 @@ const ( func (er *expressionRewriter) rewriteAliasedExpr(node *AliasedExpr) (*BindVarNeeds, error) { inner := newExpressionRewriter(er.keyspace) inner.shouldRewriteDatabaseFunc = er.shouldRewriteDatabaseFunc - tmp := Rewrite(node.Expr, inner.rewrite, nil) + tmp, err := Rewrite(node.Expr, inner.rewrite, nil) + if err != nil { + return nil, err + } newExpr, ok := tmp.(Expr) if !ok { return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to rewrite AST. function expected to return Expr returned a %s", String(tmp)) @@ -312,7 +322,11 @@ func (er *expressionRewriter) unnestSubQueries(cursor *Cursor, subquery *Subquer er.bindVars.NoteRewrite() // we need to make sure that the inner expression also gets rewritten, // so we fire off another rewriter traversal here - rewrittenExpr := Rewrite(expr.Expr, er.rewrite, nil) + rewrittenExpr, err := Rewrite(expr.Expr, er.rewrite, nil) + if err != nil { + er.err = err + return + } cursor.Replace(rewrittenExpr) } diff --git a/go/vt/sqlparser/clone.go b/go/vt/sqlparser/clone.go new file mode 100644 index 00000000000..7999702feb7 --- /dev/null +++ b/go/vt/sqlparser/clone.go @@ -0,0 +1,2487 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by ASTHelperGen. DO NOT EDIT. + +package sqlparser + +// CloneAlterOption creates a deep clone of the input. +func CloneAlterOption(in AlterOption) AlterOption { + if in == nil { + return nil + } + switch in := in.(type) { + case *AddColumns: + return CloneRefOfAddColumns(in) + case *AddConstraintDefinition: + return CloneRefOfAddConstraintDefinition(in) + case *AddIndexDefinition: + return CloneRefOfAddIndexDefinition(in) + case AlgorithmValue: + return in + case *AlterCharset: + return CloneRefOfAlterCharset(in) + case *AlterColumn: + return CloneRefOfAlterColumn(in) + case *ChangeColumn: + return CloneRefOfChangeColumn(in) + case *DropColumn: + return CloneRefOfDropColumn(in) + case *DropKey: + return CloneRefOfDropKey(in) + case *Force: + return CloneRefOfForce(in) + case *KeyState: + return CloneRefOfKeyState(in) + case *LockOption: + return CloneRefOfLockOption(in) + case *ModifyColumn: + return CloneRefOfModifyColumn(in) + case *OrderByOption: + return CloneRefOfOrderByOption(in) + case *RenameIndex: + return CloneRefOfRenameIndex(in) + case *RenameTableName: + return CloneRefOfRenameTableName(in) + case TableOptions: + return CloneTableOptions(in) + case *TablespaceOperation: + return CloneRefOfTablespaceOperation(in) + case *Validation: + return CloneRefOfValidation(in) + default: + // this should never happen + return nil + } +} + +// CloneCharacteristic creates a deep clone of the input. 
+func CloneCharacteristic(in Characteristic) Characteristic { + if in == nil { + return nil + } + switch in := in.(type) { + case AccessMode: + return in + case IsolationLevel: + return in + default: + // this should never happen + return nil + } +} + +// CloneColTuple creates a deep clone of the input. +func CloneColTuple(in ColTuple) ColTuple { + if in == nil { + return nil + } + switch in := in.(type) { + case ListArg: + return CloneListArg(in) + case *Subquery: + return CloneRefOfSubquery(in) + case ValTuple: + return CloneValTuple(in) + default: + // this should never happen + return nil + } +} + +// CloneConstraintInfo creates a deep clone of the input. +func CloneConstraintInfo(in ConstraintInfo) ConstraintInfo { + if in == nil { + return nil + } + switch in := in.(type) { + case *CheckConstraintDefinition: + return CloneRefOfCheckConstraintDefinition(in) + case *ForeignKeyDefinition: + return CloneRefOfForeignKeyDefinition(in) + default: + // this should never happen + return nil + } +} + +// CloneDBDDLStatement creates a deep clone of the input. +func CloneDBDDLStatement(in DBDDLStatement) DBDDLStatement { + if in == nil { + return nil + } + switch in := in.(type) { + case *AlterDatabase: + return CloneRefOfAlterDatabase(in) + case *CreateDatabase: + return CloneRefOfCreateDatabase(in) + case *DropDatabase: + return CloneRefOfDropDatabase(in) + default: + // this should never happen + return nil + } +} + +// CloneDDLStatement creates a deep clone of the input. +func CloneDDLStatement(in DDLStatement) DDLStatement { + if in == nil { + return nil + } + switch in := in.(type) { + case *AlterTable: + return CloneRefOfAlterTable(in) + case *AlterView: + return CloneRefOfAlterView(in) + case *CreateTable: + return CloneRefOfCreateTable(in) + case *CreateView: + return CloneRefOfCreateView(in) + case *DropTable: + return CloneRefOfDropTable(in) + case *DropView: + return CloneRefOfDropView(in) + case *RenameTable: + return CloneRefOfRenameTable(in) + case *TruncateTable: + return CloneRefOfTruncateTable(in) + default: + // this should never happen + return nil + } +} + +// CloneExplain creates a deep clone of the input. +func CloneExplain(in Explain) Explain { + if in == nil { + return nil + } + switch in := in.(type) { + case *ExplainStmt: + return CloneRefOfExplainStmt(in) + case *ExplainTab: + return CloneRefOfExplainTab(in) + default: + // this should never happen + return nil + } +} + +// CloneExpr creates a deep clone of the input. 
+func CloneExpr(in Expr) Expr { + if in == nil { + return nil + } + switch in := in.(type) { + case *AndExpr: + return CloneRefOfAndExpr(in) + case Argument: + return CloneArgument(in) + case *BinaryExpr: + return CloneRefOfBinaryExpr(in) + case BoolVal: + return in + case *CaseExpr: + return CloneRefOfCaseExpr(in) + case *ColName: + return CloneRefOfColName(in) + case *CollateExpr: + return CloneRefOfCollateExpr(in) + case *ComparisonExpr: + return CloneRefOfComparisonExpr(in) + case *ConvertExpr: + return CloneRefOfConvertExpr(in) + case *ConvertUsingExpr: + return CloneRefOfConvertUsingExpr(in) + case *CurTimeFuncExpr: + return CloneRefOfCurTimeFuncExpr(in) + case *Default: + return CloneRefOfDefault(in) + case *ExistsExpr: + return CloneRefOfExistsExpr(in) + case *FuncExpr: + return CloneRefOfFuncExpr(in) + case *GroupConcatExpr: + return CloneRefOfGroupConcatExpr(in) + case *IntervalExpr: + return CloneRefOfIntervalExpr(in) + case *IsExpr: + return CloneRefOfIsExpr(in) + case ListArg: + return CloneListArg(in) + case *Literal: + return CloneRefOfLiteral(in) + case *MatchExpr: + return CloneRefOfMatchExpr(in) + case *NotExpr: + return CloneRefOfNotExpr(in) + case *NullVal: + return CloneRefOfNullVal(in) + case *OrExpr: + return CloneRefOfOrExpr(in) + case *RangeCond: + return CloneRefOfRangeCond(in) + case *Subquery: + return CloneRefOfSubquery(in) + case *SubstrExpr: + return CloneRefOfSubstrExpr(in) + case *TimestampFuncExpr: + return CloneRefOfTimestampFuncExpr(in) + case *UnaryExpr: + return CloneRefOfUnaryExpr(in) + case ValTuple: + return CloneValTuple(in) + case *ValuesFuncExpr: + return CloneRefOfValuesFuncExpr(in) + case *XorExpr: + return CloneRefOfXorExpr(in) + default: + // this should never happen + return nil + } +} + +// CloneInsertRows creates a deep clone of the input. +func CloneInsertRows(in InsertRows) InsertRows { + if in == nil { + return nil + } + switch in := in.(type) { + case *ParenSelect: + return CloneRefOfParenSelect(in) + case *Select: + return CloneRefOfSelect(in) + case *Union: + return CloneRefOfUnion(in) + case Values: + return CloneValues(in) + default: + // this should never happen + return nil + } +} + +// CloneSQLNode creates a deep clone of the input. 
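
Since this patch drops the hand-written Clone() method from the Expr interface (see the ast.go and ast_funcs.go hunks above), code that previously called expr.Clone() would now go through the generated helpers such as CloneExpr. A minimal usage sketch follows; it is illustrative only and not part of the patch, and it assumes the existing sqlparser.Parse and sqlparser.String entry points.

package main

import (
	"fmt"

	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	// Parse a statement, then deep-clone its WHERE expression with the
	// generated CloneExpr before working on the copy.
	stmt, err := sqlparser.Parse("select id from t where a = 1 and b = 2")
	if err != nil {
		panic(err)
	}
	sel := stmt.(*sqlparser.Select)
	clone := sqlparser.CloneExpr(sel.Where.Expr)
	// The clone is an independent tree; mutating it does not touch sel.
	fmt.Println(sqlparser.String(clone))
}

The type switch in CloneExpr dispatches to the per-type CloneRefOf helpers generated later in this file.
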
+func CloneSQLNode(in SQLNode) SQLNode { + if in == nil { + return nil + } + switch in := in.(type) { + case AccessMode: + return in + case *AddColumns: + return CloneRefOfAddColumns(in) + case *AddConstraintDefinition: + return CloneRefOfAddConstraintDefinition(in) + case *AddIndexDefinition: + return CloneRefOfAddIndexDefinition(in) + case AlgorithmValue: + return in + case *AliasedExpr: + return CloneRefOfAliasedExpr(in) + case *AliasedTableExpr: + return CloneRefOfAliasedTableExpr(in) + case *AlterCharset: + return CloneRefOfAlterCharset(in) + case *AlterColumn: + return CloneRefOfAlterColumn(in) + case *AlterDatabase: + return CloneRefOfAlterDatabase(in) + case *AlterTable: + return CloneRefOfAlterTable(in) + case *AlterView: + return CloneRefOfAlterView(in) + case *AlterVschema: + return CloneRefOfAlterVschema(in) + case *AndExpr: + return CloneRefOfAndExpr(in) + case Argument: + return CloneArgument(in) + case *AutoIncSpec: + return CloneRefOfAutoIncSpec(in) + case *Begin: + return CloneRefOfBegin(in) + case *BinaryExpr: + return CloneRefOfBinaryExpr(in) + case BoolVal: + return in + case *CallProc: + return CloneRefOfCallProc(in) + case *CaseExpr: + return CloneRefOfCaseExpr(in) + case *ChangeColumn: + return CloneRefOfChangeColumn(in) + case *CheckConstraintDefinition: + return CloneRefOfCheckConstraintDefinition(in) + case ColIdent: + return CloneColIdent(in) + case *ColName: + return CloneRefOfColName(in) + case *CollateExpr: + return CloneRefOfCollateExpr(in) + case *ColumnDefinition: + return CloneRefOfColumnDefinition(in) + case *ColumnType: + return CloneRefOfColumnType(in) + case Columns: + return CloneColumns(in) + case Comments: + return CloneComments(in) + case *Commit: + return CloneRefOfCommit(in) + case *ComparisonExpr: + return CloneRefOfComparisonExpr(in) + case *ConstraintDefinition: + return CloneRefOfConstraintDefinition(in) + case *ConvertExpr: + return CloneRefOfConvertExpr(in) + case *ConvertType: + return CloneRefOfConvertType(in) + case *ConvertUsingExpr: + return CloneRefOfConvertUsingExpr(in) + case *CreateDatabase: + return CloneRefOfCreateDatabase(in) + case *CreateTable: + return CloneRefOfCreateTable(in) + case *CreateView: + return CloneRefOfCreateView(in) + case *CurTimeFuncExpr: + return CloneRefOfCurTimeFuncExpr(in) + case *Default: + return CloneRefOfDefault(in) + case *Delete: + return CloneRefOfDelete(in) + case *DerivedTable: + return CloneRefOfDerivedTable(in) + case *DropColumn: + return CloneRefOfDropColumn(in) + case *DropDatabase: + return CloneRefOfDropDatabase(in) + case *DropKey: + return CloneRefOfDropKey(in) + case *DropTable: + return CloneRefOfDropTable(in) + case *DropView: + return CloneRefOfDropView(in) + case *ExistsExpr: + return CloneRefOfExistsExpr(in) + case *ExplainStmt: + return CloneRefOfExplainStmt(in) + case *ExplainTab: + return CloneRefOfExplainTab(in) + case Exprs: + return CloneExprs(in) + case *Flush: + return CloneRefOfFlush(in) + case *Force: + return CloneRefOfForce(in) + case *ForeignKeyDefinition: + return CloneRefOfForeignKeyDefinition(in) + case *FuncExpr: + return CloneRefOfFuncExpr(in) + case GroupBy: + return CloneGroupBy(in) + case *GroupConcatExpr: + return CloneRefOfGroupConcatExpr(in) + case *IndexDefinition: + return CloneRefOfIndexDefinition(in) + case *IndexHints: + return CloneRefOfIndexHints(in) + case *IndexInfo: + return CloneRefOfIndexInfo(in) + case *Insert: + return CloneRefOfInsert(in) + case *IntervalExpr: + return CloneRefOfIntervalExpr(in) + case *IsExpr: + return CloneRefOfIsExpr(in) + 
case IsolationLevel: + return in + case JoinCondition: + return CloneJoinCondition(in) + case *JoinTableExpr: + return CloneRefOfJoinTableExpr(in) + case *KeyState: + return CloneRefOfKeyState(in) + case *Limit: + return CloneRefOfLimit(in) + case ListArg: + return CloneListArg(in) + case *Literal: + return CloneRefOfLiteral(in) + case *Load: + return CloneRefOfLoad(in) + case *LockOption: + return CloneRefOfLockOption(in) + case *LockTables: + return CloneRefOfLockTables(in) + case *MatchExpr: + return CloneRefOfMatchExpr(in) + case *ModifyColumn: + return CloneRefOfModifyColumn(in) + case *Nextval: + return CloneRefOfNextval(in) + case *NotExpr: + return CloneRefOfNotExpr(in) + case *NullVal: + return CloneRefOfNullVal(in) + case OnDup: + return CloneOnDup(in) + case *OptLike: + return CloneRefOfOptLike(in) + case *OrExpr: + return CloneRefOfOrExpr(in) + case *Order: + return CloneRefOfOrder(in) + case OrderBy: + return CloneOrderBy(in) + case *OrderByOption: + return CloneRefOfOrderByOption(in) + case *OtherAdmin: + return CloneRefOfOtherAdmin(in) + case *OtherRead: + return CloneRefOfOtherRead(in) + case *ParenSelect: + return CloneRefOfParenSelect(in) + case *ParenTableExpr: + return CloneRefOfParenTableExpr(in) + case *PartitionDefinition: + return CloneRefOfPartitionDefinition(in) + case *PartitionSpec: + return CloneRefOfPartitionSpec(in) + case Partitions: + return ClonePartitions(in) + case *RangeCond: + return CloneRefOfRangeCond(in) + case ReferenceAction: + return in + case *Release: + return CloneRefOfRelease(in) + case *RenameIndex: + return CloneRefOfRenameIndex(in) + case *RenameTable: + return CloneRefOfRenameTable(in) + case *RenameTableName: + return CloneRefOfRenameTableName(in) + case *Rollback: + return CloneRefOfRollback(in) + case *SRollback: + return CloneRefOfSRollback(in) + case *Savepoint: + return CloneRefOfSavepoint(in) + case *Select: + return CloneRefOfSelect(in) + case SelectExprs: + return CloneSelectExprs(in) + case *SelectInto: + return CloneRefOfSelectInto(in) + case *Set: + return CloneRefOfSet(in) + case *SetExpr: + return CloneRefOfSetExpr(in) + case SetExprs: + return CloneSetExprs(in) + case *SetTransaction: + return CloneRefOfSetTransaction(in) + case *Show: + return CloneRefOfShow(in) + case *ShowBasic: + return CloneRefOfShowBasic(in) + case *ShowCreate: + return CloneRefOfShowCreate(in) + case *ShowFilter: + return CloneRefOfShowFilter(in) + case *ShowLegacy: + return CloneRefOfShowLegacy(in) + case *StarExpr: + return CloneRefOfStarExpr(in) + case *Stream: + return CloneRefOfStream(in) + case *Subquery: + return CloneRefOfSubquery(in) + case *SubstrExpr: + return CloneRefOfSubstrExpr(in) + case TableExprs: + return CloneTableExprs(in) + case TableIdent: + return CloneTableIdent(in) + case TableName: + return CloneTableName(in) + case TableNames: + return CloneTableNames(in) + case TableOptions: + return CloneTableOptions(in) + case *TableSpec: + return CloneRefOfTableSpec(in) + case *TablespaceOperation: + return CloneRefOfTablespaceOperation(in) + case *TimestampFuncExpr: + return CloneRefOfTimestampFuncExpr(in) + case *TruncateTable: + return CloneRefOfTruncateTable(in) + case *UnaryExpr: + return CloneRefOfUnaryExpr(in) + case *Union: + return CloneRefOfUnion(in) + case *UnionSelect: + return CloneRefOfUnionSelect(in) + case *UnlockTables: + return CloneRefOfUnlockTables(in) + case *Update: + return CloneRefOfUpdate(in) + case *UpdateExpr: + return CloneRefOfUpdateExpr(in) + case UpdateExprs: + return CloneUpdateExprs(in) + case *Use: + 
return CloneRefOfUse(in) + case *VStream: + return CloneRefOfVStream(in) + case ValTuple: + return CloneValTuple(in) + case *Validation: + return CloneRefOfValidation(in) + case Values: + return CloneValues(in) + case *ValuesFuncExpr: + return CloneRefOfValuesFuncExpr(in) + case VindexParam: + return CloneVindexParam(in) + case *VindexSpec: + return CloneRefOfVindexSpec(in) + case *When: + return CloneRefOfWhen(in) + case *Where: + return CloneRefOfWhere(in) + case *XorExpr: + return CloneRefOfXorExpr(in) + default: + // this should never happen + return nil + } +} + +// CloneSelectExpr creates a deep clone of the input. +func CloneSelectExpr(in SelectExpr) SelectExpr { + if in == nil { + return nil + } + switch in := in.(type) { + case *AliasedExpr: + return CloneRefOfAliasedExpr(in) + case *Nextval: + return CloneRefOfNextval(in) + case *StarExpr: + return CloneRefOfStarExpr(in) + default: + // this should never happen + return nil + } +} + +// CloneSelectStatement creates a deep clone of the input. +func CloneSelectStatement(in SelectStatement) SelectStatement { + if in == nil { + return nil + } + switch in := in.(type) { + case *ParenSelect: + return CloneRefOfParenSelect(in) + case *Select: + return CloneRefOfSelect(in) + case *Union: + return CloneRefOfUnion(in) + default: + // this should never happen + return nil + } +} + +// CloneShowInternal creates a deep clone of the input. +func CloneShowInternal(in ShowInternal) ShowInternal { + if in == nil { + return nil + } + switch in := in.(type) { + case *ShowBasic: + return CloneRefOfShowBasic(in) + case *ShowCreate: + return CloneRefOfShowCreate(in) + case *ShowLegacy: + return CloneRefOfShowLegacy(in) + default: + // this should never happen + return nil + } +} + +// CloneSimpleTableExpr creates a deep clone of the input. +func CloneSimpleTableExpr(in SimpleTableExpr) SimpleTableExpr { + if in == nil { + return nil + } + switch in := in.(type) { + case *DerivedTable: + return CloneRefOfDerivedTable(in) + case TableName: + return CloneTableName(in) + default: + // this should never happen + return nil + } +} + +// CloneStatement creates a deep clone of the input. 
+func CloneStatement(in Statement) Statement { + if in == nil { + return nil + } + switch in := in.(type) { + case *AlterDatabase: + return CloneRefOfAlterDatabase(in) + case *AlterTable: + return CloneRefOfAlterTable(in) + case *AlterView: + return CloneRefOfAlterView(in) + case *AlterVschema: + return CloneRefOfAlterVschema(in) + case *Begin: + return CloneRefOfBegin(in) + case *CallProc: + return CloneRefOfCallProc(in) + case *Commit: + return CloneRefOfCommit(in) + case *CreateDatabase: + return CloneRefOfCreateDatabase(in) + case *CreateTable: + return CloneRefOfCreateTable(in) + case *CreateView: + return CloneRefOfCreateView(in) + case *Delete: + return CloneRefOfDelete(in) + case *DropDatabase: + return CloneRefOfDropDatabase(in) + case *DropTable: + return CloneRefOfDropTable(in) + case *DropView: + return CloneRefOfDropView(in) + case *ExplainStmt: + return CloneRefOfExplainStmt(in) + case *ExplainTab: + return CloneRefOfExplainTab(in) + case *Flush: + return CloneRefOfFlush(in) + case *Insert: + return CloneRefOfInsert(in) + case *Load: + return CloneRefOfLoad(in) + case *LockTables: + return CloneRefOfLockTables(in) + case *OtherAdmin: + return CloneRefOfOtherAdmin(in) + case *OtherRead: + return CloneRefOfOtherRead(in) + case *ParenSelect: + return CloneRefOfParenSelect(in) + case *Release: + return CloneRefOfRelease(in) + case *RenameTable: + return CloneRefOfRenameTable(in) + case *Rollback: + return CloneRefOfRollback(in) + case *SRollback: + return CloneRefOfSRollback(in) + case *Savepoint: + return CloneRefOfSavepoint(in) + case *Select: + return CloneRefOfSelect(in) + case *Set: + return CloneRefOfSet(in) + case *SetTransaction: + return CloneRefOfSetTransaction(in) + case *Show: + return CloneRefOfShow(in) + case *Stream: + return CloneRefOfStream(in) + case *TruncateTable: + return CloneRefOfTruncateTable(in) + case *Union: + return CloneRefOfUnion(in) + case *UnlockTables: + return CloneRefOfUnlockTables(in) + case *Update: + return CloneRefOfUpdate(in) + case *Use: + return CloneRefOfUse(in) + case *VStream: + return CloneRefOfVStream(in) + default: + // this should never happen + return nil + } +} + +// CloneTableExpr creates a deep clone of the input. +func CloneTableExpr(in TableExpr) TableExpr { + if in == nil { + return nil + } + switch in := in.(type) { + case *AliasedTableExpr: + return CloneRefOfAliasedTableExpr(in) + case *JoinTableExpr: + return CloneRefOfJoinTableExpr(in) + case *ParenTableExpr: + return CloneRefOfParenTableExpr(in) + default: + // this should never happen + return nil + } +} + +// CloneRefOfAddColumns creates a deep clone of the input. +func CloneRefOfAddColumns(n *AddColumns) *AddColumns { + if n == nil { + return nil + } + out := *n + out.Columns = CloneSliceOfRefOfColumnDefinition(n.Columns) + out.First = CloneRefOfColName(n.First) + out.After = CloneRefOfColName(n.After) + return &out +} + +// CloneRefOfAddConstraintDefinition creates a deep clone of the input. +func CloneRefOfAddConstraintDefinition(n *AddConstraintDefinition) *AddConstraintDefinition { + if n == nil { + return nil + } + out := *n + out.ConstraintDefinition = CloneRefOfConstraintDefinition(n.ConstraintDefinition) + return &out +} + +// CloneRefOfAddIndexDefinition creates a deep clone of the input. 
+func CloneRefOfAddIndexDefinition(n *AddIndexDefinition) *AddIndexDefinition { + if n == nil { + return nil + } + out := *n + out.IndexDefinition = CloneRefOfIndexDefinition(n.IndexDefinition) + return &out +} + +// CloneRefOfAlterCharset creates a deep clone of the input. +func CloneRefOfAlterCharset(n *AlterCharset) *AlterCharset { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfAlterColumn creates a deep clone of the input. +func CloneRefOfAlterColumn(n *AlterColumn) *AlterColumn { + if n == nil { + return nil + } + out := *n + out.Column = CloneRefOfColName(n.Column) + out.DefaultVal = CloneExpr(n.DefaultVal) + return &out +} + +// CloneRefOfChangeColumn creates a deep clone of the input. +func CloneRefOfChangeColumn(n *ChangeColumn) *ChangeColumn { + if n == nil { + return nil + } + out := *n + out.OldColumn = CloneRefOfColName(n.OldColumn) + out.NewColDefinition = CloneRefOfColumnDefinition(n.NewColDefinition) + out.First = CloneRefOfColName(n.First) + out.After = CloneRefOfColName(n.After) + return &out +} + +// CloneRefOfDropColumn creates a deep clone of the input. +func CloneRefOfDropColumn(n *DropColumn) *DropColumn { + if n == nil { + return nil + } + out := *n + out.Name = CloneRefOfColName(n.Name) + return &out +} + +// CloneRefOfDropKey creates a deep clone of the input. +func CloneRefOfDropKey(n *DropKey) *DropKey { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfForce creates a deep clone of the input. +func CloneRefOfForce(n *Force) *Force { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfKeyState creates a deep clone of the input. +func CloneRefOfKeyState(n *KeyState) *KeyState { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfLockOption creates a deep clone of the input. +func CloneRefOfLockOption(n *LockOption) *LockOption { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfModifyColumn creates a deep clone of the input. +func CloneRefOfModifyColumn(n *ModifyColumn) *ModifyColumn { + if n == nil { + return nil + } + out := *n + out.NewColDefinition = CloneRefOfColumnDefinition(n.NewColDefinition) + out.First = CloneRefOfColName(n.First) + out.After = CloneRefOfColName(n.After) + return &out +} + +// CloneRefOfOrderByOption creates a deep clone of the input. +func CloneRefOfOrderByOption(n *OrderByOption) *OrderByOption { + if n == nil { + return nil + } + out := *n + out.Cols = CloneColumns(n.Cols) + return &out +} + +// CloneRefOfRenameIndex creates a deep clone of the input. +func CloneRefOfRenameIndex(n *RenameIndex) *RenameIndex { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfRenameTableName creates a deep clone of the input. +func CloneRefOfRenameTableName(n *RenameTableName) *RenameTableName { + if n == nil { + return nil + } + out := *n + out.Table = CloneTableName(n.Table) + return &out +} + +// CloneTableOptions creates a deep clone of the input. +func CloneTableOptions(n TableOptions) TableOptions { + res := make(TableOptions, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfTableOption(x)) + } + return res +} + +// CloneRefOfTablespaceOperation creates a deep clone of the input. +func CloneRefOfTablespaceOperation(n *TablespaceOperation) *TablespaceOperation { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfValidation creates a deep clone of the input. 
+func CloneRefOfValidation(n *Validation) *Validation { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneListArg creates a deep clone of the input. +func CloneListArg(n ListArg) ListArg { + res := make(ListArg, 0, len(n)) + copy(res, n) + return res +} + +// CloneRefOfSubquery creates a deep clone of the input. +func CloneRefOfSubquery(n *Subquery) *Subquery { + if n == nil { + return nil + } + out := *n + out.Select = CloneSelectStatement(n.Select) + return &out +} + +// CloneValTuple creates a deep clone of the input. +func CloneValTuple(n ValTuple) ValTuple { + res := make(ValTuple, 0, len(n)) + for _, x := range n { + res = append(res, CloneExpr(x)) + } + return res +} + +// CloneRefOfCheckConstraintDefinition creates a deep clone of the input. +func CloneRefOfCheckConstraintDefinition(n *CheckConstraintDefinition) *CheckConstraintDefinition { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneRefOfForeignKeyDefinition creates a deep clone of the input. +func CloneRefOfForeignKeyDefinition(n *ForeignKeyDefinition) *ForeignKeyDefinition { + if n == nil { + return nil + } + out := *n + out.Source = CloneColumns(n.Source) + out.ReferencedTable = CloneTableName(n.ReferencedTable) + out.ReferencedColumns = CloneColumns(n.ReferencedColumns) + return &out +} + +// CloneRefOfAlterDatabase creates a deep clone of the input. +func CloneRefOfAlterDatabase(n *AlterDatabase) *AlterDatabase { + if n == nil { + return nil + } + out := *n + out.AlterOptions = CloneSliceOfCollateAndCharset(n.AlterOptions) + return &out +} + +// CloneRefOfCreateDatabase creates a deep clone of the input. +func CloneRefOfCreateDatabase(n *CreateDatabase) *CreateDatabase { + if n == nil { + return nil + } + out := *n + out.CreateOptions = CloneSliceOfCollateAndCharset(n.CreateOptions) + return &out +} + +// CloneRefOfDropDatabase creates a deep clone of the input. +func CloneRefOfDropDatabase(n *DropDatabase) *DropDatabase { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfAlterTable creates a deep clone of the input. +func CloneRefOfAlterTable(n *AlterTable) *AlterTable { + if n == nil { + return nil + } + out := *n + out.Table = CloneTableName(n.Table) + out.AlterOptions = CloneSliceOfAlterOption(n.AlterOptions) + out.PartitionSpec = CloneRefOfPartitionSpec(n.PartitionSpec) + return &out +} + +// CloneRefOfAlterView creates a deep clone of the input. +func CloneRefOfAlterView(n *AlterView) *AlterView { + if n == nil { + return nil + } + out := *n + out.ViewName = CloneTableName(n.ViewName) + out.Columns = CloneColumns(n.Columns) + out.Select = CloneSelectStatement(n.Select) + return &out +} + +// CloneRefOfCreateTable creates a deep clone of the input. +func CloneRefOfCreateTable(n *CreateTable) *CreateTable { + if n == nil { + return nil + } + out := *n + out.Table = CloneTableName(n.Table) + out.TableSpec = CloneRefOfTableSpec(n.TableSpec) + out.OptLike = CloneRefOfOptLike(n.OptLike) + return &out +} + +// CloneRefOfCreateView creates a deep clone of the input. +func CloneRefOfCreateView(n *CreateView) *CreateView { + if n == nil { + return nil + } + out := *n + out.ViewName = CloneTableName(n.ViewName) + out.Columns = CloneColumns(n.Columns) + out.Select = CloneSelectStatement(n.Select) + return &out +} + +// CloneRefOfDropTable creates a deep clone of the input. 
+func CloneRefOfDropTable(n *DropTable) *DropTable { + if n == nil { + return nil + } + out := *n + out.FromTables = CloneTableNames(n.FromTables) + return &out +} + +// CloneRefOfDropView creates a deep clone of the input. +func CloneRefOfDropView(n *DropView) *DropView { + if n == nil { + return nil + } + out := *n + out.FromTables = CloneTableNames(n.FromTables) + return &out +} + +// CloneRefOfRenameTable creates a deep clone of the input. +func CloneRefOfRenameTable(n *RenameTable) *RenameTable { + if n == nil { + return nil + } + out := *n + out.TablePairs = CloneSliceOfRefOfRenameTablePair(n.TablePairs) + return &out +} + +// CloneRefOfTruncateTable creates a deep clone of the input. +func CloneRefOfTruncateTable(n *TruncateTable) *TruncateTable { + if n == nil { + return nil + } + out := *n + out.Table = CloneTableName(n.Table) + return &out +} + +// CloneRefOfExplainStmt creates a deep clone of the input. +func CloneRefOfExplainStmt(n *ExplainStmt) *ExplainStmt { + if n == nil { + return nil + } + out := *n + out.Statement = CloneStatement(n.Statement) + return &out +} + +// CloneRefOfExplainTab creates a deep clone of the input. +func CloneRefOfExplainTab(n *ExplainTab) *ExplainTab { + if n == nil { + return nil + } + out := *n + out.Table = CloneTableName(n.Table) + return &out +} + +// CloneRefOfAndExpr creates a deep clone of the input. +func CloneRefOfAndExpr(n *AndExpr) *AndExpr { + if n == nil { + return nil + } + out := *n + out.Left = CloneExpr(n.Left) + out.Right = CloneExpr(n.Right) + return &out +} + +// CloneArgument creates a deep clone of the input. +func CloneArgument(n Argument) Argument { + res := make(Argument, 0, len(n)) + copy(res, n) + return res +} + +// CloneRefOfBinaryExpr creates a deep clone of the input. +func CloneRefOfBinaryExpr(n *BinaryExpr) *BinaryExpr { + if n == nil { + return nil + } + out := *n + out.Left = CloneExpr(n.Left) + out.Right = CloneExpr(n.Right) + return &out +} + +// CloneRefOfCaseExpr creates a deep clone of the input. +func CloneRefOfCaseExpr(n *CaseExpr) *CaseExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + out.Whens = CloneSliceOfRefOfWhen(n.Whens) + out.Else = CloneExpr(n.Else) + return &out +} + +// CloneRefOfColName creates a deep clone of the input. +func CloneRefOfColName(n *ColName) *ColName { + return n +} + +// CloneRefOfCollateExpr creates a deep clone of the input. +func CloneRefOfCollateExpr(n *CollateExpr) *CollateExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneRefOfComparisonExpr creates a deep clone of the input. +func CloneRefOfComparisonExpr(n *ComparisonExpr) *ComparisonExpr { + if n == nil { + return nil + } + out := *n + out.Left = CloneExpr(n.Left) + out.Right = CloneExpr(n.Right) + out.Escape = CloneExpr(n.Escape) + return &out +} + +// CloneRefOfConvertExpr creates a deep clone of the input. +func CloneRefOfConvertExpr(n *ConvertExpr) *ConvertExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + out.Type = CloneRefOfConvertType(n.Type) + return &out +} + +// CloneRefOfConvertUsingExpr creates a deep clone of the input. +func CloneRefOfConvertUsingExpr(n *ConvertUsingExpr) *ConvertUsingExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneRefOfCurTimeFuncExpr creates a deep clone of the input. 
+func CloneRefOfCurTimeFuncExpr(n *CurTimeFuncExpr) *CurTimeFuncExpr { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + out.Fsp = CloneExpr(n.Fsp) + return &out +} + +// CloneRefOfDefault creates a deep clone of the input. +func CloneRefOfDefault(n *Default) *Default { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfExistsExpr creates a deep clone of the input. +func CloneRefOfExistsExpr(n *ExistsExpr) *ExistsExpr { + if n == nil { + return nil + } + out := *n + out.Subquery = CloneRefOfSubquery(n.Subquery) + return &out +} + +// CloneRefOfFuncExpr creates a deep clone of the input. +func CloneRefOfFuncExpr(n *FuncExpr) *FuncExpr { + if n == nil { + return nil + } + out := *n + out.Qualifier = CloneTableIdent(n.Qualifier) + out.Name = CloneColIdent(n.Name) + out.Exprs = CloneSelectExprs(n.Exprs) + return &out +} + +// CloneRefOfGroupConcatExpr creates a deep clone of the input. +func CloneRefOfGroupConcatExpr(n *GroupConcatExpr) *GroupConcatExpr { + if n == nil { + return nil + } + out := *n + out.Exprs = CloneSelectExprs(n.Exprs) + out.OrderBy = CloneOrderBy(n.OrderBy) + out.Limit = CloneRefOfLimit(n.Limit) + return &out +} + +// CloneRefOfIntervalExpr creates a deep clone of the input. +func CloneRefOfIntervalExpr(n *IntervalExpr) *IntervalExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneRefOfIsExpr creates a deep clone of the input. +func CloneRefOfIsExpr(n *IsExpr) *IsExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneRefOfLiteral creates a deep clone of the input. +func CloneRefOfLiteral(n *Literal) *Literal { + if n == nil { + return nil + } + out := *n + out.Val = CloneSliceOfbyte(n.Val) + return &out +} + +// CloneRefOfMatchExpr creates a deep clone of the input. +func CloneRefOfMatchExpr(n *MatchExpr) *MatchExpr { + if n == nil { + return nil + } + out := *n + out.Columns = CloneSelectExprs(n.Columns) + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneRefOfNotExpr creates a deep clone of the input. +func CloneRefOfNotExpr(n *NotExpr) *NotExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneRefOfNullVal creates a deep clone of the input. +func CloneRefOfNullVal(n *NullVal) *NullVal { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfOrExpr creates a deep clone of the input. +func CloneRefOfOrExpr(n *OrExpr) *OrExpr { + if n == nil { + return nil + } + out := *n + out.Left = CloneExpr(n.Left) + out.Right = CloneExpr(n.Right) + return &out +} + +// CloneRefOfRangeCond creates a deep clone of the input. +func CloneRefOfRangeCond(n *RangeCond) *RangeCond { + if n == nil { + return nil + } + out := *n + out.Left = CloneExpr(n.Left) + out.From = CloneExpr(n.From) + out.To = CloneExpr(n.To) + return &out +} + +// CloneRefOfSubstrExpr creates a deep clone of the input. +func CloneRefOfSubstrExpr(n *SubstrExpr) *SubstrExpr { + if n == nil { + return nil + } + out := *n + out.Name = CloneRefOfColName(n.Name) + out.StrVal = CloneRefOfLiteral(n.StrVal) + out.From = CloneExpr(n.From) + out.To = CloneExpr(n.To) + return &out +} + +// CloneRefOfTimestampFuncExpr creates a deep clone of the input. 
+func CloneRefOfTimestampFuncExpr(n *TimestampFuncExpr) *TimestampFuncExpr { + if n == nil { + return nil + } + out := *n + out.Expr1 = CloneExpr(n.Expr1) + out.Expr2 = CloneExpr(n.Expr2) + return &out +} + +// CloneRefOfUnaryExpr creates a deep clone of the input. +func CloneRefOfUnaryExpr(n *UnaryExpr) *UnaryExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneRefOfValuesFuncExpr creates a deep clone of the input. +func CloneRefOfValuesFuncExpr(n *ValuesFuncExpr) *ValuesFuncExpr { + if n == nil { + return nil + } + out := *n + out.Name = CloneRefOfColName(n.Name) + return &out +} + +// CloneRefOfXorExpr creates a deep clone of the input. +func CloneRefOfXorExpr(n *XorExpr) *XorExpr { + if n == nil { + return nil + } + out := *n + out.Left = CloneExpr(n.Left) + out.Right = CloneExpr(n.Right) + return &out +} + +// CloneRefOfParenSelect creates a deep clone of the input. +func CloneRefOfParenSelect(n *ParenSelect) *ParenSelect { + if n == nil { + return nil + } + out := *n + out.Select = CloneSelectStatement(n.Select) + return &out +} + +// CloneRefOfSelect creates a deep clone of the input. +func CloneRefOfSelect(n *Select) *Select { + if n == nil { + return nil + } + out := *n + out.Cache = CloneRefOfbool(n.Cache) + out.Comments = CloneComments(n.Comments) + out.SelectExprs = CloneSelectExprs(n.SelectExprs) + out.From = CloneTableExprs(n.From) + out.Where = CloneRefOfWhere(n.Where) + out.GroupBy = CloneGroupBy(n.GroupBy) + out.Having = CloneRefOfWhere(n.Having) + out.OrderBy = CloneOrderBy(n.OrderBy) + out.Limit = CloneRefOfLimit(n.Limit) + out.Into = CloneRefOfSelectInto(n.Into) + return &out +} + +// CloneRefOfUnion creates a deep clone of the input. +func CloneRefOfUnion(n *Union) *Union { + if n == nil { + return nil + } + out := *n + out.FirstStatement = CloneSelectStatement(n.FirstStatement) + out.UnionSelects = CloneSliceOfRefOfUnionSelect(n.UnionSelects) + out.OrderBy = CloneOrderBy(n.OrderBy) + out.Limit = CloneRefOfLimit(n.Limit) + return &out +} + +// CloneValues creates a deep clone of the input. +func CloneValues(n Values) Values { + res := make(Values, 0, len(n)) + for _, x := range n { + res = append(res, CloneValTuple(x)) + } + return res +} + +// CloneRefOfAliasedExpr creates a deep clone of the input. +func CloneRefOfAliasedExpr(n *AliasedExpr) *AliasedExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + out.As = CloneColIdent(n.As) + return &out +} + +// CloneRefOfAliasedTableExpr creates a deep clone of the input. +func CloneRefOfAliasedTableExpr(n *AliasedTableExpr) *AliasedTableExpr { + if n == nil { + return nil + } + out := *n + out.Expr = CloneSimpleTableExpr(n.Expr) + out.Partitions = ClonePartitions(n.Partitions) + out.As = CloneTableIdent(n.As) + out.Hints = CloneRefOfIndexHints(n.Hints) + return &out +} + +// CloneRefOfAlterVschema creates a deep clone of the input. +func CloneRefOfAlterVschema(n *AlterVschema) *AlterVschema { + if n == nil { + return nil + } + out := *n + out.Table = CloneTableName(n.Table) + out.VindexSpec = CloneRefOfVindexSpec(n.VindexSpec) + out.VindexCols = CloneSliceOfColIdent(n.VindexCols) + out.AutoIncSpec = CloneRefOfAutoIncSpec(n.AutoIncSpec) + return &out +} + +// CloneRefOfAutoIncSpec creates a deep clone of the input. 
+func CloneRefOfAutoIncSpec(n *AutoIncSpec) *AutoIncSpec { + if n == nil { + return nil + } + out := *n + out.Column = CloneColIdent(n.Column) + out.Sequence = CloneTableName(n.Sequence) + return &out +} + +// CloneRefOfBegin creates a deep clone of the input. +func CloneRefOfBegin(n *Begin) *Begin { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfCallProc creates a deep clone of the input. +func CloneRefOfCallProc(n *CallProc) *CallProc { + if n == nil { + return nil + } + out := *n + out.Name = CloneTableName(n.Name) + out.Params = CloneExprs(n.Params) + return &out +} + +// CloneColIdent creates a deep clone of the input. +func CloneColIdent(n ColIdent) ColIdent { + return *CloneRefOfColIdent(&n) +} + +// CloneRefOfColumnDefinition creates a deep clone of the input. +func CloneRefOfColumnDefinition(n *ColumnDefinition) *ColumnDefinition { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + out.Type = CloneColumnType(n.Type) + return &out +} + +// CloneRefOfColumnType creates a deep clone of the input. +func CloneRefOfColumnType(n *ColumnType) *ColumnType { + if n == nil { + return nil + } + out := *n + out.Options = CloneRefOfColumnTypeOptions(n.Options) + out.Length = CloneRefOfLiteral(n.Length) + out.Scale = CloneRefOfLiteral(n.Scale) + out.EnumValues = CloneSliceOfstring(n.EnumValues) + return &out +} + +// CloneColumns creates a deep clone of the input. +func CloneColumns(n Columns) Columns { + res := make(Columns, 0, len(n)) + for _, x := range n { + res = append(res, CloneColIdent(x)) + } + return res +} + +// CloneComments creates a deep clone of the input. +func CloneComments(n Comments) Comments { + res := make(Comments, 0, len(n)) + for _, x := range n { + res = append(res, CloneSliceOfbyte(x)) + } + return res +} + +// CloneRefOfCommit creates a deep clone of the input. +func CloneRefOfCommit(n *Commit) *Commit { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfConstraintDefinition creates a deep clone of the input. +func CloneRefOfConstraintDefinition(n *ConstraintDefinition) *ConstraintDefinition { + if n == nil { + return nil + } + out := *n + out.Details = CloneConstraintInfo(n.Details) + return &out +} + +// CloneRefOfConvertType creates a deep clone of the input. +func CloneRefOfConvertType(n *ConvertType) *ConvertType { + if n == nil { + return nil + } + out := *n + out.Length = CloneRefOfLiteral(n.Length) + out.Scale = CloneRefOfLiteral(n.Scale) + return &out +} + +// CloneRefOfDelete creates a deep clone of the input. +func CloneRefOfDelete(n *Delete) *Delete { + if n == nil { + return nil + } + out := *n + out.Comments = CloneComments(n.Comments) + out.Targets = CloneTableNames(n.Targets) + out.TableExprs = CloneTableExprs(n.TableExprs) + out.Partitions = ClonePartitions(n.Partitions) + out.Where = CloneRefOfWhere(n.Where) + out.OrderBy = CloneOrderBy(n.OrderBy) + out.Limit = CloneRefOfLimit(n.Limit) + return &out +} + +// CloneRefOfDerivedTable creates a deep clone of the input. +func CloneRefOfDerivedTable(n *DerivedTable) *DerivedTable { + if n == nil { + return nil + } + out := *n + out.Select = CloneSelectStatement(n.Select) + return &out +} + +// CloneExprs creates a deep clone of the input. +func CloneExprs(n Exprs) Exprs { + res := make(Exprs, 0, len(n)) + for _, x := range n { + res = append(res, CloneExpr(x)) + } + return res +} + +// CloneRefOfFlush creates a deep clone of the input. 
+func CloneRefOfFlush(n *Flush) *Flush { + if n == nil { + return nil + } + out := *n + out.FlushOptions = CloneSliceOfstring(n.FlushOptions) + out.TableNames = CloneTableNames(n.TableNames) + return &out +} + +// CloneGroupBy creates a deep clone of the input. +func CloneGroupBy(n GroupBy) GroupBy { + res := make(GroupBy, 0, len(n)) + for _, x := range n { + res = append(res, CloneExpr(x)) + } + return res +} + +// CloneRefOfIndexDefinition creates a deep clone of the input. +func CloneRefOfIndexDefinition(n *IndexDefinition) *IndexDefinition { + if n == nil { + return nil + } + out := *n + out.Info = CloneRefOfIndexInfo(n.Info) + out.Columns = CloneSliceOfRefOfIndexColumn(n.Columns) + out.Options = CloneSliceOfRefOfIndexOption(n.Options) + return &out +} + +// CloneRefOfIndexHints creates a deep clone of the input. +func CloneRefOfIndexHints(n *IndexHints) *IndexHints { + if n == nil { + return nil + } + out := *n + out.Indexes = CloneSliceOfColIdent(n.Indexes) + return &out +} + +// CloneRefOfIndexInfo creates a deep clone of the input. +func CloneRefOfIndexInfo(n *IndexInfo) *IndexInfo { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + out.ConstraintName = CloneColIdent(n.ConstraintName) + return &out +} + +// CloneRefOfInsert creates a deep clone of the input. +func CloneRefOfInsert(n *Insert) *Insert { + if n == nil { + return nil + } + out := *n + out.Comments = CloneComments(n.Comments) + out.Table = CloneTableName(n.Table) + out.Partitions = ClonePartitions(n.Partitions) + out.Columns = CloneColumns(n.Columns) + out.Rows = CloneInsertRows(n.Rows) + out.OnDup = CloneOnDup(n.OnDup) + return &out +} + +// CloneJoinCondition creates a deep clone of the input. +func CloneJoinCondition(n JoinCondition) JoinCondition { + return *CloneRefOfJoinCondition(&n) +} + +// CloneRefOfJoinTableExpr creates a deep clone of the input. +func CloneRefOfJoinTableExpr(n *JoinTableExpr) *JoinTableExpr { + if n == nil { + return nil + } + out := *n + out.LeftExpr = CloneTableExpr(n.LeftExpr) + out.RightExpr = CloneTableExpr(n.RightExpr) + out.Condition = CloneJoinCondition(n.Condition) + return &out +} + +// CloneRefOfLimit creates a deep clone of the input. +func CloneRefOfLimit(n *Limit) *Limit { + if n == nil { + return nil + } + out := *n + out.Offset = CloneExpr(n.Offset) + out.Rowcount = CloneExpr(n.Rowcount) + return &out +} + +// CloneRefOfLoad creates a deep clone of the input. +func CloneRefOfLoad(n *Load) *Load { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfLockTables creates a deep clone of the input. +func CloneRefOfLockTables(n *LockTables) *LockTables { + if n == nil { + return nil + } + out := *n + out.Tables = CloneTableAndLockTypes(n.Tables) + return &out +} + +// CloneRefOfNextval creates a deep clone of the input. +func CloneRefOfNextval(n *Nextval) *Nextval { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneOnDup creates a deep clone of the input. +func CloneOnDup(n OnDup) OnDup { + res := make(OnDup, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfUpdateExpr(x)) + } + return res +} + +// CloneRefOfOptLike creates a deep clone of the input. +func CloneRefOfOptLike(n *OptLike) *OptLike { + if n == nil { + return nil + } + out := *n + out.LikeTable = CloneTableName(n.LikeTable) + return &out +} + +// CloneRefOfOrder creates a deep clone of the input. 
+func CloneRefOfOrder(n *Order) *Order { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneOrderBy creates a deep clone of the input. +func CloneOrderBy(n OrderBy) OrderBy { + res := make(OrderBy, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfOrder(x)) + } + return res +} + +// CloneRefOfOtherAdmin creates a deep clone of the input. +func CloneRefOfOtherAdmin(n *OtherAdmin) *OtherAdmin { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfOtherRead creates a deep clone of the input. +func CloneRefOfOtherRead(n *OtherRead) *OtherRead { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfParenTableExpr creates a deep clone of the input. +func CloneRefOfParenTableExpr(n *ParenTableExpr) *ParenTableExpr { + if n == nil { + return nil + } + out := *n + out.Exprs = CloneTableExprs(n.Exprs) + return &out +} + +// CloneRefOfPartitionDefinition creates a deep clone of the input. +func CloneRefOfPartitionDefinition(n *PartitionDefinition) *PartitionDefinition { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + out.Limit = CloneExpr(n.Limit) + return &out +} + +// CloneRefOfPartitionSpec creates a deep clone of the input. +func CloneRefOfPartitionSpec(n *PartitionSpec) *PartitionSpec { + if n == nil { + return nil + } + out := *n + out.Names = ClonePartitions(n.Names) + out.Number = CloneRefOfLiteral(n.Number) + out.TableName = CloneTableName(n.TableName) + out.Definitions = CloneSliceOfRefOfPartitionDefinition(n.Definitions) + return &out +} + +// ClonePartitions creates a deep clone of the input. +func ClonePartitions(n Partitions) Partitions { + res := make(Partitions, 0, len(n)) + for _, x := range n { + res = append(res, CloneColIdent(x)) + } + return res +} + +// CloneRefOfRelease creates a deep clone of the input. +func CloneRefOfRelease(n *Release) *Release { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + return &out +} + +// CloneRefOfRollback creates a deep clone of the input. +func CloneRefOfRollback(n *Rollback) *Rollback { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfSRollback creates a deep clone of the input. +func CloneRefOfSRollback(n *SRollback) *SRollback { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + return &out +} + +// CloneRefOfSavepoint creates a deep clone of the input. +func CloneRefOfSavepoint(n *Savepoint) *Savepoint { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + return &out +} + +// CloneSelectExprs creates a deep clone of the input. +func CloneSelectExprs(n SelectExprs) SelectExprs { + res := make(SelectExprs, 0, len(n)) + for _, x := range n { + res = append(res, CloneSelectExpr(x)) + } + return res +} + +// CloneRefOfSelectInto creates a deep clone of the input. +func CloneRefOfSelectInto(n *SelectInto) *SelectInto { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfSet creates a deep clone of the input. +func CloneRefOfSet(n *Set) *Set { + if n == nil { + return nil + } + out := *n + out.Comments = CloneComments(n.Comments) + out.Exprs = CloneSetExprs(n.Exprs) + return &out +} + +// CloneRefOfSetExpr creates a deep clone of the input. 
+func CloneRefOfSetExpr(n *SetExpr) *SetExpr { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneSetExprs creates a deep clone of the input. +func CloneSetExprs(n SetExprs) SetExprs { + res := make(SetExprs, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfSetExpr(x)) + } + return res +} + +// CloneRefOfSetTransaction creates a deep clone of the input. +func CloneRefOfSetTransaction(n *SetTransaction) *SetTransaction { + if n == nil { + return nil + } + out := *n + out.SQLNode = CloneSQLNode(n.SQLNode) + out.Comments = CloneComments(n.Comments) + out.Characteristics = CloneSliceOfCharacteristic(n.Characteristics) + return &out +} + +// CloneRefOfShow creates a deep clone of the input. +func CloneRefOfShow(n *Show) *Show { + if n == nil { + return nil + } + out := *n + out.Internal = CloneShowInternal(n.Internal) + return &out +} + +// CloneRefOfShowBasic creates a deep clone of the input. +func CloneRefOfShowBasic(n *ShowBasic) *ShowBasic { + if n == nil { + return nil + } + out := *n + out.Tbl = CloneTableName(n.Tbl) + out.Filter = CloneRefOfShowFilter(n.Filter) + return &out +} + +// CloneRefOfShowCreate creates a deep clone of the input. +func CloneRefOfShowCreate(n *ShowCreate) *ShowCreate { + if n == nil { + return nil + } + out := *n + out.Op = CloneTableName(n.Op) + return &out +} + +// CloneRefOfShowFilter creates a deep clone of the input. +func CloneRefOfShowFilter(n *ShowFilter) *ShowFilter { + if n == nil { + return nil + } + out := *n + out.Filter = CloneExpr(n.Filter) + return &out +} + +// CloneRefOfShowLegacy creates a deep clone of the input. +func CloneRefOfShowLegacy(n *ShowLegacy) *ShowLegacy { + if n == nil { + return nil + } + out := *n + out.OnTable = CloneTableName(n.OnTable) + out.Table = CloneTableName(n.Table) + out.ShowTablesOpt = CloneRefOfShowTablesOpt(n.ShowTablesOpt) + out.ShowCollationFilterOpt = CloneExpr(n.ShowCollationFilterOpt) + return &out +} + +// CloneRefOfStarExpr creates a deep clone of the input. +func CloneRefOfStarExpr(n *StarExpr) *StarExpr { + if n == nil { + return nil + } + out := *n + out.TableName = CloneTableName(n.TableName) + return &out +} + +// CloneRefOfStream creates a deep clone of the input. +func CloneRefOfStream(n *Stream) *Stream { + if n == nil { + return nil + } + out := *n + out.Comments = CloneComments(n.Comments) + out.SelectExpr = CloneSelectExpr(n.SelectExpr) + out.Table = CloneTableName(n.Table) + return &out +} + +// CloneTableExprs creates a deep clone of the input. +func CloneTableExprs(n TableExprs) TableExprs { + res := make(TableExprs, 0, len(n)) + for _, x := range n { + res = append(res, CloneTableExpr(x)) + } + return res +} + +// CloneTableIdent creates a deep clone of the input. +func CloneTableIdent(n TableIdent) TableIdent { + return *CloneRefOfTableIdent(&n) +} + +// CloneTableName creates a deep clone of the input. +func CloneTableName(n TableName) TableName { + return *CloneRefOfTableName(&n) +} + +// CloneTableNames creates a deep clone of the input. +func CloneTableNames(n TableNames) TableNames { + res := make(TableNames, 0, len(n)) + for _, x := range n { + res = append(res, CloneTableName(x)) + } + return res +} + +// CloneRefOfTableSpec creates a deep clone of the input. 
+func CloneRefOfTableSpec(n *TableSpec) *TableSpec { + if n == nil { + return nil + } + out := *n + out.Columns = CloneSliceOfRefOfColumnDefinition(n.Columns) + out.Indexes = CloneSliceOfRefOfIndexDefinition(n.Indexes) + out.Constraints = CloneSliceOfRefOfConstraintDefinition(n.Constraints) + out.Options = CloneTableOptions(n.Options) + return &out +} + +// CloneRefOfUnionSelect creates a deep clone of the input. +func CloneRefOfUnionSelect(n *UnionSelect) *UnionSelect { + if n == nil { + return nil + } + out := *n + out.Statement = CloneSelectStatement(n.Statement) + return &out +} + +// CloneRefOfUnlockTables creates a deep clone of the input. +func CloneRefOfUnlockTables(n *UnlockTables) *UnlockTables { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfUpdate creates a deep clone of the input. +func CloneRefOfUpdate(n *Update) *Update { + if n == nil { + return nil + } + out := *n + out.Comments = CloneComments(n.Comments) + out.TableExprs = CloneTableExprs(n.TableExprs) + out.Exprs = CloneUpdateExprs(n.Exprs) + out.Where = CloneRefOfWhere(n.Where) + out.OrderBy = CloneOrderBy(n.OrderBy) + out.Limit = CloneRefOfLimit(n.Limit) + return &out +} + +// CloneRefOfUpdateExpr creates a deep clone of the input. +func CloneRefOfUpdateExpr(n *UpdateExpr) *UpdateExpr { + if n == nil { + return nil + } + out := *n + out.Name = CloneRefOfColName(n.Name) + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneUpdateExprs creates a deep clone of the input. +func CloneUpdateExprs(n UpdateExprs) UpdateExprs { + res := make(UpdateExprs, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfUpdateExpr(x)) + } + return res +} + +// CloneRefOfUse creates a deep clone of the input. +func CloneRefOfUse(n *Use) *Use { + if n == nil { + return nil + } + out := *n + out.DBName = CloneTableIdent(n.DBName) + return &out +} + +// CloneRefOfVStream creates a deep clone of the input. +func CloneRefOfVStream(n *VStream) *VStream { + if n == nil { + return nil + } + out := *n + out.Comments = CloneComments(n.Comments) + out.SelectExpr = CloneSelectExpr(n.SelectExpr) + out.Table = CloneTableName(n.Table) + out.Where = CloneRefOfWhere(n.Where) + out.Limit = CloneRefOfLimit(n.Limit) + return &out +} + +// CloneVindexParam creates a deep clone of the input. +func CloneVindexParam(n VindexParam) VindexParam { + return *CloneRefOfVindexParam(&n) +} + +// CloneRefOfVindexSpec creates a deep clone of the input. +func CloneRefOfVindexSpec(n *VindexSpec) *VindexSpec { + if n == nil { + return nil + } + out := *n + out.Name = CloneColIdent(n.Name) + out.Type = CloneColIdent(n.Type) + out.Params = CloneSliceOfVindexParam(n.Params) + return &out +} + +// CloneRefOfWhen creates a deep clone of the input. +func CloneRefOfWhen(n *When) *When { + if n == nil { + return nil + } + out := *n + out.Cond = CloneExpr(n.Cond) + out.Val = CloneExpr(n.Val) + return &out +} + +// CloneRefOfWhere creates a deep clone of the input. +func CloneRefOfWhere(n *Where) *Where { + if n == nil { + return nil + } + out := *n + out.Expr = CloneExpr(n.Expr) + return &out +} + +// CloneSliceOfRefOfColumnDefinition creates a deep clone of the input. +func CloneSliceOfRefOfColumnDefinition(n []*ColumnDefinition) []*ColumnDefinition { + res := make([]*ColumnDefinition, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfColumnDefinition(x)) + } + return res +} + +// CloneRefOfTableOption creates a deep clone of the input. 
+func CloneRefOfTableOption(n *TableOption) *TableOption { + if n == nil { + return nil + } + out := *n + out.Value = CloneRefOfLiteral(n.Value) + out.Tables = CloneTableNames(n.Tables) + return &out +} + +// CloneSliceOfCollateAndCharset creates a deep clone of the input. +func CloneSliceOfCollateAndCharset(n []CollateAndCharset) []CollateAndCharset { + res := make([]CollateAndCharset, 0, len(n)) + for _, x := range n { + res = append(res, CloneCollateAndCharset(x)) + } + return res +} + +// CloneSliceOfAlterOption creates a deep clone of the input. +func CloneSliceOfAlterOption(n []AlterOption) []AlterOption { + res := make([]AlterOption, 0, len(n)) + for _, x := range n { + res = append(res, CloneAlterOption(x)) + } + return res +} + +// CloneSliceOfRefOfRenameTablePair creates a deep clone of the input. +func CloneSliceOfRefOfRenameTablePair(n []*RenameTablePair) []*RenameTablePair { + res := make([]*RenameTablePair, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfRenameTablePair(x)) + } + return res +} + +// CloneSliceOfRefOfWhen creates a deep clone of the input. +func CloneSliceOfRefOfWhen(n []*When) []*When { + res := make([]*When, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfWhen(x)) + } + return res +} + +// CloneSliceOfbyte creates a deep clone of the input. +func CloneSliceOfbyte(n []byte) []byte { + res := make([]byte, 0, len(n)) + copy(res, n) + return res +} + +// CloneRefOfbool creates a deep clone of the input. +func CloneRefOfbool(n *bool) *bool { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneSliceOfRefOfUnionSelect creates a deep clone of the input. +func CloneSliceOfRefOfUnionSelect(n []*UnionSelect) []*UnionSelect { + res := make([]*UnionSelect, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfUnionSelect(x)) + } + return res +} + +// CloneSliceOfColIdent creates a deep clone of the input. +func CloneSliceOfColIdent(n []ColIdent) []ColIdent { + res := make([]ColIdent, 0, len(n)) + for _, x := range n { + res = append(res, CloneColIdent(x)) + } + return res +} + +// CloneRefOfColIdent creates a deep clone of the input. +func CloneRefOfColIdent(n *ColIdent) *ColIdent { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneColumnType creates a deep clone of the input. +func CloneColumnType(n ColumnType) ColumnType { + return *CloneRefOfColumnType(&n) +} + +// CloneRefOfColumnTypeOptions creates a deep clone of the input. +func CloneRefOfColumnTypeOptions(n *ColumnTypeOptions) *ColumnTypeOptions { + if n == nil { + return nil + } + out := *n + out.Default = CloneExpr(n.Default) + out.OnUpdate = CloneExpr(n.OnUpdate) + out.Comment = CloneRefOfLiteral(n.Comment) + return &out +} + +// CloneSliceOfstring creates a deep clone of the input. +func CloneSliceOfstring(n []string) []string { + res := make([]string, 0, len(n)) + copy(res, n) + return res +} + +// CloneSliceOfRefOfIndexColumn creates a deep clone of the input. +func CloneSliceOfRefOfIndexColumn(n []*IndexColumn) []*IndexColumn { + res := make([]*IndexColumn, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfIndexColumn(x)) + } + return res +} + +// CloneSliceOfRefOfIndexOption creates a deep clone of the input. +func CloneSliceOfRefOfIndexOption(n []*IndexOption) []*IndexOption { + res := make([]*IndexOption, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfIndexOption(x)) + } + return res +} + +// CloneRefOfJoinCondition creates a deep clone of the input. 
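One detail worth flagging in the hunk above: CloneSliceOfbyte and CloneSliceOfstring allocate a slice of length zero (make with only the capacity set to len(n)) and then call copy into it, and copy transfers only min(len(dst), len(src)) elements, so as written both helpers return an empty slice rather than a copy. The neighboring slice helpers avoid this by appending. A corrected sketch, offered as a suggestion rather than as what the generator currently emits; the functions would be drop-in replacements inside package sqlparser:

// CloneSliceOfbyte creates a deep clone of the input.
func CloneSliceOfbyte(n []byte) []byte {
	if n == nil {
		return nil
	}
	res := make([]byte, len(n)) // allocate length, not just capacity, so copy has room
	copy(res, n)
	return res
}

// CloneSliceOfstring creates a deep clone of the input.
func CloneSliceOfstring(n []string) []string {
	if n == nil {
		return nil
	}
	res := make([]string, len(n))
	copy(res, n)
	return res
}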
+func CloneRefOfJoinCondition(n *JoinCondition) *JoinCondition { + if n == nil { + return nil + } + out := *n + out.On = CloneExpr(n.On) + out.Using = CloneColumns(n.Using) + return &out +} + +// CloneTableAndLockTypes creates a deep clone of the input. +func CloneTableAndLockTypes(n TableAndLockTypes) TableAndLockTypes { + res := make(TableAndLockTypes, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfTableAndLockType(x)) + } + return res +} + +// CloneSliceOfRefOfPartitionDefinition creates a deep clone of the input. +func CloneSliceOfRefOfPartitionDefinition(n []*PartitionDefinition) []*PartitionDefinition { + res := make([]*PartitionDefinition, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfPartitionDefinition(x)) + } + return res +} + +// CloneSliceOfCharacteristic creates a deep clone of the input. +func CloneSliceOfCharacteristic(n []Characteristic) []Characteristic { + res := make([]Characteristic, 0, len(n)) + for _, x := range n { + res = append(res, CloneCharacteristic(x)) + } + return res +} + +// CloneRefOfShowTablesOpt creates a deep clone of the input. +func CloneRefOfShowTablesOpt(n *ShowTablesOpt) *ShowTablesOpt { + if n == nil { + return nil + } + out := *n + out.Filter = CloneRefOfShowFilter(n.Filter) + return &out +} + +// CloneRefOfTableIdent creates a deep clone of the input. +func CloneRefOfTableIdent(n *TableIdent) *TableIdent { + if n == nil { + return nil + } + out := *n + return &out +} + +// CloneRefOfTableName creates a deep clone of the input. +func CloneRefOfTableName(n *TableName) *TableName { + if n == nil { + return nil + } + out := *n + out.Name = CloneTableIdent(n.Name) + out.Qualifier = CloneTableIdent(n.Qualifier) + return &out +} + +// CloneSliceOfRefOfIndexDefinition creates a deep clone of the input. +func CloneSliceOfRefOfIndexDefinition(n []*IndexDefinition) []*IndexDefinition { + res := make([]*IndexDefinition, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfIndexDefinition(x)) + } + return res +} + +// CloneSliceOfRefOfConstraintDefinition creates a deep clone of the input. +func CloneSliceOfRefOfConstraintDefinition(n []*ConstraintDefinition) []*ConstraintDefinition { + res := make([]*ConstraintDefinition, 0, len(n)) + for _, x := range n { + res = append(res, CloneRefOfConstraintDefinition(x)) + } + return res +} + +// CloneRefOfVindexParam creates a deep clone of the input. +func CloneRefOfVindexParam(n *VindexParam) *VindexParam { + if n == nil { + return nil + } + out := *n + out.Key = CloneColIdent(n.Key) + return &out +} + +// CloneSliceOfVindexParam creates a deep clone of the input. +func CloneSliceOfVindexParam(n []VindexParam) []VindexParam { + res := make([]VindexParam, 0, len(n)) + for _, x := range n { + res = append(res, CloneVindexParam(x)) + } + return res +} + +// CloneCollateAndCharset creates a deep clone of the input. +func CloneCollateAndCharset(n CollateAndCharset) CollateAndCharset { + return *CloneRefOfCollateAndCharset(&n) +} + +// CloneRefOfRenameTablePair creates a deep clone of the input. +func CloneRefOfRenameTablePair(n *RenameTablePair) *RenameTablePair { + if n == nil { + return nil + } + out := *n + out.FromTable = CloneTableName(n.FromTable) + out.ToTable = CloneTableName(n.ToTable) + return &out +} + +// CloneRefOfIndexColumn creates a deep clone of the input. 
+func CloneRefOfIndexColumn(n *IndexColumn) *IndexColumn { + if n == nil { + return nil + } + out := *n + out.Column = CloneColIdent(n.Column) + out.Length = CloneRefOfLiteral(n.Length) + return &out +} + +// CloneRefOfIndexOption creates a deep clone of the input. +func CloneRefOfIndexOption(n *IndexOption) *IndexOption { + if n == nil { + return nil + } + out := *n + out.Value = CloneRefOfLiteral(n.Value) + return &out +} + +// CloneRefOfTableAndLockType creates a deep clone of the input. +func CloneRefOfTableAndLockType(n *TableAndLockType) *TableAndLockType { + if n == nil { + return nil + } + out := *n + out.Table = CloneTableExpr(n.Table) + return &out +} + +// CloneRefOfCollateAndCharset creates a deep clone of the input. +func CloneRefOfCollateAndCharset(n *CollateAndCharset) *CollateAndCharset { + if n == nil { + return nil + } + out := *n + return &out +} diff --git a/go/vt/sqlparser/normalizer.go b/go/vt/sqlparser/normalizer.go index ea5f7c3ee08..5716d1c91dd 100644 --- a/go/vt/sqlparser/normalizer.go +++ b/go/vt/sqlparser/normalizer.go @@ -31,9 +31,13 @@ import ( // Within Select constructs, bind vars are deduped. This allows // us to identify vindex equality. Otherwise, every value is // treated as distinct. -func Normalize(stmt Statement, bindVars map[string]*querypb.BindVariable, prefix string) { +func Normalize(stmt Statement, bindVars map[string]*querypb.BindVariable, prefix string) error { nz := newNormalizer(stmt, bindVars, prefix) - Rewrite(stmt, nz.WalkStatement, nil) + _, err := Rewrite(stmt, nz.WalkStatement, nil) + if err != nil { + return err + } + return nz.err } type normalizer struct { @@ -42,6 +46,7 @@ type normalizer struct { reserved map[string]struct{} counter int vals map[string]string + err error } func newNormalizer(stmt Statement, bindVars map[string]*querypb.BindVariable, prefix string) *normalizer { @@ -63,7 +68,8 @@ func (nz *normalizer) WalkStatement(cursor *Cursor) bool { case *Set, *Show, *Begin, *Commit, *Rollback, *Savepoint, *SetTransaction, DDLStatement, *SRollback, *Release, *OtherAdmin, *OtherRead: return false case *Select: - Rewrite(node, nz.WalkSelect, nil) + _, err := Rewrite(node, nz.WalkSelect, nil) + nz.err = err // Don't continue return false case *Literal: @@ -77,7 +83,7 @@ func (nz *normalizer) WalkStatement(cursor *Cursor) bool { case *ConvertType: // we should not rewrite the type description return false } - return true + return nz.err == nil // only continue if we haven't found any errors } // WalkSelect normalizes the AST in Select mode. 
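The normalizer.go hunk above changes Normalize to return an error now that Rewrite does, storing any failure from the nested Rewrite in nz.err and returning nz.err == nil from the walk functions so traversal stops once an error is seen. Callers that previously ignored the call need a small adjustment; a minimal sketch of the new call shape (the query text and the "vtg" prefix are illustrative values, not taken from this diff):

package main

import (
	"fmt"
	"log"

	querypb "vitess.io/vitess/go/vt/proto/query"
	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	stmt, err := sqlparser.Parse("select * from t where id = 42")
	if err != nil {
		log.Fatal(err)
	}

	bindVars := make(map[string]*querypb.BindVariable)
	// Normalize now returns an error instead of having no way to report one.
	if err := sqlparser.Normalize(stmt, bindVars, "vtg"); err != nil {
		log.Fatal(err)
	}

	fmt.Println(sqlparser.String(stmt)) // literals replaced by bind vars, e.g. :vtg1
	fmt.Println(len(bindVars), "bind variable(s) collected")
}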
@@ -98,7 +104,7 @@ func (nz *normalizer) WalkSelect(cursor *Cursor) bool { // we should not rewrite the type description return false } - return true + return nz.err == nil // only continue if we haven't found any errors } func (nz *normalizer) convertLiteralDedup(node *Literal, cursor *Cursor) { diff --git a/go/vt/sqlparser/normalizer_test.go b/go/vt/sqlparser/normalizer_test.go index c28d3c61ba5..7a40b6cab4a 100644 --- a/go/vt/sqlparser/normalizer_test.go +++ b/go/vt/sqlparser/normalizer_test.go @@ -21,6 +21,8 @@ import ( "reflect" "testing" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -229,7 +231,8 @@ func TestNormalize(t *testing.T) { continue } bv := make(map[string]*querypb.BindVariable) - Normalize(stmt, bv, prefix) + require.NoError(t, + Normalize(stmt, bv, prefix)) outstmt := String(stmt) if outstmt != tc.outstmt { t.Errorf("Query:\n%s:\n%s, want\n%s", tc.in, outstmt, tc.outstmt) @@ -271,6 +274,7 @@ func BenchmarkNormalize(b *testing.B) { b.Fatal(err) } for i := 0; i < b.N; i++ { - Normalize(ast, map[string]*querypb.BindVariable{}, "") + require.NoError(b, + Normalize(ast, map[string]*querypb.BindVariable{}, "")) } } diff --git a/go/vt/sqlparser/parse_next_test.go b/go/vt/sqlparser/parse_next_test.go index 9f4e9c486d9..493afa4a698 100644 --- a/go/vt/sqlparser/parse_next_test.go +++ b/go/vt/sqlparser/parse_next_test.go @@ -32,7 +32,7 @@ func TestParseNextValid(t *testing.T) { sql.WriteRune(';') } - tokens := NewTokenizer(&sql) + tokens := NewStringTokenizer(sql.String()) for i, tcase := range validSQL { input := tcase.input + ";" want := tcase.output diff --git a/go/vt/sqlparser/parse_test.go b/go/vt/sqlparser/parse_test.go index 1266bad3a92..f4028547062 100644 --- a/go/vt/sqlparser/parse_test.go +++ b/go/vt/sqlparser/parse_test.go @@ -77,7 +77,7 @@ var ( input: "select 1 from t # aa\n", output: "select 1 from t", }, { - input: "select 1 --aa\nfrom t", + input: "select 1 -- aa\nfrom t", output: "select 1 from t", }, { input: "select 1 #aa\nfrom t", @@ -840,6 +840,9 @@ var ( }, { input: "set character set 'utf8'", output: "set charset 'utf8'", + }, { + input: "set s = 1--4", + output: "set s = 1 - -4", }, { input: "set character set \"utf8\"", output: "set charset 'utf8'", diff --git a/go/vt/sqlparser/redact_query.go b/go/vt/sqlparser/redact_query.go index 55b760178f8..c46e1179494 100644 --- a/go/vt/sqlparser/redact_query.go +++ b/go/vt/sqlparser/redact_query.go @@ -29,7 +29,10 @@ func RedactSQLQuery(sql string) (string, error) { } prefix := "redacted" - Normalize(stmt, bv, prefix) + err = Normalize(stmt, bv, prefix) + if err != nil { + return "", err + } return comments.Leading + String(stmt) + comments.Trailing, nil } diff --git a/go/vt/sqlparser/rewriter.go b/go/vt/sqlparser/rewriter.go index 9e407fee6a3..18cefa42fca 100644 --- a/go/vt/sqlparser/rewriter.go +++ b/go/vt/sqlparser/rewriter.go @@ -1,1716 +1,924 @@ -// Code generated by visitorgen/main/main.go. DO NOT EDIT. +/* +Copyright 2021 The Vitess Authors. -package sqlparser - -//go:generate go run ./visitorgen/main -input=ast.go -output=rewriter.go - -import ( - "reflect" -) - -type replacerFunc func(newNode, parent SQLNode) - -// application carries all the shared data so we can pass it around cheaply. 
-type application struct { - pre, post ApplyFunc - cursor Cursor -} - -func replaceAddColumnsAfter(newNode, parent SQLNode) { - parent.(*AddColumns).After = newNode.(*ColName) -} - -type replaceAddColumnsColumns int - -func (r *replaceAddColumnsColumns) replace(newNode, container SQLNode) { - container.(*AddColumns).Columns[int(*r)] = newNode.(*ColumnDefinition) -} - -func (r *replaceAddColumnsColumns) inc() { - *r++ -} - -func replaceAddColumnsFirst(newNode, parent SQLNode) { - parent.(*AddColumns).First = newNode.(*ColName) -} - -func replaceAddConstraintDefinitionConstraintDefinition(newNode, parent SQLNode) { - parent.(*AddConstraintDefinition).ConstraintDefinition = newNode.(*ConstraintDefinition) -} - -func replaceAddIndexDefinitionIndexDefinition(newNode, parent SQLNode) { - parent.(*AddIndexDefinition).IndexDefinition = newNode.(*IndexDefinition) -} - -func replaceAliasedExprAs(newNode, parent SQLNode) { - parent.(*AliasedExpr).As = newNode.(ColIdent) -} - -func replaceAliasedExprExpr(newNode, parent SQLNode) { - parent.(*AliasedExpr).Expr = newNode.(Expr) -} - -func replaceAliasedTableExprAs(newNode, parent SQLNode) { - parent.(*AliasedTableExpr).As = newNode.(TableIdent) -} - -func replaceAliasedTableExprExpr(newNode, parent SQLNode) { - parent.(*AliasedTableExpr).Expr = newNode.(SimpleTableExpr) -} - -func replaceAliasedTableExprHints(newNode, parent SQLNode) { - parent.(*AliasedTableExpr).Hints = newNode.(*IndexHints) -} - -func replaceAliasedTableExprPartitions(newNode, parent SQLNode) { - parent.(*AliasedTableExpr).Partitions = newNode.(Partitions) -} - -func replaceAlterColumnColumn(newNode, parent SQLNode) { - parent.(*AlterColumn).Column = newNode.(*ColName) -} - -func replaceAlterColumnDefaultVal(newNode, parent SQLNode) { - parent.(*AlterColumn).DefaultVal = newNode.(Expr) -} - -type replaceAlterTableAlterOptions int - -func (r *replaceAlterTableAlterOptions) replace(newNode, container SQLNode) { - container.(*AlterTable).AlterOptions[int(*r)] = newNode.(AlterOption) -} - -func (r *replaceAlterTableAlterOptions) inc() { - *r++ -} - -func replaceAlterTablePartitionSpec(newNode, parent SQLNode) { - parent.(*AlterTable).PartitionSpec = newNode.(*PartitionSpec) -} - -func replaceAlterTableTable(newNode, parent SQLNode) { - parent.(*AlterTable).Table = newNode.(TableName) -} - -func replaceAlterViewColumns(newNode, parent SQLNode) { - parent.(*AlterView).Columns = newNode.(Columns) -} - -func replaceAlterViewSelect(newNode, parent SQLNode) { - parent.(*AlterView).Select = newNode.(SelectStatement) -} - -func replaceAlterViewViewName(newNode, parent SQLNode) { - parent.(*AlterView).ViewName = newNode.(TableName) -} - -func replaceAlterVschemaAutoIncSpec(newNode, parent SQLNode) { - parent.(*AlterVschema).AutoIncSpec = newNode.(*AutoIncSpec) -} - -func replaceAlterVschemaTable(newNode, parent SQLNode) { - parent.(*AlterVschema).Table = newNode.(TableName) -} - -type replaceAlterVschemaVindexCols int - -func (r *replaceAlterVschemaVindexCols) replace(newNode, container SQLNode) { - container.(*AlterVschema).VindexCols[int(*r)] = newNode.(ColIdent) -} - -func (r *replaceAlterVschemaVindexCols) inc() { - *r++ -} - -func replaceAlterVschemaVindexSpec(newNode, parent SQLNode) { - parent.(*AlterVschema).VindexSpec = newNode.(*VindexSpec) -} - -func replaceAndExprLeft(newNode, parent SQLNode) { - parent.(*AndExpr).Left = newNode.(Expr) -} - -func replaceAndExprRight(newNode, parent SQLNode) { - parent.(*AndExpr).Right = newNode.(Expr) -} - -func replaceAutoIncSpecColumn(newNode, 
parent SQLNode) { - parent.(*AutoIncSpec).Column = newNode.(ColIdent) -} - -func replaceAutoIncSpecSequence(newNode, parent SQLNode) { - parent.(*AutoIncSpec).Sequence = newNode.(TableName) -} - -func replaceBinaryExprLeft(newNode, parent SQLNode) { - parent.(*BinaryExpr).Left = newNode.(Expr) -} - -func replaceBinaryExprRight(newNode, parent SQLNode) { - parent.(*BinaryExpr).Right = newNode.(Expr) -} - -func replaceCallProcName(newNode, parent SQLNode) { - parent.(*CallProc).Name = newNode.(TableName) -} - -func replaceCallProcParams(newNode, parent SQLNode) { - parent.(*CallProc).Params = newNode.(Exprs) -} - -func replaceCaseExprElse(newNode, parent SQLNode) { - parent.(*CaseExpr).Else = newNode.(Expr) -} - -func replaceCaseExprExpr(newNode, parent SQLNode) { - parent.(*CaseExpr).Expr = newNode.(Expr) -} - -type replaceCaseExprWhens int - -func (r *replaceCaseExprWhens) replace(newNode, container SQLNode) { - container.(*CaseExpr).Whens[int(*r)] = newNode.(*When) -} - -func (r *replaceCaseExprWhens) inc() { - *r++ -} - -func replaceChangeColumnAfter(newNode, parent SQLNode) { - parent.(*ChangeColumn).After = newNode.(*ColName) -} - -func replaceChangeColumnFirst(newNode, parent SQLNode) { - parent.(*ChangeColumn).First = newNode.(*ColName) -} - -func replaceChangeColumnNewColDefinition(newNode, parent SQLNode) { - parent.(*ChangeColumn).NewColDefinition = newNode.(*ColumnDefinition) -} - -func replaceChangeColumnOldColumn(newNode, parent SQLNode) { - parent.(*ChangeColumn).OldColumn = newNode.(*ColName) -} - -func replaceCheckConstraintDefinitionExpr(newNode, parent SQLNode) { - parent.(*CheckConstraintDefinition).Expr = newNode.(Expr) -} - -func replaceColNameName(newNode, parent SQLNode) { - parent.(*ColName).Name = newNode.(ColIdent) -} - -func replaceColNameQualifier(newNode, parent SQLNode) { - parent.(*ColName).Qualifier = newNode.(TableName) -} - -func replaceCollateExprExpr(newNode, parent SQLNode) { - parent.(*CollateExpr).Expr = newNode.(Expr) -} - -func replaceColumnDefinitionName(newNode, parent SQLNode) { - parent.(*ColumnDefinition).Name = newNode.(ColIdent) -} - -func replaceColumnTypeLength(newNode, parent SQLNode) { - parent.(*ColumnType).Length = newNode.(*Literal) -} - -func replaceColumnTypeScale(newNode, parent SQLNode) { - parent.(*ColumnType).Scale = newNode.(*Literal) -} - -type replaceColumnsItems int - -func (r *replaceColumnsItems) replace(newNode, container SQLNode) { - container.(Columns)[int(*r)] = newNode.(ColIdent) -} - -func (r *replaceColumnsItems) inc() { - *r++ -} - -func replaceComparisonExprEscape(newNode, parent SQLNode) { - parent.(*ComparisonExpr).Escape = newNode.(Expr) -} - -func replaceComparisonExprLeft(newNode, parent SQLNode) { - parent.(*ComparisonExpr).Left = newNode.(Expr) -} - -func replaceComparisonExprRight(newNode, parent SQLNode) { - parent.(*ComparisonExpr).Right = newNode.(Expr) -} - -func replaceConstraintDefinitionDetails(newNode, parent SQLNode) { - parent.(*ConstraintDefinition).Details = newNode.(ConstraintInfo) -} - -func replaceConvertExprExpr(newNode, parent SQLNode) { - parent.(*ConvertExpr).Expr = newNode.(Expr) -} - -func replaceConvertExprType(newNode, parent SQLNode) { - parent.(*ConvertExpr).Type = newNode.(*ConvertType) -} - -func replaceConvertTypeLength(newNode, parent SQLNode) { - parent.(*ConvertType).Length = newNode.(*Literal) -} - -func replaceConvertTypeScale(newNode, parent SQLNode) { - parent.(*ConvertType).Scale = newNode.(*Literal) -} - -func replaceConvertUsingExprExpr(newNode, parent SQLNode) { - 
parent.(*ConvertUsingExpr).Expr = newNode.(Expr) -} - -func replaceCreateTableOptLike(newNode, parent SQLNode) { - parent.(*CreateTable).OptLike = newNode.(*OptLike) -} - -func replaceCreateTableTable(newNode, parent SQLNode) { - parent.(*CreateTable).Table = newNode.(TableName) -} - -func replaceCreateTableTableSpec(newNode, parent SQLNode) { - parent.(*CreateTable).TableSpec = newNode.(*TableSpec) -} - -func replaceCreateViewColumns(newNode, parent SQLNode) { - parent.(*CreateView).Columns = newNode.(Columns) -} - -func replaceCreateViewSelect(newNode, parent SQLNode) { - parent.(*CreateView).Select = newNode.(SelectStatement) -} - -func replaceCreateViewViewName(newNode, parent SQLNode) { - parent.(*CreateView).ViewName = newNode.(TableName) -} - -func replaceCurTimeFuncExprFsp(newNode, parent SQLNode) { - parent.(*CurTimeFuncExpr).Fsp = newNode.(Expr) -} - -func replaceCurTimeFuncExprName(newNode, parent SQLNode) { - parent.(*CurTimeFuncExpr).Name = newNode.(ColIdent) -} - -func replaceDeleteComments(newNode, parent SQLNode) { - parent.(*Delete).Comments = newNode.(Comments) -} - -func replaceDeleteLimit(newNode, parent SQLNode) { - parent.(*Delete).Limit = newNode.(*Limit) -} - -func replaceDeleteOrderBy(newNode, parent SQLNode) { - parent.(*Delete).OrderBy = newNode.(OrderBy) -} - -func replaceDeletePartitions(newNode, parent SQLNode) { - parent.(*Delete).Partitions = newNode.(Partitions) -} - -func replaceDeleteTableExprs(newNode, parent SQLNode) { - parent.(*Delete).TableExprs = newNode.(TableExprs) -} - -func replaceDeleteTargets(newNode, parent SQLNode) { - parent.(*Delete).Targets = newNode.(TableNames) -} - -func replaceDeleteWhere(newNode, parent SQLNode) { - parent.(*Delete).Where = newNode.(*Where) -} - -func replaceDerivedTableSelect(newNode, parent SQLNode) { - parent.(*DerivedTable).Select = newNode.(SelectStatement) -} - -func replaceDropColumnName(newNode, parent SQLNode) { - parent.(*DropColumn).Name = newNode.(*ColName) -} - -func replaceDropTableFromTables(newNode, parent SQLNode) { - parent.(*DropTable).FromTables = newNode.(TableNames) -} - -func replaceDropViewFromTables(newNode, parent SQLNode) { - parent.(*DropView).FromTables = newNode.(TableNames) -} - -func replaceExistsExprSubquery(newNode, parent SQLNode) { - parent.(*ExistsExpr).Subquery = newNode.(*Subquery) -} - -func replaceExplainStmtStatement(newNode, parent SQLNode) { - parent.(*ExplainStmt).Statement = newNode.(Statement) -} - -func replaceExplainTabTable(newNode, parent SQLNode) { - parent.(*ExplainTab).Table = newNode.(TableName) -} - -type replaceExprsItems int - -func (r *replaceExprsItems) replace(newNode, container SQLNode) { - container.(Exprs)[int(*r)] = newNode.(Expr) -} - -func (r *replaceExprsItems) inc() { - *r++ -} - -func replaceFlushTableNames(newNode, parent SQLNode) { - parent.(*Flush).TableNames = newNode.(TableNames) -} - -func replaceForeignKeyDefinitionOnDelete(newNode, parent SQLNode) { - parent.(*ForeignKeyDefinition).OnDelete = newNode.(ReferenceAction) -} - -func replaceForeignKeyDefinitionOnUpdate(newNode, parent SQLNode) { - parent.(*ForeignKeyDefinition).OnUpdate = newNode.(ReferenceAction) -} - -func replaceForeignKeyDefinitionReferencedColumns(newNode, parent SQLNode) { - parent.(*ForeignKeyDefinition).ReferencedColumns = newNode.(Columns) -} - -func replaceForeignKeyDefinitionReferencedTable(newNode, parent SQLNode) { - parent.(*ForeignKeyDefinition).ReferencedTable = newNode.(TableName) -} - -func replaceForeignKeyDefinitionSource(newNode, parent SQLNode) { - 
parent.(*ForeignKeyDefinition).Source = newNode.(Columns) -} - -func replaceFuncExprExprs(newNode, parent SQLNode) { - parent.(*FuncExpr).Exprs = newNode.(SelectExprs) -} - -func replaceFuncExprName(newNode, parent SQLNode) { - parent.(*FuncExpr).Name = newNode.(ColIdent) -} - -func replaceFuncExprQualifier(newNode, parent SQLNode) { - parent.(*FuncExpr).Qualifier = newNode.(TableIdent) -} - -type replaceGroupByItems int - -func (r *replaceGroupByItems) replace(newNode, container SQLNode) { - container.(GroupBy)[int(*r)] = newNode.(Expr) -} - -func (r *replaceGroupByItems) inc() { - *r++ -} - -func replaceGroupConcatExprExprs(newNode, parent SQLNode) { - parent.(*GroupConcatExpr).Exprs = newNode.(SelectExprs) -} - -func replaceGroupConcatExprLimit(newNode, parent SQLNode) { - parent.(*GroupConcatExpr).Limit = newNode.(*Limit) -} - -func replaceGroupConcatExprOrderBy(newNode, parent SQLNode) { - parent.(*GroupConcatExpr).OrderBy = newNode.(OrderBy) -} - -func replaceIndexDefinitionInfo(newNode, parent SQLNode) { - parent.(*IndexDefinition).Info = newNode.(*IndexInfo) -} - -type replaceIndexHintsIndexes int - -func (r *replaceIndexHintsIndexes) replace(newNode, container SQLNode) { - container.(*IndexHints).Indexes[int(*r)] = newNode.(ColIdent) -} - -func (r *replaceIndexHintsIndexes) inc() { - *r++ -} - -func replaceIndexInfoConstraintName(newNode, parent SQLNode) { - parent.(*IndexInfo).ConstraintName = newNode.(ColIdent) -} - -func replaceIndexInfoName(newNode, parent SQLNode) { - parent.(*IndexInfo).Name = newNode.(ColIdent) -} - -func replaceInsertColumns(newNode, parent SQLNode) { - parent.(*Insert).Columns = newNode.(Columns) -} - -func replaceInsertComments(newNode, parent SQLNode) { - parent.(*Insert).Comments = newNode.(Comments) -} - -func replaceInsertOnDup(newNode, parent SQLNode) { - parent.(*Insert).OnDup = newNode.(OnDup) -} - -func replaceInsertPartitions(newNode, parent SQLNode) { - parent.(*Insert).Partitions = newNode.(Partitions) -} - -func replaceInsertRows(newNode, parent SQLNode) { - parent.(*Insert).Rows = newNode.(InsertRows) -} - -func replaceInsertTable(newNode, parent SQLNode) { - parent.(*Insert).Table = newNode.(TableName) -} - -func replaceIntervalExprExpr(newNode, parent SQLNode) { - parent.(*IntervalExpr).Expr = newNode.(Expr) -} - -func replaceIsExprExpr(newNode, parent SQLNode) { - parent.(*IsExpr).Expr = newNode.(Expr) -} - -func replaceJoinConditionOn(newNode, parent SQLNode) { - tmp := parent.(JoinCondition) - tmp.On = newNode.(Expr) -} - -func replaceJoinConditionUsing(newNode, parent SQLNode) { - tmp := parent.(JoinCondition) - tmp.Using = newNode.(Columns) -} - -func replaceJoinTableExprCondition(newNode, parent SQLNode) { - parent.(*JoinTableExpr).Condition = newNode.(JoinCondition) -} - -func replaceJoinTableExprLeftExpr(newNode, parent SQLNode) { - parent.(*JoinTableExpr).LeftExpr = newNode.(TableExpr) -} - -func replaceJoinTableExprRightExpr(newNode, parent SQLNode) { - parent.(*JoinTableExpr).RightExpr = newNode.(TableExpr) -} - -func replaceLimitOffset(newNode, parent SQLNode) { - parent.(*Limit).Offset = newNode.(Expr) -} - -func replaceLimitRowcount(newNode, parent SQLNode) { - parent.(*Limit).Rowcount = newNode.(Expr) -} - -func replaceMatchExprColumns(newNode, parent SQLNode) { - parent.(*MatchExpr).Columns = newNode.(SelectExprs) -} - -func replaceMatchExprExpr(newNode, parent SQLNode) { - parent.(*MatchExpr).Expr = newNode.(Expr) -} - -func replaceModifyColumnAfter(newNode, parent SQLNode) { - parent.(*ModifyColumn).After = 
newNode.(*ColName) -} - -func replaceModifyColumnFirst(newNode, parent SQLNode) { - parent.(*ModifyColumn).First = newNode.(*ColName) -} - -func replaceModifyColumnNewColDefinition(newNode, parent SQLNode) { - parent.(*ModifyColumn).NewColDefinition = newNode.(*ColumnDefinition) -} - -func replaceNextvalExpr(newNode, parent SQLNode) { - tmp := parent.(Nextval) - tmp.Expr = newNode.(Expr) -} - -func replaceNotExprExpr(newNode, parent SQLNode) { - parent.(*NotExpr).Expr = newNode.(Expr) -} - -type replaceOnDupItems int - -func (r *replaceOnDupItems) replace(newNode, container SQLNode) { - container.(OnDup)[int(*r)] = newNode.(*UpdateExpr) -} - -func (r *replaceOnDupItems) inc() { - *r++ -} - -func replaceOptLikeLikeTable(newNode, parent SQLNode) { - parent.(*OptLike).LikeTable = newNode.(TableName) -} - -func replaceOrExprLeft(newNode, parent SQLNode) { - parent.(*OrExpr).Left = newNode.(Expr) -} - -func replaceOrExprRight(newNode, parent SQLNode) { - parent.(*OrExpr).Right = newNode.(Expr) -} - -func replaceOrderExpr(newNode, parent SQLNode) { - parent.(*Order).Expr = newNode.(Expr) -} - -type replaceOrderByItems int - -func (r *replaceOrderByItems) replace(newNode, container SQLNode) { - container.(OrderBy)[int(*r)] = newNode.(*Order) -} - -func (r *replaceOrderByItems) inc() { - *r++ -} - -func replaceOrderByOptionCols(newNode, parent SQLNode) { - parent.(*OrderByOption).Cols = newNode.(Columns) -} - -func replaceParenSelectSelect(newNode, parent SQLNode) { - parent.(*ParenSelect).Select = newNode.(SelectStatement) -} - -func replaceParenTableExprExprs(newNode, parent SQLNode) { - parent.(*ParenTableExpr).Exprs = newNode.(TableExprs) -} - -func replacePartitionDefinitionLimit(newNode, parent SQLNode) { - parent.(*PartitionDefinition).Limit = newNode.(Expr) -} - -func replacePartitionDefinitionName(newNode, parent SQLNode) { - parent.(*PartitionDefinition).Name = newNode.(ColIdent) -} - -type replacePartitionSpecDefinitions int - -func (r *replacePartitionSpecDefinitions) replace(newNode, container SQLNode) { - container.(*PartitionSpec).Definitions[int(*r)] = newNode.(*PartitionDefinition) -} - -func (r *replacePartitionSpecDefinitions) inc() { - *r++ -} - -func replacePartitionSpecNames(newNode, parent SQLNode) { - parent.(*PartitionSpec).Names = newNode.(Partitions) -} - -func replacePartitionSpecNumber(newNode, parent SQLNode) { - parent.(*PartitionSpec).Number = newNode.(*Literal) -} - -func replacePartitionSpecTableName(newNode, parent SQLNode) { - parent.(*PartitionSpec).TableName = newNode.(TableName) -} - -type replacePartitionsItems int - -func (r *replacePartitionsItems) replace(newNode, container SQLNode) { - container.(Partitions)[int(*r)] = newNode.(ColIdent) -} - -func (r *replacePartitionsItems) inc() { - *r++ -} - -func replaceRangeCondFrom(newNode, parent SQLNode) { - parent.(*RangeCond).From = newNode.(Expr) -} - -func replaceRangeCondLeft(newNode, parent SQLNode) { - parent.(*RangeCond).Left = newNode.(Expr) -} - -func replaceRangeCondTo(newNode, parent SQLNode) { - parent.(*RangeCond).To = newNode.(Expr) -} - -func replaceReleaseName(newNode, parent SQLNode) { - parent.(*Release).Name = newNode.(ColIdent) -} - -func replaceRenameTableNameTable(newNode, parent SQLNode) { - parent.(*RenameTableName).Table = newNode.(TableName) -} - -func replaceSRollbackName(newNode, parent SQLNode) { - parent.(*SRollback).Name = newNode.(ColIdent) -} - -func replaceSavepointName(newNode, parent SQLNode) { - parent.(*Savepoint).Name = newNode.(ColIdent) -} - -func 
replaceSelectComments(newNode, parent SQLNode) { - parent.(*Select).Comments = newNode.(Comments) -} - -func replaceSelectFrom(newNode, parent SQLNode) { - parent.(*Select).From = newNode.(TableExprs) -} - -func replaceSelectGroupBy(newNode, parent SQLNode) { - parent.(*Select).GroupBy = newNode.(GroupBy) -} - -func replaceSelectHaving(newNode, parent SQLNode) { - parent.(*Select).Having = newNode.(*Where) -} - -func replaceSelectInto(newNode, parent SQLNode) { - parent.(*Select).Into = newNode.(*SelectInto) -} - -func replaceSelectLimit(newNode, parent SQLNode) { - parent.(*Select).Limit = newNode.(*Limit) -} - -func replaceSelectOrderBy(newNode, parent SQLNode) { - parent.(*Select).OrderBy = newNode.(OrderBy) -} - -func replaceSelectSelectExprs(newNode, parent SQLNode) { - parent.(*Select).SelectExprs = newNode.(SelectExprs) -} - -func replaceSelectWhere(newNode, parent SQLNode) { - parent.(*Select).Where = newNode.(*Where) -} - -type replaceSelectExprsItems int - -func (r *replaceSelectExprsItems) replace(newNode, container SQLNode) { - container.(SelectExprs)[int(*r)] = newNode.(SelectExpr) -} - -func (r *replaceSelectExprsItems) inc() { - *r++ -} - -func replaceSetComments(newNode, parent SQLNode) { - parent.(*Set).Comments = newNode.(Comments) -} - -func replaceSetExprs(newNode, parent SQLNode) { - parent.(*Set).Exprs = newNode.(SetExprs) -} - -func replaceSetExprExpr(newNode, parent SQLNode) { - parent.(*SetExpr).Expr = newNode.(Expr) -} - -func replaceSetExprName(newNode, parent SQLNode) { - parent.(*SetExpr).Name = newNode.(ColIdent) -} - -type replaceSetExprsItems int - -func (r *replaceSetExprsItems) replace(newNode, container SQLNode) { - container.(SetExprs)[int(*r)] = newNode.(*SetExpr) -} - -func (r *replaceSetExprsItems) inc() { - *r++ -} - -type replaceSetTransactionCharacteristics int - -func (r *replaceSetTransactionCharacteristics) replace(newNode, container SQLNode) { - container.(*SetTransaction).Characteristics[int(*r)] = newNode.(Characteristic) -} - -func (r *replaceSetTransactionCharacteristics) inc() { - *r++ -} - -func replaceSetTransactionComments(newNode, parent SQLNode) { - parent.(*SetTransaction).Comments = newNode.(Comments) -} - -func replaceShowInternal(newNode, parent SQLNode) { - parent.(*Show).Internal = newNode.(ShowInternal) -} - -func replaceShowBasicFilter(newNode, parent SQLNode) { - parent.(*ShowBasic).Filter = newNode.(*ShowFilter) -} - -func replaceShowBasicTbl(newNode, parent SQLNode) { - parent.(*ShowBasic).Tbl = newNode.(TableName) -} - -func replaceShowCreateOp(newNode, parent SQLNode) { - parent.(*ShowCreate).Op = newNode.(TableName) -} - -func replaceShowFilterFilter(newNode, parent SQLNode) { - parent.(*ShowFilter).Filter = newNode.(Expr) -} - -func replaceShowLegacyOnTable(newNode, parent SQLNode) { - parent.(*ShowLegacy).OnTable = newNode.(TableName) -} - -func replaceShowLegacyShowCollationFilterOpt(newNode, parent SQLNode) { - parent.(*ShowLegacy).ShowCollationFilterOpt = newNode.(Expr) -} - -func replaceShowLegacyTable(newNode, parent SQLNode) { - parent.(*ShowLegacy).Table = newNode.(TableName) -} - -func replaceStarExprTableName(newNode, parent SQLNode) { - parent.(*StarExpr).TableName = newNode.(TableName) -} - -func replaceStreamComments(newNode, parent SQLNode) { - parent.(*Stream).Comments = newNode.(Comments) -} - -func replaceStreamSelectExpr(newNode, parent SQLNode) { - parent.(*Stream).SelectExpr = newNode.(SelectExpr) -} - -func replaceStreamTable(newNode, parent SQLNode) { - parent.(*Stream).Table = newNode.(TableName) 
-} - -func replaceSubquerySelect(newNode, parent SQLNode) { - parent.(*Subquery).Select = newNode.(SelectStatement) -} - -func replaceSubstrExprFrom(newNode, parent SQLNode) { - parent.(*SubstrExpr).From = newNode.(Expr) -} - -func replaceSubstrExprName(newNode, parent SQLNode) { - parent.(*SubstrExpr).Name = newNode.(*ColName) -} - -func replaceSubstrExprStrVal(newNode, parent SQLNode) { - parent.(*SubstrExpr).StrVal = newNode.(*Literal) -} - -func replaceSubstrExprTo(newNode, parent SQLNode) { - parent.(*SubstrExpr).To = newNode.(Expr) -} - -type replaceTableExprsItems int - -func (r *replaceTableExprsItems) replace(newNode, container SQLNode) { - container.(TableExprs)[int(*r)] = newNode.(TableExpr) -} - -func (r *replaceTableExprsItems) inc() { - *r++ -} - -func replaceTableNameName(newNode, parent SQLNode) { - tmp := parent.(TableName) - tmp.Name = newNode.(TableIdent) -} - -func replaceTableNameQualifier(newNode, parent SQLNode) { - tmp := parent.(TableName) - tmp.Qualifier = newNode.(TableIdent) -} - -type replaceTableNamesItems int - -func (r *replaceTableNamesItems) replace(newNode, container SQLNode) { - container.(TableNames)[int(*r)] = newNode.(TableName) -} - -func (r *replaceTableNamesItems) inc() { - *r++ -} - -type replaceTableSpecColumns int - -func (r *replaceTableSpecColumns) replace(newNode, container SQLNode) { - container.(*TableSpec).Columns[int(*r)] = newNode.(*ColumnDefinition) -} - -func (r *replaceTableSpecColumns) inc() { - *r++ -} - -type replaceTableSpecConstraints int - -func (r *replaceTableSpecConstraints) replace(newNode, container SQLNode) { - container.(*TableSpec).Constraints[int(*r)] = newNode.(*ConstraintDefinition) -} - -func (r *replaceTableSpecConstraints) inc() { - *r++ -} - -type replaceTableSpecIndexes int - -func (r *replaceTableSpecIndexes) replace(newNode, container SQLNode) { - container.(*TableSpec).Indexes[int(*r)] = newNode.(*IndexDefinition) -} - -func (r *replaceTableSpecIndexes) inc() { - *r++ -} - -func replaceTableSpecOptions(newNode, parent SQLNode) { - parent.(*TableSpec).Options = newNode.(TableOptions) -} - -func replaceTimestampFuncExprExpr1(newNode, parent SQLNode) { - parent.(*TimestampFuncExpr).Expr1 = newNode.(Expr) -} - -func replaceTimestampFuncExprExpr2(newNode, parent SQLNode) { - parent.(*TimestampFuncExpr).Expr2 = newNode.(Expr) -} - -func replaceTruncateTableTable(newNode, parent SQLNode) { - parent.(*TruncateTable).Table = newNode.(TableName) -} - -func replaceUnaryExprExpr(newNode, parent SQLNode) { - parent.(*UnaryExpr).Expr = newNode.(Expr) -} - -func replaceUnionFirstStatement(newNode, parent SQLNode) { - parent.(*Union).FirstStatement = newNode.(SelectStatement) -} - -func replaceUnionLimit(newNode, parent SQLNode) { - parent.(*Union).Limit = newNode.(*Limit) -} - -func replaceUnionOrderBy(newNode, parent SQLNode) { - parent.(*Union).OrderBy = newNode.(OrderBy) -} - -type replaceUnionUnionSelects int - -func (r *replaceUnionUnionSelects) replace(newNode, container SQLNode) { - container.(*Union).UnionSelects[int(*r)] = newNode.(*UnionSelect) -} - -func (r *replaceUnionUnionSelects) inc() { - *r++ -} - -func replaceUnionSelectStatement(newNode, parent SQLNode) { - parent.(*UnionSelect).Statement = newNode.(SelectStatement) -} - -func replaceUpdateComments(newNode, parent SQLNode) { - parent.(*Update).Comments = newNode.(Comments) -} - -func replaceUpdateExprs(newNode, parent SQLNode) { - parent.(*Update).Exprs = newNode.(UpdateExprs) -} - -func replaceUpdateLimit(newNode, parent SQLNode) { - 
parent.(*Update).Limit = newNode.(*Limit) -} - -func replaceUpdateOrderBy(newNode, parent SQLNode) { - parent.(*Update).OrderBy = newNode.(OrderBy) -} - -func replaceUpdateTableExprs(newNode, parent SQLNode) { - parent.(*Update).TableExprs = newNode.(TableExprs) -} - -func replaceUpdateWhere(newNode, parent SQLNode) { - parent.(*Update).Where = newNode.(*Where) -} - -func replaceUpdateExprExpr(newNode, parent SQLNode) { - parent.(*UpdateExpr).Expr = newNode.(Expr) -} - -func replaceUpdateExprName(newNode, parent SQLNode) { - parent.(*UpdateExpr).Name = newNode.(*ColName) -} - -type replaceUpdateExprsItems int - -func (r *replaceUpdateExprsItems) replace(newNode, container SQLNode) { - container.(UpdateExprs)[int(*r)] = newNode.(*UpdateExpr) -} - -func (r *replaceUpdateExprsItems) inc() { - *r++ -} - -func replaceUseDBName(newNode, parent SQLNode) { - parent.(*Use).DBName = newNode.(TableIdent) -} +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at -func replaceVStreamComments(newNode, parent SQLNode) { - parent.(*VStream).Comments = newNode.(Comments) -} - -func replaceVStreamLimit(newNode, parent SQLNode) { - parent.(*VStream).Limit = newNode.(*Limit) -} - -func replaceVStreamSelectExpr(newNode, parent SQLNode) { - parent.(*VStream).SelectExpr = newNode.(SelectExpr) -} - -func replaceVStreamTable(newNode, parent SQLNode) { - parent.(*VStream).Table = newNode.(TableName) -} - -func replaceVStreamWhere(newNode, parent SQLNode) { - parent.(*VStream).Where = newNode.(*Where) -} - -type replaceValTupleItems int - -func (r *replaceValTupleItems) replace(newNode, container SQLNode) { - container.(ValTuple)[int(*r)] = newNode.(Expr) -} - -func (r *replaceValTupleItems) inc() { - *r++ -} - -type replaceValuesItems int - -func (r *replaceValuesItems) replace(newNode, container SQLNode) { - container.(Values)[int(*r)] = newNode.(ValTuple) -} - -func (r *replaceValuesItems) inc() { - *r++ -} - -func replaceValuesFuncExprName(newNode, parent SQLNode) { - parent.(*ValuesFuncExpr).Name = newNode.(*ColName) -} - -func replaceVindexParamKey(newNode, parent SQLNode) { - tmp := parent.(VindexParam) - tmp.Key = newNode.(ColIdent) -} - -func replaceVindexSpecName(newNode, parent SQLNode) { - parent.(*VindexSpec).Name = newNode.(ColIdent) -} - -type replaceVindexSpecParams int - -func (r *replaceVindexSpecParams) replace(newNode, container SQLNode) { - container.(*VindexSpec).Params[int(*r)] = newNode.(VindexParam) -} - -func (r *replaceVindexSpecParams) inc() { - *r++ -} - -func replaceVindexSpecType(newNode, parent SQLNode) { - parent.(*VindexSpec).Type = newNode.(ColIdent) -} + http://www.apache.org/licenses/LICENSE-2.0 -func replaceWhenCond(newNode, parent SQLNode) { - parent.(*When).Cond = newNode.(Expr) -} - -func replaceWhenVal(newNode, parent SQLNode) { - parent.(*When).Val = newNode.(Expr) -} - -func replaceWhereExpr(newNode, parent SQLNode) { - parent.(*Where).Expr = newNode.(Expr) -} - -func replaceXorExprLeft(newNode, parent SQLNode) { - parent.(*XorExpr).Left = newNode.(Expr) -} +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by ASTHelperGen. DO NOT EDIT. 
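From here the regenerated rewriter.go drops the per-field replaceXxx helpers and the integer replacer types being deleted above and builds each field setter inline as a closure: plain fields get an anonymous func that writes through the parent, and slice fields bind the element index by invoking a generator immediately with x, which is why the generated code reads func(idx int) func(SQLNode, SQLNode) {...}(x). It is also worth noting that the deleted helpers for value-typed parents (for example replaceJoinConditionOn and replaceTableNameName) assigned into a local copy obtained from the type assertion and so could never actually update the tree. A stripped-down sketch of the index-capture pattern, using throwaway types rather than real AST nodes:

package main

import "fmt"

type node struct{ children []string }

func main() {
	n := &node{children: []string{"a", "b", "c"}}

	// Build one setter per element; calling the outer func(idx int) immediately
	// with x pins the index for that iteration, so every closure writes back
	// to its own slot instead of whatever x ends up being after the loop.
	var setters []func(string)
	for x := range n.children {
		setters = append(setters, func(idx int) func(string) {
			return func(v string) { n.children[idx] = v }
		}(x))
	}

	setters[1]("B")
	fmt.Println(n.children) // [a B c]
}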
-func replaceXorExprRight(newNode, parent SQLNode) { - parent.(*XorExpr).Right = newNode.(Expr) -} +package sqlparser -// apply is where the visiting happens. Here is where we keep the big switch-case that will be used -// to do the actual visiting of SQLNodes func (a *application) apply(parent, node SQLNode, replacer replacerFunc) { if node == nil || isNilValue(node) { return } - - // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead saved := a.cursor a.cursor.replacer = replacer a.cursor.node = node a.cursor.parent = parent - if a.pre != nil && !a.pre(&a.cursor) { a.cursor = saved return } - - // walk children - // (the order of the cases is alphabetical) switch n := node.(type) { - case nil: - case AccessMode: - case *AddColumns: - a.apply(node, n.After, replaceAddColumnsAfter) - replacerColumns := replaceAddColumnsColumns(0) - replacerColumnsB := &replacerColumns - for _, item := range n.Columns { - a.apply(node, item, replacerColumnsB.replace) - replacerColumnsB.inc() + for x, el := range n.Columns { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(*AddColumns).Columns[idx] = newNode.(*ColumnDefinition) + } + }(x)) } - a.apply(node, n.First, replaceAddColumnsFirst) - + a.apply(node, n.First, func(newNode, parent SQLNode) { + parent.(*AddColumns).First = newNode.(*ColName) + }) + a.apply(node, n.After, func(newNode, parent SQLNode) { + parent.(*AddColumns).After = newNode.(*ColName) + }) case *AddConstraintDefinition: - a.apply(node, n.ConstraintDefinition, replaceAddConstraintDefinitionConstraintDefinition) - + a.apply(node, n.ConstraintDefinition, func(newNode, parent SQLNode) { + parent.(*AddConstraintDefinition).ConstraintDefinition = newNode.(*ConstraintDefinition) + }) case *AddIndexDefinition: - a.apply(node, n.IndexDefinition, replaceAddIndexDefinitionIndexDefinition) - - case AlgorithmValue: - + a.apply(node, n.IndexDefinition, func(newNode, parent SQLNode) { + parent.(*AddIndexDefinition).IndexDefinition = newNode.(*IndexDefinition) + }) case *AliasedExpr: - a.apply(node, n.As, replaceAliasedExprAs) - a.apply(node, n.Expr, replaceAliasedExprExpr) - + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*AliasedExpr).Expr = newNode.(Expr) + }) + a.apply(node, n.As, func(newNode, parent SQLNode) { + parent.(*AliasedExpr).As = newNode.(ColIdent) + }) case *AliasedTableExpr: - a.apply(node, n.As, replaceAliasedTableExprAs) - a.apply(node, n.Expr, replaceAliasedTableExprExpr) - a.apply(node, n.Hints, replaceAliasedTableExprHints) - a.apply(node, n.Partitions, replaceAliasedTableExprPartitions) - + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*AliasedTableExpr).Expr = newNode.(SimpleTableExpr) + }) + a.apply(node, n.Partitions, func(newNode, parent SQLNode) { + parent.(*AliasedTableExpr).Partitions = newNode.(Partitions) + }) + a.apply(node, n.As, func(newNode, parent SQLNode) { + parent.(*AliasedTableExpr).As = newNode.(TableIdent) + }) + a.apply(node, n.Hints, func(newNode, parent SQLNode) { + parent.(*AliasedTableExpr).Hints = newNode.(*IndexHints) + }) case *AlterCharset: - case *AlterColumn: - a.apply(node, n.Column, replaceAlterColumnColumn) - a.apply(node, n.DefaultVal, replaceAlterColumnDefaultVal) - + a.apply(node, n.Column, func(newNode, parent SQLNode) { + parent.(*AlterColumn).Column = newNode.(*ColName) + }) + a.apply(node, n.DefaultVal, func(newNode, parent SQLNode) { + parent.(*AlterColumn).DefaultVal = newNode.(Expr) + }) case *AlterDatabase: 
- case *AlterTable: - replacerAlterOptions := replaceAlterTableAlterOptions(0) - replacerAlterOptionsB := &replacerAlterOptions - for _, item := range n.AlterOptions { - a.apply(node, item, replacerAlterOptionsB.replace) - replacerAlterOptionsB.inc() + a.apply(node, n.Table, func(newNode, parent SQLNode) { + parent.(*AlterTable).Table = newNode.(TableName) + }) + for x, el := range n.AlterOptions { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(*AlterTable).AlterOptions[idx] = newNode.(AlterOption) + } + }(x)) } - a.apply(node, n.PartitionSpec, replaceAlterTablePartitionSpec) - a.apply(node, n.Table, replaceAlterTableTable) - + a.apply(node, n.PartitionSpec, func(newNode, parent SQLNode) { + parent.(*AlterTable).PartitionSpec = newNode.(*PartitionSpec) + }) case *AlterView: - a.apply(node, n.Columns, replaceAlterViewColumns) - a.apply(node, n.Select, replaceAlterViewSelect) - a.apply(node, n.ViewName, replaceAlterViewViewName) - + a.apply(node, n.ViewName, func(newNode, parent SQLNode) { + parent.(*AlterView).ViewName = newNode.(TableName) + }) + a.apply(node, n.Columns, func(newNode, parent SQLNode) { + parent.(*AlterView).Columns = newNode.(Columns) + }) + a.apply(node, n.Select, func(newNode, parent SQLNode) { + parent.(*AlterView).Select = newNode.(SelectStatement) + }) case *AlterVschema: - a.apply(node, n.AutoIncSpec, replaceAlterVschemaAutoIncSpec) - a.apply(node, n.Table, replaceAlterVschemaTable) - replacerVindexCols := replaceAlterVschemaVindexCols(0) - replacerVindexColsB := &replacerVindexCols - for _, item := range n.VindexCols { - a.apply(node, item, replacerVindexColsB.replace) - replacerVindexColsB.inc() + a.apply(node, n.Table, func(newNode, parent SQLNode) { + parent.(*AlterVschema).Table = newNode.(TableName) + }) + a.apply(node, n.VindexSpec, func(newNode, parent SQLNode) { + parent.(*AlterVschema).VindexSpec = newNode.(*VindexSpec) + }) + for x, el := range n.VindexCols { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(*AlterVschema).VindexCols[idx] = newNode.(ColIdent) + } + }(x)) } - a.apply(node, n.VindexSpec, replaceAlterVschemaVindexSpec) - + a.apply(node, n.AutoIncSpec, func(newNode, parent SQLNode) { + parent.(*AlterVschema).AutoIncSpec = newNode.(*AutoIncSpec) + }) case *AndExpr: - a.apply(node, n.Left, replaceAndExprLeft) - a.apply(node, n.Right, replaceAndExprRight) - + a.apply(node, n.Left, func(newNode, parent SQLNode) { + parent.(*AndExpr).Left = newNode.(Expr) + }) + a.apply(node, n.Right, func(newNode, parent SQLNode) { + parent.(*AndExpr).Right = newNode.(Expr) + }) case Argument: - case *AutoIncSpec: - a.apply(node, n.Column, replaceAutoIncSpecColumn) - a.apply(node, n.Sequence, replaceAutoIncSpecSequence) - + a.apply(node, n.Column, func(newNode, parent SQLNode) { + parent.(*AutoIncSpec).Column = newNode.(ColIdent) + }) + a.apply(node, n.Sequence, func(newNode, parent SQLNode) { + parent.(*AutoIncSpec).Sequence = newNode.(TableName) + }) case *Begin: - case *BinaryExpr: - a.apply(node, n.Left, replaceBinaryExprLeft) - a.apply(node, n.Right, replaceBinaryExprRight) - - case BoolVal: - + a.apply(node, n.Left, func(newNode, parent SQLNode) { + parent.(*BinaryExpr).Left = newNode.(Expr) + }) + a.apply(node, n.Right, func(newNode, parent SQLNode) { + parent.(*BinaryExpr).Right = newNode.(Expr) + }) case *CallProc: - a.apply(node, n.Name, replaceCallProcName) - a.apply(node, n.Params, replaceCallProcParams) - + 
a.apply(node, n.Name, func(newNode, parent SQLNode) { + parent.(*CallProc).Name = newNode.(TableName) + }) + a.apply(node, n.Params, func(newNode, parent SQLNode) { + parent.(*CallProc).Params = newNode.(Exprs) + }) case *CaseExpr: - a.apply(node, n.Else, replaceCaseExprElse) - a.apply(node, n.Expr, replaceCaseExprExpr) - replacerWhens := replaceCaseExprWhens(0) - replacerWhensB := &replacerWhens - for _, item := range n.Whens { - a.apply(node, item, replacerWhensB.replace) - replacerWhensB.inc() + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*CaseExpr).Expr = newNode.(Expr) + }) + for x, el := range n.Whens { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(*CaseExpr).Whens[idx] = newNode.(*When) + } + }(x)) } - + a.apply(node, n.Else, func(newNode, parent SQLNode) { + parent.(*CaseExpr).Else = newNode.(Expr) + }) case *ChangeColumn: - a.apply(node, n.After, replaceChangeColumnAfter) - a.apply(node, n.First, replaceChangeColumnFirst) - a.apply(node, n.NewColDefinition, replaceChangeColumnNewColDefinition) - a.apply(node, n.OldColumn, replaceChangeColumnOldColumn) - + a.apply(node, n.OldColumn, func(newNode, parent SQLNode) { + parent.(*ChangeColumn).OldColumn = newNode.(*ColName) + }) + a.apply(node, n.NewColDefinition, func(newNode, parent SQLNode) { + parent.(*ChangeColumn).NewColDefinition = newNode.(*ColumnDefinition) + }) + a.apply(node, n.First, func(newNode, parent SQLNode) { + parent.(*ChangeColumn).First = newNode.(*ColName) + }) + a.apply(node, n.After, func(newNode, parent SQLNode) { + parent.(*ChangeColumn).After = newNode.(*ColName) + }) case *CheckConstraintDefinition: - a.apply(node, n.Expr, replaceCheckConstraintDefinitionExpr) - + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*CheckConstraintDefinition).Expr = newNode.(Expr) + }) case ColIdent: - case *ColName: - a.apply(node, n.Name, replaceColNameName) - a.apply(node, n.Qualifier, replaceColNameQualifier) - + a.apply(node, n.Name, func(newNode, parent SQLNode) { + parent.(*ColName).Name = newNode.(ColIdent) + }) + a.apply(node, n.Qualifier, func(newNode, parent SQLNode) { + parent.(*ColName).Qualifier = newNode.(TableName) + }) case *CollateExpr: - a.apply(node, n.Expr, replaceCollateExprExpr) - + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*CollateExpr).Expr = newNode.(Expr) + }) case *ColumnDefinition: - a.apply(node, n.Name, replaceColumnDefinitionName) - + a.apply(node, n.Name, func(newNode, parent SQLNode) { + parent.(*ColumnDefinition).Name = newNode.(ColIdent) + }) case *ColumnType: - a.apply(node, n.Length, replaceColumnTypeLength) - a.apply(node, n.Scale, replaceColumnTypeScale) - + a.apply(node, n.Length, func(newNode, parent SQLNode) { + parent.(*ColumnType).Length = newNode.(*Literal) + }) + a.apply(node, n.Scale, func(newNode, parent SQLNode) { + parent.(*ColumnType).Scale = newNode.(*Literal) + }) case Columns: - replacer := replaceColumnsItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() + for x, el := range n { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(Columns)[idx] = newNode.(ColIdent) + } + }(x)) } - case Comments: - case *Commit: - case *ComparisonExpr: - a.apply(node, n.Escape, replaceComparisonExprEscape) - a.apply(node, n.Left, replaceComparisonExprLeft) - a.apply(node, n.Right, replaceComparisonExprRight) - + a.apply(node, 
n.Left, func(newNode, parent SQLNode) { + parent.(*ComparisonExpr).Left = newNode.(Expr) + }) + a.apply(node, n.Right, func(newNode, parent SQLNode) { + parent.(*ComparisonExpr).Right = newNode.(Expr) + }) + a.apply(node, n.Escape, func(newNode, parent SQLNode) { + parent.(*ComparisonExpr).Escape = newNode.(Expr) + }) case *ConstraintDefinition: - a.apply(node, n.Details, replaceConstraintDefinitionDetails) - + a.apply(node, n.Details, func(newNode, parent SQLNode) { + parent.(*ConstraintDefinition).Details = newNode.(ConstraintInfo) + }) case *ConvertExpr: - a.apply(node, n.Expr, replaceConvertExprExpr) - a.apply(node, n.Type, replaceConvertExprType) - + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*ConvertExpr).Expr = newNode.(Expr) + }) + a.apply(node, n.Type, func(newNode, parent SQLNode) { + parent.(*ConvertExpr).Type = newNode.(*ConvertType) + }) case *ConvertType: - a.apply(node, n.Length, replaceConvertTypeLength) - a.apply(node, n.Scale, replaceConvertTypeScale) - + a.apply(node, n.Length, func(newNode, parent SQLNode) { + parent.(*ConvertType).Length = newNode.(*Literal) + }) + a.apply(node, n.Scale, func(newNode, parent SQLNode) { + parent.(*ConvertType).Scale = newNode.(*Literal) + }) case *ConvertUsingExpr: - a.apply(node, n.Expr, replaceConvertUsingExprExpr) - + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*ConvertUsingExpr).Expr = newNode.(Expr) + }) case *CreateDatabase: - case *CreateTable: - a.apply(node, n.OptLike, replaceCreateTableOptLike) - a.apply(node, n.Table, replaceCreateTableTable) - a.apply(node, n.TableSpec, replaceCreateTableTableSpec) - + a.apply(node, n.Table, func(newNode, parent SQLNode) { + parent.(*CreateTable).Table = newNode.(TableName) + }) + a.apply(node, n.TableSpec, func(newNode, parent SQLNode) { + parent.(*CreateTable).TableSpec = newNode.(*TableSpec) + }) + a.apply(node, n.OptLike, func(newNode, parent SQLNode) { + parent.(*CreateTable).OptLike = newNode.(*OptLike) + }) case *CreateView: - a.apply(node, n.Columns, replaceCreateViewColumns) - a.apply(node, n.Select, replaceCreateViewSelect) - a.apply(node, n.ViewName, replaceCreateViewViewName) - + a.apply(node, n.ViewName, func(newNode, parent SQLNode) { + parent.(*CreateView).ViewName = newNode.(TableName) + }) + a.apply(node, n.Columns, func(newNode, parent SQLNode) { + parent.(*CreateView).Columns = newNode.(Columns) + }) + a.apply(node, n.Select, func(newNode, parent SQLNode) { + parent.(*CreateView).Select = newNode.(SelectStatement) + }) case *CurTimeFuncExpr: - a.apply(node, n.Fsp, replaceCurTimeFuncExprFsp) - a.apply(node, n.Name, replaceCurTimeFuncExprName) - + a.apply(node, n.Name, func(newNode, parent SQLNode) { + parent.(*CurTimeFuncExpr).Name = newNode.(ColIdent) + }) + a.apply(node, n.Fsp, func(newNode, parent SQLNode) { + parent.(*CurTimeFuncExpr).Fsp = newNode.(Expr) + }) case *Default: - case *Delete: - a.apply(node, n.Comments, replaceDeleteComments) - a.apply(node, n.Limit, replaceDeleteLimit) - a.apply(node, n.OrderBy, replaceDeleteOrderBy) - a.apply(node, n.Partitions, replaceDeletePartitions) - a.apply(node, n.TableExprs, replaceDeleteTableExprs) - a.apply(node, n.Targets, replaceDeleteTargets) - a.apply(node, n.Where, replaceDeleteWhere) - + a.apply(node, n.Comments, func(newNode, parent SQLNode) { + parent.(*Delete).Comments = newNode.(Comments) + }) + a.apply(node, n.Targets, func(newNode, parent SQLNode) { + parent.(*Delete).Targets = newNode.(TableNames) + }) + a.apply(node, n.TableExprs, func(newNode, parent SQLNode) { + 
parent.(*Delete).TableExprs = newNode.(TableExprs) + }) + a.apply(node, n.Partitions, func(newNode, parent SQLNode) { + parent.(*Delete).Partitions = newNode.(Partitions) + }) + a.apply(node, n.Where, func(newNode, parent SQLNode) { + parent.(*Delete).Where = newNode.(*Where) + }) + a.apply(node, n.OrderBy, func(newNode, parent SQLNode) { + parent.(*Delete).OrderBy = newNode.(OrderBy) + }) + a.apply(node, n.Limit, func(newNode, parent SQLNode) { + parent.(*Delete).Limit = newNode.(*Limit) + }) case *DerivedTable: - a.apply(node, n.Select, replaceDerivedTableSelect) - + a.apply(node, n.Select, func(newNode, parent SQLNode) { + parent.(*DerivedTable).Select = newNode.(SelectStatement) + }) case *DropColumn: - a.apply(node, n.Name, replaceDropColumnName) - + a.apply(node, n.Name, func(newNode, parent SQLNode) { + parent.(*DropColumn).Name = newNode.(*ColName) + }) case *DropDatabase: - case *DropKey: - case *DropTable: - a.apply(node, n.FromTables, replaceDropTableFromTables) - + a.apply(node, n.FromTables, func(newNode, parent SQLNode) { + parent.(*DropTable).FromTables = newNode.(TableNames) + }) case *DropView: - a.apply(node, n.FromTables, replaceDropViewFromTables) - + a.apply(node, n.FromTables, func(newNode, parent SQLNode) { + parent.(*DropView).FromTables = newNode.(TableNames) + }) case *ExistsExpr: - a.apply(node, n.Subquery, replaceExistsExprSubquery) - + a.apply(node, n.Subquery, func(newNode, parent SQLNode) { + parent.(*ExistsExpr).Subquery = newNode.(*Subquery) + }) case *ExplainStmt: - a.apply(node, n.Statement, replaceExplainStmtStatement) - + a.apply(node, n.Statement, func(newNode, parent SQLNode) { + parent.(*ExplainStmt).Statement = newNode.(Statement) + }) case *ExplainTab: - a.apply(node, n.Table, replaceExplainTabTable) - + a.apply(node, n.Table, func(newNode, parent SQLNode) { + parent.(*ExplainTab).Table = newNode.(TableName) + }) case Exprs: - replacer := replaceExprsItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() + for x, el := range n { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(Exprs)[idx] = newNode.(Expr) + } + }(x)) } - case *Flush: - a.apply(node, n.TableNames, replaceFlushTableNames) - + a.apply(node, n.TableNames, func(newNode, parent SQLNode) { + parent.(*Flush).TableNames = newNode.(TableNames) + }) case *Force: - case *ForeignKeyDefinition: - a.apply(node, n.OnDelete, replaceForeignKeyDefinitionOnDelete) - a.apply(node, n.OnUpdate, replaceForeignKeyDefinitionOnUpdate) - a.apply(node, n.ReferencedColumns, replaceForeignKeyDefinitionReferencedColumns) - a.apply(node, n.ReferencedTable, replaceForeignKeyDefinitionReferencedTable) - a.apply(node, n.Source, replaceForeignKeyDefinitionSource) - + a.apply(node, n.Source, func(newNode, parent SQLNode) { + parent.(*ForeignKeyDefinition).Source = newNode.(Columns) + }) + a.apply(node, n.ReferencedTable, func(newNode, parent SQLNode) { + parent.(*ForeignKeyDefinition).ReferencedTable = newNode.(TableName) + }) + a.apply(node, n.ReferencedColumns, func(newNode, parent SQLNode) { + parent.(*ForeignKeyDefinition).ReferencedColumns = newNode.(Columns) + }) + a.apply(node, n.OnDelete, func(newNode, parent SQLNode) { + parent.(*ForeignKeyDefinition).OnDelete = newNode.(ReferenceAction) + }) + a.apply(node, n.OnUpdate, func(newNode, parent SQLNode) { + parent.(*ForeignKeyDefinition).OnUpdate = newNode.(ReferenceAction) + }) case *FuncExpr: - a.apply(node, n.Exprs, 
replaceFuncExprExprs) - a.apply(node, n.Name, replaceFuncExprName) - a.apply(node, n.Qualifier, replaceFuncExprQualifier) - + a.apply(node, n.Qualifier, func(newNode, parent SQLNode) { + parent.(*FuncExpr).Qualifier = newNode.(TableIdent) + }) + a.apply(node, n.Name, func(newNode, parent SQLNode) { + parent.(*FuncExpr).Name = newNode.(ColIdent) + }) + a.apply(node, n.Exprs, func(newNode, parent SQLNode) { + parent.(*FuncExpr).Exprs = newNode.(SelectExprs) + }) case GroupBy: - replacer := replaceGroupByItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() + for x, el := range n { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(GroupBy)[idx] = newNode.(Expr) + } + }(x)) } - case *GroupConcatExpr: - a.apply(node, n.Exprs, replaceGroupConcatExprExprs) - a.apply(node, n.Limit, replaceGroupConcatExprLimit) - a.apply(node, n.OrderBy, replaceGroupConcatExprOrderBy) - + a.apply(node, n.Exprs, func(newNode, parent SQLNode) { + parent.(*GroupConcatExpr).Exprs = newNode.(SelectExprs) + }) + a.apply(node, n.OrderBy, func(newNode, parent SQLNode) { + parent.(*GroupConcatExpr).OrderBy = newNode.(OrderBy) + }) + a.apply(node, n.Limit, func(newNode, parent SQLNode) { + parent.(*GroupConcatExpr).Limit = newNode.(*Limit) + }) case *IndexDefinition: - a.apply(node, n.Info, replaceIndexDefinitionInfo) - + a.apply(node, n.Info, func(newNode, parent SQLNode) { + parent.(*IndexDefinition).Info = newNode.(*IndexInfo) + }) case *IndexHints: - replacerIndexes := replaceIndexHintsIndexes(0) - replacerIndexesB := &replacerIndexes - for _, item := range n.Indexes { - a.apply(node, item, replacerIndexesB.replace) - replacerIndexesB.inc() + for x, el := range n.Indexes { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(*IndexHints).Indexes[idx] = newNode.(ColIdent) + } + }(x)) } - case *IndexInfo: - a.apply(node, n.ConstraintName, replaceIndexInfoConstraintName) - a.apply(node, n.Name, replaceIndexInfoName) - + a.apply(node, n.Name, func(newNode, parent SQLNode) { + parent.(*IndexInfo).Name = newNode.(ColIdent) + }) + a.apply(node, n.ConstraintName, func(newNode, parent SQLNode) { + parent.(*IndexInfo).ConstraintName = newNode.(ColIdent) + }) case *Insert: - a.apply(node, n.Columns, replaceInsertColumns) - a.apply(node, n.Comments, replaceInsertComments) - a.apply(node, n.OnDup, replaceInsertOnDup) - a.apply(node, n.Partitions, replaceInsertPartitions) - a.apply(node, n.Rows, replaceInsertRows) - a.apply(node, n.Table, replaceInsertTable) - + a.apply(node, n.Comments, func(newNode, parent SQLNode) { + parent.(*Insert).Comments = newNode.(Comments) + }) + a.apply(node, n.Table, func(newNode, parent SQLNode) { + parent.(*Insert).Table = newNode.(TableName) + }) + a.apply(node, n.Partitions, func(newNode, parent SQLNode) { + parent.(*Insert).Partitions = newNode.(Partitions) + }) + a.apply(node, n.Columns, func(newNode, parent SQLNode) { + parent.(*Insert).Columns = newNode.(Columns) + }) + a.apply(node, n.Rows, func(newNode, parent SQLNode) { + parent.(*Insert).Rows = newNode.(InsertRows) + }) + a.apply(node, n.OnDup, func(newNode, parent SQLNode) { + parent.(*Insert).OnDup = newNode.(OnDup) + }) case *IntervalExpr: - a.apply(node, n.Expr, replaceIntervalExprExpr) - + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*IntervalExpr).Expr = newNode.(Expr) + }) case *IsExpr: - a.apply(node, n.Expr, 
replaceIsExprExpr) - - case IsolationLevel: - + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*IsExpr).Expr = newNode.(Expr) + }) case JoinCondition: - a.apply(node, n.On, replaceJoinConditionOn) - a.apply(node, n.Using, replaceJoinConditionUsing) - + a.apply(node, n.On, replacePanic("JoinCondition On")) + a.apply(node, n.Using, replacePanic("JoinCondition Using")) case *JoinTableExpr: - a.apply(node, n.Condition, replaceJoinTableExprCondition) - a.apply(node, n.LeftExpr, replaceJoinTableExprLeftExpr) - a.apply(node, n.RightExpr, replaceJoinTableExprRightExpr) - + a.apply(node, n.LeftExpr, func(newNode, parent SQLNode) { + parent.(*JoinTableExpr).LeftExpr = newNode.(TableExpr) + }) + a.apply(node, n.RightExpr, func(newNode, parent SQLNode) { + parent.(*JoinTableExpr).RightExpr = newNode.(TableExpr) + }) + a.apply(node, n.Condition, func(newNode, parent SQLNode) { + parent.(*JoinTableExpr).Condition = newNode.(JoinCondition) + }) case *KeyState: - case *Limit: - a.apply(node, n.Offset, replaceLimitOffset) - a.apply(node, n.Rowcount, replaceLimitRowcount) - + a.apply(node, n.Offset, func(newNode, parent SQLNode) { + parent.(*Limit).Offset = newNode.(Expr) + }) + a.apply(node, n.Rowcount, func(newNode, parent SQLNode) { + parent.(*Limit).Rowcount = newNode.(Expr) + }) case ListArg: - case *Literal: - case *Load: - case *LockOption: - case *LockTables: - case *MatchExpr: - a.apply(node, n.Columns, replaceMatchExprColumns) - a.apply(node, n.Expr, replaceMatchExprExpr) - + a.apply(node, n.Columns, func(newNode, parent SQLNode) { + parent.(*MatchExpr).Columns = newNode.(SelectExprs) + }) + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*MatchExpr).Expr = newNode.(Expr) + }) case *ModifyColumn: - a.apply(node, n.After, replaceModifyColumnAfter) - a.apply(node, n.First, replaceModifyColumnFirst) - a.apply(node, n.NewColDefinition, replaceModifyColumnNewColDefinition) - - case Nextval: - a.apply(node, n.Expr, replaceNextvalExpr) - + a.apply(node, n.NewColDefinition, func(newNode, parent SQLNode) { + parent.(*ModifyColumn).NewColDefinition = newNode.(*ColumnDefinition) + }) + a.apply(node, n.First, func(newNode, parent SQLNode) { + parent.(*ModifyColumn).First = newNode.(*ColName) + }) + a.apply(node, n.After, func(newNode, parent SQLNode) { + parent.(*ModifyColumn).After = newNode.(*ColName) + }) + case *Nextval: + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*Nextval).Expr = newNode.(Expr) + }) case *NotExpr: - a.apply(node, n.Expr, replaceNotExprExpr) - + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*NotExpr).Expr = newNode.(Expr) + }) case *NullVal: - case OnDup: - replacer := replaceOnDupItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() + for x, el := range n { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(OnDup)[idx] = newNode.(*UpdateExpr) + } + }(x)) } - case *OptLike: - a.apply(node, n.LikeTable, replaceOptLikeLikeTable) - + a.apply(node, n.LikeTable, func(newNode, parent SQLNode) { + parent.(*OptLike).LikeTable = newNode.(TableName) + }) case *OrExpr: - a.apply(node, n.Left, replaceOrExprLeft) - a.apply(node, n.Right, replaceOrExprRight) - + a.apply(node, n.Left, func(newNode, parent SQLNode) { + parent.(*OrExpr).Left = newNode.(Expr) + }) + a.apply(node, n.Right, func(newNode, parent SQLNode) { + parent.(*OrExpr).Right = newNode.(Expr) + }) case *Order: - a.apply(node, 
n.Expr, replaceOrderExpr) - + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*Order).Expr = newNode.(Expr) + }) case OrderBy: - replacer := replaceOrderByItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() + for x, el := range n { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(OrderBy)[idx] = newNode.(*Order) + } + }(x)) } - case *OrderByOption: - a.apply(node, n.Cols, replaceOrderByOptionCols) - + a.apply(node, n.Cols, func(newNode, parent SQLNode) { + parent.(*OrderByOption).Cols = newNode.(Columns) + }) case *OtherAdmin: - case *OtherRead: - case *ParenSelect: - a.apply(node, n.Select, replaceParenSelectSelect) - + a.apply(node, n.Select, func(newNode, parent SQLNode) { + parent.(*ParenSelect).Select = newNode.(SelectStatement) + }) case *ParenTableExpr: - a.apply(node, n.Exprs, replaceParenTableExprExprs) - + a.apply(node, n.Exprs, func(newNode, parent SQLNode) { + parent.(*ParenTableExpr).Exprs = newNode.(TableExprs) + }) case *PartitionDefinition: - a.apply(node, n.Limit, replacePartitionDefinitionLimit) - a.apply(node, n.Name, replacePartitionDefinitionName) - + a.apply(node, n.Name, func(newNode, parent SQLNode) { + parent.(*PartitionDefinition).Name = newNode.(ColIdent) + }) + a.apply(node, n.Limit, func(newNode, parent SQLNode) { + parent.(*PartitionDefinition).Limit = newNode.(Expr) + }) case *PartitionSpec: - replacerDefinitions := replacePartitionSpecDefinitions(0) - replacerDefinitionsB := &replacerDefinitions - for _, item := range n.Definitions { - a.apply(node, item, replacerDefinitionsB.replace) - replacerDefinitionsB.inc() + a.apply(node, n.Names, func(newNode, parent SQLNode) { + parent.(*PartitionSpec).Names = newNode.(Partitions) + }) + a.apply(node, n.Number, func(newNode, parent SQLNode) { + parent.(*PartitionSpec).Number = newNode.(*Literal) + }) + a.apply(node, n.TableName, func(newNode, parent SQLNode) { + parent.(*PartitionSpec).TableName = newNode.(TableName) + }) + for x, el := range n.Definitions { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(*PartitionSpec).Definitions[idx] = newNode.(*PartitionDefinition) + } + }(x)) } - a.apply(node, n.Names, replacePartitionSpecNames) - a.apply(node, n.Number, replacePartitionSpecNumber) - a.apply(node, n.TableName, replacePartitionSpecTableName) - case Partitions: - replacer := replacePartitionsItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() + for x, el := range n { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(Partitions)[idx] = newNode.(ColIdent) + } + }(x)) } - case *RangeCond: - a.apply(node, n.From, replaceRangeCondFrom) - a.apply(node, n.Left, replaceRangeCondLeft) - a.apply(node, n.To, replaceRangeCondTo) - - case ReferenceAction: - + a.apply(node, n.Left, func(newNode, parent SQLNode) { + parent.(*RangeCond).Left = newNode.(Expr) + }) + a.apply(node, n.From, func(newNode, parent SQLNode) { + parent.(*RangeCond).From = newNode.(Expr) + }) + a.apply(node, n.To, func(newNode, parent SQLNode) { + parent.(*RangeCond).To = newNode.(Expr) + }) case *Release: - a.apply(node, n.Name, replaceReleaseName) - + a.apply(node, n.Name, func(newNode, parent SQLNode) { + parent.(*Release).Name = newNode.(ColIdent) + }) case *RenameIndex: - case *RenameTable: 
- case *RenameTableName: - a.apply(node, n.Table, replaceRenameTableNameTable) - + a.apply(node, n.Table, func(newNode, parent SQLNode) { + parent.(*RenameTableName).Table = newNode.(TableName) + }) case *Rollback: - case *SRollback: - a.apply(node, n.Name, replaceSRollbackName) - + a.apply(node, n.Name, func(newNode, parent SQLNode) { + parent.(*SRollback).Name = newNode.(ColIdent) + }) case *Savepoint: - a.apply(node, n.Name, replaceSavepointName) - + a.apply(node, n.Name, func(newNode, parent SQLNode) { + parent.(*Savepoint).Name = newNode.(ColIdent) + }) case *Select: - a.apply(node, n.Comments, replaceSelectComments) - a.apply(node, n.From, replaceSelectFrom) - a.apply(node, n.GroupBy, replaceSelectGroupBy) - a.apply(node, n.Having, replaceSelectHaving) - a.apply(node, n.Into, replaceSelectInto) - a.apply(node, n.Limit, replaceSelectLimit) - a.apply(node, n.OrderBy, replaceSelectOrderBy) - a.apply(node, n.SelectExprs, replaceSelectSelectExprs) - a.apply(node, n.Where, replaceSelectWhere) - + a.apply(node, n.Comments, func(newNode, parent SQLNode) { + parent.(*Select).Comments = newNode.(Comments) + }) + a.apply(node, n.SelectExprs, func(newNode, parent SQLNode) { + parent.(*Select).SelectExprs = newNode.(SelectExprs) + }) + a.apply(node, n.From, func(newNode, parent SQLNode) { + parent.(*Select).From = newNode.(TableExprs) + }) + a.apply(node, n.Where, func(newNode, parent SQLNode) { + parent.(*Select).Where = newNode.(*Where) + }) + a.apply(node, n.GroupBy, func(newNode, parent SQLNode) { + parent.(*Select).GroupBy = newNode.(GroupBy) + }) + a.apply(node, n.Having, func(newNode, parent SQLNode) { + parent.(*Select).Having = newNode.(*Where) + }) + a.apply(node, n.OrderBy, func(newNode, parent SQLNode) { + parent.(*Select).OrderBy = newNode.(OrderBy) + }) + a.apply(node, n.Limit, func(newNode, parent SQLNode) { + parent.(*Select).Limit = newNode.(*Limit) + }) + a.apply(node, n.Into, func(newNode, parent SQLNode) { + parent.(*Select).Into = newNode.(*SelectInto) + }) case SelectExprs: - replacer := replaceSelectExprsItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() + for x, el := range n { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(SelectExprs)[idx] = newNode.(SelectExpr) + } + }(x)) } - case *SelectInto: - case *Set: - a.apply(node, n.Comments, replaceSetComments) - a.apply(node, n.Exprs, replaceSetExprs) - + a.apply(node, n.Comments, func(newNode, parent SQLNode) { + parent.(*Set).Comments = newNode.(Comments) + }) + a.apply(node, n.Exprs, func(newNode, parent SQLNode) { + parent.(*Set).Exprs = newNode.(SetExprs) + }) case *SetExpr: - a.apply(node, n.Expr, replaceSetExprExpr) - a.apply(node, n.Name, replaceSetExprName) - + a.apply(node, n.Name, func(newNode, parent SQLNode) { + parent.(*SetExpr).Name = newNode.(ColIdent) + }) + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*SetExpr).Expr = newNode.(Expr) + }) case SetExprs: - replacer := replaceSetExprsItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() + for x, el := range n { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(SetExprs)[idx] = newNode.(*SetExpr) + } + }(x)) } - case *SetTransaction: - replacerCharacteristics := replaceSetTransactionCharacteristics(0) - replacerCharacteristicsB := &replacerCharacteristics - for _, 
item := range n.Characteristics { - a.apply(node, item, replacerCharacteristicsB.replace) - replacerCharacteristicsB.inc() + a.apply(node, n.SQLNode, func(newNode, parent SQLNode) { + parent.(*SetTransaction).SQLNode = newNode.(SQLNode) + }) + a.apply(node, n.Comments, func(newNode, parent SQLNode) { + parent.(*SetTransaction).Comments = newNode.(Comments) + }) + for x, el := range n.Characteristics { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(*SetTransaction).Characteristics[idx] = newNode.(Characteristic) + } + }(x)) } - a.apply(node, n.Comments, replaceSetTransactionComments) - case *Show: - a.apply(node, n.Internal, replaceShowInternal) - + a.apply(node, n.Internal, func(newNode, parent SQLNode) { + parent.(*Show).Internal = newNode.(ShowInternal) + }) case *ShowBasic: - a.apply(node, n.Filter, replaceShowBasicFilter) - a.apply(node, n.Tbl, replaceShowBasicTbl) - + a.apply(node, n.Tbl, func(newNode, parent SQLNode) { + parent.(*ShowBasic).Tbl = newNode.(TableName) + }) + a.apply(node, n.Filter, func(newNode, parent SQLNode) { + parent.(*ShowBasic).Filter = newNode.(*ShowFilter) + }) case *ShowCreate: - a.apply(node, n.Op, replaceShowCreateOp) - + a.apply(node, n.Op, func(newNode, parent SQLNode) { + parent.(*ShowCreate).Op = newNode.(TableName) + }) case *ShowFilter: - a.apply(node, n.Filter, replaceShowFilterFilter) - + a.apply(node, n.Filter, func(newNode, parent SQLNode) { + parent.(*ShowFilter).Filter = newNode.(Expr) + }) case *ShowLegacy: - a.apply(node, n.OnTable, replaceShowLegacyOnTable) - a.apply(node, n.ShowCollationFilterOpt, replaceShowLegacyShowCollationFilterOpt) - a.apply(node, n.Table, replaceShowLegacyTable) - + a.apply(node, n.OnTable, func(newNode, parent SQLNode) { + parent.(*ShowLegacy).OnTable = newNode.(TableName) + }) + a.apply(node, n.Table, func(newNode, parent SQLNode) { + parent.(*ShowLegacy).Table = newNode.(TableName) + }) + a.apply(node, n.ShowCollationFilterOpt, func(newNode, parent SQLNode) { + parent.(*ShowLegacy).ShowCollationFilterOpt = newNode.(Expr) + }) case *StarExpr: - a.apply(node, n.TableName, replaceStarExprTableName) - + a.apply(node, n.TableName, func(newNode, parent SQLNode) { + parent.(*StarExpr).TableName = newNode.(TableName) + }) case *Stream: - a.apply(node, n.Comments, replaceStreamComments) - a.apply(node, n.SelectExpr, replaceStreamSelectExpr) - a.apply(node, n.Table, replaceStreamTable) - + a.apply(node, n.Comments, func(newNode, parent SQLNode) { + parent.(*Stream).Comments = newNode.(Comments) + }) + a.apply(node, n.SelectExpr, func(newNode, parent SQLNode) { + parent.(*Stream).SelectExpr = newNode.(SelectExpr) + }) + a.apply(node, n.Table, func(newNode, parent SQLNode) { + parent.(*Stream).Table = newNode.(TableName) + }) case *Subquery: - a.apply(node, n.Select, replaceSubquerySelect) - + a.apply(node, n.Select, func(newNode, parent SQLNode) { + parent.(*Subquery).Select = newNode.(SelectStatement) + }) case *SubstrExpr: - a.apply(node, n.From, replaceSubstrExprFrom) - a.apply(node, n.Name, replaceSubstrExprName) - a.apply(node, n.StrVal, replaceSubstrExprStrVal) - a.apply(node, n.To, replaceSubstrExprTo) - + a.apply(node, n.Name, func(newNode, parent SQLNode) { + parent.(*SubstrExpr).Name = newNode.(*ColName) + }) + a.apply(node, n.StrVal, func(newNode, parent SQLNode) { + parent.(*SubstrExpr).StrVal = newNode.(*Literal) + }) + a.apply(node, n.From, func(newNode, parent SQLNode) { + parent.(*SubstrExpr).From = newNode.(Expr) + }) + a.apply(node, n.To, 
func(newNode, parent SQLNode) { + parent.(*SubstrExpr).To = newNode.(Expr) + }) case TableExprs: - replacer := replaceTableExprsItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() + for x, el := range n { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(TableExprs)[idx] = newNode.(TableExpr) + } + }(x)) } - case TableIdent: - case TableName: - a.apply(node, n.Name, replaceTableNameName) - a.apply(node, n.Qualifier, replaceTableNameQualifier) - + a.apply(node, n.Name, replacePanic("TableName Name")) + a.apply(node, n.Qualifier, replacePanic("TableName Qualifier")) case TableNames: - replacer := replaceTableNamesItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() + for x, el := range n { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(TableNames)[idx] = newNode.(TableName) + } + }(x)) } - case TableOptions: - case *TableSpec: - replacerColumns := replaceTableSpecColumns(0) - replacerColumnsB := &replacerColumns - for _, item := range n.Columns { - a.apply(node, item, replacerColumnsB.replace) - replacerColumnsB.inc() + for x, el := range n.Columns { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(*TableSpec).Columns[idx] = newNode.(*ColumnDefinition) + } + }(x)) } - replacerConstraints := replaceTableSpecConstraints(0) - replacerConstraintsB := &replacerConstraints - for _, item := range n.Constraints { - a.apply(node, item, replacerConstraintsB.replace) - replacerConstraintsB.inc() + for x, el := range n.Indexes { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(*TableSpec).Indexes[idx] = newNode.(*IndexDefinition) + } + }(x)) } - replacerIndexes := replaceTableSpecIndexes(0) - replacerIndexesB := &replacerIndexes - for _, item := range n.Indexes { - a.apply(node, item, replacerIndexesB.replace) - replacerIndexesB.inc() + for x, el := range n.Constraints { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(*TableSpec).Constraints[idx] = newNode.(*ConstraintDefinition) + } + }(x)) } - a.apply(node, n.Options, replaceTableSpecOptions) - + a.apply(node, n.Options, func(newNode, parent SQLNode) { + parent.(*TableSpec).Options = newNode.(TableOptions) + }) case *TablespaceOperation: - case *TimestampFuncExpr: - a.apply(node, n.Expr1, replaceTimestampFuncExprExpr1) - a.apply(node, n.Expr2, replaceTimestampFuncExprExpr2) - + a.apply(node, n.Expr1, func(newNode, parent SQLNode) { + parent.(*TimestampFuncExpr).Expr1 = newNode.(Expr) + }) + a.apply(node, n.Expr2, func(newNode, parent SQLNode) { + parent.(*TimestampFuncExpr).Expr2 = newNode.(Expr) + }) case *TruncateTable: - a.apply(node, n.Table, replaceTruncateTableTable) - + a.apply(node, n.Table, func(newNode, parent SQLNode) { + parent.(*TruncateTable).Table = newNode.(TableName) + }) case *UnaryExpr: - a.apply(node, n.Expr, replaceUnaryExprExpr) - + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*UnaryExpr).Expr = newNode.(Expr) + }) case *Union: - a.apply(node, n.FirstStatement, replaceUnionFirstStatement) - a.apply(node, n.Limit, replaceUnionLimit) - a.apply(node, n.OrderBy, replaceUnionOrderBy) - replacerUnionSelects := replaceUnionUnionSelects(0) - 
replacerUnionSelectsB := &replacerUnionSelects - for _, item := range n.UnionSelects { - a.apply(node, item, replacerUnionSelectsB.replace) - replacerUnionSelectsB.inc() + a.apply(node, n.FirstStatement, func(newNode, parent SQLNode) { + parent.(*Union).FirstStatement = newNode.(SelectStatement) + }) + for x, el := range n.UnionSelects { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(*Union).UnionSelects[idx] = newNode.(*UnionSelect) + } + }(x)) } - + a.apply(node, n.OrderBy, func(newNode, parent SQLNode) { + parent.(*Union).OrderBy = newNode.(OrderBy) + }) + a.apply(node, n.Limit, func(newNode, parent SQLNode) { + parent.(*Union).Limit = newNode.(*Limit) + }) case *UnionSelect: - a.apply(node, n.Statement, replaceUnionSelectStatement) - + a.apply(node, n.Statement, func(newNode, parent SQLNode) { + parent.(*UnionSelect).Statement = newNode.(SelectStatement) + }) case *UnlockTables: - case *Update: - a.apply(node, n.Comments, replaceUpdateComments) - a.apply(node, n.Exprs, replaceUpdateExprs) - a.apply(node, n.Limit, replaceUpdateLimit) - a.apply(node, n.OrderBy, replaceUpdateOrderBy) - a.apply(node, n.TableExprs, replaceUpdateTableExprs) - a.apply(node, n.Where, replaceUpdateWhere) - + a.apply(node, n.Comments, func(newNode, parent SQLNode) { + parent.(*Update).Comments = newNode.(Comments) + }) + a.apply(node, n.TableExprs, func(newNode, parent SQLNode) { + parent.(*Update).TableExprs = newNode.(TableExprs) + }) + a.apply(node, n.Exprs, func(newNode, parent SQLNode) { + parent.(*Update).Exprs = newNode.(UpdateExprs) + }) + a.apply(node, n.Where, func(newNode, parent SQLNode) { + parent.(*Update).Where = newNode.(*Where) + }) + a.apply(node, n.OrderBy, func(newNode, parent SQLNode) { + parent.(*Update).OrderBy = newNode.(OrderBy) + }) + a.apply(node, n.Limit, func(newNode, parent SQLNode) { + parent.(*Update).Limit = newNode.(*Limit) + }) case *UpdateExpr: - a.apply(node, n.Expr, replaceUpdateExprExpr) - a.apply(node, n.Name, replaceUpdateExprName) - + a.apply(node, n.Name, func(newNode, parent SQLNode) { + parent.(*UpdateExpr).Name = newNode.(*ColName) + }) + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*UpdateExpr).Expr = newNode.(Expr) + }) case UpdateExprs: - replacer := replaceUpdateExprsItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() + for x, el := range n { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(UpdateExprs)[idx] = newNode.(*UpdateExpr) + } + }(x)) } - case *Use: - a.apply(node, n.DBName, replaceUseDBName) - + a.apply(node, n.DBName, func(newNode, parent SQLNode) { + parent.(*Use).DBName = newNode.(TableIdent) + }) case *VStream: - a.apply(node, n.Comments, replaceVStreamComments) - a.apply(node, n.Limit, replaceVStreamLimit) - a.apply(node, n.SelectExpr, replaceVStreamSelectExpr) - a.apply(node, n.Table, replaceVStreamTable) - a.apply(node, n.Where, replaceVStreamWhere) - + a.apply(node, n.Comments, func(newNode, parent SQLNode) { + parent.(*VStream).Comments = newNode.(Comments) + }) + a.apply(node, n.SelectExpr, func(newNode, parent SQLNode) { + parent.(*VStream).SelectExpr = newNode.(SelectExpr) + }) + a.apply(node, n.Table, func(newNode, parent SQLNode) { + parent.(*VStream).Table = newNode.(TableName) + }) + a.apply(node, n.Where, func(newNode, parent SQLNode) { + parent.(*VStream).Where = newNode.(*Where) + }) + a.apply(node, n.Limit, 
func(newNode, parent SQLNode) { + parent.(*VStream).Limit = newNode.(*Limit) + }) case ValTuple: - replacer := replaceValTupleItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() + for x, el := range n { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(ValTuple)[idx] = newNode.(Expr) + } + }(x)) } - case *Validation: - case Values: - replacer := replaceValuesItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() + for x, el := range n { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(Values)[idx] = newNode.(ValTuple) + } + }(x)) } - case *ValuesFuncExpr: - a.apply(node, n.Name, replaceValuesFuncExprName) - + a.apply(node, n.Name, func(newNode, parent SQLNode) { + parent.(*ValuesFuncExpr).Name = newNode.(*ColName) + }) case VindexParam: - a.apply(node, n.Key, replaceVindexParamKey) - + a.apply(node, n.Key, replacePanic("VindexParam Key")) case *VindexSpec: - a.apply(node, n.Name, replaceVindexSpecName) - replacerParams := replaceVindexSpecParams(0) - replacerParamsB := &replacerParams - for _, item := range n.Params { - a.apply(node, item, replacerParamsB.replace) - replacerParamsB.inc() + a.apply(node, n.Name, func(newNode, parent SQLNode) { + parent.(*VindexSpec).Name = newNode.(ColIdent) + }) + a.apply(node, n.Type, func(newNode, parent SQLNode) { + parent.(*VindexSpec).Type = newNode.(ColIdent) + }) + for x, el := range n.Params { + a.apply(node, el, func(idx int) func(SQLNode, SQLNode) { + return func(newNode, container SQLNode) { + container.(*VindexSpec).Params[idx] = newNode.(VindexParam) + } + }(x)) } - a.apply(node, n.Type, replaceVindexSpecType) - case *When: - a.apply(node, n.Cond, replaceWhenCond) - a.apply(node, n.Val, replaceWhenVal) - + a.apply(node, n.Cond, func(newNode, parent SQLNode) { + parent.(*When).Cond = newNode.(Expr) + }) + a.apply(node, n.Val, func(newNode, parent SQLNode) { + parent.(*When).Val = newNode.(Expr) + }) case *Where: - a.apply(node, n.Expr, replaceWhereExpr) - + a.apply(node, n.Expr, func(newNode, parent SQLNode) { + parent.(*Where).Expr = newNode.(Expr) + }) case *XorExpr: - a.apply(node, n.Left, replaceXorExprLeft) - a.apply(node, n.Right, replaceXorExprRight) - - default: - panic("unknown ast type " + reflect.TypeOf(node).String()) + a.apply(node, n.Left, func(newNode, parent SQLNode) { + parent.(*XorExpr).Left = newNode.(Expr) + }) + a.apply(node, n.Right, func(newNode, parent SQLNode) { + parent.(*XorExpr).Right = newNode.(Expr) + }) } - if a.post != nil && !a.post(&a.cursor) { panic(abort) } - a.cursor = saved } - -func isNilValue(i interface{}) bool { - valueOf := reflect.ValueOf(i) - kind := valueOf.Kind() - isNullable := kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice - return isNullable && valueOf.IsNil() -} diff --git a/go/vt/sqlparser/rewriter_api.go b/go/vt/sqlparser/rewriter_api.go index 47c85e0473b..ea25e67b1d6 100644 --- a/go/vt/sqlparser/rewriter_api.go +++ b/go/vt/sqlparser/rewriter_api.go @@ -16,6 +16,11 @@ limitations under the License. 
package sqlparser +import ( + "reflect" + "runtime" +) + // The rewriter was heavily inspired by https://github.com/golang/tools/blob/master/go/ast/astutil/rewrite.go // Rewrite traverses a syntax tree recursively, starting with root, @@ -34,11 +39,20 @@ package sqlparser // Only fields that refer to AST nodes are considered children; // i.e., fields of basic types (strings, []byte, etc.) are ignored. // -func Rewrite(node SQLNode, pre, post ApplyFunc) (result SQLNode) { +func Rewrite(node SQLNode, pre, post ApplyFunc) (result SQLNode, err error) { parent := &struct{ SQLNode }{node} defer func() { - if r := recover(); r != nil && r != abort { - panic(r) + if r := recover(); r != nil { + switch r := r.(type) { + case abortT: // nothing to do + + case *runtime.TypeAssertionError: + err = r + case *valueTypeFieldCantChangeErr: + err = r + default: + panic(r) + } } result = parent.SQLNode }() @@ -56,7 +70,7 @@ func Rewrite(node SQLNode, pre, post ApplyFunc) (result SQLNode) { a.apply(parent, node, replacer) - return parent.SQLNode + return parent.SQLNode, nil } // An ApplyFunc is invoked by Rewrite for each node n, even if n is nil, @@ -67,7 +81,9 @@ func Rewrite(node SQLNode, pre, post ApplyFunc) (result SQLNode) { // See Rewrite for details. type ApplyFunc func(*Cursor) bool -var abort = new(int) // singleton, to signal termination of Apply +type abortT int + +var abort = abortT(0) // singleton, to signal termination of Apply // A Cursor describes a node encountered during Apply. // Information about the node and its parent is available @@ -90,3 +106,34 @@ func (c *Cursor) Replace(newNode SQLNode) { c.replacer(newNode, c.parent) c.node = newNode } + +type replacerFunc func(newNode, parent SQLNode) + +// application carries all the shared data so we can pass it around cheaply. +type application struct { + pre, post ApplyFunc + cursor Cursor +} + +func isNilValue(i interface{}) bool { + valueOf := reflect.ValueOf(i) + kind := valueOf.Kind() + isNullable := kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice + return isNullable && valueOf.IsNil() +} + +// this type is here so we can catch it in the Rewrite method above +type valueTypeFieldCantChangeErr struct { + msg string +} + +// Error implements the error interface +func (e *valueTypeFieldCantChangeErr) Error() string { + return "Tried replacing a field of a value type. This is not supported. " + e.msg +} + +func replacePanic(msg string) func(newNode, parent SQLNode) { + return func(newNode, parent SQLNode) { + panic(&valueTypeFieldCantChangeErr{msg: msg}) + } +} diff --git a/go/vt/sqlparser/rewriter_test.go b/go/vt/sqlparser/rewriter_test.go new file mode 100644 index 00000000000..6131c6c5588 --- /dev/null +++ b/go/vt/sqlparser/rewriter_test.go @@ -0,0 +1,67 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sqlparser + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func BenchmarkVisitLargeExpression(b *testing.B) { + gen := newGenerator(1, 5) + exp := gen.expression() + + depth := 0 + for i := 0; i < b.N; i++ { + _, err := Rewrite(exp, func(cursor *Cursor) bool { + depth++ + return true + }, func(cursor *Cursor) bool { + depth-- + return true + }) + require.NoError(b, err) + } +} + +func TestBadTypeReturnsErrorAndNotPanic(t *testing.T) { + parse, err := Parse("select 42 from dual") + require.NoError(t, err) + _, err = Rewrite(parse, func(cursor *Cursor) bool { + _, ok := cursor.Node().(*Literal) + if ok { + cursor.Replace(&AliasedTableExpr{}) // this is not a valid replacement because of types + } + return true + }, nil) + require.Error(t, err) +} + +func TestChangeValueTypeGivesError(t *testing.T) { + parse, err := Parse("select * from a join b on a.id = b.id") + require.NoError(t, err) + _, err = Rewrite(parse, func(cursor *Cursor) bool { + _, ok := cursor.Node().(*ComparisonExpr) + if ok { + cursor.Replace(&NullVal{}) // this is not a valid replacement because the container is a value type + } + return true + }, nil) + require.Error(t, err) + +} diff --git a/go/vt/sqlparser/sql.go b/go/vt/sqlparser/sql.go index 84e70012044..b18682673a8 100644 --- a/go/vt/sqlparser/sql.go +++ b/go/vt/sqlparser/sql.go @@ -5488,7 +5488,7 @@ yydefault: yyDollar = yyS[yypt-7 : yypt+1] //line sql.y:509 { - yyVAL.selStmt = NewSelect(Comments(yyDollar[2].bytes2), SelectExprs{Nextval{Expr: yyDollar[5].expr}}, []string{yyDollar[3].str} /*options*/, TableExprs{&AliasedTableExpr{Expr: yyDollar[7].tableName}}, nil /*where*/, nil /*groupBy*/, nil /*having*/) + yyVAL.selStmt = NewSelect(Comments(yyDollar[2].bytes2), SelectExprs{&Nextval{Expr: yyDollar[5].expr}}, []string{yyDollar[3].str} /*options*/, TableExprs{&AliasedTableExpr{Expr: yyDollar[7].tableName}}, nil /*where*/, nil /*groupBy*/, nil /*having*/) } case 45: yyDollar = yyS[yypt-4 : yypt+1] diff --git a/go/vt/sqlparser/sql.y b/go/vt/sqlparser/sql.y index c06168310fb..df99cb0dec4 100644 --- a/go/vt/sqlparser/sql.y +++ b/go/vt/sqlparser/sql.y @@ -507,7 +507,7 @@ select_statement: } | SELECT comment_opt cache_opt NEXT num_val for_from table_name { - $$ = NewSelect(Comments($2), SelectExprs{Nextval{Expr: $5}}, []string{$3}/*options*/, TableExprs{&AliasedTableExpr{Expr: $7}}, nil/*where*/, nil/*groupBy*/, nil/*having*/) + $$ = NewSelect(Comments($2), SelectExprs{&Nextval{Expr: $5}}, []string{$3}/*options*/, TableExprs{&AliasedTableExpr{Expr: $7}}, nil/*where*/, nil/*groupBy*/, nil/*having*/) } // simple_select is an unparenthesized select used for subquery. diff --git a/go/vt/sqlparser/token.go b/go/vt/sqlparser/token.go index 46382591429..42ba28a2623 100644 --- a/go/vt/sqlparser/token.go +++ b/go/vt/sqlparser/token.go @@ -19,7 +19,6 @@ package sqlparser import ( "bytes" "fmt" - "io" "strings" "vitess.io/vitess/go/bytes2" @@ -27,14 +26,12 @@ import ( ) const ( - defaultBufSize = 4096 - eofChar = 0x100 + eofChar = 0x100 ) // Tokenizer is the struct used to generate SQL // tokens for the parser. type Tokenizer struct { - InStream io.Reader AllowComments bool SkipSpecialComments bool SkipToEnd bool @@ -64,15 +61,6 @@ func NewStringTokenizer(sql string) *Tokenizer { } } -// NewTokenizer creates a new Tokenizer reading a sql -// string from the io.Reader. 
-func NewTokenizer(r io.Reader) *Tokenizer { - return &Tokenizer{ - InStream: r, - buf: make([]byte, defaultBufSize), - } -} - // keywords is a map of mysql keywords that fall into two categories: // 1) keywords considered reserved by MySQL // 2) keywords for us to handle specially in sql.y @@ -691,8 +679,11 @@ func (tkn *Tokenizer) Scan() (int, []byte) { case '-': switch tkn.lastChar { case '-': - tkn.next() - return tkn.scanCommentType1("--") + nextChar := tkn.peek(0) + if nextChar == ' ' || nextChar == '\n' || nextChar == '\t' || nextChar == '\r' || nextChar == eofChar { + tkn.next() + return tkn.scanCommentType1("--") + } case '>': tkn.next() if tkn.lastChar == '>' { @@ -1052,15 +1043,6 @@ func (tkn *Tokenizer) consumeNext(buffer *bytes2.Buffer) { } func (tkn *Tokenizer) next() { - if tkn.bufPos >= tkn.bufSize && tkn.InStream != nil { - // Try and refill the buffer - var err error - tkn.bufPos = 0 - if tkn.bufSize, err = tkn.InStream.Read(tkn.buf); err != io.EOF && err != nil { - tkn.LastError = err - } - } - if tkn.bufPos >= tkn.bufSize { if tkn.lastChar != eofChar { tkn.Position++ @@ -1073,6 +1055,13 @@ func (tkn *Tokenizer) next() { } } +func (tkn *Tokenizer) peek(dist int) uint16 { + if tkn.bufPos+dist >= tkn.bufSize { + return eofChar + } + return uint16(tkn.buf[tkn.bufPos+dist]) +} + // reset clears any internal state. func (tkn *Tokenizer) reset() { tkn.ParseTree = nil diff --git a/go/vt/sqlparser/utils.go b/go/vt/sqlparser/utils.go index 1de7833a58e..983faaec22b 100644 --- a/go/vt/sqlparser/utils.go +++ b/go/vt/sqlparser/utils.go @@ -40,7 +40,10 @@ func QueryMatchesTemplates(query string, queryTemplates []string) (match bool, e if err != nil { return "", err } - Normalize(stmt, bv, "") + err = Normalize(stmt, bv, "") + if err != nil { + return "", err + } normalized := String(stmt) return normalized, nil } diff --git a/go/vt/sqlparser/visitorgen/ast_walker.go b/go/vt/sqlparser/visitorgen/ast_walker.go deleted file mode 100644 index 822fb6c4c5e..00000000000 --- a/go/vt/sqlparser/visitorgen/ast_walker.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package visitorgen - -import ( - "go/ast" - "reflect" -) - -var _ ast.Visitor = (*walker)(nil) - -type walker struct { - result SourceFile -} - -// Walk walks the given AST and translates it to the simplified AST used by the next steps -func Walk(node ast.Node) *SourceFile { - var w walker - ast.Walk(&w, node) - return &w.result -} - -// Visit implements the ast.Visitor interface -func (w *walker) Visit(node ast.Node) ast.Visitor { - switch n := node.(type) { - case *ast.TypeSpec: - switch t2 := n.Type.(type) { - case *ast.InterfaceType: - w.append(&InterfaceDeclaration{ - name: n.Name.Name, - block: "", - }) - case *ast.StructType: - var fields []*Field - for _, f := range t2.Fields.List { - for _, name := range f.Names { - fields = append(fields, &Field{ - name: name.Name, - typ: sastType(f.Type), - }) - } - - } - w.append(&StructDeclaration{ - name: n.Name.Name, - fields: fields, - }) - case *ast.ArrayType: - w.append(&TypeAlias{ - name: n.Name.Name, - typ: &Array{inner: sastType(t2.Elt)}, - }) - case *ast.Ident: - w.append(&TypeAlias{ - name: n.Name.Name, - typ: &TypeString{t2.Name}, - }) - - default: - panic(reflect.TypeOf(t2)) - } - case *ast.FuncDecl: - if len(n.Recv.List) > 1 || len(n.Recv.List[0].Names) > 1 { - panic("don't know what to do!") - } - var f *Field - if len(n.Recv.List) == 1 { - r := n.Recv.List[0] - t := sastType(r.Type) - if len(r.Names) > 1 { - panic("don't know what to do!") - } - if len(r.Names) == 1 { - f = &Field{ - name: r.Names[0].Name, - typ: t, - } - } else { - f = &Field{ - name: "", - typ: t, - } - } - } - - w.append(&FuncDeclaration{ - receiver: f, - name: n.Name.Name, - block: "", - arguments: nil, - }) - } - - return w -} - -func (w *walker) append(line Sast) { - w.result.lines = append(w.result.lines, line) -} - -func sastType(e ast.Expr) Type { - switch n := e.(type) { - case *ast.StarExpr: - return &Ref{sastType(n.X)} - case *ast.Ident: - return &TypeString{n.Name} - case *ast.ArrayType: - return &Array{inner: sastType(n.Elt)} - case *ast.InterfaceType: - return &TypeString{"interface{}"} - case *ast.StructType: - return &TypeString{"struct{}"} - } - - panic(reflect.TypeOf(e)) -} diff --git a/go/vt/sqlparser/visitorgen/ast_walker_test.go b/go/vt/sqlparser/visitorgen/ast_walker_test.go deleted file mode 100644 index a4b01f70835..00000000000 --- a/go/vt/sqlparser/visitorgen/ast_walker_test.go +++ /dev/null @@ -1,239 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package visitorgen - -import ( - "go/parser" - "go/token" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/stretchr/testify/require" -) - -func TestSingleInterface(t *testing.T) { - input := ` -package sqlparser - -type Nodeiface interface { - iNode() -} -` - - fset := token.NewFileSet() - ast, err := parser.ParseFile(fset, "ast.go", input, 0) - require.NoError(t, err) - - result := Walk(ast) - expected := SourceFile{ - lines: []Sast{&InterfaceDeclaration{ - name: "Nodeiface", - block: "", - }}, - } - assert.Equal(t, expected.String(), result.String()) -} - -func TestEmptyStruct(t *testing.T) { - input := ` -package sqlparser - -type Empty struct {} -` - - fset := token.NewFileSet() - ast, err := parser.ParseFile(fset, "ast.go", input, 0) - require.NoError(t, err) - - result := Walk(ast) - expected := SourceFile{ - lines: []Sast{&StructDeclaration{ - name: "Empty", - fields: []*Field{}, - }}, - } - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithStringField(t *testing.T) { - input := ` -package sqlparser - -type Struct struct { - field string -} -` - - fset := token.NewFileSet() - ast, err := parser.ParseFile(fset, "ast.go", input, 0) - require.NoError(t, err) - - result := Walk(ast) - expected := SourceFile{ - lines: []Sast{&StructDeclaration{ - name: "Struct", - fields: []*Field{{ - name: "field", - typ: &TypeString{typName: "string"}, - }}, - }}, - } - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithDifferentTypes(t *testing.T) { - input := ` -package sqlparser - -type Struct struct { - field string - reference *string - array []string - arrayOfRef []*string -} -` - - fset := token.NewFileSet() - ast, err := parser.ParseFile(fset, "ast.go", input, 0) - require.NoError(t, err) - - result := Walk(ast) - expected := SourceFile{ - lines: []Sast{&StructDeclaration{ - name: "Struct", - fields: []*Field{{ - name: "field", - typ: &TypeString{typName: "string"}, - }, { - name: "reference", - typ: &Ref{&TypeString{typName: "string"}}, - }, { - name: "array", - typ: &Array{&TypeString{typName: "string"}}, - }, { - name: "arrayOfRef", - typ: &Array{&Ref{&TypeString{typName: "string"}}}, - }}, - }}, - } - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithTwoStringFieldInOneLine(t *testing.T) { - input := ` -package sqlparser - -type Struct struct { - left, right string -} -` - - fset := token.NewFileSet() - ast, err := parser.ParseFile(fset, "ast.go", input, 0) - require.NoError(t, err) - - result := Walk(ast) - expected := SourceFile{ - lines: []Sast{&StructDeclaration{ - name: "Struct", - fields: []*Field{{ - name: "left", - typ: &TypeString{typName: "string"}, - }, { - name: "right", - typ: &TypeString{typName: "string"}, - }}, - }}, - } - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithSingleMethod(t *testing.T) { - input := ` -package sqlparser - -type Empty struct {} - -func (*Empty) method() {} -` - - fset := token.NewFileSet() - ast, err := parser.ParseFile(fset, "ast.go", input, 0) - require.NoError(t, err) - - result := Walk(ast) - expected := SourceFile{ - lines: []Sast{ - &StructDeclaration{ - name: "Empty", - fields: []*Field{}}, - &FuncDeclaration{ - receiver: &Field{ - name: "", - typ: &Ref{&TypeString{"Empty"}}, - }, - name: "method", - block: "", - arguments: []*Field{}, - }, - }, - } - assert.Equal(t, expected.String(), result.String()) -} - -func TestSingleArrayType(t *testing.T) { - input := ` -package sqlparser - -type 
Strings []string -` - - fset := token.NewFileSet() - ast, err := parser.ParseFile(fset, "ast.go", input, 0) - require.NoError(t, err) - - result := Walk(ast) - expected := SourceFile{ - lines: []Sast{&TypeAlias{ - name: "Strings", - typ: &Array{&TypeString{"string"}}, - }}, - } - assert.Equal(t, expected.String(), result.String()) -} - -func TestSingleTypeAlias(t *testing.T) { - input := ` -package sqlparser - -type String string -` - - fset := token.NewFileSet() - ast, err := parser.ParseFile(fset, "ast.go", input, 0) - require.NoError(t, err) - - result := Walk(ast) - expected := SourceFile{ - lines: []Sast{&TypeAlias{ - name: "String", - typ: &TypeString{"string"}, - }}, - } - assert.Equal(t, expected.String(), result.String()) -} diff --git a/go/vt/sqlparser/visitorgen/main/main.go b/go/vt/sqlparser/visitorgen/main/main.go deleted file mode 100644 index 0d940ea060f..00000000000 --- a/go/vt/sqlparser/visitorgen/main/main.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "bytes" - "flag" - "fmt" - "go/parser" - "go/token" - "io/ioutil" - "os" - - "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/vt/log" - - "vitess.io/vitess/go/vt/sqlparser/visitorgen" -) - -var ( - inputFile = flag.String("input", "", "input file to use") - outputFile = flag.String("output", "", "output file") - compare = flag.Bool("compareOnly", false, "instead of writing to the output file, compare if the generated visitor is still valid for this ast.go") -) - -const usage = `Usage of visitorgen: - -go run /path/to/visitorgen/main -input=/path/to/ast.go -output=/path/to/rewriter.go -` - -func main() { - defer exit.Recover() - flag.Usage = printUsage - flag.Parse() - - if *inputFile == "" || *outputFile == "" { - printUsage() - exit.Return(1) - } - - fs := token.NewFileSet() - file, err := parser.ParseFile(fs, *inputFile, nil, parser.DeclarationErrors) - if err != nil { - log.Error(err) - exit.Return(1) - } - - astWalkResult := visitorgen.Walk(file) - vp := visitorgen.Transform(astWalkResult) - vd := visitorgen.ToVisitorPlan(vp) - - replacementMethods := visitorgen.EmitReplacementMethods(vd) - typeSwitch := visitorgen.EmitTypeSwitches(vd) - - b := &bytes.Buffer{} - fmt.Fprint(b, fileHeader) - fmt.Fprintln(b) - fmt.Fprintln(b, replacementMethods) - fmt.Fprint(b, applyHeader) - fmt.Fprintln(b, typeSwitch) - fmt.Fprintln(b, fileFooter) - - if *compare { - currentFile, err := ioutil.ReadFile(*outputFile) - if err != nil { - log.Error(err) - exit.Return(1) - } - if !bytes.Equal(b.Bytes(), currentFile) { - fmt.Println("rewriter needs to be re-generated: go generate " + *outputFile) - exit.Return(1) - } - } else { - err = ioutil.WriteFile(*outputFile, b.Bytes(), 0644) - if err != nil { - log.Error(err) - exit.Return(1) - } - } - -} - -func printUsage() { - os.Stderr.WriteString(usage) - os.Stderr.WriteString("\nOptions:\n") - flag.PrintDefaults() -} - -const fileHeader = `// Code generated by visitorgen/main/main.go. DO NOT EDIT. 
- -package sqlparser - -//go:generate go run ./visitorgen/main -input=ast.go -output=rewriter.go - -import ( - "reflect" -) - -type replacerFunc func(newNode, parent SQLNode) - -// application carries all the shared data so we can pass it around cheaply. -type application struct { - pre, post ApplyFunc - cursor Cursor -} -` - -const applyHeader = ` -// apply is where the visiting happens. Here is where we keep the big switch-case that will be used -// to do the actual visiting of SQLNodes -func (a *application) apply(parent, node SQLNode, replacer replacerFunc) { - if node == nil || isNilValue(node) { - return - } - - // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead - saved := a.cursor - a.cursor.replacer = replacer - a.cursor.node = node - a.cursor.parent = parent - - if a.pre != nil && !a.pre(&a.cursor) { - a.cursor = saved - return - } - - // walk children - // (the order of the cases is alphabetical) - switch n := node.(type) { - case nil: - ` - -const fileFooter = ` - default: - panic("unknown ast type " + reflect.TypeOf(node).String()) - } - - if a.post != nil && !a.post(&a.cursor) { - panic(abort) - } - - a.cursor = saved -} - -func isNilValue(i interface{}) bool { - valueOf := reflect.ValueOf(i) - kind := valueOf.Kind() - isNullable := kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice - return isNullable && valueOf.IsNil() -}` diff --git a/go/vt/sqlparser/visitorgen/sast.go b/go/vt/sqlparser/visitorgen/sast.go deleted file mode 100644 index e46485e8f5d..00000000000 --- a/go/vt/sqlparser/visitorgen/sast.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package visitorgen - -// simplified ast - when reading the golang ast of the ast.go file, we translate the golang ast objects -// to this much simpler format, that contains only the necessary information and no more -type ( - // SourceFile contains all important lines from an ast.go file - SourceFile struct { - lines []Sast - } - - // Sast or simplified AST, is a representation of the ast.go lines we are interested in - Sast interface { - toSastString() string - } - - // InterfaceDeclaration represents a declaration of an interface. This is used to keep track of which types - // need to be handled by the visitor framework - InterfaceDeclaration struct { - name, block string - } - - // TypeAlias is used whenever we see a `type XXX YYY` - XXX is the new name for YYY. - // Note that YYY could be an array or a reference - TypeAlias struct { - name string - typ Type - } - - // FuncDeclaration represents a function declaration. These are tracked to know which types implement interfaces. - FuncDeclaration struct { - receiver *Field - name, block string - arguments []*Field - } - - // StructDeclaration represents a struct. 
It contains the fields and their types - StructDeclaration struct { - name string - fields []*Field - } - - // Field is a field in a struct - a name with a type tuple - Field struct { - name string - typ Type - } - - // Type represents a type in the golang type system. Used to keep track of type we need to handle, - // and the types of fields. - Type interface { - toTypString() string - rawTypeName() string - } - - // TypeString is a raw type name, such as `string` - TypeString struct { - typName string - } - - // Ref is a reference to something, such as `*string` - Ref struct { - inner Type - } - - // Array is an array of things, such as `[]string` - Array struct { - inner Type - } -) - -var _ Sast = (*InterfaceDeclaration)(nil) -var _ Sast = (*StructDeclaration)(nil) -var _ Sast = (*FuncDeclaration)(nil) -var _ Sast = (*TypeAlias)(nil) - -var _ Type = (*TypeString)(nil) -var _ Type = (*Ref)(nil) -var _ Type = (*Array)(nil) - -// String returns a textual representation of the SourceFile. This is for testing purposed -func (t *SourceFile) String() string { - var result string - for _, l := range t.lines { - result += l.toSastString() - result += "\n" - } - - return result -} - -func (t *Ref) toTypString() string { - return "*" + t.inner.toTypString() -} - -func (t *Array) toTypString() string { - return "[]" + t.inner.toTypString() -} - -func (t *TypeString) toTypString() string { - return t.typName -} - -func (f *FuncDeclaration) toSastString() string { - var receiver string - if f.receiver != nil { - receiver = "(" + f.receiver.String() + ") " - } - var args string - for i, arg := range f.arguments { - if i > 0 { - args += ", " - } - args += arg.String() - } - - return "func " + receiver + f.name + "(" + args + ") {" + blockInNewLines(f.block) + "}" -} - -func (i *InterfaceDeclaration) toSastString() string { - return "type " + i.name + " interface {" + blockInNewLines(i.block) + "}" -} - -func (a *TypeAlias) toSastString() string { - return "type " + a.name + " " + a.typ.toTypString() -} - -func (s *StructDeclaration) toSastString() string { - var block string - for _, f := range s.fields { - block += "\t" + f.String() + "\n" - } - - return "type " + s.name + " struct {" + blockInNewLines(block) + "}" -} - -func blockInNewLines(block string) string { - if block == "" { - return "" - } - return "\n" + block + "\n" -} - -// String returns a string representation of a field -func (f *Field) String() string { - if f.name != "" { - return f.name + " " + f.typ.toTypString() - } - - return f.typ.toTypString() -} - -func (t *TypeString) rawTypeName() string { - return t.typName -} - -func (t *Ref) rawTypeName() string { - return t.inner.rawTypeName() -} - -func (t *Array) rawTypeName() string { - return t.inner.rawTypeName() -} diff --git a/go/vt/sqlparser/visitorgen/struct_producer.go b/go/vt/sqlparser/visitorgen/struct_producer.go deleted file mode 100644 index 1c293f30803..00000000000 --- a/go/vt/sqlparser/visitorgen/struct_producer.go +++ /dev/null @@ -1,253 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package visitorgen - -import ( - "fmt" - "sort" -) - -// VisitorData is the data needed to produce the output file -type ( - // VisitorItem represents something that needs to be added to the rewriter infrastructure - VisitorItem interface { - toFieldItemString() string - typeName() string - asSwitchCase() string - asReplMethod() string - getFieldName() string - } - - // SingleFieldItem is a single field in a struct - SingleFieldItem struct { - StructType, FieldType Type - FieldName string - } - - // ArrayFieldItem is an array field in a struct - ArrayFieldItem struct { - StructType, ItemType Type - FieldName string - } - - // ArrayItem is an array that implements SQLNode - ArrayItem struct { - StructType, ItemType Type - } - - // VisitorPlan represents all the output needed for the rewriter - VisitorPlan struct { - Switches []*SwitchCase // The cases for the big switch statement used to implement the visitor - } - - // SwitchCase is what we need to know to produce all the type switch cases in the visitor. - SwitchCase struct { - Type Type - Fields []VisitorItem - } -) - -var _ VisitorItem = (*SingleFieldItem)(nil) -var _ VisitorItem = (*ArrayItem)(nil) -var _ VisitorItem = (*ArrayFieldItem)(nil) -var _ sort.Interface = (*VisitorPlan)(nil) -var _ sort.Interface = (*SwitchCase)(nil) - -// ToVisitorPlan transforms the source information into a plan for the visitor code that needs to be produced -func ToVisitorPlan(input *SourceInformation) *VisitorPlan { - var output VisitorPlan - - for _, typ := range input.interestingTypes { - switchit := &SwitchCase{Type: typ} - stroct, isStruct := input.structs[typ.rawTypeName()] - if isStruct { - for _, f := range stroct.fields { - switchit.Fields = append(switchit.Fields, trySingleItem(input, f, typ)...) 
- } - } else { - itemType := input.getItemTypeOfArray(typ) - if itemType != nil && input.isSQLNode(itemType) { - switchit.Fields = append(switchit.Fields, &ArrayItem{ - StructType: typ, - ItemType: itemType, - }) - } - } - sort.Sort(switchit) - output.Switches = append(output.Switches, switchit) - } - sort.Sort(&output) - return &output -} - -func trySingleItem(input *SourceInformation, f *Field, typ Type) []VisitorItem { - if input.isSQLNode(f.typ) { - return []VisitorItem{&SingleFieldItem{ - StructType: typ, - FieldType: f.typ, - FieldName: f.name, - }} - } - - arrType, isArray := f.typ.(*Array) - if isArray && input.isSQLNode(arrType.inner) { - return []VisitorItem{&ArrayFieldItem{ - StructType: typ, - ItemType: arrType.inner, - FieldName: f.name, - }} - } - return []VisitorItem{} -} - -// String returns a string, used for testing -func (v *VisitorPlan) String() string { - var sb builder - for _, s := range v.Switches { - sb.appendF("Type: %v", s.Type.toTypString()) - for _, f := range s.Fields { - sb.appendF("\t%v", f.toFieldItemString()) - } - } - return sb.String() -} - -func (s *SingleFieldItem) toFieldItemString() string { - return fmt.Sprintf("single item: %v of type: %v", s.FieldName, s.FieldType.toTypString()) -} - -func (s *SingleFieldItem) asSwitchCase() string { - return fmt.Sprintf(` a.apply(node, n.%s, %s)`, s.FieldName, s.typeName()) -} - -func (s *SingleFieldItem) asReplMethod() string { - _, isRef := s.StructType.(*Ref) - - if isRef { - return fmt.Sprintf(`func %s(newNode, parent SQLNode) { - parent.(%s).%s = newNode.(%s) -}`, s.typeName(), s.StructType.toTypString(), s.FieldName, s.FieldType.toTypString()) - } - - return fmt.Sprintf(`func %s(newNode, parent SQLNode) { - tmp := parent.(%s) - tmp.%s = newNode.(%s) -}`, s.typeName(), s.StructType.toTypString(), s.FieldName, s.FieldType.toTypString()) - -} - -func (ai *ArrayItem) asReplMethod() string { - name := ai.typeName() - return fmt.Sprintf(`type %s int - -func (r *%s) replace(newNode, container SQLNode) { - container.(%s)[int(*r)] = newNode.(%s) -} - -func (r *%s) inc() { - *r++ -}`, name, name, ai.StructType.toTypString(), ai.ItemType.toTypString(), name) -} - -func (afi *ArrayFieldItem) asReplMethod() string { - name := afi.typeName() - return fmt.Sprintf(`type %s int - -func (r *%s) replace(newNode, container SQLNode) { - container.(%s).%s[int(*r)] = newNode.(%s) -} - -func (r *%s) inc() { - *r++ -}`, name, name, afi.StructType.toTypString(), afi.FieldName, afi.ItemType.toTypString(), name) -} - -func (s *SingleFieldItem) getFieldName() string { - return s.FieldName -} - -func (s *SingleFieldItem) typeName() string { - return "replace" + s.StructType.rawTypeName() + s.FieldName -} - -func (afi *ArrayFieldItem) toFieldItemString() string { - return fmt.Sprintf("array field item: %v.%v contains items of type %v", afi.StructType.toTypString(), afi.FieldName, afi.ItemType.toTypString()) -} - -func (ai *ArrayItem) toFieldItemString() string { - return fmt.Sprintf("array item: %v containing items of type %v", ai.StructType.toTypString(), ai.ItemType.toTypString()) -} - -func (ai *ArrayItem) getFieldName() string { - panic("Should not be called!") -} - -func (afi *ArrayFieldItem) getFieldName() string { - return afi.FieldName -} - -func (ai *ArrayItem) asSwitchCase() string { - return fmt.Sprintf(` replacer := %s(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() - }`, ai.typeName()) -} - -func (afi *ArrayFieldItem) asSwitchCase() string { - return 
fmt.Sprintf(` replacer%s := %s(0) - replacer%sB := &replacer%s - for _, item := range n.%s { - a.apply(node, item, replacer%sB.replace) - replacer%sB.inc() - }`, afi.FieldName, afi.typeName(), afi.FieldName, afi.FieldName, afi.FieldName, afi.FieldName, afi.FieldName) -} - -func (ai *ArrayItem) typeName() string { - return "replace" + ai.StructType.rawTypeName() + "Items" -} - -func (afi *ArrayFieldItem) typeName() string { - return "replace" + afi.StructType.rawTypeName() + afi.FieldName -} -func (v *VisitorPlan) Len() int { - return len(v.Switches) -} - -func (v *VisitorPlan) Less(i, j int) bool { - return v.Switches[i].Type.rawTypeName() < v.Switches[j].Type.rawTypeName() -} - -func (v *VisitorPlan) Swap(i, j int) { - temp := v.Switches[i] - v.Switches[i] = v.Switches[j] - v.Switches[j] = temp -} -func (s *SwitchCase) Len() int { - return len(s.Fields) -} - -func (s *SwitchCase) Less(i, j int) bool { - return s.Fields[i].getFieldName() < s.Fields[j].getFieldName() -} - -func (s *SwitchCase) Swap(i, j int) { - temp := s.Fields[i] - s.Fields[i] = s.Fields[j] - s.Fields[j] = temp -} diff --git a/go/vt/sqlparser/visitorgen/struct_producer_test.go b/go/vt/sqlparser/visitorgen/struct_producer_test.go deleted file mode 100644 index 065b532a9eb..00000000000 --- a/go/vt/sqlparser/visitorgen/struct_producer_test.go +++ /dev/null @@ -1,423 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package visitorgen - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestEmptyStructVisitor(t *testing.T) { - /* - type Node interface{} - type Struct struct {} - func (*Struct) iNode() {} - */ - - input := &SourceInformation{ - interestingTypes: map[string]Type{ - "*Struct": &Ref{&TypeString{"Struct"}}, - }, - interfaces: map[string]bool{ - "Node": true, - }, - structs: map[string]*StructDeclaration{ - "Struct": {name: "Struct", fields: []*Field{}}, - }, - typeAliases: map[string]*TypeAlias{}, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{{ - Type: &Ref{&TypeString{"Struct"}}, - Fields: []VisitorItem{}, - }}, - } - - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithSqlNodeField(t *testing.T) { - /* - type Node interface{} - type Struct struct { - Field Node - } - func (*Struct) iNode() {} - */ - input := &SourceInformation{ - interestingTypes: map[string]Type{ - "*Struct": &Ref{&TypeString{"Struct"}}, - }, - interfaces: map[string]bool{ - "Node": true, - }, - structs: map[string]*StructDeclaration{ - "Struct": {name: "Struct", fields: []*Field{ - {name: "Field", typ: &TypeString{"Node"}}, - }}, - }, - typeAliases: map[string]*TypeAlias{}, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{{ - Type: &Ref{&TypeString{"Struct"}}, - Fields: []VisitorItem{&SingleFieldItem{ - StructType: &Ref{&TypeString{"Struct"}}, - FieldType: &TypeString{"Node"}, - FieldName: "Field", - }}, - }}, - } - - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithStringField2(t *testing.T) { - /* - type Node interface{} - type Struct struct { - Field Node - } - func (*Struct) iNode() {} - */ - - input := &SourceInformation{ - interestingTypes: map[string]Type{ - "*Struct": &Ref{&TypeString{"Struct"}}, - }, - interfaces: map[string]bool{ - "Node": true, - }, - structs: map[string]*StructDeclaration{ - "Struct": {name: "Struct", fields: []*Field{ - {name: "Field", typ: &TypeString{"string"}}, - }}, - }, - typeAliases: map[string]*TypeAlias{}, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{{ - Type: &Ref{&TypeString{"Struct"}}, - Fields: []VisitorItem{}, - }}, - } - - assert.Equal(t, expected.String(), result.String()) -} - -func TestArrayAsSqlNode(t *testing.T) { - /* - type NodeInterface interface { - iNode() - } - - func (*NodeArray) iNode{} - - type NodeArray []NodeInterface - */ - - input := &SourceInformation{ - interfaces: map[string]bool{"NodeInterface": true}, - interestingTypes: map[string]Type{ - "*NodeArray": &Ref{&TypeString{"NodeArray"}}}, - structs: map[string]*StructDeclaration{}, - typeAliases: map[string]*TypeAlias{ - "NodeArray": { - name: "NodeArray", - typ: &Array{&TypeString{"NodeInterface"}}, - }, - }, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{{ - Type: &Ref{&TypeString{"NodeArray"}}, - Fields: []VisitorItem{&ArrayItem{ - StructType: &Ref{&TypeString{"NodeArray"}}, - ItemType: &TypeString{"NodeInterface"}, - }}, - }}, - } - - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithStructField(t *testing.T) { - /* - type Node interface{} - type Struct struct { - Field *Struct - } - func (*Struct) iNode() {} - */ - - input := &SourceInformation{ - interestingTypes: map[string]Type{ - "*Struct": &Ref{&TypeString{"Struct"}}}, - structs: map[string]*StructDeclaration{ - "Struct": {name: "Struct", 
fields: []*Field{ - {name: "Field", typ: &Ref{&TypeString{"Struct"}}}, - }}, - }, - typeAliases: map[string]*TypeAlias{}, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{{ - Type: &Ref{&TypeString{"Struct"}}, - Fields: []VisitorItem{&SingleFieldItem{ - StructType: &Ref{&TypeString{"Struct"}}, - FieldType: &Ref{&TypeString{"Struct"}}, - FieldName: "Field", - }}, - }}, - } - - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithArrayOfNodes(t *testing.T) { - /* - type NodeInterface interface {} - type Struct struct { - Items []NodeInterface - } - - func (*Struct) iNode{} - */ - - input := &SourceInformation{ - interfaces: map[string]bool{ - "NodeInterface": true, - }, - interestingTypes: map[string]Type{ - "*Struct": &Ref{&TypeString{"Struct"}}}, - structs: map[string]*StructDeclaration{ - "Struct": {name: "Struct", fields: []*Field{ - {name: "Items", typ: &Array{&TypeString{"NodeInterface"}}}, - }}, - }, - typeAliases: map[string]*TypeAlias{}, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{{ - Type: &Ref{&TypeString{"Struct"}}, - Fields: []VisitorItem{&ArrayFieldItem{ - StructType: &Ref{&TypeString{"Struct"}}, - ItemType: &TypeString{"NodeInterface"}, - FieldName: "Items", - }}, - }}, - } - - assert.Equal(t, expected.String(), result.String()) -} - -func TestStructWithArrayOfStrings(t *testing.T) { - /* - type NodeInterface interface {} - type Struct struct { - Items []string - } - - func (*Struct) iNode{} - */ - - input := &SourceInformation{ - interfaces: map[string]bool{ - "NodeInterface": true, - }, - interestingTypes: map[string]Type{ - "*Struct": &Ref{&TypeString{"Struct"}}}, - structs: map[string]*StructDeclaration{ - "Struct": {name: "Struct", fields: []*Field{ - {name: "Items", typ: &Array{&TypeString{"string"}}}, - }}, - }, - typeAliases: map[string]*TypeAlias{}, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{{ - Type: &Ref{&TypeString{"Struct"}}, - Fields: []VisitorItem{}, - }}, - } - - assert.Equal(t, expected.String(), result.String()) -} - -func TestArrayOfStringsThatImplementSQLNode(t *testing.T) { - /* - type NodeInterface interface {} - type Struct []string - func (Struct) iNode{} - */ - - input := &SourceInformation{ - interfaces: map[string]bool{"NodeInterface": true}, - interestingTypes: map[string]Type{"Struct": &Ref{&TypeString{"Struct"}}}, - structs: map[string]*StructDeclaration{}, - typeAliases: map[string]*TypeAlias{ - "Struct": { - name: "Struct", - typ: &Array{&TypeString{"string"}}, - }, - }, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{{ - Type: &Ref{&TypeString{"Struct"}}, - Fields: []VisitorItem{}, - }}, - } - - assert.Equal(t, expected.String(), result.String()) -} - -func TestSortingOfOutputs(t *testing.T) { - /* - type NodeInterface interface {} - type AStruct struct { - AField NodeInterface - BField NodeInterface - } - type BStruct struct { - CField NodeInterface - } - func (*AStruct) iNode{} - func (*BStruct) iNode{} - */ - - input := &SourceInformation{ - interfaces: map[string]bool{"NodeInterface": true}, - interestingTypes: map[string]Type{ - "AStruct": &Ref{&TypeString{"AStruct"}}, - "BStruct": &Ref{&TypeString{"BStruct"}}, - }, - structs: map[string]*StructDeclaration{ - "AStruct": {name: "AStruct", fields: []*Field{ - {name: "BField", typ: &TypeString{"NodeInterface"}}, - {name: "AField", typ: &TypeString{"NodeInterface"}}, - }}, - 
"BStruct": {name: "BStruct", fields: []*Field{ - {name: "CField", typ: &TypeString{"NodeInterface"}}, - }}, - }, - typeAliases: map[string]*TypeAlias{}, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{ - {Type: &Ref{&TypeString{"AStruct"}}, - Fields: []VisitorItem{ - &SingleFieldItem{ - StructType: &Ref{&TypeString{"AStruct"}}, - FieldType: &TypeString{"NodeInterface"}, - FieldName: "AField", - }, - &SingleFieldItem{ - StructType: &Ref{&TypeString{"AStruct"}}, - FieldType: &TypeString{"NodeInterface"}, - FieldName: "BField", - }}}, - {Type: &Ref{&TypeString{"BStruct"}}, - Fields: []VisitorItem{ - &SingleFieldItem{ - StructType: &Ref{&TypeString{"BStruct"}}, - FieldType: &TypeString{"NodeInterface"}, - FieldName: "CField", - }}}}, - } - assert.Equal(t, expected.String(), result.String()) -} - -func TestAliasOfAlias(t *testing.T) { - /* - type NodeInterface interface { - iNode() - } - - type NodeArray []NodeInterface - type AliasOfAlias NodeArray - - func (NodeArray) iNode{} - func (AliasOfAlias) iNode{} - */ - - input := &SourceInformation{ - interfaces: map[string]bool{"NodeInterface": true}, - interestingTypes: map[string]Type{ - "NodeArray": &TypeString{"NodeArray"}, - "AliasOfAlias": &TypeString{"AliasOfAlias"}, - }, - structs: map[string]*StructDeclaration{}, - typeAliases: map[string]*TypeAlias{ - "NodeArray": { - name: "NodeArray", - typ: &Array{&TypeString{"NodeInterface"}}, - }, - "AliasOfAlias": { - name: "NodeArray", - typ: &TypeString{"NodeArray"}, - }, - }, - } - - result := ToVisitorPlan(input) - - expected := &VisitorPlan{ - Switches: []*SwitchCase{ - {Type: &TypeString{"AliasOfAlias"}, - Fields: []VisitorItem{&ArrayItem{ - StructType: &TypeString{"AliasOfAlias"}, - ItemType: &TypeString{"NodeInterface"}, - }}, - }, - {Type: &TypeString{"NodeArray"}, - Fields: []VisitorItem{&ArrayItem{ - StructType: &TypeString{"NodeArray"}, - ItemType: &TypeString{"NodeInterface"}, - }}, - }}, - } - assert.Equal(t, expected.String(), result.String()) -} diff --git a/go/vt/sqlparser/visitorgen/transformer.go b/go/vt/sqlparser/visitorgen/transformer.go deleted file mode 100644 index 98129be81b1..00000000000 --- a/go/vt/sqlparser/visitorgen/transformer.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package visitorgen - -import "fmt" - -// Transform takes an input file and collects the information into an easier to consume format -func Transform(input *SourceFile) *SourceInformation { - interestingTypes := make(map[string]Type) - interfaces := make(map[string]bool) - structs := make(map[string]*StructDeclaration) - typeAliases := make(map[string]*TypeAlias) - - for _, l := range input.lines { - switch line := l.(type) { - case *FuncDeclaration: - interestingTypes[line.receiver.typ.toTypString()] = line.receiver.typ - case *StructDeclaration: - structs[line.name] = line - case *TypeAlias: - typeAliases[line.name] = line - case *InterfaceDeclaration: - interfaces[line.name] = true - } - } - - return &SourceInformation{ - interfaces: interfaces, - interestingTypes: interestingTypes, - structs: structs, - typeAliases: typeAliases, - } -} - -// SourceInformation contains the information from the ast.go file, but in a format that is easier to consume -type SourceInformation struct { - interestingTypes map[string]Type - interfaces map[string]bool - structs map[string]*StructDeclaration - typeAliases map[string]*TypeAlias -} - -func (v *SourceInformation) String() string { - var types string - for _, k := range v.interestingTypes { - types += k.toTypString() + "\n" - } - var structs string - for _, k := range v.structs { - structs += k.toSastString() + "\n" - } - var typeAliases string - for _, k := range v.typeAliases { - typeAliases += k.toSastString() + "\n" - } - - return fmt.Sprintf("Types to build visitor for:\n%s\nStructs with fields: \n%s\nTypeAliases with type: \n%s\n", types, structs, typeAliases) -} - -// getItemTypeOfArray will return nil if the given type is not pointing to a array type. -// If it is an array type, the type of it's items will be returned -func (v *SourceInformation) getItemTypeOfArray(typ Type) Type { - alias := v.typeAliases[typ.rawTypeName()] - if alias == nil { - return nil - } - arrTyp, isArray := alias.typ.(*Array) - if !isArray { - return v.getItemTypeOfArray(alias.typ) - } - return arrTyp.inner -} - -func (v *SourceInformation) isSQLNode(typ Type) bool { - _, isInteresting := v.interestingTypes[typ.toTypString()] - if isInteresting { - return true - } - _, isInterface := v.interfaces[typ.toTypString()] - return isInterface -} diff --git a/go/vt/sqlparser/visitorgen/transformer_test.go b/go/vt/sqlparser/visitorgen/transformer_test.go deleted file mode 100644 index 4a0849e9e9c..00000000000 --- a/go/vt/sqlparser/visitorgen/transformer_test.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package visitorgen - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestSimplestAst(t *testing.T) { - /* - type NodeInterface interface { - iNode() - } - - type NodeStruct struct {} - - func (*NodeStruct) iNode{} - */ - input := &SourceFile{ - lines: []Sast{ - &InterfaceDeclaration{ - name: "NodeInterface", - block: "// an interface lives here"}, - &StructDeclaration{ - name: "NodeStruct", - fields: []*Field{}}, - &FuncDeclaration{ - receiver: &Field{ - name: "", - typ: &Ref{&TypeString{"NodeStruct"}}, - }, - name: "iNode", - block: "", - arguments: []*Field{}}, - }, - } - - expected := &SourceInformation{ - interestingTypes: map[string]Type{ - "*NodeStruct": &Ref{&TypeString{"NodeStruct"}}}, - structs: map[string]*StructDeclaration{ - "NodeStruct": { - name: "NodeStruct", - fields: []*Field{}}}, - } - - assert.Equal(t, expected.String(), Transform(input).String()) -} - -func TestAstWithArray(t *testing.T) { - /* - type NodeInterface interface { - iNode() - } - - func (*NodeArray) iNode{} - - type NodeArray []NodeInterface - */ - input := &SourceFile{ - lines: []Sast{ - &InterfaceDeclaration{ - name: "NodeInterface"}, - &TypeAlias{ - name: "NodeArray", - typ: &Array{&TypeString{"NodeInterface"}}, - }, - &FuncDeclaration{ - receiver: &Field{ - name: "", - typ: &Ref{&TypeString{"NodeArray"}}, - }, - name: "iNode", - block: "", - arguments: []*Field{}}, - }, - } - - expected := &SourceInformation{ - interestingTypes: map[string]Type{ - "*NodeArray": &Ref{&TypeString{"NodeArray"}}}, - structs: map[string]*StructDeclaration{}, - typeAliases: map[string]*TypeAlias{ - "NodeArray": { - name: "NodeArray", - typ: &Array{&TypeString{"NodeInterface"}}, - }, - }, - } - - result := Transform(input) - - assert.Equal(t, expected.String(), result.String()) -} diff --git a/go/vt/sqlparser/visitorgen/visitor_emitter.go b/go/vt/sqlparser/visitorgen/visitor_emitter.go deleted file mode 100644 index 889c05fe7f7..00000000000 --- a/go/vt/sqlparser/visitorgen/visitor_emitter.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package visitorgen - -import ( - "fmt" - "strings" -) - -// EmitReplacementMethods is an anti-parser (a.k.a prettifier) - it takes a struct that is much like an AST, -// and produces a string from it. This method will produce the replacement methods that make it possible to -// replace objects in fields or in slices. -func EmitReplacementMethods(vd *VisitorPlan) string { - var sb builder - for _, s := range vd.Switches { - for _, k := range s.Fields { - sb.appendF(k.asReplMethod()) - sb.newLine() - } - } - - return sb.String() -} - -// EmitTypeSwitches is an anti-parser (a.k.a prettifier) - it takes a struct that is much like an AST, -// and produces a string from it. This method will produce the switch cases needed to cover the Vitess AST. 
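// A representative sample of what these emitters produced (a minimal illustration,
// reproduced verbatim from the expected values in visitor_emitter_test.go further
// down in this patch; "Struct" is only the test fixture there, not a real AST node):
//
//	func replaceStructField(newNode, parent SQLNode) {
//		parent.(*Struct).Field = newNode.(string)
//	}
//
// and the corresponding case body emitted into the generated type switch:
//
//	a.apply(node, n.Field, replaceStructField)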
-func EmitTypeSwitches(vd *VisitorPlan) string { - var sb builder - for _, s := range vd.Switches { - sb.newLine() - sb.appendF(" case %s:", s.Type.toTypString()) - for _, k := range s.Fields { - sb.appendF(k.asSwitchCase()) - } - } - - return sb.String() -} - -func (b *builder) String() string { - return strings.TrimSpace(b.sb.String()) -} - -type builder struct { - sb strings.Builder -} - -func (b *builder) appendF(format string, data ...interface{}) *builder { - _, err := b.sb.WriteString(fmt.Sprintf(format, data...)) - if err != nil { - panic(err) - } - b.newLine() - return b -} - -func (b *builder) newLine() { - _, err := b.sb.WriteString("\n") - if err != nil { - panic(err) - } -} diff --git a/go/vt/sqlparser/visitorgen/visitor_emitter_test.go b/go/vt/sqlparser/visitorgen/visitor_emitter_test.go deleted file mode 100644 index 94666daa743..00000000000 --- a/go/vt/sqlparser/visitorgen/visitor_emitter_test.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package visitorgen - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSingleItem(t *testing.T) { - sfi := SingleFieldItem{ - StructType: &Ref{&TypeString{"Struct"}}, - FieldType: &TypeString{"string"}, - FieldName: "Field", - } - - expectedReplacer := `func replaceStructField(newNode, parent SQLNode) { - parent.(*Struct).Field = newNode.(string) -}` - - expectedSwitch := ` a.apply(node, n.Field, replaceStructField)` - require.Equal(t, expectedReplacer, sfi.asReplMethod()) - require.Equal(t, expectedSwitch, sfi.asSwitchCase()) -} - -func TestArrayFieldItem(t *testing.T) { - sfi := ArrayFieldItem{ - StructType: &Ref{&TypeString{"Struct"}}, - ItemType: &TypeString{"string"}, - FieldName: "Field", - } - - expectedReplacer := `type replaceStructField int - -func (r *replaceStructField) replace(newNode, container SQLNode) { - container.(*Struct).Field[int(*r)] = newNode.(string) -} - -func (r *replaceStructField) inc() { - *r++ -}` - - expectedSwitch := ` replacerField := replaceStructField(0) - replacerFieldB := &replacerField - for _, item := range n.Field { - a.apply(node, item, replacerFieldB.replace) - replacerFieldB.inc() - }` - require.Equal(t, expectedReplacer, sfi.asReplMethod()) - require.Equal(t, expectedSwitch, sfi.asSwitchCase()) -} - -func TestArrayItem(t *testing.T) { - sfi := ArrayItem{ - StructType: &Ref{&TypeString{"Struct"}}, - ItemType: &TypeString{"string"}, - } - - expectedReplacer := `type replaceStructItems int - -func (r *replaceStructItems) replace(newNode, container SQLNode) { - container.(*Struct)[int(*r)] = newNode.(string) -} - -func (r *replaceStructItems) inc() { - *r++ -}` - - expectedSwitch := ` replacer := replaceStructItems(0) - replacerRef := &replacer - for _, item := range n { - a.apply(node, item, replacerRef.replace) - replacerRef.inc() - }` - require.Equal(t, expectedReplacer, sfi.asReplMethod()) - require.Equal(t, expectedSwitch, sfi.asSwitchCase()) -} diff --git a/go/vt/sqlparser/visitorgen/visitorgen.go 
b/go/vt/sqlparser/visitorgen/visitorgen.go deleted file mode 100644 index 284f8c4d9be..00000000000 --- a/go/vt/sqlparser/visitorgen/visitorgen.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//Package visitorgen is responsible for taking the ast.go of Vitess and -//and producing visitor infrastructure for it. -// -//This is accomplished in a few steps. -//Step 1: Walk the AST and collect the interesting information into a format that is -// easy to consume for the next step. The output format is a *SourceFile, that -// contains the needed information in a format that is pretty close to the golang ast, -// but simplified -//Step 2: A SourceFile is packaged into a SourceInformation. SourceInformation is still -// concerned with the input ast - it's just an even more distilled and easy to -// consume format for the last step. This step is performed by the code in transformer.go. -//Step 3: Using the SourceInformation, the struct_producer.go code produces the final data structure -// used, a VisitorPlan. This is focused on the output - it contains a list of all fields or -// arrays that need to be handled by the visitor produced. -//Step 4: The VisitorPlan is lastly turned into a string that is written as the output of -// this whole process. -package visitorgen diff --git a/go/vt/topo/events/external_cluster_change.go b/go/vt/topo/events/external_cluster_change.go new file mode 100644 index 00000000000..4d1b6762d81 --- /dev/null +++ b/go/vt/topo/events/external_cluster_change.go @@ -0,0 +1,28 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +// ExternalVitessClusterChange is an event that describes changes to a vitess cluster. +type ExternalVitessClusterChange struct { + ClusterName string + ExternalVitessCluster *topodatapb.ExternalVitessCluster + Status string +} diff --git a/go/vt/topo/external_vitess_cluster.go b/go/vt/topo/external_vitess_cluster.go new file mode 100644 index 00000000000..e19bf7b161d --- /dev/null +++ b/go/vt/topo/external_vitess_cluster.go @@ -0,0 +1,137 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package topo + +import ( + "context" + "path" + + "github.com/golang/protobuf/proto" + + "vitess.io/vitess/go/event" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo/events" + "vitess.io/vitess/go/vt/vterrors" +) + +// ExternalVitessClusterInfo is a meta struct that contains metadata to give the +// data more context and convenience. This is the main way we interact +// with a vitess cluster stored in the topo. +type ExternalVitessClusterInfo struct { + ClusterName string + version Version + *topodatapb.ExternalVitessCluster +} + +// GetExternalVitessClusterDir returns node path containing external vitess clusters +func GetExternalVitessClusterDir() string { + return path.Join(ExternalClustersFile, ExternalClusterVitess) +} + +// GetExternalVitessClusterPath returns node path containing external clusters +func GetExternalVitessClusterPath(clusterName string) string { + return path.Join(GetExternalVitessClusterDir(), clusterName) +} + +// CreateExternalVitessCluster creates a topo record for the passed vitess cluster +func (ts *Server) CreateExternalVitessCluster(ctx context.Context, clusterName string, value *topodatapb.ExternalVitessCluster) error { + data, err := proto.Marshal(value) + if err != nil { + return err + } + + if _, err := ts.globalCell.Create(ctx, GetExternalVitessClusterPath(clusterName), data); err != nil { + return err + } + + event.Dispatch(&events.ExternalVitessClusterChange{ + ClusterName: clusterName, + ExternalVitessCluster: value, + Status: "created", + }) + return nil +} + +// GetExternalVitessCluster returns a topo record for the named vitess cluster +func (ts *Server) GetExternalVitessCluster(ctx context.Context, clusterName string) (*ExternalVitessClusterInfo, error) { + data, version, err := ts.globalCell.Get(ctx, GetExternalVitessClusterPath(clusterName)) + switch { + case IsErrType(err, NoNode): + return nil, nil + case err == nil: + default: + return nil, err + } + vc := &topodatapb.ExternalVitessCluster{} + if err = proto.Unmarshal(data, vc); err != nil { + return nil, vterrors.Wrap(err, "bad vitess cluster data") + } + + return &ExternalVitessClusterInfo{ + ClusterName: clusterName, + version: version, + ExternalVitessCluster: vc, + }, nil +} + +// UpdateExternalVitessCluster updates the topo record for the named vitess cluster +func (ts *Server) UpdateExternalVitessCluster(ctx context.Context, vc *ExternalVitessClusterInfo) error { + //FIXME: check for cluster lock + data, err := proto.Marshal(vc.ExternalVitessCluster) + if err != nil { + return err + } + version, err := ts.globalCell.Update(ctx, GetExternalVitessClusterPath(vc.ClusterName), data, vc.version) + if err != nil { + return err + } + vc.version = version + + event.Dispatch(&events.ExternalVitessClusterChange{ + ClusterName: vc.ClusterName, + ExternalVitessCluster: vc.ExternalVitessCluster, + Status: "updated", + }) + return nil +} + +// DeleteExternalVitessCluster deletes the topo record for the named vitess cluster +func (ts *Server) DeleteExternalVitessCluster(ctx context.Context, clusterName string) error { + if err := ts.globalCell.Delete(ctx, 
GetExternalVitessClusterPath(clusterName), nil); err != nil { + return err + } + + event.Dispatch(&events.ExternalVitessClusterChange{ + ClusterName: clusterName, + ExternalVitessCluster: nil, + Status: "deleted", + }) + return nil +} + +// GetExternalVitessClusters returns the list of external vitess clusters in the topology. +func (ts *Server) GetExternalVitessClusters(ctx context.Context) ([]string, error) { + children, err := ts.globalCell.ListDir(ctx, GetExternalVitessClusterDir(), false /*full*/) + switch { + case err == nil: + return DirEntriesToStringArray(children), nil + case IsErrType(err, NoNode): + return nil, nil + default: + return nil, err + } +} diff --git a/go/vt/topo/server.go b/go/vt/topo/server.go index f8be44e5c85..8fc31c4a6ae 100644 --- a/go/vt/topo/server.go +++ b/go/vt/topo/server.go @@ -76,6 +76,7 @@ const ( SrvVSchemaFile = "SrvVSchema" SrvKeyspaceFile = "SrvKeyspace" RoutingRulesFile = "RoutingRules" + ExternalClustersFile = "ExternalClusters" ) // Path for all object types. @@ -86,6 +87,9 @@ const ( ShardsPath = "shards" TabletsPath = "tablets" MetadataPath = "metadata" + + ExternalClusterMySQL = "mysql" + ExternalClusterVitess = "vitess" ) // Factory is a factory interface to create Conn objects. @@ -329,3 +333,23 @@ func (ts *Server) clearCellAliasesCache() { defer cellsAliases.mu.Unlock() cellsAliases.cellsToAliases = make(map[string]string) } + +// OpenExternalVitessClusterServer returns the topo server of the external cluster +func (ts *Server) OpenExternalVitessClusterServer(ctx context.Context, clusterName string) (*Server, error) { + vc, err := ts.GetExternalVitessCluster(ctx, clusterName) + if err != nil { + return nil, err + } + if vc == nil { + return nil, fmt.Errorf("no vitess cluster found with name %s", clusterName) + } + var externalTopo *Server + externalTopo, err = OpenServer(vc.TopoConfig.TopoType, vc.TopoConfig.Server, vc.TopoConfig.Root) + if err != nil { + return nil, err + } + if externalTopo == nil { + return nil, fmt.Errorf("unable to open external topo for config %s", clusterName) + } + return externalTopo, nil +} diff --git a/go/vt/vtctl/topo.go b/go/vt/vtctl/topo.go index 529affdafb6..a408051d31a 100644 --- a/go/vt/vtctl/topo.go +++ b/go/vt/vtctl/topo.go @@ -60,7 +60,7 @@ func init() { // the right object, then echoes it as a string. func DecodeContent(filename string, data []byte, json bool) (string, error) { name := path.Base(filename) - + dir := path.Dir(filename) var p proto.Message switch name { case topo.CellInfoFile: @@ -82,9 +82,15 @@ func DecodeContent(filename string, data []byte, json bool) (string, error) { case topo.RoutingRulesFile: p = new(vschemapb.RoutingRules) default: - if json { - return "", fmt.Errorf("unknown topo protobuf type for %v", name) - } else { + switch dir { + case "/" + topo.GetExternalVitessClusterDir(): + p = new(topodatapb.ExternalVitessCluster) + default: + } + if p == nil { + if json { + return "", fmt.Errorf("unknown topo protobuf type for %v", name) + } return string(data), nil } } @@ -95,15 +101,14 @@ func DecodeContent(filename string, data []byte, json bool) (string, error) { if json { return new(jsonpb.Marshaler).MarshalToString(p) - } else { - return proto.MarshalTextString(p), nil } + return proto.MarshalTextString(p), nil } func commandTopoCat(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { cell := subFlags.String("cell", topo.GlobalCell, "topology cell to cat the file from. 
Defaults to global cell.") long := subFlags.Bool("long", false, "long listing.") - decodeProtoJson := subFlags.Bool("decode_proto_json", false, "decode proto files and display them as json") + decodeProtoJSON := subFlags.Bool("decode_proto_json", false, "decode proto files and display them as json") decodeProto := subFlags.Bool("decode_proto", false, "decode proto files and display them as text") subFlags.Parse(args) if subFlags.NArg() == 0 { @@ -125,15 +130,15 @@ func commandTopoCat(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.F var topologyDecoder TopologyDecoder switch { - case *decodeProtoJson: - topologyDecoder = JsonTopologyDecoder{} + case *decodeProtoJSON: + topologyDecoder = JSONTopologyDecoder{} case *decodeProto: topologyDecoder = ProtoTopologyDecoder{} default: topologyDecoder = PlainTopologyDecoder{} } - return topologyDecoder.decode(resolved, conn, ctx, wr, *long) + return topologyDecoder.decode(ctx, resolved, conn, wr, *long) } func commandTopoCp(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { @@ -176,15 +181,21 @@ func copyFileToTopo(ctx context.Context, ts *topo.Server, cell, from, to string) return err } +// TopologyDecoder interface for exporting out a leaf node in a readable form type TopologyDecoder interface { - decode([]string, topo.Conn, context.Context, *wrangler.Wrangler, bool) error + decode(context.Context, []string, topo.Conn, *wrangler.Wrangler, bool) error } +// ProtoTopologyDecoder exports topo node as a proto type ProtoTopologyDecoder struct{} + +// PlainTopologyDecoder exports topo node as plain text type PlainTopologyDecoder struct{} -type JsonTopologyDecoder struct{} -func (d ProtoTopologyDecoder) decode(topoPaths []string, conn topo.Conn, ctx context.Context, wr *wrangler.Wrangler, long bool) error { +// JSONTopologyDecoder exports topo node as JSON +type JSONTopologyDecoder struct{} + +func (d ProtoTopologyDecoder) decode(ctx context.Context, topoPaths []string, conn topo.Conn, wr *wrangler.Wrangler, long bool) error { hasError := false for _, topoPath := range topoPaths { data, version, err := conn.Get(ctx, topoPath) @@ -216,7 +227,7 @@ func (d ProtoTopologyDecoder) decode(topoPaths []string, conn topo.Conn, ctx con return nil } -func (d PlainTopologyDecoder) decode(topoPaths []string, conn topo.Conn, ctx context.Context, wr *wrangler.Wrangler, long bool) error { +func (d PlainTopologyDecoder) decode(ctx context.Context, topoPaths []string, conn topo.Conn, wr *wrangler.Wrangler, long bool) error { hasError := false for _, topoPath := range topoPaths { data, version, err := conn.Get(ctx, topoPath) @@ -242,7 +253,7 @@ func (d PlainTopologyDecoder) decode(topoPaths []string, conn topo.Conn, ctx con return nil } -func (d JsonTopologyDecoder) decode(topoPaths []string, conn topo.Conn, ctx context.Context, wr *wrangler.Wrangler, long bool) error { +func (d JSONTopologyDecoder) decode(ctx context.Context, topoPaths []string, conn topo.Conn, wr *wrangler.Wrangler, long bool) error { hasError := false var jsonData []interface{} for _, topoPath := range topoPaths { diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index 364eae2b7fc..5d73db924cf 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -308,6 +308,9 @@ var commands = []commandGroup{ {"MoveTables", commandMoveTables, "[-cells=] [-tablet_types=] -workflow= ", `Move table(s) to another keyspace, table_specs is a list of tables or the tables section of the vschema for the target keyspace. 
Example: '{"t1":{"column_vindexes": [{"column": "id1", "name": "hash"}]}, "t2":{"column_vindexes": [{"column": "id2", "name": "hash"}]}}'. In the case of an unsharded target keyspace the vschema for each table may be empty. Example: '{"t1":{}, "t2":{}}'.`}, + {"Migrate", commandMigrate, + "[-cells=] [-tablet_types=] -workflow= ", + `Move table(s) to another keyspace, table_specs is a list of tables or the tables section of the vschema for the target keyspace. Example: '{"t1":{"column_vindexes": [{"column": "id1", "name": "hash"}]}, "t2":{"column_vindexes": [{"column": "id2", "name": "hash"}]}}'. In the case of an unsharded target keyspace the vschema for each table may be empty. Example: '{"t1":{}, "t2":{}}'.`}, {"DropSources", commandDropSources, "[-dry_run] [-rename_tables] ", "After a MoveTables or Resharding workflow cleanup unused artifacts like source tables, source shards and blacklists"}, @@ -355,6 +358,9 @@ var commands = []commandGroup{ "Blocks until no new queries were observed on all tablets with the given tablet type in the specified keyspace. " + " This can be used as sanity check to ensure that the tablets were drained after running vtctl MigrateServedTypes " + " and vtgate is no longer using them. If -timeout is set, it fails when the timeout is reached."}, + {"Mount", commandMount, + "[-topo_type=etcd2|consul|zookeeper] [-topo_server=topo_url] [-topo_root=root_topo_node> [-unmount] [-list] [-show] []", + "Add/Remove/Display/List external cluster(s) to this vitess cluster"}, }, }, { @@ -1942,7 +1948,7 @@ func commandMoveTables(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla target := subFlags.Arg(1) tableSpecs := subFlags.Arg(2) return wr.MoveTables(ctx, *workflow, source, target, tableSpecs, *cells, *tabletTypes, *allTables, - *excludes, *autoStart, *stopAfterCopy) + *excludes, *autoStart, *stopAfterCopy, "") } // VReplicationWorkflowAction defines subcommands passed to vtctl for movetables or reshard @@ -1959,6 +1965,21 @@ const ( vReplicationWorkflowActionGetState = "getstate" ) +func commandMigrate(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { + return commandVRWorkflow(ctx, wr, subFlags, args, wrangler.MigrateWorkflow) +} + +// getSourceKeyspace expects a keyspace of the form "externalClusterName.keyspaceName" and returns the components +func getSourceKeyspace(clusterKeyspace string) (clusterName string, sourceKeyspace string, err error) { + splits := strings.Split(clusterKeyspace, ".") + if len(splits) != 2 { + return "", "", fmt.Errorf("invalid format for external source cluster: %s", clusterKeyspace) + } + return splits[0], splits[1], nil +} + +// commandVRWorkflow is the common entry point for MoveTables/Reshard/Migrate workflows +// FIXME: this needs a refactor. 
Also validations for params need to be done per workflow type func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string, workflowType wrangler.VReplicationWorkflowType) error { @@ -1972,14 +1993,16 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla autoStart := subFlags.Bool("auto_start", true, "If false, streams will start in the Stopped state and will need to be explicitly started") stopAfterCopy := subFlags.Bool("stop_after_copy", false, "Streams will be stopped once the copy phase is completed") - // MoveTables-only params - sourceKeyspace := subFlags.String("source", "", "Source keyspace") + // MoveTables and Migrate params tables := subFlags.String("tables", "", "A table spec or a list of tables") allTables := subFlags.Bool("all", false, "Move all tables from the source keyspace") excludes := subFlags.String("exclude", "", "Tables to exclude (comma-separated) if -all is specified") + sourceKeyspace := subFlags.String("source", "", "Source keyspace") + + // MoveTables-only params renameTables := subFlags.Bool("rename_tables", false, "Rename tables instead of dropping them") - // Reshard-only params + // Reshard params sourceShards := subFlags.String("source_shards", "", "Source shards") targetShards := subFlags.String("target_shards", "", "Target shards") skipSchemaCopy := subFlags.Bool("skip_schema_copy", false, "Skip copying of schema to target shards") @@ -2054,14 +2077,37 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla //TODO: check if invalid parameters were passed in that do not apply to this action originalAction := action action = strings.ToLower(action) // allow users to input action in a case-insensitive manner + if workflowType == wrangler.MigrateWorkflow { + switch action { + case vReplicationWorkflowActionCreate, vReplicationWorkflowActionCancel, vReplicationWorkflowActionComplete: + default: + return fmt.Errorf("invalid action for Migrate: %s", action) + } + } + switch action { case vReplicationWorkflowActionCreate: switch workflowType { - case wrangler.MoveTablesWorkflow: + case wrangler.MoveTablesWorkflow, wrangler.MigrateWorkflow: + var sourceTopo *topo.Server + var externalClusterName string + + sourceTopo = wr.TopoServer() if *sourceKeyspace == "" { return fmt.Errorf("source keyspace is not specified") } - _, err := wr.TopoServer().GetKeyspace(ctx, *sourceKeyspace) + if workflowType == wrangler.MigrateWorkflow { + externalClusterName, *sourceKeyspace, err = getSourceKeyspace(*sourceKeyspace) + if err != nil { + return err + } + sourceTopo, err = sourceTopo.OpenExternalVitessClusterServer(ctx, externalClusterName) + if err != nil { + return err + } + } + + _, err := sourceTopo.GetKeyspace(ctx, *sourceKeyspace) if err != nil { wr.Logger().Errorf("keyspace %s not found", *sourceKeyspace) return err @@ -2074,7 +2120,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla vrwp.AllTables = *allTables vrwp.ExcludeTables = *excludes vrwp.Timeout = *timeout - workflowType = wrangler.MoveTablesWorkflow + vrwp.ExternalCluster = externalClusterName case wrangler.ReshardWorkflow: if *sourceShards == "" || *targetShards == "" { return fmt.Errorf("source and target shards are not specified") @@ -2083,8 +2129,6 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla vrwp.TargetShards = strings.Split(*targetShards, ",") vrwp.SkipSchemaCopy = *skipSchemaCopy vrwp.SourceKeyspace = target - workflowType = 
wrangler.ReshardWorkflow - log.Infof("params are %s, %s, %+v", *sourceShards, *targetShards, vrwp) default: return fmt.Errorf("unknown workflow type passed: %v", workflowType) } @@ -2105,12 +2149,13 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *fla case wrangler.MoveTablesWorkflow: vrwp.RenameTables = *renameTables case wrangler.ReshardWorkflow: + case wrangler.MigrateWorkflow: default: return fmt.Errorf("unknown workflow type passed: %v", workflowType) } vrwp.KeepData = *keepData } - + vrwp.WorkflowType = workflowType wf, err := wr.NewVReplicationWorkflow(ctx, workflowType, vrwp) if err != nil { log.Warningf("NewVReplicationWorkflow returned error %+v", wf) @@ -3451,6 +3496,62 @@ func commandWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag. return nil } +func commandMount(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { + clusterType := subFlags.String("type", "vitess", "Specify cluster type: mysql or vitess, only vitess clustered right now") + unmount := subFlags.Bool("unmount", false, "Unmount cluster") + show := subFlags.Bool("show", false, "Display contents of cluster") + list := subFlags.Bool("list", false, "List all clusters") + + // vitess cluster params + topoType := subFlags.String("topo_type", "", "Type of cluster's topology server") + topoServer := subFlags.String("topo_server", "", "Server url of cluster's topology server") + topoRoot := subFlags.String("topo_root", "", "Root node of cluster's topology") + + if err := subFlags.Parse(args); err != nil { + return err + } + if *list { + clusters, err := wr.TopoServer().GetExternalVitessClusters(ctx) + if err != nil { + return err + } + wr.Logger().Printf("%s\n", strings.Join(clusters, ",")) + return nil + } + if subFlags.NArg() != 1 { + return fmt.Errorf("cluster name needs to be provided") + } + + clusterName := subFlags.Arg(0) + switch *clusterType { + case "vitess": + switch { + case *unmount: + return wr.UnmountExternalVitessCluster(ctx, clusterName) + case *show: + vci, err := wr.TopoServer().GetExternalVitessCluster(ctx, clusterName) + if err != nil { + return err + } + if vci == nil { + return fmt.Errorf("there is no vitess cluster named %s", clusterName) + } + data, err := json.Marshal(vci) + if err != nil { + return err + } + wr.Logger().Printf("%s\n", string(data)) + return nil + default: + return wr.MountExternalVitessCluster(ctx, clusterName, *topoType, *topoServer, *topoRoot) + } + case "mysql": + return fmt.Errorf("mysql cluster type not yet supported") + default: + return fmt.Errorf("cluster type can be only one of vitess or mysql") + } +} + func commandGenerateShardRanges(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { numShards := subFlags.Int("num_shards", 2, "Number of shards to generate shard ranges for.") diff --git a/go/vt/vtgate/planbuilder/ddl.go b/go/vt/vtgate/planbuilder/ddl.go index 1f0a85e2401..9c8e10ed6fc 100644 --- a/go/vt/vtgate/planbuilder/ddl.go +++ b/go/vt/vtgate/planbuilder/ddl.go @@ -163,7 +163,7 @@ func buildAlterView(vschema ContextVSchema, ddl *sqlparser.AlterView) (key.Desti if routePlan.Opcode != engine.SelectUnsharded && routePlan.Opcode != engine.SelectEqualUnique && routePlan.Opcode != engine.SelectScatter { return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, ViewComplex) } - sqlparser.Rewrite(ddl.Select, func(cursor *sqlparser.Cursor) bool { + _, err = sqlparser.Rewrite(ddl.Select, func(cursor *sqlparser.Cursor) bool { switch tableName := 
cursor.Node().(type) { case sqlparser.TableName: cursor.Replace(sqlparser.TableName{ @@ -172,6 +172,9 @@ func buildAlterView(vschema ContextVSchema, ddl *sqlparser.AlterView) (key.Desti } return true }, nil) + if err != nil { + return nil, nil, err + } return destination, keyspace, nil } @@ -199,7 +202,7 @@ func buildCreateView(vschema ContextVSchema, ddl *sqlparser.CreateView) (key.Des if routePlan.Opcode != engine.SelectUnsharded && routePlan.Opcode != engine.SelectEqualUnique && routePlan.Opcode != engine.SelectScatter { return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, ViewComplex) } - sqlparser.Rewrite(ddl.Select, func(cursor *sqlparser.Cursor) bool { + _, err = sqlparser.Rewrite(ddl.Select, func(cursor *sqlparser.Cursor) bool { switch tableName := cursor.Node().(type) { case sqlparser.TableName: cursor.Replace(sqlparser.TableName{ @@ -208,6 +211,9 @@ func buildCreateView(vschema ContextVSchema, ddl *sqlparser.CreateView) (key.Des } return true }, nil) + if err != nil { + return nil, nil, err + } return destination, keyspace, nil } diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go index 36b2b46d1a9..0250f4215f5 100644 --- a/go/vt/vtgate/planbuilder/plan_test.go +++ b/go/vt/vtgate/planbuilder/plan_test.go @@ -433,8 +433,9 @@ func testFile(t *testing.T, filename, tempDir string, vschema *vschemaWrapper, c // this is shown by not having any info at all after the result for the V3 planner // with this last expectation, it is an error if the V4 planner // produces the same plan as the V3 planner does + testName := fmt.Sprintf("%d V4: %s", tcase.lineno, tcase.comments) if !empty || checkAllTests { - t.Run(fmt.Sprintf("%d V4: %s", tcase.lineno, tcase.comments), func(t *testing.T) { + t.Run(testName, func(t *testing.T) { if out != tcase.output2ndPlanner { fail = true t.Errorf("V4 - %s:%d\nDiff:\n%s\n[%s] \n[%s]", filename, tcase.lineno, cmp.Diff(tcase.output2ndPlanner, out), tcase.output, out) @@ -452,7 +453,7 @@ func testFile(t *testing.T, filename, tempDir string, vschema *vschemaWrapper, c }) } else { if out == tcase.output && checkV4equalPlan { - t.Run("V4: "+tcase.comments, func(t *testing.T) { + t.Run(testName, func(t *testing.T) { t.Errorf("V4 - %s:%d\nplanner produces same output as V3", filename, tcase.lineno) }) } diff --git a/go/vt/vtgate/planbuilder/route_planning.go b/go/vt/vtgate/planbuilder/route_planning.go index 42f3e9eb4d2..75d17fd3870 100644 --- a/go/vt/vtgate/planbuilder/route_planning.go +++ b/go/vt/vtgate/planbuilder/route_planning.go @@ -114,7 +114,7 @@ func planLimit(limit *sqlparser.Limit, plan logicalPlan) (logicalPlan, error) { func planProjections(sel *sqlparser.Select, plan logicalPlan, semTable *semantics.SemTable) error { rb, ok := plan.(*route) - if ok { + if ok && rb.isSingleShard() { ast := rb.Select.(*sqlparser.Select) ast.Distinct = sel.Distinct ast.GroupBy = sel.GroupBy @@ -122,16 +122,24 @@ func planProjections(sel *sqlparser.Select, plan logicalPlan, semTable *semantic ast.SelectExprs = sel.SelectExprs ast.Comments = sel.Comments } else { - // TODO real horizon planning to be done + if sel.Distinct { + return semantics.Gen4NotSupportedF("DISTINCT") + } + if sel.GroupBy != nil { + return semantics.Gen4NotSupportedF("GROUP BY") + } for _, expr := range sel.SelectExprs { switch e := expr.(type) { case *sqlparser.AliasedExpr: + if nodeHasAggregates(e.Expr) { + return semantics.Gen4NotSupportedF("aggregation [%s]", sqlparser.String(e)) + } if _, err := pushProjection(e, plan, semTable); err != nil { return err 
} default: - return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "not yet supported %T", e) + return semantics.Gen4NotSupportedF("%T", e) } } @@ -312,6 +320,8 @@ func (rp *routePlan) searchForNewVindexes(predicates []sqlparser.Expr) (bool, er } } } + default: + return false, semantics.Gen4NotSupportedF("%s", sqlparser.String(filter)) } } } @@ -446,8 +456,8 @@ func pushPredicate2(exprs []sqlparser.Expr, tree joinTree, semTable *semantics.S } func breakPredicateInLHSandRHS(expr sqlparser.Expr, semTable *semantics.SemTable, lhs semantics.TableSet) (columns []*sqlparser.ColName, predicate sqlparser.Expr, err error) { - predicate = expr.Clone() - sqlparser.Rewrite(predicate, nil, func(cursor *sqlparser.Cursor) bool { + predicate = sqlparser.CloneExpr(expr) + _, err = sqlparser.Rewrite(predicate, nil, func(cursor *sqlparser.Cursor) bool { switch node := cursor.Node().(type) { case *sqlparser.ColName: deps := semTable.Dependencies(node) @@ -463,6 +473,9 @@ func breakPredicateInLHSandRHS(expr sqlparser.Expr, semTable *semantics.SemTable } return true }) + if err != nil { + return nil, nil, err + } return } @@ -620,6 +633,9 @@ func createRoutePlan(table *queryTable, solves semantics.TableSet, vschema Conte if err != nil { return nil, err } + if vschemaTable.Name.String() != table.table.Name.String() { + return nil, semantics.Gen4NotSupportedF("routed tables") + } plan := &routePlan{ solved: solves, _tables: []*routeTable{{ diff --git a/go/vt/vtgate/planbuilder/select.go b/go/vt/vtgate/planbuilder/select.go index 0d6886e39b9..bd0efc44d17 100644 --- a/go/vt/vtgate/planbuilder/select.go +++ b/go/vt/vtgate/planbuilder/select.go @@ -532,7 +532,7 @@ func (pb *primitiveBuilder) pushSelectRoutes(selectExprs sqlparser.SelectExprs) } } resultColumns = append(resultColumns, rb.PushAnonymous(node)) - case sqlparser.Nextval: + case *sqlparser.Nextval: rb, ok := pb.plan.(*route) if !ok { // This code is unreachable because the parser doesn't allow joins for next val statements. diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.txt b/go/vt/vtgate/planbuilder/testdata/aggr_cases.txt index 83b1ae979ea..747d50f67ac 100644 --- a/go/vt/vtgate/planbuilder/testdata/aggr_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/aggr_cases.txt @@ -78,7 +78,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # distinct and group by together for single route. 
"select distinct col1, id from user group by col1" @@ -97,7 +96,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # scatter group by a text column "select count(*), a, textcol1, b from user group by a, textcol1, b" @@ -347,7 +345,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # group by a unique vindex and other column should use a simple route "select id, col, count(*) from user group by id, col" @@ -366,7 +363,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # group by a non-vindex column should use an OrderdAggregate primitive "select col, count(*) from user group by col" @@ -445,7 +441,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # group by a unique vindex where alias from select list is used "select id as val, 1+count(*) from user group by val" @@ -464,7 +459,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # group by a unique vindex where expression is qualified (alias should be ignored) "select val as id, 1+count(*) from user group by user.id" @@ -483,7 +477,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # group by a unique vindex where it should skip non-aliased expressions. "select *, id, 1+count(*) from user group by id" @@ -502,7 +495,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # group by a unique vindex should revert to simple route, and having clause should find the correct symbols. "select id, count(*) c from user group by id having id=1 and c=10" @@ -657,7 +649,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # count with distinct unique vindex "select col, count(distinct id) from user group by col" diff --git a/go/vt/vtgate/planbuilder/testdata/filter_cases.txt b/go/vt/vtgate/planbuilder/testdata/filter_cases.txt index 9e1f069f2b5..3a86be6121e 100644 --- a/go/vt/vtgate/planbuilder/testdata/filter_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/filter_cases.txt @@ -15,21 +15,7 @@ "Table": "`user`" } } -{ - "QueryType": "SELECT", - "Original": "select id from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectScatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - } -} +Gen4 plan same as above # Query that always return empty "select id from user where someColumn = null" @@ -48,21 +34,7 @@ "Table": "`user`" } } -{ - "QueryType": "SELECT", - "Original": "select id from user where someColumn = null", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectNone", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where someColumn = null", - "Table": "`user`" - } -} +Gen4 plan same as above # Single table unique vindex route "select id from user where user.id = 5" @@ -85,25 +57,7 @@ "Vindex": "user_index" } } -{ - "QueryType": "SELECT", - "Original": "select id from user where user.id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectEqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.id = 5", - "Table": "`user`", - "Values": [ - 5 - ], - "Vindex": "user_index" - } -} +Gen4 plan same as above # Single table unique vindex route, but complex expr "select id from user 
where user.id = 5+5" @@ -122,21 +76,7 @@ "Table": "`user`" } } -{ - "QueryType": "SELECT", - "Original": "select id from user where user.id = 5+5", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectScatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.id = 5 + 5", - "Table": "`user`" - } -} +Gen4 plan same as above # Single table multiple unique vindex match "select id from music where id = 5 and user_id = 4" @@ -182,25 +122,7 @@ Gen4 plan same as above "Vindex": "name_user_map" } } -{ - "QueryType": "SELECT", - "Original": "select id from user where costly = 'aa' and name = 'bb'", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where costly = 'aa' and `name` = 'bb'", - "Table": "`user`", - "Values": [ - "bb" - ], - "Vindex": "name_user_map" - } -} +Gen4 plan same as above # Single table multiple non-unique vindex match for IN clause "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')" @@ -372,25 +294,6 @@ Gen4 plan same as above "Vindex": "user_index" } } -{ - "QueryType": "SELECT", - "Original": "select id from user where (col, name) in (('aa', 'bb')) and id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectEqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (col, `name`) in (('aa', 'bb')) and id = 5", - "Table": "`user`", - "Values": [ - 5 - ], - "Vindex": "user_index" - } -} # Composite IN: multiple vindex matches "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))" @@ -484,21 +387,6 @@ Gen4 plan same as above "Table": "`user`" } } -{ - "QueryType": "SELECT", - "Original": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectScatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where ((col1, `name`), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))", - "Table": "`user`" - } -} # Composite IN: RHS not tuple "select id from user where (col1, name) in (select * from music where music.user_id=user.id)" @@ -535,21 +423,6 @@ Gen4 plan same as above "Table": "`user`" } } -{ - "QueryType": "SELECT", - "Original": "select id from user where (col1, name) in (('aa', 1+1))", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectScatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (col1, `name`) in (('aa', 1 + 1))", - "Table": "`user`" - } -} # IN clause: LHS is neither column nor composite tuple "select Id from user where 1 in ('aa', 'bb')" @@ -586,21 +459,6 @@ Gen4 plan same as above "Table": "`user`" } } -{ - "QueryType": "SELECT", - "Original": "select id from user where name in (col, 'bb')", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectScatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `name` in (col, 'bb')", - "Table": "`user`" - } -} # Single table equality 
route with val arg "select id from user where name = :a" @@ -623,25 +481,7 @@ Gen4 plan same as above "Vindex": "name_user_map" } } -{ - "QueryType": "SELECT", - "Original": "select id from user where name = :a", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `name` = :a", - "Table": "`user`", - "Values": [ - ":a" - ], - "Vindex": "name_user_map" - } -} +Gen4 plan same as above # Single table equality route with unsigned value "select id from user where name = 18446744073709551615" @@ -664,25 +504,7 @@ Gen4 plan same as above "Vindex": "name_user_map" } } -{ - "QueryType": "SELECT", - "Original": "select id from user where name = 18446744073709551615", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `name` = 18446744073709551615", - "Table": "`user`", - "Values": [ - 18446744073709551615 - ], - "Vindex": "name_user_map" - } -} +Gen4 plan same as above # Single table in clause list arg "select id from user where name in ::list" @@ -834,44 +656,7 @@ Gen4 plan same as above ] } } -{ - "QueryType": "SELECT", - "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "1", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "SelectEqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` where `user`.id = 5", - "Table": "`user`", - "Values": [ - 5 - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "SelectScatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra where user_extra.col = :user_col", - "Table": "user_extra" - } - ] - } -} +Gen4 plan same as above # Multi-route unique vindex route on both routes "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5" @@ -1067,25 +852,6 @@ Gen4 plan same as above "Vindex": "name_user_map" } } -{ - "QueryType": "SELECT", - "Original": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id or col as val from `user` where 1 != 1", - "Query": "select id or col as val from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa'", - "Table": "`user`", - "Values": [ - "aa" - ], - "Vindex": "name_user_map" - } -} # Route with multiple route constraints, SelectEqual is the best constraint. 
"select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'" @@ -1108,25 +874,6 @@ Gen4 plan same as above "Vindex": "name_user_map" } } -{ - "QueryType": "SELECT", - "Original": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.col = false and `user`.id in (1, 2) and `user`.`name` = 'aa'", - "Table": "`user`", - "Values": [ - "aa" - ], - "Vindex": "name_user_map" - } -} # Route with multiple route constraints, SelectEqualUnique is the best constraint. "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1" @@ -1149,25 +896,6 @@ Gen4 plan same as above "Vindex": "user_index" } } -{ - "QueryType": "SELECT", - "Original": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectEqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa' and `user`.id = 1", - "Table": "`user`", - "Values": [ - 1 - ], - "Vindex": "user_index" - } -} # Route with multiple route constraints, SelectEqualUnique is the best constraint, order reversed. "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5" @@ -1190,25 +918,6 @@ Gen4 plan same as above "Vindex": "user_index" } } -{ - "QueryType": "SELECT", - "Original": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectEqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.id = 1 and `user`.`name` = 'aa' and `user`.id in (1, 2) and `user`.col = 5", - "Table": "`user`", - "Values": [ - 1 - ], - "Vindex": "user_index" - } -} # Route with OR and AND clause, must parenthesize correctly. 
"select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)" @@ -1227,21 +936,7 @@ Gen4 plan same as above "Table": "`user`" } } -{ - "QueryType": "SELECT", - "Original": "select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectScatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.id = 1 or `user`.`name` = 'aa' and `user`.id in (1, 2)", - "Table": "`user`" - } -} +Gen4 plan same as above # Unsharded route "select unsharded.id from user join unsharded where unsharded.id = user.id" @@ -2037,7 +1732,6 @@ Gen4 plan same as above "Vindex": "user_index" } } -Gen4 plan same as above # Single table with unique vindex match and NOT IN (null, 1, 2) "select id from music where user_id = 4 and id NOT IN (null, 1, 2)" diff --git a/go/vt/vtgate/planbuilder/testdata/from_cases.txt b/go/vt/vtgate/planbuilder/testdata/from_cases.txt index b27a6708761..45aea022d18 100644 --- a/go/vt/vtgate/planbuilder/testdata/from_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/from_cases.txt @@ -108,21 +108,6 @@ Gen4 plan same as above "Table": "unsharded" } } -{ - "QueryType": "SELECT", - "Original": "select m1.col from unsharded as m1 join unsharded as m2", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectUnsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select m1.col from unsharded as m1, unsharded as m2 where 1 != 1", - "Query": "select m1.col from unsharded as m1, unsharded as m2", - "Table": "unsharded" - } -} # Multi-table, multi-chunk "select music.col from user join music" @@ -179,7 +164,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # routing rules where table name matches, and there's an alias. "select * from second_user.user as a" @@ -198,7 +182,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # routing rules where table name does not match, and there's no alias. 
"select * from route1" @@ -319,7 +302,6 @@ Gen4 plan same as above "Table": "unsharded" } } -Gen4 plan same as above # ',' join information_schema "select a.id,b.id from information_schema.a as a, information_schema.b as b" @@ -355,7 +337,6 @@ Gen4 plan same as above "Table": "unsharded" } } -Gen4 plan same as above # Left join, single chunk "select m1.col from unsharded as m1 left join unsharded as m2 on m1.a=m2.b" @@ -632,40 +613,6 @@ Gen4 plan same as above ] } } -{ - "QueryType": "SELECT", - "Original": "select user.col from user join unsharded as m1 join unsharded as m2", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "-1", - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "SelectScatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "SelectUnsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded as m1, unsharded as m2 where 1 != 1", - "Query": "select 1 from unsharded as m1, unsharded as m2", - "Table": "unsharded" - } - ] - } -} # Parenthesized, single chunk "select user.col from user join (unsharded as m1 join unsharded as m2)" @@ -703,40 +650,6 @@ Gen4 plan same as above ] } } -{ - "QueryType": "SELECT", - "Original": "select user.col from user join (unsharded as m1 join unsharded as m2)", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "-1", - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "SelectScatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "SelectUnsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded as m1, unsharded as m2 where 1 != 1", - "Query": "select 1 from unsharded as m1, unsharded as m2", - "Table": "unsharded" - } - ] - } -} # Parenthesized, multi-chunk "select user.col from user join (user as u1 join unsharded)" @@ -1020,40 +933,6 @@ Gen4 plan same as above ] } } -{ - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user.id \u003c user_extra.user_id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "-2", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "SelectScatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "SelectScatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where :user_id \u003c user_extra.user_id", - "Table": "user_extra" - } - ] - } -} # sharded join, non-col reference RHS "select user.col from user join user_extra on user.id = 5" @@ -1290,21 +1169,6 @@ Gen4 plan same as above "Table": "ref" } } -{ - "QueryType": "SELECT", - "Original": "select r1.col from ref r1 join ref", - "Instructions": { - "OperatorType": "Route", - "Variant": "SelectReference", - "Keyspace": { - 
"Name": "user", - "Sharded": true - }, - "FieldQuery": "select r1.col from ref as r1, ref where 1 != 1", - "Query": "select r1.col from ref as r1, ref", - "Table": "ref" - } -} # reference table can merge with other opcodes left to right. "select ref.col from ref join user" diff --git a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.txt b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.txt index 3fd2d164f23..28263baae24 100644 --- a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.txt @@ -314,7 +314,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # ORDER BY after pull-out subquery "select col from user where col in (select col2 from user) order by col" @@ -539,7 +538,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # ORDER BY RAND() for join "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()" @@ -1019,7 +1017,6 @@ Gen4 plan same as above ] } } -Gen4 plan same as above # scatter limit after pullout subquery "select col from user where col in (select col1 from user) limit 1" diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases.txt b/go/vt/vtgate/planbuilder/testdata/select_cases.txt index 628484ebb7a..93f34a6ab9f 100644 --- a/go/vt/vtgate/planbuilder/testdata/select_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/select_cases.txt @@ -34,7 +34,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # unqualified '*' expression for simple route "select * from user" @@ -53,7 +52,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # select with timeout directive sets QueryTimeout in the route "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user" @@ -72,7 +70,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # select aggregation with timeout directive sets QueryTimeout in the route "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user" @@ -123,7 +120,6 @@ Gen4 plan same as above ] } } -Gen4 plan same as above # select with partial scatter directive "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user" @@ -142,7 +138,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # select aggregation with partial scatter directive "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user" @@ -219,7 +214,6 @@ Gen4 plan same as above ] } } -Gen4 plan same as above # qualified '*' expression for simple route "select user.* from user" @@ -238,7 +232,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # fully qualified '*' expression for simple route "select user.user.* from user.user" @@ -838,7 +831,6 @@ Gen4 plan same as above "Table": "`user`" } } -Gen4 plan same as above # sharded limit offset "select user_id from music order by user_id limit 10, 20" @@ -956,7 +948,6 @@ Gen4 plan same as above "Vindex": "user_index" } } -Gen4 plan same as above # Column Aliasing with Column "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3" @@ -979,7 +970,6 @@ Gen4 plan same as above "Vindex": "user_index" } } -Gen4 plan same as above # Booleans and parenthesis "select * from user where (id = 1) AND name = true limit 5" diff --git a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt index 09564e2ec67..ef39f949223 100644 --- a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt +++ 
b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt @@ -446,14 +446,11 @@ Gen4 plan same as above # create view with sql_calc_found_rows with limit "create view user.view_a as select sql_calc_found_rows * from music limit 100" "Complex select queries are not supported in create or alter view statements" -Gen4 plan same as above # create view with sql_calc_found_rows with group by and having "create view user.view_a as select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2" "Complex select queries are not supported in create or alter view statements" -Gen4 plan same as above # create view with incompatible keyspaces "create view main.view_a as select * from user.user_extra" "Select query does not belong to the same keyspace as the view statement" -Gen4 plan same as above diff --git a/go/vt/vtgate/planbuilder/testdata/wireup_cases.txt b/go/vt/vtgate/planbuilder/testdata/wireup_cases.txt index d4766864a73..4fc36b8dd1e 100644 --- a/go/vt/vtgate/planbuilder/testdata/wireup_cases.txt +++ b/go/vt/vtgate/planbuilder/testdata/wireup_cases.txt @@ -633,9 +633,7 @@ # Invalid value in IN clause from LHS of join "select u1.id from user u1 join user u2 where u1.id = 18446744073709551616" "strconv.ParseUint: parsing "18446744073709551616": value out of range" -Gen4 plan same as above # Invalid value in IN clause from RHS of join "select u1.id from user u1 join user u2 where u2.id = 18446744073709551616" "strconv.ParseUint: parsing "18446744073709551616": value out of range" -Gen4 plan same as above diff --git a/go/vt/vtgate/semantics/analyzer.go b/go/vt/vtgate/semantics/analyzer.go index 71629b34164..69c40dc9412 100644 --- a/go/vt/vtgate/semantics/analyzer.go +++ b/go/vt/vtgate/semantics/analyzer.go @@ -53,7 +53,7 @@ func (a *analyzer) analyzeDown(cursor *sqlparser.Cursor) bool { return false } case *sqlparser.DerivedTable: - a.err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "%T not supported", node) + a.err = Gen4NotSupportedF("derived tables") case *sqlparser.TableExprs: // this has already been visited when we encountered the SELECT struct return false @@ -98,10 +98,13 @@ func (a *analyzer) analyzeTableExprs(tablExprs sqlparser.TableExprs) error { func (a *analyzer) analyzeTableExpr(tableExpr sqlparser.TableExpr) error { switch table := tableExpr.(type) { case *sqlparser.AliasedTableExpr: + if !table.As.IsEmpty() { + return Gen4NotSupportedF("table aliases") + } return a.bindTable(table, table.Expr) case *sqlparser.JoinTableExpr: if table.Join != sqlparser.NormalJoinType { - return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Join type not supported: %s", table.Join.ToString()) + return Gen4NotSupportedF("join type %s", table.Join.ToString()) } if err := a.analyzeTableExpr(table.LeftExpr); err != nil { return err @@ -137,7 +140,7 @@ func (a *analyzer) resolveUnQualifiedColumn(current *scope, expr *sqlparser.ColN return tableExpr, nil } } - return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unable to map column to a table: %s", sqlparser.String(expr)) + return nil, Gen4NotSupportedF("unable to map column to a table: %s", sqlparser.String(expr)) } func (a *analyzer) tableSetFor(t table) TableSet { @@ -171,8 +174,10 @@ func (a *analyzer) bindTable(alias *sqlparser.AliasedTableExpr, expr sqlparser.S } func (a *analyzer) analyze(statement sqlparser.Statement) error { - _ = sqlparser.Rewrite(statement, a.analyzeDown, a.analyzeUp) - + _, err := sqlparser.Rewrite(statement, a.analyzeDown, a.analyzeUp) + if err != nil { + return 
err + } return a.err } @@ -204,3 +209,8 @@ func (a *analyzer) currentScope() *scope { } return a.scopes[size-1] } + +// Gen4NotSupportedF returns a common error for shortcomings in the gen4 planner +func Gen4NotSupportedF(format string, args ...interface{}) error { + return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "gen4 does not yet support: "+format, args...) +} diff --git a/go/vt/vtgate/semantics/analyzer_test.go b/go/vt/vtgate/semantics/analyzer_test.go index 8a30c4dbf84..dc30488cac2 100644 --- a/go/vt/vtgate/semantics/analyzer_test.go +++ b/go/vt/vtgate/semantics/analyzer_test.go @@ -17,6 +17,7 @@ limitations under the License. package semantics import ( + "strings" "testing" "github.com/stretchr/testify/assert" @@ -40,6 +41,7 @@ func extract(in *sqlparser.Select, idx int) sqlparser.Expr { } func TestScopeForSubqueries(t *testing.T) { + t.Skip("subqueries not yet supported") query := ` select t.col1, ( select t.col2 from z as t) @@ -157,6 +159,9 @@ func TestNotUniqueTableName(t *testing.T) { for _, query := range queries { t.Run(query, func(t *testing.T) { + if strings.Contains(query, "as") { + t.Skip("table alias not implemented") + } parse, _ := sqlparser.Parse(query) _, err := Analyse(parse) require.Error(t, err) diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller.go b/go/vt/vttablet/tabletmanager/vreplication/controller.go index 11597f60504..5a35f346f27 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller.go @@ -118,7 +118,15 @@ func newController(ctx context.Context, params map[string]string, dbClientFactor } log.Infof("creating tablet picker for source keyspace/shard %v/%v with cell: %v and tabletTypes: %v", ct.source.Keyspace, ct.source.Shard, cell, tabletTypesStr) cells := strings.Split(cell, ",") - tp, err := discovery.NewTabletPicker(ts, cells, ct.source.Keyspace, ct.source.Shard, tabletTypesStr) + + sourceTopo := ts + if ct.source.ExternalCluster != "" { + sourceTopo, err = sourceTopo.OpenExternalVitessClusterServer(ctx, ct.source.ExternalCluster) + if err != nil { + return nil, err + } + } + tp, err := discovery.NewTabletPicker(sourceTopo, cells, ct.source.Keyspace, ct.source.Shard, tabletTypesStr) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletserver/planbuilder/builder.go b/go/vt/vttablet/tabletserver/planbuilder/builder.go index 747ea2a2659..990ecdc3be8 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/builder.go +++ b/go/vt/vttablet/tabletserver/planbuilder/builder.go @@ -46,7 +46,7 @@ func analyzeSelect(sel *sqlparser.Select, tables map[string]*schema.Table) (plan } // Check if it's a NEXT VALUE statement. 
- if nextVal, ok := sel.SelectExprs[0].(sqlparser.Nextval); ok { + if nextVal, ok := sel.SelectExprs[0].(*sqlparser.Nextval); ok { if plan.Table == nil || plan.Table.Type != schema.Sequence { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%s is not a sequence", sqlparser.String(sel.From)) } @@ -134,7 +134,10 @@ func analyzeShow(show *sqlparser.Show, dbName string) (plan *Plan, err error) { // rewrite WHERE clause if it exists // `where Tables_in_Keyspace` => `where Tables_in_DbName` if showInternal.Filter != nil { - showTableRewrite(showInternal, dbName) + err := showTableRewrite(showInternal, dbName) + if err != nil { + return nil, err + } } } return &Plan{ @@ -153,10 +156,10 @@ func analyzeShow(show *sqlparser.Show, dbName string) (plan *Plan, err error) { return &Plan{PlanID: PlanOtherRead}, nil } -func showTableRewrite(show *sqlparser.ShowBasic, dbName string) { +func showTableRewrite(show *sqlparser.ShowBasic, dbName string) error { filter := show.Filter.Filter if filter != nil { - sqlparser.Rewrite(filter, func(cursor *sqlparser.Cursor) bool { + _, err := sqlparser.Rewrite(filter, func(cursor *sqlparser.Cursor) bool { switch n := cursor.Node().(type) { case *sqlparser.ColName: if n.Qualifier.IsEmpty() && strings.HasPrefix(n.Name.Lowered(), "tables_in_") { @@ -165,7 +168,11 @@ func showTableRewrite(show *sqlparser.ShowBasic, dbName string) { } return true }, nil) + if err != nil { + return err + } } + return nil } func analyzeSet(set *sqlparser.Set) (plan *Plan) { diff --git a/go/vt/vttablet/tabletserver/throttle/throttler.go b/go/vt/vttablet/tabletserver/throttle/throttler.go index 787f3a93714..e7de45df1fe 100644 --- a/go/vt/vttablet/tabletserver/throttle/throttler.go +++ b/go/vt/vttablet/tabletserver/throttle/throttler.go @@ -518,7 +518,6 @@ func (throttler *Throttler) collectMySQLMetrics(ctx context.Context) error { // refreshMySQLInventory will re-structure the inventory based on reading config settings func (throttler *Throttler) refreshMySQLInventory(ctx context.Context) error { - log.Infof("refreshing MySQL inventory") addInstanceKey := func(key *mysql.InstanceKey, clusterName string, clusterSettings *config.MySQLClusterConfigurationSettings, probes *mysql.Probes) { for _, ignore := range clusterSettings.IgnoreHosts { @@ -531,7 +530,6 @@ func (throttler *Throttler) refreshMySQLInventory(ctx context.Context) error { log.Infof("Throttler: read invalid instance key: [%+v] for cluster %+v", key, clusterName) return } - log.Infof("Throttler: read instance key: %+v", key) probe := &mysql.Probe{ Key: *key, @@ -596,7 +594,6 @@ func (throttler *Throttler) refreshMySQLInventory(ctx context.Context) error { // synchronous update of inventory func (throttler *Throttler) updateMySQLClusterProbes(ctx context.Context, clusterProbes *mysql.ClusterProbes) error { - log.Infof("Throttler: updating MySQLClusterProbes: %s", clusterProbes.ClusterName) throttler.mysqlInventory.ClustersProbes[clusterProbes.ClusterName] = clusterProbes.InstanceProbes throttler.mysqlInventory.IgnoreHostsCount[clusterProbes.ClusterName] = clusterProbes.IgnoreHostsCount throttler.mysqlInventory.IgnoreHostsThreshold[clusterProbes.ClusterName] = clusterProbes.IgnoreHostsThreshold diff --git a/go/vt/wrangler/external_cluster.go b/go/vt/wrangler/external_cluster.go new file mode 100644 index 00000000000..e3f93f62d4c --- /dev/null +++ b/go/vt/wrangler/external_cluster.go @@ -0,0 +1,55 @@ +/* +Copyright 2021 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wrangler + +import ( + "context" + "fmt" + + "vitess.io/vitess/go/vt/proto/topodata" +) + +// MountExternalVitessCluster adds a topo record for cluster with specified parameters so that it is available to a Migrate command +func (wr *Wrangler) MountExternalVitessCluster(ctx context.Context, clusterName, topoType, topoServer, topoRoot string) error { + vci, err := wr.TopoServer().GetExternalVitessCluster(ctx, clusterName) + if err != nil { + return err + } + if vci != nil { + return fmt.Errorf("there is already a vitess cluster named %s", clusterName) + } + vc := &topodata.ExternalVitessCluster{ + TopoConfig: &topodata.TopoConfig{ + TopoType: topoType, + Server: topoServer, + Root: topoRoot, + }, + } + return wr.TopoServer().CreateExternalVitessCluster(ctx, clusterName, vc) +} + +// UnmountExternalVitessCluster deletes a mounted cluster from the topo +func (wr *Wrangler) UnmountExternalVitessCluster(ctx context.Context, clusterName string) error { + vci, err := wr.TopoServer().GetExternalVitessCluster(ctx, clusterName) + if err != nil { + return err + } + if vci == nil { + return fmt.Errorf("there is no vitess cluster named %s", clusterName) + } + return wr.TopoServer().DeleteExternalVitessCluster(ctx, clusterName) +} diff --git a/go/vt/wrangler/external_cluster_test.go b/go/vt/wrangler/external_cluster_test.go new file mode 100644 index 00000000000..3be5970a769 --- /dev/null +++ b/go/vt/wrangler/external_cluster_test.go @@ -0,0 +1,67 @@ +package wrangler + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo/memorytopo" +) + +func TestVitessCluster(t *testing.T) { + ctx := context.Background() + ts := memorytopo.NewServer("zone1") + tmc := newTestWranglerTMClient() + wr := New(logutil.NewConsoleLogger(), ts, tmc) + name, topoType, topoServer, topoRoot := "c1", "x", "y", "z" + + t.Run("Zero clusters to start", func(t *testing.T) { + clusters, err := ts.GetExternalVitessClusters(ctx) + require.NoError(t, err) + require.Equal(t, 0, len(clusters)) + }) + t.Run("Mount first cluster", func(t *testing.T) { + err := wr.MountExternalVitessCluster(ctx, name, topoType, topoServer, topoRoot) + require.NoError(t, err) + vci, err := ts.GetExternalVitessCluster(ctx, name) + require.NoError(t, err) + require.Equal(t, vci.ClusterName, name) + expectedVc := &topodata.ExternalVitessCluster{ + TopoConfig: &topodata.TopoConfig{ + TopoType: topoType, + Server: topoServer, + Root: topoRoot, + }, + } + require.Equal(t, expectedVc, vci.ExternalVitessCluster) + }) + + t.Run("Mount second cluster", func(t *testing.T) { + name2 := "c2" + err := wr.MountExternalVitessCluster(ctx, name2, topoType, topoServer, topoRoot) + require.NoError(t, err) + }) + + t.Run("List clusters should return c1,c2", func(t *testing.T) { + clusters, err := ts.GetExternalVitessClusters(ctx) + require.NoError(t, err) + require.Equal(t, 2, len(clusters)) + 
require.EqualValues(t, []string{"c1", "c2"}, clusters) + }) + t.Run("Unmount first cluster", func(t *testing.T) { + err := wr.UnmountExternalVitessCluster(ctx, name) + require.NoError(t, err) + vci, err := ts.GetExternalVitessCluster(ctx, name) + require.NoError(t, err) + require.Nil(t, vci) + }) + t.Run("List clusters should return c2", func(t *testing.T) { + clusters, err := ts.GetExternalVitessClusters(ctx) + require.NoError(t, err) + require.Equal(t, 1, len(clusters)) + require.EqualValues(t, []string{"c2"}, clusters) + }) +} diff --git a/go/vt/wrangler/materializer.go b/go/vt/wrangler/materializer.go index 8016e453082..7c064455d65 100644 --- a/go/vt/wrangler/materializer.go +++ b/go/vt/wrangler/materializer.go @@ -64,11 +64,21 @@ const ( // MoveTables initiates moving table(s) over to another keyspace func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, targetKeyspace, tableSpecs, - cell, tabletTypes string, allTables bool, excludeTables string, autoStart, stopAfterCopy bool) error { + cell, tabletTypes string, allTables bool, excludeTables string, autoStart, stopAfterCopy bool, + externalCluster string) error { //FIXME validate tableSpecs, allTables, excludeTables var tables []string + var externalTopo *topo.Server var err error + if externalCluster != "" { + externalTopo, err = wr.ts.OpenExternalVitessClusterServer(ctx, externalCluster) + if err != nil { + return err + } + wr.sourceTs = externalTopo + log.Infof("Successfully opened external topo: %+v", externalTopo) + } var vschema *vschemapb.Keyspace vschema, err = wr.ts.GetVSchema(ctx, targetKeyspace) if err != nil { @@ -97,7 +107,7 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta if len(strings.TrimSpace(tableSpecs)) > 0 { tables = strings.Split(tableSpecs, ",") } - ksTables, err := wr.getKeyspaceTables(ctx, sourceKeyspace) + ksTables, err := wr.getKeyspaceTables(ctx, sourceKeyspace, wr.sourceTs) if err != nil { return err } @@ -148,45 +158,46 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta } } } - - // Save routing rules before vschema. If we save vschema first, and routing rules - // fails to save, we may generate duplicate table errors. - rules, err := wr.getRoutingRules(ctx) - if err != nil { - return err - } - for _, table := range tables { - toSource := []string{sourceKeyspace + "." + table} - rules[table] = toSource - rules[table+"@replica"] = toSource - rules[table+"@rdonly"] = toSource - rules[targetKeyspace+"."+table] = toSource - rules[targetKeyspace+"."+table+"@replica"] = toSource - rules[targetKeyspace+"."+table+"@rdonly"] = toSource - rules[targetKeyspace+"."+table] = toSource - rules[sourceKeyspace+"."+table+"@replica"] = toSource - rules[sourceKeyspace+"."+table+"@rdonly"] = toSource - } - if err := wr.saveRoutingRules(ctx, rules); err != nil { - return err - } - if vschema != nil { - // We added to the vschema. - if err := wr.ts.SaveVSchema(ctx, targetKeyspace, vschema); err != nil { + if externalTopo == nil { + // Save routing rules before vschema. If we save vschema first, and routing rules + // fails to save, we may generate duplicate table errors. + rules, err := wr.getRoutingRules(ctx) + if err != nil { + return err + } + for _, table := range tables { + toSource := []string{sourceKeyspace + "." 
+ table} + rules[table] = toSource + rules[table+"@replica"] = toSource + rules[table+"@rdonly"] = toSource + rules[targetKeyspace+"."+table] = toSource + rules[targetKeyspace+"."+table+"@replica"] = toSource + rules[targetKeyspace+"."+table+"@rdonly"] = toSource + rules[targetKeyspace+"."+table] = toSource + rules[sourceKeyspace+"."+table+"@replica"] = toSource + rules[sourceKeyspace+"."+table+"@rdonly"] = toSource + } + if err := wr.saveRoutingRules(ctx, rules); err != nil { return err } + if vschema != nil { + // We added to the vschema. + if err := wr.ts.SaveVSchema(ctx, targetKeyspace, vschema); err != nil { + return err + } + } } if err := wr.ts.RebuildSrvVSchema(ctx, nil); err != nil { return err } - ms := &vtctldatapb.MaterializeSettings{ - Workflow: workflow, - SourceKeyspace: sourceKeyspace, - TargetKeyspace: targetKeyspace, - Cell: cell, - TabletTypes: tabletTypes, - StopAfterCopy: stopAfterCopy, + Workflow: workflow, + SourceKeyspace: sourceKeyspace, + TargetKeyspace: targetKeyspace, + Cell: cell, + TabletTypes: tabletTypes, + StopAfterCopy: stopAfterCopy, + ExternalCluster: externalCluster, } for _, table := range tables { buf := sqlparser.NewTrackedBuffer(nil) @@ -211,17 +222,19 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta return err } - exists, tablets, err := wr.checkIfPreviousJournalExists(ctx, mz, migrationID) - if err != nil { - return err - } - if exists { - wr.Logger().Errorf("Found a previous journal entry for %d", migrationID) - msg := fmt.Sprintf("found an entry from a previous run for migration id %d in _vt.resharding_journal of tablets %s,", - migrationID, strings.Join(tablets, ",")) - msg += fmt.Sprintf("please review and delete it before proceeding and restart the workflow using the Workflow %s.%s start", - workflow, targetKeyspace) - return fmt.Errorf(msg) + if externalCluster == "" { + exists, tablets, err := wr.checkIfPreviousJournalExists(ctx, mz, migrationID) + if err != nil { + return err + } + if exists { + wr.Logger().Errorf("Found a previous journal entry for %d", migrationID) + msg := fmt.Sprintf("found an entry from a previous run for migration id %d in _vt.resharding_journal of tablets %s,", + migrationID, strings.Join(tablets, ",")) + msg += fmt.Sprintf("please review and delete it before proceeding and restart the workflow using the Workflow %s.%s start", + workflow, targetKeyspace) + return fmt.Errorf(msg) + } } if autoStart { return mz.startStreams(ctx) @@ -252,8 +265,8 @@ func (wr *Wrangler) validateSourceTablesExist(ctx context.Context, sourceKeyspac return nil } -func (wr *Wrangler) getKeyspaceTables(ctx context.Context, ks string) ([]string, error) { - shards, err := wr.ts.GetServingShards(ctx, ks) +func (wr *Wrangler) getKeyspaceTables(ctx context.Context, ks string, ts *topo.Server) ([]string, error) { + shards, err := ts.GetServingShards(ctx, ks) if err != nil { return nil, err } @@ -266,7 +279,11 @@ func (wr *Wrangler) getKeyspaceTables(ctx context.Context, ks string) ([]string, } allTables := []string{"/.*/"} - schema, err := wr.GetSchema(ctx, master, allTables, nil, false) + ti, err := ts.GetTablet(ctx, master) + if err != nil { + return nil, err + } + schema, err := wr.tmc.GetSchema(ctx, ti.Tablet, allTables, nil, false) if err != nil { return nil, err } @@ -823,7 +840,7 @@ func (wr *Wrangler) buildMaterializer(ctx context.Context, ms *vtctldatapb.Mater } } - sourceShards, err := wr.ts.GetServingShards(ctx, ms.SourceKeyspace) + sourceShards, err := wr.sourceTs.GetServingShards(ctx, 
ms.SourceKeyspace) if err != nil { return nil, err } @@ -849,8 +866,11 @@ func (mz *materializer) getSourceTableDDLs(ctx context.Context) (map[string]stri return nil, fmt.Errorf("source shard must have a master for copying schema: %v", mz.sourceShards[0].ShardName()) } - var err error - sourceSchema, err := mz.wr.GetSchema(ctx, sourceMaster, allTables, nil, false) + ti, err := mz.wr.sourceTs.GetTablet(ctx, sourceMaster) + if err != nil { + return nil, err + } + sourceSchema, err := mz.wr.tmc.GetSchema(ctx, ti.Tablet, allTables, nil, false) if err != nil { return nil, err } @@ -973,7 +993,10 @@ func stripTableConstraints(ddl string) (string, error) { return true } - noConstraintAST := sqlparser.Rewrite(ast, stripConstraints, nil) + noConstraintAST, err := sqlparser.Rewrite(ast, stripConstraints, nil) + if err != nil { + return "", err + } newDDL := sqlparser.String(noConstraintAST) return newDDL, nil @@ -984,10 +1007,11 @@ func (mz *materializer) generateInserts(ctx context.Context) (string, error) { for _, source := range mz.sourceShards { bls := &binlogdatapb.BinlogSource{ - Keyspace: mz.ms.SourceKeyspace, - Shard: source.ShardName(), - Filter: &binlogdatapb.Filter{}, - StopAfterCopy: mz.ms.StopAfterCopy, + Keyspace: mz.ms.SourceKeyspace, + Shard: source.ShardName(), + Filter: &binlogdatapb.Filter{}, + StopAfterCopy: mz.ms.StopAfterCopy, + ExternalCluster: mz.ms.ExternalCluster, } for _, ts := range mz.ms.TableSettings { rule := &binlogdatapb.Rule{ diff --git a/go/vt/wrangler/materializer_test.go b/go/vt/wrangler/materializer_test.go index 08500a2d80d..8a70bf549af 100644 --- a/go/vt/wrangler/materializer_test.go +++ b/go/vt/wrangler/materializer_test.go @@ -62,7 +62,7 @@ func TestMigrateTables(t *testing.T) { env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) ctx := context.Background() - err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1", "", "", false, "", true, false) + err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1", "", "", false, "", true, false, "") require.NoError(t, err) vschema, err := env.wr.ts.GetSrvVSchema(ctx, env.cell) require.NoError(t, err) @@ -103,11 +103,11 @@ func TestMissingTables(t *testing.T) { env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) ctx := context.Background() - err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1,tyt", "", "", false, "", true, false) + err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1,tyt", "", "", false, "", true, false, "") require.EqualError(t, err, "table(s) not found in source keyspace sourceks: tyt") - err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1,tyt,t2,txt", "", "", false, "", true, false) + err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1,tyt,t2,txt", "", "", false, "", true, false, "") require.EqualError(t, err, "table(s) not found in source keyspace sourceks: tyt,txt") - err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1", "", "", false, "", true, false) + err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1", "", "", false, "", true, false, "") require.NoError(t, err) } @@ -163,7 +163,7 @@ func TestMoveTablesAllAndExclude(t *testing.T) { env.tmc.expectVRQuery(200, insertPrefix, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzSelectIDQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "", "", "", tcase.allTables, tcase.excludeTables, true, false) 
+ err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "", "", "", tcase.allTables, tcase.excludeTables, true, false, "") require.NoError(t, err) require.EqualValues(t, tcase.want, targetTables(env)) }) @@ -197,7 +197,7 @@ func TestMoveTablesStopFlags(t *testing.T) { env.tmc.expectVRQuery(200, mzSelectIDQuery, &sqltypes.Result{}) // -auto_start=false is tested by NOT expecting the update query which sets state to RUNNING err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1", "", - "", false, "", false, true) + "", false, "", false, true, "") require.NoError(t, err) env.tmc.verifyQueries(t) }) @@ -223,7 +223,7 @@ func TestMigrateVSchema(t *testing.T) { env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) ctx := context.Background() - err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", `{"t1":{}}`, "", "", false, "", true, false) + err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", `{"t1":{}}`, "", "", false, "", true, false, "") require.NoError(t, err) vschema, err := env.wr.ts.GetSrvVSchema(ctx, env.cell) require.NoError(t, err) diff --git a/go/vt/wrangler/switcher.go b/go/vt/wrangler/switcher.go index 9aea911adb3..aad7ae46ead 100644 --- a/go/vt/wrangler/switcher.go +++ b/go/vt/wrangler/switcher.go @@ -31,6 +31,10 @@ type switcher struct { wr *Wrangler } +func (r *switcher) addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error { + return r.ts.addParticipatingTablesToKeyspace(ctx, keyspace, tableSpecs) +} + func (r *switcher) deleteRoutingRules(ctx context.Context) error { return r.ts.deleteRoutingRules(ctx) } diff --git a/go/vt/wrangler/switcher_dry_run.go b/go/vt/wrangler/switcher_dry_run.go index 6f7a505ce6c..c01cd09ebfb 100644 --- a/go/vt/wrangler/switcher_dry_run.go +++ b/go/vt/wrangler/switcher_dry_run.go @@ -37,6 +37,11 @@ type switcherDryRun struct { ts *trafficSwitcher } +func (dr *switcherDryRun) addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error { + dr.drLog.Log("All source tables will be added to the target keyspace vschema") + return nil +} + func (dr *switcherDryRun) deleteRoutingRules(ctx context.Context) error { dr.drLog.Log("Routing rules for participating tables will be deleted") return nil diff --git a/go/vt/wrangler/switcher_interface.go b/go/vt/wrangler/switcher_interface.go index d7c94fa8011..87272e6736b 100644 --- a/go/vt/wrangler/switcher_interface.go +++ b/go/vt/wrangler/switcher_interface.go @@ -49,6 +49,6 @@ type iswitcher interface { removeTargetTables(ctx context.Context) error dropTargetShards(ctx context.Context) error deleteRoutingRules(ctx context.Context) error - + addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error logs() *[]string } diff --git a/go/vt/wrangler/traffic_switcher.go b/go/vt/wrangler/traffic_switcher.go index 240e363187c..06c92c3f88a 100644 --- a/go/vt/wrangler/traffic_switcher.go +++ b/go/vt/wrangler/traffic_switcher.go @@ -26,6 +26,8 @@ import ( "sync" "time" + "vitess.io/vitess/go/json2" + "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vtgate/evalengine" @@ -114,7 +116,8 @@ type trafficSwitcher struct { sourceKSSchema *vindexes.KeyspaceSchema optCells string //cells option passed to MoveTables/Reshard optTabletTypes string //tabletTypes option passed to MoveTables/Reshard - + externalCluster string + externalTopo *topo.Server } // tsTarget contains the metadata for each migration target. 
@@ -704,6 +707,47 @@ func (wr *Wrangler) dropArtifacts(ctx context.Context, sw iswitcher) error { return nil } +// finalizeMigrateWorkflow deletes the streams for the Migrate workflow. +// We only cleanup the target for external sources +func (wr *Wrangler) finalizeMigrateWorkflow(ctx context.Context, targetKeyspace, workflow, tableSpecs string, + cancel, keepData, dryRun bool) (*[]string, error) { + ts, err := wr.buildTrafficSwitcher(ctx, targetKeyspace, workflow) + if err != nil { + wr.Logger().Errorf("buildTrafficSwitcher failed: %v", err) + return nil, err + } + var sw iswitcher + if dryRun { + sw = &switcherDryRun{ts: ts, drLog: NewLogRecorder()} + } else { + sw = &switcher{ts: ts, wr: wr} + } + var tctx context.Context + tctx, targetUnlock, lockErr := sw.lockKeyspace(ctx, ts.targetKeyspace, "completeMigrateWorkflow") + if lockErr != nil { + ts.wr.Logger().Errorf("Target LockKeyspace failed: %v", lockErr) + return nil, lockErr + } + defer targetUnlock(&err) + ctx = tctx + if err := sw.dropTargetVReplicationStreams(ctx); err != nil { + return nil, err + } + if !cancel { + sw.addParticipatingTablesToKeyspace(ctx, targetKeyspace, tableSpecs) + if err := ts.wr.ts.RebuildSrvVSchema(ctx, nil); err != nil { + return nil, err + } + } + log.Infof("cancel is %t, keepData %t", cancel, keepData) + if cancel && !keepData { + if err := sw.removeTargetTables(ctx); err != nil { + return nil, err + } + } + return sw.logs(), nil +} + // DropSources cleans up source tables, shards and blacklisted tables after a MoveTables/Reshard is completed func (wr *Wrangler) DropSources(ctx context.Context, targetKeyspace, workflow string, removalType TableRemovalType, keepData, force, dryRun bool) (*[]string, error) { ts, err := wr.buildTrafficSwitcher(ctx, targetKeyspace, workflow) @@ -789,12 +833,22 @@ func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, wo optTabletTypes: optTabletTypes, } log.Infof("Migration ID for workflow %s: %d", workflow, ts.id) + sourceTopo := wr.ts // Build the sources for _, target := range targets { for _, bls := range target.sources { if ts.sourceKeyspace == "" { ts.sourceKeyspace = bls.Keyspace + ts.externalCluster = bls.ExternalCluster + if ts.externalCluster != "" { + externalTopo, err := wr.ts.OpenExternalVitessClusterServer(ctx, ts.externalCluster) + if err != nil { + return nil, err + } + sourceTopo = externalTopo + ts.externalTopo = externalTopo + } } else if ts.sourceKeyspace != bls.Keyspace { return nil, fmt.Errorf("source keyspaces are mismatched across streams: %v vs %v", ts.sourceKeyspace, bls.Keyspace) } @@ -818,11 +872,11 @@ func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, wo if _, ok := ts.sources[bls.Shard]; ok { continue } - sourcesi, err := ts.wr.ts.GetShard(ctx, bls.Keyspace, bls.Shard) + sourcesi, err := sourceTopo.GetShard(ctx, bls.Keyspace, bls.Shard) if err != nil { return nil, err } - sourceMaster, err := ts.wr.ts.GetTablet(ctx, sourcesi.MasterAlias) + sourceMaster, err := sourceTopo.GetTablet(ctx, sourcesi.MasterAlias) if err != nil { return nil, err } @@ -832,7 +886,7 @@ func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, wo } } } - if ts.sourceKeyspace != ts.targetKeyspace { + if ts.sourceKeyspace != ts.targetKeyspace || ts.externalCluster != "" { ts.migrationType = binlogdatapb.MigrationType_TABLES } else { // TODO(sougou): for shard migration, validate that source and target combined @@ -846,7 +900,7 @@ func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, 
targetKeyspace, wo } } } - vs, err := ts.wr.ts.GetVSchema(ctx, ts.sourceKeyspace) + vs, err := sourceTopo.GetVSchema(ctx, ts.sourceKeyspace) if err != nil { return nil, err } @@ -952,11 +1006,15 @@ func hashStreams(targetKeyspace string, targets map[string]*tsTarget) int64 { func (ts *trafficSwitcher) validate(ctx context.Context) error { if ts.migrationType == binlogdatapb.MigrationType_TABLES { + sourceTopo := ts.wr.ts + if ts.externalTopo != nil { + sourceTopo = ts.externalTopo + } // All shards must be present. - if err := ts.compareShards(ctx, ts.sourceKeyspace, ts.sourceShards()); err != nil { + if err := ts.compareShards(ctx, ts.sourceKeyspace, ts.sourceShards(), sourceTopo); err != nil { return err } - if err := ts.compareShards(ctx, ts.targetKeyspace, ts.targetShards()); err != nil { + if err := ts.compareShards(ctx, ts.targetKeyspace, ts.targetShards(), ts.wr.ts); err != nil { return err } // Wildcard table names not allowed. @@ -969,12 +1027,12 @@ func (ts *trafficSwitcher) validate(ctx context.Context) error { return nil } -func (ts *trafficSwitcher) compareShards(ctx context.Context, keyspace string, sis []*topo.ShardInfo) error { +func (ts *trafficSwitcher) compareShards(ctx context.Context, keyspace string, sis []*topo.ShardInfo, topo *topo.Server) error { var shards []string for _, si := range sis { shards = append(shards, si.ShardName()) } - topoShards, err := ts.wr.ts.GetShardNames(ctx, keyspace) + topoShards, err := topo.GetShardNames(ctx, keyspace) if err != nil { return err } @@ -1671,6 +1729,7 @@ func (ts *trafficSwitcher) dropSourceReverseVReplicationStreams(ctx context.Cont } func (ts *trafficSwitcher) removeTargetTables(ctx context.Context) error { + log.Infof("removeTargetTables") err := ts.forAllTargets(func(target *tsTarget) error { for _, tableName := range ts.tables { query := fmt.Sprintf("drop table %s.%s", target.master.DbName(), tableName) @@ -1759,3 +1818,41 @@ func reverseName(workflow string) string { } return workflow + reverse } + +// addParticipatingTablesToKeyspace updates the vschema with the new tables that were created as part of the +// Migrate flow. 
It is called when the Migrate flow is Completed +func (ts *trafficSwitcher) addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error { + var err error + var vschema *vschemapb.Keyspace + vschema, err = ts.wr.ts.GetVSchema(ctx, keyspace) + if err != nil { + return err + } + if vschema == nil { + return fmt.Errorf("no vschema found for keyspace %s", keyspace) + } + if vschema.Tables == nil { + vschema.Tables = make(map[string]*vschemapb.Table) + } + if strings.HasPrefix(tableSpecs, "{") { // user defined the vschema snippet, typically for a sharded target + wrap := fmt.Sprintf(`{"tables": %s}`, tableSpecs) + ks := &vschemapb.Keyspace{} + if err := json2.Unmarshal([]byte(wrap), ks); err != nil { + return err + } + if err != nil { + return err + } + for table, vtab := range ks.Tables { + vschema.Tables[table] = vtab + } + } else { + if vschema.Sharded { + return fmt.Errorf("no sharded vschema was provided, so you will need to update the vschema of the target manually for the moved tables") + } + for _, table := range ts.tables { + vschema.Tables[table] = &vschemapb.Table{} + } + } + return ts.wr.ts.SaveVSchema(ctx, keyspace, vschema) +} diff --git a/go/vt/wrangler/vdiff.go b/go/vt/wrangler/vdiff.go index 6f6fcb035ea..9d32aa04717 100644 --- a/go/vt/wrangler/vdiff.go +++ b/go/vt/wrangler/vdiff.go @@ -194,7 +194,7 @@ func (wr *Wrangler) VDiff(ctx context.Context, targetKeyspace, workflow, sourceC return nil, vterrors.Wrap(err, "buildVDiffPlan") } - if err := df.selectTablets(ctx); err != nil { + if err := df.selectTablets(ctx, ts); err != nil { return nil, vterrors.Wrap(err, "selectTablets") } defer func(ctx context.Context) { @@ -501,7 +501,7 @@ func newMergeSorter(participants map[string]*shardStreamer, comparePKs []int) *e } // selectTablets selects the tablets that will be used for the diff. 
-func (df *vdiff) selectTablets(ctx context.Context) error { +func (df *vdiff) selectTablets(ctx context.Context, ts *trafficSwitcher) error { var wg sync.WaitGroup var err1, err2 error @@ -510,7 +510,11 @@ func (df *vdiff) selectTablets(ctx context.Context) error { go func() { defer wg.Done() err1 = df.forAll(df.sources, func(shard string, source *shardStreamer) error { - tp, err := discovery.NewTabletPicker(df.ts.wr.ts, []string{df.sourceCell}, df.ts.sourceKeyspace, shard, df.tabletTypesStr) + sourceTopo := df.ts.wr.ts + if ts.externalTopo != nil { + sourceTopo = ts.externalTopo + } + tp, err := discovery.NewTabletPicker(sourceTopo, []string{df.sourceCell}, df.ts.sourceKeyspace, shard, df.tabletTypesStr) if err != nil { return err } diff --git a/go/vt/wrangler/workflow.go b/go/vt/wrangler/workflow.go index 0ccc085c544..c08a6e50ac8 100644 --- a/go/vt/wrangler/workflow.go +++ b/go/vt/wrangler/workflow.go @@ -22,6 +22,7 @@ type VReplicationWorkflowType int const ( MoveTablesWorkflow = VReplicationWorkflowType(iota) ReshardWorkflow + MigrateWorkflow ) // Workflow state display strings @@ -54,6 +55,7 @@ func (vrw *VReplicationWorkflow) String() string { // VReplicationWorkflowParams stores args and options passed to a VReplicationWorkflow command type VReplicationWorkflowParams struct { + WorkflowType VReplicationWorkflowType Workflow, TargetKeyspace string Cells, TabletTypes, ExcludeTables string EnableReverseReplication, DryRun bool @@ -69,6 +71,9 @@ type VReplicationWorkflowParams struct { SourceShards, TargetShards []string SkipSchemaCopy bool AutoStart, StopAfterCopy bool + + // Migrate specific + ExternalCluster string } // NewVReplicationWorkflow sets up a MoveTables or Reshard workflow based on options provided, deduces the state of the @@ -166,7 +171,7 @@ func (vrw *VReplicationWorkflow) Create() error { return fmt.Errorf("workflow has already been created, state is %s", vrw.CachedState()) } switch vrw.workflowType { - case MoveTablesWorkflow: + case MoveTablesWorkflow, MigrateWorkflow: err = vrw.initMoveTables() case ReshardWorkflow: err = vrw.initReshard() @@ -225,7 +230,7 @@ func (vrw *VReplicationWorkflow) GetStreamCount() (int64, int64, []*WorkflowErro return totalStreams, runningStreams, workflowErrors, nil } -// SwitchTraffic switches traffic forward for tablet_types passed +// SwitchTraffic switches traffic in the direction passed for specified tablet_types func (vrw *VReplicationWorkflow) SwitchTraffic(direction TrafficSwitchDirection) (*[]string, error) { var dryRunResults []string var rdDryRunResults, wrDryRunResults *[]string @@ -236,6 +241,9 @@ func (vrw *VReplicationWorkflow) SwitchTraffic(direction TrafficSwitchDirection) if !vrw.Exists() { return nil, fmt.Errorf("workflow has not yet been started") } + if vrw.workflowType == MigrateWorkflow { + return nil, fmt.Errorf("invalid action for Migrate workflow: SwitchTraffic") + } isCopyInProgress, err = vrw.IsCopyInProgress() if err != nil { @@ -274,6 +282,9 @@ func (vrw *VReplicationWorkflow) ReverseTraffic() (*[]string, error) { if !vrw.Exists() { return nil, fmt.Errorf("workflow has not yet been started") } + if vrw.workflowType == MigrateWorkflow { + return nil, fmt.Errorf("invalid action for Migrate workflow: ReverseTraffic") + } return vrw.SwitchTraffic(DirectionBackward) } @@ -285,7 +296,15 @@ const ( // Complete cleans up a successful workflow func (vrw *VReplicationWorkflow) Complete() (*[]string, error) { + var dryRunResults *[]string + var err error ws := vrw.ws + + if vrw.workflowType == MigrateWorkflow { + 
+		return vrw.wr.finalizeMigrateWorkflow(vrw.ctx, ws.TargetKeyspace, ws.Workflow, vrw.params.Tables,
+			false, vrw.params.KeepData, vrw.params.DryRun)
+	}
+
 	if !ws.WritesSwitched || len(ws.ReplicaCellsNotSwitched) > 0 || len(ws.RdonlyCellsNotSwitched) > 0 {
 		return nil, fmt.Errorf(ErrWorkflowNotFullySwitched)
 	}
@@ -295,10 +314,8 @@ func (vrw *VReplicationWorkflow) Complete() (*[]string, error) {
 	} else {
 		renameTable = DropTable
 	}
-	var dryRunResults *[]string
-	var err error
-	if dryRunResults, err = vrw.wr.DropSources(vrw.ctx, vrw.ws.TargetKeyspace, vrw.ws.Workflow, renameTable, vrw.params.KeepData,
-		false, vrw.params.DryRun); err != nil {
+	if dryRunResults, err = vrw.wr.DropSources(vrw.ctx, vrw.ws.TargetKeyspace, vrw.ws.Workflow, renameTable,
+		false, vrw.params.KeepData, vrw.params.DryRun); err != nil {
 		return nil, err
 	}
 	return dryRunResults, nil
@@ -307,6 +324,12 @@ func (vrw *VReplicationWorkflow) Complete() (*[]string, error) {
 // Cancel deletes all artifacts from a workflow which has not yet been switched
 func (vrw *VReplicationWorkflow) Cancel() error {
 	ws := vrw.ws
+	if vrw.workflowType == MigrateWorkflow {
+		_, err := vrw.wr.finalizeMigrateWorkflow(vrw.ctx, ws.TargetKeyspace, ws.Workflow, "",
+			true, vrw.params.KeepData, vrw.params.DryRun)
+		return err
+	}
+
 	if ws.WritesSwitched || len(ws.ReplicaCellsSwitched) > 0 || len(ws.RdonlyCellsSwitched) > 0 {
 		return fmt.Errorf(ErrWorkflowPartiallySwitched)
 	}
@@ -362,7 +385,8 @@ func (vrw *VReplicationWorkflow) parseTabletTypes() (hasReplica, hasRdonly, hasM
 func (vrw *VReplicationWorkflow) initMoveTables() error {
 	log.Infof("In VReplicationWorkflow.initMoveTables() for %+v", vrw)
 	return vrw.wr.MoveTables(vrw.ctx, vrw.params.Workflow, vrw.params.SourceKeyspace, vrw.params.TargetKeyspace,
-		vrw.params.Tables, vrw.params.Cells, vrw.params.TabletTypes, vrw.params.AllTables, vrw.params.ExcludeTables, vrw.params.AutoStart, vrw.params.StopAfterCopy)
+		vrw.params.Tables, vrw.params.Cells, vrw.params.TabletTypes, vrw.params.AllTables, vrw.params.ExcludeTables,
+		vrw.params.AutoStart, vrw.params.StopAfterCopy, vrw.params.ExternalCluster)
 }
 
 func (vrw *VReplicationWorkflow) initReshard() error {
diff --git a/go/vt/wrangler/wrangler.go b/go/vt/wrangler/wrangler.go
index 787f5008fc7..9b9e1cd6fbe 100644
--- a/go/vt/wrangler/wrangler.go
+++ b/go/vt/wrangler/wrangler.go
@@ -42,19 +42,21 @@ var (
 // Multiple go routines can use the same Wrangler at the same time,
 // provided they want to share the same logger / topo server / lock timeout.
 type Wrangler struct {
-	logger logutil.Logger
-	ts     *topo.Server
-	tmc    tmclient.TabletManagerClient
-	vtctld vtctlservicepb.VtctldServer
+	logger   logutil.Logger
+	ts       *topo.Server
+	tmc      tmclient.TabletManagerClient
+	vtctld   vtctlservicepb.VtctldServer
+	sourceTs *topo.Server
 }
 
 // New creates a new Wrangler object.
 func New(logger logutil.Logger, ts *topo.Server, tmc tmclient.TabletManagerClient) *Wrangler {
 	return &Wrangler{
-		logger: logger,
-		ts:     ts,
-		tmc:    tmc,
-		vtctld: grpcvtctldserver.NewVtctldServer(ts),
+		logger:   logger,
+		ts:       ts,
+		tmc:      tmc,
+		vtctld:   grpcvtctldserver.NewVtctldServer(ts),
+		sourceTs: ts,
 	}
 }
 
diff --git a/helm/release.sh b/helm/release.sh
index 0c225f54e19..ffdd994f301 100755
--- a/helm/release.sh
+++ b/helm/release.sh
@@ -27,6 +27,11 @@ docker tag vitess/mysqlctld:$vt_base_version-buster vitess/mysqlctld:$vt_base_ve
 docker push vitess/mysqlctld:$vt_base_version-buster
 docker push vitess/mysqlctld:$vt_base_version
 
+docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/mysqlctl:$vt_base_version-buster mysqlctl
+docker tag vitess/mysqlctl:$vt_base_version-buster vitess/mysqlctl:$vt_base_version
+docker push vitess/mysqlctl:$vt_base_version-buster
+docker push vitess/mysqlctl:$vt_base_version
+
 docker build --build-arg VT_BASE_VER=$vt_base_version -t vitess/vtctl:$vt_base_version-buster vtctl
 docker tag vitess/vtctl:$vt_base_version-buster vitess/vtctl:$vt_base_version
 docker push vitess/vtctl:$vt_base_version-buster
diff --git a/misc/git/hooks/visitorgen b/misc/git/hooks/visitorgen
index 65c04d613db..3ac99cb0a07 100755
--- a/misc/git/hooks/visitorgen
+++ b/misc/git/hooks/visitorgen
@@ -15,4 +15,4 @@
 
 # this script, which should run before committing code, makes sure that the visitor is re-generated when the ast changes
 
-go run ./go/vt/sqlparser/visitorgen/main -compareOnly=true -input=go/vt/sqlparser/ast.go -output=go/vt/sqlparser/rewriter.go
\ No newline at end of file
+go run ./go/tools/asthelpergen -in ./go/vt/sqlparser -verify=true -iface vitess.io/vitess/go/vt/sqlparser.SQLNode -except "*ColName"
\ No newline at end of file
diff --git a/proto/binlogdata.proto b/proto/binlogdata.proto
index fc647d19857..777affe1766 100644
--- a/proto/binlogdata.proto
+++ b/proto/binlogdata.proto
@@ -200,6 +200,10 @@ message BinlogSource {
   // StopAfterCopy specifies if vreplication should be stopped
   // after copying is done.
   bool stop_after_copy = 9;
+
+  // ExternalCluster is the name of the mounted cluster which has the source keyspace/db for this workflow
+  // it is of the type
+  string external_cluster = 10;
 }
 
 // VEventType enumerates the event types. Many of these types
diff --git a/proto/topodata.proto b/proto/topodata.proto
index f56d89977c1..85cce658e5b 100644
--- a/proto/topodata.proto
+++ b/proto/topodata.proto
@@ -406,3 +406,18 @@ message CellsAlias {
   // Cells that map to this alias
   repeated string cells = 2;
 }
+
+message TopoConfig {
+  string topo_type = 1;
+  string server = 2;
+  string root = 3;
+}
+
+message ExternalVitessCluster {
+  TopoConfig topo_config = 1;
+}
+
+// ExternalClusters
+message ExternalClusters {
+  repeated ExternalVitessCluster vitess_cluster = 1;
+}
diff --git a/proto/vtctldata.proto b/proto/vtctldata.proto
index 4c5057d5b2c..e58913a3e94 100644
--- a/proto/vtctldata.proto
+++ b/proto/vtctldata.proto
@@ -452,4 +452,8 @@ message MaterializeSettings {
   // optional parameters.
   string cell = 6;
   string tablet_types = 7;
+  // ExternalCluster is the name of the mounted cluster which has the source keyspace/db for this workflow
+  // it is of the type
+  string external_cluster = 8;
+
 }
diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go
index 49e8d0685f7..1407d0a0808 100644
--- a/test/ci_workflow_gen.go
+++ b/test/ci_workflow_gen.go
@@ -32,7 +32,7 @@ const (
 	unitTestDatabases = "percona56, mysql57, mysql80, mariadb101, mariadb102, mariadb103"
 
 	clusterTestTemplate = "templates/cluster_endtoend_test.tpl"
-	clusterList = "11,12,13,14,15,16,17,18,19,20,21,22,23,24,26,27,vreplication_basic,vreplication_multicell,vreplication_cellalias,vreplication_v2,onlineddl_ghost,onlineddl_vrepl,onlineddl_vrepl_stress,onlineddl_revert"
+	clusterList = "11,12,13,14,15,16,17,18,19,20,21,22,23,24,26,27,vreplication_basic,vreplication_multicell,vreplication_cellalias,vreplication_v2,onlineddl_ghost,onlineddl_vrepl,onlineddl_vrepl_stress,vreplication_migrate,onlineddl_revert"
 	// TODO: currently some percona tools including xtrabackup are installed on all clusters, we can possibly optimize
 	// this by only installing them in the required clusters
 	clustersRequiringXtraBackup = clusterList
diff --git a/test/config.json b/test/config.json
index 83220a689bc..31f20149763 100644
--- a/test/config.json
+++ b/test/config.json
@@ -696,6 +696,15 @@
             "Shard": "vreplication_v2",
             "RetryMax": 0,
             "Tags": []
+        },
+        "vreplication_migrate": {
+            "File": "unused.go",
+            "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMigrate"],
+            "Command": [],
+            "Manual": false,
+            "Shard": "vreplication_migrate",
+            "RetryMax": 0,
+            "Tags": []
         }
     }
 }
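For reference, a minimal sketch (not part of this diff) of how the new topodata messages above might be populated from Go. It assumes the standard protoc-gen-go output for proto/topodata.proto, imported under the existing package path vitess.io/vitess/go/vt/proto/topodata; the field names (TopoType, Server, Root, TopoConfig, VitessCluster) follow the usual generated-code naming, and all literal values are placeholders.

```go
// Illustrative only: builds an ExternalClusters record describing one
// mounted external Vitess cluster by its topo endpoint.
package main

import (
	"fmt"

	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)

func main() {
	// Topo endpoint of the external (source) cluster; values are placeholders.
	ext := &topodatapb.ExternalVitessCluster{
		TopoConfig: &topodatapb.TopoConfig{
			TopoType: "etcd2",
			Server:   "localhost:12379",
			Root:     "/vitess/global",
		},
	}

	// ExternalClusters groups every mounted cluster in one message.
	clusters := &topodatapb.ExternalClusters{
		VitessCluster: []*topodatapb.ExternalVitessCluster{ext},
	}

	fmt.Printf("%+v\n", clusters)
}
```

A record like this is what would back the externalTopo that selectTablets consults above when a Migrate workflow reads from a mounted cluster; the exact plumbing is outside this diff.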