diff --git a/docker/orchestrator/orchestrator.conf.json b/docker/orchestrator/orchestrator.conf.json index 73234cf20e8..729594044ed 100644 --- a/docker/orchestrator/orchestrator.conf.json +++ b/docker/orchestrator/orchestrator.conf.json @@ -5,7 +5,6 @@ "AuditToSyslog": false, "AuthenticationMethod": "", "AuthUserHeader": "", - "AutoPseudoGTID": false, "BackendDB": "sqlite", "BinlogEventsChunkSize": 10000, "CandidateInstanceExpireMinutes": 60, @@ -19,7 +18,6 @@ "DetectInstanceAliasQuery": "SELECT value FROM _vt.local_metadata WHERE name='Alias'", "DetectPromotionRuleQuery": "SELECT value FROM _vt.local_metadata WHERE name='PromotionRule'", "DetectDataCenterQuery": "SELECT value FROM _vt.local_metadata WHERE name='DataCenter'", - "DetectPseudoGTIDQuery": "", "DetectSemiSyncEnforcedQuery": "SELECT @@global.rpl_semi_sync_master_wait_no_slave AND @@global.rpl_semi_sync_master_timeout > 1000000", "DiscoverByShowSlaveHosts": false, "EnableSyslog": false, @@ -77,8 +75,6 @@ ], "PromotionIgnoreHostnameFilters": [ ], - "PseudoGTIDMonotonicHint": "asc:", - "PseudoGTIDPattern": "drop view if exists .*?`_pseudo_gtid_hint__", "ReadLongRunningQueries": false, "ReadOnly": false, "ReasonableMaintenanceReplicationLagSeconds": 20, @@ -97,7 +93,6 @@ "SkipBinlogEventsContaining": [ ], "SkipBinlogServerUnresolveCheck": true, - "SkipMaxScaleCheck": true, "SkipOrchestratorDatabaseUpdate": false, "SlaveStartPostWaitMilliseconds": 1000, "SnapshotTopologiesIntervalHours": 0, diff --git a/go.mod b/go.mod index ab099ac097f..20ca8a320f5 100644 --- a/go.mod +++ b/go.mod @@ -13,14 +13,13 @@ require ( github.com/PuerkitoBio/goquery v1.5.1 github.com/aquarapid/vaultlib v0.5.1 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 - github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 + github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 // indirect github.com/aws/aws-sdk-go v1.28.8 github.com/buger/jsonparser 
v0.0.0-20200322175846-f7e751efca13 github.com/cespare/xxhash/v2 v2.1.1 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 // indirect github.com/corpix/uarand v0.1.1 // indirect - github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 github.com/dave/jennifer v1.4.1 github.com/evanphx/json-patch v4.5.0+incompatible github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab @@ -40,7 +39,7 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/hashicorp/consul/api v1.5.0 github.com/hashicorp/go-immutable-radix v1.1.0 // indirect - github.com/hashicorp/go-msgpack v0.5.5 + github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/hashicorp/serf v0.9.2 // indirect diff --git a/go.sum b/go.sum index 9217f1c5bc0..49128274505 100644 --- a/go.sum +++ b/go.sum @@ -173,8 +173,6 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps= -github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM= github.com/daaku/go.zipexe v1.0.0 h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY= github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= 
github.com/dave/jennifer v1.4.1 h1:XyqG6cn5RQsTj3qlWQTKlRGAyrTcsk1kUmWdZBzRjDw= diff --git a/go/cmd/vtadmin/main.go b/go/cmd/vtadmin/main.go index 4edd4effdfb..706fefd04fa 100644 --- a/go/cmd/vtadmin/main.go +++ b/go/cmd/vtadmin/main.go @@ -31,6 +31,7 @@ import ( "vitess.io/vitess/go/vt/vtadmin/cluster" "vitess.io/vitess/go/vt/vtadmin/grpcserver" vtadminhttp "vitess.io/vitess/go/vt/vtadmin/http" + "vitess.io/vitess/go/vt/vtadmin/http/debug" ) var ( @@ -129,6 +130,9 @@ func main() { rootCmd.Flags().BoolVar(&httpOpts.EnableTracing, "http-tracing", false, "whether to enable tracing on the HTTP server") rootCmd.Flags().BoolVar(&httpOpts.DisableCompression, "http-no-compress", false, "whether to disable compression of HTTP API responses") + rootCmd.Flags().BoolVar(&httpOpts.DisableDebug, "http-no-debug", false, "whether to disable /debug/pprof/* and /debug/env HTTP endpoints") + rootCmd.Flags().Var(&debug.OmitEnv, "http-debug-omit-env", "name of an environment variable to omit from /debug/env, if http debug endpoints are enabled. specify multiple times to omit multiple env vars") + rootCmd.Flags().Var(&debug.SanitizeEnv, "http-debug-sanitize-env", "name of an environment variable to sanitize in /debug/env, if http debug endpoints are enabled. specify multiple times to sanitize multiple env vars") rootCmd.Flags().StringSliceVar(&httpOpts.CORSOrigins, "http-origin", []string{}, "repeated, comma-separated flag of allowed CORS origins. omit to disable CORS") rootCmd.Flags().StringVar(&httpOpts.ExperimentalOptions.TabletURLTmpl, "http-tablet-url-tmpl", diff --git a/go/cmd/vtbackup/vtbackup.go b/go/cmd/vtbackup/vtbackup.go index f5117376a23..cd8fbab570b 100644 --- a/go/cmd/vtbackup/vtbackup.go +++ b/go/cmd/vtbackup/vtbackup.go @@ -447,8 +447,8 @@ func startReplication(ctx context.Context, mysqld mysqlctl.MysqlDaemon, topoServ } // Stop replication (in case we're restarting), set master, and start replication. 
- if err := mysqld.SetMaster(ctx, ti.Tablet.MysqlHostname, int(ti.Tablet.MysqlPort), true /* stopReplicationBefore */, true /* startReplicationAfter */); err != nil { - return vterrors.Wrap(err, "MysqlDaemon.SetMaster failed") + if err := mysqld.SetReplicationSource(ctx, ti.Tablet.MysqlHostname, int(ti.Tablet.MysqlPort), true /* stopReplicationBefore */, true /* startReplicationAfter */); err != nil { + return vterrors.Wrap(err, "MysqlDaemon.SetReplicationSource failed") } return nil } diff --git a/go/cmd/vtctldclient/internal/command/vschemas.go b/go/cmd/vtctldclient/internal/command/vschemas.go index ac4f4499090..5a519b6d9a0 100644 --- a/go/cmd/vtctldclient/internal/command/vschemas.go +++ b/go/cmd/vtctldclient/internal/command/vschemas.go @@ -18,11 +18,14 @@ package command import ( "fmt" + "io/ioutil" "github.com/spf13/cobra" "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/json2" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) @@ -33,8 +36,89 @@ var ( Args: cobra.ExactArgs(1), RunE: commandGetVSchema, } + // ApplyVSchema makes an ApplyVSchema gRPC call to a vtctld. + ApplyVSchema = &cobra.Command{ + Use: "ApplyVSchema {-vschema= || -vschema-file= || -sql= || -sql-file=} [-cells=c1,c2,...] [-skip-rebuild] [-dry-run] ", + Args: cobra.ExactArgs(1), + DisableFlagsInUseLine: true, + RunE: commandApplyVSchema, + Short: "Applies the VTGate routing schema to the provided keyspace. 
Shows the result after application.", + } ) +var applyVSchemaOptions = struct { + VSchema string + VSchemaFile string + SQL string + SQLFile string + DryRun bool + SkipRebuild bool + Cells []string +}{} + +func commandApplyVSchema(cmd *cobra.Command, args []string) error { + sqlMode := (applyVSchemaOptions.SQL != "") != (applyVSchemaOptions.SQLFile != "") + jsonMode := (applyVSchemaOptions.VSchema != "") != (applyVSchemaOptions.VSchemaFile != "") + + if sqlMode && jsonMode { + return fmt.Errorf("only one of the sql, sql-file, vschema, or vschema-file flags may be specified when calling the ApplyVSchema command") + } + + if !sqlMode && !jsonMode { + return fmt.Errorf("one of the sql, sql-file, vschema, or vschema-file flags must be specified when calling the ApplyVSchema command") + } + + req := &vtctldatapb.ApplyVSchemaRequest{ + Keyspace: cmd.Flags().Arg(0), + SkipRebuild: applyVSchemaOptions.SkipRebuild, + Cells: applyVSchemaOptions.Cells, + DryRun: applyVSchemaOptions.DryRun, + } + + var err error + if sqlMode { + if applyVSchemaOptions.SQLFile != "" { + sqlBytes, err := ioutil.ReadFile(applyVSchemaOptions.SQLFile) + if err != nil { + return err + } + req.Sql = string(sqlBytes) + } else { + req.Sql = applyVSchemaOptions.SQL + } + } else { // jsonMode + var schema []byte + if applyVSchemaOptions.VSchemaFile != "" { + schema, err = ioutil.ReadFile(applyVSchemaOptions.VSchemaFile) + if err != nil { + return err + } + } else { + schema = []byte(applyVSchemaOptions.VSchema) + } + + var vs *vschemapb.Keyspace + err = json2.Unmarshal(schema, vs) + if err != nil { + return err + } + req.VSchema = vs + } + + cli.FinishedParsing(cmd) + + res, err := client.ApplyVSchema(commandCtx, req) + if err != nil { + return err + } + data, err := cli.MarshalJSON(res.VSchema) + if err != nil { + return err + } + fmt.Printf("New VSchema object:\n%s\nIf this is not what you expected, check the input data (as JSON parsing will skip unexpected fields).\n", data) + return nil +} + func 
commandGetVSchema(cmd *cobra.Command, args []string) error { cli.FinishedParsing(cmd) @@ -58,5 +142,14 @@ func commandGetVSchema(cmd *cobra.Command, args []string) error { } func init() { + ApplyVSchema.Flags().StringVar(&applyVSchemaOptions.VSchema, "vschema", "", "VSchema") + ApplyVSchema.Flags().StringVar(&applyVSchemaOptions.VSchemaFile, "vschema-file", "", "VSchema File") + ApplyVSchema.Flags().StringVar(&applyVSchemaOptions.SQL, "sql", "", "A VSchema DDL SQL statement, e.g. `alter table t add vindex hash(id)`") + ApplyVSchema.Flags().StringVar(&applyVSchemaOptions.SQLFile, "sql-file", "", "A file containing VSchema DDL SQL") + ApplyVSchema.Flags().BoolVar(&applyVSchemaOptions.DryRun, "dry-run", false, "If set, do not save the altered vschema, simply echo to console.") + ApplyVSchema.Flags().BoolVar(&applyVSchemaOptions.SkipRebuild, "skip-rebuild", false, "If set, do no rebuild the SrvSchema objects.") + ApplyVSchema.Flags().StringSliceVar(&applyVSchemaOptions.Cells, "cells", nil, "If specified, limits the rebuild to the cells, after upload. Ignored if skipRebuild is set.") + Root.AddCommand(ApplyVSchema) + Root.AddCommand(GetVSchema) } diff --git a/go/cmd/vtorc/main.go b/go/cmd/vtorc/main.go index e64ced875d0..840296fe102 100644 --- a/go/cmd/vtorc/main.go +++ b/go/cmd/vtorc/main.go @@ -42,7 +42,6 @@ func main() { verbose := flag.Bool("verbose", false, "verbose") debug := flag.Bool("debug", false, "debug mode (very verbose)") stack := flag.Bool("stack", false, "add stack trace upon error") - config.RuntimeCLIFlags.SkipBinlogSearch = flag.Bool("skip-binlog-search", false, "when matching via Pseudo-GTID, only use relay logs. 
This can save the hassle of searching for a non-existend pseudo-GTID entry, for example in servers with replication filters.") config.RuntimeCLIFlags.SkipUnresolve = flag.Bool("skip-unresolve", false, "Do not unresolve a host name") config.RuntimeCLIFlags.SkipUnresolveCheck = flag.Bool("skip-unresolve-check", false, "Skip/ignore checking an unresolve mapping (via hostname_unresolve table) resolves back to same hostname") config.RuntimeCLIFlags.Noop = flag.Bool("noop", false, "Dry run; do not perform destructing operations") diff --git a/go/flagutil/sets.go b/go/flagutil/sets.go new file mode 100644 index 00000000000..cfe21481f42 --- /dev/null +++ b/go/flagutil/sets.go @@ -0,0 +1,80 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flagutil + +import ( + "flag" + "strings" + + "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/util/sets" +) + +var ( + _ flag.Value = (*StringSetFlag)(nil) + _ pflag.Value = (*StringSetFlag)(nil) +) + +// StringSetFlag can be used to collect multiple instances of a flag into a set +// of values. +// +// For example, defining the following: +// +// var x flagutil.StringSetFlag +// flag.Var(&x, "foo", "") +// +// And then specifying "-foo x -foo y -foo x", will result in a set of {x, y}. +// +// In addition to implemnting the standard flag.Value interface, it also +// provides an implementation of pflag.Value, so it is usable in libraries like +// cobra. 
+type StringSetFlag struct { + set sets.String +} + +// ToSet returns the underlying string set, or an empty set if the underlying +// set is nil. +func (set *StringSetFlag) ToSet() sets.String { + if set.set == nil { + set.set = sets.NewString() + } + + return set.set +} + +// Set is part of the pflag.Value and flag.Value interfaces. +func (set *StringSetFlag) Set(s string) error { + if set.set == nil { + set.set = sets.NewString(s) + return nil + } + + set.set.Insert(s) + return nil +} + +// String is part of the pflag.Value and flag.Value interfaces. +func (set *StringSetFlag) String() string { + if set.set == nil { + return "" + } + + return strings.Join(set.set.List(), ", ") +} + +// Type is part of the pflag.Value interface. +func (set *StringSetFlag) Type() string { return "StringSetFlag" } diff --git a/go/mysql/flavor.go b/go/mysql/flavor.go index b178a5f0d5f..e0a22e1b966 100644 --- a/go/mysql/flavor.go +++ b/go/mysql/flavor.go @@ -34,8 +34,8 @@ var ( // Returned by ShowReplicationStatus(). ErrNotReplica = errors.New("no replication status") - // ErrNoMasterStatus means no status was returned by ShowMasterStatus(). - ErrNoMasterStatus = errors.New("no master status") + // ErrNoPrimaryStatus means no status was returned by ShowPrimaryStatus(). + ErrNoPrimaryStatus = errors.New("no master status") ) const ( @@ -57,8 +57,8 @@ const ( // 1. Oracle MySQL 5.6, 5.7, 8.0, ... // 2. MariaDB 10.X type flavor interface { - // masterGTIDSet returns the current GTIDSet of a server. - masterGTIDSet(c *Conn) (GTIDSet, error) + // primaryGTIDSet returns the current GTIDSet of a server. + primaryGTIDSet(c *Conn) (GTIDSet, error) // startReplicationCommand returns the command to start the replication. startReplicationCommand() string @@ -91,17 +91,17 @@ type flavor interface { // replication position at which the replica will resume. 
setReplicationPositionCommands(pos Position) []string - // changeMasterArg returns the specific parameter to add to - // a change master command. - changeMasterArg() string + // changeReplicationSourceArg returns the specific parameter to add to + // a "change master" command. + changeReplicationSourceArg() string // status returns the result of the appropriate status command, // with parsed replication position. status(c *Conn) (ReplicationStatus, error) - // masterStatus returns the result of 'SHOW MASTER STATUS', + // primaryStatus returns the result of 'SHOW MASTER STATUS', // with parsed executed position. - masterStatus(c *Conn) (MasterStatus, error) + primaryStatus(c *Conn) (PrimaryStatus, error) // waitUntilPositionCommand returns the SQL command to issue // to wait until the given position, until the context @@ -176,9 +176,9 @@ func (c *Conn) IsMariaDB() bool { return false } -// MasterPosition returns the current master replication position. -func (c *Conn) MasterPosition() (Position, error) { - gtidSet, err := c.flavor.masterGTIDSet(c) +// PrimaryPosition returns the current primary's replication position. +func (c *Conn) PrimaryPosition() (Position, error) { + gtidSet, err := c.flavor.primaryGTIDSet(c) if err != nil { return Position{}, err } @@ -187,10 +187,10 @@ func (c *Conn) MasterPosition() (Position, error) { }, nil } -// MasterFilePosition returns the current master's file based replication position. -func (c *Conn) MasterFilePosition() (Position, error) { +// PrimaryFilePosition returns the current primary's file based replication position. 
+func (c *Conn) PrimaryFilePosition() (Position, error) { filePosFlavor := filePosFlavor{} - gtidSet, err := filePosFlavor.masterGTIDSet(c) + gtidSet, err := filePosFlavor.primaryGTIDSet(c) if err != nil { return Position{}, err } @@ -245,22 +245,22 @@ func (c *Conn) ResetReplicationCommands() []string { // SetReplicationPositionCommands returns the commands to set the // replication position at which the replica will resume -// when it is later reparented with SetMasterCommands. +// when it is later reparented with SetReplicationSourceCommand. func (c *Conn) SetReplicationPositionCommands(pos Position) []string { return c.flavor.setReplicationPositionCommands(pos) } -// SetMasterCommand returns the command to use the provided master -// as the new master (without changing any GTID position). +// SetReplicationSourceCommand returns the command to use the provided host/port +// as the new replication source (without changing any GTID position). // It is guaranteed to be called with replication stopped. // It should not start or stop replication. 
-func (c *Conn) SetMasterCommand(params *ConnParams, masterHost string, masterPort int, masterConnectRetry int) string { +func (c *Conn) SetReplicationSourceCommand(params *ConnParams, host string, port int, connectRetry int) string { args := []string{ - fmt.Sprintf("MASTER_HOST = '%s'", masterHost), - fmt.Sprintf("MASTER_PORT = %d", masterPort), + fmt.Sprintf("MASTER_HOST = '%s'", host), + fmt.Sprintf("MASTER_PORT = %d", port), fmt.Sprintf("MASTER_USER = '%s'", params.Uname), fmt.Sprintf("MASTER_PASSWORD = '%s'", params.Pass), - fmt.Sprintf("MASTER_CONNECT_RETRY = %d", masterConnectRetry), + fmt.Sprintf("MASTER_CONNECT_RETRY = %d", connectRetry), } if params.SslEnabled() { args = append(args, "MASTER_SSL = 1") @@ -277,7 +277,7 @@ func (c *Conn) SetMasterCommand(params *ConnParams, masterHost string, masterPor if params.SslKey != "" { args = append(args, fmt.Sprintf("MASTER_SSL_KEY = '%s'", params.SslKey)) } - args = append(args, c.flavor.changeMasterArg()) + args = append(args, c.flavor.changeReplicationSourceArg()) return "CHANGE MASTER TO\n " + strings.Join(args, ",\n ") } @@ -350,9 +350,9 @@ func (c *Conn) ShowReplicationStatus() (ReplicationStatus, error) { return c.flavor.status(c) } -// parseMasterStatus parses the common fields of SHOW MASTER STATUS. -func parseMasterStatus(fields map[string]string) MasterStatus { - status := MasterStatus{} +// parsePrimaryStatus parses the common fields of SHOW MASTER STATUS. +func parsePrimaryStatus(fields map[string]string) PrimaryStatus { + status := PrimaryStatus{} fileExecPosStr := fields["Position"] file := fields["File"] @@ -369,10 +369,10 @@ func parseMasterStatus(fields map[string]string) MasterStatus { return status } -// ShowMasterStatus executes the right SHOW MASTER STATUS command, +// ShowPrimaryStatus executes the right SHOW MASTER STATUS command, // and returns a parsed executed Position, as well as file based Position. 
-func (c *Conn) ShowMasterStatus() (MasterStatus, error) { - return c.flavor.masterStatus(c) +func (c *Conn) ShowPrimaryStatus() (PrimaryStatus, error) { + return c.flavor.primaryStatus(c) } // WaitUntilPositionCommand returns the SQL command to issue diff --git a/go/mysql/flavor_filepos.go b/go/mysql/flavor_filepos.go index 33e67b76624..0e6b2f0ff4c 100644 --- a/go/mysql/flavor_filepos.go +++ b/go/mysql/flavor_filepos.go @@ -17,7 +17,6 @@ limitations under the License. package mysql import ( - "errors" "fmt" "io" "strconv" @@ -38,14 +37,14 @@ func newFilePosFlavor() flavor { return &filePosFlavor{} } -// masterGTIDSet is part of the Flavor interface. -func (flv *filePosFlavor) masterGTIDSet(c *Conn) (GTIDSet, error) { +// primaryGTIDSet is part of the Flavor interface. +func (flv *filePosFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { return nil, err } if len(qr.Rows) == 0 { - return nil, errors.New("no master status") + return nil, ErrNoPrimaryStatus } resultMap, err := resultToMap(qr) @@ -180,7 +179,7 @@ func (flv *filePosFlavor) setReplicationPositionCommands(pos Position) []string } // setReplicationPositionCommands is part of the Flavor interface. -func (flv *filePosFlavor) changeMasterArg() string { +func (flv *filePosFlavor) changeReplicationSourceArg() string { return "unsupported" } @@ -214,26 +213,26 @@ func parseFilePosReplicationStatus(resultMap map[string]string) (ReplicationStat } // masterStatus is part of the Flavor interface. -func (flv *filePosFlavor) masterStatus(c *Conn) (MasterStatus, error) { +func (flv *filePosFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) { qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { - return MasterStatus{}, err + return PrimaryStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data. We don't know how this could happen. 
- return MasterStatus{}, ErrNoMasterStatus + return PrimaryStatus{}, ErrNoPrimaryStatus } resultMap, err := resultToMap(qr) if err != nil { - return MasterStatus{}, err + return PrimaryStatus{}, err } return parseFilePosMasterStatus(resultMap) } -func parseFilePosMasterStatus(resultMap map[string]string) (MasterStatus, error) { - status := parseMasterStatus(resultMap) +func parseFilePosMasterStatus(resultMap map[string]string) (PrimaryStatus, error) { + status := parsePrimaryStatus(resultMap) status.Position = status.FilePosition diff --git a/go/mysql/flavor_filepos_test.go b/go/mysql/flavor_filepos_test.go index 0570c137b40..9af21dc7a9e 100644 --- a/go/mysql/flavor_filepos_test.go +++ b/go/mysql/flavor_filepos_test.go @@ -64,7 +64,7 @@ func TestFilePosShouldGetMasterPosition(t *testing.T) { "File": "source-bin.000003", } - want := MasterStatus{ + want := PrimaryStatus{ Position: Position{GTIDSet: filePosGTID{file: "source-bin.000003", pos: 1307}}, FilePosition: Position{GTIDSet: filePosGTID{file: "source-bin.000003", pos: 1307}}, } diff --git a/go/mysql/flavor_mariadb.go b/go/mysql/flavor_mariadb.go index 422344d4f5a..cf53e6e6521 100644 --- a/go/mysql/flavor_mariadb.go +++ b/go/mysql/flavor_mariadb.go @@ -40,8 +40,8 @@ type mariadbFlavor102 struct { var _ flavor = (*mariadbFlavor101)(nil) var _ flavor = (*mariadbFlavor102)(nil) -// masterGTIDSet is part of the Flavor interface. -func (mariadbFlavor) masterGTIDSet(c *Conn) (GTIDSet, error) { +// primaryGTIDSet is part of the Flavor interface. +func (mariadbFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { qr, err := c.ExecuteFetch("SELECT @@GLOBAL.gtid_binlog_pos", 1, false) if err != nil { return nil, err @@ -108,7 +108,7 @@ func (mariadbFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, startPos Po func (mariadbFlavor) resetReplicationCommands(c *Conn) []string { resetCommands := []string{ "STOP SLAVE", - "RESET SLAVE ALL", // "ALL" makes it forget master host:port. 
+ "RESET SLAVE ALL", // "ALL" makes it forget source host:port. "RESET MASTER", "SET GLOBAL gtid_slave_pos = ''", } @@ -130,8 +130,8 @@ func (mariadbFlavor) setReplicationPositionCommands(pos Position) []string { // Set gtid_slave_pos to tell the replica where to start // replicating. fmt.Sprintf("SET GLOBAL gtid_slave_pos = '%s'", pos), - // Set gtid_binlog_state so that if this server later becomes a - // master, it will know that it has seen everything up to and + // Set gtid_binlog_state so that if this server later becomes the + // primary, it will know that it has seen everything up to and // including 'pos'. Otherwise, if another replica asks this // server to replicate starting at exactly 'pos', this server // will throw an error when in gtid_strict_mode, since it @@ -142,7 +142,7 @@ func (mariadbFlavor) setReplicationPositionCommands(pos Position) []string { } // setReplicationPositionCommands is part of the Flavor interface. -func (mariadbFlavor) changeMasterArg() string { +func (mariadbFlavor) changeReplicationSourceArg() string { return "MASTER_USE_GTID = current_pos" } @@ -178,24 +178,24 @@ func parseMariadbReplicationStatus(resultMap map[string]string) (ReplicationStat return status, nil } -// masterStatus is part of the Flavor interface. -func (m mariadbFlavor) masterStatus(c *Conn) (MasterStatus, error) { +// primaryStatus is part of the Flavor interface. +func (m mariadbFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) { qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { - return MasterStatus{}, err + return PrimaryStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data. We don't know how this could happen. 
- return MasterStatus{}, ErrNoMasterStatus + return PrimaryStatus{}, ErrNoPrimaryStatus } resultMap, err := resultToMap(qr) if err != nil { - return MasterStatus{}, err + return PrimaryStatus{}, err } - status := parseMasterStatus(resultMap) - status.Position.GTIDSet, err = m.masterGTIDSet(c) + status := parsePrimaryStatus(resultMap) + status.Position.GTIDSet, err = m.primaryGTIDSet(c) return status, err } diff --git a/go/mysql/flavor_mariadb_test.go b/go/mysql/flavor_mariadb_test.go index 82a5b1312b4..49b7aac1d74 100644 --- a/go/mysql/flavor_mariadb_test.go +++ b/go/mysql/flavor_mariadb_test.go @@ -41,7 +41,7 @@ func TestMariadbSetMasterCommands(t *testing.T) { MASTER_USE_GTID = current_pos` conn := &Conn{flavor: mariadbFlavor101{}} - got := conn.SetMasterCommand(params, masterHost, masterPort, masterConnectRetry) + got := conn.SetReplicationSourceCommand(params, masterHost, masterPort, masterConnectRetry) if got != want { t.Errorf("mariadbFlavor.SetMasterCommands(%#v, %#v, %#v, %#v) = %#v, want %#v", params, masterHost, masterPort, masterConnectRetry, got, want) } @@ -74,7 +74,7 @@ func TestMariadbSetMasterCommandsSSL(t *testing.T) { MASTER_USE_GTID = current_pos` conn := &Conn{flavor: mariadbFlavor101{}} - got := conn.SetMasterCommand(params, masterHost, masterPort, masterConnectRetry) + got := conn.SetReplicationSourceCommand(params, masterHost, masterPort, masterConnectRetry) if got != want { t.Errorf("mariadbFlavor.SetMasterCommands(%#v, %#v, %#v, %#v) = %#v, want %#v", params, masterHost, masterPort, masterConnectRetry, got, want) } diff --git a/go/mysql/flavor_mysql.go b/go/mysql/flavor_mysql.go index 9d4dd5e08da..8f2f8e61d7f 100644 --- a/go/mysql/flavor_mysql.go +++ b/go/mysql/flavor_mysql.go @@ -43,8 +43,8 @@ var _ flavor = (*mysqlFlavor56)(nil) var _ flavor = (*mysqlFlavor57)(nil) var _ flavor = (*mysqlFlavor80)(nil) -// masterGTIDSet is part of the Flavor interface. 
-func (mysqlFlavor) masterGTIDSet(c *Conn) (GTIDSet, error) { +// primaryGTIDSet is part of the Flavor interface. +func (mysqlFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { // keep @@global as lowercase, as some servers like the Ripple binlog server only honors a lowercase `global` value qr, err := c.ExecuteFetch("SELECT @@global.gtid_executed", 1, false) if err != nil { @@ -96,7 +96,7 @@ func (mysqlFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, startPos Posi func (mysqlFlavor) resetReplicationCommands(c *Conn) []string { resetCommands := []string{ "STOP SLAVE", - "RESET SLAVE ALL", // "ALL" makes it forget master host:port. + "RESET SLAVE ALL", // "ALL" makes it forget source host:port. "RESET MASTER", // This will also clear gtid_executed and gtid_purged. } if c.SemiSyncExtensionLoaded() { @@ -114,7 +114,7 @@ func (mysqlFlavor) setReplicationPositionCommands(pos Position) []string { } // setReplicationPositionCommands is part of the Flavor interface. -func (mysqlFlavor) changeMasterArg() string { +func (mysqlFlavor) changeReplicationSourceArg() string { return "MASTER_AUTO_POSITION = 1" } @@ -166,32 +166,32 @@ func parseMysqlReplicationStatus(resultMap map[string]string) (ReplicationStatus return status, nil } -// masterStatus is part of the Flavor interface. -func (mysqlFlavor) masterStatus(c *Conn) (MasterStatus, error) { +// primaryStatus is part of the Flavor interface. +func (mysqlFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) { qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { - return MasterStatus{}, err + return PrimaryStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data. We don't know how this could happen. 
- return MasterStatus{}, ErrNoMasterStatus + return PrimaryStatus{}, ErrNoPrimaryStatus } resultMap, err := resultToMap(qr) if err != nil { - return MasterStatus{}, err + return PrimaryStatus{}, err } - return parseMysqlMasterStatus(resultMap) + return parseMysqlPrimaryStatus(resultMap) } -func parseMysqlMasterStatus(resultMap map[string]string) (MasterStatus, error) { - status := parseMasterStatus(resultMap) +func parseMysqlPrimaryStatus(resultMap map[string]string) (PrimaryStatus, error) { + status := parsePrimaryStatus(resultMap) var err error status.Position.GTIDSet, err = parseMysql56GTIDSet(resultMap["Executed_Gtid_Set"]) if err != nil { - return MasterStatus{}, vterrors.Wrapf(err, "MasterStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v)", resultMap["Executed_Gtid_Set"]) + return PrimaryStatus{}, vterrors.Wrapf(err, "PrimaryStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v)", resultMap["Executed_Gtid_Set"]) } return status, nil diff --git a/go/mysql/flavor_mysql_test.go b/go/mysql/flavor_mysql_test.go index 8f72242a891..50153011bcf 100644 --- a/go/mysql/flavor_mysql_test.go +++ b/go/mysql/flavor_mysql_test.go @@ -40,9 +40,9 @@ func TestMysql56SetMasterCommands(t *testing.T) { MASTER_AUTO_POSITION = 1` conn := &Conn{flavor: mysqlFlavor57{}} - got := conn.SetMasterCommand(params, masterHost, masterPort, masterConnectRetry) + got := conn.SetReplicationSourceCommand(params, masterHost, masterPort, masterConnectRetry) if got != want { - t.Errorf("mysqlFlavor.SetMasterCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, masterHost, masterPort, masterConnectRetry, got, want) + t.Errorf("mysqlFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, masterHost, masterPort, masterConnectRetry, got, want) } } @@ -73,7 +73,7 @@ func TestMysql56SetMasterCommandsSSL(t *testing.T) { MASTER_AUTO_POSITION = 1` conn := &Conn{flavor: mysqlFlavor57{}} - got := conn.SetMasterCommand(params, masterHost, masterPort, masterConnectRetry) + 
got := conn.SetReplicationSourceCommand(params, masterHost, masterPort, masterConnectRetry) if got != want { t.Errorf("mysqlFlavor.SetMasterCommands(%#v, %#v, %#v, %#v) = %#v, want %#v", params, masterHost, masterPort, masterConnectRetry, got, want) } @@ -136,11 +136,11 @@ func TestMysqlShouldGetMasterPosition(t *testing.T) { } sid, _ := ParseSID("3e11fa47-71ca-11e1-9e33-c80aa9429562") - want := MasterStatus{ + want := PrimaryStatus{ Position: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 5}}}}, FilePosition: Position{GTIDSet: filePosGTID{file: "source-bin.000003", pos: 1307}}, } - got, err := parseMysqlMasterStatus(resultMap) + got, err := parseMysqlPrimaryStatus(resultMap) require.NoError(t, err) assert.Equalf(t, got.Position.GTIDSet.String(), want.Position.GTIDSet.String(), "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet) assert.Equalf(t, got.FilePosition.GTIDSet.String(), want.FilePosition.GTIDSet.String(), "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) diff --git a/go/mysql/master_status.go b/go/mysql/primary_status.go similarity index 70% rename from go/mysql/master_status.go rename to go/mysql/primary_status.go index 0f5cfff679e..19ef63be60d 100644 --- a/go/mysql/master_status.go +++ b/go/mysql/primary_status.go @@ -20,16 +20,16 @@ import ( replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" ) -// MasterStatus holds replication information from SHOW MASTER STATUS. -type MasterStatus struct { - // Position represents the master's GTID based position. +// PrimaryStatus holds replication information from SHOW MASTER STATUS. +type PrimaryStatus struct { + // Position represents the server's GTID based position. Position Position - // FilePosition represents the master's file based position. + // FilePosition represents the server's file based position. FilePosition Position } -// MasterStatusToProto translates a MasterStatus to proto3. 
-func MasterStatusToProto(s MasterStatus) *replicationdatapb.MasterStatus { +// PrimaryStatusToProto translates a PrimaryStatus to proto3. +func PrimaryStatusToProto(s PrimaryStatus) *replicationdatapb.MasterStatus { return &replicationdatapb.MasterStatus{ Position: EncodePosition(s.Position), FilePosition: EncodePosition(s.FilePosition), diff --git a/go/vt/binlog/binlog_connection.go b/go/vt/binlog/binlog_connection.go index 09d5984c921..ded68cfb27f 100644 --- a/go/vt/binlog/binlog_connection.go +++ b/go/vt/binlog/binlog_connection.go @@ -100,20 +100,21 @@ func connectForReplication(cp dbconfigs.Connector) (*mysql.Conn, error) { func (bc *BinlogConnection) StartBinlogDumpFromCurrent(ctx context.Context) (mysql.Position, <-chan mysql.BinlogEvent, error) { ctx, bc.cancel = context.WithCancel(ctx) - masterPosition, err := bc.Conn.MasterPosition() + position, err := bc.Conn.PrimaryPosition() if err != nil { - return mysql.Position{}, nil, fmt.Errorf("failed to get master position: %v", err) + return mysql.Position{}, nil, fmt.Errorf("failed to get primary position: %v", err) } - c, err := bc.StartBinlogDumpFromPosition(ctx, masterPosition) - return masterPosition, c, err + c, err := bc.StartBinlogDumpFromPosition(ctx, position) + return position, c, err } // StartBinlogDumpFromPosition requests a replication binlog dump from -// the master mysqld at the given Position and then sends binlog +// the replication source mysqld (typically the primary server in the cluster) +// at the given Position and then sends binlog // events to the provided channel. // The stream will continue in the background, waiting for new events if -// necessary, until the connection is closed, either by the master or +// necessary, until the connection is closed, either by the replication source or // by canceling the context. // // Note the context is valid and used until eventChan is closed. 
@@ -166,7 +167,7 @@ func (bc *BinlogConnection) streamEvents(ctx context.Context) chan mysql.BinlogE } // StartBinlogDumpFromBinlogBeforeTimestamp requests a replication -// binlog dump from the master mysqld starting with a file that has +// binlog dump from the source mysqld starting with a file that has // timestamps smaller than the provided timestamp, and then sends // binlog events to the provided channel. // @@ -189,7 +190,7 @@ func (bc *BinlogConnection) streamEvents(ctx context.Context) chan mysql.BinlogE // given range. // // The stream will continue in the background, waiting for new events if -// necessary, until the connection is closed, either by the master or +// necessary, until the connection is closed, either by the source or // by canceling the context. // // Note the context is valid and used until eventChan is closed. diff --git a/go/vt/mysqlctl/builtinbackupengine.go b/go/vt/mysqlctl/builtinbackupengine.go index 1e727f5afcf..b435ec29439 100644 --- a/go/vt/mysqlctl/builtinbackupengine.go +++ b/go/vt/mysqlctl/builtinbackupengine.go @@ -142,10 +142,10 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP // Save initial state so we can restore. replicaStartRequired := false - sourceIsMaster := false + sourceIsPrimary := false readOnly := true //nolint var replicationPosition mysql.Position - semiSyncMaster, semiSyncReplica := params.Mysqld.SemiSyncEnabled() + semiSyncSource, semiSyncReplica := params.Mysqld.SemiSyncEnabled() // See if we need to restart replication after backup. 
params.Logger.Infof("getting current replication status") @@ -154,8 +154,8 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP case nil: replicaStartRequired = replicaStatus.ReplicationRunning() && !*DisableActiveReparents case mysql.ErrNotReplica: - // keep going if we're the master, might be a degenerate case - sourceIsMaster = true + // keep going if we're the primary, might be a degenerate case + sourceIsPrimary = true default: return false, vterrors.Wrap(err, "can't get replica status") } @@ -167,16 +167,16 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP } // get the replication position - if sourceIsMaster { + if sourceIsPrimary { if !readOnly { - params.Logger.Infof("turning master read-only before backup") + params.Logger.Infof("turning primary read-only before backup") if err = params.Mysqld.SetReadOnly(true); err != nil { return false, vterrors.Wrap(err, "can't set read-only status") } } - replicationPosition, err = params.Mysqld.MasterPosition() + replicationPosition, err = params.Mysqld.PrimaryPosition() if err != nil { - return false, vterrors.Wrap(err, "can't get master position") + return false, vterrors.Wrap(err, "can't get position on primary") } } else { if err = params.Mysqld.StopReplication(params.HookExtraEnv); err != nil { @@ -216,12 +216,12 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP } // Restore original mysqld state that we saved above. - if semiSyncMaster || semiSyncReplica { + if semiSyncSource || semiSyncReplica { // Only do this if one of them was on, since both being off could mean // the plugin isn't even loaded, and the server variables don't exist. 
params.Logger.Infof("restoring semi-sync settings from before backup: master=%v, replica=%v", - semiSyncMaster, semiSyncReplica) - err := params.Mysqld.SetSemiSyncEnabled(semiSyncMaster, semiSyncReplica) + semiSyncSource, semiSyncReplica) + err := params.Mysqld.SetSemiSyncEnabled(semiSyncSource, semiSyncReplica) if err != nil { return usable, err } @@ -240,8 +240,8 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP // Wait for a reliable value for SecondsBehindMaster from ReplicationStatus() // We know that we stopped at replicationPosition. - // If MasterPosition is the same, that means no writes - // have happened to master, so we are up-to-date. + // If PrimaryPosition is the same, that means no writes + // have happened to primary, so we are up-to-date. // Otherwise, we wait for replica's Position to change from // the saved replicationPosition before proceeding tmc := tmclient.NewTabletManagerClient() @@ -249,12 +249,12 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP remoteCtx, remoteCancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) defer remoteCancel() - masterPos, err := getMasterPosition(remoteCtx, tmc, params.TopoServer, params.Keyspace, params.Shard) - // If we are unable to get master position, return error. + pos, err := getPrimaryPosition(remoteCtx, tmc, params.TopoServer, params.Keyspace, params.Shard) + // If we are unable to get the primary's position, return error. 
if err != nil { return usable, err } - if !replicationPosition.Equal(masterPos) { + if !replicationPosition.Equal(pos) { for { if err := ctx.Err(); err != nil { return usable, err @@ -621,7 +621,7 @@ func (be *BuiltinBackupEngine) ShouldDrainForBackup() bool { return true } -func getMasterPosition(ctx context.Context, tmc tmclient.TabletManagerClient, ts *topo.Server, keyspace, shard string) (mysql.Position, error) { +func getPrimaryPosition(ctx context.Context, tmc tmclient.TabletManagerClient, ts *topo.Server, keyspace, shard string) (mysql.Position, error) { si, err := ts.GetShard(ctx, keyspace, shard) if err != nil { return mysql.Position{}, vterrors.Wrap(err, "can't read shard") diff --git a/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go b/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go index 9d2fa9ef6da..f09ddbf71c6 100644 --- a/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go +++ b/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go @@ -68,12 +68,12 @@ type FakeMysqlDaemon struct { Replicating bool // IOThreadRunning is always true except in one testcase - // where we want to test error handling during SetMaster + // where we want to test error handling during SetReplicationSource IOThreadRunning bool - // CurrentMasterPosition is returned by MasterPosition + // CurrentPrimaryPosition is returned by PrimaryPosition // and ReplicationStatus - CurrentMasterPosition mysql.Position + CurrentPrimaryPosition mysql.Position // CurrentMasterFilePosition is used to determine the executed file based positioning of the master. 
CurrentMasterFilePosition mysql.Position @@ -84,8 +84,8 @@ type FakeMysqlDaemon struct { // StartReplicationError is used by StartReplication StartReplicationError error - // MasterStatusError is used by MasterStatus - MasterStatusError error + // PrimaryStatusError is used by PrimaryStatus + PrimaryStatusError error // CurrentMasterHost is returned by ReplicationStatus CurrentMasterHost string @@ -109,16 +109,16 @@ type FakeMysqlDaemon struct { // StartReplicationUntilAfterPos is matched against the input StartReplicationUntilAfterPos mysql.Position - // SetMasterInput is matched against the input of SetMaster - // (as "%v:%v"). If it doesn't match, SetMaster will return an error. - SetMasterInput string + // SetReplicationSourceInput is matched against the input of SetReplicationSource + // (as "%v:%v"). If it doesn't match, SetReplicationSource will return an error. + SetReplicationSourceInput string - // SetMasterError is used by SetMaster - SetMasterError error + // SetReplicationSourceError is used by SetReplicationSource + SetReplicationSourceError error - // WaitMasterPosition is checked by WaitMasterPos, if the + // WaitPrimaryPosition is checked by WaitSourcePos, if the // same it returns nil, if different it returns an error - WaitMasterPosition mysql.Position + WaitPrimaryPosition mysql.Position // PromoteResult is returned by Promote PromoteResult mysql.Position @@ -250,11 +250,11 @@ func (fmd *FakeMysqlDaemon) GetMysqlPort() (int32, error) { return fmd.MysqlPort.Get(), nil } -// CurrentMasterPositionLocked is thread-safe -func (fmd *FakeMysqlDaemon) CurrentMasterPositionLocked(pos mysql.Position) { +// CurrentPrimaryPositionLocked is thread-safe +func (fmd *FakeMysqlDaemon) CurrentPrimaryPositionLocked(pos mysql.Position) { fmd.mu.Lock() defer fmd.mu.Unlock() - fmd.CurrentMasterPosition = pos + fmd.CurrentPrimaryPosition = pos } // ReplicationStatus is part of the MysqlDaemon interface @@ -265,7 +265,7 @@ func (fmd *FakeMysqlDaemon) 
ReplicationStatus() (mysql.ReplicationStatus, error) fmd.mu.Lock() defer fmd.mu.Unlock() return mysql.ReplicationStatus{ - Position: fmd.CurrentMasterPosition, + Position: fmd.CurrentPrimaryPosition, FilePosition: fmd.CurrentMasterFilePosition, FileRelayLogPosition: fmd.CurrentMasterFilePosition, SecondsBehindMaster: fmd.SecondsBehindMaster, @@ -278,13 +278,13 @@ func (fmd *FakeMysqlDaemon) ReplicationStatus() (mysql.ReplicationStatus, error) }, nil } -// MasterStatus is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) MasterStatus(ctx context.Context) (mysql.MasterStatus, error) { - if fmd.MasterStatusError != nil { - return mysql.MasterStatus{}, fmd.MasterStatusError +// PrimaryStatus is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) PrimaryStatus(ctx context.Context) (mysql.PrimaryStatus, error) { + if fmd.PrimaryStatusError != nil { + return mysql.PrimaryStatus{}, fmd.PrimaryStatusError } - return mysql.MasterStatus{ - Position: fmd.CurrentMasterPosition, + return mysql.PrimaryStatus{ + Position: fmd.CurrentPrimaryPosition, FilePosition: fmd.CurrentMasterFilePosition, }, nil } @@ -296,9 +296,9 @@ func (fmd *FakeMysqlDaemon) ResetReplication(ctx context.Context) error { }) } -// MasterPosition is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) MasterPosition() (mysql.Position, error) { - return fmd.CurrentMasterPosition, nil +// PrimaryPosition is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) PrimaryPosition() (mysql.Position, error) { + return fmd.CurrentPrimaryPosition, nil } // IsReadOnly is part of the MysqlDaemon interface @@ -373,14 +373,14 @@ func (fmd *FakeMysqlDaemon) SetReplicationPosition(ctx context.Context, pos mysq }) } -// SetMaster is part of the MysqlDaemon interface. 
-func (fmd *FakeMysqlDaemon) SetMaster(ctx context.Context, masterHost string, masterPort int, stopReplicationBefore bool, startReplicationAfter bool) error { +// SetReplicationSource is part of the MysqlDaemon interface. +func (fmd *FakeMysqlDaemon) SetReplicationSource(ctx context.Context, masterHost string, masterPort int, stopReplicationBefore bool, startReplicationAfter bool) error { input := fmt.Sprintf("%v:%v", masterHost, masterPort) - if fmd.SetMasterInput != input { - return fmt.Errorf("wrong input for SetMasterCommands: expected %v got %v", fmd.SetMasterInput, input) + if fmd.SetReplicationSourceInput != input { + return fmt.Errorf("wrong input for SetReplicationSourceCommands: expected %v got %v", fmd.SetReplicationSourceInput, input) } - if fmd.SetMasterError != nil { - return fmd.SetMasterError + if fmd.SetReplicationSourceError != nil { + return fmd.SetReplicationSourceError } cmds := []string{} if stopReplicationBefore { @@ -398,20 +398,20 @@ func (fmd *FakeMysqlDaemon) WaitForReparentJournal(ctx context.Context, timeCrea return nil } -// DemoteMaster is deprecated: use mysqld.MasterPosition() instead +// DemoteMaster is deprecated: use mysqld.PrimaryPosition() instead func (fmd *FakeMysqlDaemon) DemoteMaster() (mysql.Position, error) { - return fmd.CurrentMasterPosition, nil + return fmd.CurrentPrimaryPosition, nil } -// WaitMasterPos is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) WaitMasterPos(_ context.Context, pos mysql.Position) error { +// WaitSourcePos is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) WaitSourcePos(_ context.Context, pos mysql.Position) error { if fmd.TimeoutHook != nil { return fmd.TimeoutHook() } - if reflect.DeepEqual(fmd.WaitMasterPosition, pos) { + if reflect.DeepEqual(fmd.WaitPrimaryPosition, pos) { return nil } - return fmt.Errorf("wrong input for WaitMasterPos: expected %v got %v", fmd.WaitMasterPosition, pos) + return fmt.Errorf("wrong input for WaitSourcePos: expected %v got %v", 
fmd.WaitPrimaryPosition, pos) } // Promote is part of the MysqlDaemon interface diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index 9409851be23..fcc85a4f43e 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -47,24 +47,24 @@ type MysqlDaemon interface { StopReplication(hookExtraEnv map[string]string) error StopIOThread(ctx context.Context) error ReplicationStatus() (mysql.ReplicationStatus, error) - MasterStatus(ctx context.Context) (mysql.MasterStatus, error) - SetSemiSyncEnabled(master, replica bool) error - SemiSyncEnabled() (master, replica bool) + PrimaryStatus(ctx context.Context) (mysql.PrimaryStatus, error) + SetSemiSyncEnabled(source, replica bool) error + SemiSyncEnabled() (source, replica bool) SemiSyncReplicationStatus() (bool, error) // reparenting related methods ResetReplication(ctx context.Context) error - MasterPosition() (mysql.Position, error) + PrimaryPosition() (mysql.Position, error) IsReadOnly() (bool, error) SetReadOnly(on bool) error SetSuperReadOnly(on bool) error SetReplicationPosition(ctx context.Context, pos mysql.Position) error - SetMaster(ctx context.Context, masterHost string, masterPort int, stopReplicationBefore bool, startReplicationAfter bool) error + SetReplicationSource(ctx context.Context, host string, port int, stopReplicationBefore bool, startReplicationAfter bool) error WaitForReparentJournal(ctx context.Context, timeCreatedNS int64) error - WaitMasterPos(context.Context, mysql.Position) error + WaitSourcePos(context.Context, mysql.Position) error - // Promote makes the current server master. It will not change + // Promote makes the current server the primary. It will not change // the read_only state of the server. 
Promote(map[string]string) (mysql.Position, error) diff --git a/go/vt/mysqlctl/reparent.go b/go/vt/mysqlctl/reparent.go index 5e9c0c8ccf2..8a93bf84131 100644 --- a/go/vt/mysqlctl/reparent.go +++ b/go/vt/mysqlctl/reparent.go @@ -116,5 +116,5 @@ func (mysqld *Mysqld) Promote(hookExtraEnv map[string]string) (mysql.Position, e if err := mysqld.executeSuperQueryListConn(ctx, conn, cmds); err != nil { return mysql.Position{}, err } - return conn.MasterPosition() + return conn.PrimaryPosition() } diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index 75b81a66b46..0d0d4b7ecbf 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -190,11 +190,6 @@ func (mysqld *Mysqld) SetReadOnly(on bool) error { return mysqld.ExecuteSuperQuery(context.TODO(), query) } -var ( - // ErrNotMaster means there is no master status - ErrNotMaster = errors.New("no master status") -) - // SetSuperReadOnly set/unset the super_read_only flag func (mysqld *Mysqld) SetSuperReadOnly(on bool) error { query := "SET GLOBAL super_read_only = " @@ -206,8 +201,8 @@ func (mysqld *Mysqld) SetSuperReadOnly(on bool) error { return mysqld.ExecuteSuperQuery(context.TODO(), query) } -// WaitMasterPos lets replicas wait to given replication position -func (mysqld *Mysqld) WaitMasterPos(ctx context.Context, targetPos mysql.Position) error { +// WaitSourcePos lets replicas wait to given replication position +func (mysqld *Mysqld) WaitSourcePos(ctx context.Context, targetPos mysql.Position) error { // Get a connection. conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { @@ -223,9 +218,9 @@ func (mysqld *Mysqld) WaitMasterPos(ctx context.Context, targetPos mysql.Positio // If we are the master, WaitUntilFilePositionCommand will fail. // But position is most likely reached. So, check the position // first. 
- mpos, err := conn.MasterFilePosition() + mpos, err := conn.PrimaryFilePosition() if err != nil { - return fmt.Errorf("WaitMasterPos: MasterFilePosition failed: %v", err) + return fmt.Errorf("WaitSourcePos: PrimaryFilePosition failed: %v", err) } if mpos.AtLeast(targetPos) { return nil @@ -241,9 +236,9 @@ func (mysqld *Mysqld) WaitMasterPos(ctx context.Context, targetPos mysql.Positio // If we are the master, WaitUntilPositionCommand will fail. // But position is most likely reached. So, check the position // first. - mpos, err := conn.MasterPosition() + mpos, err := conn.PrimaryPosition() if err != nil { - return fmt.Errorf("WaitMasterPos: MasterPosition failed: %v", err) + return fmt.Errorf("WaitSourcePos: PrimaryPosition failed: %v", err) } if mpos.AtLeast(targetPos) { return nil @@ -285,26 +280,26 @@ func (mysqld *Mysqld) ReplicationStatus() (mysql.ReplicationStatus, error) { return conn.ShowReplicationStatus() } -// MasterStatus returns the master replication statuses -func (mysqld *Mysqld) MasterStatus(ctx context.Context) (mysql.MasterStatus, error) { +// PrimaryStatus returns the master replication statuses +func (mysqld *Mysqld) PrimaryStatus(ctx context.Context) (mysql.PrimaryStatus, error) { conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { - return mysql.MasterStatus{}, err + return mysql.PrimaryStatus{}, err } defer conn.Recycle() - return conn.ShowMasterStatus() + return conn.ShowPrimaryStatus() } -// MasterPosition returns the master replication position. -func (mysqld *Mysqld) MasterPosition() (mysql.Position, error) { +// PrimaryPosition returns the master replication position. 
+func (mysqld *Mysqld) PrimaryPosition() (mysql.Position, error) { conn, err := getPoolReconnect(context.TODO(), mysqld.dbaPool) if err != nil { return mysql.Position{}, err } defer conn.Recycle() - return conn.MasterPosition() + return conn.PrimaryPosition() } // SetReplicationPosition sets the replication position at which the replica will resume @@ -321,9 +316,9 @@ func (mysqld *Mysqld) SetReplicationPosition(ctx context.Context, pos mysql.Posi return mysqld.executeSuperQueryListConn(ctx, conn, cmds) } -// SetMaster makes the provided host / port the master. It optionally +// SetReplicationSource makes the provided host / port the master. It optionally // stops replication before, and starts it after. -func (mysqld *Mysqld) SetMaster(ctx context.Context, masterHost string, masterPort int, replicationStopBefore bool, replicationStartAfter bool) error { +func (mysqld *Mysqld) SetReplicationSource(ctx context.Context, masterHost string, masterPort int, replicationStopBefore bool, replicationStartAfter bool) error { params, err := mysqld.dbcfgs.ReplConnector().MysqlParams() if err != nil { return err @@ -338,7 +333,7 @@ func (mysqld *Mysqld) SetMaster(ctx context.Context, masterHost string, masterPo if replicationStopBefore { cmds = append(cmds, conn.StopReplicationCommand()) } - smc := conn.SetMasterCommand(params, masterHost, masterPort, int(masterConnectRetry.Seconds())) + smc := conn.SetReplicationSourceCommand(params, masterHost, masterPort, int(masterConnectRetry.Seconds())) cmds = append(cmds, smc) if replicationStartAfter { cmds = append(cmds, conn.StartReplicationCommand()) diff --git a/go/vt/mysqlctl/xtrabackupengine.go b/go/vt/mysqlctl/xtrabackupengine.go index a87b6e9beee..d1935acb144 100644 --- a/go/vt/mysqlctl/xtrabackupengine.go +++ b/go/vt/mysqlctl/xtrabackupengine.go @@ -139,7 +139,7 @@ func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupPara if err != nil { return false, vterrors.Wrap(err, "unable to obtain a connection to 
the database") } - pos, err := conn.MasterPosition() + pos, err := conn.PrimaryPosition() if err != nil { return false, vterrors.Wrap(err, "unable to obtain master position") } diff --git a/go/vt/orchestrator/agent/agent.go b/go/vt/orchestrator/agent/agent.go deleted file mode 100644 index 775a24af16a..00000000000 --- a/go/vt/orchestrator/agent/agent.go +++ /dev/null @@ -1,82 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package agent - -import "vitess.io/vitess/go/vt/orchestrator/inst" - -// LogicalVolume describes an LVM volume -type LogicalVolume struct { - Name string - GroupName string - Path string - IsSnapshot bool - SnapshotPercent float64 -} - -// Mount describes a file system mount point -type Mount struct { - Path string - Device string - LVPath string - FileSystem string - IsMounted bool - DiskUsage int64 - MySQLDataPath string - MySQLDiskUsage int64 -} - -// Agent presents the data of an agent -type Agent struct { - Hostname string - Port int - Token string - LastSubmitted string - AvailableLocalSnapshots []string - AvailableSnapshots []string - LogicalVolumes []LogicalVolume - MountPoint Mount - MySQLRunning bool - MySQLDiskUsage int64 - MySQLPort int64 - MySQLDatadirDiskFree int64 - MySQLErrorLogTail []string -} - -// SeedOperation makes for the high level data & state of a seed operation -type SeedOperation struct { - SeedId int64 - TargetHostname string - SourceHostname string - StartTimestamp string - 
EndTimestamp string - IsComplete bool - IsSuccessful bool -} - -// SeedOperationState represents a single state (step) in a seed operation -type SeedOperationState struct { - SeedStateId int64 - SeedId int64 - StateTimestamp string - Action string - ErrorMessage string -} - -// Build an instance key for a given agent -func (this *Agent) GetInstance() *inst.InstanceKey { - return &inst.InstanceKey{Hostname: this.Hostname, Port: int(this.MySQLPort)} -} diff --git a/go/vt/orchestrator/agent/agent_dao.go b/go/vt/orchestrator/agent/agent_dao.go deleted file mode 100644 index 5844a1ff7b1..00000000000 --- a/go/vt/orchestrator/agent/agent_dao.go +++ /dev/null @@ -1,944 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package agent - -import ( - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "strings" - "sync" - "time" - - "vitess.io/vitess/go/vt/orchestrator/config" - "vitess.io/vitess/go/vt/orchestrator/db" - "vitess.io/vitess/go/vt/orchestrator/external/golib/log" - "vitess.io/vitess/go/vt/orchestrator/external/golib/sqlutils" - "vitess.io/vitess/go/vt/orchestrator/inst" -) - -type httpMethodFunc func(uri string) (resp *http.Response, err error) - -var SeededAgents chan *Agent = make(chan *Agent) - -var httpClient *http.Client -var httpClientMutex = &sync.Mutex{} - -// InitHttpClient gets called once, and initializes httpClient according to config.Config -func InitHttpClient() { - httpClientMutex.Lock() - defer httpClientMutex.Unlock() - - if httpClient != nil { - return - } - - httpTimeout := time.Duration(time.Duration(config.AgentHttpTimeoutSeconds) * time.Second) - dialTimeout := func(network, addr string) (net.Conn, error) { - return net.DialTimeout(network, addr, httpTimeout) - } - httpTransport := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: config.Config.AgentSSLSkipVerify}, - Dial: dialTimeout, - ResponseHeaderTimeout: httpTimeout, - } - httpClient = &http.Client{Transport: httpTransport} -} - -// httpGet is a convenience method for getting http response from URL, optionaly skipping SSL cert verification -func httpGet(url string) (resp *http.Response, err error) { - return httpClient.Get(url) -} - -// httpPost is a convenience method for posting text data -func httpPost(url string, bodyType string, content string) (resp *http.Response, err error) { - return httpClient.Post(url, bodyType, strings.NewReader(content)) -} - -// AuditAgentOperation creates and writes a new audit entry by given agent -func auditAgentOperation(auditType string, agent *Agent, message string) error { - instanceKey := &inst.InstanceKey{} - if agent != nil { - instanceKey = &inst.InstanceKey{Hostname: agent.Hostname, Port: 
int(agent.MySQLPort)} - } - return inst.AuditOperation(auditType, instanceKey, message) -} - -// readResponse returns the body of an HTTP response -func readResponse(res *http.Response, err error) ([]byte, error) { - if err != nil { - return nil, err - } - defer res.Body.Close() - - body, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - - if res.Status == "500" { - return body, errors.New("Response Status 500") - } - - return body, nil -} - -// SubmitAgent submits a new agent for listing -func SubmitAgent(hostname string, port int, token string) (string, error) { - _, err := db.ExecOrchestrator(` - replace - into host_agent ( - hostname, port, token, last_submitted, count_mysql_snapshots - ) VALUES ( - ?, ?, ?, NOW(), 0 - ) - `, - hostname, - port, - token, - ) - if err != nil { - return "", log.Errore(err) - } - - // Try to discover topology instances when an agent submits - go DiscoverAgentInstance(hostname, port) - - return hostname, err -} - -// If a mysql port is available, try to discover against it -func DiscoverAgentInstance(hostname string, port int) error { - agent, err := GetAgent(hostname) - if err != nil { - log.Errorf("Couldn't get agent for %s: %v", hostname, err) - return err - } - - instanceKey := agent.GetInstance() - instance, err := inst.ReadTopologyInstance(instanceKey) - if err != nil { - log.Errorf("Failed to read topology for %v. err=%+v", instanceKey, err) - return err - } - if instance == nil { - log.Errorf("Failed to read topology for %v", instanceKey) - return err - } - log.Infof("Discovered Agent Instance: %v", instance.Key) - return nil -} - -// ForgetLongUnseenAgents will remove entries of all agents that have long since been last seen. -func ForgetLongUnseenAgents() error { - _, err := db.ExecOrchestrator(` - delete - from host_agent - where - last_submitted < NOW() - interval ? 
hour`, - config.Config.UnseenAgentForgetHours, - ) - return err -} - -// ReadOutdatedAgentsHosts returns agents that need to be updated -func ReadOutdatedAgentsHosts() ([]string, error) { - res := []string{} - query := ` - select - hostname - from - host_agent - where - IFNULL(last_checked < now() - interval ? minute, 1) - ` - err := db.QueryOrchestrator(query, sqlutils.Args(config.Config.AgentPollMinutes), func(m sqlutils.RowMap) error { - hostname := m.GetString("hostname") - res = append(res, hostname) - return nil - }) - - if err != nil { - log.Errore(err) - } - return res, err -} - -// ReadAgents returns a list of all known agents -func ReadAgents() ([]Agent, error) { - res := []Agent{} - query := ` - select - hostname, - port, - token, - last_submitted, - mysql_port - from - host_agent - order by - hostname - ` - err := db.QueryOrchestratorRowsMap(query, func(m sqlutils.RowMap) error { - agent := Agent{} - agent.Hostname = m.GetString("hostname") - agent.Port = m.GetInt("port") - agent.MySQLPort = m.GetInt64("mysql_port") - agent.Token = "" - agent.LastSubmitted = m.GetString("last_submitted") - - res = append(res, agent) - return nil - }) - - if err != nil { - log.Errore(err) - } - return res, err - -} - -// readAgentBasicInfo returns the basic data for an agent directly from backend table (no agent access) -func readAgentBasicInfo(hostname string) (Agent, string, error) { - agent := Agent{} - token := "" - query := ` - select - hostname, - port, - token, - last_submitted, - mysql_port - from - host_agent - where - hostname = ? 
- ` - err := db.QueryOrchestrator(query, sqlutils.Args(hostname), func(m sqlutils.RowMap) error { - agent.Hostname = m.GetString("hostname") - agent.Port = m.GetInt("port") - agent.LastSubmitted = m.GetString("last_submitted") - agent.MySQLPort = m.GetInt64("mysql_port") - token = m.GetString("token") - - return nil - }) - if err != nil { - return agent, "", err - } - - if token == "" { - return agent, "", log.Errorf("Cannot get agent/token: %s", hostname) - } - return agent, token, nil -} - -// UpdateAgentLastChecked updates the last_check timestamp in the orchestrator backed database -// for a given agent -func UpdateAgentLastChecked(hostname string) error { - _, err := db.ExecOrchestrator(` - update - host_agent - set - last_checked = NOW() - where - hostname = ?`, - hostname, - ) - if err != nil { - return log.Errore(err) - } - - return nil -} - -// UpdateAgentInfo updates some agent state in backend table -func UpdateAgentInfo(hostname string, agent Agent) error { - _, err := db.ExecOrchestrator(` - update - host_agent - set - last_seen = NOW(), - mysql_port = ?, - count_mysql_snapshots = ? - where - hostname = ?`, - agent.MySQLPort, - len(agent.LogicalVolumes), - hostname, - ) - if err != nil { - return log.Errore(err) - } - - return nil -} - -// baseAgentUri returns the base URI for accessing an agent -func baseAgentUri(agentHostname string, agentPort int) string { - protocol := "http" - if config.Config.AgentsUseSSL { - protocol = "https" - } - uri := fmt.Sprintf("%s://%s:%d/api", protocol, agentHostname, agentPort) - log.Debugf("orchestrator-agent uri: %s", uri) - return uri -} - -// GetAgent gets a single agent status from the agent service. This involves multiple HTTP requests. -func GetAgent(hostname string) (Agent, error) { - agent, token, err := readAgentBasicInfo(hostname) - if err != nil { - return agent, log.Errore(err) - } - - // All seems to be in order. 
Now make some inquiries from orchestrator-agent service: - { - uri := baseAgentUri(agent.Hostname, agent.Port) - log.Debugf("orchestrator-agent uri: %s", uri) - - { - availableLocalSnapshotsUri := fmt.Sprintf("%s/available-snapshots-local?token=%s", uri, token) - body, err := readResponse(httpGet(availableLocalSnapshotsUri)) - if err == nil { - err = json.Unmarshal(body, &agent.AvailableLocalSnapshots) - } - if err != nil { - log.Errore(err) - } - } - { - availableSnapshotsUri := fmt.Sprintf("%s/available-snapshots?token=%s", uri, token) - body, err := readResponse(httpGet(availableSnapshotsUri)) - if err == nil { - err = json.Unmarshal(body, &agent.AvailableSnapshots) - } - if err != nil { - log.Errore(err) - } - } - { - lvSnapshotsUri := fmt.Sprintf("%s/lvs-snapshots?token=%s", uri, token) - body, err := readResponse(httpGet(lvSnapshotsUri)) - if err == nil { - err = json.Unmarshal(body, &agent.LogicalVolumes) - } - if err != nil { - log.Errore(err) - } - } - { - mountUri := fmt.Sprintf("%s/mount?token=%s", uri, token) - body, err := readResponse(httpGet(mountUri)) - if err == nil { - err = json.Unmarshal(body, &agent.MountPoint) - } - if err != nil { - log.Errore(err) - } - } - { - mySQLRunningUri := fmt.Sprintf("%s/mysql-status?token=%s", uri, token) - body, err := readResponse(httpGet(mySQLRunningUri)) - if err == nil { - _ = json.Unmarshal(body, &agent.MySQLRunning) - } - // Actually an error is OK here since "status" returns with non-zero exit code when MySQL not running - } - { - mySQLRunningUri := fmt.Sprintf("%s/mysql-port?token=%s", uri, token) - body, err := readResponse(httpGet(mySQLRunningUri)) - if err == nil { - err = json.Unmarshal(body, &agent.MySQLPort) - } - if err != nil { - log.Errore(err) - } - } - { - mySQLDiskUsageUri := fmt.Sprintf("%s/mysql-du?token=%s", uri, token) - body, err := readResponse(httpGet(mySQLDiskUsageUri)) - if err == nil { - err = json.Unmarshal(body, &agent.MySQLDiskUsage) - } - if err != nil { - log.Errore(err) - } - } - 
{ - mySQLDatadirDiskFreeUri := fmt.Sprintf("%s/mysql-datadir-available-space?token=%s", uri, token) - body, err := readResponse(httpGet(mySQLDatadirDiskFreeUri)) - if err == nil { - err = json.Unmarshal(body, &agent.MySQLDatadirDiskFree) - } - if err != nil { - log.Errore(err) - } - } - { - errorLogTailUri := fmt.Sprintf("%s/mysql-error-log-tail?token=%s", uri, token) - body, err := readResponse(httpGet(errorLogTailUri)) - if err == nil { - err = json.Unmarshal(body, &agent.MySQLErrorLogTail) - } - if err != nil { - log.Errore(err) - } - } - } - return agent, err -} - -// executeAgentCommandWithMethodFunc requests an agent to execute a command via HTTP api, either GET or POST, -// with specific http method implementation by the caller -func executeAgentCommandWithMethodFunc(hostname string, command string, methodFunc httpMethodFunc, onResponse *func([]byte)) (Agent, error) { - agent, token, err := readAgentBasicInfo(hostname) - if err != nil { - return agent, err - } - - // All seems to be in order. 
Now make some inquiries from orchestrator-agent service: - uri := baseAgentUri(agent.Hostname, agent.Port) - - var fullCommand string - if strings.Contains(command, "?") { - fullCommand = fmt.Sprintf("%s&token=%s", command, token) - } else { - fullCommand = fmt.Sprintf("%s?token=%s", command, token) - } - log.Debugf("orchestrator-agent command: %s", fullCommand) - agentCommandUri := fmt.Sprintf("%s/%s", uri, fullCommand) - - body, err := readResponse(methodFunc(agentCommandUri)) - if err != nil { - return agent, log.Errore(err) - } - if onResponse != nil { - (*onResponse)(body) - } - auditAgentOperation("agent-command", &agent, command) - - return agent, err -} - -// executeAgentCommand requests an agent to execute a command via HTTP api -func executeAgentCommand(hostname string, command string, onResponse *func([]byte)) (Agent, error) { - httpFunc := func(uri string) (resp *http.Response, err error) { - return httpGet(uri) - } - return executeAgentCommandWithMethodFunc(hostname, command, httpFunc, onResponse) -} - -// executeAgentPostCommand requests an agent to execute a command via HTTP POST -func executeAgentPostCommand(hostname string, command string, content string, onResponse *func([]byte)) (Agent, error) { - httpFunc := func(uri string) (resp *http.Response, err error) { - return httpPost(uri, "text/plain", content) - } - return executeAgentCommandWithMethodFunc(hostname, command, httpFunc, onResponse) -} - -// Unmount unmounts the designated snapshot mount point -func Unmount(hostname string) (Agent, error) { - return executeAgentCommand(hostname, "umount", nil) -} - -// MountLV requests an agent to mount the given volume on the designated mount point -func MountLV(hostname string, lv string) (Agent, error) { - return executeAgentCommand(hostname, fmt.Sprintf("mountlv?lv=%s", lv), nil) -} - -// RemoveLV requests an agent to remove a snapshot -func RemoveLV(hostname string, lv string) (Agent, error) { - return executeAgentCommand(hostname, 
fmt.Sprintf("removelv?lv=%s", lv), nil) -} - -// CreateSnapshot requests an agent to create a new snapshot -- a DIY implementation -func CreateSnapshot(hostname string) (Agent, error) { - return executeAgentCommand(hostname, "create-snapshot", nil) -} - -// deleteMySQLDatadir requests an agent to purge the MySQL data directory (step before seed) -func deleteMySQLDatadir(hostname string) (Agent, error) { - return executeAgentCommand(hostname, "delete-mysql-datadir", nil) -} - -// MySQLStop requests an agent to stop MySQL service -func MySQLStop(hostname string) (Agent, error) { - return executeAgentCommand(hostname, "mysql-stop", nil) -} - -// MySQLStart requests an agent to start the MySQL service -func MySQLStart(hostname string) (Agent, error) { - return executeAgentCommand(hostname, "mysql-start", nil) -} - -// ReceiveMySQLSeedData requests an agent to start listening for incoming seed data -func ReceiveMySQLSeedData(hostname string, seedId int64) (Agent, error) { - return executeAgentCommand(hostname, fmt.Sprintf("receive-mysql-seed-data/%d", seedId), nil) -} - -// SendMySQLSeedData requests an agent to start sending seed data -func SendMySQLSeedData(hostname string, targetHostname string, seedId int64) (Agent, error) { - return executeAgentCommand(hostname, fmt.Sprintf("send-mysql-seed-data/%s/%d", targetHostname, seedId), nil) -} - -// AbortSeedCommand requests an agent to abort seed send/receive (depending on the agent's role) -func AbortSeedCommand(hostname string, seedId int64) (Agent, error) { - return executeAgentCommand(hostname, fmt.Sprintf("abort-seed/%d", seedId), nil) -} - -func CustomCommand(hostname string, cmd string) (output string, err error) { - onResponse := func(body []byte) { - output = string(body) - log.Debugf("output: %v", output) - } - - _, err = executeAgentCommand(hostname, fmt.Sprintf("custom-commands/%s", cmd), &onResponse) - return output, err -} - -// seedCommandCompleted checks an agent to see if it thinks a seed was completed. 
-func seedCommandCompleted(hostname string, seedId int64) (Agent, bool, error) { - result := false - onResponse := func(body []byte) { - json.Unmarshal(body, &result) - } - agent, err := executeAgentCommand(hostname, fmt.Sprintf("seed-command-completed/%d", seedId), &onResponse) - return agent, result, err -} - -// seedCommandCompleted checks an agent to see if it thinks a seed was successful. -func seedCommandSucceeded(hostname string, seedId int64) (Agent, bool, error) { - result := false - onResponse := func(body []byte) { - json.Unmarshal(body, &result) - } - agent, err := executeAgentCommand(hostname, fmt.Sprintf("seed-command-succeeded/%d", seedId), &onResponse) - return agent, result, err -} - -// AbortSeed will contact agents associated with a seed and request abort. -func AbortSeed(seedId int64) error { - seedOperations, err := AgentSeedDetails(seedId) - if err != nil { - return log.Errore(err) - } - - for _, seedOperation := range seedOperations { - AbortSeedCommand(seedOperation.TargetHostname, seedId) - AbortSeedCommand(seedOperation.SourceHostname, seedId) - } - updateSeedComplete(seedId, errors.New("Aborted")) - return nil -} - -// PostCopy will request an agent to invoke post-copy commands -func PostCopy(hostname, sourceHostname string) (Agent, error) { - return executeAgentCommand(hostname, fmt.Sprintf("post-copy/?sourceHost=%s", sourceHostname), nil) -} - -// SubmitSeedEntry submits a new seed operation entry, returning its unique ID -func SubmitSeedEntry(targetHostname string, sourceHostname string) (int64, error) { - res, err := db.ExecOrchestrator(` - insert - into agent_seed ( - target_hostname, source_hostname, start_timestamp - ) VALUES ( - ?, ?, NOW() - ) - `, - targetHostname, - sourceHostname, - ) - if err != nil { - return 0, log.Errore(err) - } - id, err := res.LastInsertId() - - return id, err -} - -// updateSeedComplete updates the seed entry, signing for completion -func updateSeedComplete(seedId int64, seedError error) error { - _, 
err := db.ExecOrchestrator(` - update - agent_seed - set end_timestamp = NOW(), - is_complete = 1, - is_successful = ? - where - agent_seed_id = ? - `, - (seedError == nil), - seedId, - ) - if err != nil { - return log.Errore(err) - } - - return nil -} - -// submitSeedStateEntry submits a seed state: a single step in the overall seed process -func submitSeedStateEntry(seedId int64, action string, errorMessage string) (int64, error) { - res, err := db.ExecOrchestrator(` - insert - into agent_seed_state ( - agent_seed_id, state_timestamp, state_action, error_message - ) VALUES ( - ?, NOW(), ?, ? - ) - `, - seedId, - action, - errorMessage, - ) - if err != nil { - return 0, log.Errore(err) - } - id, err := res.LastInsertId() - - return id, err -} - -// updateSeedStateEntry updates seed step state -func updateSeedStateEntry(seedStateId int64, reason error) error { - _, err := db.ExecOrchestrator(` - update - agent_seed_state - set error_message = ? - where - agent_seed_state_id = ? - `, - reason.Error(), - seedStateId, - ) - if err != nil { - return log.Errore(err) - } - - return reason -} - -// FailStaleSeeds marks as failed seeds where no progress have been seen recently -func FailStaleSeeds() error { - _, err := db.ExecOrchestrator(` - update - agent_seed - set - is_complete=1, - is_successful=0 - where - is_complete=0 - and ( - select - max(state_timestamp) as last_state_timestamp - from - agent_seed_state - where - agent_seed.agent_seed_id = agent_seed_state.agent_seed_id - ) < now() - interval ? minute`, - config.Config.StaleSeedFailMinutes, - ) - return err -} - -// executeSeed is *the* function for taking a seed. It is a complex operation of testing, preparing, re-testing -// agents on both sides, initiating data transfer, following up, awaiting completion, diagnosing errors, claning up. 
-func executeSeed(seedId int64, targetHostname string, sourceHostname string) error { - - var err error - var seedStateId int64 - - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("getting target agent info for %s", targetHostname), "") - targetAgent, err := GetAgent(targetHostname) - SeededAgents <- &targetAgent - if err != nil { - return updateSeedStateEntry(seedStateId, err) - } - - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("getting source agent info for %s", sourceHostname), "") - sourceAgent, err := GetAgent(sourceHostname) - if err != nil { - return updateSeedStateEntry(seedStateId, err) - } - - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Checking MySQL status on target %s", targetHostname), "") - if targetAgent.MySQLRunning { - return updateSeedStateEntry(seedStateId, errors.New("MySQL is running on target host. Cowardly refusing to proceeed. Please stop the MySQL service")) - } - - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Looking up available snapshots on source %s", sourceHostname), "") - if len(sourceAgent.LogicalVolumes) == 0 { - return updateSeedStateEntry(seedStateId, errors.New("No logical volumes found on source host")) - } - - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Checking mount point on source %s", sourceHostname), "") - if sourceAgent.MountPoint.IsMounted { - return updateSeedStateEntry(seedStateId, errors.New("Volume already mounted on source host; please unmount")) - } - - seedFromLogicalVolume := sourceAgent.LogicalVolumes[0] - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("%s Mounting logical volume: %s", sourceHostname, seedFromLogicalVolume.Path), "") - _, err = MountLV(sourceHostname, seedFromLogicalVolume.Path) - if err != nil { - return updateSeedStateEntry(seedStateId, err) - } - sourceAgent, _ = GetAgent(sourceHostname) - _, _ = submitSeedStateEntry(seedId, fmt.Sprintf("MySQL data volume on source host %s is %d bytes", sourceHostname, 
sourceAgent.MountPoint.MySQLDiskUsage), "") - - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Erasing MySQL data on %s", targetHostname), "") - _, err = deleteMySQLDatadir(targetHostname) - if err != nil { - return updateSeedStateEntry(seedStateId, err) - } - - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Aquiring target host datadir free space on %s", targetHostname), "") - targetAgent, err = GetAgent(targetHostname) - if err != nil { - return updateSeedStateEntry(seedStateId, err) - } - - if sourceAgent.MountPoint.MySQLDiskUsage > targetAgent.MySQLDatadirDiskFree { - Unmount(sourceHostname) - return updateSeedStateEntry(seedStateId, fmt.Errorf("Not enough disk space on target host %s. Required: %d, available: %d. Bailing out.", targetHostname, sourceAgent.MountPoint.MySQLDiskUsage, targetAgent.MySQLDatadirDiskFree)) - } - - // ... - _, _ = submitSeedStateEntry(seedId, fmt.Sprintf("%s will now receive data in background", targetHostname), "") - ReceiveMySQLSeedData(targetHostname, seedId) - - _, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Waiting %d seconds for %s to start listening for incoming data", config.Config.SeedWaitSecondsBeforeSend, targetHostname), "") - time.Sleep(time.Duration(config.Config.SeedWaitSecondsBeforeSend) * time.Second) - - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("%s will now send data to %s in background", sourceHostname, targetHostname), "") - SendMySQLSeedData(sourceHostname, targetHostname, seedId) - - copyComplete := false - numStaleIterations := 0 - var bytesCopied int64 = 0 - - for !copyComplete { - targetAgentPoll, err := GetAgent(targetHostname) - if err != nil { - return log.Errore(err) - } - - if targetAgentPoll.MySQLDiskUsage == bytesCopied { - numStaleIterations++ - } - bytesCopied = targetAgentPoll.MySQLDiskUsage - - copyFailed := false - if _, commandCompleted, _ := seedCommandCompleted(targetHostname, seedId); commandCompleted { - copyComplete = true - if _, commandSucceeded, _ 
:= seedCommandSucceeded(targetHostname, seedId); !commandSucceeded { - // failed. - copyFailed = true - } - } - if numStaleIterations > 10 { - copyFailed = true - } - if copyFailed { - AbortSeedCommand(sourceHostname, seedId) - AbortSeedCommand(targetHostname, seedId) - Unmount(sourceHostname) - return updateSeedStateEntry(seedStateId, errors.New("10 iterations have passed without progress. Bailing out.")) - } - - var copyPct int64 = 0 - if sourceAgent.MountPoint.MySQLDiskUsage > 0 { - copyPct = 100 * bytesCopied / sourceAgent.MountPoint.MySQLDiskUsage - } - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Copied %d/%d bytes (%d%%)", bytesCopied, sourceAgent.MountPoint.MySQLDiskUsage, copyPct), "") - - if !copyComplete { - time.Sleep(30 * time.Second) - } - } - - // Cleanup: - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Executing post-copy command on %s", targetHostname), "") - _, err = PostCopy(targetHostname, sourceHostname) - if err != nil { - return updateSeedStateEntry(seedStateId, err) - } - - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("%s Unmounting logical volume: %s", sourceHostname, seedFromLogicalVolume.Path), "") - _, err = Unmount(sourceHostname) - if err != nil { - return updateSeedStateEntry(seedStateId, err) - } - - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Starting MySQL on target: %s", targetHostname), "") - _, err = MySQLStart(targetHostname) - if err != nil { - return updateSeedStateEntry(seedStateId, err) - } - - _, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Submitting MySQL instance for discovery: %s", targetHostname), "") - SeededAgents <- &targetAgent - - _, _ = submitSeedStateEntry(seedId, "Done", "") - - return nil -} - -// Seed is the entry point for making a seed -func Seed(targetHostname string, sourceHostname string) (int64, error) { - if targetHostname == sourceHostname { - return 0, log.Errorf("Cannot seed %s onto itself", targetHostname) - } - seedId, err := 
SubmitSeedEntry(targetHostname, sourceHostname) - if err != nil { - return 0, log.Errore(err) - } - - go func() { - err := executeSeed(seedId, targetHostname, sourceHostname) - updateSeedComplete(seedId, err) - }() - - return seedId, nil -} - -// readSeeds reads seed from the backend table -func readSeeds(whereCondition string, args []interface{}, limit string) ([]SeedOperation, error) { - res := []SeedOperation{} - query := fmt.Sprintf(` - select - agent_seed_id, - target_hostname, - source_hostname, - start_timestamp, - end_timestamp, - is_complete, - is_successful - from - agent_seed - %s - order by - agent_seed_id desc - %s - `, whereCondition, limit) - err := db.QueryOrchestrator(query, args, func(m sqlutils.RowMap) error { - seedOperation := SeedOperation{} - seedOperation.SeedId = m.GetInt64("agent_seed_id") - seedOperation.TargetHostname = m.GetString("target_hostname") - seedOperation.SourceHostname = m.GetString("source_hostname") - seedOperation.StartTimestamp = m.GetString("start_timestamp") - seedOperation.EndTimestamp = m.GetString("end_timestamp") - seedOperation.IsComplete = m.GetBool("is_complete") - seedOperation.IsSuccessful = m.GetBool("is_successful") - - res = append(res, seedOperation) - return nil - }) - - if err != nil { - log.Errore(err) - } - return res, err -} - -// ReadActiveSeedsForHost reads active seeds where host participates either as source or target -func ReadActiveSeedsForHost(hostname string) ([]SeedOperation, error) { - whereCondition := ` - where - is_complete = 0 - and ( - target_hostname = ? - or source_hostname = ? - ) - ` - return readSeeds(whereCondition, sqlutils.Args(hostname, hostname), "") -} - -// ReadRecentCompletedSeedsForHost reads active seeds where host participates either as source or target -func ReadRecentCompletedSeedsForHost(hostname string) ([]SeedOperation, error) { - whereCondition := ` - where - is_complete = 1 - and ( - target_hostname = ? - or source_hostname = ? 
- ) - ` - return readSeeds(whereCondition, sqlutils.Args(hostname, hostname), "limit 10") -} - -// AgentSeedDetails reads details from backend table -func AgentSeedDetails(seedId int64) ([]SeedOperation, error) { - whereCondition := ` - where - agent_seed_id = ? - ` - return readSeeds(whereCondition, sqlutils.Args(seedId), "") -} - -// ReadRecentSeeds reads seeds from backend table. -func ReadRecentSeeds() ([]SeedOperation, error) { - return readSeeds(``, sqlutils.Args(), "limit 100") -} - -// SeedOperationState reads states for a given seed operation -func ReadSeedStates(seedId int64) ([]SeedOperationState, error) { - res := []SeedOperationState{} - query := ` - select - agent_seed_state_id, - agent_seed_id, - state_timestamp, - state_action, - error_message - from - agent_seed_state - where - agent_seed_id = ? - order by - agent_seed_state_id desc - ` - err := db.QueryOrchestrator(query, sqlutils.Args(seedId), func(m sqlutils.RowMap) error { - seedState := SeedOperationState{} - seedState.SeedStateId = m.GetInt64("agent_seed_state_id") - seedState.SeedId = m.GetInt64("agent_seed_id") - seedState.StateTimestamp = m.GetString("state_timestamp") - seedState.Action = m.GetString("state_action") - seedState.ErrorMessage = m.GetString("error_message") - - res = append(res, seedState) - return nil - }) - - if err != nil { - log.Errore(err) - } - return res, err -} - -func RelaylogContentsTail(hostname string, startCoordinates *inst.BinlogCoordinates, onResponse *func([]byte)) (Agent, error) { - return executeAgentCommand(hostname, fmt.Sprintf("mysql-relaylog-contents-tail/%s/%d", startCoordinates.LogFile, startCoordinates.LogPos), onResponse) -} - -func ApplyRelaylogContents(hostname string, content string) (Agent, error) { - return executeAgentPostCommand(hostname, "apply-relaylog-contents", content, nil) -} diff --git a/go/vt/orchestrator/agent/instance_topology_agent.go b/go/vt/orchestrator/agent/instance_topology_agent.go deleted file mode 100644 index 
7a2739999cf..00000000000 --- a/go/vt/orchestrator/agent/instance_topology_agent.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - Copyright 2017 GitHub Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package agent - -import ( - "encoding/json" - "fmt" - - "vitess.io/vitess/go/vt/orchestrator/external/golib/log" - "vitess.io/vitess/go/vt/orchestrator/inst" -) - -func SyncReplicaRelayLogs(instance, otherInstance *inst.Instance) (*inst.Instance, error) { - var err error - var found bool - var nextCoordinates *inst.BinlogCoordinates - var content string - onResponse := func(contentBytes []byte) { - json.Unmarshal(contentBytes, &content) - } - log.Debugf("SyncReplicaRelayLogs: stopping replication") - - if !instance.ReplicationThreadsStopped() { - return instance, log.Errorf("SyncReplicaRelayLogs: replication on %+v must not run", instance.Key) - } - if !otherInstance.ReplicationThreadsStopped() { - return instance, log.Errorf("SyncReplicaRelayLogs: replication on %+v must not run", otherInstance.Key) - } - - log.Debugf("SyncReplicaRelayLogs: correlating coordinates of %+v on %+v", instance.Key, otherInstance.Key) - _, _, nextCoordinates, found, err = inst.CorrelateRelaylogCoordinates(instance, nil, otherInstance) - if err != nil { - goto Cleanup - } - if !found { - goto Cleanup - } - log.Debugf("SyncReplicaRelayLogs: correlated next-coordinates are %+v", *nextCoordinates) - - InitHttpClient() - if _, err := RelaylogContentsTail(otherInstance.Key.Hostname, nextCoordinates, 
&onResponse); err != nil { - goto Cleanup - } - log.Debugf("SyncReplicaRelayLogs: got content (%d bytes)", len(content)) - - if _, err := ApplyRelaylogContents(instance.Key.Hostname, content); err != nil { - goto Cleanup - } - log.Debugf("SyncReplicaRelayLogs: applied content (%d bytes)", len(content)) - - instance, err = inst.ChangeMasterTo(&instance.Key, &otherInstance.MasterKey, &otherInstance.ExecBinlogCoordinates, false, inst.GTIDHintNeutral) - if err != nil { - goto Cleanup - } - -Cleanup: - if err != nil { - return instance, log.Errore(err) - } - // and we're done (pending deferred functions) - inst.AuditOperation("align-via-relaylogs", &instance.Key, fmt.Sprintf("aligned %+v by relaylogs from %+v", instance.Key, otherInstance.Key)) - - return instance, err -} diff --git a/go/vt/orchestrator/app/cli.go b/go/vt/orchestrator/app/cli.go index 4ea377c64f3..f0050277e7f 100644 --- a/go/vt/orchestrator/app/cli.go +++ b/go/vt/orchestrator/app/cli.go @@ -26,7 +26,6 @@ import ( "strings" "time" - "vitess.io/vitess/go/vt/orchestrator/agent" "vitess.io/vitess/go/vt/orchestrator/config" "vitess.io/vitess/go/vt/orchestrator/external/golib/log" "vitess.io/vitess/go/vt/orchestrator/external/golib/util" @@ -66,9 +65,6 @@ var commandSynonyms = map[string]string{ "get-candidate-slave": "get-candidate-replica", "move-slaves-gtid": "move-replicas-gtid", "regroup-slaves-gtid": "regroup-replicas-gtid", - "match-slaves": "match-replicas", - "match-up-slaves": "match-up-replicas", - "regroup-slaves-pgtid": "regroup-replicas-pgtid", "which-cluster-osc-slaves": "which-cluster-osc-replicas", "which-cluster-gh-ost-slaves": "which-cluster-gh-ost-replicas", "which-slaves": "which-replicas", @@ -317,18 +313,6 @@ func Cli(command string, strict bool, instance string, destination string, owner } fmt.Printf("%s<%s\n", instanceKey.DisplayString(), destinationKey.DisplayString()) } - case registerCliCommand("move-equivalent", "Classic file:pos relocation", `Moves a replica beneath another 
server, based on previously recorded "equivalence coordinates"`): - { - instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - if destinationKey == nil { - log.Fatal("Cannot deduce destination:", destination) - } - _, err := inst.MoveEquivalent(instanceKey, destinationKey) - if err != nil { - log.Fatale(err) - } - fmt.Printf("%s<%s\n", instanceKey.DisplayString(), destinationKey.DisplayString()) - } case registerCliCommand("repoint", "Classic file:pos relocation", `Make the given instance replicate from another instance without changing the binglog coordinates. Use with care`): { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) @@ -456,99 +440,6 @@ func Cli(command string, strict bool, instance string, destination string, owner log.Fatale(err) } } - // Pseudo-GTID - case registerCliCommand("match", "Pseudo-GTID relocation", `Matches a replica beneath another (destination) instance using Pseudo-GTID`): - { - instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - if destinationKey == nil { - log.Fatal("Cannot deduce destination:", destination) - } - _, _, err := inst.MatchBelow(instanceKey, destinationKey, true) - if err != nil { - log.Fatale(err) - } - fmt.Printf("%s<%s\n", instanceKey.DisplayString(), destinationKey.DisplayString()) - } - case registerCliCommand("match-up", "Pseudo-GTID relocation", `Transport the replica one level up the hierarchy, making it child of its grandparent, using Pseudo-GTID`): - { - instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - instance, _, err := inst.MatchUp(instanceKey, true) - if err != nil { - log.Fatale(err) - } - fmt.Printf("%s<%s\n", instanceKey.DisplayString(), instance.MasterKey.DisplayString()) - } - case registerCliCommand("rematch", "Pseudo-GTID relocation", `Reconnect a replica onto its master, via PSeudo-GTID.`): - { - instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - instance, _, err := inst.RematchReplica(instanceKey, 
true) - if err != nil { - log.Fatale(err) - } - fmt.Printf("%s<%s\n", instanceKey.DisplayString(), instance.MasterKey.DisplayString()) - } - case registerCliCommand("match-replicas", "Pseudo-GTID relocation", `Matches all replicas of a given instance under another (destination) instance using Pseudo-GTID`): - { - // Move all replicas of "instance" beneath "destination" - instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - if instanceKey == nil { - log.Fatal("Cannot deduce instance:", instance) - } - if destinationKey == nil { - log.Fatal("Cannot deduce destination:", destination) - } - - matchedReplicas, _, err, errs := inst.MultiMatchReplicas(instanceKey, destinationKey, pattern) - if err != nil { - log.Fatale(err) - } else { - for _, e := range errs { - log.Errore(e) - } - for _, replica := range matchedReplicas { - fmt.Println(replica.Key.DisplayString()) - } - } - } - case registerCliCommand("match-up-replicas", "Pseudo-GTID relocation", `Matches replicas of the given instance one level up the topology, making them siblings of given instance, using Pseudo-GTID`): - { - instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - if instanceKey == nil { - log.Fatal("Cannot deduce instance:", instance) - } - - matchedReplicas, _, err, errs := inst.MatchUpReplicas(instanceKey, pattern) - if err != nil { - log.Fatale(err) - } else { - for _, e := range errs { - log.Errore(e) - } - for _, replica := range matchedReplicas { - fmt.Println(replica.Key.DisplayString()) - } - } - } - case registerCliCommand("regroup-replicas-pgtid", "Pseudo-GTID relocation", `Given an instance, pick one of its replica and make it local master of its siblings, using Pseudo-GTID.`): - { - instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - if instanceKey == nil { - log.Fatal("Cannot deduce instance:", instance) - } - validateInstanceIsFound(instanceKey) - - onCandidateReplicaChosen := func(candidateReplica *inst.Instance) { 
fmt.Println(candidateReplica.Key.DisplayString()) } - lostReplicas, equalReplicas, aheadReplicas, cannotReplicateReplicas, promotedReplica, err := inst.RegroupReplicasPseudoGTID(instanceKey, false, onCandidateReplicaChosen, postponedFunctionsContainer, nil) - lostReplicas = append(lostReplicas, cannotReplicateReplicas...) - postponedFunctionsContainer.Wait() - if promotedReplica == nil { - log.Fatalf("Could not regroup replicas of %+v; error: %+v", *instanceKey, err) - } - fmt.Printf("%s lost: %d, trivial: %d, pseudo-gtid: %d\n", promotedReplica.Key.DisplayString(), len(lostReplicas), len(equalReplicas), len(aheadReplicas)) - if err != nil { - log.Fatale(err) - } - } // General replication commands case registerCliCommand("enable-gtid", "Replication, general", `If possible, turn on GTID replication`): { @@ -784,25 +675,6 @@ func Cli(command string, strict bool, instance string, destination string, owner } fmt.Println(instanceKey.DisplayString()) } - case registerCliCommand("last-pseudo-gtid", "Binary logs", `Find latest Pseudo-GTID entry in instance's binary logs`): - { - instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - if instanceKey == nil { - log.Fatalf("Unresolved instance") - } - instance, err := inst.ReadTopologyInstance(instanceKey) - if err != nil { - log.Fatale(err) - } - if instance == nil { - log.Fatalf("Instance not found: %+v", *instanceKey) - } - coordinates, text, err := inst.FindLastPseudoGTIDEntry(instance, instance.RelaylogCoordinates, nil, strict, nil) - if err != nil { - log.Fatale(err) - } - fmt.Printf("%+v:%s\n", *coordinates, text) - } case registerCliCommand("locate-gtid-errant", "Binary logs", `List binary logs containing errant GTIDs`): { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) @@ -817,128 +689,6 @@ func Cli(command string, strict bool, instance string, destination string, owner fmt.Println(binlog) } } - case registerCliCommand("last-executed-relay-entry", "Binary logs", `Find 
coordinates of last executed relay log entry`): - { - instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - if instanceKey == nil { - log.Fatalf("Unresolved instance") - } - instance, err := inst.ReadTopologyInstance(instanceKey) - if err != nil { - log.Fatale(err) - } - if instance == nil { - log.Fatalf("Instance not found: %+v", *instanceKey) - } - minCoordinates, err := inst.GetPreviousKnownRelayLogCoordinatesForInstance(instance) - if err != nil { - log.Fatalf("Error reading last known coordinates for %+v: %+v", instance.Key, err) - } - binlogEvent, err := inst.GetLastExecutedEntryInRelayLogs(instance, minCoordinates, instance.RelaylogCoordinates) - if err != nil { - log.Fatale(err) - } - fmt.Printf("%+v:%d\n", *binlogEvent, binlogEvent.NextEventPos) - } - case registerCliCommand("correlate-relaylog-pos", "Binary logs", `Given an instance (-i) and relaylog coordinates (--binlog=file:pos), find the correlated coordinates in another instance's relay logs (-d)`): - { - instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - if instanceKey == nil { - log.Fatalf("Unresolved instance") - } - instance, err := inst.ReadTopologyInstance(instanceKey) - if err != nil { - log.Fatale(err) - } - if instance == nil { - log.Fatalf("Instance not found: %+v", *instanceKey) - } - if destinationKey == nil { - log.Fatal("Cannot deduce target instance:", destination) - } - otherInstance, err := inst.ReadTopologyInstance(destinationKey) - if err != nil { - log.Fatale(err) - } - if otherInstance == nil { - log.Fatalf("Instance not found: %+v", *destinationKey) - } - - var relaylogCoordinates *inst.BinlogCoordinates - if *config.RuntimeCLIFlags.BinlogFile != "" { - if relaylogCoordinates, err = inst.ParseBinlogCoordinates(*config.RuntimeCLIFlags.BinlogFile); err != nil { - log.Fatalf("Expecing --binlog argument as file:pos") - } - } - instanceCoordinates, correlatedCoordinates, nextCoordinates, _, err := inst.CorrelateRelaylogCoordinates(instance, 
relaylogCoordinates, otherInstance) - if err != nil { - log.Fatale(err) - } - fmt.Printf("%+v;%+v;%+v\n", *instanceCoordinates, *correlatedCoordinates, *nextCoordinates) - } - case registerCliCommand("find-binlog-entry", "Binary logs", `Get binlog file:pos of entry given by --pattern (exact full match, not a regular expression) in a given instance`): - { - if pattern == "" { - log.Fatal("No pattern given") - } - instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - if instanceKey == nil { - log.Fatalf("Unresolved instance") - } - instance, err := inst.ReadTopologyInstance(instanceKey) - if err != nil { - log.Fatale(err) - } - if instance == nil { - log.Fatalf("Instance not found: %+v", *instanceKey) - } - coordinates, err := inst.SearchEntryInInstanceBinlogs(instance, pattern, false, nil) - if err != nil { - log.Fatale(err) - } - fmt.Printf("%+v\n", *coordinates) - } - case registerCliCommand("correlate-binlog-pos", "Binary logs", `Given an instance (-i) and binlog coordinates (--binlog=file:pos), find the correlated coordinates in another instance (-d)`): - { - instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - if instanceKey == nil { - log.Fatalf("Unresolved instance") - } - instance, err := inst.ReadTopologyInstance(instanceKey) - if err != nil { - log.Fatale(err) - } - if instance == nil { - log.Fatalf("Instance not found: %+v", *instanceKey) - } - if !instance.LogBinEnabled { - log.Fatalf("Instance does not have binary logs: %+v", *instanceKey) - } - if destinationKey == nil { - log.Fatal("Cannot deduce target instance:", destination) - } - otherInstance, err := inst.ReadTopologyInstance(destinationKey) - if err != nil { - log.Fatale(err) - } - if otherInstance == nil { - log.Fatalf("Instance not found: %+v", *destinationKey) - } - var binlogCoordinates *inst.BinlogCoordinates - if *config.RuntimeCLIFlags.BinlogFile == "" { - binlogCoordinates = &instance.SelfBinlogCoordinates - } else { - if binlogCoordinates, err = 
inst.ParseBinlogCoordinates(*config.RuntimeCLIFlags.BinlogFile); err != nil { - log.Fatalf("Expecing --binlog argument as file:pos") - } - } - - coordinates, _, err := inst.CorrelateBinlogCoordinates(instance, binlogCoordinates, otherInstance) - if err != nil { - log.Fatale(err) - } - fmt.Printf("%+v\n", *coordinates) - } // Pool case registerCliCommand("submit-pool-instances", "Pools", `Submit a pool name with a list of instances in that pool`): { @@ -1682,15 +1432,6 @@ func Cli(command string, strict bool, instance string, destination string, owner } fmt.Println(replacement.Key.DisplayString()) } - case registerCliCommand("custom-command", "Agent", "Execute a custom command on the agent as defined in the agent conf"): - { - output, err := agent.CustomCommand(hostnameFlag, pattern) - if err != nil { - log.Fatale(err) - } - - fmt.Printf("%v\n", output) - } case registerCliCommand("disable-global-recoveries", "", `Disallow orchestrator from performing recoveries globally`): { if err := logic.DisableRecovery(); err != nil { diff --git a/go/vt/orchestrator/app/command_help.go b/go/vt/orchestrator/app/command_help.go index 97e80010219..fcc9b7d639b 100644 --- a/go/vt/orchestrator/app/command_help.go +++ b/go/vt/orchestrator/app/command_help.go @@ -78,24 +78,6 @@ func init() { orchestrator -c move-below -d sibling.replica.under.which.to.move.com -i not given, implicitly assumed local hostname - ` - CommandHelp["move-equivalent"] = ` - Moves a replica beneath another server, based on previously recorded "equivalence coordinates". Such coordinates - are obtained whenever orchestrator issues a CHANGE MASTER TO. The "before" and "after" masters coordinates are - persisted. In such cases where the newly relocated replica is unable to replicate (e.g. firewall issues) it is then - easy to revert the relocation via "move-equivalent". 
- The command works if and only if orchestrator has an exact mapping between the replica's current replication coordinates - and some other coordinates. - Example: - - orchestrator -c move-equivalent -i replica.to.revert.master.position.com -d master.to.move.to.com - ` - CommandHelp["take-siblings"] = ` - Turn all siblings of a replica into its sub-replicas. No action taken for siblings that cannot become - replicas of given instance (e.g. incompatible versions, binlog format etc.). This is a (faster) shortcut - to executing move-below for all siblings of the given instance. Example: - - orchestrator -c take-siblings -i replica.whose.siblings.will.move.below.com ` CommandHelp["take-master"] = ` Turn an instance into a master of its own master; essentially switch the two. Replicas of each of the two @@ -107,7 +89,7 @@ func init() { CommandHelp["repoint"] = ` Make the given instance replicate from another instance without changing the binglog coordinates. There are little sanity checks to this and this is a risky operation. Use cases are: a rename of the master's - host, a corruption in relay-logs, move from beneath MaxScale & Binlog-server. Examples: + host, a corruption in relay-logs, move from beneath Binlog-server. Examples: orchestrator -c repoint -i replica.to.operate.on.com -d new.master.com @@ -219,52 +201,6 @@ func init() { (this command was previously named "multi-match-replicas") ` - CommandHelp["match-up"] = ` - Transport the replica one level up the hierarchy, making it child of its grandparent. This is - similar in essence to move-up, only based on Pseudo-GTID. The master of the given instance - does not need to be alive or connected (and could in fact be crashed). It is never contacted. - Grandparent instance must be alive and accessible. 
- Examples: - - orchestrator -c match-up -i replica.to.match.up.com:3306 - - orchestrator -c match-up - -i not given, implicitly assumed local hostname - ` - CommandHelp["match-up-replicas"] = ` - Matches replicas of the given instance one level up the topology, making them siblings of given instance. - This is a (faster) shortcut to executing match-up on all replicas of given instance. The instance need - not be alive / accessib;e / functional. It can be crashed. - Example: - - orchestrator -c match-up-replicas -i replica.whose.subreplicas.will.match.up.com - - orchestrator -c match-up-replicas -i replica.whose.subreplicas.will.match.up.com[:3306] --pattern=regexp.filter - only apply to those instances that match given regex - ` - CommandHelp["rematch"] = ` - Reconnect a replica onto its master, via PSeudo-GTID. The use case for this operation is a non-crash-safe - replication configuration (e.g. MySQL 5.5) with sync_binlog=1 and log_slave_updates. This operation - implies crash-safe-replication and makes it possible for the replica to reconnect. Example: - - orchestrator -c rematch -i replica.to.rematch.under.its.master - ` - CommandHelp["regroup-replicas"] = ` - Given an instance (possibly a crashed one; it is never being accessed), pick one of its replica and make it - local master of its siblings, using Pseudo-GTID. It is uncertain that there *is* a replica that will be able to - become master to all its siblings. But if there is one, orchestrator will pick such one. There are many - constraints, most notably the replication positions of all replicas, whether they use log_slave_updates, and - otherwise version compatabilities etc. - As many replicas that can be regrouped under promoted slves are operated on. The rest are untouched. - This command is useful in the event of a crash. 
For example, in the event that a master dies, this operation - can promote a candidate replacement and set up the remaining topology to correctly replicate from that - replacement replica. Example: - - orchestrator -c regroup-replicas -i instance.with.replicas.one.of.which.will.turn.local.master.if.possible - - --debug is your friend. - ` - CommandHelp["enable-gtid"] = ` If possible, enable GTID replication. This works on Oracle (>= 5.6, gtid-mode=1) and MariaDB (>= 10.0). Replication is stopped for a short duration so as to reconfigure as GTID. In case of error replication remains @@ -387,37 +323,6 @@ func init() { Purges binary logs until given log ` - CommandHelp["last-pseudo-gtid"] = ` - Information command; an authoritative way of detecting whether a Pseudo-GTID event exist for an instance, - and if so, output the last Pseudo-GTID entry and its location. Example: - - orchestrator -c last-pseudo-gtid -i instance.with.possible.pseudo-gtid.injection - ` - CommandHelp["find-binlog-entry"] = ` - Get binlog file:pos of entry given by --pattern (exact full match, not a regular expression) in a given instance. - This will search the instance's binary logs starting with most recent, and terminate as soon as an exact match is found. - The given input is not a regular expression. It must fully match the entry (not a substring). - This is most useful when looking for uniquely identifyable values, such as Pseudo-GTID. Example: - - orchestrator -c find-binlog-entry -i instance.to.search.on.com --pattern "insert into my_data (my_column) values ('distinct_value_01234_56789')" - - Prints out the binlog file:pos where the entry is found, or errors if unfound. - ` - CommandHelp["correlate-binlog-pos"] = ` - Given an instance (-i) and binlog coordinates (--binlog=file:pos), find the correlated coordinates in another instance (-d). 
- "Correlated coordinates" are those that present the same point-in-time of sequence of binary log events, untangling - the mess of different binlog file:pos coordinates on different servers. - This operation relies on Pseudo-GTID: your servers must have been pre-injected with PSeudo-GTID entries as these are - being used as binlog markers in the correlation process. - You must provide a valid file:pos in the binlogs of the source instance (-i), and in response get the correlated - coordinates in the binlogs of the destination instance (-d). This operation does not work on relay logs. - Example: - - orchestrator -c correlate-binlog-pos -i instance.with.binary.log.com --binlog=mysql-bin.002366:14127 -d other.instance.with.binary.logs.com - - Prints out correlated coordinates, e.g.: "mysql-bin.002302:14220", or errors out. - ` - CommandHelp["submit-pool-instances"] = ` Submit a pool name with a list of instances in that pool. This removes any previous instances associated with that pool. Expecting comma delimited list of instances diff --git a/go/vt/orchestrator/app/http.go b/go/vt/orchestrator/app/http.go index bffb6486cf6..c4e5ca756a1 100644 --- a/go/vt/orchestrator/app/http.go +++ b/go/vt/orchestrator/app/http.go @@ -24,7 +24,6 @@ import ( "strings" "time" - "vitess.io/vitess/go/vt/orchestrator/agent" "vitess.io/vitess/go/vt/orchestrator/collection" "vitess.io/vitess/go/vt/orchestrator/config" "vitess.io/vitess/go/vt/orchestrator/http" @@ -56,9 +55,6 @@ func Http(continuousDiscovery bool) { process.ContinuousRegistration(string(process.OrchestratorExecutionHttpMode), "") martini.Env = martini.Prod - if config.Config.ServeAgentsHttp { - go agentsHttp() - } standardHttp(continuousDiscovery) } @@ -173,43 +169,3 @@ func standardHttp(continuousDiscovery bool) { } log.Info("Web server started") } - -// agentsHttp startes serving agents HTTP or HTTPS API requests -func agentsHttp() { - m := martini.Classic() - m.Use(gzip.All()) - m.Use(render.Renderer()) - if 
config.Config.AgentsUseMutualTLS { - m.Use(ssl.VerifyOUs(config.Config.AgentSSLValidOUs)) - } - - log.Info("Starting agents listener") - - agent.InitHttpClient() - go logic.ContinuousAgentsPoll() - - http.AgentsAPI.URLPrefix = config.Config.URLPrefix - http.AgentsAPI.RegisterRequests(m) - - // Serve - if config.Config.AgentsUseSSL { - log.Info("Starting agent HTTPS listener") - tlsConfig, err := ssl.NewTLSConfig(config.Config.AgentSSLCAFile, config.Config.AgentsUseMutualTLS) - if err != nil { - log.Fatale(err) - } - tlsConfig.InsecureSkipVerify = config.Config.AgentSSLSkipVerify - if err = ssl.AppendKeyPairWithPassword(tlsConfig, config.Config.AgentSSLCertFile, config.Config.AgentSSLPrivateKeyFile, agentSSLPEMPassword); err != nil { - log.Fatale(err) - } - if err = ssl.ListenAndServeTLS(config.Config.AgentsServerPort, m, tlsConfig); err != nil { - log.Fatale(err) - } - } else { - log.Info("Starting agent HTTP listener") - if err := nethttp.ListenAndServe(config.Config.AgentsServerPort, m); err != nil { - log.Fatale(err) - } - } - log.Info("Agent server started") -} diff --git a/go/vt/orchestrator/config/cli_flags.go b/go/vt/orchestrator/config/cli_flags.go index d3ded6c2a29..8db2cf19fc2 100644 --- a/go/vt/orchestrator/config/cli_flags.go +++ b/go/vt/orchestrator/config/cli_flags.go @@ -27,7 +27,6 @@ type CLIFlags struct { Statement *string PromotionRule *string ConfiguredVersion string - SkipBinlogSearch *bool SkipContinuousRegistration *bool EnableDatabaseUpdate *bool IgnoreRaftSetup *bool diff --git a/go/vt/orchestrator/config/config.go b/go/vt/orchestrator/config/config.go index 360245a0ad1..f547379e8db 100644 --- a/go/vt/orchestrator/config/config.go +++ b/go/vt/orchestrator/config/config.go @@ -41,25 +41,20 @@ const ( var configurationLoaded chan bool = make(chan bool) const ( - HealthPollSeconds = 1 - RaftHealthPollSeconds = 10 - RecoveryPollSeconds = 1 - ActiveNodeExpireSeconds = 5 - BinlogFileHistoryDays = 1 - MaintenanceOwner = "orchestrator" - 
AuditPageSize = 20 - MaintenancePurgeDays = 7 - MySQLTopologyMaxPoolConnections = 3 - MaintenanceExpireMinutes = 10 - AgentHttpTimeoutSeconds = 60 - PseudoGTIDCoordinatesHistoryHeuristicMinutes = 2 - DebugMetricsIntervalSeconds = 10 - PseudoGTIDSchema = "_pseudo_gtid_" - PseudoGTIDIntervalSeconds = 5 - PseudoGTIDExpireMinutes = 60 - StaleInstanceCoordinatesExpireSeconds = 60 - CheckAutoPseudoGTIDGrantsIntervalSeconds = 60 - SelectTrueQuery = "select 1" + HealthPollSeconds = 1 + RaftHealthPollSeconds = 10 + RecoveryPollSeconds = 1 + ActiveNodeExpireSeconds = 5 + BinlogFileHistoryDays = 1 + MaintenanceOwner = "orchestrator" + AuditPageSize = 20 + MaintenancePurgeDays = 7 + MySQLTopologyMaxPoolConnections = 3 + MaintenanceExpireMinutes = 10 + AgentHttpTimeoutSeconds = 60 + DebugMetricsIntervalSeconds = 10 + StaleInstanceCoordinatesExpireSeconds = 60 + SelectTrueQuery = "select 1" ) // Configuration makes for orchestrator configuration input, which can be provided by user via JSON formatted file. @@ -125,7 +120,6 @@ type Configuration struct { InstanceWriteBufferSize int // Instance write buffer size (max number of instances to flush in one INSERT ODKU) BufferInstanceWrites bool // Set to 'true' for write-optimization on backend table (compromise: writes can be stale and overwrite non stale data) InstanceFlushIntervalMilliseconds int // Max interval between instance write buffer flushes - SkipMaxScaleCheck bool // If you don't ever have MaxScale BinlogServer in your topology (and most people don't), set this to 'true' to save some pointless queries UnseenInstanceForgetHours uint // Number of hours after which an unseen instance is forgotten SnapshotTopologiesIntervalHours uint // Interval in hour between snapshot-topologies invocation. 
Default: 0 (disabled) DiscoveryMaxConcurrency uint // Number of goroutines doing hosts discovery @@ -198,13 +192,7 @@ type Configuration struct { StaleSeedFailMinutes uint // Number of minutes after which a stale (no progress) seed is considered failed. SeedAcceptableBytesDiff int64 // Difference in bytes between seed source & target data size that is still considered as successful copy SeedWaitSecondsBeforeSend int64 // Number of seconds for waiting before start send data command on agent - AutoPseudoGTID bool // Should orchestrator automatically inject Pseudo-GTID entries to the masters - PseudoGTIDPattern string // Pattern to look for in binary logs that makes for a unique entry (pseudo GTID). When empty, Pseudo-GTID based refactoring is disabled. - PseudoGTIDPatternIsFixedSubstring bool // If true, then PseudoGTIDPattern is not treated as regular expression but as fixed substring, and can boost search time - PseudoGTIDMonotonicHint string // subtring in Pseudo-GTID entry which indicates Pseudo-GTID entries are expected to be monotonically increasing - DetectPseudoGTIDQuery string // Optional query which is used to authoritatively decide whether pseudo gtid is enabled on instance BinlogEventsChunkSize int // Chunk size (X) for SHOW BINLOG|RELAYLOG EVENTS LIMIT ?,X statements. Smaller means less locking and mroe work to be done - SkipBinlogEventsContaining []string // When scanning/comparing binlogs for Pseudo-GTID, skip entries containing given texts. These are NOT regular expressions (would consume too much CPU while scanning binlogs), just substrings to find. ReduceReplicationAnalysisCount bool // When true, replication analysis will only report instances where possibility of handled problems is possible in the first place (e.g. will not report most leaf nodes, that are mostly uninteresting). 
When false, provides an entry for every known instance FailureDetectionPeriodBlockMinutes int // The time for which an instance's failure discovery is kept "active", so as to avoid concurrent "discoveries" of the instance's failure; this preceeds any recovery process, if any. RecoveryPeriodBlockMinutes int // (supported for backwards compatibility but please use newer `RecoveryPeriodBlockSeconds` instead) The time for which an instance's recovery is kept "active", so as to avoid concurrent recoveries on smae instance as well as flapping @@ -237,10 +225,6 @@ type Configuration struct { PostponeSlaveRecoveryOnLagMinutes uint // Synonym to PostponeReplicaRecoveryOnLagMinutes PostponeReplicaRecoveryOnLagMinutes uint // On crash recovery, replicas that are lagging more than given minutes are only resurrected late in the recovery process, after master/IM has been elected and processes executed. Value of 0 disables this feature OSCIgnoreHostnameFilters []string // OSC replicas recommendation will ignore replica hostnames matching given patterns - GraphiteAddr string // Optional; address of graphite port. If supplied, metrics will be written here - GraphitePath string // Prefix for graphite path. May include {hostname} magic placeholder - GraphiteConvertHostnameDotsToUnderscores bool // If true, then hostname's dots are converted to underscores before being used in graphite path - GraphitePollSeconds int // Graphite writes interval. 0 disables. URLPrefix string // URL prefix to run orchestrator on non-root web path, e.g. /orchestrator to put it behind nginx. DiscoveryIgnoreReplicaHostnameFilters []string // Regexp filters to apply to prevent auto-discovering new replicas. Usage: unreachable servers due to firewalls, applications which trigger binlog dumps DiscoveryIgnoreMasterHostnameFilters []string // Regexp filters to apply to prevent auto-discovering a master. 
Usage: pointing your master temporarily to replicate seom data from external host @@ -305,7 +289,6 @@ func newConfiguration() *Configuration { InstanceWriteBufferSize: 100, BufferInstanceWrites: false, InstanceFlushIntervalMilliseconds: 100, - SkipMaxScaleCheck: true, UnseenInstanceForgetHours: 240, SnapshotTopologiesIntervalHours: 0, DiscoverByShowSlaveHosts: false, @@ -373,13 +356,7 @@ func newConfiguration() *Configuration { StaleSeedFailMinutes: 60, SeedAcceptableBytesDiff: 8192, SeedWaitSecondsBeforeSend: 2, - AutoPseudoGTID: false, - PseudoGTIDPattern: "", - PseudoGTIDPatternIsFixedSubstring: false, - PseudoGTIDMonotonicHint: "", - DetectPseudoGTIDQuery: "", BinlogEventsChunkSize: 10000, - SkipBinlogEventsContaining: []string{}, ReduceReplicationAnalysisCount: true, FailureDetectionPeriodBlockMinutes: 60, RecoveryPeriodBlockMinutes: 60, @@ -409,10 +386,6 @@ func newConfiguration() *Configuration { DelayMasterPromotionIfSQLThreadNotUpToDate: true, PostponeSlaveRecoveryOnLagMinutes: 0, OSCIgnoreHostnameFilters: []string{}, - GraphiteAddr: "", - GraphitePath: "", - GraphiteConvertHostnameDotsToUnderscores: true, - GraphitePollSeconds: 60, URLPrefix: "", DiscoveryIgnoreReplicaHostnameFilters: []string{}, ConsulAddress: "", @@ -552,12 +525,6 @@ func (this *Configuration) postReadAdjustments() error { this.KVClusterMasterPrefix = strings.TrimRight(this.KVClusterMasterPrefix, "/") this.KVClusterMasterPrefix = fmt.Sprintf("%s/", this.KVClusterMasterPrefix) } - if this.AutoPseudoGTID { - this.PseudoGTIDPattern = "drop view if exists `_pseudo_gtid_`" - this.PseudoGTIDPatternIsFixedSubstring = true - this.PseudoGTIDMonotonicHint = "asc:" - this.DetectPseudoGTIDQuery = SelectTrueQuery - } if this.HTTPAdvertise != "" { u, err := url.Parse(this.HTTPAdvertise) if err != nil { diff --git a/go/vt/orchestrator/external/raft/.gitignore b/go/vt/orchestrator/external/raft/.gitignore deleted file mode 100644 index 836562412fe..00000000000 --- 
a/go/vt/orchestrator/external/raft/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test diff --git a/go/vt/orchestrator/external/raft/.travis.yml b/go/vt/orchestrator/external/raft/.travis.yml deleted file mode 100644 index 94eb8668b66..00000000000 --- a/go/vt/orchestrator/external/raft/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go - -go: - - 1.4 - - 1.5 - - 1.6 - - tip - -install: make deps -script: - - make integ - -notifications: - flowdock: - secure: fZrcf9rlh2IrQrlch1sHkn3YI7SKvjGnAl/zyV5D6NROe1Bbr6d3QRMuCXWWdhJHzjKmXk5rIzbqJhUc0PNF7YjxGNKSzqWMQ56KcvN1k8DzlqxpqkcA3Jbs6fXCWo2fssRtZ7hj/wOP1f5n6cc7kzHDt9dgaYJ6nO2fqNPJiTc= - diff --git a/go/vt/orchestrator/external/raft/LICENSE b/go/vt/orchestrator/external/raft/LICENSE deleted file mode 100644 index c33dcc7c928..00000000000 --- a/go/vt/orchestrator/external/raft/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. 
that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. 
For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. 
- - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. 
You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. 
If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. 
Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. 
- diff --git a/go/vt/orchestrator/external/raft/Makefile b/go/vt/orchestrator/external/raft/Makefile deleted file mode 100644 index 556aa2e20f8..00000000000 --- a/go/vt/orchestrator/external/raft/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -DEPS = $(go list -f '{{range .TestImports}}{{.}} {{end}}' ./...) - -test: - go test -timeout=30s ./... - -integ: test - INTEG_TESTS=yes go test -timeout=3s -run=Integ ./... - -deps: - go get -d -v ./... - echo $(DEPS) | xargs -n1 go get -d - -cov: - INTEG_TESTS=yes gocov test github.com/hashicorp/raft | gocov-html > /tmp/coverage.html - open /tmp/coverage.html - -.PHONY: test cov integ deps diff --git a/go/vt/orchestrator/external/raft/README.md b/go/vt/orchestrator/external/raft/README.md deleted file mode 100644 index 760a45a30fc..00000000000 --- a/go/vt/orchestrator/external/raft/README.md +++ /dev/null @@ -1,89 +0,0 @@ -raft [![Build Status](https://travis-ci.org/hashicorp/raft.png)](https://travis-ci.org/hashicorp/raft) -==== - -raft is a [Go](http://www.golang.org) library that manages a replicated -log and can be used with an FSM to manage replicated state machines. It -is a library for providing [consensus](http://en.wikipedia.org/wiki/Consensus_(computer_science)). - -The use cases for such a library are far-reaching as replicated state -machines are a key component of many distributed systems. They enable -building Consistent, Partition Tolerant (CP) systems, with limited -fault tolerance as well. - -## Building - -If you wish to build raft you'll need Go version 1.2+ installed. - -Please check your installation with: - -``` -go version -``` - -## Documentation - -For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/raft). - -To prevent complications with cgo, the primary backend `MDBStore` is in a separate repository, -called [raft-mdb](http://github.com/hashicorp/raft-mdb). That is the recommended implementation -for the `LogStore` and `StableStore`. 
- -A pure Go backend using [BoltDB](https://github.com/boltdb/bolt) is also available called -[raft-boltdb](https://github.com/hashicorp/raft-boltdb). It can also be used as a `LogStore` -and `StableStore`. - -## Protocol - -raft is based on ["Raft: In Search of an Understandable Consensus Algorithm"](https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf) - -A high level overview of the Raft protocol is described below, but for details please read the full -[Raft paper](https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf) -followed by the raft source. Any questions about the raft protocol should be sent to the -[raft-dev mailing list](https://groups.google.com/forum/#!forum/raft-dev). - -### Protocol Description - -Raft nodes are always in one of three states: follower, candidate or leader. All -nodes initially start out as a follower. In this state, nodes can accept log entries -from a leader and cast votes. If no entries are received for some time, nodes -self-promote to the candidate state. In the candidate state nodes request votes from -their peers. If a candidate receives a quorum of votes, then it is promoted to a leader. -The leader must accept new log entries and replicate to all the other followers. -In addition, if stale reads are not acceptable, all queries must also be performed on -the leader. - -Once a cluster has a leader, it is able to accept new log entries. A client can -request that a leader append a new log entry, which is an opaque binary blob to -Raft. The leader then writes the entry to durable storage and attempts to replicate -to a quorum of followers. Once the log entry is considered *committed*, it can be -*applied* to a finite state machine. The finite state machine is application specific, -and is implemented using an interface. - -An obvious question relates to the unbounded nature of a replicated log. 
Raft provides -a mechanism by which the current state is snapshotted, and the log is compacted. Because -of the FSM abstraction, restoring the state of the FSM must result in the same state -as a replay of old logs. This allows Raft to capture the FSM state at a point in time, -and then remove all the logs that were used to reach that state. This is performed automatically -without user intervention, and prevents unbounded disk usage as well as minimizing -time spent replaying logs. - -Lastly, there is the issue of updating the peer set when new servers are joining -or existing servers are leaving. As long as a quorum of nodes is available, this -is not an issue as Raft provides mechanisms to dynamically update the peer set. -If a quorum of nodes is unavailable, then this becomes a very challenging issue. -For example, suppose there are only 2 peers, A and B. The quorum size is also -2, meaning both nodes must agree to commit a log entry. If either A or B fails, -it is now impossible to reach quorum. This means the cluster is unable to add, -or remove a node, or commit any additional log entries. This results in *unavailability*. -At this point, manual intervention would be required to remove either A or B, -and to restart the remaining node in bootstrap mode. - -A Raft cluster of 3 nodes can tolerate a single node failure, while a cluster -of 5 can tolerate 2 node failures. The recommended configuration is to either -run 3 or 5 raft servers. This maximizes availability without -greatly sacrificing performance. - -In terms of performance, Raft is comparable to Paxos. Assuming stable leadership, -committing a log entry requires a single round trip to half of the cluster. -Thus performance is bound by disk I/O and network latency. 
- diff --git a/go/vt/orchestrator/external/raft/bench/bench.go b/go/vt/orchestrator/external/raft/bench/bench.go deleted file mode 100644 index 8228c0e1b3c..00000000000 --- a/go/vt/orchestrator/external/raft/bench/bench.go +++ /dev/null @@ -1,172 +0,0 @@ -package raftbench - -// raftbench provides common benchmarking functions which can be used by -// anything which implements the raft.LogStore and raft.StableStore interfaces. -// All functions accept these interfaces and perform benchmarking. This -// makes comparing backend performance easier by sharing the tests. - -import ( - "testing" - - "vitess.io/vitess/go/vt/orchestrator/external/raft" -) - -func FirstIndex(b *testing.B, store raft.LogStore) { - // Create some fake data - var logs []*raft.Log - for i := 1; i < 10; i++ { - logs = append(logs, &raft.Log{Index: uint64(i), Data: []byte("data")}) - } - if err := store.StoreLogs(logs); err != nil { - b.Fatalf("err: %s", err) - } - b.ResetTimer() - - // Run FirstIndex a number of times - for n := 0; n < b.N; n++ { - store.FirstIndex() - } -} - -func LastIndex(b *testing.B, store raft.LogStore) { - // Create some fake data - var logs []*raft.Log - for i := 1; i < 10; i++ { - logs = append(logs, &raft.Log{Index: uint64(i), Data: []byte("data")}) - } - if err := store.StoreLogs(logs); err != nil { - b.Fatalf("err: %s", err) - } - b.ResetTimer() - - // Run LastIndex a number of times - for n := 0; n < b.N; n++ { - store.LastIndex() - } -} - -func GetLog(b *testing.B, store raft.LogStore) { - // Create some fake data - var logs []*raft.Log - for i := 1; i < 10; i++ { - logs = append(logs, &raft.Log{Index: uint64(i), Data: []byte("data")}) - } - if err := store.StoreLogs(logs); err != nil { - b.Fatalf("err: %s", err) - } - b.ResetTimer() - - // Run GetLog a number of times - for n := 0; n < b.N; n++ { - if err := store.GetLog(5, new(raft.Log)); err != nil { - b.Fatalf("err: %s", err) - } - } -} - -func StoreLog(b *testing.B, store raft.LogStore) { - // Run StoreLog a 
number of times - for n := 0; n < b.N; n++ { - log := &raft.Log{Index: uint64(n), Data: []byte("data")} - if err := store.StoreLog(log); err != nil { - b.Fatalf("err: %s", err) - } - } -} - -func StoreLogs(b *testing.B, store raft.LogStore) { - // Run StoreLogs a number of times. We want to set multiple logs each - // run, so we create 3 logs with incrementing indexes for each iteration. - for n := 0; n < b.N; n++ { - b.StopTimer() - offset := 3 * (n + 1) - logs := []*raft.Log{ - {Index: uint64(offset - 2), Data: []byte("data")}, - {Index: uint64(offset - 1), Data: []byte("data")}, - {Index: uint64(offset), Data: []byte("data")}, - } - b.StartTimer() - - if err := store.StoreLogs(logs); err != nil { - b.Fatalf("err: %s", err) - } - } -} - -func DeleteRange(b *testing.B, store raft.LogStore) { - // Create some fake data. In this case, we create 3 new log entries for each - // test case, and separate them by index in multiples of 10. This allows - // some room so that we can test deleting ranges with "extra" logs to - // to ensure we stop going to the database once our max index is hit. 
- var logs []*raft.Log - for n := 0; n < b.N; n++ { - offset := 10 * n - for i := offset; i < offset+3; i++ { - logs = append(logs, &raft.Log{Index: uint64(i), Data: []byte("data")}) - } - } - if err := store.StoreLogs(logs); err != nil { - b.Fatalf("err: %s", err) - } - b.ResetTimer() - - // Delete a range of the data - for n := 0; n < b.N; n++ { - offset := 10 * n - if err := store.DeleteRange(uint64(offset), uint64(offset+9)); err != nil { - b.Fatalf("err: %s", err) - } - } -} - -func Set(b *testing.B, store raft.StableStore) { - // Run Set a number of times - for n := 0; n < b.N; n++ { - if err := store.Set([]byte{byte(n)}, []byte("val")); err != nil { - b.Fatalf("err: %s", err) - } - } -} - -func Get(b *testing.B, store raft.StableStore) { - // Create some fake data - for i := 1; i < 10; i++ { - if err := store.Set([]byte{byte(i)}, []byte("val")); err != nil { - b.Fatalf("err: %s", err) - } - } - b.ResetTimer() - - // Run Get a number of times - for n := 0; n < b.N; n++ { - if _, err := store.Get([]byte{0x05}); err != nil { - b.Fatalf("err: %s", err) - } - } -} - -func SetUint64(b *testing.B, store raft.StableStore) { - // Run SetUint64 a number of times - for n := 0; n < b.N; n++ { - if err := store.SetUint64([]byte{byte(n)}, uint64(n)); err != nil { - b.Fatalf("err: %s", err) - } - } -} - -func GetUint64(b *testing.B, store raft.StableStore) { - // Create some fake data - for i := 0; i < 10; i++ { - if err := store.SetUint64([]byte{byte(i)}, uint64(i)); err != nil { - b.Fatalf("err: %s", err) - } - } - b.ResetTimer() - - // Run GetUint64 a number of times - for n := 0; n < b.N; n++ { - if _, err := store.Get([]byte{0x05}); err != nil { - b.Fatalf("err: %s", err) - } - } -} diff --git a/go/vt/orchestrator/external/raft/commands.go b/go/vt/orchestrator/external/raft/commands.go deleted file mode 100644 index 739775b3541..00000000000 --- a/go/vt/orchestrator/external/raft/commands.go +++ /dev/null @@ -1,84 +0,0 @@ -package raft - -// AppendEntriesRequest is the 
command used to append entries to the -// replicated log. -type AppendEntriesRequest struct { - // Provide the current term and leader - Term uint64 - Leader []byte - - // Provide the previous entries for integrity checking - PrevLogEntry uint64 - PrevLogTerm uint64 - - // New entries to commit - Entries []*Log - - // Commit index on the leader - LeaderCommitIndex uint64 -} - -// AppendEntriesResponse is the response returned from an -// AppendEntriesRequest. -type AppendEntriesResponse struct { - // Newer term if leader is out of date - Term uint64 - - // Last Log is a hint to help accelerate rebuilding slow nodes - LastLog uint64 - - // We may not succeed if we have a conflicting entry - Success bool - - // There are scenarios where this request didn't succeed - // but there's no need to wait/back-off the next attempt. - NoRetryBackoff bool -} - -// RequestVoteRequest is the command used by a candidate to ask a Raft peer -// for a vote in an election. -type RequestVoteRequest struct { - // Provide the term and our id - Term uint64 - Candidate []byte - - // Used to ensure safety - LastLogIndex uint64 - LastLogTerm uint64 -} - -// RequestVoteResponse is the response returned from a RequestVoteRequest. -type RequestVoteResponse struct { - // Newer term if leader is out of date - Term uint64 - - // Return the peers, so that a node can shutdown on removal - Peers []byte - - // Is the vote granted - Granted bool -} - -// InstallSnapshotRequest is the command sent to a Raft peer to bootstrap its -// log (and state machine) from a snapshot on another peer. -type InstallSnapshotRequest struct { - Term uint64 - Leader []byte - - // These are the last index/term included in the snapshot - LastLogIndex uint64 - LastLogTerm uint64 - - // Peer Set in the snapshot - Peers []byte - - // Size of the snapshot - Size int64 -} - -// InstallSnapshotResponse is the response returned from an -// InstallSnapshotRequest. 
-type InstallSnapshotResponse struct { - Term uint64 - Success bool -} diff --git a/go/vt/orchestrator/external/raft/config.go b/go/vt/orchestrator/external/raft/config.go deleted file mode 100644 index 2dbd5e601b9..00000000000 --- a/go/vt/orchestrator/external/raft/config.go +++ /dev/null @@ -1,136 +0,0 @@ -package raft - -import ( - "fmt" - "io" - "log" - "time" -) - -// Config provides any necessary configuration to -// the Raft server -type Config struct { - // HeartbeatTimeout specifies the time in follower state without - // a leader before we attempt an election. - HeartbeatTimeout time.Duration - - // ElectionTimeout specifies the time in candidate state without - // a leader before we attempt an election. - ElectionTimeout time.Duration - - // CommitTimeout controls the time without an Apply() operation - // before we heartbeat to ensure a timely commit. Due to random - // staggering, may be delayed as much as 2x this value. - CommitTimeout time.Duration - - // MaxAppendEntries controls the maximum number of append entries - // to send at once. We want to strike a balance between efficiency - // and avoiding waste if the follower is going to reject because of - // an inconsistent log. - MaxAppendEntries int - - // If we are a member of a cluster, and RemovePeer is invoked for the - // local node, then we forget all peers and transition into the follower state. - // If ShutdownOnRemove is is set, we additional shutdown Raft. Otherwise, - // we can become a leader of a cluster containing only this node. - ShutdownOnRemove bool - - // DisableBootstrapAfterElect is used to turn off EnableSingleNode - // after the node is elected. This is used to prevent self-election - // if the node is removed from the Raft cluster via RemovePeer. Setting - // it to false will keep the bootstrap mode, allowing the node to self-elect - // and potentially bootstrap a separate cluster. 
- DisableBootstrapAfterElect bool - - // TrailingLogs controls how many logs we leave after a snapshot. This is - // used so that we can quickly replay logs on a follower instead of being - // forced to send an entire snapshot. - TrailingLogs uint64 - - // SnapshotInterval controls how often we check if we should perform a snapshot. - // We randomly stagger between this value and 2x this value to avoid the entire - // cluster from performing a snapshot at once. - SnapshotInterval time.Duration - - // SnapshotThreshold controls how many outstanding logs there must be before - // we perform a snapshot. This is to prevent excessive snapshots when we can - // just replay a small set of logs. - SnapshotThreshold uint64 - - // EnableSingleNode allows for a single node mode of operation. This - // is false by default, which prevents a lone node from electing itself. - // leader. - EnableSingleNode bool - - // LeaderLeaseTimeout is used to control how long the "lease" lasts - // for being the leader without being able to contact a quorum - // of nodes. If we reach this interval without contact, we will - // step down as leader. - LeaderLeaseTimeout time.Duration - - // StartAsLeader forces Raft to start in the leader state. This should - // never be used except for testing purposes, as it can cause a split-brain. - StartAsLeader bool - - // NotifyCh is used to provide a channel that will be notified of leadership - // changes. Raft will block writing to this channel, so it should either be - // buffered or aggressively consumed. - NotifyCh chan<- bool - - // LogOutput is used as a sink for logs, unless Logger is specified. - // Defaults to os.Stderr. - LogOutput io.Writer - - // Logger is a user-provided logger. If nil, a logger writing to LogOutput - // is used. - Logger *log.Logger -} - -// DefaultConfig returns a Config with usable defaults. 
-func DefaultConfig() *Config { - return &Config{ - HeartbeatTimeout: 1000 * time.Millisecond, - ElectionTimeout: 1000 * time.Millisecond, - CommitTimeout: 50 * time.Millisecond, - MaxAppendEntries: 64, - ShutdownOnRemove: true, - DisableBootstrapAfterElect: true, - TrailingLogs: 10240, - SnapshotInterval: 120 * time.Second, - SnapshotThreshold: 8192, - EnableSingleNode: false, - LeaderLeaseTimeout: 500 * time.Millisecond, - } -} - -// ValidateConfig is used to validate a sane configuration -func ValidateConfig(config *Config) error { - if config.HeartbeatTimeout < 5*time.Millisecond { - return fmt.Errorf("Heartbeat timeout is too low") - } - if config.ElectionTimeout < 5*time.Millisecond { - return fmt.Errorf("Election timeout is too low") - } - if config.CommitTimeout < time.Millisecond { - return fmt.Errorf("Commit timeout is too low") - } - if config.MaxAppendEntries <= 0 { - return fmt.Errorf("MaxAppendEntries must be positive") - } - if config.MaxAppendEntries > 1024 { - return fmt.Errorf("MaxAppendEntries is too large") - } - if config.SnapshotInterval < 5*time.Millisecond { - return fmt.Errorf("Snapshot interval is too low") - } - if config.LeaderLeaseTimeout < 5*time.Millisecond { - return fmt.Errorf("Leader lease timeout is too low") - } - if config.LeaderLeaseTimeout > config.HeartbeatTimeout { - return fmt.Errorf("Leader lease timeout cannot be larger than heartbeat timeout") - } - if config.ElectionTimeout < config.HeartbeatTimeout { - return fmt.Errorf("Election timeout must be equal or greater than Heartbeat Timeout") - } - return nil -} diff --git a/go/vt/orchestrator/external/raft/discard_snapshot.go b/go/vt/orchestrator/external/raft/discard_snapshot.go deleted file mode 100644 index 1b4611d559f..00000000000 --- a/go/vt/orchestrator/external/raft/discard_snapshot.go +++ /dev/null @@ -1,48 +0,0 @@ -package raft - -import ( - "fmt" - "io" -) - -// DiscardSnapshotStore is used to successfully snapshot while -// always discarding the snapshot. 
This is useful for when the -// log should be truncated but no snapshot should be retained. -// This should never be used for production use, and is only -// suitable for testing. -type DiscardSnapshotStore struct{} - -type DiscardSnapshotSink struct{} - -// NewDiscardSnapshotStore is used to create a new DiscardSnapshotStore. -func NewDiscardSnapshotStore() *DiscardSnapshotStore { - return &DiscardSnapshotStore{} -} - -func (d *DiscardSnapshotStore) Create(index, term uint64, peers []byte) (SnapshotSink, error) { - return &DiscardSnapshotSink{}, nil -} - -func (d *DiscardSnapshotStore) List() ([]*SnapshotMeta, error) { - return nil, nil -} - -func (d *DiscardSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { - return nil, nil, fmt.Errorf("open is not supported") -} - -func (d *DiscardSnapshotSink) Write(b []byte) (int, error) { - return len(b), nil -} - -func (d *DiscardSnapshotSink) Close() error { - return nil -} - -func (d *DiscardSnapshotSink) ID() string { - return "discard" -} - -func (d *DiscardSnapshotSink) Cancel() error { - return nil -} diff --git a/go/vt/orchestrator/external/raft/discard_snapshot_test.go b/go/vt/orchestrator/external/raft/discard_snapshot_test.go deleted file mode 100644 index 5abedfe2c6d..00000000000 --- a/go/vt/orchestrator/external/raft/discard_snapshot_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package raft - -import "testing" - -func TestDiscardSnapshotStoreImpl(t *testing.T) { - var impl interface{} = &DiscardSnapshotStore{} - if _, ok := impl.(SnapshotStore); !ok { - t.Fatalf("DiscardSnapshotStore not a SnapshotStore") - } -} - -func TestDiscardSnapshotSinkImpl(t *testing.T) { - var impl interface{} = &DiscardSnapshotSink{} - if _, ok := impl.(SnapshotSink); !ok { - t.Fatalf("DiscardSnapshotSink not a SnapshotSink") - } -} diff --git a/go/vt/orchestrator/external/raft/file_snapshot.go b/go/vt/orchestrator/external/raft/file_snapshot.go deleted file mode 100644 index aa6d7e83297..00000000000 --- 
a/go/vt/orchestrator/external/raft/file_snapshot.go +++ /dev/null @@ -1,479 +0,0 @@ -package raft - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "hash" - "hash/crc64" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" - "sort" - "strings" - "time" -) - -const ( - testPath = "permTest" - snapPath = "snapshots" - metaFilePath = "meta.json" - stateFilePath = "state.bin" - tmpSuffix = ".tmp" -) - -// FileSnapshotStore implements the SnapshotStore interface and allows -// snapshots to be made on the local disk. -type FileSnapshotStore struct { - path string - retain int - logger *log.Logger -} - -type snapMetaSlice []*fileSnapshotMeta - -// FileSnapshotSink implements SnapshotSink with a file. -type FileSnapshotSink struct { - store *FileSnapshotStore - logger *log.Logger - dir string - meta fileSnapshotMeta - - stateFile *os.File - stateHash hash.Hash64 - buffered *bufio.Writer - - closed bool -} - -// fileSnapshotMeta is stored on disk. We also put a CRC -// on disk so that we can verify the snapshot. -type fileSnapshotMeta struct { - SnapshotMeta - CRC []byte -} - -// bufferedFile is returned when we open a snapshot. This way -// reads are buffered and the file still gets closed. -type bufferedFile struct { - bh *bufio.Reader - fh *os.File -} - -func (b *bufferedFile) Read(p []byte) (n int, err error) { - return b.bh.Read(p) -} - -func (b *bufferedFile) Close() error { - return b.fh.Close() -} - -// NewFileSnapshotStoreWithLogger creates a new FileSnapshotStore based -// on a base directory. The `retain` parameter controls how many -// snapshots are retained. Must be at least 1. 
-func NewFileSnapshotStoreWithLogger(base string, retain int, logger *log.Logger) (*FileSnapshotStore, error) { - if retain < 1 { - return nil, fmt.Errorf("must retain at least one snapshot") - } - if logger == nil { - logger = log.New(os.Stderr, "", log.LstdFlags) - } - - // Ensure our path exists - path := filepath.Join(base, snapPath) - if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { - return nil, fmt.Errorf("snapshot path not accessible: %v", err) - } - - // Setup the store - store := &FileSnapshotStore{ - path: path, - retain: retain, - logger: logger, - } - - // Do a permissions test - if err := store.testPermissions(); err != nil { - return nil, fmt.Errorf("permissions test failed: %v", err) - } - return store, nil -} - -// NewFileSnapshotStore creates a new FileSnapshotStore based -// on a base directory. The `retain` parameter controls how many -// snapshots are retained. Must be at least 1. -func NewFileSnapshotStore(base string, retain int, logOutput io.Writer) (*FileSnapshotStore, error) { - if logOutput == nil { - logOutput = os.Stderr - } - return NewFileSnapshotStoreWithLogger(base, retain, log.New(logOutput, "", log.LstdFlags)) -} - -// testPermissions tries to touch a file in our path to see if it works. -func (f *FileSnapshotStore) testPermissions() error { - path := filepath.Join(f.path, testPath) - fh, err := os.Create(path) - if err != nil { - return err - } - - if err = fh.Close(); err != nil { - return err - } - - if err = os.Remove(path); err != nil { - return err - } - return nil -} - -// snapshotName generates a name for the snapshot. 
-func snapshotName(term, index uint64) string { - now := time.Now() - msec := now.UnixNano() / int64(time.Millisecond) - return fmt.Sprintf("%d-%d-%d", term, index, msec) -} - -// Create is used to start a new snapshot -func (f *FileSnapshotStore) Create(index, term uint64, peers []byte) (SnapshotSink, error) { - // Create a new path - name := snapshotName(term, index) - path := filepath.Join(f.path, name+tmpSuffix) - f.logger.Printf("[INFO] snapshot: Creating new snapshot at %s", path) - - // Make the directory - if err := os.MkdirAll(path, 0755); err != nil { - f.logger.Printf("[ERR] snapshot: Failed to make snapshot directory: %v", err) - return nil, err - } - - // Create the sink - sink := &FileSnapshotSink{ - store: f, - logger: f.logger, - dir: path, - meta: fileSnapshotMeta{ - SnapshotMeta: SnapshotMeta{ - ID: name, - Index: index, - Term: term, - Peers: peers, - }, - CRC: nil, - }, - } - - // Write out the meta data - if err := sink.writeMeta(); err != nil { - f.logger.Printf("[ERR] snapshot: Failed to write metadata: %v", err) - return nil, err - } - - // Open the state file - statePath := filepath.Join(path, stateFilePath) - fh, err := os.Create(statePath) - if err != nil { - f.logger.Printf("[ERR] snapshot: Failed to create state file: %v", err) - return nil, err - } - sink.stateFile = fh - - // Create a CRC64 hash - sink.stateHash = crc64.New(crc64.MakeTable(crc64.ECMA)) - - // Wrap both the hash and file in a MultiWriter with buffering - multi := io.MultiWriter(sink.stateFile, sink.stateHash) - sink.buffered = bufio.NewWriter(multi) - - // Done - return sink, nil -} - -// List returns available snapshots in the store. 
-func (f *FileSnapshotStore) List() ([]*SnapshotMeta, error) { - // Get the eligible snapshots - snapshots, err := f.getSnapshots() - if err != nil { - f.logger.Printf("[ERR] snapshot: Failed to get snapshots: %v", err) - return nil, err - } - - var snapMeta []*SnapshotMeta - for _, meta := range snapshots { - snapMeta = append(snapMeta, &meta.SnapshotMeta) - if len(snapMeta) == f.retain { - break - } - } - return snapMeta, nil -} - -// getSnapshots returns all the known snapshots. -func (f *FileSnapshotStore) getSnapshots() ([]*fileSnapshotMeta, error) { - // Get the eligible snapshots - snapshots, err := ioutil.ReadDir(f.path) - if err != nil { - f.logger.Printf("[ERR] snapshot: Failed to scan snapshot dir: %v", err) - return nil, err - } - - // Populate the metadata - var snapMeta []*fileSnapshotMeta - for _, snap := range snapshots { - // Ignore any files - if !snap.IsDir() { - continue - } - - // Ignore any temporary snapshots - dirName := snap.Name() - if strings.HasSuffix(dirName, tmpSuffix) { - f.logger.Printf("[WARN] snapshot: Found temporary snapshot: %v", dirName) - continue - } - - // Try to read the meta data - meta, err := f.readMeta(dirName) - if err != nil { - f.logger.Printf("[WARN] snapshot: Failed to read metadata for %v: %v", dirName, err) - continue - } - - // Append, but only return up to the retain count - snapMeta = append(snapMeta, meta) - } - - // Sort the snapshot, reverse so we get new -> old - sort.Sort(sort.Reverse(snapMetaSlice(snapMeta))) - - return snapMeta, nil -} - -// readMeta is used to read the meta data for a given named backup -func (f *FileSnapshotStore) readMeta(name string) (*fileSnapshotMeta, error) { - // Open the meta file - metaPath := filepath.Join(f.path, name, metaFilePath) - fh, err := os.Open(metaPath) - if err != nil { - return nil, err - } - defer fh.Close() - - // Buffer the file IO - buffered := bufio.NewReader(fh) - - // Read in the JSON - meta := &fileSnapshotMeta{} - dec := json.NewDecoder(buffered) - if 
err := dec.Decode(meta); err != nil { - return nil, err - } - return meta, nil -} - -// Open takes a snapshot ID and returns a ReadCloser for that snapshot. -func (f *FileSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { - // Get the metadata - meta, err := f.readMeta(id) - if err != nil { - f.logger.Printf("[ERR] snapshot: Failed to get meta data to open snapshot: %v", err) - return nil, nil, err - } - - // Open the state file - statePath := filepath.Join(f.path, id, stateFilePath) - fh, err := os.Open(statePath) - if err != nil { - f.logger.Printf("[ERR] snapshot: Failed to open state file: %v", err) - return nil, nil, err - } - - // Create a CRC64 hash - stateHash := crc64.New(crc64.MakeTable(crc64.ECMA)) - - // Compute the hash - _, err = io.Copy(stateHash, fh) - if err != nil { - f.logger.Printf("[ERR] snapshot: Failed to read state file: %v", err) - fh.Close() - return nil, nil, err - } - - // Verify the hash - computed := stateHash.Sum(nil) - if !bytes.Equal(meta.CRC, computed) { - f.logger.Printf("[ERR] snapshot: CRC checksum failed (stored: %v computed: %v)", - meta.CRC, computed) - fh.Close() - return nil, nil, fmt.Errorf("CRC mismatch") - } - - // Seek to the start - if _, err := fh.Seek(0, 0); err != nil { - f.logger.Printf("[ERR] snapshot: State file seek failed: %v", err) - fh.Close() - return nil, nil, err - } - - // Return a buffered file - buffered := &bufferedFile{ - bh: bufio.NewReader(fh), - fh: fh, - } - - return &meta.SnapshotMeta, buffered, nil -} - -// ReapSnapshots reaps any snapshots beyond the retain count. 
-func (f *FileSnapshotStore) ReapSnapshots() error { - snapshots, err := f.getSnapshots() - if err != nil { - f.logger.Printf("[ERR] snapshot: Failed to get snapshots: %v", err) - return err - } - - for i := f.retain; i < len(snapshots); i++ { - path := filepath.Join(f.path, snapshots[i].ID) - f.logger.Printf("[INFO] snapshot: reaping snapshot %v", path) - if err := os.RemoveAll(path); err != nil { - f.logger.Printf("[ERR] snapshot: Failed to reap snapshot %v: %v", path, err) - return err - } - } - return nil -} - -// ID returns the ID of the snapshot, can be used with Open() -// after the snapshot is finalized. -func (s *FileSnapshotSink) ID() string { - return s.meta.ID -} - -// Write is used to append to the state file. We write to the -// buffered IO object to reduce the amount of context switches. -func (s *FileSnapshotSink) Write(b []byte) (int, error) { - return s.buffered.Write(b) -} - -// Close is used to indicate a successful end. -func (s *FileSnapshotSink) Close() error { - // Make sure close is idempotent - if s.closed { - return nil - } - s.closed = true - - // Close the open handles - if err := s.finalize(); err != nil { - s.logger.Printf("[ERR] snapshot: Failed to finalize snapshot: %v", err) - return err - } - - // Write out the meta data - if err := s.writeMeta(); err != nil { - s.logger.Printf("[ERR] snapshot: Failed to write metadata: %v", err) - return err - } - - // Move the directory into place - newPath := strings.TrimSuffix(s.dir, tmpSuffix) - if err := os.Rename(s.dir, newPath); err != nil { - s.logger.Printf("[ERR] snapshot: Failed to move snapshot into place: %v", err) - return err - } - - // Reap any old snapshots - if err := s.store.ReapSnapshots(); err != nil { - return err - } - - return nil -} - -// Cancel is used to indicate an unsuccessful end. 
-func (s *FileSnapshotSink) Cancel() error { - // Make sure close is idempotent - if s.closed { - return nil - } - s.closed = true - - // Close the open handles - if err := s.finalize(); err != nil { - s.logger.Printf("[ERR] snapshot: Failed to finalize snapshot: %v", err) - return err - } - - // Attempt to remove all artifacts - return os.RemoveAll(s.dir) -} - -// finalize is used to close all of our resources. -func (s *FileSnapshotSink) finalize() error { - // Flush any remaining data - if err := s.buffered.Flush(); err != nil { - return err - } - - // Get the file size - stat, statErr := s.stateFile.Stat() - - // Close the file - if err := s.stateFile.Close(); err != nil { - return err - } - - // Set the file size, check after we close - if statErr != nil { - return statErr - } - s.meta.Size = stat.Size() - - // Set the CRC - s.meta.CRC = s.stateHash.Sum(nil) - return nil -} - -// writeMeta is used to write out the metadata we have. -func (s *FileSnapshotSink) writeMeta() error { - // Open the meta file - metaPath := filepath.Join(s.dir, metaFilePath) - fh, err := os.Create(metaPath) - if err != nil { - return err - } - defer fh.Close() - - // Buffer the file IO - buffered := bufio.NewWriter(fh) - defer buffered.Flush() - - // Write out as JSON - enc := json.NewEncoder(buffered) - if err := enc.Encode(&s.meta); err != nil { - return err - } - return nil -} - -// Implement the sort interface for []*fileSnapshotMeta. 
-func (s snapMetaSlice) Len() int { - return len(s) -} - -func (s snapMetaSlice) Less(i, j int) bool { - if s[i].Term != s[j].Term { - return s[i].Term < s[j].Term - } - if s[i].Index != s[j].Index { - return s[i].Index < s[j].Index - } - return s[i].ID < s[j].ID -} - -func (s snapMetaSlice) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} diff --git a/go/vt/orchestrator/external/raft/fsm.go b/go/vt/orchestrator/external/raft/fsm.go deleted file mode 100644 index ae52e9a7c18..00000000000 --- a/go/vt/orchestrator/external/raft/fsm.go +++ /dev/null @@ -1,40 +0,0 @@ -package raft - -import ( - "io" -) - -// FSM provides an interface that can be implemented by -// clients to make use of the replicated log. -type FSM interface { - // Apply log is invoked once a log entry is committed. - // It returns a value which will be made available in the - // ApplyFuture returned by Raft.Apply method if that - // method was called on the same Raft node as the FSM. - Apply(*Log) interface{} - - // Snapshot is used to support log compaction. This call should - // return an FSMSnapshot which can be used to save a point-in-time - // snapshot of the FSM. Apply and Snapshot are not called in multiple - // threads, but Apply will be called concurrently with Persist. This means - // the FSM should be implemented in a fashion that allows for concurrent - // updates while a snapshot is happening. - Snapshot() (FSMSnapshot, error) - - // Restore is used to restore an FSM from a snapshot. It is not called - // concurrently with any other command. The FSM must discard all previous - // state. - Restore(io.ReadCloser) error -} - -// FSMSnapshot is returned by an FSM in response to a Snapshot -// It must be safe to invoke FSMSnapshot methods with concurrent -// calls to Apply. -type FSMSnapshot interface { - // Persist should dump all necessary state to the WriteCloser 'sink', - // and call sink.Close() when finished or call sink.Cancel() on error. 
- Persist(sink SnapshotSink) error - - // Release is invoked when we are finished with the snapshot. - Release() -} diff --git a/go/vt/orchestrator/external/raft/future.go b/go/vt/orchestrator/external/raft/future.go deleted file mode 100644 index b80f9090fa8..00000000000 --- a/go/vt/orchestrator/external/raft/future.go +++ /dev/null @@ -1,203 +0,0 @@ -package raft - -import ( - "sync" - "time" -) - -// Future is used to represent an action that may occur in the future. -type Future interface { - // Error blocks until the future arrives and then - // returns the error status of the future. - // This may be called any number of times - all - // calls will return the same value. - // Note that it is not OK to call this method - // twice concurrently on the same Future instance. - Error() error -} - -// ApplyFuture is used for Apply() and can returns the FSM response. -type ApplyFuture interface { - Future - - // Response returns the FSM response as returned - // by the FSM.Apply method. This must not be called - // until after the Error method has returned. - Response() interface{} - - // Index holds the index of the newly applied log entry. - // This must not be called - // until after the Error method has returned. - Index() uint64 -} - -// errorFuture is used to return a static error. -type errorFuture struct { - err error -} - -func (e errorFuture) Error() error { - return e.err -} - -func (e errorFuture) Response() interface{} { - return nil -} - -func (e errorFuture) Index() uint64 { - return 0 -} - -// deferError can be embedded to allow a future -// to provide an error in the future. -type deferError struct { - err error - errCh chan error - responded bool -} - -func (d *deferError) init() { - d.errCh = make(chan error, 1) -} - -func (d *deferError) Error() error { - if d.err != nil { - // Note that when we've received a nil error, this - // won't trigger, but the channel is closed after - // send so we'll still return nil below. 
- return d.err - } - if d.errCh == nil { - panic("waiting for response on nil channel") - } - d.err = <-d.errCh - return d.err -} - -func (d *deferError) respond(err error) { - if d.errCh == nil { - return - } - if d.responded { - return - } - d.errCh <- err - close(d.errCh) - d.responded = true -} - -// logFuture is used to apply a log entry and waits until -// the log is considered committed. -type logFuture struct { - deferError - log Log - policy quorumPolicy - response interface{} - dispatch time.Time -} - -func (l *logFuture) Response() interface{} { - return l.response -} - -func (l *logFuture) Index() uint64 { - return l.log.Index -} - -type peerFuture struct { - deferError - peers []string -} - -type shutdownFuture struct { - raft *Raft -} - -func (s *shutdownFuture) Error() error { - if s.raft == nil { - return nil - } - s.raft.waitShutdown() - if closeable, ok := s.raft.trans.(WithClose); ok { - closeable.Close() - } - return nil -} - -// snapshotFuture is used for waiting on a snapshot to complete. -type snapshotFuture struct { - deferError -} - -// reqSnapshotFuture is used for requesting a snapshot start. -// It is only used internally. -type reqSnapshotFuture struct { - deferError - - // snapshot details provided by the FSM runner before responding - index uint64 - term uint64 - peers []string - snapshot FSMSnapshot -} - -// restoreFuture is used for requesting an FSM to perform a -// snapshot restore. Used internally only. -type restoreFuture struct { - deferError - ID string -} - -// verifyFuture is used to verify the current node is still -// the leader. This is to prevent a stale read. -type verifyFuture struct { - deferError - notifyCh chan *verifyFuture - quorumSize int - votes int - voteLock sync.Mutex -} - -// vote is used to respond to a verifyFuture. -// This may block when responding on the notifyCh. 
-func (v *verifyFuture) vote(leader bool) { - v.voteLock.Lock() - defer v.voteLock.Unlock() - - // Guard against having notified already - if v.notifyCh == nil { - return - } - - if leader { - v.votes++ - if v.votes >= v.quorumSize { - v.notifyCh <- v - v.notifyCh = nil - } - } else { - v.notifyCh <- v - v.notifyCh = nil - } -} - -// appendFuture is used for waiting on a pipelined append -// entries RPC. -type appendFuture struct { - deferError - start time.Time - args *AppendEntriesRequest - resp *AppendEntriesResponse -} - -func (a *appendFuture) Start() time.Time { - return a.start -} - -func (a *appendFuture) Request() *AppendEntriesRequest { - return a.args -} - -func (a *appendFuture) Response() *AppendEntriesResponse { - return a.resp -} diff --git a/go/vt/orchestrator/external/raft/future_test.go b/go/vt/orchestrator/external/raft/future_test.go deleted file mode 100644 index 8bb958329fb..00000000000 --- a/go/vt/orchestrator/external/raft/future_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package raft - -import ( - "errors" - "testing" -) - -func TestDeferFutureSuccess(t *testing.T) { - var f deferError - f.init() - f.respond(nil) - if err := f.Error(); err != nil { - t.Fatalf("unexpected error result; got %#v want nil", err) - } - if err := f.Error(); err != nil { - t.Fatalf("unexpected error result; got %#v want nil", err) - } -} - -func TestDeferFutureError(t *testing.T) { - want := errors.New("x") - var f deferError - f.init() - f.respond(want) - if got := f.Error(); got != want { - t.Fatalf("unexpected error result; got %#v want %#v", got, want) - } - if got := f.Error(); got != want { - t.Fatalf("unexpected error result; got %#v want %#v", got, want) - } -} - -func TestDeferFutureConcurrent(t *testing.T) { - // Food for the race detector. 
- want := errors.New("x") - var f deferError - f.init() - go f.respond(want) - if got := f.Error(); got != want { - t.Errorf("unexpected error result; got %#v want %#v", got, want) - } -} diff --git a/go/vt/orchestrator/external/raft/inflight.go b/go/vt/orchestrator/external/raft/inflight.go deleted file mode 100644 index 7014ff50394..00000000000 --- a/go/vt/orchestrator/external/raft/inflight.go +++ /dev/null @@ -1,213 +0,0 @@ -package raft - -import ( - "container/list" - "sync" -) - -// QuorumPolicy allows individual logFutures to have different -// commitment rules while still using the inflight mechanism. -type quorumPolicy interface { - // Checks if a commit from a given peer is enough to - // satisfy the commitment rules - Commit() bool - - // Checks if a commit is committed - IsCommitted() bool -} - -// MajorityQuorum is used by Apply transactions and requires -// a simple majority of nodes. -type majorityQuorum struct { - count int - votesNeeded int -} - -func newMajorityQuorum(clusterSize int) *majorityQuorum { - votesNeeded := (clusterSize / 2) + 1 - return &majorityQuorum{count: 0, votesNeeded: votesNeeded} -} - -func (m *majorityQuorum) Commit() bool { - m.count++ - return m.count >= m.votesNeeded -} - -func (m *majorityQuorum) IsCommitted() bool { - return m.count >= m.votesNeeded -} - -// Inflight is used to track operations that are still in-flight. -type inflight struct { - sync.Mutex - committed *list.List - commitCh chan struct{} - minCommit uint64 - maxCommit uint64 - operations map[uint64]*logFuture - stopCh chan struct{} -} - -// NewInflight returns an inflight struct that notifies -// the provided channel when logs are finished committing. -func newInflight(commitCh chan struct{}) *inflight { - return &inflight{ - committed: list.New(), - commitCh: commitCh, - minCommit: 0, - maxCommit: 0, - operations: make(map[uint64]*logFuture), - stopCh: make(chan struct{}), - } -} - -// Start is used to mark a logFuture as being inflight. 
It -// also commits the entry, as it is assumed the leader is -// starting. -func (i *inflight) Start(l *logFuture) { - i.Lock() - defer i.Unlock() - i.start(l) -} - -// StartAll is used to mark a list of logFuture's as being -// inflight. It also commits each entry as the leader is -// assumed to be starting. -func (i *inflight) StartAll(logs []*logFuture) { - i.Lock() - defer i.Unlock() - for _, l := range logs { - i.start(l) - } -} - -// start is used to mark a single entry as inflight, -// must be invoked with the lock held. -func (i *inflight) start(l *logFuture) { - idx := l.log.Index - i.operations[idx] = l - - if idx > i.maxCommit { - i.maxCommit = idx - } - if i.minCommit == 0 { - i.minCommit = idx - } - i.commit(idx) -} - -// Cancel is used to cancel all in-flight operations. -// This is done when the leader steps down, and all futures -// are sent the given error. -func (i *inflight) Cancel(err error) { - // Close the channel first to unblock any pending commits - close(i.stopCh) - - // Lock after close to avoid deadlock - i.Lock() - defer i.Unlock() - - // Respond to all inflight operations - for _, op := range i.operations { - op.respond(err) - } - - // Clear all the committed but not processed - for e := i.committed.Front(); e != nil; e = e.Next() { - e.Value.(*logFuture).respond(err) - } - - // Clear the map - i.operations = make(map[uint64]*logFuture) - - // Clear the list of committed - i.committed = list.New() - - // Close the commmitCh - close(i.commitCh) - - // Reset indexes - i.minCommit = 0 - i.maxCommit = 0 -} - -// Committed returns all the committed operations in order. -func (i *inflight) Committed() (l *list.List) { - i.Lock() - l, i.committed = i.committed, list.New() - i.Unlock() - return l -} - -// Commit is used by leader replication routines to indicate that -// a follower was finished committing a log to disk. 
-func (i *inflight) Commit(index uint64) { - i.Lock() - defer i.Unlock() - i.commit(index) -} - -// CommitRange is used to commit a range of indexes inclusively. -// It is optimized to avoid commits for indexes that are not tracked. -func (i *inflight) CommitRange(minIndex, maxIndex uint64) { - i.Lock() - defer i.Unlock() - - // Update the minimum index - minIndex = max(i.minCommit, minIndex) - - // Commit each index - for idx := minIndex; idx <= maxIndex; idx++ { - i.commit(idx) - } -} - -// commit is used to commit a single index. Must be called with the lock held. -func (i *inflight) commit(index uint64) { - op, ok := i.operations[index] - if !ok { - // Ignore if not in the map, as it may be committed already - return - } - - // Check if we've satisfied the commit - if !op.policy.Commit() { - return - } - - // Cannot commit if this is not the minimum inflight. This can happen - // if the quorum size changes, meaning a previous commit requires a larger - // quorum that this commit. We MUST block until the previous log is committed, - // otherwise logs will be applied out of order. 
- if index != i.minCommit { - return - } - -NOTIFY: - // Add the operation to the committed list - i.committed.PushBack(op) - - // Stop tracking since it is committed - delete(i.operations, index) - - // Update the indexes - if index == i.maxCommit { - i.minCommit = 0 - i.maxCommit = 0 - - } else { - i.minCommit++ - } - - // Check if the next in-flight operation is ready - if i.minCommit != 0 { - op = i.operations[i.minCommit] - if op.policy.IsCommitted() { - index = i.minCommit - goto NOTIFY - } - } - - // Async notify of ready operations - asyncNotifyCh(i.commitCh) -} diff --git a/go/vt/orchestrator/external/raft/inflight_test.go b/go/vt/orchestrator/external/raft/inflight_test.go deleted file mode 100644 index a9f57d6ead7..00000000000 --- a/go/vt/orchestrator/external/raft/inflight_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package raft - -import ( - "fmt" - "testing" -) - -func TestInflight_StartCommit(t *testing.T) { - commitCh := make(chan struct{}, 1) - in := newInflight(commitCh) - - // Commit a transaction as being in flight - l := &logFuture{log: Log{Index: 1}} - l.policy = newMajorityQuorum(5) - in.Start(l) - - // Commit 3 times - in.Commit(1) - if in.Committed().Len() != 0 { - t.Fatalf("should not be commited") - } - - in.Commit(1) - if in.Committed().Len() != 1 { - t.Fatalf("should be commited") - } - - // Already committed but should work anyways - in.Commit(1) -} - -func TestInflight_Cancel(t *testing.T) { - commitCh := make(chan struct{}, 1) - in := newInflight(commitCh) - - // Commit a transaction as being in flight - l := &logFuture{ - log: Log{Index: 1}, - } - l.init() - l.policy = newMajorityQuorum(3) - in.Start(l) - - // Cancel with an error - err := fmt.Errorf("error 1") - in.Cancel(err) - - // Should get an error return - if l.Error() != err { - t.Fatalf("expected error") - } -} - -func TestInflight_StartAll(t *testing.T) { - commitCh := make(chan struct{}, 1) - in := newInflight(commitCh) - - // Commit a few transaction as being in flight - l1 
:= &logFuture{log: Log{Index: 2}} - l1.policy = newMajorityQuorum(5) - l2 := &logFuture{log: Log{Index: 3}} - l2.policy = newMajorityQuorum(5) - l3 := &logFuture{log: Log{Index: 4}} - l3.policy = newMajorityQuorum(5) - - // Start all the entries - in.StartAll([]*logFuture{l1, l2, l3}) - - // Commit ranges - in.CommitRange(1, 5) - in.CommitRange(1, 4) - in.CommitRange(1, 10) - - // Should get 3 back - if in.Committed().Len() != 3 { - t.Fatalf("expected all 3 to commit") - } -} - -func TestInflight_CommitRange(t *testing.T) { - commitCh := make(chan struct{}, 1) - in := newInflight(commitCh) - - // Commit a few transaction as being in flight - l1 := &logFuture{log: Log{Index: 2}} - l1.policy = newMajorityQuorum(5) - in.Start(l1) - - l2 := &logFuture{log: Log{Index: 3}} - l2.policy = newMajorityQuorum(5) - in.Start(l2) - - l3 := &logFuture{log: Log{Index: 4}} - l3.policy = newMajorityQuorum(5) - in.Start(l3) - - // Commit ranges - in.CommitRange(1, 5) - in.CommitRange(1, 4) - in.CommitRange(1, 10) - - // Should get 3 back - if in.Committed().Len() != 3 { - t.Fatalf("expected all 3 to commit") - } -} - -// Should panic if we commit non contiguously! -func TestInflight_NonContiguous(t *testing.T) { - commitCh := make(chan struct{}, 1) - in := newInflight(commitCh) - - // Commit a few transaction as being in flight - l1 := &logFuture{log: Log{Index: 2}} - l1.policy = newMajorityQuorum(5) - in.Start(l1) - - l2 := &logFuture{log: Log{Index: 3}} - l2.policy = newMajorityQuorum(5) - in.Start(l2) - - in.Commit(3) - in.Commit(3) - in.Commit(3) // panic! - - if in.Committed().Len() != 0 { - t.Fatalf("should not commit") - } - - in.Commit(2) - in.Commit(2) - in.Commit(2) // panic! 
- - committed := in.Committed() - if committed.Len() != 2 { - t.Fatalf("should commit both") - } - - current := committed.Front() - l := current.Value.(*logFuture) - if l.log.Index != 2 { - t.Fatalf("bad: %v", *l) - } - - current = current.Next() - l = current.Value.(*logFuture) - if l.log.Index != 3 { - t.Fatalf("bad: %v", *l) - } -} diff --git a/go/vt/orchestrator/external/raft/inmem_store.go b/go/vt/orchestrator/external/raft/inmem_store.go deleted file mode 100644 index 6e4dfd020f7..00000000000 --- a/go/vt/orchestrator/external/raft/inmem_store.go +++ /dev/null @@ -1,116 +0,0 @@ -package raft - -import ( - "sync" -) - -// InmemStore implements the LogStore and StableStore interface. -// It should NOT EVER be used for production. It is used only for -// unit tests. Use the MDBStore implementation instead. -type InmemStore struct { - l sync.RWMutex - lowIndex uint64 - highIndex uint64 - logs map[uint64]*Log - kv map[string][]byte - kvInt map[string]uint64 -} - -// NewInmemStore returns a new in-memory backend. Do not ever -// use for production. Only for testing. -func NewInmemStore() *InmemStore { - i := &InmemStore{ - logs: make(map[uint64]*Log), - kv: make(map[string][]byte), - kvInt: make(map[string]uint64), - } - return i -} - -// FirstIndex implements the LogStore interface. -func (i *InmemStore) FirstIndex() (uint64, error) { - i.l.RLock() - defer i.l.RUnlock() - return i.lowIndex, nil -} - -// LastIndex implements the LogStore interface. -func (i *InmemStore) LastIndex() (uint64, error) { - i.l.RLock() - defer i.l.RUnlock() - return i.highIndex, nil -} - -// GetLog implements the LogStore interface. -func (i *InmemStore) GetLog(index uint64, log *Log) error { - i.l.RLock() - defer i.l.RUnlock() - l, ok := i.logs[index] - if !ok { - return ErrLogNotFound - } - *log = *l - return nil -} - -// StoreLog implements the LogStore interface. 
-func (i *InmemStore) StoreLog(log *Log) error { - return i.StoreLogs([]*Log{log}) -} - -// StoreLogs implements the LogStore interface. -func (i *InmemStore) StoreLogs(logs []*Log) error { - i.l.Lock() - defer i.l.Unlock() - for _, l := range logs { - i.logs[l.Index] = l - if i.lowIndex == 0 { - i.lowIndex = l.Index - } - if l.Index > i.highIndex { - i.highIndex = l.Index - } - } - return nil -} - -// DeleteRange implements the LogStore interface. -func (i *InmemStore) DeleteRange(min, max uint64) error { - i.l.Lock() - defer i.l.Unlock() - for j := min; j <= max; j++ { - delete(i.logs, j) - } - i.lowIndex = max + 1 - return nil -} - -// Set implements the StableStore interface. -func (i *InmemStore) Set(key []byte, val []byte) error { - i.l.Lock() - defer i.l.Unlock() - i.kv[string(key)] = val - return nil -} - -// Get implements the StableStore interface. -func (i *InmemStore) Get(key []byte) ([]byte, error) { - i.l.RLock() - defer i.l.RUnlock() - return i.kv[string(key)], nil -} - -// SetUint64 implements the StableStore interface. -func (i *InmemStore) SetUint64(key []byte, val uint64) error { - i.l.Lock() - defer i.l.Unlock() - i.kvInt[string(key)] = val - return nil -} - -// GetUint64 implements the StableStore interface. -func (i *InmemStore) GetUint64(key []byte) (uint64, error) { - i.l.RLock() - defer i.l.RUnlock() - return i.kvInt[string(key)], nil -} diff --git a/go/vt/orchestrator/external/raft/inmem_transport.go b/go/vt/orchestrator/external/raft/inmem_transport.go deleted file mode 100644 index 2d5f3190690..00000000000 --- a/go/vt/orchestrator/external/raft/inmem_transport.go +++ /dev/null @@ -1,324 +0,0 @@ -package raft - -import ( - "fmt" - "io" - "sync" - "time" -) - -// NewInmemAddr returns a new in-memory addr with -// a randomly generate UUID as the ID. -func NewInmemAddr() string { - return generateUUID() -} - -// inmemPipeline is used to pipeline requests for the in-mem transport. 
-type inmemPipeline struct { - trans *InmemTransport - peer *InmemTransport - peerAddr string - - doneCh chan AppendFuture - inprogressCh chan *inmemPipelineInflight - - shutdown bool - shutdownCh chan struct{} - shutdownLock sync.Mutex -} - -type inmemPipelineInflight struct { - future *appendFuture - respCh <-chan RPCResponse -} - -// InmemTransport Implements the Transport interface, to allow Raft to be -// tested in-memory without going over a network. -type InmemTransport struct { - sync.RWMutex - consumerCh chan RPC - localAddr string - peers map[string]*InmemTransport - pipelines []*inmemPipeline - timeout time.Duration -} - -// NewInmemTransport is used to initialize a new transport -// and generates a random local address if none is specified -func NewInmemTransport(addr string) (string, *InmemTransport) { - if addr == "" { - addr = NewInmemAddr() - } - trans := &InmemTransport{ - consumerCh: make(chan RPC, 16), - localAddr: addr, - peers: make(map[string]*InmemTransport), - timeout: 50 * time.Millisecond, - } - return addr, trans -} - -// SetHeartbeatHandler is used to set optional fast-path for -// heartbeats, not supported for this transport. -func (i *InmemTransport) SetHeartbeatHandler(cb func(RPC)) { -} - -// Consumer implements the Transport interface. -func (i *InmemTransport) Consumer() <-chan RPC { - return i.consumerCh -} - -// LocalAddr implements the Transport interface. -func (i *InmemTransport) LocalAddr() string { - return i.localAddr -} - -// AppendEntriesPipeline returns an interface that can be used to pipeline -// AppendEntries requests. 
-func (i *InmemTransport) AppendEntriesPipeline(target string) (AppendPipeline, error) { - i.RLock() - peer, ok := i.peers[target] - i.RUnlock() - if !ok { - return nil, fmt.Errorf("failed to connect to peer: %v", target) - } - pipeline := newInmemPipeline(i, peer, target) - i.Lock() - i.pipelines = append(i.pipelines, pipeline) - i.Unlock() - return pipeline, nil -} - -// AppendEntries implements the Transport interface. -func (i *InmemTransport) AppendEntries(target string, args *AppendEntriesRequest, resp *AppendEntriesResponse) error { - rpcResp, err := i.makeRPC(target, args, nil, i.timeout) - if err != nil { - return err - } - - // Copy the result back - out := rpcResp.Response.(*AppendEntriesResponse) - *resp = *out - return nil -} - -// RequestVote implements the Transport interface. -func (i *InmemTransport) RequestVote(target string, args *RequestVoteRequest, resp *RequestVoteResponse) error { - rpcResp, err := i.makeRPC(target, args, nil, i.timeout) - if err != nil { - return err - } - - // Copy the result back - out := rpcResp.Response.(*RequestVoteResponse) - *resp = *out - return nil -} - -// InstallSnapshot implements the Transport interface. 
-func (i *InmemTransport) InstallSnapshot(target string, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error { - rpcResp, err := i.makeRPC(target, args, data, 10*i.timeout) - if err != nil { - return err - } - - // Copy the result back - out := rpcResp.Response.(*InstallSnapshotResponse) - *resp = *out - return nil -} - -func (i *InmemTransport) makeRPC(target string, args interface{}, r io.Reader, timeout time.Duration) (rpcResp RPCResponse, err error) { - i.RLock() - peer, ok := i.peers[target] - i.RUnlock() - - if !ok { - err = fmt.Errorf("failed to connect to peer: %v", target) - return - } - - // Send the RPC over - respCh := make(chan RPCResponse) - peer.consumerCh <- RPC{ - Command: args, - Reader: r, - RespChan: respCh, - } - - // Wait for a response - select { - case rpcResp = <-respCh: - if rpcResp.Error != nil { - err = rpcResp.Error - } - case <-time.After(timeout): - err = fmt.Errorf("command timed out") - } - return -} - -// EncodePeer implements the Transport interface. It uses the UUID as the -// address directly. -func (i *InmemTransport) EncodePeer(p string) []byte { - return []byte(p) -} - -// DecodePeer implements the Transport interface. It wraps the UUID in an -// InmemAddr. -func (i *InmemTransport) DecodePeer(buf []byte) string { - return string(buf) -} - -// Connect is used to connect this transport to another transport for -// a given peer name. This allows for local routing. -func (i *InmemTransport) Connect(peer string, t Transport) { - trans := t.(*InmemTransport) - i.Lock() - defer i.Unlock() - i.peers[peer] = trans -} - -// Disconnect is used to remove the ability to route to a given peer. 
-func (i *InmemTransport) Disconnect(peer string) { - i.Lock() - defer i.Unlock() - delete(i.peers, peer) - - // Disconnect any pipelines - n := len(i.pipelines) - for idx := 0; idx < n; idx++ { - if i.pipelines[idx].peerAddr == peer { - i.pipelines[idx].Close() - i.pipelines[idx], i.pipelines[n-1] = i.pipelines[n-1], nil - idx-- - n-- - } - } - i.pipelines = i.pipelines[:n] -} - -// DisconnectAll is used to remove all routes to peers. -func (i *InmemTransport) DisconnectAll() { - i.Lock() - defer i.Unlock() - i.peers = make(map[string]*InmemTransport) - - // Handle pipelines - for _, pipeline := range i.pipelines { - pipeline.Close() - } - i.pipelines = nil -} - -// Close is used to permanently disable the transport -func (i *InmemTransport) Close() error { - i.DisconnectAll() - return nil -} - -func newInmemPipeline(trans *InmemTransport, peer *InmemTransport, addr string) *inmemPipeline { - i := &inmemPipeline{ - trans: trans, - peer: peer, - peerAddr: addr, - doneCh: make(chan AppendFuture, 16), - inprogressCh: make(chan *inmemPipelineInflight, 16), - shutdownCh: make(chan struct{}), - } - go i.decodeResponses() - return i -} - -func (i *inmemPipeline) decodeResponses() { - timeout := i.trans.timeout - for { - select { - case inp := <-i.inprogressCh: - var timeoutCh <-chan time.Time - if timeout > 0 { - timeoutCh = time.After(timeout) - } - - select { - case rpcResp := <-inp.respCh: - // Copy the result back - *inp.future.resp = *rpcResp.Response.(*AppendEntriesResponse) - inp.future.respond(rpcResp.Error) - - select { - case i.doneCh <- inp.future: - case <-i.shutdownCh: - return - } - - case <-timeoutCh: - inp.future.respond(fmt.Errorf("command timed out")) - select { - case i.doneCh <- inp.future: - case <-i.shutdownCh: - return - } - - case <-i.shutdownCh: - return - } - case <-i.shutdownCh: - return - } - } -} - -func (i *inmemPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) { - // Create a new future - 
future := &appendFuture{ - start: time.Now(), - args: args, - resp: resp, - } - future.init() - - // Handle a timeout - var timeout <-chan time.Time - if i.trans.timeout > 0 { - timeout = time.After(i.trans.timeout) - } - - // Send the RPC over - respCh := make(chan RPCResponse, 1) - rpc := RPC{ - Command: args, - RespChan: respCh, - } - select { - case i.peer.consumerCh <- rpc: - case <-timeout: - return nil, fmt.Errorf("command enqueue timeout") - case <-i.shutdownCh: - return nil, ErrPipelineShutdown - } - - // Send to be decoded - select { - case i.inprogressCh <- &inmemPipelineInflight{future, respCh}: - return future, nil - case <-i.shutdownCh: - return nil, ErrPipelineShutdown - } -} - -func (i *inmemPipeline) Consumer() <-chan AppendFuture { - return i.doneCh -} - -func (i *inmemPipeline) Close() error { - i.shutdownLock.Lock() - defer i.shutdownLock.Unlock() - if i.shutdown { - return nil - } - - i.shutdown = true - close(i.shutdownCh) - return nil -} diff --git a/go/vt/orchestrator/external/raft/inmem_transport_test.go b/go/vt/orchestrator/external/raft/inmem_transport_test.go deleted file mode 100644 index 82c95348a58..00000000000 --- a/go/vt/orchestrator/external/raft/inmem_transport_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package raft - -import ( - "testing" -) - -func TestInmemTransportImpl(t *testing.T) { - var inm interface{} = &InmemTransport{} - if _, ok := inm.(Transport); !ok { - t.Fatalf("InmemTransport is not a Transport") - } - if _, ok := inm.(LoopbackTransport); !ok { - t.Fatalf("InmemTransport is not a Loopback Transport") - } - if _, ok := inm.(WithPeers); !ok { - t.Fatalf("InmemTransport is not a WithPeers Transport") - } -} diff --git a/go/vt/orchestrator/external/raft/log.go b/go/vt/orchestrator/external/raft/log.go deleted file mode 100644 index 9399154ab44..00000000000 --- a/go/vt/orchestrator/external/raft/log.go +++ /dev/null @@ -1,67 +0,0 @@ -package raft - -// LogType describes various types of log entries. 
-type LogType uint8 - -const ( - // LogCommand is applied to a user FSM. - LogCommand LogType = iota - - // LogNoop is used to assert leadership. - LogNoop - - // LogAddPeer is used to add a new peer. - LogAddPeer - - // LogRemovePeer is used to remove an existing peer. - LogRemovePeer - - // LogBarrier is used to ensure all preceding operations have been - // applied to the FSM. It is similar to LogNoop, but instead of returning - // once committed, it only returns once the FSM manager acks it. Otherwise - // it is possible there are operations committed but not yet applied to - // the FSM. - LogBarrier -) - -// Log entries are replicated to all members of the Raft cluster -// and form the heart of the replicated state machine. -type Log struct { - // Index holds the index of the log entry. - Index uint64 - - // Term holds the election term of the log entry. - Term uint64 - - // Type holds the type of the log entry. - Type LogType - - // Data holds the log entry's type-specific data. - Data []byte - - // peer is not exported since it is not transmitted, only used - // internally to construct the Data field. - peer string -} - -// LogStore is used to provide an interface for storing -// and retrieving logs in a durable fashion. -type LogStore interface { - // FirstIndex returns the first index written. 0 for no entries. - FirstIndex() (uint64, error) - - // LastIndex returns the last index written. 0 for no entries. - LastIndex() (uint64, error) - - // GetLog gets a log entry at a given index. - GetLog(index uint64, log *Log) error - - // StoreLog stores a log entry. - StoreLog(log *Log) error - - // StoreLogs stores multiple log entries. - StoreLogs(logs []*Log) error - - // DeleteRange deletes a range of log entries. The range is inclusive. 
- DeleteRange(min, max uint64) error -} diff --git a/go/vt/orchestrator/external/raft/log_cache.go b/go/vt/orchestrator/external/raft/log_cache.go deleted file mode 100644 index 952e98c2282..00000000000 --- a/go/vt/orchestrator/external/raft/log_cache.go +++ /dev/null @@ -1,79 +0,0 @@ -package raft - -import ( - "fmt" - "sync" -) - -// LogCache wraps any LogStore implementation to provide an -// in-memory ring buffer. This is used to cache access to -// the recently written entries. For implementations that do not -// cache themselves, this can provide a substantial boost by -// avoiding disk I/O on recent entries. -type LogCache struct { - store LogStore - - cache []*Log - l sync.RWMutex -} - -// NewLogCache is used to create a new LogCache with the -// given capacity and backend store. -func NewLogCache(capacity int, store LogStore) (*LogCache, error) { - if capacity <= 0 { - return nil, fmt.Errorf("capacity must be positive") - } - c := &LogCache{ - store: store, - cache: make([]*Log, capacity), - } - return c, nil -} - -func (c *LogCache) GetLog(idx uint64, log *Log) error { - // Check the buffer for an entry - c.l.RLock() - cached := c.cache[idx%uint64(len(c.cache))] - c.l.RUnlock() - - // Check if entry is valid - if cached != nil && cached.Index == idx { - *log = *cached - return nil - } - - // Forward request on cache miss - return c.store.GetLog(idx, log) -} - -func (c *LogCache) StoreLog(log *Log) error { - return c.StoreLogs([]*Log{log}) -} - -func (c *LogCache) StoreLogs(logs []*Log) error { - // Insert the logs into the ring buffer - c.l.Lock() - for _, l := range logs { - c.cache[l.Index%uint64(len(c.cache))] = l - } - c.l.Unlock() - - return c.store.StoreLogs(logs) -} - -func (c *LogCache) FirstIndex() (uint64, error) { - return c.store.FirstIndex() -} - -func (c *LogCache) LastIndex() (uint64, error) { - return c.store.LastIndex() -} - -func (c *LogCache) DeleteRange(min, max uint64) error { - // Invalidate the cache on deletes - c.l.Lock() - 
c.cache = make([]*Log, len(c.cache)) - c.l.Unlock() - - return c.store.DeleteRange(min, max) -} diff --git a/go/vt/orchestrator/external/raft/log_cache_test.go b/go/vt/orchestrator/external/raft/log_cache_test.go deleted file mode 100644 index 7569e78ee70..00000000000 --- a/go/vt/orchestrator/external/raft/log_cache_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package raft - -import ( - "testing" -) - -func TestLogCache(t *testing.T) { - store := NewInmemStore() - c, _ := NewLogCache(16, store) - - // Insert into the in-mem store - for i := 0; i < 32; i++ { - log := &Log{Index: uint64(i) + 1} - store.StoreLog(log) - } - - // Check the indexes - if idx, _ := c.FirstIndex(); idx != 1 { - t.Fatalf("bad: %d", idx) - } - if idx, _ := c.LastIndex(); idx != 32 { - t.Fatalf("bad: %d", idx) - } - - // Try get log with a miss - var out Log - err := c.GetLog(1, &out) - if err != nil { - t.Fatalf("err: %v", err) - } - if out.Index != 1 { - t.Fatalf("bad: %#v", out) - } - - // Store logs - l1 := &Log{Index: 33} - l2 := &Log{Index: 34} - err = c.StoreLogs([]*Log{l1, l2}) - if err != nil { - t.Fatalf("err: %v", err) - } - - if idx, _ := c.LastIndex(); idx != 34 { - t.Fatalf("bad: %d", idx) - } - - // Check that it wrote-through - err = store.GetLog(33, &out) - if err != nil { - t.Fatalf("err: %v", err) - } - err = store.GetLog(34, &out) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Delete in the backend - err = store.DeleteRange(33, 34) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should be in the ring buffer - err = c.GetLog(33, &out) - if err != nil { - t.Fatalf("err: %v", err) - } - err = c.GetLog(34, &out) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Purge the ring buffer - err = c.DeleteRange(33, 34) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should not be in the ring buffer - err = c.GetLog(33, &out) - if err != ErrLogNotFound { - t.Fatalf("err: %v", err) - } - err = c.GetLog(34, &out) - if err != ErrLogNotFound { - t.Fatalf("err: 
%v", err) - } -} diff --git a/go/vt/orchestrator/external/raft/net_transport.go b/go/vt/orchestrator/external/raft/net_transport.go deleted file mode 100644 index 7f3e74450f7..00000000000 --- a/go/vt/orchestrator/external/raft/net_transport.go +++ /dev/null @@ -1,623 +0,0 @@ -package raft - -import ( - "bufio" - "errors" - "fmt" - "io" - "log" - "net" - "os" - "sync" - "time" - - "github.com/hashicorp/go-msgpack/codec" -) - -const ( - rpcAppendEntries uint8 = iota - rpcRequestVote - rpcInstallSnapshot - - // DefaultTimeoutScale is the default TimeoutScale in a NetworkTransport. - DefaultTimeoutScale = 256 * 1024 // 256KB - - // rpcMaxPipeline controls the maximum number of outstanding - // AppendEntries RPC calls. - rpcMaxPipeline = 128 -) - -var ( - // ErrTransportShutdown is returned when operations on a transport are - // invoked after it's been terminated. - ErrTransportShutdown = errors.New("transport shutdown") - - // ErrPipelineShutdown is returned when the pipeline is closed. - ErrPipelineShutdown = errors.New("append pipeline closed") -) - -/* - -NetworkTransport provides a network based transport that can be -used to communicate with Raft on remote machines. It requires -an underlying stream layer to provide a stream abstraction, which can -be simple TCP, TLS, etc. - -This transport is very simple and lightweight. Each RPC request is -framed by sending a byte that indicates the message type, followed -by the MsgPack encoded request. - -The response is an error string followed by the response object, -both are encoded using MsgPack. - -InstallSnapshot is special, in that after the RPC request we stream -the entire state. That socket is not re-used as the connection state -is not known if there is an error. 
- -*/ -type NetworkTransport struct { - connPool map[string][]*netConn - connPoolLock sync.Mutex - - consumeCh chan RPC - - heartbeatFn func(RPC) - heartbeatFnLock sync.Mutex - - logger *log.Logger - - maxPool int - - shutdown bool - shutdownCh chan struct{} - shutdownLock sync.Mutex - - stream StreamLayer - - timeout time.Duration - TimeoutScale int -} - -// StreamLayer is used with the NetworkTransport to provide -// the low level stream abstraction. -type StreamLayer interface { - net.Listener - - // Dial is used to create a new outgoing connection - Dial(address string, timeout time.Duration) (net.Conn, error) -} - -type netConn struct { - target string - conn net.Conn - r *bufio.Reader - w *bufio.Writer - dec *codec.Decoder - enc *codec.Encoder -} - -func (n *netConn) Release() error { - return n.conn.Close() -} - -type netPipeline struct { - conn *netConn - trans *NetworkTransport - - doneCh chan AppendFuture - inprogressCh chan *appendFuture - - shutdown bool - shutdownCh chan struct{} - shutdownLock sync.Mutex -} - -// NewNetworkTransport creates a new network transport with the given dialer -// and listener. The maxPool controls how many connections we will pool. The -// timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply -// the timeout by (SnapshotSize / TimeoutScale). -func NewNetworkTransport( - stream StreamLayer, - maxPool int, - timeout time.Duration, - logOutput io.Writer, -) *NetworkTransport { - if logOutput == nil { - logOutput = os.Stderr - } - return NewNetworkTransportWithLogger(stream, maxPool, timeout, log.New(logOutput, "", log.LstdFlags)) -} - -// NewNetworkTransportWithLogger creates a new network transport with the given dialer -// and listener. The maxPool controls how many connections we will pool. The -// timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply -// the timeout by (SnapshotSize / TimeoutScale). 
-func NewNetworkTransportWithLogger( - stream StreamLayer, - maxPool int, - timeout time.Duration, - logger *log.Logger, -) *NetworkTransport { - if logger == nil { - logger = log.New(os.Stderr, "", log.LstdFlags) - } - trans := &NetworkTransport{ - connPool: make(map[string][]*netConn), - consumeCh: make(chan RPC), - logger: logger, - maxPool: maxPool, - shutdownCh: make(chan struct{}), - stream: stream, - timeout: timeout, - TimeoutScale: DefaultTimeoutScale, - } - go trans.listen() - return trans -} - -// SetHeartbeatHandler is used to setup a heartbeat handler -// as a fast-pass. This is to avoid head-of-line blocking from -// disk IO. -func (n *NetworkTransport) SetHeartbeatHandler(cb func(rpc RPC)) { - n.heartbeatFnLock.Lock() - defer n.heartbeatFnLock.Unlock() - n.heartbeatFn = cb -} - -// Close is used to stop the network transport. -func (n *NetworkTransport) Close() error { - n.shutdownLock.Lock() - defer n.shutdownLock.Unlock() - - if !n.shutdown { - close(n.shutdownCh) - n.stream.Close() - n.shutdown = true - } - return nil -} - -// Consumer implements the Transport interface. -func (n *NetworkTransport) Consumer() <-chan RPC { - return n.consumeCh -} - -// LocalAddr implements the Transport interface. -func (n *NetworkTransport) LocalAddr() string { - return n.stream.Addr().String() -} - -// IsShutdown is used to check if the transport is shutdown. -func (n *NetworkTransport) IsShutdown() bool { - select { - case <-n.shutdownCh: - return true - default: - return false - } -} - -// getExistingConn is used to grab a pooled connection. -func (n *NetworkTransport) getPooledConn(target string) *netConn { - n.connPoolLock.Lock() - defer n.connPoolLock.Unlock() - - conns, ok := n.connPool[target] - if !ok || len(conns) == 0 { - return nil - } - - var conn *netConn - num := len(conns) - conn, conns[num-1] = conns[num-1], nil - n.connPool[target] = conns[:num-1] - return conn -} - -// getConn is used to get a connection from the pool. 
-func (n *NetworkTransport) getConn(target string) (*netConn, error) { - // Check for a pooled conn - if conn := n.getPooledConn(target); conn != nil { - return conn, nil - } - - // Dial a new connection - conn, err := n.stream.Dial(target, n.timeout) - if err != nil { - return nil, err - } - - // Wrap the conn - netConn := &netConn{ - target: target, - conn: conn, - r: bufio.NewReader(conn), - w: bufio.NewWriter(conn), - } - - // Setup encoder/decoders - netConn.dec = codec.NewDecoder(netConn.r, &codec.MsgpackHandle{}) - netConn.enc = codec.NewEncoder(netConn.w, &codec.MsgpackHandle{}) - - // Done - return netConn, nil -} - -// returnConn returns a connection back to the pool. -func (n *NetworkTransport) returnConn(conn *netConn) { - n.connPoolLock.Lock() - defer n.connPoolLock.Unlock() - - key := conn.target - conns := n.connPool[key] - - if !n.IsShutdown() && len(conns) < n.maxPool { - n.connPool[key] = append(conns, conn) - } else { - conn.Release() - } -} - -// AppendEntriesPipeline returns an interface that can be used to pipeline -// AppendEntries requests. -func (n *NetworkTransport) AppendEntriesPipeline(target string) (AppendPipeline, error) { - // Get a connection - conn, err := n.getConn(target) - if err != nil { - return nil, err - } - - // Create the pipeline - return newNetPipeline(n, conn), nil -} - -// AppendEntries implements the Transport interface. -func (n *NetworkTransport) AppendEntries(target string, args *AppendEntriesRequest, resp *AppendEntriesResponse) error { - return n.genericRPC(target, rpcAppendEntries, args, resp) -} - -// RequestVote implements the Transport interface. -func (n *NetworkTransport) RequestVote(target string, args *RequestVoteRequest, resp *RequestVoteResponse) error { - return n.genericRPC(target, rpcRequestVote, args, resp) -} - -// genericRPC handles a simple request/response RPC. 
-func (n *NetworkTransport) genericRPC(target string, rpcType uint8, args interface{}, resp interface{}) error { - // Get a conn - conn, err := n.getConn(target) - if err != nil { - return err - } - - // Set a deadline - if n.timeout > 0 { - conn.conn.SetDeadline(time.Now().Add(n.timeout)) - } - - // Send the RPC - if err = sendRPC(conn, rpcType, args); err != nil { - return err - } - - // Decode the response - canReturn, err := decodeResponse(conn, resp) - if canReturn { - n.returnConn(conn) - } - return err -} - -// InstallSnapshot implements the Transport interface. -func (n *NetworkTransport) InstallSnapshot(target string, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error { - // Get a conn, always close for InstallSnapshot - conn, err := n.getConn(target) - if err != nil { - return err - } - defer conn.Release() - - // Set a deadline, scaled by request size - if n.timeout > 0 { - timeout := n.timeout * time.Duration(args.Size/int64(n.TimeoutScale)) - if timeout < n.timeout { - timeout = n.timeout - } - conn.conn.SetDeadline(time.Now().Add(timeout)) - } - - // Send the RPC - if err = sendRPC(conn, rpcInstallSnapshot, args); err != nil { - return err - } - - // Stream the state - if _, err = io.Copy(conn.w, data); err != nil { - return err - } - - // Flush - if err = conn.w.Flush(); err != nil { - return err - } - - // Decode the response, do not return conn - _, err = decodeResponse(conn, resp) - - return err -} - -// EncodePeer implements the Transport interface. -func (n *NetworkTransport) EncodePeer(p string) []byte { - return []byte(p) -} - -// DecodePeer implements the Transport interface. -func (n *NetworkTransport) DecodePeer(buf []byte) string { - return string(buf) -} - -// listen is used to handling incoming connections. 
-func (n *NetworkTransport) listen() { - for { - // Accept incoming connections - conn, err := n.stream.Accept() - if err != nil { - if n.IsShutdown() { - return - } - n.logger.Printf("[ERR] raft-net: Failed to accept connection: %v", err) - continue - } - n.logger.Printf("[DEBUG] raft-net: %v accepted connection from: %v", n.LocalAddr(), conn.RemoteAddr()) - - // Handle the connection in dedicated routine - go n.handleConn(conn) - } -} - -// handleConn is used to handle an inbound connection for its lifespan. -func (n *NetworkTransport) handleConn(conn net.Conn) { - defer conn.Close() - r := bufio.NewReader(conn) - w := bufio.NewWriter(conn) - dec := codec.NewDecoder(r, &codec.MsgpackHandle{}) - enc := codec.NewEncoder(w, &codec.MsgpackHandle{}) - - for { - if err := n.handleCommand(r, dec, enc); err != nil { - if err != io.EOF { - n.logger.Printf("[ERR] raft-net: Failed to decode incoming command: %v", err) - } - return - } - if err := w.Flush(); err != nil { - n.logger.Printf("[ERR] raft-net: Failed to flush response: %v", err) - return - } - } -} - -// handleCommand is used to decode and dispatch a single command. 
-func (n *NetworkTransport) handleCommand(r *bufio.Reader, dec *codec.Decoder, enc *codec.Encoder) error { - // Get the rpc type - rpcType, err := r.ReadByte() - if err != nil { - return err - } - - // Create the RPC object - respCh := make(chan RPCResponse, 1) - rpc := RPC{ - RespChan: respCh, - } - - // Decode the command - isHeartbeat := false - switch rpcType { - case rpcAppendEntries: - var req AppendEntriesRequest - if err := dec.Decode(&req); err != nil { - return err - } - rpc.Command = &req - - // Check if this is a heartbeat - if req.Term != 0 && req.Leader != nil && - req.PrevLogEntry == 0 && req.PrevLogTerm == 0 && - len(req.Entries) == 0 && req.LeaderCommitIndex == 0 { - isHeartbeat = true - } - - case rpcRequestVote: - var req RequestVoteRequest - if err := dec.Decode(&req); err != nil { - return err - } - rpc.Command = &req - - case rpcInstallSnapshot: - var req InstallSnapshotRequest - if err := dec.Decode(&req); err != nil { - return err - } - rpc.Command = &req - rpc.Reader = io.LimitReader(r, req.Size) - - default: - return fmt.Errorf("unknown rpc type %d", rpcType) - } - - // Check for heartbeat fast-path - if isHeartbeat { - n.heartbeatFnLock.Lock() - fn := n.heartbeatFn - n.heartbeatFnLock.Unlock() - if fn != nil { - fn(rpc) - goto RESP - } - } - - // Dispatch the RPC - select { - case n.consumeCh <- rpc: - case <-n.shutdownCh: - return ErrTransportShutdown - } - - // Wait for response -RESP: - select { - case resp := <-respCh: - // Send the error first - respErr := "" - if resp.Error != nil { - respErr = resp.Error.Error() - } - if err := enc.Encode(respErr); err != nil { - return err - } - - // Send the response - if err := enc.Encode(resp.Response); err != nil { - return err - } - case <-n.shutdownCh: - return ErrTransportShutdown - } - return nil -} - -// decodeResponse is used to decode an RPC response and reports whether -// the connection can be reused. 
-func decodeResponse(conn *netConn, resp interface{}) (bool, error) { - // Decode the error if any - var rpcError string - if err := conn.dec.Decode(&rpcError); err != nil { - conn.Release() - return false, err - } - - // Decode the response - if err := conn.dec.Decode(resp); err != nil { - conn.Release() - return false, err - } - - // Format an error if any - if rpcError != "" { - return true, fmt.Errorf(rpcError) - } - return true, nil -} - -// sendRPC is used to encode and send the RPC. -func sendRPC(conn *netConn, rpcType uint8, args interface{}) error { - // Write the request type - if err := conn.w.WriteByte(rpcType); err != nil { - conn.Release() - return err - } - - // Send the request - if err := conn.enc.Encode(args); err != nil { - conn.Release() - return err - } - - // Flush - if err := conn.w.Flush(); err != nil { - conn.Release() - return err - } - return nil -} - -// newNetPipeline is used to construct a netPipeline from a given -// transport and connection. -func newNetPipeline(trans *NetworkTransport, conn *netConn) *netPipeline { - n := &netPipeline{ - conn: conn, - trans: trans, - doneCh: make(chan AppendFuture, rpcMaxPipeline), - inprogressCh: make(chan *appendFuture, rpcMaxPipeline), - shutdownCh: make(chan struct{}), - } - go n.decodeResponses() - return n -} - -// decodeResponses is a long running routine that decodes the responses -// sent on the connection. -func (n *netPipeline) decodeResponses() { - timeout := n.trans.timeout - for { - select { - case future := <-n.inprogressCh: - if timeout > 0 { - n.conn.conn.SetReadDeadline(time.Now().Add(timeout)) - } - - _, err := decodeResponse(n.conn, future.resp) - future.respond(err) - select { - case n.doneCh <- future: - case <-n.shutdownCh: - return - } - case <-n.shutdownCh: - return - } - } -} - -// AppendEntries is used to pipeline a new append entries request. 
-func (n *netPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) { - // Create a new future - future := &appendFuture{ - start: time.Now(), - args: args, - resp: resp, - } - future.init() - - // Add a send timeout - if timeout := n.trans.timeout; timeout > 0 { - n.conn.conn.SetWriteDeadline(time.Now().Add(timeout)) - } - - // Send the RPC - if err := sendRPC(n.conn, rpcAppendEntries, future.args); err != nil { - return nil, err - } - - // Hand-off for decoding, this can also cause back-pressure - // to prevent too many inflight requests - select { - case n.inprogressCh <- future: - return future, nil - case <-n.shutdownCh: - return nil, ErrPipelineShutdown - } -} - -// Consumer returns a channel that can be used to consume complete futures. -func (n *netPipeline) Consumer() <-chan AppendFuture { - return n.doneCh -} - -// Closed is used to shutdown the pipeline connection. -func (n *netPipeline) Close() error { - n.shutdownLock.Lock() - defer n.shutdownLock.Unlock() - if n.shutdown { - return nil - } - - // Release the connection - n.conn.Release() - - n.shutdown = true - close(n.shutdownCh) - return nil -} diff --git a/go/vt/orchestrator/external/raft/observer.go b/go/vt/orchestrator/external/raft/observer.go deleted file mode 100644 index d41f765a2f4..00000000000 --- a/go/vt/orchestrator/external/raft/observer.go +++ /dev/null @@ -1,120 +0,0 @@ -package raft - -import ( - "sync/atomic" -) - -// Observation is sent along the given channel to observers when an event occurs. -type Observation struct { - // Raft holds the Raft instance generating the observation. - Raft *Raft - // Data holds observation-specific data. Possible types are - // *RequestVoteRequest, RaftState and LeaderObservation. - Data interface{} -} - -// LeaderObservation is used in Observation.Data when leadership changes. 
-type LeaderObservation struct { - Leader string -} - -// nextObserverId is used to provide a unique ID for each observer to aid in -// deregistration. -var nextObserverID uint64 - -// FilterFn is a function that can be registered in order to filter observations. -// The function reports whether the observation should be included - if -// it returns false, the observation will be filtered out. -type FilterFn func(o *Observation) bool - -// Observer describes what to do with a given observation. -type Observer struct { - // channel receives observations. - channel chan Observation - - // blocking, if true, will cause Raft to block when sending an observation - // to this observer. This should generally be set to false. - blocking bool - - // filter will be called to determine if an observation should be sent to - // the channel. - filter FilterFn - - // id is the ID of this observer in the Raft map. - id uint64 - - // numObserved and numDropped are performance counters for this observer. - numObserved uint64 - numDropped uint64 -} - -// NewObserver creates a new observer that can be registered -// to make observations on a Raft instance. Observations -// will be sent on the given channel if they satisfy the -// given filter. -// -// If blocking is true, the observer will block when it can't -// send on the channel, otherwise it may discard events. -func NewObserver(channel chan Observation, blocking bool, filter FilterFn) *Observer { - return &Observer{ - channel: channel, - blocking: blocking, - filter: filter, - id: atomic.AddUint64(&nextObserverID, 1), - } -} - -// GetNumObserved returns the number of observations. -func (or *Observer) GetNumObserved() uint64 { - return atomic.LoadUint64(&or.numObserved) -} - -// GetNumDropped returns the number of dropped observations due to blocking. -func (or *Observer) GetNumDropped() uint64 { - return atomic.LoadUint64(&or.numDropped) -} - -// RegisterObserver registers a new observer. 
-func (r *Raft) RegisterObserver(or *Observer) { - r.observersLock.Lock() - defer r.observersLock.Unlock() - r.observers[or.id] = or -} - -// DeregisterObserver deregisters an observer. -func (r *Raft) DeregisterObserver(or *Observer) { - r.observersLock.Lock() - defer r.observersLock.Unlock() - delete(r.observers, or.id) -} - -// observe sends an observation to every observer. -func (r *Raft) observe(o interface{}) { - // In general observers should not block. But in any case this isn't - // disastrous as we only hold a read lock, which merely prevents - // registration / deregistration of observers. - r.observersLock.RLock() - defer r.observersLock.RUnlock() - for _, or := range r.observers { - // It's wasteful to do this in the loop, but for the common case - // where there are no observers we won't create any objects. - ob := Observation{Raft: r, Data: o} - if or.filter != nil && !or.filter(&ob) { - continue - } - if or.channel == nil { - continue - } - if or.blocking { - or.channel <- ob - atomic.AddUint64(&or.numObserved, 1) - } else { - select { - case or.channel <- ob: - atomic.AddUint64(&or.numObserved, 1) - default: - atomic.AddUint64(&or.numDropped, 1) - } - } - } -} diff --git a/go/vt/orchestrator/external/raft/peer.go b/go/vt/orchestrator/external/raft/peer.go deleted file mode 100644 index 6f3bcf85645..00000000000 --- a/go/vt/orchestrator/external/raft/peer.go +++ /dev/null @@ -1,122 +0,0 @@ -package raft - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - "sync" -) - -const ( - jsonPeerPath = "peers.json" -) - -// PeerStore provides an interface for persistent storage and -// retrieval of peers. We use a separate interface than StableStore -// since the peers may need to be edited by a human operator. For example, -// in a two node cluster, the failure of either node requires human intervention -// since consensus is impossible. -type PeerStore interface { - // Peers returns the list of known peers. 
- Peers() ([]string, error) - - // SetPeers sets the list of known peers. This is invoked when a peer is - // added or removed. - SetPeers([]string) error -} - -// StaticPeers is used to provide a static list of peers. -type StaticPeers struct { - StaticPeers []string - l sync.Mutex -} - -// Peers implements the PeerStore interface. -func (s *StaticPeers) Peers() ([]string, error) { - s.l.Lock() - peers := s.StaticPeers - s.l.Unlock() - return peers, nil -} - -// SetPeers implements the PeerStore interface. -func (s *StaticPeers) SetPeers(p []string) error { - s.l.Lock() - s.StaticPeers = p - s.l.Unlock() - return nil -} - -// JSONPeers is used to provide peer persistence on disk in the form -// of a JSON file. This allows human operators to manipulate the file. -type JSONPeers struct { - l sync.Mutex - path string - trans Transport -} - -// NewJSONPeers creates a new JSONPeers store. Requires a transport -// to handle the serialization of network addresses. -func NewJSONPeers(base string, trans Transport) *JSONPeers { - path := filepath.Join(base, jsonPeerPath) - store := &JSONPeers{ - path: path, - trans: trans, - } - return store -} - -// Peers implements the PeerStore interface. -func (j *JSONPeers) Peers() ([]string, error) { - j.l.Lock() - defer j.l.Unlock() - - // Read the file - buf, err := ioutil.ReadFile(j.path) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - - // Check for no peers - if len(buf) == 0 { - return nil, nil - } - - // Decode the peers - var peerSet []string - dec := json.NewDecoder(bytes.NewReader(buf)) - if err := dec.Decode(&peerSet); err != nil { - return nil, err - } - - // Deserialize each peer - var peers []string - for _, p := range peerSet { - peers = append(peers, j.trans.DecodePeer([]byte(p))) - } - return peers, nil -} - -// SetPeers implements the PeerStore interface. 
-func (j *JSONPeers) SetPeers(peers []string) error { - j.l.Lock() - defer j.l.Unlock() - - // Encode each peer - var peerSet []string - for _, p := range peers { - peerSet = append(peerSet, string(j.trans.EncodePeer(p))) - } - - // Convert to JSON - var buf bytes.Buffer - enc := json.NewEncoder(&buf) - if err := enc.Encode(peerSet); err != nil { - return err - } - - // Write out as JSON - return ioutil.WriteFile(j.path, buf.Bytes(), 0755) -} diff --git a/go/vt/orchestrator/external/raft/peer_test.go b/go/vt/orchestrator/external/raft/peer_test.go deleted file mode 100644 index ff835e026ad..00000000000 --- a/go/vt/orchestrator/external/raft/peer_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package raft - -import ( - "io/ioutil" - "os" - "testing" -) - -func TestJSONPeers(t *testing.T) { - // Create a test dir - dir, err := ioutil.TempDir("", "raft") - if err != nil { - t.Fatalf("err: %v ", err) - } - defer os.RemoveAll(dir) - - // Create the store - _, trans := NewInmemTransport("") - store := NewJSONPeers(dir, trans) - - // Try a read, should get nothing - peers, err := store.Peers() - if err != nil { - t.Fatalf("err: %v", err) - } - if len(peers) != 0 { - t.Fatalf("peers: %v", peers) - } - - // Initialize some peers - newPeers := []string{NewInmemAddr(), NewInmemAddr(), NewInmemAddr()} - if err := store.SetPeers(newPeers); err != nil { - t.Fatalf("err: %v", err) - } - - // Try a read, should peers - peers, err = store.Peers() - if err != nil { - t.Fatalf("err: %v", err) - } - if len(peers) != 3 { - t.Fatalf("peers: %v", peers) - } -} diff --git a/go/vt/orchestrator/external/raft/raft.go b/go/vt/orchestrator/external/raft/raft.go deleted file mode 100644 index 3d16452a81d..00000000000 --- a/go/vt/orchestrator/external/raft/raft.go +++ /dev/null @@ -1,1955 +0,0 @@ -package raft - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "strconv" - "sync" - "sync/atomic" - "time" - - "github.com/armon/go-metrics" -) - -const ( - 
minCheckInterval = 10 * time.Millisecond -) - -var ( - keyCurrentTerm = []byte("CurrentTerm") - keyLastVoteTerm = []byte("LastVoteTerm") - keyLastVoteCand = []byte("LastVoteCand") - - // ErrLeader is returned when an operation can't be completed on a - // leader node. - ErrLeader = errors.New("node is the leader") - - // ErrNotLeader is returned when an operation can't be completed on a - // follower or candidate node. - ErrNotLeader = errors.New("node is not the leader") - - // ErrLeadershipLost is returned when a leader fails to commit a log entry - // because it's been deposed in the process. - ErrLeadershipLost = errors.New("leadership lost while committing log") - - // ErrRaftShutdown is returned when operations are requested against an - // inactive Raft. - ErrRaftShutdown = errors.New("raft is already shutdown") - - // ErrEnqueueTimeout is returned when a command fails due to a timeout. - ErrEnqueueTimeout = errors.New("timed out enqueuing operation") - - // ErrKnownPeer is returned when trying to add a peer to the configuration - // that already exists. - ErrKnownPeer = errors.New("peer already known") - - // ErrUnknownPeer is returned when trying to remove a peer from the - // configuration that doesn't exist. - ErrUnknownPeer = errors.New("peer is unknown") - - // ErrNothingNewToSnapshot is returned when trying to create a snapshot - // but there's nothing new commited to the FSM since we started. - ErrNothingNewToSnapshot = errors.New("Nothing new to snapshot") -) - -// commitTuple is used to send an index that was committed, -// with an optional associated future that should be invoked. -type commitTuple struct { - log *Log - future *logFuture -} - -// leaderState is state that is used while we are a leader. -type leaderState struct { - commitCh chan struct{} - inflight *inflight - replState map[string]*followerReplication - notify map[*verifyFuture]struct{} - stepDown chan struct{} -} - -// Raft implements a Raft node. 
-type Raft struct { - raftState - - // applyCh is used to async send logs to the main thread to - // be committed and applied to the FSM. - applyCh chan *logFuture - - // Configuration provided at Raft initialization - conf *Config - - // FSM is the client state machine to apply commands to - fsm FSM - - // fsmCommitCh is used to trigger async application of logs to the fsm - fsmCommitCh chan commitTuple - - // fsmRestoreCh is used to trigger a restore from snapshot - fsmRestoreCh chan *restoreFuture - - // fsmSnapshotCh is used to trigger a new snapshot being taken - fsmSnapshotCh chan *reqSnapshotFuture - - // lastContact is the last time we had contact from the - // leader node. This can be used to gauge staleness. - lastContact time.Time - lastContactLock sync.RWMutex - - // Leader is the current cluster leader - leader string - leaderLock sync.RWMutex - - // leaderCh is used to notify of leadership changes - leaderCh chan bool - - // leaderState used only while state is leader - leaderState leaderState - - // Stores our local addr - localAddr string - - // Used for our logging - logger *log.Logger - - // LogStore provides durable storage for logs - logs LogStore - - // Track our known peers - peerCh chan *peerFuture - peers []string - peerStore PeerStore - - // RPC chan comes from the transport layer - rpcCh <-chan RPC - - // Shutdown channel to exit, protected to prevent concurrent exits - shutdown bool - shutdownCh chan struct{} - shutdownLock sync.Mutex - - // snapshots is used to store and retrieve snapshots - snapshots SnapshotStore - - // snapshotCh is used for user triggered snapshots - snapshotCh chan *snapshotFuture - - // stable is a StableStore implementation for durable state - // It provides stable storage for many fields in raftState - stable StableStore - - // The transport layer we use - trans Transport - - // verifyCh is used to async send verify futures to the main thread - // to verify we are still the leader - verifyCh chan *verifyFuture - 
- // List of observers and the mutex that protects them. The observers list - // is indexed by an artificial ID which is used for deregistration. - observersLock sync.RWMutex - observers map[uint64]*Observer - - // suspendLeadership is a hint for Raft to not become a leader. This flag is bound by time, and can be used - // to control the identity of the leader in a (stable) group - suspendLeadership int64 -} - -// NewRaft is used to construct a new Raft node. It takes a configuration, as well -// as implementations of various interfaces that are required. If we have any old state, -// such as snapshots, logs, peers, etc, all those will be restored when creating the -// Raft node. -func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps SnapshotStore, - peerStore PeerStore, trans Transport) (*Raft, error) { - // Validate the configuration - if err := ValidateConfig(conf); err != nil { - return nil, err - } - - // Ensure we have a LogOutput - var logger *log.Logger - if conf.Logger != nil { - logger = conf.Logger - } else { - if conf.LogOutput == nil { - conf.LogOutput = os.Stderr - } - logger = log.New(conf.LogOutput, "", log.LstdFlags) - } - - // Try to restore the current term - currentTerm, err := stable.GetUint64(keyCurrentTerm) - if err != nil && err.Error() != "not found" { - return nil, fmt.Errorf("failed to load current term: %v", err) - } - - // Read the last log value - lastIdx, err := logs.LastIndex() - if err != nil { - return nil, fmt.Errorf("failed to find last log: %v", err) - } - - // Get the log - var lastLog Log - if lastIdx > 0 { - if err = logs.GetLog(lastIdx, &lastLog); err != nil { - return nil, fmt.Errorf("failed to get last log: %v", err) - } - } - - // Construct the list of peers that excludes us - localAddr := trans.LocalAddr() - peers, err := peerStore.Peers() - if err != nil { - return nil, fmt.Errorf("failed to get list of peers: %v", err) - } - peers = ExcludePeer(peers, localAddr) - - // Create Raft struct - r := 
&Raft{ - applyCh: make(chan *logFuture), - conf: conf, - fsm: fsm, - fsmCommitCh: make(chan commitTuple, 128), - fsmRestoreCh: make(chan *restoreFuture), - fsmSnapshotCh: make(chan *reqSnapshotFuture), - leaderCh: make(chan bool), - localAddr: localAddr, - logger: logger, - logs: logs, - peerCh: make(chan *peerFuture), - peers: peers, - peerStore: peerStore, - rpcCh: trans.Consumer(), - snapshots: snaps, - snapshotCh: make(chan *snapshotFuture), - shutdownCh: make(chan struct{}), - stable: stable, - trans: trans, - verifyCh: make(chan *verifyFuture, 64), - observers: make(map[uint64]*Observer), - } - - // Initialize as a follower - r.setState(Follower) - - // Start as leader if specified. This should only be used - // for testing purposes. - if conf.StartAsLeader { - r.setState(Leader) - r.setLeader(r.localAddr) - } - - // Restore the current term and the last log - r.setCurrentTerm(currentTerm) - r.setLastLog(lastLog.Index, lastLog.Term) - - // Attempt to restore a snapshot if there are any - if err := r.restoreSnapshot(); err != nil { - return nil, err - } - - // Setup a heartbeat fast-path to avoid head-of-line - // blocking where possible. It MUST be safe for this - // to be called concurrently with a blocking RPC. - trans.SetHeartbeatHandler(r.processHeartbeat) - - // Start the background work - r.goFunc(r.run) - r.goFunc(r.runFSM) - r.goFunc(r.runSnapshots) - return r, nil -} - -// Leader is used to return the current leader of the cluster. -// It may return empty string if there is no current leader -// or the leader is unknown. 
-func (r *Raft) Leader() string { - r.leaderLock.RLock() - leader := r.leader - r.leaderLock.RUnlock() - return leader -} - -// setLeader is used to modify the current leader of the cluster -func (r *Raft) setLeader(leader string) { - r.leaderLock.Lock() - oldLeader := r.leader - r.leader = leader - r.leaderLock.Unlock() - if oldLeader != leader { - r.observe(LeaderObservation{Leader: leader}) - } -} - -// Apply is used to apply a command to the FSM in a highly consistent -// manner. This returns a future that can be used to wait on the application. -// An optional timeout can be provided to limit the amount of time we wait -// for the command to be started. This must be run on the leader or it -// will fail. -func (r *Raft) Apply(cmd []byte, timeout time.Duration) ApplyFuture { - metrics.IncrCounter([]string{"raft", "apply"}, 1) - var timer <-chan time.Time - if timeout > 0 { - timer = time.After(timeout) - } - - // Create a log future, no index or term yet - logFuture := &logFuture{ - log: Log{ - Type: LogCommand, - Data: cmd, - }, - } - logFuture.init() - - select { - case <-timer: - return errorFuture{ErrEnqueueTimeout} - case <-r.shutdownCh: - return errorFuture{ErrRaftShutdown} - case r.applyCh <- logFuture: - return logFuture - } -} - -// Barrier is used to issue a command that blocks until all preceeding -// operations have been applied to the FSM. It can be used to ensure the -// FSM reflects all queued writes. An optional timeout can be provided to -// limit the amount of time we wait for the command to be started. This -// must be run on the leader or it will fail. 
-func (r *Raft) Barrier(timeout time.Duration) Future { - metrics.IncrCounter([]string{"raft", "barrier"}, 1) - var timer <-chan time.Time - if timeout > 0 { - timer = time.After(timeout) - } - - // Create a log future, no index or term yet - logFuture := &logFuture{ - log: Log{ - Type: LogBarrier, - }, - } - logFuture.init() - - select { - case <-timer: - return errorFuture{ErrEnqueueTimeout} - case <-r.shutdownCh: - return errorFuture{ErrRaftShutdown} - case r.applyCh <- logFuture: - return logFuture - } -} - -// VerifyLeader is used to ensure the current node is still -// the leader. This can be done to prevent stale reads when a -// new leader has potentially been elected. -func (r *Raft) VerifyLeader() Future { - metrics.IncrCounter([]string{"raft", "verify_leader"}, 1) - verifyFuture := &verifyFuture{} - verifyFuture.init() - select { - case <-r.shutdownCh: - return errorFuture{ErrRaftShutdown} - case r.verifyCh <- verifyFuture: - return verifyFuture - } -} - -// AddPeer is used to add a new peer into the cluster. This must be -// run on the leader or it will fail. -func (r *Raft) AddPeer(peer string) Future { - logFuture := &logFuture{ - log: Log{ - Type: LogAddPeer, - peer: peer, - }, - } - logFuture.init() - select { - case r.applyCh <- logFuture: - return logFuture - case <-r.shutdownCh: - return errorFuture{ErrRaftShutdown} - } -} - -// RemovePeer is used to remove a peer from the cluster. If the -// current leader is being removed, it will cause a new election -// to occur. This must be run on the leader or it will fail. -func (r *Raft) RemovePeer(peer string) Future { - logFuture := &logFuture{ - log: Log{ - Type: LogRemovePeer, - peer: peer, - }, - } - logFuture.init() - select { - case r.applyCh <- logFuture: - return logFuture - case <-r.shutdownCh: - return errorFuture{ErrRaftShutdown} - } -} - -// SetPeers is used to forcibly replace the set of internal peers and -// the peerstore with the ones specified. This can be considered unsafe. 
-func (r *Raft) SetPeers(p []string) Future { - peerFuture := &peerFuture{ - peers: p, - } - peerFuture.init() - - select { - case r.peerCh <- peerFuture: - return peerFuture - case <-r.shutdownCh: - return errorFuture{ErrRaftShutdown} - } -} - -// Shutdown is used to stop the Raft background routines. -// This is not a graceful operation. Provides a future that -// can be used to block until all background routines have exited. -func (r *Raft) Shutdown() Future { - r.shutdownLock.Lock() - defer r.shutdownLock.Unlock() - - if !r.shutdown { - close(r.shutdownCh) - r.shutdown = true - r.setState(Shutdown) - return &shutdownFuture{r} - } - - // avoid closing transport twice - return &shutdownFuture{nil} -} - -// Snapshot is used to manually force Raft to take a snapshot. -// Returns a future that can be used to block until complete. -func (r *Raft) Snapshot() Future { - snapFuture := &snapshotFuture{} - snapFuture.init() - select { - case r.snapshotCh <- snapFuture: - return snapFuture - case <-r.shutdownCh: - return errorFuture{ErrRaftShutdown} - } - -} - -// State is used to return the current raft state. -func (r *Raft) State() RaftState { - return r.getState() -} - -// LeaderCh is used to get a channel which delivers signals on -// acquiring or losing leadership. It sends true if we become -// the leader, and false if we lose it. The channel is not buffered, -// and does not block on writes. -func (r *Raft) LeaderCh() <-chan bool { - return r.leaderCh -} - -func (r *Raft) String() string { - return fmt.Sprintf("Node at %s [%v]", r.localAddr, r.getState()) -} - -// LastContact returns the time of last contact by a leader. -// This only makes sense if we are currently a follower. -func (r *Raft) LastContact() time.Time { - r.lastContactLock.RLock() - last := r.lastContact - r.lastContactLock.RUnlock() - return last -} - -// Stats is used to return a map of various internal stats. This -// should only be used for informative purposes or debugging. 
-// -// Keys are: "state", "term", "last_log_index", "last_log_term", -// "commit_index", "applied_index", "fsm_pending", -// "last_snapshot_index", "last_snapshot_term", "num_peers" and -// "last_contact". -// -// The value of "state" is a numerical value representing a -// RaftState const. -// -// The value of "last_contact" is either "never" if there -// has been no contact with a leader, "0" if the node is in the -// leader state, or the time since last contact with a leader -// formatted as a string. -// -// All other values are uint64s, formatted as strings. -func (r *Raft) Stats() map[string]string { - toString := func(v uint64) string { - return strconv.FormatUint(v, 10) - } - lastLogIndex, lastLogTerm := r.getLastLog() - lastSnapIndex, lastSnapTerm := r.getLastSnapshot() - s := map[string]string{ - "state": r.getState().String(), - "term": toString(r.getCurrentTerm()), - "last_log_index": toString(lastLogIndex), - "last_log_term": toString(lastLogTerm), - "commit_index": toString(r.getCommitIndex()), - "applied_index": toString(r.getLastApplied()), - "fsm_pending": toString(uint64(len(r.fsmCommitCh))), - "last_snapshot_index": toString(lastSnapIndex), - "last_snapshot_term": toString(lastSnapTerm), - "num_peers": toString(uint64(len(r.peers))), - } - last := r.LastContact() - if last.IsZero() { - s["last_contact"] = "never" - } else if r.getState() == Leader { - s["last_contact"] = "0" - } else { - s["last_contact"] = fmt.Sprintf("%v", time.Since(last)) - } - return s -} - -// LastIndex returns the last index in stable storage, -// either from the last log or from the last snapshot. -func (r *Raft) LastIndex() uint64 { - return r.getLastIndex() -} - -// AppliedIndex returns the last index applied to the FSM. This is generally -// lagging behind the last index, especially for indexes that are persisted but -// have not yet been considered committed by the leader. 
NOTE - this reflects -// the last index that was sent to the application's FSM over the apply channel -// but DOES NOT mean that the application's FSM has yet consumed it and applied -// it to its internal state. Thus, the application's state may lag behind this -// index. -func (r *Raft) AppliedIndex() uint64 { - return r.getLastApplied() -} - -// runFSM is a long running goroutine responsible for applying logs -// to the FSM. This is done async of other logs since we don't want -// the FSM to block our internal operations. -func (r *Raft) runFSM() { - var lastIndex, lastTerm uint64 - for { - select { - case req := <-r.fsmRestoreCh: - // Open the snapshot - meta, source, err := r.snapshots.Open(req.ID) - if err != nil { - req.respond(fmt.Errorf("failed to open snapshot %v: %v", req.ID, err)) - continue - } - - // Attempt to restore - start := time.Now() - if err := r.fsm.Restore(source); err != nil { - req.respond(fmt.Errorf("failed to restore snapshot %v: %v", req.ID, err)) - source.Close() - continue - } - source.Close() - metrics.MeasureSince([]string{"raft", "fsm", "restore"}, start) - - // Update the last index and term - lastIndex = meta.Index - lastTerm = meta.Term - req.respond(nil) - - case req := <-r.fsmSnapshotCh: - // Is there something to snapshot? 
- if lastIndex == 0 { - req.respond(ErrNothingNewToSnapshot) - continue - } - - // Get our peers - peers, err := r.peerStore.Peers() - if err != nil { - req.respond(err) - continue - } - - // Start a snapshot - start := time.Now() - snap, err := r.fsm.Snapshot() - metrics.MeasureSince([]string{"raft", "fsm", "snapshot"}, start) - - // Respond to the request - req.index = lastIndex - req.term = lastTerm - req.peers = peers - req.snapshot = snap - req.respond(err) - - case commitEntry := <-r.fsmCommitCh: - // Apply the log if a command - var resp interface{} - if commitEntry.log.Type == LogCommand { - start := time.Now() - resp = r.fsm.Apply(commitEntry.log) - metrics.MeasureSince([]string{"raft", "fsm", "apply"}, start) - } - - // Update the indexes - lastIndex = commitEntry.log.Index - lastTerm = commitEntry.log.Term - - // Invoke the future if given - if commitEntry.future != nil { - commitEntry.future.response = resp - commitEntry.future.respond(nil) - } - case <-r.shutdownCh: - return - } - } -} - -// run is a long running goroutine that runs the Raft FSM. -func (r *Raft) run() { - for { - // Check if we are doing a shutdown - select { - case <-r.shutdownCh: - // Clear the leader to prevent forwarding - r.setLeader("") - return - default: - } - - // Enter into a sub-FSM - switch r.getState() { - case Follower: - r.runFollower() - case Candidate: - r.runCandidate() - case Leader: - r.runLeader() - } - } -} - -// runFollower runs the FSM for a follower. 
-func (r *Raft) runFollower() { - didWarn := false - r.logger.Printf("[INFO] raft: %v entering Follower state (Leader: %q)", r, r.Leader()) - metrics.IncrCounter([]string{"raft", "state", "follower"}, 1) - heartbeatTimer := randomTimeout(r.conf.HeartbeatTimeout) - for { - select { - case rpc := <-r.rpcCh: - r.processRPC(rpc) - - case a := <-r.applyCh: - // Reject any operations since we are not the leader - a.respond(ErrNotLeader) - - case v := <-r.verifyCh: - // Reject any operations since we are not the leader - v.respond(ErrNotLeader) - - case p := <-r.peerCh: - // Set the peers - r.peers = ExcludePeer(p.peers, r.localAddr) - p.respond(r.peerStore.SetPeers(p.peers)) - - case <-heartbeatTimer: - // Restart the heartbeat timer - heartbeatTimer = randomTimeout(r.conf.HeartbeatTimeout) - - // Check if we have had a successful contact - lastContact := r.LastContact() - if time.Since(lastContact) < r.conf.HeartbeatTimeout { - continue - } - - // Heartbeat failed! Transition to the candidate state - lastLeader := r.Leader() - r.setLeader("") - if len(r.peers) == 0 && !r.conf.EnableSingleNode { - if !didWarn { - r.logger.Printf("[WARN] raft: EnableSingleNode disabled, and no known peers. Aborting election.") - didWarn = true - } - } else { - if atomic.LoadInt64(&r.suspendLeadership) > 0 { - r.logger.Printf(`[WARN] raft: Heartbeat timeout from %q reached, but leadership suspended. Will not enter Candidate mode`, lastLeader) - return - } - r.logger.Printf(`[WARN] raft: Heartbeat timeout from %q reached, starting election`, lastLeader) - - metrics.IncrCounter([]string{"raft", "transition", "heartbeat_timeout"}, 1) - r.setState(Candidate) - return - } - - case <-r.shutdownCh: - return - } - } -} - -// runCandidate runs the FSM for a candidate. 
-func (r *Raft) runCandidate() { - r.logger.Printf("[INFO] raft: %v entering Candidate state", r) - metrics.IncrCounter([]string{"raft", "state", "candidate"}, 1) - - // Start vote for us, and set a timeout - voteCh := r.electSelf() - electionTimer := randomTimeout(r.conf.ElectionTimeout) - - // Tally the votes, need a simple majority - grantedVotes := 0 - votesNeeded := r.quorumSize() - r.logger.Printf("[DEBUG] raft: Votes needed: %d", votesNeeded) - - for r.getState() == Candidate { - select { - case rpc := <-r.rpcCh: - r.processRPC(rpc) - - case vote := <-voteCh: - // Check if the term is greater than ours, bail - if vote.Term > r.getCurrentTerm() { - r.logger.Printf("[DEBUG] raft: Newer term discovered, fallback to follower") - r.setState(Follower) - r.setCurrentTerm(vote.Term) - return - } - - // Check if the vote is granted - if vote.Granted { - grantedVotes++ - r.logger.Printf("[DEBUG] raft: Vote granted from %s. Tally: %d", vote.voter, grantedVotes) - } - - // Check if we've become the leader - if grantedVotes >= votesNeeded { - r.logger.Printf("[INFO] raft: Election won. Tally: %d", grantedVotes) - r.setState(Leader) - r.setLeader(r.localAddr) - return - } - - case a := <-r.applyCh: - // Reject any operations since we are not the leader - a.respond(ErrNotLeader) - - case v := <-r.verifyCh: - // Reject any operations since we are not the leader - v.respond(ErrNotLeader) - - case p := <-r.peerCh: - // Set the peers - r.peers = ExcludePeer(p.peers, r.localAddr) - p.respond(r.peerStore.SetPeers(p.peers)) - // Become a follower again - r.setState(Follower) - return - - case <-electionTimer: - // Election failed! Restart the election. We simply return, - // which will kick us back into runCandidate - r.logger.Printf("[WARN] raft: Election timeout reached, restarting election") - return - - case <-r.shutdownCh: - return - } - } -} - -// runLeader runs the FSM for a leader. Do the setup here and drop into -// the leaderLoop for the hot loop. 
-func (r *Raft) runLeader() { - r.logger.Printf("[INFO] raft: %v entering Leader state", r) - metrics.IncrCounter([]string{"raft", "state", "leader"}, 1) - - // Notify that we are the leader - asyncNotifyBool(r.leaderCh, true) - - // Push to the notify channel if given - if notify := r.conf.NotifyCh; notify != nil { - select { - case notify <- true: - case <-r.shutdownCh: - } - } - - // Setup leader state - r.leaderState.commitCh = make(chan struct{}, 1) - r.leaderState.inflight = newInflight(r.leaderState.commitCh) - r.leaderState.replState = make(map[string]*followerReplication) - r.leaderState.notify = make(map[*verifyFuture]struct{}) - r.leaderState.stepDown = make(chan struct{}, 1) - - // Cleanup state on step down - defer func() { - // Since we were the leader previously, we update our - // last contact time when we step down, so that we are not - // reporting a last contact time from before we were the - // leader. Otherwise, to a client it would seem our data - // is extremely stale. - r.setLastContact() - - // Stop replication - for _, p := range r.leaderState.replState { - close(p.stopCh) - } - - // Cancel inflight requests - r.leaderState.inflight.Cancel(ErrLeadershipLost) - - // Respond to any pending verify requests - for future := range r.leaderState.notify { - future.respond(ErrLeadershipLost) - } - - // Clear all the state - r.leaderState.commitCh = nil - r.leaderState.inflight = nil - r.leaderState.replState = nil - r.leaderState.notify = nil - r.leaderState.stepDown = nil - - // If we are stepping down for some reason, no known leader. - // We may have stepped down due to an RPC call, which would - // provide the leader, so we cannot always blank this out. 
- r.leaderLock.Lock() - if r.leader == r.localAddr { - r.leader = "" - } - r.leaderLock.Unlock() - - // Notify that we are not the leader - asyncNotifyBool(r.leaderCh, false) - - // Push to the notify channel if given - if notify := r.conf.NotifyCh; notify != nil { - select { - case notify <- false: - case <-r.shutdownCh: - // On shutdown, make a best effort but do not block - select { - case notify <- false: - default: - } - } - } - }() - - // Start a replication routine for each peer - for _, peer := range r.peers { - r.startReplication(peer) - } - - // Dispatch a no-op log first. Instead of LogNoop, - // we use a LogAddPeer with our peerset. This acts like - // a no-op as well, but when doing an initial bootstrap, ensures - // that all nodes share a common peerset. - peerSet := append([]string{r.localAddr}, r.peers...) - noop := &logFuture{ - log: Log{ - Type: LogAddPeer, - Data: encodePeers(peerSet, r.trans), - }, - } - r.dispatchLogs([]*logFuture{noop}) - - // Disable EnableSingleNode after we've been elected leader. - // This is to prevent a split brain in the future, if we are removed - // from the cluster and then elect ourself as leader. - if r.conf.DisableBootstrapAfterElect && r.conf.EnableSingleNode { - r.logger.Printf("[INFO] raft: Disabling EnableSingleNode (bootstrap)") - r.conf.EnableSingleNode = false - } - - // Sit in the leader loop until we step down - r.leaderLoop() -} - -// startReplication is a helper to setup state and start async replication to a peer. 
-func (r *Raft) startReplication(peer string) { - lastIdx := r.getLastIndex() - s := &followerReplication{ - peer: peer, - inflight: r.leaderState.inflight, - stopCh: make(chan uint64, 1), - triggerCh: make(chan struct{}, 1), - currentTerm: r.getCurrentTerm(), - matchIndex: 0, - nextIndex: lastIdx + 1, - lastContact: time.Now(), - notifyCh: make(chan struct{}, 1), - stepDown: r.leaderState.stepDown, - } - r.leaderState.replState[peer] = s - r.goFunc(func() { r.replicate(s) }) - asyncNotifyCh(s.triggerCh) -} - -// leaderLoop is the hot loop for a leader. It is invoked -// after all the various leader setup is done. -func (r *Raft) leaderLoop() { - // stepDown is used to track if there is an inflight log that - // would cause us to lose leadership (specifically a RemovePeer of - // ourselves). If this is the case, we must not allow any logs to - // be processed in parallel, otherwise we are basing commit on - // only a single peer (ourself) and replicating to an undefined set - // of peers. 
- stepDown := false - - lease := time.After(r.conf.LeaderLeaseTimeout) - for r.getState() == Leader { - select { - case rpc := <-r.rpcCh: - r.processRPC(rpc) - - case <-r.leaderState.stepDown: - r.setState(Follower) - - case <-r.leaderState.commitCh: - // Get the committed messages - committed := r.leaderState.inflight.Committed() - for e := committed.Front(); e != nil; e = e.Next() { - // Measure the commit time - commitLog := e.Value.(*logFuture) - metrics.MeasureSince([]string{"raft", "commitTime"}, commitLog.dispatch) - - // Increment the commit index - idx := commitLog.log.Index - r.setCommitIndex(idx) - r.processLogs(idx, commitLog) - } - - case v := <-r.verifyCh: - if v.quorumSize == 0 { - // Just dispatched, start the verification - r.verifyLeader(v) - - } else if v.votes < v.quorumSize { - // Early return, means there must be a new leader - r.logger.Printf("[WARN] raft: New leader elected, stepping down") - r.setState(Follower) - delete(r.leaderState.notify, v) - v.respond(ErrNotLeader) - - } else { - // Quorum of members agree, we are still leader - delete(r.leaderState.notify, v) - v.respond(nil) - } - - case p := <-r.peerCh: - p.respond(ErrLeader) - - case newLog := <-r.applyCh: - // Group commit, gather all the ready commits - ready := []*logFuture{newLog} - for i := 0; i < r.conf.MaxAppendEntries; i++ { - select { - case newLog := <-r.applyCh: - ready = append(ready, newLog) - default: - break - } - } - - // Handle any peer set changes - n := len(ready) - for i := 0; i < n; i++ { - // Fail all future transactions once stepDown is on - if stepDown { - ready[i].respond(ErrNotLeader) - ready[i], ready[n-1] = ready[n-1], nil - n-- - i-- - continue - } - - // Special case AddPeer and RemovePeer - log := ready[i] - if log.log.Type != LogAddPeer && log.log.Type != LogRemovePeer { - continue - } - - // Check if this log should be ignored. The logs can be - // reordered here since we have not yet assigned an index - // and are not violating any promises. 
- if !r.preparePeerChange(log) { - ready[i], ready[n-1] = ready[n-1], nil - n-- - i-- - continue - } - - // Apply peer set changes early and check if we will step - // down after the commit of this log. If so, we must not - // allow any future entries to make progress to avoid undefined - // behavior. - if ok := r.processLog(&log.log, nil, true); ok { - stepDown = true - } - } - - // Nothing to do if all logs are invalid - if n == 0 { - continue - } - - // Dispatch the logs - ready = ready[:n] - r.dispatchLogs(ready) - - case <-lease: - // Check if we've exceeded the lease, potentially stepping down - maxDiff := r.checkLeaderLease() - - // Next check interval should adjust for the last node we've - // contacted, without going negative - checkInterval := r.conf.LeaderLeaseTimeout - maxDiff - if checkInterval < minCheckInterval { - checkInterval = minCheckInterval - } - - // Renew the lease timer - lease = time.After(checkInterval) - - case <-r.shutdownCh: - return - } - } -} - -// verifyLeader must be called from the main thread for safety. -// Causes the followers to attempt an immediate heartbeat. -func (r *Raft) verifyLeader(v *verifyFuture) { - // Current leader always votes for self - v.votes = 1 - - // Set the quorum size, hot-path for single node - v.quorumSize = r.quorumSize() - if v.quorumSize == 1 { - v.respond(nil) - return - } - - // Track this request - v.notifyCh = r.verifyCh - r.leaderState.notify[v] = struct{}{} - - // Trigger immediate heartbeats - for _, repl := range r.leaderState.replState { - repl.notifyLock.Lock() - repl.notify = append(repl.notify, v) - repl.notifyLock.Unlock() - asyncNotifyCh(repl.notifyCh) - } -} - -// checkLeaderLease is used to check if we can contact a quorum of nodes -// within the last leader lease interval. If not, we need to step down, -// as we may have lost connectivity. Returns the maximum duration without -// contact. 
-func (r *Raft) checkLeaderLease() time.Duration { - // Track contacted nodes, we can always contact ourself - contacted := 1 - - // Check each follower - var maxDiff time.Duration - now := time.Now() - for peer, f := range r.leaderState.replState { - diff := now.Sub(f.LastContact()) - if diff <= r.conf.LeaderLeaseTimeout { - contacted++ - if diff > maxDiff { - maxDiff = diff - } - } else { - // Log at least once at high value, then debug. Otherwise it gets very verbose. - if diff <= 3*r.conf.LeaderLeaseTimeout { - r.logger.Printf("[WARN] raft: Failed to contact %v in %v", peer, diff) - } else { - r.logger.Printf("[DEBUG] raft: Failed to contact %v in %v", peer, diff) - } - } - metrics.AddSample([]string{"raft", "leader", "lastContact"}, float32(diff/time.Millisecond)) - } - - // Verify we can contact a quorum - quorum := r.quorumSize() - if contacted < quorum { - r.logger.Printf("[WARN] raft: Failed to contact quorum of nodes, stepping down") - r.setState(Follower) - metrics.IncrCounter([]string{"raft", "transition", "leader_lease_timeout"}, 1) - } - return maxDiff -} - -// quorumSize is used to return the quorum size -func (r *Raft) quorumSize() int { - return ((len(r.peers) + 1) / 2) + 1 -} - -// preparePeerChange checks if a LogAddPeer or LogRemovePeer should be performed, -// and properly formats the data field on the log before dispatching it. -func (r *Raft) preparePeerChange(l *logFuture) bool { - // Check if this is a known peer - p := l.log.peer - knownPeer := PeerContained(r.peers, p) || r.localAddr == p - - // Ignore known peers on add - if l.log.Type == LogAddPeer && knownPeer { - l.respond(ErrKnownPeer) - return false - } - - // Ignore unknown peers on remove - if l.log.Type == LogRemovePeer && !knownPeer { - l.respond(ErrUnknownPeer) - return false - } - - // Construct the peer set - var peerSet []string - if l.log.Type == LogAddPeer { - peerSet = append([]string{p, r.localAddr}, r.peers...) 
- } else { - peerSet = ExcludePeer(append([]string{r.localAddr}, r.peers...), p) - } - - // Setup the log - l.log.Data = encodePeers(peerSet, r.trans) - return true -} - -// dispatchLog is called to push a log to disk, mark it -// as inflight and begin replication of it. -func (r *Raft) dispatchLogs(applyLogs []*logFuture) { - now := time.Now() - defer metrics.MeasureSince([]string{"raft", "leader", "dispatchLog"}, now) - - term := r.getCurrentTerm() - lastIndex := r.getLastIndex() - logs := make([]*Log, len(applyLogs)) - - for idx, applyLog := range applyLogs { - applyLog.dispatch = now - applyLog.log.Index = lastIndex + uint64(idx) + 1 - applyLog.log.Term = term - applyLog.policy = newMajorityQuorum(len(r.peers) + 1) - logs[idx] = &applyLog.log - } - - // Write the log entry locally - if err := r.logs.StoreLogs(logs); err != nil { - r.logger.Printf("[ERR] raft: Failed to commit logs: %v", err) - for _, applyLog := range applyLogs { - applyLog.respond(err) - } - r.setState(Follower) - return - } - - // Add this to the inflight logs, commit - r.leaderState.inflight.StartAll(applyLogs) - - // Update the last log since it's on disk now - r.setLastLog(lastIndex+uint64(len(applyLogs)), term) - - // Notify the replicators of the new log - for _, f := range r.leaderState.replState { - asyncNotifyCh(f.triggerCh) - } -} - -// processLogs is used to process all the logs from the lastApplied -// up to the given index. 
-func (r *Raft) processLogs(index uint64, future *logFuture) { - // Reject logs we've applied already - lastApplied := r.getLastApplied() - if index <= lastApplied { - r.logger.Printf("[WARN] raft: Skipping application of old log: %d", index) - return - } - - // Apply all the preceding logs - for idx := r.getLastApplied() + 1; idx <= index; idx++ { - // Get the log, either from the future or from our log store - if future != nil && future.log.Index == idx { - r.processLog(&future.log, future, false) - - } else { - l := new(Log) - if err := r.logs.GetLog(idx, l); err != nil { - r.logger.Printf("[ERR] raft: Failed to get log at %d: %v", idx, err) - panic(err) - } - r.processLog(l, nil, false) - } - - // Update the lastApplied index and term - r.setLastApplied(idx) - } -} - -// processLog is invoked to process the application of a single committed log. -// Returns if this log entry would cause us to stepDown after it commits. -func (r *Raft) processLog(l *Log, future *logFuture, precommit bool) (stepDown bool) { - switch l.Type { - case LogBarrier: - // Barrier is handled by the FSM - fallthrough - - case LogCommand: - // Forward to the fsm handler - select { - case r.fsmCommitCh <- commitTuple{l, future}: - case <-r.shutdownCh: - if future != nil { - future.respond(ErrRaftShutdown) - } - } - - // Return so that the future is only responded to - // by the FSM handler when the application is done - return - - case LogAddPeer: - fallthrough - case LogRemovePeer: - peers := decodePeers(l.Data, r.trans) - r.logger.Printf("[DEBUG] raft: Node %v updated peer set (%v): %v", r.localAddr, l.Type, peers) - - // If the peer set does not include us, remove all other peers - removeSelf := !PeerContained(peers, r.localAddr) && l.Type == LogRemovePeer - if removeSelf { - // Mark that this operation will cause us to step down as - // leader. This prevents the future logs from being Applied - // from this leader. 
- stepDown = true - - // We only modify the peers after the commit, otherwise we - // would be using a quorum size of 1 for the RemovePeer operation. - // This is used with the stepDown guard to prevent any other logs. - if !precommit { - r.peers = nil - r.peerStore.SetPeers([]string{r.localAddr}) - } - } else { - r.peers = ExcludePeer(peers, r.localAddr) - r.peerStore.SetPeers(peers) - } - - // Handle replication if we are the leader - if r.getState() == Leader { - for _, p := range r.peers { - if _, ok := r.leaderState.replState[p]; !ok { - r.logger.Printf("[INFO] raft: Added peer %v, starting replication", p) - r.startReplication(p) - } - } - } - - // Stop replication for old nodes - if r.getState() == Leader && !precommit { - var toDelete []string - for _, repl := range r.leaderState.replState { - if !PeerContained(r.peers, repl.peer) { - r.logger.Printf("[INFO] raft: Removed peer %v, stopping replication (Index: %d)", repl.peer, l.Index) - - // Replicate up to this index and stop - repl.stopCh <- l.Index - close(repl.stopCh) - toDelete = append(toDelete, repl.peer) - } - } - for _, name := range toDelete { - delete(r.leaderState.replState, name) - } - } - - // Handle removing ourself - if removeSelf && !precommit { - if r.conf.ShutdownOnRemove { - r.logger.Printf("[INFO] raft: Removed ourself, shutting down") - r.Shutdown() - } else { - r.logger.Printf("[INFO] raft: Removed ourself, transitioning to follower") - r.setState(Follower) - } - } - - case LogNoop: - // Ignore the no-op - default: - r.logger.Printf("[ERR] raft: Got unrecognized log type: %#v", l) - } - - // Invoke the future if given - if future != nil && !precommit { - future.respond(nil) - } - return -} - -// processRPC is called to handle an incoming RPC request. 
-func (r *Raft) processRPC(rpc RPC) { - switch cmd := rpc.Command.(type) { - case *AppendEntriesRequest: - r.appendEntries(rpc, cmd) - case *RequestVoteRequest: - r.requestVote(rpc, cmd) - case *InstallSnapshotRequest: - r.installSnapshot(rpc, cmd) - default: - r.logger.Printf("[ERR] raft: Got unexpected command: %#v", rpc.Command) - rpc.Respond(nil, fmt.Errorf("unexpected command")) - } -} - -// processHeartbeat is a special handler used just for heartbeat requests -// so that they can be fast-pathed if a transport supports it. -func (r *Raft) processHeartbeat(rpc RPC) { - defer metrics.MeasureSince([]string{"raft", "rpc", "processHeartbeat"}, time.Now()) - - // Check if we are shutdown, just ignore the RPC - select { - case <-r.shutdownCh: - return - default: - } - - // Ensure we are only handling a heartbeat - switch cmd := rpc.Command.(type) { - case *AppendEntriesRequest: - r.appendEntries(rpc, cmd) - default: - r.logger.Printf("[ERR] raft: Expected heartbeat, got command: %#v", rpc.Command) - rpc.Respond(nil, fmt.Errorf("unexpected command")) - } -} - -// appendEntries is invoked when we get an append entries RPC call. 
-func (r *Raft) appendEntries(rpc RPC, a *AppendEntriesRequest) { - defer metrics.MeasureSince([]string{"raft", "rpc", "appendEntries"}, time.Now()) - // Setup a response - resp := &AppendEntriesResponse{ - Term: r.getCurrentTerm(), - LastLog: r.getLastIndex(), - Success: false, - NoRetryBackoff: false, - } - var rpcErr error - defer func() { - rpc.Respond(resp, rpcErr) - }() - - // Ignore an older term - if a.Term < r.getCurrentTerm() { - return - } - - // Increase the term if we see a newer one, also transition to follower - // if we ever get an appendEntries call - if a.Term > r.getCurrentTerm() || r.getState() != Follower { - // Ensure transition to follower - r.setState(Follower) - r.setCurrentTerm(a.Term) - resp.Term = a.Term - } - - // Save the current leader - r.setLeader(r.trans.DecodePeer(a.Leader)) - - // Verify the last log entry - if a.PrevLogEntry > 0 { - lastIdx, lastTerm := r.getLastEntry() - - var prevLogTerm uint64 - if a.PrevLogEntry == lastIdx { - prevLogTerm = lastTerm - - } else { - var prevLog Log - if err := r.logs.GetLog(a.PrevLogEntry, &prevLog); err != nil { - r.logger.Printf("[WARN] raft: Failed to get previous log: %d %v (last: %d)", - a.PrevLogEntry, err, lastIdx) - resp.NoRetryBackoff = true - return - } - prevLogTerm = prevLog.Term - } - - if a.PrevLogTerm != prevLogTerm { - r.logger.Printf("[WARN] raft: Previous log term mis-match: ours: %d remote: %d", - prevLogTerm, a.PrevLogTerm) - resp.NoRetryBackoff = true - return - } - } - - // Process any new entries - if n := len(a.Entries); n > 0 { - start := time.Now() - first := a.Entries[0] - last := a.Entries[n-1] - - // Delete any conflicting entries - lastLogIdx, _ := r.getLastLog() - if first.Index <= lastLogIdx { - r.logger.Printf("[WARN] raft: Clearing log suffix from %d to %d", first.Index, lastLogIdx) - if err := r.logs.DeleteRange(first.Index, lastLogIdx); err != nil { - r.logger.Printf("[ERR] raft: Failed to clear log suffix: %v", err) - return - } - } - - // Append the entry 
- if err := r.logs.StoreLogs(a.Entries); err != nil { - r.logger.Printf("[ERR] raft: Failed to append to logs: %v", err) - return - } - - // Update the lastLog - r.setLastLog(last.Index, last.Term) - metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "storeLogs"}, start) - } - - // Update the commit index - if a.LeaderCommitIndex > 0 && a.LeaderCommitIndex > r.getCommitIndex() { - start := time.Now() - idx := min(a.LeaderCommitIndex, r.getLastIndex()) - r.setCommitIndex(idx) - r.processLogs(idx, nil) - metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "processLogs"}, start) - } - - // Everything went well, set success - resp.Success = true - r.setLastContact() -} - -// requestVote is invoked when we get an request vote RPC call. -func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) { - defer metrics.MeasureSince([]string{"raft", "rpc", "requestVote"}, time.Now()) - r.observe(*req) - - // Setup a response - resp := &RequestVoteResponse{ - Term: r.getCurrentTerm(), - Peers: encodePeers(r.peers, r.trans), - Granted: false, - } - var rpcErr error - defer func() { - rpc.Respond(resp, rpcErr) - }() - - // Check if we have an existing leader [who's not the candidate] - candidate := r.trans.DecodePeer(req.Candidate) - if leader := r.Leader(); leader != "" && leader != candidate { - r.logger.Printf("[WARN] raft: Rejecting vote request from %v since we have a leader: %v", - candidate, leader) - return - } - - // Ignore an older term - if req.Term < r.getCurrentTerm() { - return - } - - // Increase the term if we see a newer one - if req.Term > r.getCurrentTerm() { - // Ensure transition to follower - r.setState(Follower) - r.setCurrentTerm(req.Term) - resp.Term = req.Term - } - - // Check if we have voted yet - lastVoteTerm, err := r.stable.GetUint64(keyLastVoteTerm) - if err != nil && err.Error() != "not found" { - r.logger.Printf("[ERR] raft: Failed to get last vote term: %v", err) - return - } - lastVoteCandBytes, err := 
r.stable.Get(keyLastVoteCand) - if err != nil && err.Error() != "not found" { - r.logger.Printf("[ERR] raft: Failed to get last vote candidate: %v", err) - return - } - - // Check if we've voted in this election before - if lastVoteTerm == req.Term && lastVoteCandBytes != nil { - r.logger.Printf("[INFO] raft: Duplicate RequestVote for same term: %d", req.Term) - if bytes.Equal(lastVoteCandBytes, req.Candidate) { - r.logger.Printf("[WARN] raft: Duplicate RequestVote from candidate: %s", req.Candidate) - resp.Granted = true - } - return - } - - // Reject if their term is older - lastIdx, lastTerm := r.getLastEntry() - if lastTerm > req.LastLogTerm { - r.logger.Printf("[WARN] raft: Rejecting vote request from %v since our last term is greater (%d, %d)", - candidate, lastTerm, req.LastLogTerm) - return - } - - if lastTerm == req.LastLogTerm && lastIdx > req.LastLogIndex { - r.logger.Printf("[WARN] raft: Rejecting vote request from %v since our last index is greater (%d, %d)", - candidate, lastIdx, req.LastLogIndex) - return - } - - // Persist a vote for safety - if err := r.persistVote(req.Term, req.Candidate); err != nil { - r.logger.Printf("[ERR] raft: Failed to persist vote: %v", err) - return - } - - resp.Granted = true - r.setLastContact() -} - -// installSnapshot is invoked when we get a InstallSnapshot RPC call. -// We must be in the follower state for this, since it means we are -// too far behind a leader for log replay. 
-func (r *Raft) installSnapshot(rpc RPC, req *InstallSnapshotRequest) { - defer metrics.MeasureSince([]string{"raft", "rpc", "installSnapshot"}, time.Now()) - // Setup a response - resp := &InstallSnapshotResponse{ - Term: r.getCurrentTerm(), - Success: false, - } - var rpcErr error - defer func() { - io.Copy(ioutil.Discard, rpc.Reader) // ensure we always consume all the snapshot data from the stream [see issue #212] - rpc.Respond(resp, rpcErr) - }() - - // Ignore an older term - if req.Term < r.getCurrentTerm() { - r.logger.Printf("[INFO] raft: Ignoring installSnapshot request with older term of %d vs currentTerm %d", req.Term, r.getCurrentTerm()) - return - } - - // Increase the term if we see a newer one - if req.Term > r.getCurrentTerm() { - // Ensure transition to follower - r.setState(Follower) - r.setCurrentTerm(req.Term) - resp.Term = req.Term - } - - // Save the current leader - r.setLeader(r.trans.DecodePeer(req.Leader)) - - // Create a new snapshot - sink, err := r.snapshots.Create(req.LastLogIndex, req.LastLogTerm, req.Peers) - if err != nil { - r.logger.Printf("[ERR] raft: Failed to create snapshot to install: %v", err) - rpcErr = fmt.Errorf("failed to create snapshot: %v", err) - return - } - - // Spill the remote snapshot to disk - n, err := io.Copy(sink, rpc.Reader) - if err != nil { - sink.Cancel() - r.logger.Printf("[ERR] raft: Failed to copy snapshot: %v", err) - rpcErr = err - return - } - - // Check that we received it all - if n != req.Size { - sink.Cancel() - r.logger.Printf("[ERR] raft: Failed to receive whole snapshot: %d / %d", n, req.Size) - rpcErr = fmt.Errorf("short read") - return - } - - // Finalize the snapshot - if err := sink.Close(); err != nil { - r.logger.Printf("[ERR] raft: Failed to finalize snapshot: %v", err) - rpcErr = err - return - } - r.logger.Printf("[INFO] raft: Copied %d bytes to local snapshot", n) - - // Restore snapshot - future := &restoreFuture{ID: sink.ID()} - future.init() - select { - case r.fsmRestoreCh <- 
future: - case <-r.shutdownCh: - future.respond(ErrRaftShutdown) - return - } - - // Wait for the restore to happen - if err := future.Error(); err != nil { - r.logger.Printf("[ERR] raft: Failed to restore snapshot: %v", err) - rpcErr = err - return - } - - // Update the lastApplied so we don't replay old logs - r.setLastApplied(req.LastLogIndex) - - // Update the last stable snapshot info - r.setLastSnapshot(req.LastLogIndex, req.LastLogTerm) - - // Restore the peer set - peers := decodePeers(req.Peers, r.trans) - r.peers = ExcludePeer(peers, r.localAddr) - r.peerStore.SetPeers(peers) - - // Compact logs, continue even if this fails - if err := r.compactLogs(req.LastLogIndex); err != nil { - r.logger.Printf("[ERR] raft: Failed to compact logs: %v", err) - } - - r.logger.Printf("[INFO] raft: Installed remote snapshot") - resp.Success = true - r.setLastContact() -} - -// setLastContact is used to set the last contact time to now -func (r *Raft) setLastContact() { - r.lastContactLock.Lock() - r.lastContact = time.Now() - r.lastContactLock.Unlock() -} - -type voteResult struct { - RequestVoteResponse - voter string -} - -// electSelf is used to send a RequestVote RPC to all peers, -// and vote for ourself. This has the side affecting of incrementing -// the current term. The response channel returned is used to wait -// for all the responses (including a vote for ourself). 
-func (r *Raft) electSelf() <-chan *voteResult { - // Create a response channel - respCh := make(chan *voteResult, len(r.peers)+1) - - // Increment the term - r.setCurrentTerm(r.getCurrentTerm() + 1) - - // Construct the request - lastIdx, lastTerm := r.getLastEntry() - req := &RequestVoteRequest{ - Term: r.getCurrentTerm(), - Candidate: r.trans.EncodePeer(r.localAddr), - LastLogIndex: lastIdx, - LastLogTerm: lastTerm, - } - - // Construct a function to ask for a vote - askPeer := func(peer string) { - r.goFunc(func() { - defer metrics.MeasureSince([]string{"raft", "candidate", "electSelf"}, time.Now()) - resp := &voteResult{voter: peer} - err := r.trans.RequestVote(peer, req, &resp.RequestVoteResponse) - if err != nil { - r.logger.Printf("[ERR] raft: Failed to make RequestVote RPC to %v: %v", peer, err) - resp.Term = req.Term - resp.Granted = false - } - - // If we are not a peer, we could have been removed but failed - // to receive the log message. OR it could mean an improperly configured - // cluster. Either way, we should warn - if err == nil { - peerSet := decodePeers(resp.Peers, r.trans) - if !PeerContained(peerSet, r.localAddr) { - r.logger.Printf("[WARN] raft: Remote peer %v does not have local node %v as a peer", - peer, r.localAddr) - } - } - - respCh <- resp - }) - } - - // For each peer, request a vote - for _, peer := range r.peers { - askPeer(peer) - } - - // Persist a vote for ourselves - if err := r.persistVote(req.Term, req.Candidate); err != nil { - r.logger.Printf("[ERR] raft: Failed to persist vote : %v", err) - return nil - } - - // Include our own vote - respCh <- &voteResult{ - RequestVoteResponse: RequestVoteResponse{ - Term: req.Term, - Granted: true, - }, - voter: r.localAddr, - } - return respCh -} - -// persistVote is used to persist our vote for safety. 
-func (r *Raft) persistVote(term uint64, candidate []byte) error { - if err := r.stable.SetUint64(keyLastVoteTerm, term); err != nil { - return err - } - if err := r.stable.Set(keyLastVoteCand, candidate); err != nil { - return err - } - return nil -} - -// setCurrentTerm is used to set the current term in a durable manner. -func (r *Raft) setCurrentTerm(t uint64) { - // Persist to disk first - if err := r.stable.SetUint64(keyCurrentTerm, t); err != nil { - panic(fmt.Errorf("failed to save current term: %v", err)) - } - r.raftState.setCurrentTerm(t) -} - -// setState is used to update the current state. Any state -// transition causes the known leader to be cleared. This means -// that leader should be set only after updating the state. -func (r *Raft) setState(state RaftState) { - r.setLeader("") - oldState := r.raftState.getState() - r.raftState.setState(state) - if oldState != state { - r.observe(state) - } -} - -// runSnapshots is a long running goroutine used to manage taking -// new snapshots of the FSM. It runs in parallel to the FSM and -// main goroutines, so that snapshots do not block normal operation. -func (r *Raft) runSnapshots() { - for { - select { - case <-randomTimeout(r.conf.SnapshotInterval): - // Check if we should snapshot - if !r.shouldSnapshot() { - continue - } - - // Trigger a snapshot - if err := r.takeSnapshot(); err != nil { - r.logger.Printf("[ERR] raft: Failed to take snapshot: %v", err) - } - - case future := <-r.snapshotCh: - // User-triggered, run immediately - err := r.takeSnapshot() - if err != nil { - r.logger.Printf("[ERR] raft: Failed to take snapshot: %v", err) - } - future.respond(err) - - case <-r.shutdownCh: - return - } - } -} - -// shouldSnapshot checks if we meet the conditions to take -// a new snapshot. 
-func (r *Raft) shouldSnapshot() bool { - // Check the last snapshot index - lastSnap, _ := r.getLastSnapshot() - - // Check the last log index - lastIdx, err := r.logs.LastIndex() - if err != nil { - r.logger.Printf("[ERR] raft: Failed to get last log index: %v", err) - return false - } - - // Compare the delta to the threshold - delta := lastIdx - lastSnap - return delta >= r.conf.SnapshotThreshold -} - -// takeSnapshot is used to take a new snapshot. -func (r *Raft) takeSnapshot() error { - defer metrics.MeasureSince([]string{"raft", "snapshot", "takeSnapshot"}, time.Now()) - // Create a snapshot request - req := &reqSnapshotFuture{} - req.init() - - // Wait for dispatch or shutdown - select { - case r.fsmSnapshotCh <- req: - case <-r.shutdownCh: - return ErrRaftShutdown - } - - // Wait until we get a response - if err := req.Error(); err != nil { - if err != ErrNothingNewToSnapshot { - err = fmt.Errorf("failed to start snapshot: %v", err) - } - return err - } - defer req.snapshot.Release() - - // Log that we are starting the snapshot - r.logger.Printf("[INFO] raft: Starting snapshot up to %d", req.index) - - // Encode the peerset - peerSet := encodePeers(req.peers, r.trans) - - // Create a new snapshot - start := time.Now() - sink, err := r.snapshots.Create(req.index, req.term, peerSet) - if err != nil { - return fmt.Errorf("failed to create snapshot: %v", err) - } - metrics.MeasureSince([]string{"raft", "snapshot", "create"}, start) - - // Try to persist the snapshot - start = time.Now() - if err := req.snapshot.Persist(sink); err != nil { - sink.Cancel() - return fmt.Errorf("failed to persist snapshot: %v", err) - } - metrics.MeasureSince([]string{"raft", "snapshot", "persist"}, start) - - // Close and check for error - if err := sink.Close(); err != nil { - return fmt.Errorf("failed to close snapshot: %v", err) - } - - // Update the last stable snapshot info - r.setLastSnapshot(req.index, req.term) - - // Compact the logs - if err := 
r.compactLogs(req.index); err != nil { - return err - } - - // Log completion - r.logger.Printf("[INFO] raft: Snapshot to %d complete", req.index) - return nil -} - -// compactLogs takes the last inclusive index of a snapshot -// and trims the logs that are no longer needed. -func (r *Raft) compactLogs(snapIdx uint64) error { - defer metrics.MeasureSince([]string{"raft", "compactLogs"}, time.Now()) - // Determine log ranges to compact - minLog, err := r.logs.FirstIndex() - if err != nil { - return fmt.Errorf("failed to get first log index: %v", err) - } - - // Check if we have enough logs to truncate - lastLogIdx, _ := r.getLastLog() - if lastLogIdx <= r.conf.TrailingLogs { - return nil - } - - // Truncate up to the end of the snapshot, or `TrailingLogs` - // back from the head, which ever is further back. This ensures - // at least `TrailingLogs` entries, but does not allow logs - // after the snapshot to be removed. - maxLog := min(snapIdx, lastLogIdx-r.conf.TrailingLogs) - - // Log this - r.logger.Printf("[INFO] raft: Compacting logs from %d to %d", minLog, maxLog) - - // Compact the logs - if err := r.logs.DeleteRange(minLog, maxLog); err != nil { - return fmt.Errorf("log compaction failed: %v", err) - } - return nil -} - -// restoreSnapshot attempts to restore the latest snapshots, and fails -// if none of them can be restored. This is called at initialization time, -// and is completely unsafe to call at any other time. 
-func (r *Raft) restoreSnapshot() error { - snapshots, err := r.snapshots.List() - if err != nil { - r.logger.Printf("[ERR] raft: Failed to list snapshots: %v", err) - return err - } - - // Try to load in order of newest to oldest - for _, snapshot := range snapshots { - _, source, err := r.snapshots.Open(snapshot.ID) - if err != nil { - r.logger.Printf("[ERR] raft: Failed to open snapshot %v: %v", snapshot.ID, err) - continue - } - defer source.Close() - - if err := r.fsm.Restore(source); err != nil { - r.logger.Printf("[ERR] raft: Failed to restore snapshot %v: %v", snapshot.ID, err) - continue - } - - // Log success - r.logger.Printf("[INFO] raft: Restored from snapshot %v", snapshot.ID) - - // Update the lastApplied so we don't replay old logs - r.setLastApplied(snapshot.Index) - - // Update the last stable snapshot info - r.setLastSnapshot(snapshot.Index, snapshot.Term) - - // Success! - return nil - } - - // If we had snapshots and failed to load them, its an error - if len(snapshots) > 0 { - return fmt.Errorf("failed to load any existing snapshots") - } - return nil -} - -// StepDown instructs a leader to voluntarily step down, reentering election cycle. -// Note that the node may yet win elections again immediately following. -func (r *Raft) StepDown() error { - if r.getState() != Leader { - return fmt.Errorf("StepDown() is only applicable to the leader") - } - asyncNotifyCh(r.leaderState.stepDown) - return nil -} - -// Yield instructs the node to not attempt becoming a leader in the -// following duration. 
-func (r *Raft) Yield() error { - atomic.AddInt64(&r.suspendLeadership, 1) - yieldDuration := r.conf.HeartbeatTimeout * 5 // time enough for the yielded-to peer to become leader - go time.AfterFunc(yieldDuration, func() { - atomic.AddInt64(&r.suspendLeadership, -1) - }) - if r.getState() == Leader { - r.StepDown() - } - return nil -} diff --git a/go/vt/orchestrator/external/raft/replication.go b/go/vt/orchestrator/external/raft/replication.go deleted file mode 100644 index 1f8b923cd80..00000000000 --- a/go/vt/orchestrator/external/raft/replication.go +++ /dev/null @@ -1,522 +0,0 @@ -package raft - -import ( - "errors" - "fmt" - "sync" - "time" - - "github.com/armon/go-metrics" -) - -const ( - maxFailureScale = 12 - failureWait = 10 * time.Millisecond -) - -var ( - // ErrLogNotFound indicates a given log entry is not available. - ErrLogNotFound = errors.New("log not found") - - // ErrPipelineReplicationNotSupported can be returned by the transport to - // signal that pipeline replication is not supported in general, and that - // no error message should be produced. - ErrPipelineReplicationNotSupported = errors.New("pipeline replication not supported") -) - -type followerReplication struct { - peer string - inflight *inflight - - stopCh chan uint64 - triggerCh chan struct{} - - currentTerm uint64 - matchIndex uint64 - nextIndex uint64 - - lastContact time.Time - lastContactLock sync.RWMutex - - failures uint64 - - notifyCh chan struct{} - notify []*verifyFuture - notifyLock sync.Mutex - - // stepDown is used to indicate to the leader that we - // should step down based on information from a follower. - stepDown chan struct{} - - // allowPipeline is used to control it seems like - // pipeline replication should be enabled. - allowPipeline bool -} - -// notifyAll is used to notify all the waiting verify futures -// if the follower believes we are still the leader. 
-func (s *followerReplication) notifyAll(leader bool) { - // Clear the waiting notifies minimizing lock time - s.notifyLock.Lock() - n := s.notify - s.notify = nil - s.notifyLock.Unlock() - - // Submit our votes - for _, v := range n { - v.vote(leader) - } -} - -// LastContact returns the time of last contact. -func (s *followerReplication) LastContact() time.Time { - s.lastContactLock.RLock() - last := s.lastContact - s.lastContactLock.RUnlock() - return last -} - -// setLastContact sets the last contact to the current time. -func (s *followerReplication) setLastContact() { - s.lastContactLock.Lock() - s.lastContact = time.Now() - s.lastContactLock.Unlock() -} - -// replicate is a long running routine that is used to manage -// the process of replicating logs to our followers. -func (r *Raft) replicate(s *followerReplication) { - // Start an async heartbeating routing - stopHeartbeat := make(chan struct{}) - defer close(stopHeartbeat) - r.goFunc(func() { r.heartbeat(s, stopHeartbeat) }) - -RPC: - shouldStop := false - for !shouldStop { - select { - case maxIndex := <-s.stopCh: - // Make a best effort to replicate up to this index - if maxIndex > 0 { - r.replicateTo(s, maxIndex) - } - return - case <-s.triggerCh: - lastLogIdx, _ := r.getLastLog() - shouldStop = r.replicateTo(s, lastLogIdx) - case <-randomTimeout(r.conf.CommitTimeout): - lastLogIdx, _ := r.getLastLog() - shouldStop = r.replicateTo(s, lastLogIdx) - } - - // If things looks healthy, switch to pipeline mode - if !shouldStop && s.allowPipeline { - goto PIPELINE - } - } - return - -PIPELINE: - // Disable until re-enabled - s.allowPipeline = false - - // Replicates using a pipeline for high performance. This method - // is not able to gracefully recover from errors, and so we fall back - // to standard mode on failure. 
- if err := r.pipelineReplicate(s); err != nil { - if err != ErrPipelineReplicationNotSupported { - r.logger.Printf("[ERR] raft: Failed to start pipeline replication to %s: %s", s.peer, err) - } - } - goto RPC -} - -// replicateTo is used to replicate the logs up to a given last index. -// If the follower log is behind, we take care to bring them up to date. -func (r *Raft) replicateTo(s *followerReplication, lastIndex uint64) (shouldStop bool) { - // Create the base request - var req AppendEntriesRequest - var resp AppendEntriesResponse - var start time.Time -START: - // Prevent an excessive retry rate on errors - if s.failures > 0 { - select { - case <-time.After(backoff(failureWait, s.failures, maxFailureScale)): - case <-r.shutdownCh: - } - } - - // Setup the request - if err := r.setupAppendEntries(s, &req, s.nextIndex, lastIndex); err == ErrLogNotFound { - goto SEND_SNAP - } else if err != nil { - return - } - - // Make the RPC call - start = time.Now() - if err := r.trans.AppendEntries(s.peer, &req, &resp); err != nil { - r.logger.Printf("[ERR] raft: Failed to AppendEntries to %v: %v", s.peer, err) - s.failures++ - return - } - appendStats(s.peer, start, float32(len(req.Entries))) - - // Check for a newer term, stop running - if resp.Term > req.Term { - r.handleStaleTerm(s) - return true - } - - // Update the last contact - s.setLastContact() - - // Update s based on success - if resp.Success { - // Update our replication state - updateLastAppended(s, &req) - - // Clear any failures, allow pipelining - s.failures = 0 - s.allowPipeline = true - } else { - s.nextIndex = max(min(s.nextIndex-1, resp.LastLog+1), 1) - s.matchIndex = s.nextIndex - 1 - if resp.NoRetryBackoff { - s.failures = 0 - } else { - s.failures++ - } - r.logger.Printf("[WARN] raft: AppendEntries to %v rejected, sending older logs (next: %d)", s.peer, s.nextIndex) - } - -CHECK_MORE: - // Check if there are more logs to replicate - if s.nextIndex <= lastIndex { - goto START - } - return - - // 
SEND_SNAP is used when we fail to get a log, usually because the follower - // is too far behind, and we must ship a snapshot down instead -SEND_SNAP: - if stop, err := r.sendLatestSnapshot(s); stop { - return true - } else if err != nil { - r.logger.Printf("[ERR] raft: Failed to send snapshot to %v: %v", s.peer, err) - return - } - - // Check if there is more to replicate - goto CHECK_MORE -} - -// sendLatestSnapshot is used to send the latest snapshot we have -// down to our follower. -func (r *Raft) sendLatestSnapshot(s *followerReplication) (bool, error) { - // Get the snapshots - snapshots, err := r.snapshots.List() - if err != nil { - r.logger.Printf("[ERR] raft: Failed to list snapshots: %v", err) - return false, err - } - - // Check we have at least a single snapshot - if len(snapshots) == 0 { - return false, fmt.Errorf("no snapshots found") - } - - // Open the most recent snapshot - snapID := snapshots[0].ID - meta, snapshot, err := r.snapshots.Open(snapID) - if err != nil { - r.logger.Printf("[ERR] raft: Failed to open snapshot %v: %v", snapID, err) - return false, err - } - defer snapshot.Close() - - // Setup the request - req := InstallSnapshotRequest{ - Term: s.currentTerm, - Leader: r.trans.EncodePeer(r.localAddr), - LastLogIndex: meta.Index, - LastLogTerm: meta.Term, - Peers: meta.Peers, - Size: meta.Size, - } - - // Make the call - start := time.Now() - var resp InstallSnapshotResponse - if err := r.trans.InstallSnapshot(s.peer, &req, &resp, snapshot); err != nil { - r.logger.Printf("[ERR] raft: Failed to install snapshot %v: %v", snapID, err) - s.failures++ - return false, err - } - metrics.MeasureSince([]string{"raft", "replication", "installSnapshot", s.peer}, start) - - // Check for a newer term, stop running - if resp.Term > req.Term { - r.handleStaleTerm(s) - return true, nil - } - - // Update the last contact - s.setLastContact() - - // Check for success - if resp.Success { - // Mark any inflight logs as committed - 
s.inflight.CommitRange(s.matchIndex+1, meta.Index) - - // Update the indexes - s.matchIndex = meta.Index - s.nextIndex = s.matchIndex + 1 - - // Clear any failures - s.failures = 0 - - // Notify we are still leader - s.notifyAll(true) - } else { - s.failures++ - r.logger.Printf("[WARN] raft: InstallSnapshot to %v rejected", s.peer) - } - return false, nil -} - -// heartbeat is used to periodically invoke AppendEntries on a peer -// to ensure they don't time out. This is done async of replicate(), -// since that routine could potentially be blocked on disk IO. -func (r *Raft) heartbeat(s *followerReplication, stopCh chan struct{}) { - var failures uint64 - req := AppendEntriesRequest{ - Term: s.currentTerm, - Leader: r.trans.EncodePeer(r.localAddr), - } - var resp AppendEntriesResponse - for { - // Wait for the next heartbeat interval or forced notify - select { - case <-s.notifyCh: - case <-randomTimeout(r.conf.HeartbeatTimeout / 10): - case <-stopCh: - return - } - - start := time.Now() - if err := r.trans.AppendEntries(s.peer, &req, &resp); err != nil { - r.logger.Printf("[ERR] raft: Failed to heartbeat to %v: %v", s.peer, err) - failures++ - select { - case <-time.After(backoff(failureWait, failures, maxFailureScale)): - case <-stopCh: - } - } else { - s.setLastContact() - failures = 0 - metrics.MeasureSince([]string{"raft", "replication", "heartbeat", s.peer}, start) - s.notifyAll(resp.Success) - } - } -} - -// pipelineReplicate is used when we have synchronized our state with the follower, -// and want to switch to a higher performance pipeline mode of replication. -// We only pipeline AppendEntries commands, and if we ever hit an error, we fall -// back to the standard replication which can handle more complex situations. 
-func (r *Raft) pipelineReplicate(s *followerReplication) error { - // Create a new pipeline - pipeline, err := r.trans.AppendEntriesPipeline(s.peer) - if err != nil { - return err - } - defer pipeline.Close() - - // Log start and stop of pipeline - r.logger.Printf("[INFO] raft: pipelining replication to peer %v", s.peer) - defer r.logger.Printf("[INFO] raft: aborting pipeline replication to peer %v", s.peer) - - // Create a shutdown and finish channel - stopCh := make(chan struct{}) - finishCh := make(chan struct{}) - - // Start a dedicated decoder - r.goFunc(func() { r.pipelineDecode(s, pipeline, stopCh, finishCh) }) - - // Start pipeline sends at the last good nextIndex - nextIndex := s.nextIndex - - shouldStop := false -SEND: - for !shouldStop { - select { - case <-finishCh: - break SEND - case maxIndex := <-s.stopCh: - if maxIndex > 0 { - r.pipelineSend(s, pipeline, &nextIndex, maxIndex) - } - break SEND - case <-s.triggerCh: - lastLogIdx, _ := r.getLastLog() - shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx) - case <-randomTimeout(r.conf.CommitTimeout): - lastLogIdx, _ := r.getLastLog() - shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx) - } - } - - // Stop our decoder, and wait for it to finish - close(stopCh) - select { - case <-finishCh: - case <-r.shutdownCh: - } - return nil -} - -// pipelineSend is used to send data over a pipeline. 
-func (r *Raft) pipelineSend(s *followerReplication, p AppendPipeline, nextIdx *uint64, lastIndex uint64) (shouldStop bool) { - // Create a new append request - req := new(AppendEntriesRequest) - if err := r.setupAppendEntries(s, req, *nextIdx, lastIndex); err != nil { - return true - } - - // Pipeline the append entries - if _, err := p.AppendEntries(req, new(AppendEntriesResponse)); err != nil { - r.logger.Printf("[ERR] raft: Failed to pipeline AppendEntries to %v: %v", s.peer, err) - return true - } - - // Increase the next send log to avoid re-sending old logs - if n := len(req.Entries); n > 0 { - last := req.Entries[n-1] - *nextIdx = last.Index + 1 - } - return false -} - -// pipelineDecode is used to decode the responses of pipelined requests. -func (r *Raft) pipelineDecode(s *followerReplication, p AppendPipeline, stopCh, finishCh chan struct{}) { - defer close(finishCh) - respCh := p.Consumer() - for { - select { - case ready := <-respCh: - req, resp := ready.Request(), ready.Response() - appendStats(s.peer, ready.Start(), float32(len(req.Entries))) - - // Check for a newer term, stop running - if resp.Term > req.Term { - r.handleStaleTerm(s) - return - } - - // Update the last contact - s.setLastContact() - - // Abort pipeline if not successful - if !resp.Success { - return - } - - // Update our replication state - updateLastAppended(s, req) - case <-stopCh: - return - } - } -} - -// setupAppendEntries is used to setup an append entries request. 
-func (r *Raft) setupAppendEntries(s *followerReplication, req *AppendEntriesRequest, nextIndex, lastIndex uint64) error { - req.Term = s.currentTerm - req.Leader = r.trans.EncodePeer(r.localAddr) - req.LeaderCommitIndex = r.getCommitIndex() - if err := r.setPreviousLog(req, nextIndex); err != nil { - return err - } - if err := r.setNewLogs(req, nextIndex, lastIndex); err != nil { - return err - } - return nil -} - -// setPreviousLog is used to setup the PrevLogEntry and PrevLogTerm for an -// AppendEntriesRequest given the next index to replicate. -func (r *Raft) setPreviousLog(req *AppendEntriesRequest, nextIndex uint64) error { - // Guard for the first index, since there is no 0 log entry - // Guard against the previous index being a snapshot as well - lastSnapIdx, lastSnapTerm := r.getLastSnapshot() - if nextIndex == 1 { - req.PrevLogEntry = 0 - req.PrevLogTerm = 0 - - } else if (nextIndex - 1) == lastSnapIdx { - req.PrevLogEntry = lastSnapIdx - req.PrevLogTerm = lastSnapTerm - - } else { - var l Log - if err := r.logs.GetLog(nextIndex-1, &l); err != nil { - r.logger.Printf("[ERR] raft: Failed to get log at index %d: %v", - nextIndex-1, err) - return err - } - - // Set the previous index and term (0 if nextIndex is 1) - req.PrevLogEntry = l.Index - req.PrevLogTerm = l.Term - } - return nil -} - -// setNewLogs is used to setup the logs which should be appended for a request. 
-func (r *Raft) setNewLogs(req *AppendEntriesRequest, nextIndex, lastIndex uint64) error { - // Append up to MaxAppendEntries or up to the lastIndex - req.Entries = make([]*Log, 0, r.conf.MaxAppendEntries) - maxIndex := min(nextIndex+uint64(r.conf.MaxAppendEntries)-1, lastIndex) - for i := nextIndex; i <= maxIndex; i++ { - oldLog := new(Log) - if err := r.logs.GetLog(i, oldLog); err != nil { - r.logger.Printf("[ERR] raft: Failed to get log at index %d: %v", i, err) - return err - } - req.Entries = append(req.Entries, oldLog) - } - return nil -} - -// appendStats is used to emit stats about an AppendEntries invocation. -func appendStats(peer string, start time.Time, logs float32) { - metrics.MeasureSince([]string{"raft", "replication", "appendEntries", "rpc", peer}, start) - metrics.IncrCounter([]string{"raft", "replication", "appendEntries", "logs", peer}, logs) -} - -// handleStaleTerm is used when a follower indicates that we have a stale term. -func (r *Raft) handleStaleTerm(s *followerReplication) { - r.logger.Printf("[ERR] raft: peer %v has newer term, stopping replication", s.peer) - s.notifyAll(false) // No longer leader - asyncNotifyCh(s.stepDown) -} - -// updateLastAppended is used to update follower replication state after a successful -// AppendEntries RPC. 
-func updateLastAppended(s *followerReplication, req *AppendEntriesRequest) { - // Mark any inflight logs as committed - if logs := req.Entries; len(logs) > 0 { - first := logs[0] - last := logs[len(logs)-1] - s.inflight.CommitRange(first.Index, last.Index) - - // Update the indexes - s.matchIndex = last.Index - s.nextIndex = last.Index + 1 - } - - // Notify still leader - s.notifyAll(true) -} diff --git a/go/vt/orchestrator/external/raft/snapshot.go b/go/vt/orchestrator/external/raft/snapshot.go deleted file mode 100644 index a4a17f1cc67..00000000000 --- a/go/vt/orchestrator/external/raft/snapshot.go +++ /dev/null @@ -1,40 +0,0 @@ -package raft - -import ( - "io" -) - -// SnapshotMeta is for metadata of a snapshot. -type SnapshotMeta struct { - ID string // ID is opaque to the store, and is used for opening - Index uint64 - Term uint64 - Peers []byte - Size int64 -} - -// SnapshotStore interface is used to allow for flexible implementations -// of snapshot storage and retrieval. For example, a client could implement -// a shared state store such as S3, allowing new nodes to restore snapshots -// without streaming from the leader. -type SnapshotStore interface { - // Create is used to begin a snapshot at a given index and term, - // with the current peer set already encoded. - Create(index, term uint64, peers []byte) (SnapshotSink, error) - - // List is used to list the available snapshots in the store. - // It should return then in descending order, with the highest index first. - List() ([]*SnapshotMeta, error) - - // Open takes a snapshot ID and provides a ReadCloser. Once close is - // called it is assumed the snapshot is no longer needed. - Open(id string) (*SnapshotMeta, io.ReadCloser, error) -} - -// SnapshotSink is returned by StartSnapshot. The FSM will Write state -// to the sink and call Close on completion. On error, Cancel will be invoked. 
-type SnapshotSink interface { - io.WriteCloser - ID() string - Cancel() error -} diff --git a/go/vt/orchestrator/external/raft/stable.go b/go/vt/orchestrator/external/raft/stable.go deleted file mode 100644 index ff59a8c570a..00000000000 --- a/go/vt/orchestrator/external/raft/stable.go +++ /dev/null @@ -1,15 +0,0 @@ -package raft - -// StableStore is used to provide stable storage -// of key configurations to ensure safety. -type StableStore interface { - Set(key []byte, val []byte) error - - // Get returns the value for key, or an empty byte slice if key was not found. - Get(key []byte) ([]byte, error) - - SetUint64(key []byte, val uint64) error - - // GetUint64 returns the uint64 value for key, or 0 if key was not found. - GetUint64(key []byte) (uint64, error) -} diff --git a/go/vt/orchestrator/external/raft/state.go b/go/vt/orchestrator/external/raft/state.go deleted file mode 100644 index f6d658b8bb4..00000000000 --- a/go/vt/orchestrator/external/raft/state.go +++ /dev/null @@ -1,167 +0,0 @@ -package raft - -import ( - "sync" - "sync/atomic" -) - -// RaftState captures the state of a Raft node: Follower, Candidate, Leader, -// or Shutdown. -type RaftState uint32 - -const ( - // Follower is the initial state of a Raft node. - Follower RaftState = iota - - // Candidate is one of the valid states of a Raft node. - Candidate - - // Leader is one of the valid states of a Raft node. - Leader - - // Shutdown is the terminal state of a Raft node. - Shutdown -) - -func (s RaftState) String() string { - switch s { - case Follower: - return "Follower" - case Candidate: - return "Candidate" - case Leader: - return "Leader" - case Shutdown: - return "Shutdown" - default: - return "Unknown" - } -} - -// raftState is used to maintain various state variables -// and provides an interface to set/get the variables in a -// thread safe manner. 
-type raftState struct { - // The current term, cache of StableStore - currentTerm uint64 - - // Highest committed log entry - commitIndex uint64 - - // Last applied log to the FSM - lastApplied uint64 - - // protects 4 next fields - lastLock sync.Mutex - - // Cache the latest snapshot index/term - lastSnapshotIndex uint64 - lastSnapshotTerm uint64 - - // Cache the latest log from LogStore - lastLogIndex uint64 - lastLogTerm uint64 - - // Tracks running goroutines - routinesGroup sync.WaitGroup - - // The current state - state RaftState -} - -func (r *raftState) getState() RaftState { - stateAddr := (*uint32)(&r.state) - return RaftState(atomic.LoadUint32(stateAddr)) -} - -func (r *raftState) setState(s RaftState) { - stateAddr := (*uint32)(&r.state) - atomic.StoreUint32(stateAddr, uint32(s)) -} - -func (r *raftState) getCurrentTerm() uint64 { - return atomic.LoadUint64(&r.currentTerm) -} - -func (r *raftState) setCurrentTerm(term uint64) { - atomic.StoreUint64(&r.currentTerm, term) -} - -func (r *raftState) getLastLog() (index, term uint64) { - r.lastLock.Lock() - index = r.lastLogIndex - term = r.lastLogTerm - r.lastLock.Unlock() - return -} - -func (r *raftState) setLastLog(index, term uint64) { - r.lastLock.Lock() - r.lastLogIndex = index - r.lastLogTerm = term - r.lastLock.Unlock() -} - -func (r *raftState) getLastSnapshot() (index, term uint64) { - r.lastLock.Lock() - index = r.lastSnapshotIndex - term = r.lastSnapshotTerm - r.lastLock.Unlock() - return -} - -func (r *raftState) setLastSnapshot(index, term uint64) { - r.lastLock.Lock() - r.lastSnapshotIndex = index - r.lastSnapshotTerm = term - r.lastLock.Unlock() -} - -func (r *raftState) getCommitIndex() uint64 { - return atomic.LoadUint64(&r.commitIndex) -} - -func (r *raftState) setCommitIndex(index uint64) { - atomic.StoreUint64(&r.commitIndex, index) -} - -func (r *raftState) getLastApplied() uint64 { - return atomic.LoadUint64(&r.lastApplied) -} - -func (r *raftState) setLastApplied(index uint64) { - 
atomic.StoreUint64(&r.lastApplied, index) -} - -// Start a goroutine and properly handle the race between a routine -// starting and incrementing, and exiting and decrementing. -func (r *raftState) goFunc(f func()) { - r.routinesGroup.Add(1) - go func() { - defer r.routinesGroup.Done() - f() - }() -} - -func (r *raftState) waitShutdown() { - r.routinesGroup.Wait() -} - -// getLastIndex returns the last index in stable storage. -// Either from the last log or from the last snapshot. -func (r *raftState) getLastIndex() uint64 { - r.lastLock.Lock() - defer r.lastLock.Unlock() - return max(r.lastLogIndex, r.lastSnapshotIndex) -} - -// getLastEntry returns the last index and term in stable storage. -// Either from the last log or from the last snapshot. -func (r *raftState) getLastEntry() (uint64, uint64) { - r.lastLock.Lock() - defer r.lastLock.Unlock() - if r.lastLogIndex >= r.lastSnapshotIndex { - return r.lastLogIndex, r.lastLogTerm - } - return r.lastSnapshotIndex, r.lastSnapshotTerm -} diff --git a/go/vt/orchestrator/external/raft/tcp_transport.go b/go/vt/orchestrator/external/raft/tcp_transport.go deleted file mode 100644 index 50c6d15df18..00000000000 --- a/go/vt/orchestrator/external/raft/tcp_transport.go +++ /dev/null @@ -1,105 +0,0 @@ -package raft - -import ( - "errors" - "io" - "log" - "net" - "time" -) - -var ( - errNotAdvertisable = errors.New("local bind address is not advertisable") - errNotTCP = errors.New("local address is not a TCP address") -) - -// TCPStreamLayer implements StreamLayer interface for plain TCP. -type TCPStreamLayer struct { - advertise net.Addr - listener *net.TCPListener -} - -// NewTCPTransport returns a NetworkTransport that is built on top of -// a TCP streaming transport layer. 
-func NewTCPTransport( - bindAddr string, - advertise net.Addr, - maxPool int, - timeout time.Duration, - logOutput io.Writer, -) (*NetworkTransport, error) { - return newTCPTransport(bindAddr, advertise, maxPool, timeout, func(stream StreamLayer) *NetworkTransport { - return NewNetworkTransport(stream, maxPool, timeout, logOutput) - }) -} - -// NewTCPTransportWithLogger returns a NetworkTransport that is built on top of -// a TCP streaming transport layer, with log output going to the supplied Logger -func NewTCPTransportWithLogger( - bindAddr string, - advertise net.Addr, - maxPool int, - timeout time.Duration, - logger *log.Logger, -) (*NetworkTransport, error) { - return newTCPTransport(bindAddr, advertise, maxPool, timeout, func(stream StreamLayer) *NetworkTransport { - return NewNetworkTransportWithLogger(stream, maxPool, timeout, logger) - }) -} - -func newTCPTransport(bindAddr string, - advertise net.Addr, - maxPool int, - timeout time.Duration, - transportCreator func(stream StreamLayer) *NetworkTransport) (*NetworkTransport, error) { - // Try to bind - list, err := net.Listen("tcp", bindAddr) - if err != nil { - return nil, err - } - - // Create stream - stream := &TCPStreamLayer{ - advertise: advertise, - listener: list.(*net.TCPListener), - } - - // Verify that we have a usable advertise address - addr, ok := stream.Addr().(*net.TCPAddr) - if !ok { - list.Close() - return nil, errNotTCP - } - if addr.IP.IsUnspecified() { - list.Close() - return nil, errNotAdvertisable - } - - // Create the network transport - trans := transportCreator(stream) - return trans, nil -} - -// Dial implements the StreamLayer interface. -func (t *TCPStreamLayer) Dial(address string, timeout time.Duration) (net.Conn, error) { - return net.DialTimeout("tcp", address, timeout) -} - -// Accept implements the net.Listener interface. -func (t *TCPStreamLayer) Accept() (c net.Conn, err error) { - return t.listener.Accept() -} - -// Close implements the net.Listener interface. 
-func (t *TCPStreamLayer) Close() (err error) { - return t.listener.Close() -} - -// Addr implements the net.Listener interface. -func (t *TCPStreamLayer) Addr() net.Addr { - // Use an advertise addr if provided - if t.advertise != nil { - return t.advertise - } - return t.listener.Addr() -} diff --git a/go/vt/orchestrator/external/raft/transport.go b/go/vt/orchestrator/external/raft/transport.go deleted file mode 100644 index 2b8b422ff0b..00000000000 --- a/go/vt/orchestrator/external/raft/transport.go +++ /dev/null @@ -1,124 +0,0 @@ -package raft - -import ( - "io" - "time" -) - -// RPCResponse captures both a response and a potential error. -type RPCResponse struct { - Response interface{} - Error error -} - -// RPC has a command, and provides a response mechanism. -type RPC struct { - Command interface{} - Reader io.Reader // Set only for InstallSnapshot - RespChan chan<- RPCResponse -} - -// Respond is used to respond with a response, error or both -func (r *RPC) Respond(resp interface{}, err error) { - r.RespChan <- RPCResponse{resp, err} -} - -// Transport provides an interface for network transports -// to allow Raft to communicate with other nodes. -type Transport interface { - // Consumer returns a channel that can be used to - // consume and respond to RPC requests. - Consumer() <-chan RPC - - // LocalAddr is used to return our local address to distinguish from our peers. - LocalAddr() string - - // AppendEntriesPipeline returns an interface that can be used to pipeline - // AppendEntries requests. - AppendEntriesPipeline(target string) (AppendPipeline, error) - - // AppendEntries sends the appropriate RPC to the target node. - AppendEntries(target string, args *AppendEntriesRequest, resp *AppendEntriesResponse) error - - // RequestVote sends the appropriate RPC to the target node. - RequestVote(target string, args *RequestVoteRequest, resp *RequestVoteResponse) error - - // InstallSnapshot is used to push a snapshot down to a follower. 
The data is read from - // the ReadCloser and streamed to the client. - InstallSnapshot(target string, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error - - // EncodePeer is used to serialize a peer name. - EncodePeer(string) []byte - - // DecodePeer is used to deserialize a peer name. - DecodePeer([]byte) string - - // SetHeartbeatHandler is used to setup a heartbeat handler - // as a fast-pass. This is to avoid head-of-line blocking from - // disk IO. If a Transport does not support this, it can simply - // ignore the call, and push the heartbeat onto the Consumer channel. - SetHeartbeatHandler(cb func(rpc RPC)) -} - -// WithClose is an interface that a transport may provide which -// allows a transport to be shut down cleanly when a Raft instance -// shuts down. -// -// It is defined separately from Transport as unfortunately it wasn't in the -// original interface specification. -type WithClose interface { - // Close permanently closes a transport, stopping - // any associated goroutines and freeing other resources. - Close() error -} - -// LoopbackTransport is an interface that provides a loopback transport suitable for testing -// e.g. InmemTransport. It's there so we don't have to rewrite tests. -type LoopbackTransport interface { - Transport // Embedded transport reference - WithPeers // Embedded peer management - WithClose // with a close routine -} - -// WithPeers is an interface that a transport may provide which allows for connection and -// disconnection. Unless the transport is a loopback transport, the transport specified to -// "Connect" is likely to be nil. -type WithPeers interface { - Connect(peer string, t Transport) // Connect a peer - Disconnect(peer string) // Disconnect a given peer - DisconnectAll() // Disconnect all peers, possibly to reconnect them later -} - -// AppendPipeline is used for pipelining AppendEntries requests. 
It is used -// to increase the replication throughput by masking latency and better -// utilizing bandwidth. -type AppendPipeline interface { - // AppendEntries is used to add another request to the pipeline. - // The send may block which is an effective form of back-pressure. - AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) - - // Consumer returns a channel that can be used to consume - // response futures when they are ready. - Consumer() <-chan AppendFuture - - // Close closes the pipeline and cancels all inflight RPCs - Close() error -} - -// AppendFuture is used to return information about a pipelined AppendEntries request. -type AppendFuture interface { - Future - - // Start returns the time that the append request was started. - // It is always OK to call this method. - Start() time.Time - - // Request holds the parameters of the AppendEntries call. - // It is always OK to call this method. - Request() *AppendEntriesRequest - - // Response holds the results of the AppendEntries call. - // This method must only be called after the Error - // method returns, and will only be valid on success. 
- Response() *AppendEntriesResponse -} diff --git a/go/vt/orchestrator/external/raft/transport_test.go b/go/vt/orchestrator/external/raft/transport_test.go deleted file mode 100644 index 2e375d612a4..00000000000 --- a/go/vt/orchestrator/external/raft/transport_test.go +++ /dev/null @@ -1,313 +0,0 @@ -package raft - -import ( - "bytes" - "reflect" - "testing" - "time" -) - -const ( - TT_Inmem = iota - - // NOTE: must be last - numTestTransports -) - -func NewTestTransport(ttype int, addr string) (string, LoopbackTransport) { - switch ttype { - case TT_Inmem: - addr, lt := NewInmemTransport(addr) - return addr, lt - default: - panic("Unknown transport type") - } -} - -func TestTransport_StartStop(t *testing.T) { - for ttype := 0; ttype < numTestTransports; ttype++ { - _, trans := NewTestTransport(ttype, "") - if err := trans.Close(); err != nil { - t.Fatalf("err: %v", err) - } - } -} - -func TestTransport_AppendEntries(t *testing.T) { - for ttype := 0; ttype < numTestTransports; ttype++ { - addr1, trans1 := NewTestTransport(ttype, "") - defer trans1.Close() - rpcCh := trans1.Consumer() - - // Make the RPC request - args := AppendEntriesRequest{ - Term: 10, - Leader: []byte("cartman"), - PrevLogEntry: 100, - PrevLogTerm: 4, - Entries: []*Log{ - { - Index: 101, - Term: 4, - Type: LogNoop, - }, - }, - LeaderCommitIndex: 90, - } - resp := AppendEntriesResponse{ - Term: 4, - LastLog: 90, - Success: true, - } - - // Listen for a request - go func() { - select { - case rpc := <-rpcCh: - // Verify the command - req := rpc.Command.(*AppendEntriesRequest) - if !reflect.DeepEqual(req, &args) { - t.Errorf("command mismatch: %#v %#v", *req, args) - } - rpc.Respond(&resp, nil) - - case <-time.After(200 * time.Millisecond): - t.Errorf("timeout") - } - }() - - // Transport 2 makes outbound request - addr2, trans2 := NewTestTransport(ttype, "") - defer trans2.Close() - - trans1.Connect(addr2, trans2) - trans2.Connect(addr1, trans1) - - var out AppendEntriesResponse - if err := 
trans2.AppendEntries(trans1.LocalAddr(), &args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - // Verify the response - if !reflect.DeepEqual(resp, out) { - t.Fatalf("command mismatch: %#v %#v", resp, out) - } - } -} - -func TestTransport_AppendEntriesPipeline(t *testing.T) { - for ttype := 0; ttype < numTestTransports; ttype++ { - addr1, trans1 := NewTestTransport(ttype, "") - defer trans1.Close() - rpcCh := trans1.Consumer() - - // Make the RPC request - args := AppendEntriesRequest{ - Term: 10, - Leader: []byte("cartman"), - PrevLogEntry: 100, - PrevLogTerm: 4, - Entries: []*Log{ - { - Index: 101, - Term: 4, - Type: LogNoop, - }, - }, - LeaderCommitIndex: 90, - } - resp := AppendEntriesResponse{ - Term: 4, - LastLog: 90, - Success: true, - } - - // Listen for a request - go func() { - for i := 0; i < 10; i++ { - select { - case rpc := <-rpcCh: - // Verify the command - req := rpc.Command.(*AppendEntriesRequest) - if !reflect.DeepEqual(req, &args) { - t.Errorf("command mismatch: %#v %#v", *req, args) - } - rpc.Respond(&resp, nil) - - case <-time.After(200 * time.Millisecond): - t.Errorf("timeout") - } - } - }() - - // Transport 2 makes outbound request - addr2, trans2 := NewTestTransport(ttype, "") - defer trans2.Close() - - trans1.Connect(addr2, trans2) - trans2.Connect(addr1, trans1) - - pipeline, err := trans2.AppendEntriesPipeline(trans1.LocalAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - defer pipeline.Close() - for i := 0; i < 10; i++ { - out := new(AppendEntriesResponse) - if _, err := pipeline.AppendEntries(&args, out); err != nil { - t.Fatalf("err: %v", err) - } - } - - respCh := pipeline.Consumer() - for i := 0; i < 10; i++ { - select { - case ready := <-respCh: - // Verify the response - if !reflect.DeepEqual(&resp, ready.Response()) { - t.Fatalf("command mismatch: %#v %#v", &resp, ready.Response()) - } - case <-time.After(200 * time.Millisecond): - t.Fatalf("timeout") - } - } - } -} - -func TestTransport_RequestVote(t *testing.T) { 
- for ttype := 0; ttype < numTestTransports; ttype++ { - addr1, trans1 := NewTestTransport(ttype, "") - defer trans1.Close() - rpcCh := trans1.Consumer() - - // Make the RPC request - args := RequestVoteRequest{ - Term: 20, - Candidate: []byte("butters"), - LastLogIndex: 100, - LastLogTerm: 19, - } - resp := RequestVoteResponse{ - Term: 100, - Peers: []byte("blah"), - Granted: false, - } - - // Listen for a request - go func() { - select { - case rpc := <-rpcCh: - // Verify the command - req := rpc.Command.(*RequestVoteRequest) - if !reflect.DeepEqual(req, &args) { - t.Errorf("command mismatch: %#v %#v", *req, args) - } - - rpc.Respond(&resp, nil) - - case <-time.After(200 * time.Millisecond): - t.Errorf("timeout") - } - }() - - // Transport 2 makes outbound request - addr2, trans2 := NewTestTransport(ttype, "") - defer trans2.Close() - - trans1.Connect(addr2, trans2) - trans2.Connect(addr1, trans1) - - var out RequestVoteResponse - if err := trans2.RequestVote(trans1.LocalAddr(), &args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - // Verify the response - if !reflect.DeepEqual(resp, out) { - t.Fatalf("command mismatch: %#v %#v", resp, out) - } - } -} - -func TestTransport_InstallSnapshot(t *testing.T) { - for ttype := 0; ttype < numTestTransports; ttype++ { - addr1, trans1 := NewTestTransport(ttype, "") - defer trans1.Close() - rpcCh := trans1.Consumer() - - // Make the RPC request - args := InstallSnapshotRequest{ - Term: 10, - Leader: []byte("kyle"), - LastLogIndex: 100, - LastLogTerm: 9, - Peers: []byte("blah blah"), - Size: 10, - } - resp := InstallSnapshotResponse{ - Term: 10, - Success: true, - } - - // Listen for a request - go func() { - select { - case rpc := <-rpcCh: - // Verify the command - req := rpc.Command.(*InstallSnapshotRequest) - if !reflect.DeepEqual(req, &args) { - t.Errorf("command mismatch: %#v %#v", *req, args) - } - - // Try to read the bytes - buf := make([]byte, 10) - rpc.Reader.Read(buf) - - // Compare - if !bytes.Equal(buf, 
[]byte("0123456789")) { - t.Errorf("bad buf %v", buf) - } - - rpc.Respond(&resp, nil) - - case <-time.After(200 * time.Millisecond): - t.Errorf("timeout") - } - }() - - // Transport 2 makes outbound request - addr2, trans2 := NewTestTransport(ttype, "") - defer trans2.Close() - - trans1.Connect(addr2, trans2) - trans2.Connect(addr1, trans1) - - // Create a buffer - buf := bytes.NewBuffer([]byte("0123456789")) - - var out InstallSnapshotResponse - if err := trans2.InstallSnapshot(trans1.LocalAddr(), &args, &out, buf); err != nil { - t.Fatalf("err: %v", err) - } - - // Verify the response - if !reflect.DeepEqual(resp, out) { - t.Fatalf("command mismatch: %#v %#v", resp, out) - } - } -} - -func TestTransport_EncodeDecode(t *testing.T) { - for ttype := 0; ttype < numTestTransports; ttype++ { - _, trans1 := NewTestTransport(ttype, "") - defer trans1.Close() - - local := trans1.LocalAddr() - enc := trans1.EncodePeer(local) - dec := trans1.DecodePeer(enc) - - if dec != local { - t.Fatalf("enc/dec fail: %v %v", dec, local) - } - } -} diff --git a/go/vt/orchestrator/external/raft/util.go b/go/vt/orchestrator/external/raft/util.go deleted file mode 100644 index 944968a25c2..00000000000 --- a/go/vt/orchestrator/external/raft/util.go +++ /dev/null @@ -1,179 +0,0 @@ -package raft - -import ( - "bytes" - crand "crypto/rand" - "fmt" - "math" - "math/big" - "math/rand" - "time" - - "github.com/hashicorp/go-msgpack/codec" -) - -func init() { - // Ensure we use a high-entropy seed for the psuedo-random generator - rand.Seed(newSeed()) -} - -// returns an int64 from a crypto random source -// can be used to seed a source for a math/rand. -func newSeed() int64 { - r, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64)) - if err != nil { - panic(fmt.Errorf("failed to read random bytes: %v", err)) - } - return r.Int64() -} - -// randomTimeout returns a value that is between the minVal and 2x minVal. 
-func randomTimeout(minVal time.Duration) <-chan time.Time { - if minVal == 0 { - return nil - } - extra := (time.Duration(rand.Int63()) % minVal) - return time.After(minVal + extra) -} - -// min returns the minimum. -func min(a, b uint64) uint64 { - if a <= b { - return a - } - return b -} - -// max returns the maximum. -func max(a, b uint64) uint64 { - if a >= b { - return a - } - return b -} - -// generateUUID is used to generate a random UUID. -func generateUUID() string { - buf := make([]byte, 16) - if _, err := crand.Read(buf); err != nil { - panic(fmt.Errorf("failed to read random bytes: %v", err)) - } - - return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", - buf[0:4], - buf[4:6], - buf[6:8], - buf[8:10], - buf[10:16]) -} - -// asyncNotifyCh is used to do an async channel send -// to a single channel without blocking. -func asyncNotifyCh(ch chan struct{}) { - select { - case ch <- struct{}{}: - default: - } -} - -// asyncNotifyBool is used to do an async notification -// on a bool channel. -func asyncNotifyBool(ch chan bool, v bool) { - select { - case ch <- v: - default: - } -} - -// ExcludePeer is used to exclude a single peer from a list of peers. -func ExcludePeer(peers []string, peer string) []string { - otherPeers := make([]string, 0, len(peers)) - for _, p := range peers { - if p != peer { - otherPeers = append(otherPeers, p) - } - } - return otherPeers -} - -// PeerContained checks if a given peer is contained in a list. -func PeerContained(peers []string, peer string) bool { - for _, p := range peers { - if p == peer { - return true - } - } - return false -} - -// AddUniquePeer is used to add a peer to a list of existing -// peers only if it is not already contained. -func AddUniquePeer(peers []string, peer string) []string { - if PeerContained(peers, peer) { - return peers - } - return append(peers, peer) -} - -// encodePeers is used to serialize a list of peers. 
-func encodePeers(peers []string, trans Transport) []byte { - // Encode each peer - var encPeers [][]byte - for _, p := range peers { - encPeers = append(encPeers, trans.EncodePeer(p)) - } - - // Encode the entire array - buf, err := encodeMsgPack(encPeers) - if err != nil { - panic(fmt.Errorf("failed to encode peers: %v", err)) - } - - return buf.Bytes() -} - -// decodePeers is used to deserialize a list of peers. -func decodePeers(buf []byte, trans Transport) []string { - // Decode the buffer first - var encPeers [][]byte - if err := decodeMsgPack(buf, &encPeers); err != nil { - panic(fmt.Errorf("failed to decode peers: %v", err)) - } - - // Deserialize each peer - var peers []string - for _, enc := range encPeers { - peers = append(peers, trans.DecodePeer(enc)) - } - - return peers -} - -// Decode reverses the encode operation on a byte slice input. -func decodeMsgPack(buf []byte, out interface{}) error { - r := bytes.NewBuffer(buf) - hd := codec.MsgpackHandle{} - dec := codec.NewDecoder(r, &hd) - return dec.Decode(out) -} - -// Encode writes an encoded object to a new bytes buffer. -func encodeMsgPack(in interface{}) (*bytes.Buffer, error) { - buf := bytes.NewBuffer(nil) - hd := codec.MsgpackHandle{} - enc := codec.NewEncoder(buf, &hd) - err := enc.Encode(in) - return buf, err -} - -// backoff is used to compute an exponential backoff -// duration. Base time is scaled by the current round, -// up to some maximum scale factor. 
-func backoff(base time.Duration, round, limit uint64) time.Duration { - power := min(round, limit) - for power > 2 { - base *= 2 - power-- - } - return base -} diff --git a/go/vt/orchestrator/external/raft/util_test.go b/go/vt/orchestrator/external/raft/util_test.go deleted file mode 100644 index be637bc29b6..00000000000 --- a/go/vt/orchestrator/external/raft/util_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package raft - -import ( - "reflect" - "regexp" - "testing" - "time" -) - -func TestRandomTimeout(t *testing.T) { - start := time.Now() - timeout := randomTimeout(time.Millisecond) - - select { - case <-timeout: - diff := time.Since(start) - if diff < time.Millisecond { - t.Fatalf("fired early") - } - case <-time.After(3 * time.Millisecond): - t.Fatalf("timeout") - } -} - -func TestNewSeed(t *testing.T) { - vals := make(map[int64]bool) - for i := 0; i < 1000; i++ { - seed := newSeed() - if _, exists := vals[seed]; exists { - t.Fatal("newSeed() return a value it'd previously returned") - } - vals[seed] = true - } -} - -func TestRandomTimeout_NoTime(t *testing.T) { - timeout := randomTimeout(0) - if timeout != nil { - t.Fatalf("expected nil channel") - } -} - -func TestMin(t *testing.T) { - if min(1, 1) != 1 { - t.Fatalf("bad min") - } - if min(2, 1) != 1 { - t.Fatalf("bad min") - } - if min(1, 2) != 1 { - t.Fatalf("bad min") - } -} - -func TestMax(t *testing.T) { - if max(1, 1) != 1 { - t.Fatalf("bad max") - } - if max(2, 1) != 2 { - t.Fatalf("bad max") - } - if max(1, 2) != 2 { - t.Fatalf("bad max") - } -} - -func TestGenerateUUID(t *testing.T) { - prev := generateUUID() - for i := 0; i < 100; i++ { - id := generateUUID() - if prev == id { - t.Fatalf("Should get a new ID!") - } - - matched, err := regexp.MatchString( //nolint SA6000: calling regexp.MatchString in a loop has poor performance - `[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}`, id) - if !matched || err != nil { - t.Fatalf("expected match %s %v %s", id, matched, err) - } - } -} - -func 
TestExcludePeer(t *testing.T) { - peers := []string{NewInmemAddr(), NewInmemAddr(), NewInmemAddr()} - peer := peers[2] - - after := ExcludePeer(peers, peer) - if len(after) != 2 { - t.Fatalf("Bad length") - } - if after[0] == peer || after[1] == peer { - t.Fatalf("should not contain peer") - } -} - -func TestPeerContained(t *testing.T) { - peers := []string{NewInmemAddr(), NewInmemAddr(), NewInmemAddr()} - - if !PeerContained(peers, peers[2]) { - t.Fatalf("Expect contained") - } - if PeerContained(peers, NewInmemAddr()) { - t.Fatalf("unexpected contained") - } -} - -func TestAddUniquePeer(t *testing.T) { - peers := []string{NewInmemAddr(), NewInmemAddr(), NewInmemAddr()} - after := AddUniquePeer(peers, peers[2]) - if !reflect.DeepEqual(after, peers) { - t.Fatalf("unexpected append") - } - after = AddUniquePeer(peers, NewInmemAddr()) - if len(after) != 4 { - t.Fatalf("expected append") - } -} - -func TestEncodeDecodePeers(t *testing.T) { - peers := []string{NewInmemAddr(), NewInmemAddr(), NewInmemAddr()} - _, trans := NewInmemTransport("") - - // Try to encode/decode - buf := encodePeers(peers, trans) - decoded := decodePeers(buf, trans) - - if !reflect.DeepEqual(peers, decoded) { - t.Fatalf("mismatch %v %v", peers, decoded) - } -} - -func TestBackoff(t *testing.T) { - b := backoff(10*time.Millisecond, 1, 8) - if b != 10*time.Millisecond { - t.Fatalf("bad: %v", b) - } - - b = backoff(20*time.Millisecond, 2, 8) - if b != 20*time.Millisecond { - t.Fatalf("bad: %v", b) - } - - b = backoff(10*time.Millisecond, 8, 8) - if b != 640*time.Millisecond { - t.Fatalf("bad: %v", b) - } - - b = backoff(10*time.Millisecond, 9, 8) - if b != 640*time.Millisecond { - t.Fatalf("bad: %v", b) - } -} diff --git a/go/vt/orchestrator/http/agents_api.go b/go/vt/orchestrator/http/agents_api.go index b62ed1fe70b..37682656487 100644 --- a/go/vt/orchestrator/http/agents_api.go +++ b/go/vt/orchestrator/http/agents_api.go @@ -15,119 +15,3 @@ */ package http - -import ( - "fmt" - "net/http" - 
"strconv" - "strings" - - "github.com/go-martini/martini" - "github.com/martini-contrib/render" - - "vitess.io/vitess/go/vt/orchestrator/agent" - "vitess.io/vitess/go/vt/orchestrator/attributes" -) - -type HttpAgentsAPI struct { - URLPrefix string -} - -var AgentsAPI HttpAgentsAPI = HttpAgentsAPI{} - -// SubmitAgent registeres an agent. It is initiated by an agent to register itself. -func (this *HttpAgentsAPI) SubmitAgent(params martini.Params, r render.Render) { - port, err := strconv.Atoi(params["port"]) - if err != nil { - r.JSON(200, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - output, err := agent.SubmitAgent(params["host"], port, params["token"]) - if err != nil { - r.JSON(200, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - r.JSON(200, output) -} - -// SetHostAttribute is a utility method that allows per-host key-value store. -func (this *HttpAgentsAPI) SetHostAttribute(params martini.Params, r render.Render, req *http.Request) { - err := attributes.SetHostAttributes(params["host"], params["attrVame"], params["attrValue"]) - - if err != nil { - r.JSON(200, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(200, (err == nil)) -} - -// GetHostAttributeByAttributeName returns a host attribute -func (this *HttpAgentsAPI) GetHostAttributeByAttributeName(params martini.Params, r render.Render, req *http.Request) { - - output, err := attributes.GetHostAttributesByAttribute(params["attr"], req.URL.Query().Get("valueMatch")) - - if err != nil { - r.JSON(200, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(200, output) -} - -// AgentsHosts provides list of agent host names -func (this *HttpAgentsAPI) AgentsHosts(params martini.Params, r render.Render, req *http.Request) string { - agents, err := agent.ReadAgents() - hostnames := []string{} - for _, agent := range agents { - hostnames = append(hostnames, agent.Hostname) - } - - if err != nil { - 
r.JSON(200, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return "" - } - - if req.URL.Query().Get("format") == "txt" { - return strings.Join(hostnames, "\n") - } else { - r.JSON(200, hostnames) - } - return "" -} - -// AgentsInstances provides list of assumed MySQL instances (host:port) -func (this *HttpAgentsAPI) AgentsInstances(params martini.Params, r render.Render, req *http.Request) string { - agents, err := agent.ReadAgents() - hostnames := []string{} - for _, agent := range agents { - hostnames = append(hostnames, fmt.Sprintf("%s:%d", agent.Hostname, agent.MySQLPort)) - } - - if err != nil { - r.JSON(200, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return "" - } - - if req.URL.Query().Get("format") == "txt" { - return strings.Join(hostnames, "\n") - } else { - r.JSON(200, hostnames) - } - return "" -} - -func (this *HttpAgentsAPI) AgentPing(params martini.Params, r render.Render, req *http.Request) { - r.JSON(200, "OK") -} - -// RegisterRequests makes for the de-facto list of known API calls -func (this *HttpAgentsAPI) RegisterRequests(m *martini.ClassicMartini) { - m.Get(this.URLPrefix+"/api/submit-agent/:host/:port/:token", this.SubmitAgent) - m.Get(this.URLPrefix+"/api/host-attribute/:host/:attrVame/:attrValue", this.SetHostAttribute) - m.Get(this.URLPrefix+"/api/host-attribute/attr/:attr/", this.GetHostAttributeByAttributeName) - m.Get(this.URLPrefix+"/api/agents-hosts", this.AgentsHosts) - m.Get(this.URLPrefix+"/api/agents-instances", this.AgentsInstances) - m.Get(this.URLPrefix+"/api/agent-ping", this.AgentPing) -} diff --git a/go/vt/orchestrator/http/api.go b/go/vt/orchestrator/http/api.go index 633157cb084..a7bb28d5529 100644 --- a/go/vt/orchestrator/http/api.go +++ b/go/vt/orchestrator/http/api.go @@ -32,7 +32,6 @@ import ( "vitess.io/vitess/go/vt/orchestrator/external/golib/log" "vitess.io/vitess/go/vt/orchestrator/external/golib/util" - "vitess.io/vitess/go/vt/orchestrator/agent" 
"vitess.io/vitess/go/vt/orchestrator/collection" "vitess.io/vitess/go/vt/orchestrator/config" "vitess.io/vitess/go/vt/orchestrator/discovery" @@ -61,9 +60,6 @@ var apiSynonyms = map[string]string{ "regroup-slaves-bls": "regroup-replicas-bls", "move-slaves-gtid": "move-replicas-gtid", "regroup-slaves-gtid": "regroup-replicas-gtid", - "match-slaves": "match-replicas", - "match-up-slaves": "match-up-replicas", - "regroup-slaves-pgtid": "regroup-replicas-pgtid", "detach-slave": "detach-replica", "reattach-slave": "reattach-replica", "detach-slave-master-host": "detach-replica-master-host", @@ -941,163 +937,8 @@ func (this *HttpAPI) RelocateReplicas(params martini.Params, r render.Render, re Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Relocated %d replicas of %+v below %+v; %d errors: %+v", len(replicas), instanceKey, belowKey, len(errs), errs), Details: replicas}) } -// MoveEquivalent attempts to move an instance below another, baseed on known equivalence master coordinates -func (this *HttpAPI) MoveEquivalent(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - instanceKey, err := this.getInstanceKey(params["host"], params["port"]) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - belowKey, err := this.getInstanceKey(params["belowHost"], params["belowPort"]) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - instance, err := inst.MoveEquivalent(&instanceKey, &belowKey) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Instance %+v relocated via equivalence coordinates below %+v", instanceKey, belowKey), Details: instance}) -} - -// LastPseudoGTID attempts to find the last pseugo-gtid entry in an instance -func (this 
*HttpAPI) LastPseudoGTID(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - instanceKey, err := this.getInstanceKey(params["host"], params["port"]) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - instance, found, err := inst.ReadInstance(&instanceKey) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - if instance == nil || !found { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Instance not found: %+v", instanceKey)}) - return - } - coordinates, text, err := inst.FindLastPseudoGTIDEntry(instance, instance.RelaylogCoordinates, nil, false, nil) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("%+v", *coordinates), Details: text}) -} - -// MatchBelow attempts to move an instance below another via pseudo GTID matching of binlog entries -func (this *HttpAPI) MatchBelow(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - instanceKey, err := this.getInstanceKey(params["host"], params["port"]) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - belowKey, err := this.getInstanceKey(params["belowHost"], params["belowPort"]) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - instance, matchedCoordinates, err := inst.MatchBelow(&instanceKey, &belowKey, true) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - Respond(r, &APIResponse{Code: OK, Message: 
fmt.Sprintf("Instance %+v matched below %+v at %+v", instanceKey, belowKey, *matchedCoordinates), Details: instance}) -} - -// MatchBelow attempts to move an instance below another via pseudo GTID matching of binlog entries -func (this *HttpAPI) MatchUp(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - instanceKey, err := this.getInstanceKey(params["host"], params["port"]) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - instance, matchedCoordinates, err := inst.MatchUp(&instanceKey, true) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Instance %+v matched up at %+v", instanceKey, *matchedCoordinates), Details: instance}) -} - -// MultiMatchReplicas attempts to match all replicas of a given instance below another, efficiently -func (this *HttpAPI) MultiMatchReplicas(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - instanceKey, err := this.getInstanceKey(params["host"], params["port"]) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - belowKey, err := this.getInstanceKey(params["belowHost"], params["belowPort"]) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - replicas, newMaster, err, errs := inst.MultiMatchReplicas(&instanceKey, &belowKey, req.URL.Query().Get("pattern")) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Matched %d replicas of %+v below %+v; %d errors: %+v", len(replicas), instanceKey, newMaster.Key, len(errs), 
errs), Details: newMaster.Key}) -} - -// MatchUpReplicas attempts to match up all replicas of an instance -func (this *HttpAPI) MatchUpReplicas(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - instanceKey, err := this.getInstanceKey(params["host"], params["port"]) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - replicas, newMaster, err, errs := inst.MatchUpReplicas(&instanceKey, req.URL.Query().Get("pattern")) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Matched up %d replicas of %+v below %+v; %d errors: %+v", len(replicas), instanceKey, newMaster.Key, len(errs), errs), Details: newMaster.Key}) -} - // RegroupReplicas attempts to pick a replica of a given instance and make it take its siblings, using any -// method possible (GTID, Pseudo-GTID, binlog servers) +// method possible (GTID, binlog servers) func (this *HttpAPI) RegroupReplicas(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) @@ -1120,31 +961,6 @@ func (this *HttpAPI) RegroupReplicas(params martini.Params, r render.Render, req promotedReplica.Key.DisplayString(), len(lostReplicas), len(equalReplicas), len(aheadReplicas)), Details: promotedReplica.Key}) } -// RegroupReplicas attempts to pick a replica of a given instance and make it take its siblings, efficiently, -// using pseudo-gtid if necessary -func (this *HttpAPI) RegroupReplicasPseudoGTID(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - instanceKey, err := 
this.getInstanceKey(params["host"], params["port"]) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - lostReplicas, equalReplicas, aheadReplicas, cannotReplicateReplicas, promotedReplica, err := inst.RegroupReplicasPseudoGTID(&instanceKey, false, nil, nil, nil) - lostReplicas = append(lostReplicas, cannotReplicateReplicas...) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("promoted replica: %s, lost: %d, trivial: %d, pseudo-gtid: %d", - promotedReplica.Key.DisplayString(), len(lostReplicas), len(equalReplicas), len(aheadReplicas)), Details: promotedReplica.Key}) -} - // RegroupReplicasGTID attempts to pick a replica of a given instance and make it take its siblings, efficiently, using GTID func (this *HttpAPI) RegroupReplicasGTID(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { @@ -1192,49 +1008,6 @@ func (this *HttpAPI) RegroupReplicasBinlogServers(params martini.Params, r rende promotedBinlogServer.Key.DisplayString()), Details: promotedBinlogServer.Key}) } -// MakeMaster attempts to make the given instance a master, and match its siblings to be its replicas -func (this *HttpAPI) MakeMaster(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - instanceKey, err := this.getInstanceKey(params["host"], params["port"]) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - instance, err := inst.MakeMaster(&instanceKey) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Instance %+v now made master", instanceKey), Details: instance}) -} - -// MakeLocalMaster attempts to 
make the given instance a local master: take over its master by -// enslaving its siblings and replicating from its grandparent. -func (this *HttpAPI) MakeLocalMaster(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - instanceKey, err := this.getInstanceKey(params["host"], params["port"]) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - instance, err := inst.MakeLocalMaster(&instanceKey) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Instance %+v now made local master", instanceKey), Details: instance}) -} - // SkipQuery skips a single query on a failed replication instance func (this *HttpAPI) SkipQuery(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { @@ -1419,33 +1192,6 @@ func (this *HttpAPI) RestartReplicationStatements(params martini.Params, r rende Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("statements for: %+v", instanceKey), Details: statements}) } -// MasterEquivalent provides (possibly empty) list of master coordinates equivalent to the given ones -func (this *HttpAPI) MasterEquivalent(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - instanceKey, err := this.getInstanceKey(params["host"], params["port"]) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - coordinates, err := this.getBinlogCoordinates(params["logFile"], params["logPos"]) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - instanceCoordinates := &inst.InstanceBinlogCoordinates{Key: 
instanceKey, Coordinates: coordinates} - - equivalentCoordinates, err := inst.GetEquivalentMasterCoordinates(instanceCoordinates) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) - return - } - - Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Found %+v equivalent coordinates", len(equivalentCoordinates)), Details: equivalentCoordinates}) -} - // CanReplicateFrom attempts to move an instance below another via pseudo GTID matching of binlog entries func (this *HttpAPI) CanReplicateFrom(params martini.Params, r render.Render, req *http.Request, user auth.User) { instanceKey, err := this.getInstanceKey(params["host"], params["port"]) @@ -2371,404 +2117,65 @@ func (this *HttpAPI) WriteBufferMetricsAggregated(params martini.Params, r rende r.JSON(http.StatusOK, aggregated) } -// Agents provides complete list of registered agents (See https://github.com/openark/orchestrator-agent) -func (this *HttpAPI) Agents(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) +// Headers is a self-test call which returns HTTP headers +func (this *HttpAPI) Headers(params martini.Params, r render.Render, req *http.Request) { + r.JSON(http.StatusOK, req.Header) +} + +// Health performs a self test +func (this *HttpAPI) Health(params martini.Params, r render.Render, req *http.Request) { + health, err := process.HealthTest() + if err != nil { + Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Application node is unhealthy %+v", err), Details: health}) return } - if !config.Config.ServeAgentsHttp { - Respond(r, &APIResponse{Code: ERROR, Message: "Agents not served"}) - return + + Respond(r, &APIResponse{Code: OK, Message: "Application node is healthy", Details: health}) + +} + +// LBCheck returns a constant respnse, and this can be used by load balancers that expect a given string. 
+func (this *HttpAPI) LBCheck(params martini.Params, r render.Render, req *http.Request) { + r.JSON(http.StatusOK, "OK") +} + +// LBCheck returns a constant respnse, and this can be used by load balancers that expect a given string. +func (this *HttpAPI) LeaderCheck(params martini.Params, r render.Render, req *http.Request) { + respondStatus, err := strconv.Atoi(params["errorStatusCode"]) + if err != nil || respondStatus < 0 { + respondStatus = http.StatusNotFound } - agents, err := agent.ReadAgents() + if logic.IsLeader() { + r.JSON(http.StatusOK, "OK") + } else { + r.JSON(respondStatus, "Not leader") + } +} +// A configurable endpoint that can be for regular status checks or whatever. While similar to +// Health() this returns 500 on failure. This will prevent issues for those that have come to +// expect a 200 +// It might be a good idea to deprecate the current Health() behavior and roll this in at some +// point +func (this *HttpAPI) StatusCheck(params martini.Params, r render.Render, req *http.Request) { + health, err := process.HealthTest() if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) + r.JSON(500, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Application node is unhealthy %+v", err), Details: health}) return } - - r.JSON(http.StatusOK, agents) + Respond(r, &APIResponse{Code: OK, Message: "Application node is healthy", Details: health}) } -// Agent returns complete information of a given agent -func (this *HttpAPI) Agent(params martini.Params, r render.Render, req *http.Request, user auth.User) { +// GrabElection forcibly grabs leadership. Use with care!! 
+func (this *HttpAPI) GrabElection(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) return } - if !config.Config.ServeAgentsHttp { - Respond(r, &APIResponse{Code: ERROR, Message: "Agents not served"}) - return - } - - agent, err := agent.GetAgent(params["host"]) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(http.StatusOK, agent) -} - -// AgentUnmount instructs an agent to unmount the designated mount point -func (this *HttpAPI) AgentUnmount(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !config.Config.ServeAgentsHttp { - Respond(r, &APIResponse{Code: ERROR, Message: "Agents not served"}) - return - } - - output, err := agent.Unmount(params["host"]) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(http.StatusOK, output) -} - -// AgentMountLV instructs an agent to mount a given volume on the designated mount point -func (this *HttpAPI) AgentMountLV(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !config.Config.ServeAgentsHttp { - Respond(r, &APIResponse{Code: ERROR, Message: "Agents not served"}) - return - } - - output, err := agent.MountLV(params["host"], req.URL.Query().Get("lv")) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(http.StatusOK, output) -} - -// AgentCreateSnapshot instructs an agent to create a new snapshot. Agent's DIY implementation. 
-func (this *HttpAPI) AgentCreateSnapshot(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !config.Config.ServeAgentsHttp { - Respond(r, &APIResponse{Code: ERROR, Message: "Agents not served"}) - return - } - - output, err := agent.CreateSnapshot(params["host"]) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(http.StatusOK, output) -} - -// AgentRemoveLV instructs an agent to remove a logical volume -func (this *HttpAPI) AgentRemoveLV(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !config.Config.ServeAgentsHttp { - Respond(r, &APIResponse{Code: ERROR, Message: "Agents not served"}) - return - } - - output, err := agent.RemoveLV(params["host"], req.URL.Query().Get("lv")) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(http.StatusOK, output) -} - -// AgentMySQLStop stops MySQL service on agent -func (this *HttpAPI) AgentMySQLStop(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !config.Config.ServeAgentsHttp { - Respond(r, &APIResponse{Code: ERROR, Message: "Agents not served"}) - return - } - - output, err := agent.MySQLStop(params["host"]) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(http.StatusOK, output) -} - -// AgentMySQLStart starts MySQL service on agent -func (this *HttpAPI) AgentMySQLStart(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, 
user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !config.Config.ServeAgentsHttp { - Respond(r, &APIResponse{Code: ERROR, Message: "Agents not served"}) - return - } - - output, err := agent.MySQLStart(params["host"]) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(http.StatusOK, output) -} - -func (this *HttpAPI) AgentCustomCommand(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !config.Config.ServeAgentsHttp { - Respond(r, &APIResponse{Code: ERROR, Message: "Agents not served"}) - return - } - - output, err := agent.CustomCommand(params["host"], params["command"]) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(http.StatusOK, output) -} - -// AgentSeed completely seeds a host with another host's snapshots. This is a complex operation -// governed by orchestrator and executed by the two agents involved. 
-func (this *HttpAPI) AgentSeed(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !config.Config.ServeAgentsHttp { - Respond(r, &APIResponse{Code: ERROR, Message: "Agents not served"}) - return - } - - output, err := agent.Seed(params["targetHost"], params["sourceHost"]) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(http.StatusOK, output) -} - -// AgentActiveSeeds lists active seeds and their state -func (this *HttpAPI) AgentActiveSeeds(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !config.Config.ServeAgentsHttp { - Respond(r, &APIResponse{Code: ERROR, Message: "Agents not served"}) - return - } - - output, err := agent.ReadActiveSeedsForHost(params["host"]) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(http.StatusOK, output) -} - -// AgentRecentSeeds lists recent seeds of a given agent -func (this *HttpAPI) AgentRecentSeeds(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !config.Config.ServeAgentsHttp { - Respond(r, &APIResponse{Code: ERROR, Message: "Agents not served"}) - return - } - - output, err := agent.ReadRecentCompletedSeedsForHost(params["host"]) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(http.StatusOK, output) -} - -// AgentSeedDetails provides details of a given seed -func (this *HttpAPI) AgentSeedDetails(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if 
!isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !config.Config.ServeAgentsHttp { - Respond(r, &APIResponse{Code: ERROR, Message: "Agents not served"}) - return - } - - seedId, _ := strconv.ParseInt(params["seedId"], 10, 0) - output, err := agent.AgentSeedDetails(seedId) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(http.StatusOK, output) -} - -// AgentSeedStates returns the breakdown of states (steps) of a given seed -func (this *HttpAPI) AgentSeedStates(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !config.Config.ServeAgentsHttp { - Respond(r, &APIResponse{Code: ERROR, Message: "Agents not served"}) - return - } - - seedId, _ := strconv.ParseInt(params["seedId"], 10, 0) - output, err := agent.ReadSeedStates(seedId) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(http.StatusOK, output) -} - -// Seeds retruns all recent seeds -func (this *HttpAPI) Seeds(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !config.Config.ServeAgentsHttp { - Respond(r, &APIResponse{Code: ERROR, Message: "Agents not served"}) - return - } - - output, err := agent.ReadRecentSeeds() - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(http.StatusOK, output) -} - -// AbortSeed instructs agents to abort an active seed -func (this *HttpAPI) AbortSeed(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: 
"Unauthorized"}) - return - } - if !config.Config.ServeAgentsHttp { - Respond(r, &APIResponse{Code: ERROR, Message: "Agents not served"}) - return - } - - seedId, _ := strconv.ParseInt(params["seedId"], 10, 0) - err := agent.AbortSeed(seedId) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("%+v", err)}) - return - } - - r.JSON(http.StatusOK, err == nil) -} - -// Headers is a self-test call which returns HTTP headers -func (this *HttpAPI) Headers(params martini.Params, r render.Render, req *http.Request) { - r.JSON(http.StatusOK, req.Header) -} - -// Health performs a self test -func (this *HttpAPI) Health(params martini.Params, r render.Render, req *http.Request) { - health, err := process.HealthTest() - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Application node is unhealthy %+v", err), Details: health}) - return - } - - Respond(r, &APIResponse{Code: OK, Message: "Application node is healthy", Details: health}) - -} - -// LBCheck returns a constant respnse, and this can be used by load balancers that expect a given string. -func (this *HttpAPI) LBCheck(params martini.Params, r render.Render, req *http.Request) { - r.JSON(http.StatusOK, "OK") -} - -// LBCheck returns a constant respnse, and this can be used by load balancers that expect a given string. -func (this *HttpAPI) LeaderCheck(params martini.Params, r render.Render, req *http.Request) { - respondStatus, err := strconv.Atoi(params["errorStatusCode"]) - if err != nil || respondStatus < 0 { - respondStatus = http.StatusNotFound - } - - if logic.IsLeader() { - r.JSON(http.StatusOK, "OK") - } else { - r.JSON(respondStatus, "Not leader") - } -} - -// A configurable endpoint that can be for regular status checks or whatever. While similar to -// Health() this returns 500 on failure. 
This will prevent issues for those that have come to -// expect a 200 -// It might be a good idea to deprecate the current Health() behavior and roll this in at some -// point -func (this *HttpAPI) StatusCheck(params martini.Params, r render.Render, req *http.Request) { - health, err := process.HealthTest() - if err != nil { - r.JSON(500, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Application node is unhealthy %+v", err), Details: health}) - return - } - Respond(r, &APIResponse{Code: OK, Message: "Application node is healthy", Details: health}) -} - -// GrabElection forcibly grabs leadership. Use with care!! -func (this *HttpAPI) GrabElection(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - err := process.GrabElection() - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Unable to grab election: %+v", err)}) + err := process.GrabElection() + if err != nil { + Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Unable to grab election: %+v", err)}) return } @@ -2790,188 +2197,6 @@ func (this *HttpAPI) Reelect(params martini.Params, r render.Render, req *http.R Respond(r, &APIResponse{Code: OK, Message: "Set re-elections"}) } -// RaftAddPeer adds a new node to the raft cluster -func (this *HttpAPI) RaftAddPeer(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !orcraft.IsRaftEnabled() { - Respond(r, &APIResponse{Code: ERROR, Message: "raft-add-peer: not running with raft setup"}) - return - } - addr, err := orcraft.AddPeer(params["addr"]) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Cannot add raft peer: %+v", err)}) - return - } - - r.JSON(http.StatusOK, addr) -} - -// RaftAddPeer removes a node fro 
the raft cluster -func (this *HttpAPI) RaftRemovePeer(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !orcraft.IsRaftEnabled() { - Respond(r, &APIResponse{Code: ERROR, Message: "raft-remove-peer: not running with raft setup"}) - return - } - addr, err := orcraft.RemovePeer(params["addr"]) - - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Cannot remove raft peer: %+v", err)}) - return - } - - r.JSON(http.StatusOK, addr) -} - -// RaftYield yields to a specified host -func (this *HttpAPI) RaftYield(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !orcraft.IsRaftEnabled() { - Respond(r, &APIResponse{Code: ERROR, Message: "raft-yield: not running with raft setup"}) - return - } - orcraft.PublishYield(params["node"]) - Respond(r, &APIResponse{Code: OK, Message: "Asynchronously yielded"}) -} - -// RaftYieldHint yields to a host whose name contains given hint (e.g. 
DC) -func (this *HttpAPI) RaftYieldHint(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !isAuthorizedForAction(req, user) { - Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) - return - } - if !orcraft.IsRaftEnabled() { - Respond(r, &APIResponse{Code: ERROR, Message: "raft-yield-hint: not running with raft setup"}) - return - } - hint := params["hint"] - orcraft.PublishYieldHostnameHint(hint) - Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Asynchronously yielded by hint %s", hint), Details: hint}) -} - -// RaftPeers returns the list of peers in a raft setup -func (this *HttpAPI) RaftPeers(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !orcraft.IsRaftEnabled() { - Respond(r, &APIResponse{Code: ERROR, Message: "raft-nodes: not running with raft setup"}) - return - } - - peers, err := orcraft.GetPeers() - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Cannot get raft peers: %+v", err)}) - return - } - - r.JSON(http.StatusOK, peers) -} - -// RaftState returns the state of this raft node -func (this *HttpAPI) RaftState(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !orcraft.IsRaftEnabled() { - Respond(r, &APIResponse{Code: ERROR, Message: "raft-state: not running with raft setup"}) - return - } - - state := orcraft.GetState().String() - r.JSON(http.StatusOK, state) -} - -// RaftLeader returns the identify of the leader, if possible -func (this *HttpAPI) RaftLeader(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !orcraft.IsRaftEnabled() { - Respond(r, &APIResponse{Code: ERROR, Message: "raft-leader: not running with raft setup"}) - return - } - - leader := orcraft.GetLeader() - r.JSON(http.StatusOK, leader) -} - -// RaftHealth indicates whether this node is part of a healthy raft group -func (this *HttpAPI) RaftHealth(params martini.Params, r render.Render, req *http.Request, user 
auth.User) { - if !orcraft.IsRaftEnabled() { - Respond(r, &APIResponse{Code: ERROR, Message: "raft-state: not running with raft setup"}) - return - } - if !orcraft.IsHealthy() { - Respond(r, &APIResponse{Code: ERROR, Message: "unhealthy"}) - return - } - r.JSON(http.StatusOK, "healthy") -} - -// RaftStatus exports a status summary for a raft node -func (this *HttpAPI) RaftStatus(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !orcraft.IsRaftEnabled() { - Respond(r, &APIResponse{Code: ERROR, Message: "raft-state: not running with raft setup"}) - return - } - peers, err := orcraft.GetPeers() - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Cannot get raft peers: %+v", err)}) - return - } - - status := struct { - RaftBind string - RaftAdvertise string - State string - Healthy bool - IsPartOfQuorum bool - Leader string - LeaderURI string - Peers []string - }{ - RaftBind: orcraft.GetRaftBind(), - RaftAdvertise: orcraft.GetRaftAdvertise(), - State: orcraft.GetState().String(), - Healthy: orcraft.IsHealthy(), - IsPartOfQuorum: orcraft.IsPartOfQuorum(), - Leader: orcraft.GetLeader(), - LeaderURI: orcraft.LeaderURI.Get(), - Peers: peers, - } - r.JSON(http.StatusOK, status) -} - -// RaftFollowerHealthReport is initiated by followers to report their identity and health to the raft leader. 
-func (this *HttpAPI) RaftFollowerHealthReport(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !orcraft.IsRaftEnabled() { - Respond(r, &APIResponse{Code: ERROR, Message: "raft-state: not running with raft setup"}) - return - } - err := orcraft.OnHealthReport(params["authenticationToken"], params["raftBind"], params["raftAdvertise"]) - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Cannot create snapshot: %+v", err)}) - return - } - r.JSON(http.StatusOK, "health reported") -} - -// RaftSnapshot instructs raft to take a snapshot -func (this *HttpAPI) RaftSnapshot(params martini.Params, r render.Render, req *http.Request, user auth.User) { - if !orcraft.IsRaftEnabled() { - Respond(r, &APIResponse{Code: ERROR, Message: "raft-leader: not running with raft setup"}) - return - } - err := orcraft.Snapshot() - if err != nil { - Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Cannot create snapshot: %+v", err)}) - return - } - r.JSON(http.StatusOK, "snapshot created") -} - // ReloadConfiguration reloads confiug settings (not all of which will apply after change) func (this *HttpAPI) ReloadConfiguration(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { @@ -3581,11 +2806,7 @@ func (this *HttpAPI) registerSingleAPIRequest(m *martini.ClassicMartini, path st registeredPaths = append(registeredPaths, path) fullPath := fmt.Sprintf("%s/api/%s", this.URLPrefix, path) - if allowProxy && config.Config.RaftEnabled { - m.Get(fullPath, raftReverseProxy, handler) - } else { - m.Get(fullPath, handler) - } + m.Get(fullPath, handler) } func (this *HttpAPI) registerAPIRequestInternal(m *martini.ClassicMartini, path string, handler martini.Handler, allowProxy bool) { @@ -3616,13 +2837,11 @@ func (this *HttpAPI) RegisterRequests(m *martini.ClassicMartini) { this.registerAPIRequest(m, "move-up/:host/:port", this.MoveUp) this.registerAPIRequest(m, 
"move-up-slaves/:host/:port", this.MoveUpReplicas) this.registerAPIRequest(m, "move-below/:host/:port/:siblingHost/:siblingPort", this.MoveBelow) - this.registerAPIRequest(m, "move-equivalent/:host/:port/:belowHost/:belowPort", this.MoveEquivalent) this.registerAPIRequest(m, "repoint/:host/:port/:belowHost/:belowPort", this.Repoint) this.registerAPIRequest(m, "repoint-slaves/:host/:port", this.RepointReplicas) this.registerAPIRequest(m, "make-co-master/:host/:port", this.MakeCoMaster) this.registerAPIRequest(m, "enslave-siblings/:host/:port", this.TakeSiblings) this.registerAPIRequest(m, "enslave-master/:host/:port", this.TakeMaster) - this.registerAPIRequest(m, "master-equivalent/:host/:port/:logFile/:logPos", this.MasterEquivalent) // Binlog server relocation: this.registerAPIRequest(m, "regroup-slaves-bls/:host/:port", this.RegroupReplicasBinlogServers) @@ -3632,17 +2851,6 @@ func (this *HttpAPI) RegisterRequests(m *martini.ClassicMartini) { this.registerAPIRequest(m, "move-slaves-gtid/:host/:port/:belowHost/:belowPort", this.MoveReplicasGTID) this.registerAPIRequest(m, "regroup-slaves-gtid/:host/:port", this.RegroupReplicasGTID) - // Pseudo-GTID relocation: - this.registerAPIRequest(m, "match/:host/:port/:belowHost/:belowPort", this.MatchBelow) - this.registerAPIRequest(m, "match-below/:host/:port/:belowHost/:belowPort", this.MatchBelow) - this.registerAPIRequest(m, "match-up/:host/:port", this.MatchUp) - this.registerAPIRequest(m, "match-slaves/:host/:port/:belowHost/:belowPort", this.MultiMatchReplicas) - this.registerAPIRequest(m, "match-up-slaves/:host/:port", this.MatchUpReplicas) - this.registerAPIRequest(m, "regroup-slaves-pgtid/:host/:port", this.RegroupReplicasPseudoGTID) - // Legacy, need to revisit: - this.registerAPIRequest(m, "make-master/:host/:port", this.MakeMaster) - this.registerAPIRequest(m, "make-local-master/:host/:port", this.MakeLocalMaster) - // Replication, general: this.registerAPIRequest(m, "enable-gtid/:host/:port", this.EnableGTID) 
this.registerAPIRequest(m, "disable-gtid/:host/:port", this.DisableGTID) @@ -3672,9 +2880,6 @@ func (this *HttpAPI) RegisterRequests(m *martini.ClassicMartini) { this.registerAPIRequest(m, "set-writeable/:host/:port", this.SetWriteable) this.registerAPIRequest(m, "kill-query/:host/:port/:process", this.KillQuery) - // Binary logs: - this.registerAPIRequest(m, "last-pseudo-gtid/:host/:port", this.LastPseudoGTID) - // Pools: this.registerAPIRequest(m, "submit-pool-instances/:pool", this.SubmitPoolInstances) this.registerAPIRequest(m, "cluster-pool-instances/:clusterName", this.ReadClusterPoolInstancesMap) @@ -3814,17 +3019,6 @@ func (this *HttpAPI) RegisterRequests(m *martini.ClassicMartini) { this.registerAPIRequestNoProxy(m, "leader-check", this.LeaderCheck) this.registerAPIRequestNoProxy(m, "leader-check/:errorStatusCode", this.LeaderCheck) this.registerAPIRequestNoProxy(m, "grab-election", this.GrabElection) - this.registerAPIRequest(m, "raft-add-peer/:addr", this.RaftAddPeer) // delegated to the raft leader - this.registerAPIRequest(m, "raft-remove-peer/:addr", this.RaftRemovePeer) // delegated to the raft leader - this.registerAPIRequestNoProxy(m, "raft-yield/:node", this.RaftYield) - this.registerAPIRequestNoProxy(m, "raft-yield-hint/:hint", this.RaftYieldHint) - this.registerAPIRequestNoProxy(m, "raft-peers", this.RaftPeers) - this.registerAPIRequestNoProxy(m, "raft-state", this.RaftState) - this.registerAPIRequestNoProxy(m, "raft-leader", this.RaftLeader) - this.registerAPIRequestNoProxy(m, "raft-health", this.RaftHealth) - this.registerAPIRequestNoProxy(m, "raft-status", this.RaftStatus) - this.registerAPIRequestNoProxy(m, "raft-snapshot", this.RaftSnapshot) - this.registerAPIRequestNoProxy(m, "raft-follower-health-report/:authenticationToken/:raftBind/:raftAdvertise", this.RaftFollowerHealthReport) this.registerAPIRequestNoProxy(m, "reload-configuration", this.ReloadConfiguration) this.registerAPIRequestNoProxy(m, "hostname-resolve-cache", 
this.HostnameResolveCache) this.registerAPIRequestNoProxy(m, "reset-hostname-resolve-cache", this.ResetHostnameResolveCache) @@ -3849,24 +3043,6 @@ func (this *HttpAPI) RegisterRequests(m *martini.ClassicMartini) { this.registerAPIRequest(m, "write-buffer-metrics-raw/:seconds", this.WriteBufferMetricsRaw) this.registerAPIRequest(m, "write-buffer-metrics-aggregated/:seconds", this.WriteBufferMetricsAggregated) - // Agents - this.registerAPIRequest(m, "agents", this.Agents) - this.registerAPIRequest(m, "agent/:host", this.Agent) - this.registerAPIRequest(m, "agent-umount/:host", this.AgentUnmount) - this.registerAPIRequest(m, "agent-mount/:host", this.AgentMountLV) - this.registerAPIRequest(m, "agent-create-snapshot/:host", this.AgentCreateSnapshot) - this.registerAPIRequest(m, "agent-removelv/:host", this.AgentRemoveLV) - this.registerAPIRequest(m, "agent-mysql-stop/:host", this.AgentMySQLStop) - this.registerAPIRequest(m, "agent-mysql-start/:host", this.AgentMySQLStart) - this.registerAPIRequest(m, "agent-seed/:targetHost/:sourceHost", this.AgentSeed) - this.registerAPIRequest(m, "agent-active-seeds/:host", this.AgentActiveSeeds) - this.registerAPIRequest(m, "agent-recent-seeds/:host", this.AgentRecentSeeds) - this.registerAPIRequest(m, "agent-seed-details/:seedId", this.AgentSeedDetails) - this.registerAPIRequest(m, "agent-seed-states/:seedId", this.AgentSeedStates) - this.registerAPIRequest(m, "agent-abort-seed/:seedId", this.AbortSeed) - this.registerAPIRequest(m, "agent-custom-command/:host/:command", this.AgentCustomCommand) - this.registerAPIRequest(m, "seeds", this.Seeds) - // Configurable status check endpoint if config.Config.StatusEndpoint == config.DefaultStatusAPIEndpoint { this.registerAPIRequestNoProxy(m, "status", this.StatusCheck) diff --git a/go/vt/orchestrator/http/httpbase.go b/go/vt/orchestrator/http/httpbase.go index 7d34278e053..0109d48d469 100644 --- a/go/vt/orchestrator/http/httpbase.go +++ b/go/vt/orchestrator/http/httpbase.go @@ -27,7 
+27,6 @@ import ( "vitess.io/vitess/go/vt/orchestrator/inst" "vitess.io/vitess/go/vt/orchestrator/os" "vitess.io/vitess/go/vt/orchestrator/process" - orcraft "vitess.io/vitess/go/vt/orchestrator/raft" ) func getProxyAuthUser(req *http.Request) string { @@ -44,11 +43,6 @@ func isAuthorizedForAction(req *http.Request, user auth.User) bool { return false } - if orcraft.IsRaftEnabled() && !orcraft.IsLeader() { - // A raft member that is not a leader is unauthorized. - return false - } - switch strings.ToLower(config.Config.AuthenticationMethod) { case "basic": { diff --git a/go/vt/orchestrator/http/raft_reverse_proxy.go b/go/vt/orchestrator/http/raft_reverse_proxy.go deleted file mode 100644 index 8d91a7a22c2..00000000000 --- a/go/vt/orchestrator/http/raft_reverse_proxy.go +++ /dev/null @@ -1,49 +0,0 @@ -package http - -import ( - "net/http" - "net/http/httputil" - "net/url" - "strings" - - "vitess.io/vitess/go/vt/orchestrator/external/golib/log" - orcraft "vitess.io/vitess/go/vt/orchestrator/raft" - - "github.com/go-martini/martini" - - "vitess.io/vitess/go/vt/orchestrator/config" -) - -func raftReverseProxy(w http.ResponseWriter, r *http.Request, c martini.Context) { - if !orcraft.IsRaftEnabled() { - // No raft, so no reverse proxy to the leader - return - } - if orcraft.IsLeader() { - // I am the leader. I will handle the request directly. - return - } - if orcraft.GetLeader() == "" { - return - } - if orcraft.LeaderURI.IsThisLeaderURI() { - // Although I'm not the leader, the value I see for LeaderURI is my own. - // I'm probably not up-to-date with my raft transaction log and don't have the latest information. - // But anyway, obviously not going to redirect to myself. - // Gonna return: this isn't ideal, because I'm not really the leader. If the user tries to - // run an operation they'll fail. 
- return - } - url, err := url.Parse(orcraft.LeaderURI.Get()) - if err != nil { - log.Errore(err) - return - } - r.Header.Del("Accept-Encoding") - switch strings.ToLower(config.Config.AuthenticationMethod) { - case "basic", "multi": - r.SetBasicAuth(config.Config.HTTPAuthUser, config.Config.HTTPAuthPassword) - } - proxy := httputil.NewSingleHostReverseProxy(url) - proxy.ServeHTTP(w, r) -} diff --git a/go/vt/orchestrator/http/web.go b/go/vt/orchestrator/http/web.go index 1dbc1396f61..9bfefe1d9af 100644 --- a/go/vt/orchestrator/http/web.go +++ b/go/vt/orchestrator/http/web.go @@ -103,7 +103,6 @@ func (this *HttpWeb) Cluster(params martini.Params, r render.Render, req *http.R "clusterName": clusterName, "autoshow_problems": true, "contextMenuVisible": true, - "pseudoGTIDModeEnabled": (config.Config.PseudoGTIDPattern != ""), "authorizedForAction": isAuthorizedForAction(req, user), "userId": getUserId(req, user), "removeTextFromHostnameDisplay": config.Config.RemoveTextFromHostnameDisplay, @@ -155,7 +154,6 @@ func (this *HttpWeb) ClusterPools(params martini.Params, r render.Render, req *h "clusterName": clusterName, "autoshow_problems": false, // because pool screen by default expands all hosts "contextMenuVisible": true, - "pseudoGTIDModeEnabled": (config.Config.PseudoGTIDPattern != ""), "authorizedForAction": isAuthorizedForAction(req, user), "userId": getUserId(req, user), "removeTextFromHostnameDisplay": config.Config.RemoveTextFromHostnameDisplay, @@ -391,11 +389,7 @@ func (this *HttpWeb) registerWebRequest(m *martini.ClassicMartini, path string, fullPath = fmt.Sprintf("%s/", this.URLPrefix) } - if config.Config.RaftEnabled { - m.Get(fullPath, raftReverseProxy, handler) - } else { - m.Get(fullPath, handler) - } + m.Get(fullPath, handler) } // RegisterRequests makes for the de-facto list of known Web calls diff --git a/go/vt/orchestrator/inst/analysis.go b/go/vt/orchestrator/inst/analysis.go index cfceecdfac6..6debabe9c5a 100644 --- 
a/go/vt/orchestrator/inst/analysis.go +++ b/go/vt/orchestrator/inst/analysis.go @@ -160,7 +160,6 @@ type ReplicationAnalysis struct { DowntimeEndTimestamp string DowntimeRemainingSeconds int IsBinlogServer bool - PseudoGTIDImmediateTopology bool OracleGTIDImmediateTopology bool MariaDBGTIDImmediateTopology bool BinlogServerImmediateTopology bool diff --git a/go/vt/orchestrator/inst/analysis_dao.go b/go/vt/orchestrator/inst/analysis_dao.go index 66abfa87990..648f078ec50 100644 --- a/go/vt/orchestrator/inst/analysis_dao.go +++ b/go/vt/orchestrator/inst/analysis_dao.go @@ -188,7 +188,6 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) MIN( master_instance.binlog_server ) AS is_binlog_server, - MIN(master_instance.pseudo_gtid) AS is_pseudo_gtid, MIN( master_instance.supports_oracle_gtid ) AS supports_oracle_gtid, @@ -431,7 +430,6 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) a.MariaDBGTIDImmediateTopology = countValidMariaDBGTIDReplicas == a.CountValidReplicas && a.CountValidReplicas > 0 countValidBinlogServerReplicas := m.GetUint("count_valid_binlog_server_replicas") a.BinlogServerImmediateTopology = countValidBinlogServerReplicas == a.CountValidReplicas && a.CountValidReplicas > 0 - a.PseudoGTIDImmediateTopology = m.GetBool("is_pseudo_gtid") a.SemiSyncMasterEnabled = m.GetBool("semi_sync_master_enabled") a.SemiSyncMasterStatus = m.GetBool("semi_sync_master_status") a.SemiSyncReplicaEnabled = m.GetBool("semi_sync_replica_enabled") @@ -694,8 +692,7 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) if a.IsMaster && a.CountReplicas > 1 && !a.OracleGTIDImmediateTopology && !a.MariaDBGTIDImmediateTopology && - !a.BinlogServerImmediateTopology && - !a.PseudoGTIDImmediateTopology { + !a.BinlogServerImmediateTopology { a.StructureAnalysis = append(a.StructureAnalysis, NoFailoverSupportStructureWarning) } if a.IsMaster && a.CountStatementBasedLoggingReplicas > 0 && 
a.CountMixedBasedLoggingReplicas > 0 { diff --git a/go/vt/orchestrator/inst/binlog.go b/go/vt/orchestrator/inst/binlog.go index 8e6887b012f..5e901ae68cc 100644 --- a/go/vt/orchestrator/inst/binlog.go +++ b/go/vt/orchestrator/inst/binlog.go @@ -44,21 +44,6 @@ type BinlogCoordinates struct { Type BinlogType } -// rpad formats the binlog coordinates to a given size. If the size -// increases this value is modified so it can be reused later. This -// is to ensure consistent formatting in debug output. -func rpad(coordinates BinlogCoordinates, length *int) string { - s := fmt.Sprintf("%+v", coordinates) - if len(s) > *length { - *length = len(s) - } - - if len(s) >= *length { - return s - } - return fmt.Sprintf("%s%s", s, strings.Repeat(" ", *length-len(s))) -} - // ParseInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306 func ParseBinlogCoordinates(logFileLogPos string) (*BinlogCoordinates, error) { tokens := strings.SplitN(logFileLogPos, ":", 2) diff --git a/go/vt/orchestrator/inst/instance.go b/go/vt/orchestrator/inst/instance.go index 71531c00e30..495f450cb9c 100644 --- a/go/vt/orchestrator/inst/instance.go +++ b/go/vt/orchestrator/inst/instance.go @@ -65,7 +65,7 @@ type Instance struct { SupportsOracleGTID bool UsingOracleGTID bool UsingMariaDBGTID bool - UsingPseudoGTID bool + UsingPseudoGTID bool // Legacy. 
Always 'false' ReadBinlogCoordinates BinlogCoordinates ExecBinlogCoordinates BinlogCoordinates IsDetached bool @@ -237,11 +237,6 @@ func (this *Instance) IsPercona() bool { return strings.Contains(this.VersionComment, "Percona") } -// isMaxScale checks whether this is any version of MaxScale -func (this *Instance) isMaxScale() bool { - return strings.Contains(this.Version, "maxscale") -} - // isNDB check whether this is NDB Cluster (aka MySQL Cluster) func (this *Instance) IsNDB() bool { return strings.Contains(this.Version, "-ndb-") @@ -262,9 +257,9 @@ func (this *Instance) IsReplicationGroupSecondary() bool { return this.IsReplicationGroupMember() && !this.ReplicationGroupPrimaryInstanceKey.Equals(&this.Key) } -// IsBinlogServer checks whether this is any type of a binlog server (currently only maxscale) +// IsBinlogServer checks whether this is any type of a binlog server func (this *Instance) IsBinlogServer() bool { - return this.isMaxScale() + return false } // IsOracleMySQL checks whether this is an Oracle MySQL distribution @@ -275,9 +270,6 @@ func (this *Instance) IsOracleMySQL() bool { if this.IsPercona() { return false } - if this.isMaxScale() { - return false - } if this.IsBinlogServer() { return false } @@ -302,8 +294,6 @@ func (this *Instance) applyFlavorName() { this.FlavorName = "MariaDB" } else if this.IsPercona() { this.FlavorName = "Percona" - } else if this.isMaxScale() { - this.FlavorName = "MaxScale" } else { this.FlavorName = "unknown" } @@ -522,17 +512,6 @@ func (this *Instance) CanMoveAsCoMaster() (bool, error) { return true, nil } -// CanMoveViaMatch returns true if this instance's state allows it to be repositioned via pseudo-GTID matching -func (this *Instance) CanMoveViaMatch() (bool, error) { - if !this.IsLastCheckValid { - return false, fmt.Errorf("%+v: last check invalid", this.Key) - } - if !this.IsRecentlyChecked { - return false, fmt.Errorf("%+v: not recently checked", this.Key) - } - return true, nil -} - // StatusString returns a 
human readable description of this instance's status func (this *Instance) StatusString() string { if !this.IsLastCheckValid { @@ -599,9 +578,6 @@ func (this *Instance) descriptionTokens() (tokens []string) { } extraTokens = append(extraTokens, token) } - if this.UsingPseudoGTID { - extraTokens = append(extraTokens, "P-GTID") - } if this.SemiSyncMasterStatus { extraTokens = append(extraTokens, "semi:master") } diff --git a/go/vt/orchestrator/inst/instance_binlog.go b/go/vt/orchestrator/inst/instance_binlog.go index ec39f9ddaf0..d716b387048 100644 --- a/go/vt/orchestrator/inst/instance_binlog.go +++ b/go/vt/orchestrator/inst/instance_binlog.go @@ -19,9 +19,7 @@ package inst import ( "errors" "regexp" - "strings" - "vitess.io/vitess/go/vt/orchestrator/config" "vitess.io/vitess/go/vt/orchestrator/external/golib/log" ) @@ -38,12 +36,6 @@ var eventInfoTransformations map[*regexp.Regexp]string = map[*regexp.Regexp]stri regexp.MustCompile(`(BEGIN GTID [^ ]+) cid=.*`): "$1", // MariaDB GTID someimtes gets addition of "cid=...". Stripping } -var skippedEventTypes map[string]bool = map[string]bool{ - "Format_desc": true, - "Stop": true, - "Rotate": true, -} - type BinlogEvent struct { Coordinates BinlogCoordinates NextEventPos int64 @@ -148,36 +140,6 @@ func (this *BinlogEventCursor) nextEvent(numEmptyEventsEvents int) (*BinlogEvent } } -// NextRealEvent returns the next event from binlog that is not meta/control event (these are start-of-binary-log, -// rotate-binary-log etc.) -func (this *BinlogEventCursor) nextRealEvent(recursionLevel int) (*BinlogEvent, error) { - if recursionLevel > maxEmptyEventsEvents { - log.Debugf("End of real events") - return nil, nil - } - event, err := this.nextEvent(0) - if err != nil { - return event, err - } - if event == nil { - return event, err - } - - if _, found := skippedEventTypes[event.EventType]; found { - // Recursion will not be deep here. 
A few entries (end-of-binlog followed by start-of-bin-log) are possible, - // but we really don't expect a huge sequence of those. - return this.nextRealEvent(recursionLevel + 1) - } - for _, skipSubstring := range config.Config.SkipBinlogEventsContaining { - if strings.Contains(event.Info, skipSubstring) { - // Recursion might go deeper here. - return this.nextRealEvent(recursionLevel + 1) - } - } - event.NormalizeInfo() - return event, err -} - // NextCoordinates return the binlog coordinates of the next entry as yet unprocessed by the cursor. // Moreover, when the cursor terminates (consumes last entry), these coordinates indicate what will be the futuristic // coordinates of the next binlog entry. diff --git a/go/vt/orchestrator/inst/instance_binlog_dao.go b/go/vt/orchestrator/inst/instance_binlog_dao.go index 15b8e01b95e..4603b16b408 100644 --- a/go/vt/orchestrator/inst/instance_binlog_dao.go +++ b/go/vt/orchestrator/inst/instance_binlog_dao.go @@ -18,827 +18,12 @@ package inst import ( "fmt" - "regexp" - "strings" - "time" - "github.com/patrickmn/go-cache" - - "vitess.io/vitess/go/vt/orchestrator/config" "vitess.io/vitess/go/vt/orchestrator/db" "vitess.io/vitess/go/vt/orchestrator/external/golib/log" - "vitess.io/vitess/go/vt/orchestrator/external/golib/math" "vitess.io/vitess/go/vt/orchestrator/external/golib/sqlutils" ) -const maxEmptyBinlogFiles int = 10 - -var instanceBinlogEntryCache *cache.Cache - -func init() { - go initializeBinlogDaoPostConfiguration() -} - -func initializeBinlogDaoPostConfiguration() { - config.WaitForConfigurationToBeLoaded() - - instanceBinlogEntryCache = cache.New(time.Duration(10)*time.Minute, time.Minute) -} - -func compilePseudoGTIDPattern() (pseudoGTIDRegexp *regexp.Regexp, err error) { - log.Debugf("PseudoGTIDPatternIsFixedSubstring: %+v", config.Config.PseudoGTIDPatternIsFixedSubstring) - if config.Config.PseudoGTIDPatternIsFixedSubstring { - return nil, nil - } - log.Debugf("Compiling PseudoGTIDPattern: %q", 
config.Config.PseudoGTIDPattern) - return regexp.Compile(config.Config.PseudoGTIDPattern) -} - -// pseudoGTIDMatches attempts to match given string with pseudo GTID pattern/text. -func pseudoGTIDMatches(pseudoGTIDRegexp *regexp.Regexp, binlogEntryInfo string) (found bool) { - if config.Config.PseudoGTIDPatternIsFixedSubstring { - return strings.Contains(binlogEntryInfo, config.Config.PseudoGTIDPattern) - } - return pseudoGTIDRegexp.MatchString(binlogEntryInfo) -} - -func getInstanceBinlogEntryKey(instance *Instance, entry string) string { - return fmt.Sprintf("%s;%s", instance.Key.DisplayString(), entry) -} - -// Try and find the last position of a pseudo GTID query entry in the given binary log. -// Also return the full text of that entry. -// maxCoordinates is the position beyond which we should not read. This is relevant when reading relay logs; in particular, -// the last relay log. We must be careful not to scan for Pseudo-GTID entries past the position executed by the SQL thread. -// maxCoordinates == nil means no limit. -func getLastPseudoGTIDEntryInBinlog(pseudoGTIDRegexp *regexp.Regexp, instanceKey *InstanceKey, binlog string, binlogType BinlogType, minCoordinates *BinlogCoordinates, maxCoordinates *BinlogCoordinates) (*BinlogCoordinates, string, error) { - if binlog == "" { - return nil, "", log.Errorf("getLastPseudoGTIDEntryInBinlog: empty binlog file name for %+v. 
maxCoordinates = %+v", *instanceKey, maxCoordinates) - } - binlogCoordinates := BinlogCoordinates{LogFile: binlog, LogPos: 0, Type: binlogType} - db, err := db.OpenTopology(instanceKey.Hostname, instanceKey.Port) - if err != nil { - return nil, "", err - } - - moreRowsExpected := true - var nextPos int64 = 0 - var relyLogMinPos int64 = 0 - if minCoordinates != nil && minCoordinates.LogFile == binlog { - log.Debugf("getLastPseudoGTIDEntryInBinlog: starting with %+v", *minCoordinates) - nextPos = minCoordinates.LogPos - relyLogMinPos = minCoordinates.LogPos - } - step := 0 - - entryText := "" - for moreRowsExpected { - query := "" - if binlogCoordinates.Type == BinaryLog { - query = fmt.Sprintf("show binlog events in '%s' FROM %d LIMIT %d", binlog, nextPos, config.Config.BinlogEventsChunkSize) - } else { - query = fmt.Sprintf("show relaylog events in '%s' FROM %d LIMIT %d,%d", binlog, relyLogMinPos, (step * config.Config.BinlogEventsChunkSize), config.Config.BinlogEventsChunkSize) - } - - moreRowsExpected = false - - err = sqlutils.QueryRowsMapBuffered(db, query, func(m sqlutils.RowMap) error { - moreRowsExpected = true - nextPos = m.GetInt64("End_log_pos") - binlogEntryInfo := m.GetString("Info") - if pseudoGTIDMatches(pseudoGTIDRegexp, binlogEntryInfo) { - if maxCoordinates != nil && maxCoordinates.SmallerThan(&BinlogCoordinates{LogFile: binlog, LogPos: m.GetInt64("Pos")}) { - // past the limitation - moreRowsExpected = false - return nil - } - binlogCoordinates.LogPos = m.GetInt64("Pos") - entryText = binlogEntryInfo - // Found a match. But we keep searching: we're interested in the LAST entry, and, alas, - // we can only search in ASCENDING order... - } - return nil - }) - if err != nil { - return nil, "", err - } - step++ - } - - // Not found? return nil. an error is reserved to SQL problems. 
- if binlogCoordinates.LogPos == 0 { - return nil, "", nil - } - return &binlogCoordinates, entryText, err -} - -// getLastPseudoGTIDEntryInInstance will search for the last pseudo GTID entry in an instance's binary logs. Arguments: -// - instance -// - minBinlogCoordinates: a hint, suggested coordinates to start with. The search will _attempt_ to begin search from -// these coordinates, but if search is empty, then we failback to full search, ignoring this hint -// - maxBinlogCoordinates: a hard limit on the maximum position we're allowed to investigate. -// - exhaustiveSearch: when 'true', continue iterating binary logs. When 'false', only investigate most recent binary log. -func getLastPseudoGTIDEntryInInstance(instance *Instance, minBinlogCoordinates *BinlogCoordinates, maxBinlogCoordinates *BinlogCoordinates, exhaustiveSearch bool) (*BinlogCoordinates, string, error) { - pseudoGTIDRegexp, err := compilePseudoGTIDPattern() - if err != nil { - return nil, "", err - } - // Look for last GTID in instance: - currentBinlog := instance.SelfBinlogCoordinates - - err = nil - for err == nil { - log.Debugf("Searching for latest pseudo gtid entry in binlog %+v of %+v", currentBinlog.LogFile, instance.Key) - resultCoordinates, entryInfo, err := getLastPseudoGTIDEntryInBinlog(pseudoGTIDRegexp, &instance.Key, currentBinlog.LogFile, BinaryLog, minBinlogCoordinates, maxBinlogCoordinates) - if err != nil { - return nil, "", err - } - if resultCoordinates != nil { - log.Debugf("Found pseudo gtid entry in %+v, %+v", instance.Key, resultCoordinates) - return resultCoordinates, entryInfo, err - } - if !exhaustiveSearch { - log.Debugf("Not an exhaustive search. Bailing out") - break - } - if minBinlogCoordinates != nil && minBinlogCoordinates.LogFile == currentBinlog.LogFile { - // We tried and failed with the minBinlogCoordinates heuristic/hint. We no longer require it, - // and continue with exhaustive search, on same binlog. 
- minBinlogCoordinates = nil - log.Debugf("Heuristic binlog search failed; continuing exhaustive search") - // And we do NOT iterate the log file: we scan same log file again, with no heuristic - //return nil, "", log.Errorf("past minBinlogCoordinates (%+v); skipping iteration over rest of binary logs", *minBinlogCoordinates) - } else { - currentBinlog, err = currentBinlog.PreviousFileCoordinates() - if err != nil { - return nil, "", err - } - } - } - return nil, "", log.Errorf("Cannot find pseudo GTID entry in binlogs of %+v", instance.Key) -} - -func getLastPseudoGTIDEntryInRelayLogs(instance *Instance, minBinlogCoordinates *BinlogCoordinates, recordedInstanceRelayLogCoordinates BinlogCoordinates, exhaustiveSearch bool) (*BinlogCoordinates, string, error) { - // Look for last GTID in relay logs: - // Since MySQL does not provide with a SHOW RELAY LOGS command, we heuristically start from current - // relay log (indiciated by Relay_log_file) and walk backwards. - // Eventually we will hit a relay log name which does not exist. 
- pseudoGTIDRegexp, err := compilePseudoGTIDPattern() - if err != nil { - return nil, "", err - } - - currentRelayLog := recordedInstanceRelayLogCoordinates - err = nil - for err == nil { - log.Debugf("Searching for latest pseudo gtid entry in relaylog %+v of %+v, up to pos %+v", currentRelayLog.LogFile, instance.Key, recordedInstanceRelayLogCoordinates) - if resultCoordinates, entryInfo, err := getLastPseudoGTIDEntryInBinlog(pseudoGTIDRegexp, &instance.Key, currentRelayLog.LogFile, RelayLog, minBinlogCoordinates, &recordedInstanceRelayLogCoordinates); err != nil { - return nil, "", err - } else if resultCoordinates != nil { - log.Debugf("Found pseudo gtid entry in %+v, %+v", instance.Key, resultCoordinates) - return resultCoordinates, entryInfo, err - } - if !exhaustiveSearch { - break - } - if minBinlogCoordinates != nil && minBinlogCoordinates.LogFile == currentRelayLog.LogFile { - // We tried and failed with the minBinlogCoordinates hint. We no longer require it, - // and continue with exhaustive search. 
- minBinlogCoordinates = nil - log.Debugf("Heuristic relaylog search failed; continuing exhaustive search") - // And we do NOT iterate to previous log file: we scan same log file again, with no heuristic - } else { - currentRelayLog, err = currentRelayLog.PreviousFileCoordinates() - } - } - return nil, "", log.Errorf("Cannot find pseudo GTID entry in relay logs of %+v", instance.Key) -} - -func readBinlogEvent(binlogEvent *BinlogEvent, m sqlutils.RowMap) error { - binlogEvent.NextEventPos = m.GetInt64("End_log_pos") - binlogEvent.Coordinates.LogPos = m.GetInt64("Pos") - binlogEvent.EventType = m.GetString("Event_type") - binlogEvent.Info = m.GetString("Info") - return nil -} - -func ReadBinlogEventAtRelayLogCoordinates(instanceKey *InstanceKey, relaylogCoordinates *BinlogCoordinates) (binlogEvent *BinlogEvent, err error) { - db, err := db.OpenTopology(instanceKey.Hostname, instanceKey.Port) - if err != nil { - return nil, err - } - - query := fmt.Sprintf("show relaylog events in '%s' FROM %d LIMIT 1", relaylogCoordinates.LogFile, relaylogCoordinates.LogPos) - binlogEvent = &BinlogEvent{ - Coordinates: *relaylogCoordinates, - } - err = sqlutils.QueryRowsMapBuffered(db, query, func(m sqlutils.RowMap) error { - return readBinlogEvent(binlogEvent, m) - }) - return binlogEvent, err -} - -// Try and find the last position of a pseudo GTID query entry in the given binary log. -// Also return the full text of that entry. -// maxCoordinates is the position beyond which we should not read. This is relevant when reading relay logs; in particular, -// the last relay log. We must be careful not to scan for Pseudo-GTID entries past the position executed by the SQL thread. -// maxCoordinates == nil means no limit. 
-func getLastExecutedEntryInRelaylog(instanceKey *InstanceKey, binlog string, minCoordinates *BinlogCoordinates, maxCoordinates *BinlogCoordinates) (binlogEvent *BinlogEvent, err error) { - if binlog == "" { - return nil, log.Errorf("getLastExecutedEntryInRelaylog: empty binlog file name for %+v. maxCoordinates = %+v", *instanceKey, maxCoordinates) - } - db, err := db.OpenTopology(instanceKey.Hostname, instanceKey.Port) - if err != nil { - return nil, err - } - binlogEvent = &BinlogEvent{ - Coordinates: BinlogCoordinates{LogFile: binlog, LogPos: 0, Type: RelayLog}, - } - - moreRowsExpected := true - var relyLogMinPos int64 = 0 - if minCoordinates != nil && minCoordinates.LogFile == binlog { - log.Debugf("getLastExecutedEntryInRelaylog: starting with %+v", *minCoordinates) - relyLogMinPos = minCoordinates.LogPos - } - - step := 0 - for moreRowsExpected { - query := fmt.Sprintf("show relaylog events in '%s' FROM %d LIMIT %d,%d", binlog, relyLogMinPos, (step * config.Config.BinlogEventsChunkSize), config.Config.BinlogEventsChunkSize) - - moreRowsExpected = false - err = sqlutils.QueryRowsMapBuffered(db, query, func(m sqlutils.RowMap) error { - moreRowsExpected = true - return readBinlogEvent(binlogEvent, m) - }) - if err != nil { - return nil, err - } - step++ - } - - // Not found? return nil. an error is reserved to SQL problems. - if binlogEvent.Coordinates.LogPos == 0 { - return nil, nil - } - return binlogEvent, err -} - -func GetLastExecutedEntryInRelayLogs(instance *Instance, minBinlogCoordinates *BinlogCoordinates, recordedInstanceRelayLogCoordinates BinlogCoordinates) (binlogEvent *BinlogEvent, err error) { - // Look for last GTID in relay logs: - // Since MySQL does not provide with a SHOW RELAY LOGS command, we heuristically start from current - // relay log (indiciated by Relay_log_file) and walk backwards. 
- - currentRelayLog := recordedInstanceRelayLogCoordinates - for err == nil { - log.Debugf("Searching for latest entry in relaylog %+v of %+v, up to pos %+v", currentRelayLog.LogFile, instance.Key, recordedInstanceRelayLogCoordinates) - if binlogEvent, err = getLastExecutedEntryInRelaylog(&instance.Key, currentRelayLog.LogFile, minBinlogCoordinates, &recordedInstanceRelayLogCoordinates); err != nil { - return nil, err - } else if binlogEvent != nil { - log.Debugf("Found entry in %+v, %+v", instance.Key, binlogEvent.Coordinates) - return binlogEvent, err - } - if minBinlogCoordinates != nil && minBinlogCoordinates.LogFile == currentRelayLog.LogFile { - // We tried and failed with the minBinlogCoordinates hint. We no longer require it, - // and continue with exhaustive search. - minBinlogCoordinates = nil - log.Debugf("Heuristic relaylog search failed; continuing exhaustive search") - // And we do NOT iterate to previous log file: we scan same log faile again, with no heuristic - } else { - currentRelayLog, err = currentRelayLog.PreviousFileCoordinates() - } - } - return binlogEvent, err -} - -// SearchBinlogEntryInRelaylog -func searchEventInRelaylog(instanceKey *InstanceKey, binlog string, searchEvent *BinlogEvent, minCoordinates *BinlogCoordinates) (binlogCoordinates, nextCoordinates *BinlogCoordinates, found bool, err error) { - binlogCoordinates = &BinlogCoordinates{LogFile: binlog, LogPos: 0, Type: RelayLog} - nextCoordinates = &BinlogCoordinates{LogFile: binlog, LogPos: 0, Type: RelayLog} - if binlog == "" { - return binlogCoordinates, nextCoordinates, false, log.Errorf("SearchEventInRelaylog: empty relaylog file name for %+v", *instanceKey) - } - - db, err := db.OpenTopology(instanceKey.Hostname, instanceKey.Port) - if err != nil { - return binlogCoordinates, nextCoordinates, false, err - } - - moreRowsExpected := true - var relyLogMinPos int64 = 0 - if minCoordinates != nil && minCoordinates.LogFile == binlog { - log.Debugf("SearchEventInRelaylog: starting 
with %+v", *minCoordinates) - relyLogMinPos = minCoordinates.LogPos - } - binlogEvent := &BinlogEvent{ - Coordinates: BinlogCoordinates{LogFile: binlog, LogPos: 0, Type: RelayLog}, - } - - skipRestOfBinlog := false - - step := 0 - for moreRowsExpected { - query := fmt.Sprintf("show relaylog events in '%s' FROM %d LIMIT %d,%d", binlog, relyLogMinPos, (step * config.Config.BinlogEventsChunkSize), config.Config.BinlogEventsChunkSize) - - // We don't know in advance when we will hit the end of the binlog. We will implicitly understand it when our - // `show binlog events` query does not return any row. - moreRowsExpected = false - err = sqlutils.QueryRowsMapBuffered(db, query, func(m sqlutils.RowMap) error { - if binlogCoordinates.LogPos != 0 && nextCoordinates.LogPos != 0 { - // Entry found! - skipRestOfBinlog = true - return nil - } - if skipRestOfBinlog { - return nil - } - moreRowsExpected = true - - if binlogCoordinates.LogPos == 0 { - readBinlogEvent(binlogEvent, m) - if binlogEvent.EqualsIgnoreCoordinates(searchEvent) { - // found it! 
- binlogCoordinates.LogPos = m.GetInt64("Pos") - } - } else if nextCoordinates.LogPos == 0 { - // found binlogCoordinates: the next coordinates are nextCoordinates :P - nextCoordinates.LogPos = m.GetInt64("Pos") - } - return nil - }) - if err != nil { - return binlogCoordinates, nextCoordinates, (binlogCoordinates.LogPos != 0), err - } - if skipRestOfBinlog { - return binlogCoordinates, nextCoordinates, (binlogCoordinates.LogPos != 0), err - } - step++ - } - return binlogCoordinates, nextCoordinates, (binlogCoordinates.LogPos != 0), err -} - -func SearchEventInRelayLogs(searchEvent *BinlogEvent, instance *Instance, minBinlogCoordinates *BinlogCoordinates, recordedInstanceRelayLogCoordinates BinlogCoordinates) (binlogCoordinates, nextCoordinates *BinlogCoordinates, found bool, err error) { - // Since MySQL does not provide with a SHOW RELAY LOGS command, we heuristically start from current - // relay log (indiciated by Relay_log_file) and walk backwards. - log.Debugf("will search for event %+v", *searchEvent) - if minBinlogCoordinates != nil { - log.Debugf("Starting with coordinates: %+v", *minBinlogCoordinates) - } - currentRelayLog := recordedInstanceRelayLogCoordinates - for err == nil { - log.Debugf("Searching for event in relaylog %+v of %+v, up to pos %+v", currentRelayLog.LogFile, instance.Key, recordedInstanceRelayLogCoordinates) - if binlogCoordinates, nextCoordinates, found, err = searchEventInRelaylog(&instance.Key, currentRelayLog.LogFile, searchEvent, minBinlogCoordinates); err != nil { - return nil, nil, false, err - } else if binlogCoordinates != nil && found { - log.Debugf("Found event in %+v, %+v", instance.Key, *binlogCoordinates) - return binlogCoordinates, nextCoordinates, found, err - } - if minBinlogCoordinates != nil && minBinlogCoordinates.LogFile == currentRelayLog.LogFile { - // We tried and failed with the minBinlogCoordinates hint. We no longer require it, - // and continue with exhaustive search. 
- minBinlogCoordinates = nil - log.Debugf("Heuristic relaylog search failed; continuing exhaustive search") - // And we do NOT iterate to previous log file: we scan same log faile again, with no heuristic - } else { - currentRelayLog, err = currentRelayLog.PreviousFileCoordinates() - } - } - return binlogCoordinates, nextCoordinates, found, err -} - -// SearchEntryInBinlog Given a binlog entry text (query), search it in the given binary log of a given instance -func SearchEntryInBinlog(pseudoGTIDRegexp *regexp.Regexp, instanceKey *InstanceKey, binlog string, entryText string, monotonicPseudoGTIDEntries bool, minBinlogCoordinates *BinlogCoordinates) (BinlogCoordinates, bool, error) { - binlogCoordinates := BinlogCoordinates{LogFile: binlog, LogPos: 0, Type: BinaryLog} - if binlog == "" { - return binlogCoordinates, false, log.Errorf("SearchEntryInBinlog: empty binlog file name for %+v", *instanceKey) - } - - db, err := db.OpenTopology(instanceKey.Hostname, instanceKey.Port) - if err != nil { - return binlogCoordinates, false, err - } - - moreRowsExpected := true - skipRestOfBinlog := false - alreadyMatchedAscendingPseudoGTID := false - var nextPos int64 = 0 - if minBinlogCoordinates != nil && minBinlogCoordinates.LogFile == binlog { - log.Debugf("SearchEntryInBinlog: starting with %+v", *minBinlogCoordinates) - nextPos = minBinlogCoordinates.LogPos - } - - for moreRowsExpected { - query := fmt.Sprintf("show binlog events in '%s' FROM %d LIMIT %d", binlog, nextPos, config.Config.BinlogEventsChunkSize) - - // We don't know in advance when we will hit the end of the binlog. We will implicitly understand it when our - // `show binlog events` query does not return any row. - moreRowsExpected = false - - err = sqlutils.QueryRowsMapBuffered(db, query, func(m sqlutils.RowMap) error { - if binlogCoordinates.LogPos != 0 { - // Entry found! 
- skipRestOfBinlog = true - return nil - } - if skipRestOfBinlog { - return nil - } - moreRowsExpected = true - nextPos = m.GetInt64("End_log_pos") - binlogEntryInfo := m.GetString("Info") - // - if binlogEntryInfo == entryText { - // found it! - binlogCoordinates.LogPos = m.GetInt64("Pos") - } else if monotonicPseudoGTIDEntries && !alreadyMatchedAscendingPseudoGTID { - // This part assumes we're searching for Pseudo-GTID.Typically that is the case, however this function can - // also be used for generic searches through the binary log. - // More heavyweight computation here. Need to verify whether the binlog entry we have is a pseudo-gtid entry - // We only want to check for ASCENDING once in the top of the binary log. - // If we find the first entry to be higher than the searched one, clearly we are done. - // If not, then by virtue of binary logs, we still have to full-scan the entrie binlog sequentially; we - // do not check again for ASCENDING (no point), so we save up CPU energy wasted in regexp. - if pseudoGTIDMatches(pseudoGTIDRegexp, binlogEntryInfo) { - alreadyMatchedAscendingPseudoGTID = true - log.Debugf("Matched ascending Pseudo-GTID entry in %+v", binlog) - if binlogEntryInfo > entryText { - // Entries ascending, and current entry is larger than the one we are searching for. - // There is no need to scan further on. We can skip the entire binlog - log.Debugf(`Pseudo GTID entries are monotonic and we hit "%+v" > "%+v"; skipping binlog %+v`, m.GetString("Info"), entryText, binlogCoordinates.LogFile) - skipRestOfBinlog = true - return nil - } - } - } - return nil - }) - if err != nil { - return binlogCoordinates, (binlogCoordinates.LogPos != 0), err - } - if skipRestOfBinlog { - return binlogCoordinates, (binlogCoordinates.LogPos != 0), err - } - } - - return binlogCoordinates, (binlogCoordinates.LogPos != 0), err -} - -// SearchEntryInInstanceBinlogs will search for a specific text entry within the binary logs of a given instance. 
-func SearchEntryInInstanceBinlogs(instance *Instance, entryText string, monotonicPseudoGTIDEntries bool, minBinlogCoordinates *BinlogCoordinates) (*BinlogCoordinates, error) { - pseudoGTIDRegexp, err := compilePseudoGTIDPattern() - if err != nil { - return nil, err - } - cacheKey := getInstanceBinlogEntryKey(instance, entryText) - coords, found := instanceBinlogEntryCache.Get(cacheKey) - if found { - // This is wonderful. We can skip the tedious GTID search in the binary log - log.Debugf("Found instance Pseudo GTID entry coordinates in cache: %+v, %+v, %+v", instance.Key, entryText, coords) - return coords.(*BinlogCoordinates), nil - } - - // Look for GTID entry in given instance: - log.Debugf("Searching for given pseudo gtid entry in %+v. monotonicPseudoGTIDEntries=%+v", instance.Key, monotonicPseudoGTIDEntries) - currentBinlog := instance.SelfBinlogCoordinates - err = nil - for { - log.Debugf("Searching for given pseudo gtid entry in binlog %+v of %+v", currentBinlog.LogFile, instance.Key) - // loop iteration per binary log. This might turn to be a heavyweight operation. We wish to throttle the operation such that - // the instance does not suffer. If it is a replica, we will only act as long as it's not lagging too much. - if instance.ReplicaRunning() { - for { - log.Debugf("%+v is a replicating replica. Verifying lag", instance.Key) - instance, err = ReadTopologyInstance(&instance.Key) - if err != nil { - break - } - if instance.HasReasonableMaintenanceReplicationLag() { - // is good to go! - break - } - log.Debugf("lag is too high on %+v. 
Throttling the search for pseudo gtid entry", instance.Key) - time.Sleep(time.Duration(config.Config.ReasonableMaintenanceReplicationLagSeconds) * time.Second) - } - } - var resultCoordinates BinlogCoordinates - var found bool - resultCoordinates, found, err = SearchEntryInBinlog(pseudoGTIDRegexp, &instance.Key, currentBinlog.LogFile, entryText, monotonicPseudoGTIDEntries, minBinlogCoordinates) - if err != nil { - break - } - if found { - log.Debugf("Matched entry in %+v: %+v", instance.Key, resultCoordinates) - instanceBinlogEntryCache.Set(cacheKey, &resultCoordinates, 0) - return &resultCoordinates, nil - } - // Got here? Unfound. Keep looking - if minBinlogCoordinates != nil && minBinlogCoordinates.LogFile == currentBinlog.LogFile { - log.Debugf("Heuristic master binary logs search failed; continuing exhaustive search") - minBinlogCoordinates = nil - } else { - currentBinlog, err = currentBinlog.PreviousFileCoordinates() - if err != nil { - break - } - log.Debugf("- Will move next to binlog %+v", currentBinlog.LogFile) - } - } - - return nil, log.Errorf("Cannot match pseudo GTID entry in binlogs of %+v; err: %+v", instance.Key, err) -} - -// Read (as much as possible of) a chunk of binary log events starting the given startingCoordinates -func readBinlogEventsChunk(instanceKey *InstanceKey, startingCoordinates BinlogCoordinates) ([]BinlogEvent, error) { - events := []BinlogEvent{} - db, err := db.OpenTopology(instanceKey.Hostname, instanceKey.Port) - if err != nil { - return events, err - } - commandToken := math.TernaryString(startingCoordinates.Type == BinaryLog, "binlog", "relaylog") - if startingCoordinates.LogFile == "" { - return events, log.Errorf("readBinlogEventsChunk: empty binlog file name for %+v.", *instanceKey) - } - query := fmt.Sprintf("show %s events in '%s' FROM %d LIMIT %d", commandToken, startingCoordinates.LogFile, startingCoordinates.LogPos, config.Config.BinlogEventsChunkSize) - err = sqlutils.QueryRowsMap(db, query, func(m 
sqlutils.RowMap) error { - binlogEvent := BinlogEvent{} - binlogEvent.Coordinates.LogFile = m.GetString("Log_name") - binlogEvent.Coordinates.LogPos = m.GetInt64("Pos") - binlogEvent.Coordinates.Type = startingCoordinates.Type - binlogEvent.NextEventPos = m.GetInt64("End_log_pos") - binlogEvent.EventType = m.GetString("Event_type") - binlogEvent.Info = m.GetString("Info") - - events = append(events, binlogEvent) - return nil - }) - return events, err -} - -// Return the next chunk of binlog events; skip to next binary log file if need be; return empty result only -// if reached end of binary logs -func getNextBinlogEventsChunk(instance *Instance, startingCoordinates BinlogCoordinates, numEmptyBinlogs int) ([]BinlogEvent, error) { - if numEmptyBinlogs > maxEmptyBinlogFiles { - log.Debugf("Reached maxEmptyBinlogFiles (%d) at %+v", maxEmptyBinlogFiles, startingCoordinates) - // Give up and return empty results - return []BinlogEvent{}, nil - } - coordinatesExceededCurrent := false - switch startingCoordinates.Type { - case BinaryLog: - coordinatesExceededCurrent = instance.SelfBinlogCoordinates.FileSmallerThan(&startingCoordinates) - case RelayLog: - coordinatesExceededCurrent = instance.RelaylogCoordinates.FileSmallerThan(&startingCoordinates) - } - if coordinatesExceededCurrent { - // We're past the last file. This is a non-error: there are no more events. 
- log.Debugf("Coordinates overflow: %+v; terminating search", startingCoordinates) - return []BinlogEvent{}, nil - } - events, err := readBinlogEventsChunk(&instance.Key, startingCoordinates) - if err != nil { - return events, err - } - if len(events) > 0 { - log.Debugf("Returning %d events at %+v", len(events), startingCoordinates) - return events, nil - } - - // events are empty - if nextCoordinates, err := instance.GetNextBinaryLog(startingCoordinates); err == nil { - log.Debugf("Recursing into %+v", nextCoordinates) - return getNextBinlogEventsChunk(instance, nextCoordinates, numEmptyBinlogs+1) - } - // on error - return events, err -} - -// used by GetNextBinlogCoordinatesToMatch to format debug information appropriately -// format the event information in debug output -func formatEventCleanly(event BinlogEvent, length *int) string { - return fmt.Sprintf("%+v %+v; %+v", rpad(event.Coordinates, length), event.EventType, strings.Split(strings.TrimSpace(event.Info), "\n")[0]) -} - -// Only do special filtering if instance is MySQL-5.7 and other -// is MySQL-5.6 and in pseudo-gtid mode. -// returns applyInstanceSpecialFiltering, applyOtherSpecialFiltering, err -func special56To57filterProcessing(instance *Instance, other *Instance) (bool, bool, error) { - // be paranoid - if instance == nil || other == nil { - return false, false, fmt.Errorf("special56To57filterProcessing: instance or other is nil. Should not happen") - } - - filterInstance := instance.FlavorNameAndMajorVersion() == "MySQL-5.7" && // 5.7 replica - other.FlavorNameAndMajorVersion() == "MySQL-5.6" // replicating under 5.6 master - - // The logic for other is a bit weird and may require us - // to check the instance's master. To avoid this do some - // preliminary checks first to avoid the "master" access - // unless absolutely needed. 
- if instance.LogBinEnabled || // instance writes binlogs (not relay logs) - instance.FlavorNameAndMajorVersion() != "MySQL-5.7" || // instance NOT 5.7 replica - other.FlavorNameAndMajorVersion() != "MySQL-5.7" { // new master is NOT 5.7 - return filterInstance, false /* good exit status avoiding checking master */, nil - } - - // We need to check if the master is 5.6 - // - Do not call GetInstanceMaster() as that requires the - // master to be available, and this code may be called - // during a master/intermediate master failover when the - // master may not actually be reachable. - master, _, err := ReadInstance(&instance.MasterKey) - if err != nil { - return false, false, log.Errorf("special56To57filterProcessing: ReadInstance(%+v) fails: %+v", instance.MasterKey, err) - } - - filterOther := master.FlavorNameAndMajorVersion() == "MySQL-5.6" // master(instance) == 5.6 - - return filterInstance, filterOther, nil -} - -// The event type to filter out -const anonymousGTIDNextEvent = "SET @@SESSION.GTID_NEXT= 'ANONYMOUS'" - -// check if the event is one we want to skip. -func specialEventToSkip(event *BinlogEvent) bool { - if event != nil && strings.Contains(event.Info, anonymousGTIDNextEvent) { - return true - } - return false -} - -// GetNextBinlogCoordinatesToMatch is given a twin-coordinates couple for a would-be replica (instance) and another -// instance (other). -// This is part of the match-below process, and is the heart of the operation: matching the binlog events starting -// the twin-coordinates (where both share the same Pseudo-GTID) until "instance" runs out of entries, hopefully -// before "other" runs out. -// If "other" runs out that means "instance" is more advanced in replication than "other", in which case we can't -// turn it into a replica of "other". 
-func GetNextBinlogCoordinatesToMatch( - instance *Instance, - instanceCoordinates BinlogCoordinates, - recordedInstanceRelayLogCoordinates BinlogCoordinates, - maxBinlogCoordinates *BinlogCoordinates, - other *Instance, - otherCoordinates BinlogCoordinates) (*BinlogCoordinates, int, error) { - - const noMatchedEvents int = 0 // to make return statements' intent clearer - - // create instanceCursor for scanning instance binlog events - fetchNextEvents := func(binlogCoordinates BinlogCoordinates) ([]BinlogEvent, error) { - return getNextBinlogEventsChunk(instance, binlogCoordinates, 0) - } - instanceCursor := NewBinlogEventCursor(instanceCoordinates, fetchNextEvents) - - // create otherCursor for scanning other binlog events - fetchOtherNextEvents := func(binlogCoordinates BinlogCoordinates) ([]BinlogEvent, error) { - return getNextBinlogEventsChunk(other, binlogCoordinates, 0) - } - otherCursor := NewBinlogEventCursor(otherCoordinates, fetchOtherNextEvents) - - // for 5.6 to 5.7 replication special processing may be needed. - applyInstanceSpecialFiltering, applyOtherSpecialFiltering, err := special56To57filterProcessing(instance, other) - if err != nil { - return nil, noMatchedEvents, log.Errore(err) - } - - var ( - beautifyCoordinatesLength int = 0 - countMatchedEvents int = 0 - lastConsumedEventCoordinates BinlogCoordinates - ) - - for { - // Exhaust binlogs/relaylogs on instance. While iterating them, also iterate the otherInstance binlogs. - // We expect entries on both to match, sequentially, until instance's binlogs/relaylogs are exhausted. 
- var ( - // the whole event to make things simpler - instanceEvent BinlogEvent - otherEvent BinlogEvent - ) - - { - // we may need to skip Anonymous GTID Next Events so loop here over any we find - var event *BinlogEvent - var err error - for done := false; !done; { - // Extract next binlog/relaylog entry from instance: - event, err = instanceCursor.nextRealEvent(0) - if err != nil { - return nil, noMatchedEvents, log.Errore(err) - } - if event != nil { - lastConsumedEventCoordinates = event.Coordinates - } - if event == nil || !applyInstanceSpecialFiltering || !specialEventToSkip(event) { - done = true - } - } - - switch instanceCoordinates.Type { - case BinaryLog: - if event == nil { - // end of binary logs for instance: - otherNextCoordinates, err := otherCursor.getNextCoordinates() - if err != nil { - return nil, noMatchedEvents, log.Errore(err) - } - instanceNextCoordinates, err := instanceCursor.getNextCoordinates() - if err != nil { - return nil, noMatchedEvents, log.Errore(err) - } - // sanity check - if instanceNextCoordinates.SmallerThan(&instance.SelfBinlogCoordinates) { - return nil, noMatchedEvents, log.Errorf("Unexpected problem: instance binlog iteration ended before self coordinates. Ended with: %+v, self coordinates: %+v", instanceNextCoordinates, instance.SelfBinlogCoordinates) - } - // Possible good exit point. - log.Debugf("Reached end of binary logs for instance, at %+v. Other coordinates: %+v", instanceNextCoordinates, otherNextCoordinates) - return &otherNextCoordinates, countMatchedEvents, nil - } - case RelayLog: - // Argghhhh! SHOW RELAY LOG EVENTS IN '...' statement returns CRAPPY values for End_log_pos: - // instead of returning the end log pos of the current statement in the *relay log*, it shows - // the end log pos of the matching statement in the *master's binary log*! - // Yes, there's logic to this. But this means the next-ccordinates are meaningless. 
- // As result, in the case where we exhaust (following) the relay log, we cannot do our last - // nice sanity test that we've indeed reached the Relay_log_pos coordinate; we are only at the - // last statement, which is SMALLER than Relay_log_pos; and there isn't a "Rotate" entry to make - // a place holder or anything. The log just ends and we can't be absolutely certain that the next - // statement is indeed (futuristically) as End_log_pos. - endOfScan := false - if event == nil { - // End of relay log... - endOfScan = true - log.Debugf("Reached end of relay log at %+v", recordedInstanceRelayLogCoordinates) - } else if recordedInstanceRelayLogCoordinates.Equals(&event.Coordinates) { - // We've passed the maxScanInstanceCoordinates (applies for relay logs) - endOfScan = true - log.Debugf("Reached replica relay log coordinates at %+v", recordedInstanceRelayLogCoordinates) - } else if recordedInstanceRelayLogCoordinates.SmallerThan(&event.Coordinates) { - return nil, noMatchedEvents, log.Errorf("Unexpected problem: relay log scan passed relay log position without hitting it. Ended with: %+v, relay log position: %+v", event.Coordinates, recordedInstanceRelayLogCoordinates) - } - if endOfScan { - // end of binary logs for instance: - otherNextCoordinates, err := otherCursor.getNextCoordinates() - if err != nil { - log.Debugf("otherCursor.getNextCoordinates() failed. otherCoordinates=%+v, cached events in cursor: %d; index=%d", otherCoordinates, len(otherCursor.cachedEvents), otherCursor.currentEventIndex) - return nil, noMatchedEvents, log.Errore(err) - } - // Possible good exit point. - // No further sanity checks (read the above lengthy explanation) - log.Debugf("Reached limit of relay logs for instance, just after %+v. 
Other coordinates: %+v", lastConsumedEventCoordinates, otherNextCoordinates) - return &otherNextCoordinates, countMatchedEvents, nil - } - } - - instanceEvent = *event // make a physical copy - log.Debugf("> %s", formatEventCleanly(instanceEvent, &beautifyCoordinatesLength)) - } - { - // Extract next binlog/relaylog entry from other (intended master): - // - this must have binlogs. We may need to filter anonymous events if we were processing - // a relay log on instance and the instance's master runs 5.6 - var event *BinlogEvent - var err error - for done := false; !done; { - // Extract next binlog entry from other: - event, err = otherCursor.nextRealEvent(0) - if err != nil { - return nil, noMatchedEvents, log.Errore(err) - } - if event == nil || !applyOtherSpecialFiltering || !specialEventToSkip(event) { - done = true - } - } - - if event == nil { - // end of binary logs for otherInstance: this is unexpected and means instance is more advanced - // than otherInstance - return nil, noMatchedEvents, log.Errorf("Unexpected end of binary logs for assumed master (%+v). This means the instance which attempted to be a replica (%+v) was more advanced. Try the other way round", other.Key, instance.Key) - } - - otherEvent = *event // make a physical copy - log.Debugf("< %s", formatEventCleanly(otherEvent, &beautifyCoordinatesLength)) - } - // Verify things are sane (the two extracted entries are identical): - // (not strictly required by the algorithm but adds such a lovely self-sanity-testing essence) - if instanceEvent.Info != otherEvent.Info { - return nil, noMatchedEvents, log.Errorf("Mismatching entries, aborting: %+v <-> %+v", instanceEvent.Info, otherEvent.Info) - } - countMatchedEvents++ - if maxBinlogCoordinates != nil { - // Possible good exit point. - // Not searching till end of binary logs/relay log exec pos. Instead, we're stopping at an instructed position. 
- if instanceEvent.Coordinates.Equals(maxBinlogCoordinates) { - log.Debugf("maxBinlogCoordinates specified as %+v and reached. Stopping", *maxBinlogCoordinates) - return &otherEvent.Coordinates, countMatchedEvents, nil - } else if maxBinlogCoordinates.SmallerThan(&instanceEvent.Coordinates) { - return nil, noMatchedEvents, log.Errorf("maxBinlogCoordinates (%+v) exceeded but not met", *maxBinlogCoordinates) - } - } - } - // Won't get here -} - func GetPreviousGTIDs(instanceKey *InstanceKey, binlog string) (previousGTIDs *OracleGtidSet, err error) { if binlog == "" { return nil, log.Errorf("GetPreviousGTIDs: empty binlog file name for %+v", *instanceKey) diff --git a/go/vt/orchestrator/inst/instance_dao.go b/go/vt/orchestrator/inst/instance_dao.go index 656238b2618..d972b6c62df 100644 --- a/go/vt/orchestrator/inst/instance_dao.go +++ b/go/vt/orchestrator/inst/instance_dao.go @@ -55,10 +55,6 @@ const ( backendDBConcurrency = 20 retryInstanceFunctionCount = 5 retryInterval = 500 * time.Millisecond - error1045AccessDenied = "Error 1045: Access denied for user" - errorConnectionRefused = "getsockopt: connection refused" - errorNoSuchHost = "no such host" - errorIOTimeout = "i/o timeout" ) var instanceReadChan = make(chan bool, backendDBConcurrency) @@ -99,7 +95,6 @@ var GroupReplicationNotSupportedErrors = map[uint16]bool{ // instanceKeyInformativeClusterName is a non-authoritative cache; used for auditing or general purpose. 
var instanceKeyInformativeClusterName *cache.Cache var forgetInstanceKeys *cache.Cache -var clusterInjectedPseudoGTIDCache *cache.Cache var accessDeniedCounter = metrics.NewCounter() var readTopologyInstanceCounter = metrics.NewCounter() @@ -127,7 +122,6 @@ func initializeInstanceDao() { instanceWriteBuffer = make(chan instanceUpdateObject, config.Config.InstanceWriteBufferSize) instanceKeyInformativeClusterName = cache.New(time.Duration(config.Config.InstancePollSeconds/2)*time.Second, time.Second) forgetInstanceKeys = cache.New(time.Duration(config.Config.InstancePollSeconds*3)*time.Second, time.Second) - clusterInjectedPseudoGTIDCache = cache.New(time.Minute, time.Second) // spin off instance write buffer flushing go func() { flushTick := time.Tick(time.Duration(config.Config.InstanceFlushIntervalMilliseconds) * time.Millisecond) //nolint SA1015: using time.Tick leaks the underlying ticker @@ -218,73 +212,6 @@ func RetryInstanceFunction(f func() (*Instance, error)) (instance *Instance, err return instance, err } -// Is this an error which means that we shouldn't try going more queries for this discovery attempt? 
-func unrecoverableError(err error) bool { - contains := []string{ - error1045AccessDenied, - errorConnectionRefused, - errorIOTimeout, - errorNoSuchHost, - } - for _, k := range contains { - if strings.Contains(err.Error(), k) { - return true - } - } - return false -} - -// Check if the instance is a MaxScale binlog server (a proxy not a real -// MySQL server) and also update the resolved hostname -func (instance *Instance) checkMaxScale(db *sql.DB, latency *stopwatch.NamedStopwatch) (isMaxScale bool, resolvedHostname string, err error) { - if config.Config.SkipMaxScaleCheck { - return isMaxScale, resolvedHostname, err - } - - latency.Start("instance") - err = sqlutils.QueryRowsMap(db, "show variables like 'maxscale%'", func(m sqlutils.RowMap) error { - if m.GetString("Variable_name") == "MAXSCALE_VERSION" { - originalVersion := m.GetString("Value") - if originalVersion == "" { - originalVersion = m.GetString("value") - } - if originalVersion == "" { - originalVersion = "0.0.0" - } - instance.Version = originalVersion + "-maxscale" - instance.ServerID = 0 - instance.ServerUUID = "" - instance.Uptime = 0 - instance.Binlog_format = "INHERIT" - instance.ReadOnly = true - instance.LogBinEnabled = true - instance.LogReplicationUpdatesEnabled = true - resolvedHostname = instance.Key.Hostname - latency.Start("backend") - UpdateResolvedHostname(resolvedHostname, resolvedHostname) - latency.Stop("backend") - isMaxScale = true - } - return nil - }) - latency.Stop("instance") - - // Detect failed connection attempts and don't report the command - // we are executing as that might be confusing. 
- if err != nil { - if strings.Contains(err.Error(), error1045AccessDenied) { - accessDeniedCounter.Inc(1) - } - if unrecoverableError(err) { - logReadTopologyInstanceError(&instance.Key, "", err) - } else { - logReadTopologyInstanceError(&instance.Key, "show variables like 'maxscale%'", err) - } - } - - return isMaxScale, resolvedHostname, err -} - // expectReplicationThreadsState expects both replication threads to be running, or both to be not running. // Specifically, it looks for both to be "Yes" or for both to be "No". func expectReplicationThreadsState(instanceKey *InstanceKey, expectedState ReplicationThreadState) (expectationMet bool, err error) { @@ -325,10 +252,6 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, partialSuccess := false foundByShowSlaveHosts := false resolvedHostname := "" - maxScaleMasterHostname := "" - isMaxScale := false - isMaxScale110 := false - slaveStatusFound := false errorChan := make(chan error, 32) var resolveErr error @@ -363,42 +286,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, } instance.Key = *instanceKey - - if isMaxScale, resolvedHostname, err = instance.checkMaxScale(db, latency); err != nil { - // We do not "goto Cleanup" here, although it should be the correct flow. - // Reason is 5.7's new security feature that requires GRANTs on performance_schema.session_variables. - // There is a wrong decision making in this design and the migration path to 5.7 will be difficult. - // I don't want orchestrator to put even more burden on this. - // If the statement errors, then we are unable to determine that this is maxscale, hence assume it is not. - // In which case there would be other queries sent to the server that are not affected by 5.7 behavior, and that will fail. 
- - // Certain errors are not recoverable (for this discovery process) so it's fine to go to Cleanup - if unrecoverableError(err) { - goto Cleanup - } - } - - if isMaxScale { - if strings.Contains(instance.Version, "1.1.0") { - isMaxScale110 = true - - // Buggy buggy maxscale 1.1.0. Reported Master_Host can be corrupted. - // Therefore we (currently) take @@hostname (which is masquerading as master host anyhow) - err = db.QueryRow("select @@hostname").Scan(&maxScaleMasterHostname) - if err != nil { - goto Cleanup - } - } - if isMaxScale110 { - // Only this is supported: - db.QueryRow("select @@server_id").Scan(&instance.ServerID) - } else { - db.QueryRow("select @@global.server_id").Scan(&instance.ServerID) - db.QueryRow("select @@global.server_uuid").Scan(&instance.ServerUUID) - } - } else { - // NOT MaxScale - + { // We begin with a few operations we can run concurrently, and which do not depend on anything { waitGroup.Add(1) @@ -571,10 +459,6 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, instance.ReplicationIOThreadState = ReplicationThreadStateFromStatus(m.GetString("Slave_IO_Running")) instance.ReplicationSQLThreadState = ReplicationThreadStateFromStatus(m.GetString("Slave_SQL_Running")) instance.ReplicationIOThreadRuning = instance.ReplicationIOThreadState.IsRunning() - if isMaxScale110 { - // Covering buggy MaxScale 1.1.0 - instance.ReplicationIOThreadRuning = instance.ReplicationIOThreadRuning && (m.GetString("Slave_IO_State") == "Binlog Dump") - } instance.ReplicationSQLThreadRuning = instance.ReplicationSQLThreadState.IsRunning() instance.ReadBinlogCoordinates.LogFile = m.GetString("Master_Log_File") instance.ReadBinlogCoordinates.LogPos = m.GetInt64("Read_Master_Log_Pos") @@ -593,11 +477,6 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, instance.HasReplicationFilters = ((m.GetStringD("Replicate_Do_DB", "") != "") || (m.GetStringD("Replicate_Ignore_DB", "") != "") || 
(m.GetStringD("Replicate_Do_Table", "") != "") || (m.GetStringD("Replicate_Ignore_Table", "") != "") || (m.GetStringD("Replicate_Wild_Do_Table", "") != "") || (m.GetStringD("Replicate_Wild_Ignore_Table", "") != "")) masterHostname := m.GetString("Master_Host") - if isMaxScale110 { - // Buggy buggy maxscale 1.1.0. Reported Master_Host can be corrupted. - // Therefore we (currently) take @@hostname (which is masquarading as master host anyhow) - masterHostname = maxScaleMasterHostname - } masterKey, err := NewResolveInstanceKey(masterHostname, m.GetInt("Master_Port")) if err != nil { logReadTopologyInstanceError(instanceKey, "NewResolveInstanceKey", err) @@ -618,7 +497,6 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, instance.AllowTLS = (m.GetString("Master_SSL_Allowed") == "Yes") // Not breaking the flow even on error - slaveStatusFound = true return nil }) if err != nil { @@ -635,12 +513,8 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, goto Cleanup } } - if isMaxScale && !slaveStatusFound { - err = fmt.Errorf("No 'SHOW SLAVE STATUS' output found for a MaxScale instance: %+v", instanceKey) - goto Cleanup - } - if config.Config.ReplicationLagQuery != "" && !isMaxScale { + if config.Config.ReplicationLagQuery != "" { waitGroup.Add(1) go func() { defer waitGroup.Done() @@ -664,22 +538,12 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, // ------------------------------------------------------------------------- // Get replicas, either by SHOW SLAVE HOSTS or via PROCESSLIST - // MaxScale does not support PROCESSLIST, so SHOW SLAVE HOSTS is the only option - if config.Config.DiscoverByShowSlaveHosts || isMaxScale { + if config.Config.DiscoverByShowSlaveHosts { err := sqlutils.QueryRowsMap(db, `show slave hosts`, func(m sqlutils.RowMap) error { - // MaxScale 1.1 may trigger an error with this command, but - // also we may see issues if anything on the MySQL server 
locks up. - // Consequently it's important to validate the values received look - // good prior to calling ResolveHostname() host := m.GetString("Host") port := m.GetIntD("Port", 0) if host == "" || port == 0 { - if isMaxScale && host == "" && port == 0 { - // MaxScale reports a bad response sometimes so ignore it. - // - seen in 1.1.0 and 1.4.3.4 - return nil - } // otherwise report the error to the caller return fmt.Errorf("ReadTopologyInstance(%+v) 'show slave hosts' returned row with : <%v,%v>", instanceKey, host, port) } @@ -696,7 +560,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, logReadTopologyInstanceError(instanceKey, "show slave hosts", err) } - if !foundByShowSlaveHosts && !isMaxScale { + if !foundByShowSlaveHosts { // Either not configured to read SHOW SLAVE HOSTS or nothing was there. // Discover by information_schema.processlist waitGroup.Add(1) @@ -754,7 +618,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, } // TODO(sougou): delete DetectDataCenterQuery - if config.Config.DetectDataCenterQuery != "" && !isMaxScale { + if config.Config.DetectDataCenterQuery != "" { waitGroup.Add(1) go func() { defer waitGroup.Done() @@ -765,7 +629,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, instance.DataCenter = tablet.Alias.Cell // TODO(sougou): use cell alias to identify regions. 
- if config.Config.DetectRegionQuery != "" && !isMaxScale { + if config.Config.DetectRegionQuery != "" { waitGroup.Add(1) go func() { defer waitGroup.Done() @@ -774,7 +638,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, }() } - if config.Config.DetectPhysicalEnvironmentQuery != "" && !isMaxScale { + if config.Config.DetectPhysicalEnvironmentQuery != "" { waitGroup.Add(1) go func() { defer waitGroup.Done() @@ -784,7 +648,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, } // TODO(sougou): delete DetectInstanceAliasQuery - if config.Config.DetectInstanceAliasQuery != "" && !isMaxScale { + if config.Config.DetectInstanceAliasQuery != "" { waitGroup.Add(1) go func() { defer waitGroup.Done() @@ -795,7 +659,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, instance.InstanceAlias = topoproto.TabletAliasString(tablet.Alias) // TODO(sougou): come up with a strategy for semi-sync - if config.Config.DetectSemiSyncEnforcedQuery != "" && !isMaxScale { + if config.Config.DetectSemiSyncEnforcedQuery != "" { waitGroup.Add(1) go func() { defer waitGroup.Done() @@ -811,33 +675,6 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, logReadTopologyInstanceError(instanceKey, "ReadInstanceClusterAttributes", err) } - { - // Pseudo GTID - // Depends on ReadInstanceClusterAttributes above - instance.UsingPseudoGTID = false - if config.Config.AutoPseudoGTID { - var err error - instance.UsingPseudoGTID, err = isInjectedPseudoGTID(instance.ClusterName) - log.Errore(err) - } else if config.Config.DetectPseudoGTIDQuery != "" { - waitGroup.Add(1) - go func() { - defer waitGroup.Done() - if resultData, err := sqlutils.QueryResultData(db, config.Config.DetectPseudoGTIDQuery); err == nil { - if len(resultData) > 0 { - if len(resultData[0]) > 0 { - if resultData[0][0].Valid && resultData[0][0].String == "1" { - instance.UsingPseudoGTID = true - } - } - } - } 
else { - logReadTopologyInstanceError(instanceKey, "DetectPseudoGTIDQuery", err) - } - }() - } - } - // We need to update candidate_database_instance. // We register the rule even if it hasn't changed, // to bump the last_suggested time. @@ -848,7 +685,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, // TODO(sougou): delete cluster_alias_override metadata instance.SuggestedClusterAlias = fmt.Sprintf("%v:%v", tablet.Keyspace, tablet.Shard) - if instance.ReplicationDepth == 0 && config.Config.DetectClusterDomainQuery != "" && !isMaxScale { + if instance.ReplicationDepth == 0 && config.Config.DetectClusterDomainQuery != "" { // Only need to do on masters domainName := "" if err := db.QueryRow(config.Config.DetectClusterDomainQuery).Scan(&domainName); err != nil { @@ -1144,7 +981,6 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.GtidPurged = m.GetString("gtid_purged") instance.GtidErrant = m.GetString("gtid_errant") instance.UsingMariaDBGTID = m.GetBool("mariadb_gtid") - instance.UsingPseudoGTID = m.GetBool("pseudo_gtid") instance.SelfBinlogCoordinates.LogFile = m.GetString("binary_log_file") instance.SelfBinlogCoordinates.LogPos = m.GetInt64("binary_log_pos") instance.ReadBinlogCoordinates.LogFile = m.GetString("master_log_file") @@ -2910,68 +2746,6 @@ func ReadHistoryClusterInstances(clusterName string, historyTimestampPattern str return instances, err } -// RecordInstanceCoordinatesHistory snapshots the binlog coordinates of instances -func RecordInstanceCoordinatesHistory() error { - { - writeFunc := func() error { - _, err := db.ExecOrchestrator(` - delete from database_instance_coordinates_history - where - recorded_timestamp < NOW() - INTERVAL ? 
MINUTE - `, (config.PseudoGTIDCoordinatesHistoryHeuristicMinutes + 2), - ) - return log.Errore(err) - } - ExecDBWriteFunc(writeFunc) - } - writeFunc := func() error { - _, err := db.ExecOrchestrator(` - insert into - database_instance_coordinates_history ( - hostname, port, last_seen, recorded_timestamp, - binary_log_file, binary_log_pos, relay_log_file, relay_log_pos - ) - select - hostname, port, last_seen, NOW(), - binary_log_file, binary_log_pos, relay_log_file, relay_log_pos - from - database_instance - where - ( - binary_log_file != '' - or relay_log_file != '' - ) - `, - ) - return log.Errore(err) - } - return ExecDBWriteFunc(writeFunc) -} - -// GetHeuristiclyRecentCoordinatesForInstance returns valid and reasonably recent coordinates for given instance. -func GetHeuristiclyRecentCoordinatesForInstance(instanceKey *InstanceKey) (selfCoordinates *BinlogCoordinates, relayLogCoordinates *BinlogCoordinates, err error) { - query := ` - select - binary_log_file, binary_log_pos, relay_log_file, relay_log_pos - from - database_instance_coordinates_history - where - hostname = ? - and port = ? - and recorded_timestamp <= NOW() - INTERVAL ? 
MINUTE - order by - recorded_timestamp desc - limit 1 - ` - err = db.QueryOrchestrator(query, sqlutils.Args(instanceKey.Hostname, instanceKey.Port, config.PseudoGTIDCoordinatesHistoryHeuristicMinutes), func(m sqlutils.RowMap) error { - selfCoordinates = &BinlogCoordinates{LogFile: m.GetString("binary_log_file"), LogPos: m.GetInt64("binary_log_pos")} - relayLogCoordinates = &BinlogCoordinates{LogFile: m.GetString("relay_log_file"), LogPos: m.GetInt64("relay_log_pos")} - - return nil - }) - return selfCoordinates, relayLogCoordinates, err -} - // RecordInstanceCoordinatesHistory snapshots the binlog coordinates of instances func RecordStaleInstanceBinlogCoordinates(instanceKey *InstanceKey, binlogCoordinates *BinlogCoordinates) error { args := sqlutils.Args( @@ -3218,59 +2992,3 @@ func PopulateGroupReplicationInformation(instance *Instance, db *sql.DB) error { } return nil } - -// RegisterInjectedPseudoGTID -func RegisterInjectedPseudoGTID(clusterName string) error { - query := ` - insert into cluster_injected_pseudo_gtid ( - cluster_name, - time_injected - ) values (?, now()) - on duplicate key update - cluster_name=values(cluster_name), - time_injected=now() - ` - args := sqlutils.Args(clusterName) - writeFunc := func() error { - _, err := db.ExecOrchestrator(query, args...) - if err == nil { - clusterInjectedPseudoGTIDCache.Set(clusterName, true, cache.DefaultExpiration) - } - return log.Errore(err) - } - return ExecDBWriteFunc(writeFunc) -} - -// ExpireInjectedPseudoGTID -func ExpireInjectedPseudoGTID() error { - writeFunc := func() error { - _, err := db.ExecOrchestrator(` - delete from cluster_injected_pseudo_gtid - where time_injected < NOW() - INTERVAL ? 
MINUTE - `, config.PseudoGTIDExpireMinutes, - ) - return log.Errore(err) - } - return ExecDBWriteFunc(writeFunc) -} - -// isInjectedPseudoGTID reads from backend DB / cache -func isInjectedPseudoGTID(clusterName string) (injected bool, err error) { - if injectedValue, found := clusterInjectedPseudoGTIDCache.Get(clusterName); found { - return injectedValue.(bool), err - } - query := ` - select - count(*) as is_injected - from - cluster_injected_pseudo_gtid - where - cluster_name = ? - ` - err = db.QueryOrchestrator(query, sqlutils.Args(clusterName), func(m sqlutils.RowMap) error { - injected = m.GetBool("is_injected") - return nil - }) - clusterInjectedPseudoGTIDCache.Set(clusterName, injected, cache.DefaultExpiration) - return injected, log.Errore(err) -} diff --git a/go/vt/orchestrator/inst/instance_test.go b/go/vt/orchestrator/inst/instance_test.go index c775a9e876f..c957d294229 100644 --- a/go/vt/orchestrator/inst/instance_test.go +++ b/go/vt/orchestrator/inst/instance_test.go @@ -198,12 +198,12 @@ func TestHumanReadableDescription(t *testing.T) { test.S(t).ExpectEquals(desc, "[unknown,invalid,5.7.8-log,rw,nobinlog]") } { - i57.UsingPseudoGTID = true + i57.UsingOracleGTID = true i57.LogBinEnabled = true i57.Binlog_format = "ROW" i57.LogReplicationUpdatesEnabled = true desc := i57.HumanReadableDescription() - test.S(t).ExpectEquals(desc, "[unknown,invalid,5.7.8-log,rw,ROW,>>,P-GTID]") + test.S(t).ExpectEquals(desc, "[unknown,invalid,5.7.8-log,rw,ROW,>>,GTID]") } } @@ -214,12 +214,12 @@ func TestTabulatedDescription(t *testing.T) { test.S(t).ExpectEquals(desc, "unknown|invalid|5.7.8-log|rw|nobinlog|") } { - i57.UsingPseudoGTID = true + i57.UsingOracleGTID = true i57.LogBinEnabled = true i57.Binlog_format = "ROW" i57.LogReplicationUpdatesEnabled = true desc := i57.TabulatedDescription("|") - test.S(t).ExpectEquals(desc, "unknown|invalid|5.7.8-log|rw|ROW|>>,P-GTID") + test.S(t).ExpectEquals(desc, "unknown|invalid|5.7.8-log|rw|ROW|>>,GTID") } } diff --git 
a/go/vt/orchestrator/inst/instance_topology.go b/go/vt/orchestrator/inst/instance_topology.go index 9170fcc5363..fdaad913683 100644 --- a/go/vt/orchestrator/inst/instance_topology.go +++ b/go/vt/orchestrator/inst/instance_topology.go @@ -212,56 +212,6 @@ func InstanceIsMasterOf(allegedMaster, allegedReplica *Instance) bool { return allegedMaster.Key.Equals(&allegedReplica.MasterKey) } -// MoveEquivalent will attempt moving instance indicated by instanceKey below another instance, -// based on known master coordinates equivalence -func MoveEquivalent(instanceKey, otherKey *InstanceKey) (*Instance, error) { - instance, found, err := ReadInstance(instanceKey) - if err != nil || !found { - return instance, err - } - if instance.Key.Equals(otherKey) { - return instance, fmt.Errorf("MoveEquivalent: attempt to move an instance below itself %+v", instance.Key) - } - - // Are there equivalent coordinates to this instance? - instanceCoordinates := &InstanceBinlogCoordinates{Key: instance.MasterKey, Coordinates: instance.ExecBinlogCoordinates} - binlogCoordinates, err := GetEquivalentBinlogCoordinatesFor(instanceCoordinates, otherKey) - if err != nil { - return instance, err - } - if binlogCoordinates == nil { - return instance, fmt.Errorf("No equivalent coordinates found for %+v replicating from %+v at %+v", instance.Key, instance.MasterKey, instance.ExecBinlogCoordinates) - } - // For performance reasons, we did all the above before even checking the replica is stopped or stopping it at all. - // This allows us to quickly skip the entire operation should there NOT be coordinates. - // To elaborate: if the replica is actually running AND making progress, it is unlikely/impossible for it to have - // equivalent coordinates, as the current coordinates are like to have never been seen. - // This excludes the case, for example, that the master is itself not replicating. - // Now if we DO get to happen on equivalent coordinates, we need to double check. 
For CHANGE MASTER to happen we must - // stop the replica anyhow. But then let's verify the position hasn't changed. - knownExecBinlogCoordinates := instance.ExecBinlogCoordinates - instance, err = StopReplication(instanceKey) - if err != nil { - goto Cleanup - } - if !instance.ExecBinlogCoordinates.Equals(&knownExecBinlogCoordinates) { - // Seems like things were still running... We don't have an equivalence point - err = fmt.Errorf("MoveEquivalent(): ExecBinlogCoordinates changed after stopping replication on %+v; aborting", instance.Key) - goto Cleanup - } - _, err = ChangeMasterTo(instanceKey, otherKey, binlogCoordinates, false, GTIDHintNeutral) - -Cleanup: - instance, _ = StartReplication(instanceKey) - - if err == nil { - message := fmt.Sprintf("moved %+v via equivalence coordinates below %+v", *instanceKey, *otherKey) - log.Debugf(message) - AuditOperation("move-equivalent", instanceKey, message) - } - return instance, err -} - // MoveUp will attempt moving instance indicated by instanceKey up the topology hierarchy. // It will perform all safety and sanity checks and will tamper with this instance's replication // as well as its master. @@ -611,11 +561,6 @@ func CheckMoveViaGTID(instance, otherInstance *Instance) (err error) { // moveInstanceBelowViaGTID will attempt moving given instance below another instance using either Oracle GTID or MariaDB GTID. 
func moveInstanceBelowViaGTID(instance, otherInstance *Instance) (*Instance, error) { - rinstance, _, _ := ReadInstance(&instance.Key) - if canMove, merr := rinstance.CanMoveViaMatch(); !canMove { - return instance, merr - } - if canReplicate, err := instance.CanReplicateFrom(otherInstance); !canReplicate { return instance, err } @@ -1423,275 +1368,9 @@ func ErrantGTIDInjectEmpty(instanceKey *InstanceKey) (instance *Instance, cluste return instance, clusterMaster, countInjectedTransactions, err } -// FindLastPseudoGTIDEntry will search an instance's binary logs or relay logs for the last pseudo-GTID entry, -// and return found coordinates as well as entry text -func FindLastPseudoGTIDEntry(instance *Instance, recordedInstanceRelayLogCoordinates BinlogCoordinates, maxBinlogCoordinates *BinlogCoordinates, exhaustiveSearch bool, expectedBinlogFormat *string) (instancePseudoGtidCoordinates *BinlogCoordinates, instancePseudoGtidText string, err error) { - - if config.Config.PseudoGTIDPattern == "" { - return instancePseudoGtidCoordinates, instancePseudoGtidText, fmt.Errorf("PseudoGTIDPattern not configured; cannot use Pseudo-GTID") - } - - if instance.LogBinEnabled && instance.LogReplicationUpdatesEnabled && !*config.RuntimeCLIFlags.SkipBinlogSearch && (expectedBinlogFormat == nil || instance.Binlog_format == *expectedBinlogFormat) { - minBinlogCoordinates, _, _ := GetHeuristiclyRecentCoordinatesForInstance(&instance.Key) - // Well no need to search this instance's binary logs if it doesn't have any... - // With regard log-slave-updates, some edge cases are possible, like having this instance's log-slave-updates - // enabled/disabled (of course having restarted it) - // The approach is not to take chances. If log-slave-updates is disabled, fail and go for relay-logs. - // If log-slave-updates was just enabled then possibly no pseudo-gtid is found, and so again we will go - // for relay logs. 
- // Also, if master has STATEMENT binlog format, and the replica has ROW binlog format, then comparing binlog entries would urely fail if based on the replica's binary logs. - // Instead, we revert to the relay logs. - instancePseudoGtidCoordinates, instancePseudoGtidText, err = getLastPseudoGTIDEntryInInstance(instance, minBinlogCoordinates, maxBinlogCoordinates, exhaustiveSearch) - } - if err != nil || instancePseudoGtidCoordinates == nil { - minRelaylogCoordinates, _ := GetPreviousKnownRelayLogCoordinatesForInstance(instance) - // Unable to find pseudo GTID in binary logs. - // Then MAYBE we are lucky enough (chances are we are, if this replica did not crash) that we can - // extract the Pseudo GTID entry from the last (current) relay log file. - instancePseudoGtidCoordinates, instancePseudoGtidText, err = getLastPseudoGTIDEntryInRelayLogs(instance, minRelaylogCoordinates, recordedInstanceRelayLogCoordinates, exhaustiveSearch) - } - return instancePseudoGtidCoordinates, instancePseudoGtidText, err -} - -// CorrelateBinlogCoordinates find out, if possible, the binlog coordinates of given otherInstance that correlate -// with given coordinates of given instance. -func CorrelateBinlogCoordinates(instance *Instance, binlogCoordinates *BinlogCoordinates, otherInstance *Instance) (*BinlogCoordinates, int, error) { - // We record the relay log coordinates just after the instance stopped since the coordinates can change upon - // a FLUSH LOGS/FLUSH RELAY LOGS (or a START SLAVE, though that's an altogether different problem) etc. - // We want to be on the safe side; we don't utterly trust that we are the only ones playing with the instance. 
- recordedInstanceRelayLogCoordinates := instance.RelaylogCoordinates - instancePseudoGtidCoordinates, instancePseudoGtidText, err := FindLastPseudoGTIDEntry(instance, recordedInstanceRelayLogCoordinates, binlogCoordinates, true, &otherInstance.Binlog_format) - - if err != nil { - return nil, 0, err - } - entriesMonotonic := (config.Config.PseudoGTIDMonotonicHint != "") && strings.Contains(instancePseudoGtidText, config.Config.PseudoGTIDMonotonicHint) - minBinlogCoordinates, _, _ := GetHeuristiclyRecentCoordinatesForInstance(&otherInstance.Key) - otherInstancePseudoGtidCoordinates, err := SearchEntryInInstanceBinlogs(otherInstance, instancePseudoGtidText, entriesMonotonic, minBinlogCoordinates) - if err != nil { - return nil, 0, err - } - - // We've found a match: the latest Pseudo GTID position within instance and its identical twin in otherInstance - // We now iterate the events in both, up to the completion of events in instance (recall that we looked for - // the last entry in instance, hence, assuming pseudo GTID entries are frequent, the amount of entries to read - // from instance is not long) - // The result of the iteration will be either: - // - bad conclusion that instance is actually more advanced than otherInstance (we find more entries in instance - // following the pseudo gtid than we can match in otherInstance), hence we cannot ask instance to replicate - // from otherInstance - // - good result: both instances are exactly in same shape (have replicated the exact same number of events since - // the last pseudo gtid). Since they are identical, it is easy to point instance into otherInstance. - // - good result: the first position within otherInstance where instance has not replicated yet. It is easy to point - // instance into otherInstance. 
- nextBinlogCoordinatesToMatch, countMatchedEvents, err := GetNextBinlogCoordinatesToMatch(instance, *instancePseudoGtidCoordinates, - recordedInstanceRelayLogCoordinates, binlogCoordinates, otherInstance, *otherInstancePseudoGtidCoordinates) - if err != nil { - return nil, 0, err - } - if countMatchedEvents == 0 { - err = fmt.Errorf("Unexpected: 0 events processed while iterating logs. Something went wrong; aborting. nextBinlogCoordinatesToMatch: %+v", nextBinlogCoordinatesToMatch) - return nil, 0, err - } - return nextBinlogCoordinatesToMatch, countMatchedEvents, nil -} - -func CorrelateRelaylogCoordinates(instance *Instance, relaylogCoordinates *BinlogCoordinates, otherInstance *Instance) (instanceCoordinates, correlatedCoordinates, nextCoordinates *BinlogCoordinates, found bool, err error) { - // The two servers are expected to have the same master, or this doesn't work - if !instance.MasterKey.Equals(&otherInstance.MasterKey) { - return instanceCoordinates, correlatedCoordinates, nextCoordinates, found, log.Errorf("CorrelateRelaylogCoordinates requires sibling instances, however %+v has master %+v, and %+v has master %+v", instance.Key, instance.MasterKey, otherInstance.Key, otherInstance.MasterKey) - } - var binlogEvent *BinlogEvent - if relaylogCoordinates == nil { - instanceCoordinates = &instance.RelaylogCoordinates - if minCoordinates, err := GetPreviousKnownRelayLogCoordinatesForInstance(instance); err != nil { - return instanceCoordinates, correlatedCoordinates, nextCoordinates, found, err - } else if binlogEvent, err = GetLastExecutedEntryInRelayLogs(instance, minCoordinates, instance.RelaylogCoordinates); err != nil { - return instanceCoordinates, correlatedCoordinates, nextCoordinates, found, err - } - } else { - instanceCoordinates = relaylogCoordinates - relaylogCoordinates.Type = RelayLog - if binlogEvent, err = ReadBinlogEventAtRelayLogCoordinates(&instance.Key, relaylogCoordinates); err != nil { - return instanceCoordinates, 
correlatedCoordinates, nextCoordinates, found, err - } - } - - _, minCoordinates, err := GetHeuristiclyRecentCoordinatesForInstance(&otherInstance.Key) - if err != nil { - return instanceCoordinates, correlatedCoordinates, nextCoordinates, found, err - } - correlatedCoordinates, nextCoordinates, found, err = SearchEventInRelayLogs(binlogEvent, otherInstance, minCoordinates, otherInstance.RelaylogCoordinates) - return instanceCoordinates, correlatedCoordinates, nextCoordinates, found, err -} - -// MatchBelow will attempt moving instance indicated by instanceKey below its the one indicated by otherKey. -// The refactoring is based on matching binlog entries, not on "classic" positions comparisons. -// The "other instance" could be the sibling of the moving instance any of its ancestors. It may actually be -// a cousin of some sort (though unlikely). The only important thing is that the "other instance" is more -// advanced in replication than given instance. -func MatchBelow(instanceKey, otherKey *InstanceKey, requireInstanceMaintenance bool) (*Instance, *BinlogCoordinates, error) { - instance, err := ReadTopologyInstance(instanceKey) - if err != nil { - return instance, nil, err - } - // Relocation of group secondaries makes no sense, group secondaries, by definition, always replicate from the group - // primary - if instance.IsReplicationGroupSecondary() { - return instance, nil, fmt.Errorf("MatchBelow: %+v is a secondary replication group member, hence, it cannot be relocated", *instanceKey) - } - if config.Config.PseudoGTIDPattern == "" { - return instance, nil, fmt.Errorf("PseudoGTIDPattern not configured; cannot use Pseudo-GTID") - } - if instanceKey.Equals(otherKey) { - return instance, nil, fmt.Errorf("MatchBelow: attempt to match an instance below itself %+v", *instanceKey) - } - otherInstance, err := ReadTopologyInstance(otherKey) - if err != nil { - return instance, nil, err - } - - rinstance, _, _ := ReadInstance(&instance.Key) - if canMove, merr := 
rinstance.CanMoveViaMatch(); !canMove { - return instance, nil, merr - } - - if canReplicate, err := instance.CanReplicateFrom(otherInstance); !canReplicate { - return instance, nil, err - } - var nextBinlogCoordinatesToMatch *BinlogCoordinates - var countMatchedEvents int - - if otherInstance.IsBinlogServer() { - // A Binlog Server does not do all the SHOW BINLOG EVENTS stuff - err = fmt.Errorf("Cannot use PseudoGTID with Binlog Server %+v", otherInstance.Key) - goto Cleanup - } - - log.Infof("Will match %+v below %+v", *instanceKey, *otherKey) - - if requireInstanceMaintenance { - if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), fmt.Sprintf("match below %+v", *otherKey)); merr != nil { - err = fmt.Errorf("Cannot begin maintenance on %+v: %v", *instanceKey, merr) - goto Cleanup - } else { - defer EndMaintenance(maintenanceToken) - } - - // We don't require grabbing maintenance lock on otherInstance, but we do request - // that it is not already under maintenance. - if inMaintenance, merr := InMaintenance(&otherInstance.Key); merr != nil { - err = merr - goto Cleanup - } else if inMaintenance { - err = fmt.Errorf("Cannot match below %+v; it is in maintenance", otherInstance.Key) - goto Cleanup - } - } - - log.Debugf("Stopping replica on %+v", *instanceKey) - instance, err = StopReplication(instanceKey) - if err != nil { - goto Cleanup - } - - nextBinlogCoordinatesToMatch, countMatchedEvents, _ = CorrelateBinlogCoordinates(instance, nil, otherInstance) - - if countMatchedEvents == 0 { - err = fmt.Errorf("Unexpected: 0 events processed while iterating logs. Something went wrong; aborting. nextBinlogCoordinatesToMatch: %+v", nextBinlogCoordinatesToMatch) - goto Cleanup - } - log.Debugf("%+v will match below %+v at %+v; validated events: %d", *instanceKey, *otherKey, *nextBinlogCoordinatesToMatch, countMatchedEvents) - - // Drum roll... 
- _, err = ChangeMasterTo(instanceKey, otherKey, nextBinlogCoordinatesToMatch, false, GTIDHintDeny) - if err != nil { - goto Cleanup - } - -Cleanup: - instance, _ = StartReplication(instanceKey) - if err != nil { - return instance, nextBinlogCoordinatesToMatch, log.Errore(err) - } - // and we're done (pending deferred functions) - AuditOperation("match-below", instanceKey, fmt.Sprintf("matched %+v below %+v", *instanceKey, *otherKey)) - - return instance, nextBinlogCoordinatesToMatch, err -} - -// RematchReplica will re-match a replica to its master, using pseudo-gtid -func RematchReplica(instanceKey *InstanceKey, requireInstanceMaintenance bool) (*Instance, *BinlogCoordinates, error) { - instance, err := ReadTopologyInstance(instanceKey) - if err != nil { - return instance, nil, err - } - masterInstance, found, err := ReadInstance(&instance.MasterKey) - if err != nil || !found { - return instance, nil, err - } - return MatchBelow(instanceKey, &masterInstance.Key, requireInstanceMaintenance) -} - -// MakeMaster will take an instance, make all its siblings its replicas (via pseudo-GTID) and make it master -// (stop its replicaiton, make writeable). -func MakeMaster(instanceKey *InstanceKey) (*Instance, error) { - instance, err := ReadTopologyInstance(instanceKey) - if err != nil { - return instance, err - } - masterInstance, err := ReadTopologyInstance(&instance.MasterKey) - if err == nil { - // If the read succeeded, check the master status. - if masterInstance.IsReplica() { - return instance, fmt.Errorf("MakeMaster: instance's master %+v seems to be replicating", masterInstance.Key) - } - if masterInstance.IsLastCheckValid { - return instance, fmt.Errorf("MakeMaster: instance's master %+v seems to be accessible", masterInstance.Key) - } - } - // Continue anyway if the read failed, because that means the master is - // inaccessible... So it's OK to do the promotion. 
- if !instance.SQLThreadUpToDate() { - return instance, fmt.Errorf("MakeMaster: instance's SQL thread must be up-to-date with I/O thread for %+v", *instanceKey) - } - siblings, err := ReadReplicaInstances(&masterInstance.Key) - if err != nil { - return instance, err - } - for _, sibling := range siblings { - if instance.ExecBinlogCoordinates.SmallerThan(&sibling.ExecBinlogCoordinates) { - return instance, fmt.Errorf("MakeMaster: instance %+v has more advanced sibling: %+v", *instanceKey, sibling.Key) - } - } - - if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), fmt.Sprintf("siblings match below this: %+v", *instanceKey)); merr != nil { - err = fmt.Errorf("Cannot begin maintenance on %+v: %v", *instanceKey, merr) - goto Cleanup - } else { - defer EndMaintenance(maintenanceToken) - } - - _, _, err, _ = MultiMatchBelow(siblings, instanceKey, nil) - if err != nil { - goto Cleanup - } - - SetReadOnly(instanceKey, false) - -Cleanup: - if err != nil { - return instance, log.Errore(err) - } - // and we're done (pending deferred functions) - AuditOperation("make-master", instanceKey, fmt.Sprintf("made master of %+v", *instanceKey)) - - return instance, err -} - // TakeSiblings is a convenience method for turning siblings of a replica to be its subordinates. // This operation is a syntatctic sugar on top relocate-replicas, which uses any available means to the objective: -// GTID, Pseudo-GTID, binlog servers, standard replication... +// GTID, binlog servers, standard replication... func TakeSiblings(instanceKey *InstanceKey) (instance *Instance, takenSiblings int, err error) { instance, err = ReadTopologyInstance(instanceKey) if err != nil { @@ -1819,58 +1498,6 @@ Cleanup: return instance, err } -// MakeLocalMaster promotes a replica above its master, making it replica of its grandparent, while also enslaving its siblings. 
-// This serves as a convenience method to recover replication when a local master fails; the instance promoted is one of its replicas, -// which is most advanced among its siblings. -// This method utilizes Pseudo GTID -func MakeLocalMaster(instanceKey *InstanceKey) (*Instance, error) { - instance, err := ReadTopologyInstance(instanceKey) - if err != nil { - return instance, err - } - masterInstance, found, err := ReadInstance(&instance.MasterKey) - if err != nil || !found { - return instance, err - } - grandparentInstance, err := ReadTopologyInstance(&masterInstance.MasterKey) - if err != nil { - return instance, err - } - siblings, err := ReadReplicaInstances(&masterInstance.Key) - if err != nil { - return instance, err - } - for _, sibling := range siblings { - if instance.ExecBinlogCoordinates.SmallerThan(&sibling.ExecBinlogCoordinates) { - return instance, fmt.Errorf("MakeMaster: instance %+v has more advanced sibling: %+v", *instanceKey, sibling.Key) - } - } - - instance, err = StopReplicationNicely(instanceKey, 0) - if err != nil { - goto Cleanup - } - - _, _, err = MatchBelow(instanceKey, &grandparentInstance.Key, true) - if err != nil { - goto Cleanup - } - - _, _, err, _ = MultiMatchBelow(siblings, instanceKey, nil) - if err != nil { - goto Cleanup - } - -Cleanup: - if err != nil { - return instance, log.Errore(err) - } - // and we're done (pending deferred functions) - AuditOperation("make-local-master", instanceKey, fmt.Sprintf("made master of %+v", *instanceKey)) - - return instance, err -} - // sortInstances shuffles given list of instances according to some logic func sortInstancesDataCenterHint(instances [](*Instance), dataCenterHint string) { sort.Sort(sort.Reverse(NewInstancesSorterByExec(instances, dataCenterHint))) @@ -1927,160 +1554,6 @@ func GetSortedReplicas(masterKey *InstanceKey, stopReplicationMethod StopReplica return replicas, err } -// MultiMatchBelow will efficiently match multiple replicas below a given instance. 
-// It is assumed that all given replicas are siblings -func MultiMatchBelow(replicas [](*Instance), belowKey *InstanceKey, postponedFunctionsContainer *PostponedFunctionsContainer) (matchedReplicas [](*Instance), belowInstance *Instance, err error, errs []error) { - belowInstance, found, err := ReadInstance(belowKey) - if err != nil || !found { - return matchedReplicas, belowInstance, err, errs - } - - replicas = RemoveInstance(replicas, belowKey) - if len(replicas) == 0 { - // Nothing to do - return replicas, belowInstance, err, errs - } - - log.Infof("Will match %+v replicas below %+v via Pseudo-GTID, independently", len(replicas), belowKey) - - barrier := make(chan *InstanceKey) - replicaMutex := &sync.Mutex{} - - for _, replica := range replicas { - replica := replica - - // Parallelize repoints - go func() { - defer func() { barrier <- &replica.Key }() - matchFunc := func() error { - replica, _, replicaErr := MatchBelow(&replica.Key, belowKey, true) - - replicaMutex.Lock() - defer replicaMutex.Unlock() - - if replicaErr == nil { - matchedReplicas = append(matchedReplicas, replica) - } else { - errs = append(errs, replicaErr) - } - return replicaErr - } - if shouldPostponeRelocatingReplica(replica, postponedFunctionsContainer) { - postponedFunctionsContainer.AddPostponedFunction(matchFunc, fmt.Sprintf("multi-match-below-independent %+v", replica.Key)) - // We bail out and trust our invoker to later call upon this postponed function - } else { - ExecuteOnTopology(func() { matchFunc() }) - } - }() - } - for range replicas { - <-barrier - } - if len(errs) == len(replicas) { - // All returned with error - return matchedReplicas, belowInstance, fmt.Errorf("MultiMatchBelowIndependently: Error on all %+v operations", len(errs)), errs - } - AuditOperation("multi-match-below-independent", belowKey, fmt.Sprintf("matched %d/%d replicas below %+v via Pseudo-GTID", len(matchedReplicas), len(replicas), belowKey)) - - return matchedReplicas, belowInstance, err, errs -} - -// 
MultiMatchReplicas will match (via pseudo-gtid) all replicas of given master below given instance. -func MultiMatchReplicas(masterKey *InstanceKey, belowKey *InstanceKey, pattern string) ([](*Instance), *Instance, error, []error) { - res := [](*Instance){} - errs := []error{} - - belowInstance, err := ReadTopologyInstance(belowKey) - if err != nil { - // Can't access "below" ==> can't match replicas beneath it - return res, nil, err, errs - } - - masterInstance, found, err := ReadInstance(masterKey) - if err != nil || !found { - return res, nil, err, errs - } - - // See if we have a binlog server case (special handling): - binlogCase := false - if masterInstance.IsBinlogServer() && masterInstance.MasterKey.Equals(belowKey) { - // repoint-up - log.Debugf("MultiMatchReplicas: pointing replicas up from binlog server") - binlogCase = true - } else if belowInstance.IsBinlogServer() && belowInstance.MasterKey.Equals(masterKey) { - // repoint-down - log.Debugf("MultiMatchReplicas: pointing replicas down to binlog server") - binlogCase = true - } else if masterInstance.IsBinlogServer() && belowInstance.IsBinlogServer() && masterInstance.MasterKey.Equals(&belowInstance.MasterKey) { - // Both BLS, siblings - log.Debugf("MultiMatchReplicas: pointing replicas to binlong sibling") - binlogCase = true - } - if binlogCase { - replicas, err, errors := RepointReplicasTo(masterKey, pattern, belowKey) - // Bail out! 
- return replicas, masterInstance, err, errors - } - - // Not binlog server - - // replicas involved - replicas, err := ReadReplicaInstancesIncludingBinlogServerSubReplicas(masterKey) - if err != nil { - return res, belowInstance, err, errs - } - replicas = filterInstancesByPattern(replicas, pattern) - matchedReplicas, belowInstance, err, errs := MultiMatchBelow(replicas, &belowInstance.Key, nil) - - if len(matchedReplicas) != len(replicas) { - err = fmt.Errorf("MultiMatchReplicas: only matched %d out of %d replicas of %+v; error is: %+v", len(matchedReplicas), len(replicas), *masterKey, err) - } - AuditOperation("multi-match-replicas", masterKey, fmt.Sprintf("matched %d replicas under %+v", len(matchedReplicas), *belowKey)) - - return matchedReplicas, belowInstance, err, errs -} - -// MatchUp will move a replica up the replication chain, so that it becomes sibling of its master, via Pseudo-GTID -func MatchUp(instanceKey *InstanceKey, requireInstanceMaintenance bool) (*Instance, *BinlogCoordinates, error) { - instance, found, err := ReadInstance(instanceKey) - if err != nil || !found { - return nil, nil, err - } - if !instance.IsReplica() { - return instance, nil, fmt.Errorf("instance is not a replica: %+v", instanceKey) - } - // Relocation of group secondaries makes no sense, group secondaries, by definition, always replicate from the group - // primary - if instance.IsReplicationGroupSecondary() { - return instance, nil, fmt.Errorf("MatchUp: %+v is a secondary replication group member, hence, it cannot be relocated", instance.Key) - } - master, found, err := ReadInstance(&instance.MasterKey) - if err != nil || !found { - return instance, nil, log.Errorf("Cannot get master for %+v. 
error=%+v", instance.Key, err) - } - - if !master.IsReplica() { - return instance, nil, fmt.Errorf("master is not a replica itself: %+v", master.Key) - } - - return MatchBelow(instanceKey, &master.MasterKey, requireInstanceMaintenance) -} - -// MatchUpReplicas will move all replicas of given master up the replication chain, -// so that they become siblings of their master. -// This should be called when the local master dies, and all its replicas are to be resurrected via Pseudo-GTID -func MatchUpReplicas(masterKey *InstanceKey, pattern string) ([](*Instance), *Instance, error, []error) { - res := [](*Instance){} - errs := []error{} - - masterInstance, found, err := ReadInstance(masterKey) - if err != nil || !found { - return res, nil, err, errs - } - - return MultiMatchReplicas(masterKey, &masterInstance.MasterKey, pattern) -} - func isGenerallyValidAsBinlogSource(replica *Instance) bool { if !replica.IsLastCheckValid { // something wrong with this replica right now. We shouldn't hope to be able to promote it @@ -2306,88 +1779,6 @@ func GetCandidateReplicaOfBinlogServerTopology(masterKey *InstanceKey) (candidat return candidateReplica, err } -// RegroupReplicasPseudoGTID will choose a candidate replica of a given instance, and take its siblings using pseudo-gtid -func RegroupReplicasPseudoGTID( - masterKey *InstanceKey, - returnReplicaEvenOnFailureToRegroup bool, - onCandidateReplicaChosen func(*Instance), - postponedFunctionsContainer *PostponedFunctionsContainer, - postponeAllMatchOperations func(*Instance, bool) bool, -) ( - aheadReplicas [](*Instance), - equalReplicas [](*Instance), - laterReplicas [](*Instance), - cannotReplicateReplicas [](*Instance), - candidateReplica *Instance, - err error, -) { - candidateReplica, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err = GetCandidateReplica(masterKey, true) - if err != nil { - if !returnReplicaEvenOnFailureToRegroup { - candidateReplica = nil - } - return aheadReplicas, equalReplicas, 
laterReplicas, cannotReplicateReplicas, candidateReplica, err - } - - if config.Config.PseudoGTIDPattern == "" { - return aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, candidateReplica, fmt.Errorf("PseudoGTIDPattern not configured; cannot use Pseudo-GTID") - } - - if onCandidateReplicaChosen != nil { - onCandidateReplicaChosen(candidateReplica) - } - - allMatchingFunc := func() error { - log.Debugf("RegroupReplicas: working on %d equals replicas", len(equalReplicas)) - barrier := make(chan *InstanceKey) - for _, replica := range equalReplicas { - replica := replica - // This replica has the exact same executing coordinates as the candidate replica. This replica - // is *extremely* easy to attach below the candidate replica! - go func() { - defer func() { barrier <- &candidateReplica.Key }() - ExecuteOnTopology(func() { - ChangeMasterTo(&replica.Key, &candidateReplica.Key, &candidateReplica.SelfBinlogCoordinates, false, GTIDHintDeny) - }) - }() - } - for range equalReplicas { - <-barrier - } - - log.Debugf("RegroupReplicas: multi matching %d later replicas", len(laterReplicas)) - // As for the laterReplicas, we'll have to apply pseudo GTID - laterReplicas, candidateReplica, err, _ = MultiMatchBelow(laterReplicas, &candidateReplica.Key, postponedFunctionsContainer) - - operatedReplicas := append(equalReplicas, candidateReplica) - operatedReplicas = append(operatedReplicas, laterReplicas...) 
- log.Debugf("RegroupReplicas: starting %d replicas", len(operatedReplicas)) - barrier = make(chan *InstanceKey) - for _, replica := range operatedReplicas { - replica := replica - go func() { - defer func() { barrier <- &candidateReplica.Key }() - ExecuteOnTopology(func() { - StartReplication(&replica.Key) - }) - }() - } - for range operatedReplicas { - <-barrier - } - AuditOperation("regroup-replicas", masterKey, fmt.Sprintf("regrouped %+v replicas below %+v", len(operatedReplicas), *masterKey)) - return err - } - if postponedFunctionsContainer != nil && postponeAllMatchOperations != nil && postponeAllMatchOperations(candidateReplica, false) { - postponedFunctionsContainer.AddPostponedFunction(allMatchingFunc, fmt.Sprintf("regroup-replicas-pseudo-gtid %+v", candidateReplica.Key)) - } else { - err = allMatchingFunc() - } - log.Debugf("RegroupReplicas: done") - // aheadReplicas are lost (they were ahead in replication as compared to promoted replica) - return aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, candidateReplica, err -} - func getMostUpToDateActiveBinlogServer(masterKey *InstanceKey) (mostAdvancedBinlogServer *Instance, binlogServerReplicas [](*Instance), err error) { if binlogServerReplicas, err = ReadBinlogServerReplicaInstances(masterKey); err == nil && len(binlogServerReplicas) > 0 { // Pick the most advanced binlog sever that is good to go @@ -2405,88 +1796,6 @@ func getMostUpToDateActiveBinlogServer(masterKey *InstanceKey) (mostAdvancedBinl return mostAdvancedBinlogServer, binlogServerReplicas, err } -// RegroupReplicasPseudoGTIDIncludingSubReplicasOfBinlogServers uses Pseugo-GTID to regroup replicas -// of given instance. The function also drill in to replicas of binlog servers that are replicating from given instance, -// and other recursive binlog servers, as long as they're in the same binlog-server-family. 
-func RegroupReplicasPseudoGTIDIncludingSubReplicasOfBinlogServers( - masterKey *InstanceKey, - returnReplicaEvenOnFailureToRegroup bool, - onCandidateReplicaChosen func(*Instance), - postponedFunctionsContainer *PostponedFunctionsContainer, - postponeAllMatchOperations func(*Instance, bool) bool, -) ( - aheadReplicas [](*Instance), - equalReplicas [](*Instance), - laterReplicas [](*Instance), - cannotReplicateReplicas [](*Instance), - candidateReplica *Instance, - err error, -) { - // First, handle binlog server issues: - func() error { - log.Debugf("RegroupReplicasIncludingSubReplicasOfBinlogServers: starting on replicas of %+v", *masterKey) - // Find the most up to date binlog server: - mostUpToDateBinlogServer, binlogServerReplicas, err := getMostUpToDateActiveBinlogServer(masterKey) - if err != nil { - return log.Errore(err) - } - if mostUpToDateBinlogServer == nil { - log.Debugf("RegroupReplicasIncludingSubReplicasOfBinlogServers: no binlog server replicates from %+v", *masterKey) - // No binlog server; proceed as normal - return nil - } - log.Debugf("RegroupReplicasIncludingSubReplicasOfBinlogServers: most up to date binlog server of %+v: %+v", *masterKey, mostUpToDateBinlogServer.Key) - - // Find the most up to date candidate replica: - candidateReplica, _, _, _, _, err := GetCandidateReplica(masterKey, true) - if err != nil { - return log.Errore(err) - } - if candidateReplica == nil { - log.Debugf("RegroupReplicasIncludingSubReplicasOfBinlogServers: no candidate replica for %+v", *masterKey) - // Let the followup code handle that - return nil - } - log.Debugf("RegroupReplicasIncludingSubReplicasOfBinlogServers: candidate replica of %+v: %+v", *masterKey, candidateReplica.Key) - - if candidateReplica.ExecBinlogCoordinates.SmallerThan(&mostUpToDateBinlogServer.ExecBinlogCoordinates) { - log.Debugf("RegroupReplicasIncludingSubReplicasOfBinlogServers: candidate replica %+v coordinates smaller than binlog server %+v", candidateReplica.Key, 
mostUpToDateBinlogServer.Key) - // Need to align under binlog server... - candidateReplica, err = Repoint(&candidateReplica.Key, &mostUpToDateBinlogServer.Key, GTIDHintDeny) - if err != nil { - return log.Errore(err) - } - log.Debugf("RegroupReplicasIncludingSubReplicasOfBinlogServers: repointed candidate replica %+v under binlog server %+v", candidateReplica.Key, mostUpToDateBinlogServer.Key) - candidateReplica, err = StartReplicationUntilMasterCoordinates(&candidateReplica.Key, &mostUpToDateBinlogServer.ExecBinlogCoordinates) - if err != nil { - return log.Errore(err) - } - log.Debugf("RegroupReplicasIncludingSubReplicasOfBinlogServers: aligned candidate replica %+v under binlog server %+v", candidateReplica.Key, mostUpToDateBinlogServer.Key) - // and move back - candidateReplica, err = Repoint(&candidateReplica.Key, masterKey, GTIDHintDeny) - if err != nil { - return log.Errore(err) - } - log.Debugf("RegroupReplicasIncludingSubReplicasOfBinlogServers: repointed candidate replica %+v under master %+v", candidateReplica.Key, *masterKey) - return nil - } - // Either because it _was_ like that, or we _made_ it so, - // candidate replica is as/more up to date than all binlog servers - for _, binlogServer := range binlogServerReplicas { - log.Debugf("RegroupReplicasIncludingSubReplicasOfBinlogServers: matching replicas of binlog server %+v below %+v", binlogServer.Key, candidateReplica.Key) - // Right now sequentially. 
- // At this point just do what you can, don't return an error - MultiMatchReplicas(&binlogServer.Key, &candidateReplica.Key, "") - log.Debugf("RegroupReplicasIncludingSubReplicasOfBinlogServers: done matching replicas of binlog server %+v below %+v", binlogServer.Key, candidateReplica.Key) - } - log.Debugf("RegroupReplicasIncludingSubReplicasOfBinlogServers: done handling binlog regrouping for %+v; will proceed with normal RegroupReplicas", *masterKey) - AuditOperation("regroup-replicas-including-bls", masterKey, fmt.Sprintf("matched replicas of binlog server replicas of %+v under %+v", *masterKey, candidateReplica.Key)) - return nil - }() - // Proceed to normal regroup: - return RegroupReplicasPseudoGTID(masterKey, returnReplicaEvenOnFailureToRegroup, onCandidateReplicaChosen, postponedFunctionsContainer, postponeAllMatchOperations) -} - // RegroupReplicasGTID will choose a candidate replica of a given instance, and take its siblings using GTID func RegroupReplicasGTID( masterKey *InstanceKey, @@ -2575,7 +1884,6 @@ func RegroupReplicasBinlogServers(masterKey *InstanceKey, returnReplicaEvenOnFai } // RegroupReplicas is a "smart" method of promoting one replica over the others ("promoting" it on top of its siblings) -// This method decides which strategy to use: GTID, Pseudo-GTID, Binlog Servers. +// This method decides which strategy to use: GTID or Binlog Servers. 
func RegroupReplicas(masterKey *InstanceKey, returnReplicaEvenOnFailureToRegroup bool, onCandidateReplicaChosen func(*Instance), postponedFunctionsContainer *PostponedFunctionsContainer) ( @@ -2602,7 +1911,6 @@ func RegroupReplicas(masterKey *InstanceKey, returnReplicaEvenOnFailureToRegroup } allGTID := true allBinlogServers := true - allPseudoGTID := true for _, replica := range replicas { if !replica.UsingGTID() { allGTID = false @@ -2610,9 +1918,6 @@ func RegroupReplicas(masterKey *InstanceKey, returnReplicaEvenOnFailureToRegroup if !replica.IsBinlogServer() { allBinlogServers = false } - if !replica.UsingPseudoGTID { - allPseudoGTID = false - } } if allGTID { log.Debugf("RegroupReplicas: using GTID to regroup replicas of %+v", *masterKey) @@ -2624,17 +1929,11 @@ func RegroupReplicas(masterKey *InstanceKey, returnReplicaEvenOnFailureToRegroup movedReplicas, candidateReplica, err := RegroupReplicasBinlogServers(masterKey, returnReplicaEvenOnFailureToRegroup) return emptyReplicas, emptyReplicas, movedReplicas, cannotReplicateReplicas, candidateReplica, err } - if allPseudoGTID { - log.Debugf("RegroupReplicas: using Pseudo-GTID to regroup replicas of %+v", *masterKey) - return RegroupReplicasPseudoGTID(masterKey, returnReplicaEvenOnFailureToRegroup, onCandidateReplicaChosen, postponedFunctionsContainer, nil) - } - // And, as last resort, we do PseudoGTID & binlog servers - log.Warningf("RegroupReplicas: unsure what method to invoke for %+v; trying Pseudo-GTID+Binlog Servers", *masterKey) - return RegroupReplicasPseudoGTIDIncludingSubReplicasOfBinlogServers(masterKey, returnReplicaEvenOnFailureToRegroup, onCandidateReplicaChosen, postponedFunctionsContainer, nil) + return emptyReplicas, emptyReplicas, emptyReplicas, emptyReplicas, instance, log.Errorf("No solution path found for RegroupReplicas") } // relocateBelowInternal is a protentially recursive function which chooses how to relocate an instance below another. 
-// It may choose to use Pseudo-GTID, or normal binlog positions, or take advantage of binlog servers, +// It may choose to use normal binlog positions, or take advantage of binlog servers, // or it may combine any of the above in a multi-step operation. func relocateBelowInternal(instance, other *Instance) (*Instance, error) { if canReplicate, err := instance.CanReplicateFrom(other); !canReplicate { @@ -2645,12 +1944,7 @@ func relocateBelowInternal(instance, other *Instance) (*Instance, error) { // already the desired setup. return Repoint(&instance.Key, &other.Key, GTIDHintNeutral) } - // Do we have record of equivalent coordinates? - if !instance.IsBinlogServer() { - if movedInstance, err := MoveEquivalent(&instance.Key, &other.Key); err == nil { - return movedInstance, nil - } - } + // Try and take advantage of binlog servers: if InstancesAreSiblings(instance, other) && other.IsBinlogServer() { return MoveBelow(&instance.Key, &other.Key) @@ -2698,14 +1992,7 @@ func relocateBelowInternal(instance, other *Instance) (*Instance, error) { return moveInstanceBelowViaGTID(instance, other) } - // Next, try Pseudo-GTID - if instance.UsingPseudoGTID && other.UsingPseudoGTID { - // We prefer PseudoGTID to anything else because, while it takes longer to run, it does not issue - // a STOP SLAVE on any server other than "instance" itself. - instance, _, err := MatchBelow(&instance.Key, &other.Key, true) - return instance, err - } - // No Pseudo-GTID; cehck simple binlog file/pos operations: + // Check simple binlog file/pos operations: if InstancesAreSiblings(instance, other) { // If comastering, only move below if it's read-only if !other.IsCoMaster || other.ReadOnly { @@ -2730,7 +2017,7 @@ func relocateBelowInternal(instance, other *Instance) (*Instance, error) { // RelocateBelow will attempt moving instance indicated by instanceKey below another instance. // Orchestrator will try and figure out the best way to relocate the server. 
This could span normal -// binlog-position, pseudo-gtid, repointing, binlog servers... +// binlog-position, repointing, binlog servers... func RelocateBelow(instanceKey, otherKey *InstanceKey) (*Instance, error) { instance, found, err := ReadInstance(instanceKey) if err != nil || !found { @@ -2761,11 +2048,10 @@ func RelocateBelow(instanceKey, otherKey *InstanceKey) (*Instance, error) { // relocateReplicasInternal is a protentially recursive function which chooses how to relocate // replicas of an instance below another. -// It may choose to use Pseudo-GTID, or normal binlog positions, or take advantage of binlog servers, +// It may choose to use normal binlog positions, or take advantage of binlog servers, // or it may combine any of the above in a multi-step operation. func relocateReplicasInternal(replicas [](*Instance), instance, other *Instance) ([](*Instance), error, []error) { errs := []error{} - var err error // simplest: if instance.Key.Equals(&other.Key) { // already the desired setup. @@ -2811,27 +2097,13 @@ func relocateReplicasInternal(replicas [](*Instance), instance, other *Instance) // Otherwise nothing was moved via GTID. Maybe we don't have any GTIDs, we continue. } - // Pseudo GTID - if other.UsingPseudoGTID { - // Which replicas are using Pseudo GTID? - var pseudoGTIDReplicas [](*Instance) - for _, replica := range replicas { - _, _, hasToBeGTID := instancesAreGTIDAndCompatible(replica, other) - if replica.UsingPseudoGTID && !hasToBeGTID { - pseudoGTIDReplicas = append(pseudoGTIDReplicas, replica) - } - } - pseudoGTIDReplicas, _, err, errs = MultiMatchBelow(pseudoGTIDReplicas, &other.Key, nil) - return pseudoGTIDReplicas, err, errs - } - // Too complex return nil, log.Errorf("Relocating %+v replicas of %+v below %+v turns to be too complex; please do it manually", len(replicas), instance.Key, other.Key), errs } // RelocateReplicas will attempt moving replicas of an instance indicated by instanceKey below another instance. 
// Orchestrator will try and figure out the best way to relocate the servers. This could span normal -// binlog-position, pseudo-gtid, repointing, binlog servers... +// binlog-position, repointing, binlog servers... func RelocateReplicas(instanceKey, otherKey *InstanceKey, pattern string) (replicas [](*Instance), other *Instance, err error, errs []error) { instance, found, err := ReadInstance(instanceKey) diff --git a/go/vt/orchestrator/inst/instance_topology_dao.go b/go/vt/orchestrator/inst/instance_topology_dao.go index 4203dfee2da..b5c69eb594f 100644 --- a/go/vt/orchestrator/inst/instance_topology_dao.go +++ b/go/vt/orchestrator/inst/instance_topology_dao.go @@ -23,20 +23,16 @@ import ( "strings" "time" - "github.com/patrickmn/go-cache" - "vitess.io/vitess/go/vt/orchestrator/config" "vitess.io/vitess/go/vt/orchestrator/db" "vitess.io/vitess/go/vt/orchestrator/external/golib/log" "vitess.io/vitess/go/vt/orchestrator/external/golib/sqlutils" - "vitess.io/vitess/go/vt/orchestrator/util" ) // Max concurrency for bulk topology operations const topologyConcurrency = 128 var topologyConcurrencyChan = make(chan bool, topologyConcurrency) -var supportedAutoPseudoGTIDWriters *cache.Cache = cache.New(config.CheckAutoPseudoGTIDGrantsIntervalSeconds*time.Second, time.Second) type OperationGTIDHint string @@ -270,12 +266,6 @@ func StopReplicationNicely(instanceKey *InstanceKey, timeout time.Duration) (*In } _, err = ExecInstance(instanceKey, `stop slave`) - if err != nil { - // Patch; current MaxScale behavior for STOP SLAVE is to throw an error if replica already stopped. 
- if instance.isMaxScale() && err.Error() == "Error 1199: Slave connection is not running" { - err = nil - } - } if err != nil { return instance, log.Errore(err) } @@ -380,12 +370,6 @@ func StopReplication(instanceKey *InstanceKey) (*Instance, error) { return instance, log.Errore(err) } _, err = ExecInstance(instanceKey, `stop slave`) - if err != nil { - // Patch; current MaxScale behavior for STOP SLAVE is to throw an error if replica already stopped. - if instance.isMaxScale() && err.Error() == "Error 1199: Slave connection is not running" { - err = nil - } - } if err != nil { return instance, log.Errore(err) } @@ -612,9 +596,6 @@ func ChangeMasterTo(instanceKey *InstanceKey, masterKey *InstanceKey, masterBinl return instance, fmt.Errorf("noop: aborting CHANGE MASTER TO operation on %+v; signalling error but nothing went wrong.", *instanceKey) } - originalMasterKey := instance.MasterKey - originalExecBinlogCoordinates := instance.ExecBinlogCoordinates - var changeMasterFunc func() error changedViaGTID := false if instance.UsingMariaDBGTID && gtidHint != GTIDHintDeny { @@ -695,7 +676,6 @@ func ChangeMasterTo(instanceKey *InstanceKey, masterKey *InstanceKey, masterBinl return instance, log.Errore(err) } - WriteMasterPositionEquivalence(&originalMasterKey, &originalExecBinlogCoordinates, changeToMasterKey, masterBinlogCoordinates) ResetInstanceRelaylogCoordinatesHistory(instanceKey) log.Infof("ChangeMasterTo: Changed master on %+v to: %+v, %+v. 
GTID: %+v", *instanceKey, masterKey, masterBinlogCoordinates, changedViaGTID) @@ -968,103 +948,6 @@ func KillQuery(instanceKey *InstanceKey, process int64) (*Instance, error) { return instance, err } -// injectPseudoGTID injects a Pseudo-GTID statement on a writable instance -func injectPseudoGTID(instance *Instance) (hint string, err error) { - if *config.RuntimeCLIFlags.Noop { - return hint, fmt.Errorf("noop: aborting inject-pseudo-gtid operation on %+v; signalling error but nothing went wrong.", instance.Key) - } - - now := time.Now() - randomHash := util.RandomHash()[0:16] - hint = fmt.Sprintf("%.8x:%.8x:%s", now.Unix(), instance.ServerID, randomHash) - query := fmt.Sprintf("drop view if exists `%s`.`_asc:%s`", config.PseudoGTIDSchema, hint) - _, err = ExecInstance(&instance.Key, query) - return hint, log.Errore(err) -} - -// canInjectPseudoGTID checks orchestrator's grants to determine whether is has the -// privilege of auto-injecting pseudo-GTID -func canInjectPseudoGTID(instanceKey *InstanceKey) (canInject bool, err error) { - if canInject, found := supportedAutoPseudoGTIDWriters.Get(instanceKey.StringCode()); found { - return canInject.(bool), nil - } - db, err := db.OpenTopology(instanceKey.Hostname, instanceKey.Port) - if err != nil { - return canInject, err - } - - foundAll := false - foundDropOnAll := false - foundAllOnSchema := false - foundDropOnSchema := false - - err = sqlutils.QueryRowsMap(db, `show grants for current_user()`, func(m sqlutils.RowMap) error { - for _, grantData := range m { - grant := grantData.String - if strings.Contains(grant, `GRANT ALL PRIVILEGES ON *.*`) { - foundAll = true - } - if strings.Contains(grant, `DROP`) && strings.Contains(grant, ` ON *.*`) { - foundDropOnAll = true - } - if strings.Contains(grant, fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", config.PseudoGTIDSchema)) { - foundAllOnSchema = true - } - if strings.Contains(grant, fmt.Sprintf(`GRANT ALL PRIVILEGES ON "%s".*`, config.PseudoGTIDSchema)) { - 
foundAllOnSchema = true - } - if strings.Contains(grant, `DROP`) && strings.Contains(grant, fmt.Sprintf(" ON `%s`.*", config.PseudoGTIDSchema)) { - foundDropOnSchema = true - } - if strings.Contains(grant, `DROP`) && strings.Contains(grant, fmt.Sprintf(` ON "%s".*`, config.PseudoGTIDSchema)) { - foundDropOnSchema = true - } - } - return nil - }) - if err != nil { - return canInject, err - } - - canInject = foundAll || foundDropOnAll || foundAllOnSchema || foundDropOnSchema - supportedAutoPseudoGTIDWriters.Set(instanceKey.StringCode(), canInject, cache.DefaultExpiration) - - return canInject, nil -} - -// CheckAndInjectPseudoGTIDOnWriter checks whether pseudo-GTID can and -// should be injected on given instance, and if so, attempts to inject. -func CheckAndInjectPseudoGTIDOnWriter(instance *Instance) (injected bool, err error) { - if instance == nil { - return injected, log.Errorf("CheckAndInjectPseudoGTIDOnWriter: instance is nil") - } - if instance.ReadOnly { - return injected, log.Errorf("CheckAndInjectPseudoGTIDOnWriter: instance is read-only: %+v", instance.Key) - } - if !instance.IsLastCheckValid { - return injected, nil - } - canInject, err := canInjectPseudoGTID(&instance.Key) - if err != nil { - return injected, log.Errore(err) - } - if !canInject { - if util.ClearToLog("CheckAndInjectPseudoGTIDOnWriter", instance.Key.StringCode()) { - log.Warningf("AutoPseudoGTID enabled, but orchestrator has no priviliges on %+v to inject pseudo-gtid", instance.Key) - } - - return injected, nil - } - if _, err := injectPseudoGTID(instance); err != nil { - return injected, log.Errore(err) - } - injected = true - if err := RegisterInjectedPseudoGTID(instance.ClusterName); err != nil { - return injected, log.Errore(err) - } - return injected, nil -} - func GTIDSubtract(instanceKey *InstanceKey, gtidSet string, gtidSubset string) (gtidSubtract string, err error) { db, err := db.OpenTopology(instanceKey.Hostname, instanceKey.Port) if err != nil { diff --git 
a/go/vt/orchestrator/inst/master_equivalence.go b/go/vt/orchestrator/inst/master_equivalence.go deleted file mode 100644 index 41f50e848ea..00000000000 --- a/go/vt/orchestrator/inst/master_equivalence.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -// InstanceBinlogCoordinates is a convenice wrapper for instance key + binlog coordinates -type InstanceBinlogCoordinates struct { - Key InstanceKey - Coordinates BinlogCoordinates -} diff --git a/go/vt/orchestrator/inst/master_equivalence_dao.go b/go/vt/orchestrator/inst/master_equivalence_dao.go deleted file mode 100644 index 275fd183009..00000000000 --- a/go/vt/orchestrator/inst/master_equivalence_dao.go +++ /dev/null @@ -1,130 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package inst - -import ( - "vitess.io/vitess/go/vt/orchestrator/config" - "vitess.io/vitess/go/vt/orchestrator/db" - "vitess.io/vitess/go/vt/orchestrator/external/golib/log" - "vitess.io/vitess/go/vt/orchestrator/external/golib/sqlutils" -) - -func WriteMasterPositionEquivalence(master1Key *InstanceKey, master1BinlogCoordinates *BinlogCoordinates, - master2Key *InstanceKey, master2BinlogCoordinates *BinlogCoordinates) error { - if master1Key.Equals(master2Key) { - // Not interesting - return nil - } - writeFunc := func() error { - _, err := db.ExecOrchestrator(` - insert into master_position_equivalence ( - master1_hostname, master1_port, master1_binary_log_file, master1_binary_log_pos, - master2_hostname, master2_port, master2_binary_log_file, master2_binary_log_pos, - last_suggested) - values (?, ?, ?, ?, ?, ?, ?, ?, NOW()) - on duplicate key update last_suggested=values(last_suggested) - - `, master1Key.Hostname, master1Key.Port, master1BinlogCoordinates.LogFile, master1BinlogCoordinates.LogPos, - master2Key.Hostname, master2Key.Port, master2BinlogCoordinates.LogFile, master2BinlogCoordinates.LogPos, - ) - return log.Errore(err) - } - return ExecDBWriteFunc(writeFunc) -} - -func GetEquivalentMasterCoordinates(instanceCoordinates *InstanceBinlogCoordinates) (result [](*InstanceBinlogCoordinates), err error) { - query := ` - select - master1_hostname as hostname, - master1_port as port, - master1_binary_log_file as binlog_file, - master1_binary_log_pos as binlog_pos - from - master_position_equivalence - where - master2_hostname = ? - and master2_port = ? - and master2_binary_log_file = ? - and master2_binary_log_pos = ? - union - select - master2_hostname as hostname, - master2_port as port, - master2_binary_log_file as binlog_file, - master2_binary_log_pos as binlog_pos - from - master_position_equivalence - where - master1_hostname = ? - and master1_port = ? - and master1_binary_log_file = ? - and master1_binary_log_pos = ? 
- ` - args := sqlutils.Args( - instanceCoordinates.Key.Hostname, - instanceCoordinates.Key.Port, - instanceCoordinates.Coordinates.LogFile, - instanceCoordinates.Coordinates.LogPos, - instanceCoordinates.Key.Hostname, - instanceCoordinates.Key.Port, - instanceCoordinates.Coordinates.LogFile, - instanceCoordinates.Coordinates.LogPos, - ) - - err = db.QueryOrchestrator(query, args, func(m sqlutils.RowMap) error { - equivalentCoordinates := InstanceBinlogCoordinates{} - equivalentCoordinates.Key.Hostname = m.GetString("hostname") - equivalentCoordinates.Key.Port = m.GetInt("port") - equivalentCoordinates.Coordinates.LogFile = m.GetString("binlog_file") - equivalentCoordinates.Coordinates.LogPos = m.GetInt64("binlog_pos") - - result = append(result, &equivalentCoordinates) - return nil - }) - - if err != nil { - return nil, err - } - - return result, nil -} - -func GetEquivalentBinlogCoordinatesFor(instanceCoordinates *InstanceBinlogCoordinates, belowKey *InstanceKey) (*BinlogCoordinates, error) { - possibleCoordinates, err := GetEquivalentMasterCoordinates(instanceCoordinates) - if err != nil { - return nil, err - } - for _, instanceCoordinates := range possibleCoordinates { - if instanceCoordinates.Key.Equals(belowKey) { - return &instanceCoordinates.Coordinates, nil - } - } - return nil, nil -} - -// ExpireMasterPositionEquivalence expires old master_position_equivalence -func ExpireMasterPositionEquivalence() error { - writeFunc := func() error { - _, err := db.ExecOrchestrator(` - delete from master_position_equivalence - where last_suggested < NOW() - INTERVAL ? 
HOUR - `, config.Config.UnseenInstanceForgetHours, - ) - return log.Errore(err) - } - return ExecDBWriteFunc(writeFunc) -} diff --git a/go/vt/orchestrator/logic/command_applier.go b/go/vt/orchestrator/logic/command_applier.go index 6738de93d48..275b0c104e8 100644 --- a/go/vt/orchestrator/logic/command_applier.go +++ b/go/vt/orchestrator/logic/command_applier.go @@ -21,7 +21,6 @@ import ( "vitess.io/vitess/go/vt/orchestrator/inst" "vitess.io/vitess/go/vt/orchestrator/kv" - orcraft "vitess.io/vitess/go/vt/orchestrator/raft" "vitess.io/vitess/go/vt/orchestrator/external/golib/log" ) @@ -39,14 +38,12 @@ func (applier *CommandApplier) ApplyCommand(op string, value []byte) interface{} switch op { case "heartbeat": return nil - case "async-snapshot": - return applier.asyncSnapshot(value) case "register-node": return applier.registerNode(value) case "discover": return applier.discover(value) case "injected-pseudo-gtid": - return applier.injectedPseudoGTID(value) + return nil // deprecated case "forget": return applier.forget(value) case "forget-cluster": @@ -81,21 +78,12 @@ func (applier *CommandApplier) ApplyCommand(op string, value []byte) interface{} return applier.putInstanceTag(value) case "delete-instance-tag": return applier.deleteInstanceTag(value) - case "leader-uri": - return applier.leaderURI(value) - case "request-health-report": - return applier.healthReport(value) case "set-cluster-alias-manual-override": return applier.setClusterAliasManualOverride(value) } return log.Errorf("Unknown command op: %s", op) } -func (applier *CommandApplier) asyncSnapshot(value []byte) interface{} { - err := orcraft.AsyncSnapshot() - return err -} - func (applier *CommandApplier) registerNode(value []byte) interface{} { return nil } @@ -109,15 +97,6 @@ func (applier *CommandApplier) discover(value []byte) interface{} { return nil } -func (applier *CommandApplier) injectedPseudoGTID(value []byte) interface{} { - var clusterName string - if err := json.Unmarshal(value, 
&clusterName); err != nil { - return log.Errore(err) - } - inst.RegisterInjectedPseudoGTID(clusterName) - return nil -} - func (applier *CommandApplier) forget(value []byte) interface{} { instanceKey := inst.InstanceKey{} if err := json.Unmarshal(value, &instanceKey); err != nil { @@ -282,24 +261,6 @@ func (applier *CommandApplier) deleteInstanceTag(value []byte) interface{} { return err } -func (applier *CommandApplier) leaderURI(value []byte) interface{} { - var uri string - if err := json.Unmarshal(value, &uri); err != nil { - return log.Errore(err) - } - orcraft.LeaderURI.Set(uri) - return nil -} - -func (applier *CommandApplier) healthReport(value []byte) interface{} { - var authenticationToken string - if err := json.Unmarshal(value, &authenticationToken); err != nil { - return log.Errore(err) - } - orcraft.ReportToRaftLeader(authenticationToken) - return nil -} - func (applier *CommandApplier) setClusterAliasManualOverride(value []byte) interface{} { var params [2]string if err := json.Unmarshal(value, ¶ms); err != nil { diff --git a/go/vt/orchestrator/logic/orchestrator.go b/go/vt/orchestrator/logic/orchestrator.go index 6bf5d71b70f..f8d05577a1c 100644 --- a/go/vt/orchestrator/logic/orchestrator.go +++ b/go/vt/orchestrator/logic/orchestrator.go @@ -17,8 +17,6 @@ package logic import ( - "fmt" - "math/rand" "os" "os/signal" "sync" @@ -30,7 +28,6 @@ import ( "github.com/rcrowley/go-metrics" "github.com/sjmudd/stopwatch" - "vitess.io/vitess/go/vt/orchestrator/agent" "vitess.io/vitess/go/vt/orchestrator/collection" "vitess.io/vitess/go/vt/orchestrator/config" "vitess.io/vitess/go/vt/orchestrator/discovery" @@ -39,14 +36,11 @@ import ( "vitess.io/vitess/go/vt/orchestrator/kv" ometrics "vitess.io/vitess/go/vt/orchestrator/metrics" "vitess.io/vitess/go/vt/orchestrator/process" - orcraft "vitess.io/vitess/go/vt/orchestrator/raft" "vitess.io/vitess/go/vt/orchestrator/util" ) const ( - discoveryMetricsName = "DISCOVERY_METRICS" - 
yieldAfterUnhealthyDuration = 5 * config.HealthPollSeconds * time.Second - fatalAfterUnhealthyDuration = 30 * config.HealthPollSeconds * time.Second + discoveryMetricsName = "DISCOVERY_METRICS" ) // discoveryQueue is a channel of deduplicated instanceKey-s @@ -63,14 +57,11 @@ var discoveryQueueLengthGauge = metrics.NewGauge() var discoveryRecentCountGauge = metrics.NewGauge() var isElectedGauge = metrics.NewGauge() var isHealthyGauge = metrics.NewGauge() -var isRaftHealthyGauge = metrics.NewGauge() -var isRaftLeaderGauge = metrics.NewGauge() var discoveryMetrics = collection.CreateOrReturnCollection(discoveryMetricsName) var isElectedNode int64 = 0 var recentDiscoveryOperationKeys *cache.Cache -var pseudoGTIDPublishCache = cache.New(time.Minute, time.Second) var kvFoundCache = cache.New(10*time.Minute, time.Minute) func init() { @@ -83,8 +74,6 @@ func init() { metrics.Register("discoveries.recent_count", discoveryRecentCountGauge) metrics.Register("elect.is_elected", isElectedGauge) metrics.Register("health.is_healthy", isHealthyGauge) - metrics.Register("raft.is_healthy", isRaftHealthyGauge) - metrics.Register("raft.is_leader", isRaftLeaderGauge) ometrics.OnMetricsTick(func() { discoveryQueueLengthGauge.Update(int64(discoveryQueue.QueueLen())) @@ -101,29 +90,13 @@ func init() { ometrics.OnMetricsTick(func() { isHealthyGauge.Update(atomic.LoadInt64(&process.LastContinousCheckHealthy)) }) - ometrics.OnMetricsTick(func() { - var healthy int64 - if orcraft.IsHealthy() { - healthy = 1 - } - isRaftHealthyGauge.Update(healthy) - }) - ometrics.OnMetricsTick(func() { - isRaftLeaderGauge.Update(atomic.LoadInt64(&isElectedNode)) - }) } func IsLeader() bool { - if orcraft.IsRaftEnabled() { - return orcraft.IsLeader() - } return atomic.LoadInt64(&isElectedNode) == 1 } func IsLeaderOrActive() bool { - if orcraft.IsRaftEnabled() { - return orcraft.IsPartOfQuorum() - } return atomic.LoadInt64(&isElectedNode) == 1 } @@ -279,21 +252,7 @@ func DiscoverInstance(instanceKey 
inst.InstanceKey) { func onHealthTick() { wasAlreadyElected := IsLeader() - if orcraft.IsRaftEnabled() { - if orcraft.IsLeader() { - atomic.StoreInt64(&isElectedNode, 1) - } else { - atomic.StoreInt64(&isElectedNode, 0) - } - if process.SinceLastGoodHealthCheck() > yieldAfterUnhealthyDuration { - log.Errorf("Heath test is failing for over %+v seconds. raft yielding", yieldAfterUnhealthyDuration.Seconds()) - orcraft.Yield() - } - if process.SinceLastGoodHealthCheck() > fatalAfterUnhealthyDuration { - orcraft.FatalRaftError(fmt.Errorf("Node is unable to register health. Please check database connnectivity.")) - } - } - if !orcraft.IsRaftEnabled() { + { myIsElectedNode, err := process.AttemptElection() if err != nil { log.Errore(err) @@ -346,48 +305,6 @@ func onHealthTick() { } } -// publishDiscoverMasters will publish to raft a discovery request for all known masters. -// This makes for a best-effort keep-in-sync between raft nodes, where some may have -// inconsistent data due to hosts being forgotten, for example. -func publishDiscoverMasters() error { - instances, err := inst.ReadWriteableClustersMasters() - if err == nil { - for _, instance := range instances { - key := instance.Key - go orcraft.PublishCommand("discover", key) - } - } - return log.Errore(err) -} - -// InjectPseudoGTIDOnWriters will inject a PseudoGTID entry on all writable, accessible, -// supported writers. -func InjectPseudoGTIDOnWriters() error { - instances, err := inst.ReadWriteableClustersMasters() - if err != nil { - return log.Errore(err) - } - for i := range rand.Perm(len(instances)) { - instance := instances[i] - go func() { - if injected, _ := inst.CheckAndInjectPseudoGTIDOnWriter(instance); injected { - clusterName := instance.ClusterName - if orcraft.IsRaftEnabled() { - // We prefer not saturating our raft communication. Pseudo-GTID information is - // OK to be cached for a while. 
- if _, found := pseudoGTIDPublishCache.Get(clusterName); !found { - pseudoGTIDPublishCache.Set(clusterName, true, cache.DefaultExpiration) - orcraft.PublishCommand("injected-pseudo-gtid", clusterName) - } - } else { - inst.RegisterInjectedPseudoGTID(clusterName) - } - } - }() - } - return nil -} - // Write a cluster's master (or all clusters masters) to kv stores. // This should generally only happen once in a lifetime of a cluster. Otherwise KV // stores are updated via failovers. @@ -418,11 +335,7 @@ func SubmitMastersToKvStores(clusterName string, force bool) (kvPairs [](*kv.KVP } log.Debugf("kv.SubmitMastersToKvStores: submitKvPairs: %+v", len(submitKvPairs)) for _, kvPair := range submitKvPairs { - if orcraft.IsRaftEnabled() { - _, err = orcraft.PublishCommand("put-key-value", kvPair) - } else { - err = kv.PutKVPair(kvPair) - } + err := kv.PutKVPair(kvPair) if err == nil { submittedCount++ } else { @@ -464,9 +377,7 @@ func ContinuousDiscovery() { healthTick := time.Tick(config.HealthPollSeconds * time.Second) instancePollTick := time.Tick(instancePollSecondsDuration()) caretakingTick := time.Tick(time.Minute) - raftCaretakingTick := time.Tick(10 * time.Minute) recoveryTick := time.Tick(time.Duration(config.RecoveryPollSeconds) * time.Second) - autoPseudoGTIDTick := time.Tick(time.Duration(config.PseudoGTIDIntervalSeconds) * time.Second) tabletTopoTick := OpenTabletDiscovery() var recoveryEntrance int64 var snapshotTopologiesTick <-chan time.Time @@ -481,16 +392,9 @@ func ContinuousDiscovery() { var seedOnce sync.Once go ometrics.InitMetrics() - go ometrics.InitGraphiteMetrics() go acceptSignals() go kv.InitKVStores() inst.SetDurabilityPolicy(config.Config.Durability) - if config.Config.RaftEnabled { - if err := orcraft.Setup(NewCommandApplier(), NewSnapshotDataCreatorApplier(), process.ThisHostname); err != nil { - log.Fatale(err) - } - go orcraft.Monitor() - } if *config.RuntimeCLIFlags.GrabElection { process.GrabElection() @@ -514,17 +418,10 @@ func 
ContinuousDiscovery() { go injectSeeds(&seedOnce) } }() - case <-autoPseudoGTIDTick: - go func() { - if config.Config.AutoPseudoGTID && IsLeader() { - go InjectPseudoGTIDOnWriters() - } - }() case <-caretakingTick: // Various periodic internal maintenance tasks go func() { if IsLeaderOrActive() { - go inst.RecordInstanceCoordinatesHistory() go inst.ReviewUnseenInstances() go inst.InjectUnseenMasters() @@ -538,10 +435,8 @@ func ContinuousDiscovery() { go inst.ExpireHostnameUnresolve() go inst.ExpireClusterDomainName() go inst.ExpireAudit() - go inst.ExpireMasterPositionEquivalence() go inst.ExpirePoolInstances() go inst.FlushNontrivialResolveCacheToDatabase() - go inst.ExpireInjectedPseudoGTID() go inst.ExpireStaleInstanceBinlogCoordinates() go process.ExpireNodesHistory() go process.ExpireAccessTokens() @@ -558,10 +453,6 @@ func ContinuousDiscovery() { go inst.LoadHostnameResolveCache() } }() - case <-raftCaretakingTick: - if orcraft.IsRaftEnabled() && orcraft.IsLeader() { - go publishDiscoverMasters() - } case <-recoveryTick: go func() { if IsLeaderOrActive() { @@ -597,52 +488,3 @@ func ContinuousDiscovery() { } } } - -func pollAgent(hostname string) error { - polledAgent, err := agent.GetAgent(hostname) - agent.UpdateAgentLastChecked(hostname) - - if err != nil { - return log.Errore(err) - } - - err = agent.UpdateAgentInfo(hostname, polledAgent) - if err != nil { - return log.Errore(err) - } - - return nil -} - -// ContinuousAgentsPoll starts an asynchronuous infinite process where agents are -// periodically investigated and their status captured, and long since unseen agents are -// purged and forgotten. 
-func ContinuousAgentsPoll() { - log.Infof("Starting continuous agents poll") - - go discoverSeededAgents() - - tick := time.Tick(config.HealthPollSeconds * time.Second) - caretakingTick := time.Tick(time.Hour) - for range tick { - agentsHosts, _ := agent.ReadOutdatedAgentsHosts() - log.Debugf("outdated agents hosts: %+v", agentsHosts) - for _, hostname := range agentsHosts { - go pollAgent(hostname) - } - // See if we should also forget agents (lower frequency) - select { - case <-caretakingTick: - agent.ForgetLongUnseenAgents() - agent.FailStaleSeeds() - default: - } - } -} - -func discoverSeededAgents() { - for seededAgent := range agent.SeededAgents { - instanceKey := &inst.InstanceKey{Hostname: seededAgent.Hostname, Port: int(seededAgent.MySQLPort)} - go inst.ReadTopologyInstance(instanceKey) - } -} diff --git a/go/vt/orchestrator/logic/snapshot_data.go b/go/vt/orchestrator/logic/snapshot_data.go deleted file mode 100644 index e131f3df30e..00000000000 --- a/go/vt/orchestrator/logic/snapshot_data.go +++ /dev/null @@ -1,218 +0,0 @@ -/* - Copyright 2017 Shlomi Noach, GitHub Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package logic - -import ( - "bytes" - "compress/gzip" - "encoding/json" - "io" - - "vitess.io/vitess/go/vt/orchestrator/db" - "vitess.io/vitess/go/vt/orchestrator/inst" - orcraft "vitess.io/vitess/go/vt/orchestrator/raft" - - "vitess.io/vitess/go/vt/orchestrator/external/golib/log" - "vitess.io/vitess/go/vt/orchestrator/external/golib/sqlutils" -) - -type SnapshotData struct { - Keys []inst.InstanceKey // Kept for backwards comapatibility - MinimalInstances []inst.MinimalInstance - RecoveryDisabled bool - - ClusterAlias, - ClusterAliasOverride, - ClusterDomainName, - HostAttributes, - InstanceTags, - AccessToken, - PoolInstances, - InjectedPseudoGTIDClusters, - HostnameResolves, - HostnameUnresolves, - DowntimedInstances, - Candidates, - Detections, - KVStore, - Recovery, - RecoverySteps sqlutils.NamedResultData - - LeaderURI string -} - -func NewSnapshotData() *SnapshotData { - return &SnapshotData{} -} - -func readTableData(tableName string, data *sqlutils.NamedResultData) error { - orcdb, err := db.OpenOrchestrator() - if err != nil { - return log.Errore(err) - } - *data, err = sqlutils.ScanTable(orcdb, tableName) - return log.Errore(err) -} - -func writeTableData(tableName string, data *sqlutils.NamedResultData) error { - orcdb, err := db.OpenOrchestrator() - if err != nil { - return log.Errore(err) - } - err = sqlutils.WriteTable(orcdb, tableName, *data) - return log.Errore(err) -} - -func CreateSnapshotData() *SnapshotData { - snapshotData := NewSnapshotData() - - snapshotData.LeaderURI = orcraft.LeaderURI.Get() - // keys - snapshotData.Keys, _ = inst.ReadAllInstanceKeys() - snapshotData.MinimalInstances, _ = inst.ReadAllMinimalInstances() - snapshotData.RecoveryDisabled, _ = IsRecoveryDisabled() - - readTableData("cluster_alias", &snapshotData.ClusterAlias) - readTableData("cluster_alias_override", &snapshotData.ClusterAliasOverride) - readTableData("cluster_domain_name", &snapshotData.ClusterDomainName) - readTableData("access_token", 
&snapshotData.AccessToken) - readTableData("host_attributes", &snapshotData.HostAttributes) - readTableData("database_instance_tags", &snapshotData.InstanceTags) - readTableData("database_instance_pool", &snapshotData.PoolInstances) - readTableData("hostname_resolve", &snapshotData.HostnameResolves) - readTableData("hostname_unresolve", &snapshotData.HostnameUnresolves) - readTableData("database_instance_downtime", &snapshotData.DowntimedInstances) - readTableData("candidate_database_instance", &snapshotData.Candidates) - readTableData("topology_failure_detection", &snapshotData.Detections) - readTableData("kv_store", &snapshotData.KVStore) - readTableData("topology_recovery", &snapshotData.Recovery) - readTableData("topology_recovery_steps", &snapshotData.RecoverySteps) - readTableData("cluster_injected_pseudo_gtid", &snapshotData.InjectedPseudoGTIDClusters) - - log.Debugf("raft snapshot data created") - return snapshotData -} - -type SnapshotDataCreatorApplier struct { -} - -func NewSnapshotDataCreatorApplier() *SnapshotDataCreatorApplier { - generator := &SnapshotDataCreatorApplier{} - return generator -} - -func (this *SnapshotDataCreatorApplier) GetData() (data []byte, err error) { - snapshotData := CreateSnapshotData() - b, err := json.Marshal(snapshotData) - if err != nil { - return b, err - } - var buf bytes.Buffer - zw := gzip.NewWriter(&buf) - if _, err := zw.Write(b); err != nil { - return b, err - } - if err := zw.Close(); err != nil { - return b, err - } - return buf.Bytes(), nil -} - -func (this *SnapshotDataCreatorApplier) Restore(rc io.ReadCloser) error { - snapshotData := NewSnapshotData() - zr, err := gzip.NewReader(rc) - if err != nil { - return err - } - if err := json.NewDecoder(zr).Decode(&snapshotData); err != nil { - return err - } - - orcraft.LeaderURI.Set(snapshotData.LeaderURI) - // keys - { - snapshotInstanceKeyMap := inst.NewInstanceKeyMap() - snapshotInstanceKeyMap.AddKeys(snapshotData.Keys) - for _, minimalInstance := range 
snapshotData.MinimalInstances { - snapshotInstanceKeyMap.AddKey(minimalInstance.Key) - } - - discardedKeys := 0 - // Forget instances that were not in snapshot - existingKeys, _ := inst.ReadAllInstanceKeys() - for _, existingKey := range existingKeys { - if !snapshotInstanceKeyMap.HasKey(existingKey) { - inst.ForgetInstance(&existingKey) - discardedKeys++ - } - } - log.Debugf("raft snapshot restore: discarded %+v keys", discardedKeys) - existingKeysMap := inst.NewInstanceKeyMap() - existingKeysMap.AddKeys(existingKeys) - - // Discover instances that are in snapshot and not in our own database. - // Instances that _are_ in our own database will self-discover. No need - // to explicitly discover them. - discoveredKeys := 0 - // v2: read keys + master keys - for _, minimalInstance := range snapshotData.MinimalInstances { - if !existingKeysMap.HasKey(minimalInstance.Key) { - if err := inst.WriteInstance(minimalInstance.ToInstance(), false, nil); err == nil { - discoveredKeys++ - } else { - log.Errore(err) - } - } - } - if len(snapshotData.MinimalInstances) == 0 { - // v1: read keys (backwards support) - for _, snapshotKey := range snapshotData.Keys { - if !existingKeysMap.HasKey(snapshotKey) { - snapshotKey := snapshotKey - go func() { - snapshotDiscoveryKeys <- snapshotKey - }() - discoveredKeys++ - } - } - } - log.Debugf("raft snapshot restore: discovered %+v keys", discoveredKeys) - } - writeTableData("cluster_alias", &snapshotData.ClusterAlias) - writeTableData("cluster_alias_override", &snapshotData.ClusterAliasOverride) - writeTableData("cluster_domain_name", &snapshotData.ClusterDomainName) - writeTableData("access_token", &snapshotData.AccessToken) - writeTableData("host_attributes", &snapshotData.HostAttributes) - writeTableData("database_instance_tags", &snapshotData.InstanceTags) - writeTableData("database_instance_pool", &snapshotData.PoolInstances) - writeTableData("hostname_resolve", &snapshotData.HostnameResolves) - writeTableData("hostname_unresolve", 
&snapshotData.HostnameUnresolves) - writeTableData("database_instance_downtime", &snapshotData.DowntimedInstances) - writeTableData("candidate_database_instance", &snapshotData.Candidates) - writeTableData("kv_store", &snapshotData.KVStore) - writeTableData("topology_recovery", &snapshotData.Recovery) - writeTableData("topology_failure_detection", &snapshotData.Detections) - writeTableData("topology_recovery_steps", &snapshotData.RecoverySteps) - writeTableData("cluster_injected_pseudo_gtid", &snapshotData.InjectedPseudoGTIDClusters) - - // recovery disable - { - SetRecoveryDisabled(snapshotData.RecoveryDisabled) - } - log.Debugf("raft snapshot restore applied") - return nil -} diff --git a/go/vt/orchestrator/logic/topology_recovery.go b/go/vt/orchestrator/logic/topology_recovery.go index 6faab72aba7..8fad8ec44d7 100644 --- a/go/vt/orchestrator/logic/topology_recovery.go +++ b/go/vt/orchestrator/logic/topology_recovery.go @@ -37,7 +37,6 @@ import ( ometrics "vitess.io/vitess/go/vt/orchestrator/metrics" "vitess.io/vitess/go/vt/orchestrator/os" "vitess.io/vitess/go/vt/orchestrator/process" - orcraft "vitess.io/vitess/go/vt/orchestrator/raft" "vitess.io/vitess/go/vt/orchestrator/util" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -161,8 +160,8 @@ type MasterRecoveryType string const ( NotMasterRecovery MasterRecoveryType = "NotMasterRecovery" MasterRecoveryGTID MasterRecoveryType = "MasterRecoveryGTID" - MasterRecoveryPseudoGTID MasterRecoveryType = "MasterRecoveryPseudoGTID" MasterRecoveryBinlogServer MasterRecoveryType = "MasterRecoveryBinlogServer" + MasterRecoveryUnknown MasterRecoveryType = "MasterRecoveryUnknown" ) var emergencyReadTopologyInstanceMap *cache.Cache @@ -232,12 +231,7 @@ func AuditTopologyRecovery(topologyRecovery *TopologyRecovery, message string) e } recoveryStep := NewTopologyRecoveryStep(topologyRecovery.UID, message) - if orcraft.IsRaftEnabled() { - _, err := orcraft.PublishCommand("write-recovery-step", recoveryStep) - return err - 
} else { - return writeTopologyRecoveryStep(recoveryStep) - } + return writeTopologyRecoveryStep(recoveryStep) } func resolveRecovery(topologyRecovery *TopologyRecovery, successorInstance *inst.Instance) error { @@ -246,12 +240,7 @@ func resolveRecovery(topologyRecovery *TopologyRecovery, successorInstance *inst topologyRecovery.SuccessorAlias = successorInstance.InstanceAlias topologyRecovery.IsSuccessful = true } - if orcraft.IsRaftEnabled() { - _, err := orcraft.PublishCommand("resolve-recovery", topologyRecovery) - return err - } else { - return writeResolveRecovery(topologyRecovery) - } + return writeResolveRecovery(topologyRecovery) } // prepareCommand replaces agreed-upon placeholders with analysis data @@ -482,7 +471,7 @@ func recoverDeadMasterInBinlogServerTopology(topologyRecovery *TopologyRecovery) } func GetMasterRecoveryType(analysisEntry *inst.ReplicationAnalysis) (masterRecoveryType MasterRecoveryType) { - masterRecoveryType = MasterRecoveryPseudoGTID + masterRecoveryType = MasterRecoveryUnknown if analysisEntry.OracleGTIDImmediateTopology || analysisEntry.MariaDBGTIDImmediateTopology { masterRecoveryType = MasterRecoveryGTID } else if analysisEntry.BinlogServerImmediateTopology { @@ -534,16 +523,15 @@ func recoverDeadMaster(topologyRecovery *TopologyRecovery, candidateInstanceKey return false } switch topologyRecovery.RecoveryType { + case MasterRecoveryUnknown: + { + return false, nil, lostReplicas, topologyRecovery.AddError(log.Errorf("RecoveryType unknown/unsupported")) + } case MasterRecoveryGTID: { AuditTopologyRecovery(topologyRecovery, "RecoverDeadMaster: regrouping replicas via GTID") lostReplicas, _, cannotReplicateReplicas, promotedReplica, err = inst.RegroupReplicasGTID(failedInstanceKey, true, nil, &topologyRecovery.PostponedFunctionsContainer, promotedReplicaIsIdeal) } - case MasterRecoveryPseudoGTID: - { - AuditTopologyRecovery(topologyRecovery, "RecoverDeadMaster: regrouping replicas via Pseudo-GTID") - lostReplicas, _, _, 
cannotReplicateReplicas, promotedReplica, err = inst.RegroupReplicasPseudoGTIDIncludingSubReplicasOfBinlogServers(failedInstanceKey, true, nil, &topologyRecovery.PostponedFunctionsContainer, promotedReplicaIsIdeal) - } case MasterRecoveryBinlogServer: { AuditTopologyRecovery(topologyRecovery, "RecoverDeadMaster: recovering via binlog servers") @@ -940,19 +928,9 @@ func checkAndRecoverDeadMaster(analysisEntry inst.ReplicationAnalysis, candidate kvPairs := inst.GetClusterMasterKVPairs(analysisEntry.ClusterDetails.ClusterAlias, &promotedReplica.Key) AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Writing KV %+v", kvPairs)) - if orcraft.IsRaftEnabled() { - for _, kvPair := range kvPairs { - _, err := orcraft.PublishCommand("put-key-value", kvPair) - log.Errore(err) - } - // since we'll be affecting 3rd party tools here, we _prefer_ to mitigate re-applying - // of the put-key-value event upon startup. We _recommend_ a snapshot in the near future. - go orcraft.PublishCommand("async-snapshot", "") - } else { - for _, kvPair := range kvPairs { - err := kv.PutKVPair(kvPair) - log.Errore(err) - } + for _, kvPair := range kvPairs { + err := kv.PutKVPair(kvPair) + log.Errore(err) } { AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Distributing KV %+v", kvPairs)) @@ -1283,7 +1261,7 @@ func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadCoMaster: will recover %+v", *failedInstanceKey)) - var coMasterRecoveryType MasterRecoveryType = MasterRecoveryPseudoGTID + var coMasterRecoveryType MasterRecoveryType = MasterRecoveryUnknown if analysisEntry.OracleGTIDImmediateTopology || analysisEntry.MariaDBGTIDImmediateTopology { coMasterRecoveryType = MasterRecoveryGTID } @@ -1292,13 +1270,13 @@ func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) var cannotReplicateReplicas [](*inst.Instance) switch coMasterRecoveryType { - case MasterRecoveryGTID: + case 
MasterRecoveryUnknown: { - lostReplicas, _, cannotReplicateReplicas, promotedReplica, err = inst.RegroupReplicasGTID(failedInstanceKey, true, nil, &topologyRecovery.PostponedFunctionsContainer, nil) + return nil, lostReplicas, topologyRecovery.AddError(log.Errorf("RecoverDeadCoMaster: RecoveryType unknown/unsupported")) } - case MasterRecoveryPseudoGTID: + case MasterRecoveryGTID: { - lostReplicas, _, _, cannotReplicateReplicas, promotedReplica, err = inst.RegroupReplicasPseudoGTIDIncludingSubReplicasOfBinlogServers(failedInstanceKey, true, nil, &topologyRecovery.PostponedFunctionsContainer, nil) + lostReplicas, _, cannotReplicateReplicas, promotedReplica, err = inst.RegroupReplicasGTID(failedInstanceKey, true, nil, &topologyRecovery.PostponedFunctionsContainer, nil) } } topologyRecovery.AddError(err) @@ -1654,25 +1632,8 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand // At this point we have validated there's a failure scenario for which we have a recovery path. - if orcraft.IsRaftEnabled() { - // with raft, all nodes can (and should) run analysis, - // but only the leader proceeds to execute detection hooks and then to failover. 
- if !orcraft.IsLeader() { - log.Infof("CheckAndRecover: Analysis: %+v, InstanceKey: %+v, candidateInstanceKey: %+v, "+ - "skipProcesses: %v: NOT detecting/recovering host (raft non-leader)", - analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey, candidateInstanceKey, skipProcesses) - return false, nil, err - } - } - // Initiate detection: - registrationSuccess, _, err := checkAndExecuteFailureDetectionProcesses(analysisEntry, skipProcesses) - if registrationSuccess { - if orcraft.IsRaftEnabled() { - _, err := orcraft.PublishCommand("register-failure-detection", analysisEntry) - log.Errore(err) - } - } + _, _, err = checkAndExecuteFailureDetectionProcesses(analysisEntry, skipProcesses) if err != nil { log.Errorf("executeCheckAndRecoverFunction: error on failure detection: %+v", err) return false, nil, err diff --git a/go/vt/orchestrator/logic/topology_recovery_dao.go b/go/vt/orchestrator/logic/topology_recovery_dao.go index 9e9a2fd3b1c..23514f08657 100644 --- a/go/vt/orchestrator/logic/topology_recovery_dao.go +++ b/go/vt/orchestrator/logic/topology_recovery_dao.go @@ -26,7 +26,6 @@ import ( "vitess.io/vitess/go/vt/orchestrator/external/golib/sqlutils" "vitess.io/vitess/go/vt/orchestrator/inst" "vitess.io/vitess/go/vt/orchestrator/process" - orcraft "vitess.io/vitess/go/vt/orchestrator/raft" "vitess.io/vitess/go/vt/orchestrator/util" ) @@ -242,11 +241,6 @@ func AttemptRecoveryRegistration(analysisEntry *inst.ReplicationAnalysis, failIf if err != nil { return nil, log.Errore(err) } - if orcraft.IsRaftEnabled() { - if _, err := orcraft.PublishCommand("write-recovery", topologyRecovery); err != nil { - return nil, log.Errore(err) - } - } return topologyRecovery, nil } diff --git a/go/vt/orchestrator/metrics/graphite.go b/go/vt/orchestrator/metrics/graphite.go deleted file mode 100644 index 1a57f627c65..00000000000 --- a/go/vt/orchestrator/metrics/graphite.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package metrics - -import ( - "net" - "strings" - "time" - - graphite "github.com/cyberdelia/go-metrics-graphite" - "github.com/rcrowley/go-metrics" - - "vitess.io/vitess/go/vt/orchestrator/config" - "vitess.io/vitess/go/vt/orchestrator/external/golib/log" - "vitess.io/vitess/go/vt/orchestrator/process" -) - -// InitGraphiteMetrics is called once in the lifetime of the app, after config has been loaded -func InitGraphiteMetrics() error { - if config.Config.GraphiteAddr == "" { - return nil - } - if config.Config.GraphitePollSeconds <= 0 { - return nil - } - if config.Config.GraphitePath == "" { - return log.Errorf("No graphite path provided (see GraphitePath config variable). 
Will not log to graphite") - } - addr, err := net.ResolveTCPAddr("tcp", config.Config.GraphiteAddr) - if err != nil { - return log.Errore(err) - } - graphitePathHostname := process.ThisHostname - if config.Config.GraphiteConvertHostnameDotsToUnderscores { - graphitePathHostname = strings.Replace(graphitePathHostname, ".", "_", -1) - } - graphitePath := config.Config.GraphitePath - graphitePath = strings.Replace(graphitePath, "{hostname}", graphitePathHostname, -1) - - log.Debugf("Will log to graphite on %+v, %+v", config.Config.GraphiteAddr, graphitePath) - - go graphite.Graphite(metrics.DefaultRegistry, 1*time.Minute, graphitePath, addr) - - return nil -} diff --git a/go/vt/orchestrator/process/election_dao.go b/go/vt/orchestrator/process/election_dao.go index 23b4101b641..f0aa969c3f5 100644 --- a/go/vt/orchestrator/process/election_dao.go +++ b/go/vt/orchestrator/process/election_dao.go @@ -21,7 +21,6 @@ import ( "vitess.io/vitess/go/vt/orchestrator/db" "vitess.io/vitess/go/vt/orchestrator/external/golib/log" "vitess.io/vitess/go/vt/orchestrator/external/golib/sqlutils" - orcraft "vitess.io/vitess/go/vt/orchestrator/raft" "vitess.io/vitess/go/vt/orchestrator/util" ) @@ -104,9 +103,6 @@ func AttemptElection() (bool, error) { // GrabElection forcibly grabs leadership. Use with care!! func GrabElection() error { - if orcraft.IsRaftEnabled() { - return log.Errorf("Cannot GrabElection on raft setup") - } _, err := db.ExecOrchestrator(` replace into active_node ( anchor, hostname, token, first_seen_active, last_seen_active @@ -121,15 +117,13 @@ func GrabElection() error { // Reelect clears the way for re-elections. Active node is immediately demoted. func Reelect() error { - if orcraft.IsRaftEnabled() { - orcraft.StepDown() - } _, err := db.ExecOrchestrator(`delete from active_node where anchor = 1`) return log.Errore(err) } // ElectedNode returns the details of the elected node, as well as answering the question "is this process the elected one"? 
-func ElectedNode() (node NodeHealth, isElected bool, err error) { +func ElectedNode() (node *NodeHealth, isElected bool, err error) { + node = &NodeHealth{} query := ` select hostname, diff --git a/go/vt/orchestrator/process/health.go b/go/vt/orchestrator/process/health.go index 7d22a56002c..2127afa8914 100644 --- a/go/vt/orchestrator/process/health.go +++ b/go/vt/orchestrator/process/health.go @@ -27,7 +27,6 @@ import ( "github.com/patrickmn/go-cache" "vitess.io/vitess/go/vt/orchestrator/external/golib/log" - orcraft "vitess.io/vitess/go/vt/orchestrator/raft" ) var lastHealthCheckUnixNano int64 @@ -76,7 +75,7 @@ type HealthStatus struct { Hostname string Token string IsActiveNode bool - ActiveNode NodeHealth + ActiveNode *NodeHealth Error error AvailableNodes [](*NodeHealth) RaftLeader string @@ -122,19 +121,9 @@ func HealthTest() (health *HealthStatus, err error) { health.Healthy = healthy } - if orcraft.IsRaftEnabled() { - health.ActiveNode.Hostname = orcraft.GetLeader() - health.IsActiveNode = orcraft.IsLeader() - health.RaftLeader = orcraft.GetLeader() - health.RaftLeaderURI = orcraft.LeaderURI.Get() - health.IsRaftLeader = orcraft.IsLeader() - health.RaftAdvertise = config.Config.RaftAdvertise - health.RaftHealthyMembers = orcraft.HealthyMembers() - } else { - if health.ActiveNode, health.IsActiveNode, err = ElectedNode(); err != nil { - health.Error = err - return health, log.Errore(err) - } + if health.ActiveNode, health.IsActiveNode, err = ElectedNode(); err != nil { + health.Error = err + return health, log.Errore(err) } health.AvailableNodes, _ = ReadAvailableNodes(true) diff --git a/go/vt/orchestrator/raft/file_snapshot.go b/go/vt/orchestrator/raft/file_snapshot.go deleted file mode 100644 index 537ecebd57d..00000000000 --- a/go/vt/orchestrator/raft/file_snapshot.go +++ /dev/null @@ -1,494 +0,0 @@ -package orcraft - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "hash" - "hash/crc64" - "io" - "io/ioutil" - "os" - "path/filepath" - 
"sort" - "strings" - "time" - - "vitess.io/vitess/go/vt/orchestrator/external/golib/log" - - "vitess.io/vitess/go/vt/orchestrator/external/raft" -) - -const ( - testPath = "permTest" - snapPath = "snapshots" - metaFilePath = "meta.json" - stateFilePath = "state.bin" - tmpSuffix = ".tmp" -) - -// FileSnapshotStore implements the SnapshotStore interface and allows -// snapshots to be made on the local disk. -type FileSnapshotStore struct { - path string - retain int -} - -type snapMetaSlice []*fileSnapshotMeta - -// FileSnapshotSink implements SnapshotSink with a file. -type FileSnapshotSink struct { - store *FileSnapshotStore - dir string - meta fileSnapshotMeta - - stateFile *os.File - stateHash hash.Hash64 - buffered *bufio.Writer - - closed bool -} - -// fileSnapshotMeta is stored on disk. We also put a CRC -// on disk so that we can verify the snapshot. -type fileSnapshotMeta struct { - raft.SnapshotMeta - CRC []byte -} - -// bufferedFile is returned when we open a snapshot. This way -// reads are buffered and the file still gets closed. -type bufferedFile struct { - bh *bufio.Reader - fh *os.File -} - -func (b *bufferedFile) Read(p []byte) (n int, err error) { - return b.bh.Read(p) -} - -func (b *bufferedFile) Close() error { - return b.fh.Close() -} - -// NewFileSnapshotStoreWithLogger creates a new FileSnapshotStore based -// on a base directory. The `retain` parameter controls how many -// snapshots are retained. Must be at least 1. 
-func NewFileSnapshotStoreWithLogger(base string, retain int) (*FileSnapshotStore, error) { - if retain < 1 { - return nil, fmt.Errorf("must retain at least one snapshot") - } - - // Ensure our path exists - path := filepath.Join(base, snapPath) - if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { - return nil, fmt.Errorf("snapshot path not accessible: %v", err) - } - - // Setup the store - store := &FileSnapshotStore{ - path: path, - retain: retain, - } - - // Do a permissions test - if err := store.testPermissions(); err != nil { - return nil, fmt.Errorf("permissions test failed: %v", err) - } - return store, nil -} - -// NewFileSnapshotStore creates a new FileSnapshotStore based -// on a base directory. The `retain` parameter controls how many -// snapshots are retained. Must be at least 1. -func NewFileSnapshotStore(base string, retain int, logOutput io.Writer) (*FileSnapshotStore, error) { - return NewFileSnapshotStoreWithLogger(base, retain) -} - -// testPermissions tries to touch a file in our path to see if it works. -func (f *FileSnapshotStore) testPermissions() error { - path := filepath.Join(f.path, testPath) - fh, err := os.Create(path) - if err != nil { - return err - } - - if err = fh.Close(); err != nil { - return err - } - - if err = os.Remove(path); err != nil { - return err - } - return nil -} - -// snapshotName generates a name for the snapshot. 
-func snapshotName(term, index uint64) string { - now := time.Now() - msec := now.UnixNano() / int64(time.Millisecond) - return fmt.Sprintf("%d-%d-%d", term, index, msec) -} - -// Create is used to start a new snapshot -func (f *FileSnapshotStore) Create(index, term uint64, peers []byte) (raft.SnapshotSink, error) { - // Create a new path - name := snapshotName(term, index) - path := filepath.Join(f.path, name+tmpSuffix) - log.Infof("snapshot: Creating new snapshot at %s", path) - - // Make the directory - if err := os.MkdirAll(path, 0755); err != nil { - _ = log.Error("snapshot: Failed to make snapshot directory: %v", err) - return nil, err - } - - // Create the sink - sink := &FileSnapshotSink{ - store: f, - dir: path, - meta: fileSnapshotMeta{ - SnapshotMeta: raft.SnapshotMeta{ - ID: name, - Index: index, - Term: term, - Peers: peers, - }, - CRC: nil, - }, - } - - // Write out the meta data - if err := sink.writeMeta(); err != nil { - _ = log.Errorf("snapshot: Failed to write metadata: %v", err) - return nil, err - } - - // Open the state file - statePath := filepath.Join(path, stateFilePath) - fh, err := os.Create(statePath) - if err != nil { - _ = log.Errorf("snapshot: Failed to create state file: %v", err) - return nil, err - } - sink.stateFile = fh - - // Create a CRC64 hash - sink.stateHash = crc64.New(crc64.MakeTable(crc64.ECMA)) - - // Wrap both the hash and file in a MultiWriter with buffering - multi := io.MultiWriter(sink.stateFile, sink.stateHash) - sink.buffered = bufio.NewWriter(multi) - - // Done - return sink, nil -} - -// List returns available snapshots in the store. 
-func (f *FileSnapshotStore) List() ([]*raft.SnapshotMeta, error) { - // Get the eligible snapshots - snapshots, err := f.getSnapshots() - if err != nil { - _ = log.Errorf("snapshot: Failed to get snapshots: %v", err) - return nil, err - } - - var snapMeta []*raft.SnapshotMeta - for _, meta := range snapshots { - snapMeta = append(snapMeta, &meta.SnapshotMeta) - if len(snapMeta) == f.retain { - break - } - } - return snapMeta, nil -} - -// getSnapshots returns all the known snapshots. -func (f *FileSnapshotStore) getSnapshots() ([]*fileSnapshotMeta, error) { - // Get the eligible snapshots - snapshots, err := ioutil.ReadDir(f.path) - if err != nil { - _ = log.Errorf("snapshot: Failed to scan snapshot dir: %v", err) - return nil, err - } - - // Populate the metadata - var snapMeta []*fileSnapshotMeta - for _, snap := range snapshots { - // Ignore any files - if !snap.IsDir() { - continue - } - - // Ignore any temporary snapshots - dirName := snap.Name() - if strings.HasSuffix(dirName, tmpSuffix) { - _ = log.Warningf("snapshot: Found temporary snapshot: %v", dirName) - continue - } - - // Try to read the meta data - meta, err := f.readMeta(dirName) - if err != nil { - _ = log.Warningf("snapshot: Failed to read metadata for %v: %v", dirName, err) - continue - } - - // Append, but only return up to the retain count - snapMeta = append(snapMeta, meta) - } - - // Sort the snapshot, reverse so we get new -> old - sort.Sort(sort.Reverse(snapMetaSlice(snapMeta))) - - return snapMeta, nil -} - -// readMeta is used to read the meta data for a given named backup -func (f *FileSnapshotStore) readMeta(name string) (*fileSnapshotMeta, error) { - // Open the meta file - metaPath := filepath.Join(f.path, name, metaFilePath) - fh, err := os.Open(metaPath) - if err != nil { - return nil, err - } - defer fh.Close() - - // Buffer the file IO - buffered := bufio.NewReader(fh) - - // Read in the JSON - meta := &fileSnapshotMeta{} - dec := json.NewDecoder(buffered) - if err := 
dec.Decode(meta); err != nil { - return nil, err - } - return meta, nil -} - -// Open takes a snapshot ID and returns a ReadCloser for that snapshot. -func (f *FileSnapshotStore) Open(id string) (*raft.SnapshotMeta, io.ReadCloser, error) { - // Get the metadata - meta, err := f.readMeta(id) - if err != nil { - _ = log.Errorf("snapshot: Failed to get meta data to open snapshot: %v", err) - return nil, nil, err - } - - // Open the state file - statePath := filepath.Join(f.path, id, stateFilePath) - fh, err := os.Open(statePath) - if err != nil { - _ = log.Errorf("snapshot: Failed to open state file: %v", err) - return nil, nil, err - } - - // Create a CRC64 hash - stateHash := crc64.New(crc64.MakeTable(crc64.ECMA)) - - // Compute the hash - _, err = io.Copy(stateHash, fh) - if err != nil { - _ = log.Errorf("snapshot: Failed to read state file: %v", err) - fh.Close() - return nil, nil, err - } - - // Verify the hash - computed := stateHash.Sum(nil) - if !bytes.Equal(meta.CRC, computed) { - _ = log.Errorf("snapshot: CRC checksum failed (stored: %v computed: %v)", - meta.CRC, computed) - fh.Close() - return nil, nil, fmt.Errorf("CRC mismatch") - } - - // Seek to the start - if _, err := fh.Seek(0, 0); err != nil { - _ = log.Errorf("snapshot: State file seek failed: %v", err) - fh.Close() - return nil, nil, err - } - - // Return a buffered file - buffered := &bufferedFile{ - bh: bufio.NewReader(fh), - fh: fh, - } - - return &meta.SnapshotMeta, buffered, nil -} - -// ReapSnapshots reaps any snapshots beyond the retain count. 
-func (f *FileSnapshotStore) ReapSnapshots(currentSnapshotMeta *fileSnapshotMeta) error { - - reapSnapshot := func(snapshot *fileSnapshotMeta) error { - path := filepath.Join(f.path, snapshot.ID) - log.Infof("snapshot: reaping snapshot %v", path) - if err := os.RemoveAll(path); err != nil { - _ = log.Errorf("snapshot: Failed to reap snapshot %v: %v", path, err) - return err - } - return nil - } - snapshots, err := f.getSnapshots() - if err != nil { - _ = log.Errorf("snapshot: Failed to get snapshots: %v", err) - return err - } - - deprecatedSnapshotsReaped := false - for _, snapshot := range snapshots { - if snapshot.Term > currentSnapshotMeta.Term || - snapshot.Term == currentSnapshotMeta.Term && snapshot.Index > currentSnapshotMeta.Index { - reapSnapshot(snapshot) - deprecatedSnapshotsReaped = true - } - } - - if deprecatedSnapshotsReaped { - // re-read list, since we've removed files - snapshots, err = f.getSnapshots() - if err != nil { - _ = log.Errorf("snapshot: Failed to get snapshots: %v", err) - return err - } - } - for i := f.retain; i < len(snapshots); i++ { - reapSnapshot(snapshots[i]) - } - return nil -} - -// ID returns the ID of the snapshot, can be used with Open() -// after the snapshot is finalized. -func (s *FileSnapshotSink) ID() string { - return s.meta.ID -} - -// Write is used to append to the state file. We write to the -// buffered IO object to reduce the amount of context switches. -func (s *FileSnapshotSink) Write(b []byte) (int, error) { - return s.buffered.Write(b) -} - -// Close is used to indicate a successful end. 
-func (s *FileSnapshotSink) Close() error { - // Make sure close is idempotent - if s.closed { - return nil - } - s.closed = true - - // Close the open handles - if err := s.finalize(); err != nil { - _ = log.Errorf("snapshot: Failed to finalize snapshot: %v", err) - return err - } - - // Write out the meta data - if err := s.writeMeta(); err != nil { - _ = log.Errorf("snapshot: Failed to write metadata: %v", err) - return err - } - - // Move the directory into place - newPath := strings.TrimSuffix(s.dir, tmpSuffix) - if err := os.Rename(s.dir, newPath); err != nil { - _ = log.Errorf("snapshot: Failed to move snapshot into place: %v", err) - return err - } - - // Reap any old snapshots - if err := s.store.ReapSnapshots(&s.meta); err != nil { - return err - } - - return nil -} - -// Cancel is used to indicate an unsuccessful end. -func (s *FileSnapshotSink) Cancel() error { - // Make sure close is idempotent - if s.closed { - return nil - } - s.closed = true - - // Close the open handles - if err := s.finalize(); err != nil { - _ = log.Errorf("snapshot: Failed to finalize snapshot: %v", err) - return err - } - - // Attempt to remove all artifacts - return os.RemoveAll(s.dir) -} - -// finalize is used to close all of our resources. -func (s *FileSnapshotSink) finalize() error { - // Flush any remaining data - if err := s.buffered.Flush(); err != nil { - return err - } - - // Get the file size - stat, statErr := s.stateFile.Stat() - - // Close the file - if err := s.stateFile.Close(); err != nil { - return err - } - - // Set the file size, check after we close - if statErr != nil { - return statErr - } - s.meta.Size = stat.Size() - - // Set the CRC - s.meta.CRC = s.stateHash.Sum(nil) - return nil -} - -// writeMeta is used to write out the metadata we have. 
-func (s *FileSnapshotSink) writeMeta() error { - // Open the meta file - metaPath := filepath.Join(s.dir, metaFilePath) - fh, err := os.Create(metaPath) - if err != nil { - return err - } - defer fh.Close() - - // Buffer the file IO - buffered := bufio.NewWriter(fh) - defer buffered.Flush() - - // Write out as JSON - enc := json.NewEncoder(buffered) - if err := enc.Encode(&s.meta); err != nil { - return err - } - return nil -} - -// Implement the sort interface for []*fileSnapshotMeta. -func (s snapMetaSlice) Len() int { - return len(s) -} - -func (s snapMetaSlice) Less(i, j int) bool { - if s[i].Term != s[j].Term { - return s[i].Term < s[j].Term - } - if s[i].Index != s[j].Index { - return s[i].Index < s[j].Index - } - return s[i].ID < s[j].ID -} - -func (s snapMetaSlice) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} diff --git a/go/vt/orchestrator/raft/fsm.go b/go/vt/orchestrator/raft/fsm.go deleted file mode 100644 index aac5eb8e293..00000000000 --- a/go/vt/orchestrator/raft/fsm.go +++ /dev/null @@ -1,94 +0,0 @@ -/* - Copyright 2017 Shlomi Noach, GitHub Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package orcraft - -import ( - "encoding/json" - "io" - "strings" - - "vitess.io/vitess/go/vt/orchestrator/external/golib/log" - - "vitess.io/vitess/go/vt/orchestrator/external/raft" -) - -// fsm is a raft finite state machine -type fsm Store - -// Apply applies a Raft log entry to the key-value store. 
-func (f *fsm) Apply(l *raft.Log) interface{} { - var c storeCommand - if err := json.Unmarshal(l.Data, &c); err != nil { - log.Errorf("failed to unmarshal command: %s", err.Error()) - } - - if c.Op == YieldCommand { - toPeer, err := normalizeRaftNode(string(c.Value)) - if err != nil { - return log.Errore(err) - } - return f.yield(toPeer) - } - if c.Op == YieldHintCommand { - hint := string(c.Value) - return f.yieldByHint(hint) - } - log.Debugf("orchestrator/raft: applying command %+v: %s", l.Index, c.Op) - return store.applier.ApplyCommand(c.Op, c.Value) -} - -// yield yields to a suggested peer, or does nothing if this peer IS the suggested peer -func (f *fsm) yield(toPeer string) interface{} { - isThisPeer, err := IsPeer(toPeer) - if err != nil { - return log.Errorf("failed to unmarshal command: %s", err.Error()) - } - if isThisPeer { - log.Debugf("Will not yield to myself") - return nil - } - log.Debugf("Yielding to %s", toPeer) - return Yield() -} - -// yieldByHint yields to a host that contains given hint -func (f *fsm) yieldByHint(hint string) interface{} { - if hint == "" { - log.Debugf("Will not yield by empty hint") - return nil - } - isThisHost := strings.Contains(ThisHostname, hint) - if isThisHost { - log.Debugf("Will not yield to myself") - return nil - } - log.Debugf("Yielding to hinted %s", hint) - return Yield() -} - -// Snapshot returns a snapshot object of freno's state -func (f *fsm) Snapshot() (raft.FSMSnapshot, error) { - snapshot := newFsmSnapshot(f.snapshotCreatorApplier) - return snapshot, nil -} - -// Restore restores freno state -func (f *fsm) Restore(rc io.ReadCloser) error { - defer rc.Close() - - return f.snapshotCreatorApplier.Restore(rc) -} diff --git a/go/vt/orchestrator/raft/fsm_snapshot.go b/go/vt/orchestrator/raft/fsm_snapshot.go deleted file mode 100644 index fcd1e670ff5..00000000000 --- a/go/vt/orchestrator/raft/fsm_snapshot.go +++ /dev/null @@ -1,48 +0,0 @@ -/* - Copyright 2017 Shlomi Noach, GitHub Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package orcraft - -import ( - "vitess.io/vitess/go/vt/orchestrator/external/raft" -) - -// fsmSnapshot handles raft persisting of snapshots -type fsmSnapshot struct { - snapshotCreatorApplier SnapshotCreatorApplier -} - -func newFsmSnapshot(snapshotCreatorApplier SnapshotCreatorApplier) *fsmSnapshot { - return &fsmSnapshot{ - snapshotCreatorApplier: snapshotCreatorApplier, - } -} - -// Persist -func (f *fsmSnapshot) Persist(sink raft.SnapshotSink) error { - data, err := f.snapshotCreatorApplier.GetData() - if err != nil { - return err - } - if _, err := sink.Write(data); err != nil { - return err - } - return sink.Close() -} - -// Release -func (f *fsmSnapshot) Release() { -} diff --git a/go/vt/orchestrator/raft/http_client.go b/go/vt/orchestrator/raft/http_client.go deleted file mode 100644 index 9e69bb86024..00000000000 --- a/go/vt/orchestrator/raft/http_client.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - Copyright 2017 Shlomi Noach, GitHub Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package orcraft - -import ( - "crypto/tls" - "fmt" - "io/ioutil" - "net" - "net/http" - "strings" - "time" - - "vitess.io/vitess/go/vt/orchestrator/config" - "vitess.io/vitess/go/vt/orchestrator/ssl" - - "vitess.io/vitess/go/vt/orchestrator/external/golib/log" -) - -var httpClient *http.Client - -func setupHttpClient() error { - httpTimeout := time.Duration(config.ActiveNodeExpireSeconds) * time.Second - dialTimeout := func(network, addr string) (net.Conn, error) { - return net.DialTimeout(network, addr, httpTimeout) - } - - tlsConfig := &tls.Config{ - InsecureSkipVerify: config.Config.SSLSkipVerify, - } - if config.Config.UseSSL { - caPool, err := ssl.ReadCAFile(config.Config.SSLCAFile) - if err != nil { - return err - } - tlsConfig.RootCAs = caPool - - if config.Config.UseMutualTLS { - var sslPEMPassword []byte - if ssl.IsEncryptedPEM(config.Config.SSLPrivateKeyFile) { - sslPEMPassword = ssl.GetPEMPassword(config.Config.SSLPrivateKeyFile) - } - if err := ssl.AppendKeyPairWithPassword(tlsConfig, config.Config.SSLCertFile, config.Config.SSLPrivateKeyFile, sslPEMPassword); err != nil { - return err - } - } - } - - httpTransport := &http.Transport{ - TLSClientConfig: tlsConfig, - Dial: dialTimeout, - ResponseHeaderTimeout: httpTimeout, - } - httpClient = &http.Client{Transport: httpTransport} - - return nil -} - -func HttpGetLeader(path string) (response []byte, err error) { - leaderURI := LeaderURI.Get() - if leaderURI == "" { - return nil, fmt.Errorf("Raft leader URI unknown") - } - leaderAPI := leaderURI - if config.Config.URLPrefix != "" { - // We know URLPrefix begind with "/" - leaderAPI = fmt.Sprintf("%s%s", leaderAPI, config.Config.URLPrefix) - } - leaderAPI = fmt.Sprintf("%s/api", leaderAPI) - - url := fmt.Sprintf("%s/%s", leaderAPI, path) - - req, err := http.NewRequest("GET", url, nil) - switch strings.ToLower(config.Config.AuthenticationMethod) { 
- case "basic", "multi": - req.SetBasicAuth(config.Config.HTTPAuthUser, config.Config.HTTPAuthPassword) - } - - res, err := httpClient.Do(req) - if err != nil { - return nil, err - } - defer res.Body.Close() - - body, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - - if res.StatusCode != http.StatusOK { - return body, log.Errorf("HttpGetLeader: got %d status on %s", res.StatusCode, url) - } - - return body, nil -} diff --git a/go/vt/orchestrator/raft/raft.go b/go/vt/orchestrator/raft/raft.go index 60c213aa85f..6316cbb2a8c 100644 --- a/go/vt/orchestrator/raft/raft.go +++ b/go/vt/orchestrator/raft/raft.go @@ -17,395 +17,16 @@ package orcraft import ( - "encoding/json" "fmt" - "math/rand" - "net" - "strings" - "sync" - "sync/atomic" - "time" - - "vitess.io/vitess/go/vt/orchestrator/config" - "vitess.io/vitess/go/vt/orchestrator/external/golib/log" - "vitess.io/vitess/go/vt/orchestrator/util" - - "github.com/patrickmn/go-cache" - - "vitess.io/vitess/go/vt/orchestrator/external/raft" -) - -const ( - YieldCommand = "yield" - YieldHintCommand = "yield-hint" -) - -const ( - retainSnapshotCount = 10 - snapshotInterval = 30 * time.Minute - asyncSnapshotTimeframe = 1 * time.Minute - raftTimeout = 10 * time.Second ) var RaftNotRunning error = fmt.Errorf("raft is not configured/running") -var store *Store -var raftSetupComplete int64 -var ThisHostname string -var healthRequestAuthenticationTokenCache = cache.New(config.RaftHealthPollSeconds*2*time.Second, time.Second) -var healthReportsCache = cache.New(config.RaftHealthPollSeconds*2*time.Second, time.Second) -var healthRequestReportCache = cache.New(time.Second, time.Second) - -var fatalRaftErrorChan = make(chan error) - -type leaderURI struct { - uri string - sync.Mutex -} - -var LeaderURI leaderURI -var thisLeaderURI string // How this node identifies itself assuming it is the leader - -func (luri *leaderURI) Get() string { - luri.Lock() - defer luri.Unlock() - return luri.uri -} - -func 
(luri *leaderURI) Set(uri string) { - luri.Lock() - defer luri.Unlock() - luri.uri = uri -} - -func (luri *leaderURI) IsThisLeaderURI() bool { - luri.Lock() - defer luri.Unlock() - return luri.uri == thisLeaderURI -} - -func IsRaftEnabled() bool { - return store != nil -} - -func FatalRaftError(err error) error { - if err != nil { - go func() { fatalRaftErrorChan <- err }() - } - return err -} - -func computeLeaderURI() (uri string, err error) { - if config.Config.HTTPAdvertise != "" { - // Explicitly given - return config.Config.HTTPAdvertise, nil - } - // Not explicitly given. Let's heuristically compute using RaftAdvertise - scheme := "http" - if config.Config.UseSSL { - scheme = "https" - } - - hostname := strings.Split(config.Config.RaftAdvertise, ":")[0] - listenTokens := strings.Split(config.Config.ListenAddress, ":") - if len(listenTokens) < 2 { - return uri, fmt.Errorf("computeLeaderURI: cannot determine listen port out of config.Config.ListenAddress: %+v", config.Config.ListenAddress) - } - port := listenTokens[1] - - uri = fmt.Sprintf("%s://%s:%s", scheme, hostname, port) - return uri, nil -} - -// Setup creates the entire raft shananga. Creates the store, associates with the throttler, -// contacts peer nodes, and subscribes to leader changes to export them. 
-func Setup(applier CommandApplier, snapshotCreatorApplier SnapshotCreatorApplier, thisHostname string) error { - log.Debugf("Setting up raft") - ThisHostname = thisHostname - raftBind, err := normalizeRaftNode(config.Config.RaftBind) - if err != nil { - return err - } - raftAdvertise, err := normalizeRaftNode(config.Config.RaftAdvertise) - if err != nil { - return err - } - store = NewStore(config.Config.RaftDataDir, raftBind, raftAdvertise, applier, snapshotCreatorApplier) - peerNodes := []string{} - for _, raftNode := range config.Config.RaftNodes { - peerNode, err := normalizeRaftNode(raftNode) - if err != nil { - return err - } - peerNodes = append(peerNodes, peerNode) - } - if len(peerNodes) == 1 && peerNodes[0] == raftAdvertise { - // To run in single node setup we will either specify an empty RaftNodes, or a single - // raft node that is exactly RaftAdvertise - peerNodes = []string{} - } - if err := store.Open(peerNodes); err != nil { - return log.Errorf("failed to open raft store: %s", err.Error()) - } - - thisLeaderURI, err = computeLeaderURI() - if err != nil { - return FatalRaftError(err) - } - - leaderCh := store.raft.LeaderCh() - go func() { - for isTurnedLeader := range leaderCh { - if isTurnedLeader { - PublishCommand("leader-uri", thisLeaderURI) - } - } - }() - - setupHttpClient() - - atomic.StoreInt64(&raftSetupComplete, 1) - return nil -} - -func isRaftSetupComplete() bool { - return atomic.LoadInt64(&raftSetupComplete) == 1 -} - -// getRaft is a convenience method -func getRaft() *raft.Raft { - return store.raft -} - -func normalizeRaftHostnameIP(host string) (string, error) { - if ip := net.ParseIP(host); ip != nil { - // this is a valid IP address. - return host, nil - } - ips, err := net.LookupIP(host) - if err != nil { - // resolve failed. But we don't want to fail the entire operation for that - log.Errore(err) - return host, nil - } - // resolve success! 
- for _, ip := range ips { - return ip.String(), nil - } - return host, fmt.Errorf("%+v resolved but no IP found", host) -} - -// normalizeRaftNode attempts to make sure there's a port to the given node. -// It consults the DefaultRaftPort when there isn't -func normalizeRaftNode(node string) (string, error) { - hostPort := strings.Split(node, ":") - host, err := normalizeRaftHostnameIP(hostPort[0]) - if err != nil { - return host, err - } - if len(hostPort) > 1 { - return fmt.Sprintf("%s:%s", host, hostPort[1]), nil - } else if config.Config.DefaultRaftPort != 0 { - // No port specified, add one - return fmt.Sprintf("%s:%d", host, config.Config.DefaultRaftPort), nil - } else { - return host, nil - } -} - -// IsPartOfQuorum returns `true` when this node is part of the raft quorum, meaning its -// data and opinion are trustworthy. -// Comapre that to a node which has left (or has not yet joined) the quorum: it has stale data. -func IsPartOfQuorum() bool { - if GetLeader() == "" { - return false - } - state := GetState() - return state == raft.Leader || state == raft.Follower -} - -// IsLeader tells if this node is the current raft leader -func IsLeader() bool { - return GetState() == raft.Leader -} - -// GetLeader returns identity of raft leader -func GetLeader() string { - if !isRaftSetupComplete() { - return "" - } - return getRaft().Leader() -} - -func QuorumSize() (int, error) { - peers, err := GetPeers() - if err != nil { - return 0, err - } - return len(peers)/2 + 1, nil -} - -// GetState returns current raft state -func GetState() raft.RaftState { - if !isRaftSetupComplete() { - return raft.Candidate - } - return getRaft().State() -} - -// IsHealthy checks whether this node is healthy in the raft group -func IsHealthy() bool { - if !isRaftSetupComplete() { - return false - } - state := GetState() - return state == raft.Leader || state == raft.Follower -} - -func Snapshot() error { - future := getRaft().Snapshot() - return future.Error() -} - -func 
AsyncSnapshot() error { - asyncDuration := (time.Duration(rand.Int63()) % asyncSnapshotTimeframe) - go time.AfterFunc(asyncDuration, func() { - Snapshot() - }) - return nil -} - -func StepDown() { - getRaft().StepDown() -} - -func Yield() error { - if !IsRaftEnabled() { - return RaftNotRunning - } - return getRaft().Yield() -} - -func GetRaftBind() string { - return store.raftBind -} - -func GetRaftAdvertise() string { - return store.raftAdvertise -} - -func GetPeers() ([]string, error) { - if !IsRaftEnabled() { - return []string{}, RaftNotRunning - } - return store.peerStore.Peers() -} - -func IsPeer(peer string) (bool, error) { - if !IsRaftEnabled() { - return false, RaftNotRunning - } - return (store.raftBind == peer), nil -} // PublishCommand will distribute a command across the group func PublishCommand(op string, value interface{}) (response interface{}, err error) { - if !IsRaftEnabled() { - return nil, RaftNotRunning - } - b, err := json.Marshal(value) - if err != nil { - return nil, err - } - return store.genericCommand(op, b) -} - -func AddPeer(addr string) (response interface{}, err error) { - addr, err = normalizeRaftNode(addr) - if err != nil { - return "", err - } - err = store.AddPeer(addr) - return addr, err + return nil, RaftNotRunning } -func RemovePeer(addr string) (response interface{}, err error) { - addr, err = normalizeRaftNode(addr) - if err != nil { - return "", err - } - err = store.RemovePeer(addr) - return addr, err -} - -func PublishYield(toPeer string) (response interface{}, err error) { - toPeer, err = normalizeRaftNode(toPeer) - if err != nil { - return "", err - } - return store.genericCommand(YieldCommand, []byte(toPeer)) -} - -func PublishYieldHostnameHint(hostnameHint string) (response interface{}, err error) { - return store.genericCommand(YieldHintCommand, []byte(hostnameHint)) -} - -// ReportToRaftLeader tells the leader this raft node is raft-healthy -func ReportToRaftLeader(authenticationToken string) (err error) { - if err 
:= healthRequestReportCache.Add(config.Config.RaftBind, true, cache.DefaultExpiration); err != nil { - // Recently reported - return nil - } - path := fmt.Sprintf("raft-follower-health-report/%s/%s/%s", authenticationToken, config.Config.RaftBind, config.Config.RaftAdvertise) - _, err = HttpGetLeader(path) - return err -} - -// OnHealthReport acts on a raft-member reporting its health -func OnHealthReport(authenticationToken, raftBind, raftAdvertise string) (err error) { - if _, found := healthRequestAuthenticationTokenCache.Get(authenticationToken); !found { - return log.Errorf("Raft health report: unknown token %s", authenticationToken) - } - healthReportsCache.Set(raftAdvertise, true, cache.DefaultExpiration) - return nil -} - -func HealthyMembers() (advertised []string) { - items := healthReportsCache.Items() - for raftAdvertised := range items { - advertised = append(advertised, raftAdvertised) - } - return advertised -} - -// Monitor is a utility function to routinely observe leadership state. -// It doesn't actually do much; merely takes notes. 
-//nolint SA1015: using time.Tick leaks the underlying ticker -func Monitor() { - t := time.Tick(5 * time.Second) - heartbeat := time.Tick(1 * time.Minute) - followerHealthTick := time.Tick(config.RaftHealthPollSeconds * time.Second) - for { - select { - case <-t: - leaderHint := GetLeader() - - if IsLeader() { - leaderHint = fmt.Sprintf("%s (this host)", leaderHint) - } - log.Debugf("raft leader is %s; state: %s", leaderHint, GetState().String()) - - case <-heartbeat: - if IsLeader() { - go PublishCommand("heartbeat", "") - } - case <-followerHealthTick: - if IsLeader() { - athenticationToken := util.NewToken().Short() - healthRequestAuthenticationTokenCache.Set(athenticationToken, true, cache.DefaultExpiration) - go PublishCommand("request-health-report", athenticationToken) - } - case err := <-fatalRaftErrorChan: - log.Fatale(err) - } - } +func IsRaftEnabled() bool { + return false } diff --git a/go/vt/orchestrator/raft/rel_store.go b/go/vt/orchestrator/raft/rel_store.go deleted file mode 100644 index 0be7e27f1fb..00000000000 --- a/go/vt/orchestrator/raft/rel_store.go +++ /dev/null @@ -1,249 +0,0 @@ -/* - Copyright 2017 Shlomi Noach, GitHub Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package orcraft - -import ( - "database/sql" - "encoding/binary" - "path/filepath" - "sync" - - "vitess.io/vitess/go/vt/orchestrator/external/golib/log" - "vitess.io/vitess/go/vt/orchestrator/external/golib/sqlutils" - - "vitess.io/vitess/go/vt/orchestrator/external/raft" -) - -const raftStoreFile = "raft_store.db" - -var createQueries = []string{ - ` - CREATE TABLE IF NOT EXISTS raft_log ( - log_index integer, - term bigint not null, - log_type int not null, - data blob not null, - PRIMARY KEY (log_index) - ) - `, - ` - CREATE TABLE IF NOT EXISTS raft_store ( - store_id integer, - store_key varbinary(512) not null, - store_value blob not null, - PRIMARY KEY (store_id) - ) - `, - ` - CREATE INDEX IF NOT EXISTS store_key_idx_raft_store ON raft_store (store_key) - `, -} - -var dbMutex sync.Mutex - -// RelationalStoreimplements: -// - hashicorp/raft.StableStore -// - hashicorp/log.LogStore -type RelationalStore struct { - dataDir string - backend *sql.DB -} - -func NewRelationalStore(dataDir string) *RelationalStore { - return &RelationalStore{ - dataDir: dataDir, - } -} - -func (relStore *RelationalStore) openDB() (*sql.DB, error) { - dbMutex.Lock() - defer dbMutex.Unlock() - - if relStore.backend == nil { - relStoreFile := filepath.Join(relStore.dataDir, raftStoreFile) - sqliteDB, _, err := sqlutils.GetSQLiteDB(relStoreFile) - if err != nil { - return nil, err - } - sqliteDB.SetMaxOpenConns(1) - sqliteDB.SetMaxIdleConns(1) - for _, query := range createQueries { - if _, err := sqliteDB.Exec(sqlutils.ToSqlite3Dialect(query)); err != nil { - return nil, err - } - } - relStore.backend = sqliteDB - log.Infof("raft: store initialized at %+v", relStoreFile) - } - return relStore.backend, nil -} - -func (relStore *RelationalStore) Set(key []byte, val []byte) error { - db, err := relStore.openDB() - if err != nil { - return err - } - tx, err := db.Begin() - if err != nil { - return err - } - stmt, err := tx.Prepare(`delete from raft_store where store_key = ?`) - if 
err != nil { - return err - } - _, err = stmt.Exec(key) - if err != nil { - tx.Rollback() - return err - } - stmt, err = tx.Prepare(`insert into raft_store (store_key, store_value) values (?, ?)`) - if err != nil { - tx.Rollback() - return err - } - _, err = stmt.Exec(key, val) - if err != nil { - tx.Rollback() - return err - } - err = tx.Commit() - - return err -} - -// Get returns the value for key, or an empty byte slice if key was not found. -func (relStore *RelationalStore) Get(key []byte) (val []byte, err error) { - db, err := relStore.openDB() - if err != nil { - return val, err - } - err = db.QueryRow("select min(store_value) from raft_store where store_key = ?", key).Scan(&val) - return val, err -} - -func (relStore *RelationalStore) SetUint64(key []byte, val uint64) error { - b := make([]byte, 8) - binary.LittleEndian.PutUint64(b, val) - - return relStore.Set(key, b) -} - -// GetUint64 returns the uint64 value for key, or 0 if key was not found. -func (relStore *RelationalStore) GetUint64(key []byte) (uint64, error) { - b, err := relStore.Get(key) - if err != nil { - return 0, err - } - if len(b) == 0 { - // Not found - return 0, nil - } - i := binary.LittleEndian.Uint64(b) - return i, nil -} - -func (relStore *RelationalStore) FirstIndex() (idx uint64, err error) { - db, err := relStore.openDB() - if err != nil { - return idx, err - } - err = db.QueryRow("select ifnull(min(log_index), 0) from raft_log").Scan(&idx) - return idx, err -} - -// LastIndex returns the last index written. 0 for no entries. -func (relStore *RelationalStore) LastIndex() (idx uint64, err error) { - db, err := relStore.openDB() - if err != nil { - return idx, err - } - err = db.QueryRow("select ifnull(max(log_index), 0) from raft_log").Scan(&idx) - return idx, err -} - -// GetLog gets a log entry at a given index. 
-func (relStore *RelationalStore) GetLog(index uint64, log *raft.Log) error { - db, err := relStore.openDB() - if err != nil { - return err - } - err = db.QueryRow(` - select log_index, term, log_type, data - from raft_log - where log_index = ? - `, index).Scan(&log.Index, &log.Term, &log.Type, &log.Data) - if err == sql.ErrNoRows { - return raft.ErrLogNotFound - } - return err -} - -// StoreLog stores a log entry. -func (relStore *RelationalStore) StoreLog(log *raft.Log) error { - return relStore.StoreLogs([]*raft.Log{log}) -} - -// StoreLogs stores multiple log entries. -func (relStore *RelationalStore) StoreLogs(logs []*raft.Log) error { - db, err := relStore.openDB() - if err != nil { - return err - } - tx, err := db.Begin() - if err != nil { - return err - } - stmt, err := tx.Prepare(` - replace into raft_log ( - log_index, term, log_type, data - ) values ( - ?, ?, ?, ? - )`) - if err != nil { - return err - } - for _, raftLog := range logs { - _, err = stmt.Exec(raftLog.Index, raftLog.Term, int(raftLog.Type), raftLog.Data) - if err != nil { - tx.Rollback() - return err - } - } - return tx.Commit() -} - -// DeleteRange deletes a range of log entries. The range is inclusive. -func (relStore *RelationalStore) DeleteRange(min, max uint64) error { - db, err := relStore.openDB() - if err != nil { - return err - } - _, err = db.Exec("delete from raft_log where log_index >= ? and log_index <= ?", min, max) - return err -} - -func (relStore *RelationalStore) DeleteAll() error { - firstIndex, err := relStore.FirstIndex() - if err != nil { - return err - } - lastIndex, err := relStore.LastIndex() - if err != nil { - return err - } - return relStore.DeleteRange(firstIndex, lastIndex) -} diff --git a/go/vt/orchestrator/raft/snapshot.go b/go/vt/orchestrator/raft/snapshot.go deleted file mode 100644 index 62a87fc467e..00000000000 --- a/go/vt/orchestrator/raft/snapshot.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - Copyright 2017 Shlomi Noach, GitHub Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package orcraft - -import ( - "io" -) - -type SnapshotCreatorApplier interface { - GetData() (data []byte, err error) - Restore(rc io.ReadCloser) error -} diff --git a/go/vt/orchestrator/raft/store.go b/go/vt/orchestrator/raft/store.go deleted file mode 100644 index 5a8c1ca438b..00000000000 --- a/go/vt/orchestrator/raft/store.go +++ /dev/null @@ -1,168 +0,0 @@ -package orcraft - -import ( - "encoding/json" - "fmt" - "net" - "os" - "strings" - "time" - - "vitess.io/vitess/go/vt/orchestrator/external/golib/log" - - "vitess.io/vitess/go/vt/orchestrator/external/raft" -) - -type Store struct { - raftDir string - raftBind string - raftAdvertise string - - raft *raft.Raft // The consensus mechanism - peerStore raft.PeerStore - - applier CommandApplier - snapshotCreatorApplier SnapshotCreatorApplier -} - -type storeCommand struct { - Op string `json:"op,omitempty"` - Value []byte `json:"value,omitempty"` -} - -// NewStore inits and returns a new store -func NewStore(raftDir string, raftBind string, raftAdvertise string, applier CommandApplier, snapshotCreatorApplier SnapshotCreatorApplier) *Store { - return &Store{ - raftDir: raftDir, - raftBind: raftBind, - raftAdvertise: raftAdvertise, - applier: applier, - snapshotCreatorApplier: snapshotCreatorApplier, - } -} - -// Open opens the store. If enableSingle is set, and there are no existing peers, -// then this node becomes the first node, and therefore leader, of the cluster. 
-func (store *Store) Open(peerNodes []string) error { - // Setup Raft configuration. - config := raft.DefaultConfig() - config.SnapshotThreshold = 1 - config.SnapshotInterval = snapshotInterval - config.ShutdownOnRemove = false - - // Setup Raft communication. - advertise, err := net.ResolveTCPAddr("tcp", store.raftAdvertise) - if err != nil { - return err - } - log.Debugf("raft: advertise=%+v", advertise) - - transport, err := raft.NewTCPTransport(store.raftBind, advertise, 3, 10*time.Second, os.Stderr) - if err != nil { - return err - } - log.Debugf("raft: transport=%+v", transport) - - peers := make([]string, 0, 10) - for _, peerNode := range peerNodes { - peerNode = strings.TrimSpace(peerNode) - peers = raft.AddUniquePeer(peers, peerNode) - } - log.Debugf("raft: peers=%+v", peers) - - // Create peer storage. - peerStore := &raft.StaticPeers{} - if err := peerStore.SetPeers(peers); err != nil { - return err - } - - // Allow the node to enter single-mode, potentially electing itself, if - // explicitly enabled and there is only 1 node in the cluster already. - if len(peerNodes) == 0 && len(peers) <= 1 { - log.Infof("enabling single-node mode") - config.EnableSingleNode = true - config.DisableBootstrapAfterElect = false - } - - if _, err := os.Stat(store.raftDir); err != nil { - if os.IsNotExist(err) { - // path does not exist - log.Debugf("raft: creating data dir %s", store.raftDir) - if err := os.MkdirAll(store.raftDir, os.ModePerm); err != nil { - return log.Errorf("RaftDataDir (%s) does not exist and cannot be created: %+v", store.raftDir, err) - } - } else { - // Other error - return log.Errorf("RaftDataDir (%s) error: %+v", store.raftDir, err) - } - } - - // Create the snapshot store. This allows the Raft to truncate the log. - snapshots, err := NewFileSnapshotStore(store.raftDir, retainSnapshotCount, os.Stderr) - if err != nil { - return log.Errorf("file snapshot store: %s", err) - } - - // Create the log store and stable store. 
- logStore := NewRelationalStore(store.raftDir) - log.Debugf("raft: logStore=%+v", logStore) - - // Instantiate the Raft systems. - if store.raft, err = raft.NewRaft(config, (*fsm)(store), logStore, logStore, snapshots, peerStore, transport); err != nil { - return fmt.Errorf("error creating new raft: %s", err) - } - store.peerStore = peerStore - log.Infof("new raft created") - - return nil -} - -// AddPeer adds a node, located at addr, to this store. The node must be ready to -// respond to Raft communications at that address. -func (store *Store) AddPeer(addr string) error { - log.Infof("received join request for remote node %s", addr) - - f := store.raft.AddPeer(addr) - if f.Error() != nil { - return f.Error() - } - log.Infof("node at %s joined successfully", addr) - return nil -} - -// RemovePeer removes a node from this raft setup -func (store *Store) RemovePeer(addr string) error { - log.Infof("received remove request for remote node %s", addr) - - f := store.raft.RemovePeer(addr) - if f.Error() != nil { - return f.Error() - } - log.Infof("node at %s removed successfully", addr) - return nil -} - -// genericCommand requests consensus for applying a single command. -// This is an internal orchestrator implementation -func (store *Store) genericCommand(op string, bytes []byte) (response interface{}, err error) { - if store.raft.State() != raft.Leader { - return nil, fmt.Errorf("not leader") - } - - b, err := json.Marshal(&storeCommand{Op: op, Value: bytes}) - if err != nil { - return nil, err - } - - f := store.raft.Apply(b, raftTimeout) - if err = f.Error(); err != nil { - return nil, err - } - r := f.Response() - if err, ok := r.(error); ok && err != nil { - // This code checks whether the response itself was an error object. If so, it should - // indicate failure of the operation. 
- return r, err - } - return r, nil -} diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go index 976419391fe..d5b740efead 100644 --- a/go/vt/proto/vtctldata/vtctldata.pb.go +++ b/go/vt/proto/vtctldata/vtctldata.pb.go @@ -819,6 +819,140 @@ func (*ApplyRoutingRulesResponse) Descriptor() ([]byte, []int) { return file_vtctldata_proto_rawDescGZIP(), []int{12} } +type ApplyVSchemaRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + SkipRebuild bool `protobuf:"varint,2,opt,name=skip_rebuild,json=skipRebuild,proto3" json:"skip_rebuild,omitempty"` + DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` + Cells []string `protobuf:"bytes,4,rep,name=cells,proto3" json:"cells,omitempty"` + VSchema *vschema.Keyspace `protobuf:"bytes,5,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` + Sql string `protobuf:"bytes,6,opt,name=sql,proto3" json:"sql,omitempty"` +} + +func (x *ApplyVSchemaRequest) Reset() { + *x = ApplyVSchemaRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyVSchemaRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyVSchemaRequest) ProtoMessage() {} + +func (x *ApplyVSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyVSchemaRequest.ProtoReflect.Descriptor instead. 
+func (*ApplyVSchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{13} +} + +func (x *ApplyVSchemaRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ApplyVSchemaRequest) GetSkipRebuild() bool { + if x != nil { + return x.SkipRebuild + } + return false +} + +func (x *ApplyVSchemaRequest) GetDryRun() bool { + if x != nil { + return x.DryRun + } + return false +} + +func (x *ApplyVSchemaRequest) GetCells() []string { + if x != nil { + return x.Cells + } + return nil +} + +func (x *ApplyVSchemaRequest) GetVSchema() *vschema.Keyspace { + if x != nil { + return x.VSchema + } + return nil +} + +func (x *ApplyVSchemaRequest) GetSql() string { + if x != nil { + return x.Sql + } + return "" +} + +type ApplyVSchemaResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VSchema *vschema.Keyspace `protobuf:"bytes,1,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` +} + +func (x *ApplyVSchemaResponse) Reset() { + *x = ApplyVSchemaResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyVSchemaResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyVSchemaResponse) ProtoMessage() {} + +func (x *ApplyVSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyVSchemaResponse.ProtoReflect.Descriptor instead. 
+func (*ApplyVSchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{14} +} + +func (x *ApplyVSchemaResponse) GetVSchema() *vschema.Keyspace { + if x != nil { + return x.VSchema + } + return nil +} + type ChangeTabletTypeRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -832,7 +966,7 @@ type ChangeTabletTypeRequest struct { func (x *ChangeTabletTypeRequest) Reset() { *x = ChangeTabletTypeRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[13] + mi := &file_vtctldata_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -845,7 +979,7 @@ func (x *ChangeTabletTypeRequest) String() string { func (*ChangeTabletTypeRequest) ProtoMessage() {} func (x *ChangeTabletTypeRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[13] + mi := &file_vtctldata_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -858,7 +992,7 @@ func (x *ChangeTabletTypeRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ChangeTabletTypeRequest.ProtoReflect.Descriptor instead. 
func (*ChangeTabletTypeRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{13} + return file_vtctldata_proto_rawDescGZIP(), []int{15} } func (x *ChangeTabletTypeRequest) GetTabletAlias() *topodata.TabletAlias { @@ -895,7 +1029,7 @@ type ChangeTabletTypeResponse struct { func (x *ChangeTabletTypeResponse) Reset() { *x = ChangeTabletTypeResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[14] + mi := &file_vtctldata_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -908,7 +1042,7 @@ func (x *ChangeTabletTypeResponse) String() string { func (*ChangeTabletTypeResponse) ProtoMessage() {} func (x *ChangeTabletTypeResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[14] + mi := &file_vtctldata_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -921,7 +1055,7 @@ func (x *ChangeTabletTypeResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ChangeTabletTypeResponse.ProtoReflect.Descriptor instead. 
func (*ChangeTabletTypeResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{14} + return file_vtctldata_proto_rawDescGZIP(), []int{16} } func (x *ChangeTabletTypeResponse) GetBeforeTablet() *topodata.Tablet { @@ -977,7 +1111,7 @@ type CreateKeyspaceRequest struct { func (x *CreateKeyspaceRequest) Reset() { *x = CreateKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[15] + mi := &file_vtctldata_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -990,7 +1124,7 @@ func (x *CreateKeyspaceRequest) String() string { func (*CreateKeyspaceRequest) ProtoMessage() {} func (x *CreateKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[15] + mi := &file_vtctldata_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1003,7 +1137,7 @@ func (x *CreateKeyspaceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateKeyspaceRequest.ProtoReflect.Descriptor instead. 
func (*CreateKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{15} + return file_vtctldata_proto_rawDescGZIP(), []int{17} } func (x *CreateKeyspaceRequest) GetName() string { @@ -1081,7 +1215,7 @@ type CreateKeyspaceResponse struct { func (x *CreateKeyspaceResponse) Reset() { *x = CreateKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[16] + mi := &file_vtctldata_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1094,7 +1228,7 @@ func (x *CreateKeyspaceResponse) String() string { func (*CreateKeyspaceResponse) ProtoMessage() {} func (x *CreateKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[16] + mi := &file_vtctldata_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1107,7 +1241,7 @@ func (x *CreateKeyspaceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateKeyspaceResponse.ProtoReflect.Descriptor instead. 
func (*CreateKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{16} + return file_vtctldata_proto_rawDescGZIP(), []int{18} } func (x *CreateKeyspaceResponse) GetKeyspace() *Keyspace { @@ -1137,7 +1271,7 @@ type CreateShardRequest struct { func (x *CreateShardRequest) Reset() { *x = CreateShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[17] + mi := &file_vtctldata_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1150,7 +1284,7 @@ func (x *CreateShardRequest) String() string { func (*CreateShardRequest) ProtoMessage() {} func (x *CreateShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[17] + mi := &file_vtctldata_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1163,7 +1297,7 @@ func (x *CreateShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateShardRequest.ProtoReflect.Descriptor instead. 
func (*CreateShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{17} + return file_vtctldata_proto_rawDescGZIP(), []int{19} } func (x *CreateShardRequest) GetKeyspace() string { @@ -1212,7 +1346,7 @@ type CreateShardResponse struct { func (x *CreateShardResponse) Reset() { *x = CreateShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[18] + mi := &file_vtctldata_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1225,7 +1359,7 @@ func (x *CreateShardResponse) String() string { func (*CreateShardResponse) ProtoMessage() {} func (x *CreateShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[18] + mi := &file_vtctldata_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1238,7 +1372,7 @@ func (x *CreateShardResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateShardResponse.ProtoReflect.Descriptor instead. 
func (*CreateShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{18} + return file_vtctldata_proto_rawDescGZIP(), []int{20} } func (x *CreateShardResponse) GetKeyspace() *Keyspace { @@ -1274,7 +1408,7 @@ type DeleteCellInfoRequest struct { func (x *DeleteCellInfoRequest) Reset() { *x = DeleteCellInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[19] + mi := &file_vtctldata_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1287,7 +1421,7 @@ func (x *DeleteCellInfoRequest) String() string { func (*DeleteCellInfoRequest) ProtoMessage() {} func (x *DeleteCellInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[19] + mi := &file_vtctldata_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1300,7 +1434,7 @@ func (x *DeleteCellInfoRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteCellInfoRequest.ProtoReflect.Descriptor instead. 
func (*DeleteCellInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{19} + return file_vtctldata_proto_rawDescGZIP(), []int{21} } func (x *DeleteCellInfoRequest) GetName() string { @@ -1326,7 +1460,7 @@ type DeleteCellInfoResponse struct { func (x *DeleteCellInfoResponse) Reset() { *x = DeleteCellInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[20] + mi := &file_vtctldata_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1339,7 +1473,7 @@ func (x *DeleteCellInfoResponse) String() string { func (*DeleteCellInfoResponse) ProtoMessage() {} func (x *DeleteCellInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[20] + mi := &file_vtctldata_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1352,7 +1486,7 @@ func (x *DeleteCellInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteCellInfoResponse.ProtoReflect.Descriptor instead. 
func (*DeleteCellInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{20} + return file_vtctldata_proto_rawDescGZIP(), []int{22} } type DeleteCellsAliasRequest struct { @@ -1366,7 +1500,7 @@ type DeleteCellsAliasRequest struct { func (x *DeleteCellsAliasRequest) Reset() { *x = DeleteCellsAliasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[21] + mi := &file_vtctldata_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1379,7 +1513,7 @@ func (x *DeleteCellsAliasRequest) String() string { func (*DeleteCellsAliasRequest) ProtoMessage() {} func (x *DeleteCellsAliasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[21] + mi := &file_vtctldata_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1392,7 +1526,7 @@ func (x *DeleteCellsAliasRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteCellsAliasRequest.ProtoReflect.Descriptor instead. 
func (*DeleteCellsAliasRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{21} + return file_vtctldata_proto_rawDescGZIP(), []int{23} } func (x *DeleteCellsAliasRequest) GetName() string { @@ -1411,7 +1545,7 @@ type DeleteCellsAliasResponse struct { func (x *DeleteCellsAliasResponse) Reset() { *x = DeleteCellsAliasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[22] + mi := &file_vtctldata_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1424,7 +1558,7 @@ func (x *DeleteCellsAliasResponse) String() string { func (*DeleteCellsAliasResponse) ProtoMessage() {} func (x *DeleteCellsAliasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[22] + mi := &file_vtctldata_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1437,7 +1571,7 @@ func (x *DeleteCellsAliasResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteCellsAliasResponse.ProtoReflect.Descriptor instead. 
func (*DeleteCellsAliasResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{22} + return file_vtctldata_proto_rawDescGZIP(), []int{24} } type DeleteKeyspaceRequest struct { @@ -1456,7 +1590,7 @@ type DeleteKeyspaceRequest struct { func (x *DeleteKeyspaceRequest) Reset() { *x = DeleteKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[23] + mi := &file_vtctldata_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1469,7 +1603,7 @@ func (x *DeleteKeyspaceRequest) String() string { func (*DeleteKeyspaceRequest) ProtoMessage() {} func (x *DeleteKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[23] + mi := &file_vtctldata_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1482,7 +1616,7 @@ func (x *DeleteKeyspaceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteKeyspaceRequest.ProtoReflect.Descriptor instead. 
func (*DeleteKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{23} + return file_vtctldata_proto_rawDescGZIP(), []int{25} } func (x *DeleteKeyspaceRequest) GetKeyspace() string { @@ -1508,7 +1642,7 @@ type DeleteKeyspaceResponse struct { func (x *DeleteKeyspaceResponse) Reset() { *x = DeleteKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[24] + mi := &file_vtctldata_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1521,7 +1655,7 @@ func (x *DeleteKeyspaceResponse) String() string { func (*DeleteKeyspaceResponse) ProtoMessage() {} func (x *DeleteKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[24] + mi := &file_vtctldata_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1534,7 +1668,7 @@ func (x *DeleteKeyspaceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteKeyspaceResponse.ProtoReflect.Descriptor instead. 
func (*DeleteKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{24} + return file_vtctldata_proto_rawDescGZIP(), []int{26} } type DeleteShardsRequest struct { @@ -1557,7 +1691,7 @@ type DeleteShardsRequest struct { func (x *DeleteShardsRequest) Reset() { *x = DeleteShardsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[25] + mi := &file_vtctldata_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1570,7 +1704,7 @@ func (x *DeleteShardsRequest) String() string { func (*DeleteShardsRequest) ProtoMessage() {} func (x *DeleteShardsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[25] + mi := &file_vtctldata_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1583,7 +1717,7 @@ func (x *DeleteShardsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteShardsRequest.ProtoReflect.Descriptor instead. 
func (*DeleteShardsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{25} + return file_vtctldata_proto_rawDescGZIP(), []int{27} } func (x *DeleteShardsRequest) GetShards() []*Shard { @@ -1616,7 +1750,7 @@ type DeleteShardsResponse struct { func (x *DeleteShardsResponse) Reset() { *x = DeleteShardsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[26] + mi := &file_vtctldata_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1629,7 +1763,7 @@ func (x *DeleteShardsResponse) String() string { func (*DeleteShardsResponse) ProtoMessage() {} func (x *DeleteShardsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[26] + mi := &file_vtctldata_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1642,7 +1776,7 @@ func (x *DeleteShardsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteShardsResponse.ProtoReflect.Descriptor instead. 
func (*DeleteShardsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{26} + return file_vtctldata_proto_rawDescGZIP(), []int{28} } type DeleteTabletsRequest struct { @@ -1660,7 +1794,7 @@ type DeleteTabletsRequest struct { func (x *DeleteTabletsRequest) Reset() { *x = DeleteTabletsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[27] + mi := &file_vtctldata_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1673,7 +1807,7 @@ func (x *DeleteTabletsRequest) String() string { func (*DeleteTabletsRequest) ProtoMessage() {} func (x *DeleteTabletsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[27] + mi := &file_vtctldata_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1686,7 +1820,7 @@ func (x *DeleteTabletsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteTabletsRequest.ProtoReflect.Descriptor instead. 
func (*DeleteTabletsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{27} + return file_vtctldata_proto_rawDescGZIP(), []int{29} } func (x *DeleteTabletsRequest) GetTabletAliases() []*topodata.TabletAlias { @@ -1712,7 +1846,7 @@ type DeleteTabletsResponse struct { func (x *DeleteTabletsResponse) Reset() { *x = DeleteTabletsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[28] + mi := &file_vtctldata_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1725,7 +1859,7 @@ func (x *DeleteTabletsResponse) String() string { func (*DeleteTabletsResponse) ProtoMessage() {} func (x *DeleteTabletsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[28] + mi := &file_vtctldata_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1738,7 +1872,7 @@ func (x *DeleteTabletsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteTabletsResponse.ProtoReflect.Descriptor instead. 
func (*DeleteTabletsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{28} + return file_vtctldata_proto_rawDescGZIP(), []int{30} } type EmergencyReparentShardRequest struct { @@ -1766,7 +1900,7 @@ type EmergencyReparentShardRequest struct { func (x *EmergencyReparentShardRequest) Reset() { *x = EmergencyReparentShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[29] + mi := &file_vtctldata_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1779,7 +1913,7 @@ func (x *EmergencyReparentShardRequest) String() string { func (*EmergencyReparentShardRequest) ProtoMessage() {} func (x *EmergencyReparentShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[29] + mi := &file_vtctldata_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1792,7 +1926,7 @@ func (x *EmergencyReparentShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use EmergencyReparentShardRequest.ProtoReflect.Descriptor instead. 
func (*EmergencyReparentShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{29} + return file_vtctldata_proto_rawDescGZIP(), []int{31} } func (x *EmergencyReparentShardRequest) GetKeyspace() string { @@ -1850,7 +1984,7 @@ type EmergencyReparentShardResponse struct { func (x *EmergencyReparentShardResponse) Reset() { *x = EmergencyReparentShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[30] + mi := &file_vtctldata_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1863,7 +1997,7 @@ func (x *EmergencyReparentShardResponse) String() string { func (*EmergencyReparentShardResponse) ProtoMessage() {} func (x *EmergencyReparentShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[30] + mi := &file_vtctldata_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1876,7 +2010,7 @@ func (x *EmergencyReparentShardResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use EmergencyReparentShardResponse.ProtoReflect.Descriptor instead. 
func (*EmergencyReparentShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{30} + return file_vtctldata_proto_rawDescGZIP(), []int{32} } func (x *EmergencyReparentShardResponse) GetKeyspace() string { @@ -1918,7 +2052,7 @@ type FindAllShardsInKeyspaceRequest struct { func (x *FindAllShardsInKeyspaceRequest) Reset() { *x = FindAllShardsInKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[31] + mi := &file_vtctldata_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1931,7 +2065,7 @@ func (x *FindAllShardsInKeyspaceRequest) String() string { func (*FindAllShardsInKeyspaceRequest) ProtoMessage() {} func (x *FindAllShardsInKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[31] + mi := &file_vtctldata_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1944,7 +2078,7 @@ func (x *FindAllShardsInKeyspaceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FindAllShardsInKeyspaceRequest.ProtoReflect.Descriptor instead. 
func (*FindAllShardsInKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{31} + return file_vtctldata_proto_rawDescGZIP(), []int{33} } func (x *FindAllShardsInKeyspaceRequest) GetKeyspace() string { @@ -1965,7 +2099,7 @@ type FindAllShardsInKeyspaceResponse struct { func (x *FindAllShardsInKeyspaceResponse) Reset() { *x = FindAllShardsInKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[32] + mi := &file_vtctldata_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1978,7 +2112,7 @@ func (x *FindAllShardsInKeyspaceResponse) String() string { func (*FindAllShardsInKeyspaceResponse) ProtoMessage() {} func (x *FindAllShardsInKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[32] + mi := &file_vtctldata_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1991,7 +2125,7 @@ func (x *FindAllShardsInKeyspaceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FindAllShardsInKeyspaceResponse.ProtoReflect.Descriptor instead. 
func (*FindAllShardsInKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{32} + return file_vtctldata_proto_rawDescGZIP(), []int{34} } func (x *FindAllShardsInKeyspaceResponse) GetShards() map[string]*Shard { @@ -2013,7 +2147,7 @@ type GetBackupsRequest struct { func (x *GetBackupsRequest) Reset() { *x = GetBackupsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[33] + mi := &file_vtctldata_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2026,7 +2160,7 @@ func (x *GetBackupsRequest) String() string { func (*GetBackupsRequest) ProtoMessage() {} func (x *GetBackupsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[33] + mi := &file_vtctldata_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2039,7 +2173,7 @@ func (x *GetBackupsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBackupsRequest.ProtoReflect.Descriptor instead. 
func (*GetBackupsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{33} + return file_vtctldata_proto_rawDescGZIP(), []int{35} } func (x *GetBackupsRequest) GetKeyspace() string { @@ -2067,7 +2201,7 @@ type GetBackupsResponse struct { func (x *GetBackupsResponse) Reset() { *x = GetBackupsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[34] + mi := &file_vtctldata_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2080,7 +2214,7 @@ func (x *GetBackupsResponse) String() string { func (*GetBackupsResponse) ProtoMessage() {} func (x *GetBackupsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[34] + mi := &file_vtctldata_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2093,7 +2227,7 @@ func (x *GetBackupsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBackupsResponse.ProtoReflect.Descriptor instead. 
func (*GetBackupsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{34} + return file_vtctldata_proto_rawDescGZIP(), []int{36} } func (x *GetBackupsResponse) GetBackups() []*mysqlctl.BackupInfo { @@ -2114,7 +2248,7 @@ type GetCellInfoRequest struct { func (x *GetCellInfoRequest) Reset() { *x = GetCellInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[35] + mi := &file_vtctldata_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2127,7 +2261,7 @@ func (x *GetCellInfoRequest) String() string { func (*GetCellInfoRequest) ProtoMessage() {} func (x *GetCellInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[35] + mi := &file_vtctldata_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2140,7 +2274,7 @@ func (x *GetCellInfoRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetCellInfoRequest.ProtoReflect.Descriptor instead. 
func (*GetCellInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{35} + return file_vtctldata_proto_rawDescGZIP(), []int{37} } func (x *GetCellInfoRequest) GetCell() string { @@ -2161,7 +2295,7 @@ type GetCellInfoResponse struct { func (x *GetCellInfoResponse) Reset() { *x = GetCellInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[36] + mi := &file_vtctldata_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2174,7 +2308,7 @@ func (x *GetCellInfoResponse) String() string { func (*GetCellInfoResponse) ProtoMessage() {} func (x *GetCellInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[36] + mi := &file_vtctldata_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2187,7 +2321,7 @@ func (x *GetCellInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetCellInfoResponse.ProtoReflect.Descriptor instead. 
func (*GetCellInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{36} + return file_vtctldata_proto_rawDescGZIP(), []int{38} } func (x *GetCellInfoResponse) GetCellInfo() *topodata.CellInfo { @@ -2206,7 +2340,7 @@ type GetCellInfoNamesRequest struct { func (x *GetCellInfoNamesRequest) Reset() { *x = GetCellInfoNamesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[37] + mi := &file_vtctldata_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2219,7 +2353,7 @@ func (x *GetCellInfoNamesRequest) String() string { func (*GetCellInfoNamesRequest) ProtoMessage() {} func (x *GetCellInfoNamesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[37] + mi := &file_vtctldata_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2232,7 +2366,7 @@ func (x *GetCellInfoNamesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetCellInfoNamesRequest.ProtoReflect.Descriptor instead. 
func (*GetCellInfoNamesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{37} + return file_vtctldata_proto_rawDescGZIP(), []int{39} } type GetCellInfoNamesResponse struct { @@ -2246,7 +2380,7 @@ type GetCellInfoNamesResponse struct { func (x *GetCellInfoNamesResponse) Reset() { *x = GetCellInfoNamesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[38] + mi := &file_vtctldata_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2259,7 +2393,7 @@ func (x *GetCellInfoNamesResponse) String() string { func (*GetCellInfoNamesResponse) ProtoMessage() {} func (x *GetCellInfoNamesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[38] + mi := &file_vtctldata_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2272,7 +2406,7 @@ func (x *GetCellInfoNamesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetCellInfoNamesResponse.ProtoReflect.Descriptor instead. 
func (*GetCellInfoNamesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{38} + return file_vtctldata_proto_rawDescGZIP(), []int{40} } func (x *GetCellInfoNamesResponse) GetNames() []string { @@ -2291,7 +2425,7 @@ type GetCellsAliasesRequest struct { func (x *GetCellsAliasesRequest) Reset() { *x = GetCellsAliasesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[39] + mi := &file_vtctldata_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2304,7 +2438,7 @@ func (x *GetCellsAliasesRequest) String() string { func (*GetCellsAliasesRequest) ProtoMessage() {} func (x *GetCellsAliasesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[39] + mi := &file_vtctldata_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2317,7 +2451,7 @@ func (x *GetCellsAliasesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetCellsAliasesRequest.ProtoReflect.Descriptor instead. 
func (*GetCellsAliasesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{39} + return file_vtctldata_proto_rawDescGZIP(), []int{41} } type GetCellsAliasesResponse struct { @@ -2331,7 +2465,7 @@ type GetCellsAliasesResponse struct { func (x *GetCellsAliasesResponse) Reset() { *x = GetCellsAliasesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[40] + mi := &file_vtctldata_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2344,7 +2478,7 @@ func (x *GetCellsAliasesResponse) String() string { func (*GetCellsAliasesResponse) ProtoMessage() {} func (x *GetCellsAliasesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[40] + mi := &file_vtctldata_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2357,7 +2491,7 @@ func (x *GetCellsAliasesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetCellsAliasesResponse.ProtoReflect.Descriptor instead. 
func (*GetCellsAliasesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{40} + return file_vtctldata_proto_rawDescGZIP(), []int{42} } func (x *GetCellsAliasesResponse) GetAliases() map[string]*topodata.CellsAlias { @@ -2376,7 +2510,7 @@ type GetKeyspacesRequest struct { func (x *GetKeyspacesRequest) Reset() { *x = GetKeyspacesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[41] + mi := &file_vtctldata_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2389,7 +2523,7 @@ func (x *GetKeyspacesRequest) String() string { func (*GetKeyspacesRequest) ProtoMessage() {} func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[41] + mi := &file_vtctldata_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2402,7 +2536,7 @@ func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetKeyspacesRequest.ProtoReflect.Descriptor instead. 
func (*GetKeyspacesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{41} + return file_vtctldata_proto_rawDescGZIP(), []int{43} } type GetKeyspacesResponse struct { @@ -2416,7 +2550,7 @@ type GetKeyspacesResponse struct { func (x *GetKeyspacesResponse) Reset() { *x = GetKeyspacesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[42] + mi := &file_vtctldata_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2429,7 +2563,7 @@ func (x *GetKeyspacesResponse) String() string { func (*GetKeyspacesResponse) ProtoMessage() {} func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[42] + mi := &file_vtctldata_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2442,7 +2576,7 @@ func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetKeyspacesResponse.ProtoReflect.Descriptor instead. 
func (*GetKeyspacesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{42} + return file_vtctldata_proto_rawDescGZIP(), []int{44} } func (x *GetKeyspacesResponse) GetKeyspaces() []*Keyspace { @@ -2463,7 +2597,7 @@ type GetKeyspaceRequest struct { func (x *GetKeyspaceRequest) Reset() { *x = GetKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[43] + mi := &file_vtctldata_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2476,7 +2610,7 @@ func (x *GetKeyspaceRequest) String() string { func (*GetKeyspaceRequest) ProtoMessage() {} func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[43] + mi := &file_vtctldata_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2489,7 +2623,7 @@ func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetKeyspaceRequest.ProtoReflect.Descriptor instead. 
func (*GetKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{43} + return file_vtctldata_proto_rawDescGZIP(), []int{45} } func (x *GetKeyspaceRequest) GetKeyspace() string { @@ -2510,7 +2644,7 @@ type GetKeyspaceResponse struct { func (x *GetKeyspaceResponse) Reset() { *x = GetKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[44] + mi := &file_vtctldata_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2523,7 +2657,7 @@ func (x *GetKeyspaceResponse) String() string { func (*GetKeyspaceResponse) ProtoMessage() {} func (x *GetKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[44] + mi := &file_vtctldata_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2536,7 +2670,7 @@ func (x *GetKeyspaceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetKeyspaceResponse.ProtoReflect.Descriptor instead. 
func (*GetKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{44} + return file_vtctldata_proto_rawDescGZIP(), []int{46} } func (x *GetKeyspaceResponse) GetKeyspace() *Keyspace { @@ -2555,7 +2689,7 @@ type GetRoutingRulesRequest struct { func (x *GetRoutingRulesRequest) Reset() { *x = GetRoutingRulesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[45] + mi := &file_vtctldata_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2568,7 +2702,7 @@ func (x *GetRoutingRulesRequest) String() string { func (*GetRoutingRulesRequest) ProtoMessage() {} func (x *GetRoutingRulesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[45] + mi := &file_vtctldata_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2581,7 +2715,7 @@ func (x *GetRoutingRulesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetRoutingRulesRequest.ProtoReflect.Descriptor instead. 
func (*GetRoutingRulesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{45} + return file_vtctldata_proto_rawDescGZIP(), []int{47} } type GetRoutingRulesResponse struct { @@ -2595,7 +2729,7 @@ type GetRoutingRulesResponse struct { func (x *GetRoutingRulesResponse) Reset() { *x = GetRoutingRulesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[46] + mi := &file_vtctldata_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2608,7 +2742,7 @@ func (x *GetRoutingRulesResponse) String() string { func (*GetRoutingRulesResponse) ProtoMessage() {} func (x *GetRoutingRulesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[46] + mi := &file_vtctldata_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2621,7 +2755,7 @@ func (x *GetRoutingRulesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetRoutingRulesResponse.ProtoReflect.Descriptor instead. 
func (*GetRoutingRulesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{46} + return file_vtctldata_proto_rawDescGZIP(), []int{48} } func (x *GetRoutingRulesResponse) GetRoutingRules() *vschema.RoutingRules { @@ -2657,7 +2791,7 @@ type GetSchemaRequest struct { func (x *GetSchemaRequest) Reset() { *x = GetSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[47] + mi := &file_vtctldata_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2670,7 +2804,7 @@ func (x *GetSchemaRequest) String() string { func (*GetSchemaRequest) ProtoMessage() {} func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[47] + mi := &file_vtctldata_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2683,7 +2817,7 @@ func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSchemaRequest.ProtoReflect.Descriptor instead. 
func (*GetSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{47} + return file_vtctldata_proto_rawDescGZIP(), []int{49} } func (x *GetSchemaRequest) GetTabletAlias() *topodata.TabletAlias { @@ -2739,7 +2873,7 @@ type GetSchemaResponse struct { func (x *GetSchemaResponse) Reset() { *x = GetSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[48] + mi := &file_vtctldata_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2752,7 +2886,7 @@ func (x *GetSchemaResponse) String() string { func (*GetSchemaResponse) ProtoMessage() {} func (x *GetSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[48] + mi := &file_vtctldata_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2765,7 +2899,7 @@ func (x *GetSchemaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSchemaResponse.ProtoReflect.Descriptor instead. 
func (*GetSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{48} + return file_vtctldata_proto_rawDescGZIP(), []int{50} } func (x *GetSchemaResponse) GetSchema() *tabletmanagerdata.SchemaDefinition { @@ -2787,7 +2921,7 @@ type GetShardRequest struct { func (x *GetShardRequest) Reset() { *x = GetShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[49] + mi := &file_vtctldata_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2800,7 +2934,7 @@ func (x *GetShardRequest) String() string { func (*GetShardRequest) ProtoMessage() {} func (x *GetShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[49] + mi := &file_vtctldata_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2813,7 +2947,7 @@ func (x *GetShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetShardRequest.ProtoReflect.Descriptor instead. 
func (*GetShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{49} + return file_vtctldata_proto_rawDescGZIP(), []int{51} } func (x *GetShardRequest) GetKeyspace() string { @@ -2841,7 +2975,7 @@ type GetShardResponse struct { func (x *GetShardResponse) Reset() { *x = GetShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[50] + mi := &file_vtctldata_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2854,7 +2988,7 @@ func (x *GetShardResponse) String() string { func (*GetShardResponse) ProtoMessage() {} func (x *GetShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[50] + mi := &file_vtctldata_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2867,7 +3001,7 @@ func (x *GetShardResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetShardResponse.ProtoReflect.Descriptor instead. 
func (*GetShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{50} + return file_vtctldata_proto_rawDescGZIP(), []int{52} } func (x *GetShardResponse) GetShard() *Shard { @@ -2891,7 +3025,7 @@ type GetSrvKeyspacesRequest struct { func (x *GetSrvKeyspacesRequest) Reset() { *x = GetSrvKeyspacesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[51] + mi := &file_vtctldata_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2904,7 +3038,7 @@ func (x *GetSrvKeyspacesRequest) String() string { func (*GetSrvKeyspacesRequest) ProtoMessage() {} func (x *GetSrvKeyspacesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[51] + mi := &file_vtctldata_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2917,7 +3051,7 @@ func (x *GetSrvKeyspacesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvKeyspacesRequest.ProtoReflect.Descriptor instead. 
func (*GetSrvKeyspacesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{51} + return file_vtctldata_proto_rawDescGZIP(), []int{53} } func (x *GetSrvKeyspacesRequest) GetKeyspace() string { @@ -2946,7 +3080,7 @@ type GetSrvKeyspacesResponse struct { func (x *GetSrvKeyspacesResponse) Reset() { *x = GetSrvKeyspacesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[52] + mi := &file_vtctldata_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2959,7 +3093,7 @@ func (x *GetSrvKeyspacesResponse) String() string { func (*GetSrvKeyspacesResponse) ProtoMessage() {} func (x *GetSrvKeyspacesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[52] + mi := &file_vtctldata_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2972,7 +3106,7 @@ func (x *GetSrvKeyspacesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvKeyspacesResponse.ProtoReflect.Descriptor instead. 
func (*GetSrvKeyspacesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{52} + return file_vtctldata_proto_rawDescGZIP(), []int{54} } func (x *GetSrvKeyspacesResponse) GetSrvKeyspaces() map[string]*topodata.SrvKeyspace { @@ -2993,7 +3127,7 @@ type GetSrvVSchemaRequest struct { func (x *GetSrvVSchemaRequest) Reset() { *x = GetSrvVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[53] + mi := &file_vtctldata_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3006,7 +3140,7 @@ func (x *GetSrvVSchemaRequest) String() string { func (*GetSrvVSchemaRequest) ProtoMessage() {} func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[53] + mi := &file_vtctldata_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3019,7 +3153,7 @@ func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvVSchemaRequest.ProtoReflect.Descriptor instead. 
func (*GetSrvVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{53} + return file_vtctldata_proto_rawDescGZIP(), []int{55} } func (x *GetSrvVSchemaRequest) GetCell() string { @@ -3040,7 +3174,7 @@ type GetSrvVSchemaResponse struct { func (x *GetSrvVSchemaResponse) Reset() { *x = GetSrvVSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[54] + mi := &file_vtctldata_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3053,7 +3187,7 @@ func (x *GetSrvVSchemaResponse) String() string { func (*GetSrvVSchemaResponse) ProtoMessage() {} func (x *GetSrvVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[54] + mi := &file_vtctldata_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3066,7 +3200,7 @@ func (x *GetSrvVSchemaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvVSchemaResponse.ProtoReflect.Descriptor instead. 
func (*GetSrvVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{54} + return file_vtctldata_proto_rawDescGZIP(), []int{56} } func (x *GetSrvVSchemaResponse) GetSrvVSchema() *vschema.SrvVSchema { @@ -3087,7 +3221,7 @@ type GetSrvVSchemasRequest struct { func (x *GetSrvVSchemasRequest) Reset() { *x = GetSrvVSchemasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[55] + mi := &file_vtctldata_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3100,7 +3234,7 @@ func (x *GetSrvVSchemasRequest) String() string { func (*GetSrvVSchemasRequest) ProtoMessage() {} func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[55] + mi := &file_vtctldata_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3113,7 +3247,7 @@ func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvVSchemasRequest.ProtoReflect.Descriptor instead. 
func (*GetSrvVSchemasRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{55} + return file_vtctldata_proto_rawDescGZIP(), []int{57} } func (x *GetSrvVSchemasRequest) GetCells() []string { @@ -3135,7 +3269,7 @@ type GetSrvVSchemasResponse struct { func (x *GetSrvVSchemasResponse) Reset() { *x = GetSrvVSchemasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[56] + mi := &file_vtctldata_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3148,7 +3282,7 @@ func (x *GetSrvVSchemasResponse) String() string { func (*GetSrvVSchemasResponse) ProtoMessage() {} func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[56] + mi := &file_vtctldata_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3161,7 +3295,7 @@ func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvVSchemasResponse.ProtoReflect.Descriptor instead. 
func (*GetSrvVSchemasResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{56} + return file_vtctldata_proto_rawDescGZIP(), []int{58} } func (x *GetSrvVSchemasResponse) GetSrvVSchemas() map[string]*vschema.SrvVSchema { @@ -3182,7 +3316,7 @@ type GetTabletRequest struct { func (x *GetTabletRequest) Reset() { *x = GetTabletRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[57] + mi := &file_vtctldata_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3195,7 +3329,7 @@ func (x *GetTabletRequest) String() string { func (*GetTabletRequest) ProtoMessage() {} func (x *GetTabletRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[57] + mi := &file_vtctldata_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3208,7 +3342,7 @@ func (x *GetTabletRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTabletRequest.ProtoReflect.Descriptor instead. 
func (*GetTabletRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{57} + return file_vtctldata_proto_rawDescGZIP(), []int{59} } func (x *GetTabletRequest) GetTabletAlias() *topodata.TabletAlias { @@ -3229,7 +3363,7 @@ type GetTabletResponse struct { func (x *GetTabletResponse) Reset() { *x = GetTabletResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[58] + mi := &file_vtctldata_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3242,7 +3376,7 @@ func (x *GetTabletResponse) String() string { func (*GetTabletResponse) ProtoMessage() {} func (x *GetTabletResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[58] + mi := &file_vtctldata_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3255,7 +3389,7 @@ func (x *GetTabletResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTabletResponse.ProtoReflect.Descriptor instead. 
func (*GetTabletResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{58} + return file_vtctldata_proto_rawDescGZIP(), []int{60} } func (x *GetTabletResponse) GetTablet() *topodata.Tablet { @@ -3294,7 +3428,7 @@ type GetTabletsRequest struct { func (x *GetTabletsRequest) Reset() { *x = GetTabletsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[59] + mi := &file_vtctldata_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3307,7 +3441,7 @@ func (x *GetTabletsRequest) String() string { func (*GetTabletsRequest) ProtoMessage() {} func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[59] + mi := &file_vtctldata_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3320,7 +3454,7 @@ func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTabletsRequest.ProtoReflect.Descriptor instead. 
func (*GetTabletsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{59} + return file_vtctldata_proto_rawDescGZIP(), []int{61} } func (x *GetTabletsRequest) GetKeyspace() string { @@ -3369,7 +3503,7 @@ type GetTabletsResponse struct { func (x *GetTabletsResponse) Reset() { *x = GetTabletsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[60] + mi := &file_vtctldata_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3382,7 +3516,7 @@ func (x *GetTabletsResponse) String() string { func (*GetTabletsResponse) ProtoMessage() {} func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[60] + mi := &file_vtctldata_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3395,7 +3529,7 @@ func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTabletsResponse.ProtoReflect.Descriptor instead. 
func (*GetTabletsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{60} + return file_vtctldata_proto_rawDescGZIP(), []int{62} } func (x *GetTabletsResponse) GetTablets() []*topodata.Tablet { @@ -3416,7 +3550,7 @@ type GetVSchemaRequest struct { func (x *GetVSchemaRequest) Reset() { *x = GetVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[61] + mi := &file_vtctldata_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3429,7 +3563,7 @@ func (x *GetVSchemaRequest) String() string { func (*GetVSchemaRequest) ProtoMessage() {} func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[61] + mi := &file_vtctldata_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3442,7 +3576,7 @@ func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetVSchemaRequest.ProtoReflect.Descriptor instead. 
func (*GetVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{61} + return file_vtctldata_proto_rawDescGZIP(), []int{63} } func (x *GetVSchemaRequest) GetKeyspace() string { @@ -3463,7 +3597,7 @@ type GetVSchemaResponse struct { func (x *GetVSchemaResponse) Reset() { *x = GetVSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[62] + mi := &file_vtctldata_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3476,7 +3610,7 @@ func (x *GetVSchemaResponse) String() string { func (*GetVSchemaResponse) ProtoMessage() {} func (x *GetVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[62] + mi := &file_vtctldata_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3489,7 +3623,7 @@ func (x *GetVSchemaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetVSchemaResponse.ProtoReflect.Descriptor instead. 
func (*GetVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{62} + return file_vtctldata_proto_rawDescGZIP(), []int{64} } func (x *GetVSchemaResponse) GetVSchema() *vschema.Keyspace { @@ -3511,7 +3645,7 @@ type GetWorkflowsRequest struct { func (x *GetWorkflowsRequest) Reset() { *x = GetWorkflowsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[63] + mi := &file_vtctldata_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3524,7 +3658,7 @@ func (x *GetWorkflowsRequest) String() string { func (*GetWorkflowsRequest) ProtoMessage() {} func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[63] + mi := &file_vtctldata_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3537,7 +3671,7 @@ func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetWorkflowsRequest.ProtoReflect.Descriptor instead. 
func (*GetWorkflowsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{63} + return file_vtctldata_proto_rawDescGZIP(), []int{65} } func (x *GetWorkflowsRequest) GetKeyspace() string { @@ -3565,7 +3699,7 @@ type GetWorkflowsResponse struct { func (x *GetWorkflowsResponse) Reset() { *x = GetWorkflowsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[64] + mi := &file_vtctldata_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3578,7 +3712,7 @@ func (x *GetWorkflowsResponse) String() string { func (*GetWorkflowsResponse) ProtoMessage() {} func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[64] + mi := &file_vtctldata_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3591,7 +3725,7 @@ func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetWorkflowsResponse.ProtoReflect.Descriptor instead. 
func (*GetWorkflowsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{64} + return file_vtctldata_proto_rawDescGZIP(), []int{66} } func (x *GetWorkflowsResponse) GetWorkflows() []*Workflow { @@ -3616,7 +3750,7 @@ type InitShardPrimaryRequest struct { func (x *InitShardPrimaryRequest) Reset() { *x = InitShardPrimaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[65] + mi := &file_vtctldata_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3629,7 +3763,7 @@ func (x *InitShardPrimaryRequest) String() string { func (*InitShardPrimaryRequest) ProtoMessage() {} func (x *InitShardPrimaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[65] + mi := &file_vtctldata_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3642,7 +3776,7 @@ func (x *InitShardPrimaryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use InitShardPrimaryRequest.ProtoReflect.Descriptor instead. 
func (*InitShardPrimaryRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{65} + return file_vtctldata_proto_rawDescGZIP(), []int{67} } func (x *InitShardPrimaryRequest) GetKeyspace() string { @@ -3691,7 +3825,7 @@ type InitShardPrimaryResponse struct { func (x *InitShardPrimaryResponse) Reset() { *x = InitShardPrimaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[66] + mi := &file_vtctldata_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3704,7 +3838,7 @@ func (x *InitShardPrimaryResponse) String() string { func (*InitShardPrimaryResponse) ProtoMessage() {} func (x *InitShardPrimaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[66] + mi := &file_vtctldata_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3717,7 +3851,7 @@ func (x *InitShardPrimaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use InitShardPrimaryResponse.ProtoReflect.Descriptor instead. 
func (*InitShardPrimaryResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{66} + return file_vtctldata_proto_rawDescGZIP(), []int{68} } func (x *InitShardPrimaryResponse) GetEvents() []*logutil.Event { @@ -3759,7 +3893,7 @@ type PlannedReparentShardRequest struct { func (x *PlannedReparentShardRequest) Reset() { *x = PlannedReparentShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[67] + mi := &file_vtctldata_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3772,7 +3906,7 @@ func (x *PlannedReparentShardRequest) String() string { func (*PlannedReparentShardRequest) ProtoMessage() {} func (x *PlannedReparentShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[67] + mi := &file_vtctldata_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3785,7 +3919,7 @@ func (x *PlannedReparentShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PlannedReparentShardRequest.ProtoReflect.Descriptor instead. 
func (*PlannedReparentShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{67} + return file_vtctldata_proto_rawDescGZIP(), []int{69} } func (x *PlannedReparentShardRequest) GetKeyspace() string { @@ -3843,7 +3977,7 @@ type PlannedReparentShardResponse struct { func (x *PlannedReparentShardResponse) Reset() { *x = PlannedReparentShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[68] + mi := &file_vtctldata_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3856,7 +3990,7 @@ func (x *PlannedReparentShardResponse) String() string { func (*PlannedReparentShardResponse) ProtoMessage() {} func (x *PlannedReparentShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[68] + mi := &file_vtctldata_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3869,7 +4003,7 @@ func (x *PlannedReparentShardResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PlannedReparentShardResponse.ProtoReflect.Descriptor instead. 
func (*PlannedReparentShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{68} + return file_vtctldata_proto_rawDescGZIP(), []int{70} } func (x *PlannedReparentShardResponse) GetKeyspace() string { @@ -3913,7 +4047,7 @@ type RebuildVSchemaGraphRequest struct { func (x *RebuildVSchemaGraphRequest) Reset() { *x = RebuildVSchemaGraphRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[69] + mi := &file_vtctldata_proto_msgTypes[71] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3926,7 +4060,7 @@ func (x *RebuildVSchemaGraphRequest) String() string { func (*RebuildVSchemaGraphRequest) ProtoMessage() {} func (x *RebuildVSchemaGraphRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[69] + mi := &file_vtctldata_proto_msgTypes[71] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3939,7 +4073,7 @@ func (x *RebuildVSchemaGraphRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RebuildVSchemaGraphRequest.ProtoReflect.Descriptor instead. 
func (*RebuildVSchemaGraphRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{69} + return file_vtctldata_proto_rawDescGZIP(), []int{71} } func (x *RebuildVSchemaGraphRequest) GetCells() []string { @@ -3958,7 +4092,7 @@ type RebuildVSchemaGraphResponse struct { func (x *RebuildVSchemaGraphResponse) Reset() { *x = RebuildVSchemaGraphResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[70] + mi := &file_vtctldata_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3971,7 +4105,7 @@ func (x *RebuildVSchemaGraphResponse) String() string { func (*RebuildVSchemaGraphResponse) ProtoMessage() {} func (x *RebuildVSchemaGraphResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[70] + mi := &file_vtctldata_proto_msgTypes[72] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3984,7 +4118,7 @@ func (x *RebuildVSchemaGraphResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RebuildVSchemaGraphResponse.ProtoReflect.Descriptor instead. 
func (*RebuildVSchemaGraphResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{70} + return file_vtctldata_proto_rawDescGZIP(), []int{72} } type RemoveKeyspaceCellRequest struct { @@ -4006,7 +4140,7 @@ type RemoveKeyspaceCellRequest struct { func (x *RemoveKeyspaceCellRequest) Reset() { *x = RemoveKeyspaceCellRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[71] + mi := &file_vtctldata_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4019,7 +4153,7 @@ func (x *RemoveKeyspaceCellRequest) String() string { func (*RemoveKeyspaceCellRequest) ProtoMessage() {} func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[71] + mi := &file_vtctldata_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4032,7 +4166,7 @@ func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoveKeyspaceCellRequest.ProtoReflect.Descriptor instead. 
func (*RemoveKeyspaceCellRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{71} + return file_vtctldata_proto_rawDescGZIP(), []int{73} } func (x *RemoveKeyspaceCellRequest) GetKeyspace() string { @@ -4072,7 +4206,7 @@ type RemoveKeyspaceCellResponse struct { func (x *RemoveKeyspaceCellResponse) Reset() { *x = RemoveKeyspaceCellResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[72] + mi := &file_vtctldata_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4085,7 +4219,7 @@ func (x *RemoveKeyspaceCellResponse) String() string { func (*RemoveKeyspaceCellResponse) ProtoMessage() {} func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[72] + mi := &file_vtctldata_proto_msgTypes[74] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4098,7 +4232,7 @@ func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoveKeyspaceCellResponse.ProtoReflect.Descriptor instead. 
func (*RemoveKeyspaceCellResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{72} + return file_vtctldata_proto_rawDescGZIP(), []int{74} } type RemoveShardCellRequest struct { @@ -4121,7 +4255,7 @@ type RemoveShardCellRequest struct { func (x *RemoveShardCellRequest) Reset() { *x = RemoveShardCellRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[73] + mi := &file_vtctldata_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4134,7 +4268,7 @@ func (x *RemoveShardCellRequest) String() string { func (*RemoveShardCellRequest) ProtoMessage() {} func (x *RemoveShardCellRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[73] + mi := &file_vtctldata_proto_msgTypes[75] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4147,7 +4281,7 @@ func (x *RemoveShardCellRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoveShardCellRequest.ProtoReflect.Descriptor instead. 
func (*RemoveShardCellRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{73} + return file_vtctldata_proto_rawDescGZIP(), []int{75} } func (x *RemoveShardCellRequest) GetKeyspace() string { @@ -4194,7 +4328,7 @@ type RemoveShardCellResponse struct { func (x *RemoveShardCellResponse) Reset() { *x = RemoveShardCellResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[74] + mi := &file_vtctldata_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4207,7 +4341,7 @@ func (x *RemoveShardCellResponse) String() string { func (*RemoveShardCellResponse) ProtoMessage() {} func (x *RemoveShardCellResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[74] + mi := &file_vtctldata_proto_msgTypes[76] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4220,7 +4354,7 @@ func (x *RemoveShardCellResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoveShardCellResponse.ProtoReflect.Descriptor instead. 
func (*RemoveShardCellResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{74} + return file_vtctldata_proto_rawDescGZIP(), []int{76} } type ReparentTabletRequest struct { @@ -4236,7 +4370,7 @@ type ReparentTabletRequest struct { func (x *ReparentTabletRequest) Reset() { *x = ReparentTabletRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[75] + mi := &file_vtctldata_proto_msgTypes[77] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4249,7 +4383,7 @@ func (x *ReparentTabletRequest) String() string { func (*ReparentTabletRequest) ProtoMessage() {} func (x *ReparentTabletRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[75] + mi := &file_vtctldata_proto_msgTypes[77] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4262,7 +4396,7 @@ func (x *ReparentTabletRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReparentTabletRequest.ProtoReflect.Descriptor instead. 
func (*ReparentTabletRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{75} + return file_vtctldata_proto_rawDescGZIP(), []int{77} } func (x *ReparentTabletRequest) GetTablet() *topodata.TabletAlias { @@ -4288,7 +4422,7 @@ type ReparentTabletResponse struct { func (x *ReparentTabletResponse) Reset() { *x = ReparentTabletResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[76] + mi := &file_vtctldata_proto_msgTypes[78] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4301,7 +4435,7 @@ func (x *ReparentTabletResponse) String() string { func (*ReparentTabletResponse) ProtoMessage() {} func (x *ReparentTabletResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[76] + mi := &file_vtctldata_proto_msgTypes[78] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4314,7 +4448,7 @@ func (x *ReparentTabletResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReparentTabletResponse.ProtoReflect.Descriptor instead. 
func (*ReparentTabletResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{76} + return file_vtctldata_proto_rawDescGZIP(), []int{78} } func (x *ReparentTabletResponse) GetKeyspace() string { @@ -4350,7 +4484,7 @@ type ShardReplicationPositionsRequest struct { func (x *ShardReplicationPositionsRequest) Reset() { *x = ShardReplicationPositionsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[77] + mi := &file_vtctldata_proto_msgTypes[79] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4363,7 +4497,7 @@ func (x *ShardReplicationPositionsRequest) String() string { func (*ShardReplicationPositionsRequest) ProtoMessage() {} func (x *ShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[77] + mi := &file_vtctldata_proto_msgTypes[79] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4376,7 +4510,7 @@ func (x *ShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ShardReplicationPositionsRequest.ProtoReflect.Descriptor instead. 
func (*ShardReplicationPositionsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{77} + return file_vtctldata_proto_rawDescGZIP(), []int{79} } func (x *ShardReplicationPositionsRequest) GetKeyspace() string { @@ -4409,7 +4543,7 @@ type ShardReplicationPositionsResponse struct { func (x *ShardReplicationPositionsResponse) Reset() { *x = ShardReplicationPositionsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[78] + mi := &file_vtctldata_proto_msgTypes[80] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4422,7 +4556,7 @@ func (x *ShardReplicationPositionsResponse) String() string { func (*ShardReplicationPositionsResponse) ProtoMessage() {} func (x *ShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[78] + mi := &file_vtctldata_proto_msgTypes[80] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4435,7 +4569,7 @@ func (x *ShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message // Deprecated: Use ShardReplicationPositionsResponse.ProtoReflect.Descriptor instead. 
func (*ShardReplicationPositionsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{78} + return file_vtctldata_proto_rawDescGZIP(), []int{80} } func (x *ShardReplicationPositionsResponse) GetReplicationStatuses() map[string]*replicationdata.Status { @@ -4465,7 +4599,7 @@ type TabletExternallyReparentedRequest struct { func (x *TabletExternallyReparentedRequest) Reset() { *x = TabletExternallyReparentedRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[79] + mi := &file_vtctldata_proto_msgTypes[81] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4478,7 +4612,7 @@ func (x *TabletExternallyReparentedRequest) String() string { func (*TabletExternallyReparentedRequest) ProtoMessage() {} func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[79] + mi := &file_vtctldata_proto_msgTypes[81] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4491,7 +4625,7 @@ func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message // Deprecated: Use TabletExternallyReparentedRequest.ProtoReflect.Descriptor instead. 
func (*TabletExternallyReparentedRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{79} + return file_vtctldata_proto_rawDescGZIP(), []int{81} } func (x *TabletExternallyReparentedRequest) GetTablet() *topodata.TabletAlias { @@ -4515,7 +4649,7 @@ type TabletExternallyReparentedResponse struct { func (x *TabletExternallyReparentedResponse) Reset() { *x = TabletExternallyReparentedResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[80] + mi := &file_vtctldata_proto_msgTypes[82] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4528,7 +4662,7 @@ func (x *TabletExternallyReparentedResponse) String() string { func (*TabletExternallyReparentedResponse) ProtoMessage() {} func (x *TabletExternallyReparentedResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[80] + mi := &file_vtctldata_proto_msgTypes[82] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4541,7 +4675,7 @@ func (x *TabletExternallyReparentedResponse) ProtoReflect() protoreflect.Message // Deprecated: Use TabletExternallyReparentedResponse.ProtoReflect.Descriptor instead. 
func (*TabletExternallyReparentedResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{80} + return file_vtctldata_proto_rawDescGZIP(), []int{82} } func (x *TabletExternallyReparentedResponse) GetKeyspace() string { @@ -4584,7 +4718,7 @@ type UpdateCellInfoRequest struct { func (x *UpdateCellInfoRequest) Reset() { *x = UpdateCellInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[81] + mi := &file_vtctldata_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4597,7 +4731,7 @@ func (x *UpdateCellInfoRequest) String() string { func (*UpdateCellInfoRequest) ProtoMessage() {} func (x *UpdateCellInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[81] + mi := &file_vtctldata_proto_msgTypes[83] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4610,7 +4744,7 @@ func (x *UpdateCellInfoRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateCellInfoRequest.ProtoReflect.Descriptor instead. 
func (*UpdateCellInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{81} + return file_vtctldata_proto_rawDescGZIP(), []int{83} } func (x *UpdateCellInfoRequest) GetName() string { @@ -4639,7 +4773,7 @@ type UpdateCellInfoResponse struct { func (x *UpdateCellInfoResponse) Reset() { *x = UpdateCellInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[82] + mi := &file_vtctldata_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4652,7 +4786,7 @@ func (x *UpdateCellInfoResponse) String() string { func (*UpdateCellInfoResponse) ProtoMessage() {} func (x *UpdateCellInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[82] + mi := &file_vtctldata_proto_msgTypes[84] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4665,7 +4799,7 @@ func (x *UpdateCellInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateCellInfoResponse.ProtoReflect.Descriptor instead. 
func (*UpdateCellInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{82} + return file_vtctldata_proto_rawDescGZIP(), []int{84} } func (x *UpdateCellInfoResponse) GetName() string { @@ -4694,7 +4828,7 @@ type UpdateCellsAliasRequest struct { func (x *UpdateCellsAliasRequest) Reset() { *x = UpdateCellsAliasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[83] + mi := &file_vtctldata_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4707,7 +4841,7 @@ func (x *UpdateCellsAliasRequest) String() string { func (*UpdateCellsAliasRequest) ProtoMessage() {} func (x *UpdateCellsAliasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[83] + mi := &file_vtctldata_proto_msgTypes[85] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4720,7 +4854,7 @@ func (x *UpdateCellsAliasRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateCellsAliasRequest.ProtoReflect.Descriptor instead. 
func (*UpdateCellsAliasRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{83} + return file_vtctldata_proto_rawDescGZIP(), []int{85} } func (x *UpdateCellsAliasRequest) GetName() string { @@ -4749,7 +4883,7 @@ type UpdateCellsAliasResponse struct { func (x *UpdateCellsAliasResponse) Reset() { *x = UpdateCellsAliasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[84] + mi := &file_vtctldata_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4762,7 +4896,7 @@ func (x *UpdateCellsAliasResponse) String() string { func (*UpdateCellsAliasResponse) ProtoMessage() {} func (x *UpdateCellsAliasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[84] + mi := &file_vtctldata_proto_msgTypes[86] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4775,7 +4909,7 @@ func (x *UpdateCellsAliasResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateCellsAliasResponse.ProtoReflect.Descriptor instead. 
func (*UpdateCellsAliasResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{84} + return file_vtctldata_proto_rawDescGZIP(), []int{86} } func (x *UpdateCellsAliasResponse) GetName() string { @@ -4804,7 +4938,7 @@ type Workflow_ReplicationLocation struct { func (x *Workflow_ReplicationLocation) Reset() { *x = Workflow_ReplicationLocation{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[86] + mi := &file_vtctldata_proto_msgTypes[88] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4817,7 +4951,7 @@ func (x *Workflow_ReplicationLocation) String() string { func (*Workflow_ReplicationLocation) ProtoMessage() {} func (x *Workflow_ReplicationLocation) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[86] + mi := &file_vtctldata_proto_msgTypes[88] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4860,7 +4994,7 @@ type Workflow_ShardStream struct { func (x *Workflow_ShardStream) Reset() { *x = Workflow_ShardStream{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[87] + mi := &file_vtctldata_proto_msgTypes[89] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4873,7 +5007,7 @@ func (x *Workflow_ShardStream) String() string { func (*Workflow_ShardStream) ProtoMessage() {} func (x *Workflow_ShardStream) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[87] + mi := &file_vtctldata_proto_msgTypes[89] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4932,7 +5066,7 @@ type Workflow_Stream struct { func (x *Workflow_Stream) Reset() { *x = Workflow_Stream{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[88] + mi := &file_vtctldata_proto_msgTypes[90] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4945,7 +5079,7 @@ func (x *Workflow_Stream) String() string { func (*Workflow_Stream) ProtoMessage() {} func (x *Workflow_Stream) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[88] + mi := &file_vtctldata_proto_msgTypes[90] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5057,7 +5191,7 @@ type Workflow_Stream_CopyState struct { func (x *Workflow_Stream_CopyState) Reset() { *x = Workflow_Stream_CopyState{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[89] + mi := &file_vtctldata_proto_msgTypes[91] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5070,7 +5204,7 @@ func (x *Workflow_Stream_CopyState) String() string { func (*Workflow_Stream_CopyState) ProtoMessage() {} func (x *Workflow_Stream_CopyState) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[89] + mi := &file_vtctldata_proto_msgTypes[91] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5264,116 +5398,365 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x1b, 0x0a, 0x19, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x17, 0x43, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 
0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x07, - 0x64, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x06, 0x64, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, - 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, - 0x79, 0x52, 0x75, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x18, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x35, 0x0a, 0x0d, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, - 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x33, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, - 0x72, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x52, 0x0b, 0x61, 0x66, 0x74, 0x65, 0x72, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1e, 0x0a, - 0x0b, 0x77, 0x61, 0x73, 0x5f, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x09, 0x77, 0x61, 0x73, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0xb6, 0x03, - 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc3, 0x01, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, + 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, + 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x17, 0x0a, + 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x08, + 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, + 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x22, 0x44, 0x0a, 0x14, + 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x22, 0x9b, 0x01, 0x0a, 0x17, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, + 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x74, + 0x79, 
0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x06, 0x64, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, + 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, + 0x22, 0xa6, 0x01, 0x0a, 0x18, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, + 0x0d, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x12, 0x33, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0b, 0x61, 0x66, + 0x74, 0x65, 0x72, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1e, 0x0a, 0x0b, 0x77, 0x61, 0x73, + 0x5f, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x77, 0x61, 0x73, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0xb6, 0x03, 0x0a, 0x15, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x2f, 0x0a, + 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x5f, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x30, + 0x0a, 0x14, 0x73, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, + 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x4a, 0x0a, 0x14, 0x73, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6c, + 0x75, 0x6d, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x49, 0x64, 0x54, 0x79, 0x70, 0x65, 0x52, 0x12, 0x73, 0x68, 0x61, 0x72, 0x64, 0x69, + 0x6e, 0x67, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, + 0x6d, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x73, 0x12, 0x2a, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61, + 0x73, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x31, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 
0x6d, 0x65, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x69, + 0x6d, 0x65, 0x22, 0x49, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8c, 0x01, + 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, + 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0xa0, 0x01, 0x0a, + 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x30, 0x0a, + 0x14, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, + 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x41, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, + 0x41, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, + 0x63, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x0a, 0x17, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, - 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, - 0x65, 0x12, 0x2f, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, - 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, - 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x12, 0x73, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 
0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4a, 0x0a, 0x14, 0x73, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x54, 0x79, 0x70, 0x65, 0x52, 0x12, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x40, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x73, - 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, - 0x6d, 0x73, 0x12, 0x2a, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x16, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, - 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x31, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, - 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x49, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 
0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x51, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, + 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x85, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x06, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x06, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, + 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, + 0x73, 0x69, 0x76, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x5f, 0x69, 0x66, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 
0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, + 0x76, 0x65, 0x6e, 0x49, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x22, 0x16, 0x0a, 0x14, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x79, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, + 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8f, 0x02, 0x0a, 0x1d, 0x45, 0x6d, 0x65, + 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, + 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 
0x77, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3e, 0x0a, 0x0f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x73, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xbc, 0x01, 0x0a, 0x1e, 0x45, + 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x52, 
0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x1e, 0x46, 0x69, 0x6e, + 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xbe, 0x01, 0x0a, 0x1f, 0x46, 0x69, 0x6e, 0x64, + 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a, 0x4b, 0x0a, 0x0b, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x45, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, + 0x44, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, + 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x73, 0x22, 0x28, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, + 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, + 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, + 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, + 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x19, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, + 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0x30, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, + 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xb6, + 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x07, 
0x61, 0x6c, + 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x61, 0x6c, + 0x69, 0x61, 0x73, 0x65, 0x73, 0x1a, 0x50, 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x15, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x49, + 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0x30, 0x0a, 0x12, 0x47, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x46, 0x0a, 0x13, 0x47, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2f, 
0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x55, 0x0a, + 0x17, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x22, 0x84, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, + 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 
0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x4f, 0x6e, 0x6c, + 0x79, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x73, + 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x50, 0x0a, 0x11, 0x47, + 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x3b, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x4c, 0x0a, + 0x0f, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x0a, 0x10, 0x47, + 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x26, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x4a, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, + 
0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, + 0x6c, 0x6c, 0x73, 0x22, 0xcc, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x59, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x72, + 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x1a, 0x56, 0x0a, 0x11, 0x53, 0x72, + 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x2a, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, + 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x4e, + 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 
0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0c, 0x73, 0x72, 0x76, 0x5f, 0x76, + 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x0a, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x2d, + 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xc5, 0x01, + 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, + 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0b, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, + 0x1a, 0x53, 0x0a, 0x10, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, + 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x10, 0x47, 0x65, 0x74, 
0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x22, 0x3d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x22, 0xb1, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x63, - 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x22, 0xa0, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 
0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x61, 0x6c, 0x72, 0x65, 0x61, - 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x12, 0x73, 0x68, 0x61, 0x72, 0x64, 0x41, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, - 0x73, 0x74, 0x73, 0x22, 0x41, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, - 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x2d, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, - 0x1a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x51, 0x0a, 0x15, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 
0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, + 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x22, 0x40, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, + 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x2f, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x42, 0x0a, 0x12, 0x47, 0x65, 0x74, + 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x52, 0x0a, + 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x18, - 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x85, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x28, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, - 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, - 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, - 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x49, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, - 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x79, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x23, - 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 
0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x22, 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8f, 0x02, 0x0a, - 0x1d, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, + 0x79, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0xfb, 0x01, 0x0a, + 0x17, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x52, 0x0a, 0x1a, 0x70, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x17, 0x70, 0x72, 0x69, 0x6d, 
0x61, 0x72, 0x79, 0x45, 0x6c, + 0x65, 0x63, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, + 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, + 0x6f, 0x72, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x42, 0x0a, 0x18, 0x49, 0x6e, + 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, + 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x89, + 0x02, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, @@ -5381,376 +5764,144 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, - 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3e, 0x0a, 0x0f, 0x69, 0x67, 0x6e, 0x6f, - 0x72, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 
0x63, 0x61, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, - 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xbc, - 0x01, 0x0a, 0x1e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, - 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x3c, 0x0a, - 0x1e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 
0x49, 0x6e, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xbe, 0x01, 0x0a, 0x1f, - 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x4e, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, - 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a, - 0x4b, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x45, 0x0a, 0x11, - 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x22, 0x44, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x73, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x62, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x79, 0x73, - 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x22, 0x28, 0x0a, 0x12, 0x47, 0x65, 0x74, - 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, - 0x65, 0x6c, 0x6c, 0x22, 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, - 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x19, 0x0a, 0x17, 0x47, - 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x30, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, - 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x43, - 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x22, 0xb6, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, - 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x2f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 
0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, - 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x1a, 0x50, 0x0a, 0x0c, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x15, 0x0a, 0x13, 0x47, - 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0x30, 0x0a, - 0x12, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, - 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 
0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x6f, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x22, 0x55, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, - 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0d, - 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, - 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x84, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, - 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, - 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, - 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 
0x61, 0x6d, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x22, - 0x50, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, - 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x22, 0x4c, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, + 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x0d, 0x61, 0x76, 0x6f, 0x69, + 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0c, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xba, 0x01, 0x0a, 0x1c, 0x50, + 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 
0x6e, 0x74, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, + 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, + 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, + 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x32, 0x0a, 0x1a, 0x52, 0x65, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x52, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, + 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7f, 0x0a, 0x19, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 
0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, + 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x16, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, - 0x3a, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x4a, 0x0a, 0x16, 0x47, - 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xcc, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, - 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 
0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, - 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x0c, 0x73, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x1a, 0x56, - 0x0a, 0x11, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2a, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, - 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, - 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, - 0x6c, 0x6c, 0x22, 0x4e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0c, 0x73, - 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x22, 0x2d, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 
0x68, - 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, - 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x22, 0xc5, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x0d, - 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x73, 0x1a, 0x53, 0x0a, 0x10, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x10, 0x47, 0x65, 0x74, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, - 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x3d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x06, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0xb1, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, - 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, - 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x12, 0x3c, 0x0a, 0x0e, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x22, 0x40, 0x0a, 0x12, 0x47, 0x65, - 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x2a, 0x0a, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x2f, 0x0a, 0x11, - 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 
0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x42, 0x0a, - 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x22, 0x52, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, - 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, - 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, - 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, - 0x22, 0xfb, 0x01, 0x0a, 0x17, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x52, - 0x0a, 
0x1a, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x17, 0x70, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x45, 0x6c, 0x65, 0x63, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, - 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x42, - 0x0a, 0x18, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, - 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 
0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x0d, - 0x61, 0x76, 0x6f, 0x69, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0c, 0x61, 0x76, 0x6f, 0x69, - 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, - 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xba, - 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x04, 0x63, + 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, + 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, + 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65, 0x6d, 0x6f, 0x76, + 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x46, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x32, 0x0a, 0x1a, 0x52, - 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, - 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, - 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, - 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7f, - 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x61, 0x73, 0x52, 
0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x7b, 0x0a, 0x16, 0x52, 0x65, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, + 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x54, 0x0a, 0x20, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, - 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, - 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, - 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, - 0x0a, 0x16, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, - 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 
0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, - 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x52, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x46, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xaa, 0x03, + 0x0a, 0x21, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x14, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x45, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 
0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x5a, 0x0a, + 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x1a, 0x5f, 0x0a, 0x18, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4e, 0x0a, 0x0e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x52, 0x0a, 0x21, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 
0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x7b, - 0x0a, 0x16, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x52, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x54, 0x0a, 0x20, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x22, 0xaa, 0x03, 0x0a, 0x21, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x14, 0x72, 0x65, 
0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, - 0x73, 0x12, 0x5a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x70, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x1a, 0x5f, 0x0a, - 0x18, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4e, - 0x0a, 0x0e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 
0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x52, - 0x0a, 0x21, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x22, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, - 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 
0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x0a, 0x6f, 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5c, 0x0a, 0x15, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, - 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x5d, 0x0a, 0x16, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, - 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, - 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x64, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x65, - 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 
0x69, - 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, - 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, - 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x42, 0x28, 0x5a, 0x26, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, - 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0xc6, + 0x01, 0x0a, 0x22, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 
0x61, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6f, 0x6c, 0x64, + 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5c, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, + 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x5d, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, + 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, + 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x64, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, + 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, + 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, + 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x65, 0x0a, 0x18, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 
0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, + 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x42, 0x28, 0x5a, 0x26, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, + 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -5765,7 +5916,7 @@ func file_vtctldata_proto_rawDescGZIP() []byte { return file_vtctldata_proto_rawDescData } -var file_vtctldata_proto_msgTypes = make([]protoimpl.MessageInfo, 96) +var file_vtctldata_proto_msgTypes = make([]protoimpl.MessageInfo, 98) var file_vtctldata_proto_goTypes = []interface{}{ (*ExecuteVtctlCommandRequest)(nil), // 0: vtctldata.ExecuteVtctlCommandRequest (*ExecuteVtctlCommandResponse)(nil), // 1: vtctldata.ExecuteVtctlCommandResponse @@ -5780,197 +5931,201 @@ var file_vtctldata_proto_goTypes = []interface{}{ (*AddCellsAliasResponse)(nil), // 10: vtctldata.AddCellsAliasResponse (*ApplyRoutingRulesRequest)(nil), // 11: vtctldata.ApplyRoutingRulesRequest (*ApplyRoutingRulesResponse)(nil), // 12: vtctldata.ApplyRoutingRulesResponse - (*ChangeTabletTypeRequest)(nil), // 13: vtctldata.ChangeTabletTypeRequest - (*ChangeTabletTypeResponse)(nil), // 14: vtctldata.ChangeTabletTypeResponse - (*CreateKeyspaceRequest)(nil), // 15: vtctldata.CreateKeyspaceRequest - (*CreateKeyspaceResponse)(nil), // 16: vtctldata.CreateKeyspaceResponse - (*CreateShardRequest)(nil), // 17: 
vtctldata.CreateShardRequest - (*CreateShardResponse)(nil), // 18: vtctldata.CreateShardResponse - (*DeleteCellInfoRequest)(nil), // 19: vtctldata.DeleteCellInfoRequest - (*DeleteCellInfoResponse)(nil), // 20: vtctldata.DeleteCellInfoResponse - (*DeleteCellsAliasRequest)(nil), // 21: vtctldata.DeleteCellsAliasRequest - (*DeleteCellsAliasResponse)(nil), // 22: vtctldata.DeleteCellsAliasResponse - (*DeleteKeyspaceRequest)(nil), // 23: vtctldata.DeleteKeyspaceRequest - (*DeleteKeyspaceResponse)(nil), // 24: vtctldata.DeleteKeyspaceResponse - (*DeleteShardsRequest)(nil), // 25: vtctldata.DeleteShardsRequest - (*DeleteShardsResponse)(nil), // 26: vtctldata.DeleteShardsResponse - (*DeleteTabletsRequest)(nil), // 27: vtctldata.DeleteTabletsRequest - (*DeleteTabletsResponse)(nil), // 28: vtctldata.DeleteTabletsResponse - (*EmergencyReparentShardRequest)(nil), // 29: vtctldata.EmergencyReparentShardRequest - (*EmergencyReparentShardResponse)(nil), // 30: vtctldata.EmergencyReparentShardResponse - (*FindAllShardsInKeyspaceRequest)(nil), // 31: vtctldata.FindAllShardsInKeyspaceRequest - (*FindAllShardsInKeyspaceResponse)(nil), // 32: vtctldata.FindAllShardsInKeyspaceResponse - (*GetBackupsRequest)(nil), // 33: vtctldata.GetBackupsRequest - (*GetBackupsResponse)(nil), // 34: vtctldata.GetBackupsResponse - (*GetCellInfoRequest)(nil), // 35: vtctldata.GetCellInfoRequest - (*GetCellInfoResponse)(nil), // 36: vtctldata.GetCellInfoResponse - (*GetCellInfoNamesRequest)(nil), // 37: vtctldata.GetCellInfoNamesRequest - (*GetCellInfoNamesResponse)(nil), // 38: vtctldata.GetCellInfoNamesResponse - (*GetCellsAliasesRequest)(nil), // 39: vtctldata.GetCellsAliasesRequest - (*GetCellsAliasesResponse)(nil), // 40: vtctldata.GetCellsAliasesResponse - (*GetKeyspacesRequest)(nil), // 41: vtctldata.GetKeyspacesRequest - (*GetKeyspacesResponse)(nil), // 42: vtctldata.GetKeyspacesResponse - (*GetKeyspaceRequest)(nil), // 43: vtctldata.GetKeyspaceRequest - (*GetKeyspaceResponse)(nil), // 44: 
vtctldata.GetKeyspaceResponse - (*GetRoutingRulesRequest)(nil), // 45: vtctldata.GetRoutingRulesRequest - (*GetRoutingRulesResponse)(nil), // 46: vtctldata.GetRoutingRulesResponse - (*GetSchemaRequest)(nil), // 47: vtctldata.GetSchemaRequest - (*GetSchemaResponse)(nil), // 48: vtctldata.GetSchemaResponse - (*GetShardRequest)(nil), // 49: vtctldata.GetShardRequest - (*GetShardResponse)(nil), // 50: vtctldata.GetShardResponse - (*GetSrvKeyspacesRequest)(nil), // 51: vtctldata.GetSrvKeyspacesRequest - (*GetSrvKeyspacesResponse)(nil), // 52: vtctldata.GetSrvKeyspacesResponse - (*GetSrvVSchemaRequest)(nil), // 53: vtctldata.GetSrvVSchemaRequest - (*GetSrvVSchemaResponse)(nil), // 54: vtctldata.GetSrvVSchemaResponse - (*GetSrvVSchemasRequest)(nil), // 55: vtctldata.GetSrvVSchemasRequest - (*GetSrvVSchemasResponse)(nil), // 56: vtctldata.GetSrvVSchemasResponse - (*GetTabletRequest)(nil), // 57: vtctldata.GetTabletRequest - (*GetTabletResponse)(nil), // 58: vtctldata.GetTabletResponse - (*GetTabletsRequest)(nil), // 59: vtctldata.GetTabletsRequest - (*GetTabletsResponse)(nil), // 60: vtctldata.GetTabletsResponse - (*GetVSchemaRequest)(nil), // 61: vtctldata.GetVSchemaRequest - (*GetVSchemaResponse)(nil), // 62: vtctldata.GetVSchemaResponse - (*GetWorkflowsRequest)(nil), // 63: vtctldata.GetWorkflowsRequest - (*GetWorkflowsResponse)(nil), // 64: vtctldata.GetWorkflowsResponse - (*InitShardPrimaryRequest)(nil), // 65: vtctldata.InitShardPrimaryRequest - (*InitShardPrimaryResponse)(nil), // 66: vtctldata.InitShardPrimaryResponse - (*PlannedReparentShardRequest)(nil), // 67: vtctldata.PlannedReparentShardRequest - (*PlannedReparentShardResponse)(nil), // 68: vtctldata.PlannedReparentShardResponse - (*RebuildVSchemaGraphRequest)(nil), // 69: vtctldata.RebuildVSchemaGraphRequest - (*RebuildVSchemaGraphResponse)(nil), // 70: vtctldata.RebuildVSchemaGraphResponse - (*RemoveKeyspaceCellRequest)(nil), // 71: vtctldata.RemoveKeyspaceCellRequest - (*RemoveKeyspaceCellResponse)(nil), 
// 72: vtctldata.RemoveKeyspaceCellResponse - (*RemoveShardCellRequest)(nil), // 73: vtctldata.RemoveShardCellRequest - (*RemoveShardCellResponse)(nil), // 74: vtctldata.RemoveShardCellResponse - (*ReparentTabletRequest)(nil), // 75: vtctldata.ReparentTabletRequest - (*ReparentTabletResponse)(nil), // 76: vtctldata.ReparentTabletResponse - (*ShardReplicationPositionsRequest)(nil), // 77: vtctldata.ShardReplicationPositionsRequest - (*ShardReplicationPositionsResponse)(nil), // 78: vtctldata.ShardReplicationPositionsResponse - (*TabletExternallyReparentedRequest)(nil), // 79: vtctldata.TabletExternallyReparentedRequest - (*TabletExternallyReparentedResponse)(nil), // 80: vtctldata.TabletExternallyReparentedResponse - (*UpdateCellInfoRequest)(nil), // 81: vtctldata.UpdateCellInfoRequest - (*UpdateCellInfoResponse)(nil), // 82: vtctldata.UpdateCellInfoResponse - (*UpdateCellsAliasRequest)(nil), // 83: vtctldata.UpdateCellsAliasRequest - (*UpdateCellsAliasResponse)(nil), // 84: vtctldata.UpdateCellsAliasResponse - nil, // 85: vtctldata.Workflow.ShardStreamsEntry - (*Workflow_ReplicationLocation)(nil), // 86: vtctldata.Workflow.ReplicationLocation - (*Workflow_ShardStream)(nil), // 87: vtctldata.Workflow.ShardStream - (*Workflow_Stream)(nil), // 88: vtctldata.Workflow.Stream - (*Workflow_Stream_CopyState)(nil), // 89: vtctldata.Workflow.Stream.CopyState - nil, // 90: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry - nil, // 91: vtctldata.GetCellsAliasesResponse.AliasesEntry - nil, // 92: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry - nil, // 93: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry - nil, // 94: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry - nil, // 95: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry - (*logutil.Event)(nil), // 96: logutil.Event - (*topodata.Keyspace)(nil), // 97: topodata.Keyspace - (*topodata.Shard)(nil), // 98: topodata.Shard - (*topodata.CellInfo)(nil), // 99: topodata.CellInfo - 
(*vschema.RoutingRules)(nil), // 100: vschema.RoutingRules - (*topodata.TabletAlias)(nil), // 101: topodata.TabletAlias - (topodata.TabletType)(0), // 102: topodata.TabletType - (*topodata.Tablet)(nil), // 103: topodata.Tablet - (topodata.KeyspaceIdType)(0), // 104: topodata.KeyspaceIdType - (*topodata.Keyspace_ServedFrom)(nil), // 105: topodata.Keyspace.ServedFrom - (topodata.KeyspaceType)(0), // 106: topodata.KeyspaceType - (*vttime.Time)(nil), // 107: vttime.Time - (*vttime.Duration)(nil), // 108: vttime.Duration - (*mysqlctl.BackupInfo)(nil), // 109: mysqlctl.BackupInfo - (*tabletmanagerdata.SchemaDefinition)(nil), // 110: tabletmanagerdata.SchemaDefinition - (*vschema.SrvVSchema)(nil), // 111: vschema.SrvVSchema - (*vschema.Keyspace)(nil), // 112: vschema.Keyspace - (*topodata.CellsAlias)(nil), // 113: topodata.CellsAlias - (*topodata.Shard_TabletControl)(nil), // 114: topodata.Shard.TabletControl - (*binlogdata.BinlogSource)(nil), // 115: binlogdata.BinlogSource - (*topodata.SrvKeyspace)(nil), // 116: topodata.SrvKeyspace - (*replicationdata.Status)(nil), // 117: replicationdata.Status + (*ApplyVSchemaRequest)(nil), // 13: vtctldata.ApplyVSchemaRequest + (*ApplyVSchemaResponse)(nil), // 14: vtctldata.ApplyVSchemaResponse + (*ChangeTabletTypeRequest)(nil), // 15: vtctldata.ChangeTabletTypeRequest + (*ChangeTabletTypeResponse)(nil), // 16: vtctldata.ChangeTabletTypeResponse + (*CreateKeyspaceRequest)(nil), // 17: vtctldata.CreateKeyspaceRequest + (*CreateKeyspaceResponse)(nil), // 18: vtctldata.CreateKeyspaceResponse + (*CreateShardRequest)(nil), // 19: vtctldata.CreateShardRequest + (*CreateShardResponse)(nil), // 20: vtctldata.CreateShardResponse + (*DeleteCellInfoRequest)(nil), // 21: vtctldata.DeleteCellInfoRequest + (*DeleteCellInfoResponse)(nil), // 22: vtctldata.DeleteCellInfoResponse + (*DeleteCellsAliasRequest)(nil), // 23: vtctldata.DeleteCellsAliasRequest + (*DeleteCellsAliasResponse)(nil), // 24: vtctldata.DeleteCellsAliasResponse + 
(*DeleteKeyspaceRequest)(nil), // 25: vtctldata.DeleteKeyspaceRequest + (*DeleteKeyspaceResponse)(nil), // 26: vtctldata.DeleteKeyspaceResponse + (*DeleteShardsRequest)(nil), // 27: vtctldata.DeleteShardsRequest + (*DeleteShardsResponse)(nil), // 28: vtctldata.DeleteShardsResponse + (*DeleteTabletsRequest)(nil), // 29: vtctldata.DeleteTabletsRequest + (*DeleteTabletsResponse)(nil), // 30: vtctldata.DeleteTabletsResponse + (*EmergencyReparentShardRequest)(nil), // 31: vtctldata.EmergencyReparentShardRequest + (*EmergencyReparentShardResponse)(nil), // 32: vtctldata.EmergencyReparentShardResponse + (*FindAllShardsInKeyspaceRequest)(nil), // 33: vtctldata.FindAllShardsInKeyspaceRequest + (*FindAllShardsInKeyspaceResponse)(nil), // 34: vtctldata.FindAllShardsInKeyspaceResponse + (*GetBackupsRequest)(nil), // 35: vtctldata.GetBackupsRequest + (*GetBackupsResponse)(nil), // 36: vtctldata.GetBackupsResponse + (*GetCellInfoRequest)(nil), // 37: vtctldata.GetCellInfoRequest + (*GetCellInfoResponse)(nil), // 38: vtctldata.GetCellInfoResponse + (*GetCellInfoNamesRequest)(nil), // 39: vtctldata.GetCellInfoNamesRequest + (*GetCellInfoNamesResponse)(nil), // 40: vtctldata.GetCellInfoNamesResponse + (*GetCellsAliasesRequest)(nil), // 41: vtctldata.GetCellsAliasesRequest + (*GetCellsAliasesResponse)(nil), // 42: vtctldata.GetCellsAliasesResponse + (*GetKeyspacesRequest)(nil), // 43: vtctldata.GetKeyspacesRequest + (*GetKeyspacesResponse)(nil), // 44: vtctldata.GetKeyspacesResponse + (*GetKeyspaceRequest)(nil), // 45: vtctldata.GetKeyspaceRequest + (*GetKeyspaceResponse)(nil), // 46: vtctldata.GetKeyspaceResponse + (*GetRoutingRulesRequest)(nil), // 47: vtctldata.GetRoutingRulesRequest + (*GetRoutingRulesResponse)(nil), // 48: vtctldata.GetRoutingRulesResponse + (*GetSchemaRequest)(nil), // 49: vtctldata.GetSchemaRequest + (*GetSchemaResponse)(nil), // 50: vtctldata.GetSchemaResponse + (*GetShardRequest)(nil), // 51: vtctldata.GetShardRequest + (*GetShardResponse)(nil), // 52: 
vtctldata.GetShardResponse + (*GetSrvKeyspacesRequest)(nil), // 53: vtctldata.GetSrvKeyspacesRequest + (*GetSrvKeyspacesResponse)(nil), // 54: vtctldata.GetSrvKeyspacesResponse + (*GetSrvVSchemaRequest)(nil), // 55: vtctldata.GetSrvVSchemaRequest + (*GetSrvVSchemaResponse)(nil), // 56: vtctldata.GetSrvVSchemaResponse + (*GetSrvVSchemasRequest)(nil), // 57: vtctldata.GetSrvVSchemasRequest + (*GetSrvVSchemasResponse)(nil), // 58: vtctldata.GetSrvVSchemasResponse + (*GetTabletRequest)(nil), // 59: vtctldata.GetTabletRequest + (*GetTabletResponse)(nil), // 60: vtctldata.GetTabletResponse + (*GetTabletsRequest)(nil), // 61: vtctldata.GetTabletsRequest + (*GetTabletsResponse)(nil), // 62: vtctldata.GetTabletsResponse + (*GetVSchemaRequest)(nil), // 63: vtctldata.GetVSchemaRequest + (*GetVSchemaResponse)(nil), // 64: vtctldata.GetVSchemaResponse + (*GetWorkflowsRequest)(nil), // 65: vtctldata.GetWorkflowsRequest + (*GetWorkflowsResponse)(nil), // 66: vtctldata.GetWorkflowsResponse + (*InitShardPrimaryRequest)(nil), // 67: vtctldata.InitShardPrimaryRequest + (*InitShardPrimaryResponse)(nil), // 68: vtctldata.InitShardPrimaryResponse + (*PlannedReparentShardRequest)(nil), // 69: vtctldata.PlannedReparentShardRequest + (*PlannedReparentShardResponse)(nil), // 70: vtctldata.PlannedReparentShardResponse + (*RebuildVSchemaGraphRequest)(nil), // 71: vtctldata.RebuildVSchemaGraphRequest + (*RebuildVSchemaGraphResponse)(nil), // 72: vtctldata.RebuildVSchemaGraphResponse + (*RemoveKeyspaceCellRequest)(nil), // 73: vtctldata.RemoveKeyspaceCellRequest + (*RemoveKeyspaceCellResponse)(nil), // 74: vtctldata.RemoveKeyspaceCellResponse + (*RemoveShardCellRequest)(nil), // 75: vtctldata.RemoveShardCellRequest + (*RemoveShardCellResponse)(nil), // 76: vtctldata.RemoveShardCellResponse + (*ReparentTabletRequest)(nil), // 77: vtctldata.ReparentTabletRequest + (*ReparentTabletResponse)(nil), // 78: vtctldata.ReparentTabletResponse + (*ShardReplicationPositionsRequest)(nil), // 79: 
vtctldata.ShardReplicationPositionsRequest + (*ShardReplicationPositionsResponse)(nil), // 80: vtctldata.ShardReplicationPositionsResponse + (*TabletExternallyReparentedRequest)(nil), // 81: vtctldata.TabletExternallyReparentedRequest + (*TabletExternallyReparentedResponse)(nil), // 82: vtctldata.TabletExternallyReparentedResponse + (*UpdateCellInfoRequest)(nil), // 83: vtctldata.UpdateCellInfoRequest + (*UpdateCellInfoResponse)(nil), // 84: vtctldata.UpdateCellInfoResponse + (*UpdateCellsAliasRequest)(nil), // 85: vtctldata.UpdateCellsAliasRequest + (*UpdateCellsAliasResponse)(nil), // 86: vtctldata.UpdateCellsAliasResponse + nil, // 87: vtctldata.Workflow.ShardStreamsEntry + (*Workflow_ReplicationLocation)(nil), // 88: vtctldata.Workflow.ReplicationLocation + (*Workflow_ShardStream)(nil), // 89: vtctldata.Workflow.ShardStream + (*Workflow_Stream)(nil), // 90: vtctldata.Workflow.Stream + (*Workflow_Stream_CopyState)(nil), // 91: vtctldata.Workflow.Stream.CopyState + nil, // 92: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry + nil, // 93: vtctldata.GetCellsAliasesResponse.AliasesEntry + nil, // 94: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry + nil, // 95: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry + nil, // 96: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry + nil, // 97: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry + (*logutil.Event)(nil), // 98: logutil.Event + (*topodata.Keyspace)(nil), // 99: topodata.Keyspace + (*topodata.Shard)(nil), // 100: topodata.Shard + (*topodata.CellInfo)(nil), // 101: topodata.CellInfo + (*vschema.RoutingRules)(nil), // 102: vschema.RoutingRules + (*vschema.Keyspace)(nil), // 103: vschema.Keyspace + (*topodata.TabletAlias)(nil), // 104: topodata.TabletAlias + (topodata.TabletType)(0), // 105: topodata.TabletType + (*topodata.Tablet)(nil), // 106: topodata.Tablet + (topodata.KeyspaceIdType)(0), // 107: topodata.KeyspaceIdType + (*topodata.Keyspace_ServedFrom)(nil), // 108: 
topodata.Keyspace.ServedFrom + (topodata.KeyspaceType)(0), // 109: topodata.KeyspaceType + (*vttime.Time)(nil), // 110: vttime.Time + (*vttime.Duration)(nil), // 111: vttime.Duration + (*mysqlctl.BackupInfo)(nil), // 112: mysqlctl.BackupInfo + (*tabletmanagerdata.SchemaDefinition)(nil), // 113: tabletmanagerdata.SchemaDefinition + (*vschema.SrvVSchema)(nil), // 114: vschema.SrvVSchema + (*topodata.CellsAlias)(nil), // 115: topodata.CellsAlias + (*topodata.Shard_TabletControl)(nil), // 116: topodata.Shard.TabletControl + (*binlogdata.BinlogSource)(nil), // 117: binlogdata.BinlogSource + (*topodata.SrvKeyspace)(nil), // 118: topodata.SrvKeyspace + (*replicationdata.Status)(nil), // 119: replicationdata.Status } var file_vtctldata_proto_depIdxs = []int32{ - 96, // 0: vtctldata.ExecuteVtctlCommandResponse.event:type_name -> logutil.Event + 98, // 0: vtctldata.ExecuteVtctlCommandResponse.event:type_name -> logutil.Event 2, // 1: vtctldata.MaterializeSettings.table_settings:type_name -> vtctldata.TableMaterializeSettings - 97, // 2: vtctldata.Keyspace.keyspace:type_name -> topodata.Keyspace - 98, // 3: vtctldata.Shard.shard:type_name -> topodata.Shard - 86, // 4: vtctldata.Workflow.source:type_name -> vtctldata.Workflow.ReplicationLocation - 86, // 5: vtctldata.Workflow.target:type_name -> vtctldata.Workflow.ReplicationLocation - 85, // 6: vtctldata.Workflow.shard_streams:type_name -> vtctldata.Workflow.ShardStreamsEntry - 99, // 7: vtctldata.AddCellInfoRequest.cell_info:type_name -> topodata.CellInfo - 100, // 8: vtctldata.ApplyRoutingRulesRequest.routing_rules:type_name -> vschema.RoutingRules - 101, // 9: vtctldata.ChangeTabletTypeRequest.tablet_alias:type_name -> topodata.TabletAlias - 102, // 10: vtctldata.ChangeTabletTypeRequest.db_type:type_name -> topodata.TabletType - 103, // 11: vtctldata.ChangeTabletTypeResponse.before_tablet:type_name -> topodata.Tablet - 103, // 12: vtctldata.ChangeTabletTypeResponse.after_tablet:type_name -> topodata.Tablet - 104, // 13: 
vtctldata.CreateKeyspaceRequest.sharding_column_type:type_name -> topodata.KeyspaceIdType - 105, // 14: vtctldata.CreateKeyspaceRequest.served_froms:type_name -> topodata.Keyspace.ServedFrom - 106, // 15: vtctldata.CreateKeyspaceRequest.type:type_name -> topodata.KeyspaceType - 107, // 16: vtctldata.CreateKeyspaceRequest.snapshot_time:type_name -> vttime.Time - 4, // 17: vtctldata.CreateKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace - 4, // 18: vtctldata.CreateShardResponse.keyspace:type_name -> vtctldata.Keyspace - 5, // 19: vtctldata.CreateShardResponse.shard:type_name -> vtctldata.Shard - 5, // 20: vtctldata.DeleteShardsRequest.shards:type_name -> vtctldata.Shard - 101, // 21: vtctldata.DeleteTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias - 101, // 22: vtctldata.EmergencyReparentShardRequest.new_primary:type_name -> topodata.TabletAlias - 101, // 23: vtctldata.EmergencyReparentShardRequest.ignore_replicas:type_name -> topodata.TabletAlias - 108, // 24: vtctldata.EmergencyReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration - 101, // 25: vtctldata.EmergencyReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias - 96, // 26: vtctldata.EmergencyReparentShardResponse.events:type_name -> logutil.Event - 90, // 27: vtctldata.FindAllShardsInKeyspaceResponse.shards:type_name -> vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry - 109, // 28: vtctldata.GetBackupsResponse.backups:type_name -> mysqlctl.BackupInfo - 99, // 29: vtctldata.GetCellInfoResponse.cell_info:type_name -> topodata.CellInfo - 91, // 30: vtctldata.GetCellsAliasesResponse.aliases:type_name -> vtctldata.GetCellsAliasesResponse.AliasesEntry - 4, // 31: vtctldata.GetKeyspacesResponse.keyspaces:type_name -> vtctldata.Keyspace - 4, // 32: vtctldata.GetKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace - 100, // 33: vtctldata.GetRoutingRulesResponse.routing_rules:type_name -> vschema.RoutingRules - 101, // 34: 
vtctldata.GetSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias - 110, // 35: vtctldata.GetSchemaResponse.schema:type_name -> tabletmanagerdata.SchemaDefinition - 5, // 36: vtctldata.GetShardResponse.shard:type_name -> vtctldata.Shard - 92, // 37: vtctldata.GetSrvKeyspacesResponse.srv_keyspaces:type_name -> vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry - 111, // 38: vtctldata.GetSrvVSchemaResponse.srv_v_schema:type_name -> vschema.SrvVSchema - 93, // 39: vtctldata.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry - 101, // 40: vtctldata.GetTabletRequest.tablet_alias:type_name -> topodata.TabletAlias - 103, // 41: vtctldata.GetTabletResponse.tablet:type_name -> topodata.Tablet - 101, // 42: vtctldata.GetTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias - 103, // 43: vtctldata.GetTabletsResponse.tablets:type_name -> topodata.Tablet - 112, // 44: vtctldata.GetVSchemaResponse.v_schema:type_name -> vschema.Keyspace - 6, // 45: vtctldata.GetWorkflowsResponse.workflows:type_name -> vtctldata.Workflow - 101, // 46: vtctldata.InitShardPrimaryRequest.primary_elect_tablet_alias:type_name -> topodata.TabletAlias - 108, // 47: vtctldata.InitShardPrimaryRequest.wait_replicas_timeout:type_name -> vttime.Duration - 96, // 48: vtctldata.InitShardPrimaryResponse.events:type_name -> logutil.Event - 101, // 49: vtctldata.PlannedReparentShardRequest.new_primary:type_name -> topodata.TabletAlias - 101, // 50: vtctldata.PlannedReparentShardRequest.avoid_primary:type_name -> topodata.TabletAlias - 108, // 51: vtctldata.PlannedReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration - 101, // 52: vtctldata.PlannedReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias - 96, // 53: vtctldata.PlannedReparentShardResponse.events:type_name -> logutil.Event - 101, // 54: vtctldata.ReparentTabletRequest.tablet:type_name -> topodata.TabletAlias - 101, // 55: 
vtctldata.ReparentTabletResponse.primary:type_name -> topodata.TabletAlias - 94, // 56: vtctldata.ShardReplicationPositionsResponse.replication_statuses:type_name -> vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry - 95, // 57: vtctldata.ShardReplicationPositionsResponse.tablet_map:type_name -> vtctldata.ShardReplicationPositionsResponse.TabletMapEntry - 101, // 58: vtctldata.TabletExternallyReparentedRequest.tablet:type_name -> topodata.TabletAlias - 101, // 59: vtctldata.TabletExternallyReparentedResponse.new_primary:type_name -> topodata.TabletAlias - 101, // 60: vtctldata.TabletExternallyReparentedResponse.old_primary:type_name -> topodata.TabletAlias - 99, // 61: vtctldata.UpdateCellInfoRequest.cell_info:type_name -> topodata.CellInfo - 99, // 62: vtctldata.UpdateCellInfoResponse.cell_info:type_name -> topodata.CellInfo - 113, // 63: vtctldata.UpdateCellsAliasRequest.cells_alias:type_name -> topodata.CellsAlias - 113, // 64: vtctldata.UpdateCellsAliasResponse.cells_alias:type_name -> topodata.CellsAlias - 87, // 65: vtctldata.Workflow.ShardStreamsEntry.value:type_name -> vtctldata.Workflow.ShardStream - 88, // 66: vtctldata.Workflow.ShardStream.streams:type_name -> vtctldata.Workflow.Stream - 114, // 67: vtctldata.Workflow.ShardStream.tablet_controls:type_name -> topodata.Shard.TabletControl - 101, // 68: vtctldata.Workflow.Stream.tablet:type_name -> topodata.TabletAlias - 115, // 69: vtctldata.Workflow.Stream.binlog_source:type_name -> binlogdata.BinlogSource - 107, // 70: vtctldata.Workflow.Stream.transaction_timestamp:type_name -> vttime.Time - 107, // 71: vtctldata.Workflow.Stream.time_updated:type_name -> vttime.Time - 89, // 72: vtctldata.Workflow.Stream.copy_states:type_name -> vtctldata.Workflow.Stream.CopyState - 5, // 73: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry.value:type_name -> vtctldata.Shard - 113, // 74: vtctldata.GetCellsAliasesResponse.AliasesEntry.value:type_name -> topodata.CellsAlias - 116, // 75: 
vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> topodata.SrvKeyspace - 111, // 76: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry.value:type_name -> vschema.SrvVSchema - 117, // 77: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry.value:type_name -> replicationdata.Status - 103, // 78: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry.value:type_name -> topodata.Tablet - 79, // [79:79] is the sub-list for method output_type - 79, // [79:79] is the sub-list for method input_type - 79, // [79:79] is the sub-list for extension type_name - 79, // [79:79] is the sub-list for extension extendee - 0, // [0:79] is the sub-list for field type_name + 99, // 2: vtctldata.Keyspace.keyspace:type_name -> topodata.Keyspace + 100, // 3: vtctldata.Shard.shard:type_name -> topodata.Shard + 88, // 4: vtctldata.Workflow.source:type_name -> vtctldata.Workflow.ReplicationLocation + 88, // 5: vtctldata.Workflow.target:type_name -> vtctldata.Workflow.ReplicationLocation + 87, // 6: vtctldata.Workflow.shard_streams:type_name -> vtctldata.Workflow.ShardStreamsEntry + 101, // 7: vtctldata.AddCellInfoRequest.cell_info:type_name -> topodata.CellInfo + 102, // 8: vtctldata.ApplyRoutingRulesRequest.routing_rules:type_name -> vschema.RoutingRules + 103, // 9: vtctldata.ApplyVSchemaRequest.v_schema:type_name -> vschema.Keyspace + 103, // 10: vtctldata.ApplyVSchemaResponse.v_schema:type_name -> vschema.Keyspace + 104, // 11: vtctldata.ChangeTabletTypeRequest.tablet_alias:type_name -> topodata.TabletAlias + 105, // 12: vtctldata.ChangeTabletTypeRequest.db_type:type_name -> topodata.TabletType + 106, // 13: vtctldata.ChangeTabletTypeResponse.before_tablet:type_name -> topodata.Tablet + 106, // 14: vtctldata.ChangeTabletTypeResponse.after_tablet:type_name -> topodata.Tablet + 107, // 15: vtctldata.CreateKeyspaceRequest.sharding_column_type:type_name -> topodata.KeyspaceIdType + 108, // 16: vtctldata.CreateKeyspaceRequest.served_froms:type_name -> 
topodata.Keyspace.ServedFrom + 109, // 17: vtctldata.CreateKeyspaceRequest.type:type_name -> topodata.KeyspaceType + 110, // 18: vtctldata.CreateKeyspaceRequest.snapshot_time:type_name -> vttime.Time + 4, // 19: vtctldata.CreateKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace + 4, // 20: vtctldata.CreateShardResponse.keyspace:type_name -> vtctldata.Keyspace + 5, // 21: vtctldata.CreateShardResponse.shard:type_name -> vtctldata.Shard + 5, // 22: vtctldata.DeleteShardsRequest.shards:type_name -> vtctldata.Shard + 104, // 23: vtctldata.DeleteTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias + 104, // 24: vtctldata.EmergencyReparentShardRequest.new_primary:type_name -> topodata.TabletAlias + 104, // 25: vtctldata.EmergencyReparentShardRequest.ignore_replicas:type_name -> topodata.TabletAlias + 111, // 26: vtctldata.EmergencyReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration + 104, // 27: vtctldata.EmergencyReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias + 98, // 28: vtctldata.EmergencyReparentShardResponse.events:type_name -> logutil.Event + 92, // 29: vtctldata.FindAllShardsInKeyspaceResponse.shards:type_name -> vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry + 112, // 30: vtctldata.GetBackupsResponse.backups:type_name -> mysqlctl.BackupInfo + 101, // 31: vtctldata.GetCellInfoResponse.cell_info:type_name -> topodata.CellInfo + 93, // 32: vtctldata.GetCellsAliasesResponse.aliases:type_name -> vtctldata.GetCellsAliasesResponse.AliasesEntry + 4, // 33: vtctldata.GetKeyspacesResponse.keyspaces:type_name -> vtctldata.Keyspace + 4, // 34: vtctldata.GetKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace + 102, // 35: vtctldata.GetRoutingRulesResponse.routing_rules:type_name -> vschema.RoutingRules + 104, // 36: vtctldata.GetSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias + 113, // 37: vtctldata.GetSchemaResponse.schema:type_name -> tabletmanagerdata.SchemaDefinition + 5, // 38: 
vtctldata.GetShardResponse.shard:type_name -> vtctldata.Shard + 94, // 39: vtctldata.GetSrvKeyspacesResponse.srv_keyspaces:type_name -> vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry + 114, // 40: vtctldata.GetSrvVSchemaResponse.srv_v_schema:type_name -> vschema.SrvVSchema + 95, // 41: vtctldata.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry + 104, // 42: vtctldata.GetTabletRequest.tablet_alias:type_name -> topodata.TabletAlias + 106, // 43: vtctldata.GetTabletResponse.tablet:type_name -> topodata.Tablet + 104, // 44: vtctldata.GetTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias + 106, // 45: vtctldata.GetTabletsResponse.tablets:type_name -> topodata.Tablet + 103, // 46: vtctldata.GetVSchemaResponse.v_schema:type_name -> vschema.Keyspace + 6, // 47: vtctldata.GetWorkflowsResponse.workflows:type_name -> vtctldata.Workflow + 104, // 48: vtctldata.InitShardPrimaryRequest.primary_elect_tablet_alias:type_name -> topodata.TabletAlias + 111, // 49: vtctldata.InitShardPrimaryRequest.wait_replicas_timeout:type_name -> vttime.Duration + 98, // 50: vtctldata.InitShardPrimaryResponse.events:type_name -> logutil.Event + 104, // 51: vtctldata.PlannedReparentShardRequest.new_primary:type_name -> topodata.TabletAlias + 104, // 52: vtctldata.PlannedReparentShardRequest.avoid_primary:type_name -> topodata.TabletAlias + 111, // 53: vtctldata.PlannedReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration + 104, // 54: vtctldata.PlannedReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias + 98, // 55: vtctldata.PlannedReparentShardResponse.events:type_name -> logutil.Event + 104, // 56: vtctldata.ReparentTabletRequest.tablet:type_name -> topodata.TabletAlias + 104, // 57: vtctldata.ReparentTabletResponse.primary:type_name -> topodata.TabletAlias + 96, // 58: vtctldata.ShardReplicationPositionsResponse.replication_statuses:type_name -> 
vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry + 97, // 59: vtctldata.ShardReplicationPositionsResponse.tablet_map:type_name -> vtctldata.ShardReplicationPositionsResponse.TabletMapEntry + 104, // 60: vtctldata.TabletExternallyReparentedRequest.tablet:type_name -> topodata.TabletAlias + 104, // 61: vtctldata.TabletExternallyReparentedResponse.new_primary:type_name -> topodata.TabletAlias + 104, // 62: vtctldata.TabletExternallyReparentedResponse.old_primary:type_name -> topodata.TabletAlias + 101, // 63: vtctldata.UpdateCellInfoRequest.cell_info:type_name -> topodata.CellInfo + 101, // 64: vtctldata.UpdateCellInfoResponse.cell_info:type_name -> topodata.CellInfo + 115, // 65: vtctldata.UpdateCellsAliasRequest.cells_alias:type_name -> topodata.CellsAlias + 115, // 66: vtctldata.UpdateCellsAliasResponse.cells_alias:type_name -> topodata.CellsAlias + 89, // 67: vtctldata.Workflow.ShardStreamsEntry.value:type_name -> vtctldata.Workflow.ShardStream + 90, // 68: vtctldata.Workflow.ShardStream.streams:type_name -> vtctldata.Workflow.Stream + 116, // 69: vtctldata.Workflow.ShardStream.tablet_controls:type_name -> topodata.Shard.TabletControl + 104, // 70: vtctldata.Workflow.Stream.tablet:type_name -> topodata.TabletAlias + 117, // 71: vtctldata.Workflow.Stream.binlog_source:type_name -> binlogdata.BinlogSource + 110, // 72: vtctldata.Workflow.Stream.transaction_timestamp:type_name -> vttime.Time + 110, // 73: vtctldata.Workflow.Stream.time_updated:type_name -> vttime.Time + 91, // 74: vtctldata.Workflow.Stream.copy_states:type_name -> vtctldata.Workflow.Stream.CopyState + 5, // 75: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry.value:type_name -> vtctldata.Shard + 115, // 76: vtctldata.GetCellsAliasesResponse.AliasesEntry.value:type_name -> topodata.CellsAlias + 118, // 77: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> topodata.SrvKeyspace + 114, // 78: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry.value:type_name 
-> vschema.SrvVSchema + 119, // 79: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry.value:type_name -> replicationdata.Status + 106, // 80: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry.value:type_name -> topodata.Tablet + 81, // [81:81] is the sub-list for method output_type + 81, // [81:81] is the sub-list for method input_type + 81, // [81:81] is the sub-list for extension type_name + 81, // [81:81] is the sub-list for extension extendee + 0, // [0:81] is the sub-list for field type_name } func init() { file_vtctldata_proto_init() } @@ -6136,7 +6291,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChangeTabletTypeRequest); i { + switch v := v.(*ApplyVSchemaRequest); i { case 0: return &v.state case 1: @@ -6148,7 +6303,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChangeTabletTypeResponse); i { + switch v := v.(*ApplyVSchemaResponse); i { case 0: return &v.state case 1: @@ -6160,7 +6315,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateKeyspaceRequest); i { + switch v := v.(*ChangeTabletTypeRequest); i { case 0: return &v.state case 1: @@ -6172,7 +6327,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateKeyspaceResponse); i { + switch v := v.(*ChangeTabletTypeResponse); i { case 0: return &v.state case 1: @@ -6184,7 +6339,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateShardRequest); i { + switch v := v.(*CreateKeyspaceRequest); i { case 0: return &v.state case 1: @@ -6196,7 +6351,7 @@ func file_vtctldata_proto_init() { } } 
file_vtctldata_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateShardResponse); i { + switch v := v.(*CreateKeyspaceResponse); i { case 0: return &v.state case 1: @@ -6208,7 +6363,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteCellInfoRequest); i { + switch v := v.(*CreateShardRequest); i { case 0: return &v.state case 1: @@ -6220,7 +6375,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteCellInfoResponse); i { + switch v := v.(*CreateShardResponse); i { case 0: return &v.state case 1: @@ -6232,7 +6387,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteCellsAliasRequest); i { + switch v := v.(*DeleteCellInfoRequest); i { case 0: return &v.state case 1: @@ -6244,7 +6399,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteCellsAliasResponse); i { + switch v := v.(*DeleteCellInfoResponse); i { case 0: return &v.state case 1: @@ -6256,7 +6411,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteKeyspaceRequest); i { + switch v := v.(*DeleteCellsAliasRequest); i { case 0: return &v.state case 1: @@ -6268,7 +6423,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteKeyspaceResponse); i { + switch v := v.(*DeleteCellsAliasResponse); i { case 0: return &v.state case 1: @@ -6280,7 +6435,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch 
v := v.(*DeleteShardsRequest); i { + switch v := v.(*DeleteKeyspaceRequest); i { case 0: return &v.state case 1: @@ -6292,7 +6447,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteShardsResponse); i { + switch v := v.(*DeleteKeyspaceResponse); i { case 0: return &v.state case 1: @@ -6304,7 +6459,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteTabletsRequest); i { + switch v := v.(*DeleteShardsRequest); i { case 0: return &v.state case 1: @@ -6316,7 +6471,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteTabletsResponse); i { + switch v := v.(*DeleteShardsResponse); i { case 0: return &v.state case 1: @@ -6328,7 +6483,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EmergencyReparentShardRequest); i { + switch v := v.(*DeleteTabletsRequest); i { case 0: return &v.state case 1: @@ -6340,7 +6495,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EmergencyReparentShardResponse); i { + switch v := v.(*DeleteTabletsResponse); i { case 0: return &v.state case 1: @@ -6352,7 +6507,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FindAllShardsInKeyspaceRequest); i { + switch v := v.(*EmergencyReparentShardRequest); i { case 0: return &v.state case 1: @@ -6364,7 +6519,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FindAllShardsInKeyspaceResponse); i { + switch v := 
v.(*EmergencyReparentShardResponse); i { case 0: return &v.state case 1: @@ -6376,7 +6531,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBackupsRequest); i { + switch v := v.(*FindAllShardsInKeyspaceRequest); i { case 0: return &v.state case 1: @@ -6388,7 +6543,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBackupsResponse); i { + switch v := v.(*FindAllShardsInKeyspaceResponse); i { case 0: return &v.state case 1: @@ -6400,7 +6555,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellInfoRequest); i { + switch v := v.(*GetBackupsRequest); i { case 0: return &v.state case 1: @@ -6412,7 +6567,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellInfoResponse); i { + switch v := v.(*GetBackupsResponse); i { case 0: return &v.state case 1: @@ -6424,7 +6579,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellInfoNamesRequest); i { + switch v := v.(*GetCellInfoRequest); i { case 0: return &v.state case 1: @@ -6436,7 +6591,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellInfoNamesResponse); i { + switch v := v.(*GetCellInfoResponse); i { case 0: return &v.state case 1: @@ -6448,7 +6603,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellsAliasesRequest); i { + switch v := v.(*GetCellInfoNamesRequest); i { case 0: return &v.state case 1: @@ -6460,7 +6615,7 @@ func 
file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellsAliasesResponse); i { + switch v := v.(*GetCellInfoNamesResponse); i { case 0: return &v.state case 1: @@ -6472,7 +6627,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyspacesRequest); i { + switch v := v.(*GetCellsAliasesRequest); i { case 0: return &v.state case 1: @@ -6484,7 +6639,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyspacesResponse); i { + switch v := v.(*GetCellsAliasesResponse); i { case 0: return &v.state case 1: @@ -6496,7 +6651,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyspaceRequest); i { + switch v := v.(*GetKeyspacesRequest); i { case 0: return &v.state case 1: @@ -6508,7 +6663,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyspaceResponse); i { + switch v := v.(*GetKeyspacesResponse); i { case 0: return &v.state case 1: @@ -6520,7 +6675,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetRoutingRulesRequest); i { + switch v := v.(*GetKeyspaceRequest); i { case 0: return &v.state case 1: @@ -6532,7 +6687,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetRoutingRulesResponse); i { + switch v := v.(*GetKeyspaceResponse); i { case 0: return &v.state case 1: @@ -6544,7 +6699,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[47].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*GetSchemaRequest); i { + switch v := v.(*GetRoutingRulesRequest); i { case 0: return &v.state case 1: @@ -6556,7 +6711,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSchemaResponse); i { + switch v := v.(*GetRoutingRulesResponse); i { case 0: return &v.state case 1: @@ -6568,7 +6723,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardRequest); i { + switch v := v.(*GetSchemaRequest); i { case 0: return &v.state case 1: @@ -6580,7 +6735,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardResponse); i { + switch v := v.(*GetSchemaResponse); i { case 0: return &v.state case 1: @@ -6592,7 +6747,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvKeyspacesRequest); i { + switch v := v.(*GetShardRequest); i { case 0: return &v.state case 1: @@ -6604,7 +6759,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvKeyspacesResponse); i { + switch v := v.(*GetShardResponse); i { case 0: return &v.state case 1: @@ -6616,7 +6771,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvVSchemaRequest); i { + switch v := v.(*GetSrvKeyspacesRequest); i { case 0: return &v.state case 1: @@ -6628,7 +6783,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvVSchemaResponse); i { + switch v := v.(*GetSrvKeyspacesResponse); i { case 0: return &v.state case 
1: @@ -6640,7 +6795,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvVSchemasRequest); i { + switch v := v.(*GetSrvVSchemaRequest); i { case 0: return &v.state case 1: @@ -6652,7 +6807,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvVSchemasResponse); i { + switch v := v.(*GetSrvVSchemaResponse); i { case 0: return &v.state case 1: @@ -6664,7 +6819,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTabletRequest); i { + switch v := v.(*GetSrvVSchemasRequest); i { case 0: return &v.state case 1: @@ -6676,7 +6831,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTabletResponse); i { + switch v := v.(*GetSrvVSchemasResponse); i { case 0: return &v.state case 1: @@ -6688,7 +6843,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTabletsRequest); i { + switch v := v.(*GetTabletRequest); i { case 0: return &v.state case 1: @@ -6700,7 +6855,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTabletsResponse); i { + switch v := v.(*GetTabletResponse); i { case 0: return &v.state case 1: @@ -6712,7 +6867,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVSchemaRequest); i { + switch v := v.(*GetTabletsRequest); i { case 0: return &v.state case 1: @@ -6724,7 +6879,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[62].Exporter = func(v interface{}, i 
int) interface{} { - switch v := v.(*GetVSchemaResponse); i { + switch v := v.(*GetTabletsResponse); i { case 0: return &v.state case 1: @@ -6736,7 +6891,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowsRequest); i { + switch v := v.(*GetVSchemaRequest); i { case 0: return &v.state case 1: @@ -6748,7 +6903,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowsResponse); i { + switch v := v.(*GetVSchemaResponse); i { case 0: return &v.state case 1: @@ -6760,7 +6915,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InitShardPrimaryRequest); i { + switch v := v.(*GetWorkflowsRequest); i { case 0: return &v.state case 1: @@ -6772,7 +6927,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InitShardPrimaryResponse); i { + switch v := v.(*GetWorkflowsResponse); i { case 0: return &v.state case 1: @@ -6784,7 +6939,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PlannedReparentShardRequest); i { + switch v := v.(*InitShardPrimaryRequest); i { case 0: return &v.state case 1: @@ -6796,7 +6951,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PlannedReparentShardResponse); i { + switch v := v.(*InitShardPrimaryResponse); i { case 0: return &v.state case 1: @@ -6808,7 +6963,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RebuildVSchemaGraphRequest); i { + switch v := 
v.(*PlannedReparentShardRequest); i { case 0: return &v.state case 1: @@ -6820,7 +6975,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RebuildVSchemaGraphResponse); i { + switch v := v.(*PlannedReparentShardResponse); i { case 0: return &v.state case 1: @@ -6832,7 +6987,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveKeyspaceCellRequest); i { + switch v := v.(*RebuildVSchemaGraphRequest); i { case 0: return &v.state case 1: @@ -6844,7 +6999,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveKeyspaceCellResponse); i { + switch v := v.(*RebuildVSchemaGraphResponse); i { case 0: return &v.state case 1: @@ -6856,7 +7011,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveShardCellRequest); i { + switch v := v.(*RemoveKeyspaceCellRequest); i { case 0: return &v.state case 1: @@ -6868,7 +7023,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveShardCellResponse); i { + switch v := v.(*RemoveKeyspaceCellResponse); i { case 0: return &v.state case 1: @@ -6880,7 +7035,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReparentTabletRequest); i { + switch v := v.(*RemoveShardCellRequest); i { case 0: return &v.state case 1: @@ -6892,7 +7047,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReparentTabletResponse); i { + switch v := v.(*RemoveShardCellResponse); i { case 0: return 
&v.state case 1: @@ -6904,7 +7059,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationPositionsRequest); i { + switch v := v.(*ReparentTabletRequest); i { case 0: return &v.state case 1: @@ -6916,7 +7071,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationPositionsResponse); i { + switch v := v.(*ReparentTabletResponse); i { case 0: return &v.state case 1: @@ -6928,7 +7083,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TabletExternallyReparentedRequest); i { + switch v := v.(*ShardReplicationPositionsRequest); i { case 0: return &v.state case 1: @@ -6940,7 +7095,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TabletExternallyReparentedResponse); i { + switch v := v.(*ShardReplicationPositionsResponse); i { case 0: return &v.state case 1: @@ -6952,7 +7107,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateCellInfoRequest); i { + switch v := v.(*TabletExternallyReparentedRequest); i { case 0: return &v.state case 1: @@ -6964,7 +7119,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateCellInfoResponse); i { + switch v := v.(*TabletExternallyReparentedResponse); i { case 0: return &v.state case 1: @@ -6976,7 +7131,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateCellsAliasRequest); i { + switch v := v.(*UpdateCellInfoRequest); i { case 0: return 
&v.state case 1: @@ -6988,7 +7143,19 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateCellsAliasResponse); i { + switch v := v.(*UpdateCellInfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateCellsAliasRequest); i { case 0: return &v.state case 1: @@ -7000,6 +7167,18 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateCellsAliasResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Workflow_ReplicationLocation); i { case 0: return &v.state @@ -7011,7 +7190,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Workflow_ShardStream); i { case 0: return &v.state @@ -7023,7 +7202,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Workflow_Stream); i { case 0: return &v.state @@ -7035,7 +7214,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Workflow_Stream_CopyState); i { case 0: return &v.state @@ 
-7054,7 +7233,7 @@ func file_vtctldata_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_vtctldata_proto_rawDesc, NumEnums: 0, - NumMessages: 96, + NumMessages: 98, NumExtensions: 0, NumServices: 0, }, diff --git a/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go b/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go index bc3894a319c..19110ed7d6c 100644 --- a/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go +++ b/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go @@ -1048,6 +1048,139 @@ func (m *ApplyRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er return len(dAtA) - i, nil } +func (m *ApplyVSchemaRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApplyVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ApplyVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Sql) > 0 { + i -= len(m.Sql) + copy(dAtA[i:], m.Sql) + i = encodeVarint(dAtA, i, uint64(len(m.Sql))) + i-- + dAtA[i] = 0x32 + } + if m.VSchema != nil { + { + size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.DryRun { + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.SkipRebuild { + 
i-- + if m.SkipRebuild { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ApplyVSchemaResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApplyVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ApplyVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.VSchema != nil { + { + size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *ChangeTabletTypeRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -5210,6 +5343,58 @@ func (m *ApplyRoutingRulesResponse) SizeVT() (n int) { return n } +func (m *ApplyVSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.SkipRebuild { + n += 2 + } + if m.DryRun { + n += 2 + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.VSchema != nil { + l = m.VSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Sql) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.unknownFields != nil { + n += len(m.unknownFields) + } + return n +} + +func (m *ApplyVSchemaResponse) 
SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VSchema != nil { + l = m.VSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.unknownFields != nil { + n += len(m.unknownFields) + } + return n +} + func (m *ChangeTabletTypeRequest) SizeVT() (n int) { if m == nil { return 0 @@ -9212,6 +9397,316 @@ func (m *ApplyRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyVSchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipRebuild", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipRebuild = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DryRun = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VSchema == nil { + m.VSchema = &vschema.Keyspace{} + } + if err := m.VSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Sql", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sql = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplyVSchemaResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyVSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VSchema == nil { + m.VSchema = &vschema.Keyspace{} + } + if err := m.VSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ChangeTabletTypeRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/go/vt/proto/vtctlservice/vtctlservice.pb.go b/go/vt/proto/vtctlservice/vtctlservice.pb.go index d9a9ed0e2fa..5085e544edd 100644 --- a/go/vt/proto/vtctlservice/vtctlservice.pb.go +++ b/go/vt/proto/vtctlservice/vtctlservice.pb.go @@ -51,7 +51,7 @@ var file_vtctlservice_proto_rawDesc = []byte{ 0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x32, 0xdc, 0x1b, 0x0a, 0x06, 0x56, 0x74, 0x63, 0x74, 0x6c, + 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x32, 0xaf, 0x1c, 0x0a, 0x06, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x0b, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, @@ -68,215 +68,220 @@ 
var file_vtctlservice_proto_rawDesc = []byte{ 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x43, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x22, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 
- 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x22, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 
0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x45, - 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x6d, 0x65, 0x72, - 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x17, - 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, - 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 
0x49, 0x6e, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x1c, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, - 0x0b, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x41, 0x70, 0x70, + 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x43, 
0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x20, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, + 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, - 0x10, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 
0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, + 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, - 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, - 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, - 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, + 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x20, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, + 0x0a, 0x16, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, + 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x72, 0x0a, 0x17, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 
0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, + 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, + 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, + 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, + 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x5d, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 
0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, - 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x21, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x53, 0x63, 
0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x45, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, - 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 
0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, + 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, + 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, 0x65, + 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, + 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, - 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, - 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x20, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, - 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, - 0x0a, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, - 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 
0x6d, 0x61, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x49, 0x6e, - 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x22, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, - 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x50, 0x6c, 0x61, - 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, - 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 
0x64, 0x52, 0x65, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x25, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, - 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, - 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x12, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, - 0x6c, 0x6c, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, - 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, + 0x74, 0x53, 
0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x53, 0x72, + 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, + 0x0e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, + 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, + 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1c, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 
0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, + 0x0a, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1c, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, + 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, + 0x10, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, + 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, + 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 
0x6e, 0x74, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, + 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x52, 0x65, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x25, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x63, 0x0a, 0x12, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, - 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, - 0x0e, 0x52, 0x65, 0x70, 
0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, - 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x78, 0x0a, 0x19, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, + 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x78, 0x0a, 0x19, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x50, 0x6f, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x7b, 0x0a, 0x1a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x12, 0x2c, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, - 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, - 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 
0x73, 0x65, 0x22, 0x00, 0x42, 0x2b, 0x5a, 0x29, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, - 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x7b, 0x0a, 0x1a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, + 0x64, 0x12, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x57, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 
0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x22, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x2b, 0x5a, 0x29, 0x76, 0x69, 0x74, 0x65, + 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, + 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_vtctlservice_proto_goTypes = []interface{}{ @@ -284,166 +289,170 @@ var file_vtctlservice_proto_goTypes = []interface{}{ (*vtctldata.AddCellInfoRequest)(nil), // 1: vtctldata.AddCellInfoRequest (*vtctldata.AddCellsAliasRequest)(nil), // 2: vtctldata.AddCellsAliasRequest (*vtctldata.ApplyRoutingRulesRequest)(nil), // 3: vtctldata.ApplyRoutingRulesRequest - (*vtctldata.ChangeTabletTypeRequest)(nil), // 4: vtctldata.ChangeTabletTypeRequest - (*vtctldata.CreateKeyspaceRequest)(nil), // 5: vtctldata.CreateKeyspaceRequest - (*vtctldata.CreateShardRequest)(nil), // 6: vtctldata.CreateShardRequest - (*vtctldata.DeleteCellInfoRequest)(nil), // 7: vtctldata.DeleteCellInfoRequest - (*vtctldata.DeleteCellsAliasRequest)(nil), // 8: vtctldata.DeleteCellsAliasRequest - (*vtctldata.DeleteKeyspaceRequest)(nil), // 9: 
vtctldata.DeleteKeyspaceRequest - (*vtctldata.DeleteShardsRequest)(nil), // 10: vtctldata.DeleteShardsRequest - (*vtctldata.DeleteTabletsRequest)(nil), // 11: vtctldata.DeleteTabletsRequest - (*vtctldata.EmergencyReparentShardRequest)(nil), // 12: vtctldata.EmergencyReparentShardRequest - (*vtctldata.FindAllShardsInKeyspaceRequest)(nil), // 13: vtctldata.FindAllShardsInKeyspaceRequest - (*vtctldata.GetBackupsRequest)(nil), // 14: vtctldata.GetBackupsRequest - (*vtctldata.GetCellInfoRequest)(nil), // 15: vtctldata.GetCellInfoRequest - (*vtctldata.GetCellInfoNamesRequest)(nil), // 16: vtctldata.GetCellInfoNamesRequest - (*vtctldata.GetCellsAliasesRequest)(nil), // 17: vtctldata.GetCellsAliasesRequest - (*vtctldata.GetKeyspaceRequest)(nil), // 18: vtctldata.GetKeyspaceRequest - (*vtctldata.GetKeyspacesRequest)(nil), // 19: vtctldata.GetKeyspacesRequest - (*vtctldata.GetRoutingRulesRequest)(nil), // 20: vtctldata.GetRoutingRulesRequest - (*vtctldata.GetSchemaRequest)(nil), // 21: vtctldata.GetSchemaRequest - (*vtctldata.GetShardRequest)(nil), // 22: vtctldata.GetShardRequest - (*vtctldata.GetSrvKeyspacesRequest)(nil), // 23: vtctldata.GetSrvKeyspacesRequest - (*vtctldata.GetSrvVSchemaRequest)(nil), // 24: vtctldata.GetSrvVSchemaRequest - (*vtctldata.GetSrvVSchemasRequest)(nil), // 25: vtctldata.GetSrvVSchemasRequest - (*vtctldata.GetTabletRequest)(nil), // 26: vtctldata.GetTabletRequest - (*vtctldata.GetTabletsRequest)(nil), // 27: vtctldata.GetTabletsRequest - (*vtctldata.GetVSchemaRequest)(nil), // 28: vtctldata.GetVSchemaRequest - (*vtctldata.GetWorkflowsRequest)(nil), // 29: vtctldata.GetWorkflowsRequest - (*vtctldata.InitShardPrimaryRequest)(nil), // 30: vtctldata.InitShardPrimaryRequest - (*vtctldata.PlannedReparentShardRequest)(nil), // 31: vtctldata.PlannedReparentShardRequest - (*vtctldata.RebuildVSchemaGraphRequest)(nil), // 32: vtctldata.RebuildVSchemaGraphRequest - (*vtctldata.RemoveKeyspaceCellRequest)(nil), // 33: vtctldata.RemoveKeyspaceCellRequest - 
(*vtctldata.RemoveShardCellRequest)(nil), // 34: vtctldata.RemoveShardCellRequest - (*vtctldata.ReparentTabletRequest)(nil), // 35: vtctldata.ReparentTabletRequest - (*vtctldata.ShardReplicationPositionsRequest)(nil), // 36: vtctldata.ShardReplicationPositionsRequest - (*vtctldata.TabletExternallyReparentedRequest)(nil), // 37: vtctldata.TabletExternallyReparentedRequest - (*vtctldata.UpdateCellInfoRequest)(nil), // 38: vtctldata.UpdateCellInfoRequest - (*vtctldata.UpdateCellsAliasRequest)(nil), // 39: vtctldata.UpdateCellsAliasRequest - (*vtctldata.ExecuteVtctlCommandResponse)(nil), // 40: vtctldata.ExecuteVtctlCommandResponse - (*vtctldata.AddCellInfoResponse)(nil), // 41: vtctldata.AddCellInfoResponse - (*vtctldata.AddCellsAliasResponse)(nil), // 42: vtctldata.AddCellsAliasResponse - (*vtctldata.ApplyRoutingRulesResponse)(nil), // 43: vtctldata.ApplyRoutingRulesResponse - (*vtctldata.ChangeTabletTypeResponse)(nil), // 44: vtctldata.ChangeTabletTypeResponse - (*vtctldata.CreateKeyspaceResponse)(nil), // 45: vtctldata.CreateKeyspaceResponse - (*vtctldata.CreateShardResponse)(nil), // 46: vtctldata.CreateShardResponse - (*vtctldata.DeleteCellInfoResponse)(nil), // 47: vtctldata.DeleteCellInfoResponse - (*vtctldata.DeleteCellsAliasResponse)(nil), // 48: vtctldata.DeleteCellsAliasResponse - (*vtctldata.DeleteKeyspaceResponse)(nil), // 49: vtctldata.DeleteKeyspaceResponse - (*vtctldata.DeleteShardsResponse)(nil), // 50: vtctldata.DeleteShardsResponse - (*vtctldata.DeleteTabletsResponse)(nil), // 51: vtctldata.DeleteTabletsResponse - (*vtctldata.EmergencyReparentShardResponse)(nil), // 52: vtctldata.EmergencyReparentShardResponse - (*vtctldata.FindAllShardsInKeyspaceResponse)(nil), // 53: vtctldata.FindAllShardsInKeyspaceResponse - (*vtctldata.GetBackupsResponse)(nil), // 54: vtctldata.GetBackupsResponse - (*vtctldata.GetCellInfoResponse)(nil), // 55: vtctldata.GetCellInfoResponse - (*vtctldata.GetCellInfoNamesResponse)(nil), // 56: vtctldata.GetCellInfoNamesResponse - 
(*vtctldata.GetCellsAliasesResponse)(nil), // 57: vtctldata.GetCellsAliasesResponse - (*vtctldata.GetKeyspaceResponse)(nil), // 58: vtctldata.GetKeyspaceResponse - (*vtctldata.GetKeyspacesResponse)(nil), // 59: vtctldata.GetKeyspacesResponse - (*vtctldata.GetRoutingRulesResponse)(nil), // 60: vtctldata.GetRoutingRulesResponse - (*vtctldata.GetSchemaResponse)(nil), // 61: vtctldata.GetSchemaResponse - (*vtctldata.GetShardResponse)(nil), // 62: vtctldata.GetShardResponse - (*vtctldata.GetSrvKeyspacesResponse)(nil), // 63: vtctldata.GetSrvKeyspacesResponse - (*vtctldata.GetSrvVSchemaResponse)(nil), // 64: vtctldata.GetSrvVSchemaResponse - (*vtctldata.GetSrvVSchemasResponse)(nil), // 65: vtctldata.GetSrvVSchemasResponse - (*vtctldata.GetTabletResponse)(nil), // 66: vtctldata.GetTabletResponse - (*vtctldata.GetTabletsResponse)(nil), // 67: vtctldata.GetTabletsResponse - (*vtctldata.GetVSchemaResponse)(nil), // 68: vtctldata.GetVSchemaResponse - (*vtctldata.GetWorkflowsResponse)(nil), // 69: vtctldata.GetWorkflowsResponse - (*vtctldata.InitShardPrimaryResponse)(nil), // 70: vtctldata.InitShardPrimaryResponse - (*vtctldata.PlannedReparentShardResponse)(nil), // 71: vtctldata.PlannedReparentShardResponse - (*vtctldata.RebuildVSchemaGraphResponse)(nil), // 72: vtctldata.RebuildVSchemaGraphResponse - (*vtctldata.RemoveKeyspaceCellResponse)(nil), // 73: vtctldata.RemoveKeyspaceCellResponse - (*vtctldata.RemoveShardCellResponse)(nil), // 74: vtctldata.RemoveShardCellResponse - (*vtctldata.ReparentTabletResponse)(nil), // 75: vtctldata.ReparentTabletResponse - (*vtctldata.ShardReplicationPositionsResponse)(nil), // 76: vtctldata.ShardReplicationPositionsResponse - (*vtctldata.TabletExternallyReparentedResponse)(nil), // 77: vtctldata.TabletExternallyReparentedResponse - (*vtctldata.UpdateCellInfoResponse)(nil), // 78: vtctldata.UpdateCellInfoResponse - (*vtctldata.UpdateCellsAliasResponse)(nil), // 79: vtctldata.UpdateCellsAliasResponse + (*vtctldata.ApplyVSchemaRequest)(nil), 
// 4: vtctldata.ApplyVSchemaRequest + (*vtctldata.ChangeTabletTypeRequest)(nil), // 5: vtctldata.ChangeTabletTypeRequest + (*vtctldata.CreateKeyspaceRequest)(nil), // 6: vtctldata.CreateKeyspaceRequest + (*vtctldata.CreateShardRequest)(nil), // 7: vtctldata.CreateShardRequest + (*vtctldata.DeleteCellInfoRequest)(nil), // 8: vtctldata.DeleteCellInfoRequest + (*vtctldata.DeleteCellsAliasRequest)(nil), // 9: vtctldata.DeleteCellsAliasRequest + (*vtctldata.DeleteKeyspaceRequest)(nil), // 10: vtctldata.DeleteKeyspaceRequest + (*vtctldata.DeleteShardsRequest)(nil), // 11: vtctldata.DeleteShardsRequest + (*vtctldata.DeleteTabletsRequest)(nil), // 12: vtctldata.DeleteTabletsRequest + (*vtctldata.EmergencyReparentShardRequest)(nil), // 13: vtctldata.EmergencyReparentShardRequest + (*vtctldata.FindAllShardsInKeyspaceRequest)(nil), // 14: vtctldata.FindAllShardsInKeyspaceRequest + (*vtctldata.GetBackupsRequest)(nil), // 15: vtctldata.GetBackupsRequest + (*vtctldata.GetCellInfoRequest)(nil), // 16: vtctldata.GetCellInfoRequest + (*vtctldata.GetCellInfoNamesRequest)(nil), // 17: vtctldata.GetCellInfoNamesRequest + (*vtctldata.GetCellsAliasesRequest)(nil), // 18: vtctldata.GetCellsAliasesRequest + (*vtctldata.GetKeyspaceRequest)(nil), // 19: vtctldata.GetKeyspaceRequest + (*vtctldata.GetKeyspacesRequest)(nil), // 20: vtctldata.GetKeyspacesRequest + (*vtctldata.GetRoutingRulesRequest)(nil), // 21: vtctldata.GetRoutingRulesRequest + (*vtctldata.GetSchemaRequest)(nil), // 22: vtctldata.GetSchemaRequest + (*vtctldata.GetShardRequest)(nil), // 23: vtctldata.GetShardRequest + (*vtctldata.GetSrvKeyspacesRequest)(nil), // 24: vtctldata.GetSrvKeyspacesRequest + (*vtctldata.GetSrvVSchemaRequest)(nil), // 25: vtctldata.GetSrvVSchemaRequest + (*vtctldata.GetSrvVSchemasRequest)(nil), // 26: vtctldata.GetSrvVSchemasRequest + (*vtctldata.GetTabletRequest)(nil), // 27: vtctldata.GetTabletRequest + (*vtctldata.GetTabletsRequest)(nil), // 28: vtctldata.GetTabletsRequest + 
(*vtctldata.GetVSchemaRequest)(nil), // 29: vtctldata.GetVSchemaRequest + (*vtctldata.GetWorkflowsRequest)(nil), // 30: vtctldata.GetWorkflowsRequest + (*vtctldata.InitShardPrimaryRequest)(nil), // 31: vtctldata.InitShardPrimaryRequest + (*vtctldata.PlannedReparentShardRequest)(nil), // 32: vtctldata.PlannedReparentShardRequest + (*vtctldata.RebuildVSchemaGraphRequest)(nil), // 33: vtctldata.RebuildVSchemaGraphRequest + (*vtctldata.RemoveKeyspaceCellRequest)(nil), // 34: vtctldata.RemoveKeyspaceCellRequest + (*vtctldata.RemoveShardCellRequest)(nil), // 35: vtctldata.RemoveShardCellRequest + (*vtctldata.ReparentTabletRequest)(nil), // 36: vtctldata.ReparentTabletRequest + (*vtctldata.ShardReplicationPositionsRequest)(nil), // 37: vtctldata.ShardReplicationPositionsRequest + (*vtctldata.TabletExternallyReparentedRequest)(nil), // 38: vtctldata.TabletExternallyReparentedRequest + (*vtctldata.UpdateCellInfoRequest)(nil), // 39: vtctldata.UpdateCellInfoRequest + (*vtctldata.UpdateCellsAliasRequest)(nil), // 40: vtctldata.UpdateCellsAliasRequest + (*vtctldata.ExecuteVtctlCommandResponse)(nil), // 41: vtctldata.ExecuteVtctlCommandResponse + (*vtctldata.AddCellInfoResponse)(nil), // 42: vtctldata.AddCellInfoResponse + (*vtctldata.AddCellsAliasResponse)(nil), // 43: vtctldata.AddCellsAliasResponse + (*vtctldata.ApplyRoutingRulesResponse)(nil), // 44: vtctldata.ApplyRoutingRulesResponse + (*vtctldata.ApplyVSchemaResponse)(nil), // 45: vtctldata.ApplyVSchemaResponse + (*vtctldata.ChangeTabletTypeResponse)(nil), // 46: vtctldata.ChangeTabletTypeResponse + (*vtctldata.CreateKeyspaceResponse)(nil), // 47: vtctldata.CreateKeyspaceResponse + (*vtctldata.CreateShardResponse)(nil), // 48: vtctldata.CreateShardResponse + (*vtctldata.DeleteCellInfoResponse)(nil), // 49: vtctldata.DeleteCellInfoResponse + (*vtctldata.DeleteCellsAliasResponse)(nil), // 50: vtctldata.DeleteCellsAliasResponse + (*vtctldata.DeleteKeyspaceResponse)(nil), // 51: vtctldata.DeleteKeyspaceResponse + 
(*vtctldata.DeleteShardsResponse)(nil), // 52: vtctldata.DeleteShardsResponse + (*vtctldata.DeleteTabletsResponse)(nil), // 53: vtctldata.DeleteTabletsResponse + (*vtctldata.EmergencyReparentShardResponse)(nil), // 54: vtctldata.EmergencyReparentShardResponse + (*vtctldata.FindAllShardsInKeyspaceResponse)(nil), // 55: vtctldata.FindAllShardsInKeyspaceResponse + (*vtctldata.GetBackupsResponse)(nil), // 56: vtctldata.GetBackupsResponse + (*vtctldata.GetCellInfoResponse)(nil), // 57: vtctldata.GetCellInfoResponse + (*vtctldata.GetCellInfoNamesResponse)(nil), // 58: vtctldata.GetCellInfoNamesResponse + (*vtctldata.GetCellsAliasesResponse)(nil), // 59: vtctldata.GetCellsAliasesResponse + (*vtctldata.GetKeyspaceResponse)(nil), // 60: vtctldata.GetKeyspaceResponse + (*vtctldata.GetKeyspacesResponse)(nil), // 61: vtctldata.GetKeyspacesResponse + (*vtctldata.GetRoutingRulesResponse)(nil), // 62: vtctldata.GetRoutingRulesResponse + (*vtctldata.GetSchemaResponse)(nil), // 63: vtctldata.GetSchemaResponse + (*vtctldata.GetShardResponse)(nil), // 64: vtctldata.GetShardResponse + (*vtctldata.GetSrvKeyspacesResponse)(nil), // 65: vtctldata.GetSrvKeyspacesResponse + (*vtctldata.GetSrvVSchemaResponse)(nil), // 66: vtctldata.GetSrvVSchemaResponse + (*vtctldata.GetSrvVSchemasResponse)(nil), // 67: vtctldata.GetSrvVSchemasResponse + (*vtctldata.GetTabletResponse)(nil), // 68: vtctldata.GetTabletResponse + (*vtctldata.GetTabletsResponse)(nil), // 69: vtctldata.GetTabletsResponse + (*vtctldata.GetVSchemaResponse)(nil), // 70: vtctldata.GetVSchemaResponse + (*vtctldata.GetWorkflowsResponse)(nil), // 71: vtctldata.GetWorkflowsResponse + (*vtctldata.InitShardPrimaryResponse)(nil), // 72: vtctldata.InitShardPrimaryResponse + (*vtctldata.PlannedReparentShardResponse)(nil), // 73: vtctldata.PlannedReparentShardResponse + (*vtctldata.RebuildVSchemaGraphResponse)(nil), // 74: vtctldata.RebuildVSchemaGraphResponse + (*vtctldata.RemoveKeyspaceCellResponse)(nil), // 75: 
vtctldata.RemoveKeyspaceCellResponse + (*vtctldata.RemoveShardCellResponse)(nil), // 76: vtctldata.RemoveShardCellResponse + (*vtctldata.ReparentTabletResponse)(nil), // 77: vtctldata.ReparentTabletResponse + (*vtctldata.ShardReplicationPositionsResponse)(nil), // 78: vtctldata.ShardReplicationPositionsResponse + (*vtctldata.TabletExternallyReparentedResponse)(nil), // 79: vtctldata.TabletExternallyReparentedResponse + (*vtctldata.UpdateCellInfoResponse)(nil), // 80: vtctldata.UpdateCellInfoResponse + (*vtctldata.UpdateCellsAliasResponse)(nil), // 81: vtctldata.UpdateCellsAliasResponse } var file_vtctlservice_proto_depIdxs = []int32{ 0, // 0: vtctlservice.Vtctl.ExecuteVtctlCommand:input_type -> vtctldata.ExecuteVtctlCommandRequest 1, // 1: vtctlservice.Vtctld.AddCellInfo:input_type -> vtctldata.AddCellInfoRequest 2, // 2: vtctlservice.Vtctld.AddCellsAlias:input_type -> vtctldata.AddCellsAliasRequest 3, // 3: vtctlservice.Vtctld.ApplyRoutingRules:input_type -> vtctldata.ApplyRoutingRulesRequest - 4, // 4: vtctlservice.Vtctld.ChangeTabletType:input_type -> vtctldata.ChangeTabletTypeRequest - 5, // 5: vtctlservice.Vtctld.CreateKeyspace:input_type -> vtctldata.CreateKeyspaceRequest - 6, // 6: vtctlservice.Vtctld.CreateShard:input_type -> vtctldata.CreateShardRequest - 7, // 7: vtctlservice.Vtctld.DeleteCellInfo:input_type -> vtctldata.DeleteCellInfoRequest - 8, // 8: vtctlservice.Vtctld.DeleteCellsAlias:input_type -> vtctldata.DeleteCellsAliasRequest - 9, // 9: vtctlservice.Vtctld.DeleteKeyspace:input_type -> vtctldata.DeleteKeyspaceRequest - 10, // 10: vtctlservice.Vtctld.DeleteShards:input_type -> vtctldata.DeleteShardsRequest - 11, // 11: vtctlservice.Vtctld.DeleteTablets:input_type -> vtctldata.DeleteTabletsRequest - 12, // 12: vtctlservice.Vtctld.EmergencyReparentShard:input_type -> vtctldata.EmergencyReparentShardRequest - 13, // 13: vtctlservice.Vtctld.FindAllShardsInKeyspace:input_type -> vtctldata.FindAllShardsInKeyspaceRequest - 14, // 14: 
vtctlservice.Vtctld.GetBackups:input_type -> vtctldata.GetBackupsRequest - 15, // 15: vtctlservice.Vtctld.GetCellInfo:input_type -> vtctldata.GetCellInfoRequest - 16, // 16: vtctlservice.Vtctld.GetCellInfoNames:input_type -> vtctldata.GetCellInfoNamesRequest - 17, // 17: vtctlservice.Vtctld.GetCellsAliases:input_type -> vtctldata.GetCellsAliasesRequest - 18, // 18: vtctlservice.Vtctld.GetKeyspace:input_type -> vtctldata.GetKeyspaceRequest - 19, // 19: vtctlservice.Vtctld.GetKeyspaces:input_type -> vtctldata.GetKeyspacesRequest - 20, // 20: vtctlservice.Vtctld.GetRoutingRules:input_type -> vtctldata.GetRoutingRulesRequest - 21, // 21: vtctlservice.Vtctld.GetSchema:input_type -> vtctldata.GetSchemaRequest - 22, // 22: vtctlservice.Vtctld.GetShard:input_type -> vtctldata.GetShardRequest - 23, // 23: vtctlservice.Vtctld.GetSrvKeyspaces:input_type -> vtctldata.GetSrvKeyspacesRequest - 24, // 24: vtctlservice.Vtctld.GetSrvVSchema:input_type -> vtctldata.GetSrvVSchemaRequest - 25, // 25: vtctlservice.Vtctld.GetSrvVSchemas:input_type -> vtctldata.GetSrvVSchemasRequest - 26, // 26: vtctlservice.Vtctld.GetTablet:input_type -> vtctldata.GetTabletRequest - 27, // 27: vtctlservice.Vtctld.GetTablets:input_type -> vtctldata.GetTabletsRequest - 28, // 28: vtctlservice.Vtctld.GetVSchema:input_type -> vtctldata.GetVSchemaRequest - 29, // 29: vtctlservice.Vtctld.GetWorkflows:input_type -> vtctldata.GetWorkflowsRequest - 30, // 30: vtctlservice.Vtctld.InitShardPrimary:input_type -> vtctldata.InitShardPrimaryRequest - 31, // 31: vtctlservice.Vtctld.PlannedReparentShard:input_type -> vtctldata.PlannedReparentShardRequest - 32, // 32: vtctlservice.Vtctld.RebuildVSchemaGraph:input_type -> vtctldata.RebuildVSchemaGraphRequest - 33, // 33: vtctlservice.Vtctld.RemoveKeyspaceCell:input_type -> vtctldata.RemoveKeyspaceCellRequest - 34, // 34: vtctlservice.Vtctld.RemoveShardCell:input_type -> vtctldata.RemoveShardCellRequest - 35, // 35: vtctlservice.Vtctld.ReparentTablet:input_type -> 
vtctldata.ReparentTabletRequest - 36, // 36: vtctlservice.Vtctld.ShardReplicationPositions:input_type -> vtctldata.ShardReplicationPositionsRequest - 37, // 37: vtctlservice.Vtctld.TabletExternallyReparented:input_type -> vtctldata.TabletExternallyReparentedRequest - 38, // 38: vtctlservice.Vtctld.UpdateCellInfo:input_type -> vtctldata.UpdateCellInfoRequest - 39, // 39: vtctlservice.Vtctld.UpdateCellsAlias:input_type -> vtctldata.UpdateCellsAliasRequest - 40, // 40: vtctlservice.Vtctl.ExecuteVtctlCommand:output_type -> vtctldata.ExecuteVtctlCommandResponse - 41, // 41: vtctlservice.Vtctld.AddCellInfo:output_type -> vtctldata.AddCellInfoResponse - 42, // 42: vtctlservice.Vtctld.AddCellsAlias:output_type -> vtctldata.AddCellsAliasResponse - 43, // 43: vtctlservice.Vtctld.ApplyRoutingRules:output_type -> vtctldata.ApplyRoutingRulesResponse - 44, // 44: vtctlservice.Vtctld.ChangeTabletType:output_type -> vtctldata.ChangeTabletTypeResponse - 45, // 45: vtctlservice.Vtctld.CreateKeyspace:output_type -> vtctldata.CreateKeyspaceResponse - 46, // 46: vtctlservice.Vtctld.CreateShard:output_type -> vtctldata.CreateShardResponse - 47, // 47: vtctlservice.Vtctld.DeleteCellInfo:output_type -> vtctldata.DeleteCellInfoResponse - 48, // 48: vtctlservice.Vtctld.DeleteCellsAlias:output_type -> vtctldata.DeleteCellsAliasResponse - 49, // 49: vtctlservice.Vtctld.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse - 50, // 50: vtctlservice.Vtctld.DeleteShards:output_type -> vtctldata.DeleteShardsResponse - 51, // 51: vtctlservice.Vtctld.DeleteTablets:output_type -> vtctldata.DeleteTabletsResponse - 52, // 52: vtctlservice.Vtctld.EmergencyReparentShard:output_type -> vtctldata.EmergencyReparentShardResponse - 53, // 53: vtctlservice.Vtctld.FindAllShardsInKeyspace:output_type -> vtctldata.FindAllShardsInKeyspaceResponse - 54, // 54: vtctlservice.Vtctld.GetBackups:output_type -> vtctldata.GetBackupsResponse - 55, // 55: vtctlservice.Vtctld.GetCellInfo:output_type -> 
vtctldata.GetCellInfoResponse - 56, // 56: vtctlservice.Vtctld.GetCellInfoNames:output_type -> vtctldata.GetCellInfoNamesResponse - 57, // 57: vtctlservice.Vtctld.GetCellsAliases:output_type -> vtctldata.GetCellsAliasesResponse - 58, // 58: vtctlservice.Vtctld.GetKeyspace:output_type -> vtctldata.GetKeyspaceResponse - 59, // 59: vtctlservice.Vtctld.GetKeyspaces:output_type -> vtctldata.GetKeyspacesResponse - 60, // 60: vtctlservice.Vtctld.GetRoutingRules:output_type -> vtctldata.GetRoutingRulesResponse - 61, // 61: vtctlservice.Vtctld.GetSchema:output_type -> vtctldata.GetSchemaResponse - 62, // 62: vtctlservice.Vtctld.GetShard:output_type -> vtctldata.GetShardResponse - 63, // 63: vtctlservice.Vtctld.GetSrvKeyspaces:output_type -> vtctldata.GetSrvKeyspacesResponse - 64, // 64: vtctlservice.Vtctld.GetSrvVSchema:output_type -> vtctldata.GetSrvVSchemaResponse - 65, // 65: vtctlservice.Vtctld.GetSrvVSchemas:output_type -> vtctldata.GetSrvVSchemasResponse - 66, // 66: vtctlservice.Vtctld.GetTablet:output_type -> vtctldata.GetTabletResponse - 67, // 67: vtctlservice.Vtctld.GetTablets:output_type -> vtctldata.GetTabletsResponse - 68, // 68: vtctlservice.Vtctld.GetVSchema:output_type -> vtctldata.GetVSchemaResponse - 69, // 69: vtctlservice.Vtctld.GetWorkflows:output_type -> vtctldata.GetWorkflowsResponse - 70, // 70: vtctlservice.Vtctld.InitShardPrimary:output_type -> vtctldata.InitShardPrimaryResponse - 71, // 71: vtctlservice.Vtctld.PlannedReparentShard:output_type -> vtctldata.PlannedReparentShardResponse - 72, // 72: vtctlservice.Vtctld.RebuildVSchemaGraph:output_type -> vtctldata.RebuildVSchemaGraphResponse - 73, // 73: vtctlservice.Vtctld.RemoveKeyspaceCell:output_type -> vtctldata.RemoveKeyspaceCellResponse - 74, // 74: vtctlservice.Vtctld.RemoveShardCell:output_type -> vtctldata.RemoveShardCellResponse - 75, // 75: vtctlservice.Vtctld.ReparentTablet:output_type -> vtctldata.ReparentTabletResponse - 76, // 76: 
vtctlservice.Vtctld.ShardReplicationPositions:output_type -> vtctldata.ShardReplicationPositionsResponse - 77, // 77: vtctlservice.Vtctld.TabletExternallyReparented:output_type -> vtctldata.TabletExternallyReparentedResponse - 78, // 78: vtctlservice.Vtctld.UpdateCellInfo:output_type -> vtctldata.UpdateCellInfoResponse - 79, // 79: vtctlservice.Vtctld.UpdateCellsAlias:output_type -> vtctldata.UpdateCellsAliasResponse - 40, // [40:80] is the sub-list for method output_type - 0, // [0:40] is the sub-list for method input_type + 4, // 4: vtctlservice.Vtctld.ApplyVSchema:input_type -> vtctldata.ApplyVSchemaRequest + 5, // 5: vtctlservice.Vtctld.ChangeTabletType:input_type -> vtctldata.ChangeTabletTypeRequest + 6, // 6: vtctlservice.Vtctld.CreateKeyspace:input_type -> vtctldata.CreateKeyspaceRequest + 7, // 7: vtctlservice.Vtctld.CreateShard:input_type -> vtctldata.CreateShardRequest + 8, // 8: vtctlservice.Vtctld.DeleteCellInfo:input_type -> vtctldata.DeleteCellInfoRequest + 9, // 9: vtctlservice.Vtctld.DeleteCellsAlias:input_type -> vtctldata.DeleteCellsAliasRequest + 10, // 10: vtctlservice.Vtctld.DeleteKeyspace:input_type -> vtctldata.DeleteKeyspaceRequest + 11, // 11: vtctlservice.Vtctld.DeleteShards:input_type -> vtctldata.DeleteShardsRequest + 12, // 12: vtctlservice.Vtctld.DeleteTablets:input_type -> vtctldata.DeleteTabletsRequest + 13, // 13: vtctlservice.Vtctld.EmergencyReparentShard:input_type -> vtctldata.EmergencyReparentShardRequest + 14, // 14: vtctlservice.Vtctld.FindAllShardsInKeyspace:input_type -> vtctldata.FindAllShardsInKeyspaceRequest + 15, // 15: vtctlservice.Vtctld.GetBackups:input_type -> vtctldata.GetBackupsRequest + 16, // 16: vtctlservice.Vtctld.GetCellInfo:input_type -> vtctldata.GetCellInfoRequest + 17, // 17: vtctlservice.Vtctld.GetCellInfoNames:input_type -> vtctldata.GetCellInfoNamesRequest + 18, // 18: vtctlservice.Vtctld.GetCellsAliases:input_type -> vtctldata.GetCellsAliasesRequest + 19, // 19: 
vtctlservice.Vtctld.GetKeyspace:input_type -> vtctldata.GetKeyspaceRequest + 20, // 20: vtctlservice.Vtctld.GetKeyspaces:input_type -> vtctldata.GetKeyspacesRequest + 21, // 21: vtctlservice.Vtctld.GetRoutingRules:input_type -> vtctldata.GetRoutingRulesRequest + 22, // 22: vtctlservice.Vtctld.GetSchema:input_type -> vtctldata.GetSchemaRequest + 23, // 23: vtctlservice.Vtctld.GetShard:input_type -> vtctldata.GetShardRequest + 24, // 24: vtctlservice.Vtctld.GetSrvKeyspaces:input_type -> vtctldata.GetSrvKeyspacesRequest + 25, // 25: vtctlservice.Vtctld.GetSrvVSchema:input_type -> vtctldata.GetSrvVSchemaRequest + 26, // 26: vtctlservice.Vtctld.GetSrvVSchemas:input_type -> vtctldata.GetSrvVSchemasRequest + 27, // 27: vtctlservice.Vtctld.GetTablet:input_type -> vtctldata.GetTabletRequest + 28, // 28: vtctlservice.Vtctld.GetTablets:input_type -> vtctldata.GetTabletsRequest + 29, // 29: vtctlservice.Vtctld.GetVSchema:input_type -> vtctldata.GetVSchemaRequest + 30, // 30: vtctlservice.Vtctld.GetWorkflows:input_type -> vtctldata.GetWorkflowsRequest + 31, // 31: vtctlservice.Vtctld.InitShardPrimary:input_type -> vtctldata.InitShardPrimaryRequest + 32, // 32: vtctlservice.Vtctld.PlannedReparentShard:input_type -> vtctldata.PlannedReparentShardRequest + 33, // 33: vtctlservice.Vtctld.RebuildVSchemaGraph:input_type -> vtctldata.RebuildVSchemaGraphRequest + 34, // 34: vtctlservice.Vtctld.RemoveKeyspaceCell:input_type -> vtctldata.RemoveKeyspaceCellRequest + 35, // 35: vtctlservice.Vtctld.RemoveShardCell:input_type -> vtctldata.RemoveShardCellRequest + 36, // 36: vtctlservice.Vtctld.ReparentTablet:input_type -> vtctldata.ReparentTabletRequest + 37, // 37: vtctlservice.Vtctld.ShardReplicationPositions:input_type -> vtctldata.ShardReplicationPositionsRequest + 38, // 38: vtctlservice.Vtctld.TabletExternallyReparented:input_type -> vtctldata.TabletExternallyReparentedRequest + 39, // 39: vtctlservice.Vtctld.UpdateCellInfo:input_type -> vtctldata.UpdateCellInfoRequest + 40, // 40: 
vtctlservice.Vtctld.UpdateCellsAlias:input_type -> vtctldata.UpdateCellsAliasRequest + 41, // 41: vtctlservice.Vtctl.ExecuteVtctlCommand:output_type -> vtctldata.ExecuteVtctlCommandResponse + 42, // 42: vtctlservice.Vtctld.AddCellInfo:output_type -> vtctldata.AddCellInfoResponse + 43, // 43: vtctlservice.Vtctld.AddCellsAlias:output_type -> vtctldata.AddCellsAliasResponse + 44, // 44: vtctlservice.Vtctld.ApplyRoutingRules:output_type -> vtctldata.ApplyRoutingRulesResponse + 45, // 45: vtctlservice.Vtctld.ApplyVSchema:output_type -> vtctldata.ApplyVSchemaResponse + 46, // 46: vtctlservice.Vtctld.ChangeTabletType:output_type -> vtctldata.ChangeTabletTypeResponse + 47, // 47: vtctlservice.Vtctld.CreateKeyspace:output_type -> vtctldata.CreateKeyspaceResponse + 48, // 48: vtctlservice.Vtctld.CreateShard:output_type -> vtctldata.CreateShardResponse + 49, // 49: vtctlservice.Vtctld.DeleteCellInfo:output_type -> vtctldata.DeleteCellInfoResponse + 50, // 50: vtctlservice.Vtctld.DeleteCellsAlias:output_type -> vtctldata.DeleteCellsAliasResponse + 51, // 51: vtctlservice.Vtctld.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse + 52, // 52: vtctlservice.Vtctld.DeleteShards:output_type -> vtctldata.DeleteShardsResponse + 53, // 53: vtctlservice.Vtctld.DeleteTablets:output_type -> vtctldata.DeleteTabletsResponse + 54, // 54: vtctlservice.Vtctld.EmergencyReparentShard:output_type -> vtctldata.EmergencyReparentShardResponse + 55, // 55: vtctlservice.Vtctld.FindAllShardsInKeyspace:output_type -> vtctldata.FindAllShardsInKeyspaceResponse + 56, // 56: vtctlservice.Vtctld.GetBackups:output_type -> vtctldata.GetBackupsResponse + 57, // 57: vtctlservice.Vtctld.GetCellInfo:output_type -> vtctldata.GetCellInfoResponse + 58, // 58: vtctlservice.Vtctld.GetCellInfoNames:output_type -> vtctldata.GetCellInfoNamesResponse + 59, // 59: vtctlservice.Vtctld.GetCellsAliases:output_type -> vtctldata.GetCellsAliasesResponse + 60, // 60: vtctlservice.Vtctld.GetKeyspace:output_type -> 
vtctldata.GetKeyspaceResponse + 61, // 61: vtctlservice.Vtctld.GetKeyspaces:output_type -> vtctldata.GetKeyspacesResponse + 62, // 62: vtctlservice.Vtctld.GetRoutingRules:output_type -> vtctldata.GetRoutingRulesResponse + 63, // 63: vtctlservice.Vtctld.GetSchema:output_type -> vtctldata.GetSchemaResponse + 64, // 64: vtctlservice.Vtctld.GetShard:output_type -> vtctldata.GetShardResponse + 65, // 65: vtctlservice.Vtctld.GetSrvKeyspaces:output_type -> vtctldata.GetSrvKeyspacesResponse + 66, // 66: vtctlservice.Vtctld.GetSrvVSchema:output_type -> vtctldata.GetSrvVSchemaResponse + 67, // 67: vtctlservice.Vtctld.GetSrvVSchemas:output_type -> vtctldata.GetSrvVSchemasResponse + 68, // 68: vtctlservice.Vtctld.GetTablet:output_type -> vtctldata.GetTabletResponse + 69, // 69: vtctlservice.Vtctld.GetTablets:output_type -> vtctldata.GetTabletsResponse + 70, // 70: vtctlservice.Vtctld.GetVSchema:output_type -> vtctldata.GetVSchemaResponse + 71, // 71: vtctlservice.Vtctld.GetWorkflows:output_type -> vtctldata.GetWorkflowsResponse + 72, // 72: vtctlservice.Vtctld.InitShardPrimary:output_type -> vtctldata.InitShardPrimaryResponse + 73, // 73: vtctlservice.Vtctld.PlannedReparentShard:output_type -> vtctldata.PlannedReparentShardResponse + 74, // 74: vtctlservice.Vtctld.RebuildVSchemaGraph:output_type -> vtctldata.RebuildVSchemaGraphResponse + 75, // 75: vtctlservice.Vtctld.RemoveKeyspaceCell:output_type -> vtctldata.RemoveKeyspaceCellResponse + 76, // 76: vtctlservice.Vtctld.RemoveShardCell:output_type -> vtctldata.RemoveShardCellResponse + 77, // 77: vtctlservice.Vtctld.ReparentTablet:output_type -> vtctldata.ReparentTabletResponse + 78, // 78: vtctlservice.Vtctld.ShardReplicationPositions:output_type -> vtctldata.ShardReplicationPositionsResponse + 79, // 79: vtctlservice.Vtctld.TabletExternallyReparented:output_type -> vtctldata.TabletExternallyReparentedResponse + 80, // 80: vtctlservice.Vtctld.UpdateCellInfo:output_type -> vtctldata.UpdateCellInfoResponse + 81, // 81: 
vtctlservice.Vtctld.UpdateCellsAlias:output_type -> vtctldata.UpdateCellsAliasResponse + 41, // [41:82] is the sub-list for method output_type + 0, // [0:41] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name diff --git a/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go b/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go index 08d74564bc7..1b548d562d9 100644 --- a/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go +++ b/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go @@ -144,6 +144,8 @@ type VtctldClient interface { AddCellsAlias(ctx context.Context, in *vtctldata.AddCellsAliasRequest, opts ...grpc.CallOption) (*vtctldata.AddCellsAliasResponse, error) // ApplyRoutingRules applies the VSchema routing rules. ApplyRoutingRules(ctx context.Context, in *vtctldata.ApplyRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldata.ApplyRoutingRulesResponse, error) + // ApplyVSchema applies a vschema to a keyspace. + ApplyVSchema(ctx context.Context, in *vtctldata.ApplyVSchemaRequest, opts ...grpc.CallOption) (*vtctldata.ApplyVSchemaResponse, error) // ChangeTabletType changes the db type for the specified tablet, if possible. // This is used primarily to arrange replicas, and it will not convert a // primary. For that, use InitShardPrimary. @@ -304,6 +306,15 @@ func (c *vtctldClient) ApplyRoutingRules(ctx context.Context, in *vtctldata.Appl return out, nil } +func (c *vtctldClient) ApplyVSchema(ctx context.Context, in *vtctldata.ApplyVSchemaRequest, opts ...grpc.CallOption) (*vtctldata.ApplyVSchemaResponse, error) { + out := new(vtctldata.ApplyVSchemaResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ApplyVSchema", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) ChangeTabletType(ctx context.Context, in *vtctldata.ChangeTabletTypeRequest, opts ...grpc.CallOption) (*vtctldata.ChangeTabletTypeResponse, error) { out := new(vtctldata.ChangeTabletTypeResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ChangeTabletType", in, out, opts...) @@ -644,6 +655,8 @@ type VtctldServer interface { AddCellsAlias(context.Context, *vtctldata.AddCellsAliasRequest) (*vtctldata.AddCellsAliasResponse, error) // ApplyRoutingRules applies the VSchema routing rules. ApplyRoutingRules(context.Context, *vtctldata.ApplyRoutingRulesRequest) (*vtctldata.ApplyRoutingRulesResponse, error) + // ApplyVSchema applies a vschema to a keyspace. + ApplyVSchema(context.Context, *vtctldata.ApplyVSchemaRequest) (*vtctldata.ApplyVSchemaResponse, error) // ChangeTabletType changes the db type for the specified tablet, if possible. // This is used primarily to arrange replicas, and it will not convert a // primary. For that, use InitShardPrimary. 
@@ -783,6 +796,9 @@ func (UnimplementedVtctldServer) AddCellsAlias(context.Context, *vtctldata.AddCe func (UnimplementedVtctldServer) ApplyRoutingRules(context.Context, *vtctldata.ApplyRoutingRulesRequest) (*vtctldata.ApplyRoutingRulesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ApplyRoutingRules not implemented") } +func (UnimplementedVtctldServer) ApplyVSchema(context.Context, *vtctldata.ApplyVSchemaRequest) (*vtctldata.ApplyVSchemaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplyVSchema not implemented") +} func (UnimplementedVtctldServer) ChangeTabletType(context.Context, *vtctldata.ChangeTabletTypeRequest) (*vtctldata.ChangeTabletTypeResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ChangeTabletType not implemented") } @@ -958,6 +974,24 @@ func _Vtctld_ApplyRoutingRules_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _Vtctld_ApplyVSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.ApplyVSchemaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).ApplyVSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/ApplyVSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).ApplyVSchema(ctx, req.(*vtctldata.ApplyVSchemaRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_ChangeTabletType_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(vtctldata.ChangeTabletTypeRequest) if err := dec(in); err != nil { @@ -1625,6 +1659,10 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{ MethodName: "ApplyRoutingRules", Handler: 
_Vtctld_ApplyRoutingRules_Handler, }, + { + MethodName: "ApplyVSchema", + Handler: _Vtctld_ApplyVSchema_Handler, + }, { MethodName: "ChangeTabletType", Handler: _Vtctld_ChangeTabletType_Handler, diff --git a/go/vt/vtadmin/api.go b/go/vt/vtadmin/api.go index fbe9d5d1b0f..50e75bd4cfc 100644 --- a/go/vt/vtadmin/api.go +++ b/go/vt/vtadmin/api.go @@ -22,6 +22,7 @@ import ( stderrors "errors" "fmt" "net/http" + "net/http/pprof" stdsort "sort" "strings" "sync" @@ -40,6 +41,7 @@ import ( "vitess.io/vitess/go/vt/vtadmin/errors" "vitess.io/vitess/go/vt/vtadmin/grpcserver" vtadminhttp "vitess.io/vitess/go/vt/vtadmin/http" + "vitess.io/vitess/go/vt/vtadmin/http/debug" "vitess.io/vitess/go/vt/vtadmin/http/experimental" vthandlers "vitess.io/vitess/go/vt/vtadmin/http/handlers" "vitess.io/vitess/go/vt/vtadmin/sort" @@ -126,6 +128,19 @@ func NewAPI(clusters []*cluster.Cluster, opts grpcserver.Options, httpOpts vtadm experimentalRouter := router.PathPrefix("/experimental").Subrouter() experimentalRouter.HandleFunc("/tablet/{tablet}/debug/vars", httpAPI.Adapt(experimental.TabletDebugVarsPassthrough)).Name("API.TabletDebugVarsPassthrough") + if !httpOpts.DisableDebug { + // Due to the way net/http/pprof insists on registering its handlers, we + // have to put these on the root router, and not on the /debug prefixed + // subrouter, which would make way more sense, but alas. Additional + // debug routes should still go on the /debug subrouter, though. + serv.Router().HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + serv.Router().HandleFunc("/debug/pprof/profile", pprof.Profile) + serv.Router().HandleFunc("/debug/pprof/symbol", pprof.Symbol) + serv.Router().PathPrefix("/debug/pprof").HandlerFunc(pprof.Index) + debugRouter := serv.Router().PathPrefix("/debug").Subrouter() + debugRouter.HandleFunc("/env", debug.Env) + } + // Middlewares are executed in order of addition. Our ordering (all // middlewares being optional) is: // 1. CORS. 
CORS is a special case and is applied globally, the rest are applied only to the subrouter. diff --git a/go/vt/vtadmin/http/api.go b/go/vt/vtadmin/http/api.go index f2474efe06f..95253571b1f 100644 --- a/go/vt/vtadmin/http/api.go +++ b/go/vt/vtadmin/http/api.go @@ -36,7 +36,10 @@ type Options struct { // DisableCompression specifies whether to turn off gzip compression for API // endpoints. It is named as the negative (as opposed to EnableTracing) so // the zero value has compression enabled. - DisableCompression bool + DisableCompression bool + // DisableDebug specifies whether to omit the /debug/pprof/* and /debug/env + // routes. + DisableDebug bool ExperimentalOptions struct { TabletURLTmpl string } diff --git a/go/vt/vtadmin/http/debug/debug.go b/go/vt/vtadmin/http/debug/debug.go new file mode 100644 index 00000000000..189a3cb76f3 --- /dev/null +++ b/go/vt/vtadmin/http/debug/debug.go @@ -0,0 +1,102 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package debug + +import ( + "fmt" + "net/http" + "os" + "sort" + "strings" + + "vitess.io/vitess/go/flagutil" +) + +var ( + // SanitizeEnv is the set of environment variables to sanitize their values + // in the Env http handler. + SanitizeEnv flagutil.StringSetFlag + // OmitEnv is the set of environment variables to omit entirely in the Env + // http handler. 
+ OmitEnv flagutil.StringSetFlag +) + +const sanitized = "********" + +// Env responds with a plaintext listing of key=value pairs of the environment +// variables, sorted by key name. +// +// If a variable appears in OmitEnv, it is excluded entirely. If a variable +// appears in SanitizeEnv, its value is replaced with a sanitized string, +// including if there was no value set in the environment. +func Env(w http.ResponseWriter, r *http.Request) { + vars := readEnv() + + msg := &strings.Builder{} + for i, kv := range vars { + msg.WriteString(fmt.Sprintf("%s=%s", kv[0], kv[1])) + if i < len(vars)-1 { + msg.WriteByte('\n') + } + } + + w.Write([]byte(msg.String())) +} + +func readEnv() [][2]string { + env := os.Environ() + vars := make([][2]string, 0, len(env)) + + var key, value string + for _, ev := range env { + parts := strings.SplitN(ev, "=", 2) + switch len(parts) { + case 0: + key = ev + case 1: + key = parts[0] + default: + key = parts[0] + value = parts[1] + } + + if key == "" { + continue + } + + if OmitEnv.ToSet().Has(key) { + continue + } + + if SanitizeEnv.ToSet().Has(key) { + value = sanitized + } + + vars = append(vars, [2]string{ + key, + value, + }) + } + + // Sort by env var name, ascending. + sort.SliceStable(vars, func(i, j int) bool { + left, right := vars[i], vars[j] + return left[0] < right[0] + }) + + return vars +} diff --git a/go/vt/vtctl/grpcvtctldclient/client_gen.go b/go/vt/vtctl/grpcvtctldclient/client_gen.go index 7625f167b45..4057b14994a 100644 --- a/go/vt/vtctl/grpcvtctldclient/client_gen.go +++ b/go/vt/vtctl/grpcvtctldclient/client_gen.go @@ -55,6 +55,15 @@ func (client *gRPCVtctldClient) ApplyRoutingRules(ctx context.Context, in *vtctl return client.c.ApplyRoutingRules(ctx, in, opts...) } +// ApplyVSchema is part of the vtctlservicepb.VtctldClient interface. 
+func (client *gRPCVtctldClient) ApplyVSchema(ctx context.Context, in *vtctldatapb.ApplyVSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyVSchemaResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.ApplyVSchema(ctx, in, opts...) +} + // ChangeTabletType is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) ChangeTabletType(ctx context.Context, in *vtctldatapb.ChangeTabletTypeRequest, opts ...grpc.CallOption) (*vtctldatapb.ChangeTabletTypeResponse, error) { if client.c == nil { diff --git a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go index 1b685104a7e..5ec1803845f 100644 --- a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go +++ b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go @@ -66,7 +66,7 @@ func TestInitShardPrimary(t *testing.T) { "FAKE SET MASTER", "START SLAVE", } - tablet2.FakeMysqlDaemon.SetMasterInput = fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort) + tablet2.FakeMysqlDaemon.SetReplicationSourceInput = fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort) tablet3.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE RESET ALL REPLICATION", @@ -75,7 +75,7 @@ func TestInitShardPrimary(t *testing.T) { "FAKE SET MASTER", "START SLAVE", } - tablet3.FakeMysqlDaemon.SetMasterInput = fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort) + tablet3.FakeMysqlDaemon.SetReplicationSourceInput = fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort) for _, tablet := range []*testlib.FakeTablet{tablet1, tablet2, tablet3} { tablet.StartActionLoop(t, wr) @@ -122,7 +122,7 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) { "FAKE SET MASTER", "START SLAVE", } - tablet2.FakeMysqlDaemon.SetMasterInput = fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, 
tablet1.Tablet.MysqlPort) + tablet2.FakeMysqlDaemon.SetReplicationSourceInput = fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort) tablet3.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE RESET ALL REPLICATION", @@ -130,7 +130,7 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) { "FAKE SET MASTER", "START SLAVE", } - tablet3.FakeMysqlDaemon.SetMasterInput = fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort) + tablet3.FakeMysqlDaemon.SetReplicationSourceInput = fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort) for _, tablet := range []*testlib.FakeTablet{tablet1, tablet2, tablet3} { tablet.StartActionLoop(t, wr) diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go index ec7f05c6576..55beb6a9b17 100644 --- a/go/vt/vtctl/grpcvtctldserver/server.go +++ b/go/vt/vtctl/grpcvtctldserver/server.go @@ -38,6 +38,7 @@ import ( "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/mysqlctl/mysqlctlproto" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" @@ -129,6 +130,65 @@ func (s *VtctldServer) ApplyRoutingRules(ctx context.Context, req *vtctldatapb.A return resp, nil } +// ApplyVSchema is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyVSchemaRequest) (*vtctldatapb.ApplyVSchemaResponse, error) { + if _, err := s.ts.GetKeyspace(ctx, req.Keyspace); err != nil { + if topo.IsErrType(err, topo.NoNode) { + return nil, vterrors.Wrapf(err, "keyspace(%s) doesn't exist, check if the keyspace is initialized", req.Keyspace) + } + return nil, vterrors.Wrapf(err, "GetKeyspace(%s)", req.Keyspace) + } + + if (req.Sql != "" && req.VSchema != nil) || (req.Sql == "" && req.VSchema == nil) { + return nil, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "must pass exactly one of req.VSchema and req.Sql") + } + + var vs *vschemapb.Keyspace + var err error + + if req.Sql != "" { + stmt, err := sqlparser.Parse(req.Sql) + if err != nil { + return nil, vterrors.Wrapf(err, "Parse(%s)", req.Sql) + } + ddl, ok := stmt.(*sqlparser.AlterVschema) + if !ok { + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "error parsing VSchema DDL statement `%s`", req.Sql) + } + + vs, err = s.ts.GetVSchema(ctx, req.Keyspace) + if err != nil && !topo.IsErrType(err, topo.NoNode) { + return nil, vterrors.Wrapf(err, "GetVSchema(%s)", req.Keyspace) + } // otherwise, we keep the empty vschema object from above + + vs, err = topotools.ApplyVSchemaDDL(req.Keyspace, vs, ddl) + if err != nil { + return nil, vterrors.Wrapf(err, "ApplyVSchemaDDL(%s,%v,%v)", req.Keyspace, vs, ddl) + } + } else { // "jsonMode" + vs = req.VSchema + } + + if req.DryRun { // we return what was passed in and parsed, rather than current + return &vtctldatapb.ApplyVSchemaResponse{VSchema: vs}, nil + } + + if err = s.ts.SaveVSchema(ctx, req.Keyspace, vs); err != nil { + return nil, vterrors.Wrapf(err, "SaveVSchema(%s, %v)", req.Keyspace, req.VSchema) + } + + if !req.SkipRebuild { + if err := s.ts.RebuildSrvVSchema(ctx, req.Cells); err != nil { + return nil, vterrors.Wrapf(err, "RebuildSrvVSchema") + } + } + updatedVS, err := s.ts.GetVSchema(ctx, req.Keyspace) + if err != nil { + return nil, 
vterrors.Wrapf(err, "GetVSchema(%s)", req.Keyspace) + } + return &vtctldatapb.ApplyVSchemaResponse{VSchema: updatedVS}, nil +} + // ChangeTabletType is part of the vtctlservicepb.VtctldServer interface. func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.ChangeTabletTypeRequest) (*vtctldatapb.ChangeTabletTypeResponse, error) { ctx, cancel := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) diff --git a/go/vt/vtctl/grpcvtctldserver/server_test.go b/go/vt/vtctl/grpcvtctldserver/server_test.go index 7eeaf3008dd..21a2fa21805 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_test.go @@ -44,7 +44,6 @@ import ( tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" - "vitess.io/vitess/go/vt/proto/vtctldata" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice" "vitess.io/vitess/go/vt/proto/vttime" @@ -141,7 +140,7 @@ func TestAddCellsAlias(t *testing.T) { name string ts *topo.Server setup func(ts *topo.Server) error - req *vtctldata.AddCellsAliasRequest + req *vtctldatapb.AddCellsAliasRequest shouldErr bool }{ { @@ -321,6 +320,162 @@ func TestApplyRoutingRules(t *testing.T) { } } +func TestApplyVSchema(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + req *vtctldatapb.ApplyVSchemaRequest + exp *vtctldatapb.ApplyVSchemaResponse + shouldErr bool + }{ + { + name: "normal", + req: &vtctldatapb.ApplyVSchemaRequest{ + Keyspace: "testkeyspace", + VSchema: &vschemapb.Keyspace{ + Sharded: false, + }, + }, + exp: &vtctldatapb.ApplyVSchemaResponse{ + VSchema: &vschemapb.Keyspace{ + Sharded: false, + }, + }, + shouldErr: false, + }, { + name: "skip rebuild", + req: &vtctldatapb.ApplyVSchemaRequest{ + Keyspace: "testkeyspace", + VSchema: &vschemapb.Keyspace{ + Sharded: false, + }, + SkipRebuild: true, + }, + exp: 
&vtctldatapb.ApplyVSchemaResponse{ + VSchema: &vschemapb.Keyspace{ + Sharded: false, + }, + }, + shouldErr: false, + }, { + name: "both", + req: &vtctldatapb.ApplyVSchemaRequest{ + Keyspace: "testkeyspace", + VSchema: &vschemapb.Keyspace{ + Sharded: false, + }, + Sql: "some vschema ddl here", + }, + shouldErr: true, + }, { + name: "neither", + req: &vtctldatapb.ApplyVSchemaRequest{ + Keyspace: "testkeyspace", + }, + shouldErr: true, + }, { + name: "dry run", + req: &vtctldatapb.ApplyVSchemaRequest{ + Keyspace: "testkeyspace", + VSchema: &vschemapb.Keyspace{ + Sharded: false, + }, + DryRun: true, + }, + exp: &vtctldatapb.ApplyVSchemaResponse{ + VSchema: &vschemapb.Keyspace{ + Sharded: false, + }, + }, + shouldErr: false, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + ts := memorytopo.NewServer("zone1") + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + testutil.AddKeyspace(ctx, t, ts, &vtctldatapb.Keyspace{ + Name: tt.req.Keyspace, + Keyspace: &topodatapb.Keyspace{ + KeyspaceType: topodatapb.KeyspaceType_NORMAL, + }, + }) + + origVSchema := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "v1": { + Type: "hash", + }, + }, + } + err := ts.SaveVSchema(ctx, tt.req.Keyspace, origVSchema) + require.NoError(t, err) + + origSrvVSchema := &vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ + "testkeyspace": { + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "v1": { + Type: "hash", + }, + }, + }, + }, + RoutingRules: &vschemapb.RoutingRules{ + Rules: []*vschemapb.RoutingRule{}, + }, + } + err = ts.UpdateSrvVSchema(ctx, "zone1", origSrvVSchema) + require.NoError(t, err) + + res, err := vtctld.ApplyVSchema(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + 
utils.MustMatch(t, tt.exp, res) + + if tt.req.DryRun { + actual, err := ts.GetVSchema(ctx, tt.req.Keyspace) + require.NoError(t, err) + utils.MustMatch(t, origVSchema, actual) + } + + finalSrvVSchema, err := ts.GetSrvVSchema(ctx, "zone1") + require.NoError(t, err) + + if tt.req.SkipRebuild || tt.req.DryRun { + utils.MustMatch(t, origSrvVSchema, finalSrvVSchema) + } else { + changedSrvVSchema := &vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ + "testkeyspace": { + Sharded: false, + }, + }, + RoutingRules: &vschemapb.RoutingRules{ + Rules: []*vschemapb.RoutingRule{}, + }, + } + utils.MustMatch(t, changedSrvVSchema, finalSrvVSchema) + } + }) + } +} + func TestChangeTabletType(t *testing.T) { t.Parallel() @@ -1132,7 +1287,7 @@ func TestDeleteCellsAlias(t *testing.T) { name string ts *topo.Server setup func(ts *topo.Server) error - req *vtctldata.DeleteCellsAliasRequest + req *vtctldatapb.DeleteCellsAliasRequest shouldErr bool }{ { diff --git a/go/vt/vtgate/endtoend/vstream_test.go b/go/vt/vtgate/endtoend/vstream_test.go index dd753f44fb7..a7f2bac8bf0 100644 --- a/go/vt/vtgate/endtoend/vstream_test.go +++ b/go/vt/vtgate/endtoend/vstream_test.go @@ -65,7 +65,7 @@ func TestVStream(t *testing.T) { gconn, conn, mconn, closeConnections := initialize(ctx, t) defer closeConnections() - mpos, err := mconn.MasterPosition() + mpos, err := mconn.PrimaryPosition() if err != nil { t.Fatal(err) } diff --git a/go/vt/vttablet/grpctmserver/server.go b/go/vt/vttablet/grpctmserver/server.go index b97a9edf88d..b76cc1e2365 100644 --- a/go/vt/vttablet/grpctmserver/server.go +++ b/go/vt/vttablet/grpctmserver/server.go @@ -260,7 +260,7 @@ func (s *server) ReplicationStatus(ctx context.Context, request *tabletmanagerda } func (s *server) MasterStatus(ctx context.Context, request *tabletmanagerdatapb.MasterStatusRequest) (response *tabletmanagerdatapb.MasterStatusResponse, err error) { - defer s.tm.HandleRPCPanic(ctx, "MasterStatus", request, response, false /*verbose*/, 
&err) + defer s.tm.HandleRPCPanic(ctx, "PrimaryStatus", request, response, false /*verbose*/, &err) ctx = callinfo.GRPCCallInfo(ctx) response = &tabletmanagerdatapb.MasterStatusResponse{} status, err := s.tm.MasterStatus(ctx) @@ -271,7 +271,7 @@ func (s *server) MasterStatus(ctx context.Context, request *tabletmanagerdatapb. } func (s *server) MasterPosition(ctx context.Context, request *tabletmanagerdatapb.MasterPositionRequest) (response *tabletmanagerdatapb.MasterPositionResponse, err error) { - defer s.tm.HandleRPCPanic(ctx, "MasterPosition", request, response, false /*verbose*/, &err) + defer s.tm.HandleRPCPanic(ctx, "PrimaryPosition", request, response, false /*verbose*/, &err) ctx = callinfo.GRPCCallInfo(ctx) response = &tabletmanagerdatapb.MasterPositionResponse{} position, err := s.tm.MasterPosition(ctx) diff --git a/go/vt/vttablet/tabletmanager/restore.go b/go/vt/vttablet/tabletmanager/restore.go index e6df4faa881..1c0eb121d1e 100644 --- a/go/vt/vttablet/tabletmanager/restore.go +++ b/go/vt/vttablet/tabletmanager/restore.go @@ -340,7 +340,7 @@ func (tm *TabletManager) getGTIDFromTimestamp(ctx context.Context, pos mysql.Pos return "", "", err } defer binlogConn.Close() - lastPos, err := binlogConn.MasterPosition() + lastPos, err := binlogConn.PrimaryPosition() if err != nil { return "", "", err } @@ -441,14 +441,14 @@ func (tm *TabletManager) catchupToGTID(ctx context.Context, afterGTIDPos string, return vterrors.Wrap(err, fmt.Sprintf("failed to restart the replication until %s GTID", afterGTIDStr)) } log.Infof("Waiting for position to reach", beforeGTIDPosParsed.GTIDSet.Last()) - // Could not use `agent.MysqlDaemon.WaitMasterPos` as replication is stopped with `START SLAVE UNTIL SQL_BEFORE_GTIDS` + // Could not use `agent.MysqlDaemon.WaitSourcePos` as replication is stopped with `START SLAVE UNTIL SQL_BEFORE_GTIDS` // this is as per https://dev.mysql.com/doc/refman/5.6/en/start-slave.html // We need to wait until replication catches upto the specified 
afterGTIDPos chGTIDCaughtup := make(chan bool) go func() { timeToWait := time.Now().Add(*timeoutForGTIDLookup) for time.Now().Before(timeToWait) { - pos, err := tm.MysqlDaemon.MasterPosition() + pos, err := tm.MysqlDaemon.PrimaryPosition() if err != nil { chGTIDCaughtup <- false } @@ -530,8 +530,8 @@ func (tm *TabletManager) startReplication(ctx context.Context, pos mysql.Positio } // Set master and start replication. - if err := tm.MysqlDaemon.SetMaster(ctx, ti.Tablet.MysqlHostname, int(ti.Tablet.MysqlPort), false /* stopReplicationBefore */, !*mysqlctl.DisableActiveReparents /* startReplicationAfter */); err != nil { - return vterrors.Wrap(err, "MysqlDaemon.SetMaster failed") + if err := tm.MysqlDaemon.SetReplicationSource(ctx, ti.Tablet.MysqlHostname, int(ti.Tablet.MysqlPort), false /* stopReplicationBefore */, !*mysqlctl.DisableActiveReparents /* startReplicationAfter */); err != nil { + return vterrors.Wrap(err, "MysqlDaemon.SetReplicationSource failed") } // If active reparents are disabled, we don't restart replication. So it makes no sense to wait for an update on the replica. 
@@ -541,7 +541,7 @@ func (tm *TabletManager) startReplication(ctx context.Context, pos mysql.Positio } // wait for reliable seconds behind master // we have pos where we want to resume from - // if MasterPosition is the same, that means no writes + // if PrimaryPosition is the same, that means no writes // have happened to master, so we are up-to-date // otherwise, wait for replica's Position to change from // the initial pos before proceeding diff --git a/go/vt/vttablet/tabletmanager/rpc_agent.go b/go/vt/vttablet/tabletmanager/rpc_agent.go index 473cac18aff..3b9b108ab7f 100644 --- a/go/vt/vttablet/tabletmanager/rpc_agent.go +++ b/go/vt/vttablet/tabletmanager/rpc_agent.go @@ -79,8 +79,11 @@ type RPCTM interface { ExecuteFetchAsApp(ctx context.Context, query []byte, maxrows int) (*querypb.QueryResult, error) // Replication related methods + // Deprecated, use PrimaryStatus instead MasterStatus(ctx context.Context) (*replicationdatapb.MasterStatus, error) + PrimaryStatus(ctx context.Context) (*replicationdatapb.MasterStatus, error) + ReplicationStatus(ctx context.Context) (*replicationdatapb.Status, error) StopReplication(ctx context.Context) error @@ -92,9 +95,11 @@ type RPCTM interface { StartReplicationUntilAfter(ctx context.Context, position string, waitTime time.Duration) error GetReplicas(ctx context.Context) ([]string, error) - + // Deprecated, use PrimaryPosition instead MasterPosition(ctx context.Context) (string, error) + PrimaryPosition(ctx context.Context) (string, error) + WaitForPosition(ctx context.Context, pos string) error // VExec generic API @@ -108,20 +113,32 @@ type RPCTM interface { ResetReplication(ctx context.Context) error + // Deprecated, use InitPrimary instead InitMaster(ctx context.Context) (string, error) + InitPrimary(ctx context.Context) (string, error) + PopulateReparentJournal(ctx context.Context, timeCreatedNS int64, actionName string, masterAlias *topodatapb.TabletAlias, pos string) error InitReplica(ctx context.Context, parent 
*topodatapb.TabletAlias, replicationPosition string, timeCreatedNS int64) error + // Deprecated, use DemotePrimary instead DemoteMaster(ctx context.Context) (*replicationdatapb.MasterStatus, error) + // Deprecated, use UndoDemotePrimary instead UndoDemoteMaster(ctx context.Context) error + DemotePrimary(ctx context.Context) (*replicationdatapb.MasterStatus, error) + + UndoDemotePrimary(ctx context.Context) error + ReplicaWasPromoted(ctx context.Context) error + // Deprecated, use SetReplicationSource instead SetMaster(ctx context.Context, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) error + SetReplicationSource(ctx context.Context, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) error + StopReplicationAndGetStatus(ctx context.Context, stopReplicationMode replicationdatapb.StopReplicationMode) (StopReplicationAndGetStatusResponse, error) ReplicaWasRestarted(ctx context.Context, parent *topodatapb.TabletAlias) error diff --git a/go/vt/vttablet/tabletmanager/rpc_replication.go b/go/vt/vttablet/tabletmanager/rpc_replication.go index 4b88f4c854a..ccaef04a22f 100644 --- a/go/vt/vttablet/tabletmanager/rpc_replication.go +++ b/go/vt/vttablet/tabletmanager/rpc_replication.go @@ -53,29 +53,39 @@ func (tm *TabletManager) ReplicationStatus(ctx context.Context) (*replicationdat // MasterStatus returns the replication status fopr a master tablet. func (tm *TabletManager) MasterStatus(ctx context.Context) (*replicationdatapb.MasterStatus, error) { - status, err := tm.MysqlDaemon.MasterStatus(ctx) + return tm.PrimaryStatus(ctx) +} + +// PrimaryStatus returns the replication status fopr a master tablet. 
+func (tm *TabletManager) PrimaryStatus(ctx context.Context) (*replicationdatapb.MasterStatus, error) { + status, err := tm.MysqlDaemon.PrimaryStatus(ctx) if err != nil { return nil, err } - return mysql.MasterStatusToProto(status), nil + return mysql.PrimaryStatusToProto(status), nil } // MasterPosition returns the master position func (tm *TabletManager) MasterPosition(ctx context.Context) (string, error) { - pos, err := tm.MysqlDaemon.MasterPosition() + return tm.PrimaryPosition(ctx) +} + +// PrimaryPosition returns the position of a primary database +func (tm *TabletManager) PrimaryPosition(ctx context.Context) (string, error) { + pos, err := tm.MysqlDaemon.PrimaryPosition() if err != nil { return "", err } return mysql.EncodePosition(pos), nil } -// WaitForPosition returns the master position +// WaitForPosition waits until replication reaches the desired position func (tm *TabletManager) WaitForPosition(ctx context.Context, pos string) error { mpos, err := mysql.DecodePosition(pos) if err != nil { return err } - return tm.MysqlDaemon.WaitMasterPos(ctx, mpos) + return tm.MysqlDaemon.WaitSourcePos(ctx, mpos) } // StopReplication will stop the mysql. Works both when Vitess manages @@ -144,13 +154,13 @@ func (tm *TabletManager) StopReplicationMinimum(ctx context.Context, position st } waitCtx, cancel := context.WithTimeout(ctx, waitTime) defer cancel() - if err := tm.MysqlDaemon.WaitMasterPos(waitCtx, pos); err != nil { + if err := tm.MysqlDaemon.WaitSourcePos(waitCtx, pos); err != nil { return "", err } if err := tm.stopReplicationLocked(ctx); err != nil { return "", err } - pos, err = tm.MysqlDaemon.MasterPosition() + pos, err = tm.MysqlDaemon.PrimaryPosition() if err != nil { return "", err } @@ -222,12 +232,17 @@ func (tm *TabletManager) ResetReplication(ctx context.Context) error { // InitMaster enables writes and returns the replication position. 
func (tm *TabletManager) InitMaster(ctx context.Context) (string, error) { + return tm.InitPrimary(ctx) +} + +// InitPrimary enables writes and returns the replication position. +func (tm *TabletManager) InitPrimary(ctx context.Context) (string, error) { if err := tm.lock(ctx); err != nil { return "", err } defer tm.unlock() - // Initializing as master implies undoing any previous "do not replicate". + // Initializing as primary implies undoing any previous "do not replicate". tm.replManager.setReplicationStopped(false) // we need to insert something in the binlogs, so we can get the @@ -238,7 +253,7 @@ func (tm *TabletManager) InitMaster(ctx context.Context) (string, error) { } // get the current replication position - pos, err := tm.MysqlDaemon.MasterPosition() + pos, err := tm.MysqlDaemon.PrimaryPosition() if err != nil { return "", err } @@ -250,8 +265,8 @@ func (tm *TabletManager) InitMaster(ctx context.Context) (string, error) { return "", err } - // Enforce semi-sync after changing the type to master. Otherwise, the - // master will hang while trying to create the database. + // Enforce semi-sync after changing the tablet)type to MASTER. Otherwise, the + // primary will hang while trying to create the database. 
if err := tm.fixSemiSync(topodatapb.TabletType_MASTER); err != nil { return "", err } @@ -313,7 +328,7 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab if err := tm.MysqlDaemon.SetReplicationPosition(ctx, pos); err != nil { return err } - if err := tm.MysqlDaemon.SetMaster(ctx, ti.Tablet.MysqlHostname, int(ti.Tablet.MysqlPort), false /* stopReplicationBefore */, true /* stopReplicationAfter */); err != nil { + if err := tm.MysqlDaemon.SetReplicationSource(ctx, ti.Tablet.MysqlHostname, int(ti.Tablet.MysqlPort), false /* stopReplicationBefore */, true /* stopReplicationAfter */); err != nil { return err } @@ -321,7 +336,7 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab return tm.MysqlDaemon.WaitForReparentJournal(ctx, timeCreatedNS) } -// DemoteMaster prepares a MASTER tablet to give up mastership to another tablet. +// DemotePrimary prepares a MASTER tablet to give up leadership to another tablet. // // It attemps to idempotently ensure the following guarantees upon returning // successfully: @@ -336,16 +351,21 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab // or on a tablet that already transitioned to REPLICA. // // If a step fails in the middle, it will try to undo any changes it made. -func (tm *TabletManager) DemoteMaster(ctx context.Context) (*replicationdatapb.MasterStatus, error) { +func (tm *TabletManager) DemotePrimary(ctx context.Context) (*replicationdatapb.MasterStatus, error) { // The public version always reverts on partial failure. - return tm.demoteMaster(ctx, true /* revertPartialFailure */) + return tm.demotePrimary(ctx, true /* revertPartialFailure */) +} + +// DemoteMaster is the old version of DemotePrimary +func (tm *TabletManager) DemoteMaster(ctx context.Context) (*replicationdatapb.MasterStatus, error) { + return tm.DemotePrimary(ctx) } -// demoteMaster implements DemoteMaster with an additional, private option. 
+// demotePrimary implements DemotePrimary with an additional, private option. // // If revertPartialFailure is true, and a step fails in the middle, it will try // to undo any changes it made. -func (tm *TabletManager) demoteMaster(ctx context.Context, revertPartialFailure bool) (masterStatus *replicationdatapb.MasterStatus, finalErr error) { +func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure bool) (masterStatus *replicationdatapb.MasterStatus, finalErr error) { if err := tm.lock(ctx); err != nil { return nil, err } @@ -371,7 +391,7 @@ func (tm *TabletManager) demoteMaster(ctx context.Context, revertPartialFailure if tm.orc == nil { return } - if err := tm.orc.BeginMaintenance(tm.Tablet(), "vttablet has been told to DemoteMaster"); err != nil { + if err := tm.orc.BeginMaintenance(tm.Tablet(), "vttablet has been told to DemotePrimary"); err != nil { log.Warningf("Orchestrator BeginMaintenance failed: %v", err) } }() @@ -381,7 +401,7 @@ func (tm *TabletManager) demoteMaster(ctx context.Context, revertPartialFailure // have to be killed at the end of their timeout, this will be // considered successful. If we are already not serving, this will be // idempotent. - log.Infof("DemoteMaster disabling query service") + log.Infof("DemotePrimary disabling query service") if err := tm.QueryServiceControl.SetServingType(tablet.Type, logutil.ProtoToTime(tablet.MasterTermStartTime), false, "demotion in progress"); err != nil { return nil, vterrors.Wrap(err, "SetServingType(serving=false) failed") } @@ -431,24 +451,29 @@ func (tm *TabletManager) demoteMaster(ctx context.Context, revertPartialFailure }() // Return the current replication position. 
- status, err := tm.MysqlDaemon.MasterStatus(ctx) + status, err := tm.MysqlDaemon.PrimaryStatus(ctx) if err != nil { return nil, err } - masterStatusProto := mysql.MasterStatusToProto(status) + masterStatusProto := mysql.PrimaryStatusToProto(status) return masterStatusProto, nil } -// UndoDemoteMaster reverts a previous call to DemoteMaster +// UndoDemoteMaster is the old version of UndoDemotePrimary +func (tm *TabletManager) UndoDemoteMaster(ctx context.Context) error { + return tm.UndoDemotePrimary(ctx) +} + +// UndoDemotePrimary reverts a previous call to DemotePrimary // it sets read-only to false, fixes semi-sync // and returns its master position. -func (tm *TabletManager) UndoDemoteMaster(ctx context.Context) error { +func (tm *TabletManager) UndoDemotePrimary(ctx context.Context) error { if err := tm.lock(ctx); err != nil { return err } defer tm.unlock() - // If using semi-sync, we need to enable master-side. + // If using semi-sync, we need to enable source-side. if err := tm.fixSemiSync(topodatapb.TabletType_MASTER); err != nil { return err } @@ -460,7 +485,7 @@ func (tm *TabletManager) UndoDemoteMaster(ctx context.Context) error { // Update serving graph tablet := tm.Tablet() - log.Infof("UndoDemoteMaster re-enabling query service") + log.Infof("UndoDemotePrimary re-enabling query service") if err := tm.QueryServiceControl.SetServingType(tablet.Type, logutil.ProtoToTime(tablet.MasterTermStartTime), true, ""); err != nil { return vterrors.Wrap(err, "SetServingType(serving=true) failed") } @@ -482,18 +507,23 @@ func (tm *TabletManager) ReplicaWasPromoted(ctx context.Context) error { return tm.ChangeType(ctx, topodatapb.TabletType_MASTER) } -// SetMaster sets replication master, and waits for the +// SetReplicationSource sets replication master, and waits for the // reparent_journal table entry up to context timeout -func (tm *TabletManager) SetMaster(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, 
forceStartReplication bool) error { +func (tm *TabletManager) SetReplicationSource(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) error { if err := tm.lock(ctx); err != nil { return err } defer tm.unlock() - return tm.setMasterLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication) + return tm.setReplicationSourceLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication) } -func (tm *TabletManager) setMasterRepairReplication(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) (err error) { +// SetMaster is the old version of SetReplicationSource +func (tm *TabletManager) SetMaster(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) error { + return tm.SetReplicationSource(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication) +} + +func (tm *TabletManager) setReplicationSourceRepairReplication(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) (err error) { parent, err := tm.TopoServer.GetTablet(ctx, parentAlias) if err != nil { return err @@ -506,10 +536,10 @@ func (tm *TabletManager) setMasterRepairReplication(ctx context.Context, parentA defer unlock(&err) - return tm.setMasterLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication) + return tm.setReplicationSourceLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication) } -func (tm *TabletManager) setMasterLocked(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) (err error) { +func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, 
forceStartReplication bool) (err error) { // End orchestrator maintenance at the end of fixing replication. // This is a best effort operation, so it should happen in a goroutine defer func() { @@ -524,7 +554,7 @@ func (tm *TabletManager) setMasterLocked(ctx context.Context, parentAlias *topod }() // Change our type to REPLICA if we used to be MASTER. - // Being sent SetMaster means another MASTER has been successfully promoted, + // Being sent SetReplicationSource means another MASTER has been successfully promoted, // so we convert to REPLICA first, since we want to do it even if other // steps fail below. // Note it is important to check for MASTER here so that we don't @@ -561,7 +591,7 @@ func (tm *TabletManager) setMasterLocked(ctx context.Context, parentAlias *topod shouldbeReplicating = true } - // If using semi-sync, we need to enable it before connecting to master. + // If using semi-sync, we need to enable it before connecting to primary. // If we are currently MASTER, assume we are about to become REPLICA. tabletType := tm.Tablet().Type if tabletType == topodatapb.TabletType_MASTER { @@ -570,7 +600,7 @@ func (tm *TabletManager) setMasterLocked(ctx context.Context, parentAlias *topod if err := tm.fixSemiSync(tabletType); err != nil { return err } - // Update the master address only if needed. + // Update the primary/source address only if needed. // We don't want to interrupt replication for no reason. if parentAlias == nil { // if there is no master in the shard, return an error so that we can retry @@ -584,7 +614,7 @@ func (tm *TabletManager) setMasterLocked(ctx context.Context, parentAlias *topod masterPort := int(parent.Tablet.MysqlPort) if status.MasterHost != masterHost || status.MasterPort != masterPort { // This handles both changing the address and starting replication. 
- if err := tm.MysqlDaemon.SetMaster(ctx, masterHost, masterPort, wasReplicating, shouldbeReplicating); err != nil { + if err := tm.MysqlDaemon.SetReplicationSource(ctx, masterHost, masterPort, wasReplicating, shouldbeReplicating); err != nil { if err := tm.handleRelayLogError(err); err != nil { return err } @@ -610,7 +640,7 @@ func (tm *TabletManager) setMasterLocked(ctx context.Context, parentAlias *topod if err != nil { return err } - if err := tm.MysqlDaemon.WaitMasterPos(ctx, pos); err != nil { + if err := tm.MysqlDaemon.WaitSourcePos(ctx, pos); err != nil { return err } } @@ -884,5 +914,5 @@ func (tm *TabletManager) repairReplication(ctx context.Context) error { } } - return tm.setMasterRepairReplication(ctx, si.MasterAlias, 0, "", true) + return tm.setReplicationSourceRepairReplication(ctx, si.MasterAlias, 0, "", true) } diff --git a/go/vt/vttablet/tabletmanager/rpc_schema.go b/go/vt/vttablet/tabletmanager/rpc_schema.go index fe29d67929e..5a5b1425d69 100644 --- a/go/vt/vttablet/tabletmanager/rpc_schema.go +++ b/go/vt/vttablet/tabletmanager/rpc_schema.go @@ -49,7 +49,7 @@ func (tm *TabletManager) ReloadSchema(ctx context.Context, waitPosition string) return vterrors.Wrapf(err, "ReloadSchema: can't parse wait position (%q)", waitPosition) } log.Infof("ReloadSchema: waiting for replication position: %v", waitPosition) - if err := tm.MysqlDaemon.WaitMasterPos(ctx, pos); err != nil { + if err := tm.MysqlDaemon.WaitSourcePos(ctx, pos); err != nil { return err } } diff --git a/go/vt/vttablet/tabletmanager/shard_sync.go b/go/vt/vttablet/tabletmanager/shard_sync.go index 4de0c17c911..155009898ae 100644 --- a/go/vt/vttablet/tabletmanager/shard_sync.go +++ b/go/vt/vttablet/tabletmanager/shard_sync.go @@ -219,7 +219,7 @@ func (tm *TabletManager) abortMasterTerm(ctx context.Context, masterAlias *topod log.Infof("Active reparents are enabled; converting MySQL to replica.") demoteMasterCtx, cancelDemoteMaster := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) defer 
cancelDemoteMaster() - if _, err := tm.demoteMaster(demoteMasterCtx, false /* revertPartialFailure */); err != nil { + if _, err := tm.demotePrimary(demoteMasterCtx, false /* revertPartialFailure */); err != nil { return vterrors.Wrap(err, "failed to demote master") } setMasterCtx, cancelSetMaster := context.WithTimeout(ctx, *topo.RemoteOperationTimeout) diff --git a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go index 90782b5d2a1..21bae5ba6ec 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go @@ -153,7 +153,7 @@ func resetBinlogClient() { func masterPosition(t *testing.T) string { t.Helper() - pos, err := env.Mysqld.MasterPosition() + pos, err := env.Mysqld.PrimaryPosition() if err != nil { t.Fatal(err) } diff --git a/go/vt/vttablet/tabletserver/schema/tracker.go b/go/vt/vttablet/tabletserver/schema/tracker.go index c76dd5ca6fc..dc3362d957d 100644 --- a/go/vt/vttablet/tabletserver/schema/tracker.go +++ b/go/vt/vttablet/tabletserver/schema/tracker.go @@ -183,7 +183,7 @@ func (tr *Tracker) currentPosition(ctx context.Context) (mysql.Position, error) return mysql.Position{}, err } defer conn.Close() - return conn.MasterPosition() + return conn.PrimaryPosition() } func (tr *Tracker) isSchemaVersionTableEmpty(ctx context.Context) (bool, error) { diff --git a/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go b/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go index b5cac8cad9e..ce3f1eb69c8 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go +++ b/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go @@ -81,7 +81,7 @@ func (conn *snapshotConn) startSnapshot(ctx context.Context, table string) (gtid log.Infof("Error locking table %s to read", tableIdent) return "", err } - mpos, err := lockConn.MasterPosition() + mpos, err := lockConn.PrimaryPosition() if err != nil { return "", err } 
diff --git a/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go b/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go index 19369801a46..8ceae9f9416 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go +++ b/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go @@ -100,7 +100,7 @@ func Init() (*Env, error) { config.DB = te.Dbcfgs te.TabletEnv = tabletenv.NewEnv(config, "VStreamerTest") te.Mysqld = mysqlctl.NewMysqld(te.Dbcfgs) - pos, _ := te.Mysqld.MasterPosition() + pos, _ := te.Mysqld.PrimaryPosition() te.Flavor = pos.GTIDSet.Flavor() te.SchemaEngine = schema.NewEngine(te.TabletEnv) diff --git a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go index 07da3aff5c2..50d84cf166d 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go @@ -338,7 +338,7 @@ func (uvs *uvstreamer) currentPosition() (mysql.Position, error) { return mysql.Position{}, err } defer conn.Close() - return conn.MasterPosition() + return conn.PrimaryPosition() } func (uvs *uvstreamer) init() error { diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go index e30f4b82719..8f749036875 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go @@ -2101,7 +2101,7 @@ func masterPosition(t *testing.T) string { t.Fatal(err) } defer conn.Close() - pos, err := conn.MasterPosition() + pos, err := conn.PrimaryPosition() if err != nil { t.Fatal(err) } diff --git a/go/vt/vttablet/tmrpctest/test_tm_rpc.go b/go/vt/vttablet/tmrpctest/test_tm_rpc.go index f3256a98541..89fb39de3b6 100644 --- a/go/vt/vttablet/tmrpctest/test_tm_rpc.go +++ b/go/vt/vttablet/tmrpctest/test_tm_rpc.go @@ -716,6 +716,14 @@ var testReplicationStatus = &replicationdatapb.Status{ var testMasterStatus = &replicationdatapb.MasterStatus{Position: 
"MariaDB/1-345-789"} +func (fra *fakeRPCTM) PrimaryStatus(ctx context.Context) (*replicationdatapb.MasterStatus, error) { + if fra.panics { + panic(fmt.Errorf("test-triggered panic")) + } + return testMasterStatus, nil +} + +// Deprecated func (fra *fakeRPCTM) MasterStatus(ctx context.Context) (*replicationdatapb.MasterStatus, error) { if fra.panics { panic(fmt.Errorf("test-triggered panic")) @@ -742,6 +750,14 @@ func tmRPCTestReplicationStatusPanic(ctx context.Context, t *testing.T, client t var testReplicationPosition = "MariaDB/5-456-890" +func (fra *fakeRPCTM) PrimaryPosition(ctx context.Context) (string, error) { + if fra.panics { + panic(fmt.Errorf("test-triggered panic")) + } + return testReplicationPosition, nil +} + +// Deprecated func (fra *fakeRPCTM) MasterPosition(ctx context.Context) (string, error) { if fra.panics { panic(fmt.Errorf("test-triggered panic")) @@ -755,12 +771,12 @@ func (fra *fakeRPCTM) WaitForPosition(ctx context.Context, pos string) error { func tmRPCTestMasterPosition(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { rs, err := client.MasterPosition(ctx, tablet) - compareError(t, "MasterPosition", err, rs, testReplicationPosition) + compareError(t, "PrimaryPosition", err, rs, testReplicationPosition) } func tmRPCTestMasterPositionPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { _, err := client.MasterPosition(ctx, tablet) - expectHandleRPCPanic(t, "MasterPosition", false /*verbose*/, err) + expectHandleRPCPanic(t, "PrimaryPosition", false /*verbose*/, err) } var testStopReplicationCalled = false @@ -934,6 +950,14 @@ func tmRPCTestResetReplicationPanic(ctx context.Context, t *testing.T, client tm expectHandleRPCPanic(t, "ResetReplication", true /*verbose*/, err) } +func (fra *fakeRPCTM) InitPrimary(ctx context.Context) (string, error) { + if fra.panics { + panic(fmt.Errorf("test-triggered panic")) + } + return 
testReplicationPosition, nil +} + +// Deprecated func (fra *fakeRPCTM) InitMaster(ctx context.Context) (string, error) { if fra.panics { panic(fmt.Errorf("test-triggered panic")) @@ -1005,6 +1029,14 @@ func tmRPCTestInitReplicaPanic(ctx context.Context, t *testing.T, client tmclien expectHandleRPCPanic(t, "InitReplica", true /*verbose*/, err) } +func (fra *fakeRPCTM) DemotePrimary(ctx context.Context) (*replicationdatapb.MasterStatus, error) { + if fra.panics { + panic(fmt.Errorf("test-triggered panic")) + } + return testMasterStatus, nil +} + +// Deprecated func (fra *fakeRPCTM) DemoteMaster(ctx context.Context) (*replicationdatapb.MasterStatus, error) { if fra.panics { panic(fmt.Errorf("test-triggered panic")) @@ -1024,6 +1056,14 @@ func tmRPCTestDemoteMasterPanic(ctx context.Context, t *testing.T, client tmclie var testUndoDemoteMasterCalled = false +func (fra *fakeRPCTM) UndoDemotePrimary(ctx context.Context) error { + if fra.panics { + panic(fmt.Errorf("test-triggered panic")) + } + return nil +} + +// Deprecated func (fra *fakeRPCTM) UndoDemoteMaster(ctx context.Context) error { if fra.panics { panic(fmt.Errorf("test-triggered panic")) @@ -1067,6 +1107,19 @@ func tmRPCTestReplicaWasPromotedPanic(ctx context.Context, t *testing.T, client var testSetMasterCalled = false var testForceStartReplica = true +func (fra *fakeRPCTM) SetReplicationSource(ctx context.Context, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplica bool) error { + if fra.panics { + panic(fmt.Errorf("test-triggered panic")) + } + compare(fra.t, "SetMaster parent", parent, testMasterAlias) + compare(fra.t, "SetMaster timeCreatedNS", timeCreatedNS, testTimeCreatedNS) + compare(fra.t, "SetMaster waitPosition", waitPosition, testWaitPosition) + compare(fra.t, "SetMaster forceStartReplica", forceStartReplica, testForceStartReplica) + testSetMasterCalled = true + return nil +} + +// Deprecated func (fra *fakeRPCTM) SetMaster(ctx context.Context, parent 
*topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplica bool) error { if fra.panics { panic(fmt.Errorf("test-triggered panic")) diff --git a/go/vt/worker/legacy_split_clone_test.go b/go/vt/worker/legacy_split_clone_test.go index 2f32e4f7f43..c60fa1caccd 100644 --- a/go/vt/worker/legacy_split_clone_test.go +++ b/go/vt/worker/legacy_split_clone_test.go @@ -171,7 +171,7 @@ func (tc *legacySplitCloneTestCase) setUp(v3 bool) { }, }, } - sourceRdonly.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + sourceRdonly.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{12: mysql.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678}}, } sourceRdonly.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ diff --git a/go/vt/worker/split_clone_flaky_test.go b/go/vt/worker/split_clone_flaky_test.go index bd2cc6c5308..ddfa95fb8d3 100644 --- a/go/vt/worker/split_clone_flaky_test.go +++ b/go/vt/worker/split_clone_flaky_test.go @@ -215,7 +215,7 @@ func (tc *splitCloneTestCase) setUpWithConcurrency(v3 bool, concurrency, writeQu }, }, } - sourceRdonly.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + sourceRdonly.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{12: mysql.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678}}, } sourceRdonly.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ diff --git a/go/vt/worker/vertical_split_clone_test.go b/go/vt/worker/vertical_split_clone_test.go index 21ebe28b182..032a5cb32b9 100644 --- a/go/vt/worker/vertical_split_clone_test.go +++ b/go/vt/worker/vertical_split_clone_test.go @@ -139,7 +139,7 @@ func TestVerticalSplitClone(t *testing.T) { }, }, } - sourceRdonly.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + sourceRdonly.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{12: mysql.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678}}, } 
sourceRdonly.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go index 249bc1afa24..dfbdc2f5c28 100644 --- a/go/vt/wrangler/testlib/backup_test.go +++ b/go/vt/wrangler/testlib/backup_test.go @@ -102,7 +102,7 @@ func TestBackupRestore(t *testing.T) { master := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_MASTER, db) master.FakeMysqlDaemon.ReadOnly = false master.FakeMysqlDaemon.Replicating = false - master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + master.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -121,7 +121,7 @@ func TestBackupRestore(t *testing.T) { sourceTablet := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, db) sourceTablet.FakeMysqlDaemon.ReadOnly = true sourceTablet.FakeMysqlDaemon.Replicating = true - sourceTablet.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -155,7 +155,7 @@ func TestBackupRestore(t *testing.T) { destTablet := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, db) destTablet.FakeMysqlDaemon.ReadOnly = true destTablet.FakeMysqlDaemon.Replicating = true - destTablet.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + destTablet.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -174,8 +174,8 @@ func TestBackupRestore(t *testing.T) { destTablet.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*sqltypes.Result{ "SHOW DATABASES": {}, } - destTablet.FakeMysqlDaemon.SetReplicationPositionPos = sourceTablet.FakeMysqlDaemon.CurrentMasterPosition - destTablet.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(master.Tablet) + destTablet.FakeMysqlDaemon.SetReplicationPositionPos = 
sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition + destTablet.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(master.Tablet) destTablet.StartActionLoop(t, wr) defer destTablet.StopActionLoop(t) @@ -221,7 +221,7 @@ func TestBackupRestore(t *testing.T) { "START SLAVE", } - master.FakeMysqlDaemon.SetReplicationPositionPos = master.FakeMysqlDaemon.CurrentMasterPosition + master.FakeMysqlDaemon.SetReplicationPositionPos = master.FakeMysqlDaemon.CurrentPrimaryPosition // restore master from backup require.NoError(t, master.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */), "RestoreData failed") @@ -303,7 +303,7 @@ func TestBackupRestoreLagged(t *testing.T) { master := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_MASTER, db) master.FakeMysqlDaemon.ReadOnly = false master.FakeMysqlDaemon.Replicating = false - master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + master.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -322,7 +322,7 @@ func TestBackupRestoreLagged(t *testing.T) { sourceTablet := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, db) sourceTablet.FakeMysqlDaemon.ReadOnly = true sourceTablet.FakeMysqlDaemon.Replicating = true - sourceTablet.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -351,7 +351,7 @@ func TestBackupRestoreLagged(t *testing.T) { timer := time.NewTicker(1 * time.Second) <-timer.C - sourceTablet.FakeMysqlDaemon.CurrentMasterPositionLocked(mysql.Position{ + sourceTablet.FakeMysqlDaemon.CurrentPrimaryPositionLocked(mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -370,7 +370,7 @@ func TestBackupRestoreLagged(t *testing.T) { require.NoError(t, 
sourceTablet.FakeMysqlDaemon.CheckSuperQueryList()) assert.True(t, sourceTablet.FakeMysqlDaemon.Replicating) assert.True(t, sourceTablet.FakeMysqlDaemon.Running) - assert.Equal(t, master.FakeMysqlDaemon.CurrentMasterPosition, sourceTablet.FakeMysqlDaemon.CurrentMasterPosition) + assert.Equal(t, master.FakeMysqlDaemon.CurrentPrimaryPosition, sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition) case <-timer2.C: require.FailNow(t, "Backup timed out") } @@ -379,7 +379,7 @@ func TestBackupRestoreLagged(t *testing.T) { destTablet := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, db) destTablet.FakeMysqlDaemon.ReadOnly = true destTablet.FakeMysqlDaemon.Replicating = true - destTablet.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + destTablet.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -398,8 +398,8 @@ func TestBackupRestoreLagged(t *testing.T) { destTablet.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*sqltypes.Result{ "SHOW DATABASES": {}, } - destTablet.FakeMysqlDaemon.SetReplicationPositionPos = destTablet.FakeMysqlDaemon.CurrentMasterPosition - destTablet.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(master.Tablet) + destTablet.FakeMysqlDaemon.SetReplicationPositionPos = destTablet.FakeMysqlDaemon.CurrentPrimaryPosition + destTablet.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(master.Tablet) destTablet.StartActionLoop(t, wr) defer destTablet.StopActionLoop(t) @@ -421,7 +421,7 @@ func TestBackupRestoreLagged(t *testing.T) { timer = time.NewTicker(1 * time.Second) <-timer.C - destTablet.FakeMysqlDaemon.CurrentMasterPositionLocked(mysql.Position{ + destTablet.FakeMysqlDaemon.CurrentPrimaryPositionLocked(mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -439,7 +439,7 @@ func TestBackupRestoreLagged(t *testing.T) { require.NoError(t, destTablet.FakeMysqlDaemon.CheckSuperQueryList(), 
"destTablet.FakeMysqlDaemon.CheckSuperQueryList failed") assert.True(t, destTablet.FakeMysqlDaemon.Replicating) assert.True(t, destTablet.FakeMysqlDaemon.Running) - assert.Equal(t, master.FakeMysqlDaemon.CurrentMasterPosition, destTablet.FakeMysqlDaemon.CurrentMasterPosition) + assert.Equal(t, master.FakeMysqlDaemon.CurrentPrimaryPosition, destTablet.FakeMysqlDaemon.CurrentPrimaryPosition) case <-timer2.C: require.FailNow(t, "Restore timed out") } @@ -500,7 +500,7 @@ func TestRestoreUnreachableMaster(t *testing.T) { master := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_MASTER, db) master.FakeMysqlDaemon.ReadOnly = false master.FakeMysqlDaemon.Replicating = false - master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + master.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -518,7 +518,7 @@ func TestRestoreUnreachableMaster(t *testing.T) { sourceTablet := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, db) sourceTablet.FakeMysqlDaemon.ReadOnly = true sourceTablet.FakeMysqlDaemon.Replicating = true - sourceTablet.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -547,7 +547,7 @@ func TestRestoreUnreachableMaster(t *testing.T) { destTablet := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, db) destTablet.FakeMysqlDaemon.ReadOnly = true destTablet.FakeMysqlDaemon.Replicating = true - destTablet.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + destTablet.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -566,8 +566,8 @@ func TestRestoreUnreachableMaster(t *testing.T) { destTablet.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*sqltypes.Result{ "SHOW DATABASES": {}, } - 
destTablet.FakeMysqlDaemon.SetReplicationPositionPos = sourceTablet.FakeMysqlDaemon.CurrentMasterPosition - destTablet.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(master.Tablet) + destTablet.FakeMysqlDaemon.SetReplicationPositionPos = sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition + destTablet.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(master.Tablet) destTablet.StartActionLoop(t, wr) defer destTablet.StopActionLoop(t) @@ -653,7 +653,7 @@ func TestDisableActiveReparents(t *testing.T) { master := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_MASTER, db) master.FakeMysqlDaemon.ReadOnly = false master.FakeMysqlDaemon.Replicating = false - master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + master.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -672,7 +672,7 @@ func TestDisableActiveReparents(t *testing.T) { sourceTablet := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, db) sourceTablet.FakeMysqlDaemon.ReadOnly = true sourceTablet.FakeMysqlDaemon.Replicating = true - sourceTablet.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -705,7 +705,7 @@ func TestDisableActiveReparents(t *testing.T) { destTablet := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, db) destTablet.FakeMysqlDaemon.ReadOnly = true destTablet.FakeMysqlDaemon.Replicating = true - destTablet.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + destTablet.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -723,8 +723,8 @@ func TestDisableActiveReparents(t *testing.T) { destTablet.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*sqltypes.Result{ "SHOW DATABASES": {}, } - destTablet.FakeMysqlDaemon.SetReplicationPositionPos = 
sourceTablet.FakeMysqlDaemon.CurrentMasterPosition - destTablet.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(master.Tablet) + destTablet.FakeMysqlDaemon.SetReplicationPositionPos = sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition + destTablet.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(master.Tablet) destTablet.StartActionLoop(t, wr) defer destTablet.StopActionLoop(t) diff --git a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go index 217f29e1bc7..8f3e3330c75 100644 --- a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go @@ -56,7 +56,7 @@ func TestEmergencyReparentShard(t *testing.T) { goodReplica2 := NewFakeTablet(t, wr, "cell2", 3, topodatapb.TabletType_REPLICA, nil) oldMaster.FakeMysqlDaemon.Replicating = false - oldMaster.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + oldMaster.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -73,7 +73,7 @@ func TestEmergencyReparentShard(t *testing.T) { // new master newMaster.FakeMysqlDaemon.ReadOnly = true newMaster.FakeMysqlDaemon.Replicating = true - newMaster.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + newMaster.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -86,7 +86,7 @@ func TestEmergencyReparentShard(t *testing.T) { newMaster.FakeMysqlDaemon.CurrentMasterFilePosition = mysql.Position{ GTIDSet: newMasterRelayLogPos, } - newMaster.FakeMysqlDaemon.WaitMasterPosition = newMaster.FakeMysqlDaemon.CurrentMasterFilePosition + newMaster.FakeMysqlDaemon.WaitPrimaryPosition = newMaster.FakeMysqlDaemon.CurrentMasterFilePosition newMaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE IO_THREAD", "CREATE DATABASE IF NOT EXISTS _vt", @@ -108,7 +108,7 @@ func 
TestEmergencyReparentShard(t *testing.T) { // old master, will be scrapped oldMaster.FakeMysqlDaemon.ReadOnly = false oldMaster.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica - oldMaster.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + oldMaster.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) oldMaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", } @@ -118,7 +118,7 @@ func TestEmergencyReparentShard(t *testing.T) { // good replica 1 is replicating goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true - goodReplica1.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + goodReplica1.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -131,8 +131,8 @@ func TestEmergencyReparentShard(t *testing.T) { goodReplica1.FakeMysqlDaemon.CurrentMasterFilePosition = mysql.Position{ GTIDSet: goodReplica1RelayLogPos, } - goodReplica1.FakeMysqlDaemon.WaitMasterPosition = goodReplica1.FakeMysqlDaemon.CurrentMasterFilePosition - goodReplica1.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + goodReplica1.FakeMysqlDaemon.WaitPrimaryPosition = goodReplica1.FakeMysqlDaemon.CurrentMasterFilePosition + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE IO_THREAD", "STOP SLAVE", @@ -145,7 +145,7 @@ func TestEmergencyReparentShard(t *testing.T) { // good replica 2 is not replicating goodReplica2.FakeMysqlDaemon.ReadOnly = true goodReplica2.FakeMysqlDaemon.Replicating = false - goodReplica2.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + goodReplica2.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -158,8 +158,8 @@ func TestEmergencyReparentShard(t 
*testing.T) { goodReplica2.FakeMysqlDaemon.CurrentMasterFilePosition = mysql.Position{ GTIDSet: goodReplica2RelayLogPos, } - goodReplica2.FakeMysqlDaemon.WaitMasterPosition = goodReplica2.FakeMysqlDaemon.CurrentMasterFilePosition - goodReplica2.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + goodReplica2.FakeMysqlDaemon.WaitPrimaryPosition = goodReplica2.FakeMysqlDaemon.CurrentMasterFilePosition + goodReplica2.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", @@ -203,7 +203,7 @@ func TestEmergencyReparentShardMasterElectNotBest(t *testing.T) { // new master newMaster.FakeMysqlDaemon.Replicating = true // this server has executed upto 455, which is the highest among replicas - newMaster.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + newMaster.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -214,7 +214,7 @@ func TestEmergencyReparentShardMasterElectNotBest(t *testing.T) { } // It has more transactions in its relay log, but not as many as // moreAdvancedReplica - newMaster.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + newMaster.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -227,7 +227,7 @@ func TestEmergencyReparentShardMasterElectNotBest(t *testing.T) { newMaster.FakeMysqlDaemon.CurrentMasterFilePosition = mysql.Position{ GTIDSet: newMasterRelayLogPos, } - newMaster.FakeMysqlDaemon.WaitMasterPosition = newMaster.FakeMysqlDaemon.CurrentMasterFilePosition + newMaster.FakeMysqlDaemon.WaitPrimaryPosition = newMaster.FakeMysqlDaemon.CurrentMasterFilePosition newMaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE IO_THREAD", } @@ -242,7 +242,7 @@ func 
TestEmergencyReparentShardMasterElectNotBest(t *testing.T) { // more advanced replica moreAdvancedReplica.FakeMysqlDaemon.Replicating = true // position up to which this replica has executed is behind desired new master - moreAdvancedReplica.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + moreAdvancedReplica.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -252,7 +252,7 @@ func TestEmergencyReparentShardMasterElectNotBest(t *testing.T) { }, } // relay log position is more advanced than desired new master - moreAdvancedReplica.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + moreAdvancedReplica.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 2: mysql.MariadbGTID{ Domain: 2, @@ -265,7 +265,7 @@ func TestEmergencyReparentShardMasterElectNotBest(t *testing.T) { moreAdvancedReplica.FakeMysqlDaemon.CurrentMasterFilePosition = mysql.Position{ GTIDSet: moreAdvancedReplicaLogPos, } - moreAdvancedReplica.FakeMysqlDaemon.WaitMasterPosition = moreAdvancedReplica.FakeMysqlDaemon.CurrentMasterFilePosition + moreAdvancedReplica.FakeMysqlDaemon.WaitPrimaryPosition = moreAdvancedReplica.FakeMysqlDaemon.CurrentMasterFilePosition moreAdvancedReplica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE IO_THREAD", } diff --git a/go/vt/wrangler/testlib/external_reparent_test.go b/go/vt/wrangler/testlib/external_reparent_test.go index 87987769164..11f627ce673 100644 --- a/go/vt/wrangler/testlib/external_reparent_test.go +++ b/go/vt/wrangler/testlib/external_reparent_test.go @@ -89,7 +89,7 @@ func TestTabletExternallyReparentedBasic(t *testing.T) { t.Fatalf("old master should be MASTER but is: %v", tablet.Type) } - oldMaster.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + oldMaster.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) 
oldMaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START Replica", @@ -168,7 +168,7 @@ func TestTabletExternallyReparentedToReplica(t *testing.T) { // Second test: reparent to a replica, and pretend the old // master is still good to go. - oldMaster.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + oldMaster.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) oldMaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START Replica", @@ -246,7 +246,7 @@ func TestTabletExternallyReparentedWithDifferentMysqlPort(t *testing.T) { newMaster.StartActionLoop(t, wr) defer newMaster.StopActionLoop(t) - oldMaster.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + oldMaster.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) oldMaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START Replica", @@ -327,7 +327,7 @@ func TestTabletExternallyReparentedContinueOnUnexpectedMaster(t *testing.T) { newMaster.StartActionLoop(t, wr) defer newMaster.StopActionLoop(t) - oldMaster.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + oldMaster.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) oldMaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START Replica", @@ -404,7 +404,7 @@ func TestTabletExternallyReparentedRerun(t *testing.T) { newMaster.StartActionLoop(t, wr) defer newMaster.StopActionLoop(t) - oldMaster.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + oldMaster.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) oldMaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START Replica", @@ -414,7 +414,7 @@ func TestTabletExternallyReparentedRerun(t *testing.T) { oldMaster.StartActionLoop(t, wr) defer 
oldMaster.StopActionLoop(t) - goodReplica.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + goodReplica.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) // On the good replica, we will respond to // TabletActionReplicaWasRestarted. goodReplica.StartActionLoop(t, wr) diff --git a/go/vt/wrangler/testlib/migrate_served_from_test.go b/go/vt/wrangler/testlib/migrate_served_from_test.go index 74256448981..c0023086afe 100644 --- a/go/vt/wrangler/testlib/migrate_served_from_test.go +++ b/go/vt/wrangler/testlib/migrate_served_from_test.go @@ -89,7 +89,7 @@ func TestMigrateServedFrom(t *testing.T) { // sourceMaster will see the refresh, and has to respond to it // also will be asked about its replication position. - sourceMaster.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + sourceMaster.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 5: mysql.MariadbGTID{ Domain: 5, diff --git a/go/vt/wrangler/testlib/migrate_served_types_test.go b/go/vt/wrangler/testlib/migrate_served_types_test.go index d3c1bb6b637..d48cd31fed1 100644 --- a/go/vt/wrangler/testlib/migrate_served_types_test.go +++ b/go/vt/wrangler/testlib/migrate_served_types_test.go @@ -130,7 +130,7 @@ func TestMigrateServedTypes(t *testing.T) { // sourceMaster will see the refresh, and has to respond to it // also will be asked about its replication position. - sourceMaster.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + sourceMaster.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 5: mysql.MariadbGTID{ Domain: 5, @@ -339,7 +339,7 @@ func TestMultiShardMigrateServedTypes(t *testing.T) { // source1Master will see the refresh, and has to respond to it // also will be asked about its replication position. 
- source1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + source1Master.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 5: mysql.MariadbGTID{ Domain: 5, @@ -385,7 +385,7 @@ func TestMultiShardMigrateServedTypes(t *testing.T) { // sourceMaster will see the refresh, and has to respond to it // also will be asked about its replication position. - source2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + source2Master.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 5: mysql.MariadbGTID{ Domain: 5, diff --git a/go/vt/wrangler/testlib/planned_reparent_shard_test.go b/go/vt/wrangler/testlib/planned_reparent_shard_test.go index e84a1043311..eee5df4a9d3 100644 --- a/go/vt/wrangler/testlib/planned_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/planned_reparent_shard_test.go @@ -58,7 +58,7 @@ func TestPlannedReparentShardNoMasterProvided(t *testing.T) { // new master newMaster.FakeMysqlDaemon.ReadOnly = true newMaster.FakeMysqlDaemon.Replicating = true - newMaster.FakeMysqlDaemon.WaitMasterPosition = mysql.Position{ + newMaster.FakeMysqlDaemon.WaitPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ Domain: 7, @@ -91,8 +91,8 @@ func TestPlannedReparentShardNoMasterProvided(t *testing.T) { oldMaster.FakeMysqlDaemon.ReadOnly = false oldMaster.FakeMysqlDaemon.Replicating = false oldMaster.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica - oldMaster.FakeMysqlDaemon.CurrentMasterPosition = newMaster.FakeMysqlDaemon.WaitMasterPosition - oldMaster.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + oldMaster.FakeMysqlDaemon.CurrentPrimaryPosition = newMaster.FakeMysqlDaemon.WaitPrimaryPosition + oldMaster.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) oldMaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START SLAVE", @@ -105,12 +105,12 
@@ func TestPlannedReparentShardNoMasterProvided(t *testing.T) { oldMaster.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) // SetMaster is called on new master to make sure it's replicating before reparenting. - newMaster.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(oldMaster.Tablet) + newMaster.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(oldMaster.Tablet) // good replica 1 is replicating goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true - goodReplica1.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", "FAKE SET MASTER", @@ -168,7 +168,7 @@ func TestPlannedReparentShardNoError(t *testing.T) { // new master newMaster.FakeMysqlDaemon.ReadOnly = true newMaster.FakeMysqlDaemon.Replicating = true - newMaster.FakeMysqlDaemon.WaitMasterPosition = mysql.Position{ + newMaster.FakeMysqlDaemon.WaitPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ Domain: 7, @@ -201,8 +201,8 @@ func TestPlannedReparentShardNoError(t *testing.T) { oldMaster.FakeMysqlDaemon.ReadOnly = false oldMaster.FakeMysqlDaemon.Replicating = false oldMaster.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica - oldMaster.FakeMysqlDaemon.CurrentMasterPosition = newMaster.FakeMysqlDaemon.WaitMasterPosition - oldMaster.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + oldMaster.FakeMysqlDaemon.CurrentPrimaryPosition = newMaster.FakeMysqlDaemon.WaitPrimaryPosition + oldMaster.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) oldMaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START SLAVE", @@ -215,12 +215,12 @@ func TestPlannedReparentShardNoError(t *testing.T) { 
oldMaster.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) // SetMaster is called on new master to make sure it's replicating before reparenting. - newMaster.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(oldMaster.Tablet) + newMaster.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(oldMaster.Tablet) // goodReplica1 is replicating goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true - goodReplica1.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", "FAKE SET MASTER", @@ -232,7 +232,7 @@ func TestPlannedReparentShardNoError(t *testing.T) { // goodReplica2 is not replicating goodReplica2.FakeMysqlDaemon.ReadOnly = true goodReplica2.FakeMysqlDaemon.Replicating = false - goodReplica2.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + goodReplica2.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", @@ -316,7 +316,7 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { // new master newMaster.FakeMysqlDaemon.ReadOnly = true newMaster.FakeMysqlDaemon.Replicating = true - newMaster.FakeMysqlDaemon.WaitMasterPosition = mysql.Position{ + newMaster.FakeMysqlDaemon.WaitPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ Domain: 7, @@ -349,8 +349,8 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { oldMaster.FakeMysqlDaemon.ReadOnly = false oldMaster.FakeMysqlDaemon.Replicating = false // set to incorrect value to make promote fail on WaitForMasterPos - oldMaster.FakeMysqlDaemon.CurrentMasterPosition = 
newMaster.FakeMysqlDaemon.PromoteResult - oldMaster.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + oldMaster.FakeMysqlDaemon.CurrentPrimaryPosition = newMaster.FakeMysqlDaemon.PromoteResult + oldMaster.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) oldMaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START SLAVE", @@ -359,12 +359,12 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { defer oldMaster.StopActionLoop(t) oldMaster.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) // SetMaster is called on new master to make sure it's replicating before reparenting. - newMaster.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(oldMaster.Tablet) + newMaster.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(oldMaster.Tablet) // good replica 1 is replicating goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true - goodReplica1.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", "FAKE SET MASTER", @@ -376,7 +376,7 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { // good replica 2 is not replicating goodReplica2.FakeMysqlDaemon.ReadOnly = true goodReplica2.FakeMysqlDaemon.Replicating = false - goodReplica2.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + goodReplica2.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", @@ -417,7 +417,7 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { newMaster.FakeMysqlDaemon.TimeoutHook = func() error { return 
context.DeadlineExceeded } newMaster.FakeMysqlDaemon.ReadOnly = true newMaster.FakeMysqlDaemon.Replicating = true - newMaster.FakeMysqlDaemon.WaitMasterPosition = mysql.Position{ + newMaster.FakeMysqlDaemon.WaitPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ Domain: 7, @@ -449,8 +449,8 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { // old master oldMaster.FakeMysqlDaemon.ReadOnly = false oldMaster.FakeMysqlDaemon.Replicating = false - oldMaster.FakeMysqlDaemon.CurrentMasterPosition = newMaster.FakeMysqlDaemon.WaitMasterPosition - oldMaster.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + oldMaster.FakeMysqlDaemon.CurrentPrimaryPosition = newMaster.FakeMysqlDaemon.WaitPrimaryPosition + oldMaster.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) oldMaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START SLAVE", @@ -460,11 +460,11 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { oldMaster.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) // SetMaster is called on new master to make sure it's replicating before reparenting. 
- newMaster.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(oldMaster.Tablet) + newMaster.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(oldMaster.Tablet) // good replica 1 is replicating goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true - goodReplica1.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", "FAKE SET MASTER", @@ -476,7 +476,7 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { // good replica 2 is not replicating goodReplica2.FakeMysqlDaemon.ReadOnly = true goodReplica2.FakeMysqlDaemon.Replicating = false - goodReplica2.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + goodReplica2.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", @@ -513,7 +513,7 @@ func TestPlannedReparentShardRelayLogError(t *testing.T) { master.FakeMysqlDaemon.ReadOnly = false master.FakeMysqlDaemon.Replicating = false master.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica - master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + master.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ Domain: 7, @@ -534,9 +534,9 @@ func TestPlannedReparentShardRelayLogError(t *testing.T) { // goodReplica1 is replicating goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true - goodReplica1.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(master.Tablet) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(master.Tablet) // simulate error that will trigger a call to RestartReplication - 
goodReplica1.FakeMysqlDaemon.SetMasterError = errors.New("Slave failed to initialize relay log info structure from the repository") + goodReplica1.FakeMysqlDaemon.SetReplicationSourceError = errors.New("Slave failed to initialize relay log info structure from the repository") goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", "RESET SLAVE", @@ -588,7 +588,7 @@ func TestPlannedReparentShardRelayLogErrorStartReplication(t *testing.T) { master.FakeMysqlDaemon.ReadOnly = false master.FakeMysqlDaemon.Replicating = false master.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica - master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + master.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ Domain: 7, @@ -610,7 +610,7 @@ func TestPlannedReparentShardRelayLogErrorStartReplication(t *testing.T) { goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true goodReplica1.FakeMysqlDaemon.IOThreadRunning = false - goodReplica1.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(master.Tablet) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(master.Tablet) goodReplica1.FakeMysqlDaemon.CurrentMasterHost = master.Tablet.MysqlHostname goodReplica1.FakeMysqlDaemon.CurrentMasterPort = int(master.Tablet.MysqlPort) // simulate error that will trigger a call to RestartReplication @@ -667,7 +667,7 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { newMaster.FakeMysqlDaemon.Replicating = true // make promote fail newMaster.FakeMysqlDaemon.PromoteError = errors.New("some error") - newMaster.FakeMysqlDaemon.WaitMasterPosition = mysql.Position{ + newMaster.FakeMysqlDaemon.WaitPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ Domain: 7, @@ -700,8 +700,8 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { oldMaster.FakeMysqlDaemon.ReadOnly = false 
oldMaster.FakeMysqlDaemon.Replicating = false oldMaster.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica - oldMaster.FakeMysqlDaemon.CurrentMasterPosition = newMaster.FakeMysqlDaemon.WaitMasterPosition - oldMaster.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + oldMaster.FakeMysqlDaemon.CurrentPrimaryPosition = newMaster.FakeMysqlDaemon.WaitPrimaryPosition + oldMaster.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) oldMaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START SLAVE", @@ -711,11 +711,11 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { oldMaster.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) // SetMaster is called on new master to make sure it's replicating before reparenting. - newMaster.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(oldMaster.Tablet) + newMaster.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(oldMaster.Tablet) // good replica 1 is replicating goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true - goodReplica1.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", "FAKE SET MASTER", @@ -727,7 +727,7 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { // good replica 2 is not replicating goodReplica2.FakeMysqlDaemon.ReadOnly = true goodReplica2.FakeMysqlDaemon.Replicating = false - goodReplica2.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(newMaster.Tablet) + goodReplica2.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newMaster.Tablet) goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", @@ -799,7 +799,7 @@ 
func TestPlannedReparentShardSameMaster(t *testing.T) { oldMaster.FakeMysqlDaemon.ReadOnly = true oldMaster.FakeMysqlDaemon.Replicating = false oldMaster.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica - oldMaster.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + oldMaster.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ Domain: 7, @@ -820,7 +820,7 @@ func TestPlannedReparentShardSameMaster(t *testing.T) { // good replica 1 is replicating goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true - goodReplica1.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(oldMaster.Tablet) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(oldMaster.Tablet) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", "FAKE SET MASTER", @@ -832,7 +832,7 @@ func TestPlannedReparentShardSameMaster(t *testing.T) { // goodReplica2 is not replicating goodReplica2.FakeMysqlDaemon.ReadOnly = true goodReplica2.FakeMysqlDaemon.Replicating = false - goodReplica2.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(oldMaster.Tablet) + goodReplica2.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(oldMaster.Tablet) goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", diff --git a/go/vt/wrangler/testlib/reparent_utils_test.go b/go/vt/wrangler/testlib/reparent_utils_test.go index 2a4fc475cf0..f102502db41 100644 --- a/go/vt/wrangler/testlib/reparent_utils_test.go +++ b/go/vt/wrangler/testlib/reparent_utils_test.go @@ -62,7 +62,7 @@ func TestShardReplicationStatuses(t *testing.T) { } // master action loop (to initialize host and port) - master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + master.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 5: mysql.MariadbGTID{ Domain: 5, @@ -75,7 +75,7 
@@ func TestShardReplicationStatuses(t *testing.T) { defer master.StopActionLoop(t) // replica loop - replica.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + replica.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 5: mysql.MariadbGTID{ Domain: 5, @@ -147,7 +147,7 @@ func TestReparentTablet(t *testing.T) { // which ends up making this test unpredictable. replica.FakeMysqlDaemon.Replicating = true replica.FakeMysqlDaemon.IOThreadRunning = true - replica.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(master.Tablet) + replica.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(master.Tablet) replica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", "FAKE SET MASTER", diff --git a/go/vt/wrangler/traffic_switcher_env_test.go b/go/vt/wrangler/traffic_switcher_env_test.go index d9e49802f87..3754c602173 100644 --- a/go/vt/wrangler/traffic_switcher_env_test.go +++ b/go/vt/wrangler/traffic_switcher_env_test.go @@ -404,7 +404,7 @@ func (tme *testMigraterEnv) createDBClients(ctx context.Context, t *testing.T) { func (tme *testMigraterEnv) setMasterPositions() { for _, master := range tme.sourceMasters { - master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + master.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 5: mysql.MariadbGTID{ Domain: 5, @@ -415,7 +415,7 @@ func (tme *testMigraterEnv) setMasterPositions() { } } for _, master := range tme.targetMasters { - master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + master.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 5: mysql.MariadbGTID{ Domain: 5, diff --git a/go/vt/wrangler/vdiff.go b/go/vt/wrangler/vdiff.go index c778ce1c4e5..c03d4da471e 100644 --- a/go/vt/wrangler/vdiff.go +++ b/go/vt/wrangler/vdiff.go @@ -63,6 +63,7 @@ type DiffReport struct { ExtraRowsTarget int ExtraRowsTargetSample []*RowDiff MismatchedRowsSample []*DiffMismatch 
+ TableName string } // DiffMismatch is a sample of row diffs between source and target. @@ -247,6 +248,7 @@ func (wr *Wrangler) VDiff(ctx context.Context, targetKeyspace, workflowName, sou } // Perform the diff of source and target streams. dr, err := td.diff(ctx, df.ts.wr, &rowsToCompare, debug, onlyPks) + dr.TableName = table if err != nil { return nil, vterrors.Wrap(err, "diff") } diff --git a/go/vt/wrangler/vdiff_test.go b/go/vt/wrangler/vdiff_test.go index 0c93d02473a..e121e7786aa 100644 --- a/go/vt/wrangler/vdiff_test.go +++ b/go/vt/wrangler/vdiff_test.go @@ -508,6 +508,7 @@ func TestVDiffUnsharded(t *testing.T) { dr: &DiffReport{ ProcessedRows: 3, MatchingRows: 3, + TableName: "t1", }, }, { id: "2", @@ -524,6 +525,7 @@ func TestVDiffUnsharded(t *testing.T) { ProcessedRows: 3, MatchingRows: 1, ExtraRowsTarget: 2, + TableName: "t1", ExtraRowsTargetSample: []*RowDiff{ { Row: map[string]sqltypes.Value{ @@ -549,6 +551,7 @@ func TestVDiffUnsharded(t *testing.T) { ProcessedRows: 3, MatchingRows: 1, ExtraRowsSource: 2, + TableName: "t1", ExtraRowsSourceSample: []*RowDiff{ { Row: map[string]sqltypes.Value{ @@ -576,6 +579,7 @@ func TestVDiffUnsharded(t *testing.T) { ProcessedRows: 3, MatchingRows: 2, ExtraRowsSource: 1, + TableName: "t1", ExtraRowsSourceSample: []*RowDiff{ { Row: map[string]sqltypes.Value{ @@ -603,6 +607,7 @@ func TestVDiffUnsharded(t *testing.T) { ProcessedRows: 3, MatchingRows: 2, ExtraRowsTarget: 1, + TableName: "t1", ExtraRowsTargetSample: []*RowDiff{ { Row: map[string]sqltypes.Value{ @@ -631,6 +636,7 @@ func TestVDiffUnsharded(t *testing.T) { ProcessedRows: 3, MatchingRows: 2, MismatchedRows: 1, + TableName: "t1", MismatchedRowsSample: []*DiffMismatch{ { Source: &RowDiff{Row: map[string]sqltypes.Value{ @@ -667,6 +673,7 @@ func TestVDiffUnsharded(t *testing.T) { ProcessedRows: 3, MatchingRows: 2, MismatchedRows: 1, + TableName: "t1", MismatchedRowsSample: []*DiffMismatch{ { Source: &RowDiff{Row: map[string]sqltypes.Value{ @@ -701,6 +708,7 @@ func 
TestVDiffUnsharded(t *testing.T) { ProcessedRows: 3, MatchingRows: 2, MismatchedRows: 1, + TableName: "t1", MismatchedRowsSample: []*DiffMismatch{ { Source: &RowDiff{Row: map[string]sqltypes.Value{ @@ -793,6 +801,7 @@ func TestVDiffSharded(t *testing.T) { wantdr := &DiffReport{ ProcessedRows: 3, MatchingRows: 3, + TableName: "t1", } assert.Equal(t, wantdr, dr["t1"]) } @@ -859,6 +868,7 @@ func TestVDiffAggregates(t *testing.T) { wantdr := &DiffReport{ ProcessedRows: 5, MatchingRows: 5, + TableName: "t1", } assert.Equal(t, wantdr, dr["t1"]) } @@ -923,6 +933,7 @@ func TestVDiffPKWeightString(t *testing.T) { wantdr := &DiffReport{ ProcessedRows: 4, MatchingRows: 4, + TableName: "t1", } assert.Equal(t, wantdr, dr["t1"]) } @@ -987,6 +998,7 @@ func TestVDiffNoPKWeightString(t *testing.T) { wantdr := &DiffReport{ ProcessedRows: 4, MatchingRows: 4, + TableName: "t1", } assert.Equal(t, wantdr, dr["t1"]) } @@ -1242,6 +1254,7 @@ func TestVDiffNullWeightString(t *testing.T) { dr: &DiffReport{ ProcessedRows: 3, MatchingRows: 3, + TableName: "t1", }, }, { name: "must not match", @@ -1259,6 +1272,7 @@ func TestVDiffNullWeightString(t *testing.T) { dr: &DiffReport{ ProcessedRows: 3, MismatchedRows: 3, + TableName: "t1", MismatchedRowsSample: []*DiffMismatch{ { Source: &RowDiff{Row: map[string]sqltypes.Value{ @@ -1323,6 +1337,7 @@ func TestVDiffNullWeightString(t *testing.T) { dr: &DiffReport{ ProcessedRows: 1, MismatchedRows: 1, + TableName: "t1", MismatchedRowsSample: []*DiffMismatch{ { Source: &RowDiff{Row: map[string]sqltypes.Value{ diff --git a/helm/vitess/templates/_orchestrator-conf.tpl b/helm/vitess/templates/_orchestrator-conf.tpl index 950c0768ea0..fadef940f95 100644 --- a/helm/vitess/templates/_orchestrator-conf.tpl +++ b/helm/vitess/templates/_orchestrator-conf.tpl @@ -128,7 +128,6 @@ data: "SkipBinlogEventsContaining": [ ], "SkipBinlogServerUnresolveCheck": true, - "SkipMaxScaleCheck": true, "SkipOrchestratorDatabaseUpdate": false, "SlaveStartPostWaitMilliseconds": 
1000, "SnapshotTopologiesIntervalHours": 0, diff --git a/proto/vtctldata.proto b/proto/vtctldata.proto index 3c942a1cf5d..746f34c4cbc 100644 --- a/proto/vtctldata.proto +++ b/proto/vtctldata.proto @@ -127,6 +127,7 @@ message Workflow { /* Request/response types for VtctldServer */ + message AddCellInfoRequest { string name = 1; topodata.CellInfo cell_info = 2; @@ -158,6 +159,19 @@ message ApplyRoutingRulesRequest { message ApplyRoutingRulesResponse { } +message ApplyVSchemaRequest { + string keyspace = 1; + bool skip_rebuild = 2; + bool dry_run = 3; + repeated string cells = 4; + vschema.Keyspace v_schema = 5; + string sql = 6; +} + +message ApplyVSchemaResponse { + vschema.Keyspace v_schema = 1; +} + message ChangeTabletTypeRequest { topodata.TabletAlias tablet_alias = 1; topodata.TabletType db_type = 2; diff --git a/proto/vtctlservice.proto b/proto/vtctlservice.proto index 373a8e6d387..93571e506c5 100644 --- a/proto/vtctlservice.proto +++ b/proto/vtctlservice.proto @@ -43,6 +43,8 @@ service Vtctld { rpc AddCellsAlias(vtctldata.AddCellsAliasRequest) returns (vtctldata.AddCellsAliasResponse) {}; // ApplyRoutingRules applies the VSchema routing rules. rpc ApplyRoutingRules(vtctldata.ApplyRoutingRulesRequest) returns (vtctldata.ApplyRoutingRulesResponse) {}; + // ApplyVSchema applies a vschema to a keyspace. + rpc ApplyVSchema(vtctldata.ApplyVSchemaRequest) returns (vtctldata.ApplyVSchemaResponse) {}; // ChangeTabletType changes the db type for the specified tablet, if possible. // This is used primarily to arrange replicas, and it will not convert a // primary. For that, use InitShardPrimary. 
diff --git a/web/orchestrator/public/js/cluster.js b/web/orchestrator/public/js/cluster.js index b67b3e08787..38646e34a67 100644 --- a/web/orchestrator/public/js/cluster.js +++ b/web/orchestrator/public/js/cluster.js @@ -39,14 +39,6 @@ function Cluster() { apiCommand("/api/relocate-replicas/" + _instancesMap[e.draggedNodeId].Key.Hostname + "/" + _instancesMap[e.draggedNodeId].Key.Port + "/" + belowHost + "/" + belowPort); return true; }, - "make-master": function(e) { - makeMaster(_instancesMap[e.draggedNodeId]); - return false; - }, - "make-local-master": function(e) { - makeLocalMaster(_instancesMap[e.draggedNodeId]); - return false; - }, }; Object.defineProperties(_this, { @@ -432,7 +424,7 @@ function Cluster() { } var droppableTitle = getInstanceDiv(droppableNode.id).find("h3 .pull-left").html(); if (moveInstanceMethod == "smart") { - // Moving via GTID or Pseudo GTID + // Moving via GTID if (node.hasConnectivityProblem || droppableNode.hasConnectivityProblem || droppableNode.isAggregate) { // Obviously can't handle. return { @@ -495,16 +487,12 @@ function Cluster() { } var gtidBelowFunc = null; var gtidOperationName = ""; - if (moveInstanceMethod == "pseudo-gtid") { - gtidBelowFunc = matchBelow; - gtidOperationName = "match"; - } if (moveInstanceMethod == "gtid") { gtidBelowFunc = moveBelowGTID; gtidOperationName = "move:gtid"; } if (gtidBelowFunc != null) { - // Moving via GTID or Pseudo GTID + // Moving via GTID if (node.hasConnectivityProblem || droppableNode.hasConnectivityProblem || droppableNode.isAggregate) { // Obviously can't handle. 
return { @@ -583,7 +571,7 @@ function Cluster() { }; } if (moveInstanceMethod == "classic") { - // Not pseudo-GTID mode, non GTID mode + // non GTID mode if (node.id == droppableNode.id) { return { accept: false @@ -778,7 +766,7 @@ function Cluster() { } var droppableTitle = getInstanceDiv(droppableNode.id).find("h3 .pull-left").html(); if (moveInstanceMethod == "smart") { - // Moving via GTID or Pseudo GTID + // Moving via GTID if (droppableNode.hasConnectivityProblem || droppableNode.isAggregate) { // Obviously can't handle. return { @@ -824,16 +812,12 @@ function Cluster() { var gtidBelowFunc = null; var gtidOperationName = ""; - if (moveInstanceMethod == "pseudo-gtid") { - gtidBelowFunc = matchReplicas; - gtidOperationName = "match"; - } if (moveInstanceMethod == "gtid") { gtidBelowFunc = moveReplicasGTID; gtidOperationName = "move:gtid"; } if (gtidBelowFunc != null) { - // Moving via GTID or Pseudo GTID + // Moving via GTID if (droppableNode.hasConnectivityProblem || droppableNode.isAggregate) { // Obviously can't handle. return { @@ -888,7 +872,7 @@ function Cluster() { }; } if (moveInstanceMethod == "classic") { - // Not pseudo-GTID mode, non GTID mode + // non GTID mode if (node.id == droppableNode.id) { if (shouldApply) { repointReplicas(node); @@ -1028,16 +1012,6 @@ function Cluster() { return executeMoveOperation(message, apiUrl); } - function matchBelow(node, otherNode) { - var message = "

PSEUDO-GTID MODE, match-below

Are you sure you wish to turn " + - node.Key.Hostname + ":" + node.Key.Port + - " into a replica of " + - otherNode.Key.Hostname + ":" + otherNode.Key.Port + - "?"; - var apiUrl = "/api/match-below/" + node.Key.Hostname + "/" + node.Key.Port + "/" + otherNode.Key.Hostname + "/" + otherNode.Key.Port; - return executeMoveOperation(message, apiUrl); - } - function moveBelowGTID(node, otherNode) { var message = "

GTID MODE, move-below

Are you sure you wish to turn " + node.Key.Hostname + ":" + node.Key.Port + @@ -1318,7 +1292,6 @@ function Cluster() { } } - function refreshClusterOperationModeButton() { if (moveInstanceMethod == "smart") { $("#move-instance-method-button").removeClass("btn-success").removeClass("btn-primary").removeClass("btn-warning").addClass("btn-info"); @@ -1326,27 +1299,10 @@ function Cluster() { $("#move-instance-method-button").removeClass("btn-info").removeClass("btn-primary").removeClass("btn-warning").addClass("btn-success"); } else if (moveInstanceMethod == "gtid") { $("#move-instance-method-button").removeClass("btn-success").removeClass("btn-info").removeClass("btn-warning").addClass("btn-primary"); - } else if (moveInstanceMethod == "pseudo-gtid") { - $("#move-instance-method-button").removeClass("btn-success").removeClass("btn-primary").removeClass("btn-info").addClass("btn-warning"); } $("#move-instance-method-button").html(moveInstanceMethod + ' mode ') } - // This is legacy and will be removed - function makeMaster(instance) { - var message = "Are you sure you wish to make " + instance.Key.Hostname + ":" + instance.Key.Port + " the new master?" + "

Siblings of " + instance.Key.Hostname + ":" + instance.Key.Port + " will turn to be its children, " + "via Pseudo-GTID." + "

The instance will be set to be writeable (read_only = 0)." + "

Replication on this instance will be stopped, but not reset. You should run RESET SLAVE yourself " + "if this instance will indeed become the master." + "

Pointing your application servers to the new master is on you."; - var apiUrl = "/api/make-master/" + instance.Key.Hostname + "/" + instance.Key.Port; - return executeMoveOperation(message, apiUrl); - } - - //This is legacy and will be removed - function makeLocalMaster(instance) { - var message = "Are you sure you wish to make " + instance.Key.Hostname + ":" + instance.Key.Port + " a local master?" + "

Siblings of " + instance.Key.Hostname + ":" + instance.Key.Port + " will turn to be its children, " + "via Pseudo-GTID." + "

The instance will replicate from its grandparent."; - var apiUrl = "/api/make-local-master/" + instance.Key.Hostname + "/" + instance.Key.Port; - return executeMoveOperation(message, apiUrl); - } - - function promptForAlias(oldAlias) { bootbox.prompt({ title: "Enter alias for this cluster", diff --git a/web/orchestrator/public/js/orchestrator.js b/web/orchestrator/public/js/orchestrator.js index b00f11f8869..af157a0df6b 100644 --- a/web/orchestrator/public/js/orchestrator.js +++ b/web/orchestrator/public/js/orchestrator.js @@ -264,7 +264,6 @@ function openNodeModal(node) { if (node.UnresolvedHostname) { addNodeModalDataAttribute("Unresolved hostname", node.UnresolvedHostname); } - $('#node_modal [data-btn-group=move-equivalent]').appendTo(hiddenZone); if (node.MasterKey.Hostname) { var td = addNodeModalDataAttribute("Master", node.masterTitle); if (node.IsDetachedMaster) { @@ -293,24 +292,6 @@ function openNodeModal(node) { addNodeModalDataAttribute("SQL delay", node.SQLDelay); var masterCoordinatesEl = addNodeModalDataAttribute("Master coordinates", node.ExecBinlogCoordinates.LogFile + ":" + node.ExecBinlogCoordinates.LogPos); - $('#node_modal [data-btn-group=move-equivalent] ul').empty(); - $.get(appUrl("/api/master-equivalent/") + node.MasterKey.Hostname + "/" + node.MasterKey.Port + "/" + node.ExecBinlogCoordinates.LogFile + "/" + node.ExecBinlogCoordinates.LogPos, function(equivalenceResult) { - if (!equivalenceResult.Details) { - return false; - } - equivalenceResult.Details.forEach(function(equivalence) { - if (equivalence.Key.Hostname == node.Key.Hostname && equivalence.Key.Port == node.Key.Port) { - // This very instance; will not move below itself - return; - } - var title = canonizeInstanceTitle(equivalence.Key.Hostname + ':' + equivalence.Key.Port); - $('#node_modal [data-btn-group=move-equivalent] ul').append('

  • ' + title + '
  • '); - }); - - if ($('#node_modal [data-btn-group=move-equivalent] ul li').length) { - $('#node_modal [data-btn-group=move-equivalent]').appendTo(masterCoordinatesEl.find("div")); - } - }, "json"); if (node.IsDetached) { $('#node_modal button[data-btn=detach-replica]').appendTo(hiddenZone) $('#node_modal button[data-btn=reattach-replica]').appendTo(masterCoordinatesEl.find("div")) @@ -492,12 +473,6 @@ function openNodeModal(node) { return false; }); - $("body").on("click", "#node_modal a[data-btn=move-equivalent]", function(event) { - var targetHostname = $(event.target).attr("data-hostname"); - var targetPort = $(event.target).attr("data-port"); - apiCommand("/api/move-equivalent/" + node.Key.Hostname + "/" + node.Key.Port + "/" + targetHostname + "/" + targetPort); - }); - if (node.IsDowntimed) { $('#node_modal .end-downtime .panel-heading').html("Downtimed by " + node.DowntimeOwner + " until " + node.DowntimeEndTimestamp); $('#node_modal .end-downtime .panel-body').html( @@ -630,7 +605,7 @@ function normalizeInstance(instance) { instance.isSeenRecently = instance.SecondsSinceLastSeen.Valid && instance.SecondsSinceLastSeen.Int64 <= 3600; instance.supportsGTID = instance.SupportsOracleGTID || instance.UsingMariaDBGTID; instance.usingGTID = instance.UsingOracleGTID || instance.UsingMariaDBGTID; - instance.isMaxScale = (instance.Version.indexOf("maxscale") >= 0); + instance.isMaxScale = false; // used by cluster-tree instance.children = []; @@ -911,9 +886,6 @@ function renderInstanceElement(popoverElement, instance, renderType) { popoverElement.find("h3 div.pull-right").prepend(' '); } } - if (instance.UsingPseudoGTID) { - popoverElement.find("h3 div.pull-right").prepend(' '); - } if (!instance.ReadOnly) { popoverElement.find("h3 div.pull-right").prepend(' '); } diff --git a/web/orchestrator/templates/cluster.tmpl b/web/orchestrator/templates/cluster.tmpl index 05ec6595077..e321899c4d6 100644 --- a/web/orchestrator/templates/cluster.tmpl +++ 
b/web/orchestrator/templates/cluster.tmpl @@ -62,7 +62,6 @@
  • Smart mode
  • Classic mode
  • GTID mode
  • -
  • Pseudo GTID mode
  • @@ -73,7 +72,7 @@ } function isPseudoGTIDModeEnabled() { - return "{{.pseudoGTIDModeEnabled}}" == "true"; + return false; } function removeTextFromHostnameDisplay() { diff --git a/web/orchestrator/templates/layout.tmpl b/web/orchestrator/templates/layout.tmpl index 4a0be270258..b19549507be 100644 --- a/web/orchestrator/templates/layout.tmpl +++ b/web/orchestrator/templates/layout.tmpl @@ -220,13 +220,6 @@ -
    - - -