diff --git a/go/cmd/vtcombo/main.go b/go/cmd/vtcombo/main.go
index cfab3ff0172..52291078329 100644
--- a/go/cmd/vtcombo/main.go
+++ b/go/cmd/vtcombo/main.go
@@ -217,7 +217,10 @@ func main() {
 	vtg := vtgate.Init(context.Background(), resilientServer, tpb.Cells[0], tabletTypesToWait)
 
 	// vtctld configuration and init
-	vtctld.InitVtctld(ts)
+	err = vtctld.InitVtctld(ts)
+	if err != nil {
+		exit.Return(1)
+	}
 
 	servenv.OnRun(func() {
 		addStatusParts(vtg)
diff --git a/go/cmd/vtctl/vtctl.go b/go/cmd/vtctl/vtctl.go
index ced6d2626d5..9f6d7136d0b 100644
--- a/go/cmd/vtctl/vtctl.go
+++ b/go/cmd/vtctl/vtctl.go
@@ -38,6 +38,7 @@ import (
 	"vitess.io/vitess/go/vt/vtctl"
 	"vitess.io/vitess/go/vt/vtctl/grpcvtctldserver"
 	"vitess.io/vitess/go/vt/vtctl/localvtctldclient"
+	"vitess.io/vitess/go/vt/vtctl/reparentutil"
 	"vitess.io/vitess/go/vt/vttablet/tmclient"
 	"vitess.io/vitess/go/vt/workflow"
 	"vitess.io/vitess/go/vt/wrangler"
@@ -46,6 +47,7 @@ import (
 var (
 	waitTime     = flag.Duration("wait-time", 24*time.Hour, "time to wait on an action")
 	detachedMode = flag.Bool("detach", false, "detached mode - run vtctl detached from the terminal")
+	durability   = flag.String("durability", "none", "type of durability to enforce. Default is none. Other values are dictated by registered plugins")
)
 
 func init() {
@@ -91,6 +93,11 @@ func main() {
 		log.Warningf("cannot connect to syslog: %v", err)
 	}
 
+	if err := reparentutil.SetDurabilityPolicy(*durability, nil); err != nil {
+		log.Errorf("error in setting durability policy: %v", err)
+		exit.Return(1)
+	}
+
 	closer := trace.StartTracing("vtctl")
 	defer trace.LogErrorsWhenClosing(closer)
diff --git a/go/cmd/vtctld/main.go b/go/cmd/vtctld/main.go
index 82c80a53dba..aa10badce33 100644
--- a/go/cmd/vtctld/main.go
+++ b/go/cmd/vtctld/main.go
@@ -17,6 +17,7 @@ limitations under the License.
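vtctl now installs a durability policy before executing any command. As a rough sketch of the plugin-registry shape that "registered plugins" in the flag help suggests: everything below except the SetDurabilityPolicy name and signature is a hypothetical illustration, not the actual reparentutil API.

	// Hypothetical sketch of a pluggable durability-policy registry.
	package reparentutil

	import "fmt"

	// durabler stands in for whatever interface a concrete policy implements.
	type durabler interface{}

	var durabilityPolicies = make(map[string]func(map[string]string) (durabler, error))

	// registerDurability is how a plugin would contribute a policy such as "none".
	func registerDurability(name string, newFn func(map[string]string) (durabler, error)) {
		durabilityPolicies[name] = newFn
	}

	// SetDurabilityPolicy resolves the name passed via the -durability flag.
	func SetDurabilityPolicy(name string, durabilityParams map[string]string) error {
		newFn, ok := durabilityPolicies[name]
		if !ok {
			return fmt.Errorf("durability policy %v not found", name)
		}
		_, err := newFn(durabilityParams)
		return err
	}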
package main import ( + "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctld" @@ -40,7 +41,10 @@ func main() { defer ts.Close() // Init the vtctld core - vtctld.InitVtctld(ts) + err := vtctld.InitVtctld(ts) + if err != nil { + exit.Return(1) + } // Register http debug/health vtctld.RegisterDebugHealthHandler(ts) diff --git a/go/cmd/vtctldclient/command/reparents.go b/go/cmd/vtctldclient/command/reparents.go index 1469098fe66..a298c9afdf2 100644 --- a/go/cmd/vtctldclient/command/reparents.go +++ b/go/cmd/vtctldclient/command/reparents.go @@ -76,6 +76,7 @@ var emergencyReparentShardOptions = struct { WaitReplicasTimeout time.Duration NewPrimaryAliasStr string IgnoreReplicaAliasStrList []string + PreventCrossCellPromotion bool }{} func commandEmergencyReparentShard(cmd *cobra.Command, args []string) error { @@ -108,11 +109,12 @@ func commandEmergencyReparentShard(cmd *cobra.Command, args []string) error { cli.FinishedParsing(cmd) resp, err := client.EmergencyReparentShard(commandCtx, &vtctldatapb.EmergencyReparentShardRequest{ - Keyspace: keyspace, - Shard: shard, - NewPrimary: newPrimaryAlias, - IgnoreReplicas: ignoreReplicaAliases, - WaitReplicasTimeout: protoutil.DurationToProto(emergencyReparentShardOptions.WaitReplicasTimeout), + Keyspace: keyspace, + Shard: shard, + NewPrimary: newPrimaryAlias, + IgnoreReplicas: ignoreReplicaAliases, + WaitReplicasTimeout: protoutil.DurationToProto(emergencyReparentShardOptions.WaitReplicasTimeout), + PreventCrossCellPromotion: emergencyReparentShardOptions.PreventCrossCellPromotion, }) if err != nil { return err @@ -261,6 +263,7 @@ func commandTabletExternallyReparented(cmd *cobra.Command, args []string) error func init() { EmergencyReparentShard.Flags().DurationVar(&emergencyReparentShardOptions.WaitReplicasTimeout, "wait-replicas-timeout", *topo.RemoteOperationTimeout, "Time to wait for replicas to catch up in reparenting.") EmergencyReparentShard.Flags().StringVar(&emergencyReparentShardOptions.NewPrimaryAliasStr, "new-primary", "", "Alias of a tablet that should be the new primary. If not specified, the vtctld will select the best candidate to promote.") + EmergencyReparentShard.Flags().BoolVar(&emergencyReparentShardOptions.PreventCrossCellPromotion, "prevent-cross-cell-promotion", false, "Only promotes a new primary from the same cell as the previous primary") EmergencyReparentShard.Flags().StringSliceVarP(&emergencyReparentShardOptions.IgnoreReplicaAliasStrList, "ignore-replicas", "i", nil, "Comma-separated, repeated list of replica tablet aliases to ignore during the emergency reparent.") Root.AddCommand(EmergencyReparentShard) diff --git a/go/mysql/replication_status.go b/go/mysql/replication_status.go index 852e68d3f3c..801d38a680f 100644 --- a/go/mysql/replication_status.go +++ b/go/mysql/replication_status.go @@ -123,16 +123,7 @@ func (s *ReplicationStatus) FindErrantGTIDs(otherReplicaStatuses []*ReplicationS if !ok { panic("The receiver ReplicationStatus contained a Mysql56GTIDSet in its relay log, but a replica's ReplicationStatus is of another flavor. This should never happen.") } - // Copy and throw out primary SID from consideration, so we don't mutate input. 
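Context for the hunk in progress: FindErrantGTIDs flags transactions that exist on the receiving replica but on no other replica, by starting from the receiver's relay-log GTID set and subtracting every other replica's set. Reduced to a sketch, assuming the package's Difference helper on Mysql56GTIDSet:

	// errantGTIDs condenses the subtraction at the heart of FindErrantGTIDs;
	// the real method also validates flavors and copies the receiver's set
	// before mutating it.
	func errantGTIDs(relayLogSet Mysql56GTIDSet, otherSets []Mysql56GTIDSet) Mysql56GTIDSet {
		diffSet := relayLogSet
		for _, otherSet := range otherSets {
			diffSet = diffSet.Difference(otherSet)
		}
		return diffSet // whatever remains was seen only on this replica
	}

With the primary's SID no longer thrown out of the other sets (the deletion below), two replicas holding identical sets subtract to nothing, which is exactly the behavior the new test case in replication_status_test.go pins down.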
- otherSetNoPrimarySID := make(Mysql56GTIDSet, len(otherSet)) - for sid, intervals := range otherSet { - if sid == status.SourceUUID { - continue - } - otherSetNoPrimarySID[sid] = intervals - } - - otherSets = append(otherSets, otherSetNoPrimarySID) + otherSets = append(otherSets, otherSet) } // Copy set for final diffSet so we don't mutate receiver. diff --git a/go/mysql/replication_status_test.go b/go/mysql/replication_status_test.go index 0a9358fb912..6c23eee3aef 100644 --- a/go/mysql/replication_status_test.go +++ b/go/mysql/replication_status_test.go @@ -18,6 +18,8 @@ package mysql import ( "testing" + + "github.com/stretchr/testify/require" ) func TestStatusReplicationRunning(t *testing.T) { @@ -81,22 +83,33 @@ func TestFindErrantGTIDs(t *testing.T) { sourceSID: []interval{{2, 6}, {15, 45}}, } - status1 := ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set1}} - status2 := ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set2}} - status3 := ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set3}} - - got, err := status1.FindErrantGTIDs([]*ReplicationStatus{&status2, &status3}) - if err != nil { - t.Errorf("%v", err) - } - - want := Mysql56GTIDSet{ - sid1: []interval{{39, 39}, {40, 49}, {71, 75}}, - sid2: []interval{{1, 2}, {6, 7}, {20, 21}, {26, 31}, {38, 50}, {60, 66}}, - sid4: []interval{{1, 30}}, - } - - if !got.Equal(want) { - t.Errorf("got %#v; want %#v", got, want) + testcases := []struct { + mainRepStatus *ReplicationStatus + otherRepStatuses []*ReplicationStatus + want Mysql56GTIDSet + }{{ + mainRepStatus: &ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set1}}, + otherRepStatuses: []*ReplicationStatus{ + {SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set2}}, + {SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set3}}, + }, + want: Mysql56GTIDSet{ + sid1: []interval{{39, 39}, {40, 49}, {71, 75}}, + sid2: []interval{{1, 2}, {6, 7}, {20, 21}, {26, 31}, {38, 50}, {60, 66}}, + sid4: []interval{{1, 30}}, + }, + }, { + mainRepStatus: &ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set1}}, + otherRepStatuses: []*ReplicationStatus{{SourceUUID: sid1, RelayLogPosition: Position{GTIDSet: set1}}}, + // servers with the same GTID sets should not be diagnosed with errant GTIDs + want: nil, + }} + + for _, testcase := range testcases { + t.Run("", func(t *testing.T) { + got, err := testcase.mainRepStatus.FindErrantGTIDs(testcase.otherRepStatuses) + require.NoError(t, err) + require.Equal(t, testcase.want, got) + }) } } diff --git a/go/test/endtoend/reparent/emergencyreparent/ers_test.go b/go/test/endtoend/reparent/emergencyreparent/ers_test.go new file mode 100644 index 00000000000..a5e0a4ec5d1 --- /dev/null +++ b/go/test/endtoend/reparent/emergencyreparent/ers_test.go @@ -0,0 +1,204 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package emergencyreparent + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/log" +) + +func TestTrivialERS(t *testing.T) { + defer cluster.PanicHandler(t) + setupReparentCluster(t) + defer teardownCluster() + + confirmReplication(t, tab1, []*cluster.Vttablet{tab2, tab3, tab4}) + + // We should be able to do a series of ERS-es, even if nothing + // is down, without issue + for i := 1; i <= 4; i++ { + out, err := ers(nil, "60s", "30s") + log.Infof("ERS loop %d. EmergencyReparentShard Output: %v", i, out) + require.NoError(t, err) + time.Sleep(5 * time.Second) + } + // We should do the same for vtctl binary + for i := 1; i <= 4; i++ { + out, err := ersWithVtctl() + log.Infof("ERS-vtctl loop %d. EmergencyReparentShard Output: %v", i, out) + require.NoError(t, err) + time.Sleep(5 * time.Second) + } +} + +func TestReparentIgnoreReplicas(t *testing.T) { + defer cluster.PanicHandler(t) + setupReparentCluster(t) + defer teardownCluster() + var err error + + ctx := context.Background() + + confirmReplication(t, tab1, []*cluster.Vttablet{tab2, tab3, tab4}) + + // Make the current primary agent and database unavailable. + stopTablet(t, tab1, true) + + // Take down a replica - this should cause the emergency reparent to fail. + stopTablet(t, tab3, true) + + // We expect this one to fail because we have an unreachable replica + out, err := ers(nil, "60s", "30s") + require.NotNil(t, err, out) + + // Now let's run it again, but set the command to ignore the unreachable replica. + out, err = ersIgnoreTablet(nil, "60s", "30s", []*cluster.Vttablet{tab3}, false) + require.Nil(t, err, out) + + // We'll bring back the replica we took down. + restartTablet(t, tab3) + + // Check that old primary tablet is left around for human intervention. + confirmOldPrimaryIsHangingAround(t) + deleteTablet(t, tab1) + validateTopology(t, false) + + newPrimary := getNewPrimary(t) + // Check new primary has latest transaction. + err = checkInsertedValues(ctx, t, newPrimary, insertVal) + require.Nil(t, err) + + // bring back the old primary as a replica, check that it catches up + resurrectTablet(ctx, t, tab1) +} + +// TestERSPromoteRdonly tests that we never end up promoting a rdonly instance as the primary +func TestERSPromoteRdonly(t *testing.T) { + defer cluster.PanicHandler(t) + setupReparentCluster(t) + defer teardownCluster() + var err error + + err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tab2.Alias, "rdonly") + require.NoError(t, err) + + err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tab3.Alias, "rdonly") + require.NoError(t, err) + + confirmReplication(t, tab1, []*cluster.Vttablet{tab2, tab3, tab4}) + + // Make the current primary agent and database unavailable. 
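For reference while reading these tests: ers and ersIgnoreTablet (defined in utils_test.go later in this diff) just assemble a vtctlclient command. The ignore-tablet call a few lines below expands to roughly the following, with the alias shown being illustrative:

	// Approximate expansion of ersIgnoreTablet(nil, "30s", "30s",
	// []*cluster.Vttablet{tab4}, false); the alias value is hypothetical.
	args := []string{
		"-action_timeout", "30s",
		"EmergencyReparentShard", "-keyspace_shard", "ks/0",
		"-wait_replicas_timeout", "30s",
		"-ignore_replicas", "zone2-0000000201", // tab4
	}
	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...)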
+	stopTablet(t, tab1, true)
+
+	// We expect this one to fail because we have ignored all the replicas and have only the rdonlys, which should not be promoted
+	out, err := ersIgnoreTablet(nil, "30s", "30s", []*cluster.Vttablet{tab4}, false)
+	require.NotNil(t, err, out)
+
+	out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShard", keyspaceShard)
+	require.NoError(t, err)
+	require.Contains(t, out, `"uid": 101`, "the primary should still be 101 in the shard info")
+}
+
+// TestERSPreventCrossCellPromotion tests that we promote a replica in the same cell as the previous primary if the prevent cross cell promotion flag is set
+func TestERSPreventCrossCellPromotion(t *testing.T) {
+	defer cluster.PanicHandler(t)
+	setupReparentCluster(t)
+	defer teardownCluster()
+	var err error
+
+	// confirm that replication is going smoothly
+	confirmReplication(t, tab1, []*cluster.Vttablet{tab2, tab3, tab4})
+
+	// Make the current primary agent and database unavailable.
+	stopTablet(t, tab1, true)
+
+	// We expect that tab3 will be promoted since it is in the same cell as the previous primary
+	out, err := ersIgnoreTablet(nil, "60s", "30s", []*cluster.Vttablet{tab2}, true)
+	require.NoError(t, err, out)
+
+	newPrimary := getNewPrimary(t)
+	require.Equal(t, newPrimary.Alias, tab3.Alias, "tab3 should be the promoted primary")
+}
+
+// TestPullFromRdonly tests that if an rdonly tablet is the most advanced, then our promoted primary should have
+// caught up to it by pulling transactions from it
+func TestPullFromRdonly(t *testing.T) {
+	defer cluster.PanicHandler(t)
+	setupReparentCluster(t)
+	defer teardownCluster()
+	var err error
+
+	ctx := context.Background()
+	// make tab2 an rdonly tablet.
+	// rename the tablet so that the test is not confusing
+	rdonly := tab2
+	err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly.Alias, "rdonly")
+	require.NoError(t, err)
+
+	// confirm that all the tablets can replicate successfully right now
+	confirmReplication(t, tab1, []*cluster.Vttablet{rdonly, tab3, tab4})
+
+	// stop replication on the other two tablets
+	err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", tab3.Alias)
+	require.NoError(t, err)
+	err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", tab4.Alias)
+	require.NoError(t, err)
+
+	// stop semi-sync on the primary so that any transaction now added does not require an ack
+	runSQL(ctx, t, "SET GLOBAL rpl_semi_sync_master_enabled = false", tab1)
+
+	// confirm that the rdonly is able to replicate from our primary
+	// This will also introduce a new transaction into the rdonly tablet which the other 2 replicas don't have
+	confirmReplication(t, tab1, []*cluster.Vttablet{rdonly})
+
+	// Make the current primary agent and database unavailable.
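The scenario below hinges on ERS identifying the most advanced tablet by relay-log position. Conceptually, a sketch only and not the actual ERS code, the comparison can be expressed with mysql.Position.AtLeast:

	// mostAdvanced picks the status whose relay log position subsumes the
	// others'. The real ERS additionally weighs promotion rules and cells.
	func mostAdvanced(statuses []*mysql.ReplicationStatus) *mysql.ReplicationStatus {
		best := statuses[0]
		for _, s := range statuses[1:] {
			if !best.RelayLogPosition.AtLeast(s.RelayLogPosition) {
				best = s
			}
		}
		return best
	}

Here the rdonly wins that comparison but cannot be promoted, so the eventual primary must first pull the missing transactions from it.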
+	stopTablet(t, tab1, true)
+
+	// start the replication back on the two tablets
+	err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", tab3.Alias)
+	require.NoError(t, err)
+	err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", tab4.Alias)
+	require.NoError(t, err)
+
+	// check that tab3 and tab4 still only have 1 value
+	err = checkCountOfInsertedValues(ctx, t, tab3, 1)
+	require.NoError(t, err)
+	err = checkCountOfInsertedValues(ctx, t, tab4, 1)
+	require.NoError(t, err)
+
+	// At this point we have successfully made our rdonly tablet more advanced than tab3 and tab4 without introducing errant GTIDs
+	// We have simulated a network partition in which the primary and rdonly got isolated and then the primary went down leaving the rdonly most advanced
+
+	// We expect that tab3 will be promoted since it is in the same cell as the previous primary
+	// since we are preventing cross cell promotions
+	// Also it must be fully caught up
+	out, err := ersIgnoreTablet(nil, "60s", "30s", nil, true)
+	require.NoError(t, err, out)
+
+	newPrimary := getNewPrimary(t)
+	require.Equal(t, newPrimary.Alias, tab3.Alias, "tab3 should be the promoted primary")
+
+	// check that the new primary has the last transaction that only the rdonly had
+	err = checkInsertedValues(ctx, t, newPrimary, insertVal)
+	require.NoError(t, err)
+}
diff --git a/go/test/endtoend/reparent/emergencyreparent/utils_test.go b/go/test/endtoend/reparent/emergencyreparent/utils_test.go
new file mode 100644
index 00000000000..eb3199fa2db
--- /dev/null
+++ b/go/test/endtoend/reparent/emergencyreparent/utils_test.go
@@ -0,0 +1,442 @@
+/*
+Copyright 2019 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package emergencyreparent
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"os/exec"
+	"path"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"vitess.io/vitess/go/json2"
+	"vitess.io/vitess/go/vt/log"
+	querypb "vitess.io/vitess/go/vt/proto/query"
+	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+
+	"vitess.io/vitess/go/mysql"
+	"vitess.io/vitess/go/sqltypes"
+	"vitess.io/vitess/go/test/endtoend/cluster"
+	tmc "vitess.io/vitess/go/vt/vttablet/grpctmclient"
+)
+
+var (
+	// ClusterInstance instance to be used for test with different params
+	clusterInstance *cluster.LocalProcessCluster
+	tmClient        *tmc.Client
+	keyspaceName    = "ks"
+	dbName          = "vt_" + keyspaceName
+	username        = "vt_dba"
+	hostname        = "localhost"
+	insertVal       = 1
+	insertSQL       = "insert into vt_insert_test(id, msg) values (%d, 'test %d')"
+	sqlSchema       = `
+	create table vt_insert_test (
+	id bigint,
+	msg varchar(64),
+	primary key (id)
+	) Engine=InnoDB
+`
+	cell1         = "zone1"
+	cell2         = "zone2"
+	shardName     = "0"
+	keyspaceShard = keyspaceName + "/" + shardName
+
+	tab1, tab2, tab3, tab4 *cluster.Vttablet
+)
+
+func setupReparentCluster(t *testing.T) {
+	tablets := setupCluster(context.Background(), t, shardName, []string{cell1, cell2}, []int{3, 1})
+	tab1, tab2, tab3, tab4 = tablets[0], tablets[1], tablets[2], tablets[3]
+}
+
+func teardownCluster() {
+	clusterInstance.Teardown()
+}
+
+func setupCluster(ctx context.Context, t *testing.T, shardName string, cells []string, numTablets []int) []*cluster.Vttablet {
+	var tablets []*cluster.Vttablet
+	clusterInstance = cluster.NewCluster(cells[0], hostname)
+	keyspace := &cluster.Keyspace{Name: keyspaceName}
+	// Start topo server
+	err := clusterInstance.StartTopo()
+	if err != nil {
+		t.Fatalf("Error starting topo: %s", err.Error())
+	}
+	err = clusterInstance.TopoProcess.ManageTopoDir("mkdir", "/vitess/"+cells[0])
+	if err != nil {
+		t.Fatalf("Error managing topo: %s", err.Error())
+	}
+	numCell := 1
+	for numCell < len(cells) {
+		err = clusterInstance.VtctlProcess.AddCellInfo(cells[numCell])
+		if err != nil {
+			t.Fatalf("Error adding cell info: %s", err.Error())
+		}
+		numCell++
+	}
+
+	// Adding another cell in the same cluster
+	numCell = 0
+	for numCell < len(cells) {
+		i := 0
+		for i < numTablets[numCell] {
+			i++
+			tablet := clusterInstance.NewVttabletInstance("replica", 100*(numCell+1)+i, cells[numCell])
+			tablets = append(tablets, tablet)
+		}
+		numCell++
+	}
+
+	shard := &cluster.Shard{Name: shardName}
+	shard.Vttablets = tablets
+
+	clusterInstance.VtTabletExtraArgs = []string{
+		"-lock_tables_timeout", "5s",
+		"-enable_semi_sync",
+		"-init_populate_metadata",
+		"-track_schema_versions=true",
+	}
+
+	// Initialize Cluster
+	err = clusterInstance.SetupCluster(keyspace, []cluster.Shard{*shard})
+	if err != nil {
+		t.Fatalf("Cannot launch cluster: %s", err.Error())
+	}
+
+	// Start MySQL
+	var mysqlCtlProcessList []*exec.Cmd
+	for _, shard := range clusterInstance.Keyspaces[0].Shards {
+		for _, tablet := range shard.Vttablets {
+			log.Infof("Starting MySQL for tablet %v", tablet.Alias)
+			proc, err := tablet.MysqlctlProcess.StartProcess()
+			if err != nil {
+				t.Fatalf("Error starting mysql: %s", err.Error())
+			}
+			mysqlCtlProcessList = append(mysqlCtlProcessList, proc)
+		}
+	}
+
+	// Wait for mysql processes to start
+	for _, proc := range mysqlCtlProcessList {
+		if err := proc.Wait(); err != nil {
+			t.Fatalf("Error starting mysql: %s", err.Error())
+		}
+	}
+
+	// create tablet manager client
+	tmClient = tmc.NewClient()
+	setupShard(ctx, t, shardName, tablets)
+	return tablets
+}
+
+func setupShard(ctx context.Context, t *testing.T, shardName string, tablets []*cluster.Vttablet) {
+	for _, tablet := range tablets {
+		// create database
+		err := tablet.VttabletProcess.CreateDB(keyspaceName)
+		require.NoError(t, err)
+		// Start the tablet
+		err = tablet.VttabletProcess.Setup()
+		require.NoError(t, err)
+	}
+
+	for _, tablet := range tablets {
+		err := tablet.VttabletProcess.WaitForTabletStatuses([]string{"SERVING", "NOT_SERVING"})
+		require.NoError(t, err)
+	}
+
+	// Force the replica to reparent assuming that all the datasets are identical.
+	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardPrimary",
+		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablets[0].Alias)
+	require.NoError(t, err)
+
+	validateTopology(t, true)
+
+	// create tables
+	runSQL(ctx, t, sqlSchema, tablets[0])
+
+	checkPrimaryTablet(t, tablets[0])
+
+	validateTopology(t, false)
+	time.Sleep(100 * time.Millisecond) // wait for replication to catch up
+	strArray := getShardReplicationPositions(t, keyspaceName, shardName, true)
+	assert.Equal(t, len(tablets), len(strArray))
+	assert.Contains(t, strArray[0], "primary") // primary first
+}
+
+//endregion
+
+//region database queries
+func getMysqlConnParam(tablet *cluster.Vttablet) mysql.ConnParams {
+	connParams := mysql.ConnParams{
+		Uname:      username,
+		DbName:     dbName,
+		UnixSocket: path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d/mysql.sock", tablet.TabletUID)),
+	}
+	return connParams
+}
+
+func runSQL(ctx context.Context, t *testing.T, sql string, tablet *cluster.Vttablet) *sqltypes.Result {
+	tabletParams := getMysqlConnParam(tablet)
+	conn, err := mysql.Connect(ctx, &tabletParams)
+	require.Nil(t, err)
+	defer conn.Close()
+	return execute(t, conn, sql)
+}
+
+func execute(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result {
+	t.Helper()
+	qr, err := conn.ExecuteFetch(query, 1000, true)
+	require.Nil(t, err)
+	return qr
+}
+
+//endregion
+
+// region ers
+
+func ers(tab *cluster.Vttablet, totalTimeout, waitReplicasTimeout string) (string, error) {
+	return ersIgnoreTablet(tab, totalTimeout, waitReplicasTimeout, nil, false)
+}
+
+func ersIgnoreTablet(tab *cluster.Vttablet, timeout, waitReplicasTimeout string, tabletsToIgnore []*cluster.Vttablet, preventCrossCellPromotion bool) (string, error) {
+	var args []string
+	if timeout != "" {
+		args = append(args, "-action_timeout", timeout)
+	}
+	args = append(args, "EmergencyReparentShard", "-keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, shardName))
+	if tab != nil {
+		args = append(args, "-new_primary", tab.Alias)
+	}
+	if waitReplicasTimeout != "" {
+		args = append(args, "-wait_replicas_timeout", waitReplicasTimeout)
+	}
+	if preventCrossCellPromotion {
+		args = append(args, "-prevent_cross_cell_promotion=true")
+	}
+	if len(tabletsToIgnore) != 0 {
+		// collect the aliases and join them into the comma-separated list
+		// that -ignore_replicas expects
+		aliases := make([]string, 0, len(tabletsToIgnore))
+		for _, vttablet := range tabletsToIgnore {
+			aliases = append(aliases, vttablet.Alias)
+		}
+		args = append(args, "-ignore_replicas", strings.Join(aliases, ","))
+	}
+	return clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...)
+}
+
+func ersWithVtctl() (string, error) {
+	args := []string{"EmergencyReparentShard", "-keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, shardName)}
+	return clusterInstance.VtctlProcess.ExecuteCommandWithOutput(args...)
+}
+
+// endregion
+
+// region validations
+
+func validateTopology(t *testing.T, pingTablets bool) {
+	args := []string{"Validate"}
+
+	if pingTablets {
+		args = append(args, "-ping-tablets=true")
+	}
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...)
+	require.Empty(t, out)
+	require.NoError(t, err)
+}
+
+func confirmReplication(t *testing.T, primary *cluster.Vttablet, replicas []*cluster.Vttablet) {
+	ctx := context.Background()
+	insertVal++
+	n := insertVal // unique value ...
+	// insert data into the new primary, check that the connected replicas get it
+	sql := fmt.Sprintf(insertSQL, n, n) // avoid shadowing the insertSQL template
+	runSQL(ctx, t, sql, primary)
+	time.Sleep(100 * time.Millisecond)
+	for _, tab := range replicas {
+		err := checkInsertedValues(ctx, t, tab, n)
+		require.NoError(t, err)
+	}
+}
+
+func confirmOldPrimaryIsHangingAround(t *testing.T) {
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Validate")
+	require.Error(t, err)
+	require.Contains(t, out, "already has primary")
+}
+
+// Makes sure the tablet type is primary, and its health check agrees.
+func checkPrimaryTablet(t *testing.T, tablet *cluster.Vttablet) {
+	result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tablet.Alias)
+	require.NoError(t, err)
+	var tabletInfo topodatapb.Tablet
+	err = json2.Unmarshal([]byte(result), &tabletInfo)
+	require.NoError(t, err)
+	assert.Equal(t, topodatapb.TabletType_PRIMARY, tabletInfo.GetType())
+
+	// make sure the health stream is updated
+	result, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("VtTabletStreamHealth", "-count", "1", tablet.Alias)
+	require.NoError(t, err)
+	var streamHealthResponse querypb.StreamHealthResponse
+
+	err = json2.Unmarshal([]byte(result), &streamHealthResponse)
+	require.NoError(t, err)
+
+	assert.True(t, streamHealthResponse.GetServing())
+	tabletType := streamHealthResponse.GetTarget().GetTabletType()
+	assert.Equal(t, topodatapb.TabletType_PRIMARY, tabletType)
+}
+
+// isHealthyPrimaryTablet returns true if the tablet is primary AND healthy.
+func isHealthyPrimaryTablet(t *testing.T, tablet *cluster.Vttablet) bool {
+	result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tablet.Alias)
+	require.Nil(t, err)
+	var tabletInfo topodatapb.Tablet
+	err = json2.Unmarshal([]byte(result), &tabletInfo)
+	require.Nil(t, err)
+	if tabletInfo.GetType() != topodatapb.TabletType_PRIMARY {
+		return false
+	}
+
+	// make sure the health stream is updated
+	result, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("VtTabletStreamHealth", "-count", "1", tablet.Alias)
+	require.Nil(t, err)
+	var streamHealthResponse querypb.StreamHealthResponse
+
+	err = json2.Unmarshal([]byte(result), &streamHealthResponse)
+	require.Nil(t, err)
+
+	assert.True(t, streamHealthResponse.GetServing())
+	tabletType := streamHealthResponse.GetTarget().GetTabletType()
+	return tabletType == topodatapb.TabletType_PRIMARY
+}
+
+func checkInsertedValues(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, index int) error {
+	// wait until it gets the data
+	timeout := time.Now().Add(15 * time.Second)
+	i := 0
+	for time.Now().Before(timeout) {
+		selectSQL := fmt.Sprintf("select msg from vt_insert_test where id=%d", index)
+		qr := runSQL(ctx, t, selectSQL, tablet)
+		if len(qr.Rows) == 1 {
+			return nil
+		}
+		// back off a little longer on every retry; use a name that does not
+		// shadow the *testing.T parameter
+		d := time.Duration(300*i) * time.Millisecond
+		time.Sleep(d)
+		i++
+	}
+	return fmt.Errorf("data is not yet replicated on tablet %s", tablet.Alias)
+}
+
+func checkCountOfInsertedValues(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, count int) error {
+	selectSQL := "select * from vt_insert_test"
+	qr := runSQL(ctx, t, selectSQL, tablet)
+	if len(qr.Rows) == count {
+		return nil
+	}
+	return fmt.Errorf("count does not match on the tablet %s", tablet.Alias)
+}
+
+// endregion
+
+// region tablet operations
+
+func stopTablet(t *testing.T, tab *cluster.Vttablet, stopDatabase bool) {
+	err := tab.VttabletProcess.TearDownWithTimeout(30 * time.Second)
+	require.NoError(t, err)
+	if stopDatabase {
+		err = tab.MysqlctlProcess.Stop()
+		require.NoError(t, err)
+	}
+}
+
+func restartTablet(t *testing.T, tab *cluster.Vttablet) {
+	tab.MysqlctlProcess.InitMysql = false
+	err := tab.MysqlctlProcess.Start()
+	require.NoError(t, err)
+	err = clusterInstance.VtctlclientProcess.InitTablet(tab, tab.Cell, keyspaceName, hostname, shardName)
+	require.NoError(t, err)
+}
+
+func resurrectTablet(ctx context.Context, t *testing.T, tab *cluster.Vttablet) {
+	tab.MysqlctlProcess.InitMysql = false
+	err := tab.MysqlctlProcess.Start()
+	require.NoError(t, err)
+	err = clusterInstance.VtctlclientProcess.InitTablet(tab, tab.Cell, keyspaceName, hostname, shardName)
+	require.NoError(t, err)
+
+	// As there is already a primary the new replica will come directly in SERVING state
+	tab.VttabletProcess.ServingStatus = "SERVING"
+	// Start the tablet
+	err = tab.VttabletProcess.Setup()
+	require.NoError(t, err)
+
+	err = checkInsertedValues(ctx, t, tab, insertVal)
+	require.NoError(t, err)
+}
+
+func deleteTablet(t *testing.T, tab *cluster.Vttablet) {
+	err := clusterInstance.VtctlclientProcess.ExecuteCommand(
+		"DeleteTablet",
+		"-allow_primary",
+		tab.Alias)
+	require.NoError(t, err)
+}
+
+// endregion
+
+// region get info
+
+func getNewPrimary(t *testing.T) *cluster.Vttablet {
+	var newPrimary *cluster.Vttablet
+	for _, tablet := range []*cluster.Vttablet{tab2, tab3, tab4} {
+		if isHealthyPrimaryTablet(t, tablet) {
+			newPrimary = tablet
+			break
+		}
+	}
+	require.NotNil(t, newPrimary)
+	return newPrimary
+}
+
+func
getShardReplicationPositions(t *testing.T, keyspaceName, shardName string, doPrint bool) []string { + output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput( + "ShardReplicationPositions", fmt.Sprintf("%s/%s", keyspaceName, shardName)) + require.NoError(t, err) + strArray := strings.Split(output, "\n") + if strArray[len(strArray)-1] == "" { + strArray = strArray[:len(strArray)-1] // Truncate slice, remove empty line + } + if doPrint { + log.Infof("Positions:") + for _, pos := range strArray { + log.Infof("\t%s", pos) + } + } + return strArray +} + +// endregion diff --git a/go/test/endtoend/reparent/reparent_range_based_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go similarity index 98% rename from go/test/endtoend/reparent/reparent_range_based_test.go rename to go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go index eb9a23b1c13..b3911de21ba 100644 --- a/go/test/endtoend/reparent/reparent_range_based_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package reparent +package plannedreparent import ( "context" diff --git a/go/test/endtoend/reparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go similarity index 89% rename from go/test/endtoend/reparent/reparent_test.go rename to go/test/endtoend/reparent/plannedreparent/reparent_test.go index bd178d0587d..f16695fae93 100644 --- a/go/test/endtoend/reparent/reparent_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package reparent +package plannedreparent import ( "context" @@ -118,64 +118,6 @@ func TestReparentNoChoiceDownPrimary(t *testing.T) { resurrectTablet(ctx, t, tab1) } -func TestTrivialERS(t *testing.T) { - defer cluster.PanicHandler(t) - setupReparentCluster(t) - defer teardownCluster() - - confirmReplication(t, tab1, []*cluster.Vttablet{tab2, tab3, tab4}) - - // We should be able to do a series of ERS-es, even if nothing - // is down, without issue - for i := 1; i <= 4; i++ { - out, err := ers(t, nil, "30s") - log.Infof("ERS loop %d. EmergencyReparentShard Output: %v", i, out) - require.NoError(t, err) - time.Sleep(5 * time.Second) - } -} - -func TestReparentIgnoreReplicas(t *testing.T) { - defer cluster.PanicHandler(t) - setupReparentCluster(t) - defer teardownCluster() - var err error - - ctx := context.Background() - - confirmReplication(t, tab1, []*cluster.Vttablet{tab2, tab3, tab4}) - - // Make the current primary agent and database unavailable. - stopTablet(t, tab1, true) - - // Take down a replica - this should cause the emergency reparent to fail. - stopTablet(t, tab3, true) - - // We expect this one to fail because we have an unreachable replica - out, err := ers(t, nil, "30s") - require.NotNil(t, err, out) - - // Now let's run it again, but set the command to ignore the unreachable replica. - out, err = ersIgnoreTablet(t, nil, "30s", tab3) - require.Nil(t, err, out) - - // We'll bring back the replica we took down. - restartTablet(t, tab3) - - // Check that old primary tablet is left around for human intervention. - confirmOldPrimaryIsHangingAround(t) - deleteTablet(t, tab1) - validateTopology(t, false) - - newPrimary := getNewPrimary(t) - // Check new primary has latest transaction. 
- err = checkInsertedValues(ctx, t, newPrimary, insertVal) - require.Nil(t, err) - - // bring back the old primary as a replica, check that it catches up - resurrectTablet(ctx, t, tab1) -} - func TestReparentCrossCell(t *testing.T) { defer cluster.PanicHandler(t) setupReparentCluster(t) diff --git a/go/test/endtoend/reparent/utils_test.go b/go/test/endtoend/reparent/plannedreparent/utils_test.go similarity index 97% rename from go/test/endtoend/reparent/utils_test.go rename to go/test/endtoend/reparent/plannedreparent/utils_test.go index 3146491a90a..e9314b9cfd9 100644 --- a/go/test/endtoend/reparent/utils_test.go +++ b/go/test/endtoend/reparent/plannedreparent/utils_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package reparent +package plannedreparent import ( "context" @@ -29,14 +29,13 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/json2" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" @@ -250,7 +249,7 @@ func ers(t *testing.T, tab *cluster.Vttablet, timeout string) (string, error) { return ersIgnoreTablet(t, tab, timeout, nil) } -func ersIgnoreTablet(t *testing.T, tab *cluster.Vttablet, timeout string, tabToIgnore *cluster.Vttablet) (string, error) { +func ersIgnoreTablet(t *testing.T, tab *cluster.Vttablet, timeout string, tabletsToIgnore []*cluster.Vttablet) (string, error) { args := []string{"EmergencyReparentShard", "-keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, shardName)} if tab != nil { args = append(args, "-new_primary", tab.Alias) @@ -258,8 +257,16 @@ func ersIgnoreTablet(t *testing.T, tab *cluster.Vttablet, timeout string, tabToI if timeout != "" { args = append(args, "-wait_replicas_timeout", "30s") } - if tabToIgnore != nil { - args = append(args, "-ignore_replicas", tabToIgnore.Alias) + if len(tabletsToIgnore) != 0 { + tabsString := "" + for _, vttablet := range tabletsToIgnore { + if tabsString == "" { + tabsString = vttablet.Alias + } else { + tabsString = tabsString + "," + vttablet.Alias + } + } + args = append(args, "-ignore_replicas", tabsString) } return clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...) 
} @@ -471,14 +478,6 @@ func stopTablet(t *testing.T, tab *cluster.Vttablet, stopDatabase bool) { } } -func restartTablet(t *testing.T, tab *cluster.Vttablet) { - tab.MysqlctlProcess.InitMysql = false - err := tab.MysqlctlProcess.Start() - require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.InitTablet(tab, tab.Cell, keyspaceName, hostname, shardName) - require.NoError(t, err) -} - func resurrectTablet(ctx context.Context, t *testing.T, tab *cluster.Vttablet) { tab.MysqlctlProcess.InitMysql = false err := tab.MysqlctlProcess.Start() diff --git a/go/test/endtoend/vtorc/main_test.go b/go/test/endtoend/vtorc/main_test.go index 2b4dd740e8f..3612dec89b6 100644 --- a/go/test/endtoend/vtorc/main_test.go +++ b/go/test/endtoend/vtorc/main_test.go @@ -324,13 +324,13 @@ func TestMain(m *testing.M) { // setup cellInfos before creating the cluster cellInfos = append(cellInfos, &cellInfo{ cellName: cell1, - numReplicas: 10, - numRdonly: 1, + numReplicas: 12, + numRdonly: 2, uidBase: 100, }) cellInfos = append(cellInfos, &cellInfo{ cellName: cell2, - numReplicas: 1, + numReplicas: 2, numRdonly: 0, uidBase: 200, }) @@ -345,6 +345,13 @@ func TestMain(m *testing.M) { }() cluster.PanicHandler(nil) + + // stop vtorc first otherwise its logs get polluted + // with instances being unreachable triggering unnecessary operations + if clusterInstance.VtorcProcess != nil { + _ = clusterInstance.VtorcProcess.TearDown() + } + for _, cellInfo := range cellInfos { killTablets(cellInfo.replicaTablets) killTablets(cellInfo.rdonlyTablets) @@ -387,7 +394,7 @@ func shardPrimaryTablet(t *testing.T, cluster *cluster.LocalProcessCluster, keys } // Makes sure the tablet type is primary, and its health check agrees. -func checkPrimaryTablet(t *testing.T, cluster *cluster.LocalProcessCluster, tablet *cluster.Vttablet) { +func checkPrimaryTablet(t *testing.T, cluster *cluster.LocalProcessCluster, tablet *cluster.Vttablet, checkServing bool) { start := time.Now() for { now := time.Now() @@ -405,24 +412,26 @@ func checkPrimaryTablet(t *testing.T, cluster *cluster.LocalProcessCluster, tabl log.Warningf("Tablet %v is not primary yet, sleep for 1 second\n", tablet.Alias) time.Sleep(time.Second) continue - } else { - // allow time for tablet state to be updated after topo is updated - time.Sleep(2 * time.Second) - // make sure the health stream is updated - result, err = cluster.VtctlclientProcess.ExecuteCommandWithOutput("VtTabletStreamHealth", "-count", "1", tablet.Alias) - require.NoError(t, err) - var streamHealthResponse querypb.StreamHealthResponse - - err = json2.Unmarshal([]byte(result), &streamHealthResponse) - require.NoError(t, err) - //if !streamHealthResponse.GetServing() { - // log.Exitf("stream health not updated") - //} - assert.True(t, streamHealthResponse.GetServing(), "stream health: %v", &streamHealthResponse) - tabletType := streamHealthResponse.GetTarget().GetTabletType() - require.Equal(t, topodatapb.TabletType_PRIMARY, tabletType) - break } + // make sure the health stream is updated + result, err = cluster.VtctlclientProcess.ExecuteCommandWithOutput("VtTabletStreamHealth", "-count", "1", tablet.Alias) + require.NoError(t, err) + var streamHealthResponse querypb.StreamHealthResponse + + err = json2.Unmarshal([]byte(result), &streamHealthResponse) + require.NoError(t, err) + if checkServing && !streamHealthResponse.GetServing() { + log.Warningf("Tablet %v is not serving in health stream yet, sleep for 1 second\n", tablet.Alias) + time.Sleep(time.Second) + continue + } + tabletType := 
streamHealthResponse.GetTarget().GetTabletType()
+		if tabletType != topodatapb.TabletType_PRIMARY {
+			log.Warningf("Tablet %v is not primary in health stream yet, sleep for 1 second\n", tablet.Alias)
+			time.Sleep(time.Second)
+			continue
+		}
+		break
 	}
 }
 
@@ -459,7 +468,7 @@ func checkReplication(t *testing.T, clusterInstance *cluster.LocalProcessCluster
 
 // call this function only after checkReplication.
 // it inserts more data into the table vt_insert_test and checks that it is replicated too
-func runAdditionalCommands(t *testing.T, primary *cluster.Vttablet, replicas []*cluster.Vttablet, timeToWait time.Duration) {
+func verifyWritesSucceed(t *testing.T, primary *cluster.Vttablet, replicas []*cluster.Vttablet, timeToWait time.Duration) {
 	confirmReplication(t, primary, replicas, timeToWait, lastUsedValue)
 	lastUsedValue++
 }
@@ -664,7 +673,7 @@ func permanentlyRemoveVttablet(tablet *cluster.Vttablet) {
 }
 
 func changePrivileges(t *testing.T, sql string, tablet *cluster.Vttablet, user string) {
-	_, err := runSQL(t, sql, tablet, "")
+	_, err := runSQL(t, "SET sql_log_bin = OFF;"+sql+";SET sql_log_bin = ON;", tablet, "")
 	require.NoError(t, err)
 
 	res, err := runSQL(t, fmt.Sprintf("SELECT id FROM INFORMATION_SCHEMA.PROCESSLIST WHERE user = '%s'", user), tablet, "")
@@ -676,3 +685,17 @@ func changePrivileges(t *testing.T, sql string, tablet *cluster.Vttablet, user s
 		require.NoError(t, err)
 	}
 }
+
+func resetPrimaryLogs(t *testing.T, curPrimary *cluster.Vttablet) {
+	_, err := runSQL(t, "FLUSH BINARY LOGS", curPrimary, "")
+	require.NoError(t, err)
+
+	binLogsOutput, err := runSQL(t, "SHOW BINARY LOGS", curPrimary, "")
+	require.NoError(t, err)
+	require.True(t, len(binLogsOutput.Rows) >= 2, "there should be at least 2 binlog files")
+
+	lastLogFile := binLogsOutput.Rows[len(binLogsOutput.Rows)-1][0].ToString()
+
+	_, err = runSQL(t, "PURGE BINARY LOGS TO '"+lastLogFile+"'", curPrimary, "")
+	require.NoError(t, err)
+}
diff --git a/go/test/endtoend/vtorc/primary_failure_test.go b/go/test/endtoend/vtorc/primary_failure_test.go
index d95274da68c..08605d612ac 100644
--- a/go/test/endtoend/vtorc/primary_failure_test.go
+++ b/go/test/endtoend/vtorc/primary_failure_test.go
@@ -30,13 +30,30 @@ import (
 // covers the test case master-failover from orchestrator
 func TestDownPrimary(t *testing.T) {
 	defer cluster.PanicHandler(t)
-	setupVttabletsAndVtorc(t, 2, 0, nil, "test_config.json")
+	setupVttabletsAndVtorc(t, 2, 1, nil, "test_config.json")
 	keyspace := &clusterInstance.Keyspaces[0]
 	shard0 := &keyspace.Shards[0]
 	// find primary from topo
 	curPrimary := shardPrimaryTablet(t, clusterInstance, keyspace, shard0)
 	assert.NotNil(t, curPrimary, "should have elected a primary")
 
+	// find the replica and rdonly tablets
+	var replica, rdonly *cluster.Vttablet
+	for _, tablet := range shard0.Vttablets {
+		// we know we have only two replica tablets, so the one that is not the primary must be the other replica
+		if tablet.Alias != curPrimary.Alias && tablet.Type == "replica" {
+			replica = tablet
+		}
+		if tablet.Type == "rdonly" {
+			rdonly = tablet
+		}
+	}
+	assert.NotNil(t, replica, "could not find replica tablet")
+	assert.NotNil(t, rdonly, "could not find rdonly tablet")
+
+	// check that the replication is setup correctly before we failover
+	checkReplication(t, clusterInstance, curPrimary, []*cluster.Vttablet{rdonly, replica}, 10*time.Second)
+
 	// Make the current primary database unavailable.
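A pattern note before the failure is induced: every check in these vtorc tests (checkPrimaryTablet above, checkReplication, verifyWritesSucceed) polls until a deadline rather than asserting once, because recovery runs asynchronously. The underlying idiom, reduced to a sketch with hypothetical names:

	// waitUntil polls cond once per second until it succeeds or the deadline
	// passes. Illustrative only; the test helpers inline this loop themselves.
	func waitUntil(deadline time.Duration, cond func() bool) bool {
		timeout := time.Now().Add(deadline)
		for time.Now().Before(timeout) {
			if cond() {
				return true
			}
			time.Sleep(time.Second)
		}
		return false
	}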
 	err := curPrimary.MysqlctlProcess.Stop()
 	require.NoError(t, err)
 
@@ -45,13 +62,10 @@ func TestDownPrimary(t *testing.T) {
 		permanentlyRemoveVttablet(curPrimary)
 	}()
 
-	for _, tablet := range shard0.Vttablets {
-		// we know we have only two tablets, so the "other" one must be the new primary
-		if tablet.Alias != curPrimary.Alias {
-			checkPrimaryTablet(t, clusterInstance, tablet)
-			break
-		}
-	}
+	// check that the replica gets promoted
+	checkPrimaryTablet(t, clusterInstance, replica, true)
+	// also check that the replication is working correctly after failover
+	verifyWritesSucceed(t, replica, []*cluster.Vttablet{rdonly}, 10*time.Second)
 }
 
 // Failover should not be cross data centers, according to the configuration file
@@ -65,18 +79,23 @@ func TestCrossDataCenterFailure(t *testing.T) {
 	curPrimary := shardPrimaryTablet(t, clusterInstance, keyspace, shard0)
 	assert.NotNil(t, curPrimary, "should have elected a primary")
 
-	var replicaInSameCell *cluster.Vttablet
+	// find the replica and rdonly tablets
+	var replicaInSameCell, rdonly *cluster.Vttablet
 	for _, tablet := range shard0.Vttablets {
 		// we know we have only two replica tablets, so the one that is not the primary must be the other replica
 		if tablet.Alias != curPrimary.Alias && tablet.Type == "replica" {
 			replicaInSameCell = tablet
-			break
 		}
+		if tablet.Type == "rdonly" {
+			rdonly = tablet
+		}
 	}
+	assert.NotNil(t, replicaInSameCell, "could not find replica tablet")
+	assert.NotNil(t, rdonly, "could not find rdonly tablet")
 
 	crossCellReplica := startVttablet(t, cell2, false)
 	// newly started tablet does not replicate from anyone yet, we will allow orchestrator to fix this too
-	checkReplication(t, clusterInstance, curPrimary, []*cluster.Vttablet{crossCellReplica, replicaInSameCell}, 25*time.Second)
+	checkReplication(t, clusterInstance, curPrimary, []*cluster.Vttablet{crossCellReplica, replicaInSameCell, rdonly}, 25*time.Second)
 
 	// Make the current primary database unavailable.
err := curPrimary.MysqlctlProcess.Stop() @@ -87,47 +106,109 @@ func TestCrossDataCenterFailure(t *testing.T) { }() // we have a replica in the same cell, so that is the one which should be promoted and not the one from another cell - checkPrimaryTablet(t, clusterInstance, replicaInSameCell) + checkPrimaryTablet(t, clusterInstance, replicaInSameCell, true) + // also check that the replication is working correctly after failover + verifyWritesSucceed(t, replicaInSameCell, []*cluster.Vttablet{crossCellReplica, rdonly}, 10*time.Second) +} + +// Failover should not be cross data centers, according to the configuration file +// In case of no viable candidates, we should error out +func TestCrossDataCenterFailureError(t *testing.T) { + defer cluster.PanicHandler(t) + setupVttabletsAndVtorc(t, 1, 1, nil, "test_config.json") + keyspace := &clusterInstance.Keyspaces[0] + shard0 := &keyspace.Shards[0] + // find primary from topo + curPrimary := shardPrimaryTablet(t, clusterInstance, keyspace, shard0) + assert.NotNil(t, curPrimary, "should have elected a primary") + + // find the rdonly tablet + var rdonly *cluster.Vttablet + for _, tablet := range shard0.Vttablets { + if tablet.Type == "rdonly" { + rdonly = tablet + } + } + assert.NotNil(t, rdonly, "could not find rdonly tablet") + + crossCellReplica1 := startVttablet(t, cell2, false) + crossCellReplica2 := startVttablet(t, cell2, false) + // newly started tablet does not replicate from anyone yet, we will allow orchestrator to fix this too + checkReplication(t, clusterInstance, curPrimary, []*cluster.Vttablet{crossCellReplica1, crossCellReplica2, rdonly}, 25*time.Second) + + // Make the current primary database unavailable. + err := curPrimary.MysqlctlProcess.Stop() + require.NoError(t, err) + defer func() { + // we remove the tablet from our global list since its mysqlctl process has stopped and cannot be reused for other tests + permanentlyRemoveVttablet(curPrimary) + }() + + // wait for 20 seconds + time.Sleep(20 * time.Second) + + // the previous primary should still be the primary since recovery of dead primary should fail + checkPrimaryTablet(t, clusterInstance, curPrimary, false) } -// Failover will sometimes lead to a replica which can no longer replicate. +// Failover will sometimes lead to a rdonly which can no longer replicate. 
// covers part of the test case master-failover-lost-replicas from orchestrator
-func TestLostReplicasOnPrimaryFailure(t *testing.T) {
+func TestLostRdonlyOnPrimaryFailure(t *testing.T) {
+	// new version of ERS does not check for lost replicas yet
+	// Earlier, any replicas that were not able to replicate from the previous primary
+	// were detected by vtorc and could be configured to have their sources detached
+	t.Skip()
 	defer cluster.PanicHandler(t)
-	setupVttabletsAndVtorc(t, 2, 1, nil, "test_config.json")
+	setupVttabletsAndVtorc(t, 2, 2, nil, "test_config.json")
 	keyspace := &clusterInstance.Keyspaces[0]
 	shard0 := &keyspace.Shards[0]
 	// find primary from topo
 	curPrimary := shardPrimaryTablet(t, clusterInstance, keyspace, shard0)
 	assert.NotNil(t, curPrimary, "should have elected a primary")
 
-	// get the replicas
-	var replica, rdonly *cluster.Vttablet
+	// get the tablets
+	var replica, rdonly, aheadRdonly *cluster.Vttablet
 	for _, tablet := range shard0.Vttablets {
 		// find tablets which are not the primary
 		if tablet.Alias != curPrimary.Alias {
 			if tablet.Type == "replica" {
 				replica = tablet
 			} else {
-				rdonly = tablet
+				if rdonly == nil {
+					rdonly = tablet
+				} else {
+					aheadRdonly = tablet
+				}
 			}
 		}
 	}
 	assert.NotNil(t, replica, "could not find replica tablet")
-	assert.NotNil(t, rdonly, "could not find rdonly tablet")
+	assert.NotNil(t, rdonly, "could not find any rdonly tablet")
+	assert.NotNil(t, aheadRdonly, "could not find the second rdonly tablet")
 
 	// check that replication is setup correctly
-	checkReplication(t, clusterInstance, curPrimary, []*cluster.Vttablet{rdonly, replica}, 15*time.Second)
+	checkReplication(t, clusterInstance, curPrimary, []*cluster.Vttablet{rdonly, aheadRdonly, replica}, 15*time.Second)
 
-	// revoke super privileges from vtorc on replica so that it is unable to repair the replication
+	// revoke super privileges from vtorc on the replica and rdonly so that it is unable to repair the replication
 	changePrivileges(t, `REVOKE SUPER ON *.* FROM 'orc_client_user'@'%'`, replica, "orc_client_user")
+	changePrivileges(t, `REVOKE SUPER ON *.* FROM 'orc_client_user'@'%'`, rdonly, "orc_client_user")
 
-	// stop replication on the replica.
+	// stop replication on the replica and rdonly.
 	err := clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replica.Alias)
 	require.NoError(t, err)
+	err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", rdonly.Alias)
+	require.NoError(t, err)
 
-	// check that rdonly is able to replicate. We also want to add some queries to rdonly which will not be there in replica
-	runAdditionalCommands(t, curPrimary, []*cluster.Vttablet{rdonly}, 15*time.Second)
+	// check that aheadRdonly is able to replicate. We also want to add some queries to aheadRdonly which will not be there in replica and rdonly
+	verifyWritesSucceed(t, curPrimary, []*cluster.Vttablet{aheadRdonly}, 15*time.Second)
+
+	// assert that the replica and rdonly are indeed lagging and do not have the new insertion by checking the count of rows in the tables
+	out, err := runSQL(t, "SELECT * FROM vt_insert_test", replica, "vt_ks")
+	require.NoError(t, err)
+	require.Equal(t, 1, len(out.Rows))
+	out, err = runSQL(t, "SELECT * FROM vt_insert_test", rdonly, "vt_ks")
+	require.NoError(t, err)
+	require.Equal(t, 1, len(out.Rows))
 
 	// Make the current primary database unavailable.
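One mechanism worth calling out before the failure is induced: when a replica is given up on, its source is "detached" rather than deleted, conventionally by prefixing the configured host with // so the coordinates stay recoverable. The detach statement itself is not part of this diff, so describing its exact CHANGE MASTER form would be an assumption; what the test relies on is that detachment is observable from performance_schema:

	// Read back the configured source host; "//localhost" means detached.
	// (runSQL is the helper defined in these vtorc tests.)
	out, err := runSQL(t, "SELECT HOST FROM performance_schema.replication_connection_configuration", aheadRdonly, "")
	require.NoError(t, err)
	require.Equal(t, "//localhost", out.Rows[0][0].ToString())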
 	err = curPrimary.MysqlctlProcess.Stop()
@@ -137,14 +218,18 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) {
 		permanentlyRemoveVttablet(curPrimary)
 	}()
 
-	// grant super privileges back to vtorc on replica so that it can repair
+	// grant super privileges back to vtorc on the replica and rdonly so that it can repair
 	changePrivileges(t, `GRANT SUPER ON *.* TO 'orc_client_user'@'%'`, replica, "orc_client_user")
+	changePrivileges(t, `GRANT SUPER ON *.* TO 'orc_client_user'@'%'`, rdonly, "orc_client_user")
 
 	// vtorc must promote the lagging replica and not the rdonly, since it has a MustNotPromoteRule promotion rule
-	checkPrimaryTablet(t, clusterInstance, replica)
+	checkPrimaryTablet(t, clusterInstance, replica, true)
 
-	// check that the rdonly replica is lost. The lost replica has is detached and its host is prepended with `//`
-	out, err := runSQL(t, "SELECT HOST FROM performance_schema.replication_connection_configuration", rdonly, "")
+	// also check that the replication is setup correctly
+	verifyWritesSucceed(t, replica, []*cluster.Vttablet{rdonly}, 15*time.Second)
+
+	// check that the rdonly is lost. The lost replica is detached and its host is prepended with `//`
+	out, err = runSQL(t, "SELECT HOST FROM performance_schema.replication_connection_configuration", aheadRdonly, "")
 	require.NoError(t, err)
 	require.Equal(t, "//localhost", out.Rows[0][0].ToString())
 }
@@ -153,13 +238,30 @@
 
 // covers the test case master-failover-fail-promotion-lag-minutes-success from orchestrator
 func TestPromotionLagSuccess(t *testing.T) {
 	defer cluster.PanicHandler(t)
-	setupVttabletsAndVtorc(t, 2, 0, nil, "test_config_promotion_success.json")
+	setupVttabletsAndVtorc(t, 2, 1, nil, "test_config_promotion_success.json")
 	keyspace := &clusterInstance.Keyspaces[0]
 	shard0 := &keyspace.Shards[0]
 	// find primary from topo
 	curPrimary := shardPrimaryTablet(t, clusterInstance, keyspace, shard0)
 	assert.NotNil(t, curPrimary, "should have elected a primary")
 
+	// find the replica and rdonly tablets
+	var replica, rdonly *cluster.Vttablet
+	for _, tablet := range shard0.Vttablets {
+		// we know we have only two replica tablets, so the one that is not the primary must be the other replica
+		if tablet.Alias != curPrimary.Alias && tablet.Type == "replica" {
+			replica = tablet
+		}
+		if tablet.Type == "rdonly" {
+			rdonly = tablet
+		}
+	}
+	assert.NotNil(t, replica, "could not find replica tablet")
+	assert.NotNil(t, rdonly, "could not find rdonly tablet")
+
+	// check that the replication is setup correctly before we failover
+	checkReplication(t, clusterInstance, curPrimary, []*cluster.Vttablet{rdonly, replica}, 10*time.Second)
+
 	// Make the current primary database unavailable.
 	err := curPrimary.MysqlctlProcess.Stop()
 	require.NoError(t, err)
 
@@ -168,28 +270,49 @@ func TestPromotionLagSuccess(t *testing.T) {
 		permanentlyRemoveVttablet(curPrimary)
 	}()
 
-	for _, tablet := range shard0.Vttablets {
-		// we know we have only two tablets, so the "other" one must be the new primary
-		if tablet.Alias != curPrimary.Alias {
-			checkPrimaryTablet(t, clusterInstance, tablet)
-			break
-		}
-	}
+	// check that the replica gets promoted
+	checkPrimaryTablet(t, clusterInstance, replica, true)
+	// also check that the replication is working correctly after failover
+	verifyWritesSucceed(t, replica, []*cluster.Vttablet{rdonly}, 10*time.Second)
 }
 
 // This test checks that the promotion of a tablet succeeds if it passes the promotion lag test
 // covers the test case master-failover-fail-promotion-lag-minutes-failure from orchestrator
 func TestPromotionLagFailure(t *testing.T) {
-	// skip the test since it fails now
+	// new version of ERS does not check for promotion lag yet
+	// Earlier, vtorc used to check that the promotion lag between the new primary and the old one
+	// was smaller than the configured value, otherwise it would fail the promotion
 	t.Skip()
 	defer cluster.PanicHandler(t)
-	setupVttabletsAndVtorc(t, 2, 0, nil, "test_config_promotion_failure.json")
+	setupVttabletsAndVtorc(t, 3, 1, nil, "test_config_promotion_failure.json")
 	keyspace := &clusterInstance.Keyspaces[0]
 	shard0 := &keyspace.Shards[0]
 	// find primary from topo
 	curPrimary := shardPrimaryTablet(t, clusterInstance, keyspace, shard0)
 	assert.NotNil(t, curPrimary, "should have elected a primary")
 
+	// find the replica and rdonly tablets
+	var replica1, replica2, rdonly *cluster.Vttablet
+	for _, tablet := range shard0.Vttablets {
+		// the tablets that are not the primary and not rdonly must be the two replicas
+		if tablet.Alias != curPrimary.Alias && tablet.Type == "replica" {
+			if replica1 == nil {
+				replica1 = tablet
+			} else {
+				replica2 = tablet
+			}
+		}
+		if tablet.Type == "rdonly" {
+			rdonly = tablet
+		}
+	}
+	assert.NotNil(t, replica1, "could not find replica tablet")
+	assert.NotNil(t, replica2, "could not find second replica tablet")
+	assert.NotNil(t, rdonly, "could not find rdonly tablet")
+
+	// check that the replication is setup correctly before we failover
+	checkReplication(t, clusterInstance, curPrimary, []*cluster.Vttablet{rdonly, replica1, replica2}, 10*time.Second)
+
 	// Make the current primary database unavailable.
 	err := curPrimary.MysqlctlProcess.Stop()
 	require.NoError(t, err)
 
@@ -202,7 +325,7 @@ func TestPromotionLagFailure(t *testing.T) {
 	time.Sleep(20 * time.Second)
 
 	// the previous primary should still be the primary since recovery of dead primary should fail
-	checkPrimaryTablet(t, clusterInstance, curPrimary)
+	checkPrimaryTablet(t, clusterInstance, curPrimary, false)
 }
 
 // covers the test case master-failover-candidate from orchestrator
@@ -217,9 +340,23 @@ func TestDownPrimaryPromotionRule(t *testing.T) {
 	curPrimary := shardPrimaryTablet(t, clusterInstance, keyspace, shard0)
 	assert.NotNil(t, curPrimary, "should have elected a primary")
 
+	// find the replica and rdonly tablets
+	var replica, rdonly *cluster.Vttablet
+	for _, tablet := range shard0.Vttablets {
+		// we know we have only two replica tablets, so the one that is not the primary must be the other replica
+		if tablet.Alias != curPrimary.Alias && tablet.Type == "replica" {
+			replica = tablet
+		}
+		if tablet.Type == "rdonly" {
+			rdonly = tablet
+		}
+	}
+	assert.NotNil(t, replica, "could not find replica tablet")
+	assert.NotNil(t, rdonly, "could not find rdonly tablet")
+
 	crossCellReplica := startVttablet(t, cell2, false)
 	// newly started tablet does not replicate from anyone yet, we will allow orchestrator to fix this too
-	checkReplication(t, clusterInstance, curPrimary, []*cluster.Vttablet{crossCellReplica}, 25*time.Second)
+	checkReplication(t, clusterInstance, curPrimary, []*cluster.Vttablet{crossCellReplica, rdonly, replica}, 25*time.Second)
 
 	// Make the current primary database unavailable.
 	err := curPrimary.MysqlctlProcess.Stop()
 	require.NoError(t, err)
 	defer func() {
 		permanentlyRemoveVttablet(curPrimary)
 	}()
 
-	// we have a replica in the same cell, so that is the one which should be promoted and not the one from another cell
-	checkPrimaryTablet(t, clusterInstance, crossCellReplica)
+	// we have a replica with a preferred promotion rule, so that is the one which should be promoted
+	checkPrimaryTablet(t, clusterInstance, crossCellReplica, true)
+	// also check that the replication is working correctly after failover
+	verifyWritesSucceed(t, crossCellReplica, []*cluster.Vttablet{rdonly, replica}, 10*time.Second)
 }
 
 // covers the test case master-failover-candidate-lag from orchestrator
@@ -265,20 +404,25 @@ func TestDownPrimaryPromotionRuleWithLag(t *testing.T) {
 	// newly started tablet does not replicate from anyone yet, we will allow orchestrator to fix this too
 	checkReplication(t, clusterInstance, curPrimary, []*cluster.Vttablet{crossCellReplica, replica, rdonly}, 25*time.Second)
 
-	// make the crossCellReplica lag by setting the source_delay to 20 seconds
-	runSQL(t, "STOP SLAVE", crossCellReplica, "")
-	runSQL(t, "CHANGE MASTER TO MASTER_DELAY = 20", crossCellReplica, "")
-	runSQL(t, "START SLAVE", crossCellReplica, "")
+	// revoke super privileges from vtorc on crossCellReplica so that it is unable to repair the replication
+	changePrivileges(t, `REVOKE SUPER ON *.* FROM 'orc_client_user'@'%'`, crossCellReplica, "orc_client_user")
 
-	defer func() {
-		// fix the crossCell replica back so that no other tests see this as a side effect
-		runSQL(t, "STOP SLAVE", crossCellReplica, "")
-		runSQL(t, "CHANGE MASTER TO MASTER_DELAY = 0", crossCellReplica, "")
-		runSQL(t, "START SLAVE", crossCellReplica, "")
-	}()
+	// stop replication on the crossCellReplica.
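The sequence starting here replaces the old MASTER_DELAY trick, which only delayed apply for a fixed time, with lag that cannot silently heal. End to end, the recipe these tests now share looks like this sketch, where lagged and others are placeholders and the helpers are the ones defined in this test package:

	// Make `lagged` fall behind permanently:
	changePrivileges(t, `REVOKE SUPER ON *.* FROM 'orc_client_user'@'%'`, lagged, "orc_client_user") // vtorc cannot repair it
	err := clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", lagged.Alias)
	require.NoError(t, err)
	verifyWritesSucceed(t, curPrimary, others, 15*time.Second) // writes that lagged misses
	resetPrimaryLogs(t, curPrimary) // FLUSH BINARY LOGS + PURGE BINARY LOGS TO '<newest>'
	err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", lagged.Alias)
	require.NoError(t, err)
	changePrivileges(t, `GRANT SUPER ON *.* TO 'orc_client_user'@'%'`, lagged, "orc_client_user")

Once the binlogs are purged past the stopped replica's position, its IO thread has nothing left to fetch (MySQL error 1236) and the tablet stays behind until a failover promotes someone else.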
+ err := clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", crossCellReplica.Alias) + require.NoError(t, err) // check that rdonly and replica are able to replicate. We also want to add some queries to replica which will not be there in crossCellReplica - runAdditionalCommands(t, curPrimary, []*cluster.Vttablet{replica, rdonly}, 15*time.Second) + verifyWritesSucceed(t, curPrimary, []*cluster.Vttablet{replica, rdonly}, 15*time.Second) + + // reset the primary logs so that crossCellReplica can never catch up + resetPrimaryLogs(t, curPrimary) + + // start replication back on the crossCellReplica. + err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", crossCellReplica.Alias) + require.NoError(t, err) + + // grant super privileges back to vtorc on crossCellReplica so that it can repair + changePrivileges(t, `GRANT SUPER ON *.* TO 'orc_client_user'@'%'`, crossCellReplica, "orc_client_user") // assert that the crossCellReplica is indeed lagging and does not have the new insertion by checking the count of rows in the table out, err := runSQL(t, "SELECT * FROM vt_insert_test", crossCellReplica, "vt_ks") @@ -294,10 +438,15 @@ func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { }() // the crossCellReplica is set to be preferred according to the durability requirements. So it must be promoted - checkPrimaryTablet(t, clusterInstance, crossCellReplica) + checkPrimaryTablet(t, clusterInstance, crossCellReplica, true) + + // assert that the crossCellReplica has indeed caught up + out, err = runSQL(t, "SELECT * FROM vt_insert_test", crossCellReplica, "vt_ks") + require.NoError(t, err) + require.Equal(t, 2, len(out.Rows)) // check that rdonly and replica are able to replicate from the crossCellReplica - runAdditionalCommands(t, crossCellReplica, []*cluster.Vttablet{replica, rdonly}, 15*time.Second) + verifyWritesSucceed(t, crossCellReplica, []*cluster.Vttablet{replica, rdonly}, 15*time.Second) } // covers the test case master-failover-candidate-lag-cross-datacenter from orchestrator @@ -332,20 +481,25 @@ func TestDownPrimaryPromotionRuleWithLagCrossCenter(t *testing.T) { // newly started tablet does not replicate from anyone yet, we will allow orchestrator to fix this too checkReplication(t, clusterInstance, curPrimary, []*cluster.Vttablet{crossCellReplica, replica, rdonly}, 25*time.Second) - // make the replica lag by setting the source_delay to 20 seconds - runSQL(t, "STOP SLAVE", replica, "") - runSQL(t, "CHANGE MASTER TO MASTER_DELAY = 20", replica, "") - runSQL(t, "START SLAVE", replica, "") + // revoke super privileges from vtorc on replica so that it is unable to repair the replication + changePrivileges(t, `REVOKE SUPER ON *.* FROM 'orc_client_user'@'%'`, replica, "orc_client_user") - defer func() { - // fix the replica back so that no other tests see this as a side effect - runSQL(t, "STOP SLAVE", replica, "") - runSQL(t, "CHANGE MASTER TO MASTER_DELAY = 0", replica, "") - runSQL(t, "START SLAVE", replica, "") - }() + // stop replication on the replica. + err := clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replica.Alias) + require.NoError(t, err) // check that rdonly and crossCellReplica are able to replicate. 
We also want to add some queries to crossCellReplica which will not be there in replica - runAdditionalCommands(t, curPrimary, []*cluster.Vttablet{rdonly, crossCellReplica}, 15*time.Second) + verifyWritesSucceed(t, curPrimary, []*cluster.Vttablet{rdonly, crossCellReplica}, 15*time.Second) + + // reset the primary logs so that replica can never catch up + resetPrimaryLogs(t, curPrimary) + + // start replication back on the replica. + err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replica.Alias) + require.NoError(t, err) + + // grant super privileges back to vtorc on replica so that it can repair + changePrivileges(t, `GRANT SUPER ON *.* TO 'orc_client_user'@'%'`, replica, "orc_client_user") // assert that the replica is indeed lagging and does not have the new insertion by checking the count of rows in the table out, err := runSQL(t, "SELECT * FROM vt_insert_test", replica, "vt_ks") @@ -361,8 +515,13 @@ func TestDownPrimaryPromotionRuleWithLagCrossCenter(t *testing.T) { }() // the replica should be promoted since we have prevented cross cell promotions - checkPrimaryTablet(t, clusterInstance, replica) + checkPrimaryTablet(t, clusterInstance, replica, true) + + // assert that the replica has indeed caught up + out, err = runSQL(t, "SELECT * FROM vt_insert_test", replica, "vt_ks") + require.NoError(t, err) + require.Equal(t, 2, len(out.Rows)) // check that rdonly and crossCellReplica are able to replicate from the replica - runAdditionalCommands(t, replica, []*cluster.Vttablet{crossCellReplica, rdonly}, 15*time.Second) + verifyWritesSucceed(t, replica, []*cluster.Vttablet{crossCellReplica, rdonly}, 15*time.Second) } diff --git a/go/test/endtoend/vtorc/test_config_crosscenter_prefer.json b/go/test/endtoend/vtorc/test_config_crosscenter_prefer.json index 7afd417ffcb..35ddf1b2184 100644 --- a/go/test/endtoend/vtorc/test_config_crosscenter_prefer.json +++ b/go/test/endtoend/vtorc/test_config_crosscenter_prefer.json @@ -6,6 +6,7 @@ "MySQLReplicaPassword": "", "RecoveryPeriodBlockSeconds": 1, "InstancePollSeconds": 1, + "LockShardTimeoutSeconds": 5, "Durability": "specified", "DurabilityParams": { "zone2-0000000200": "prefer" diff --git a/go/test/endtoend/vtorc/test_config_crosscenter_prefer_prevent.json b/go/test/endtoend/vtorc/test_config_crosscenter_prefer_prevent.json index b2232a99ce7..27885da6fc6 100644 --- a/go/test/endtoend/vtorc/test_config_crosscenter_prefer_prevent.json +++ b/go/test/endtoend/vtorc/test_config_crosscenter_prefer_prevent.json @@ -6,6 +6,7 @@ "MySQLReplicaPassword": "", "RecoveryPeriodBlockSeconds": 1, "InstancePollSeconds": 1, + "LockShardTimeoutSeconds": 5, "Durability": "specified", "DurabilityParams": { "zone2-0000000200": "prefer" diff --git a/go/test/endtoend/vtorc/vtorc_test.go b/go/test/endtoend/vtorc/vtorc_test.go index 01d4cea5764..e3009f67e61 100644 --- a/go/test/endtoend/vtorc/vtorc_test.go +++ b/go/test/endtoend/vtorc/vtorc_test.go @@ -42,7 +42,7 @@ func TestPrimaryElection(t *testing.T) { keyspace := &clusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] - checkPrimaryTablet(t, clusterInstance, shard0.Vttablets[0]) + checkPrimaryTablet(t, clusterInstance, shard0.Vttablets[0], true) checkReplication(t, clusterInstance, shard0.Vttablets[0], shard0.Vttablets[1:], 10*time.Second) } @@ -56,7 +56,7 @@ func TestSingleKeyspace(t *testing.T) { keyspace := &clusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] - checkPrimaryTablet(t, clusterInstance, shard0.Vttablets[0]) + checkPrimaryTablet(t, clusterInstance,
shard0.Vttablets[0], true) checkReplication(t, clusterInstance, shard0.Vttablets[0], shard0.Vttablets[1:], 10*time.Second) } @@ -70,7 +70,7 @@ func TestKeyspaceShard(t *testing.T) { keyspace := &clusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] - checkPrimaryTablet(t, clusterInstance, shard0.Vttablets[0]) + checkPrimaryTablet(t, clusterInstance, shard0.Vttablets[0], true) checkReplication(t, clusterInstance, shard0.Vttablets[0], shard0.Vttablets[1:], 10*time.Second) } diff --git a/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go b/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go index 7f5701c1adf..95333b97494 100644 --- a/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go +++ b/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go @@ -109,16 +109,16 @@ type FakeMysqlDaemon struct { // StartReplicationUntilAfterPos is matched against the input StartReplicationUntilAfterPos mysql.Position - // SetReplicationSourceInput is matched against the input of SetReplicationSource - // (as "%v:%v"). If it doesn't match, SetReplicationSource will return an error. - SetReplicationSourceInput string + // SetReplicationSourceInputs are matched against the input of SetReplicationSource + // (as "%v:%v"). If none of them matches, SetReplicationSource will return an error. + SetReplicationSourceInputs []string // SetReplicationSourceError is used by SetReplicationSource SetReplicationSourceError error - // WaitPrimaryPosition is checked by WaitSourcePos, if the - // same it returns nil, if different it returns an error - WaitPrimaryPosition mysql.Position + // WaitPrimaryPositions is checked by WaitSourcePos; if the value is found + // in it, the function returns nil, otherwise it returns an error + WaitPrimaryPositions []mysql.Position // PromoteResult is returned by Promote PromoteResult mysql.Position @@ -376,8 +376,14 @@ func (fmd *FakeMysqlDaemon) SetReplicationPosition(ctx context.Context, pos mysq // SetReplicationSource is part of the MysqlDaemon interface.
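Turning these two fields into slices matters for ERS tests: once there is more than one legal promotion candidate, a replica may legitimately be pointed at any of them, and may be asked to wait for any of several positions. A sketch of how a test might populate the plural fields (hosts and GTID sets are illustrative):

	fake := &fakemysqldaemon.FakeMysqlDaemon{}
	// Either candidate may end up as the replication source after the reparent;
	// entries use the same "%v:%v" host:port encoding that SetReplicationSource checks.
	fake.SetReplicationSourceInputs = []string{"host1:3306", "host2:3306"}
	pos1, _ := mysql.DecodePosition("MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5")
	pos2, _ := mysql.DecodePosition("MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-9")
	// WaitSourcePos now succeeds for any of these positions.
	fake.WaitPrimaryPositions = []mysql.Position{pos1, pos2}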
func (fmd *FakeMysqlDaemon) SetReplicationSource(ctx context.Context, host string, port int, stopReplicationBefore bool, startReplicationAfter bool) error { input := fmt.Sprintf("%v:%v", host, port) - if fmd.SetReplicationSourceInput != input { - return fmt.Errorf("wrong input for SetReplicationSourceCommands: expected %v got %v", fmd.SetReplicationSourceInput, input) + found := false + for _, sourceInput := range fmd.SetReplicationSourceInputs { + if sourceInput == input { + found = true + } + } + if !found { + return fmt.Errorf("wrong input for SetReplicationSourceCommands: expected a value in %v got %v", fmd.SetReplicationSourceInputs, input) } if fmd.SetReplicationSourceError != nil { return fmd.SetReplicationSourceError @@ -408,10 +414,12 @@ func (fmd *FakeMysqlDaemon) WaitSourcePos(_ context.Context, pos mysql.Position) if fmd.TimeoutHook != nil { return fmd.TimeoutHook() } - if reflect.DeepEqual(fmd.WaitPrimaryPosition, pos) { - return nil + for _, position := range fmd.WaitPrimaryPositions { + if reflect.DeepEqual(position, pos) { + return nil + } } - return fmt.Errorf("wrong input for WaitSourcePos: expected %v got %v", fmd.WaitPrimaryPosition, pos) + return fmt.Errorf("wrong input for WaitSourcePos: expected a value in %v got %v", fmd.WaitPrimaryPositions, pos) } // Promote is part of the MysqlDaemon interface diff --git a/go/vt/orchestrator/app/cli.go b/go/vt/orchestrator/app/cli.go index b3bbec86b13..0dc179af848 100644 --- a/go/vt/orchestrator/app/cli.go +++ b/go/vt/orchestrator/app/cli.go @@ -33,6 +33,7 @@ import ( "vitess.io/vitess/go/vt/orchestrator/kv" "vitess.io/vitess/go/vt/orchestrator/logic" "vitess.io/vitess/go/vt/orchestrator/process" + "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" ) var thisInstanceKey *inst.InstanceKey @@ -1273,7 +1274,7 @@ func Cli(command string, strict bool, instance string, destination string, owner case registerCliCommand("register-candidate", "Instance, meta", `Indicate that a specific instance is a preferred candidate for primary promotion`): { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - promotionRule, err := inst.ParseCandidatePromotionRule(*config.RuntimeCLIFlags.PromotionRule) + promotionRule, err := promotionrule.Parse(*config.RuntimeCLIFlags.PromotionRule) if err != nil { log.Fatale(err) } diff --git a/go/vt/orchestrator/config/config.go b/go/vt/orchestrator/config/config.go index 86d6dd2a04e..ee2320a3058 100644 --- a/go/vt/orchestrator/config/config.go +++ b/go/vt/orchestrator/config/config.go @@ -236,6 +236,7 @@ type Configuration struct { MaxConcurrentReplicaOperations int // Maximum number of concurrent operations on replicas InstanceDBExecContextTimeoutSeconds int // Timeout on context used while calling ExecContext on instance database LockShardTimeoutSeconds int // Timeout on context used to lock shard. Should be a small value because we should fail-fast + WaitReplicasTimeoutSeconds int // Timeout on amount of time to wait for the replicas in case of ERS. Should be a small value because we should fail-fast. Should not be larger than LockShardTimeoutSeconds since that is the total time we use for an ERS. 
} // ToJSONString will marshal this configuration as JSON @@ -396,6 +397,7 @@ func newConfiguration() *Configuration { MaxConcurrentReplicaOperations: 5, InstanceDBExecContextTimeoutSeconds: 30, LockShardTimeoutSeconds: 1, + WaitReplicasTimeoutSeconds: 1, } } diff --git a/go/vt/orchestrator/http/api.go b/go/vt/orchestrator/http/api.go index 0d33f5c81d5..15278508bfc 100644 --- a/go/vt/orchestrator/http/api.go +++ b/go/vt/orchestrator/http/api.go @@ -40,6 +40,7 @@ import ( "vitess.io/vitess/go/vt/orchestrator/metrics/query" "vitess.io/vitess/go/vt/orchestrator/process" orcraft "vitess.io/vitess/go/vt/orchestrator/raft" + "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" ) // APIResponseCode is an OK/ERROR response code @@ -216,7 +217,7 @@ func (this *HttpAPI) Discover(params martini.Params, r render.Render, req *http. if orcraft.IsRaftEnabled() { orcraft.PublishCommand("discover", instanceKey) } else { - logic.DiscoverInstance(instanceKey) + logic.DiscoverInstance(instanceKey, false /* forceDiscovery */) } Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Instance discovered: %+v", instance.Key), Details: instance}) @@ -2386,7 +2387,7 @@ func (this *HttpAPI) RegisterCandidate(params martini.Params, r render.Render, r Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) return } - promotionRule, err := inst.ParseCandidatePromotionRule(params["promotionRule"]) + promotionRule, err := promotionrule.Parse(params["promotionRule"]) if err != nil { Respond(r, &APIResponse{Code: ERROR, Message: err.Error()}) return diff --git a/go/vt/orchestrator/inst/analysis_dao.go b/go/vt/orchestrator/inst/analysis_dao.go index cc05aa219d1..487af0d0dde 100644 --- a/go/vt/orchestrator/inst/analysis_dao.go +++ b/go/vt/orchestrator/inst/analysis_dao.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/vt/orchestrator/util" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "github.com/patrickmn/go-cache" "github.com/rcrowley/go-metrics" @@ -503,11 +504,11 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) a.Analysis = PrimaryIsReadOnly a.Description = "Primary is read-only" // - } else if a.IsClusterPrimary && PrimarySemiSync(a.AnalyzedInstanceKey) != 0 && !a.SemiSyncPrimaryEnabled { + } else if a.IsClusterPrimary && reparentutil.SemiSyncAckers(tablet) != 0 && !a.SemiSyncPrimaryEnabled { a.Analysis = PrimarySemiSyncMustBeSet a.Description = "Primary semi-sync must be set" // - } else if a.IsClusterPrimary && PrimarySemiSync(a.AnalyzedInstanceKey) == 0 && a.SemiSyncPrimaryEnabled { + } else if a.IsClusterPrimary && reparentutil.SemiSyncAckers(tablet) == 0 && a.SemiSyncPrimaryEnabled { a.Analysis = PrimarySemiSyncMustNotBeSet a.Description = "Primary semi-sync must not be set" // @@ -531,11 +532,11 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) a.Analysis = ReplicationStopped a.Description = "Replication is stopped" // - } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && ReplicaSemiSyncFromTablet(primaryTablet, tablet) && !a.SemiSyncReplicaEnabled { + } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && reparentutil.ReplicaSemiSync(primaryTablet, tablet) && !a.SemiSyncReplicaEnabled { a.Analysis = ReplicaSemiSyncMustBeSet a.Description = "Replica semi-sync must be set" // - } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && !ReplicaSemiSyncFromTablet(primaryTablet, tablet) && a.SemiSyncReplicaEnabled { + } 
else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && !reparentutil.ReplicaSemiSync(primaryTablet, tablet) && a.SemiSyncReplicaEnabled { a.Analysis = ReplicaSemiSyncMustNotBeSet a.Description = "Replica semi-sync must not be set" // diff --git a/go/vt/orchestrator/inst/candidate_database_instance.go b/go/vt/orchestrator/inst/candidate_database_instance.go index 493a1e7034b..5851273b53e 100644 --- a/go/vt/orchestrator/inst/candidate_database_instance.go +++ b/go/vt/orchestrator/inst/candidate_database_instance.go @@ -20,18 +20,19 @@ import ( "fmt" "vitess.io/vitess/go/vt/orchestrator/db" + "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" ) // CandidateDatabaseInstance contains information about explicit promotion rules for an instance type CandidateDatabaseInstance struct { Hostname string Port int - PromotionRule CandidatePromotionRule + PromotionRule promotionrule.CandidatePromotionRule LastSuggestedString string PromotionRuleExpiry string // generated when retrieved from database for consistency reasons } -func NewCandidateDatabaseInstance(instanceKey *InstanceKey, promotionRule CandidatePromotionRule) *CandidateDatabaseInstance { +func NewCandidateDatabaseInstance(instanceKey *InstanceKey, promotionRule promotionrule.CandidatePromotionRule) *CandidateDatabaseInstance { return &CandidateDatabaseInstance{ Hostname: instanceKey.Hostname, Port: instanceKey.Port, diff --git a/go/vt/orchestrator/inst/candidate_database_instance_dao.go b/go/vt/orchestrator/inst/candidate_database_instance_dao.go index 30a2220eb95..2479cb56a2b 100644 --- a/go/vt/orchestrator/inst/candidate_database_instance_dao.go +++ b/go/vt/orchestrator/inst/candidate_database_instance_dao.go @@ -22,6 +22,7 @@ import ( "vitess.io/vitess/go/vt/orchestrator/config" "vitess.io/vitess/go/vt/orchestrator/db" + "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" ) // RegisterCandidateInstance marks a given instance as suggested for succeeding a primary in the event of failover. @@ -93,7 +94,7 @@ func BulkReadCandidateDatabaseInstance() ([]CandidateDatabaseInstance, error) { cdi := CandidateDatabaseInstance{ Hostname: m.GetString("hostname"), Port: m.GetInt("port"), - PromotionRule: CandidatePromotionRule(m.GetString("promotion_rule")), + PromotionRule: promotionrule.CandidatePromotionRule(m.GetString("promotion_rule")), LastSuggestedString: m.GetString("last_suggested"), PromotionRuleExpiry: m.GetString("promotion_rule_expiry"), } diff --git a/go/vt/orchestrator/inst/durability.go b/go/vt/orchestrator/inst/durability.go index 6036094e793..547ae03916f 100644 --- a/go/vt/orchestrator/inst/durability.go +++ b/go/vt/orchestrator/inst/durability.go @@ -17,80 +17,9 @@ limitations under the License. 
package inst import ( - "fmt" - - "vitess.io/vitess/go/vt/topo/topoproto" - - "vitess.io/vitess/go/vt/orchestrator/external/golib/log" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtctl/reparentutil" ) -//======================================================================= - -// A newDurabler is a function that creates a new durabler based on the -// properties specified in the input map. Every durabler must -// register a newDurabler function.
-type newDurabler func(map[string]string) durabler - -var ( - // durabilityPolicies is a map that stores the functions needed to create a new durabler - durabilityPolicies = make(map[string]newDurabler) - // curDurabilityPolicy is the current durability policy in use - curDurabilityPolicy durabler -) - -func init() { - // register all the durability rules with their functions to create them - registerDurability("none", func(map[string]string) durabler { - return &durabilityNone{} - }) - registerDurability("semi_sync", func(map[string]string) durabler { - return &durabilitySemiSync{} - }) - registerDurability("cross_cell", func(map[string]string) durabler { - return &durabilityCrossCell{} - }) - registerDurability("specified", newDurabilitySpecified) -} - -// durabler is the interface which is used to get the promotion rules for candidates and the semi sync setup -type durabler interface { - promotionRule(*topodatapb.Tablet) CandidatePromotionRule - primarySemiSync(InstanceKey) int - replicaSemiSync(primary, replica *topodatapb.Tablet) bool -} - -func registerDurability(name string, newDurablerFunc newDurabler) { - if durabilityPolicies[name] != nil { - log.Fatalf("durability policy %v already registered", name) - } - durabilityPolicies[name] = newDurablerFunc -} - -//======================================================================= - -// SetDurabilityPolicy is used to set the durability policy from the registered policies -func SetDurabilityPolicy(name string, durabilityParams map[string]string) error { - newDurabilityCreationFunc, found := durabilityPolicies[name] - if !found { - return fmt.Errorf("durability policy %v not found", name) - } - log.Infof("Durability setting: %v", name) - curDurabilityPolicy = newDurabilityCreationFunc(durabilityParams) - return nil -} - -// PromotionRule returns the promotion rule for the instance. -func PromotionRule(tablet *topodatapb.Tablet) CandidatePromotionRule { - return curDurabilityPolicy.promotionRule(tablet) -} - -// PrimarySemiSync returns the primary semi-sync setting for the instance. -// 0 means none. Non-zero specifies the number of required ackers. -func PrimarySemiSync(instanceKey InstanceKey) int { - return curDurabilityPolicy.primarySemiSync(instanceKey) -} - // ReplicaSemiSync returns the replica semi-sync setting for the instance. func ReplicaSemiSync(primaryKey, replicaKey InstanceKey) bool { primary, err := ReadTablet(primaryKey) @@ -101,139 +30,15 @@ func ReplicaSemiSync(primaryKey, replicaKey InstanceKey) bool { if err != nil { return false } - return curDurabilityPolicy.replicaSemiSync(primary, replica) -} - -// ReplicaSemiSyncFromTablet returns the replica semi-sync setting from the tablet record. -// Prefer using this function if tablet record is available. 
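With the registry gone from inst, callers now go through reparentutil. A sketch of the consumer-side API as exercised elsewhere in this diff (SetDurabilityPolicy, PromotionRule, SemiSyncAckers, ReplicaSemiSync); the tablet arguments are placeholders:

	// durabilityExample shows the reparentutil calls that replace the old
	// inst-level durability helpers; tablets are *topodatapb.Tablet values.
	func durabilityExample(tablet, primaryTablet, replicaTablet *topodatapb.Tablet) error {
		// Select the policy once at startup. The registered names, per the
		// code removed above: "none", "semi_sync", "cross_cell", "specified".
		if err := reparentutil.SetDurabilityPolicy("cross_cell", nil); err != nil {
			return err
		}
		rule := reparentutil.PromotionRule(tablet)           // a promotionrule.CandidatePromotionRule
		ackers := reparentutil.SemiSyncAckers(primaryTablet) // 0 means no semi-sync ackers required
		semiSync := reparentutil.ReplicaSemiSync(primaryTablet, replicaTablet)
		_, _, _ = rule, ackers, semiSync
		return nil
	}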
-func ReplicaSemiSyncFromTablet(primary, replica *topodatapb.Tablet) bool { - return curDurabilityPolicy.replicaSemiSync(primary, replica) -} - -//======================================================================= - -// durabilityNone has no semi-sync and returns NeutralPromoteRule for Primary and Replica tablet types, MustNotPromoteRule for everything else -type durabilityNone struct{} - -func (d *durabilityNone) promotionRule(tablet *topodatapb.Tablet) CandidatePromotionRule { - switch tablet.Type { - case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: - return NeutralPromoteRule - } - return MustNotPromoteRule + return reparentutil.ReplicaSemiSync(primary, replica) } -func (d *durabilityNone) primarySemiSync(instanceKey InstanceKey) int { - return 0 -} - -func (d *durabilityNone) replicaSemiSync(primary, replica *topodatapb.Tablet) bool { - return false -} - -//======================================================================= - -// durabilitySemiSync has 1 semi-sync setup. It only allows Primary and Replica type servers to acknowledge semi sync -// It returns NeutralPromoteRule for Primary and Replica tablet types, MustNotPromoteRule for everything else -type durabilitySemiSync struct{} - -func (d *durabilitySemiSync) promotionRule(tablet *topodatapb.Tablet) CandidatePromotionRule { - switch tablet.Type { - case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: - return NeutralPromoteRule - } - return MustNotPromoteRule -} - -func (d *durabilitySemiSync) primarySemiSync(instanceKey InstanceKey) int { - return 1 -} - -func (d *durabilitySemiSync) replicaSemiSync(primary, replica *topodatapb.Tablet) bool { - switch replica.Type { - case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: - return true - } - return false -} - -//======================================================================= - -// durabilityCrossCell has 1 semi-sync setup. It only allows Primary and Replica type servers from a different cell to acknowledge semi sync. -// This means that a transaction must be in two cells for it to be acknowledged -// It returns NeutralPromoteRule for Primary and Replica tablet types, MustNotPromoteRule for everything else -type durabilityCrossCell struct{} - -func (d *durabilityCrossCell) promotionRule(tablet *topodatapb.Tablet) CandidatePromotionRule { - switch tablet.Type { - case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: - return NeutralPromoteRule - } - return MustNotPromoteRule -} - -func (d *durabilityCrossCell) primarySemiSync(instanceKey InstanceKey) int { - return 1 -} - -func (d *durabilityCrossCell) replicaSemiSync(primary, replica *topodatapb.Tablet) bool { - // Prevent panics. - if primary.Alias == nil || replica.Alias == nil { - return false - } - switch replica.Type { - case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: - return primary.Alias.Cell != replica.Alias.Cell - } - return false -} - -//======================================================================= - -// durabilitySpecified is like durabilityNone. It has an additional map which it first queries with the tablet alias as the key -// If a CandidatePromotionRule is found in that map, then that is used as the promotion rule. 
Otherwise, it reverts to the same logic as durabilityNone -type durabilitySpecified struct { - promotionRules map[string]CandidatePromotionRule -} - -func (d *durabilitySpecified) promotionRule(tablet *topodatapb.Tablet) CandidatePromotionRule { - promoteRule, isFound := d.promotionRules[topoproto.TabletAliasString(tablet.Alias)] - if isFound { - return promoteRule - } - - switch tablet.Type { - case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: - return NeutralPromoteRule - } - return MustNotPromoteRule -} - -func (d *durabilitySpecified) primarySemiSync(instanceKey InstanceKey) int { - return 0 -} - -func (d *durabilitySpecified) replicaSemiSync(primary, replica *topodatapb.Tablet) bool { - return false -} - -// newDurabilitySpecified is a function that is used to create a new durabilitySpecified struct -func newDurabilitySpecified(m map[string]string) durabler { - promotionRules := map[string]CandidatePromotionRule{} - // range over the map given by the user - for tabletAliasStr, promotionRuleStr := range m { - // parse the promotion rule - promotionRule, err := ParseCandidatePromotionRule(promotionRuleStr) - // if parsing is not successful, skip over this rule - if err != nil { - log.Errorf("invalid promotion rule %s found, received error - %v", promotionRuleStr, err) - continue - } - // set the promotion rule in the map at the given tablet alias - promotionRules[tabletAliasStr] = promotionRule - } - - return &durabilitySpecified{ - promotionRules: promotionRules, +// SemiSyncAckers returns the primary semi-sync setting for the instance. +// 0 means none. Non-zero specifies the number of required ackers. +func SemiSyncAckers(instanceKey InstanceKey) int { + primary, err := ReadTablet(instanceKey) + if err != nil { + return 0 } + return reparentutil.SemiSyncAckers(primary) } diff --git a/go/vt/orchestrator/inst/durability_test.go b/go/vt/orchestrator/inst/durability_test.go deleted file mode 100644 index 58083760999..00000000000 --- a/go/vt/orchestrator/inst/durability_test.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package inst - -import ( - "testing" - - "vitess.io/vitess/go/vt/topo/topoproto" - - "github.com/stretchr/testify/assert" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -func TestDurabilitySpecified(t *testing.T) { - cellName := "cell" - durabilityRules := newDurabilitySpecified( - map[string]string{ - "cell-0000000000": string(MustPromoteRule), - "cell-0000000001": string(PreferPromoteRule), - "cell-0000000002": string(NeutralPromoteRule), - "cell-0000000003": string(PreferNotPromoteRule), - "cell-0000000004": string(MustNotPromoteRule), - }) - - testcases := []struct { - tablet *topodatapb.Tablet - promotionRule CandidatePromotionRule - }{ - { - tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: cellName, - Uid: 0, - }, - }, - promotionRule: MustNotPromoteRule, - }, { - tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: cellName, - Uid: 1, - }, - }, - promotionRule: PreferPromoteRule, - }, { - tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: cellName, - Uid: 2, - }, - }, - promotionRule: NeutralPromoteRule, - }, { - tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: cellName, - Uid: 3, - }, - }, - promotionRule: PreferNotPromoteRule, - }, { - tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: cellName, - Uid: 4, - }, - }, - promotionRule: MustNotPromoteRule, - }, - } - - for _, testcase := range testcases { - t.Run(topoproto.TabletAliasString(testcase.tablet.Alias), func(t *testing.T) { - rule := durabilityRules.promotionRule(testcase.tablet) - assert.Equal(t, testcase.promotionRule, rule) - }) - } -} diff --git a/go/vt/orchestrator/inst/instance.go b/go/vt/orchestrator/inst/instance.go index 1f6e7dd453a..8a5095c97a3 100644 --- a/go/vt/orchestrator/inst/instance.go +++ b/go/vt/orchestrator/inst/instance.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/vt/orchestrator/config" "vitess.io/vitess/go/vt/orchestrator/external/golib/math" + "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" ) const ReasonableDiscoveryLatency = 500 * time.Millisecond @@ -110,7 +111,7 @@ type Instance struct { // be picked up from daabase_candidate_instance's value when // reading an instance from the db. IsCandidate bool - PromotionRule CandidatePromotionRule + PromotionRule promotionrule.CandidatePromotionRule IsDowntimed bool DowntimeReason string DowntimeOwner string diff --git a/go/vt/orchestrator/inst/instance_dao.go b/go/vt/orchestrator/inst/instance_dao.go index 8f0de503743..25f77b6c868 100644 --- a/go/vt/orchestrator/inst/instance_dao.go +++ b/go/vt/orchestrator/inst/instance_dao.go @@ -39,6 +39,7 @@ import ( "vitess.io/vitess/go/vt/orchestrator/external/golib/log" "vitess.io/vitess/go/vt/orchestrator/external/golib/math" "vitess.io/vitess/go/vt/orchestrator/external/golib/sqlutils" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/topoproto" @@ -49,6 +50,8 @@ import ( "vitess.io/vitess/go/vt/orchestrator/kv" "vitess.io/vitess/go/vt/orchestrator/metrics/query" "vitess.io/vitess/go/vt/orchestrator/util" + "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" ) const ( @@ -679,7 +682,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, // We need to update candidate_database_instance. // We register the rule even if it hasn't changed, // to bump the last_suggested time. 
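The promotionrule package carries the same five rules under shorter names, plus the Parse helper used in cli.go and api.go above. A brief sketch of the correspondence implied by the mechanical renames in this diff (the string encoding is assumed to be unchanged from the values stored in the promotion_rule column):

	rule, err := promotionrule.Parse("prefer_not")
	if err != nil {
		// unknown rule string
	}
	// Old inst constants -> new promotionrule constants:
	//   MustPromoteRule      -> promotionrule.Must
	//   PreferPromoteRule    -> promotionrule.Prefer
	//   NeutralPromoteRule   -> promotionrule.Neutral
	//   PreferNotPromoteRule -> promotionrule.PreferNot
	//   MustNotPromoteRule   -> promotionrule.MustNot
	if rule == promotionrule.MustNot {
		// such an instance is banned from candidacy, cf. IsBannedFromBeingCandidateReplica below
	}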
- instance.PromotionRule = PromotionRule(tablet) + instance.PromotionRule = reparentutil.PromotionRule(tablet) err = RegisterCandidateInstance(NewCandidateDatabaseInstance(instanceKey, instance.PromotionRule).WithCurrentTime()) logReadTopologyInstanceError(instanceKey, "RegisterCandidateInstance", err) @@ -932,7 +935,7 @@ func BulkReadInstance() ([](*InstanceKey), error) { } func ReadInstancePromotionRule(instance *Instance) (err error) { - var promotionRule CandidatePromotionRule = NeutralPromoteRule + var promotionRule promotionrule.CandidatePromotionRule = promotionrule.Neutral query := ` select ifnull(nullif(promotion_rule, ''), 'neutral') as promotion_rule @@ -942,7 +945,7 @@ func ReadInstancePromotionRule(instance *Instance) (err error) { args := sqlutils.Args(instance.Key.Hostname, instance.Key.Port) err = db.QueryOrchestrator(query, args, func(m sqlutils.RowMap) error { - promotionRule = CandidatePromotionRule(m.GetString("promotion_rule")) + promotionRule = promotionrule.CandidatePromotionRule(m.GetString("promotion_rule")) return nil }) instance.PromotionRule = promotionRule @@ -1022,7 +1025,7 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.IsLastCheckValid = m.GetBool("is_last_check_valid") instance.SecondsSinceLastSeen = m.GetNullInt64("seconds_since_last_seen") instance.IsCandidate = m.GetBool("is_candidate") - instance.PromotionRule = CandidatePromotionRule(m.GetString("promotion_rule")) + instance.PromotionRule = promotionrule.CandidatePromotionRule(m.GetString("promotion_rule")) instance.IsDowntimed = m.GetBool("is_downtimed") instance.DowntimeReason = m.GetString("downtime_reason") instance.DowntimeOwner = m.GetString("downtime_owner") @@ -1426,7 +1429,7 @@ func ReadClusterNeutralPromotionRuleInstances(clusterName string) (neutralInstan return neutralInstances, err } for _, instance := range instances { - if instance.PromotionRule == NeutralPromoteRule { + if instance.PromotionRule == promotionrule.Neutral { neutralInstances = append(neutralInstances, instance) } } diff --git a/go/vt/orchestrator/inst/instance_topology.go b/go/vt/orchestrator/inst/instance_topology.go index 257332812b6..c5fbad0f444 100644 --- a/go/vt/orchestrator/inst/instance_topology.go +++ b/go/vt/orchestrator/inst/instance_topology.go @@ -30,6 +30,7 @@ import ( "vitess.io/vitess/go/vt/orchestrator/external/golib/math" "vitess.io/vitess/go/vt/orchestrator/external/golib/util" "vitess.io/vitess/go/vt/orchestrator/os" + "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" ) type StopReplicationMethod string @@ -618,9 +619,9 @@ func MoveBelowGTID(instanceKey, otherKey *InstanceKey) (*Instance, error) { return moveInstanceBelowViaGTID(instance, other) } -// moveReplicasViaGTID moves a list of replicas under another instance via GTID, returning those replicas +// MoveReplicasViaGTID moves a list of replicas under another instance via GTID, returning those replicas // that could not be moved (do not use GTID or had GTID errors) -func moveReplicasViaGTID(replicas [](*Instance), other *Instance, postponedFunctionsContainer *PostponedFunctionsContainer) (movedReplicas [](*Instance), unmovedReplicas [](*Instance), err error, errs []error) { +func MoveReplicasViaGTID(replicas [](*Instance), other *Instance, postponedFunctionsContainer *PostponedFunctionsContainer) (movedReplicas [](*Instance), unmovedReplicas [](*Instance), err error, errs []error) { replicas = RemoveNilInstances(replicas) replicas = RemoveInstance(replicas, &other.Key) if len(replicas) == 0 { @@ -628,7 +629,7 @@ func 
moveReplicasViaGTID(replicas [](*Instance), other *Instance, postponedFunct return movedReplicas, unmovedReplicas, nil, errs } - log.Infof("moveReplicasViaGTID: Will move %+v replicas below %+v via GTID, max concurrency: %v", + log.Infof("MoveReplicasViaGTID: Will move %+v replicas below %+v via GTID, max concurrency: %v", len(replicas), other.Key, config.Config.MaxConcurrentReplicaOperations) @@ -679,7 +680,7 @@ func moveReplicasViaGTID(replicas [](*Instance), other *Instance, postponedFunct if len(errs) == len(replicas) { // All returned with error - return movedReplicas, unmovedReplicas, fmt.Errorf("moveReplicasViaGTID: Error on all %+v operations", len(errs)), errs + return movedReplicas, unmovedReplicas, fmt.Errorf("MoveReplicasViaGTID: Error on all %+v operations", len(errs)), errs } AuditOperation("move-replicas-gtid", &other.Key, fmt.Sprintf("moved %d/%d replicas below %+v via GTID", len(movedReplicas), len(replicas), other.Key)) @@ -700,7 +701,7 @@ func MoveReplicasGTID(primaryKey *InstanceKey, belowKey *InstanceKey, pattern st return movedReplicas, unmovedReplicas, err, errs } replicas = filterInstancesByPattern(replicas, pattern) - movedReplicas, unmovedReplicas, err, errs = moveReplicasViaGTID(replicas, belowInstance, nil) + movedReplicas, unmovedReplicas, err, errs = MoveReplicasViaGTID(replicas, belowInstance, nil) if err != nil { log.Errore(err) } @@ -1504,13 +1505,13 @@ Cleanup: } // sortInstances shuffles given list of instances according to some logic -func sortInstancesDataCenterHint(instances [](*Instance), dataCenterHint string) { +func SortInstancesDataCenterHint(instances [](*Instance), dataCenterHint string) { sort.Sort(sort.Reverse(NewInstancesSorterByExec(instances, dataCenterHint))) } // sortInstances shuffles given list of instances according to some logic func sortInstances(instances [](*Instance)) { - sortInstancesDataCenterHint(instances, "") + SortInstancesDataCenterHint(instances, "") } // getReplicasForSorting returns a list of replicas of a given primary potentially for candidate choosing @@ -1538,7 +1539,7 @@ func sortedReplicasDataCenterHint(replicas [](*Instance), stopReplicationMethod replicas = StopReplicas(replicas, stopReplicationMethod, time.Duration(config.Config.InstanceBulkOperationsWaitTimeoutSeconds)*time.Second) replicas = RemoveNilInstances(replicas) - sortInstancesDataCenterHint(replicas, dataCenterHint) + SortInstancesDataCenterHint(replicas, dataCenterHint) for _, replica := range replicas { log.Debugf("- sorted replica: %+v %+v", replica.Key, replica.ExecBinlogCoordinates) } @@ -1609,7 +1610,7 @@ func isValidAsCandidatePrimaryInBinlogServerTopology(replica *Instance) bool { } func IsBannedFromBeingCandidateReplica(replica *Instance) bool { - if replica.PromotionRule == MustNotPromoteRule { + if replica.PromotionRule == promotionrule.MustNot { log.Debugf("instance %+v is banned because of promotion rule", replica.Key) return true } @@ -1659,10 +1660,10 @@ func getPriorityBinlogFormatForCandidate(replicas [](*Instance)) (priorityBinlog return sorted.First(), nil } -// chooseCandidateReplica -func chooseCandidateReplica(replicas [](*Instance)) (candidateReplica *Instance, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas [](*Instance), err error) { +// ChooseCandidateReplica +func ChooseCandidateReplica(replicas [](*Instance)) (candidateReplica *Instance, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas [](*Instance), err error) { if len(replicas) == 0 { - return candidateReplica, aheadReplicas, 
equalReplicas, laterReplicas, cannotReplicateReplicas, fmt.Errorf("No replicas found given in chooseCandidateReplica") + return candidateReplica, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, fmt.Errorf("No replicas found given in ChooseCandidateReplica") } priorityMajorVersion, _ := getPriorityMajorVersionForCandidate(replicas) priorityBinlogFormat, _ := getPriorityBinlogFormatForCandidate(replicas) @@ -1692,7 +1693,7 @@ func chooseCandidateReplica(replicas [](*Instance)) (candidateReplica *Instance, if candidateReplica != nil { replicas = RemoveInstance(replicas, &candidateReplica.Key) } - return candidateReplica, replicas, equalReplicas, laterReplicas, cannotReplicateReplicas, fmt.Errorf("chooseCandidateReplica: no candidate replica found") + return candidateReplica, replicas, equalReplicas, laterReplicas, cannotReplicateReplicas, fmt.Errorf("ChooseCandidateReplica: no candidate replica found") } replicas = RemoveInstance(replicas, &candidateReplica.Key) for _, replica := range replicas { @@ -1701,7 +1702,7 @@ func chooseCandidateReplica(replicas [](*Instance)) (candidateReplica *Instance, // lost due to inability to replicate cannotReplicateReplicas = append(cannotReplicateReplicas, replica) if err != nil { - log.Errorf("chooseCandidateReplica(): error checking CanReplicateFrom(). replica: %v; error: %v", replica.Key, err) + log.Errorf("ChooseCandidateReplica(): error checking CanReplicateFrom(). replica: %v; error: %v", replica.Key, err) } } else if replica.ExecBinlogCoordinates.SmallerThan(&candidateReplica.ExecBinlogCoordinates) { laterReplicas = append(laterReplicas, replica) @@ -1742,7 +1743,7 @@ func GetCandidateReplica(primaryKey *InstanceKey, forRematchPurposes bool) (*Ins if len(replicas) == 0 { return candidateReplica, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, fmt.Errorf("No replicas found for %+v", *primaryKey) } - candidateReplica, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err = chooseCandidateReplica(replicas) + candidateReplica, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err = ChooseCandidateReplica(replicas) if err != nil { return candidateReplica, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err } @@ -1845,7 +1846,7 @@ func RegroupReplicasGTID( moveGTIDFunc := func() error { log.Debugf("RegroupReplicasGTID: working on %d replicas", len(replicasToMove)) - movedReplicas, unmovedReplicas, err, _ = moveReplicasViaGTID(replicasToMove, candidateReplica, postponedFunctionsContainer) + movedReplicas, unmovedReplicas, err, _ = MoveReplicasViaGTID(replicasToMove, candidateReplica, postponedFunctionsContainer) unmovedReplicas = append(unmovedReplicas, aheadReplicas...) 
return log.Errore(err) } @@ -2090,7 +2091,7 @@ func relocateReplicasInternal(replicas [](*Instance), instance, other *Instance) } // GTID { - movedReplicas, unmovedReplicas, err, errs := moveReplicasViaGTID(replicas, other, nil) + movedReplicas, unmovedReplicas, err, errs := MoveReplicasViaGTID(replicas, other, nil) if len(movedReplicas) == len(replicas) { // Moved (or tried moving) everything via GTID diff --git a/go/vt/orchestrator/inst/instance_topology_dao.go b/go/vt/orchestrator/inst/instance_topology_dao.go index e24a9cf2bf0..d2405f688f4 100644 --- a/go/vt/orchestrator/inst/instance_topology_dao.go +++ b/go/vt/orchestrator/inst/instance_topology_dao.go @@ -360,7 +360,12 @@ func StopReplicas(replicas [](*Instance), stopReplicationMethod StopReplicationM // StopReplicasNicely will attempt to stop all given replicas nicely, up to timeout func StopReplicasNicely(replicas [](*Instance), timeout time.Duration) [](*Instance) { - return StopReplicas(replicas, StopReplicationNice, timeout) + stoppedReplicas := StopReplicas(replicas, StopReplicationNice, timeout) + // We remove nil instances because StopReplicas might introduce nils in the array that it returns in case of + // failures while reading the tablet from the backend. This could happen when the tablet is forgotten while we are + // trying to stop the replication on the tablets. + stoppedReplicas = RemoveNilInstances(stoppedReplicas) + return stoppedReplicas } // StopReplication stops replication on a given instance diff --git a/go/vt/orchestrator/inst/instance_topology_test.go b/go/vt/orchestrator/inst/instance_topology_test.go index ab2f2f04557..477cae9c098 100644 --- a/go/vt/orchestrator/inst/instance_topology_test.go +++ b/go/vt/orchestrator/inst/instance_topology_test.go @@ -8,6 +8,7 @@ import ( "vitess.io/vitess/go/vt/orchestrator/config" "vitess.io/vitess/go/vt/orchestrator/external/golib/log" test "vitess.io/vitess/go/vt/orchestrator/external/golib/tests" + "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" ) var ( @@ -99,7 +100,7 @@ func TestSortInstancesDataCenterHint(t *testing.T) { instance.DataCenter = "somedc" } instancesMap[i810Key.StringCode()].DataCenter = "localdc" - sortInstancesDataCenterHint(instances, "localdc") + SortInstancesDataCenterHint(instances, "localdc") test.S(t).ExpectEquals(instances[0].Key, i810Key) } @@ -251,7 +252,7 @@ func TestIsBannedFromBeingCandidateReplica(t *testing.T) { { instances, _ := generateTestInstances() for _, instance := range instances { - instance.PromotionRule = MustNotPromoteRule + instance.PromotionRule = promotionrule.MustNot } for _, instance := range instances { test.S(t).ExpectTrue(IsBannedFromBeingCandidateReplica(instance)) @@ -277,7 +278,7 @@ func TestChooseCandidateReplicaNoCandidateReplica(t *testing.T) { instance.LogBinEnabled = true instance.LogReplicationUpdatesEnabled = false } - _, _, _, _, _, err := chooseCandidateReplica(instances) + _, _, _, _, _, err := ChooseCandidateReplica(instances) test.S(t).ExpectNotNil(err) } @@ -285,7 +286,7 @@ func TestChooseCandidateReplica(t *testing.T) { instances, _ := generateTestInstances() applyGeneralGoodToGoReplicationParams(instances) instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i830Key)
test.S(t).ExpectEquals(len(aheadReplicas), 0) @@ -300,7 +301,7 @@ func TestChooseCandidateReplica2(t *testing.T) { instancesMap[i830Key.StringCode()].LogReplicationUpdatesEnabled = false instancesMap[i820Key.StringCode()].LogBinEnabled = false instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i810Key) test.S(t).ExpectEquals(len(aheadReplicas), 2) @@ -318,7 +319,7 @@ func TestChooseCandidateReplicaSameCoordinatesDifferentVersions(t *testing.T) { instancesMap[i810Key.StringCode()].Version = "5.5.1" instancesMap[i720Key.StringCode()].Version = "5.7.8" instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i810Key) test.S(t).ExpectEquals(len(aheadReplicas), 0) @@ -332,7 +333,7 @@ func TestChooseCandidateReplicaPriorityVersionNoLoss(t *testing.T) { applyGeneralGoodToGoReplicationParams(instances) instancesMap[i830Key.StringCode()].Version = "5.5.1" instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i830Key) test.S(t).ExpectEquals(len(aheadReplicas), 0) @@ -346,7 +347,7 @@ func TestChooseCandidateReplicaPriorityVersionLosesOne(t *testing.T) { applyGeneralGoodToGoReplicationParams(instances) instancesMap[i830Key.StringCode()].Version = "5.7.8" instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i820Key) test.S(t).ExpectEquals(len(aheadReplicas), 1) @@ -361,7 +362,7 @@ func TestChooseCandidateReplicaPriorityVersionLosesTwo(t *testing.T) { instancesMap[i830Key.StringCode()].Version = "5.7.8" instancesMap[i820Key.StringCode()].Version = "5.7.18" instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i810Key) test.S(t).ExpectEquals(len(aheadReplicas), 2) @@ -378,7 +379,7 @@ func TestChooseCandidateReplicaPriorityVersionHigherVersionOverrides(t *testing. 
instancesMap[i810Key.StringCode()].Version = "5.7.5" instancesMap[i730Key.StringCode()].Version = "5.7.30" instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i830Key) test.S(t).ExpectEquals(len(aheadReplicas), 0) @@ -396,7 +397,7 @@ func TestChooseCandidateReplicaLosesOneDueToBinlogFormat(t *testing.T) { instancesMap[i730Key.StringCode()].Binlog_format = "STATEMENT" instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i830Key) test.S(t).ExpectEquals(len(aheadReplicas), 0) @@ -413,7 +414,7 @@ func TestChooseCandidateReplicaPriorityBinlogFormatNoLoss(t *testing.T) { } instancesMap[i830Key.StringCode()].Binlog_format = "STATEMENT" instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i830Key) test.S(t).ExpectEquals(len(aheadReplicas), 0) @@ -427,7 +428,7 @@ func TestChooseCandidateReplicaPriorityBinlogFormatLosesOne(t *testing.T) { applyGeneralGoodToGoReplicationParams(instances) instancesMap[i830Key.StringCode()].Binlog_format = "ROW" instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i820Key) test.S(t).ExpectEquals(len(aheadReplicas), 1) @@ -442,7 +443,7 @@ func TestChooseCandidateReplicaPriorityBinlogFormatLosesTwo(t *testing.T) { instancesMap[i830Key.StringCode()].Binlog_format = "ROW" instancesMap[i820Key.StringCode()].Binlog_format = "ROW" instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i810Key) test.S(t).ExpectEquals(len(aheadReplicas), 2) @@ -459,7 +460,7 @@ func TestChooseCandidateReplicaPriorityBinlogFormatRowOverrides(t *testing.T) { instancesMap[i810Key.StringCode()].Binlog_format = "ROW" instancesMap[i730Key.StringCode()].Binlog_format = "ROW" instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i830Key) 
test.S(t).ExpectEquals(len(aheadReplicas), 0) @@ -471,9 +472,9 @@ func TestChooseCandidateReplicaPriorityBinlogFormatRowOverrides(t *testing.T) { func TestChooseCandidateReplicaMustNotPromoteRule(t *testing.T) { instances, instancesMap := generateTestInstances() applyGeneralGoodToGoReplicationParams(instances) - instancesMap[i830Key.StringCode()].PromotionRule = MustNotPromoteRule + instancesMap[i830Key.StringCode()].PromotionRule = promotionrule.MustNot instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i820Key) test.S(t).ExpectEquals(len(aheadReplicas), 1) @@ -485,10 +486,10 @@ func TestChooseCandidateReplicaMustNotPromoteRule(t *testing.T) { func TestChooseCandidateReplicaPreferNotPromoteRule(t *testing.T) { instances, instancesMap := generateTestInstances() applyGeneralGoodToGoReplicationParams(instances) - instancesMap[i830Key.StringCode()].PromotionRule = MustNotPromoteRule - instancesMap[i820Key.StringCode()].PromotionRule = PreferNotPromoteRule + instancesMap[i830Key.StringCode()].PromotionRule = promotionrule.MustNot + instancesMap[i820Key.StringCode()].PromotionRule = promotionrule.PreferNot instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i820Key) test.S(t).ExpectEquals(len(aheadReplicas), 1) @@ -501,11 +502,11 @@ func TestChooseCandidateReplicaPreferNotPromoteRule2(t *testing.T) { instances, instancesMap := generateTestInstances() applyGeneralGoodToGoReplicationParams(instances) for _, instance := range instances { - instance.PromotionRule = PreferNotPromoteRule + instance.PromotionRule = promotionrule.PreferNot } - instancesMap[i830Key.StringCode()].PromotionRule = MustNotPromoteRule + instancesMap[i830Key.StringCode()].PromotionRule = promotionrule.MustNot instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i820Key) test.S(t).ExpectEquals(len(aheadReplicas), 1) @@ -519,11 +520,11 @@ func TestChooseCandidateReplicaPromoteRuleOrdering(t *testing.T) { applyGeneralGoodToGoReplicationParams(instances) for _, instance := range instances { instance.ExecBinlogCoordinates = instancesMap[i710Key.StringCode()].ExecBinlogCoordinates - instance.PromotionRule = NeutralPromoteRule + instance.PromotionRule = promotionrule.Neutral } - instancesMap[i830Key.StringCode()].PromotionRule = PreferPromoteRule + instancesMap[i830Key.StringCode()].PromotionRule = promotionrule.Prefer instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := 
ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i830Key) test.S(t).ExpectEquals(len(aheadReplicas), 0) @@ -537,11 +538,11 @@ func TestChooseCandidateReplicaPromoteRuleOrdering2(t *testing.T) { applyGeneralGoodToGoReplicationParams(instances) for _, instance := range instances { instance.ExecBinlogCoordinates = instancesMap[i710Key.StringCode()].ExecBinlogCoordinates - instance.PromotionRule = PreferPromoteRule + instance.PromotionRule = promotionrule.Prefer } - instancesMap[i820Key.StringCode()].PromotionRule = MustPromoteRule + instancesMap[i820Key.StringCode()].PromotionRule = promotionrule.Must instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i820Key) test.S(t).ExpectEquals(len(aheadReplicas), 0) @@ -555,13 +556,13 @@ func TestChooseCandidateReplicaPromoteRuleOrdering3(t *testing.T) { applyGeneralGoodToGoReplicationParams(instances) for _, instance := range instances { instance.ExecBinlogCoordinates = instancesMap[i710Key.StringCode()].ExecBinlogCoordinates - instance.PromotionRule = NeutralPromoteRule + instance.PromotionRule = promotionrule.Neutral } - instancesMap[i730Key.StringCode()].PromotionRule = MustPromoteRule - instancesMap[i810Key.StringCode()].PromotionRule = PreferPromoteRule - instancesMap[i830Key.StringCode()].PromotionRule = PreferNotPromoteRule + instancesMap[i730Key.StringCode()].PromotionRule = promotionrule.Must + instancesMap[i810Key.StringCode()].PromotionRule = promotionrule.Prefer + instancesMap[i830Key.StringCode()].PromotionRule = promotionrule.PreferNot instances = sortedReplicas(instances, NoStopReplication) - candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := chooseCandidateReplica(instances) + candidate, aheadReplicas, equalReplicas, laterReplicas, cannotReplicateReplicas, err := ChooseCandidateReplica(instances) test.S(t).ExpectNil(err) test.S(t).ExpectEquals(candidate.Key, i730Key) test.S(t).ExpectEquals(len(aheadReplicas), 0) diff --git a/go/vt/orchestrator/logic/command_applier.go b/go/vt/orchestrator/logic/command_applier.go index 275b0c104e8..25e1968984e 100644 --- a/go/vt/orchestrator/logic/command_applier.go +++ b/go/vt/orchestrator/logic/command_applier.go @@ -93,7 +93,7 @@ func (applier *CommandApplier) discover(value []byte) interface{} { if err := json.Unmarshal(value, &instanceKey); err != nil { return log.Errore(err) } - DiscoverInstance(instanceKey) + DiscoverInstance(instanceKey, false /* forceDiscovery */) return nil } diff --git a/go/vt/orchestrator/logic/orchestrator.go b/go/vt/orchestrator/logic/orchestrator.go index 1ae49e0cb93..2d2138c4ce9 100644 --- a/go/vt/orchestrator/logic/orchestrator.go +++ b/go/vt/orchestrator/logic/orchestrator.go @@ -37,6 +37,7 @@ import ( ometrics "vitess.io/vitess/go/vt/orchestrator/metrics" "vitess.io/vitess/go/vt/orchestrator/process" "vitess.io/vitess/go/vt/orchestrator/util" + "vitess.io/vitess/go/vt/vtctl/reparentutil" ) const ( @@ -50,6 +51,8 @@ var discoveryQueue *discovery.Queue var snapshotDiscoveryKeys chan inst.InstanceKey var snapshotDiscoveryKeysMutex sync.Mutex var hasReceivedSIGTERM int32 +var ersInProgressMutex sync.Mutex +var ersInProgress bool var discoveriesCounter = metrics.NewCounter() var 
failedDiscoveriesCounter = metrics.NewCounter() @@ -168,7 +171,7 @@ func handleDiscoveryRequests() { continue } - DiscoverInstance(instanceKey) + DiscoverInstance(instanceKey, false /* forceDiscovery */) discoveryQueue.Release(instanceKey) } }() @@ -178,7 +181,7 @@ func handleDiscoveryRequests() { // DiscoverInstance will attempt to discover (poll) an instance (unless // it is already up to date) and will also ensure that its primary and // replicas (if any) are also checked. -func DiscoverInstance(instanceKey inst.InstanceKey) { +func DiscoverInstance(instanceKey inst.InstanceKey, forceDiscovery bool) { if inst.InstanceIsForgotten(&instanceKey) { log.Debugf("discoverInstance: skipping discovery of %+v because it is set to be forgotten", instanceKey) return @@ -213,7 +216,7 @@ func DiscoverInstance(instanceKey inst.InstanceKey) { // Calculate the expiry period each time as InstancePollSeconds // _may_ change during the run of the process (via SIGHUP) and // it is not possible to change the cache's default expiry.. - if existsInCacheError := recentDiscoveryOperationKeys.Add(instanceKey.DisplayString(), true, instancePollSecondsDuration()); existsInCacheError != nil { + if existsInCacheError := recentDiscoveryOperationKeys.Add(instanceKey.DisplayString(), true, instancePollSecondsDuration()); existsInCacheError != nil && !forceDiscovery { // Just recently attempted return } @@ -221,7 +224,7 @@ func DiscoverInstance(instanceKey inst.InstanceKey) { latency.Start("backend") instance, found, _ := inst.ReadInstance(&instanceKey) latency.Stop("backend") - if found && instance.IsUpToDate && instance.IsLastCheckValid { + if !forceDiscovery && found && instance.IsUpToDate && instance.IsLastCheckValid { // we've already discovered this one. Skip! return } @@ -413,7 +416,7 @@ func ContinuousDiscovery() { go ometrics.InitMetrics() go acceptSignals() go kv.InitKVStores() - inst.SetDurabilityPolicy(config.Config.Durability, config.Config.DurabilityParams) + reparentutil.SetDurabilityPolicy(config.Config.Durability, config.Config.DurabilityParams) if *config.RuntimeCLIFlags.GrabElection { process.GrabElection() @@ -503,7 +506,7 @@ func ContinuousDiscovery() { } }() case <-tabletTopoTick: - go RefreshTablets() + go RefreshTablets(false /* forceRefresh */) } } } diff --git a/go/vt/orchestrator/logic/tablet_discovery.go b/go/vt/orchestrator/logic/tablet_discovery.go index 7e3c30a05bd..8b0ee13221e 100644 --- a/go/vt/orchestrator/logic/tablet_discovery.go +++ b/go/vt/orchestrator/logic/tablet_discovery.go @@ -61,19 +61,19 @@ func OpenTabletDiscovery() <-chan time.Time { } refreshTabletsUsing(func(instanceKey *inst.InstanceKey) { _ = inst.InjectSeed(instanceKey) - }) + }, false /* forceRefresh */) // TODO(sougou): parameterize poll interval. return time.Tick(15 * time.Second) //nolint SA1015: using time.Tick leaks the underlying ticker } // RefreshTablets reloads the tablets from topo. 
-func RefreshTablets() { +func RefreshTablets(forceRefresh bool) { refreshTabletsUsing(func(instanceKey *inst.InstanceKey) { - DiscoverInstance(*instanceKey) - }) + DiscoverInstance(*instanceKey, forceRefresh) + }, forceRefresh) } -func refreshTabletsUsing(loader func(instanceKey *inst.InstanceKey)) { +func refreshTabletsUsing(loader func(instanceKey *inst.InstanceKey), forceRefresh bool) { if !IsLeaderOrActive() { return } @@ -93,7 +93,7 @@ func refreshTabletsUsing(loader func(instanceKey *inst.InstanceKey)) { wg.Add(1) go func(cell string) { defer wg.Done() - refreshTabletsInCell(refreshCtx, cell, loader) + refreshTabletsInCell(refreshCtx, cell, loader, forceRefresh) }(cell) } wg.Wait() @@ -136,14 +136,14 @@ func refreshTabletsUsing(loader func(instanceKey *inst.InstanceKey)) { wg.Add(1) go func(ks *topo.KeyspaceShard) { defer wg.Done() - refreshTabletsInKeyspaceShard(refreshCtx, ks.Keyspace, ks.Shard, loader) + refreshTabletsInKeyspaceShard(refreshCtx, ks.Keyspace, ks.Shard, loader, forceRefresh) }(ks) } wg.Wait() } } -func refreshTabletsInCell(ctx context.Context, cell string, loader func(instanceKey *inst.InstanceKey)) { +func refreshTabletsInCell(ctx context.Context, cell string, loader func(instanceKey *inst.InstanceKey), forceRefresh bool) { tablets, err := topotools.GetTabletMapForCell(ctx, ts, cell) if err != nil { log.Errorf("Error fetching topo info for cell %v: %v", cell, err) @@ -151,10 +151,10 @@ func refreshTabletsInCell(ctx context.Context, cell string, loader func(instance } query := "select hostname, port, info from vitess_tablet where cell = ?" args := sqlutils.Args(cell) - refreshTablets(tablets, query, args, loader) + refreshTablets(tablets, query, args, loader, forceRefresh) } -func refreshTabletsInKeyspaceShard(ctx context.Context, keyspace, shard string, loader func(instanceKey *inst.InstanceKey)) { +func refreshTabletsInKeyspaceShard(ctx context.Context, keyspace, shard string, loader func(instanceKey *inst.InstanceKey), forceRefresh bool) { tablets, err := ts.GetTabletMapForShard(ctx, keyspace, shard) if err != nil { log.Errorf("Error fetching tablets for keyspace/shard %v/%v: %v", keyspace, shard, err) @@ -162,10 +162,10 @@ func refreshTabletsInKeyspaceShard(ctx context.Context, keyspace, shard string, } query := "select hostname, port, info from vitess_tablet where keyspace = ? and shard = ?" args := sqlutils.Args(keyspace, shard) - refreshTablets(tablets, query, args, loader) + refreshTablets(tablets, query, args, loader, forceRefresh) } -func refreshTablets(tablets map[string]*topo.TabletInfo, query string, args []interface{}, loader func(instanceKey *inst.InstanceKey)) { +func refreshTablets(tablets map[string]*topo.TabletInfo, query string, args []interface{}, loader func(instanceKey *inst.InstanceKey), forceRefresh bool) { // Discover new tablets. // TODO(sougou): enhance this to work with multi-schema, // where each instanceKey can have multiple tablets. @@ -188,7 +188,7 @@ func refreshTablets(tablets map[string]*topo.TabletInfo, query string, args []in log.Errore(err) continue } - if proto.Equal(tablet, old) { + if !forceRefresh && proto.Equal(tablet, old) { continue } if err := inst.SaveTablet(tablet); err != nil { @@ -239,30 +239,31 @@ func refreshTablets(tablets map[string]*topo.TabletInfo, query string, args []in } // LockShard locks the keyspace-shard preventing others from performing conflicting actions. 
-func LockShard(instanceKey inst.InstanceKey) (func(*error), error) { +func LockShard(ctx context.Context, instanceKey inst.InstanceKey) (context.Context, func(*error), error) { if instanceKey.Hostname == "" { - return nil, errors.New("Can't lock shard: instance is unspecified") + return nil, nil, errors.New("Can't lock shard: instance is unspecified") } val := atomic.LoadInt32(&hasReceivedSIGTERM) if val > 0 { - return nil, errors.New("Can't lock shard: SIGTERM received") + return nil, nil, errors.New("Can't lock shard: SIGTERM received") } tablet, err := inst.ReadTablet(instanceKey) if err != nil { - return nil, err + return nil, nil, err } - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(config.Config.LockShardTimeoutSeconds)*time.Second) - defer cancel() + ctx, cancel := context.WithTimeout(ctx, time.Duration(config.Config.LockShardTimeoutSeconds)*time.Second) atomic.AddInt32(&shardsLockCounter, 1) - _, unlock, err := ts.LockShard(ctx, tablet.Keyspace, tablet.Shard, "Orc Recovery") + ctx, unlock, err := ts.LockShard(ctx, tablet.Keyspace, tablet.Shard, "Orc Recovery") if err != nil { + cancel() atomic.AddInt32(&shardsLockCounter, -1) - return nil, err + return nil, nil, err } - return func(e *error) { + return ctx, func(e *error) { defer atomic.AddInt32(&shardsLockCounter, -1) unlock(e) + cancel() }, nil } diff --git a/go/vt/orchestrator/logic/topology_recovery.go b/go/vt/orchestrator/logic/topology_recovery.go index f412d54f4bd..765695bd74d 100644 --- a/go/vt/orchestrator/logic/topology_recovery.go +++ b/go/vt/orchestrator/logic/topology_recovery.go @@ -17,7 +17,9 @@ package logic import ( + "context" "encoding/json" + "errors" "fmt" "math/rand" goos "os" @@ -29,6 +31,10 @@ import ( "github.com/patrickmn/go-cache" "github.com/rcrowley/go-metrics" + logutilpb "vitess.io/vitess/go/vt/proto/logutil" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + + "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/orchestrator/attributes" "vitess.io/vitess/go/vt/orchestrator/config" "vitess.io/vitess/go/vt/orchestrator/external/golib/log" @@ -38,7 +44,9 @@ import ( "vitess.io/vitess/go/vt/orchestrator/os" "vitess.io/vitess/go/vt/orchestrator/process" "vitess.io/vitess/go/vt/orchestrator/util" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" + "vitess.io/vitess/go/vt/vttablet/tmclient" ) var countPendingRecoveries int64 @@ -181,9 +189,6 @@ func (this InstancesByCountReplicas) Less(i, j int) bool { return len(this[i].Replicas) < len(this[j].Replicas) } -var recoverDeadPrimaryCounter = metrics.NewCounter() -var recoverDeadPrimarySuccessCounter = metrics.NewCounter() -var recoverDeadPrimaryFailureCounter = metrics.NewCounter() var recoverDeadIntermediatePrimaryCounter = metrics.NewCounter() var recoverDeadIntermediatePrimarySuccessCounter = metrics.NewCounter() var recoverDeadIntermediatePrimaryFailureCounter = metrics.NewCounter() @@ -193,9 +198,6 @@ var recoverDeadCoPrimaryFailureCounter = metrics.NewCounter() var countPendingRecoveriesGauge = metrics.NewGauge() func init() { - metrics.Register("recover.dead_primary.start", recoverDeadPrimaryCounter) - metrics.Register("recover.dead_primary.success", recoverDeadPrimarySuccessCounter) - metrics.Register("recover.dead_primary.fail", recoverDeadPrimaryFailureCounter) metrics.Register("recover.dead_intermediate_primary.start", recoverDeadIntermediatePrimaryCounter) 
metrics.Register("recover.dead_intermediate_primary.success", recoverDeadIntermediatePrimarySuccessCounter) metrics.Register("recover.dead_intermediate_primary.fail", recoverDeadIntermediatePrimaryFailureCounter) @@ -372,101 +374,6 @@ func executeProcesses(processes []string, description string, topologyRecovery * return err } -func recoverDeadPrimaryInBinlogServerTopology(topologyRecovery *TopologyRecovery) (promotedReplica *inst.Instance, err error) { - failedPrimaryKey := &topologyRecovery.AnalysisEntry.AnalyzedInstanceKey - - var promotedBinlogServer *inst.Instance - - _, promotedBinlogServer, err = inst.RegroupReplicasBinlogServers(failedPrimaryKey, true) - if err != nil { - return nil, log.Errore(err) - } - promotedBinlogServer, err = inst.StopReplication(&promotedBinlogServer.Key) - if err != nil { - return promotedReplica, log.Errore(err) - } - // Find candidate replica - promotedReplica, err = inst.GetCandidateReplicaOfBinlogServerTopology(&promotedBinlogServer.Key) - if err != nil { - return promotedReplica, log.Errore(err) - } - // Align it with binlog server coordinates - promotedReplica, err = inst.StopReplication(&promotedReplica.Key) - if err != nil { - return promotedReplica, log.Errore(err) - } - promotedReplica, err = inst.StartReplicationUntilPrimaryCoordinates(&promotedReplica.Key, &promotedBinlogServer.ExecBinlogCoordinates) - if err != nil { - return promotedReplica, log.Errore(err) - } - promotedReplica, err = inst.StopReplication(&promotedReplica.Key) - if err != nil { - return promotedReplica, log.Errore(err) - } - // Detach, flush binary logs forward - promotedReplica, err = inst.ResetReplication(&promotedReplica.Key) - if err != nil { - return promotedReplica, log.Errore(err) - } - promotedReplica, err = inst.FlushBinaryLogsTo(&promotedReplica.Key, promotedBinlogServer.ExecBinlogCoordinates.LogFile) - if err != nil { - return promotedReplica, log.Errore(err) - } - promotedReplica, err = inst.FlushBinaryLogs(&promotedReplica.Key, 1) - if err != nil { - return promotedReplica, log.Errore(err) - } - promotedReplica, err = inst.PurgeBinaryLogsToLatest(&promotedReplica.Key, false) - if err != nil { - return promotedReplica, log.Errore(err) - } - // Reconnect binlog servers to promoted replica (now primary): - promotedBinlogServer, err = inst.SkipToNextBinaryLog(&promotedBinlogServer.Key) - if err != nil { - return promotedReplica, log.Errore(err) - } - promotedBinlogServer, err = inst.Repoint(&promotedBinlogServer.Key, &promotedReplica.Key, inst.GTIDHintDeny) - if err != nil { - return nil, log.Errore(err) - } - - func() { - // Move binlog server replicas up to replicate from primary. - // This can only be done once a BLS has skipped to the next binlog - // We postpone this operation. The primary is already promoted and we're happy. - binlogServerReplicas, err := inst.ReadBinlogServerReplicaInstances(&promotedBinlogServer.Key) - if err != nil { - return - } - maxBinlogServersToPromote := 3 - for i, binlogServerReplica := range binlogServerReplicas { - binlogServerReplica := binlogServerReplica - if i >= maxBinlogServersToPromote { - return - } - postponedFunction := func() error { - binlogServerReplica, err := inst.StopReplication(&binlogServerReplica.Key) - if err != nil { - return err - } - // Make sure the BLS has the "next binlog" -- the one the primary flushed & purged to. 
Otherwise the BLS - // will request a binlog the primary does not have - if binlogServerReplica.ExecBinlogCoordinates.SmallerThan(&promotedBinlogServer.ExecBinlogCoordinates) { - binlogServerReplica, err = inst.StartReplicationUntilPrimaryCoordinates(&binlogServerReplica.Key, &promotedBinlogServer.ExecBinlogCoordinates) - if err != nil { - return err - } - } - _, err = inst.Repoint(&binlogServerReplica.Key, &promotedReplica.Key, inst.GTIDHintDeny) - return err - } - topologyRecovery.AddPostponedFunction(postponedFunction, fmt.Sprintf("recoverDeadPrimaryInBinlogServerTopology, moving binlog server %+v", binlogServerReplica.Key)) - } - }() - - return promotedReplica, err -} - func GetPrimaryRecoveryType(analysisEntry *inst.ReplicationAnalysis) (primaryRecoveryType PrimaryRecoveryType) { primaryRecoveryType = PrimaryRecoveryUnknown if analysisEntry.OracleGTIDImmediateTopology || analysisEntry.MariaDBGTIDImmediateTopology { @@ -477,115 +384,6 @@ func GetPrimaryRecoveryType(analysisEntry *inst.ReplicationAnalysis) (primaryRec return primaryRecoveryType } -// recoverDeadPrimary recovers a dead primary, complete logic inside -func recoverDeadPrimary(topologyRecovery *TopologyRecovery, candidateInstanceKey *inst.InstanceKey, skipProcesses bool) (recoveryAttempted bool, promotedReplica *inst.Instance, lostReplicas [](*inst.Instance), err error) { - topologyRecovery.Type = PrimaryRecovery - analysisEntry := &topologyRecovery.AnalysisEntry - failedInstanceKey := &analysisEntry.AnalyzedInstanceKey - var cannotReplicateReplicas [](*inst.Instance) - postponedAll := false - - inst.AuditOperation("recover-dead-primary", failedInstanceKey, "problem found; will recover") - if !skipProcesses { - if err := executeProcesses(config.Config.PreFailoverProcesses, "PreFailoverProcesses", topologyRecovery, true); err != nil { - return false, nil, lostReplicas, topologyRecovery.AddError(err) - } - } - - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadPrimary: will recover %+v", *failedInstanceKey)) - - err = TabletDemotePrimary(*failedInstanceKey) - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadPrimary: TabletDemotePrimary: %v", err)) - - topologyRecovery.RecoveryType = GetPrimaryRecoveryType(analysisEntry) - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadPrimary: primaryRecoveryType=%+v", topologyRecovery.RecoveryType)) - - promotedReplicaIsIdeal := func(promoted *inst.Instance, hasBestPromotionRule bool) bool { - if promoted == nil { - return false - } - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadPrimary: promotedReplicaIsIdeal(%+v)", promoted.Key)) - if candidateInstanceKey != nil { //explicit request to promote a specific server - return promoted.Key.Equals(candidateInstanceKey) - } - if promoted.DataCenter == topologyRecovery.AnalysisEntry.AnalyzedInstanceDataCenter && - promoted.PhysicalEnvironment == topologyRecovery.AnalysisEntry.AnalyzedInstancePhysicalEnvironment { - if promoted.PromotionRule == inst.MustPromoteRule || promoted.PromotionRule == inst.PreferPromoteRule || - (hasBestPromotionRule && promoted.PromotionRule != inst.MustNotPromoteRule) { - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadPrimary: found %+v to be ideal candidate; will optimize recovery", promoted.Key)) - postponedAll = true - return true - } - } - return false - } - switch topologyRecovery.RecoveryType { - case PrimaryRecoveryUnknown: - { - return false, nil, lostReplicas, topologyRecovery.AddError(log.Errorf("RecoveryType unknown/unsupported")) 
- } - case PrimaryRecoveryGTID: - { - AuditTopologyRecovery(topologyRecovery, "RecoverDeadPrimary: regrouping replicas via GTID") - lostReplicas, _, cannotReplicateReplicas, promotedReplica, err = inst.RegroupReplicasGTID(failedInstanceKey, true, nil, &topologyRecovery.PostponedFunctionsContainer, promotedReplicaIsIdeal) - } - case PrimaryRecoveryBinlogServer: - { - AuditTopologyRecovery(topologyRecovery, "RecoverDeadPrimary: recovering via binlog servers") - promotedReplica, err = recoverDeadPrimaryInBinlogServerTopology(topologyRecovery) - } - } - topologyRecovery.AddError(err) - lostReplicas = append(lostReplicas, cannotReplicateReplicas...) - for _, replica := range lostReplicas { - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadPrimary: - lost replica: %+v", replica.Key)) - } - - if promotedReplica != nil && len(lostReplicas) > 0 && config.Config.DetachLostReplicasAfterPrimaryFailover { - postponedFunction := func() error { - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadPrimary: lost %+v replicas during recovery process; detaching them", len(lostReplicas))) - for _, replica := range lostReplicas { - replica := replica - inst.DetachReplicaPrimaryHost(&replica.Key) - } - return nil - } - topologyRecovery.AddPostponedFunction(postponedFunction, fmt.Sprintf("RecoverDeadPrimary, detach %+v lost replicas", len(lostReplicas))) - } - - func() error { - // TODO(sougou): Commented out: this downtime feels a little aggressive. - //inst.BeginDowntime(inst.NewDowntime(failedInstanceKey, inst.GetMaintenanceOwner(), inst.DowntimeLostInRecoveryMessage, time.Duration(config.LostInRecoveryDowntimeSeconds)*time.Second)) - acknowledgeInstanceFailureDetection(&analysisEntry.AnalyzedInstanceKey) - for _, replica := range lostReplicas { - replica := replica - inst.BeginDowntime(inst.NewDowntime(&replica.Key, inst.GetMaintenanceOwner(), inst.DowntimeLostInRecoveryMessage, time.Duration(config.LostInRecoveryDowntimeSeconds)*time.Second)) - } - return nil - }() - - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadPrimary: %d postponed functions", topologyRecovery.PostponedFunctionsContainer.Len())) - - if promotedReplica != nil && !postponedAll { - promotedReplica, err = replacePromotedReplicaWithCandidate(topologyRecovery, &analysisEntry.AnalyzedInstanceKey, promotedReplica, candidateInstanceKey) - topologyRecovery.AddError(err) - } - - if promotedReplica == nil { - err := TabletUndoDemotePrimary(*failedInstanceKey) - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadPrimary: TabletUndoDemotePrimary: %v", err)) - message := "Failure: no replica promoted." 
- AuditTopologyRecovery(topologyRecovery, message) - inst.AuditOperation("recover-dead-primary", failedInstanceKey, message) - return true, promotedReplica, lostReplicas, err - } - - message := fmt.Sprintf("promoted replica: %+v", promotedReplica.Key) - AuditTopologyRecovery(topologyRecovery, message) - inst.AuditOperation("recover-dead-primary", failedInstanceKey, message) - return true, promotedReplica, lostReplicas, err -} - func PrimaryFailoverGeographicConstraintSatisfied(analysisEntry *inst.ReplicationAnalysis, suggestedInstance *inst.Instance) (satisfied bool, dissatisfiedReason string) { if config.Config.PreventCrossDataCenterPrimaryFailover { if suggestedInstance.DataCenter != analysisEntry.AnalyzedInstanceDataCenter { @@ -697,7 +495,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de keepSearchingHint := "" if satisfied, reason := PrimaryFailoverGeographicConstraintSatisfied(&topologyRecovery.AnalysisEntry, promotedReplica); !satisfied { keepSearchingHint = fmt.Sprintf("Will keep searching; %s", reason) - } else if promotedReplica.PromotionRule == inst.PreferNotPromoteRule { + } else if promotedReplica.PromotionRule == promotionrule.PreferNot { keepSearchingHint = fmt.Sprintf("Will keep searching because we have promoted a server with prefer_not rule: %+v", promotedReplica.Key) } if keepSearchingHint != "" { @@ -815,6 +613,19 @@ func checkAndRecoverDeadPrimary(analysisEntry inst.ReplicationAnalysis, candidat if !(forceInstanceRecovery || analysisEntry.ClusterDetails.HasAutomatedPrimaryRecovery) { return false, nil, nil } + tablet, err := TabletRefresh(analysisEntry.AnalyzedInstanceKey) + if err != nil { + return false, nil, err + } + + var candidateTabletAlias *topodatapb.TabletAlias + if candidateInstanceKey != nil { + candidateTablet, err := inst.ReadTablet(*candidateInstanceKey) + if err != nil { + return false, nil, err + } + candidateTabletAlias = candidateTablet.Alias + } topologyRecovery, err = AttemptRecoveryRegistration(&analysisEntry, !forceInstanceRecovery, !forceInstanceRecovery) if topologyRecovery == nil { AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another RecoverDeadPrimary.", analysisEntry.AnalyzedInstanceKey)) @@ -822,106 +633,100 @@ func checkAndRecoverDeadPrimary(analysisEntry inst.ReplicationAnalysis, candidat } log.Infof("Analysis: %v, deadprimary %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) - // That's it! We must do recovery! - // TODO(sougou): This function gets called by GracefulPrimaryTakeover which may - // need to obtain shard lock before getting here. - unlock, err := LockShard(analysisEntry.AnalyzedInstanceKey) - if err != nil { - log.Infof("CheckAndRecover: Analysis: %+v, InstanceKey: %+v, candidateInstanceKey: %+v, "+ - "skipProcesses: %v: NOT detecting/recovering host, could not obtain shard lock (%v)", - analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey, candidateInstanceKey, skipProcesses, err) - return false, nil, err - } - defer unlock(&err) - - // Check if someone else fixed the problem. - tablet, err := TabletRefresh(analysisEntry.AnalyzedInstanceKey) - if err == nil && tablet.Type != topodatapb.TabletType_PRIMARY { - // TODO(sougou); use a version that only refreshes the current shard. - RefreshTablets() + // this check is needed because sometimes the DeadPrimary code path is forcefully spawned off from other recoveries like PrimaryHasPrimary.
+ // So we need to check that we only run an ERS if the instance that we analyzed was actually a primary! Otherwise, we would end up running an ERS + // even when the cluster is fine or the problem can be fixed via some other recovery + if tablet.Type != topodatapb.TabletType_PRIMARY { + RefreshTablets(true /* forceRefresh */) AuditTopologyRecovery(topologyRecovery, "another agent seems to have fixed the problem") - // TODO(sougou): see if we have to reset the cluster as healthy. return false, topologyRecovery, nil } - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("will handle DeadPrimary event on %+v", analysisEntry.ClusterDetails.ClusterName)) - recoverDeadPrimaryCounter.Inc(1) - recoveryAttempted, promotedReplica, lostReplicas, err := recoverDeadPrimary(topologyRecovery, candidateInstanceKey, skipProcesses) - if err != nil { - AuditTopologyRecovery(topologyRecovery, err.Error()) + // check if we have received SIGTERM; if we have, we should not continue with the recovery + val := atomic.LoadInt32(&hasReceivedSIGTERM) + if val > 0 { + return false, topologyRecovery, errors.New("Can't lock shard: SIGTERM received") } - topologyRecovery.LostReplicas.AddInstances(lostReplicas) - if !recoveryAttempted { - return false, topologyRecovery, err + + // check if an ERS is already in progress; if one is, we should not continue with the recovery + if checkAndSetIfERSInProgress() { + AuditTopologyRecovery(topologyRecovery, "an ERS is already in progress, not issuing another") + return false, topologyRecovery, nil } + defer setERSCompleted() + + // add to the shard lock counter since ERS will lock the shard + atomic.AddInt32(&shardsLockCounter, 1) + defer atomic.AddInt32(&shardsLockCounter, -1) + + ev, err := reparentutil.NewEmergencyReparenter(ts, tmclient.NewTabletManagerClient(), logutil.NewCallbackLogger(func(event *logutilpb.Event) { + level := event.GetLevel() + value := event.GetValue() + // we only log the warnings and errors explicitly; everything gets logged as an information message anyway in auditing topology recovery + switch level { + case logutilpb.Level_WARNING: + log.Warningf("ERS - %s", value) + case logutilpb.Level_ERROR: + log.Errorf("ERS - %s", value) + } + AuditTopologyRecovery(topologyRecovery, value) + })).ReparentShard(context.Background(), + tablet.Keyspace, + tablet.Shard, + reparentutil.EmergencyReparentOptions{ + NewPrimaryAlias: candidateTabletAlias, + IgnoreReplicas: nil, + WaitReplicasTimeout: time.Duration(config.Config.WaitReplicasTimeoutSeconds) * time.Second, + PreventCrossCellPromotion: config.Config.PreventCrossDataCenterPrimaryFailover, + }, + ) + + // here we need to forcefully refresh all the tablets; otherwise stale information is used and unnecessary failover scenarios are spawned off + // For example, if we do not refresh the tablets forcefully and the new primary is found in the cache, then its source key is not updated; this spawns off a + // PrimaryHasPrimary analysis, which runs another ERS + RefreshTablets(true /* forceRefresh */) + var promotedReplica *inst.Instance + if ev.NewPrimary != nil { + promotedReplica, _, _ = inst.ReadInstance(&inst.InstanceKey{ + Hostname: ev.NewPrimary.MysqlHostname, + Port: int(ev.NewPrimary.MysqlPort), + }) + } + postErsCompletion(topologyRecovery, analysisEntry, skipProcesses, promotedReplica) - overridePrimaryPromotion := func() (*inst.Instance, error) { - if promotedReplica == nil { - // No promotion; nothing to override.
- return promotedReplica, err - } - // Scenarios where we might cancel the promotion. - if satisfied, reason := PrimaryFailoverGeographicConstraintSatisfied(&analysisEntry, promotedReplica); !satisfied { - return nil, fmt.Errorf("RecoverDeadPrimary: failed %+v promotion; %s", promotedReplica.Key, reason) - } - if config.Config.FailPrimaryPromotionOnLagMinutes > 0 && - time.Duration(promotedReplica.ReplicationLagSeconds.Int64)*time.Second >= time.Duration(config.Config.FailPrimaryPromotionOnLagMinutes)*time.Minute { - // candidate replica lags too much - return nil, fmt.Errorf("RecoverDeadPrimary: failed promotion. FailPrimaryPromotionOnLagMinutes is set to %d (minutes) and promoted replica %+v 's lag is %d (seconds)", config.Config.FailPrimaryPromotionOnLagMinutes, promotedReplica.Key, promotedReplica.ReplicationLagSeconds.Int64) - } - if config.Config.FailPrimaryPromotionIfSQLThreadNotUpToDate && !promotedReplica.SQLThreadUpToDate() { - return nil, fmt.Errorf("RecoverDeadPrimary: failed promotion. FailPrimaryPromotionIfSQLThreadNotUpToDate is set and promoted replica %+v 's sql thread is not up to date (relay logs still unapplied). Aborting promotion", promotedReplica.Key) - } - if config.Config.DelayPrimaryPromotionIfSQLThreadNotUpToDate && !promotedReplica.SQLThreadUpToDate() { - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("DelayPrimaryPromotionIfSQLThreadNotUpToDate: waiting for SQL thread on %+v", promotedReplica.Key)) - if _, err := inst.WaitForSQLThreadUpToDate(&promotedReplica.Key, 0, 0); err != nil { - return nil, fmt.Errorf("DelayPrimaryPromotionIfSQLThreadNotUpToDate error: %+v", err) - } - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("DelayPrimaryPromotionIfSQLThreadNotUpToDate: SQL thread caught up on %+v", promotedReplica.Key)) - } - // All seems well. No override done. - return promotedReplica, err + return true, topologyRecovery, err +} + +// checkAndSetIfERSInProgress checks whether an ERS is already in progress. If it is not, it marks one as being in progress. +func checkAndSetIfERSInProgress() bool { + ersInProgressMutex.Lock() + defer ersInProgressMutex.Unlock() + if ersInProgress { + return true } - if promotedReplica, err = overridePrimaryPromotion(); err != nil { - AuditTopologyRecovery(topologyRecovery, err.Error()) + ersInProgress = true + return false +} + +// setERSCompleted resets the variable tracking whether an ERS is in progress. +func setERSCompleted() { + ersInProgressMutex.Lock() + defer ersInProgressMutex.Unlock() + ersInProgress = false +} + +func postErsCompletion(topologyRecovery *TopologyRecovery, analysisEntry inst.ReplicationAnalysis, skipProcesses bool, promotedReplica *inst.Instance) { + if promotedReplica != nil { + message := fmt.Sprintf("promoted replica: %+v", promotedReplica.Key) + AuditTopologyRecovery(topologyRecovery, message) + inst.AuditOperation("recover-dead-primary", &analysisEntry.AnalyzedInstanceKey, message) } // And this is the end; whether successful or not, we're done. resolveRecovery(topologyRecovery, promotedReplica) // Now, see whether we are successful or not. From this point there's no going back. if promotedReplica != nil { // Success!
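// Illustrative usage of the guard above (a minimal sketch mirroring the call
// site in checkAndRecoverDeadPrimary earlier in this diff): the mutex-backed
// check-and-set ensures at most one ERS runs at a time, and the deferred reset
// releases the guard whether or not the recovery succeeds.
//
//	if checkAndSetIfERSInProgress() {
//		// another ERS is running; do not race it with a second recovery
//		return false, topologyRecovery, nil
//	}
//	defer setERSCompleted()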
- recoverDeadPrimarySuccessCounter.Inc(1) AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadPrimary: successfully promoted %+v", promotedReplica.Key)) - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadPrimary: promoted server coordinates: %+v", promotedReplica.SelfBinlogCoordinates)) - - AuditTopologyRecovery(topologyRecovery, "- RecoverDeadPrimary: will apply MySQL changes to promoted primary") - { - _, err := inst.ResetReplicationOperation(&promotedReplica.Key) - if err != nil { - // Ugly, but this is important. Let's give it another try - _, err = inst.ResetReplicationOperation(&promotedReplica.Key) - } - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadPrimary: applying RESET SLAVE ALL on promoted primary: success=%t", (err == nil))) - if err != nil { - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadPrimary: NOTE that %+v is promoted even though SHOW SLAVE STATUS may still show it has a primary", promotedReplica.Key)) - } - } - { - count := inst.PrimarySemiSync(promotedReplica.Key) - err := inst.SetSemiSyncPrimary(&promotedReplica.Key, count > 0) - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadPrimary: applying semi-sync %v: success=%t", count > 0, (err == nil))) - - // Dont' allow writes if semi-sync settings fail. - if err == nil { - _, err := inst.SetReadOnly(&promotedReplica.Key, false) - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadPrimary: applying read-only=0 on promoted primary: success=%t", (err == nil))) - } - } - // Let's attempt, though we won't necessarily succeed, to set old primary as read-only - go func() { - _, err := inst.SetReadOnly(&analysisEntry.AnalyzedInstanceKey, true) - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadPrimary: applying read-only=1 on demoted primary: success=%t", (err == nil))) - }() kvPairs := inst.GetClusterPrimaryKVPairs(analysisEntry.ClusterDetails.ClusterAlias, &promotedReplica.Key) AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Writing KV %+v", kvPairs)) @@ -961,11 +766,7 @@ func checkAndRecoverDeadPrimary(analysisEntry inst.ReplicationAnalysis, candidat // Execute post primary-failover processes executeProcesses(config.Config.PostPrimaryFailoverProcesses, "PostPrimaryFailoverProcesses", topologyRecovery, false) } - } else { - recoverDeadPrimaryFailureCounter.Inc(1) } - - return true, topologyRecovery, err } // isGenerallyValidAsCandidateSiblingOfIntermediatePrimary sees that basic server configuration and state are valid @@ -2011,7 +1812,7 @@ func electNewPrimary(analysisEntry inst.ReplicationAnalysis, candidateInstanceKe } log.Infof("Analysis: %v, will elect a new primary: %v", analysisEntry.Analysis, analysisEntry.SuggestedClusterAlias) - unlock, err := LockShard(analysisEntry.AnalyzedInstanceKey) + _, unlock, err := LockShard(context.Background(), analysisEntry.AnalyzedInstanceKey) if err != nil { log.Infof("CheckAndRecover: Analysis: %+v, InstanceKey: %+v, candidateInstanceKey: %+v, "+ "skipProcesses: %v: NOT detecting/recovering host, could not obtain shard lock (%v)", @@ -2077,7 +1878,7 @@ func electNewPrimary(analysisEntry inst.ReplicationAnalysis, candidateInstanceKe return false, topologyRecovery, err } } - count := inst.PrimarySemiSync(candidate.Key) + count := inst.SemiSyncAckers(candidate.Key) err = inst.SetSemiSyncPrimary(&candidate.Key, count > 0) AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- electNewPrimary: applying semi-sync %v: success=%t", count > 0, (err == nil))) if err != nil { 
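// Illustrative calling convention for the reworked LockShard and the renamed
// SemiSyncAckers (a minimal sketch following the electNewPrimary call site
// above): the returned unlock function both releases the shard lock and
// cancels the timeout context, so a single deferred call cleans up everything,
// and semi-sync is enabled on the primary only when at least one acker is
// required.
//
//	_, unlock, err := LockShard(context.Background(), analysisEntry.AnalyzedInstanceKey)
//	if err != nil {
//		return false, topologyRecovery, err
//	}
//	defer unlock(&err)
//
//	count := inst.SemiSyncAckers(analysisEntry.AnalyzedInstanceKey)
//	err = inst.SetSemiSyncPrimary(&analysisEntry.AnalyzedInstanceKey, count > 0)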
@@ -2130,7 +1931,7 @@ func fixPrimary(analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *in } log.Infof("Analysis: %v, will fix primary to read-write %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) - unlock, err := LockShard(analysisEntry.AnalyzedInstanceKey) + _, unlock, err := LockShard(context.Background(), analysisEntry.AnalyzedInstanceKey) if err != nil { log.Infof("CheckAndRecover: Analysis: %+v, InstanceKey: %+v, candidateInstanceKey: %+v, "+ "skipProcesses: %v: NOT detecting/recovering host, could not obtain shard lock (%v)", @@ -2140,7 +1941,7 @@ func fixPrimary(analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *in defer unlock(&err) // TODO(sougou): this code pattern has reached DRY limits. Reuse. - count := inst.PrimarySemiSync(analysisEntry.AnalyzedInstanceKey) + count := inst.SemiSyncAckers(analysisEntry.AnalyzedInstanceKey) err = inst.SetSemiSyncPrimary(&analysisEntry.AnalyzedInstanceKey, count > 0) //AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- fixPrimary: applying semi-sync %v: success=%t", count > 0, (err == nil))) if err != nil { @@ -2162,7 +1963,7 @@ func fixReplica(analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *in } log.Infof("Analysis: %v, will fix replica %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) - unlock, err := LockShard(analysisEntry.AnalyzedInstanceKey) + _, unlock, err := LockShard(context.Background(), analysisEntry.AnalyzedInstanceKey) if err != nil { log.Infof("CheckAndRecover: Analysis: %+v, InstanceKey: %+v, candidateInstanceKey: %+v, "+ "skipProcesses: %v: NOT detecting/recovering host, could not obtain shard lock (%v)", diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go index 9c5a805a918..49e85b34b6e 100644 --- a/go/vt/proto/vtctldata/vtctldata.pb.go +++ b/go/vt/proto/vtctldata/vtctldata.pb.go @@ -2042,6 +2042,9 @@ type EmergencyReparentShardRequest struct { // WaitReplicasTimeout is the duration of time to wait for replicas to catch // up in reparenting. WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` + // PreventCrossCellPromotion is used to only promote the new primary from the same cell + // as the failed primary. 
+ PreventCrossCellPromotion bool `protobuf:"varint,6,opt,name=prevent_cross_cell_promotion,json=preventCrossCellPromotion,proto3" json:"prevent_cross_cell_promotion,omitempty"` } func (x *EmergencyReparentShardRequest) Reset() { @@ -2111,6 +2114,13 @@ func (x *EmergencyReparentShardRequest) GetWaitReplicasTimeout() *vttime.Duratio return nil } +func (x *EmergencyReparentShardRequest) GetPreventCrossCellPromotion() bool { + if x != nil { + return x.PreventCrossCellPromotion + } + return false +} + type EmergencyReparentShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -7674,7 +7684,7 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8f, 0x02, 0x0a, 0x1d, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd0, 0x02, 0x0a, 0x1d, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, @@ -7691,587 +7701,591 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, - 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xbc, 0x01, 0x0a, 0x1e, 0x45, 0x6d, 0x65, 0x72, - 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, - 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, - 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, - 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, - 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x1e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, - 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x22, 0xbe, 0x01, 0x0a, 0x1f, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 
0x63, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a, 0x4b, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9e, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x1c, 0x70, 0x72, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x5f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x70, 0x72, + 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x70, + 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x65, 0x6c, 0x6c, 0x50, + 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbc, 0x01, 0x0a, 0x1e, 0x45, 0x6d, 0x65, + 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, - 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, - 0x6d, 0x69, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, - 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, - 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x44, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x07, - 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x22, 0x28, 0x0a, 0x12, - 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, - 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, - 0x09, 0x63, 
0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x19, - 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x30, 0x0a, 0x18, 0x47, 0x65, 0x74, - 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, - 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xb6, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, - 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x49, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x1a, 0x50, 0x0a, 0x0c, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x15, - 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, - 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, - 0x22, 0x30, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x22, 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, - 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x22, 0x55, 
0x0a, 0x17, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, - 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x84, 0x02, 0x0a, 0x10, - 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, - 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, - 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x28, - 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x6f, 0x6e, - 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x4f, 0x6e, - 0x6c, 0x79, 0x22, 0x50, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x22, 0x4c, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, + 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, + 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, + 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x1e, 0x46, 0x69, 0x6e, 0x64, 0x41, + 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 
0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xbe, 0x01, 0x0a, 0x1f, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, + 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a, 0x4b, 0x0a, 0x0b, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9e, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, + 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, + 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x65, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x44, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, + 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x22, 0x28, 0x0a, + 0x12, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x65, + 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, + 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, + 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 
[generated code elided: the remainder of this hunk is the mechanically regenerated raw-descriptor byte array (`file_vtctldata_proto_rawDesc`, presumably in go/vt/proto/vtctldata/vtctldata.pb.go). protoc-gen-go removes the old descriptor bytes and re-emits them with shifted offsets; the only new message descriptors visible in this span are GetRoutingRulesRequest/GetRoutingRulesResponse and GetSchemaRequest/GetSchemaResponse. The byte-level churn contains no hand-written changes.]
0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4e, 0x0a, 0x0e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7c, 0x0a, 0x12, 0x53, 0x6c, - 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2c, 0x0a, 0x08, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, - 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, - 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x6c, 0x65, 0x65, - 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x53, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x52, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x52, 0x0a, 0x21, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x22, 0xc6, 0x01, 0x0a, 
0x22, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, - 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x52, 0x0a, 0x6f, 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5c, 0x0a, 0x15, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, + 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x0a, 0x6f, 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5c, 0x0a, + 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, + 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x5d, 0x0a, 0x16, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x5d, 0x0a, 0x16, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 
0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, - 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x64, 0x0a, 0x17, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, - 0x65, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x34, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, - 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfb, 0x01, 0x0a, - 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x1a, - 0x69, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x58, 0x0a, 0x17, 0x56, 0x61, - 
0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfc, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x10, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, - 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x6b, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x21, 0x0a, - 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, - 0x22, 0x31, 0x0a, 0x15, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x2a, 0x4a, 0x0a, 0x15, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x0a, 0x0a, 0x06, - 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x4f, 0x56, 0x45, - 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x52, 0x45, 0x41, - 0x54, 0x45, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x49, 0x4e, 0x44, 0x45, 0x58, 0x10, 0x02, 0x42, - 0x28, 0x5a, 0x26, 0x76, 
0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, - 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x64, 0x0a, 0x17, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, + 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x22, 0x65, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, + 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x34, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, + 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfb, 0x01, + 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x1a, 0x69, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x58, 0x0a, 0x17, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 
0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfc, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x10, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, + 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6b, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x21, + 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x73, 0x22, 0x31, 0x0a, 0x15, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x2a, 0x4a, 0x0a, 0x15, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x0a, 0x0a, + 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x4f, 0x56, + 0x45, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x52, 0x45, + 0x41, 0x54, 0x45, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x49, 0x4e, 0x44, 0x45, 0x58, 0x10, 0x02, + 0x42, 0x28, 0x5a, 0x26, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 
0x69, 0x6f, 0x2f, 0x76, 0x69, + 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git a/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go b/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go index 9a8ee6e4013..16d06b1b1f5 100644 --- a/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go +++ b/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go @@ -2195,6 +2195,16 @@ func (m *EmergencyReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.PreventCrossCellPromotion { + i-- + if m.PreventCrossCellPromotion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } if m.WaitReplicasTimeout != nil { size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -7553,6 +7563,9 @@ func (m *EmergencyReparentShardRequest) SizeVT() (n int) { l = m.WaitReplicasTimeout.SizeVT() n += 1 + l + sov(uint64(l)) } + if m.PreventCrossCellPromotion { + n += 2 + } if m.unknownFields != nil { n += len(m.unknownFields) } @@ -14645,6 +14658,26 @@ func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreventCrossCellPromotion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PreventCrossCellPromotion = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go index 37627e1c68b..98bc07e5d40 100644 --- a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go +++ b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go @@ -66,7 +66,7 @@ func TestInitShardPrimary(t *testing.T) { "FAKE SET MASTER", "START SLAVE", } - tablet2.FakeMysqlDaemon.SetReplicationSourceInput = fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort) + tablet2.FakeMysqlDaemon.SetReplicationSourceInputs = append(tablet2.FakeMysqlDaemon.SetReplicationSourceInputs, fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort)) tablet3.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE RESET ALL REPLICATION", @@ -75,7 +75,7 @@ func TestInitShardPrimary(t *testing.T) { "FAKE SET MASTER", "START SLAVE", } - tablet3.FakeMysqlDaemon.SetReplicationSourceInput = fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort) + tablet3.FakeMysqlDaemon.SetReplicationSourceInputs = append(tablet3.FakeMysqlDaemon.SetReplicationSourceInputs, fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort)) for _, tablet := range []*testlib.FakeTablet{tablet1, tablet2, tablet3} { tablet.StartActionLoop(t, wr) @@ -122,7 +122,7 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) { "FAKE SET MASTER", "START SLAVE", } - tablet2.FakeMysqlDaemon.SetReplicationSourceInput = fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort) + tablet2.FakeMysqlDaemon.SetReplicationSourceInputs = append(tablet2.FakeMysqlDaemon.SetReplicationSourceInputs, fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort)) 
tablet3.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE RESET ALL REPLICATION", @@ -130,7 +130,7 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) { "FAKE SET MASTER", "START SLAVE", } - tablet3.FakeMysqlDaemon.SetReplicationSourceInput = fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort) + tablet3.FakeMysqlDaemon.SetReplicationSourceInputs = append(tablet3.FakeMysqlDaemon.SetReplicationSourceInputs, fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort)) for _, tablet := range []*testlib.FakeTablet{tablet1, tablet2, tablet3} { tablet.StartActionLoop(t, wr) diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go index c17272c4d94..131d90eb016 100644 --- a/go/vt/vtctl/grpcvtctldserver/server.go +++ b/go/vt/vtctl/grpcvtctldserver/server.go @@ -596,6 +596,7 @@ func (s *VtctldServer) EmergencyReparentShard(ctx context.Context, req *vtctldat } span.Annotate("wait_replicas_timeout_sec", waitReplicasTimeout.Seconds()) + span.Annotate("prevent_cross_cell_promotion", req.PreventCrossCellPromotion) m := sync.RWMutex{} logstream := []*logutilpb.Event{} @@ -610,9 +611,10 @@ func (s *VtctldServer) EmergencyReparentShard(ctx context.Context, req *vtctldat req.Keyspace, req.Shard, reparentutil.EmergencyReparentOptions{ - NewPrimaryAlias: req.NewPrimary, - IgnoreReplicas: sets.NewString(ignoreReplicaAliases...), - WaitReplicasTimeout: waitReplicasTimeout, + NewPrimaryAlias: req.NewPrimary, + IgnoreReplicas: sets.NewString(ignoreReplicaAliases...), + WaitReplicasTimeout: waitReplicasTimeout, + PreventCrossCellPromotion: req.PreventCrossCellPromotion, }, ) diff --git a/go/vt/vtctl/grpcvtctldserver/server_slow_test.go b/go/vt/vtctl/grpcvtctldserver/server_slow_test.go index 04e0dea5642..0f6a4d7cf25 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_slow_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_slow_test.go @@ -118,6 +118,12 @@ func TestEmergencyReparentShardSlow(t *testing.T) { }{ "zone1-0000000200": {}, }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000200": {}, + }, SetMasterResults: map[string]error{ "zone1-0000000100": nil, "zone1-0000000101": nil, @@ -231,6 +237,12 @@ func TestEmergencyReparentShardSlow(t *testing.T) { }{ "zone1-0000000200": {}, }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000200": {}, + }, SetMasterResults: map[string]error{ "zone1-0000000100": nil, "zone1-0000000101": nil, diff --git a/go/vt/vtctl/grpcvtctldserver/server_test.go b/go/vt/vtctl/grpcvtctldserver/server_test.go index 447293e2a60..226f3b42191 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_test.go @@ -37,6 +37,7 @@ import ( "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vttablet/tmclient" mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" @@ -64,6 +65,7 @@ func init() { tmclient.RegisterTabletManagerClientFactory("grpcvtctldserver.test", func() tmclient.TabletManagerClient { return nil }) + _ = reparentutil.SetDurabilityPolicy("none", nil) } func TestAddCellInfo(t *testing.T) { @@ -2652,6 +2654,12 @@ func TestEmergencyReparentShard(t *testing.T) { }{ "zone1-0000000200": {}, }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000200": {}, + }, SetMasterResults: 
map[string]error{ "zone1-0000000100": nil, "zone1-0000000101": nil, diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go index 8088998d176..76dfb23498e 100644 --- a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go +++ b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go @@ -128,6 +128,8 @@ type TabletManagerClient struct { // a test, set tmc.TopoServer = nil. TopoServer *topo.Server // keyed by tablet alias. + ChangeTabletTypeResult map[string]error + // keyed by tablet alias. DemoteMasterDelays map[string]time.Duration // keyed by tablet alias. DemoteMasterResults map[string]struct { @@ -238,6 +240,10 @@ type TabletManagerClient struct { // ChangeType is part of the tmclient.TabletManagerClient interface. func (fake *TabletManagerClient) ChangeType(ctx context.Context, tablet *topodatapb.Tablet, newType topodatapb.TabletType) error { + if result, ok := fake.ChangeTabletTypeResult[topoproto.TabletAliasString(tablet.Alias)]; ok { + return result + } + if fake.TopoServer == nil { return assert.AnError } diff --git a/go/vt/vtctl/reparent.go b/go/vt/vtctl/reparent.go index 5a5ac0fce5f..7b00cef869c 100644 --- a/go/vt/vtctl/reparent.go +++ b/go/vt/vtctl/reparent.go @@ -60,7 +60,7 @@ func init() { addCommand("Shards", command{ name: "EmergencyReparentShard", method: commandEmergencyReparentShard, - params: "-keyspace_shard= [-new_primary=] [-wait_replicas_timeout=] [-ignore_replicas=]", + params: "-keyspace_shard= [-new_primary=] [-wait_replicas_timeout=] [-ignore_replicas=] [-prevent_cross_cell_promotion=]", help: "Reparents the shard to the new primary. Assumes the old primary is dead and not responding.", }) addCommand("Shards", command{ @@ -177,6 +177,7 @@ func commandEmergencyReparentShard(ctx context.Context, wr *wrangler.Wrangler, s waitReplicasTimeout := subFlags.Duration("wait_replicas_timeout", *topo.RemoteOperationTimeout, "time to wait for replicas to catch up in reparenting") keyspaceShard := subFlags.String("keyspace_shard", "", "keyspace/shard of the shard that needs to be reparented") newPrimary := subFlags.String("new_primary", "", "optional alias of a tablet that should be the new primary. If not specified, Vitess will select the best candidate") + preventCrossCellPromotion := subFlags.Bool("prevent_cross_cell_promotion", false, "only promotes a new primary from the same cell as the previous primary") ignoreReplicasList := subFlags.String("ignore_replicas", "", "comma-separated list of replica tablet aliases to ignore during emergency reparent") // handle deprecated flags @@ -213,7 +214,7 @@ func commandEmergencyReparentShard(ctx context.Context, wr *wrangler.Wrangler, s } } unreachableReplicas := topoproto.ParseTabletSet(*ignoreReplicasList) - return wr.EmergencyReparentShard(ctx, keyspace, shard, tabletAlias, *waitReplicasTimeout, unreachableReplicas) + return wr.EmergencyReparentShard(ctx, keyspace, shard, tabletAlias, *waitReplicasTimeout, unreachableReplicas, *preventCrossCellPromotion) } func commandTabletExternallyReparented(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { diff --git a/go/vt/vtctl/reparentutil/durability.go b/go/vt/vtctl/reparentutil/durability.go new file mode 100644 index 00000000000..8b80cc5cf9f --- /dev/null +++ b/go/vt/vtctl/reparentutil/durability.go @@ -0,0 +1,238 @@ +/* +Copyright 2021 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reparentutil + +import ( + "fmt" + "sync" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo/topoproto" + + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" +) + +//======================================================================= + +// A newDurabler is a function that creates a new durabler based on the +// properties specified in the input map. Every durabler must +// register a newDurabler function. +type newDurabler func(map[string]string) durabler + +var ( + // durabilityPolicies is a map that stores the functions needed to create a new durabler + durabilityPolicies = make(map[string]newDurabler) + // curDurabilityPolicy is the current durability policy in use + curDurabilityPolicy durabler + // curDurabilityPolicyMutex is the mutex protecting the curDurabilityPolicy variable + curDurabilityPolicyMutex sync.Mutex +) + +func init() { + // register all the durability rules with their functions to create them + registerDurability("none", func(map[string]string) durabler { + return &durabilityNone{} + }) + registerDurability("semi_sync", func(map[string]string) durabler { + return &durabilitySemiSync{} + }) + registerDurability("cross_cell", func(map[string]string) durabler { + return &durabilityCrossCell{} + }) + registerDurability("specified", newDurabilitySpecified) +} + +// durabler is the interface which is used to get the promotion rules for candidates and the semi sync setup +type durabler interface { + promotionRule(*topodatapb.Tablet) promotionrule.CandidatePromotionRule + semiSyncAckers(*topodatapb.Tablet) int + replicaSemiSync(primary, replica *topodatapb.Tablet) bool +} + +func registerDurability(name string, newDurablerFunc newDurabler) { + if durabilityPolicies[name] != nil { + log.Fatalf("durability policy %v already registered", name) + } + durabilityPolicies[name] = newDurablerFunc +} + +//======================================================================= + +// SetDurabilityPolicy is used to set the durability policy from the registered policies +func SetDurabilityPolicy(name string, durabilityParams map[string]string) error { + newDurabilityCreationFunc, found := durabilityPolicies[name] + if !found { + return fmt.Errorf("durability policy %v not found", name) + } + log.Infof("Setting durability policy to %v", name) + curDurabilityPolicyMutex.Lock() + defer curDurabilityPolicyMutex.Unlock() + curDurabilityPolicy = newDurabilityCreationFunc(durabilityParams) + return nil +} + +// PromotionRule returns the promotion rule for the instance. +func PromotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { + curDurabilityPolicyMutex.Lock() + defer curDurabilityPolicyMutex.Unlock() + return curDurabilityPolicy.promotionRule(tablet) +} + +// SemiSyncAckers returns the primary semi-sync setting for the instance. +// 0 means none. Non-zero specifies the number of required ackers. 
+func SemiSyncAckers(tablet *topodatapb.Tablet) int { + curDurabilityPolicyMutex.Lock() + defer curDurabilityPolicyMutex.Unlock() + return curDurabilityPolicy.semiSyncAckers(tablet) +} + +// ReplicaSemiSync returns the replica semi-sync setting from the tablet record. +// Prefer using this function if tablet record is available. +func ReplicaSemiSync(primary, replica *topodatapb.Tablet) bool { + curDurabilityPolicyMutex.Lock() + defer curDurabilityPolicyMutex.Unlock() + return curDurabilityPolicy.replicaSemiSync(primary, replica) +} + +//======================================================================= + +// durabilityNone has no semi-sync and returns NeutralPromoteRule for Primary and Replica tablet types, MustNotPromoteRule for everything else +type durabilityNone struct{} + +func (d *durabilityNone) promotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { + switch tablet.Type { + case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: + return promotionrule.Neutral + } + return promotionrule.MustNot +} + +func (d *durabilityNone) semiSyncAckers(tablet *topodatapb.Tablet) int { + return 0 +} + +func (d *durabilityNone) replicaSemiSync(primary, replica *topodatapb.Tablet) bool { + return false +} + +//======================================================================= + +// durabilitySemiSync has 1 semi-sync setup. It only allows Primary and Replica type servers to acknowledge semi sync +// It returns NeutralPromoteRule for Primary and Replica tablet types, MustNotPromoteRule for everything else +type durabilitySemiSync struct{} + +func (d *durabilitySemiSync) promotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { + switch tablet.Type { + case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: + return promotionrule.Neutral + } + return promotionrule.MustNot +} + +func (d *durabilitySemiSync) semiSyncAckers(tablet *topodatapb.Tablet) int { + return 1 +} + +func (d *durabilitySemiSync) replicaSemiSync(primary, replica *topodatapb.Tablet) bool { + switch replica.Type { + case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: + return true + } + return false +} + +//======================================================================= + +// durabilityCrossCell has 1 semi-sync setup. It only allows Primary and Replica type servers from a different cell to acknowledge semi sync. +// This means that a transaction must be in two cells for it to be acknowledged +// It returns NeutralPromoteRule for Primary and Replica tablet types, MustNotPromoteRule for everything else +type durabilityCrossCell struct{} + +func (d *durabilityCrossCell) promotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { + switch tablet.Type { + case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: + return promotionrule.Neutral + } + return promotionrule.MustNot +} + +func (d *durabilityCrossCell) semiSyncAckers(tablet *topodatapb.Tablet) int { + return 1 +} + +func (d *durabilityCrossCell) replicaSemiSync(primary, replica *topodatapb.Tablet) bool { + // Prevent panics. + if primary.Alias == nil || replica.Alias == nil { + return false + } + switch replica.Type { + case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: + return primary.Alias.Cell != replica.Alias.Cell + } + return false +} + +//======================================================================= + +// durabilitySpecified is like durabilityNone. 
It has an additional map which it first queries with the tablet alias as the key +// If a CandidatePromotionRule is found in that map, then that is used as the promotion rule. Otherwise, it reverts to the same logic as durabilityNone +type durabilitySpecified struct { + promotionRules map[string]promotionrule.CandidatePromotionRule +} + +func (d *durabilitySpecified) promotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { + promoteRule, isFound := d.promotionRules[topoproto.TabletAliasString(tablet.Alias)] + if isFound { + return promoteRule + } + + switch tablet.Type { + case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: + return promotionrule.Neutral + } + return promotionrule.MustNot +} + +func (d *durabilitySpecified) semiSyncAckers(tablet *topodatapb.Tablet) int { + return 0 +} + +func (d *durabilitySpecified) replicaSemiSync(primary, replica *topodatapb.Tablet) bool { + return false +} + +// newDurabilitySpecified is a function that is used to create a new durabilitySpecified struct +func newDurabilitySpecified(m map[string]string) durabler { + promotionRules := map[string]promotionrule.CandidatePromotionRule{} + // range over the map given by the user + for tabletAliasStr, promotionRuleStr := range m { + // parse the promotion rule + promotionRule, err := promotionrule.Parse(promotionRuleStr) + // if parsing is not successful, skip over this rule + if err != nil { + log.Errorf("invalid promotion rule %s found, received error - %v", promotionRuleStr, err) + continue + } + // set the promotion rule in the map at the given tablet alias + promotionRules[tabletAliasStr] = promotionRule + } + + return &durabilitySpecified{ + promotionRules: promotionRules, + } +} diff --git a/go/vt/vtctl/reparentutil/durability_test.go b/go/vt/vtctl/reparentutil/durability_test.go new file mode 100644 index 00000000000..f862b7b6cbe --- /dev/null +++ b/go/vt/vtctl/reparentutil/durability_test.go @@ -0,0 +1,218 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package reparentutil + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo/topoproto" + + "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" +) + +func TestDurabilityNone(t *testing.T) { + err := SetDurabilityPolicy("none", nil) + require.NoError(t, err) + + promoteRule := PromotionRule(&topodatapb.Tablet{ + Type: topodatapb.TabletType_PRIMARY, + }) + assert.Equal(t, promotionrule.Neutral, promoteRule) + + promoteRule = PromotionRule(&topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + }) + assert.Equal(t, promotionrule.Neutral, promoteRule) + + promoteRule = PromotionRule(&topodatapb.Tablet{ + Type: topodatapb.TabletType_RDONLY, + }) + assert.Equal(t, promotionrule.MustNot, promoteRule) + + promoteRule = PromotionRule(&topodatapb.Tablet{ + Type: topodatapb.TabletType_SPARE, + }) + assert.Equal(t, promotionrule.MustNot, promoteRule) + assert.Equal(t, 0, SemiSyncAckers(nil)) + assert.Equal(t, false, ReplicaSemiSync(nil, nil)) +} + +func TestDurabilitySemiSync(t *testing.T) { + err := SetDurabilityPolicy("semi_sync", nil) + require.NoError(t, err) + + promoteRule := PromotionRule(&topodatapb.Tablet{ + Type: topodatapb.TabletType_PRIMARY, + }) + assert.Equal(t, promotionrule.Neutral, promoteRule) + + promoteRule = PromotionRule(&topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + }) + assert.Equal(t, promotionrule.Neutral, promoteRule) + + promoteRule = PromotionRule(&topodatapb.Tablet{ + Type: topodatapb.TabletType_RDONLY, + }) + assert.Equal(t, promotionrule.MustNot, promoteRule) + + promoteRule = PromotionRule(&topodatapb.Tablet{ + Type: topodatapb.TabletType_SPARE, + }) + assert.Equal(t, promotionrule.MustNot, promoteRule) + assert.Equal(t, 1, SemiSyncAckers(nil)) + assert.Equal(t, true, ReplicaSemiSync(nil, &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + })) + assert.Equal(t, false, ReplicaSemiSync(nil, &topodatapb.Tablet{ + Type: topodatapb.TabletType_EXPERIMENTAL, + })) +} + +func TestDurabilityCrossCell(t *testing.T) { + err := SetDurabilityPolicy("cross_cell", nil) + require.NoError(t, err) + + promoteRule := PromotionRule(&topodatapb.Tablet{ + Type: topodatapb.TabletType_PRIMARY, + }) + assert.Equal(t, promotionrule.Neutral, promoteRule) + + promoteRule = PromotionRule(&topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + }) + assert.Equal(t, promotionrule.Neutral, promoteRule) + + promoteRule = PromotionRule(&topodatapb.Tablet{ + Type: topodatapb.TabletType_RDONLY, + }) + assert.Equal(t, promotionrule.MustNot, promoteRule) + + promoteRule = PromotionRule(&topodatapb.Tablet{ + Type: topodatapb.TabletType_SPARE, + }) + assert.Equal(t, promotionrule.MustNot, promoteRule) + assert.Equal(t, 1, SemiSyncAckers(nil)) + assert.Equal(t, false, ReplicaSemiSync(&topodatapb.Tablet{ + Type: topodatapb.TabletType_PRIMARY, + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + }, + }, &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + }, + })) + assert.Equal(t, true, ReplicaSemiSync(&topodatapb.Tablet{ + Type: topodatapb.TabletType_PRIMARY, + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + }, + }, &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "cell2", + }, + })) + assert.Equal(t, false, ReplicaSemiSync(&topodatapb.Tablet{ + Type: topodatapb.TabletType_PRIMARY, + Alias: 
&topodatapb.TabletAlias{ + Cell: "cell1", + }, + }, &topodatapb.Tablet{ + Type: topodatapb.TabletType_EXPERIMENTAL, + Alias: &topodatapb.TabletAlias{ + Cell: "cell2", + }, + })) +} + +func TestError(t *testing.T) { + err := SetDurabilityPolicy("unknown", nil) + assert.EqualError(t, err, "durability policy unknown not found") +} + +func TestDurabilitySpecified(t *testing.T) { + cellName := "cell" + durabilityRules := newDurabilitySpecified( + map[string]string{ + "cell-0000000000": string(promotionrule.Must), + "cell-0000000001": string(promotionrule.Prefer), + "cell-0000000002": string(promotionrule.Neutral), + "cell-0000000003": string(promotionrule.PreferNot), + "cell-0000000004": string(promotionrule.MustNot), + }) + + testcases := []struct { + tablet *topodatapb.Tablet + promotionRule promotionrule.CandidatePromotionRule + }{ + { + tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: cellName, + Uid: 0, + }, + }, + promotionRule: promotionrule.MustNot, + }, { + tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: cellName, + Uid: 1, + }, + }, + promotionRule: promotionrule.Prefer, + }, { + tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: cellName, + Uid: 2, + }, + }, + promotionRule: promotionrule.Neutral, + }, { + tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: cellName, + Uid: 3, + }, + }, + promotionRule: promotionrule.PreferNot, + }, { + tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: cellName, + Uid: 4, + }, + }, + promotionRule: promotionrule.MustNot, + }, + } + + for _, testcase := range testcases { + t.Run(topoproto.TabletAliasString(testcase.tablet.Alias), func(t *testing.T) { + rule := durabilityRules.promotionRule(testcase.tablet) + assert.Equal(t, testcase.promotionRule, rule) + }) + } +} diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter.go b/go/vt/vtctl/reparentutil/emergency_reparenter.go index ae2f15e8241..bcbaefe7e11 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter.go @@ -28,11 +28,13 @@ import ( "vitess.io/vitess/go/event" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools/events" + "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -53,16 +55,23 @@ type EmergencyReparenter struct { // EmergencyReparentShard operations. Options are passed by value, so it is safe // for callers to mutate and reuse options structs for multiple calls. type EmergencyReparentOptions struct { - NewPrimaryAlias *topodatapb.TabletAlias - IgnoreReplicas sets.String - WaitReplicasTimeout time.Duration + NewPrimaryAlias *topodatapb.TabletAlias + IgnoreReplicas sets.String + WaitReplicasTimeout time.Duration + PreventCrossCellPromotion bool // Private options managed internally. We use value passing to avoid leaking // these details back out. 
- lockAction string } +// counters for Emergency Reparent Shard +var ( + ersCounter = stats.NewGauge("ers_counter", "Number of times Emergency Reparent Shard has been run") + ersSuccessCounter = stats.NewGauge("ers_success_counter", "Number of times Emergency Reparent Shard has succeeded") + ersFailureCounter = stats.NewGauge("ers_failure_counter", "Number of times Emergency Reparent Shard has failed") +) + // NewEmergencyReparenter returns a new EmergencyReparenter object, ready to // perform EmergencyReparentShard operations using the given topo.Server, // TabletManagerClient, and logger. @@ -87,21 +96,23 @@ func NewEmergencyReparenter(ts *topo.Server, tmc tmclient.TabletManagerClient, l // ReparentShard performs the EmergencyReparentShard operation on the given // keyspace and shard. func (erp *EmergencyReparenter) ReparentShard(ctx context.Context, keyspace string, shard string, opts EmergencyReparentOptions) (*events.Reparent, error) { + // First step is to lock the shard for the given operation opts.lockAction = erp.getLockAction(opts.NewPrimaryAlias) - ctx, unlock, err := erp.ts.LockShard(ctx, keyspace, shard, opts.lockAction) if err != nil { return nil, err } - defer unlock(&err) + // dispatch success or failure of ERS ev := &events.Reparent{} defer func() { switch err { case nil: + ersSuccessCounter.Add(1) event.DispatchUpdate(ev, "finished EmergencyReparentShard") default: + ersFailureCounter.Add(1) event.DispatchUpdate(ev, "failed EmergencyReparentShard: "+err.Error()) } }() @@ -121,35 +132,320 @@ func (erp *EmergencyReparenter) getLockAction(newPrimaryAlias *topodatapb.Tablet return action } -func (erp *EmergencyReparenter) promoteNewPrimary( +// reparentShardLocked performs Emergency Reparent Shard operation assuming that the shard is already locked +func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *events.Reparent, keyspace, shard string, opts EmergencyReparentOptions) (err error) { + // log the starting of the operation and increment the counter + erp.logger.Infof("will initiate emergency reparent shard in keyspace - %s, shard - %s", keyspace, shard) + ersCounter.Add(1) + + var ( + shardInfo *topo.ShardInfo + prevPrimary *topodatapb.Tablet + tabletMap map[string]*topo.TabletInfo + statusMap map[string]*replicationdatapb.StopReplicationStatus + primaryStatusMap map[string]*replicationdatapb.PrimaryStatus + validCandidates map[string]mysql.Position + intermediateSource *topodatapb.Tablet + validCandidateTablets []*topodatapb.Tablet + validReplacementCandidates []*topodatapb.Tablet + betterCandidate *topodatapb.Tablet + isIdeal bool + ) + + shardInfo, err = erp.ts.GetShard(ctx, keyspace, shard) + if err != nil { + return err + } + ev.ShardInfo = *shardInfo + + // get the previous primary according to the topology server, + // we use this information to choose the best candidate in the same cell + // and to undo promotion in case of failure + if shardInfo.PrimaryAlias != nil { + prevPrimaryInfo, err := erp.ts.GetTablet(ctx, shardInfo.PrimaryAlias) + if err != nil { + return err + } + prevPrimary = prevPrimaryInfo.Tablet + } + + // read all the tablets and their information + event.DispatchUpdate(ev, "reading all tablets") + tabletMap, err = erp.ts.GetTabletMapForShard(ctx, keyspace, shard) + if err != nil { + return vterrors.Wrapf(err, "failed to get tablet map for %v/%v: %v", keyspace, shard, err) + } + + // Stop replication on all the tablets and build their status map + statusMap, primaryStatusMap, err = StopReplicationAndBuildStatusMaps(ctx, 
erp.tmc, ev, tabletMap, opts.WaitReplicasTimeout, opts.IgnoreReplicas, erp.logger) + if err != nil { + return vterrors.Wrapf(err, "failed to stop replication and build status maps: %v", err) + } + + // check that we still have the shard lock. If we don't, then we can terminate at this point + if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { + return vterrors.Wrapf(err, "lost topology lock, aborting: %v", err) + } + + // find the valid candidates for becoming the primary + // this is where we check for errant GTIDs and remove the tablets that have them from consideration + validCandidates, err = FindValidEmergencyReparentCandidates(statusMap, primaryStatusMap) + if err != nil { + return err + } + // Restrict the valid candidates list. We remove any tablet which is of the type DRAINED, RESTORE or BACKUP. + validCandidates, err = restrictValidCandidates(validCandidates, tabletMap) + if err != nil { + return err + } else if len(validCandidates) == 0 { + return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no valid candidates for emergency reparent") + } + + // Wait for all candidates to apply relay logs + if err = erp.waitForAllRelayLogsToApply(ctx, validCandidates, tabletMap, statusMap, opts.WaitReplicasTimeout); err != nil { + return err + } + + // Find the intermediate source for replication that we want other tablets to replicate from. + // This step chooses the most advanced tablet. Further ties are broken by using the promotion rule. + // In case the user has specified a tablet specifically, then it is selected, as long as it is the most advanced. + // Here we also check for split brain scenarios and verify that the selected replica is at least as advanced as all the other valid candidates. + // We fail in case there is a split brain detected. + intermediateSource, validCandidateTablets, err = erp.findMostAdvanced(validCandidates, tabletMap, opts) + if err != nil { + return err + } + erp.logger.Infof("intermediate source selected - %v", intermediateSource.Alias) + + // check whether the intermediate source candidate selected is ideal or if it can be improved later + isIdeal, err = erp.intermediateSourceIsIdeal(intermediateSource, prevPrimary, validCandidateTablets, tabletMap, opts) + if err != nil { + return err + } + erp.logger.Infof("intermediate source is ideal candidate - %v", isIdeal) + + // Check (again) we still have the topology lock. + if err = topo.CheckShardLocked(ctx, keyspace, shard); err != nil { + return vterrors.Wrapf(err, "lost topology lock, aborting: %v", err) + } + + // initialize the newPrimary with the intermediate source; we override this value if it is not the ideal candidate + newPrimary := intermediateSource + if !isIdeal { + // we now reparent all the tablets to start replicating from the intermediate source + // we do not promote the tablet or change the shard record. We only change the replication for all the other tablets + // it also returns the list of the tablets that started replication successfully, including itself. These are the candidates that we can use to find a replacement + validReplacementCandidates, err = erp.promoteIntermediateSource(ctx, ev, intermediateSource, tabletMap, statusMap, opts) + if err != nil { + return err + } + + // try to find a better candidate using the list we got back + // We prefer to choose a candidate which is in the same cell as our previous primary and of the best possible durability rule.
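+ // In effect, identifyPrimaryCandidate walks the preferred (Must/Prefer) candidates first and then the Neutral ones, restricting the search to the previous primary's cell when PreventCrossCellPromotion is set, and falling back to the intermediate source if nothing better is found.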
+ // However, if there is an explicit request from the user to promote a specific tablet, then we choose that tablet. + betterCandidate, err = erp.identifyPrimaryCandidate(intermediateSource, prevPrimary, validReplacementCandidates, tabletMap, opts) + if err != nil { + return err + } + + // if our better candidate is different from our intermediate source, then we wait for it to catch up to the intermediate source + if !topoproto.TabletAliasEqual(betterCandidate.Alias, intermediateSource.Alias) { + err = waitForCatchUp(ctx, erp.tmc, erp.logger, betterCandidate, intermediateSource, opts.WaitReplicasTimeout) + if err != nil { + return err + } + newPrimary = betterCandidate + } + } + + // now we check if all the constraints are satisfied. If they are not, then we should exit + constraintFailure := erp.checkIfConstraintsSatisfied(newPrimary, prevPrimary, opts) + if constraintFailure != nil { + erp.logger.Errorf("have to override promotion because of constraint failure - %v", constraintFailure) + // we want to send both errors to the user: the constraint failure and any error encountered in undoing the promotion + defer func() { + if err != nil { + err = vterrors.Errorf(vtrpc.Code_ABORTED, "error in undoing promotion - %v, constraint failure - %v", err, constraintFailure) + } else { + err = constraintFailure + } + }() + // we now try to undo our changes. We can do so by promoting the previous primary instead of the new one we selected + if prevPrimary == nil { + return vterrors.Errorf(vtrpc.Code_ABORTED, "could not undo promotion, since shard record has no primary information") + } + newPrimary = prevPrimary + } + + // Final step is to promote our primary candidate + err = erp.promoteNewPrimary(ctx, ev, newPrimary, opts, tabletMap, statusMap) + if err != nil { + return err + } + + ev.NewPrimary = proto.Clone(newPrimary).(*topodatapb.Tablet) + return err +} + +func (erp *EmergencyReparenter) waitForAllRelayLogsToApply( ctx context.Context, - ev *events.Reparent, - keyspace string, - shard string, - newPrimaryTabletAlias string, + validCandidates map[string]mysql.Position, tabletMap map[string]*topo.TabletInfo, statusMap map[string]*replicationdatapb.StopReplicationStatus, - opts EmergencyReparentOptions, + waitReplicasTimeout time.Duration, ) error { - erp.logger.Infof("promoting tablet %v to primary", newPrimaryTabletAlias) - event.DispatchUpdate(ev, "promoting replica") + errCh := make(chan error) + defer close(errCh) + + groupCtx, groupCancel := context.WithTimeout(ctx, waitReplicasTimeout) + defer groupCancel() + + waiterCount := 0 + + for candidate := range validCandidates { + // When we called StopReplicationAndBuildStatusMaps, we got back two + // maps: (1) the StopReplicationStatus of any replicas that actually + // stopped replication; and (2) the MasterStatus of anything that + // returned ErrNotReplica, which is a tablet that is either the current + // primary or is stuck thinking it is a PRIMARY but is not in actuality. + // + // If we have a tablet in the validCandidates map that does not appear + // in the statusMap, then we have either (a) the current primary, which + // is not replicating, so it is not applying relay logs; or (b) a tablet + // that is stuck thinking it is PRIMARY but is not in actuality. In that + // second case - (b) - we will most likely find that the stuck PRIMARY + // does not have a winning position, and fail the ERS.
If, on the other + // hand, it does have a winning position, we are trusting the operator + // to know what they are doing by emergency-reparenting onto that + // tablet. In either case, it does not make sense to wait for relay logs + // to apply on a tablet that was never applying relay logs in the first + // place, so we skip it, and log that we did. + status, ok := statusMap[candidate] + if !ok { + erp.logger.Infof("EmergencyReparent candidate %v not in replica status map; this means it was not running replication (because it was formerly PRIMARY), so skipping WaitForRelayLogsToApply step for this candidate", candidate) + continue + } + + go func(alias string, status *replicationdatapb.StopReplicationStatus) { + var err error + defer func() { errCh <- err }() + err = WaitForRelayLogsToApply(groupCtx, erp.tmc, tabletMap[alias], status) + }(candidate, status) + + waiterCount++ + } + + errgroup := concurrency.ErrorGroup{ + NumGoroutines: waiterCount, + NumRequiredSuccesses: waiterCount, + NumAllowedErrors: 0, + } + rec := errgroup.Wait(groupCancel, errCh) + + if len(rec.Errors) != 0 { + return vterrors.Wrapf(rec.Error(), "could not apply all relay logs within the provided waitReplicasTimeout (%s): %v", waitReplicasTimeout, rec.Error()) + } + + return nil +} - newPrimaryTabletInfo, ok := tabletMap[newPrimaryTabletAlias] - if !ok { - return vterrors.Errorf(vtrpc.Code_INTERNAL, "attempted to promote primary-elect %v that was not in the tablet map; this an impossible situation", newPrimaryTabletAlias) +// findMostAdvanced finds the intermediate source for ERS. We always choose the most advanced one from our valid candidates list. Further ties are broken by looking at the promotion rules. +func (erp *EmergencyReparenter) findMostAdvanced( + validCandidates map[string]mysql.Position, + tabletMap map[string]*topo.TabletInfo, + opts EmergencyReparentOptions, +) (*topodatapb.Tablet, []*topodatapb.Tablet, error) { + erp.logger.Infof("started finding the intermediate source") + // convert the valid candidates into a list so that we can use it for sorting + validTablets, tabletPositions, err := getValidCandidatesAndPositionsAsList(validCandidates, tabletMap) + if err != nil { + return nil, nil, err } - rp, err := erp.tmc.PromoteReplica(ctx, newPrimaryTabletInfo.Tablet) + // sort the tablets for finding the best intermediate source in ERS + err = sortTabletsForERS(validTablets, tabletPositions) if err != nil { - return vterrors.Wrapf(err, "primary-elect tablet %v failed to be upgraded to primary: %v", newPrimaryTabletAlias, err) + return nil, nil, err + } + for _, tablet := range validTablets { + erp.logger.Infof("finding intermediate source - sorted replica: %v", tablet.Alias) } - if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { - return vterrors.Wrapf(err, "lost topology lock, aborting: %v", err) + // The first tablet in the sorted list will be the most eligible candidate unless explicitly asked for some other tablet + winningPrimaryTablet := validTablets[0] + winningPosition := tabletPositions[0] + + // We have already removed the tablets with errant GTIDs before calling this function. At this point our winning position must be a + // superset of all the other valid positions. 
If that is not the case, then we have a split brain scenario, and we should cancel the ERS + for i, position := range tabletPositions { + if !winningPosition.AtLeast(position) { + return nil, nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "split brain detected between servers - %v and %v", winningPrimaryTablet.Alias, validTablets[i].Alias) + } + } + + // If we were requested to elect a particular primary, verify it's a valid + // candidate (non-zero position, no errant GTIDs) + if opts.NewPrimaryAlias != nil { + requestedPrimaryAlias := topoproto.TabletAliasString(opts.NewPrimaryAlias) + pos, ok := validCandidates[requestedPrimaryAlias] + if !ok { + return nil, nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "requested primary elect %v has errant GTIDs", requestedPrimaryAlias) + } + // if the requested tablet is as advanced as the most advanced tablet, then we can just use it for promotion. + // otherwise, we should let it catchup to the most advanced tablet and not change the intermediate source + if pos.AtLeast(winningPosition) { + requestedPrimaryInfo, isFound := tabletMap[requestedPrimaryAlias] + if !isFound { + return nil, nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "candidate %v not found in the tablet map; this an impossible situation", requestedPrimaryAlias) + } + winningPrimaryTablet = requestedPrimaryInfo.Tablet + } } + return winningPrimaryTablet, validTablets, nil +} + +// promoteIntermediateSource reparents all the other tablets to start replicating from the intermediate source. +// It does not promote this tablet to a primary instance, we only let other replicas start replicating from this tablet +func (erp *EmergencyReparenter) promoteIntermediateSource( + ctx context.Context, + ev *events.Reparent, + source *topodatapb.Tablet, + tabletMap map[string]*topo.TabletInfo, + statusMap map[string]*replicationdatapb.StopReplicationStatus, + opts EmergencyReparentOptions, +) ([]*topodatapb.Tablet, error) { + // we reparent all the other tablets to start replication from our new source + // we wait for all the replicas so that we can choose a better candidate from the ones that started replication later + validCandidatesForImprovement, err := erp.reparentReplicas(ctx, ev, source, tabletMap, statusMap, opts, true /* waitForAllReplicas */, false /* populateReparentJournal */) + if err != nil { + return nil, err + } + + // also include the current tablet for being considered as part of valid candidates for ERS promotion + validCandidatesForImprovement = append(validCandidatesForImprovement, source) + return validCandidatesForImprovement, nil +} + +// reparentReplicas reparents all the replicas provided and populates the reparent journal on the primary if asked. 
+// Also, it returns the replicas which started replicating only in the case where we wait for all the replicas +func (erp *EmergencyReparenter) reparentReplicas( + ctx context.Context, + ev *events.Reparent, + newPrimaryTablet *topodatapb.Tablet, + tabletMap map[string]*topo.TabletInfo, + statusMap map[string]*replicationdatapb.StopReplicationStatus, + opts EmergencyReparentOptions, + waitForAllReplicas bool, + populateReparentJournal bool, +) ([]*topodatapb.Tablet, error) { + + var ( + replicasStartedReplication []*topodatapb.Tablet + replicaMutex sync.Mutex + ) + replCtx, replCancel := context.WithTimeout(ctx, opts.WaitReplicasTimeout) - defer replCancel() event.DispatchUpdate(ev, "reparenting all tablets") @@ -170,9 +466,16 @@ func (erp *EmergencyReparenter) promoteNewPrimary( replWg := sync.WaitGroup{} rec := concurrency.AllErrorRecorder{} - handlePrimary := func(alias string, ti *topo.TabletInfo) error { - erp.logger.Infof("populating reparent journal on new primary %v", alias) - return erp.tmc.PopulateReparentJournal(replCtx, ti.Tablet, now, opts.lockAction, newPrimaryTabletInfo.Alias, rp) + handlePrimary := func(alias string, tablet *topodatapb.Tablet) error { + position, err := erp.tmc.MasterPosition(replCtx, tablet) + if err != nil { + return err + } + if populateReparentJournal { + erp.logger.Infof("populating reparent journal on new primary %v", alias) + return erp.tmc.PopulateReparentJournal(replCtx, tablet, now, opts.lockAction, newPrimaryTablet.Alias, position) + } + return nil } handleReplica := func(alias string, ti *topo.TabletInfo) { @@ -192,23 +495,30 @@ func (erp *EmergencyReparenter) promoteNewPrimary( forceStart = fs } - err := erp.tmc.SetMaster(replCtx, ti.Tablet, newPrimaryTabletInfo.Alias, now, "", forceStart) + err := erp.tmc.SetMaster(replCtx, ti.Tablet, newPrimaryTablet.Alias, 0, "", forceStart) if err != nil { - err = vterrors.Wrapf(err, "tablet %v SetMaster failed: %v", alias, err) + err = vterrors.Wrapf(err, "tablet %v SetReplicationSource failed: %v", alias, err) rec.RecordError(err) return } - // Signal that at least one goroutine succeeded to SetMaster. - replSuccessCancel() + replicaMutex.Lock() + replicasStartedReplication = append(replicasStartedReplication, ti.Tablet) + replicaMutex.Unlock() + + // Signal that at least one goroutine succeeded to SetReplicationSource. + // We do this only when we do not want to wait for all the replicas + if !waitForAllReplicas { + replSuccessCancel() + } } numReplicas := 0 for alias, ti := range tabletMap { switch { - case alias == newPrimaryTabletAlias: + case alias == topoproto.TabletAliasString(newPrimaryTablet.Alias): continue case !opts.IgnoreReplicas.Has(alias): replWg.Add(1) @@ -218,7 +528,7 @@ func (erp *EmergencyReparenter) promoteNewPrimary( } // Spin up a background goroutine to wait until all replica goroutines - // finished. Polling this way allows us to have promoteNewPrimary return + // finished. Polling this way allows us to have reparentReplicas return // success as soon as (a) the primary successfully populates its reparent // journal and (b) at least one replica successfully begins replicating. 
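The early return described above hinges on two derived contexts: one cancelled on the first successful SetMaster (only when waitForAllReplicas is false), and one cancelled once every replica goroutine has finished. A minimal, self-contained sketch of that pattern follows; the names and timings are illustrative, not code from this PR:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

func main() {
	// replSuccessCtx analogue: cancelled as soon as one worker succeeds.
	successCtx, successCancel := context.WithCancel(context.Background())
	// allReplicasDoneCtx analogue: cancelled once every worker has finished.
	allDoneCtx, allDoneCancel := context.WithCancel(context.Background())

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(d time.Duration) {
			defer wg.Done()
			time.Sleep(d)   // stand-in for the SetMaster RPC
			successCancel() // signal: at least one replica is replicating
		}(time.Duration(i+1) * 10 * time.Millisecond)
	}

	// Mirror the replWg.Wait() goroutine: only cancel the shared context
	// after every worker is done, so stragglers are not cut off early.
	go func() {
		wg.Wait()
		allDoneCancel()
	}()

	select {
	case <-successCtx.Done():
		fmt.Println("early return: at least one replica succeeded")
	case <-allDoneCtx.Done():
		fmt.Println("all replicas finished: inspect recorded errors")
	}
}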
// @@ -231,18 +541,28 @@ func (erp *EmergencyReparenter) promoteNewPrimary( allReplicasDoneCancel() }() - primaryErr := handlePrimary(newPrimaryTabletAlias, newPrimaryTabletInfo) + primaryErr := handlePrimary(topoproto.TabletAliasString(newPrimaryTablet.Alias), newPrimaryTablet) if primaryErr != nil { erp.logger.Warningf("primary failed to PopulateReparentJournal") replCancel() - return vterrors.Wrapf(primaryErr, "failed to PopulateReparentJournal on primary: %v", primaryErr) + return nil, vterrors.Wrapf(primaryErr, "failed to PopulateReparentJournal on primary: %v", primaryErr) } + // We should only cancel the context that all the replicas are using when they are done. + // Since this function can return early when only 1 replica succeeds, if we cancel this context as a deferred call from this function, + // then we would end up having cancelled the context for the replicas who have not yet finished running all the commands. + // This leads to some replicas not starting replication properly. So we must wait for all the replicas to finish before cancelling this context. + go func() { + replWg.Wait() + defer replCancel() + }() + select { case <-replSuccessCtx.Done(): // At least one replica was able to SetMaster successfully - return nil + // Here we do not need to return the replicas which started replicating + return nil, nil case <-allReplicasDoneCtx.Done(): // There are certain timing issues between replSuccessCtx.Done firing // and allReplicasDoneCtx.Done firing, so we check again if truly all @@ -255,150 +575,150 @@ func (erp *EmergencyReparenter) promoteNewPrimary( // Technically, rec.Errors should never be greater than numReplicas, // but it's better to err on the side of caution here, but also // we're going to be explicit that this is doubly unexpected. 
- return vterrors.Wrapf(rec.Error(), "received more errors (= %d) than replicas (= %d), which should be impossible: %v", errCount, numReplicas, rec.Error()) + return nil, vterrors.Wrapf(rec.Error(), "received more errors (= %d) than replicas (= %d), which should be impossible: %v", errCount, numReplicas, rec.Error()) case errCount == numReplicas: - return vterrors.Wrapf(rec.Error(), "%d replica(s) failed: %v", numReplicas, rec.Error()) + return nil, vterrors.Wrapf(rec.Error(), "%d replica(s) failed: %v", numReplicas, rec.Error()) default: - return nil + return replicasStartedReplication, nil } } -} - -func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *events.Reparent, keyspace string, shard string, opts EmergencyReparentOptions) error { - shardInfo, err := erp.ts.GetShard(ctx, keyspace, shard) - if err != nil { - return err - } - - ev.ShardInfo = *shardInfo - - event.DispatchUpdate(ev, "reading all tablets") - tabletMap, err := erp.ts.GetTabletMapForShard(ctx, keyspace, shard) - if err != nil { - return vterrors.Wrapf(err, "failed to get tablet map for %v/%v: %v", keyspace, shard, err) - } +} - statusMap, primaryStatusMap, err := StopReplicationAndBuildStatusMaps(ctx, erp.tmc, ev, tabletMap, opts.WaitReplicasTimeout, opts.IgnoreReplicas, erp.logger) +// intermediateSourceIsIdeal is used to find whether the intermediate source that ERS chose is also the ideal one or not +func (erp *EmergencyReparenter) intermediateSourceIsIdeal( + intermediateSource *topodatapb.Tablet, + prevPrimary *topodatapb.Tablet, + validCandidates []*topodatapb.Tablet, + tabletMap map[string]*topo.TabletInfo, + opts EmergencyReparentOptions, +) (bool, error) { + // we try to find a better candidate with the current list of valid candidates, and if it matches our current primary candidate, then we return true + candidate, err := erp.identifyPrimaryCandidate(intermediateSource, prevPrimary, validCandidates, tabletMap, opts) if err != nil { - return vterrors.Wrapf(err, "failed to stop replication and build status maps: %v", err) - } - - if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { - return vterrors.Wrapf(err, "lost topology lock, aborting: %v", err) + return false, err } + return candidate == intermediateSource, nil +} - validCandidates, err := FindValidEmergencyReparentCandidates(statusMap, primaryStatusMap) - if err != nil { - return err - } else if len(validCandidates) == 0 { - return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no valid candidates for emergency reparent") - } +// identifyPrimaryCandidate is used to find the final candidate for ERS promotion +func (erp *EmergencyReparenter) identifyPrimaryCandidate( + intermediateSource *topodatapb.Tablet, + prevPrimary *topodatapb.Tablet, + validCandidates []*topodatapb.Tablet, + tabletMap map[string]*topo.TabletInfo, + opts EmergencyReparentOptions, +) (candidate *topodatapb.Tablet, err error) { + defer func() { + if candidate != nil { + erp.logger.Infof("found better candidate - %v", candidate.Alias) + } + }() - // Wait for all candidates to apply relay logs - if err := erp.waitForAllRelayLogsToApply(ctx, validCandidates, tabletMap, statusMap, opts); err != nil { - return err + if opts.NewPrimaryAlias != nil { + // explicit request to promote a specific tablet + requestedPrimaryAlias := topoproto.TabletAliasString(opts.NewPrimaryAlias) + requestedPrimaryInfo, isFound := tabletMap[requestedPrimaryAlias] + if !isFound { + return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "candidate %v not found in the tablet map; 
this an impossible situation", requestedPrimaryAlias) + } + for _, validCandidate := range validCandidates { + if topoproto.TabletAliasEqual(validCandidate.Alias, opts.NewPrimaryAlias) { + return requestedPrimaryInfo.Tablet, nil + } + } + return nil, vterrors.Errorf(vtrpc.Code_ABORTED, "requested candidate %v is not in valid candidates list", requestedPrimaryAlias) } - - // Elect the candidate with the most up-to-date position. var ( - winningPosition mysql.Position - winningPrimaryTabletAliasStr string + preferredCandidates []*topodatapb.Tablet + neutralReplicas []*topodatapb.Tablet ) - - for alias, position := range validCandidates { - if winningPosition.IsZero() || position.AtLeast(winningPosition) { - winningPosition = position - winningPrimaryTabletAliasStr = alias + for _, candidate := range validCandidates { + promotionRule := PromotionRule(candidate) + if promotionRule == promotionrule.Must || promotionRule == promotionrule.Prefer { + preferredCandidates = append(preferredCandidates, candidate) + } + if promotionRule == promotionrule.Neutral { + neutralReplicas = append(neutralReplicas, candidate) } } - // If we were requested to elect a particular primary, verify it's a valid - // candidate (non-zero position, no errant GTIDs) and is at least as - // advanced as the winning position. - if opts.NewPrimaryAlias != nil { - winningPrimaryTabletAliasStr = topoproto.TabletAliasString(opts.NewPrimaryAlias) - pos, ok := validCandidates[winningPrimaryTabletAliasStr] - switch { - case !ok: - return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary elect %v has errant GTIDs", winningPrimaryTabletAliasStr) - case !pos.AtLeast(winningPosition): - return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary elect %v at position %v is not fully caught up. Winning position: %v", winningPrimaryTabletAliasStr, pos, winningPosition) + // So we already have an intermediate source. What if our intermediate source was a rdonly? + // So we will try to improve our candidate selection. + // Are there any replicas with better promotion rules? + // Maybe we actually promoted such a replica. Does that mean we should keep it? + // Maybe we promoted a "neutral", and some "prefer" server is available. + // Maybe we promoted a "prefer_not" + // Maybe we promoted a server in a different cell than the primary + // There's many options. We may wish to replace the server we promoted with a better one. + + // If the user requested for prevention of cross cell promotion then we should only search for valid candidates in the same cell + // otherwise we can search in any cell + if opts.PreventCrossCellPromotion { + // find candidates in the same cell from the preferred candidates list + candidate = findCandidateSameCell(intermediateSource, prevPrimary, preferredCandidates) + if candidate != nil { + return candidate, nil + } + // we do not have a preferred candidate in the same cell + } else { + // find candidates in any cell from the preferred candidates list + candidate = findCandidateAnyCell(intermediateSource, preferredCandidates) + if candidate != nil { + return candidate, nil } } - // Check (again) we still have the topology lock. 
- if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { - return vterrors.Wrapf(err, "lost topology lock, aborting: %v", err) + // repeat the same process for the neutral candidates list + if opts.PreventCrossCellPromotion { + // find candidates in the same cell from the neutral candidates list + candidate = findCandidateSameCell(intermediateSource, prevPrimary, neutralReplicas) + if candidate != nil { + return candidate, nil + } + // we do not have a neutral candidate in the same cell + } else { + // find candidates in any cell from the neutral candidates list + candidate = findCandidateAnyCell(intermediateSource, neutralReplicas) + if candidate != nil { + return candidate, nil + } } - // Do the promotion. - if err := erp.promoteNewPrimary(ctx, ev, keyspace, shard, winningPrimaryTabletAliasStr, tabletMap, statusMap, opts); err != nil { - return err - } + // return the one that we have if nothing is found + return intermediateSource, nil +} - ev.NewPrimary = proto.Clone(tabletMap[winningPrimaryTabletAliasStr].Tablet).(*topodatapb.Tablet) +// checkIfConstraintsSatisfied is used to check whether the constraints for ERS are satisfied or not. +func (erp *EmergencyReparenter) checkIfConstraintsSatisfied(newPrimary, prevPrimary *topodatapb.Tablet, opts EmergencyReparentOptions) error { + if opts.PreventCrossCellPromotion && prevPrimary != nil && newPrimary.Alias.Cell != prevPrimary.Alias.Cell { + return vterrors.Errorf(vtrpc.Code_ABORTED, "elected primary does not satisfy geographic constraint - %s", topoproto.TabletAliasString(newPrimary.Alias)) + } + if PromotionRule(newPrimary) == promotionrule.MustNot { + return vterrors.Errorf(vtrpc.Code_ABORTED, "elected primary does not satisfy promotion rule constraint - %s", topoproto.TabletAliasString(newPrimary.Alias)) + } return nil } -func (erp *EmergencyReparenter) waitForAllRelayLogsToApply( +func (erp *EmergencyReparenter) promoteNewPrimary( ctx context.Context, - validCandidates map[string]mysql.Position, + ev *events.Reparent, + newPrimary *topodatapb.Tablet, + opts EmergencyReparentOptions, tabletMap map[string]*topo.TabletInfo, statusMap map[string]*replicationdatapb.StopReplicationStatus, - opts EmergencyReparentOptions, ) error { - errCh := make(chan error) - defer close(errCh) - - groupCtx, groupCancel := context.WithTimeout(ctx, opts.WaitReplicasTimeout) - defer groupCancel() - - waiterCount := 0 - - for candidate := range validCandidates { - // When we called StopReplicationAndBuildStatusMaps, we got back two - // maps: (1) the StopReplicationStatus of any replicas that actually - // stopped replication; and (2) the MasterStatus of anything that - // returned ErrNotReplica, which is a tablet that is either the current - // primary or is stuck thinking it is a PRIMARY but is not in actuality. - // - // If we have a tablet in the validCandidates map that does not appear - // in the statusMap, then we have either (a) the current primary, which - // is not replicating, so it is not applying relay logs; or (b) a tablet - // that is stuck thinking it is PRIMARY but is not in actuality. In that - // second case - (b) - we will most likely find that the stuck PRIMARY - // does not have a winning position, and fail the ERS. If, on the other - // hand, it does have a winning position, we are trusting the operator - // to know what they are doing by emergency-reparenting onto that - // tablet. 
In either case, it does not make sense to wait for relay logs - to apply on a tablet that was never applying relay logs in the first - place, so we skip it, and log that we did. - status, ok := statusMap[candidate] - if !ok { - erp.logger.Infof("EmergencyReparent candidate %v not in replica status map; this means it was not running replication (because it was formerly PRIMARY), so skipping WaitForRelayLogsToApply step for this candidate", candidate) - continue - } - - go func(alias string, status *replicationdatapb.StopReplicationStatus) { - var err error - defer func() { errCh <- err }() - err = WaitForRelayLogsToApply(groupCtx, erp.tmc, tabletMap[alias], status) - }(candidate, status) - - waiterCount++ - } - - errgroup := concurrency.ErrorGroup{ - NumGoroutines: waiterCount, - NumRequiredSuccesses: waiterCount, - NumAllowedErrors: 0, + erp.logger.Infof("starting promotion for the new primary - %v", newPrimary.Alias) + // we call PromoteReplica which changes the tablet type, fixes the semi-sync, sets the primary to read-write and flushes the binlogs + _, err := erp.tmc.PromoteReplica(ctx, newPrimary) + if err != nil { + return vterrors.Wrapf(err, "primary-elect tablet %v failed to be upgraded to primary: %v", newPrimary.Alias, err) } - rec := errgroup.Wait(groupCancel, errCh) - - if len(rec.Errors) != 0 { - return vterrors.Wrapf(rec.Error(), "could not apply all relay logs within the provided WaitReplicasTimeout (%s): %v", opts.WaitReplicasTimeout, rec.Error()) + // we now reparent all the replicas to the new primary we have promoted. + // Here we do not need to wait for all the replicas; we can finish early when even one succeeds. + _, err = erp.reparentReplicas(ctx, ev, newPrimary, tabletMap, statusMap, opts, false /* waitForAllReplicas */, true /* populateReparentJournal */) + if err != nil { + return err } - return nil } diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go index cea43ab129b..af254126094 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go @@ -18,6 +18,7 @@ package reparentutil import ( "context" + "fmt" "testing" "time" @@ -29,6 +30,8 @@ import ( "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools/events" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" @@ -110,23 +113,23 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { t.Parallel() tests := []struct { - name string + name string + emergencyReparentOps EmergencyReparentOptions + tmc *testutil.TabletManagerClient // setup ts *topo.Server - tmc *testutil.TabletManagerClient + keyspace string + shard string unlockTopo bool shards []*vtctldatapb.Shard tablets []*topodatapb.Tablet - // params - keyspace string - shard string - opts EmergencyReparentOptions // results - shouldErr bool + shouldErr bool + errShouldContain string }{ { - name: "success", - ts: memorytopo.NewServer("zone1"), + name: "success", + emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000102": nil, @@ -140,6 +143,14 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Error: nil, }, }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000102": { + Error: nil, + }, + }, SetMasterResults:
map[string]error{ "zone1-0000000100": nil, "zone1-0000000101": nil, @@ -224,18 +235,29 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - opts: EmergencyReparentOptions{}, + ts: memorytopo.NewServer("zone1"), shouldErr: false, }, { // Here, all our tablets are tied, so we're going to explicitly pick // zone1-101. name: "success with requested primary-elect", - ts: memorytopo.NewServer("zone1"), + emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000101": nil, }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000101": { + Error: nil, + }, + }, PromoteReplicaResults: map[string]struct { Result string Error error @@ -326,19 +348,14 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Shard: "-", }, }, - keyspace: "testkeyspace", - shard: "-", - opts: EmergencyReparentOptions{ - NewPrimaryAlias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 101, - }, - }, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), shouldErr: false, }, { - name: "success with existing primary", - ts: memorytopo.NewServer("zone1"), + name: "success with existing primary", + emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ DemoteMasterResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -350,6 +367,14 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, }, }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000102": { + Error: nil, + }, + }, PopulateReparentJournalResults: map[string]error{ "zone1-0000000102": nil, }, @@ -406,6 +431,12 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { { Keyspace: "testkeyspace", Name: "-", + Shard: &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, }, }, tablets: []*topodatapb.Tablet{ @@ -438,23 +469,24 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - opts: EmergencyReparentOptions{}, + ts: memorytopo.NewServer("zone1"), shouldErr: false, }, { - name: "shard not found", - ts: memorytopo.NewServer("zone1"), - tmc: &testutil.TabletManagerClient{}, - unlockTopo: true, // we shouldn't try to lock the nonexistent shard - shards: nil, - keyspace: "testkeyspace", - shard: "-", - opts: EmergencyReparentOptions{}, - shouldErr: true, + name: "shard not found", + emergencyReparentOps: EmergencyReparentOptions{}, + tmc: &testutil.TabletManagerClient{}, + unlockTopo: true, // we shouldn't try to lock the nonexistent shard + shards: nil, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: true, + errShouldContain: "node doesn't exist: keyspaces/testkeyspace/shards/-/Shard", }, { - name: "cannot stop replication", - ts: memorytopo.NewServer("zone1"), + name: "cannot stop replication", + emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ StopReplicationAndGetStatusResults: map[string]struct { Status *replicationdatapb.Status @@ -505,14 +537,15 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Shard: "-", }, }, - keyspace: "testkeyspace", - shard: "-", - opts: EmergencyReparentOptions{}, - shouldErr: true, + keyspace: "testkeyspace", + shard: "-", + 
ts: memorytopo.NewServer("zone1"), + shouldErr: true, + errShouldContain: "failed to stop replication and build status maps", }, { - name: "lost topo lock", - ts: memorytopo.NewServer("zone1"), + name: "lost topo lock", + emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ StopReplicationAndGetStatusResults: map[string]struct { Status *replicationdatapb.Status @@ -563,14 +596,15 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Shard: "-", }, }, - keyspace: "testkeyspace", - shard: "-", - opts: EmergencyReparentOptions{}, - shouldErr: true, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: true, + errShouldContain: "lost topology lock, aborting", }, { - name: "cannot get reparent candidates", - ts: memorytopo.NewServer("zone1"), + name: "cannot get reparent candidates", + emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ StopReplicationAndGetStatusResults: map[string]struct { Status *replicationdatapb.Status @@ -633,29 +667,34 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Hostname: "has a zero relay log position", }, }, - keyspace: "testkeyspace", - shard: "-", - opts: EmergencyReparentOptions{}, - shouldErr: true, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: true, + errShouldContain: "encountered tablet zone1-0000000102 with no relay log position", }, { - name: "zero valid reparent candidates", - ts: memorytopo.NewServer("zone1"), - tmc: &testutil.TabletManagerClient{}, + name: "zero valid reparent candidates", + emergencyReparentOps: EmergencyReparentOptions{}, + tmc: &testutil.TabletManagerClient{}, shards: []*vtctldatapb.Shard{ { Keyspace: "testkeyspace", Name: "-", }, }, - keyspace: "testkeyspace", - shard: "-", - opts: EmergencyReparentOptions{}, - shouldErr: true, + shouldErr: true, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + errShouldContain: "no valid candidates for emergency reparent", }, { name: "error waiting for relay logs to apply", - ts: memorytopo.NewServer("zone1"), + // one replica is going to take a minute to apply relay logs + emergencyReparentOps: EmergencyReparentOptions{ + WaitReplicasTimeout: time.Millisecond * 50, + }, tmc: &testutil.TabletManagerClient{ StopReplicationAndGetStatusResults: map[string]struct { Status *replicationdatapb.Status @@ -736,16 +775,18 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Hostname: "fails to apply relay logs", }, }, - keyspace: "testkeyspace", - shard: "-", - opts: EmergencyReparentOptions{ - WaitReplicasTimeout: time.Millisecond * 50, // one replica is going to take a minute to apply relay logs - }, - shouldErr: true, + shouldErr: true, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + errShouldContain: "could not apply all relay logs within the provided waitReplicasTimeout", }, { name: "requested primary-elect is not in tablet map", - ts: memorytopo.NewServer("zone1"), + emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }}, tmc: &testutil.TabletManagerClient{ StopReplicationAndGetStatusResults: map[string]struct { Status *replicationdatapb.Status @@ -821,20 +862,51 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Shard: "-", }, }, - keyspace: "testkeyspace", - shard: "-", - opts: EmergencyReparentOptions{ - NewPrimaryAlias: &topodatapb.TabletAlias{ - Cell: 
"zone1", - Uid: 200, - }, - }, - shouldErr: true, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: true, + errShouldContain: "primary elect zone1-0000000200 has errant GTIDs", }, { name: "requested primary-elect is not winning primary-elect", - ts: memorytopo.NewServer("zone1"), + emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ // we're requesting a tablet that's behind in replication + Cell: "zone1", + Uid: 102, + }}, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000102": nil, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000102": { + Result: "ok", + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000101": nil, + "zone1-0000000102": nil, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + Error: nil, + }, + "zone1-0000000102": { + Error: nil, + }, + }, StopReplicationAndGetStatusResults: map[string]struct { Status *replicationdatapb.Status StopStatus *replicationdatapb.StopReplicationStatus @@ -842,6 +914,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -850,14 +923,16 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", - RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-20", }, }, }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-20", @@ -870,10 +945,11 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, }, "zone1-0000000101": { - "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-20": nil, }, "zone1-0000000102": { "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-20": nil, + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, }, }, }, @@ -910,19 +986,14 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Hostname: "not most up-to-date position", }, }, - keyspace: "testkeyspace", - shard: "-", - opts: EmergencyReparentOptions{ - NewPrimaryAlias: &topodatapb.TabletAlias{ // we're requesting a tablet that's behind in replication - Cell: "zone1", - Uid: 102, - }, - }, - shouldErr: true, + shouldErr: false, }, { name: "cannot promote new primary", - ts: memorytopo.NewServer("zone1"), + emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }}, tmc: &testutil.TabletManagerClient{ PromoteReplicaResults: map[string]struct { Result 
string @@ -932,6 +1003,17 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Error: assert.AnError, }, }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000102": { + Error: nil, + }, + }, + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000102": nil, + }, StopReplicationAndGetStatusResults: map[string]struct { Status *replicationdatapb.Status StopStatus *replicationdatapb.StopReplicationStatus @@ -939,6 +1021,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -947,6 +1030,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -955,6 +1039,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -973,6 +1058,11 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, }, }, + SetMasterResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000101": nil, + "zone1-0000000102": nil, + }, }, shards: []*vtctldatapb.Shard{ { @@ -1007,213 +1097,336 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Hostname: "not most up-to-date position", }, }, - keyspace: "testkeyspace", - shard: "-", - opts: EmergencyReparentOptions{ - // We're explicitly requesting a primary-elect in this test case - // because we don't care about the correctness of the selection - // code (it's covered by other test cases), and it simplifies - // the error mocking. - NewPrimaryAlias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 102, - }, - }, - shouldErr: true, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: true, + errShouldContain: "failed to be upgraded to primary", }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - logger := logutil.NewMemoryLogger() - ev := &events.Reparent{} - - testutil.AddShards(ctx, t, tt.ts, tt.shards...) - testutil.AddTablets(ctx, t, tt.ts, nil, tt.tablets...) 
- - if !tt.unlockTopo { - lctx, unlock, lerr := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") - require.NoError(t, lerr, "could not lock %s/%s for testing", tt.keyspace, tt.shard) - - defer func() { - unlock(&lerr) - require.NoError(t, lerr, "could not unlock %s/%s after test", tt.keyspace, tt.shard) - }() - - ctx = lctx // make the reparentShardLocked call use the lock ctx - } - - erp := NewEmergencyReparenter(tt.ts, tt.tmc, logger) - - err := erp.reparentShardLocked(ctx, ev, tt.keyspace, tt.shard, tt.opts) - if tt.shouldErr { - assert.Error(t, err) - return - } - - assert.NoError(t, err) - }) - } -} - -func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - ts *topo.Server - tmc *testutil.TabletManagerClient - unlockTopo bool - keyspace string - shard string - newPrimaryTabletAlias string - tabletMap map[string]*topo.TabletInfo - statusMap map[string]*replicationdatapb.StopReplicationStatus - opts EmergencyReparentOptions - shouldErr bool - }{ { - name: "success", - ts: memorytopo.NewServer("zone1"), + name: "constraint failure - promotion-rule", + emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ - "zone1-0000000100": nil, + "zone1-0000000102": nil, }, PromoteReplicaResults: map[string]struct { Result string Error error }{ - "zone1-0000000100": { + "zone1-0000000102": { + Result: "ok", + Error: nil, + }, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000102": { Error: nil, }, }, SetMasterResults: map[string]error{ + "zone1-0000000100": nil, "zone1-0000000101": nil, - "zone1-0000000102": nil, - "zone1-0000000404": assert.AnError, // okay, because we're ignoring it. 
}, - }, - keyspace: "testkeyspace", - shard: "-", - newPrimaryTabletAlias: "zone1-0000000100", - tabletMap: map[string]*topo.TabletInfo{ - "zone1-0000000100": { - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 100, + StopReplicationAndGetStatusResults: map[string]struct { + Status *replicationdatapb.Status + StopStatus *replicationdatapb.StopReplicationStatus + Error error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, }, - Hostname: "primary-elect", }, - }, - "zone1-0000000101": { - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 101, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, }, }, - }, - "zone1-0000000102": { - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 102, + "zone1-0000000102": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", + }, }, - Hostname: "requires force start", }, }, - "zone1-0000000404": { - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 404, - }, - Hostname: "ignored tablet", + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000100": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000101": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000102": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26": nil, }, }, }, - statusMap: map[string]*replicationdatapb.StopReplicationStatus{ - "zone1-0000000101": { // forceStart = false - Before: &replicationdatapb.Status{ - IoThreadRunning: false, - SqlThreadRunning: false, + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, }, + Type: topodatapb.TabletType_RDONLY, + Keyspace: "testkeyspace", + Shard: "-", }, - "zone1-0000000102": { // forceStart = true - Before: &replicationdatapb.Status{ - IoThreadRunning: true, - SqlThreadRunning: true, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, }, + Type: topodatapb.TabletType_RDONLY, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_RDONLY, + Keyspace: "testkeyspace", + Shard: "-", + Hostname: "most up-to-date position, wins election", }, }, - opts: EmergencyReparentOptions{ - IgnoreReplicas: sets.NewString("zone1-0000000404"), - }, - shouldErr: false, - }, - { - name: "primary not in tablet map", - ts: memorytopo.NewServer("zone1"), - tmc: &testutil.TabletManagerClient{}, - keyspace: "testkeyspace", - shard: "-", - newPrimaryTabletAlias: "zone2-0000000200", - tabletMap: map[string]*topo.TabletInfo{ - "zone1-0000000100": {}, - "zone1-0000000101": {}, - }, - statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, 
- opts: EmergencyReparentOptions{}, - shouldErr: true, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: true, + errShouldContain: "elected primary does not satisfy promotion rule constraint", }, { - name: "PromoteReplica error", - ts: memorytopo.NewServer("zone1"), + name: "constraint failure - cross-cell", + emergencyReparentOps: EmergencyReparentOptions{PreventCrossCellPromotion: true}, tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000102": nil, + }, PromoteReplicaResults: map[string]struct { Result string Error error + }{ + "zone1-0000000102": { + Result: "ok", + Error: nil, + }, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000102": { + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000101": nil, + }, + StopReplicationAndGetStatusResults: map[string]struct { + Status *replicationdatapb.Status + StopStatus *replicationdatapb.StopReplicationStatus + Error error }{ "zone1-0000000100": { - Error: assert.AnError, + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000102": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", + }, + }, + }, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000100": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000101": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000102": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26": nil, }, }, }, - keyspace: "testkeyspace", - shard: "-", - newPrimaryTabletAlias: "zone1-0000000100", - tabletMap: map[string]*topo.TabletInfo{ - "zone1-0000000100": { - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", + shards: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + Shard: &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone2", Uid: 100, }, }, }, - "zone1-0000000101": { - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 101, - }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Keyspace: "testkeyspace", + Shard: "-", + Hostname: "most up-to-date position, wins election", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 100, }, + Keyspace: "testkeyspace", + Shard: "-", + Hostname: "failed previous primary", }, }, - statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, - opts: 
EmergencyReparentOptions{}, - shouldErr: true, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1", "zone2"), + shouldErr: true, + errShouldContain: "elected primary does not satisfy geographic constraint", }, + } + + _ = SetDurabilityPolicy("none", nil) + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + logger := logutil.NewMemoryLogger() + ev := &events.Reparent{} + + for i, tablet := range tt.tablets { + if tablet.Type == topodatapb.TabletType_UNKNOWN { + tablet.Type = topodatapb.TabletType_REPLICA + } + tt.tablets[i] = tablet + } + + testutil.AddShards(ctx, t, tt.ts, tt.shards...) + testutil.AddTablets(ctx, t, tt.ts, nil, tt.tablets...) + + if !tt.unlockTopo { + lctx, unlock, lerr := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + require.NoError(t, lerr, "could not lock %s/%s for testing", tt.keyspace, tt.shard) + + defer func() { + unlock(&lerr) + require.NoError(t, lerr, "could not unlock %s/%s after test", tt.keyspace, tt.shard) + }() + + ctx = lctx // make the reparentShardLocked call use the lock ctx + } + + erp := NewEmergencyReparenter(tt.ts, tt.tmc, logger) + + err := erp.reparentShardLocked(ctx, ev, tt.keyspace, tt.shard, tt.emergencyReparentOps) + if tt.shouldErr { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.errShouldContain) + return + } + + assert.NoError(t, err) + }) + } +} + +func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + emergencyReparentOps EmergencyReparentOptions + tmc *testutil.TabletManagerClient + unlockTopo bool + newPrimaryTabletAlias string + ts *topo.Server + keyspace string + shard string + tablets []*topodatapb.Tablet + tabletMap map[string]*topo.TabletInfo + statusMap map[string]*replicationdatapb.StopReplicationStatus + shouldErr bool + errShouldContain string + }{ { - name: "lost topology lock", - ts: memorytopo.NewServer("zone1"), + name: "success", + emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.NewString("zone1-0000000404")}, tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, PromoteReplicaResults: map[string]struct { Result string Error error @@ -1222,10 +1435,12 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { Error: nil, }, }, + SetMasterResults: map[string]error{ + "zone1-0000000101": nil, + "zone1-0000000102": nil, + "zone1-0000000404": assert.AnError, // okay, because we're ignoring it. 
+ }, }, - unlockTopo: true, - keyspace: "testkeyspace", - shard: "-", newPrimaryTabletAlias: "zone1-0000000100", tabletMap: map[string]*topo.TabletInfo{ "zone1-0000000100": { @@ -1234,6 +1449,7 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { Cell: "zone1", Uid: 100, }, + Hostname: "primary-elect", }, }, "zone1-0000000101": { @@ -1244,59 +1460,156 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { }, }, }, - }, - statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, - opts: EmergencyReparentOptions{}, - shouldErr: true, - }, - { - name: "cannot repopulate reparent journal on new primary", - ts: memorytopo.NewServer("zone1"), - tmc: &testutil.TabletManagerClient{ - PopulateReparentJournalResults: map[string]error{ - "zone1-0000000100": assert.AnError, - }, - PromoteReplicaResults: map[string]struct { - Result string - Error error - }{ - "zone1-0000000100": { - Error: nil, - }, - }, - }, - keyspace: "testkeyspace", - shard: "-", - newPrimaryTabletAlias: "zone1-0000000100", - tabletMap: map[string]*topo.TabletInfo{ - "zone1-0000000100": { + "zone1-0000000102": { Tablet: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "zone1", - Uid: 100, + Uid: 102, }, + Hostname: "requires force start", }, }, - "zone1-0000000101": { + "zone1-0000000404": { Tablet: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "zone1", - Uid: 101, + Uid: 404, }, + Hostname: "ignored tablet", }, }, }, - statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, - opts: EmergencyReparentOptions{}, - shouldErr: true, - }, - { - name: "all replicas failing to SetMaster does fail the promotion", - ts: memorytopo.NewServer("zone1"), + statusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000101": { // forceStart = false + Before: &replicationdatapb.Status{ + IoThreadRunning: false, + SqlThreadRunning: false, + }, + }, + "zone1-0000000102": { // forceStart = true + Before: &replicationdatapb.Status{ + IoThreadRunning: true, + SqlThreadRunning: true, + }, + }, + }, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: false, + }, + { + name: "MasterPosition error", + emergencyReparentOps: EmergencyReparentOptions{}, + tmc: &testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: fmt.Errorf("primary position error"), + }, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000100": { + Error: fmt.Errorf("primary position error"), + }, + }, + }, + newPrimaryTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: true, + errShouldContain: "primary position error", + }, + { + name: "cannot repopulate reparent journal on new primary", + emergencyReparentOps: EmergencyReparentOptions{}, + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": assert.AnError, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: nil, + 
}, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, + }, + newPrimaryTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: true, + errShouldContain: "failed to PopulateReparentJournal on primary", + }, + { + name: "all replicas failing to SetMaster does fail the promotion", + emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, PromoteReplicaResults: map[string]struct { Result string Error error @@ -1305,14 +1618,13 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { Error: nil, }, }, + SetMasterResults: map[string]error{ // everyone fails, we all fail "zone1-0000000101": assert.AnError, "zone1-0000000102": assert.AnError, }, }, - keyspace: "testkeyspace", - shard: "-", newPrimaryTabletAlias: "zone1-0000000100", tabletMap: map[string]*topo.TabletInfo{ "zone1-0000000100": { @@ -1340,17 +1652,28 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { }, }, }, - statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, - opts: EmergencyReparentOptions{}, - shouldErr: true, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: true, + errShouldContain: " replica(s) failed", }, { - name: "all replicas slow to SetMaster does fail the promotion", - ts: memorytopo.NewServer("zone1"), + name: "all replicas slow to SetMaster does fail the promotion", + emergencyReparentOps: EmergencyReparentOptions{WaitReplicasTimeout: time.Millisecond * 10}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, PromoteReplicaResults: map[string]struct { Result string Error error @@ -1359,6 +1682,7 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { Error: nil, }, }, + SetMasterDelays: map[string]time.Duration{ // nothing is failing, we're just slow "zone1-0000000101": time.Millisecond * 100, @@ -1369,8 +1693,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { "zone1-0000000102": nil, }, }, - keyspace: "testkeyspace", - shard: "-", newPrimaryTabletAlias: "zone1-0000000100", tabletMap: map[string]*topo.TabletInfo{ "zone1-0000000100": { @@ -1398,19 +1720,28 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { }, }, }, - statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, - opts: EmergencyReparentOptions{ - WaitReplicasTimeout: time.Millisecond * 10, - }, - shouldErr: true, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + shouldErr: true, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + errShouldContain: "context deadline 
exceeded", }, { - name: "one replica failing to SetMaster does not fail the promotion", - ts: memorytopo.NewServer("zone1"), + name: "one replica failing to SetMaster does not fail the promotion", + emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, PromoteReplicaResults: map[string]struct { Result string Error error @@ -1419,13 +1750,12 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { Error: nil, }, }, + SetMasterResults: map[string]error{ "zone1-0000000101": nil, // this one succeeds, so we're good "zone1-0000000102": assert.AnError, }, }, - keyspace: "testkeyspace", - shard: "-", newPrimaryTabletAlias: "zone1-0000000100", tabletMap: map[string]*topo.TabletInfo{ "zone1-0000000100": { @@ -1454,6 +1784,9 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { }, }, statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), shouldErr: false, }, } @@ -1487,12 +1820,13 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { require.NoError(t, lerr, "could not unlock %s/%s after test", tt.keyspace, tt.shard) }() } + tabletInfo := tt.tabletMap[tt.newPrimaryTabletAlias] erp := NewEmergencyReparenter(tt.ts, tt.tmc, logger) - - err := erp.promoteNewPrimary(ctx, ev, tt.keyspace, tt.shard, tt.newPrimaryTabletAlias, tt.tabletMap, tt.statusMap, tt.opts) + err := erp.promoteNewPrimary(ctx, ev, tabletInfo.Tablet, tt.emergencyReparentOps, tt.tabletMap, tt.statusMap) if tt.shouldErr { assert.Error(t, err) + assert.Contains(t, err.Error(), tt.errShouldContain) return } @@ -1506,9 +1840,7 @@ func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { ctx := context.Background() logger := logutil.NewMemoryLogger() - opts := EmergencyReparentOptions{ - WaitReplicasTimeout: time.Millisecond * 50, - } + waitReplicasTimeout := 50 * time.Millisecond tests := []struct { name string tmc *testutil.TabletManagerClient @@ -1738,7 +2070,7 @@ func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { t.Parallel() erp := NewEmergencyReparenter(nil, tt.tmc, logger) - err := erp.waitForAllRelayLogsToApply(ctx, tt.candidates, tt.tabletMap, tt.statusMap, opts) + err := erp.waitForAllRelayLogsToApply(ctx, tt.candidates, tt.tabletMap, tt.statusMap, waitReplicasTimeout) if tt.shouldErr { assert.Error(t, err) return @@ -1748,3 +2080,1554 @@ func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { }) } } + +func TestEmergencyReparenterCounters(t *testing.T) { + ersCounter.Set(0) + ersSuccessCounter.Set(0) + ersFailureCounter.Set(0) + _ = SetDurabilityPolicy("none", nil) + + emergencyReparentOps := EmergencyReparentOptions{} + tmc := &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000102": nil, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000102": { + Result: "ok", + Error: nil, + }, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000102": { + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000101": nil, + }, + StopReplicationAndGetStatusResults: map[string]struct { + Status *replicationdatapb.Status + StopStatus 
*replicationdatapb.StopReplicationStatus + Error error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", + }, + }, + }, + "zone1-0000000102": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{}, + After: &replicationdatapb.Status{ + SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", + RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", + }, + }, + }, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000100": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000101": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21": nil, + }, + "zone1-0000000102": { + "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26": nil, + }, + }, + } + shards := []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + } + tablets := []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Keyspace: "testkeyspace", + Shard: "-", + Hostname: "most up-to-date position, wins election", + }, + } + keyspace := "testkeyspace" + shard := "-" + ts := memorytopo.NewServer("zone1") + + ctx := context.Background() + logger := logutil.NewMemoryLogger() + + for i, tablet := range tablets { + tablet.Type = topodatapb.TabletType_REPLICA + tablets[i] = tablet + } + + testutil.AddShards(ctx, t, ts, shards...) + testutil.AddTablets(ctx, t, ts, nil, tablets...) 
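+ // zone1-0000000102 has the most advanced relay log position in the mock above, so it should win the election; + // every RPC it needs succeeds, so the first ReparentShard call below should count one attempt and one success, + // while the second call, which requests a primary in a nonexistent cell, should count one attempt and one failure.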
+ + erp := NewEmergencyReparenter(ts, tmc, logger) + + // run a successful ERS + _, err := erp.ReparentShard(ctx, keyspace, shard, emergencyReparentOps) + require.NoError(t, err) + + // check the counter values + require.EqualValues(t, 1, ersCounter.Get()) + require.EqualValues(t, 1, ersSuccessCounter.Get()) + require.EqualValues(t, 0, ersFailureCounter.Get()) + + // set emergencyReparentOps to request a nonexistent tablet + emergencyReparentOps.NewPrimaryAlias = &topodatapb.TabletAlias{ + Cell: "bogus", + Uid: 100, + } + + // run a failing ERS + _, err = erp.ReparentShard(ctx, keyspace, shard, emergencyReparentOps) + require.Error(t, err) + + // check the counter values + require.EqualValues(t, 2, ersCounter.Get()) + require.EqualValues(t, 1, ersSuccessCounter.Get()) + require.EqualValues(t, 1, ersFailureCounter.Get()) +} + +func TestEmergencyReparenter_findMostAdvanced(t *testing.T) { + sid1 := mysql.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + mysqlGTID1 := mysql.Mysql56GTID{ + Server: sid1, + Sequence: 9, + } + mysqlGTID2 := mysql.Mysql56GTID{ + Server: sid1, + Sequence: 10, + } + mysqlGTID3 := mysql.Mysql56GTID{ + Server: sid1, + Sequence: 11, + } + + positionMostAdvanced := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID1) + positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID2) + positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID3) + + positionIntermediate1 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate1.GTIDSet = positionIntermediate1.GTIDSet.AddGTID(mysqlGTID1) + + positionIntermediate2 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID1) + positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID2) + + positionOnly2 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionOnly2.GTIDSet = positionOnly2.GTIDSet.AddGTID(mysqlGTID2) + + positionEmpty := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + + tests := []struct { + name string + validCandidates map[string]mysql.Position + tabletMap map[string]*topo.TabletInfo + emergencyReparentOps EmergencyReparentOptions + result *topodatapb.Tablet + err string + }{ + { + name: "choose most advanced", + validCandidates: map[string]mysql.Position{ + "zone1-0000000100": positionMostAdvanced, + "zone1-0000000101": positionIntermediate1, + "zone1-0000000102": positionIntermediate2, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + "zone1-0000000404": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 404, + }, + Hostname: "ignored tablet", + }, + }, + }, + result: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, { + name: "choose most advanced with the best promotion rule", + validCandidates: map[string]mysql.Position{ + "zone1-0000000100": positionMostAdvanced, + "zone1-0000000101": positionIntermediate1, + "zone1-0000000102": positionMostAdvanced, + }, + tabletMap: 
map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_RDONLY, + }, + }, + "zone1-0000000404": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 404, + }, + Hostname: "ignored tablet", + }, + }, + }, + result: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, { + name: "choose most advanced with explicit request", + emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }}, + validCandidates: map[string]mysql.Position{ + "zone1-0000000100": positionMostAdvanced, + "zone1-0000000101": positionIntermediate1, + "zone1-0000000102": positionMostAdvanced, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_RDONLY, + }, + }, + "zone1-0000000404": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 404, + }, + Hostname: "ignored tablet", + }, + }, + }, + result: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, { + name: "split brain detection", + emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }}, + validCandidates: map[string]mysql.Position{ + "zone1-0000000100": positionOnly2, + "zone1-0000000101": positionIntermediate1, + "zone1-0000000102": positionEmpty, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_RDONLY, + }, + }, + "zone1-0000000404": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 404, + }, + Hostname: "ignored tablet", + }, + }, + }, + err: "split brain detected between servers", + }, + } + + _ = SetDurabilityPolicy("none", nil) + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + erp := NewEmergencyReparenter(nil, nil, logutil.NewMemoryLogger()) + + winningTablet, _, err := erp.findMostAdvanced(test.validCandidates, test.tabletMap, test.emergencyReparentOps) + if test.err != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), test.err) + } else { + assert.NoError(t, err) + assert.True(t, topoproto.TabletAliasEqual(test.result.Alias, winningTablet.Alias)) + } + }) + } +} + +func 
TestEmergencyReparenter_checkIfConstraintsSatisfied(t *testing.T) { + testcases := []struct { + name string + newPrimary, prevPrimary *topodatapb.Tablet + opts EmergencyReparentOptions + err string + }{ + { + name: "no constraint failure", + newPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + }, + Type: topodatapb.TabletType_REPLICA, + }, + prevPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + }, + }, + opts: EmergencyReparentOptions{PreventCrossCellPromotion: true}, + err: "", + }, { + name: "promotion rule constraint failure", + newPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + }, + prevPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + }, + }, + opts: EmergencyReparentOptions{PreventCrossCellPromotion: true}, + err: "elected primary does not satisfy promotion rule constraint - cell1-0000000100", + }, { + name: "cross cell constraint failure", + newPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + }, + prevPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell2", + }, + }, + opts: EmergencyReparentOptions{PreventCrossCellPromotion: true}, + err: "elected primary does not satisfy geographic constraint - cell1-0000000100", + }, { + name: "cross cell but no constraint failure", + newPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + }, + prevPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell2", + }, + }, + opts: EmergencyReparentOptions{PreventCrossCellPromotion: false}, + err: "", + }, + } + + _ = SetDurabilityPolicy("none", nil) + erp := NewEmergencyReparenter(nil, nil, nil) + + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + err := erp.checkIfConstraintsSatisfied(testcase.newPrimary, testcase.prevPrimary, testcase.opts) + if testcase.err == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, testcase.err) + } + }) + } +} + +func TestEmergencyReparenter_reparentReplicas(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + emergencyReparentOps EmergencyReparentOptions + tmc *testutil.TabletManagerClient + unlockTopo bool + newPrimaryTabletAlias string + ts *topo.Server + keyspace string + shard string + tablets []*topodatapb.Tablet + tabletMap map[string]*topo.TabletInfo + statusMap map[string]*replicationdatapb.StopReplicationStatus + shouldErr bool + errShouldContain string + }{ + { + name: "success", + emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.NewString("zone1-0000000404")}, + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000101": nil, + "zone1-0000000102": nil, + "zone1-0000000404": assert.AnError, // okay, because we're ignoring it. 
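+ // (it is ignored because IgnoreReplicas in emergencyReparentOps contains zone1-0000000404)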
+ }, + }, + newPrimaryTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "primary-elect", + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Hostname: "requires force start", + }, + }, + "zone1-0000000404": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 404, + }, + Hostname: "ignored tablet", + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000101": { // forceStart = false + Before: &replicationdatapb.Status{ + IoThreadRunning: false, + SqlThreadRunning: false, + }, + }, + "zone1-0000000102": { // forceStart = true + Before: &replicationdatapb.Status{ + IoThreadRunning: true, + SqlThreadRunning: true, + }, + }, + }, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: false, + }, + { + name: "MasterPosition error", + emergencyReparentOps: EmergencyReparentOptions{}, + tmc: &testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: fmt.Errorf("primary position error"), + }, + }, + }, + newPrimaryTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: true, + errShouldContain: "primary position error", + }, + { + name: "cannot repopulate reparent journal on new primary", + emergencyReparentOps: EmergencyReparentOptions{}, + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": assert.AnError, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, + }, + newPrimaryTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: true, + errShouldContain: "failed to PopulateReparentJournal on primary", + }, + { + name: "all replicas failing to SetMaster does fail the promotion", + emergencyReparentOps: EmergencyReparentOptions{}, + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, + + SetMasterResults: map[string]error{ + // everyone fails, we all fail + "zone1-0000000101": assert.AnError, + "zone1-0000000102": 
assert.AnError, + }, + }, + newPrimaryTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: true, + errShouldContain: " replica(s) failed", + }, + { + name: "all replicas slow to SetMaster does fail the promotion", + emergencyReparentOps: EmergencyReparentOptions{WaitReplicasTimeout: time.Millisecond * 10}, + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, + SetMasterDelays: map[string]time.Duration{ + // nothing is failing, we're just slow + "zone1-0000000101": time.Millisecond * 100, + "zone1-0000000102": time.Millisecond * 75, + }, + SetMasterResults: map[string]error{ + "zone1-0000000101": nil, + "zone1-0000000102": nil, + }, + }, + newPrimaryTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + shouldErr: true, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + errShouldContain: "context deadline exceeded", + }, + { + name: "one replica failing to SetMaster does not fail the promotion", + emergencyReparentOps: EmergencyReparentOptions{}, + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000101": nil, // this one succeeds, so we're good + "zone1-0000000102": assert.AnError, + }, + }, + newPrimaryTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: false, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + logger := logutil.NewMemoryLogger() + ev := &events.Reparent{} + + testutil.AddShards(ctx, t, 
tt.ts, &vtctldatapb.Shard{ + Keyspace: tt.keyspace, + Name: tt.shard, + }) + + if !tt.unlockTopo { + var ( + unlock func(*error) + lerr error + ) + + ctx, unlock, lerr = tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + require.NoError(t, lerr, "could not lock %s/%s for test", tt.keyspace, tt.shard) + + defer func() { + unlock(&lerr) + require.NoError(t, lerr, "could not unlock %s/%s after test", tt.keyspace, tt.shard) + }() + } + tabletInfo := tt.tabletMap[tt.newPrimaryTabletAlias] + + erp := NewEmergencyReparenter(tt.ts, tt.tmc, logger) + _, err := erp.reparentReplicas(ctx, ev, tabletInfo.Tablet, tt.tabletMap, tt.statusMap, tt.emergencyReparentOps, false /* waitForAllReplicas */, true /* populateReparentJournal */) + if tt.shouldErr { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.errShouldContain) + return + } + + assert.NoError(t, err) + }) + } +} + +func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + emergencyReparentOps EmergencyReparentOptions + tmc *testutil.TabletManagerClient + unlockTopo bool + newSourceTabletAlias string + ts *topo.Server + keyspace string + shard string + tablets []*topodatapb.Tablet + tabletMap map[string]*topo.TabletInfo + statusMap map[string]*replicationdatapb.StopReplicationStatus + shouldErr bool + errShouldContain string + result []*topodatapb.Tablet + }{ + { + name: "success", + emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.NewString("zone1-0000000404")}, + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000101": nil, + "zone1-0000000102": nil, + "zone1-0000000404": assert.AnError, // okay, because we're ignoring it. 
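+ // the failure is tolerated because zone1-0000000404 is listed in IgnoreReplicas above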
+ }, + }, + newSourceTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "primary-elect", + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Hostname: "requires force start", + }, + }, + "zone1-0000000404": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 404, + }, + Hostname: "ignored tablet", + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000101": { // forceStart = false + Before: &replicationdatapb.Status{ + IoThreadRunning: false, + SqlThreadRunning: false, + }, + }, + "zone1-0000000102": { // forceStart = true + Before: &replicationdatapb.Status{ + IoThreadRunning: true, + SqlThreadRunning: true, + }, + }, + }, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: false, + result: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "primary-elect", + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Hostname: "requires force start", + }, + }, + }, + { + name: "all replicas failed", + emergencyReparentOps: EmergencyReparentOptions{}, + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, + + SetMasterResults: map[string]error{ + // everyone fails, we all fail + "zone1-0000000101": assert.AnError, + "zone1-0000000102": assert.AnError, + }, + }, + newSourceTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: true, + errShouldContain: " replica(s) failed", + }, + { + name: "one replica failed", + emergencyReparentOps: EmergencyReparentOptions{}, + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, + SetMasterResults: map[string]error{ + "zone1-0000000101": nil, // this one succeeds, so we're good + "zone1-0000000102": assert.AnError, + }, + }, + newSourceTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, 
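+ // zone1-0000000102 fails SetMaster (see SetMasterResults above), so it is expected to be absent from the returned reachable replicas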
+ "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + keyspace: "testkeyspace", + shard: "-", + ts: memorytopo.NewServer("zone1"), + shouldErr: false, + result: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + logger := logutil.NewMemoryLogger() + ev := &events.Reparent{} + + testutil.AddShards(ctx, t, tt.ts, &vtctldatapb.Shard{ + Keyspace: tt.keyspace, + Name: tt.shard, + }) + + if !tt.unlockTopo { + var ( + unlock func(*error) + lerr error + ) + + ctx, unlock, lerr = tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + require.NoError(t, lerr, "could not lock %s/%s for test", tt.keyspace, tt.shard) + + defer func() { + unlock(&lerr) + require.NoError(t, lerr, "could not unlock %s/%s after test", tt.keyspace, tt.shard) + }() + } + tabletInfo := tt.tabletMap[tt.newSourceTabletAlias] + + erp := NewEmergencyReparenter(tt.ts, tt.tmc, logger) + res, err := erp.promoteIntermediateSource(ctx, ev, tabletInfo.Tablet, tt.tabletMap, tt.statusMap, tt.emergencyReparentOps) + if tt.shouldErr { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.errShouldContain) + return + } + + assert.NoError(t, err) + assert.ElementsMatch(t, tt.result, res) + }) + } +} + +func TestEmergencyReparenter_identifyPrimaryCandidate(t *testing.T) { + tests := []struct { + name string + emergencyReparentOps EmergencyReparentOptions + intermediateSource *topodatapb.Tablet + prevPrimary *topodatapb.Tablet + validCandidates []*topodatapb.Tablet + tabletMap map[string]*topo.TabletInfo + err string + result *topodatapb.Tablet + }{ + { + name: "explicit request for a primary tablet", + emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }}, + intermediateSource: nil, + prevPrimary: nil, + validCandidates: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + }, + result: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, { + name: "explicit request for a primary tablet not in valid list", + emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }}, + intermediateSource: nil, + prevPrimary: nil, + validCandidates: nil, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + }, + err: "requested candidate zone1-0000000100 is not in valid candidates list", + }, { + name: "explicit request for a primary tablet not in tablet map", + emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }}, + intermediateSource: nil, + prevPrimary: nil, + validCandidates: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{}, + err: "candidate 
zone1-0000000100 not found in the tablet map; this an impossible situation", + }, { + name: "preferred candidate in the same cell same as our replica", + emergencyReparentOps: EmergencyReparentOptions{PreventCrossCellPromotion: true}, + intermediateSource: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + prevPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + }, + }, + validCandidates: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_RDONLY, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 101, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tabletMap: nil, + result: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, { + name: "preferred candidate in the same cell different from original replica", + emergencyReparentOps: EmergencyReparentOptions{PreventCrossCellPromotion: true}, + intermediateSource: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + prevPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + }, + }, + validCandidates: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_RDONLY, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 101, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tabletMap: nil, + result: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, { + name: "preferred candidate in the different cell same as original replica", + emergencyReparentOps: EmergencyReparentOptions{}, + intermediateSource: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 101, + }, + }, + prevPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + }, + }, + validCandidates: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_RDONLY, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 101, + }, + Type: topodatapb.TabletType_PRIMARY, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 102, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tabletMap: nil, + result: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 101, + }, + }, + }, { + name: "preferred candidate in the different cell different from original replica", + emergencyReparentOps: EmergencyReparentOptions{}, + intermediateSource: 
&topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 101, + }, + }, + prevPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + }, + }, + validCandidates: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_RDONLY, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 101, + }, + Type: topodatapb.TabletType_RDONLY, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 102, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tabletMap: nil, + result: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 102, + }, + }, + }, { + name: "prevent cross cell promotion", + emergencyReparentOps: EmergencyReparentOptions{PreventCrossCellPromotion: true}, + intermediateSource: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + prevPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + }, + }, + validCandidates: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_RDONLY, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 101, + }, + Type: topodatapb.TabletType_RDONLY, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone2", + Uid: 102, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tabletMap: nil, + result: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + _ = SetDurabilityPolicy("none", nil) + logger := logutil.NewMemoryLogger() + + erp := NewEmergencyReparenter(nil, nil, logger) + res, err := erp.identifyPrimaryCandidate(test.intermediateSource, test.prevPrimary, test.validCandidates, test.tabletMap, test.emergencyReparentOps) + if test.err != "" { + assert.EqualError(t, err, test.err) + return + } + assert.NoError(t, err) + assert.True(t, topoproto.TabletAliasEqual(res.Alias, test.result.Alias)) + }) + } +} diff --git a/go/vt/vtctl/reparentutil/ers_intermediate_source_sorter.go b/go/vt/vtctl/reparentutil/ers_intermediate_source_sorter.go new file mode 100644 index 00000000000..f625ec7f306 --- /dev/null +++ b/go/vt/vtctl/reparentutil/ers_intermediate_source_sorter.go @@ -0,0 +1,93 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package reparentutil + +import ( + "sort" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/vterrors" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" +) + +// ERSIntermediateSourceSorter sorts tablets by GTID position and promotion rule, aimed at finding the best +// candidate for intermediate promotion in EmergencyReparentShard +type ERSIntermediateSourceSorter struct { + tablets []*topodatapb.Tablet + positions []mysql.Position +} + +// NewERSIntermediateSourceSorter creates a new ERSIntermediateSourceSorter +func NewERSIntermediateSourceSorter(tablets []*topodatapb.Tablet, positions []mysql.Position) *ERSIntermediateSourceSorter { + return &ERSIntermediateSourceSorter{ + tablets: tablets, + positions: positions, + } +} + +// Len implements sort.Interface +func (ersISSorter *ERSIntermediateSourceSorter) Len() int { return len(ersISSorter.tablets) } + +// Swap implements sort.Interface +func (ersISSorter *ERSIntermediateSourceSorter) Swap(i, j int) { + ersISSorter.tablets[i], ersISSorter.tablets[j] = ersISSorter.tablets[j], ersISSorter.tablets[i] + ersISSorter.positions[i], ersISSorter.positions[j] = ersISSorter.positions[j], ersISSorter.positions[i] +} + +// Less implements sort.Interface +func (ersISSorter *ERSIntermediateSourceSorter) Less(i, j int) bool { + // Returning "true" in this function means [i] sorts before [j], + // which makes [i] the better candidate for promotion + + // This should never happen; as a fail-safe, + // nil tablets are sorted towards the end + if ersISSorter.tablets[i] == nil { + return false + } + if ersISSorter.tablets[j] == nil { + return true + } + + if !ersISSorter.positions[i].AtLeast(ersISSorter.positions[j]) { + // [i] does not have all GTIDs that [j] does + return false + } + if !ersISSorter.positions[j].AtLeast(ersISSorter.positions[i]) { + // [j] does not have all GTIDs that [i] does + return true + } + + // at this point, both have the same GTIDs + // so we check their promotion rules + jPromotionRule := PromotionRule(ersISSorter.tablets[j]) + iPromotionRule := PromotionRule(ersISSorter.tablets[i]) + return !jPromotionRule.BetterThan(iPromotionRule) +} + +// sortTabletsForERS sorts the tablets, given their positions, for emergency reparent shard +func sortTabletsForERS(tablets []*topodatapb.Tablet, positions []mysql.Position) error { + // return an internal error if the number of tablets and positions are unequal; + // this fail-safe prevents a panic while sorting when the lengths differ + if len(tablets) != len(positions) { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unequal number of tablets and positions") + } + + sort.Sort(NewERSIntermediateSourceSorter(tablets, positions)) + return nil +} diff --git a/go/vt/vtctl/reparentutil/ers_intermediate_source_sorter_test.go b/go/vt/vtctl/reparentutil/ers_intermediate_source_sorter_test.go new file mode 100644 index 00000000000..9c731371026 --- /dev/null +++ b/go/vt/vtctl/reparentutil/ers_intermediate_source_sorter_test.go @@ -0,0 +1,138 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reparentutil + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +// TestERSIntermediateSourceSorter tests that the sorting for ERS works correctly +func TestERSIntermediateSourceSorter(t *testing.T) { + sid1 := mysql.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + sid2 := mysql.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16} + cell1 := "cell1" + cell2 := "cell2" + tabletReplica1_100 := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: cell1, + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + } + tabletReplica2_100 := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: cell2, + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + } + tabletReplica1_101 := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: cell1, + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + } + tabletRdonly1_102 := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: cell1, + Uid: 102, + }, + Type: topodatapb.TabletType_RDONLY, + } + + mysqlGTID1 := mysql.Mysql56GTID{ + Server: sid1, + Sequence: 9, + } + mysqlGTID2 := mysql.Mysql56GTID{ + Server: sid2, + Sequence: 10, + } + mysqlGTID3 := mysql.Mysql56GTID{ + Server: sid1, + Sequence: 11, + } + + positionMostAdvanced := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID1) + positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID2) + positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID3) + + positionEmpty := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + + positionIntermediate1 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate1.GTIDSet = positionIntermediate1.GTIDSet.AddGTID(mysqlGTID1) + + positionIntermediate2 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID1) + positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID2) + + testcases := []struct { + name string + tablets []*topodatapb.Tablet + positions []mysql.Position + containsErr string + sortedTablets []*topodatapb.Tablet + }{ + { + name: "all advanced, sort via promotion rules", + tablets: []*topodatapb.Tablet{nil, tabletReplica1_100, tabletRdonly1_102}, + positions: []mysql.Position{positionMostAdvanced, positionMostAdvanced, positionMostAdvanced}, + sortedTablets: []*topodatapb.Tablet{tabletReplica1_100, tabletRdonly1_102, nil}, + }, { + name: "ordering by position", + tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100, tabletReplica1_100, tabletRdonly1_102}, + positions: []mysql.Position{positionEmpty, positionIntermediate1, positionIntermediate2, positionMostAdvanced}, + sortedTablets: []*topodatapb.Tablet{tabletRdonly1_102, tabletReplica1_100, tabletReplica2_100, tabletReplica1_101}, + }, { + name: "tablets and positions count error", + tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100}, + positions: []mysql.Position{positionEmpty, positionIntermediate1, positionMostAdvanced}, + containsErr: "unequal number of tablets and positions", + }, { + name: "promotion rule check", + tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletRdonly1_102}, + positions: []mysql.Position{positionMostAdvanced, 
positionMostAdvanced}, + sortedTablets: []*topodatapb.Tablet{tabletReplica1_101, tabletRdonly1_102}, + }, { + name: "mixed", + tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100, tabletReplica1_100, tabletRdonly1_102}, + positions: []mysql.Position{positionEmpty, positionIntermediate1, positionMostAdvanced, positionIntermediate1}, + sortedTablets: []*topodatapb.Tablet{tabletReplica1_100, tabletReplica2_100, tabletRdonly1_102, tabletReplica1_101}, + }, + } + + err := SetDurabilityPolicy("none", nil) + require.NoError(t, err) + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + err := sortTabletsForERS(testcase.tablets, testcase.positions) + if testcase.containsErr != "" { + require.EqualError(t, err, testcase.containsErr) + } else { + require.NoError(t, err) + require.Equal(t, testcase.sortedTablets, testcase.tablets) + } + }) + } +} diff --git a/go/vt/orchestrator/inst/promotion_rule.go b/go/vt/vtctl/reparentutil/promotionrule/promotion_rule.go similarity index 71% rename from go/vt/orchestrator/inst/promotion_rule.go rename to go/vt/vtctl/reparentutil/promotionrule/promotion_rule.go index 8ea89ec4aa9..06f9ad0dec6 100644 --- a/go/vt/orchestrator/inst/promotion_rule.go +++ b/go/vt/vtctl/reparentutil/promotionrule/promotion_rule.go @@ -14,7 +14,7 @@ limitations under the License. */ -package inst +package promotionrule import ( "fmt" @@ -25,19 +25,19 @@ import ( type CandidatePromotionRule string const ( - MustPromoteRule CandidatePromotionRule = "must" - PreferPromoteRule CandidatePromotionRule = "prefer" - NeutralPromoteRule CandidatePromotionRule = "neutral" - PreferNotPromoteRule CandidatePromotionRule = "prefer_not" - MustNotPromoteRule CandidatePromotionRule = "must_not" + Must CandidatePromotionRule = "must" + Prefer CandidatePromotionRule = "prefer" + Neutral CandidatePromotionRule = "neutral" + PreferNot CandidatePromotionRule = "prefer_not" + MustNot CandidatePromotionRule = "must_not" ) var promotionRuleOrderMap = map[CandidatePromotionRule]int{ - MustPromoteRule: 0, - PreferPromoteRule: 1, - NeutralPromoteRule: 2, - PreferNotPromoteRule: 3, - MustNotPromoteRule: 4, + Must: 0, + Prefer: 1, + Neutral: 2, + PreferNot: 3, + MustNot: 4, } func (this *CandidatePromotionRule) BetterThan(other CandidatePromotionRule) bool { @@ -48,9 +48,9 @@ func (this *CandidatePromotionRule) BetterThan(other CandidatePromotionRule) boo return promotionRuleOrderMap[*this] < otherOrder } -// ParseCandidatePromotionRule returns a CandidatePromotionRule by name. +// Parse returns a CandidatePromotionRule by name. // It returns an error if there is no known rule by the given name. 
-func ParseCandidatePromotionRule(ruleName string) (CandidatePromotionRule, error) { +func Parse(ruleName string) (CandidatePromotionRule, error) { switch ruleName { case "prefer", "neutral", "prefer_not", "must_not": return CandidatePromotionRule(ruleName), nil diff --git a/go/vt/vtctl/reparentutil/util.go b/go/vt/vtctl/reparentutil/util.go index 007f8fe99d2..c2d80ca5f5d 100644 --- a/go/vt/vtctl/reparentutil/util.go +++ b/go/vt/vtctl/reparentutil/util.go @@ -21,6 +21,7 @@ import ( "sync" "time" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" @@ -144,3 +145,104 @@ func FindCurrentPrimary(tabletMap map[string]*topo.TabletInfo, logger logutil.Lo return currentPrimary } + +// getValidCandidatesAndPositionsAsList converts the valid candidates from a map to a list of tablets, making it easier to sort +func getValidCandidatesAndPositionsAsList(validCandidates map[string]mysql.Position, tabletMap map[string]*topo.TabletInfo) ([]*topodatapb.Tablet, []mysql.Position, error) { + var validTablets []*topodatapb.Tablet + var tabletPositions []mysql.Position + for tabletAlias, position := range validCandidates { + tablet, isFound := tabletMap[tabletAlias] + if !isFound { + return nil, nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "candidate %v not found in the tablet map; this is an impossible situation", tabletAlias) + } + validTablets = append(validTablets, tablet.Tablet) + tabletPositions = append(tabletPositions, position) + } + return validTablets, tabletPositions, nil +} + +// restrictValidCandidates is used to restrict some candidates from being considered eligible for becoming the intermediate source or the final promotion candidate +func restrictValidCandidates(validCandidates map[string]mysql.Position, tabletMap map[string]*topo.TabletInfo) (map[string]mysql.Position, error) { + restrictedValidCandidates := make(map[string]mysql.Position) + for candidate, position := range validCandidates { + candidateInfo, ok := tabletMap[candidate] + if !ok { + return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "candidate %v not found in the tablet map; this is an impossible situation", candidate) + } + // We do not allow tablets of type BACKUP, RESTORE, or DRAINED to be considered as the replication source or the candidate for primary + if topoproto.IsTypeInList(candidateInfo.Type, []topodatapb.TabletType{topodatapb.TabletType_BACKUP, topodatapb.TabletType_RESTORE, topodatapb.TabletType_DRAINED}) { + continue + } + restrictedValidCandidates[candidate] = position + } + return restrictedValidCandidates, nil +} + +func findCandidateSameCell( + newPrimary *topodatapb.Tablet, + prevPrimary *topodatapb.Tablet, + possibleCandidates []*topodatapb.Tablet, +) *topodatapb.Tablet { + // check whether the one we have selected as the source is in the same cell and belongs to the candidate list provided + for _, candidate := range possibleCandidates { + if !topoproto.TabletAliasEqual(newPrimary.Alias, candidate.Alias) { + continue + } + if prevPrimary != nil && prevPrimary.Alias.Cell != candidate.Alias.Cell { + continue + } + return candidate + } + // check whether there is some other tablet in the same cell belonging to the candidate list provided + for _, candidate := range possibleCandidates { + if prevPrimary != nil && prevPrimary.Alias.Cell != candidate.Alias.Cell { + continue + } + return candidate + } + return nil +} + +func findCandidateAnyCell( + newPrimary *topodatapb.Tablet, + possibleCandidates []*topodatapb.Tablet, +) 
*topodatapb.Tablet { + // check whether the one we have selected as the source belongs to the candidate list provided + for _, candidate := range possibleCandidates { + if !topoproto.TabletAliasEqual(newPrimary.Alias, candidate.Alias) { + continue + } + return candidate + } + // return the first candidate from this list, if it isn't empty + if len(possibleCandidates) > 0 { + return possibleCandidates[0] + } + return nil +} + +// waitForCatchUp is used to wait for the given tablet until it has caught up to the source +func waitForCatchUp( + ctx context.Context, + tmc tmclient.TabletManagerClient, + logger logutil.Logger, + newPrimary *topodatapb.Tablet, + source *topodatapb.Tablet, + waitTime time.Duration, +) error { + logger.Infof("waiting for %v to catch up to %v", newPrimary.Alias, source.Alias) + // Find the current replication position of the source + pos, err := tmc.MasterPosition(ctx, source) + if err != nil { + return err + } + + // Wait until the new primary has caught up to that position + waitForPosCtx, cancelFunc := context.WithTimeout(ctx, waitTime) + defer cancelFunc() + err = tmc.WaitForPosition(waitForPosCtx, newPrimary, pos) + if err != nil { + return err + } + return nil +} diff --git a/go/vt/vtctl/reparentutil/util_test.go b/go/vt/vtctl/reparentutil/util_test.go index 5cff5044423..770d2f7030f 100644 --- a/go/vt/vtctl/reparentutil/util_test.go +++ b/go/vt/vtctl/reparentutil/util_test.go @@ -18,21 +18,23 @@ package reparentutil import ( "context" + "fmt" "testing" "time" - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" "vitess.io/vitess/go/vt/vttablet/tmclient" replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/proto/vttime" + "vitess.io/vitess/go/vt/topo/topoproto" ) type chooseNewPrimaryTestTMClient struct { @@ -513,3 +515,323 @@ func TestFindCurrentPrimary(t *testing.T) { }) } } + +func TestGetValidCandidatesAndPositionsAsList(t *testing.T) { + sid1 := mysql.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + mysqlGTID1 := mysql.Mysql56GTID{ + Server: sid1, + Sequence: 9, + } + mysqlGTID2 := mysql.Mysql56GTID{ + Server: sid1, + Sequence: 10, + } + mysqlGTID3 := mysql.Mysql56GTID{ + Server: sid1, + Sequence: 11, + } + + positionMostAdvanced := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID1) + positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID2) + positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID3) + + positionIntermediate1 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate1.GTIDSet = positionIntermediate1.GTIDSet.AddGTID(mysqlGTID1) + + positionIntermediate2 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID1) + positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID2) + + tests := []struct { + name string + validCandidates map[string]mysql.Position + tabletMap map[string]*topo.TabletInfo + tabletRes []*topodatapb.Tablet + }{ + { + name: "test conversion", + validCandidates: map[string]mysql.Position{ + "zone1-0000000100": positionMostAdvanced, 
"zone1-0000000101": positionIntermediate1, + "zone1-0000000102": positionIntermediate2, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "primary-elect", + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Hostname: "requires force start", + }, + }, + "zone1-0000000404": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 404, + }, + Hostname: "ignored tablet", + }, + }, + }, + tabletRes: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "primary-elect", + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Hostname: "requires force start", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + tabletRes, posRes, err := getValidCandidatesAndPositionsAsList(test.validCandidates, test.tabletMap) + assert.NoError(t, err) + assert.ElementsMatch(t, test.tabletRes, tabletRes) + assert.Equal(t, len(tabletRes), len(posRes)) + for i, tablet := range tabletRes { + assert.Equal(t, test.validCandidates[topoproto.TabletAliasString(tablet.Alias)], posRes[i]) + } + }) + } +} + +func TestWaitForCatchUp(t *testing.T) { + tests := []struct { + name string + tmc tmclient.TabletManagerClient + source *topodatapb.Tablet + newPrimary *topodatapb.Tablet + err string + }{ + { + name: "success", + tmc: &testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "abc", + Error: nil, + }, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000101": { + "abc": nil, + }, + }, + }, + source: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + newPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, { + name: "error in primary position", + tmc: &testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "abc", + Error: fmt.Errorf("found error in primary position"), + }, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000101": { + "abc": nil, + }, + }, + }, + source: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + newPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + err: "found error in primary position", + }, { + name: "error in waiting for position", + tmc: &testutil.TabletManagerClient{ + MasterPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "abc", + Error: nil, + }, + }, + WaitForPositionResults: map[string]map[string]error{ + "zone1-0000000101": { + "abc": fmt.Errorf("found error in waiting for position"), + }, + }, + }, + source: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + newPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + err: "found error in waiting for position", + }, 
+ } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + logger := logutil.NewMemoryLogger() + err := waitForCatchUp(ctx, test.tmc, logger, test.newPrimary, test.source, 2*time.Second) + if test.err != "" { + assert.EqualError(t, err, test.err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestRestrictValidCandidates(t *testing.T) { + tests := []struct { + name string + validCandidates map[string]mysql.Position + tabletMap map[string]*topo.TabletInfo + result map[string]mysql.Position + }{ + { + name: "remove invalid tablets", + validCandidates: map[string]mysql.Position{ + "zone1-0000000100": {}, + "zone1-0000000101": {}, + "zone1-0000000102": {}, + "zone1-0000000103": {}, + "zone1-0000000104": {}, + "zone1-0000000105": {}, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_RDONLY, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_RESTORE, + }, + }, + "zone1-0000000103": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 103, + }, + Type: topodatapb.TabletType_DRAINED, + }, + }, + "zone1-0000000104": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 104, + }, + Type: topodatapb.TabletType_SPARE, + }, + }, + "zone1-0000000105": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 105, + }, + Type: topodatapb.TabletType_BACKUP, + }, + }, + }, + result: map[string]mysql.Position{ + "zone1-0000000100": {}, + "zone1-0000000101": {}, + "zone1-0000000104": {}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + res, err := restrictValidCandidates(test.validCandidates, test.tabletMap) + assert.NoError(t, err) + assert.Equal(t, test.result, res) + }) + } +} diff --git a/go/vt/vtctld/vtctld.go b/go/vt/vtctld/vtctld.go index 36a7ae0a281..db9159a5511 100644 --- a/go/vt/vtctld/vtctld.go +++ b/go/vt/vtctld/vtctld.go @@ -32,6 +32,7 @@ import ( "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/wrangler" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -39,6 +40,7 @@ var ( enableRealtimeStats = flag.Bool("enable_realtime_stats", false, "Required for the Realtime Stats view. If set, vtctld will maintain a streaming RPC to each tablet (in all cells) to gather the realtime health stats.") + durability = flag.String("durability", "none", "type of durability to enforce. Default is none. Other values are dictated by registered plugins") _ = flag.String("web_dir", "", "NOT USED, here for backward compatibility") _ = flag.String("web_dir2", "", "NOT USED, here for backward compatibility") @@ -49,7 +51,13 @@ const ( ) // InitVtctld initializes all the vtctld functionality.
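+// It returns an error if the configured durability policy cannot be set.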
-func InitVtctld(ts *topo.Server) { +func InitVtctld(ts *topo.Server) error { + err := reparentutil.SetDurabilityPolicy(*durability, nil) + if err != nil { + log.Errorf("error in setting durability policy: %v", err) + return err + } + actionRepo := NewActionRepository(ts) // keyspace actions @@ -185,4 +193,6 @@ func InitVtctld(ts *topo.Server) { // Setup reverse proxy for all vttablets through /vttablet/. initVTTabletRedirection(ts) + + return nil } diff --git a/go/vt/vttablet/tabletmanager/rpc_replication.go b/go/vt/vttablet/tabletmanager/rpc_replication.go index ec16d8003b8..6a73b850860 100644 --- a/go/vt/vttablet/tabletmanager/rpc_replication.go +++ b/go/vt/vttablet/tabletmanager/rpc_replication.go @@ -527,6 +527,8 @@ func (tm *TabletManager) SetReplicationSource(ctx context.Context, parentAlias * } defer tm.unlock() + // setReplicationSourceLocked also fixes semi-sync. If the tablet type is primary, it assumes that the tablet is about to become a replica, + // so we always call fixSemiSync with a non-primary tablet type; this always disables semi-sync on the source side. return tm.setReplicationSourceLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication) } diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go index b1de4366ef7..48ffe0d6f20 100644 --- a/go/vt/wrangler/reparent.go +++ b/go/vt/wrangler/reparent.go @@ -43,12 +43,6 @@ import ( vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) -const ( - plannedReparentShardOperation = "PlannedReparentShard" //nolint - emergencyReparentShardOperation = "EmergencyReparentShard" //nolint - tabletExternallyReparentedOperation = "TabletExternallyReparented" //nolint -) - // ShardReplicationStatuses returns the ReplicationStatus for each tablet in a shard. func (wr *Wrangler) ShardReplicationStatuses(ctx context.Context, keyspace, shard string) ([]*topo.TabletInfo, []*replicationdatapb.Status, error) { tabletMap, err := wr.ts.GetTabletMapForShard(ctx, keyspace, shard) @@ -151,15 +145,16 @@ func (wr *Wrangler) PlannedReparentShard(ctx context.Context, keyspace, shard st // EmergencyReparentShard will make the provided tablet the primary for // the shard, when the old primary is completely unreachable.
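+// If preventCrossCellPromotion is set, the new primary may only be promoted from the same cell as the previous primary.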
-func (wr *Wrangler) EmergencyReparentShard(ctx context.Context, keyspace, shard string, primaryElectTabletAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration, ignoredTablets sets.String) (err error) { +func (wr *Wrangler) EmergencyReparentShard(ctx context.Context, keyspace, shard string, primaryElectTabletAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration, ignoredTablets sets.String, preventCrossCellPromotion bool) (err error) { _, err = reparentutil.NewEmergencyReparenter(wr.ts, wr.tmc, wr.logger).ReparentShard( ctx, keyspace, shard, reparentutil.EmergencyReparentOptions{ - NewPrimaryAlias: primaryElectTabletAlias, - WaitReplicasTimeout: waitReplicasTimeout, - IgnoreReplicas: ignoredTablets, + NewPrimaryAlias: primaryElectTabletAlias, + WaitReplicasTimeout: waitReplicasTimeout, + IgnoreReplicas: ignoredTablets, + PreventCrossCellPromotion: preventCrossCellPromotion, }, ) diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go index 88fbe10588a..1cd1415eb4a 100644 --- a/go/vt/wrangler/testlib/backup_test.go +++ b/go/vt/wrangler/testlib/backup_test.go @@ -176,7 +176,7 @@ func TestBackupRestore(t *testing.T) { "SHOW DATABASES": {}, } destTablet.FakeMysqlDaemon.SetReplicationPositionPos = sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition - destTablet.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(primary.Tablet) + destTablet.FakeMysqlDaemon.SetReplicationSourceInputs = append(destTablet.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet)) destTablet.StartActionLoop(t, wr) defer destTablet.StopActionLoop(t) @@ -402,7 +402,7 @@ func TestBackupRestoreLagged(t *testing.T) { "SHOW DATABASES": {}, } destTablet.FakeMysqlDaemon.SetReplicationPositionPos = destTablet.FakeMysqlDaemon.CurrentPrimaryPosition - destTablet.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(primary.Tablet) + destTablet.FakeMysqlDaemon.SetReplicationSourceInputs = append(destTablet.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet)) destTablet.StartActionLoop(t, wr) defer destTablet.StopActionLoop(t) @@ -570,7 +570,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) { "SHOW DATABASES": {}, } destTablet.FakeMysqlDaemon.SetReplicationPositionPos = sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition - destTablet.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(primary.Tablet) + destTablet.FakeMysqlDaemon.SetReplicationSourceInputs = append(destTablet.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet)) destTablet.StartActionLoop(t, wr) defer destTablet.StopActionLoop(t) @@ -727,7 +727,7 @@ func TestDisableActiveReparents(t *testing.T) { "SHOW DATABASES": {}, } destTablet.FakeMysqlDaemon.SetReplicationPositionPos = sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition - destTablet.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(primary.Tablet) + destTablet.FakeMysqlDaemon.SetReplicationSourceInputs = append(destTablet.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet)) destTablet.StartActionLoop(t, wr) defer destTablet.StopActionLoop(t) diff --git a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go index 71efac9286b..3d8b9aa73f9 100644 --- a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go @@ -21,16 +21,16 @@ import ( "testing" "time" - 
"vitess.io/vitess/go/vt/discovery" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/util/sets" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/wrangler" @@ -43,6 +43,7 @@ func TestEmergencyReparentShard(t *testing.T) { discovery.SetTabletPickerRetryDelay(delay) }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) + _ = reparentutil.SetDurabilityPolicy("none", nil) ts := memorytopo.NewServer("cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) @@ -86,7 +87,7 @@ func TestEmergencyReparentShard(t *testing.T) { newPrimary.FakeMysqlDaemon.CurrentSourceFilePosition = mysql.Position{ GTIDSet: newPrimaryRelayLogPos, } - newPrimary.FakeMysqlDaemon.WaitPrimaryPosition = newPrimary.FakeMysqlDaemon.CurrentSourceFilePosition + newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = append(newPrimary.FakeMysqlDaemon.WaitPrimaryPositions, newPrimary.FakeMysqlDaemon.CurrentSourceFilePosition) newPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE IO_THREAD", "CREATE DATABASE IF NOT EXISTS _vt", @@ -108,7 +109,7 @@ func TestEmergencyReparentShard(t *testing.T) { // old primary, will be scrapped oldPrimary.FakeMysqlDaemon.ReadOnly = false oldPrimary.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica - oldPrimary.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", } @@ -131,8 +132,8 @@ func TestEmergencyReparentShard(t *testing.T) { goodReplica1.FakeMysqlDaemon.CurrentSourceFilePosition = mysql.Position{ GTIDSet: goodReplica1RelayLogPos, } - goodReplica1.FakeMysqlDaemon.WaitPrimaryPosition = goodReplica1.FakeMysqlDaemon.CurrentSourceFilePosition - goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + goodReplica1.FakeMysqlDaemon.WaitPrimaryPositions = append(goodReplica1.FakeMysqlDaemon.WaitPrimaryPositions, goodReplica1.FakeMysqlDaemon.CurrentSourceFilePosition) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE IO_THREAD", "STOP SLAVE", @@ -158,8 +159,8 @@ func TestEmergencyReparentShard(t *testing.T) { goodReplica2.FakeMysqlDaemon.CurrentSourceFilePosition = mysql.Position{ GTIDSet: goodReplica2RelayLogPos, } - goodReplica2.FakeMysqlDaemon.WaitPrimaryPosition = goodReplica2.FakeMysqlDaemon.CurrentSourceFilePosition - goodReplica2.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + goodReplica2.FakeMysqlDaemon.WaitPrimaryPositions = append(goodReplica2.FakeMysqlDaemon.WaitPrimaryPositions, goodReplica2.FakeMysqlDaemon.CurrentSourceFilePosition) + goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) goodReplica2.StartActionLoop(t, wr) 
goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", @@ -191,6 +192,7 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) { discovery.SetTabletPickerRetryDelay(delay) }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) + _ = reparentutil.SetDurabilityPolicy("none", nil) ts := memorytopo.NewServer("cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) @@ -227,9 +229,16 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) { newPrimary.FakeMysqlDaemon.CurrentSourceFilePosition = mysql.Position{ GTIDSet: newPrimaryRelayLogPos, } - newPrimary.FakeMysqlDaemon.WaitPrimaryPosition = newPrimary.FakeMysqlDaemon.CurrentSourceFilePosition + newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = append(newPrimary.FakeMysqlDaemon.WaitPrimaryPositions, newPrimary.FakeMysqlDaemon.CurrentSourceFilePosition) + newPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(newPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(moreAdvancedReplica.Tablet)) newPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE IO_THREAD", + "STOP SLAVE", + "FAKE SET MASTER", + "START SLAVE", + "CREATE DATABASE IF NOT EXISTS _vt", + "SUBCREATE TABLE IF NOT EXISTS _vt.reparent_journal", + "SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, master_alias, replication_position) VALUES", } newPrimary.StartActionLoop(t, wr) defer newPrimary.StopActionLoop(t) @@ -265,19 +274,23 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) { moreAdvancedReplica.FakeMysqlDaemon.CurrentSourceFilePosition = mysql.Position{ GTIDSet: moreAdvancedReplicaLogPos, } - moreAdvancedReplica.FakeMysqlDaemon.WaitPrimaryPosition = moreAdvancedReplica.FakeMysqlDaemon.CurrentSourceFilePosition + moreAdvancedReplica.FakeMysqlDaemon.SetReplicationSourceInputs = append(moreAdvancedReplica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) + moreAdvancedReplica.FakeMysqlDaemon.WaitPrimaryPositions = append(moreAdvancedReplica.FakeMysqlDaemon.WaitPrimaryPositions, moreAdvancedReplica.FakeMysqlDaemon.CurrentSourceFilePosition) + newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = append(newPrimary.FakeMysqlDaemon.WaitPrimaryPositions, moreAdvancedReplica.FakeMysqlDaemon.CurrentPrimaryPosition) moreAdvancedReplica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE IO_THREAD", + "STOP SLAVE", + "FAKE SET MASTER", + "START SLAVE", } moreAdvancedReplica.StartActionLoop(t, wr) defer moreAdvancedReplica.StopActionLoop(t) // run EmergencyReparentShard - err := wr.EmergencyReparentShard(ctx, newPrimary.Tablet.Keyspace, newPrimary.Tablet.Shard, newPrimary.Tablet.Alias, 10*time.Second, sets.NewString()) + err := wr.EmergencyReparentShard(ctx, newPrimary.Tablet.Keyspace, newPrimary.Tablet.Shard, newPrimary.Tablet.Alias, 10*time.Second, sets.NewString(), false) cancel() - assert.Error(t, err) - assert.Contains(t, err.Error(), "is not fully caught up") + assert.NoError(t, err) // check what was run err = newPrimary.FakeMysqlDaemon.CheckSuperQueryList() require.NoError(t, err) diff --git a/go/vt/wrangler/testlib/external_reparent_test.go b/go/vt/wrangler/testlib/external_reparent_test.go index fede11b2ff9..35273cf91ca 100644 --- a/go/vt/wrangler/testlib/external_reparent_test.go +++ b/go/vt/wrangler/testlib/external_reparent_test.go @@ -89,7 +89,7 @@ func TestTabletExternallyReparentedBasic(t *testing.T) { t.Fatalf("old primary should 
be PRIMARY but is: %v", tablet.Type) } - oldPrimary.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START Replica", @@ -168,7 +168,7 @@ func TestTabletExternallyReparentedToReplica(t *testing.T) { // Second test: reparent to a replica, and pretend the old // primary is still good to go. - oldPrimary.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START Replica", @@ -246,7 +246,7 @@ func TestTabletExternallyReparentedWithDifferentMysqlPort(t *testing.T) { newPrimary.StartActionLoop(t, wr) defer newPrimary.StopActionLoop(t) - oldPrimary.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START Replica", @@ -327,7 +327,7 @@ func TestTabletExternallyReparentedContinueOnUnexpectedPrimary(t *testing.T) { newPrimary.StartActionLoop(t, wr) defer newPrimary.StopActionLoop(t) - oldPrimary.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START Replica", @@ -404,7 +404,7 @@ func TestTabletExternallyReparentedRerun(t *testing.T) { newPrimary.StartActionLoop(t, wr) defer newPrimary.StopActionLoop(t) - oldPrimary.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START Replica", @@ -414,7 +414,7 @@ func TestTabletExternallyReparentedRerun(t *testing.T) { oldPrimary.StartActionLoop(t, wr) defer oldPrimary.StopActionLoop(t) - goodReplica.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + goodReplica.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) // On the good replica, we will respond to // TabletActionReplicaWasRestarted. 
goodReplica.StartActionLoop(t, wr) diff --git a/go/vt/wrangler/testlib/planned_reparent_shard_test.go b/go/vt/wrangler/testlib/planned_reparent_shard_test.go index da4035cdbfd..e1b663e3855 100644 --- a/go/vt/wrangler/testlib/planned_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/planned_reparent_shard_test.go @@ -58,7 +58,7 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) { // new primary newPrimary.FakeMysqlDaemon.ReadOnly = true newPrimary.FakeMysqlDaemon.Replicating = true - newPrimary.FakeMysqlDaemon.WaitPrimaryPosition = mysql.Position{ + newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = []mysql.Position{{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ Domain: 7, @@ -66,7 +66,7 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) { Sequence: 990, }, }, - } + }} newPrimary.FakeMysqlDaemon.PromoteResult = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ @@ -91,8 +91,8 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) { oldPrimary.FakeMysqlDaemon.ReadOnly = false oldPrimary.FakeMysqlDaemon.Replicating = false oldPrimary.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica - oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPosition - oldPrimary.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0] + oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START SLAVE", @@ -105,12 +105,12 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) { oldPrimary.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) // SetReplicationSource is called on new primary to make sure it's replicating before reparenting. 
- newPrimary.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(oldPrimary.Tablet) + newPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(newPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(oldPrimary.Tablet)) // good replica 1 is replicating goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true - goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", "FAKE SET MASTER", @@ -168,7 +168,7 @@ func TestPlannedReparentShardNoError(t *testing.T) { // new primary newPrimary.FakeMysqlDaemon.ReadOnly = true newPrimary.FakeMysqlDaemon.Replicating = true - newPrimary.FakeMysqlDaemon.WaitPrimaryPosition = mysql.Position{ + newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = []mysql.Position{{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ Domain: 7, @@ -176,7 +176,7 @@ func TestPlannedReparentShardNoError(t *testing.T) { Sequence: 990, }, }, - } + }} newPrimary.FakeMysqlDaemon.PromoteResult = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ @@ -201,8 +201,8 @@ func TestPlannedReparentShardNoError(t *testing.T) { oldPrimary.FakeMysqlDaemon.ReadOnly = false oldPrimary.FakeMysqlDaemon.Replicating = false oldPrimary.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica - oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPosition - oldPrimary.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0] + oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START SLAVE", @@ -215,12 +215,12 @@ func TestPlannedReparentShardNoError(t *testing.T) { oldPrimary.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) // SetReplicationSource is called on new primary to make sure it's replicating before reparenting. 
- newPrimary.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(oldPrimary.Tablet) + newPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(newPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(oldPrimary.Tablet)) // goodReplica1 is replicating goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true - goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", "FAKE SET MASTER", @@ -232,7 +232,7 @@ func TestPlannedReparentShardNoError(t *testing.T) { // goodReplica2 is not replicating goodReplica2.FakeMysqlDaemon.ReadOnly = true goodReplica2.FakeMysqlDaemon.Replicating = false - goodReplica2.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", @@ -317,7 +317,7 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { // new primary newPrimary.FakeMysqlDaemon.ReadOnly = true newPrimary.FakeMysqlDaemon.Replicating = true - newPrimary.FakeMysqlDaemon.WaitPrimaryPosition = mysql.Position{ + newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = []mysql.Position{{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ Domain: 7, @@ -325,7 +325,7 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { Sequence: 990, }, }, - } + }} newPrimary.FakeMysqlDaemon.PromoteResult = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ @@ -351,7 +351,7 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { oldPrimary.FakeMysqlDaemon.Replicating = false // set to incorrect value to make promote fail on WaitForReplicationPos oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.PromoteResult - oldPrimary.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START SLAVE", @@ -360,12 +360,12 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { defer oldPrimary.StopActionLoop(t) oldPrimary.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) // SetReplicationSource is called on new primary to make sure it's replicating before reparenting. 
- newPrimary.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(oldPrimary.Tablet) + newPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(newPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(oldPrimary.Tablet)) // good replica 1 is replicating goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true - goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", "FAKE SET MASTER", @@ -377,7 +377,7 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { // good replica 2 is not replicating goodReplica2.FakeMysqlDaemon.ReadOnly = true goodReplica2.FakeMysqlDaemon.Replicating = false - goodReplica2.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", @@ -418,7 +418,7 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { newPrimary.FakeMysqlDaemon.TimeoutHook = func() error { return context.DeadlineExceeded } newPrimary.FakeMysqlDaemon.ReadOnly = true newPrimary.FakeMysqlDaemon.Replicating = true - newPrimary.FakeMysqlDaemon.WaitPrimaryPosition = mysql.Position{ + newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = []mysql.Position{{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ Domain: 7, @@ -426,7 +426,7 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { Sequence: 990, }, }, - } + }} newPrimary.FakeMysqlDaemon.PromoteResult = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ @@ -450,8 +450,8 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { // old primary oldPrimary.FakeMysqlDaemon.ReadOnly = false oldPrimary.FakeMysqlDaemon.Replicating = false - oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPosition - oldPrimary.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0] + oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START SLAVE", @@ -461,11 +461,11 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { oldPrimary.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) // SetReplicationSource is called on new primary to make sure it's replicating before reparenting. 
- newPrimary.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(oldPrimary.Tablet) + newPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(newPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(oldPrimary.Tablet)) // good replica 1 is replicating goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true - goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", "FAKE SET MASTER", @@ -477,7 +477,7 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { // good replica 2 is not replicating goodReplica2.FakeMysqlDaemon.ReadOnly = true goodReplica2.FakeMysqlDaemon.Replicating = false - goodReplica2.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", @@ -536,7 +536,7 @@ func TestPlannedReparentShardRelayLogError(t *testing.T) { // goodReplica1 is replicating goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true - goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(primary.Tablet) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet)) // simulate error that will trigger a call to RestartReplication goodReplica1.FakeMysqlDaemon.SetReplicationSourceError = errors.New("Slave failed to initialize relay log info structure from the repository") goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ @@ -612,7 +612,7 @@ func TestPlannedReparentShardRelayLogErrorStartReplication(t *testing.T) { goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true goodReplica1.FakeMysqlDaemon.IOThreadRunning = false - goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(primary.Tablet) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet)) goodReplica1.FakeMysqlDaemon.CurrentSourceHost = primary.Tablet.MysqlHostname goodReplica1.FakeMysqlDaemon.CurrentSourcePort = int(primary.Tablet.MysqlPort) // simulate error that will trigger a call to RestartReplication @@ -670,7 +670,7 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { newPrimary.FakeMysqlDaemon.Replicating = true // make promote fail newPrimary.FakeMysqlDaemon.PromoteError = errors.New("some error") - newPrimary.FakeMysqlDaemon.WaitPrimaryPosition = mysql.Position{ + newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = []mysql.Position{{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ Domain: 7, @@ -678,7 +678,7 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { Sequence: 990, }, }, - } + }} newPrimary.FakeMysqlDaemon.PromoteResult = mysql.Position{ GTIDSet: mysql.MariadbGTIDSet{ 7: mysql.MariadbGTID{ @@ -703,8 +703,8 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { 
oldPrimary.FakeMysqlDaemon.ReadOnly = false oldPrimary.FakeMysqlDaemon.Replicating = false oldPrimary.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica - oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPosition - oldPrimary.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0] + oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", "START SLAVE", @@ -714,11 +714,11 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { oldPrimary.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) // SetReplicationSource is called on new primary to make sure it's replicating before reparenting. - newPrimary.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(oldPrimary.Tablet) + newPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(newPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(oldPrimary.Tablet)) // good replica 1 is replicating goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true - goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", "FAKE SET MASTER", @@ -730,7 +730,7 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { // good replica 2 is not replicating goodReplica2.FakeMysqlDaemon.ReadOnly = true goodReplica2.FakeMysqlDaemon.Replicating = false - goodReplica2.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(newPrimary.Tablet) + goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", @@ -824,7 +824,7 @@ func TestPlannedReparentShardSamePrimary(t *testing.T) { // good replica 1 is replicating goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true - goodReplica1.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(oldPrimary.Tablet) + goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", "FAKE SET MASTER", @@ -836,7 +836,7 @@ func TestPlannedReparentShardSamePrimary(t *testing.T) { // goodReplica2 is not replicating goodReplica2.FakeMysqlDaemon.ReadOnly = true goodReplica2.FakeMysqlDaemon.Replicating = false - goodReplica2.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(oldPrimary.Tablet) + goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE SET MASTER", diff --git 
a/go/vt/wrangler/testlib/reparent_utils_test.go b/go/vt/wrangler/testlib/reparent_utils_test.go index e9487fc671e..8cc157839b9 100644 --- a/go/vt/wrangler/testlib/reparent_utils_test.go +++ b/go/vt/wrangler/testlib/reparent_utils_test.go @@ -147,7 +147,7 @@ func TestReparentTablet(t *testing.T) { // which ends up making this test unpredictable. replica.FakeMysqlDaemon.Replicating = true replica.FakeMysqlDaemon.IOThreadRunning = true - replica.FakeMysqlDaemon.SetReplicationSourceInput = topoproto.MysqlAddr(primary.Tablet) + replica.FakeMysqlDaemon.SetReplicationSourceInputs = append(replica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet)) replica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", "FAKE SET MASTER", diff --git a/proto/vtctldata.proto b/proto/vtctldata.proto index 87b94eeb652..6fe9caf7f39 100644 --- a/proto/vtctldata.proto +++ b/proto/vtctldata.proto @@ -353,6 +353,9 @@ message EmergencyReparentShardRequest { // WaitReplicasTimeout is the duration of time to wait for replicas to catch // up in reparenting. vttime.Duration wait_replicas_timeout = 5; + // PreventCrossCellPromotion, when set, restricts promotion of the new primary to the same cell + // as the failed primary. + bool prevent_cross_cell_promotion = 6; } message EmergencyReparentShardResponse { diff --git a/test/config.json b/test/config.json index 696beb0a60d..8827f7abed1 100644 --- a/test/config.json +++ b/test/config.json @@ -413,9 +413,18 @@ "RetryMax": 1, "Tags": [] }, - "reparent": { + "emergencyreparent": { "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/reparent"], + "Args": ["vitess.io/vitess/go/test/endtoend/reparent/emergencyreparent"], "Command": [], "Manual": false, "Shard": "14", "RetryMax": 1, "Tags": [] }, + "plannedreparent": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/reparent/plannedreparent"], + "Command": [], + "Manual": false, + "Shard": "14", diff --git a/web/vtadmin/src/proto/vtadmin.d.ts b/web/vtadmin/src/proto/vtadmin.d.ts index 32c57692771..871aafe8e46 100644 --- a/web/vtadmin/src/proto/vtadmin.d.ts +++ b/web/vtadmin/src/proto/vtadmin.d.ts @@ -29351,6 +29351,9 @@ export namespace vtctldata { /** EmergencyReparentShardRequest wait_replicas_timeout */ wait_replicas_timeout?: (vttime.IDuration|null); + + /** EmergencyReparentShardRequest prevent_cross_cell_promotion */ + prevent_cross_cell_promotion?: (boolean|null); } /** Represents an EmergencyReparentShardRequest. */ @@ -29377,6 +29380,9 @@ export namespace vtctldata { /** EmergencyReparentShardRequest wait_replicas_timeout. */ public wait_replicas_timeout?: (vttime.IDuration|null); + /** EmergencyReparentShardRequest prevent_cross_cell_promotion. */ + public prevent_cross_cell_promotion: boolean; + /** * Creates a new EmergencyReparentShardRequest instance using the specified properties.
* @param [properties] Properties to set diff --git a/web/vtadmin/src/proto/vtadmin.js b/web/vtadmin/src/proto/vtadmin.js index 0835ce6ade6..9dc0f68faed 100644 --- a/web/vtadmin/src/proto/vtadmin.js +++ b/web/vtadmin/src/proto/vtadmin.js @@ -69794,6 +69794,7 @@ $root.vtctldata = (function() { * @property {topodata.ITabletAlias|null} [new_primary] EmergencyReparentShardRequest new_primary * @property {Array.|null} [ignore_replicas] EmergencyReparentShardRequest ignore_replicas * @property {vttime.IDuration|null} [wait_replicas_timeout] EmergencyReparentShardRequest wait_replicas_timeout + * @property {boolean|null} [prevent_cross_cell_promotion] EmergencyReparentShardRequest prevent_cross_cell_promotion */ /** @@ -69852,6 +69853,14 @@ $root.vtctldata = (function() { */ EmergencyReparentShardRequest.prototype.wait_replicas_timeout = null; + /** + * EmergencyReparentShardRequest prevent_cross_cell_promotion. + * @member {boolean} prevent_cross_cell_promotion + * @memberof vtctldata.EmergencyReparentShardRequest + * @instance + */ + EmergencyReparentShardRequest.prototype.prevent_cross_cell_promotion = false; + /** * Creates a new EmergencyReparentShardRequest instance using the specified properties. * @function create @@ -69887,6 +69896,8 @@ $root.vtctldata = (function() { $root.topodata.TabletAlias.encode(message.ignore_replicas[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); if (message.wait_replicas_timeout != null && Object.hasOwnProperty.call(message, "wait_replicas_timeout")) $root.vttime.Duration.encode(message.wait_replicas_timeout, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.prevent_cross_cell_promotion != null && Object.hasOwnProperty.call(message, "prevent_cross_cell_promotion")) + writer.uint32(/* id 6, wireType 0 =*/48).bool(message.prevent_cross_cell_promotion); return writer; }; @@ -69938,6 +69949,9 @@ $root.vtctldata = (function() { case 5: message.wait_replicas_timeout = $root.vttime.Duration.decode(reader, reader.uint32()); break; + case 6: + message.prevent_cross_cell_promotion = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -69998,6 +70012,9 @@ $root.vtctldata = (function() { if (error) return "wait_replicas_timeout." 
+ error; } + if (message.prevent_cross_cell_promotion != null && message.hasOwnProperty("prevent_cross_cell_promotion")) + if (typeof message.prevent_cross_cell_promotion !== "boolean") + return "prevent_cross_cell_promotion: boolean expected"; return null; }; @@ -70037,6 +70054,8 @@ $root.vtctldata = (function() { throw TypeError(".vtctldata.EmergencyReparentShardRequest.wait_replicas_timeout: object expected"); message.wait_replicas_timeout = $root.vttime.Duration.fromObject(object.wait_replicas_timeout); } + if (object.prevent_cross_cell_promotion != null) + message.prevent_cross_cell_promotion = Boolean(object.prevent_cross_cell_promotion); return message; }; @@ -70060,6 +70079,7 @@ $root.vtctldata = (function() { object.shard = ""; object.new_primary = null; object.wait_replicas_timeout = null; + object.prevent_cross_cell_promotion = false; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; @@ -70074,6 +70094,8 @@ $root.vtctldata = (function() { } if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) object.wait_replicas_timeout = $root.vttime.Duration.toObject(message.wait_replicas_timeout, options); + if (message.prevent_cross_cell_promotion != null && message.hasOwnProperty("prevent_cross_cell_promotion")) + object.prevent_cross_cell_promotion = message.prevent_cross_cell_promotion; return object; };
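For readers following the reparent changes above, the net effect of the new prevent_cross_cell_promotion field is easiest to see in isolation: when it is set and the failed primary's cell is known, candidates outside that cell are dropped before the best-position selection runs. Below is a minimal, self-contained Go sketch of that filtering step; the types and names here are simplified stand-ins for illustration, not the actual reparentutil API.

package main

import "fmt"

// tablet is a simplified stand-in for topodatapb.Tablet, reduced to an alias cell and uid.
type tablet struct {
	cell string
	uid  uint32
}

// filterByCell mimics the effect of prevent_cross_cell_promotion: when it is set and the
// previous primary is known, only candidates in the previous primary's cell stay eligible.
func filterByCell(prevPrimary *tablet, candidates []tablet, preventCrossCell bool) []tablet {
	if !preventCrossCell || prevPrimary == nil {
		return candidates
	}
	sameCell := make([]tablet, 0, len(candidates))
	for _, c := range candidates {
		if c.cell == prevPrimary.cell {
			sameCell = append(sameCell, c)
		}
	}
	return sameCell
}

func main() {
	prev := &tablet{cell: "zone1", uid: 100}
	candidates := []tablet{
		{cell: "zone1", uid: 101},
		{cell: "zone2", uid: 200},
	}
	// Restriction on: the zone2 tablet is no longer a promotion candidate.
	fmt.Println(filterByCell(prev, candidates, true)) // [{zone1 101}]
	// Restriction off: a tablet from any cell may be promoted.
	fmt.Println(filterByCell(prev, candidates, false)) // [{zone1 101} {zone2 200}]
}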