Skip to content

Commit

Permalink
Deprecate enable-semi-sync in favour of RPC parameter (vitessio#10695) (
Browse files Browse the repository at this point in the history
vitessio#858)

* feat: deprecate enable_semi_sync flag

Signed-off-by: Manan Gupta <[email protected]>

* docs: add the deprecation change to the summary

Signed-off-by: Manan Gupta <[email protected]>

* test: fix test expectation for vttablet flags

Signed-off-by: Manan Gupta <[email protected]>

* feat: fix cnf files to not mention the deprecated flag

Signed-off-by: Manan Gupta <[email protected]>

* feat: remove enable_semi_sync from scripts of examples

Signed-off-by: Manan Gupta <[email protected]>

* test: remove setting enable_semi_sync flag in testlib tests

Signed-off-by: Manan Gupta <[email protected]>

* feat: remove enable_semi_sync from a bunch of tests

Signed-off-by: Manan Gupta <[email protected]>

* test: refactor setup to take in durability policy instead of a boolean and add a test for cross cell durability policy

Signed-off-by: Manan Gupta <[email protected]>

* feat: add cross cell durability policy to the docs

Signed-off-by: Manan Gupta <[email protected]>

* feat: fix flag help output

Signed-off-by: Manan Gupta <[email protected]>

* test: we shouldn't remove enable_semi_sync from tests which are testing upgrades

Signed-off-by: Manan Gupta <[email protected]>
  • Loading branch information
GuptaManan100 authored Jul 22, 2022
1 parent c3cb614 commit 18d48fb
Show file tree
Hide file tree
Showing 29 changed files with 299 additions and 573 deletions.
5 changes: 2 additions & 3 deletions config/mycnf/mariadb100.cnf
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,8 @@
# (when the primary goes away). Here we just load the plugin so it's
# available if desired, but it's disabled at startup.
#
# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync
# at the proper time when replication is set up, or when a primary is
# promoted or demoted.
# VTTablet will enable semi-sync at the proper time when replication is set up,
# or when a primary is promoted or demoted based on the durability policy configured.
plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so

slave_net_timeout = 60
Expand Down
5 changes: 2 additions & 3 deletions config/mycnf/mariadb101.cnf
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,8 @@
# (when the primary goes away). Here we just load the plugin so it's
# available if desired, but it's disabled at startup.
#
# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync
# at the proper time when replication is set up, or when a primary is
# promoted or demoted.
# VTTablet will enable semi-sync at the proper time when replication is set up,
# or when a primary is promoted or demoted based on the durability policy configured.
plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so

slave_net_timeout = 60
Expand Down
5 changes: 2 additions & 3 deletions config/mycnf/mariadb102.cnf
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,8 @@
# (when the primary goes away). Here we just load the plugin so it's
# available if desired, but it's disabled at startup.
#
# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync
# at the proper time when replication is set up, or when a primary is
# promoted or demoted.
# VTTablet will enable semi-sync at the proper time when replication is set up,
# or when a primary is promoted or demoted based on the durability policy configured.
plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so

# enable strict mode so it's safe to compare sequence numbers across different server IDs.
Expand Down
5 changes: 2 additions & 3 deletions config/mycnf/mysql57.cnf
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,8 @@ collation_server = utf8_general_ci
# (when the primary goes away). Here we just load the plugin so it's
# available if desired, but it's disabled at startup.
#
# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync
# at the proper time when replication is set up, or when a primary is
# promoted or demoted.
# VTTablet will enable semi-sync at the proper time when replication is set up,
# or when a primary is promoted or demoted based on the durability policy configured.
plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so

# When semi-sync is enabled, don't allow fallback to async
Expand Down
5 changes: 2 additions & 3 deletions config/mycnf/mysql80.cnf
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,8 @@ default_authentication_plugin = mysql_native_password
# (when the primary goes away). Here we just load the plugin so it's
# available if desired, but it's disabled at startup.
#
# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync
# at the proper time when replication is set up, or when a primary is
# promoted or demoted.
# VTTablet will enable semi-sync at the proper time when replication is set up,
# or when a primary is promoted or demoted based on the durability policy configured.
plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so

# MySQL 8.0 will not load plugins during --initialize
Expand Down
10 changes: 10 additions & 0 deletions doc/releasenotes/15_0_0_summary.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@
#### vttablet startup flag --enable-query-plan-field-caching
This flag is now deprecated. It will be removed in v16.

#### vttablet startup flag deprecations
- `--enable_semi_sync` is now deprecated. It will be removed in v16. Instead, set the correct durability policy using `SetKeyspaceDurabilityPolicy`.

### New Syntax

### VDiff2
Expand Down Expand Up @@ -94,3 +97,10 @@ $ curl -s http://127.0.0.1:15100/debug/vars | jq . | grep Throttler

Added new parameter `multi_shard_autocommit` to the lookup vindex definition in vschema. If enabled, the lookup vindex DML query is sent as autocommit to all shards.
This is slightly different from the `autocommit` parameter, where the query is sent in its own transaction, separate from the ongoing transaction (if any), i.e. begin -> lookup query execs -> commit/rollback.

### Durability Policy

#### Cross Cell

A new durability policy `cross_cell` is now supported. The `cross_cell` durability policy only allows replica tablets from a different cell than the current primary to
send semi-sync ACKs. This ensures that any committed write exists in at least 2 tablets belonging to different cells.
5 changes: 2 additions & 3 deletions examples/compose/external_db/mysql/mysql56.cnf
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,8 @@ innodb_use_native_aio = 0
# (when the master goes away). Here we just load the plugin so it's
# available if desired, but it's disabled at startup.
#
# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync
# at the proper time when replication is set up, or when masters are
# promoted or demoted.
# VTTablet will enable semi-sync at the proper time when replication is set up,
# or when a primary is promoted or demoted based on the durability policy configured.
plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so

# When semi-sync is enabled, don't allow fallback to async
Expand Down
5 changes: 2 additions & 3 deletions examples/compose/external_db/mysql/mysql57.cnf
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,8 @@ collation_server = utf8_general_ci
# (when the master goes away). Here we just load the plugin so it's
# available if desired, but it's disabled at startup.
#
# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync
# at the proper time when replication is set up, or when masters are
# promoted or demoted.
# VTTablet will enable semi-sync at the proper time when replication is set up,
# or when a primary is promoted or demoted based on the durability policy configured.
plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so

# When semi-sync is enabled, don't allow fallback to async
Expand Down
1 change: 0 additions & 1 deletion examples/compose/vttablet-up.sh
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,6 @@ exec $VTROOT/bin/vttablet \
--tablet-path $alias \
--tablet_hostname "$vthost" \
--health_check_interval 5s \
--enable_semi_sync=false \
--disable_active_reparents=true \
--port $web_port \
--grpc_port $grpc_port \
Expand Down
1 change: 0 additions & 1 deletion examples/local/scripts/vttablet-up.sh
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,6 @@ vttablet \
--init_shard $shard \
--init_tablet_type $tablet_type \
--health_check_interval 5s \
--enable_semi_sync \
--enable_replication_reporter \
--backup_storage_implementation file \
--file_backup_storage_root $VTDATAROOT/backups \
Expand Down
1 change: 0 additions & 1 deletion examples/operator/vtorc_example.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,6 @@ spec:
extraFlags:
db_charset: utf8mb4
disable_active_reparents: "true"
enable_semi_sync: "false"
resources:
requests:
cpu: 100m
Expand Down
2 changes: 1 addition & 1 deletion go/flags/endtoend/vttablet.txt
Original file line number Diff line number Diff line change
Expand Up @@ -396,7 +396,7 @@ Usage of vttablet:
--enable_replication_reporter
Use polling to track replication lag.
--enable_semi_sync
Enable semi-sync when configuring replication, on primary and replica tablets only (rdonly tablets will not ack).
DEPRECATED - Set the correct durability policy on the keyspace instead.
--enable_transaction_limit
If true, limit on number of transactions open at the same time will be enforced for all users. User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not.
--enable_transaction_limit_dry_run
Expand Down
1 change: 0 additions & 1 deletion go/test/endtoend/backup/vtctlbackup/backup_utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,6 @@ func LaunchCluster(setupType int, streamMode string, stripes int) (int, error) {
tablet.VttabletProcess.DbPassword = dbPassword
tablet.VttabletProcess.ExtraArgs = commonTabletArg
tablet.VttabletProcess.SupportsBackup = true
tablet.VttabletProcess.EnableSemiSync = true

if setupType == Mysqlctld {
tablet.MysqlctldProcess = *cluster.MysqlCtldProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory)
Expand Down
3 changes: 1 addition & 2 deletions go/test/endtoend/recovery/pitr/shardedpitr_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -405,7 +405,7 @@ func initializeCluster(t *testing.T) {
shard1.Vttablets = []*cluster.Vttablet{shard1Primary, shard1Replica}

clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, commonTabletArg...)
clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--restore_from_backup", "--enable_semi_sync")
clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--restore_from_backup")

err = clusterInstance.SetupCluster(keyspace, []cluster.Shard{*shard, *shard0, *shard1})
require.NoError(t, err)
Expand Down Expand Up @@ -516,7 +516,6 @@ func launchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, binlogServer *
tablet.Alias = tablet.VttabletProcess.TabletPath
tablet.VttabletProcess.SupportsBackup = true
tablet.VttabletProcess.Keyspace = restoreKeyspaceName
tablet.VttabletProcess.EnableSemiSync = true
tablet.VttabletProcess.ExtraArgs = []string{
"--disable_active_reparents",
"--enable_replication_reporter=false",
Expand Down
3 changes: 1 addition & 2 deletions go/test/endtoend/recovery/pitrtls/shardedpitr_tls_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -142,7 +142,7 @@ func initializeCluster(t *testing.T) {
shard1.Vttablets = []*cluster.Vttablet{shard1Primary, shard1Replica}

clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, commonTabletArg...)
clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--restore_from_backup", "--enable_semi_sync")
clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--restore_from_backup")

err = clusterInstance.SetupCluster(keyspace, []cluster.Shard{*shard, *shard0, *shard1})
require.NoError(t, err)
Expand Down Expand Up @@ -495,7 +495,6 @@ func tlsLaunchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, tabletForBi
tablet.Alias = tablet.VttabletProcess.TabletPath
tablet.VttabletProcess.SupportsBackup = true
tablet.VttabletProcess.Keyspace = restoreKeyspaceName
tablet.VttabletProcess.EnableSemiSync = true

certDir := path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/ssl_%010d", tablet.MysqlctlProcess.TabletUID))
tablet.VttabletProcess.ExtraArgs = []string{
Expand Down
1 change: 0 additions & 1 deletion go/test/endtoend/recovery/unshardedrecovery/recovery.go
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,6 @@ SET GLOBAL old_alter_table = ON;
tablet.VttabletProcess.ExtraArgs = append(tablet.VttabletProcess.ExtraArgs, recovery.XbArgs...)
}
tablet.VttabletProcess.SupportsBackup = true
tablet.VttabletProcess.EnableSemiSync = true

tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory)
tablet.MysqlctlProcess.InitDBFile = newInitDBFile
Expand Down
111 changes: 101 additions & 10 deletions go/test/endtoend/reparent/emergencyreparent/ers_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ import (

func TestTrivialERS(t *testing.T) {
defer cluster.PanicHandler(t)
clusterInstance := utils.SetupReparentClusterLegacy(t, true)
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

Expand All @@ -57,7 +57,7 @@ func TestTrivialERS(t *testing.T) {

func TestReparentIgnoreReplicas(t *testing.T) {
defer cluster.PanicHandler(t)
clusterInstance := utils.SetupReparentClusterLegacy(t, true)
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
var err error
Expand Down Expand Up @@ -99,7 +99,7 @@ func TestReparentIgnoreReplicas(t *testing.T) {

func TestReparentDownPrimary(t *testing.T) {
defer cluster.PanicHandler(t)
clusterInstance := utils.SetupReparentClusterLegacy(t, true)
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

Expand Down Expand Up @@ -135,7 +135,7 @@ func TestReparentDownPrimary(t *testing.T) {

func TestReparentNoChoiceDownPrimary(t *testing.T) {
defer cluster.PanicHandler(t)
clusterInstance := utils.SetupReparentClusterLegacy(t, true)
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
var err error
Expand Down Expand Up @@ -171,7 +171,7 @@ func TestReparentNoChoiceDownPrimary(t *testing.T) {
func TestSemiSyncSetupCorrectly(t *testing.T) {
t.Run("semi-sync enabled", func(t *testing.T) {
defer cluster.PanicHandler(t)
clusterInstance := utils.SetupReparentClusterLegacy(t, true)
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

Expand Down Expand Up @@ -199,7 +199,7 @@ func TestSemiSyncSetupCorrectly(t *testing.T) {

t.Run("semi-sync disabled", func(t *testing.T) {
defer cluster.PanicHandler(t)
clusterInstance := utils.SetupReparentClusterLegacy(t, false)
clusterInstance := utils.SetupReparentCluster(t, "none")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

Expand Down Expand Up @@ -229,7 +229,7 @@ func TestSemiSyncSetupCorrectly(t *testing.T) {
// TestERSPromoteRdonly tests that we never end up promoting a rdonly instance as the primary
func TestERSPromoteRdonly(t *testing.T) {
defer cluster.PanicHandler(t)
clusterInstance := utils.SetupReparentCluster(t, true)
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
var err error
Expand Down Expand Up @@ -257,7 +257,7 @@ func TestERSPromoteRdonly(t *testing.T) {
// TestERSPreventCrossCellPromotion tests that we promote a replica in the same cell as the previous primary if prevent cross cell promotion flag is set
func TestERSPreventCrossCellPromotion(t *testing.T) {
defer cluster.PanicHandler(t)
clusterInstance := utils.SetupReparentCluster(t, true)
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
var err error
Expand All @@ -280,7 +280,7 @@ func TestERSPreventCrossCellPromotion(t *testing.T) {
// caught up to it by pulling transactions from it
func TestPullFromRdonly(t *testing.T) {
defer cluster.PanicHandler(t)
clusterInstance := utils.SetupReparentCluster(t, true)
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
var err error
Expand Down Expand Up @@ -345,7 +345,7 @@ func TestPullFromRdonly(t *testing.T) {
// is stopped on the primary elect.
func TestNoReplicationStatusAndIOThreadStopped(t *testing.T) {
defer cluster.PanicHandler(t)
clusterInstance := utils.SetupReparentCluster(t, true)
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
Expand Down Expand Up @@ -442,5 +442,96 @@ func TestERSForInitialization(t *testing.T) {
strArray := utils.GetShardReplicationPositions(t, clusterInstance, utils.KeyspaceName, utils.ShardName, true)
assert.Equal(t, len(tablets), len(strArray))
assert.Contains(t, strArray[0], "primary") // primary first
}

// TestRecoverWithMultipleFailures checks that ERS can still elect a new primary
// when the old primary and one additional (rdonly) tablet go down at the same time.
func TestRecoverWithMultipleFailures(t *testing.T) {
	defer cluster.PanicHandler(t)
	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
	defer utils.TeardownCluster(clusterInstance)
	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

	// Baseline: a write on the primary reaches all three replicas.
	utils.ConfirmReplication(t, tablets[0], tablets[1:])

	// Demote tablets[1] to rdonly.
	require.NoError(t, clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly"))

	// Replication must still flow after the tablet-type change.
	utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})

	// Take down both the rdonly tablet and the primary, including their databases.
	utils.StopTablet(t, tablets[1], true)
	utils.StopTablet(t, tablets[0], true)

	// ERS is expected to succeed: the only primary-eligible tablet that is
	// unavailable is the one that is down.
	out, err := utils.Ers(clusterInstance, nil, "30s", "10s")
	require.NoError(t, err, out)

	// The promoted tablet must replicate to the two remaining replicas.
	promoted := utils.GetNewPrimary(t, clusterInstance)
	utils.ConfirmReplication(t, promoted, []*cluster.Vttablet{tablets[2], tablets[3]})
}

// TestERSFailFast tests that ERS will fail fast if it cannot find any tablet which can be safely promoted instead of promoting
// a tablet and hanging while inserting a row in the reparent journal on getting semi-sync ACKs
func TestERSFailFast(t *testing.T) {
defer cluster.PanicHandler(t)
// Cluster uses the semi_sync durability policy, so a promoted primary needs
// at least one replica able to send semi-sync ACKs to make progress.
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})

// make tablets[1] a rdonly tablet.
err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly")
require.NoError(t, err)

// Confirm that replication is still working as intended
utils.ConfirmReplication(t, tablets[0], tablets[1:])

// Run ERS in a goroutine so the test can enforce its own fail-fast deadline
// with the select/timeout below instead of waiting on the ERS timeouts.
strChan := make(chan string)
go func() {
// We expect this to fail since we have ignored all replica tablets and only the rdonly is left, which is not capable of sending semi-sync ACKs
out, err := utils.ErsIgnoreTablet(clusterInstance, tablets[2], "240s", "90s", []*cluster.Vttablet{tablets[0], tablets[3]}, false)
require.Error(t, err)
strChan <- out
}()

// ERS was given generous timeouts (240s/90s); failing fast means it must
// return the "cannot make forward progress" error well before them (60s here).
select {
case out := <-strChan:
require.Contains(t, out, "proposed primary zone1-0000000103 will not be able to make forward progress on being promoted")
case <-time.After(60 * time.Second):
require.Fail(t, "Emergency Reparent Shard did not fail in 60 seconds")
}
}

// TestReplicationStopped checks that ERS ignores the tablets that have sql thread stopped.
// If there are more than 1, we also fail.
func TestReplicationStopped(t *testing.T) {
defer cluster.PanicHandler(t)
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})

// Stop only the SQL thread on tablets[1] (IO thread keeps fetching) and stop
// replication entirely on tablets[2], leaving tablets[3] as the only fully
// replicating tablet.
err := clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[1].Alias, `STOP SLAVE SQL_THREAD;`)
require.NoError(t, err)
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[2].Alias, `STOP SLAVE;`)
require.NoError(t, err)
// Run an additional command in the current primary which will only be acked by tablets[3] and be in its relay log.
insertedVal := utils.ConfirmReplication(t, tablets[0], nil)
// Failover to tablets[3]
_, err = utils.Ers(clusterInstance, tablets[3], "60s", "30s")
require.Error(t, err, "ERS should fail with 2 replicas having replication stopped")

// Start replication back on tablet[1]
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[1].Alias, `START SLAVE;`)
require.NoError(t, err)
// Failover to tablets[3] again. This time it should succeed
out, err := utils.Ers(clusterInstance, tablets[3], "60s", "30s")
require.NoError(t, err, out)
// Verify that the tablet has the inserted value
err = utils.CheckInsertedValues(context.Background(), t, tablets[3], insertedVal)
require.NoError(t, err)
// Confirm that replication is setup correctly from tablets[3] to tablets[0]
utils.ConfirmReplication(t, tablets[3], tablets[:1])
// Confirm that tablets[2] which had replication stopped initially still has its replication stopped
utils.CheckReplicationStatus(context.Background(), t, tablets[2], false, false)
}
Loading

0 comments on commit 18d48fb

Please sign in to comment.