diff --git a/go/vt/orchestrator/db/generate_base.go b/go/vt/orchestrator/db/generate_base.go index e72fd87b868..f653927fc18 100644 --- a/go/vt/orchestrator/db/generate_base.go +++ b/go/vt/orchestrator/db/generate_base.go @@ -28,21 +28,21 @@ var generateSQLBase = []string{ version varchar(128) CHARACTER SET ascii NOT NULL, binlog_format varchar(16) CHARACTER SET ascii NOT NULL, log_bin tinyint(3) unsigned NOT NULL, - log_slave_updates tinyint(3) unsigned NOT NULL, + log_replica_updates tinyint(3) unsigned NOT NULL, binary_log_file varchar(128) CHARACTER SET ascii NOT NULL, binary_log_pos bigint(20) unsigned NOT NULL, - master_host varchar(128) CHARACTER SET ascii NOT NULL, - master_port smallint(5) unsigned NOT NULL, - slave_sql_running tinyint(3) unsigned NOT NULL, - slave_io_running tinyint(3) unsigned NOT NULL, - master_log_file varchar(128) CHARACTER SET ascii NOT NULL, - read_master_log_pos bigint(20) unsigned NOT NULL, - relay_master_log_file varchar(128) CHARACTER SET ascii NOT NULL, - exec_master_log_pos bigint(20) unsigned NOT NULL, - seconds_behind_master bigint(20) unsigned DEFAULT NULL, - slave_lag_seconds bigint(20) unsigned DEFAULT NULL, - num_slave_hosts int(10) unsigned NOT NULL, - slave_hosts text CHARACTER SET ascii NOT NULL, + source_host varchar(128) CHARACTER SET ascii NOT NULL, + source_port smallint(5) unsigned NOT NULL, + replica_sql_running tinyint(3) unsigned NOT NULL, + replica_io_running tinyint(3) unsigned NOT NULL, + source_log_file varchar(128) CHARACTER SET ascii NOT NULL, + read_source_log_pos bigint(20) unsigned NOT NULL, + relay_source_log_file varchar(128) CHARACTER SET ascii NOT NULL, + exec_source_log_pos bigint(20) unsigned NOT NULL, + replication_lag_seconds bigint(20) unsigned DEFAULT NULL, + replica_lag_seconds bigint(20) unsigned DEFAULT NULL, + num_replica_hosts int(10) unsigned NOT NULL, + replica_hosts text CHARACTER SET ascii NOT NULL, cluster_name varchar(128) CHARACTER SET ascii NOT NULL, PRIMARY KEY (hostname,port) ) ENGINE=InnoDB DEFAULT CHARSET=ascii @@ -375,8 +375,8 @@ var generateSQLBase = []string{ snapshot_unix_timestamp INT UNSIGNED NOT NULL, hostname varchar(128) CHARACTER SET ascii NOT NULL, port smallint(5) unsigned NOT NULL, - master_host varchar(128) CHARACTER SET ascii NOT NULL, - master_port smallint(5) unsigned NOT NULL, + source_host varchar(128) CHARACTER SET ascii NOT NULL, + source_port smallint(5) unsigned NOT NULL, cluster_name tinytext CHARACTER SET ascii NOT NULL, PRIMARY KEY (snapshot_unix_timestamp, hostname, port) ) ENGINE=InnoDB DEFAULT CHARSET=ascii @@ -426,8 +426,8 @@ var generateSQLBase = []string{ analysis varchar(128) NOT NULL, cluster_name varchar(128) NOT NULL, cluster_alias varchar(128) NOT NULL, - count_affected_slaves int unsigned NOT NULL, - slave_hosts text NOT NULL, + count_affected_replicas int unsigned NOT NULL, + replica_hosts text NOT NULL, PRIMARY KEY (detection_id) ) ENGINE=InnoDB DEFAULT CHARSET=ascii `, @@ -494,37 +494,37 @@ var generateSQLBase = []string{ CREATE INDEX domain_name_idx_cluster_domain_name ON cluster_domain_name (domain_name(32)) `, ` - CREATE TABLE IF NOT EXISTS master_position_equivalence ( + CREATE TABLE IF NOT EXISTS primary_position_equivalence ( equivalence_id bigint unsigned not null auto_increment, - master1_hostname varchar(128) CHARACTER SET ascii NOT NULL, - master1_port smallint(5) unsigned NOT NULL, - master1_binary_log_file varchar(128) CHARACTER SET ascii NOT NULL, - master1_binary_log_pos bigint(20) unsigned NOT NULL, - master2_hostname varchar(128) CHARACTER SET 
ascii NOT NULL, - master2_port smallint(5) unsigned NOT NULL, - master2_binary_log_file varchar(128) CHARACTER SET ascii NOT NULL, - master2_binary_log_pos bigint(20) unsigned NOT NULL, + primary1_hostname varchar(128) CHARACTER SET ascii NOT NULL, + primary1_port smallint(5) unsigned NOT NULL, + primary1_binary_log_file varchar(128) CHARACTER SET ascii NOT NULL, + primary1_binary_log_pos bigint(20) unsigned NOT NULL, + primary2_hostname varchar(128) CHARACTER SET ascii NOT NULL, + primary2_port smallint(5) unsigned NOT NULL, + primary2_binary_log_file varchar(128) CHARACTER SET ascii NOT NULL, + primary2_binary_log_pos bigint(20) unsigned NOT NULL, last_suggested TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY (equivalence_id) ) ENGINE=InnoDB DEFAULT CHARSET=ascii `, ` - DROP INDEX equivalence_uidx ON master_position_equivalence + DROP INDEX equivalence_uidx ON primary_position_equivalence `, ` - CREATE UNIQUE INDEX equivalence_uidx_master_position_equivalence ON master_position_equivalence (master1_hostname, master1_port, master1_binary_log_file, master1_binary_log_pos, master2_hostname, master2_port) + CREATE UNIQUE INDEX equivalence_uidx_primary_position_equivalence ON primary_position_equivalence (primary1_hostname, primary1_port, primary1_binary_log_file, primary1_binary_log_pos, primary2_hostname, primary2_port) `, ` - DROP INDEX master2_idx ON master_position_equivalence + DROP INDEX primary2_idx ON primary_position_equivalence `, ` - CREATE INDEX master2_idx_master_position_equivalence ON master_position_equivalence (master2_hostname, master2_port, master2_binary_log_file, master2_binary_log_pos) + CREATE INDEX primary2_idx_primary_position_equivalence ON primary_position_equivalence (primary2_hostname, primary2_port, primary2_binary_log_file, primary2_binary_log_pos) `, ` - DROP INDEX last_suggested_idx ON master_position_equivalence + DROP INDEX last_suggested_idx ON primary_position_equivalence `, ` - CREATE INDEX last_suggested_idx_master_position_equivalence ON master_position_equivalence (last_suggested) + CREATE INDEX last_suggested_idx_primary_position_equivalence ON primary_position_equivalence (last_suggested) `, ` CREATE TABLE IF NOT EXISTS async_request ( @@ -859,7 +859,7 @@ var generateSQLBase = []string{ shard varchar(128) CHARACTER SET ascii NOT NULL, cell varchar(128) CHARACTER SET ascii NOT NULL, tablet_type smallint(5) NOT NULL, - master_timestamp timestamp NOT NULL, + primary_timestamp timestamp NOT NULL, info varchar(512) CHARACTER SET ascii NOT NULL, PRIMARY KEY (hostname, port) ) ENGINE=InnoDB DEFAULT CHARSET=ascii diff --git a/go/vt/orchestrator/db/generate_patches.go b/go/vt/orchestrator/db/generate_patches.go index e1c198176df..115d7e17067 100644 --- a/go/vt/orchestrator/db/generate_patches.go +++ b/go/vt/orchestrator/db/generate_patches.go @@ -27,7 +27,7 @@ var generateSQLPatches = []string{ ` ALTER TABLE database_instance - ADD COLUMN last_sql_error TEXT NOT NULL AFTER exec_master_log_pos + ADD COLUMN last_sql_error TEXT NOT NULL AFTER exec_source_log_pos `, ` ALTER TABLE @@ -37,7 +37,7 @@ var generateSQLPatches = []string{ ` ALTER TABLE database_instance - ADD COLUMN oracle_gtid TINYINT UNSIGNED NOT NULL AFTER slave_io_running + ADD COLUMN oracle_gtid TINYINT UNSIGNED NOT NULL AFTER replica_io_running `, ` ALTER TABLE @@ -47,7 +47,7 @@ var generateSQLPatches = []string{ ` ALTER TABLE database_instance - ADD COLUMN relay_log_file varchar(128) CHARACTER SET ascii NOT NULL AFTER exec_master_log_pos + ADD COLUMN relay_log_file varchar(128) 
CHARACTER SET ascii NOT NULL AFTER exec_source_log_pos `, ` ALTER TABLE @@ -55,12 +55,12 @@ var generateSQLPatches = []string{ ADD COLUMN relay_log_pos bigint unsigned NOT NULL AFTER relay_log_file `, ` - DROP INDEX master_host_port_idx ON database_instance + DROP INDEX source_host_port_idx ON database_instance `, ` ALTER TABLE database_instance - ADD INDEX master_host_port_idx_database_instance (master_host, master_port) + ADD INDEX source_host_port_idx_database_instance (source_host, source_port) `, ` ALTER TABLE @@ -75,7 +75,7 @@ var generateSQLPatches = []string{ ` ALTER TABLE database_instance - ADD COLUMN has_replication_filters TINYINT UNSIGNED NOT NULL AFTER slave_io_running + ADD COLUMN has_replication_filters TINYINT UNSIGNED NOT NULL AFTER replica_io_running `, ` ALTER TABLE @@ -105,7 +105,7 @@ var generateSQLPatches = []string{ ` ALTER TABLE database_instance - ADD COLUMN is_co_master TINYINT UNSIGNED NOT NULL AFTER replication_depth + ADD COLUMN is_co_primary TINYINT UNSIGNED NOT NULL AFTER replication_depth `, ` ALTER TABLE @@ -115,7 +115,7 @@ var generateSQLPatches = []string{ ` ALTER TABLE database_instance - ADD COLUMN sql_delay INT UNSIGNED NOT NULL AFTER slave_lag_seconds + ADD COLUMN sql_delay INT UNSIGNED NOT NULL AFTER replica_lag_seconds `, ` ALTER TABLE @@ -135,12 +135,12 @@ var generateSQLPatches = []string{ ` ALTER TABLE topology_recovery - ADD COLUMN count_affected_slaves int unsigned NOT NULL + ADD COLUMN count_affected_replicas int unsigned NOT NULL `, ` ALTER TABLE topology_recovery - ADD COLUMN slave_hosts text CHARACTER SET ascii NOT NULL + ADD COLUMN replica_hosts text CHARACTER SET ascii NOT NULL `, ` ALTER TABLE hostname_unresolve @@ -222,17 +222,17 @@ var generateSQLPatches = []string{ ` ALTER TABLE topology_recovery - ADD COLUMN participating_instances text CHARACTER SET ascii NOT NULL after slave_hosts + ADD COLUMN participating_instances text CHARACTER SET ascii NOT NULL after replica_hosts `, ` ALTER TABLE topology_recovery - ADD COLUMN lost_slaves text CHARACTER SET ascii NOT NULL after participating_instances + ADD COLUMN lost_replicas text CHARACTER SET ascii NOT NULL after participating_instances `, ` ALTER TABLE topology_recovery - ADD COLUMN all_errors text CHARACTER SET ascii NOT NULL after lost_slaves + ADD COLUMN all_errors text CHARACTER SET ascii NOT NULL after lost_replicas `, ` ALTER TABLE audit @@ -288,7 +288,7 @@ var generateSQLPatches = []string{ MODIFY last_suggested timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP `, ` - ALTER TABLE master_position_equivalence /* sqlite3-skip */ + ALTER TABLE primary_position_equivalence /* sqlite3-skip */ MODIFY last_suggested timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP `, ` @@ -493,7 +493,7 @@ var generateSQLPatches = []string{ ` ALTER TABLE database_instance - ADD COLUMN semi_sync_master_enabled TINYINT UNSIGNED NOT NULL + ADD COLUMN semi_sync_primary_enabled TINYINT UNSIGNED NOT NULL `, ` ALTER TABLE @@ -513,7 +513,7 @@ var generateSQLPatches = []string{ ` ALTER TABLE database_instance - ADD COLUMN master_uuid varchar(64) CHARACTER SET ascii NOT NULL AFTER oracle_gtid + ADD COLUMN source_uuid varchar(64) CHARACTER SET ascii NOT NULL AFTER oracle_gtid `, ` ALTER TABLE @@ -523,12 +523,12 @@ var generateSQLPatches = []string{ ` ALTER TABLE database_instance - ADD COLUMN ancestry_uuid text CHARACTER SET ascii NOT NULL AFTER master_uuid + ADD COLUMN ancestry_uuid text CHARACTER SET ascii NOT NULL AFTER source_uuid `, ` ALTER TABLE database_instance - ADD COLUMN replication_sql_thread_state tinyint 
signed not null default 0 AFTER slave_io_running + ADD COLUMN replication_sql_thread_state tinyint signed not null default 0 AFTER replica_io_running `, ` ALTER TABLE @@ -549,27 +549,27 @@ var generateSQLPatches = []string{ ` ALTER TABLE database_instance - ADD COLUMN semi_sync_master_timeout INT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_master_enabled + ADD COLUMN semi_sync_primary_timeout INT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_enabled `, ` ALTER TABLE database_instance - ADD COLUMN semi_sync_master_wait_for_slave_count INT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_master_timeout + ADD COLUMN semi_sync_primary_wait_for_replica_count INT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_timeout `, ` ALTER TABLE database_instance - ADD COLUMN semi_sync_master_status TINYINT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_master_wait_for_slave_count + ADD COLUMN semi_sync_primary_status TINYINT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_wait_for_replica_count `, ` ALTER TABLE database_instance - ADD COLUMN semi_sync_replica_status TINYINT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_master_status + ADD COLUMN semi_sync_replica_status TINYINT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_status `, ` ALTER TABLE database_instance - ADD COLUMN semi_sync_master_clients INT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_master_status + ADD COLUMN semi_sync_primary_clients INT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_status `, ` ALTER TABLE @@ -579,7 +579,7 @@ var generateSQLPatches = []string{ ` ALTER TABLE /* sqlite3-skip */ database_instance - MODIFY semi_sync_master_timeout BIGINT UNSIGNED NOT NULL DEFAULT 0 + MODIFY semi_sync_primary_timeout BIGINT UNSIGNED NOT NULL DEFAULT 0 `, // Fields related to Replication Group the instance belongs to ` diff --git a/go/vt/orchestrator/external/golib/sqlutils/sqlite_dialect_test.go b/go/vt/orchestrator/external/golib/sqlutils/sqlite_dialect_test.go index fa11fd5e2fa..8266e77f953 100644 --- a/go/vt/orchestrator/external/golib/sqlutils/sqlite_dialect_test.go +++ b/go/vt/orchestrator/external/golib/sqlutils/sqlite_dialect_test.go @@ -84,7 +84,7 @@ func TestToSqlite3AlterTable(t *testing.T) { statement := ` ALTER TABLE database_instance - ADD COLUMN sql_delay INT UNSIGNED NOT NULL AFTER slave_lag_seconds + ADD COLUMN sql_delay INT UNSIGNED NOT NULL AFTER replica_lag_seconds ` result := stripSpaces(ToSqlite3Dialect(statement)) test.S(t).ExpectEquals(result, stripSpaces(` @@ -97,13 +97,13 @@ func TestToSqlite3AlterTable(t *testing.T) { statement := ` ALTER TABLE database_instance - ADD INDEX master_host_port_idx (master_host, master_port) + ADD INDEX source_host_port_idx (source_host, source_port) ` result := stripSpaces(ToSqlite3Dialect(statement)) test.S(t).ExpectEquals(result, stripSpaces(` create index - master_host_port_idx_database_instance - on database_instance (master_host, master_port) + source_host_port_idx_database_instance + on database_instance (source_host, source_port) `)) } { @@ -126,14 +126,14 @@ func TestCreateIndex(t *testing.T) { { statement := ` create index - master_host_port_idx_database_instance - on database_instance (master_host(128), master_port) + source_host_port_idx_database_instance + on database_instance (source_host(128), source_port) ` result := stripSpaces(ToSqlite3Dialect(statement)) test.S(t).ExpectEquals(result, stripSpaces(` create index - master_host_port_idx_database_instance - on database_instance (master_host, master_port) + source_host_port_idx_database_instance + on 
database_instance (source_host, source_port) `)) } } @@ -205,14 +205,14 @@ func TestToSqlite3GeneralConversions(t *testing.T) { test.S(t).ExpectEquals(result, "select datetime(some_table.some_column, printf('+%d minute', ?))") } { - statement := "AND master_instance.last_attempted_check <= master_instance.last_seen + interval ? minute" + statement := "AND primary_instance.last_attempted_check <= primary_instance.last_seen + interval ? minute" result := ToSqlite3Dialect(statement) - test.S(t).ExpectEquals(result, "AND master_instance.last_attempted_check <= datetime(master_instance.last_seen, printf('+%d minute', ?))") + test.S(t).ExpectEquals(result, "AND primary_instance.last_attempted_check <= datetime(primary_instance.last_seen, printf('+%d minute', ?))") } { - statement := "select concat(master_instance.port, '') as port" + statement := "select concat(primary_instance.port, '') as port" result := ToSqlite3Dialect(statement) - test.S(t).ExpectEquals(result, "select (master_instance.port || '') as port") + test.S(t).ExpectEquals(result, "select (primary_instance.port || '') as port") } { statement := "select concat( 'abc' , 'def') as s" diff --git a/go/vt/orchestrator/inst/analysis_dao.go b/go/vt/orchestrator/inst/analysis_dao.go index b9c3694c782..cc05aa219d1 100644 --- a/go/vt/orchestrator/inst/analysis_dao.go +++ b/go/vt/orchestrator/inst/analysis_dao.go @@ -72,22 +72,22 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) vitess_tablet.hostname, vitess_tablet.port, vitess_tablet.tablet_type, - vitess_tablet.master_timestamp, - master_instance.read_only AS read_only, - MIN(master_instance.data_center) AS data_center, - MIN(master_instance.region) AS region, - MIN(master_instance.physical_environment) AS physical_environment, - MIN(master_instance.master_host) AS master_host, - MIN(master_instance.master_port) AS master_port, - MIN(master_instance.cluster_name) AS cluster_name, - MIN(master_instance.binary_log_file) AS binary_log_file, - MIN(master_instance.binary_log_pos) AS binary_log_pos, - MIN(master_instance.suggested_cluster_alias) AS suggested_cluster_alias, - MIN(master_tablet.info) AS master_tablet_info, + vitess_tablet.primary_timestamp, + primary_instance.read_only AS read_only, + MIN(primary_instance.data_center) AS data_center, + MIN(primary_instance.region) AS region, + MIN(primary_instance.physical_environment) AS physical_environment, + MIN(primary_instance.source_host) AS source_host, + MIN(primary_instance.source_port) AS source_port, + MIN(primary_instance.cluster_name) AS cluster_name, + MIN(primary_instance.binary_log_file) AS binary_log_file, + MIN(primary_instance.binary_log_pos) AS binary_log_pos, + MIN(primary_instance.suggested_cluster_alias) AS suggested_cluster_alias, + MIN(primary_tablet.info) AS primary_tablet_info, MIN( IFNULL( - master_instance.binary_log_file = database_instance_stale_binlog_coordinates.binary_log_file - AND master_instance.binary_log_pos = database_instance_stale_binlog_coordinates.binary_log_pos + primary_instance.binary_log_file = database_instance_stale_binlog_coordinates.binary_log_file + AND primary_instance.binary_log_pos = database_instance_stale_binlog_coordinates.binary_log_pos AND database_instance_stale_binlog_coordinates.first_seen < NOW() - interval ? 
second, 0 ) @@ -95,42 +95,42 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) MIN( IFNULL( cluster_alias.alias, - master_instance.cluster_name + primary_instance.cluster_name ) ) AS cluster_alias, MIN( IFNULL( cluster_domain_name.domain_name, - master_instance.cluster_name + primary_instance.cluster_name ) ) AS cluster_domain, MIN( - master_instance.last_checked <= master_instance.last_seen - and master_instance.last_attempted_check <= master_instance.last_seen + interval ? second + primary_instance.last_checked <= primary_instance.last_seen + and primary_instance.last_attempted_check <= primary_instance.last_seen + interval ? second ) = 1 AS is_last_check_valid, /* To be considered a primary, traditional async replication must not be present/valid AND the host should either */ /* not be a replication group member OR be the primary of the replication group */ - MIN(master_instance.last_check_partial_success) as last_check_partial_success, + MIN(primary_instance.last_check_partial_success) as last_check_partial_success, MIN( ( - master_instance.master_host IN ('', '_') - OR master_instance.master_port = 0 - OR substr(master_instance.master_host, 1, 2) = '//' + primary_instance.source_host IN ('', '_') + OR primary_instance.source_port = 0 + OR substr(primary_instance.source_host, 1, 2) = '//' ) AND ( - master_instance.replication_group_name = '' - OR master_instance.replication_group_member_role = 'PRIMARY' + primary_instance.replication_group_name = '' + OR primary_instance.replication_group_member_role = 'PRIMARY' ) - ) AS is_master, - MIN(master_instance.is_co_master) AS is_co_master, + ) AS is_primary, + MIN(primary_instance.is_co_primary) AS is_co_primary, MIN( CONCAT( - master_instance.hostname, + primary_instance.hostname, ':', - master_instance.port - ) = master_instance.cluster_name - ) AS is_cluster_master, - MIN(master_instance.gtid_mode) AS gtid_mode, + primary_instance.port + ) = primary_instance.cluster_name + ) AS is_cluster_primary, + MIN(primary_instance.gtid_mode) AS gtid_mode, COUNT(replica_instance.server_id) AS count_replicas, IFNULL( SUM( @@ -141,72 +141,72 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) IFNULL( SUM( replica_instance.last_checked <= replica_instance.last_seen - AND replica_instance.slave_io_running != 0 - AND replica_instance.slave_sql_running != 0 + AND replica_instance.replica_io_running != 0 + AND replica_instance.replica_sql_running != 0 ), 0 ) AS count_valid_replicating_replicas, IFNULL( SUM( replica_instance.last_checked <= replica_instance.last_seen - AND replica_instance.slave_io_running = 0 + AND replica_instance.replica_io_running = 0 AND replica_instance.last_io_error like '%%error %%connecting to master%%' - AND replica_instance.slave_sql_running = 1 + AND replica_instance.replica_sql_running = 1 ), 0 - ) AS count_replicas_failing_to_connect_to_master, - MIN(master_instance.replication_depth) AS replication_depth, + ) AS count_replicas_failing_to_connect_to_primary, + MIN(primary_instance.replication_depth) AS replication_depth, GROUP_CONCAT( concat( replica_instance.Hostname, ':', replica_instance.Port ) - ) as slave_hosts, + ) as replica_hosts, MIN( - master_instance.slave_sql_running = 1 - AND master_instance.slave_io_running = 0 - AND master_instance.last_io_error like '%%error %%connecting to master%%' - ) AS is_failing_to_connect_to_master, + primary_instance.replica_sql_running = 1 + AND primary_instance.replica_io_running = 0 + AND primary_instance.last_io_error 
like '%%error %%connecting to master%%' + ) AS is_failing_to_connect_to_primary, MIN( - master_instance.slave_sql_running = 0 - AND master_instance.slave_io_running = 0 + primary_instance.replica_sql_running = 0 + AND primary_instance.replica_io_running = 0 ) AS replication_stopped, MIN( - master_downtime.downtime_active is not null - and ifnull(master_downtime.end_timestamp, now()) > now() + primary_downtime.downtime_active is not null + and ifnull(primary_downtime.end_timestamp, now()) > now() ) AS is_downtimed, MIN( - IFNULL(master_downtime.end_timestamp, '') + IFNULL(primary_downtime.end_timestamp, '') ) AS downtime_end_timestamp, MIN( IFNULL( - unix_timestamp() - unix_timestamp(master_downtime.end_timestamp), + unix_timestamp() - unix_timestamp(primary_downtime.end_timestamp), 0 ) ) AS downtime_remaining_seconds, MIN( - master_instance.binlog_server + primary_instance.binlog_server ) AS is_binlog_server, MIN( - master_instance.supports_oracle_gtid + primary_instance.supports_oracle_gtid ) AS supports_oracle_gtid, MIN( - master_instance.semi_sync_master_enabled - ) AS semi_sync_master_enabled, + primary_instance.semi_sync_primary_enabled + ) AS semi_sync_primary_enabled, MIN( - master_instance.semi_sync_master_wait_for_slave_count - ) AS semi_sync_master_wait_for_slave_count, + primary_instance.semi_sync_primary_wait_for_replica_count + ) AS semi_sync_primary_wait_for_replica_count, MIN( - master_instance.semi_sync_master_clients - ) AS semi_sync_master_clients, + primary_instance.semi_sync_primary_clients + ) AS semi_sync_primary_clients, MIN( - master_instance.semi_sync_master_status - ) AS semi_sync_master_status, + primary_instance.semi_sync_primary_status + ) AS semi_sync_primary_status, MIN( - master_instance.semi_sync_replica_enabled + primary_instance.semi_sync_replica_enabled ) AS semi_sync_replica_enabled, - SUM(replica_instance.is_co_master) AS count_co_master_replicas, + SUM(replica_instance.is_co_primary) AS count_co_primary_replicas, SUM(replica_instance.oracle_gtid) AS count_oracle_gtid_replicas, IFNULL( SUM( @@ -236,7 +236,7 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) 0 ) AS count_valid_semi_sync_replicas, MIN( - master_instance.mariadb_gtid + primary_instance.mariadb_gtid ) AS is_mariadb_gtid, SUM(replica_instance.mariadb_gtid) AS count_mariadb_gtid_replicas, IFNULL( @@ -249,14 +249,14 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) IFNULL( SUM( replica_instance.log_bin - AND replica_instance.log_slave_updates + AND replica_instance.log_replica_updates ), 0 ) AS count_logging_replicas, IFNULL( SUM( replica_instance.log_bin - AND replica_instance.log_slave_updates + AND replica_instance.log_replica_updates AND replica_instance.binlog_format = 'STATEMENT' ), 0 @@ -264,7 +264,7 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) IFNULL( SUM( replica_instance.log_bin - AND replica_instance.log_slave_updates + AND replica_instance.log_replica_updates AND replica_instance.binlog_format = 'MIXED' ), 0 @@ -272,7 +272,7 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) IFNULL( SUM( replica_instance.log_bin - AND replica_instance.log_slave_updates + AND replica_instance.log_replica_updates AND replica_instance.binlog_format = 'ROW' ), 0 @@ -282,7 +282,7 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) 0 ) AS count_delayed_replicas, IFNULL( - SUM(replica_instance.slave_lag_seconds > ?), + 
SUM(replica_instance.replica_lag_seconds > ?), 0 ) AS count_lagging_replicas, IFNULL(MIN(replica_instance.gtid_mode), '') AS min_replica_gtid_mode, @@ -303,41 +303,41 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) ) AS count_downtimed_replicas, COUNT( DISTINCT case when replica_instance.log_bin - AND replica_instance.log_slave_updates then replica_instance.major_version else NULL end + AND replica_instance.log_replica_updates then replica_instance.major_version else NULL end ) AS count_distinct_logging_major_versions FROM vitess_tablet - LEFT JOIN database_instance master_instance ON ( - vitess_tablet.hostname = master_instance.hostname - AND vitess_tablet.port = master_instance.port + LEFT JOIN database_instance primary_instance ON ( + vitess_tablet.hostname = primary_instance.hostname + AND vitess_tablet.port = primary_instance.port ) - LEFT JOIN vitess_tablet master_tablet ON ( - master_tablet.hostname = master_instance.master_host - AND master_tablet.port = master_instance.master_port + LEFT JOIN vitess_tablet primary_tablet ON ( + primary_tablet.hostname = primary_instance.source_host + AND primary_tablet.port = primary_instance.source_port ) LEFT JOIN hostname_resolve ON ( - master_instance.hostname = hostname_resolve.hostname + primary_instance.hostname = hostname_resolve.hostname ) LEFT JOIN database_instance replica_instance ON ( COALESCE( hostname_resolve.resolved_hostname, - master_instance.hostname - ) = replica_instance.master_host - AND master_instance.port = replica_instance.master_port + primary_instance.hostname + ) = replica_instance.source_host + AND primary_instance.port = replica_instance.source_port ) LEFT JOIN database_instance_maintenance ON ( - master_instance.hostname = database_instance_maintenance.hostname - AND master_instance.port = database_instance_maintenance.port + primary_instance.hostname = database_instance_maintenance.hostname + AND primary_instance.port = database_instance_maintenance.port AND database_instance_maintenance.maintenance_active = 1 ) LEFT JOIN database_instance_stale_binlog_coordinates ON ( - master_instance.hostname = database_instance_stale_binlog_coordinates.hostname - AND master_instance.port = database_instance_stale_binlog_coordinates.port + primary_instance.hostname = database_instance_stale_binlog_coordinates.hostname + AND primary_instance.port = database_instance_stale_binlog_coordinates.port ) - LEFT JOIN database_instance_downtime as master_downtime ON ( - master_instance.hostname = master_downtime.hostname - AND master_instance.port = master_downtime.port - AND master_downtime.downtime_active = 1 + LEFT JOIN database_instance_downtime as primary_downtime ON ( + primary_instance.hostname = primary_downtime.hostname + AND primary_instance.port = primary_downtime.port + AND primary_downtime.downtime_active = 1 ) LEFT JOIN database_instance_downtime as replica_downtime ON ( replica_instance.hostname = replica_downtime.hostname @@ -345,20 +345,20 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) AND replica_downtime.downtime_active = 1 ) LEFT JOIN cluster_alias ON ( - cluster_alias.cluster_name = master_instance.cluster_name + cluster_alias.cluster_name = primary_instance.cluster_name ) LEFT JOIN cluster_domain_name ON ( - cluster_domain_name.cluster_name = master_instance.cluster_name + cluster_domain_name.cluster_name = primary_instance.cluster_name ) WHERE database_instance_maintenance.database_instance_maintenance_id IS NULL - AND ? 
IN ('', master_instance.cluster_name) + AND ? IN ('', primary_instance.cluster_name) GROUP BY vitess_tablet.hostname, vitess_tablet.port ORDER BY vitess_tablet.tablet_type ASC, - vitess_tablet.master_timestamp DESC + vitess_tablet.primary_timestamp DESC ` clusters := make(map[string]*clusterAnalysis) @@ -376,7 +376,7 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) } primaryTablet := &topodatapb.Tablet{} - if str := m.GetString("master_tablet_info"); str != "" { + if str := m.GetString("primary_tablet_info"); str != "" { if err := prototext.Unmarshal([]byte(str), primaryTablet); err != nil { log.Errorf("could not read tablet %v: %v", str, err) return nil @@ -384,13 +384,13 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) } a.TabletType = tablet.Type - a.PrimaryTimeStamp = m.GetTime("master_timestamp") + a.PrimaryTimeStamp = m.GetTime("primary_timestamp") - a.IsPrimary = m.GetBool("is_master") - countCoPrimaryReplicas := m.GetUint("count_co_master_replicas") - a.IsCoPrimary = m.GetBool("is_co_master") || (countCoPrimaryReplicas > 0) + a.IsPrimary = m.GetBool("is_primary") + countCoPrimaryReplicas := m.GetUint("count_co_primary_replicas") + a.IsCoPrimary = m.GetBool("is_co_primary") || (countCoPrimaryReplicas > 0) a.AnalyzedInstanceKey = InstanceKey{Hostname: m.GetString("hostname"), Port: m.GetInt("port")} - a.AnalyzedInstancePrimaryKey = InstanceKey{Hostname: m.GetString("master_host"), Port: m.GetInt("master_port")} + a.AnalyzedInstancePrimaryKey = InstanceKey{Hostname: m.GetString("source_host"), Port: m.GetInt("source_port")} a.AnalyzedInstanceDataCenter = m.GetString("data_center") a.AnalyzedInstanceRegion = m.GetString("region") a.AnalyzedInstancePhysicalEnvironment = m.GetString("physical_environment") @@ -410,10 +410,10 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) a.CountReplicas = m.GetUint("count_replicas") a.CountValidReplicas = m.GetUint("count_valid_replicas") a.CountValidReplicatingReplicas = m.GetUint("count_valid_replicating_replicas") - a.CountReplicasFailingToConnectToPrimary = m.GetUint("count_replicas_failing_to_connect_to_master") + a.CountReplicasFailingToConnectToPrimary = m.GetUint("count_replicas_failing_to_connect_to_primary") a.CountDowntimedReplicas = m.GetUint("count_downtimed_replicas") a.ReplicationDepth = m.GetUint("replication_depth") - a.IsFailingToConnectToPrimary = m.GetBool("is_failing_to_connect_to_master") + a.IsFailingToConnectToPrimary = m.GetBool("is_failing_to_connect_to_primary") a.ReplicationStopped = m.GetBool("replication_stopped") a.IsDowntimed = m.GetBool("is_downtimed") a.DowntimeEndTimestamp = m.GetString("downtime_end_timestamp") @@ -422,7 +422,7 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) a.ClusterDetails.ReadRecoveryInfo() a.Replicas = *NewInstanceKeyMap() - a.Replicas.ReadCommaDelimitedList(m.GetString("slave_hosts")) + a.Replicas.ReadCommaDelimitedList(m.GetString("replica_hosts")) countValidOracleGTIDReplicas := m.GetUint("count_valid_oracle_gtid_replicas") a.OracleGTIDImmediateTopology = countValidOracleGTIDReplicas == a.CountValidReplicas && a.CountValidReplicas > 0 @@ -430,13 +430,13 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) a.MariaDBGTIDImmediateTopology = countValidMariaDBGTIDReplicas == a.CountValidReplicas && a.CountValidReplicas > 0 countValidBinlogServerReplicas := m.GetUint("count_valid_binlog_server_replicas") 
a.BinlogServerImmediateTopology = countValidBinlogServerReplicas == a.CountValidReplicas && a.CountValidReplicas > 0 - a.SemiSyncPrimaryEnabled = m.GetBool("semi_sync_master_enabled") - a.SemiSyncPrimaryStatus = m.GetBool("semi_sync_master_status") + a.SemiSyncPrimaryEnabled = m.GetBool("semi_sync_primary_enabled") + a.SemiSyncPrimaryStatus = m.GetBool("semi_sync_primary_status") a.SemiSyncReplicaEnabled = m.GetBool("semi_sync_replica_enabled") a.CountSemiSyncReplicasEnabled = m.GetUint("count_semi_sync_replicas") // countValidSemiSyncReplicasEnabled := m.GetUint("count_valid_semi_sync_replicas") - a.SemiSyncPrimaryWaitForReplicaCount = m.GetUint("semi_sync_master_wait_for_slave_count") - a.SemiSyncPrimaryClients = m.GetUint("semi_sync_master_clients") + a.SemiSyncPrimaryWaitForReplicaCount = m.GetUint("semi_sync_primary_wait_for_replica_count") + a.SemiSyncPrimaryClients = m.GetUint("semi_sync_primary_clients") a.MinReplicaGTIDMode = m.GetString("min_replica_gtid_mode") a.MaxReplicaGTIDMode = m.GetString("max_replica_gtid_mode") diff --git a/go/vt/orchestrator/inst/cluster_alias_dao.go b/go/vt/orchestrator/inst/cluster_alias_dao.go index 6043b756e7f..fd58c52032f 100644 --- a/go/vt/orchestrator/inst/cluster_alias_dao.go +++ b/go/vt/orchestrator/inst/cluster_alias_dao.go @@ -134,7 +134,7 @@ func UpdateClusterAliases() error { order by ifnull(last_checked <= last_seen, 0) asc, read_only desc, - num_slave_hosts asc + num_replica_hosts asc `, DowntimeLostInRecoveryMessage) return log.Errore(err) } diff --git a/go/vt/orchestrator/inst/instance.go b/go/vt/orchestrator/inst/instance.go index 29f8f6e9617..1f6e7dd453a 100644 --- a/go/vt/orchestrator/inst/instance.go +++ b/go/vt/orchestrator/inst/instance.go @@ -567,7 +567,7 @@ func (this *Instance) descriptionTokens() (tokens []string) { extraTokens = append(extraTokens, token) } if this.SemiSyncPrimaryStatus { - extraTokens = append(extraTokens, "semi:master") + extraTokens = append(extraTokens, "semi:primary") } if this.SemiSyncReplicaStatus { extraTokens = append(extraTokens, "semi:replica") diff --git a/go/vt/orchestrator/inst/instance_dao.go b/go/vt/orchestrator/inst/instance_dao.go index e67f42e11c5..8f0de503743 100644 --- a/go/vt/orchestrator/inst/instance_dao.go +++ b/go/vt/orchestrator/inst/instance_dao.go @@ -825,8 +825,8 @@ func ReadInstanceClusterAttributes(instance *Instance) (err error) { cluster_name, suggested_cluster_alias, replication_depth, - master_host, - master_port, + source_host, + source_port, ancestry_uuid, executed_gtid_set from database_instance @@ -844,8 +844,8 @@ func ReadInstanceClusterAttributes(instance *Instance) (err error) { err = db.QueryOrchestrator(query, args, func(m sqlutils.RowMap) error { primaryOrGroupPrimaryClusterName = m.GetString("cluster_name") primaryOrGroupPrimaryReplicationDepth = m.GetUint("replication_depth") - primaryOrGroupPrimaryInstanceKey.Hostname = m.GetString("master_host") - primaryOrGroupPrimaryInstanceKey.Port = m.GetInt("master_port") + primaryOrGroupPrimaryInstanceKey.Hostname = m.GetString("source_host") + primaryOrGroupPrimaryInstanceKey.Port = m.GetInt("source_port") ancestryUUID = m.GetString("ancestry_uuid") primaryOrGroupPrimaryExecutedGtidSet = m.GetString("executed_gtid_set") primaryOrGroupPrimaryDataFound = true @@ -964,18 +964,18 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.Binlog_format = m.GetString("binlog_format") instance.BinlogRowImage = m.GetString("binlog_row_image") instance.LogBinEnabled = m.GetBool("log_bin") - 
instance.LogReplicationUpdatesEnabled = m.GetBool("log_slave_updates") - instance.SourceKey.Hostname = m.GetString("master_host") - instance.SourceKey.Port = m.GetInt("master_port") + instance.LogReplicationUpdatesEnabled = m.GetBool("log_replica_updates") + instance.SourceKey.Hostname = m.GetString("source_host") + instance.SourceKey.Port = m.GetInt("source_port") instance.IsDetachedPrimary = instance.SourceKey.IsDetached() - instance.ReplicationSQLThreadRuning = m.GetBool("slave_sql_running") - instance.ReplicationIOThreadRuning = m.GetBool("slave_io_running") + instance.ReplicationSQLThreadRuning = m.GetBool("replica_sql_running") + instance.ReplicationIOThreadRuning = m.GetBool("replica_io_running") instance.ReplicationSQLThreadState = ReplicationThreadState(m.GetInt("replication_sql_thread_state")) instance.ReplicationIOThreadState = ReplicationThreadState(m.GetInt("replication_io_thread_state")) instance.HasReplicationFilters = m.GetBool("has_replication_filters") instance.SupportsOracleGTID = m.GetBool("supports_oracle_gtid") instance.UsingOracleGTID = m.GetBool("oracle_gtid") - instance.SourceUUID = m.GetString("master_uuid") + instance.SourceUUID = m.GetString("source_uuid") instance.AncestryUUID = m.GetString("ancestry_uuid") instance.ExecutedGtidSet = m.GetString("executed_gtid_set") instance.GTIDMode = m.GetString("gtid_mode") @@ -984,20 +984,20 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.UsingMariaDBGTID = m.GetBool("mariadb_gtid") instance.SelfBinlogCoordinates.LogFile = m.GetString("binary_log_file") instance.SelfBinlogCoordinates.LogPos = m.GetInt64("binary_log_pos") - instance.ReadBinlogCoordinates.LogFile = m.GetString("master_log_file") - instance.ReadBinlogCoordinates.LogPos = m.GetInt64("read_master_log_pos") - instance.ExecBinlogCoordinates.LogFile = m.GetString("relay_master_log_file") - instance.ExecBinlogCoordinates.LogPos = m.GetInt64("exec_master_log_pos") + instance.ReadBinlogCoordinates.LogFile = m.GetString("source_log_file") + instance.ReadBinlogCoordinates.LogPos = m.GetInt64("read_source_log_pos") + instance.ExecBinlogCoordinates.LogFile = m.GetString("relay_source_log_file") + instance.ExecBinlogCoordinates.LogPos = m.GetInt64("exec_source_log_pos") instance.IsDetached, _ = instance.ExecBinlogCoordinates.ExtractDetachedCoordinates() instance.RelaylogCoordinates.LogFile = m.GetString("relay_log_file") instance.RelaylogCoordinates.LogPos = m.GetInt64("relay_log_pos") instance.RelaylogCoordinates.Type = RelayLog instance.LastSQLError = m.GetString("last_sql_error") instance.LastIOError = m.GetString("last_io_error") - instance.SecondsBehindPrimary = m.GetNullInt64("seconds_behind_master") - instance.ReplicationLagSeconds = m.GetNullInt64("slave_lag_seconds") + instance.SecondsBehindPrimary = m.GetNullInt64("replication_lag_seconds") + instance.ReplicationLagSeconds = m.GetNullInt64("replica_lag_seconds") instance.SQLDelay = m.GetUint("sql_delay") - replicasJSON := m.GetString("slave_hosts") + replicasJSON := m.GetString("replica_hosts") instance.ClusterName = m.GetString("cluster_name") instance.SuggestedClusterAlias = m.GetString("suggested_cluster_alias") instance.DataCenter = m.GetString("data_center") @@ -1005,15 +1005,15 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.PhysicalEnvironment = m.GetString("physical_environment") instance.SemiSyncEnforced = m.GetBool("semi_sync_enforced") instance.SemiSyncAvailable = m.GetBool("semi_sync_available") - instance.SemiSyncPrimaryEnabled = m.GetBool("semi_sync_master_enabled") - 
instance.SemiSyncPrimaryTimeout = m.GetUint64("semi_sync_master_timeout") - instance.SemiSyncPrimaryWaitForReplicaCount = m.GetUint("semi_sync_master_wait_for_slave_count") + instance.SemiSyncPrimaryEnabled = m.GetBool("semi_sync_primary_enabled") + instance.SemiSyncPrimaryTimeout = m.GetUint64("semi_sync_primary_timeout") + instance.SemiSyncPrimaryWaitForReplicaCount = m.GetUint("semi_sync_primary_wait_for_replica_count") instance.SemiSyncReplicaEnabled = m.GetBool("semi_sync_replica_enabled") - instance.SemiSyncPrimaryStatus = m.GetBool("semi_sync_master_status") - instance.SemiSyncPrimaryClients = m.GetUint("semi_sync_master_clients") + instance.SemiSyncPrimaryStatus = m.GetBool("semi_sync_primary_status") + instance.SemiSyncPrimaryClients = m.GetUint("semi_sync_primary_clients") instance.SemiSyncReplicaStatus = m.GetBool("semi_sync_replica_status") instance.ReplicationDepth = m.GetUint("replication_depth") - instance.IsCoPrimary = m.GetBool("is_co_master") + instance.IsCoPrimary = m.GetBool("is_co_primary") instance.ReplicationCredentialsAvailable = m.GetBool("replication_credentials_available") instance.HasReplicationCredentials = m.GetBool("has_replication_credentials") instance.IsUpToDate = (m.GetUint("seconds_since_last_checked") <= config.Config.InstancePollSeconds) @@ -1161,7 +1161,7 @@ func ReadClusterWriteablePrimary(clusterName string) ([](*Instance), error) { condition := ` cluster_name = ? and read_only = 0 - and (replication_depth = 0 or is_co_master) + and (replication_depth = 0 or is_co_primary) ` return readInstancesByCondition(condition, sqlutils.Args(clusterName), "replication_depth asc") } @@ -1172,7 +1172,7 @@ func ReadClusterWriteablePrimary(clusterName string) ([](*Instance), error) { func ReadClusterPrimary(clusterName string) ([](*Instance), error) { condition := ` cluster_name = ? - and (replication_depth = 0 or is_co_master) + and (replication_depth = 0 or is_co_primary) ` return readInstancesByCondition(condition, sqlutils.Args(clusterName), "read_only asc, replication_depth asc") } @@ -1182,7 +1182,7 @@ func ReadClusterPrimary(clusterName string) ([](*Instance), error) { func ReadWriteableClustersPrimaries() (instances [](*Instance), err error) { condition := ` read_only = 0 - and (replication_depth = 0 or is_co_master) + and (replication_depth = 0 or is_co_primary) ` allPrimaries, err := readInstancesByCondition(condition, sqlutils.Args(), "cluster_name asc, replication_depth asc") if err != nil { @@ -1207,8 +1207,8 @@ func ReadClusterAliasInstances(clusterAlias string) ([](*Instance), error) { // ReadReplicaInstances reads replicas of a given primary func ReadReplicaInstances(primaryKey *InstanceKey) ([](*Instance), error) { condition := ` - master_host = ? - and master_port = ? + source_host = ? + and source_port = ? ` return readInstancesByCondition(condition, sqlutils.Args(primaryKey.Hostname, primaryKey.Port), "") } @@ -1236,8 +1236,8 @@ func ReadReplicaInstancesIncludingBinlogServerSubReplicas(primaryKey *InstanceKe // ReadBinlogServerReplicaInstances reads direct replicas of a given primary that are binlog servers func ReadBinlogServerReplicaInstances(primaryKey *InstanceKey) ([](*Instance), error) { condition := ` - master_host = ? - and master_port = ? + source_host = ? + and source_port = ? 
and binlog_server = 1 ` return readInstancesByCondition(condition, sqlutils.Args(primaryKey.Hostname, primaryKey.Port), "") @@ -1258,8 +1258,8 @@ func ReadProblemInstances(clusterName string) ([](*Instance), error) { or (unix_timestamp() - unix_timestamp(last_checked) > ?) or (replication_sql_thread_state not in (-1 ,1)) or (replication_io_thread_state not in (-1 ,1)) - or (abs(cast(seconds_behind_master as signed) - cast(sql_delay as signed)) > ?) - or (abs(cast(slave_lag_seconds as signed) - cast(sql_delay as signed)) > ?) + or (abs(cast(replication_lag_seconds as signed) - cast(sql_delay as signed)) > ?) + or (abs(cast(replica_lag_seconds as signed) - cast(sql_delay as signed)) > ?) or (gtid_errant != '') or (replication_group_name != '' and replication_group_member_state != 'ONLINE') ) @@ -1300,7 +1300,7 @@ func SearchInstances(searchString string) ([](*Instance), error) { or concat(port, '') = ? ` args := sqlutils.Args(searchString, searchString, searchString, searchString, searchString, searchString, searchString, searchString) - return readInstancesByCondition(condition, args, `replication_depth asc, num_slave_hosts desc, cluster_name, hostname, port`) + return readInstancesByCondition(condition, args, `replication_depth asc, num_replica_hosts desc, cluster_name, hostname, port`) } // FindInstances reads all instances whose name matches given pattern @@ -1311,7 +1311,7 @@ func FindInstances(regexpPattern string) (result [](*Instance), err error) { return result, err } condition := `1=1` - unfiltered, err := readInstancesByCondition(condition, sqlutils.Args(), `replication_depth asc, num_slave_hosts desc, cluster_name, hostname, port`) + unfiltered, err := readInstancesByCondition(condition, sqlutils.Args(), `replication_depth asc, num_replica_hosts desc, cluster_name, hostname, port`) if err != nil { return unfiltered, err } @@ -1330,7 +1330,7 @@ func findFuzzyInstances(fuzzyInstanceKey *InstanceKey) ([](*Instance), error) { hostname like concat('%%', ?, '%%') and port = ? ` - return readInstancesByCondition(condition, sqlutils.Args(fuzzyInstanceKey.Hostname, fuzzyInstanceKey.Port), `replication_depth asc, num_slave_hosts desc, cluster_name, hostname, port`) + return readInstancesByCondition(condition, sqlutils.Args(fuzzyInstanceKey.Hostname, fuzzyInstanceKey.Port), `replication_depth asc, num_replica_hosts desc, cluster_name, hostname, port`) } // ReadFuzzyInstanceKey accepts a fuzzy instance key and expects to return a single, fully qualified, @@ -1464,7 +1464,7 @@ func GetClusterOSCReplicas(clusterName string) ([](*Instance), error) { // Pick up to two busiest IMs condition := ` replication_depth = 1 - and num_slave_hosts > 0 + and num_replica_hosts > 0 and cluster_name = ? ` intermediatePrimaries, err = readInstancesByCondition(condition, sqlutils.Args(clusterName), "") @@ -1524,7 +1524,7 @@ func GetClusterOSCReplicas(clusterName string) ([](*Instance), error) { // Get 2 1st tier leaf replicas, if possible condition := ` replication_depth = 1 - and num_slave_hosts = 0 + and num_replica_hosts = 0 and cluster_name = ? ` replicas, err := readInstancesByCondition(condition, sqlutils.Args(clusterName), "") @@ -1548,7 +1548,7 @@ func GetClusterGhostReplicas(clusterName string) (result [](*Instance), err erro and binlog_format = 'ROW' and cluster_name = ? 
` - instances, err := readInstancesByCondition(condition, sqlutils.Args(clusterName), "num_slave_hosts asc") + instances, err := readInstancesByCondition(condition, sqlutils.Args(clusterName), "num_replica_hosts asc") if err != nil { return result, err } @@ -1730,23 +1730,23 @@ func readUnseenPrimaryKeys() ([]InstanceKey, error) { err := db.QueryOrchestratorRowsMap(` SELECT DISTINCT - slave_instance.master_host, slave_instance.master_port + replica_instance.source_host, replica_instance.source_port FROM - database_instance slave_instance + database_instance replica_instance LEFT JOIN - hostname_resolve ON (slave_instance.master_host = hostname_resolve.hostname) + hostname_resolve ON (replica_instance.source_host = hostname_resolve.hostname) LEFT JOIN - database_instance master_instance ON ( - COALESCE(hostname_resolve.resolved_hostname, slave_instance.master_host) = master_instance.hostname - and slave_instance.master_port = master_instance.port) + database_instance primary_instance ON ( + COALESCE(hostname_resolve.resolved_hostname, replica_instance.source_host) = primary_instance.hostname + and replica_instance.source_port = primary_instance.port) WHERE - master_instance.last_checked IS NULL - and slave_instance.master_host != '' - and slave_instance.master_host != '_' - and slave_instance.master_port > 0 - and slave_instance.slave_io_running = 1 + primary_instance.last_checked IS NULL + and replica_instance.source_host != '' + and replica_instance.source_host != '_' + and replica_instance.source_port > 0 + and replica_instance.replica_io_running = 1 `, func(m sqlutils.RowMap) error { - instanceKey, _ := NewResolveInstanceKey(m.GetString("master_host"), m.GetInt("master_port")) + instanceKey, _ := NewResolveInstanceKey(m.GetString("source_host"), m.GetInt("source_port")) // we ignore the error. It can be expected that we are unable to resolve the hostname. // Maybe that's how we got here in the first place! 
res = append(res, *instanceKey) @@ -1862,21 +1862,21 @@ func readUnknownPrimaryHostnameResolves() (map[string]string, error) { res := make(map[string]string) err := db.QueryOrchestratorRowsMap(` SELECT DISTINCT - slave_instance.master_host, hostname_resolve_history.resolved_hostname + replica_instance.source_host, hostname_resolve_history.resolved_hostname FROM - database_instance slave_instance - LEFT JOIN hostname_resolve ON (slave_instance.master_host = hostname_resolve.hostname) - LEFT JOIN database_instance master_instance ON ( - COALESCE(hostname_resolve.resolved_hostname, slave_instance.master_host) = master_instance.hostname - and slave_instance.master_port = master_instance.port - ) LEFT JOIN hostname_resolve_history ON (slave_instance.master_host = hostname_resolve_history.hostname) + database_instance replica_instance + LEFT JOIN hostname_resolve ON (replica_instance.source_host = hostname_resolve.hostname) + LEFT JOIN database_instance primary_instance ON ( + COALESCE(hostname_resolve.resolved_hostname, replica_instance.source_host) = primary_instance.hostname + and replica_instance.source_port = primary_instance.port + ) LEFT JOIN hostname_resolve_history ON (replica_instance.source_host = hostname_resolve_history.hostname) WHERE - master_instance.last_checked IS NULL - and slave_instance.master_host != '' - and slave_instance.master_host != '_' - and slave_instance.master_port > 0 + primary_instance.last_checked IS NULL + and replica_instance.source_host != '' + and replica_instance.source_host != '_' + and replica_instance.source_port > 0 `, func(m sqlutils.RowMap) error { - res[m.GetString("master_host")] = m.GetString("resolved_hostname") + res[m.GetString("source_host")] = m.GetString("resolved_hostname") return nil }) if err != nil { @@ -2138,7 +2138,7 @@ func ReadAllMinimalInstances() ([]MinimalInstance, error) { res := []MinimalInstance{} query := ` select - hostname, port, master_host, master_port, cluster_name + hostname, port, source_host, source_port, cluster_name from database_instance ` @@ -2149,8 +2149,8 @@ func ReadAllMinimalInstances() ([]MinimalInstance, error) { Port: m.GetInt("port"), } minimalInstance.PrimaryKey = InstanceKey{ - Hostname: m.GetString("master_host"), - Port: m.GetInt("master_port"), + Hostname: m.GetString("source_host"), + Port: m.GetInt("source_port"), } minimalInstance.ClusterName = m.GetString("cluster_name") @@ -2274,19 +2274,19 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo "binlog_format", "binlog_row_image", "log_bin", - "log_slave_updates", + "log_replica_updates", "binary_log_file", "binary_log_pos", - "master_host", - "master_port", - "slave_sql_running", - "slave_io_running", + "source_host", + "source_port", + "replica_sql_running", + "replica_io_running", "replication_sql_thread_state", "replication_io_thread_state", "has_replication_filters", "supports_oracle_gtid", "oracle_gtid", - "master_uuid", + "source_uuid", "ancestry_uuid", "executed_gtid_set", "gtid_mode", @@ -2294,37 +2294,37 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo "gtid_errant", "mariadb_gtid", "pseudo_gtid", - "master_log_file", - "read_master_log_pos", - "relay_master_log_file", - "exec_master_log_pos", + "source_log_file", + "read_source_log_pos", + "relay_source_log_file", + "exec_source_log_pos", "relay_log_file", "relay_log_pos", "last_sql_error", "last_io_error", - "seconds_behind_master", - "slave_lag_seconds", + "replication_lag_seconds", + "replica_lag_seconds", "sql_delay", - 
"num_slave_hosts", - "slave_hosts", + "num_replica_hosts", + "replica_hosts", "cluster_name", "suggested_cluster_alias", "data_center", "region", "physical_environment", "replication_depth", - "is_co_master", + "is_co_primary", "replication_credentials_available", "has_replication_credentials", "allow_tls", "semi_sync_enforced", "semi_sync_available", - "semi_sync_master_enabled", - "semi_sync_master_timeout", - "semi_sync_master_wait_for_slave_count", + "semi_sync_primary_enabled", + "semi_sync_primary_timeout", + "semi_sync_primary_wait_for_replica_count", "semi_sync_replica_enabled", - "semi_sync_master_status", - "semi_sync_master_clients", + "semi_sync_primary_status", + "semi_sync_primary_clients", "semi_sync_replica_status", "instance_alias", "last_discovery_latency", @@ -2697,10 +2697,10 @@ func SnapshotTopologies() error { _, err := db.ExecOrchestrator(` insert ignore into database_instance_topology_history (snapshot_unix_timestamp, - hostname, port, master_host, master_port, cluster_name, version) + hostname, port, source_host, source_port, cluster_name, version) select UNIX_TIMESTAMP(NOW()), - hostname, port, master_host, master_port, cluster_name, version + hostname, port, source_host, source_port, cluster_name, version from database_instance `, @@ -2734,8 +2734,8 @@ func ReadHistoryClusterInstances(clusterName string, historyTimestampPattern str instance.Key.Hostname = m.GetString("hostname") instance.Key.Port = m.GetInt("port") - instance.SourceKey.Hostname = m.GetString("master_host") - instance.SourceKey.Port = m.GetInt("master_port") + instance.SourceKey.Hostname = m.GetString("source_host") + instance.SourceKey.Port = m.GetInt("source_port") instance.ClusterName = m.GetString("cluster_name") instances = append(instances, instance) diff --git a/go/vt/orchestrator/inst/instance_dao_test.go b/go/vt/orchestrator/inst/instance_dao_test.go index b028054eac3..293cdb74795 100644 --- a/go/vt/orchestrator/inst/instance_dao_test.go +++ b/go/vt/orchestrator/inst/instance_dao_test.go @@ -58,14 +58,14 @@ func TestMkInsertOdkuSingle(t *testing.T) { s1 := `INSERT ignore INTO database_instance (hostname, port, last_checked, last_attempted_check, last_check_partial_success, uptime, server_id, server_uuid, version, major_version, version_comment, binlog_server, read_only, binlog_format, - binlog_row_image, log_bin, log_slave_updates, binary_log_file, binary_log_pos, master_host, master_port, - slave_sql_running, slave_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, master_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, - master_log_file, read_master_log_pos, relay_master_log_file, exec_master_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, seconds_behind_master, slave_lag_seconds, sql_delay, num_slave_hosts, slave_hosts, cluster_name, suggested_cluster_alias, data_center, region, physical_environment, replication_depth, is_co_master, replication_credentials_available, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_available, semi_sync_master_enabled, semi_sync_master_timeout, semi_sync_master_wait_for_slave_count, semi_sync_replica_enabled, semi_sync_master_status, semi_sync_master_clients, semi_sync_replica_status, instance_alias, last_discovery_latency, replication_group_name, replication_group_is_single_primary_mode, replication_group_member_state, replication_group_member_role, replication_group_members, 
replication_group_primary_host, replication_group_primary_port, last_seen) + binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, + replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, + source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, num_replica_hosts, replica_hosts, cluster_name, suggested_cluster_alias, data_center, region, physical_environment, replication_depth, is_co_primary, replication_credentials_available, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_available, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, instance_alias, last_discovery_latency, replication_group_name, replication_group_is_single_primary_mode, replication_group_member_state, replication_group_member_role, replication_group_members, replication_group_primary_host, replication_group_primary_port, last_seen) VALUES (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()) ON DUPLICATE KEY UPDATE - hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), uptime=VALUES(uptime), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_slave_updates=VALUES(log_slave_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), master_host=VALUES(master_host), master_port=VALUES(master_port), slave_sql_running=VALUES(slave_sql_running), slave_io_running=VALUES(slave_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), master_uuid=VALUES(master_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), master_log_file=VALUES(master_log_file), read_master_log_pos=VALUES(read_master_log_pos), relay_master_log_file=VALUES(relay_master_log_file), exec_master_log_pos=VALUES(exec_master_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), seconds_behind_master=VALUES(seconds_behind_master), slave_lag_seconds=VALUES(slave_lag_seconds), sql_delay=VALUES(sql_delay), 
num_slave_hosts=VALUES(num_slave_hosts), slave_hosts=VALUES(slave_hosts), cluster_name=VALUES(cluster_name), suggested_cluster_alias=VALUES(suggested_cluster_alias), data_center=VALUES(data_center), region=VALUES(region), physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_master=VALUES(is_co_master), replication_credentials_available=VALUES(replication_credentials_available), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), - semi_sync_enforced=VALUES(semi_sync_enforced), semi_sync_available=VALUES(semi_sync_available), semi_sync_master_enabled=VALUES(semi_sync_master_enabled), semi_sync_master_timeout=VALUES(semi_sync_master_timeout), semi_sync_master_wait_for_slave_count=VALUES(semi_sync_master_wait_for_slave_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_master_status=VALUES(semi_sync_master_status), semi_sync_master_clients=VALUES(semi_sync_master_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status), + hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), uptime=VALUES(uptime), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), num_replica_hosts=VALUES(num_replica_hosts), replica_hosts=VALUES(replica_hosts), cluster_name=VALUES(cluster_name), suggested_cluster_alias=VALUES(suggested_cluster_alias), data_center=VALUES(data_center), region=VALUES(region), physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), replication_credentials_available=VALUES(replication_credentials_available), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), + semi_sync_enforced=VALUES(semi_sync_enforced), 
semi_sync_available=VALUES(semi_sync_available), semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status), instance_alias=VALUES(instance_alias), last_discovery_latency=VALUES(last_discovery_latency), replication_group_name=VALUES(replication_group_name), replication_group_is_single_primary_mode=VALUES(replication_group_is_single_primary_mode), replication_group_member_state=VALUES(replication_group_member_state), replication_group_member_role=VALUES(replication_group_member_role), replication_group_members=VALUES(replication_group_members), replication_group_primary_host=VALUES(replication_group_primary_host), replication_group_primary_port=VALUES(replication_group_primary_port), last_seen=VALUES(last_seen) ` a1 := `i710, 3306, 0, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, @@ -83,16 +83,16 @@ func TestMkInsertOdkuThree(t *testing.T) { // three instances s3 := `INSERT INTO database_instance - (hostname, port, last_checked, last_attempted_check, last_check_partial_success, uptime, server_id, server_uuid, version, major_version, version_comment, binlog_server, read_only, binlog_format, binlog_row_image, log_bin, log_slave_updates, binary_log_file, binary_log_pos, master_host, master_port, slave_sql_running, slave_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, master_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, master_log_file, read_master_log_pos, relay_master_log_file, exec_master_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, seconds_behind_master, slave_lag_seconds, sql_delay, num_slave_hosts, slave_hosts, cluster_name, suggested_cluster_alias, data_center, region, physical_environment, replication_depth, is_co_master, replication_credentials_available, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_available, semi_sync_master_enabled, semi_sync_master_timeout, semi_sync_master_wait_for_slave_count, - semi_sync_replica_enabled, semi_sync_master_status, semi_sync_master_clients, semi_sync_replica_status, instance_alias, last_discovery_latency, replication_group_name, replication_group_is_single_primary_mode, replication_group_member_state, replication_group_member_role, replication_group_members, replication_group_primary_host, replication_group_primary_port, last_seen) + (hostname, port, last_checked, last_attempted_check, last_check_partial_success, uptime, server_id, server_uuid, version, major_version, version_comment, binlog_server, read_only, binlog_format, binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, 
replica_lag_seconds, sql_delay, num_replica_hosts, replica_hosts, cluster_name, suggested_cluster_alias, data_center, region, physical_environment, replication_depth, is_co_primary, replication_credentials_available, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_available, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, + semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, instance_alias, last_discovery_latency, replication_group_name, replication_group_is_single_primary_mode, replication_group_member_state, replication_group_member_role, replication_group_members, replication_group_primary_host, replication_group_primary_port, last_seen) VALUES (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()), (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()), (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()) ON DUPLICATE KEY UPDATE - hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), uptime=VALUES(uptime), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_slave_updates=VALUES(log_slave_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), master_host=VALUES(master_host), master_port=VALUES(master_port), slave_sql_running=VALUES(slave_sql_running), slave_io_running=VALUES(slave_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), master_uuid=VALUES(master_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), master_log_file=VALUES(master_log_file), read_master_log_pos=VALUES(read_master_log_pos), relay_master_log_file=VALUES(relay_master_log_file), exec_master_log_pos=VALUES(exec_master_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), seconds_behind_master=VALUES(seconds_behind_master), slave_lag_seconds=VALUES(slave_lag_seconds), sql_delay=VALUES(sql_delay), num_slave_hosts=VALUES(num_slave_hosts), slave_hosts=VALUES(slave_hosts), cluster_name=VALUES(cluster_name), 
suggested_cluster_alias=VALUES(suggested_cluster_alias), data_center=VALUES(data_center), region=VALUES(region), - physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_master=VALUES(is_co_master), replication_credentials_available=VALUES(replication_credentials_available), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), semi_sync_enforced=VALUES(semi_sync_enforced), semi_sync_available=VALUES(semi_sync_available), - semi_sync_master_enabled=VALUES(semi_sync_master_enabled), semi_sync_master_timeout=VALUES(semi_sync_master_timeout), semi_sync_master_wait_for_slave_count=VALUES(semi_sync_master_wait_for_slave_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_master_status=VALUES(semi_sync_master_status), semi_sync_master_clients=VALUES(semi_sync_master_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status), + hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), uptime=VALUES(uptime), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), num_replica_hosts=VALUES(num_replica_hosts), replica_hosts=VALUES(replica_hosts), cluster_name=VALUES(cluster_name), suggested_cluster_alias=VALUES(suggested_cluster_alias), data_center=VALUES(data_center), region=VALUES(region), + physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), replication_credentials_available=VALUES(replication_credentials_available), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), semi_sync_enforced=VALUES(semi_sync_enforced), semi_sync_available=VALUES(semi_sync_available), + semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), 
semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status), instance_alias=VALUES(instance_alias), last_discovery_latency=VALUES(last_discovery_latency), replication_group_name=VALUES(replication_group_name), replication_group_is_single_primary_mode=VALUES(replication_group_is_single_primary_mode), replication_group_member_state=VALUES(replication_group_member_state), replication_group_member_role=VALUES(replication_group_member_role), replication_group_members=VALUES(replication_group_members), replication_group_primary_host=VALUES(replication_group_primary_host), replication_group_primary_port=VALUES(replication_group_primary_port), last_seen=VALUES(last_seen) ` a3 := ` diff --git a/go/vt/orchestrator/inst/instance_topology_dao.go b/go/vt/orchestrator/inst/instance_topology_dao.go index aa1e466cf94..e24a9cf2bf0 100644 --- a/go/vt/orchestrator/inst/instance_topology_dao.go +++ b/go/vt/orchestrator/inst/instance_topology_dao.go @@ -765,7 +765,7 @@ func ResetPrimary(instanceKey *InstanceKey) (*Instance, error) { if err != nil { return instance, log.Errore(err) } - log.Infof("Reset master %+v", instanceKey) + log.Infof("Reset primary %+v", instanceKey) instance, err = ReadTopologyInstance(instanceKey) return instance, err diff --git a/go/vt/orchestrator/inst/tablet_dao.go b/go/vt/orchestrator/inst/tablet_dao.go index ee3c5d7e23b..448d6f0b52b 100644 --- a/go/vt/orchestrator/inst/tablet_dao.go +++ b/go/vt/orchestrator/inst/tablet_dao.go @@ -146,7 +146,7 @@ func SaveTablet(tablet *topodatapb.Tablet) error { _, err = db.ExecOrchestrator(` replace into vitess_tablet ( - hostname, port, cell, keyspace, shard, tablet_type, master_timestamp, info + hostname, port, cell, keyspace, shard, tablet_type, primary_timestamp, info ) values ( ?, ?, ?, ?, ?, ?, ?, ? 
) diff --git a/go/vt/orchestrator/logic/topology_recovery_dao.go b/go/vt/orchestrator/logic/topology_recovery_dao.go index 23514f08657..560496509da 100644 --- a/go/vt/orchestrator/logic/topology_recovery_dao.go +++ b/go/vt/orchestrator/logic/topology_recovery_dao.go @@ -61,8 +61,8 @@ func AttemptFailureDetectionRegistration(analysisEntry *inst.ReplicationAnalysis analysis, cluster_name, cluster_alias, - count_affected_slaves, - slave_hosts, + count_affected_replicas, + replica_hosts, is_actionable, start_active_period ) values ( @@ -153,8 +153,8 @@ func writeTopologyRecovery(topologyRecovery *TopologyRecovery) (*TopologyRecover analysis, cluster_name, cluster_alias, - count_affected_slaves, - slave_hosts, + count_affected_replicas, + replica_hosts, last_detection_id ) values ( ?, @@ -480,7 +480,7 @@ func writeResolveRecovery(topologyRecovery *TopologyRecovery) error { successor_hostname = ?, successor_port = ?, successor_alias = ?, - lost_slaves = ?, + lost_replicas = ?, participating_instances = ?, all_errors = ?, end_recovery = NOW() @@ -517,10 +517,10 @@ func readRecoveries(whereCondition string, limit string, args []interface{}) ([] analysis, cluster_name, cluster_alias, - count_affected_slaves, - slave_hosts, + count_affected_replicas, + replica_hosts, participating_instances, - lost_slaves, + lost_replicas, all_errors, acknowledged, acknowledged_at, @@ -551,8 +551,8 @@ func readRecoveries(whereCondition string, limit string, args []interface{}) ([] topologyRecovery.AnalysisEntry.Analysis = inst.AnalysisCode(m.GetString("analysis")) topologyRecovery.AnalysisEntry.ClusterDetails.ClusterName = m.GetString("cluster_name") topologyRecovery.AnalysisEntry.ClusterDetails.ClusterAlias = m.GetString("cluster_alias") - topologyRecovery.AnalysisEntry.CountReplicas = m.GetUint("count_affected_slaves") - topologyRecovery.AnalysisEntry.ReadReplicaHostsFromString(m.GetString("slave_hosts")) + topologyRecovery.AnalysisEntry.CountReplicas = m.GetUint("count_affected_replicas") + topologyRecovery.AnalysisEntry.ReadReplicaHostsFromString(m.GetString("replica_hosts")) topologyRecovery.SuccessorKey = &inst.InstanceKey{} topologyRecovery.SuccessorKey.Hostname = m.GetString("successor_hostname") @@ -562,7 +562,7 @@ func readRecoveries(whereCondition string, limit string, args []interface{}) ([] topologyRecovery.AnalysisEntry.ClusterDetails.ReadRecoveryInfo() topologyRecovery.AllErrors = strings.Split(m.GetString("all_errors"), "\n") - topologyRecovery.LostReplicas.ReadCommaDelimitedList(m.GetString("lost_slaves")) + topologyRecovery.LostReplicas.ReadCommaDelimitedList(m.GetString("lost_replicas")) topologyRecovery.ParticipatingInstanceKeys.ReadCommaDelimitedList(m.GetString("participating_instances")) topologyRecovery.Acknowledged = m.GetBool("acknowledged") @@ -699,8 +699,8 @@ func readFailureDetections(whereCondition string, limit string, args []interface analysis, cluster_name, cluster_alias, - count_affected_slaves, - slave_hosts, + count_affected_replicas, + replica_hosts, (select max(recovery_id) from topology_recovery where topology_recovery.last_detection_id = detection_id) as related_recovery_id from topology_failure_detection @@ -723,8 +723,8 @@ func readFailureDetections(whereCondition string, limit string, args []interface failureDetection.AnalysisEntry.Analysis = inst.AnalysisCode(m.GetString("analysis")) failureDetection.AnalysisEntry.ClusterDetails.ClusterName = m.GetString("cluster_name") failureDetection.AnalysisEntry.ClusterDetails.ClusterAlias = m.GetString("cluster_alias") - 
failureDetection.AnalysisEntry.CountReplicas = m.GetUint("count_affected_slaves") - failureDetection.AnalysisEntry.ReadReplicaHostsFromString(m.GetString("slave_hosts")) + failureDetection.AnalysisEntry.CountReplicas = m.GetUint("count_affected_replicas") + failureDetection.AnalysisEntry.ReadReplicaHostsFromString(m.GetString("replica_hosts")) failureDetection.AnalysisEntry.StartActivePeriod = m.GetString("start_active_period") failureDetection.RelatedRecoveryId = m.GetInt64("related_recovery_id")
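
Note (not part of the patch): the hunks above are a mechanical column rename in the orchestrator backend tables and the DAO code that reads and writes them, with no behavioral change. The following standalone Go sketch is illustrative only — the renamedColumns map and the renameColumns helper are hypothetical and do not exist in the codebase — and simply summarizes a representative subset of the old-to-new column mapping that the diff applies, with every pair taken verbatim from the hunks above.

// rename_mapping_sketch.go
// Illustrative sketch only; not part of this patch.
package main

import "fmt"

// renamedColumns lists a subset of the column renames applied by the patch,
// old name -> new name (all pairs appear in the hunks above).
var renamedColumns = map[string]string{
	"master_host":           "source_host",
	"master_port":           "source_port",
	"slave_sql_running":     "replica_sql_running",
	"slave_io_running":      "replica_io_running",
	"seconds_behind_master": "replication_lag_seconds",
	"slave_lag_seconds":     "replica_lag_seconds",
	"num_slave_hosts":       "num_replica_hosts",
	"slave_hosts":           "replica_hosts",
	"is_co_master":          "is_co_primary",
	"count_affected_slaves": "count_affected_replicas",
	"lost_slaves":           "lost_replicas",
	"master_timestamp":      "primary_timestamp",
}

// renameColumns returns a copy of cols with any renamed column replaced by
// its new name; columns that were not renamed pass through unchanged.
func renameColumns(cols []string) []string {
	out := make([]string, 0, len(cols))
	for _, c := range cols {
		if n, ok := renamedColumns[c]; ok {
			c = n
		}
		out = append(out, c)
	}
	return out
}

func main() {
	fmt.Println(renameColumns([]string{"hostname", "port", "master_host", "master_port", "slave_hosts"}))
	// prints: [hostname port source_host source_port replica_hosts]
}

As the hunks in instance_dao.go, instance_dao_test.go, and topology_recovery_dao.go show, the rename is applied consistently in three places for each column: the CREATE TABLE / column-list definitions, the INSERT ... ON DUPLICATE KEY UPDATE statements, and the row readers (m.GetString / m.GetUint / m.GetInt calls), so old and new names never coexist in a single query.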