diff --git a/ch_backup/clickhouse/metadata_cleaner.py b/ch_backup/clickhouse/metadata_cleaner.py
index f44f15c..0b7a293 100644
--- a/ch_backup/clickhouse/metadata_cleaner.py
+++ b/ch_backup/clickhouse/metadata_cleaner.py
@@ -77,7 +77,12 @@ def clean_tables_metadata(self, replicated_tables: List[Table]) -> None:
 
         with self._zk_ctl.zk_client as zk_client:
             # Both paths are already abs.
-            full_table_zk_path = self._zk_ctl.zk_root_path + path_resolved
+            full_table_zk_path = (
+                self._zk_ctl.zk_root_path  # type: ignore
+                + path_resolved
+                + "/replicas/"
+                + self._replica_to_drop
+            )
 
             if not zk_client.exists(full_table_zk_path):
                 logging.debug(
@@ -89,9 +94,7 @@ def clean_tables_metadata(self, replicated_tables: List[Table]) -> None:
 
             # We are sure that we want to drop the table from zk.
             # To force it we will remove it active flag.
-            active_flag_path = os.path.join(
-                full_table_zk_path, "replicas", self._replica_to_drop, "is_active"  # type: ignore
-            )
+            active_flag_path = os.path.join(full_table_zk_path, "is_active")
             try:
                 zk_client.delete(active_flag_path)
             except NoNodeError:
@@ -149,7 +152,12 @@ def clean_database_metadata(self, replicated_databases: List[Database]) -> None:
 
         with self._zk_ctl.zk_client as zk_client:
             # Both paths are already abs.
-            full_database_zk_path = self._zk_ctl.zk_root_path + path_resolved
+            full_database_zk_path = (
+                self._zk_ctl.zk_root_path
+                + path_resolved
+                + "/replicas/"
+                + full_replica_name
+            )
 
             if not zk_client.exists(full_database_zk_path):
                 logging.debug(
@@ -159,9 +167,7 @@ def clean_database_metadata(self, replicated_databases: List[Database]) -> None:
                 )
                 continue
 
-            active_flag_path = os.path.join(
-                full_database_zk_path, "replicas", full_replica_name, "active"
-            )
+            active_flag_path = os.path.join(full_database_zk_path, "active")
             try:
                 zk_client.delete(active_flag_path)
             except NoNodeError:
diff --git a/tests/integration/features/backup_replicated.feature b/tests/integration/features/backup_replicated.feature
index d692c28..0434aee 100644
--- a/tests/integration/features/backup_replicated.feature
+++ b/tests/integration/features/backup_replicated.feature
@@ -859,6 +859,48 @@ Feature: Backup replicated merge tree table
       | zookeeper_path                |
       |/databases/replicated/db_repl|
 
+  Scenario Outline: Add new host with replicated
+    Given we have enabled shared zookeeper for clickhouse01
+    And we have enabled shared zookeeper for clickhouse02
+    Given ClickHouse settings
+    """
+    allow_experimental_database_replicated: 1
+    """
+    And we have executed queries on clickhouse01
+    """
+    DROP DATABASE IF EXISTS db_repl ON CLUSTER 'default' SYNC;
+    CREATE DATABASE db_repl ENGINE = Replicated('<zookeeper_path>', 'shard_01', '{replica}');
+
+    DROP TABLE IF EXISTS table_01 ON CLUSTER 'default' SYNC;
+    CREATE TABLE table_01 (
+        EventDate DateTime,
+        CounterID UInt32,
+        UserID UInt32
+    )
+    ENGINE = ReplicatedMergeTree('/clickhouse/tables/shard01/test_db.table_01', '{replica}')
+    PARTITION BY CounterID % 10
+    ORDER BY (CounterID, EventDate, intHash32(UserID))
+    SAMPLE BY intHash32(UserID);
+    INSERT INTO table_01 SELECT now(), number, rand() FROM system.numbers LIMIT 10;
+    """
+    When we create clickhouse01 clickhouse backup
+
+    When we restore clickhouse backup #0 to clickhouse02
+    """
+    schema_only: true
+    """
+    Then we got same clickhouse data at clickhouse01 clickhouse02
+
+    @require_version_24.8
+    Examples:
+      | zookeeper_path            |
+      |/databases/{uuid}/db_repl|
+
+    @require_version_22.8
+    Examples:
+      | zookeeper_path                |
+      |/databases/replicated/db_repl|
+
   ## Note: Sometimes we can have active orphaned table in the zookeeper.
   ## Here we are imitating such situation by creating objects with static replica name.
   @require_version_23.3
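
For context, here is a minimal standalone sketch of the ZooKeeper path layout the table hunks switch to: the existence check and the active-flag delete now both target the replica-scoped node `<root>/<table_path>/replicas/<replica>`. This is not the project's code; the function name, the `KazooClient` connection string, and the example paths are assumptions for illustration, while the path construction and the `is_active` deletion mirror the hunks above.

```python
import os

from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeError


def drop_table_replica_flag(
    zk_root_path: str, path_resolved: str, replica_to_drop: str
) -> None:
    """Delete the is_active flag of one table replica (illustration only)."""
    zk_client = KazooClient(hosts="localhost:2181")  # assumed connection string
    zk_client.start()
    try:
        # Replica-scoped node, e.g.
        # "/clickhouse" + "/clickhouse/tables/shard01/test_db.table_01"
        # + "/replicas/" + "replica1"
        full_table_zk_path = (
            zk_root_path + path_resolved + "/replicas/" + replica_to_drop
        )

        if not zk_client.exists(full_table_zk_path):
            return  # nothing to clean up for this replica

        # Deleting the "is_active" flag marks the replica as inactive so the
        # cleaner can drop it even if it still looks active.
        active_flag_path = os.path.join(full_table_zk_path, "is_active")
        try:
            zk_client.delete(active_flag_path)
        except NoNodeError:
            pass  # flag is already absent
    finally:
        zk_client.stop()
```

The database hunks follow the same pattern, with `full_replica_name` in place of the replica name and an `active` node instead of `is_active`.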