import random

import pytest

from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV

cluster = ClickHouseCluster(__file__)

main_configs = [
    "configs/backups_disk.xml",
    "configs/cluster_different_versions.xml",
]

user_configs = []

new_node = cluster.add_instance(
    "new_node",
    main_configs=main_configs,
    user_configs=user_configs,
    external_dirs=["/backups/"],
    macros={"replica": "new_node", "shard": "shard1"},
    with_zookeeper=True,
)

old_node = cluster.add_instance(
    "old_node",
    image="clickhouse/clickhouse-server",
    tag="24.9.2.42",
    with_installed_binary=True,
    main_configs=main_configs,
    user_configs=user_configs,
    external_dirs=["/backups/"],
    macros={"replica": "old_node", "shard": "shard1"},
    with_zookeeper=True,
)

nodes = [new_node, old_node]


@pytest.fixture(scope="module", autouse=True)
def start_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


@pytest.fixture(autouse=True)
def cleanup_after_test():
    try:
        yield
    finally:
        new_node.query("DROP TABLE IF EXISTS tbl ON CLUSTER 'cluster_ver' SYNC")


backup_id_counter = 0


def new_backup_name():
    global backup_id_counter
    backup_id_counter += 1
    return f"Disk('backups', '{backup_id_counter}')"


# Returns a printable name for a node.
def get_node_name(node):
    return "new_node" if (node == new_node) else "old_node"


# Chooses a random instance.
def random_node():
    return random.choice(nodes)


def test_different_versions():
    new_node.query(
        "CREATE TABLE tbl"
        " ON CLUSTER 'cluster_ver'"
        " (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/tbl/', '{replica}')"
        " ORDER BY tuple()"
    )

    new_node.query("INSERT INTO tbl VALUES (1)")
    old_node.query("INSERT INTO tbl VALUES (2)")

    backup_name = new_backup_name()
    initiator = random_node()
    print(f"Using {get_node_name(initiator)} as initiator for BACKUP")
    initiator.query(f"BACKUP TABLE tbl ON CLUSTER 'cluster_ver' TO {backup_name}")

    new_node.query("DROP TABLE tbl ON CLUSTER 'cluster_ver' SYNC")

    initiator = random_node()
    print(f"Using {get_node_name(initiator)} as initiator for RESTORE")
    initiator.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster_ver' FROM {backup_name}")

    new_node.query("SYSTEM SYNC REPLICA ON CLUSTER 'cluster_ver' tbl")
    assert new_node.query("SELECT * FROM tbl ORDER BY x") == TSV([1, 2])
    assert old_node.query("SELECT * FROM tbl ORDER BY x") == TSV([1, 2])

    # Error NO_ELEMENTS_IN_CONFIG is unrelated to this test.
    assert (
        new_node.query(
            "SELECT name, last_error_message FROM system.errors WHERE NOT ("
            "(name == 'NO_ELEMENTS_IN_CONFIG')"
            ")"
        )
        == ""
    )

    # Error FAILED_TO_SYNC_BACKUP_OR_RESTORE: "No connection to host new_node:9000 yet, will retry" is generated
    # by the old version when it fails to connect to another host because that host hasn't started yet.
    # This is not actually an error, just an exception that is thrown and caught. The new version doesn't throw this exception.
    assert (
        old_node.query(
            "SELECT name, last_error_message FROM system.errors WHERE NOT ("
            "(name == 'NO_ELEMENTS_IN_CONFIG') OR"
            "((name == 'FAILED_TO_SYNC_BACKUP_OR_RESTORE') AND (last_error_message == 'No connection to host new_node:9000 yet, will retry'))"
            ")"
        )
        == ""
    )