fix(scan_operations): add retry policy to cql query
The node where a scan operation was started could be
targeted by a disruptive nemesis. If the node was restarted
or stopped while the scan query was running, the scan
operation would be terminated, and the error event and
message would mark the test as failed.

Add an ExponentialBackoffRetryPolicy to the CQL session,
which allows the query to be retried: if the node was down,
the query finishes successfully once it comes back.

Fixes: scylladb#9284
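
In isolation, the change amounts to attaching the retry policy and a long
default timeout to the driver session before running the scan query. Below is
a minimal sketch of that pattern, assuming the scylla-driver build used here
exposes cassandra.policies.ExponentialBackoffRetryPolicy with the keyword
arguments shown in the diff; the contact point, keyspace, and table are
placeholders, not values from this repository.

# Sketch only: contact point and query are hypothetical examples.
from cassandra.cluster import Cluster  # pylint: disable=no-name-in-module
from cassandra.policies import ExponentialBackoffRetryPolicy
from cassandra.query import SimpleStatement  # pylint: disable=no-name-in-module

# Retry parameters mirroring the ones added in this commit.
retry_policy_params = {"max_num_retries": 15.0, "min_interval": 1.0, "max_interval": 1800.0}

cluster = Cluster(["127.0.0.1"])  # placeholder contact point
session = cluster.connect()

# The retry policy lives on the cluster, so a query that hits a node restarted
# by a nemesis is retried with exponential backoff instead of failing the test.
session.cluster.default_retry_policy = ExponentialBackoffRetryPolicy(**retry_policy_params)
session.default_timeout = 1800  # let long-running scans survive the retries

rows = session.execute(SimpleStatement("SELECT * FROM ks.cf LIMIT 10"))  # placeholder query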
aleksbykov committed Dec 22, 2024
1 parent 78c864c commit 737d3bb
Showing 1 changed file with 17 additions and 0 deletions.
17 changes: 17 additions & 0 deletions sdcm/scan_operation_thread.py
@@ -16,6 +16,7 @@
from cassandra import ConsistencyLevel, OperationTimedOut, ReadTimeout
from cassandra.cluster import ResponseFuture, ResultSet # pylint: disable=no-name-in-module
from cassandra.query import SimpleStatement # pylint: disable=no-name-in-module
from cassandra.policies import ExponentialBackoffRetryPolicy

from sdcm.remote import LocalCmdRunner
from sdcm.sct_events import Severity
@@ -106,6 +107,10 @@ def __init__(self, generator: random.Random, thread_params: ThreadParams, thread
self.db_node = self._get_random_node()
self.current_operation_stat = None
self.log.info("FullscanOperationBase init finished")
self._exp_backoff_retry_policy_params = {
"max_num_retries": 15.0, "min_interval": 1.0, "max_interval": 1800.0
}
self._request_default_timeout = 1800

def _get_random_node(self) -> BaseNode:
return self.generator.choice(self.fullscan_params.db_cluster.data_nodes)
@@ -120,6 +125,8 @@ def execute_query(
| FullPartitionScanReversedOrderEvent]) -> ResultSet:
# pylint: disable=unused-argument
self.log.debug('Will run command %s', cmd)
session.cluster.default_retry_policy = ExponentialBackoffRetryPolicy(**self._exp_backoff_retry_policy_params)
session.default_timeout = self._request_default_timeout
return session.execute(SimpleStatement(
cmd,
fetch_size=self.fullscan_params.page_size,
@@ -242,6 +249,8 @@ def get_table_clustering_order(self) -> str:
with self.fullscan_params.db_cluster.cql_connection_patient(node=node, connect_timeout=300) as session:
# Using CL ONE. No need for a quorum since querying a constant fixed attribute of a table.
session.default_consistency_level = ConsistencyLevel.ONE
session.cluster.default_retry_policy = ExponentialBackoffRetryPolicy(
**self._exp_backoff_retry_policy_params)
return get_table_clustering_order(ks_cf=self.fullscan_params.ks_cf,
ck_name=self.fullscan_params.ck_name, session=session)
except Exception as error: # pylint: disable=broad-except # noqa: BLE001
@@ -266,6 +275,8 @@ def randomly_form_cql_statement(self) -> Optional[tuple[str, str]]:  # pylint: d

with self.fullscan_params.db_cluster.cql_connection_patient(
node=db_node, connect_timeout=300) as session:
session.cluster.default_retry_policy = ExponentialBackoffRetryPolicy(
**self._exp_backoff_retry_policy_params)
ck_random_min_value = self.generator.randint(a=1, b=self.fullscan_params.rows_count)
ck_random_max_value = self.generator.randint(a=ck_random_min_value, b=self.fullscan_params.rows_count)
self.ck_filter = ck_filter = self.generator.choice(list(self.reversed_query_filter_ck_by.keys()))
@@ -351,6 +362,9 @@ def execute_query(
self.log.debug('Will run command "%s"', cmd)
session.default_fetch_size = self.fullscan_params.page_size
session.default_consistency_level = ConsistencyLevel.ONE
session.cluster.default_retry_policy = ExponentialBackoffRetryPolicy(**self._exp_backoff_retry_policy_params)
session.default_timeout = self._request_default_timeout

return session.execute_async(cmd)

def reset_output_files(self):
@@ -460,6 +474,9 @@ def execute_query(self, session, cmd: str,
| FullPartitionScanReversedOrderEvent]) -> None:
self.log.debug('Will run command %s', cmd)
validate_mapreduce_service_requests_start_time = time.time()
session.cluster.default_retry_policy = ExponentialBackoffRetryPolicy(**self._exp_backoff_retry_policy_params)
session.default_timeout = self._session_execution_timeout

try:
cmd_result = session.execute(
query=cmd, trace=False, timeout=self._session_execution_timeout)