diff --git a/avocado/core/dependencies/requirements/cache/__init__.py b/avocado/core/dependencies/requirements/cache/__init__.py index bc51df1719..78e8240625 100644 --- a/avocado/core/dependencies/requirements/cache/__init__.py +++ b/avocado/core/dependencies/requirements/cache/__init__.py @@ -1,3 +1,6 @@ # The sqlite based backend is the only implementation from avocado.core.dependencies.requirements.cache.backends.sqlite import ( - get_requirement, set_requirement) + delete_environment, delete_requirement, + get_all_environments_with_requirement, is_environment_prepared, + is_requirement_in_cache, set_requirement, update_environment, + update_requirement_status) diff --git a/avocado/core/dependencies/requirements/cache/backends/sqlite.py b/avocado/core/dependencies/requirements/cache/backends/sqlite.py index d06f2bb0df..b79c5f1a4a 100644 --- a/avocado/core/dependencies/requirements/cache/backends/sqlite.py +++ b/avocado/core/dependencies/requirements/cache/backends/sqlite.py @@ -24,6 +24,9 @@ #: The location of the requirements cache database CACHE_DATABASE_PATH = get_datafile_path('cache', 'requirements.sqlite') +sqlite3.register_adapter(bool, int) +sqlite3.register_converter("BOOLEAN", lambda v: bool(int(v))) + #: The definition of the database schema SCHEMA = [ 'CREATE TABLE IF NOT EXISTS requirement_type (requirement_type TEXT UNIQUE)', @@ -41,6 +44,7 @@ 'environment TEXT,' 'requirement_type TEXT,' 'requirement TEXT,' + 'saved BOOLEAN,' 'FOREIGN KEY(environment_type) REFERENCES environment(environment_type),' 'FOREIGN KEY(environment) REFERENCES environment(environment),' 'FOREIGN KEY(requirement_type) REFERENCES requirement_type(requirement_type)' @@ -51,6 +55,7 @@ def _create_requirement_cache_db(): + os.makedirs(os.path.dirname(CACHE_DATABASE_PATH), exist_ok=True) with sqlite3.connect(CACHE_DATABASE_PATH) as conn: cursor = conn.cursor() for entry in SCHEMA: @@ -59,7 +64,7 @@ def _create_requirement_cache_db(): def set_requirement(environment_type, environment, - requirement_type, requirement): + requirement_type, requirement, saved=True): if not os.path.exists(CACHE_DATABASE_PATH): _create_requirement_cache_db() @@ -71,18 +76,24 @@ def set_requirement(environment_type, environment, cursor.execute(sql, (environment_type, environment)) sql = "INSERT OR IGNORE INTO requirement_type VALUES (?)" cursor.execute(sql, (requirement_type, )) - sql = "INSERT OR IGNORE INTO requirement VALUES (?, ?, ?, ?)" + sql = "INSERT OR IGNORE INTO requirement VALUES (?, ?, ?, ?, ?)" cursor.execute(sql, (environment_type, environment, - requirement_type, requirement)) - conn.commit() + requirement_type, requirement, saved)) + conn.commit() -def get_requirement(environment_type, environment, - requirement_type, requirement): +def is_requirement_in_cache(environment_type, environment, + requirement_type, requirement): + """Checks if requirement is in cache. + + :rtype: True if requirement is in cache + False if requirement is not in cache + None if requirement is in cache but it is not saved yet. + """ if not os.path.exists(CACHE_DATABASE_PATH): return False - sql = ("SELECT COUNT(*) FROM requirement WHERE (" + sql = ("SELECT r.saved FROM requirement r WHERE (" "environment_type = ? AND " "environment = ? AND " "requirement_type = ? 
AND " @@ -94,5 +105,196 @@ def get_requirement(environment_type, environment, requirement_type, requirement)) row = result.fetchone() if row is not None: - return row[0] == 1 + if row[0]: + return True + return None + return False + + +def is_environment_prepared(environment): + """Checks if environment has all requirements saved.""" + + if not os.path.exists(CACHE_DATABASE_PATH): + return False + + sql = ("SELECT COUNT(*) FROM requirement r JOIN " + "environment e ON e.environment = r.environment " + "WHERE (r.environment = ? AND " + "r.saved = 0)") + + with sqlite3.connect(CACHE_DATABASE_PATH, + detect_types=sqlite3.PARSE_DECLTYPES) as conn: + cursor = conn.cursor() + result = cursor.execute(sql, (environment,)) + + row = result.fetchone() + if row is not None: + return row[0] == 0 return False + + +def update_environment(environment_type, old_environment, new_environment): + """Updates environment information for each requirement in one environment. + + It will remove the old environment and add the new one to the cache. + + :param environment_type: Type of fetched environment + :type environment_type: str + :param old_environment: Environment which should be updated + :type environment: str + :param new_environment: Environment, which will be a reimbursement for the + old one. + :type environment: str + """ + if not os.path.exists(CACHE_DATABASE_PATH): + return False + + with sqlite3.connect(CACHE_DATABASE_PATH) as conn: + cursor = conn.cursor() + sql = "INSERT OR IGNORE INTO environment VALUES (?, ?)" + cursor.execute(sql, (environment_type, new_environment)) + + sql = ("UPDATE requirement SET environment = ? WHERE (" + "environment_type = ? AND " + "environment = ? )") + + cursor.execute(sql, (new_environment, environment_type, + old_environment)) + + sql = ("DELETE FROM environment WHERE (" + "environment_type = ? AND " + "environment = ? )") + + cursor.execute(sql, (environment_type, old_environment)) + conn.commit() + + +def update_requirement_status(environment_type, environment, requirement_type, + requirement, new_status): + """Updates status of selected requirement in cache. + + The status has two values, save=True or not_save=False. + + :param environment_type: Type of fetched environment + :type environment_type: str + :param environment: Environment where the requirement is + :type environment: str + :param requirement_type: Type of the requirement in environment + :type requirement_type: str + :param requirement: Name of requirement + :type requirement: str + :param new_status: Requirement status which will be updated + :type new_status: bool + """ + + if not os.path.exists(CACHE_DATABASE_PATH): + return False + + sql = ("UPDATE requirement SET saved = ? WHERE (" + "environment_type = ? AND " + "environment = ? AND " + "requirement_type = ? AND " + "requirement = ?)") + + with sqlite3.connect(CACHE_DATABASE_PATH) as conn: + cursor = conn.cursor() + cursor.execute(sql, (new_status, environment_type, environment, + requirement_type, requirement)) + conn.commit() + + return True + + +def delete_environment(environment_type, environment): + """Deletes environment with all its requirements from cache. + + :param environment_type: Type of environment + :type environment_type: str + :param environment: Environment which will be deleted + :type environment: str + """ + + if not os.path.exists(CACHE_DATABASE_PATH): + return False + + with sqlite3.connect(CACHE_DATABASE_PATH) as conn: + sql = ("DELETE FROM requirement WHERE (" + "environment_type = ? AND " + "environment = ? 
)") + cursor = conn.cursor() + cursor.execute(sql, (environment_type, environment)) + sql = ("DELETE FROM environment WHERE (" + "environment_type = ? AND " + "environment = ? )") + cursor.execute(sql, (environment_type, environment)) + conn.commit() + + +def delete_requirement(environment_type, environment, requirement_type, + requirement): + """Deletes requirement from cache. + + :param environment_type: Type of environment + :type environment_type: str + :param environment: Environment where the requirement is. + :type environment: str + :param requirement_type: Type of the requirement in environment + :type requirement_type: str + :param requirement: Name of requirement which will be deleted + :type requirement: str + """ + + if not os.path.exists(CACHE_DATABASE_PATH): + return False + + with sqlite3.connect(CACHE_DATABASE_PATH) as conn: + sql = ("DELETE FROM requirement WHERE (" + "environment_type = ? AND " + "environment = ? AND " + "requirement_type = ? AND " + "requirement = ?)") + cursor = conn.cursor() + cursor.execute(sql, (environment_type, environment, requirement_type, + requirement)) + conn.commit() + + +def get_all_environments_with_requirement(environment_type, requirement_type, + requirement): + """Fetches all environments with selected requirement from cache. + + :param environment_type: Type of fetched environment + :type environment_type: str + :param requirement_type: Type of the requirement in environment + :type requirement_type: str + :param requirement: Name of requirement + :type requirement: str + :return: Dict with all environments which has selected requirements. + + """ + requirements = {} + if not os.path.exists(CACHE_DATABASE_PATH): + return requirements + + environment_select = ("SELECT e.environment FROM requirement r JOIN " + "environment e ON e.environment = r.environment " + "WHERE (r.environment_type = ? AND " + "r.requirement_type = ? AND " + "r.requirement = ?)") + sql = (f"SELECT r.environment, r.requirement_type, r.requirement " + f"FROM requirement AS r, ({environment_select}) AS e " + f"WHERE r.environment = e.environment") + + with sqlite3.connect(CACHE_DATABASE_PATH, + detect_types=sqlite3.PARSE_DECLTYPES) as conn: + cursor = conn.cursor() + result = cursor.execute(sql, (environment_type, + requirement_type, + requirement)) + + for row in result.fetchall(): + if row[0] in requirements: + requirements[row[0]].append((row[1], row[2])) + else: + requirements[row[0]] = [(row[1], row[2])] + return requirements diff --git a/avocado/core/plugin_interfaces.py b/avocado/core/plugin_interfaces.py index 75beeadad4..caee04f9b8 100644 --- a/avocado/core/plugin_interfaces.py +++ b/avocado/core/plugin_interfaces.py @@ -364,6 +364,46 @@ async def check_task_requirements(runtime_task): :type runtime_task: :class:`avocado.core.task.runtime.RuntimeTask` """ + @staticmethod + @abc.abstractmethod + async def is_requirement_in_cache(runtime_task): + """Checks if it's necessary to run the requirement. + + There are occasions when the similar requirement has been run and its + results are already saved in cache. In such occasion, it is not + necessary to run the task again. For example, this might be useful for + tasks which would install the same package to the same environment. + + :param runtime_task: runtime task with requirement + :type runtime_task: :class:`avocado.core.task.runtime.RuntimeTask` + :return: If the results are already in cache. 
+ :rtype: True if task is in cache + False if task is not in cache + None if task is running in different process and should be in + cache soon. + """ + + @staticmethod + @abc.abstractmethod + async def save_requirement_in_cache(runtime_task): + """Saves the information about requirement in cache before + the runtime_task is run. + + :param runtime_task: runtime task with requirement + :type runtime_task: :class:`avocado.core.task.runtime.RuntimeTask` + """ + + @staticmethod + @abc.abstractmethod + async def update_requirement_cache(runtime_task, result): + """Updates the information about requirement in cache based on result. + + :param runtime_task: runtime task with requirement + :type runtime_task: :class:`avocado.core.task.runtime.RuntimeTask` + :param result: result of runtime_task + :type result: `avocado.core.teststatus.STATUSES` + """ + class DeploymentSpawner(Spawner): """Spawners that needs basic deployment are based on this class. diff --git a/avocado/core/spawners/mock.py b/avocado/core/spawners/mock.py index d9ea21ebaa..7bec6962ea 100644 --- a/avocado/core/spawners/mock.py +++ b/avocado/core/spawners/mock.py @@ -47,6 +47,18 @@ async def terminate_task(self, runtime_task): async def check_task_requirements(runtime_task): return True + @staticmethod + async def is_requirement_in_cache(runtime_task): + return False + + @staticmethod + async def save_requirement_in_cache(runtime_task): + pass + + @staticmethod + async def update_requirement_cache(runtime_task, result): + pass + class MockRandomAliveSpawner(MockSpawner): """A mocking spawner that simulates randomness about tasks being alive.""" diff --git a/avocado/core/task/runtime.py b/avocado/core/task/runtime.py index 25089f9aee..da1bc186aa 100644 --- a/avocado/core/task/runtime.py +++ b/avocado/core/task/runtime.py @@ -1,4 +1,5 @@ from copy import deepcopy +from enum import Enum from itertools import chain from avocado.core.dispatcher import TestPreDispatcher @@ -7,6 +8,23 @@ from avocado.core.varianter import dump_variant +class RuntimeTaskStatus(Enum): + wait_dependencies = 'WAITING DEPENDENCIES' + wait = 'WAITING' + finished = 'FINISHED' + timeout = 'FINISHED TIMEOUT' + in_cache = 'FINISHED IN CACHE' + failfast = 'FINISHED FAILFAST' + fail_triage = 'FAILED ON TRIAGE' + fail_start = 'FAILED ON START' + started = 'STARTED' + + @staticmethod + def finished_statuses(): + return [status for _, status in RuntimeTaskStatus.__members__.items() + if "FINISHED" in status.value] + + class RuntimeTask: """Task with extra status information on its life cycle status. 
@@ -48,10 +66,7 @@ def __repr__(self): f'Status: "{self.status}">') def __hash__(self): - if self.task.category == "test": - return hash(self.task.identifier) - return hash((str(self.task.runnable), self.task.job_id, - self.task.category)) + return hash(self.task.identifier) def __eq__(self, other): if isinstance(other, RuntimeTask): @@ -60,8 +75,21 @@ def __eq__(self, other): def are_dependencies_finished(self): for dependency in self.dependencies: - if not dependency.status or not ("FINISHED" in dependency.status - or "FAILED" in dependency.status): + if dependency.status not in RuntimeTaskStatus.finished_statuses(): + return False + return True + + def get_finished_dependencies(self): + """Returns all dependencies which already finished.""" + return [dep for dep in self.dependencies if + dep.status in RuntimeTaskStatus.finished_statuses()] + + def can_run(self): + if not self.are_dependencies_finished(): + return False + + for dependency in self.dependencies: + if dependency.result != 'pass': return False return True @@ -209,21 +237,15 @@ def __init__(self, tests, test_suite_name, status_server_uri, job_id): runnable, status_server_uri, job_id) - self._connect_tasks(pre_tasks, [runtime_test]) - - def _connect_tasks(self, dependencies, tasks): - def _get_task_from_graph(task): - if task in self.graph: - task = self.graph.get(task) - else: - self.graph[task] = task - return task - - for dependency_task in dependencies: - dependency_task = _get_task_from_graph(dependency_task) - for task in tasks: - task = _get_task_from_graph(task) - task.dependencies.append(dependency_task) + if pre_tasks: + pre_tasks.append(runtime_test) + self._connect_tasks(pre_tasks) + + def _connect_tasks(self, tasks): + for dependency, task in zip(tasks, tasks[1:]): + self.graph[task] = task + self.graph[dependency] = dependency + task.dependencies.append(dependency) def get_tasks_in_topological_order(self): """Computes the topological order of runtime tasks in graph diff --git a/avocado/core/task/statemachine.py b/avocado/core/task/statemachine.py index 95b8237f96..98227e5ef2 100644 --- a/avocado/core/task/statemachine.py +++ b/avocado/core/task/statemachine.py @@ -5,6 +5,7 @@ import time from avocado.core.exceptions import TestFailFast +from avocado.core.task.runtime import RuntimeTaskStatus from avocado.core.teststatus import STATUSES_NOT_OK LOG = logging.getLogger(__name__) @@ -21,6 +22,7 @@ def __init__(self, tasks, status_repo): self._started = [] self._finished = [] self._lock = asyncio.Lock() + self._cache_lock = asyncio.Lock() @property def requested(self): @@ -46,6 +48,10 @@ def finished(self): def lock(self): return self._lock + @property + def cache_lock(self): + return self._cache_lock + @property async def complete(self): async with self._lock: @@ -105,7 +111,7 @@ async def finish_task(self, runtime_task, status_reason=None): if runtime_task not in self.finished: if status_reason: runtime_task.status = status_reason - LOG.debug('Task "%s" finished: %s', + LOG.debug('Task "%s" finished with status: %s', runtime_task.task.identifier, status_reason) else: LOG.debug('Task "%s" finished', runtime_task.task.identifier) @@ -151,37 +157,6 @@ async def bootstrap(self): async def triage(self): """Reads from triaging, moves into either: ready or finished.""" - async def check_finished_dependencies(runtime_task): - for dependency in runtime_task.dependencies: - task = dependency.task - # check if this dependency `task` failed on triage - # this is needed because this kind of task fail does not - # have 
information in the status repo - for finished_rt_task in self._state_machine.finished: - if (finished_rt_task.task is task and - finished_rt_task.status == 'FAILED ON TRIAGE'): - - await self._state_machine.finish_task(runtime_task, - "FAILED ON TRIAGE") - return - # from here, this dependency `task` ran, so, let's check - # the its latest data in the status repo - latest_task_data = \ - self._state_machine._status_repo.get_latest_task_data( - str(task.identifier)) - # maybe, the latest data is not available yet - if latest_task_data is None: - async with self._state_machine.lock: - self._state_machine.triaging.append(runtime_task) - await asyncio.sleep(0.1) - return - # if this dependency task failed, skip the parent task - if latest_task_data['result'] not in ['pass']: - await self._state_machine.finish_task(runtime_task, - "FAILED ON TRIAGE") - return - # everything is fine! - return True try: async with self._state_machine.lock: @@ -190,7 +165,7 @@ async def check_finished_dependencies(runtime_task): return # a task waiting requirements already checked its requirements - if runtime_task.status != 'WAITING DEPENDENCIES': + if runtime_task.status != RuntimeTaskStatus.wait_dependencies: # check for requirements a task may have requirements_ok = ( await self._spawner.check_task_requirements(runtime_task)) @@ -198,8 +173,8 @@ async def check_finished_dependencies(runtime_task): LOG.debug('Task "%s": requirements OK (will proceed to check ' 'dependencies)', runtime_task.task.identifier) else: - await self._state_machine.finish_task(runtime_task, - "FAILED ON TRIAGE") + await self._state_machine.finish_task( + runtime_task, RuntimeTaskStatus.fail_triage) return # handle task dependencies @@ -208,17 +183,35 @@ async def check_finished_dependencies(runtime_task): if not runtime_task.are_dependencies_finished(): async with self._state_machine.lock: self._state_machine.triaging.append(runtime_task) - runtime_task.status = 'WAITING DEPENDENCIES' + runtime_task.status = RuntimeTaskStatus.wait_dependencies await asyncio.sleep(0.1) return # dependencies finished, let's check if they finished # successfully, so we can move on with the parent task - dependencies_ok = await check_finished_dependencies(runtime_task) + dependencies_ok = runtime_task.can_run() if not dependencies_ok: LOG.debug('Task "%s" has failed dependencies', runtime_task.task.identifier) return + if runtime_task.task.category != "test": + async with self._state_machine.cache_lock: + is_task_in_cache = await self._spawner.is_requirement_in_cache( + runtime_task) + if is_task_in_cache is None: + async with self._state_machine.lock: + self._state_machine.triaging.append(runtime_task) + runtime_task.status = RuntimeTaskStatus.wait + await asyncio.sleep(0.1) + return + + if is_task_in_cache: + await self._state_machine.finish_task( + runtime_task, RuntimeTaskStatus.in_cache) + runtime_task.result = 'pass' + return + + await self._spawner.save_requirement_in_cache(runtime_task) # the task is ready to run async with self._state_machine.lock: @@ -240,7 +233,7 @@ async def start(self): async with self._state_machine.lock: if len(self._state_machine.started) >= self._max_running: self._state_machine.ready.insert(0, runtime_task) - runtime_task.status = 'WAITING' + runtime_task.status = RuntimeTaskStatus.wait should_wait = True if should_wait: await asyncio.sleep(0.1) @@ -252,14 +245,14 @@ async def start(self): if start_ok: LOG.debug('Task "%s": spawned successfully', runtime_task.task.identifier) - runtime_task.status = None + 
runtime_task.status = RuntimeTaskStatus.started if self._task_timeout is not None: runtime_task.execution_timeout = time.monotonic() + self._task_timeout async with self._state_machine.lock: self._state_machine.started.append(runtime_task) else: await self._state_machine.finish_task(runtime_task, - "FAILED ON START") + RuntimeTaskStatus.fail_start) async def monitor(self): """Reads from started, moves into finished.""" @@ -277,9 +270,8 @@ async def monitor(self): remaining = runtime_task.execution_timeout - time.monotonic() await asyncio.wait_for(self._spawner.wait_task(runtime_task), remaining) - runtime_task.status = 'FINISHED' except asyncio.TimeoutError: - runtime_task.status = 'FINISHED W/ TIMEOUT' + runtime_task.status = RuntimeTaskStatus.timeout await self._spawner.terminate_task(runtime_task) message = {'status': 'finished', 'result': 'interrupted', @@ -288,13 +280,30 @@ async def monitor(self): 'id': str(runtime_task.task.identifier), 'job_id': runtime_task.task.job_id} self._state_machine._status_repo.process_message(message) + # from here, this `task` ran, so, let's check + # the its latest data in the status repo + latest_task_data = \ + self._state_machine._status_repo.get_latest_task_data( + str(runtime_task.task.identifier)) or {} + # maybe, the results are not available yet + while latest_task_data.get("result") is None: + await asyncio.sleep(0.1) + latest_task_data = \ + self._state_machine._status_repo.get_latest_task_data( + str(runtime_task.task.identifier)) or {} + if runtime_task.task.category != "test": + async with self._state_machine.cache_lock: + await self._spawner.update_requirement_cache( + runtime_task, latest_task_data['result'].upper()) + runtime_task.result = latest_task_data['result'] result_stats = set(key.upper()for key in self._state_machine._status_repo.result_stats.keys()) if self._failfast and not result_stats.isdisjoint(STATUSES_NOT_OK): - await self._state_machine.abort("FAILFAST is enabled") + await self._state_machine.abort(RuntimeTaskStatus.failfast) raise TestFailFast("Interrupting job (failfast).") - await self._state_machine.finish_task(runtime_task) + await self._state_machine.finish_task(runtime_task, + RuntimeTaskStatus.finished) async def run(self): """Pushes Tasks forward and makes them do something with their lives.""" diff --git a/avocado/plugins/spawners/podman.py b/avocado/plugins/spawners/podman.py index 8d9df28ee5..04e0dce828 100644 --- a/avocado/plugins/spawners/podman.py +++ b/avocado/plugins/spawners/podman.py @@ -3,10 +3,13 @@ import logging import os import subprocess +import uuid +from avocado.core.dependencies.requirements import cache from avocado.core.plugin_interfaces import CLI, DeploymentSpawner, Init from avocado.core.settings import settings from avocado.core.spawners.common import SpawnerMixin, SpawnMethod +from avocado.core.teststatus import STATUSES_NOT_OK from avocado.core.version import VERSION from avocado.utils import distro from avocado.utils.asset import Asset @@ -96,6 +99,10 @@ class PodmanSpawner(DeploymentSpawner, SpawnerMixin): _PYTHON_VERSIONS_CACHE = {} + def __init__(self, config=None, job=None): + SpawnerMixin.__init__(self, config, job) + self.environment = f"podman:{self.config.get('spawner.podman.image')}" + def is_task_alive(self, runtime_task): if runtime_task.spawner_handle is None: return False @@ -211,7 +218,9 @@ async def _create_container_for_task(self, runtime_task, env_args, (f"{test_output}:" f"{os.path.expanduser(podman_output)}")) - image = self.config.get('spawner.podman.image') + 
image, _ = self._get_image_from_cache(runtime_task) + if not image: + image = self.config.get('spawner.podman.image') envs = [f"-e={k}={v}" for k, v in env_args.items()] try: @@ -294,3 +303,78 @@ async def check_task_requirements(runtime_task): if runtime_task.task.runnable.runner_command() is None: return False return True + + async def update_requirement_cache(self, runtime_task, result): + environment_id, _ = self._get_image_from_cache(runtime_task, True) + if result in STATUSES_NOT_OK: + cache.delete_environment(self.environment, environment_id) + return + _, stdout, _ = await self.podman.execute("commit", "-q", + runtime_task.spawner_handle) + container_id = stdout.decode().strip() + cache.update_environment(self.environment, + environment_id, + container_id) + cache.update_requirement_status(self.environment, + container_id, + runtime_task.task.runnable.kind, + runtime_task.task.runnable.kwargs.get( + 'name'), + True) + + async def save_requirement_in_cache(self, runtime_task): + container_id = str(uuid.uuid4()) + _, requirements = self._get_image_from_cache(runtime_task) + if requirements: + for requirement_type, requirement in requirements: + cache.set_requirement(self.environment, container_id, + requirement_type, requirement) + cache.set_requirement(self.environment, + container_id, + runtime_task.task.runnable.kind, + runtime_task.task.runnable.kwargs.get('name'), + False) + + async def is_requirement_in_cache(self, runtime_task): + environment, _ = self._get_image_from_cache(runtime_task, + use_task=True) + if not environment: + return False + if cache.is_environment_prepared(environment): + return True + return None + + def _get_image_from_cache(self, runtime_task, use_task=False): + + def _get_all_finished_requirements(requirement_tasks): + all_finished_requirements = [] + for requirement in requirement_tasks: + all_finished_requirements.extend(_get_all_finished_requirements( + requirement.dependencies)) + runnable = requirement.task.runnable + all_finished_requirements.append((runnable.kind, + runnable.kwargs.get('name'))) + return all_finished_requirements + + finished_requirements = [] + if use_task: + finished_requirements.append( + (runtime_task.task.runnable.kind, + runtime_task.task.runnable.kwargs.get('name'))) + finished_requirements.extend( + _get_all_finished_requirements(runtime_task.dependencies)) + if not finished_requirements: + return None, None + + runtime_task_kind, runtime_task_name = finished_requirements[0] + cache_entries = cache.get_all_environments_with_requirement( + self.environment, + runtime_task_kind, + runtime_task_name) + if not cache_entries: + return None, None + for image, requirements in cache_entries.items(): + if len(finished_requirements) == len(requirements): + if set(requirements) == set(finished_requirements): + return image, requirements + return None, None diff --git a/avocado/plugins/spawners/process.py b/avocado/plugins/spawners/process.py index c5fb901628..29c20c4ed2 100644 --- a/avocado/plugins/spawners/process.py +++ b/avocado/plugins/spawners/process.py @@ -1,9 +1,15 @@ import asyncio import os +import socket import sys +from avocado.core.dependencies.requirements import cache from avocado.core.plugin_interfaces import Spawner from avocado.core.spawners.common import SpawnerMixin, SpawnMethod +from avocado.core.teststatus import STATUSES_NOT_OK + +ENVIRONMENT_TYPE = 'local' +ENVIRONMENT = socket.gethostname() class ProcessSpawner(Spawner, SpawnerMixin): @@ -64,3 +70,33 @@ async def check_task_requirements(runtime_task): if 
runtime_task.task.runnable.runner_command() is None: return False return True + + @staticmethod + async def update_requirement_cache(runtime_task, result): + kind = runtime_task.task.runnable.kind + name = runtime_task.task.runnable.kwargs.get('name') + cache.set_requirement(ENVIRONMENT_TYPE, ENVIRONMENT, kind, name) + if result in STATUSES_NOT_OK: + cache.delete_requirement(ENVIRONMENT_TYPE, ENVIRONMENT, kind, name) + return + cache.update_requirement_status(ENVIRONMENT_TYPE, + ENVIRONMENT, + kind, + name, + True) + + @staticmethod + async def is_requirement_in_cache(runtime_task): + kind = runtime_task.task.runnable.kind + name = runtime_task.task.runnable.kwargs.get('name') + return cache.is_requirement_in_cache(ENVIRONMENT_TYPE, + ENVIRONMENT, + kind, + name) + + @staticmethod + async def save_requirement_in_cache(runtime_task): + kind = runtime_task.task.runnable.kind + name = runtime_task.task.runnable.kwargs.get('name') + cache.set_requirement(ENVIRONMENT_TYPE, ENVIRONMENT, kind, name, + False) diff --git a/docs/source/guides/user/chapters/requirements.rst b/docs/source/guides/user/chapters/requirements.rst index 4cd5dbce88..b71ac208af 100644 --- a/docs/source/guides/user/chapters/requirements.rst +++ b/docs/source/guides/user/chapters/requirements.rst @@ -19,6 +19,21 @@ it is started. When any of the requirements defined on a test fails, the test is skipped. +When the requirement is fulfilled, it will be saved into the avocado cache, and +it will be reused by other tests. + +Also, the requirement will stay in cache after the Avocado run, so the second +run of the tests will use requirements from cache, which will make tests more +efficient. + +.. warning:: + + If any environment is modified without Avocado knowing about it + (packages being uninstalled, podman images removed, etc), the + requirement resolution behavior is undefined and will probably crash. + If such a change is made to the environment, it's recommended to clear + the requirements cache file. 
+ Defining a test requirement --------------------------- diff --git a/selftests/check.py b/selftests/check.py index 156bbf8b0c..62d2644961 100755 --- a/selftests/check.py +++ b/selftests/check.py @@ -203,13 +203,14 @@ def parse_args(): epilog='''\ The list of test availables for --skip and --select are: - static-checks Run static checks (isort, lint, etc) - job-api Run job API checks - nrunner-interface Run selftests/functional/test_nrunner_interface.py - unit Run selftests/unit/ - jobs Run selftests/jobs/ - functional Run selftests/functional/ - optional-plugins Run optional_plugins/*/tests/ + static-checks Run static checks (isort, lint, etc) + job-api Run job API checks + nrunner-interface Run selftests/functional/test_nrunner_interface.py + nrunner-requirement Run selftests/functional/serial/test_requirements.py + unit Run selftests/unit/ + jobs Run selftests/jobs/ + functional Run selftests/functional/ + optional-plugins Run optional_plugins/*/tests/ ''') group = parser.add_mutually_exclusive_group() parser.add_argument('-f', @@ -568,6 +569,22 @@ def create_suites(args): # pylint: disable=W0621 if args.dict_tests['nrunner-interface']: suites.append(TestSuite.from_config(config_nrunner_interface, "nrunner-interface")) + # ======================================================================== + # Run functional requirement tests + # ======================================================================== + config_nrunner_requirement = { + 'resolver.references': ['selftests/functional/serial/test_requirements.py'], + 'nrunner.max_parallel_tasks': 1, + 'run.dict_variants': [ + {'spawner': 'process'}, + + {'spawner': 'podman'}, + ] + } + + if args.dict_tests['nrunner-requirement']: + suites.append(TestSuite.from_config(config_nrunner_requirement, "nrunner-requirement")) + # ======================================================================== # Run all static checks, unit and functional tests # ======================================================================== @@ -629,6 +646,7 @@ def main(args): # pylint: disable=W0621 'static-checks': False, 'job-api': False, 'nrunner-interface': False, + 'nrunner-requirement': False, 'unit': False, 'jobs': False, 'functional': False, diff --git a/selftests/functional/serial/test_requirements.py b/selftests/functional/serial/test_requirements.py index c48e5c2adc..4ecd189c10 100644 --- a/selftests/functional/serial/test_requirements.py +++ b/selftests/functional/serial/test_requirements.py @@ -1,9 +1,10 @@ import os import unittest +from avocado import Test, skipUnless from avocado.core import exit_codes from avocado.utils import process, script -from selftests.utils import AVOCADO, TestCaseTmpDir +from selftests.utils import AVOCADO SINGLE_SUCCESS_CHECK = '''#!/usr/bin/env python3 @@ -34,27 +35,39 @@ def test_check(self): MULTIPLE_SUCCESS = '''#!/usr/bin/env python3 from avocado import Test +from avocado.utils import process class SuccessTest(Test): + def check_hello(self): + result = process.run("hello", ignore_status=True) + self.assertEqual(result.exit_status, 0) + self.assertIn('Hello, world!', result.stdout_text,) + def test_a(self): """ :avocado: dependency={"type": "package", "name": "hello"} """ + self.check_hello() + def test_b(self): """ :avocado: dependency={"type": "package", "name": "hello"} """ + self.check_hello() + def test_c(self): """ :avocado: dependency={"type": "package", "name": "hello"} """ + self.check_hello() ''' MULTIPLE_FAIL = '''#!/usr/bin/env python3 from avocado import Test +from avocado.utils import process class 
FailTest(Test): @@ -68,6 +81,10 @@ def test_b(self): """ :avocado: dependency={"type": "package", "name": "hello"} """ + result = process.run("hello", ignore_status=True) + self.assertEqual(result.exit_status, 0) + self.assertIn('Hello, world!', result.stdout_text,) + def test_c(self): """ :avocado: dependency={"type": "package", "name": "hello"} @@ -76,9 +93,12 @@ def test_c(self): ''' -class BasicTest(TestCaseTmpDir): +class BasicTest(Test): + + """ + :avocado: dependency={"type": "package", "name": "podman", "action": "check"} + """ - command = '%s run %s' skip_install_message = ("This test runs on CI environments only as it" " installs packages to test the feature, which" " may not be desired locally, in the user's" @@ -88,55 +108,60 @@ class BasicTest(TestCaseTmpDir): " manager, and some environments don't" " have it available.") - @unittest.skipUnless(os.getenv('CI'), skip_package_manager_message) + def get_command(self, path): + spawner = self.params.get('spawner', default='process') + spawner_command = '' + if spawner == 'podman': + spawner_command = ("--nrunner-spawner=podman " + "--spawner-podman-image=fedora:latest") + return f"{AVOCADO} run {spawner_command} {path}" + + @skipUnless(os.getenv('CI'), skip_package_manager_message) def test_single_success(self): - with script.Script(os.path.join(self.tmpdir.name, + with script.Script(os.path.join(self.workdir, 'test_single_success.py'), SINGLE_SUCCESS_CHECK) as test: - command = self.command % (AVOCADO, test.path) + command = self.get_command(test.path) result = process.run(command, ignore_status=True) self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK) self.assertIn('PASS 1', result.stdout_text,) self.assertNotIn('bash', result.stdout_text,) - @unittest.skipUnless(os.getenv('CI'), skip_package_manager_message) + @skipUnless(os.getenv('CI'), skip_package_manager_message) def test_single_fail(self): - with script.Script(os.path.join(self.tmpdir.name, + with script.Script(os.path.join(self.workdir, 'test_single_fail.py'), SINGLE_FAIL_CHECK) as test: - command = self.command % (AVOCADO, test.path) + command = self.get_command(test.path) result = process.run(command, ignore_status=True) self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK) self.assertIn('PASS 0', result.stdout_text,) self.assertIn('SKIP 1', result.stdout_text,) self.assertNotIn('-foo-bar-', result.stdout_text,) - @unittest.skipUnless(os.getenv('CI'), skip_install_message) + @skipUnless(os.getenv('CI'), skip_install_message) def test_multiple_success(self): - with script.Script(os.path.join(self.tmpdir.name, + with script.Script(os.path.join(self.workdir, 'test_multiple_success.py'), MULTIPLE_SUCCESS) as test: - command = self.command % (AVOCADO, test.path) + command = self.get_command(test.path) result = process.run(command, ignore_status=True) self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK) self.assertIn('PASS 3', result.stdout_text,) self.assertNotIn('vim-common', result.stdout_text,) - @unittest.skipUnless(os.getenv('CI'), skip_install_message) + @skipUnless(os.getenv('CI'), skip_install_message) def test_multiple_fails(self): - with script.Script(os.path.join(self.tmpdir.name, + with script.Script(os.path.join(self.workdir, 'test_multiple_fail.py'), MULTIPLE_FAIL) as test: - command = self.command % (AVOCADO, test.path) + command = self.get_command(test.path) result = process.run(command, ignore_status=True) self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK) self.assertIn('PASS 1', result.stdout_text,) 
self.assertIn('SKIP 2', result.stdout_text,) self.assertNotIn('-foo-bar-', result.stdout_text,) - def tearDown(self): - self.tmpdir.cleanup() - if __name__ == '__main__': unittest.main() diff --git a/selftests/functional/test_requirements_cache.py b/selftests/functional/test_requirements_cache.py index 2de9b808c4..98fd59d54f 100644 --- a/selftests/functional/test_requirements_cache.py +++ b/selftests/functional/test_requirements_cache.py @@ -9,6 +9,14 @@ 'cd34d13b2980d0a9d438f754b2e94f85443076d0ebe1b0db09a0439f35feca5e', 'package', 'bash'), + ('podman', + 'cd34d13b2980d0a9d438f754b2e94f85443076d0ebe1b0db09a0439f35feca5e', + 'package', + 'hello'), + ('podman', + 'ad34d13b2980d0a9d438f754b2e94f85443076d0ebe1b0db09a0439f35feca5e', + 'package', + 'hello'), ('podman', 'cd34d13b2980d0a9d438f754b2e94f85443076d0ebe1b0db09a0439f35feca5e', 'core', @@ -16,7 +24,12 @@ ('local', 'localhost.localdomain', 'core', - 'avocado') + 'avocado'), + ('podman', + 'pd34d13b2980d0a9d438f754b2e94f85443076d0ebe1b0db09a0439f35feca5e', + 'package', + 'foo', + 0), ] @@ -27,13 +40,53 @@ def test_entries(self): 'avocado.core.dependencies.requirements.cache.backends.sqlite.CACHE_DATABASE_PATH', os.path.join(self.tmpdir.name, 'requirements.sqlite')): - for entry in ENTRIES: + for entry in ENTRIES[:-1]: cache.set_requirement(*entry) - self.assertTrue(cache.get_requirement(*entry)) + self.assertTrue(cache.is_requirement_in_cache(*entry)) + entry = ENTRIES[-1] + cache.set_requirement(*entry) + self.assertIsNone(cache.is_requirement_in_cache(entry[0], + entry[1], + entry[2], + entry[3])) + self.assertFalse(cache.is_requirement_in_cache('local', + 'localhost.localdomain', + 'package', + 'foo')) def test_empty(self): with unittest.mock.patch( 'avocado.core.dependencies.requirements.cache.backends.sqlite.CACHE_DATABASE_PATH', os.path.join(self.tmpdir.name, 'requirements.sqlite')): - self.assertFalse(cache.get_requirement(*ENTRIES[0])) + self.assertFalse(cache.is_requirement_in_cache(*ENTRIES[0])) + + def test_is_environment_prepared(self): + with unittest.mock.patch( + 'avocado.core.dependencies.requirements.cache.backends.sqlite.CACHE_DATABASE_PATH', + os.path.join(self.tmpdir.name, + 'requirements.sqlite')): + for entry in ENTRIES: + cache.set_requirement(*entry) + self.assertFalse(cache.is_environment_prepared( + "pd34d13b2980d0a9d438f754b2e94f85443076d0ebe1b0db09a0439f35feca5e")) + self.assertTrue(cache.is_environment_prepared( + "cd34d13b2980d0a9d438f754b2e94f85443076d0ebe1b0db09a0439f35feca5e")) + + def test_get_all_environments_with_requirement(self): + with unittest.mock.patch( + 'avocado.core.dependencies.requirements.cache.backends.sqlite.CACHE_DATABASE_PATH', + os.path.join(self.tmpdir.name, + 'requirements.sqlite')): + for entry in ENTRIES: + cache.set_requirement(*entry) + all_requirements = cache.get_all_environments_with_requirement( + 'podman', 'package', 'hello') + expected_data = { + 'ad34d13b2980d0a9d438f754b2e94f85443076d0ebe1b0db09a0439f35feca5e': + [('package', 'hello')], + 'cd34d13b2980d0a9d438f754b2e94f85443076d0ebe1b0db09a0439f35feca5e': + [('core', 'avocado'), + ('package', 'bash'), + ('package', 'hello')]} + self.assertEqual(all_requirements, expected_data) diff --git a/selftests/unit/test_task_runtime.py b/selftests/unit/test_task_runtime.py index b1042d5a67..6f0fc03b65 100644 --- a/selftests/unit/test_task_runtime.py +++ b/selftests/unit/test_task_runtime.py @@ -76,7 +76,9 @@ def test_one_dependency(self): self.assertTrue( runtime_tests[2].task.identifier.name.endswith("test_b")) self.assertTrue( - 
runtime_tests[3].task.identifier.name.endswith("test_c")) + runtime_tests[3].task.identifier.name.endswith("hello")) + self.assertTrue( + runtime_tests[4].task.identifier.name.endswith("test_c")) def test_multiple_dependencies(self): with script.Script(os.path.join(self.tmpdir.name, @@ -94,6 +96,8 @@ def test_multiple_dependencies(self): self.assertTrue( runtime_tests[2].task.identifier.name.endswith("test_b")) self.assertTrue( - runtime_tests[3].task.identifier.name.endswith("-foo-bar-")) + runtime_tests[3].task.identifier.name.endswith("hello")) self.assertTrue( - runtime_tests[4].task.identifier.name.endswith("test_c")) + runtime_tests[4].task.identifier.name.endswith("-foo-bar-")) + self.assertTrue( + runtime_tests[5].task.identifier.name.endswith("test_c"))
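For reference, a minimal sketch of how the sqlite cache backend added in this patch can be exercised on its own. It is illustrative only: the environment id reuses the hypothetical podman image digest from the selftest entries above, the requirement values are made up, and running it will write to the real requirements.sqlite data file. Only functions introduced by this patch are used.

from avocado.core.dependencies.requirements import cache

ENV_TYPE = 'podman'
# Hypothetical environment id (e.g. a podman image digest).
ENV_ID = 'cd34d13b2980d0a9d438f754b2e94f85443076d0ebe1b0db09a0439f35feca5e'

# Record the requirement before it is fulfilled (saved=False) ...
cache.set_requirement(ENV_TYPE, ENV_ID, 'package', 'hello', False)
# ... so a concurrent check reports it as "in progress" (returns None).
print(cache.is_requirement_in_cache(ENV_TYPE, ENV_ID, 'package', 'hello'))

# Once the requirement task has passed, mark it as saved.
cache.update_requirement_status(ENV_TYPE, ENV_ID, 'package', 'hello', True)
print(cache.is_requirement_in_cache(ENV_TYPE, ENV_ID, 'package', 'hello'))  # True
print(cache.is_environment_prepared(ENV_ID))                                # True

# All environments of this type that already carry the requirement,
# keyed by environment id.
print(cache.get_all_environments_with_requirement(ENV_TYPE, 'package', 'hello'))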