From 1537e73a89230fb9fe65ab78e5e533c70348b3b0 Mon Sep 17 00:00:00 2001
From: Sanniti Pimpley
Date: Wed, 3 Apr 2024 17:55:07 -0400
Subject: [PATCH] fix(robot-server): maintain correct order of protocol analyses (#14762)

Closes AUTH-229

# Overview

Updates the `/protocols` endpoints to always keep the list of analyses ordered with the most recently started analysis last, and verifies whether a new analysis needs to be triggered because of new run-time parameter values for a previously uploaded protocol.

# Risk assessment

Medium. Performs a database schema migration and fixes the analysis order that was broken by #14688

---------

Co-authored-by: Max Marrone
---
 api/src/opentrons/protocol_engine/types.py    |   1 +
 .../persistence/_migrations/v3_to_v4.py       |  52 +++
 .../persistence/persistence_directory.py      |   7 +-
 .../persistence/tables/__init__.py            |   2 +-
 .../persistence/tables/schema_4.py            | 130 +++++++
 .../robot_server/protocols/analysis_models.py |   9 +-
 .../robot_server/protocols/analysis_store.py  | 103 ++++-
 .../protocols/completed_analysis_store.py     |  82 +++-
 robot-server/robot_server/protocols/router.py |  64 +++-
 .../http_api/persistence/test_reset.py        |   6 +-
 .../protocols/test_analyses.tavern.yaml       |  23 --
 ...lyses_with_run_time_parameters.tavern.yaml | 180 +++++++++
 .../http_api/protocols/test_key.tavern.yaml   |   2 +
 .../http_api/protocols/test_persistence.py    |   4 +-
 ...basic_transfer_with_run_time_parameters.py |  57 +++
 robot-server/tests/persistence/test_tables.py |  69 +++-
 .../tests/protocols/test_analysis_store.py    | 203 +++++++++-
 .../test_completed_analysis_store.py          |  51 ++-
 .../tests/protocols/test_protocols_router.py  | 359 +++++++++++++++++-
 19 files changed, 1331 insertions(+), 73 deletions(-)
 create mode 100644 robot-server/robot_server/persistence/_migrations/v3_to_v4.py
 create mode 100644 robot-server/robot_server/persistence/tables/schema_4.py
 create mode 100644 robot-server/tests/integration/http_api/protocols/test_analyses_with_run_time_parameters.tavern.yaml
 create mode 100644 robot-server/tests/integration/protocols/basic_transfer_with_run_time_parameters.py

diff --git a/api/src/opentrons/protocol_engine/types.py b/api/src/opentrons/protocol_engine/types.py
index 266dc6aa81f..3d833a65042 100644
--- a/api/src/opentrons/protocol_engine/types.py
+++ b/api/src/opentrons/protocol_engine/types.py
@@ -847,6 +847,7 @@ def from_hw_state(cls, state: HwTipStateType) -> "TipPresenceStatus":
         }[state]
 
 
+# TODO (spp, 2024-04-02): move all RTP types to runner
 class RTPBase(BaseModel):
     """Parameters defined in a protocol."""
 
diff --git a/robot-server/robot_server/persistence/_migrations/v3_to_v4.py b/robot-server/robot_server/persistence/_migrations/v3_to_v4.py
new file mode 100644
index 00000000000..8b4445aaec3
--- /dev/null
+++ b/robot-server/robot_server/persistence/_migrations/v3_to_v4.py
@@ -0,0 +1,52 @@
+"""Migrate the persistence directory from schema 3 to 4.
+ +Summary of changes from schema 3: + +- Adds a new "run_time_parameter_values_and_defaults" column to analysis table +""" + +from pathlib import Path +from contextlib import ExitStack +import shutil +from typing import Any + +import sqlalchemy + +from ..database import sql_engine_ctx +from ..tables import schema_4 +from .._folder_migrator import Migration + +_DB_FILE = "robot_server.db" + + +class Migration3to4(Migration): # noqa: D101 + def migrate(self, source_dir: Path, dest_dir: Path) -> None: + """Migrate the persistence directory from schema 3 to 4.""" + # Copy over all existing directories and files to new version + for item in source_dir.iterdir(): + if item.is_dir(): + shutil.copytree(src=item, dst=dest_dir / item.name) + else: + shutil.copy(src=item, dst=dest_dir / item.name) + dest_db_file = dest_dir / _DB_FILE + + # Append the new column to existing analyses in v4 database + with ExitStack() as exit_stack: + dest_engine = exit_stack.enter_context(sql_engine_ctx(dest_db_file)) + schema_4.metadata.create_all(dest_engine) + + def add_column( + engine: sqlalchemy.engine.Engine, + table_name: str, + column: Any, + ) -> None: + column_type = column.type.compile(engine.dialect) + engine.execute( + f"ALTER TABLE {table_name} ADD COLUMN {column.key} {column_type}" + ) + + add_column( + dest_engine, + schema_4.analysis_table.name, + schema_4.analysis_table.c.run_time_parameter_values_and_defaults, + ) diff --git a/robot-server/robot_server/persistence/persistence_directory.py b/robot-server/robot_server/persistence/persistence_directory.py index 666d5c7998f..b7982b38555 100644 --- a/robot-server/robot_server/persistence/persistence_directory.py +++ b/robot-server/robot_server/persistence/persistence_directory.py @@ -11,7 +11,7 @@ from anyio import Path as AsyncPath, to_thread from ._folder_migrator import MigrationOrchestrator -from ._migrations import up_to_3 +from ._migrations import up_to_3, v3_to_v4 _TEMP_PERSISTENCE_DIR_PREFIX: Final = "opentrons-robot-server-" @@ -48,7 +48,10 @@ async def prepare_active_subdirectory(prepared_root: Path) -> Path: """Return the active persistence subdirectory after preparing it, if necessary.""" migration_orchestrator = MigrationOrchestrator( root=prepared_root, - migrations=[up_to_3.MigrationUpTo3(subdirectory="3")], + migrations=[ + up_to_3.MigrationUpTo3(subdirectory="3"), + v3_to_v4.Migration3to4(subdirectory="4"), + ], temp_file_prefix="temp-", ) diff --git a/robot-server/robot_server/persistence/tables/__init__.py b/robot-server/robot_server/persistence/tables/__init__.py index 97262e73fab..0aaf869fb35 100644 --- a/robot-server/robot_server/persistence/tables/__init__.py +++ b/robot-server/robot_server/persistence/tables/__init__.py @@ -1,7 +1,7 @@ """SQL database schemas.""" # Re-export the latest schema. 
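+# (Schema 4 only adds the `run_time_parameter_values_and_defaults` column to
+# the analysis table; all other tables are unchanged from schema 3.)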
-from .schema_3 import ( +from .schema_4 import ( metadata, protocol_table, analysis_table, diff --git a/robot-server/robot_server/persistence/tables/schema_4.py b/robot-server/robot_server/persistence/tables/schema_4.py new file mode 100644 index 00000000000..47d29d3d8f3 --- /dev/null +++ b/robot-server/robot_server/persistence/tables/schema_4.py @@ -0,0 +1,130 @@ +"""v4 of our SQLite schema.""" + +import sqlalchemy + +from robot_server.persistence._utc_datetime import UTCDateTime + +metadata = sqlalchemy.MetaData() + +protocol_table = sqlalchemy.Table( + "protocol", + metadata, + sqlalchemy.Column( + "id", + sqlalchemy.String, + primary_key=True, + ), + sqlalchemy.Column( + "created_at", + UTCDateTime, + nullable=False, + ), + sqlalchemy.Column("protocol_key", sqlalchemy.String, nullable=True), +) + +analysis_table = sqlalchemy.Table( + "analysis", + metadata, + sqlalchemy.Column( + "id", + sqlalchemy.String, + primary_key=True, + ), + sqlalchemy.Column( + "protocol_id", + sqlalchemy.String, + sqlalchemy.ForeignKey("protocol.id"), + index=True, + nullable=False, + ), + sqlalchemy.Column( + "analyzer_version", + sqlalchemy.String, + nullable=False, + ), + sqlalchemy.Column( + "completed_analysis", + # Stores a JSON string. See CompletedAnalysisStore. + sqlalchemy.String, + nullable=False, + ), + # column added in schema v4 + sqlalchemy.Column( + "run_time_parameter_values_and_defaults", + sqlalchemy.String, + nullable=True, + ), +) + +run_table = sqlalchemy.Table( + "run", + metadata, + sqlalchemy.Column( + "id", + sqlalchemy.String, + primary_key=True, + ), + sqlalchemy.Column( + "created_at", + UTCDateTime, + nullable=False, + ), + sqlalchemy.Column( + "protocol_id", + sqlalchemy.String, + sqlalchemy.ForeignKey("protocol.id"), + nullable=True, + ), + # column added in schema v1 + sqlalchemy.Column( + "state_summary", + sqlalchemy.String, + nullable=True, + ), + # column added in schema v1 + sqlalchemy.Column("engine_status", sqlalchemy.String, nullable=True), + # column added in schema v1 + sqlalchemy.Column("_updated_at", UTCDateTime, nullable=True), +) + +action_table = sqlalchemy.Table( + "action", + metadata, + sqlalchemy.Column( + "id", + sqlalchemy.String, + primary_key=True, + ), + sqlalchemy.Column("created_at", UTCDateTime, nullable=False), + sqlalchemy.Column("action_type", sqlalchemy.String, nullable=False), + sqlalchemy.Column( + "run_id", + sqlalchemy.String, + sqlalchemy.ForeignKey("run.id"), + nullable=False, + ), +) + +run_command_table = sqlalchemy.Table( + "run_command", + metadata, + sqlalchemy.Column("row_id", sqlalchemy.Integer, primary_key=True), + sqlalchemy.Column( + "run_id", sqlalchemy.String, sqlalchemy.ForeignKey("run.id"), nullable=False + ), + sqlalchemy.Column("index_in_run", sqlalchemy.Integer, nullable=False), + sqlalchemy.Column("command_id", sqlalchemy.String, nullable=False), + sqlalchemy.Column("command", sqlalchemy.String, nullable=False), + sqlalchemy.Index( + "ix_run_run_id_command_id", # An arbitrary name for the index. + "run_id", + "command_id", + unique=True, + ), + sqlalchemy.Index( + "ix_run_run_id_index_in_run", # An arbitrary name for the index. 
+ "run_id", + "index_in_run", + unique=True, + ), +) diff --git a/robot-server/robot_server/protocols/analysis_models.py b/robot-server/robot_server/protocols/analysis_models.py index 0a3c64c9db0..c5827e577da 100644 --- a/robot-server/robot_server/protocols/analysis_models.py +++ b/robot-server/robot_server/protocols/analysis_models.py @@ -5,7 +5,7 @@ from opentrons.protocol_engine.types import RunTimeParameter from opentrons_shared_data.robot.dev_types import RobotType from pydantic import BaseModel, Field -from typing import List, Optional, Union +from typing import List, Optional, Union, NamedTuple from typing_extensions import Literal from opentrons.protocol_engine import ( @@ -150,4 +150,11 @@ class CompletedAnalysis(BaseModel): ) +class RunTimeParameterAnalysisData(NamedTuple): + """Data from analysis of a run-time parameter.""" + + value: Union[float, bool, str] + default: Union[float, bool, str] + + ProtocolAnalysis = Union[PendingAnalysis, CompletedAnalysis] diff --git a/robot-server/robot_server/protocols/analysis_store.py b/robot-server/robot_server/protocols/analysis_store.py index d8ce780f98d..b0ea474ec07 100644 --- a/robot-server/robot_server/protocols/analysis_store.py +++ b/robot-server/robot_server/protocols/analysis_store.py @@ -19,6 +19,7 @@ LoadedModule, Liquid, ) +from opentrons.protocol_engine.types import RunTimeParamValuesType from .analysis_models import ( AnalysisSummary, @@ -27,6 +28,7 @@ CompletedAnalysis, AnalysisResult, AnalysisStatus, + RunTimeParameterAnalysisData, ) from .completed_analysis_store import CompletedAnalysisStore, CompletedAnalysisResource @@ -71,6 +73,14 @@ def __init__(self, analysis_id: str) -> None: super().__init__(f'Analysis "{analysis_id}" not found.') +class AnalysisIsPendingError(RuntimeError): + """Exception raised if a given analysis is still pending.""" + + def __init__(self, analysis_id: str) -> None: + """Initialize the error's message.""" + super().__init__(f'Analysis "{analysis_id}" is still pending.') + + # TODO(sf, 2023-05-05): Like for protocols and runs, there's an in-memory cache for # elements of this store. Unlike for protocols and runs, it isn't just an lru_cache # on the top-level store's access methods, because those access methods have to be @@ -93,10 +103,14 @@ class AnalysisStore: so they're only kept in-memory, and lost when the store instance is destroyed. 
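+
+    Completed analyses, by contrast, are persisted to the database along with a
+    separate per-parameter snapshot of run-time parameter values and defaults
+    (new in schema 4), so later uploads can be compared against the most recent
+    analysis without deserializing the full analysis document.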
""" - def __init__(self, sql_engine: sqlalchemy.engine.Engine) -> None: + def __init__( + self, + sql_engine: sqlalchemy.engine.Engine, + completed_store: Optional[CompletedAnalysisStore] = None, + ) -> None: """Initialize the `AnalysisStore`.""" self._pending_store = _PendingAnalysisStore() - self._completed_store = CompletedAnalysisStore( + self._completed_store = completed_store or CompletedAnalysisStore( sql_engine=sql_engine, memory_cache=MemoryCache(_CACHE_MAX_SIZE, str, CompletedAnalysisResource), current_analyzer_version=_CURRENT_ANALYZER_VERSION, @@ -180,6 +194,9 @@ async def update( protocol_id=protocol_id, analyzer_version=_CURRENT_ANALYZER_VERSION, completed_analysis=completed_analysis, + run_time_parameter_values_and_defaults=self._extract_run_time_param_values_and_defaults( + completed_analysis + ), ) await self._completed_store.add( completed_analysis_resource=completed_analysis_resource @@ -258,6 +275,88 @@ async def get_by_protocol(self, protocol_id: str) -> List[ProtocolAnalysis]: else: return completed_analyses + [pending_analysis] + @staticmethod + def _extract_run_time_param_values_and_defaults( + completed_analysis: CompletedAnalysis, + ) -> Dict[str, RunTimeParameterAnalysisData]: + """Extract the Run Time Parameters with current value and default value of each. + + We do this in order to save the RTP data separately, outside the analysis + in the database. This saves us from having to de-serialize the entire analysis + to read just the RTP values. + """ + rtp_list = completed_analysis.runTimeParameters + + rtp_values_and_defaults = {} + for param_spec in rtp_list: + rtp_values_and_defaults.update( + { + param_spec.variableName: RunTimeParameterAnalysisData( + value=param_spec.value, default=param_spec.default + ) + } + ) + return rtp_values_and_defaults + + async def matching_rtp_values_in_analysis( + self, analysis_summary: AnalysisSummary, new_rtp_values: RunTimeParamValuesType + ) -> bool: + """Return whether the last analysis of the given protocol used the mentioned RTP values. + + It is not sufficient to just check the values of provided parameters against the + corresponding parameter values in analysis because a previous request could have + composed of some extra parameters that are not in the current list. + + Similarly, it is not enough to only compare the current parameter values from + the client with the previous values from the client because a previous param + might have been assigned a default value by the client while the current request + doesn't include that param because it can rely on the API to assign the default + value to that param. + + So, we check that the Run Time Parameters in the previous analysis has params + with the values provided in the current request, and also verify that rest of the + parameters in the analysis use default values. + """ + if analysis_summary.status == AnalysisStatus.PENDING: + raise AnalysisIsPendingError(analysis_summary.id) + + rtp_values_and_defaults_in_last_analysis = ( + await self._completed_store.get_rtp_values_and_defaults_by_analysis_id( + analysis_summary.id + ) + ) + # We already make sure that the protocol has an analysis associated with before + # checking the RTP values so this assert should never raise. + # It is only added for type checking. + assert ( + rtp_values_and_defaults_in_last_analysis is not None + ), "This protocol has no analysis associated with it." 
+ + if not set(new_rtp_values.keys()).issubset( + set(rtp_values_and_defaults_in_last_analysis.keys()) + ): + # Since the RTP keys in analysis represent all params defined in the protocol, + # if the client passes a parameter that's not present in the analysis, + # it means that the client is sending incorrect parameters. + # We will let this request trigger an analysis using the incorrect params + # and have the analysis raise an appropriate error instead of giving an + # error response to the protocols request. + # This makes the behavior of robot server consistent regardless of whether + # the client is sending a protocol for the first time or for the nth time. + return False + for ( + parameter, + prev_value_and_default, + ) in rtp_values_and_defaults_in_last_analysis.items(): + if ( + new_rtp_values.get(parameter, prev_value_and_default.default) + == prev_value_and_default.value + ): + continue + else: + return False + return True + class _PendingAnalysisStore: """An in-memory store of protocol analyses that are pending. diff --git a/robot-server/robot_server/protocols/completed_analysis_store.py b/robot-server/robot_server/protocols/completed_analysis_store.py index f4c696d0519..58017e4398a 100644 --- a/robot-server/robot_server/protocols/completed_analysis_store.py +++ b/robot-server/robot_server/protocols/completed_analysis_store.py @@ -2,18 +2,20 @@ from __future__ import annotations import asyncio +import json from typing import Dict, List, Optional from logging import getLogger from dataclasses import dataclass import sqlalchemy import anyio +from pydantic import parse_raw_as from robot_server.persistence.database import sqlite_rowid from robot_server.persistence.tables import analysis_table from robot_server.persistence.pydantic import json_to_pydantic, pydantic_to_json -from .analysis_models import CompletedAnalysis +from .analysis_models import CompletedAnalysis, RunTimeParameterAnalysisData from .analysis_memcache import MemoryCache @@ -31,6 +33,7 @@ class CompletedAnalysisResource: protocol_id: str analyzer_version: str completed_analysis: CompletedAnalysis + run_time_parameter_values_and_defaults: Dict[str, RunTimeParameterAnalysisData] async def to_sql_values(self) -> Dict[str, object]: """Return this data as a dict that can be passed to a SQLALchemy insert. @@ -46,18 +49,25 @@ async def to_sql_values(self) -> Dict[str, object]: def serialize_completed_analysis() -> str: return pydantic_to_json(self.completed_analysis) - serialized_json = await anyio.to_thread.run_sync( + def serialize_rtp_dict() -> str: + return json.dumps(self.run_time_parameter_values_and_defaults) + + serialized_analysis = await anyio.to_thread.run_sync( serialize_completed_analysis, # Cancellation may orphan the worker thread, # but that should be harmless in this case. cancellable=True, ) - + serialized_rtp_dict = await anyio.to_thread.run_sync( + serialize_rtp_dict, + cancellable=True, + ) return { "id": self.id, "protocol_id": self.protocol_id, "analyzer_version": self.analyzer_version, - "completed_analysis": serialized_json, + "completed_analysis": serialized_analysis, + "run_time_parameter_values_and_defaults": serialized_rtp_dict, } @classmethod @@ -94,12 +104,40 @@ def parse_completed_analysis() -> CompletedAnalysis: # but that should be harmless in this case. 
cancellable=True, ) - + rtp_values_and_defaults = await cls.get_run_time_parameter_values_and_defaults( + sql_row + ) return cls( id=id, protocol_id=protocol_id, analyzer_version=analyzer_version, completed_analysis=completed_analysis, + run_time_parameter_values_and_defaults=rtp_values_and_defaults, + ) + + @classmethod + async def get_run_time_parameter_values_and_defaults( + cls, sql_row: sqlalchemy.engine.Row + ) -> Dict[str, RunTimeParameterAnalysisData]: + """Get the run-time parameters used in the analysis with their values & defaults.""" + + def parse_rtp_dict() -> Dict[str, RunTimeParameterAnalysisData]: + rtp_contents = sql_row.run_time_parameter_values_and_defaults + return ( + parse_raw_as( + Dict[str, RunTimeParameterAnalysisData], + sql_row.run_time_parameter_values_and_defaults, + ) + if rtp_contents + else {} + ) + + # In most cases, this parsing should be quite quick but theoretically + # there could be an unexpectedly large number of run time params. + # So we delegate the parsing of this to a cancellable thread as well. + return await anyio.to_thread.run_sync( + parse_rtp_dict, + cancellable=True, ) @@ -185,6 +223,40 @@ async def get_by_id_as_document(self, analysis_id: str) -> Optional[str]: return document + async def get_rtp_values_and_defaults_by_analysis_id( + self, analysis_id: str + ) -> Optional[Dict[str, RunTimeParameterAnalysisData]]: + """Return the dictionary of run time parameter values & defaults used in the given analysis. + + If the analysis ID doesn't exist, return None. + These RTP values are not cached in memory by themselves since we don't anticipate + that fetching the values from the database to be a time-consuming operation. + """ + async with self._memcache_lock: + try: + analysis = self._memcache.get(analysis_id) + except KeyError: + pass + else: + return analysis.run_time_parameter_values_and_defaults + + statement = sqlalchemy.select(analysis_table).where( + analysis_table.c.id == analysis_id + ) + with self._sql_engine.begin() as transaction: + try: + result = transaction.execute(statement).one() + except sqlalchemy.exc.NoResultFound: + # Since we just no-op when fetching non-existent analysis, + # do the same for non-existent RTP data + return None + + rtp_values_and_defaults = await CompletedAnalysisResource.get_run_time_parameter_values_and_defaults( + result + ) + + return rtp_values_and_defaults + async def get_by_protocol( self, protocol_id: str ) -> List[CompletedAnalysisResource]: diff --git a/robot-server/robot_server/protocols/router.py b/robot-server/robot_server/protocols/router.py index fb72c938def..8ae9365de36 100644 --- a/robot-server/robot_server/protocols/router.py +++ b/robot-server/robot_server/protocols/router.py @@ -37,7 +37,7 @@ from .protocol_auto_deleter import ProtocolAutoDeleter from .protocol_models import Protocol, ProtocolFile, Metadata from .protocol_analyzer import ProtocolAnalyzer -from .analysis_store import AnalysisStore, AnalysisNotFoundError +from .analysis_store import AnalysisStore, AnalysisNotFoundError, AnalysisIsPendingError from .analysis_models import ProtocolAnalysis from .protocol_store import ( ProtocolStore, @@ -74,6 +74,13 @@ class AnalysisNotFound(ErrorDetails): title: str = "Protocol Analysis Not Found" +class LastAnalysisPending(ErrorDetails): + """An error returned when the most recent analysis of a protocol is still pending.""" + + id: Literal["LastAnalysisPending"] = "LastAnalysisPending" + title: str = "Last Analysis Still Pending." 
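+    # Returned with HTTP 503 from `POST /protocols` when the same protocol is
+    # re-uploaded while its most recent analysis is still running, because a
+    # pending analysis cannot yet be compared against new run-time parameter
+    # values.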
+ + class ProtocolFilesInvalid(ErrorDetails): """An error returned when an uploaded protocol files are invalid.""" @@ -140,7 +147,9 @@ class ProtocolLinks(BaseModel): resource will be returned instead of creating duplicate ones. When a new protocol resource is created, an analysis is started for it. - See the `/protocols/{id}/analyses/` endpoints. + A new analysis is also started if the same protocol file is uploaded but with + a different set of run-time parameter values than the most recent request. + See the `/protocols/{id}/analyses/` endpoints for more details. """ ), status_code=status.HTTP_201_CREATED, @@ -150,9 +159,10 @@ class ProtocolLinks(BaseModel): status.HTTP_422_UNPROCESSABLE_ENTITY: { "model": ErrorBody[Union[ProtocolFilesInvalid, ProtocolRobotTypeMismatch]] }, + status.HTTP_503_SERVICE_UNAVAILABLE: {"model": ErrorBody[LastAnalysisPending]}, }, ) -async def create_protocol( +async def create_protocol( # noqa: C901 files: List[UploadFile] = File(...), # use Form because request is multipart/form-data # https://fastapi.tiangolo.com/tutorial/request-forms-and-files/ @@ -214,7 +224,6 @@ async def create_protocol( # TODO(mm, 2024-02-07): Investigate whether the filename can actually be None. assert file.filename is not None buffered_files = await file_reader_writer.read(files=files) # type: ignore[arg-type] - if isinstance(run_time_parameter_values, str): # We have to do this isinstance check because if `runTimeParameterValues` is # not specified in the request, then it gets assigned a Form(None) value @@ -223,29 +232,46 @@ async def create_protocol( # so we can validate the data contents and return a better error response. parsed_rtp = json.loads(run_time_parameter_values) else: - parsed_rtp = None + parsed_rtp = {} content_hash = await file_hasher.hash(buffered_files) cached_protocol_id = protocol_store.get_id_by_hash(content_hash) if cached_protocol_id is not None: - # Protocol exists in database resource = protocol_store.get(protocol_id=cached_protocol_id) - if parsed_rtp: - # This protocol exists in database but needs to be re-analyzed with the - # passed-in RTP overrides - task_runner.run( - protocol_analyzer.analyze, - protocol_resource=resource, - analysis_id=analysis_id, - run_time_param_values=parsed_rtp, - ) - analysis_store.add_pending( - protocol_id=cached_protocol_id, - analysis_id=analysis_id, - ) analyses = analysis_store.get_summaries_by_protocol( protocol_id=cached_protocol_id ) + + try: + if ( + # Unexpected situations, like powering off the robot after a protocol upload + # but before the analysis is complete, can leave the protocol resource + # without an associated analysis. 
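+                # In that case there is nothing to compare against, so analyze now.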
+ len(analyses) == 0 + or + # The most recent analysis was done using different RTP values + not await analysis_store.matching_rtp_values_in_analysis( + analysis_summary=analyses[-1], new_rtp_values=parsed_rtp + ) + ): + # This protocol exists in database but needs to be (re)analyzed + task_runner.run( + protocol_analyzer.analyze, + protocol_resource=resource, + analysis_id=analysis_id, + run_time_param_values=parsed_rtp, + ) + analyses.append( + analysis_store.add_pending( + protocol_id=cached_protocol_id, + analysis_id=analysis_id, + ) + ) + except AnalysisIsPendingError as error: + raise LastAnalysisPending(detail=str(error)).as_error( + status.HTTP_503_SERVICE_UNAVAILABLE + ) from error + data = Protocol.construct( id=cached_protocol_id, createdAt=resource.created_at, diff --git a/robot-server/tests/integration/http_api/persistence/test_reset.py b/robot-server/tests/integration/http_api/persistence/test_reset.py index c9973713802..394671bba64 100644 --- a/robot-server/tests/integration/http_api/persistence/test_reset.py +++ b/robot-server/tests/integration/http_api/persistence/test_reset.py @@ -40,9 +40,9 @@ async def _assert_reset_was_successful( all_files_and_directories = set(persistence_directory.glob("**/*")) expected_files_and_directories = { persistence_directory / "robot_server.db", - persistence_directory / "3", - persistence_directory / "3" / "protocols", - persistence_directory / "3" / "robot_server.db", + persistence_directory / "4", + persistence_directory / "4" / "protocols", + persistence_directory / "4" / "robot_server.db", } assert all_files_and_directories == expected_files_and_directories diff --git a/robot-server/tests/integration/http_api/protocols/test_analyses.tavern.yaml b/robot-server/tests/integration/http_api/protocols/test_analyses.tavern.yaml index 3634989ed3f..a756ea10e1b 100644 --- a/robot-server/tests/integration/http_api/protocols/test_analyses.tavern.yaml +++ b/robot-server/tests/integration/http_api/protocols/test_analyses.tavern.yaml @@ -84,26 +84,3 @@ stages: # We need to make sure we get the Content-Type right because FastAPI won't do it for us. Content-Type: application/json json: !force_format_include '{analysis_data}' - - - name: Check that uploading the same protocol with run-time parameter values triggers re-analysis - # This test must be executed after the analysis of the previous upload is completed. 
- request: - url: '{ot2_server_base_url}/protocols' - method: POST - data: - runTimeParameterValues: '{{"volume": 123, "dry_run": true, "pipette": "p10_single"}}' - files: - files: 'tests/integration/protocols/basic_transfer_standalone.py' - response: - strict: - - json:off - status_code: 200 - json: - data: - id: '{protocol_id}' - analyses: [] - analysisSummaries: - - id: '{analysis_id}' - status: completed - - id: !anystr - status: pending diff --git a/robot-server/tests/integration/http_api/protocols/test_analyses_with_run_time_parameters.tavern.yaml b/robot-server/tests/integration/http_api/protocols/test_analyses_with_run_time_parameters.tavern.yaml new file mode 100644 index 00000000000..3ad017a546d --- /dev/null +++ b/robot-server/tests/integration/http_api/protocols/test_analyses_with_run_time_parameters.tavern.yaml @@ -0,0 +1,180 @@ +test_name: Test the protocol analysis endpoints with run time parameters + +marks: + - usefixtures: + - ot2_server_base_url + +stages: + - name: Upload a protocol + request: + url: '{ot2_server_base_url}/protocols' + method: POST + files: + files: 'tests/integration/protocols/basic_transfer_with_run_time_parameters.py' + response: + save: + json: + protocol_id: data.id + analysis_id: data.analysisSummaries[0].id + strict: + - json:off + status_code: 201 + json: + data: + analyses: [] + analysisSummaries: + - id: !anystr + status: pending + + - name: Check that the analysis summary is present in /protocols/:id; retry until it says it's completed + max_retries: 5 + delay_after: 1 + request: + url: '{ot2_server_base_url}/protocols/{protocol_id}' + response: + status_code: 200 + json: + data: + analyses: [] + analysisSummaries: + - id: '{analysis_id}' + status: completed + id: !anything + protocolType: !anything + files: !anything + createdAt: !anything + robotType: !anything + metadata: !anything + links: !anything + + - name: Check that the analysis data is present in /protocols/:id/analyses/:id + request: + url: '{ot2_server_base_url}/protocols/{protocol_id}/analyses/{analysis_id}' + response: + strict: + - json:off + json: + data: + id: '{analysis_id}' + runTimeParameters: + - displayName: Sample count + variableName: sample_count + type: int + default: 6.0 + min: 1.0 + max: 12.0 + value: 6.0 + description: How many samples to process. + - displayName: Pipette volume + variableName: volume + type: float + default: 20.1 + choices: + - displayName: Low Volume + value: 10.23 + - displayName: Medium Volume + value: 20.1 + - displayName: High Volume + value: 50.5 + value: 20.1 + description: How many microliters to pipette of each sample. + - displayName: Dry Run + variableName: dry_run + type: bool + default: false + value: false + description: Skip aspirate and dispense steps. + - displayName: Pipette Name + variableName: pipette + type: str + choices: + - displayName: Single channel 50µL + value: flex_1channel_50 + - displayName: Eight Channel 50µL + value: flex_8channel_50 + default: flex_1channel_50 + value: flex_1channel_50 + description: What pipette to use during the protocol. + commands: + # Check for this command's presence as a smoke test that the analysis isn't empty. + - commandType: loadPipette + + - name: Check that uploading same protocol with new run time parameter values re-triggers analysis + # This test must be executed after the analysis of the previous upload is completed. 
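+    # Expect a 200 (not 201) because the protocol resource already exists; a new
+    # pending analysis summary is appended after the existing completed one.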
+ request: + url: '{ot2_server_base_url}/protocols' + method: POST + data: + runTimeParameterValues: '{{"sample_count": 10, "volume": 10.23, "dry_run": true}}' + files: + files: 'tests/integration/protocols/basic_transfer_with_run_time_parameters.py' + response: + save: + json: + analysis_id2: data.analysisSummaries[1].id + strict: + - json:off + status_code: 200 + json: + data: + id: '{protocol_id}' + analyses: [ ] + analysisSummaries: + - id: '{analysis_id}' + status: completed + - id: !anystr + status: pending + + - name: Check that the new analysis uses run time parameter values from client; retry until analysis is completed + max_retries: 5 + delay_after: 1 + request: + url: '{ot2_server_base_url}/protocols/{protocol_id}/analyses/{analysis_id2}' + response: + strict: + - json:off + json: + data: + id: '{analysis_id2}' + runTimeParameters: + - displayName: Sample count + variableName: sample_count + type: int + default: 6.0 + min: 1.0 + max: 12.0 + value: 10.0 + description: How many samples to process. + - displayName: Pipette volume + variableName: volume + type: float + default: 20.1 + choices: + - displayName: Low Volume + value: 10.23 + - displayName: Medium Volume + value: 20.1 + - displayName: High Volume + value: 50.5 + value: 10.23 + description: How many microliters to pipette of each sample. + - displayName: Dry Run + variableName: dry_run + type: bool + default: false + value: true + description: Skip aspirate and dispense steps. + - displayName: Pipette Name + variableName: pipette + type: str + choices: + - displayName: Single channel 50µL + value: flex_1channel_50 + - displayName: Eight Channel 50µL + value: flex_8channel_50 + default: flex_1channel_50 + value: flex_1channel_50 + description: What pipette to use during the protocol. + commands: + # Check for this command's presence as a smoke test that the analysis isn't empty. 
+ - commandType: loadPipette \ No newline at end of file diff --git a/robot-server/tests/integration/http_api/protocols/test_key.tavern.yaml b/robot-server/tests/integration/http_api/protocols/test_key.tavern.yaml index 7d0f4361cb3..7729ee15fa5 100644 --- a/robot-server/tests/integration/http_api/protocols/test_key.tavern.yaml +++ b/robot-server/tests/integration/http_api/protocols/test_key.tavern.yaml @@ -169,6 +169,8 @@ stages: author: engineer@opentrons.com key: duplicate_key - name: Upload basic_transfer_standalone protocol with same key + # add a delay before starting to let previous analysis complete + delay_before: 2 request: url: '{ot2_server_base_url}/protocols' method: POST diff --git a/robot-server/tests/integration/http_api/protocols/test_persistence.py b/robot-server/tests/integration/http_api/protocols/test_persistence.py index a939f5f5fda..0480accb39c 100644 --- a/robot-server/tests/integration/http_api/protocols/test_persistence.py +++ b/robot-server/tests/integration/http_api/protocols/test_persistence.py @@ -120,10 +120,10 @@ async def test_protocol_labware_files_persist() -> None: assert restarted_protocol_detail == protocol_detail four_tuberack = Path( - f"{server.persistence_directory}/3/protocols/{protocol_id}/cpx_4_tuberack_100ul.json" + f"{server.persistence_directory}/4/protocols/{protocol_id}/cpx_4_tuberack_100ul.json" ) six_tuberack = Path( - f"{server.persistence_directory}/3/protocols/{protocol_id}/cpx_6_tuberack_100ul.json" + f"{server.persistence_directory}/4/protocols/{protocol_id}/cpx_6_tuberack_100ul.json" ) assert four_tuberack.is_file() assert six_tuberack.is_file() diff --git a/robot-server/tests/integration/protocols/basic_transfer_with_run_time_parameters.py b/robot-server/tests/integration/protocols/basic_transfer_with_run_time_parameters.py new file mode 100644 index 00000000000..7fe90c65d8c --- /dev/null +++ b/robot-server/tests/integration/protocols/basic_transfer_with_run_time_parameters.py @@ -0,0 +1,57 @@ +from opentrons.protocol_api import ProtocolContext, ParameterContext + +metadata = { + "apiLevel": "2.18", + "author": "engineer@opentrons.com", + "protocolName": "basic_transfer_standalone", +} + + +def add_parameters(parameters: ParameterContext): + parameters.add_int( + display_name="Sample count", + variable_name="sample_count", + default=6, + minimum=1, + maximum=12, + description="How many samples to process.", + ) + parameters.add_float( + display_name="Pipette volume", + variable_name="volume", + default=20.1, + choices=[ + {"display_name": "Low Volume", "value": 10.23}, + {"display_name": "Medium Volume", "value": 20.1}, + {"display_name": "High Volume", "value": 50.5}, + ], + description="How many microliters to pipette of each sample.", + unit="µL", # Unit is not wired up, and it doesn't raise errors either. 
+ ) + parameters.add_bool( + display_name="Dry Run", + variable_name="dry_run", + default=False, + description="Skip aspirate and dispense steps.", + ) + parameters.add_str( + display_name="Pipette Name", + variable_name="pipette", + choices=[ + {"display_name": "Single channel 50µL", "value": "flex_1channel_50"}, + {"display_name": "Eight Channel 50µL", "value": "flex_8channel_50"}, + ], + default="flex_1channel_50", + description="What pipette to use during the protocol.", + ) + + +def run(protocol: ProtocolContext) -> None: + plate = protocol.load_labware("corning_96_wellplate_360ul_flat", 1) + tiprack_1 = protocol.load_labware("opentrons_96_tiprack_300ul", 2) + p300 = protocol.load_instrument("p300_single", "right", tip_racks=[tiprack_1]) + + p300.pick_up_tip() + p300.aspirate(100, plate["A1"]) + p300.dispense(100, plate["B1"]) + p300.return_tip() diff --git a/robot-server/tests/persistence/test_tables.py b/robot-server/tests/persistence/test_tables.py index ca0bca5c2d5..eaa2824ce75 100644 --- a/robot-server/tests/persistence/test_tables.py +++ b/robot-server/tests/persistence/test_tables.py @@ -10,6 +10,7 @@ metadata as latest_metadata, schema_3, schema_2, + schema_4, ) # The statements that we expect to emit when we create a fresh database. @@ -39,6 +40,7 @@ protocol_id VARCHAR NOT NULL, analyzer_version VARCHAR NOT NULL, completed_analysis VARCHAR NOT NULL, + run_time_parameter_values_and_defaults VARCHAR, PRIMARY KEY (id), FOREIGN KEY(protocol_id) REFERENCES protocol (id) ) @@ -87,8 +89,70 @@ """, ] +EXPECTED_STATEMENTS_V4 = EXPECTED_STATEMENTS_LATEST -EXPECTED_STATEMENTS_V3 = EXPECTED_STATEMENTS_LATEST +EXPECTED_STATEMENTS_V3 = [ + """ + CREATE TABLE protocol ( + id VARCHAR NOT NULL, + created_at DATETIME NOT NULL, + protocol_key VARCHAR, + PRIMARY KEY (id) + ) + """, + """ + CREATE TABLE analysis ( + id VARCHAR NOT NULL, + protocol_id VARCHAR NOT NULL, + analyzer_version VARCHAR NOT NULL, + completed_analysis VARCHAR NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(protocol_id) REFERENCES protocol (id) + ) + """, + """ + CREATE INDEX ix_analysis_protocol_id ON analysis (protocol_id) + """, + """ + CREATE TABLE run ( + id VARCHAR NOT NULL, + created_at DATETIME NOT NULL, + protocol_id VARCHAR, + state_summary VARCHAR, + engine_status VARCHAR, + _updated_at DATETIME, + PRIMARY KEY (id), + FOREIGN KEY(protocol_id) REFERENCES protocol (id) + ) + """, + """ + CREATE TABLE action ( + id VARCHAR NOT NULL, + created_at DATETIME NOT NULL, + action_type VARCHAR NOT NULL, + run_id VARCHAR NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(run_id) REFERENCES run (id) + ) + """, + """ + CREATE TABLE run_command ( + row_id INTEGER NOT NULL, + run_id VARCHAR NOT NULL, + index_in_run INTEGER NOT NULL, + command_id VARCHAR NOT NULL, + command VARCHAR NOT NULL, + PRIMARY KEY (row_id), + FOREIGN KEY(run_id) REFERENCES run (id) + ) + """, + """ + CREATE UNIQUE INDEX ix_run_run_id_command_id ON run_command (run_id, command_id) + """, + """ + CREATE UNIQUE INDEX ix_run_run_id_index_in_run ON run_command (run_id, index_in_run) + """, +] EXPECTED_STATEMENTS_V2 = [ @@ -165,6 +229,7 @@ def _normalize_statement(statement: str) -> str: ("metadata", "expected_statements"), [ (latest_metadata, EXPECTED_STATEMENTS_LATEST), + (schema_4.metadata, EXPECTED_STATEMENTS_V4), (schema_3.metadata, EXPECTED_STATEMENTS_V3), (schema_2.metadata, EXPECTED_STATEMENTS_V2), ], @@ -172,7 +237,7 @@ def _normalize_statement(statement: str) -> str: def test_creating_tables_emits_expected_statements( metadata: sqlalchemy.MetaData, 
expected_statements: List[str] ) -> None: - """Test that fresh databases are created with with the expected statements. + """Test that fresh databases are created with the expected statements. This is a snapshot test to help catch accidental changes to our SQL schema. diff --git a/robot-server/tests/protocols/test_analysis_store.py b/robot-server/tests/protocols/test_analysis_store.py index b9c2dcccdac..94d7f67f953 100644 --- a/robot-server/tests/protocols/test_analysis_store.py +++ b/robot-server/tests/protocols/test_analysis_store.py @@ -6,6 +6,8 @@ from typing import List, NamedTuple import pytest +from decoy import Decoy +from opentrons.protocol_engine.types import RunTimeParamValuesType from sqlalchemy.engine import Engine as SQLEngine @@ -28,10 +30,17 @@ AnalysisSummary, PendingAnalysis, CompletedAnalysis, + RunTimeParameterAnalysisData, ) from robot_server.protocols.analysis_store import ( AnalysisStore, AnalysisNotFoundError, + AnalysisIsPendingError, + _CURRENT_ANALYZER_VERSION, +) +from robot_server.protocols.completed_analysis_store import ( + CompletedAnalysisStore, + CompletedAnalysisResource, ) from robot_server.protocols.protocol_store import ( ProtocolStore, @@ -171,12 +180,20 @@ async def test_update_adds_details_and_completes_analysis( pipetteName=PipetteNameType.P300_SINGLE, mount=MountType.LEFT, ) - + run_time_param = pe_types.NumberParameter( + displayName="My parameter", + variableName="cool_param", + type="int", + min=1, + max=5, + value=2.0, + default=3.0, + ) subject.add_pending(protocol_id="protocol-id", analysis_id="analysis-id") await subject.update( analysis_id="analysis-id", robot_type="OT-2 Standard", - run_time_parameters=[], + run_time_parameters=[run_time_param], labware=[labware], pipettes=[pipette], # TODO(mm, 2022-10-21): Give the subject some commands, errors, and liquids here @@ -195,7 +212,7 @@ async def test_update_adds_details_and_completes_analysis( status=AnalysisStatus.COMPLETED, result=AnalysisResult.OK, robotType="OT-2 Standard", - runTimeParameters=[], + runTimeParameters=[run_time_param], labware=[labware], pipettes=[pipette], modules=[], @@ -209,7 +226,17 @@ async def test_update_adds_details_and_completes_analysis( "result": "ok", "status": "completed", "robotType": "OT-2 Standard", - "runTimeParameters": [], + "runTimeParameters": [ + { + "displayName": "My parameter", + "variableName": "cool_param", + "type": "int", + "min": 1, + "max": 5, + "value": 2.0, + "default": 3.0, + } + ], "labware": [ { "id": "labware-id", @@ -228,6 +255,76 @@ async def test_update_adds_details_and_completes_analysis( } +async def test_update_adds_rtp_values_and_defaults_to_completed_store( + decoy: Decoy, sql_engine: SQLEngine, protocol_store: ProtocolStore +) -> None: + """It should add RTP values and defaults to completed analysis store.""" + number_param = pe_types.NumberParameter( + displayName="My parameter", + variableName="cool_param", + type="int", + min=1, + max=5, + value=2.0, + default=3.0, + ) + string_param = pe_types.EnumParameter( + displayName="A choiced param", + variableName="cooler_param", + type="str", + choices=[ + pe_types.EnumChoice(displayName="FOOOO", value="foo"), + pe_types.EnumChoice(displayName="BARRR", value="bar"), + ], + value="baz", + default="blah", + ) + expected_completed_analysis_resource = CompletedAnalysisResource( + id="analysis-id", + protocol_id="protocol-id", + analyzer_version=_CURRENT_ANALYZER_VERSION, + completed_analysis=CompletedAnalysis( + id="analysis-id", + status=AnalysisStatus.COMPLETED, + 
result=AnalysisResult.OK, + robotType="OT-2 Standard", + runTimeParameters=[number_param, string_param], + labware=[], + pipettes=[], + modules=[], + commands=[], + errors=[], + liquids=[], + ), + run_time_parameter_values_and_defaults={ + "cool_param": RunTimeParameterAnalysisData(value=2.0, default=3.0), + "cooler_param": RunTimeParameterAnalysisData(value="baz", default="blah"), + }, + ) + + mock_completed_store = decoy.mock(cls=CompletedAnalysisStore) + subject = AnalysisStore(sql_engine=sql_engine, completed_store=mock_completed_store) + protocol_store.insert(make_dummy_protocol_resource(protocol_id="protocol-id")) + + subject.add_pending(protocol_id="protocol-id", analysis_id="analysis-id") + await subject.update( + analysis_id="analysis-id", + robot_type="OT-2 Standard", + run_time_parameters=[number_param, string_param], + labware=[], + pipettes=[], + modules=[], + commands=[], + errors=[], + liquids=[], + ) + decoy.verify( + await mock_completed_store.add( + completed_analysis_resource=expected_completed_analysis_resource + ) + ) + + class AnalysisResultSpec(NamedTuple): """Spec data for analysis result tests.""" @@ -291,3 +388,101 @@ async def test_update_infers_status_from_errors( analysis = (await subject.get_by_protocol("protocol-id"))[0] assert isinstance(analysis, CompletedAnalysis) assert analysis.result == expected_result + + +@pytest.mark.parametrize( + argnames=["rtp_values_from_client", "expected_match"], + argvalues=[ + ({"cool_param": 2.0, "cooler_param": "baz", "uncool_param": 5}, True), + ( + {"cool_param": 2, "cooler_param": "baz"}, + True, + ), + ( + {"cool_param": 2, "cooler_param": "buzzzzzzz"}, + False, + ), + ( + {"cool_param": 2.0, "cooler_param": "baz", "weird_param": 5}, + False, + ), + ({}, False), + ], +) +async def test_matching_rtp_values_in_analysis( + decoy: Decoy, + sql_engine: SQLEngine, + protocol_store: ProtocolStore, + rtp_values_from_client: RunTimeParamValuesType, + expected_match: bool, +) -> None: + """It should return whether the client's RTP values match with those in the last analysis of protocol.""" + mock_completed_store = decoy.mock(cls=CompletedAnalysisStore) + subject = AnalysisStore(sql_engine=sql_engine, completed_store=mock_completed_store) + protocol_store.insert(make_dummy_protocol_resource(protocol_id="protocol-id")) + + decoy.when( + await mock_completed_store.get_rtp_values_and_defaults_by_analysis_id( + "analysis-2" + ) + ).then_return( + { + "cool_param": RunTimeParameterAnalysisData(value=2.0, default=3.0), + "cooler_param": RunTimeParameterAnalysisData( + value="baz", default="very cool" + ), + "uncool_param": RunTimeParameterAnalysisData(value=5, default=5), + } + ) + assert ( + await subject.matching_rtp_values_in_analysis( + analysis_summary=AnalysisSummary( + id="analysis-2", status=AnalysisStatus.COMPLETED + ), + new_rtp_values=rtp_values_from_client, + ) + == expected_match + ) + + +async def test_matching_default_rtp_values_in_analysis_with_no_client_rtp_values( + decoy: Decoy, + sql_engine: SQLEngine, + protocol_store: ProtocolStore, +) -> None: + """It should return a match when client sends no RTP values and last analysis used all default values.""" + params_with_only_default_values = { + "cool_param": RunTimeParameterAnalysisData(value=2.0, default=2.0), + "cooler_param": RunTimeParameterAnalysisData( + value="very cool", default="very cool" + ), + "uncool_param": RunTimeParameterAnalysisData(value=True, default=True), + } + mock_completed_store = decoy.mock(cls=CompletedAnalysisStore) + subject = 
AnalysisStore(sql_engine=sql_engine, completed_store=mock_completed_store) + protocol_store.insert(make_dummy_protocol_resource(protocol_id="protocol-id")) + + decoy.when( + await mock_completed_store.get_rtp_values_and_defaults_by_analysis_id( + "analysis-2" + ) + ).then_return(params_with_only_default_values) + assert ( + await subject.matching_rtp_values_in_analysis( + analysis_summary=AnalysisSummary( + id="analysis-2", status=AnalysisStatus.COMPLETED + ), + new_rtp_values={}, + ) + is True + ) + + +async def test_matching_default_rtp_values_in_analysis_with_pending_analysis( + subject: AnalysisStore, protocol_store: ProtocolStore +) -> None: + """It should raise an error if analysis is pending.""" + with pytest.raises(AnalysisIsPendingError): + await subject.matching_rtp_values_in_analysis( + AnalysisSummary(id="analysis-id", status=AnalysisStatus.PENDING), {} + ) diff --git a/robot-server/tests/protocols/test_completed_analysis_store.py b/robot-server/tests/protocols/test_completed_analysis_store.py index 8339460cf66..f41594d0c5d 100644 --- a/robot-server/tests/protocols/test_completed_analysis_store.py +++ b/robot-server/tests/protocols/test_completed_analysis_store.py @@ -2,6 +2,7 @@ import json from datetime import datetime, timezone from pathlib import Path +from typing import Optional, Dict import pytest from sqlalchemy.engine import Engine @@ -20,6 +21,7 @@ CompletedAnalysis, AnalysisResult, AnalysisStatus, + RunTimeParameterAnalysisData, ) from robot_server.protocols.protocol_store import ( ProtocolStore, @@ -76,7 +78,9 @@ def make_dummy_protocol_resource(protocol_id: str) -> ProtocolResource: def _completed_analysis_resource( - analysis_id: str, protocol_id: str + analysis_id: str, + protocol_id: str, + rtp_values_and_defaults: Optional[Dict[str, RunTimeParameterAnalysisData]] = None, ) -> CompletedAnalysisResource: return CompletedAnalysisResource( analysis_id, @@ -93,6 +97,7 @@ def _completed_analysis_resource( errors=[], liquids=[], ), + run_time_parameter_values_and_defaults=rtp_values_and_defaults or {}, ) @@ -212,3 +217,47 @@ async def test_get_by_protocol( decoy.when(memcache.insert("analysis-id-1", resource_1)).then_return(None) resources = await subject.get_by_protocol("protocol-id-1") assert resources == [resource_1, resource_2] + + +async def test_get_rtp_values_and_defaults_by_analysis_id_prefers_memcache( + subject: CompletedAnalysisStore, + memcache: MemoryCache[str, CompletedAnalysisResource], + protocol_store: ProtocolStore, + decoy: Decoy, +) -> None: + """It should return RTP values and defaults dict from memcache.""" + resource = _completed_analysis_resource( + analysis_id="analysis-id", + protocol_id="protocol-id", + rtp_values_and_defaults={ + "abc": RunTimeParameterAnalysisData(value=123, default=234) + }, + ) + protocol_store.insert(make_dummy_protocol_resource("protocol-id")) + # When we retrieve a resource via its id we should see it query the cache, and it should + # return the identity-same resource + decoy.when(memcache.get("analysis-id")).then_return(resource) + result = await subject.get_rtp_values_and_defaults_by_analysis_id("analysis-id") + assert result == resource.run_time_parameter_values_and_defaults + + +async def test_get_rtp_values_and_defaults_by_analysis_from_db( + subject: CompletedAnalysisStore, + memcache: MemoryCache[str, CompletedAnalysisResource], + protocol_store: ProtocolStore, + decoy: Decoy, +) -> None: + """It should fetch the RTP values and defaults dict from database if not present in cache.""" + resource = 
_completed_analysis_resource( + analysis_id="analysis-id", + protocol_id="protocol-id", + rtp_values_and_defaults={ + "xyz": RunTimeParameterAnalysisData(value=123, default=234) + }, + ) + protocol_store.insert(make_dummy_protocol_resource("protocol-id")) + await subject.add(resource) + # Not in memcache + decoy.when(memcache.get("analysis-id")).then_raise(KeyError()) + result = await subject.get_rtp_values_and_defaults_by_analysis_id("analysis-id") + assert result == resource.run_time_parameter_values_and_defaults diff --git a/robot-server/tests/protocols/test_protocols_router.py b/robot-server/tests/protocols/test_protocols_router.py index dbdad50c3bd..ffb02d929b1 100644 --- a/robot-server/tests/protocols/test_protocols_router.py +++ b/robot-server/tests/protocols/test_protocols_router.py @@ -1,5 +1,6 @@ """Tests for the /protocols router.""" import io + import pytest from datetime import datetime from decoy import Decoy, matchers @@ -24,7 +25,11 @@ from robot_server.errors.error_responses import ApiError from robot_server.service.json_api import SimpleEmptyBody, MultiBodyMeta from robot_server.service.task_runner import TaskRunner -from robot_server.protocols.analysis_store import AnalysisStore, AnalysisNotFoundError +from robot_server.protocols.analysis_store import ( + AnalysisStore, + AnalysisNotFoundError, + AnalysisIsPendingError, +) from robot_server.protocols.protocol_analyzer import ProtocolAnalyzer from robot_server.protocols.protocol_auto_deleter import ProtocolAutoDeleter from robot_server.protocols.analysis_models import ( @@ -373,6 +378,11 @@ async def test_create_existing_protocol( decoy.when( analysis_store.get_summaries_by_protocol(protocol_id="the-og-proto-id") ).then_return([completed_analysis]) + decoy.when( + await analysis_store.matching_rtp_values_in_analysis( + analysis_summary=completed_analysis, new_rtp_values={} + ) + ).then_return(True) result = await create_protocol( files=[protocol_file], @@ -513,12 +523,12 @@ async def test_create_protocol( protocol_analyzer.analyze, analysis_id="analysis-id", protocol_resource=protocol_resource, - run_time_param_values=None, + run_time_param_values={}, ), ) -async def test_create_protocol_with_run_time_params( +async def test_create_new_protocol_with_run_time_params( decoy: Decoy, protocol_store: ProtocolStore, analysis_store: AnalysisStore, @@ -620,7 +630,240 @@ async def test_create_protocol_with_run_time_params( ) -async def test_create_existing_protocol_with_run_time_params( +async def test_create_existing_protocol_with_no_previous_analysis( + decoy: Decoy, + protocol_store: ProtocolStore, + analysis_store: AnalysisStore, + protocol_reader: ProtocolReader, + file_reader_writer: FileReaderWriter, + file_hasher: FileHasher, + protocol_analyzer: ProtocolAnalyzer, + task_runner: TaskRunner, + protocol_auto_deleter: ProtocolAutoDeleter, +) -> None: + """It should re-trigger analysis of the existing protocol resource.""" + protocol_directory = Path("/dev/null") + content = bytes("some_content", encoding="utf-8") + uploaded_file = io.BytesIO(content) + + protocol_file = UploadFile(filename="foo.json", file=uploaded_file) + buffered_file = BufferedFile(name="blah", contents=content, path=None) + + protocol_source = ProtocolSource( + directory=Path("/dev/null"), + main_file=Path("/dev/null/foo.json"), + files=[ + ProtocolSourceFile( + path=Path("/dev/null/foo.json"), + role=ProtocolFileRole.MAIN, + ) + ], + metadata={"this_is_fake_metadata": True}, + robot_type="OT-2 Standard", + config=JsonProtocolConfig(schema_version=123), + 
content_hash="a_b_c", + ) + + stored_protocol_resource = ProtocolResource( + protocol_id="protocol-id", + created_at=datetime(year=2020, month=1, day=1), + source=protocol_source, + protocol_key="dummy-key-222", + ) + pending_analysis = AnalysisSummary( + id="analysis-id", + status=AnalysisStatus.PENDING, + ) + decoy.when( + await file_reader_writer.read( + # TODO(mm, 2024-02-07): Recent FastAPI upgrades mean protocol_file.filename + # is typed as possibly None. Investigate whether that can actually happen in + # practice and whether we need to account for it. + files=[protocol_file] # type: ignore[list-item] + ) + ).then_return([buffered_file]) + + decoy.when(await file_hasher.hash(files=[buffered_file])).then_return("a_b_c") + decoy.when(protocol_store.get_id_by_hash("a_b_c")).then_return("the-og-proto-id") + decoy.when(protocol_store.get(protocol_id="the-og-proto-id")).then_return( + stored_protocol_resource + ) + decoy.when( + analysis_store.get_summaries_by_protocol(protocol_id="the-og-proto-id") + ).then_return([]) + decoy.when( + analysis_store.add_pending( + protocol_id="the-og-proto-id", analysis_id="analysis-id" + ) + ).then_return(pending_analysis) + + result = await create_protocol( + files=[protocol_file], + key="dummy-key-111", + run_time_parameter_values='{"vol": 123, "dry_run": true, "mount": "left"}', + protocol_directory=protocol_directory, + protocol_store=protocol_store, + analysis_store=analysis_store, + file_reader_writer=file_reader_writer, + protocol_reader=protocol_reader, + file_hasher=file_hasher, + protocol_analyzer=protocol_analyzer, + task_runner=task_runner, + protocol_auto_deleter=protocol_auto_deleter, + robot_type="OT-2 Standard", + protocol_id="protocol-id", + analysis_id="analysis-id", + created_at=datetime(year=2021, month=1, day=1), + ) + + assert result.content.data == Protocol( + id="the-og-proto-id", + createdAt=datetime(year=2020, month=1, day=1), + protocolType=ProtocolType.JSON, + metadata=Metadata(this_is_fake_metadata=True), # type: ignore[call-arg] + robotType="OT-2 Standard", + analysisSummaries=[pending_analysis], + files=[ProtocolFile(name="foo.json", role=ProtocolFileRole.MAIN)], + key="dummy-key-222", + ) + assert result.status_code == 200 + decoy.verify( + task_runner.run( + protocol_analyzer.analyze, + analysis_id="analysis-id", + protocol_resource=stored_protocol_resource, + run_time_param_values={"vol": 123, "dry_run": True, "mount": "left"}, + ), + analysis_store.add_pending( + protocol_id="the-og-proto-id", + analysis_id="analysis-id", + ), + ) + + +async def test_create_existing_protocol_with_different_run_time_params( + decoy: Decoy, + protocol_store: ProtocolStore, + analysis_store: AnalysisStore, + protocol_reader: ProtocolReader, + file_reader_writer: FileReaderWriter, + file_hasher: FileHasher, + protocol_analyzer: ProtocolAnalyzer, + task_runner: TaskRunner, + protocol_auto_deleter: ProtocolAutoDeleter, +) -> None: + """It should re-trigger analysis of the existing protocol resource.""" + protocol_directory = Path("/dev/null") + content = bytes("some_content", encoding="utf-8") + uploaded_file = io.BytesIO(content) + + protocol_file = UploadFile(filename="foo.json", file=uploaded_file) + buffered_file = BufferedFile(name="blah", contents=content, path=None) + + protocol_source = ProtocolSource( + directory=Path("/dev/null"), + main_file=Path("/dev/null/foo.json"), + files=[ + ProtocolSourceFile( + path=Path("/dev/null/foo.json"), + role=ProtocolFileRole.MAIN, + ) + ], + metadata={"this_is_fake_metadata": True}, + 
robot_type="OT-2 Standard", + config=JsonProtocolConfig(schema_version=123), + content_hash="a_b_c", + ) + + stored_protocol_resource = ProtocolResource( + protocol_id="protocol-id", + created_at=datetime(year=2020, month=1, day=1), + source=protocol_source, + protocol_key="dummy-key-222", + ) + + completed_summary = AnalysisSummary( + id="analysis-id", + status=AnalysisStatus.COMPLETED, + ) + + pending_summary = AnalysisSummary( + id="analysis-id", + status=AnalysisStatus.PENDING, + ) + decoy.when( + await file_reader_writer.read( + # TODO(mm, 2024-02-07): Recent FastAPI upgrades mean protocol_file.filename + # is typed as possibly None. Investigate whether that can actually happen in + # practice and whether we need to account for it. + files=[protocol_file] # type: ignore[list-item] + ) + ).then_return([buffered_file]) + + decoy.when(await file_hasher.hash(files=[buffered_file])).then_return("a_b_c") + decoy.when(protocol_store.get_id_by_hash("a_b_c")).then_return("the-og-proto-id") + decoy.when(protocol_store.get(protocol_id="the-og-proto-id")).then_return( + stored_protocol_resource + ) + decoy.when( + analysis_store.get_summaries_by_protocol(protocol_id="the-og-proto-id") + ).then_return([completed_summary]) + decoy.when( + await analysis_store.matching_rtp_values_in_analysis( + completed_summary, {"vol": 123, "dry_run": True, "mount": "left"} + ) + ).then_return(False) + decoy.when( + analysis_store.add_pending( + protocol_id="the-og-proto-id", analysis_id="analysis-id" + ) + ).then_return(pending_summary) + + result = await create_protocol( + files=[protocol_file], + key="dummy-key-111", + run_time_parameter_values='{"vol": 123, "dry_run": true, "mount": "left"}', + protocol_directory=protocol_directory, + protocol_store=protocol_store, + analysis_store=analysis_store, + file_reader_writer=file_reader_writer, + protocol_reader=protocol_reader, + file_hasher=file_hasher, + protocol_analyzer=protocol_analyzer, + task_runner=task_runner, + protocol_auto_deleter=protocol_auto_deleter, + robot_type="OT-2 Standard", + protocol_id="protocol-id", + analysis_id="analysis-id", + created_at=datetime(year=2021, month=1, day=1), + ) + + assert result.content.data == Protocol( + id="the-og-proto-id", + createdAt=datetime(year=2020, month=1, day=1), + protocolType=ProtocolType.JSON, + metadata=Metadata(this_is_fake_metadata=True), # type: ignore[call-arg] + robotType="OT-2 Standard", + analysisSummaries=[completed_summary, pending_summary], + files=[ProtocolFile(name="foo.json", role=ProtocolFileRole.MAIN)], + key="dummy-key-222", + ) + assert result.status_code == 200 + decoy.verify( + task_runner.run( + protocol_analyzer.analyze, + analysis_id="analysis-id", + protocol_resource=stored_protocol_resource, + run_time_param_values={"vol": 123, "dry_run": True, "mount": "left"}, + ), + analysis_store.add_pending( + protocol_id="the-og-proto-id", + analysis_id="analysis-id", + ), + ) + + +async def test_create_existing_protocol_with_same_run_time_params( decoy: Decoy, protocol_store: ProtocolStore, analysis_store: AnalysisStore, @@ -666,10 +909,6 @@ async def test_create_existing_protocol_with_run_time_params( id="analysis-id", status=AnalysisStatus.COMPLETED, ), - AnalysisSummary( - id="analysis-id", - status=AnalysisStatus.PENDING, - ), ] decoy.when( @@ -689,6 +928,11 @@ async def test_create_existing_protocol_with_run_time_params( decoy.when( analysis_store.get_summaries_by_protocol(protocol_id="the-og-proto-id") ).then_return(analysis_summaries) + decoy.when( + await 
analysis_store.matching_rtp_values_in_analysis( + analysis_summaries[-1], {"vol": 123, "dry_run": True, "mount": "left"} + ) + ).then_return(True) result = await create_protocol( files=[protocol_file], @@ -727,11 +971,110 @@ async def test_create_existing_protocol_with_run_time_params( protocol_resource=stored_protocol_resource, run_time_param_values={"vol": 123, "dry_run": True, "mount": "left"}, ), + times=0, + ) + decoy.verify( analysis_store.add_pending( protocol_id="the-og-proto-id", analysis_id="analysis-id", ), + times=0, + ) + + +async def test_create_existing_protocol_with_pending_analysis_raises( + decoy: Decoy, + protocol_store: ProtocolStore, + analysis_store: AnalysisStore, + protocol_reader: ProtocolReader, + file_reader_writer: FileReaderWriter, + file_hasher: FileHasher, + protocol_analyzer: ProtocolAnalyzer, + task_runner: TaskRunner, + protocol_auto_deleter: ProtocolAutoDeleter, +) -> None: + """It should raise an error if protocol has existing pending analysis.""" + protocol_directory = Path("/dev/null") + content = bytes("some_content", encoding="utf-8") + uploaded_file = io.BytesIO(content) + + protocol_file = UploadFile(filename="foo.json", file=uploaded_file) + buffered_file = BufferedFile(name="blah", contents=content, path=None) + + protocol_source = ProtocolSource( + directory=Path("/dev/null"), + main_file=Path("/dev/null/foo.json"), + files=[ + ProtocolSourceFile( + path=Path("/dev/null/foo.json"), + role=ProtocolFileRole.MAIN, + ) + ], + metadata={"this_is_fake_metadata": True}, + robot_type="OT-2 Standard", + config=JsonProtocolConfig(schema_version=123), + content_hash="a_b_c", + ) + + stored_protocol_resource = ProtocolResource( + protocol_id="protocol-id", + created_at=datetime(year=2020, month=1, day=1), + source=protocol_source, + protocol_key="dummy-key-222", + ) + + analysis_summaries = [ + AnalysisSummary( + id="analysis-id", + status=AnalysisStatus.PENDING, + ), + ] + + decoy.when( + await file_reader_writer.read( + # TODO(mm, 2024-02-07): Recent FastAPI upgrades mean protocol_file.filename + # is typed as possibly None. Investigate whether that can actually happen in + # practice and whether we need to account for it. 
+ files=[protocol_file] # type: ignore[list-item] + ) + ).then_return([buffered_file]) + + decoy.when(await file_hasher.hash(files=[buffered_file])).then_return("a_b_c") + decoy.when(protocol_store.get_id_by_hash("a_b_c")).then_return("the-og-proto-id") + decoy.when(protocol_store.get(protocol_id="the-og-proto-id")).then_return( + stored_protocol_resource ) + decoy.when( + analysis_store.get_summaries_by_protocol(protocol_id="the-og-proto-id") + ).then_return(analysis_summaries) + decoy.when( + await analysis_store.matching_rtp_values_in_analysis( + analysis_summaries[-1], {"vol": 123, "dry_run": True, "mount": "left"} + ) + ).then_raise(AnalysisIsPendingError("a-id")) + + with pytest.raises(ApiError) as exc_info: + await create_protocol( + files=[protocol_file], + key="dummy-key-111", + run_time_parameter_values='{"vol": 123, "dry_run": true, "mount": "left"}', + protocol_directory=protocol_directory, + protocol_store=protocol_store, + analysis_store=analysis_store, + file_reader_writer=file_reader_writer, + protocol_reader=protocol_reader, + file_hasher=file_hasher, + protocol_analyzer=protocol_analyzer, + task_runner=task_runner, + protocol_auto_deleter=protocol_auto_deleter, + robot_type="OT-2 Standard", + protocol_id="protocol-id", + analysis_id="analysis-id", + created_at=datetime(year=2021, month=1, day=1), + ) + + assert exc_info.value.status_code == 503 + assert exc_info.value.content["errors"][0]["id"] == "LastAnalysisPending" async def test_create_protocol_not_readable(