diff --git a/abr-testing/abr_testing/protocols/active_protocols/3_Tartrazine Protocol.py b/abr-testing/abr_testing/protocols/active_protocols/3_Tartrazine Protocol.py index 05a6300e053..66db85468f4 100644 --- a/abr-testing/abr_testing/protocols/active_protocols/3_Tartrazine Protocol.py +++ b/abr-testing/abr_testing/protocols/active_protocols/3_Tartrazine Protocol.py @@ -19,55 +19,83 @@ def add_parameters(parameters: ParameterContext) -> None: - """Parameters.""" - helpers.create_single_pipette_mount_parameter(parameters) + """Add parameters.""" + parameters.add_int( + variable_name="number_of_plates", + display_name="Number of Plates", + default=4, + minimum=1, + maximum=4, + ) def run(ctx: ProtocolContext) -> None: """Protocol.""" - mount_pos_50ul = ctx.params.pipette_mount # type: ignore[attr-defined] + number_of_plates = ctx.params.number_of_plates # type: ignore [attr-defined] # Plate Reader plate_reader: AbsorbanceReaderContext = ctx.load_module( helpers.abs_mod_str, "A3" ) # type: ignore[assignment] hs: HeaterShakerContext = ctx.load_module(helpers.hs_str, "A1") # type: ignore[assignment] - hs_adapter = hs.load_adapter("opentrons_96_pcr_adapter") + hs_adapter = hs.load_adapter("opentrons_universal_flat_adapter") tube_rack = ctx.load_labware( "opentrons_10_tuberack_nest_4x50ml_6x15ml_conical", "C2", "Reagent Tube" ) tartrazine_tube = tube_rack["A3"] - + water_tube_1 = tube_rack["A4"] + water_tube_2 = tube_rack["B3"] sample_plate_1 = ctx.load_labware( - "nest_96_wellplate_200ul_flat", "D1", "Sample Plate 1" + "corning_96_wellplate_360ul_flat", "D1", "Sample Plate 1" ) sample_plate_2 = ctx.load_labware( - "nest_96_wellplate_200ul_flat", "C1", "Sample Plate 2" + "corning_96_wellplate_360ul_flat", "D2", "Sample Plate 2" ) sample_plate_3 = ctx.load_labware( - "nest_96_wellplate_200ul_flat", "B1", "Sample Plate 3" + "corning_96_wellplate_360ul_flat", "C1", "Sample Plate 3" + ) + sample_plate_4 = ctx.load_labware( + "corning_96_wellplate_360ul_flat", "B1", "Sample Plate 4" ) - sample_plate_list = [sample_plate_1, sample_plate_2, sample_plate_3] + + sample_plate_list = [sample_plate_1, sample_plate_2, sample_plate_3, sample_plate_4] tiprack_50_1 = ctx.load_labware("opentrons_flex_96_tiprack_50ul", "D3") tiprack_50_2 = ctx.load_labware("opentrons_flex_96_tiprack_50ul", "C3") tiprack_50_3 = ctx.load_labware("opentrons_flex_96_tiprack_50ul", "B3") + tiprack_1000_1 = ctx.load_labware("opentrons_flex_96_tiprack_1000ul", "A2") tip_racks = [tiprack_50_1, tiprack_50_2, tiprack_50_3] # Pipette - p50 = ctx.load_instrument("flex_1channel_50", mount_pos_50ul, tip_racks=tip_racks) + p50 = ctx.load_instrument("flex_1channel_50", "left", tip_racks=tip_racks) + p1000 = ctx.load_instrument( + "flex_1channel_1000", "right", tip_racks=[tiprack_1000_1] + ) # Probe wells liquid_vols_and_wells: Dict[str, List[Dict[str, Well | List[Well] | float]]] = { - "Tartrazine": [{"well": tartrazine_tube, "volume": 45.0}] + "Tartrazine": [{"well": tartrazine_tube, "volume": 45.0}], + "Water": [{"well": [water_tube_1, water_tube_2], "volume": 45.0}], } helpers.find_liquid_height_of_loaded_liquids(ctx, liquid_vols_and_wells, p50) i = 0 all_percent_error_dict = {} cv_dict = {} - for sample_plate in sample_plate_list: - deck_locations = ["D1", "C1", "B1"] + vol = 0.0 + tip_count = 0 + for sample_plate in sample_plate_list[:number_of_plates]: + deck_locations = ["D1", "D2", "C1", "B1"] + p1000.pick_up_tip() for well in sample_plate.wells(): + if vol < 45000: + tube_of_choice = water_tube_1 + else: + tube_of_choice = water_tube_2 
p50.pick_up_tip() + p1000.aspirate(190, tube_of_choice) + p1000.air_gap(5) + p1000.dispense(5, well.top()) + p1000.dispense(190, well) + vol += 190 height = helpers.find_liquid_height(p50, tartrazine_tube) p50.aspirate(10, tartrazine_tube.bottom(z=height)) p50.air_gap(5) @@ -75,6 +103,10 @@ def run(ctx: ProtocolContext) -> None: p50.dispense(10, well.bottom(z=0.5)) p50.blow_out() p50.return_tip() + tip_count += 1 + if tip_count >= (96 * 3): + p50.reset_tipracks() + p1000.return_tip() helpers.move_labware_to_hs(ctx, sample_plate, hs, hs_adapter) helpers.set_hs_speed(ctx, hs, 1500, 2.0, True) hs.open_labware_latch() @@ -117,7 +149,6 @@ def run(ctx: ProtocolContext) -> None: plate_reader.open_lid() ctx.move_labware(sample_plate, deck_locations[i], use_gripper=True) i += 1 - # Print percent error dictionary ctx.comment("Percent Error: " + str(all_percent_error_dict)) # Print cv dictionary diff --git a/abr-testing/abr_testing/protocols/csv_parameters/3_samplevols.csv b/abr-testing/abr_testing/protocols/csv_parameters/3_samplevols.csv deleted file mode 100644 index bc952b330a1..00000000000 --- a/abr-testing/abr_testing/protocols/csv_parameters/3_samplevols.csv +++ /dev/null @@ -1,25 +0,0 @@ -Sample_Plate, Sample_well,InitialVol,InitialConc,TargetConc -sample_plate,A2,10,3.94,1 -sample_plate,B2,10,3.5,1 -sample_plate,C2,10,3.46,1 -sample_plate,D2,10,3.1,1 -sample_plate,E2,10,2.64,1 -sample_plate,F2,10,3.16,1 -sample_plate,G2,10,2.9,1 -sample_plate,H2,10,2.8,1 -sample_plate,A3,10,2.82,1 -sample_plate,B3,10,2.84,1 -sample_plate,C3,10,2.72,1 -sample_plate,D3,10,2.9,1 -sample_plate,A5,10,3.94,1 -sample_plate,B5,10,3.5,1 -sample_plate,C5,10,3.46,1 -sample_plate,D5,10,3.1,1 -sample_plate,E5,10,2.64,1 -sample_plate,F5,10,3.16,1 -sample_plate,G5,10,2.9,1 -sample_plate,H5,10,2.8,1 -sample_plate,A6,10,2.82,1 -sample_plate,B6,10,2.84,1 -sample_plate,C6,10,2.72,1 -sample_plate,D6,10,2.9,1 diff --git a/abr-testing/abr_testing/protocols/liquid_setups/3_OT3 ABR Normalize with Tubes Liquid Setup.py b/abr-testing/abr_testing/protocols/liquid_setups/3_Tartrazine Liquid Setup.py similarity index 73% rename from abr-testing/abr_testing/protocols/liquid_setups/3_OT3 ABR Normalize with Tubes Liquid Setup.py rename to abr-testing/abr_testing/protocols/liquid_setups/3_Tartrazine Liquid Setup.py index 86e4de2aeed..9e0b29a03ed 100644 --- a/abr-testing/abr_testing/protocols/liquid_setups/3_OT3 ABR Normalize with Tubes Liquid Setup.py +++ b/abr-testing/abr_testing/protocols/liquid_setups/3_Tartrazine Liquid Setup.py @@ -1,11 +1,11 @@ -"""Plate Filler Protocol for Simple Normalize Long.""" +"""Plate Filler Protocol for Tartrazine Protocol.""" from opentrons import protocol_api from abr_testing.protocols.helpers import ( load_common_liquid_setup_labware_and_instruments, ) metadata = { - "protocolName": "DVT1ABR3 Liquids: Flex Normalize with Tubes", + "protocolName": "DVT1ABR3 Liquids: Tartrazine Protocol", "author": "Rhyann clarke ", "source": "Protocol Library", } @@ -32,9 +32,16 @@ def run(protocol: protocol_api.ProtocolContext) -> None: ) # Transfer Liquid p1000.transfer( - 4000, + 45000, source_reservoir["A1"], - reagent_tube["A1"].top(), + reagent_tube["B3"].top(), + blowout=True, + blowout_location="source well", + ) + p1000.transfer( + 45000, + source_reservoir["A1"], + reagent_tube["A4"].top(), blowout=True, blowout_location="source well", ) diff --git a/abr-testing/abr_testing/tools/abr_setup.py b/abr-testing/abr_testing/tools/abr_setup.py index 67ed5bfb333..a0fca76b6e4 100644 --- 
a/abr-testing/abr_testing/tools/abr_setup.py +++ b/abr-testing/abr_testing/tools/abr_setup.py @@ -20,9 +20,9 @@ def run_sync_abr_sheet( sync_abr_sheet.run(storage_directory, abr_data_sheet, room_conditions_sheet) -def run_temp_sensor(ip_file: str) -> None: +def run_temp_sensor() -> None: """Run temperature sensors on all robots.""" - processes = ABRAsairScript.run(ip_file) + processes = ABRAsairScript.run() for process in processes: process.start() time.sleep(20) @@ -64,7 +64,6 @@ def get_calibration_data( def main(configurations: configparser.ConfigParser) -> None: """Main function.""" - ip_file = None storage_directory = None email = None drive_folder = None @@ -89,15 +88,10 @@ def main(configurations: configparser.ConfigParser) -> None: # Run Temperature Sensors if not has_defaults: - ip_file = configurations["TEMP-SENSOR"]["Robo_List"] ambient_conditions_sheet = configurations["TEMP-SENSOR"]["Sheet_Url"] print("Starting temp sensors...") - if ip_file: - run_temp_sensor(ip_file) - print("Temp Sensors Started") - else: - print("Missing ip_file location, please fix configs") - sys.exit(1) + run_temp_sensor() + print("Temp Sensors Started") # Get Run Logs and Record if not has_defaults: storage_directory = configurations["RUN-LOG"]["Storage"] diff --git a/api-client/src/runs/commands/types.ts b/api-client/src/runs/commands/types.ts index 8c5517a1fe3..215756ede66 100644 --- a/api-client/src/runs/commands/types.ts +++ b/api-client/src/runs/commands/types.ts @@ -1,8 +1,8 @@ import type { RunTimeCommand, RunCommandError } from '@opentrons/shared-data' export interface GetCommandsParams { - cursor: number | null // the index of the command at the center of the window pageLength: number // the number of items to include + cursor?: number } export interface GetRunCommandsParams extends GetCommandsParams { @@ -10,7 +10,7 @@ export interface GetRunCommandsParams extends GetCommandsParams { } export interface GetRunCommandsParamsRequest extends GetCommandsParams { - includeFixitCommands: boolean | null + includeFixitCommands?: boolean } export interface RunCommandErrors { diff --git a/api-client/src/runs/getErrorRecoveryPolicy.ts b/api-client/src/runs/getErrorRecoveryPolicy.ts new file mode 100644 index 00000000000..67a3d0bd93c --- /dev/null +++ b/api-client/src/runs/getErrorRecoveryPolicy.ts @@ -0,0 +1,17 @@ +import { GET, request } from '../request' + +import type { HostConfig } from '../types' +import type { ResponsePromise } from '../request' +import type { ErrorRecoveryPolicyResponse } from './types' + +export function getErrorRecoveryPolicy( + config: HostConfig, + runId: string +): ResponsePromise { + return request( + GET, + `/runs/${runId}/errorRecoveryPolicy`, + null, + config + ) +} diff --git a/api-client/src/runs/index.ts b/api-client/src/runs/index.ts index 183b8f7e4d4..fff1f303543 100644 --- a/api-client/src/runs/index.ts +++ b/api-client/src/runs/index.ts @@ -15,6 +15,7 @@ export * from './createLabwareOffset' export * from './createLabwareDefinition' export * from './constants' export * from './updateErrorRecoveryPolicy' +export * from './getErrorRecoveryPolicy' export * from './types' export type { CreateRunData } from './createRun' diff --git a/api-client/src/runs/types.ts b/api-client/src/runs/types.ts index bf8596b66d7..e41626b6448 100644 --- a/api-client/src/runs/types.ts +++ b/api-client/src/runs/types.ts @@ -204,6 +204,7 @@ export interface UpdateErrorRecoveryPolicyRequest { } export type UpdateErrorRecoveryPolicyResponse = Record +export type ErrorRecoveryPolicyResponse = 
UpdateErrorRecoveryPolicyRequest /** * Current Run State Data diff --git a/api/docs/v2/new_examples.rst b/api/docs/v2/new_examples.rst index 1aae3b633d0..42dbf92fd8d 100644 --- a/api/docs/v2/new_examples.rst +++ b/api/docs/v2/new_examples.rst @@ -284,7 +284,7 @@ When used in a protocol, loops automate repetitive steps such as aspirating and # etc... # range() starts at 0 and stops before 8, creating a range of 0-7 for i in range(8): - pipette.distribute(200, reservoir.wells()[i], plate.rows()[i]) + pipette.distribute(20, reservoir.wells()[i], plate.rows()[i]) .. tab:: OT-2 @@ -315,7 +315,7 @@ When used in a protocol, loops automate repetitive steps such as aspirating and # etc... # range() starts at 0 and stops before 8, creating a range of 0-7 for i in range(8): - p300.distribute(200, reservoir.wells()[i], plate.rows()[i]) + p300.distribute(20, reservoir.wells()[i], plate.rows()[i]) Notice here how Python's :py:class:`range` class (e.g., ``range(8)``) determines how many times the code loops. Also, in Python, a range of numbers is *exclusive* of the end value and counting starts at 0, not 1. For the Corning 96-well plate used here, this means well A1=0, B1=1, C1=2, and so on to the last well in the row, which is H1=7. diff --git a/api/release-notes-internal.md b/api/release-notes-internal.md index 1253f7e92fd..761f1f604f3 100644 --- a/api/release-notes-internal.md +++ b/api/release-notes-internal.md @@ -2,6 +2,10 @@ For more details about this release, please see the full [technical change log][ [technical change log]: https://github.com/Opentrons/opentrons/releases +## Internal Release 2.3.0-alpha.0 + +This internal release, pulled from the `edge` branch, contains features being developed for evo tip functionality. It's for internal testing only. + ## Internal Release 2.2.0-alpha.1 This internal release, pulled from the `edge` branch, contains features being developed for 8.2.0. It's for internal testing only. diff --git a/api/release-notes.md b/api/release-notes.md index e41d415d83e..1fdd9a033d9 100644 --- a/api/release-notes.md +++ b/api/release-notes.md @@ -24,6 +24,10 @@ Welcome to the v8.2.0 release of the Opentrons robot software! This release adds - Error recovery no longer causes an `AssertionError` when a Python protocol changes the pipette speed. +### Known Issues + +- You can't downgrade the robot software with an Absorbance Plate Reader attached. Disconnect the module first if you need to downgrade. + --- ## Opentrons Robot Software Changes in 8.1.0 diff --git a/api/src/opentrons/hardware_control/backends/flex_protocol.py b/api/src/opentrons/hardware_control/backends/flex_protocol.py index e5bc7ba1905..3862981b20c 100644 --- a/api/src/opentrons/hardware_control/backends/flex_protocol.py +++ b/api/src/opentrons/hardware_control/backends/flex_protocol.py @@ -77,13 +77,12 @@ def update_constraints_for_calibration_with_gantry_load( ) -> None: ... - def update_constraints_for_emulsifying_pipette( - self, mount: OT3Mount, gantry_load: GantryLoad - ) -> None: - ... - def update_constraints_for_plunger_acceleration( - self, mount: OT3Mount, acceleration: float, gantry_load: GantryLoad + self, + mount: OT3Mount, + acceleration: float, + gantry_load: GantryLoad, + high_speed_pipette: bool = False, ) -> None: ... 
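The interface change above folds the removed update_constraints_for_emulsifying_pipette hook into update_constraints_for_plunger_acceleration via a high_speed_pipette keyword that defaults to False. A minimal sketch of how a caller might pass the flag through — the import path for OT3Mount and GantryLoad and the backend/pipette objects are assumptions here, mirroring the ot3api.py call site later in this diff rather than defining a new API:

    # Sketch only: forward the high-speed (emulsifying) pipette flag to the backend
    # instead of issuing a separate constraint update for that pipette type.
    from opentrons.hardware_control.types import OT3Mount, GantryLoad

    def apply_plunger_constraints(
        backend, pipette, mount: OT3Mount, acceleration: float, gantry_load: GantryLoad
    ) -> None:
        # is_high_speed_pipette() is the same check ot3api.py uses below; the backend
        # then picks the emulsifying max speed for the plunger axis when it is True.
        backend.update_constraints_for_plunger_acceleration(
            mount,
            acceleration,
            gantry_load,
            high_speed_pipette=pipette.is_high_speed_pipette(),
        )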
diff --git a/api/src/opentrons/hardware_control/backends/ot3controller.py b/api/src/opentrons/hardware_control/backends/ot3controller.py index a7c30677910..0ff3c033f44 100644 --- a/api/src/opentrons/hardware_control/backends/ot3controller.py +++ b/api/src/opentrons/hardware_control/backends/ot3controller.py @@ -50,7 +50,6 @@ get_system_constraints, get_system_constraints_for_calibration, get_system_constraints_for_plunger_acceleration, - get_system_constraints_for_emulsifying_pipette, ) from .tip_presence_manager import TipPresenceManager @@ -406,28 +405,24 @@ def update_constraints_for_calibration_with_gantry_load( f"Set system constraints for calibration: {self._move_manager.get_constraints()}" ) - def update_constraints_for_emulsifying_pipette( - self, mount: OT3Mount, gantry_load: GantryLoad - ) -> None: - self._move_manager.update_constraints( - get_system_constraints_for_emulsifying_pipette( - self._configuration.motion_settings, gantry_load, mount - ) - ) - log.debug( - f"Set system constraints for emulsifying pipette: {self._move_manager.get_constraints()}" - ) - def update_constraints_for_gantry_load(self, gantry_load: GantryLoad) -> None: self._move_manager.update_constraints( get_system_constraints(self._configuration.motion_settings, gantry_load) ) def update_constraints_for_plunger_acceleration( - self, mount: OT3Mount, acceleration: float, gantry_load: GantryLoad + self, + mount: OT3Mount, + acceleration: float, + gantry_load: GantryLoad, + high_speed_pipette: bool = False, ) -> None: new_constraints = get_system_constraints_for_plunger_acceleration( - self._configuration.motion_settings, gantry_load, mount, acceleration + self._configuration.motion_settings, + gantry_load, + mount, + acceleration, + high_speed_pipette, ) self._move_manager.update_constraints(new_constraints) diff --git a/api/src/opentrons/hardware_control/backends/ot3simulator.py b/api/src/opentrons/hardware_control/backends/ot3simulator.py index 533fffe5642..377397ba597 100644 --- a/api/src/opentrons/hardware_control/backends/ot3simulator.py +++ b/api/src/opentrons/hardware_control/backends/ot3simulator.py @@ -234,13 +234,12 @@ def update_constraints_for_calibration_with_gantry_load( ) -> None: self._sim_gantry_load = gantry_load - def update_constraints_for_emulsifying_pipette( - self, mount: OT3Mount, gantry_load: GantryLoad - ) -> None: - pass - def update_constraints_for_plunger_acceleration( - self, mount: OT3Mount, acceleration: float, gantry_load: GantryLoad + self, + mount: OT3Mount, + acceleration: float, + gantry_load: GantryLoad, + high_speed_pipette: bool = False, ) -> None: self._sim_gantry_load = gantry_load diff --git a/api/src/opentrons/hardware_control/backends/ot3utils.py b/api/src/opentrons/hardware_control/backends/ot3utils.py index 57e74537bfd..cb8f5e95e71 100644 --- a/api/src/opentrons/hardware_control/backends/ot3utils.py +++ b/api/src/opentrons/hardware_control/backends/ot3utils.py @@ -284,12 +284,22 @@ def get_system_constraints_for_plunger_acceleration( gantry_load: GantryLoad, mount: OT3Mount, acceleration: float, + high_speed_pipette: bool = False, ) -> "SystemConstraints[Axis]": old_constraints = config.by_gantry_load(gantry_load) new_constraints = {} axis_kinds = set([k for _, v in old_constraints.items() for k in v.keys()]) + + def _get_axis_max_speed(ax: Axis) -> float: + if ax == Axis.of_main_tool_actuator(mount) and high_speed_pipette: + _max_speed = float(DEFAULT_EMULSIFYING_PIPETTE_AXIS_MAX_SPEED) + else: + _max_speed = old_constraints["default_max_speed"][axis_kind] + 
return _max_speed + for axis_kind in axis_kinds: for axis in Axis.of_kind(axis_kind): + _default_max_speed = _get_axis_max_speed(axis) if axis == Axis.of_main_tool_actuator(mount): _accel = acceleration else: @@ -298,7 +308,7 @@ def get_system_constraints_for_plunger_acceleration( _accel, old_constraints["max_speed_discontinuity"][axis_kind], old_constraints["direction_change_speed_discontinuity"][axis_kind], - old_constraints["default_max_speed"][axis_kind], + _default_max_speed, ) return new_constraints diff --git a/api/src/opentrons/hardware_control/modules/absorbance_reader.py b/api/src/opentrons/hardware_control/modules/absorbance_reader.py index ab6ce1bb22b..ec4a80b7f60 100644 --- a/api/src/opentrons/hardware_control/modules/absorbance_reader.py +++ b/api/src/opentrons/hardware_control/modules/absorbance_reader.py @@ -312,6 +312,8 @@ async def update_device(self, firmware_file_path: str) -> Tuple[bool, str]: log.debug(f"Updating {self.name}: {self.port} with {firmware_file_path}") self._updating = True success, res = await self._driver.update_firmware(firmware_file_path) + # it takes time for the plate reader to re-init after an update. + await asyncio.sleep(10) self._device_info = await self._driver.get_device_info() await self._poller.start() self._updating = False diff --git a/api/src/opentrons/hardware_control/ot3api.py b/api/src/opentrons/hardware_control/ot3api.py index af170484150..1b3275f4c7e 100644 --- a/api/src/opentrons/hardware_control/ot3api.py +++ b/api/src/opentrons/hardware_control/ot3api.py @@ -299,8 +299,11 @@ async def set_system_constraints_for_calibration(self) -> None: async def set_system_constraints_for_plunger_acceleration( self, mount: OT3Mount, acceleration: float ) -> None: + high_speed_pipette = self._pipette_handler.get_pipette( + mount + ).is_high_speed_pipette() self._backend.update_constraints_for_plunger_acceleration( - mount, acceleration, self._gantry_load + mount, acceleration, self._gantry_load, high_speed_pipette ) @contextlib.asynccontextmanager @@ -636,9 +639,6 @@ async def cache_pipette( ) self._pipette_handler.hardware_instruments[mount] = p - if self._pipette_handler.has_pipette(mount): - self._confirm_pipette_motion_constraints(mount) - if config is not None: self._set_pressure_sensor_available(mount, instrument_config=config) @@ -646,15 +646,6 @@ async def cache_pipette( # when applicable return skipped - def _confirm_pipette_motion_constraints( - self, - mount: OT3Mount, - ) -> None: - if self._pipette_handler.get_pipette(mount).is_high_speed_pipette(): - self._backend.update_constraints_for_emulsifying_pipette( - mount, self.gantry_load - ) - def get_pressure_sensor_available(self, mount: OT3Mount) -> bool: pip_axis = Axis.of_main_tool_actuator(mount) return self._backend.get_pressure_sensor_available(pip_axis) diff --git a/api/src/opentrons/hardware_control/protocols/gripper_controller.py b/api/src/opentrons/hardware_control/protocols/gripper_controller.py index fc81325193c..1b81f4ab460 100644 --- a/api/src/opentrons/hardware_control/protocols/gripper_controller.py +++ b/api/src/opentrons/hardware_control/protocols/gripper_controller.py @@ -14,6 +14,9 @@ async def grip( ) -> None: ... + async def home_gripper_jaw(self) -> None: + ... + async def ungrip(self, force_newtons: Optional[float] = None) -> None: """Release gripped object. 
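The home_gripper_jaw method added to the gripper controller protocol above is what the new robot/openGripperJaw engine command (later in this diff) uses to open the jaw, while robot/closeGripperJaw calls grip(). A hedged usage sketch, assuming ot3_api is an OT-3 hardware API instance with a gripper attached:

    # Sketch only: how the two new gripper-jaw engine commands map onto the hardware API.
    async def cycle_gripper_jaw(ot3_api, force_newtons=None):
        # robot/openGripperJaw homes the jaw open rather than calling ungrip().
        await ot3_api.home_gripper_jaw()
        # robot/closeGripperJaw grips, falling back to the default force when None.
        await ot3_api.grip(force_newtons=force_newtons)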
diff --git a/api/src/opentrons/protocol_api/core/engine/robot.py b/api/src/opentrons/protocol_api/core/engine/robot.py index df80917e091..0418afcbb95 100644 --- a/api/src/opentrons/protocol_api/core/engine/robot.py +++ b/api/src/opentrons/protocol_api/core/engine/robot.py @@ -129,3 +129,11 @@ def move_axes_relative(self, axis_map: AxisMapType, speed: Optional[float]) -> N self._engine_client.execute_command( cmd.robot.MoveAxesRelativeParams(axis_map=axis_engine_map, speed=speed) ) + + def release_grip(self) -> None: + self._engine_client.execute_command(cmd.robot.openGripperJawParams()) + + def close_gripper(self, force: Optional[float] = None) -> None: + self._engine_client.execute_command( + cmd.robot.closeGripperJawParams(force=force) + ) diff --git a/api/src/opentrons/protocol_api/core/robot.py b/api/src/opentrons/protocol_api/core/robot.py index 95def3e17f3..f65ddbbd7bb 100644 --- a/api/src/opentrons/protocol_api/core/robot.py +++ b/api/src/opentrons/protocol_api/core/robot.py @@ -41,3 +41,11 @@ def move_axes_to( @abstractmethod def move_axes_relative(self, axis_map: AxisMapType, speed: Optional[float]) -> None: ... + + @abstractmethod + def release_grip(self) -> None: + ... + + @abstractmethod + def close_gripper(self, force: Optional[float] = None) -> None: + ... diff --git a/api/src/opentrons/protocol_api/module_contexts.py b/api/src/opentrons/protocol_api/module_contexts.py index 8890981e32a..96950b927ef 100644 --- a/api/src/opentrons/protocol_api/module_contexts.py +++ b/api/src/opentrons/protocol_api/module_contexts.py @@ -608,7 +608,7 @@ def set_lid_temperature(self, temperature: float) -> None: .. note:: The Thermocycler will proceed to the next command immediately after - ``temperature`` has been reached. + ``temperature`` is reached. """ self._core.set_target_lid_temperature(celsius=temperature) @@ -625,28 +625,18 @@ def execute_profile( """Execute a Thermocycler profile, defined as a cycle of ``steps``, for a given number of ``repetitions``. - :param steps: List of unique steps that make up a single cycle. - Each list item should be a dictionary that maps to - the parameters of the :py:meth:`set_block_temperature` - method with a ``temperature`` key, and either or both of + :param steps: List of steps that make up a single cycle. + Each list item should be a dictionary that maps to the parameters + of the :py:meth:`set_block_temperature` method. The dictionary's + keys must be ``temperature`` and one or both of ``hold_time_seconds`` and ``hold_time_minutes``. :param repetitions: The number of times to repeat the cycled steps. :param block_max_volume: The greatest volume of liquid contained in any individual well of the loaded labware, in µL. If not specified, the default is 25 µL. - .. note:: - - Unlike with :py:meth:`set_block_temperature`, either or both of - ``hold_time_minutes`` and ``hold_time_seconds`` must be defined - and for each step. - - .. note:: - - Before API Version 2.21, Thermocycler profiles run with this command - would be listed in the app as having a number of repetitions equal to - their step count. At or above API Version 2.21, the structure of the - Thermocycler cycles is preserved. + .. versionchanged:: 2.21 + Fixed run log listing number of steps instead of number of repetitions. 
""" repetitions = validation.ensure_thermocycler_repetition_count(repetitions) diff --git a/api/src/opentrons/protocol_api/robot_context.py b/api/src/opentrons/protocol_api/robot_context.py index 5b0e578f9bb..df14b8bb7c5 100644 --- a/api/src/opentrons/protocol_api/robot_context.py +++ b/api/src/opentrons/protocol_api/robot_context.py @@ -144,11 +144,13 @@ def move_axes_relative( ) self._core.move_axes_relative(axis_map, speed) - def close_gripper_jaw(self, force: float) -> None: - raise NotImplementedError() + def close_gripper_jaw(self, force: Optional[float] = None) -> None: + """Command the gripper closed with some force.""" + self._core.close_gripper(force) def open_gripper_jaw(self) -> None: - raise NotImplementedError() + """Command the gripper open.""" + self._core.release_grip() def axis_coordinates_for( self, diff --git a/api/src/opentrons/protocol_engine/commands/absorbance_reader/read.py b/api/src/opentrons/protocol_engine/commands/absorbance_reader/read.py index 1ca848858b6..8e8926089f1 100644 --- a/api/src/opentrons/protocol_engine/commands/absorbance_reader/read.py +++ b/api/src/opentrons/protocol_engine/commands/absorbance_reader/read.py @@ -82,7 +82,7 @@ async def execute( # noqa: C901 ) if abs_reader_substate.is_lid_on is False: raise CannotPerformModuleAction( - "Cannot perform Read action on Absorbance Reader with the lid open. Try calling `.close_lid()` first." + "Absorbance Plate Reader can't read a plate with the lid open. Call `close_lid()` first." ) # TODO: we need to return a file ID and increase the file count even when a moduel is not attached diff --git a/api/src/opentrons/protocol_engine/commands/aspirate.py b/api/src/opentrons/protocol_engine/commands/aspirate.py index 38dbe03c7e0..fa84afbde8c 100644 --- a/api/src/opentrons/protocol_engine/commands/aspirate.py +++ b/api/src/opentrons/protocol_engine/commands/aspirate.py @@ -11,6 +11,7 @@ FlowRateMixin, BaseLiquidHandlingResult, aspirate_in_place, + prepare_for_aspirate, ) from .movement_common import ( LiquidHandlingWellLocationMixin, @@ -94,6 +95,17 @@ async def execute(self, params: AspirateParams) -> _ExecuteReturn: pipette_id = params.pipetteId labware_id = params.labwareId well_name = params.wellName + well_location = params.wellLocation + + state_update = StateUpdate() + + final_location = self._state_view.geometry.get_well_position( + labware_id=labware_id, + well_name=well_name, + well_location=well_location, + operation_volume=-params.volume, + pipette_id=pipette_id, + ) ready_to_aspirate = self._pipetting.get_is_ready_to_aspirate( pipette_id=pipette_id @@ -102,14 +114,32 @@ async def execute(self, params: AspirateParams) -> _ExecuteReturn: current_well = None if not ready_to_aspirate: - await self._movement.move_to_well( + move_result = await move_to_well( + movement=self._movement, + model_utils=self._model_utils, pipette_id=pipette_id, labware_id=labware_id, well_name=well_name, well_location=WellLocation(origin=WellOrigin.TOP), ) + state_update.append(move_result.state_update) + if isinstance(move_result, DefinedErrorData): + return DefinedErrorData(move_result.public, state_update=state_update) - await self._pipetting.prepare_for_aspirate(pipette_id=pipette_id) + prepare_result = await prepare_for_aspirate( + pipette_id=pipette_id, + pipetting=self._pipetting, + model_utils=self._model_utils, + # Note that the retryLocation is the final location, inside the liquid, + # because that's where we'd want the client to try re-aspirating if this + # command fails and the run enters error recovery. 
+ location_if_error={"retryLocation": final_location}, + ) + state_update.append(prepare_result.state_update) + if isinstance(prepare_result, DefinedErrorData): + return DefinedErrorData( + public=prepare_result.public, state_update=state_update + ) # set our current deck location to the well now that we've made # an intermediate move for the "prepare for aspirate" step @@ -125,12 +155,15 @@ async def execute(self, params: AspirateParams) -> _ExecuteReturn: pipette_id=pipette_id, labware_id=labware_id, well_name=well_name, - well_location=params.wellLocation, + well_location=well_location, current_well=current_well, operation_volume=-params.volume, ) + state_update.append(move_result.state_update) if isinstance(move_result, DefinedErrorData): - return move_result + return DefinedErrorData( + public=move_result.public, state_update=state_update + ) aspirate_result = await aspirate_in_place( pipette_id=pipette_id, @@ -147,47 +180,42 @@ async def execute(self, params: AspirateParams) -> _ExecuteReturn: pipetting=self._pipetting, model_utils=self._model_utils, ) + state_update.append(aspirate_result.state_update) if isinstance(aspirate_result, DefinedErrorData): - return DefinedErrorData( - public=aspirate_result.public, - state_update=StateUpdate.reduce( - move_result.state_update, aspirate_result.state_update - ).set_liquid_operated( - labware_id=labware_id, - well_names=self._state_view.geometry.get_wells_covered_by_pipette_with_active_well( - labware_id, - well_name, - params.pipetteId, - ), - volume_added=CLEAR, - ), - state_update_if_false_positive=StateUpdate.reduce( - move_result.state_update, - aspirate_result.state_update_if_false_positive, + state_update.set_liquid_operated( + labware_id=labware_id, + well_names=self._state_view.geometry.get_wells_covered_by_pipette_with_active_well( + labware_id, + well_name, + params.pipetteId, ), + volume_added=CLEAR, ) - else: - return SuccessData( - public=AspirateResult( - volume=aspirate_result.public.volume, - position=move_result.public.position, - ), - state_update=StateUpdate.reduce( - move_result.state_update, aspirate_result.state_update - ).set_liquid_operated( - labware_id=labware_id, - well_names=self._state_view.geometry.get_wells_covered_by_pipette_with_active_well( - labware_id, well_name, pipette_id - ), - volume_added=-aspirate_result.public.volume - * self._state_view.geometry.get_nozzles_per_well( - labware_id, - well_name, - params.pipetteId, - ), - ), + return DefinedErrorData( + public=aspirate_result.public, state_update=state_update ) + state_update.set_liquid_operated( + labware_id=labware_id, + well_names=self._state_view.geometry.get_wells_covered_by_pipette_with_active_well( + labware_id, well_name, pipette_id + ), + volume_added=-aspirate_result.public.volume + * self._state_view.geometry.get_nozzles_per_well( + labware_id, + well_name, + params.pipetteId, + ), + ) + + return SuccessData( + public=AspirateResult( + volume=aspirate_result.public.volume, + position=move_result.public.position, + ), + state_update=state_update, + ) + class Aspirate( BaseCommand[ diff --git a/api/src/opentrons/protocol_engine/commands/command_unions.py b/api/src/opentrons/protocol_engine/commands/command_unions.py index c33f55e2e01..9c548fa8045 100644 --- a/api/src/opentrons/protocol_engine/commands/command_unions.py +++ b/api/src/opentrons/protocol_engine/commands/command_unions.py @@ -416,6 +416,8 @@ robot.MoveTo, robot.MoveAxesRelative, robot.MoveAxesTo, + robot.openGripperJaw, + robot.closeGripperJaw, ], 
Field(discriminator="commandType"), ] @@ -499,6 +501,8 @@ robot.MoveAxesRelativeParams, robot.MoveAxesToParams, robot.MoveToParams, + robot.openGripperJawParams, + robot.closeGripperJawParams, ] CommandType = Union[ @@ -580,6 +584,8 @@ robot.MoveAxesRelativeCommandType, robot.MoveAxesToCommandType, robot.MoveToCommandType, + robot.openGripperJawCommandType, + robot.closeGripperJawCommandType, ] CommandCreate = Annotated[ @@ -662,6 +668,8 @@ robot.MoveAxesRelativeCreate, robot.MoveAxesToCreate, robot.MoveToCreate, + robot.openGripperJawCreate, + robot.closeGripperJawCreate, ], Field(discriminator="commandType"), ] @@ -745,6 +753,8 @@ robot.MoveAxesRelativeResult, robot.MoveAxesToResult, robot.MoveToResult, + robot.openGripperJawResult, + robot.closeGripperJawResult, ] diff --git a/api/src/opentrons/protocol_engine/commands/robot/__init__.py b/api/src/opentrons/protocol_engine/commands/robot/__init__.py index 5d5fc691e5f..048fecd09fe 100644 --- a/api/src/opentrons/protocol_engine/commands/robot/__init__.py +++ b/api/src/opentrons/protocol_engine/commands/robot/__init__.py @@ -21,6 +21,20 @@ MoveAxesRelativeResult, MoveAxesRelativeCommandType, ) +from .open_gripper_jaw import ( + openGripperJaw, + openGripperJawCreate, + openGripperJawParams, + openGripperJawResult, + openGripperJawCommandType, +) +from .close_gripper_jaw import ( + closeGripperJaw, + closeGripperJawCreate, + closeGripperJawParams, + closeGripperJawResult, + closeGripperJawCommandType, +) __all__ = [ # robot/moveTo @@ -41,4 +55,16 @@ "MoveAxesRelativeParams", "MoveAxesRelativeResult", "MoveAxesRelativeCommandType", + # robot/openGripperJaw + "openGripperJaw", + "openGripperJawCreate", + "openGripperJawParams", + "openGripperJawResult", + "openGripperJawCommandType", + # robot/closeGripperJaw + "closeGripperJaw", + "closeGripperJawCreate", + "closeGripperJawParams", + "closeGripperJawResult", + "closeGripperJawCommandType", ] diff --git a/api/src/opentrons/protocol_engine/commands/robot/close_gripper_jaw.py b/api/src/opentrons/protocol_engine/commands/robot/close_gripper_jaw.py new file mode 100644 index 00000000000..965c6d2ec72 --- /dev/null +++ b/api/src/opentrons/protocol_engine/commands/robot/close_gripper_jaw.py @@ -0,0 +1,79 @@ +"""Command models for opening a gripper jaw.""" +from __future__ import annotations +from typing import Literal, Type, Optional +from opentrons.hardware_control import HardwareControlAPI +from opentrons.protocol_engine.resources import ensure_ot3_hardware + +from pydantic import BaseModel, Field + +from ..command import ( + AbstractCommandImpl, + BaseCommand, + BaseCommandCreate, + SuccessData, +) +from opentrons.protocol_engine.errors.error_occurrence import ErrorOccurrence + + +closeGripperJawCommandType = Literal["robot/closeGripperJaw"] + + +class closeGripperJawParams(BaseModel): + """Payload required to close a gripper.""" + + force: Optional[float] = Field( + default=None, + description="The force the gripper should use to hold the jaws, falls to default if none is provided.", + ) + + +class closeGripperJawResult(BaseModel): + """Result data from the execution of a closeGripperJaw command.""" + + pass + + +class closeGripperJawImplementation( + AbstractCommandImpl[closeGripperJawParams, SuccessData[closeGripperJawResult]] +): + """closeGripperJaw command implementation.""" + + def __init__( + self, + hardware_api: HardwareControlAPI, + **kwargs: object, + ) -> None: + self._hardware_api = hardware_api + + async def execute( + self, params: closeGripperJawParams + ) -> 
SuccessData[closeGripperJawResult]: + """Release the gripper.""" + ot3_hardware_api = ensure_ot3_hardware(self._hardware_api) + await ot3_hardware_api.grip(force_newtons=params.force) + return SuccessData( + public=closeGripperJawResult(), + ) + + +class closeGripperJaw( + BaseCommand[closeGripperJawParams, closeGripperJawResult, ErrorOccurrence] +): + """closeGripperJaw command model.""" + + commandType: closeGripperJawCommandType = "robot/closeGripperJaw" + params: closeGripperJawParams + result: Optional[closeGripperJawResult] + + _ImplementationCls: Type[ + closeGripperJawImplementation + ] = closeGripperJawImplementation + + +class closeGripperJawCreate(BaseCommandCreate[closeGripperJawParams]): + """closeGripperJaw command request model.""" + + commandType: closeGripperJawCommandType = "robot/closeGripperJaw" + params: closeGripperJawParams + + _CommandCls: Type[closeGripperJaw] = closeGripperJaw diff --git a/api/src/opentrons/protocol_engine/commands/robot/open_gripper_jaw.py b/api/src/opentrons/protocol_engine/commands/robot/open_gripper_jaw.py new file mode 100644 index 00000000000..22aa1debd42 --- /dev/null +++ b/api/src/opentrons/protocol_engine/commands/robot/open_gripper_jaw.py @@ -0,0 +1,77 @@ +"""Command models for opening a gripper jaw.""" +from __future__ import annotations +from typing import Literal, Type, Optional +from opentrons.hardware_control import HardwareControlAPI +from opentrons.protocol_engine.resources import ensure_ot3_hardware + +from pydantic import BaseModel + +from ..command import ( + AbstractCommandImpl, + BaseCommand, + BaseCommandCreate, + SuccessData, +) +from opentrons.protocol_engine.errors.error_occurrence import ErrorOccurrence + + +openGripperJawCommandType = Literal["robot/openGripperJaw"] + + +class openGripperJawParams(BaseModel): + """Payload required to release a gripper.""" + + pass + + +class openGripperJawResult(BaseModel): + """Result data from the execution of a openGripperJaw command.""" + + pass + + +class openGripperJawImplementation( + AbstractCommandImpl[openGripperJawParams, SuccessData[openGripperJawResult]] +): + """openGripperJaw command implementation.""" + + def __init__( + self, + hardware_api: HardwareControlAPI, + **kwargs: object, + ) -> None: + self._hardware_api = hardware_api + + async def execute( + self, params: openGripperJawParams + ) -> SuccessData[openGripperJawResult]: + """Release the gripper.""" + ot3_hardware_api = ensure_ot3_hardware(self._hardware_api) + + await ot3_hardware_api.home_gripper_jaw() + return SuccessData( + public=openGripperJawResult(), + ) + + +class openGripperJaw( + BaseCommand[openGripperJawParams, openGripperJawResult, ErrorOccurrence] +): + """openGripperJaw command model.""" + + commandType: openGripperJawCommandType = "robot/openGripperJaw" + params: openGripperJawParams + result: Optional[openGripperJawResult] + + _ImplementationCls: Type[ + openGripperJawImplementation + ] = openGripperJawImplementation + + +class openGripperJawCreate(BaseCommandCreate[openGripperJawParams]): + """openGripperJaw command request model.""" + + commandType: openGripperJawCommandType = "robot/openGripperJaw" + params: openGripperJawParams + + _CommandCls: Type[openGripperJaw] = openGripperJaw diff --git a/api/src/opentrons/protocol_engine/commands/unsafe/unsafe_ungrip_labware.py b/api/src/opentrons/protocol_engine/commands/unsafe/unsafe_ungrip_labware.py index 9674513d749..6f8f5b71fce 100644 --- a/api/src/opentrons/protocol_engine/commands/unsafe/unsafe_ungrip_labware.py +++ 
b/api/src/opentrons/protocol_engine/commands/unsafe/unsafe_ungrip_labware.py @@ -1,6 +1,8 @@ """Ungrip labware payload, result, and implementaiton.""" from __future__ import annotations + +from opentrons.hardware_control.types import Axis from opentrons.protocol_engine.errors.exceptions import GripperNotAttachedError from pydantic import BaseModel from typing import Optional, Type @@ -46,7 +48,7 @@ async def execute( ot3_hardware_api = ensure_ot3_hardware(self._hardware_api) if not ot3_hardware_api.has_gripper(): raise GripperNotAttachedError("No gripper found to perform ungrip.") - await ot3_hardware_api.ungrip() + await ot3_hardware_api.home([Axis.G]) return SuccessData( public=UnsafeUngripLabwareResult(), ) diff --git a/api/src/opentrons/protocol_engine/execution/pipetting.py b/api/src/opentrons/protocol_engine/execution/pipetting.py index 2964f02d183..10d613e4dcf 100644 --- a/api/src/opentrons/protocol_engine/execution/pipetting.py +++ b/api/src/opentrons/protocol_engine/execution/pipetting.py @@ -91,7 +91,11 @@ def get_is_ready_to_aspirate(self, pipette_id: str) -> bool: ) async def prepare_for_aspirate(self, pipette_id: str) -> None: - """Prepare for pipette aspiration.""" + """Prepare for pipette aspiration. + + Raises: + PipetteOverpressureError, propagated as-is from the hardware controller. + """ hw_mount = self._state_view.pipettes.get_mount(pipette_id).to_hw_mount() await self._hardware_api.prepare_for_aspirate(mount=hw_mount) diff --git a/api/src/opentrons/protocol_engine/state/command_history.py b/api/src/opentrons/protocol_engine/state/command_history.py index d555764e54e..0879a7cd130 100644 --- a/api/src/opentrons/protocol_engine/state/command_history.py +++ b/api/src/opentrons/protocol_engine/state/command_history.py @@ -24,6 +24,9 @@ class CommandHistory: _all_command_ids: List[str] """All command IDs, in insertion order.""" + _all_failed_command_ids: List[str] + """All failed command IDs, in insertion order.""" + _all_command_ids_but_fixit_command_ids: List[str] """All command IDs besides fixit command intents, in insertion order.""" @@ -47,6 +50,7 @@ class CommandHistory: def __init__(self) -> None: self._all_command_ids = [] + self._all_failed_command_ids = [] self._all_command_ids_but_fixit_command_ids = [] self._queued_command_ids = OrderedSet() self._queued_setup_command_ids = OrderedSet() @@ -101,6 +105,13 @@ def get_all_commands(self) -> List[Command]: for command_id in self._all_command_ids ] + def get_all_failed_commands(self) -> List[Command]: + """Get all failed commands.""" + return [ + self._commands_by_id[command_id].command + for command_id in self._all_failed_command_ids + ] + def get_filtered_command_ids(self, include_fixit_commands: bool) -> List[str]: """Get all fixit command IDs.""" if include_fixit_commands: @@ -242,6 +253,7 @@ def set_command_failed(self, command: Command) -> None: self._remove_queue_id(command.id) self._remove_setup_queue_id(command.id) self._set_most_recently_completed_command_id(command.id) + self._all_failed_command_ids.append(command.id) def _add(self, command_id: str, command_entry: CommandEntry) -> None: """Create or update a command entry.""" diff --git a/api/src/opentrons/protocol_engine/state/commands.py b/api/src/opentrons/protocol_engine/state/commands.py index 4d2009aae80..da99c14ef3e 100644 --- a/api/src/opentrons/protocol_engine/state/commands.py +++ b/api/src/opentrons/protocol_engine/state/commands.py @@ -228,9 +228,6 @@ class CommandState: This value can be used to generate future hashes. 
""" - failed_command_errors: List[ErrorOccurrence] - """List of command errors that occurred during run execution.""" - has_entered_error_recovery: bool """Whether the run has entered error recovery.""" @@ -269,7 +266,6 @@ def __init__( run_started_at=None, latest_protocol_command_hash=None, stopped_by_estop=False, - failed_command_errors=[], error_recovery_policy=error_recovery_policy, has_entered_error_recovery=False, ) @@ -366,7 +362,6 @@ def _handle_fail_command_action(self, action: FailCommandAction) -> None: notes=action.notes, ) self._state.failed_command = self._state.command_history.get(action.command_id) - self._state.failed_command_errors.append(public_error_occurrence) if ( prev_entry.command.intent in (CommandIntent.PROTOCOL, None) @@ -706,7 +701,12 @@ def get_error(self) -> Optional[ErrorOccurrence]: def get_all_errors(self) -> List[ErrorOccurrence]: """Get the run's full error list, if there was none, returns an empty list.""" - return self._state.failed_command_errors + failed_commands = self._state.command_history.get_all_failed_commands() + return [ + command_error.error + for command_error in failed_commands + if command_error.error is not None + ] def get_has_entered_recovery_mode(self) -> bool: """Get whether the run has entered recovery mode.""" diff --git a/api/src/opentrons/protocol_engine/state/modules.py b/api/src/opentrons/protocol_engine/state/modules.py index c61d4173ff1..f2f9dc5e8e4 100644 --- a/api/src/opentrons/protocol_engine/state/modules.py +++ b/api/src/opentrons/protocol_engine/state/modules.py @@ -1268,7 +1268,10 @@ def convert_absorbance_reader_data_points( row = chr(ord("A") + i // 12) # Convert index to row (A-H) col = (i % 12) + 1 # Convert index to column (1-12) well_key = f"{row}{col}" - well_map[well_key] = value + truncated_value = float( + "{:.5}".format(str(value)) + ) # Truncate the returned value to the third decimal place + well_map[well_key] = truncated_value return well_map else: raise ValueError( diff --git a/api/src/opentrons/protocol_engine/state/update_types.py b/api/src/opentrons/protocol_engine/state/update_types.py index 567ba39144c..76f16dadfbe 100644 --- a/api/src/opentrons/protocol_engine/state/update_types.py +++ b/api/src/opentrons/protocol_engine/state/update_types.py @@ -299,6 +299,19 @@ class StateUpdate: liquid_class_loaded: LiquidClassLoadedUpdate | NoChangeType = NO_CHANGE + def append(self, other: Self) -> Self: + """Apply another `StateUpdate` "on top of" this one. + + This object is mutated in-place, taking values from `other`. + If an attribute in `other` is `NO_CHANGE`, the value in this object is kept. + """ + fields = dataclasses.fields(other) + for field in fields: + other_value = other.__dict__[field.name] + if other_value != NO_CHANGE: + self.__dict__[field.name] = other_value + return self + @classmethod def reduce(cls: typing.Type[Self], *args: Self) -> Self: """Fuse multiple state updates into a single one. @@ -306,19 +319,10 @@ def reduce(cls: typing.Type[Self], *args: Self) -> Self: State updates that are later in the parameter list are preferred to those that are earlier; NO_CHANGE is ignored. 
""" - fields = dataclasses.fields(cls) - changes_dicts = [ - { - field.name: update.__dict__[field.name] - for field in fields - if update.__dict__[field.name] != NO_CHANGE - } - for update in args - ] - changes = {} - for changes_dict in changes_dicts: - changes.update(changes_dict) - return cls(**changes) + accumulator = cls() + for arg in args: + accumulator.append(arg) + return accumulator # These convenience functions let the caller avoid the boilerplate of constructing a # complicated dataclass tree. diff --git a/api/tests/opentrons/protocol_engine/commands/robot/test_close_gripper_jaw.py b/api/tests/opentrons/protocol_engine/commands/robot/test_close_gripper_jaw.py new file mode 100644 index 00000000000..c5ccd4bf48d --- /dev/null +++ b/api/tests/opentrons/protocol_engine/commands/robot/test_close_gripper_jaw.py @@ -0,0 +1,28 @@ +"""Test robot.open-gripper-jaw commands.""" +from decoy import Decoy + +from opentrons.hardware_control import OT3HardwareControlAPI + +from opentrons.protocol_engine.commands.command import SuccessData +from opentrons.protocol_engine.commands.robot.close_gripper_jaw import ( + closeGripperJawParams, + closeGripperJawResult, + closeGripperJawImplementation, +) + + +async def test_close_gripper_jaw_implementation( + decoy: Decoy, + ot3_hardware_api: OT3HardwareControlAPI, +) -> None: + """Test the `robot.closeGripperJaw` implementation.""" + subject = closeGripperJawImplementation( + hardware_api=ot3_hardware_api, + ) + + params = closeGripperJawParams(force=10) + + result = await subject.execute(params=params) + + assert result == SuccessData(public=closeGripperJawResult()) + decoy.verify(await ot3_hardware_api.grip(force_newtons=10)) diff --git a/api/tests/opentrons/protocol_engine/commands/robot/test_open_gripper_jaw.py b/api/tests/opentrons/protocol_engine/commands/robot/test_open_gripper_jaw.py new file mode 100644 index 00000000000..6ded7932963 --- /dev/null +++ b/api/tests/opentrons/protocol_engine/commands/robot/test_open_gripper_jaw.py @@ -0,0 +1,28 @@ +"""Test robot.open-gripper-jaw commands.""" +from decoy import Decoy + +from opentrons.hardware_control import OT3HardwareControlAPI + +from opentrons.protocol_engine.commands.command import SuccessData +from opentrons.protocol_engine.commands.robot.open_gripper_jaw import ( + openGripperJawParams, + openGripperJawResult, + openGripperJawImplementation, +) + + +async def test_open_gripper_jaw_implementation( + decoy: Decoy, + ot3_hardware_api: OT3HardwareControlAPI, +) -> None: + """Test the `robot.openGripperJaw` implementation.""" + subject = openGripperJawImplementation( + hardware_api=ot3_hardware_api, + ) + + params = openGripperJawParams() + + result = await subject.execute(params=params) + + assert result == SuccessData(public=openGripperJawResult()) + decoy.verify(await ot3_hardware_api.home_gripper_jaw()) diff --git a/api/tests/opentrons/protocol_engine/commands/test_aspirate.py b/api/tests/opentrons/protocol_engine/commands/test_aspirate.py index 11078fb43cf..8e50d1825ae 100644 --- a/api/tests/opentrons/protocol_engine/commands/test_aspirate.py +++ b/api/tests/opentrons/protocol_engine/commands/test_aspirate.py @@ -12,7 +12,7 @@ from opentrons.protocol_engine.commands.pipetting_common import OverpressureError from opentrons.protocol_engine.commands.movement_common import StallOrCollisionError from opentrons.protocol_engine.state import update_types -from opentrons.types import MountType, Point +from opentrons.types import Point from opentrons.protocol_engine import ( 
LiquidHandlingWellLocation, WellOrigin, @@ -36,9 +36,9 @@ from opentrons.protocol_engine.resources.model_utils import ModelUtils from opentrons.protocol_engine.types import ( CurrentWell, - LoadedPipette, AspiratedFluid, FluidKind, + WellLocation, ) from opentrons.hardware_control import HardwareControlAPI from opentrons.protocol_engine.notes import CommandNoteAdder @@ -67,47 +67,50 @@ def subject( async def test_aspirate_implementation_no_prep( decoy: Decoy, state_view: StateView, - hardware_api: HardwareControlAPI, movement: MovementHandler, pipetting: PipettingHandler, subject: AspirateImplementation, mock_command_note_adder: CommandNoteAdder, ) -> None: """An Aspirate should have an execution implementation without preparing to aspirate.""" + pipette_id = "pipette-id" + labware_id = "labware-id" + well_name = "well-name" location = LiquidHandlingWellLocation( origin=WellOrigin.BOTTOM, offset=WellOffset(x=0, y=0, z=1) ) - - data = AspirateParams( - pipetteId="abc", - labwareId="123", - wellName="A3", + params = AspirateParams( + pipetteId=pipette_id, + labwareId=labware_id, + wellName=well_name, wellLocation=location, volume=50, flowRate=1.23, ) - decoy.when(pipetting.get_is_ready_to_aspirate(pipette_id="abc")).then_return(True) + decoy.when(pipetting.get_is_ready_to_aspirate(pipette_id=pipette_id)).then_return( + True + ) decoy.when( state_view.geometry.get_nozzles_per_well( - labware_id="123", - target_well_name="A3", - pipette_id="abc", + labware_id=labware_id, + target_well_name=well_name, + pipette_id=pipette_id, ) ).then_return(2) decoy.when( state_view.geometry.get_wells_covered_by_pipette_with_active_well( - "123", "A3", "abc" + labware_id, well_name, pipette_id ) - ).then_return(["A3", "A4"]) + ).then_return(["covered-well-1", "covered-well-2"]) decoy.when( await movement.move_to_well( - pipette_id="abc", - labware_id="123", - well_name="A3", + pipette_id=pipette_id, + labware_id=labware_id, + well_name=well_name, well_location=location, current_well=None, force_direct=False, @@ -119,30 +122,33 @@ async def test_aspirate_implementation_no_prep( decoy.when( await pipetting.aspirate_in_place( - pipette_id="abc", + pipette_id=pipette_id, volume=50, flow_rate=1.23, command_note_adder=mock_command_note_adder, ), ).then_return(50) - result = await subject.execute(data) + result = await subject.execute(params) assert result == SuccessData( public=AspirateResult(volume=50, position=DeckPoint(x=1, y=2, z=3)), state_update=update_types.StateUpdate( pipette_location=update_types.PipetteLocationUpdate( - pipette_id="abc", - new_location=update_types.Well(labware_id="123", well_name="A3"), + pipette_id=pipette_id, + new_location=update_types.Well( + labware_id=labware_id, well_name=well_name + ), new_deck_point=DeckPoint(x=1, y=2, z=3), ), liquid_operated=update_types.LiquidOperatedUpdate( - labware_id="123", - well_names=["A3", "A4"], + labware_id=labware_id, + well_names=["covered-well-1", "covered-well-2"], volume_added=-100, ), pipette_aspirated_fluid=update_types.PipetteAspiratedFluidUpdate( - pipette_id="abc", fluid=AspiratedFluid(kind=FluidKind.LIQUID, volume=50) + pipette_id=pipette_id, + fluid=AspiratedFluid(kind=FluidKind.LIQUID, volume=50), ), ), ) @@ -151,104 +157,112 @@ async def test_aspirate_implementation_no_prep( async def test_aspirate_implementation_with_prep( decoy: Decoy, state_view: StateView, - hardware_api: HardwareControlAPI, movement: MovementHandler, pipetting: PipettingHandler, mock_command_note_adder: CommandNoteAdder, subject: AspirateImplementation, ) -> None: 
"""An Aspirate should have an execution implementation with preparing to aspirate.""" + pipette_id = "pipette-id" + labware_id = "labware-id" + well_name = "well-name" location = LiquidHandlingWellLocation( origin=WellOrigin.BOTTOM, offset=WellOffset(x=0, y=0, z=1) ) - - data = AspirateParams( - pipetteId="abc", - labwareId="123", - wellName="A3", + volume = 50 + flow_rate = 1.23 + params = AspirateParams( + pipetteId=pipette_id, + labwareId=labware_id, + wellName=well_name, wellLocation=location, - volume=50, - flowRate=1.23, + volume=volume, + flowRate=flow_rate, ) - decoy.when(pipetting.get_is_ready_to_aspirate(pipette_id="abc")).then_return(False) - - decoy.when(state_view.pipettes.get(pipette_id="abc")).then_return( - LoadedPipette.construct( # type:ignore[call-arg] - mount=MountType.LEFT - ) + decoy.when(pipetting.get_is_ready_to_aspirate(pipette_id=pipette_id)).then_return( + False ) + decoy.when( state_view.geometry.get_nozzles_per_well( - labware_id="123", - target_well_name="A3", - pipette_id="abc", + labware_id=labware_id, + target_well_name=well_name, + pipette_id=pipette_id, ) ).then_return(2) decoy.when( state_view.geometry.get_wells_covered_by_pipette_with_active_well( - "123", "A3", "abc" + labware_id, well_name, pipette_id ) - ).then_return(["A3", "A4"]) + ).then_return(["covered-well-1", "covered-well-2"]) + + decoy.when( + await movement.move_to_well( + pipette_id=pipette_id, + labware_id=labware_id, + well_name=well_name, + well_location=WellLocation(origin=WellOrigin.TOP), + current_well=None, + force_direct=False, + minimum_z_height=None, + speed=None, + operation_volume=None, + ), + ).then_return(Point()) + decoy.when( await movement.move_to_well( - pipette_id="abc", - labware_id="123", - well_name="A3", + pipette_id=pipette_id, + labware_id=labware_id, + well_name=well_name, well_location=location, current_well=CurrentWell( - pipette_id="abc", - labware_id="123", - well_name="A3", + pipette_id=pipette_id, + labware_id=labware_id, + well_name=well_name, ), force_direct=False, minimum_z_height=None, speed=None, - operation_volume=-50, + operation_volume=-volume, ), ).then_return(Point(x=1, y=2, z=3)) decoy.when( await pipetting.aspirate_in_place( - pipette_id="abc", - volume=50, - flow_rate=1.23, + pipette_id=pipette_id, + volume=volume, + flow_rate=flow_rate, command_note_adder=mock_command_note_adder, ), - ).then_return(50) + ).then_return(volume) - result = await subject.execute(data) + result = await subject.execute(params) assert result == SuccessData( public=AspirateResult(volume=50, position=DeckPoint(x=1, y=2, z=3)), state_update=update_types.StateUpdate( pipette_location=update_types.PipetteLocationUpdate( - pipette_id="abc", - new_location=update_types.Well(labware_id="123", well_name="A3"), + pipette_id=pipette_id, + new_location=update_types.Well( + labware_id=labware_id, well_name=well_name + ), new_deck_point=DeckPoint(x=1, y=2, z=3), ), liquid_operated=update_types.LiquidOperatedUpdate( - labware_id="123", - well_names=["A3", "A4"], + labware_id=labware_id, + well_names=["covered-well-1", "covered-well-2"], volume_added=-100, ), pipette_aspirated_fluid=update_types.PipetteAspiratedFluidUpdate( - pipette_id="abc", fluid=AspiratedFluid(kind=FluidKind.LIQUID, volume=50) + pipette_id=pipette_id, + fluid=AspiratedFluid(kind=FluidKind.LIQUID, volume=50), ), ), ) - decoy.verify( - await movement.move_to_well( - pipette_id="abc", - labware_id="123", - well_name="A3", - well_location=LiquidHandlingWellLocation(origin=WellOrigin.TOP), - ), - await 
pipetting.prepare_for_aspirate(pipette_id="abc"), - ) - async def test_aspirate_raises_volume_error( decoy: Decoy, @@ -259,40 +273,44 @@ async def test_aspirate_raises_volume_error( subject: AspirateImplementation, ) -> None: """Should raise an assertion error for volume larger than working volume.""" + pipette_id = "pipette-id" + labware_id = "labware-id" + well_name = "well-name" location = LiquidHandlingWellLocation( origin=WellOrigin.BOTTOM, offset=WellOffset(x=0, y=0, z=1) ) - - data = AspirateParams( - pipetteId="abc", - labwareId="123", - wellName="A3", + params = AspirateParams( + pipetteId=pipette_id, + labwareId=labware_id, + wellName=well_name, wellLocation=location, volume=50, flowRate=1.23, ) - decoy.when(pipetting.get_is_ready_to_aspirate(pipette_id="abc")).then_return(True) + decoy.when(pipetting.get_is_ready_to_aspirate(pipette_id=pipette_id)).then_return( + True + ) decoy.when( state_view.geometry.get_nozzles_per_well( - labware_id="123", - target_well_name="A3", - pipette_id="abc", + labware_id=labware_id, + target_well_name=well_name, + pipette_id=pipette_id, ) ).then_return(2) decoy.when( state_view.geometry.get_wells_covered_by_pipette_with_active_well( - "123", "A3", "abc" + labware_id, well_name, pipette_id ) - ).then_return(["A3", "A4"]) + ).then_return(["covered-well-1", "covered-well-2"]) decoy.when( await movement.move_to_well( - pipette_id="abc", - labware_id="123", - well_name="A3", + pipette_id=pipette_id, + labware_id=labware_id, + well_name=well_name, well_location=location, current_well=None, force_direct=False, @@ -304,7 +322,7 @@ async def test_aspirate_raises_volume_error( decoy.when( await pipetting.aspirate_in_place( - pipette_id="abc", + pipette_id=pipette_id, volume=50, flow_rate=1.23, command_note_adder=mock_command_note_adder, @@ -312,7 +330,7 @@ async def test_aspirate_raises_volume_error( ).then_raise(AssertionError("blah blah")) with pytest.raises(AssertionError): - await subject.execute(data) + await subject.execute(params) async def test_overpressure_error( @@ -337,7 +355,7 @@ async def test_overpressure_error( error_id = "error-id" error_timestamp = datetime(year=2020, month=1, day=2) - data = AspirateParams( + params = AspirateParams( pipetteId=pipette_id, labwareId=labware_id, wellName=well_name, @@ -348,17 +366,17 @@ async def test_overpressure_error( decoy.when( state_view.geometry.get_nozzles_per_well( - labware_id="labware-id", - target_well_name="well-name", - pipette_id="pipette-id", + labware_id=labware_id, + target_well_name=well_name, + pipette_id=pipette_id, ) ).then_return(2) decoy.when( state_view.geometry.get_wells_covered_by_pipette_with_active_well( - "labware-id", "well-name", "pipette-id" + labware_id, well_name, pipette_id ) - ).then_return(["A3", "A4"]) + ).then_return(["covered-well-1", "covered-well-2"]) decoy.when(pipetting.get_is_ready_to_aspirate(pipette_id=pipette_id)).then_return( True @@ -390,7 +408,7 @@ async def test_overpressure_error( decoy.when(model_utils.generate_id()).then_return(error_id) decoy.when(model_utils.get_timestamp()).then_return(error_timestamp) - result = await subject.execute(data) + result = await subject.execute(params) assert result == DefinedErrorData( public=OverpressureError.construct( @@ -409,22 +427,13 @@ async def test_overpressure_error( ), liquid_operated=update_types.LiquidOperatedUpdate( labware_id=labware_id, - well_names=["A3", "A4"], + well_names=["covered-well-1", "covered-well-2"], volume_added=update_types.CLEAR, ), 
pipette_aspirated_fluid=update_types.PipetteUnknownFluidUpdate( pipette_id=pipette_id ), ), - state_update_if_false_positive=update_types.StateUpdate( - pipette_location=update_types.PipetteLocationUpdate( - pipette_id=pipette_id, - new_location=update_types.Well( - labware_id=labware_id, well_name=well_name - ), - new_deck_point=DeckPoint(x=position.x, y=position.y, z=position.z), - ), - ), ) @@ -438,15 +447,18 @@ async def test_aspirate_implementation_meniscus( mock_command_note_adder: CommandNoteAdder, ) -> None: """Aspirate should update WellVolumeOffset when called with WellOrigin.MENISCUS.""" + pipette_id = "pipette-id" + labware_id = "labware-id" + well_name = "well-name" location = LiquidHandlingWellLocation( origin=WellOrigin.MENISCUS, offset=WellOffset(x=0, y=0, z=-1), volumeOffset="operationVolume", ) - data = AspirateParams( - pipetteId="abc", - labwareId="123", - wellName="A3", + params = AspirateParams( + pipetteId=pipette_id, + labwareId=labware_id, + wellName=well_name, wellLocation=location, volume=50, flowRate=1.23, @@ -454,25 +466,27 @@ async def test_aspirate_implementation_meniscus( decoy.when( state_view.geometry.get_nozzles_per_well( - labware_id="123", - target_well_name="A3", - pipette_id="abc", + labware_id=labware_id, + target_well_name=well_name, + pipette_id=pipette_id, ) ).then_return(2) decoy.when( state_view.geometry.get_wells_covered_by_pipette_with_active_well( - "123", "A3", "abc" + labware_id, well_name, pipette_id ) - ).then_return(["A3", "A4"]) + ).then_return(["covered-well-1", "covered-well-2"]) - decoy.when(pipetting.get_is_ready_to_aspirate(pipette_id="abc")).then_return(True) + decoy.when(pipetting.get_is_ready_to_aspirate(pipette_id=pipette_id)).then_return( + True + ) decoy.when( await movement.move_to_well( - pipette_id="abc", - labware_id="123", - well_name="A3", + pipette_id=pipette_id, + labware_id=labware_id, + well_name=well_name, well_location=location, current_well=None, force_direct=False, @@ -484,36 +498,39 @@ async def test_aspirate_implementation_meniscus( decoy.when( await pipetting.aspirate_in_place( - pipette_id="abc", + pipette_id=pipette_id, volume=50, flow_rate=1.23, command_note_adder=mock_command_note_adder, ), ).then_return(50) - result = await subject.execute(data) + result = await subject.execute(params) assert result == SuccessData( public=AspirateResult(volume=50, position=DeckPoint(x=1, y=2, z=3)), state_update=update_types.StateUpdate( pipette_location=update_types.PipetteLocationUpdate( - pipette_id="abc", - new_location=update_types.Well(labware_id="123", well_name="A3"), + pipette_id=pipette_id, + new_location=update_types.Well( + labware_id=labware_id, well_name=well_name + ), new_deck_point=DeckPoint(x=1, y=2, z=3), ), liquid_operated=update_types.LiquidOperatedUpdate( - labware_id="123", - well_names=["A3", "A4"], + labware_id=labware_id, + well_names=["covered-well-1", "covered-well-2"], volume_added=-100, ), pipette_aspirated_fluid=update_types.PipetteAspiratedFluidUpdate( - pipette_id="abc", fluid=AspiratedFluid(kind=FluidKind.LIQUID, volume=50) + pipette_id=pipette_id, + fluid=AspiratedFluid(kind=FluidKind.LIQUID, volume=50), ), ), ) -async def test_stall_error( +async def test_stall_during_final_movement( decoy: Decoy, movement: MovementHandler, pipetting: PipettingHandler, @@ -521,7 +538,7 @@ async def test_stall_error( model_utils: ModelUtils, state_view: StateView, ) -> None: - """It should return an overpressure error if the hardware API indicates that.""" + """It should propagate a stall error that 
happens when moving to the final position.""" pipette_id = "pipette-id" labware_id = "labware-id" well_name = "well-name" @@ -535,7 +552,7 @@ async def test_stall_error( True ) - data = AspirateParams( + params = AspirateParams( pipetteId=pipette_id, labwareId=labware_id, wellName=well_name, @@ -561,7 +578,7 @@ async def test_stall_error( decoy.when(model_utils.generate_id()).then_return(error_id) decoy.when(model_utils.get_timestamp()).then_return(error_timestamp) - result = await subject.execute(data) + result = await subject.execute(params) assert result == DefinedErrorData( public=StallOrCollisionError.construct( @@ -571,3 +588,154 @@ async def test_stall_error( ), state_update=update_types.StateUpdate(pipette_location=update_types.CLEAR), ) + + +async def test_stall_during_preparation( + decoy: Decoy, + movement: MovementHandler, + pipetting: PipettingHandler, + subject: AspirateImplementation, + model_utils: ModelUtils, +) -> None: + """It should propagate a stall error that happens during the prepare-to-aspirate part.""" + pipette_id = "pipette-id" + labware_id = "labware-id" + well_name = "well-name" + well_location = LiquidHandlingWellLocation( + origin=WellOrigin.BOTTOM, offset=WellOffset(x=0, y=0, z=1) + ) + + error_id = "error-id" + error_timestamp = datetime(year=2020, month=1, day=2) + + params = AspirateParams( + pipetteId=pipette_id, + labwareId=labware_id, + wellName=well_name, + wellLocation=well_location, + volume=50, + flowRate=1.23, + ) + + decoy.when(pipetting.get_is_ready_to_aspirate(pipette_id=pipette_id)).then_return( + False + ) + + decoy.when( + await movement.move_to_well( + pipette_id=pipette_id, + labware_id=labware_id, + well_name=well_name, + well_location=WellLocation(origin=WellOrigin.TOP), + current_well=None, + force_direct=False, + minimum_z_height=None, + speed=None, + operation_volume=None, + ), + ).then_raise(StallOrCollisionDetectedError()) + decoy.when(model_utils.generate_id()).then_return(error_id) + decoy.when(model_utils.get_timestamp()).then_return(error_timestamp) + + result = await subject.execute(params) + assert result == DefinedErrorData( + public=StallOrCollisionError.construct( + id=error_id, createdAt=error_timestamp, wrappedErrors=[matchers.Anything()] + ), + state_update=update_types.StateUpdate( + pipette_location=update_types.CLEAR, + ), + state_update_if_false_positive=update_types.StateUpdate(), + ) + + +async def test_overpressure_during_preparation( + decoy: Decoy, + movement: MovementHandler, + pipetting: PipettingHandler, + subject: AspirateImplementation, + state_view: StateView, + model_utils: ModelUtils, +) -> None: + """It should propagate an overpressure error that happens during the prepare-to-aspirate part.""" + pipette_id = "pipette-id" + labware_id = "labware-id" + well_name = "well-name" + well_location = LiquidHandlingWellLocation( + origin=WellOrigin.BOTTOM, offset=WellOffset(x=0, y=0, z=1) + ) + + error_id = "error-id" + error_timestamp = datetime(year=2020, month=1, day=2) + + params = AspirateParams( + pipetteId=pipette_id, + labwareId=labware_id, + wellName=well_name, + wellLocation=well_location, + volume=50, + flowRate=1.23, + ) + + decoy.when(pipetting.get_is_ready_to_aspirate(pipette_id=pipette_id)).then_return( + False + ) + + retry_location = Point(1, 2, 3) + decoy.when( + state_view.geometry.get_well_position( + labware_id=labware_id, + well_name=well_name, + well_location=well_location, + operation_volume=-params.volume, + pipette_id=pipette_id, + ) + ).then_return(retry_location) + + prep_location = 
Point(4, 5, 6) + decoy.when( + await movement.move_to_well( + pipette_id=pipette_id, + labware_id=labware_id, + well_name=well_name, + well_location=WellLocation(origin=WellOrigin.TOP), + current_well=None, + force_direct=False, + minimum_z_height=None, + speed=None, + operation_volume=None, + ), + ).then_return(prep_location) + + decoy.when(await pipetting.prepare_for_aspirate(pipette_id)).then_raise( + PipetteOverpressureError() + ) + decoy.when(model_utils.generate_id()).then_return(error_id) + decoy.when(model_utils.get_timestamp()).then_return(error_timestamp) + + result = await subject.execute(params) + assert result == DefinedErrorData( + public=OverpressureError.construct( + id=error_id, + createdAt=error_timestamp, + wrappedErrors=[matchers.Anything()], + errorInfo={ + "retryLocation": (retry_location.x, retry_location.y, retry_location.z) + }, + ), + state_update=update_types.StateUpdate( + pipette_location=update_types.PipetteLocationUpdate( + pipette_id=pipette_id, + new_location=update_types.Well( + labware_id=labware_id, well_name=well_name + ), + new_deck_point=DeckPoint( + x=prep_location.x, y=prep_location.y, z=prep_location.z + ), + ), + pipette_aspirated_fluid=update_types.PipetteUnknownFluidUpdate( + pipette_id=pipette_id + ), + ), + state_update_if_false_positive=update_types.StateUpdate(), + ) diff --git a/api/tests/opentrons/protocol_engine/commands/unsafe/test_ungrip_labware.py b/api/tests/opentrons/protocol_engine/commands/unsafe/test_ungrip_labware.py index a4eae34a08d..bcd77093abf 100644 --- a/api/tests/opentrons/protocol_engine/commands/unsafe/test_ungrip_labware.py +++ b/api/tests/opentrons/protocol_engine/commands/unsafe/test_ungrip_labware.py @@ -1,6 +1,7 @@ """Test update-position-estimator commands.""" from decoy import Decoy +from opentrons.hardware_control.types import Axis from opentrons.protocol_engine.commands.unsafe.unsafe_ungrip_labware import ( UnsafeUngripLabwareParams, UnsafeUngripLabwareResult, @@ -25,7 +26,7 @@ async def test_ungrip_labware_implementation( assert result == SuccessData(public=UnsafeUngripLabwareResult()) decoy.verify( - await ot3_hardware_api.ungrip(), + await ot3_hardware_api.home([Axis.G]), ) diff --git a/api/tests/opentrons/protocol_engine/state/test_command_state.py b/api/tests/opentrons/protocol_engine/state/test_command_state.py index fde0d66e654..c52cd8ca74d 100644 --- a/api/tests/opentrons/protocol_engine/state/test_command_state.py +++ b/api/tests/opentrons/protocol_engine/state/test_command_state.py @@ -30,6 +30,7 @@ from opentrons.protocol_engine.state.commands import ( CommandStore, CommandView, + CommandErrorSlice, ) from opentrons.protocol_engine.state.config import Config from opentrons.protocol_engine.state.update_types import StateUpdate @@ -193,7 +194,7 @@ def test_command_failure(error_recovery_type: ErrorRecoveryType) -> None: ) assert subject_view.get("command-id") == expected_failed_command - assert subject.state.failed_command_errors == [expected_error_occurrence] + assert subject_view.get_all_errors() == [expected_error_occurrence] def test_command_failure_clears_queues() -> None: @@ -255,7 +256,7 @@ def test_command_failure_clears_queues() -> None: assert subject_view.get_running_command_id() is None assert subject_view.get_queue_ids() == OrderedSet() assert subject_view.get_next_to_execute() is None - assert subject.state.failed_command_errors == [expected_error_occurance] + assert subject_view.get_all_errors() == [expected_error_occurance] def test_setup_command_failure_only_clears_setup_command_queue() 
-> None: @@ -555,7 +556,7 @@ def test_door_during_error_recovery() -> None: subject.handle_action(play) assert subject_view.get_status() == EngineStatus.AWAITING_RECOVERY assert subject_view.get_next_to_execute() == "command-id-2" - assert subject.state.failed_command_errors == [expected_error_occurance] + assert subject_view.get_all_errors() == [expected_error_occurance] @pytest.mark.parametrize("close_door_before_queueing", [False, True]) @@ -732,7 +733,7 @@ def test_error_recovery_type_tracking() -> None: id="c2-error", createdAt=datetime(year=2023, month=3, day=3), error=exception ) - assert subject.state.failed_command_errors == [ + assert view.get_all_errors() == [ error_occurrence_1, error_occurrence_2, ] @@ -1100,3 +1101,94 @@ def test_get_state_update_for_false_positive() -> None: subject.handle_action(resume_from_recovery) assert subject_view.get_state_update_for_false_positive() == empty_state_update + + +def test_get_errors_slice_empty() -> None: + """It should return an empty error list.""" + subject = CommandStore( + config=_make_config(), + error_recovery_policy=_placeholder_error_recovery_policy, + is_door_open=False, + ) + subject_view = CommandView(subject.state) + result = subject_view.get_errors_slice(cursor=0, length=2) + + assert result == CommandErrorSlice(commands_errors=[], cursor=0, total_length=0) + + +def test_get_errors_slice() -> None: + """It should return a slice of all command errors.""" + subject = CommandStore( + config=_make_config(), + error_recovery_policy=_placeholder_error_recovery_policy, + is_door_open=False, + ) + + subject_view = CommandView(subject.state) + + queue_1 = actions.QueueCommandAction( + request=commands.WaitForResumeCreate( + params=commands.WaitForResumeParams(), key="command-key-1" + ), + request_hash=None, + created_at=datetime(year=2021, month=1, day=1), + command_id="command-id-1", + ) + subject.handle_action(queue_1) + queue_2_setup = actions.QueueCommandAction( + request=commands.WaitForResumeCreate( + params=commands.WaitForResumeParams(), + intent=commands.CommandIntent.SETUP, + key="command-key-2", + ), + request_hash=None, + created_at=datetime(year=2021, month=1, day=1), + command_id="command-id-2", + ) + subject.handle_action(queue_2_setup) + queue_3_setup = actions.QueueCommandAction( + request=commands.WaitForResumeCreate( + params=commands.WaitForResumeParams(), + intent=commands.CommandIntent.SETUP, + key="command-key-3", + ), + request_hash=None, + created_at=datetime(year=2021, month=1, day=1), + command_id="command-id-3", + ) + subject.handle_action(queue_3_setup) + + run_2_setup = actions.RunCommandAction( + command_id="command-id-2", + started_at=datetime(year=2022, month=2, day=2), + ) + subject.handle_action(run_2_setup) + fail_2_setup = actions.FailCommandAction( + command_id="command-id-2", + running_command=subject_view.get("command-id-2"), + error_id="error-id", + failed_at=datetime(year=2023, month=3, day=3), + error=errors.ProtocolEngineError(message="oh no"), + notes=[], + type=ErrorRecoveryType.CONTINUE_WITH_ERROR, + ) + subject.handle_action(fail_2_setup) + + result = subject_view.get_errors_slice(cursor=1, length=3) + + assert result == CommandErrorSlice( + [ + ErrorOccurrence( + id="error-id", + createdAt=datetime(2023, 3, 3, 0, 0), + isDefined=False, + errorType="ProtocolEngineError", + errorCode="4000", + detail="oh no", + errorInfo={}, + wrappedErrors=[], + ) + ], + cursor=0, + total_length=1, + ) diff --git a/api/tests/opentrons/protocol_engine/state/test_command_store_old.py 
b/api/tests/opentrons/protocol_engine/state/test_command_store_old.py index d5f171b7ea9..881719ba7ad 100644 --- a/api/tests/opentrons/protocol_engine/state/test_command_store_old.py +++ b/api/tests/opentrons/protocol_engine/state/test_command_store_old.py @@ -333,7 +333,6 @@ def test_command_store_handles_pause_action(pause_source: PauseSource) -> None: recovery_target=None, latest_protocol_command_hash=None, stopped_by_estop=False, - failed_command_errors=[], error_recovery_policy=matchers.Anything(), has_entered_error_recovery=False, ) @@ -363,7 +362,6 @@ def test_command_store_handles_play_action(pause_source: PauseSource) -> None: run_started_at=datetime(year=2021, month=1, day=1), latest_protocol_command_hash=None, stopped_by_estop=False, - failed_command_errors=[], error_recovery_policy=matchers.Anything(), has_entered_error_recovery=False, ) @@ -398,7 +396,6 @@ def test_command_store_handles_finish_action() -> None: run_started_at=datetime(year=2021, month=1, day=1), latest_protocol_command_hash=None, stopped_by_estop=False, - failed_command_errors=[], error_recovery_policy=matchers.Anything(), has_entered_error_recovery=False, ) @@ -453,7 +450,6 @@ def test_command_store_handles_stop_action( run_started_at=datetime(year=2021, month=1, day=1), latest_protocol_command_hash=None, stopped_by_estop=from_estop, - failed_command_errors=[], error_recovery_policy=matchers.Anything(), has_entered_error_recovery=False, ) @@ -491,7 +487,6 @@ def test_command_store_handles_stop_action_when_awaiting_recovery() -> None: run_started_at=datetime(year=2021, month=1, day=1), latest_protocol_command_hash=None, stopped_by_estop=False, - failed_command_errors=[], error_recovery_policy=matchers.Anything(), has_entered_error_recovery=False, ) @@ -525,7 +520,6 @@ def test_command_store_cannot_restart_after_should_stop() -> None: run_started_at=None, latest_protocol_command_hash=None, stopped_by_estop=False, - failed_command_errors=[], error_recovery_policy=matchers.Anything(), has_entered_error_recovery=False, ) @@ -672,7 +666,6 @@ def test_command_store_wraps_unknown_errors() -> None: recovery_target=None, latest_protocol_command_hash=None, stopped_by_estop=False, - failed_command_errors=[], error_recovery_policy=matchers.Anything(), has_entered_error_recovery=False, ) @@ -742,7 +735,6 @@ def __init__(self, message: str) -> None: run_started_at=None, latest_protocol_command_hash=None, stopped_by_estop=False, - failed_command_errors=[], error_recovery_policy=matchers.Anything(), has_entered_error_recovery=False, ) @@ -778,7 +770,6 @@ def test_command_store_ignores_stop_after_graceful_finish() -> None: run_started_at=datetime(year=2021, month=1, day=1), latest_protocol_command_hash=None, stopped_by_estop=False, - failed_command_errors=[], error_recovery_policy=matchers.Anything(), has_entered_error_recovery=False, ) @@ -814,7 +805,6 @@ def test_command_store_ignores_finish_after_non_graceful_stop() -> None: run_started_at=datetime(year=2021, month=1, day=1), latest_protocol_command_hash=None, stopped_by_estop=False, - failed_command_errors=[], error_recovery_policy=matchers.Anything(), has_entered_error_recovery=False, ) @@ -850,7 +840,6 @@ def test_handles_hardware_stopped() -> None: run_started_at=None, latest_protocol_command_hash=None, stopped_by_estop=False, - failed_command_errors=[], error_recovery_policy=matchers.Anything(), has_entered_error_recovery=False, ) diff --git a/api/tests/opentrons/protocol_engine/state/test_command_view_old.py 
b/api/tests/opentrons/protocol_engine/state/test_command_view_old.py index f7b1d6cd31f..0cbef9cf474 100644 --- a/api/tests/opentrons/protocol_engine/state/test_command_view_old.py +++ b/api/tests/opentrons/protocol_engine/state/test_command_view_old.py @@ -28,19 +28,17 @@ CommandState, CommandView, CommandSlice, - CommandErrorSlice, CommandPointer, RunResult, QueueStatus, ) -from opentrons.protocol_engine.state.command_history import CommandEntry +from opentrons.protocol_engine.state.command_history import CommandEntry, CommandHistory from opentrons.protocol_engine.errors import ProtocolCommandFailedError, ErrorOccurrence from opentrons_shared_data.errors.codes import ErrorCodes -from opentrons.protocol_engine.state.command_history import CommandHistory from opentrons.protocol_engine.state.update_types import StateUpdate from .command_fixtures import ( @@ -77,7 +75,6 @@ def get_command_view( # noqa: C901 finish_error: Optional[errors.ErrorOccurrence] = None, commands: Sequence[cmd.Command] = (), latest_command_hash: Optional[str] = None, - failed_command_errors: Optional[List[ErrorOccurrence]] = None, has_entered_error_recovery: bool = False, ) -> CommandView: """Get a command view test subject.""" @@ -121,7 +118,6 @@ def get_command_view( # noqa: C901 run_started_at=run_started_at, latest_protocol_command_hash=latest_command_hash, stopped_by_estop=False, - failed_command_errors=failed_command_errors or [], has_entered_error_recovery=has_entered_error_recovery, error_recovery_policy=_placeholder_error_recovery_policy, ) @@ -1031,42 +1027,6 @@ def test_get_slice_default_cursor_running() -> None: ) -def test_get_errors_slice_empty() -> None: - """It should return a slice from the tail if no current command.""" - subject = get_command_view(failed_command_errors=[]) - result = subject.get_errors_slice(cursor=0, length=2) - - assert result == CommandErrorSlice(commands_errors=[], cursor=0, total_length=0) - - -def test_get_errors_slice() -> None: - """It should return a slice of all command errors.""" - error_1 = ErrorOccurrence.construct(id="error-id-1") # type: ignore[call-arg] - error_2 = ErrorOccurrence.construct(id="error-id-2") # type: ignore[call-arg] - error_3 = ErrorOccurrence.construct(id="error-id-3") # type: ignore[call-arg] - error_4 = ErrorOccurrence.construct(id="error-id-4") # type: ignore[call-arg] - - subject = get_command_view( - failed_command_errors=[error_1, error_2, error_3, error_4] - ) - - result = subject.get_errors_slice(cursor=1, length=3) - - assert result == CommandErrorSlice( - commands_errors=[error_2, error_3, error_4], - cursor=1, - total_length=4, - ) - - result = subject.get_errors_slice(cursor=-3, length=10) - - assert result == CommandErrorSlice( - commands_errors=[error_1, error_2, error_3, error_4], - cursor=0, - total_length=4, - ) - - def test_get_slice_without_fixit() -> None: """It should select a cursor based on the running command, if present.""" command_1 = create_succeeded_command(command_id="command-id-1") diff --git a/api/tests/opentrons/protocol_engine/state/test_update_types.py b/api/tests/opentrons/protocol_engine/state/test_update_types.py new file mode 100644 index 00000000000..741df813e19 --- /dev/null +++ b/api/tests/opentrons/protocol_engine/state/test_update_types.py @@ -0,0 +1,75 @@ +"""Unit tests for the utilities in `update_types`.""" + + +from opentrons.protocol_engine.state import update_types + + +def test_append() -> None: + """Test `StateUpdate.append()`.""" + state_update = update_types.StateUpdate( + 
        absorbance_reader_lid=update_types.AbsorbanceReaderLidUpdate(
+            module_id="module_id", is_lid_on=True
+        )
+    )
+
+    # Populating a new field should leave the original ones unchanged.
+    result = state_update.append(
+        update_types.StateUpdate(pipette_location=update_types.CLEAR)
+    )
+    assert result is state_update
+    assert state_update.absorbance_reader_lid == update_types.AbsorbanceReaderLidUpdate(
+        module_id="module_id", is_lid_on=True
+    )
+    assert state_update.pipette_location == update_types.CLEAR
+
+    # Populating a field that's already been populated should overwrite it.
+    result = state_update.append(
+        update_types.StateUpdate(
+            absorbance_reader_lid=update_types.AbsorbanceReaderLidUpdate(
+                module_id="module_id", is_lid_on=False
+            )
+        )
+    )
+    assert result is state_update
+    assert state_update.absorbance_reader_lid == update_types.AbsorbanceReaderLidUpdate(
+        module_id="module_id", is_lid_on=False
+    )
+    assert state_update.pipette_location == update_types.CLEAR
+
+
+def test_reduce() -> None:
+    """Test `StateUpdate.reduce()`."""
+    assert update_types.StateUpdate.reduce() == update_types.StateUpdate()
+
+    # It should union all the set fields together.
+    assert update_types.StateUpdate.reduce(
+        update_types.StateUpdate(
+            absorbance_reader_lid=update_types.AbsorbanceReaderLidUpdate(
+                module_id="module_id", is_lid_on=True
+            )
+        ),
+        update_types.StateUpdate(pipette_location=update_types.CLEAR),
+    ) == update_types.StateUpdate(
+        absorbance_reader_lid=update_types.AbsorbanceReaderLidUpdate(
+            module_id="module_id", is_lid_on=True
+        ),
+        pipette_location=update_types.CLEAR,
+    )
+
+    # When one field appears multiple times, the last write wins.
+    assert update_types.StateUpdate.reduce(
+        update_types.StateUpdate(
+            absorbance_reader_lid=update_types.AbsorbanceReaderLidUpdate(
+                module_id="module_id", is_lid_on=True
+            )
+        ),
+        update_types.StateUpdate(
+            absorbance_reader_lid=update_types.AbsorbanceReaderLidUpdate(
+                module_id="module_id", is_lid_on=False
+            )
+        ),
+    ) == update_types.StateUpdate(
+        absorbance_reader_lid=update_types.AbsorbanceReaderLidUpdate(
+            module_id="module_id", is_lid_on=False
+        )
+    )
diff --git a/app-shell/build/release-notes-internal.md b/app-shell/build/release-notes-internal.md
index be1008ec824..2a2b46ebb7c 100644
--- a/app-shell/build/release-notes-internal.md
+++ b/app-shell/build/release-notes-internal.md
@@ -1,6 +1,10 @@
For more details about this release, please see the full [technical changelog][].
[technical change log]: https://github.com/Opentrons/opentrons/releases
+## Internal Release 2.3.0-alpha.0
+
+This internal release, pulled from the `edge` branch, contains features being developed for evo tip functionality. It's for internal testing only.
+
## Internal Release 2.2.0-alpha.1
This internal release, pulled from the `edge` branch, contains features being developed for 8.2.0. It's for internal testing only.
diff --git a/app-shell/build/release-notes.md b/app-shell/build/release-notes.md
index a093e29e6ed..9b9231e9709 100644
--- a/app-shell/build/release-notes.md
+++ b/app-shell/build/release-notes.md
@@ -30,6 +30,10 @@ Welcome to the v8.2.0 release of the Opentrons App! This release adds support fo
- Fixed an app crash when performing certain error recovery steps with Python API version 2.15 protocols.
+### Known Issues
+
+- If you attach an Absorbance Plate Reader to _any_ Flex on your local network, you must update all copies of the Opentrons App on the same network to at least v8.1.0.
+
---
## Opentrons App Changes in 8.1.0
diff --git a/app/src/assets/images/magnetic_block_gen_1@3x.png b/app/src/assets/images/magnetic_block_gen_1@3x.png
new file mode 100644
index 00000000000..860966e07a3
Binary files /dev/null and b/app/src/assets/images/magnetic_block_gen_1@3x.png differ
diff --git a/app/src/assets/localization/en/app_settings.json b/app/src/assets/localization/en/app_settings.json
index a5edc2c727f..f73ac990fc6 100644
--- a/app/src/assets/localization/en/app_settings.json
+++ b/app/src/assets/localization/en/app_settings.json
@@ -41,7 +41,7 @@
"enable_dev_tools_description": "Enabling this setting opens Developer Tools on app launch, enables additional logging and gives access to feature flags.",
"error_boundary_desktop_app_description": "You need to reload the app. Contact support with the following error message:",
"error_boundary_title": "An unknown error has occurred",
- "error_recovery_mode": "Error Recovery Mode",
+ "error_recovery_mode": "Recovery mode",
"error_recovery_mode_description": "Pause on protocol errors instead of canceling the run.",
"feature_flags": "Feature Flags",
"general": "General",
diff --git a/app/src/assets/localization/en/error_recovery.json b/app/src/assets/localization/en/error_recovery.json
index 392c9c694c3..b46e276c48b 100644
--- a/app/src/assets/localization/en/error_recovery.json
+++ b/app/src/assets/localization/en/error_recovery.json
@@ -43,7 +43,7 @@
"ignore_similar_errors_later_in_run": "Ignore similar errors later in the run?",
"inspect_the_robot": "First, inspect the robot to ensure it's prepared to continue the run from the next step.Then, close the robot door before proceeding.",
"labware_released_from_current_height": "The labware will be released from its current height.",
- "launch_recovery_mode": "Launch Recovery Mode",
+ "launch_recovery_mode": "Launch recovery mode",
"manually_fill_liquid_in_well": "Manually fill liquid in well {{well}}",
"manually_fill_well_and_skip": "Manually fill well and skip to next step",
"manually_move_lw_and_skip": "Manually move labware and skip to next step",
@@ -60,7 +60,7 @@
"proceed_to_cancel": "Proceed to cancel",
"proceed_to_tip_selection": "Proceed to tip selection",
"recovery_action_failed": "{{action}} failed",
- "recovery_mode": "Recovery Mode",
+ "recovery_mode": "Recovery mode",
"recovery_mode_explanation": "Recovery Mode provides you with guided and manual controls for handling errors at runtime.
You can make changes to ensure the step in progress when the error occurred can be completed or choose to cancel the protocol. When changes are made and no subsequent errors are detected, the method completes. Depending on the conditions that caused the error, you will only be provided with appropriate options.", "release": "Release", "release_labware_from_gripper": "Release labware from gripper", diff --git a/app/src/assets/localization/en/quick_transfer.json b/app/src/assets/localization/en/quick_transfer.json index c986da098c1..2ebdb5699b8 100644 --- a/app/src/assets/localization/en/quick_transfer.json +++ b/app/src/assets/localization/en/quick_transfer.json @@ -5,7 +5,7 @@ "advanced_setting_disabled": "Advanced setting disabled for this transfer", "advanced_settings": "Advanced settings", "air_gap": "Air gap", - "air_gap_before_aspirating": "Air gap before aspirating", + "air_gap_after_aspirating": "Air gap after aspirating", "air_gap_before_dispensing": "Air gap before dispensing", "air_gap_capacity_error": "The tip is too full to add an air gap.", "air_gap_value": "{{volume}} µL", diff --git a/app/src/assets/localization/en/shared.json b/app/src/assets/localization/en/shared.json index 0b580a612e8..f4e542e761b 100644 --- a/app/src/assets/localization/en/shared.json +++ b/app/src/assets/localization/en/shared.json @@ -11,6 +11,7 @@ "change_robot": "Change robot", "clear_data": "clear data", "close": "close", + "closed": "closed", "close_robot_door": "Close the robot door before starting the run.", "confirm": "Confirm", "confirm_placement": "Confirm placement", diff --git a/app/src/assets/localization/zh/quick_transfer.json b/app/src/assets/localization/zh/quick_transfer.json index f57d7315651..5ecce11bb9c 100644 --- a/app/src/assets/localization/zh/quick_transfer.json +++ b/app/src/assets/localization/zh/quick_transfer.json @@ -4,7 +4,6 @@ "add_or_remove": "添加或移除", "advanced_setting_disabled": "此移液的高级设置已禁用", "advanced_settings": "高级设置", - "air_gap_before_aspirating": "在吸液前设置空气间隙", "air_gap_before_dispensing": "在分液前设置空气间隙", "air_gap_capacity_error": "移液器空间已满,无法添加空气间隙。", "air_gap_value": "{{volume}} µL", diff --git a/app/src/local-resources/modules/utils/getModuleImage.ts b/app/src/local-resources/modules/utils/getModuleImage.ts index f5db2094dcc..b68d5aa8562 100644 --- a/app/src/local-resources/modules/utils/getModuleImage.ts +++ b/app/src/local-resources/modules/utils/getModuleImage.ts @@ -8,6 +8,7 @@ import thermoModuleGen1HighRes from '/app/assets/images/modules/thermocyclerModu import heaterShakerModuleHighRes from '/app/assets/images/modules/heaterShakerModuleV1@3x.png' import thermoModuleGen2 from '/app/assets/images/thermocycler_gen_2_closed.png' import magneticBlockGen1 from '/app/assets/images/magnetic_block_gen_1.png' +import magneticBlockGen1HighRes from '/app/assets/images/magnetic_block_gen_1@3x.png' import absorbanceReader from '/app/assets/images/opentrons_plate_reader.png' import type { ModuleModel } from '@opentrons/shared-data' @@ -30,7 +31,7 @@ export function getModuleImage( case 'thermocyclerModuleV2': return thermoModuleGen2 case 'magneticBlockV1': - return magneticBlockGen1 + return highRes ? 
magneticBlockGen1HighRes : magneticBlockGen1 case 'absorbanceReaderV1': return absorbanceReader default: diff --git a/app/src/molecules/InterventionModal/index.tsx b/app/src/molecules/InterventionModal/index.tsx index 625b478a8e9..679d3b3d1f8 100644 --- a/app/src/molecules/InterventionModal/index.tsx +++ b/app/src/molecules/InterventionModal/index.tsx @@ -23,7 +23,6 @@ import { import { getIsOnDevice } from '/app/redux/config' -import type { IconName } from '@opentrons/components' import { ModalContentOneColSimpleButtons } from './ModalContentOneColSimpleButtons' import { TwoColumn } from './TwoColumn' import { OneColumn } from './OneColumn' @@ -32,6 +31,10 @@ import { ModalContentMixed } from './ModalContentMixed' import { DescriptionContent } from './DescriptionContent' import { DeckMapContent } from './DeckMapContent' import { CategorizedStepContent } from './CategorizedStepContent' + +import type { FlattenSimpleInterpolation } from 'styled-components' +import type { IconName } from '@opentrons/components' + export { ModalContentOneColSimpleButtons, TwoColumn, @@ -122,6 +125,8 @@ export interface InterventionModalProps { type?: ModalType /** optional icon name */ iconName?: IconName | null | undefined + /* Optional icon size override. */ + iconSize?: string /** modal contents */ children: React.ReactNode } @@ -133,6 +138,7 @@ export function InterventionModal({ iconName, iconHeading, children, + iconSize, }: InterventionModalProps): JSX.Element { const modalType = type ?? 'intervention-required' const headerColor = @@ -166,7 +172,7 @@ export function InterventionModal({ {titleHeading} {iconName != null ? ( - + ) : null} {iconHeading != null ? iconHeading : null} @@ -178,15 +184,15 @@ export function InterventionModal({ ) } -const ICON_STYLE = css` - width: ${SPACING.spacing16}; - height: ${SPACING.spacing16}; +const buildIconStyle = ( + iconSize: string | undefined +): FlattenSimpleInterpolation => css` + width: ${iconSize ?? SPACING.spacing16}; + height: ${iconSize ?? SPACING.spacing16}; margin: ${SPACING.spacing4}; cursor: ${CURSOR_POINTER}; @media (${RESPONSIVENESS.touchscreenMediaQuerySpecs}) { - width: ${SPACING.spacing32}; - height: ${SPACING.spacing32}; margin: ${SPACING.spacing12}; } ` diff --git a/app/src/molecules/WizardRequiredEquipmentList/index.tsx b/app/src/molecules/WizardRequiredEquipmentList/index.tsx index 69c5cd02893..f6f7457a71a 100644 --- a/app/src/molecules/WizardRequiredEquipmentList/index.tsx +++ b/app/src/molecules/WizardRequiredEquipmentList/index.tsx @@ -155,7 +155,7 @@ function RequiredEquipmentCard(props: RequiredEquipmentCardProps): JSX.Element { ) : null} diff --git a/app/src/organisms/Desktop/Devices/HistoricalProtocolRun.tsx b/app/src/organisms/Desktop/Devices/HistoricalProtocolRun.tsx index 1ec488cfc45..b9e05314f80 100644 --- a/app/src/organisms/Desktop/Devices/HistoricalProtocolRun.tsx +++ b/app/src/organisms/Desktop/Devices/HistoricalProtocolRun.tsx @@ -41,12 +41,15 @@ export function HistoricalProtocolRun( const { t } = useTranslation('run_details') const { run, protocolName, robotIsBusy, robotName, protocolKey } = props const [drawerOpen, setDrawerOpen] = useState(false) - const countRunDataFiles = + let countRunDataFiles = 'runTimeParameters' in run ? 
run?.runTimeParameters.filter( parameter => parameter.type === 'csv_file' ).length : 0 + if ('outputFileIds' in run) { + countRunDataFiles += run.outputFileIds.length + } const runStatus = run.status const runDisplayName = formatTimestamp(run.createdAt) let duration = EMPTY_TIMESTAMP diff --git a/app/src/organisms/Desktop/Devices/ProtocolRun/ProtocolRunHeader/RunHeaderContent/ActionButton/hooks/useActionButtonProperties.ts b/app/src/organisms/Desktop/Devices/ProtocolRun/ProtocolRunHeader/RunHeaderContent/ActionButton/hooks/useActionButtonProperties.ts index 4c5486eb6e7..f9ea87080ca 100644 --- a/app/src/organisms/Desktop/Devices/ProtocolRun/ProtocolRunHeader/RunHeaderContent/ActionButton/hooks/useActionButtonProperties.ts +++ b/app/src/organisms/Desktop/Devices/ProtocolRun/ProtocolRunHeader/RunHeaderContent/ActionButton/hooks/useActionButtonProperties.ts @@ -132,6 +132,7 @@ export function useActionButtonProperties({ handleButtonClick = () => { isResetRunLoadingRef.current = true reset() + runHeaderModalContainerUtils.dropTipUtils.resetTipStatus() trackEvent({ name: ANALYTICS_PROTOCOL_PROCEED_TO_RUN, properties: { sourceLocation: 'RunRecordDetail', robotSerialNumber }, diff --git a/app/src/organisms/Desktop/Devices/ProtocolRun/ProtocolRunHeader/RunHeaderModalContainer/hooks/useRunHeaderDropTip.ts b/app/src/organisms/Desktop/Devices/ProtocolRun/ProtocolRunHeader/RunHeaderModalContainer/hooks/useRunHeaderDropTip.ts index 687b0d404c5..82d9c30c84b 100644 --- a/app/src/organisms/Desktop/Devices/ProtocolRun/ProtocolRunHeader/RunHeaderModalContainer/hooks/useRunHeaderDropTip.ts +++ b/app/src/organisms/Desktop/Devices/ProtocolRun/ProtocolRunHeader/RunHeaderModalContainer/hooks/useRunHeaderDropTip.ts @@ -16,7 +16,10 @@ import { useTipAttachmentStatus } from '/app/resources/instruments' import type { RobotType } from '@opentrons/shared-data' import type { Run, RunStatus } from '@opentrons/api-client' -import type { PipetteWithTip } from '/app/resources/instruments' +import type { + PipetteWithTip, + TipAttachmentStatusResult, +} from '/app/resources/instruments' import type { DropTipWizardFlowsProps } from '/app/organisms/DropTipWizardFlows' import type { UseProtocolDropTipModalResult } from '../modals' import type { PipetteDetails } from '/app/resources/maintenance_runs' @@ -35,6 +38,7 @@ export interface UseRunHeaderDropTipParams { export interface UseRunHeaderDropTipResult { dropTipModalUtils: UseProtocolDropTipModalResult dropTipWizardUtils: RunHeaderDropTipWizProps + resetTipStatus: TipAttachmentStatusResult['resetTipStatus'] } // Handles all the tip related logic during a protocol run on the desktop app. @@ -104,11 +108,9 @@ export function useRunHeaderDropTip({ { includeFixitCommands: false, pageLength: 1, - cursor: null, }, { enabled: isTerminalRunStatus(runStatus) } ) - // Manage tip checking useEffect(() => { // If a user begins a new run without navigating away from the run page, reset tip status. @@ -120,7 +122,9 @@ export function useRunHeaderDropTip({ // have to do it here if done during Error Recovery. 
else if ( runSummaryNoFixit != null && - !lastRunCommandPromptedErrorRecovery(runSummaryNoFixit) + runSummaryNoFixit.length > 0 && + !lastRunCommandPromptedErrorRecovery(runSummaryNoFixit) && + isTerminalRunStatus(runStatus) ) { void determineTipStatus() } @@ -143,7 +147,11 @@ export function useRunHeaderDropTip({ } }, [runStatus, isRunCurrent, enteredER, initialPipettesWithTipsCount]) - return { dropTipModalUtils, dropTipWizardUtils: buildDTWizUtils() } + return { + dropTipModalUtils, + dropTipWizardUtils: buildDTWizUtils(), + resetTipStatus, + } } // TODO(jh, 09-12-24): Consolidate this with the same utility that exists elsewhere. diff --git a/app/src/organisms/Desktop/Devices/RobotSettings/AdvancedTab/__tests__/EnableErrorRecoveryMode.test.tsx b/app/src/organisms/Desktop/Devices/RobotSettings/AdvancedTab/__tests__/EnableErrorRecoveryMode.test.tsx index a2d4824951f..9406e38f768 100644 --- a/app/src/organisms/Desktop/Devices/RobotSettings/AdvancedTab/__tests__/EnableErrorRecoveryMode.test.tsx +++ b/app/src/organisms/Desktop/Devices/RobotSettings/AdvancedTab/__tests__/EnableErrorRecoveryMode.test.tsx @@ -32,7 +32,7 @@ describe('EnableErrorRecoveryMode', () => { it('should render text and toggle button', () => { render(props) - screen.getByText('Error Recovery Mode') + screen.getByText('Recovery mode') screen.getByText('Pause on protocol errors instead of canceling the run.') expect( screen.getByLabelText('enable_error_recovery_mode') diff --git a/app/src/organisms/Desktop/Devices/hooks/useDownloadRunLog.ts b/app/src/organisms/Desktop/Devices/hooks/useDownloadRunLog.ts index 61acbfb12c4..6e07d5bbfab 100644 --- a/app/src/organisms/Desktop/Devices/hooks/useDownloadRunLog.ts +++ b/app/src/organisms/Desktop/Devices/hooks/useDownloadRunLog.ts @@ -27,7 +27,6 @@ export function useDownloadRunLog( if (host == null) return // first getCommands to get total length of commands getCommands(host, runId, { - cursor: null, pageLength: 0, includeFixitCommands: true, }) diff --git a/app/src/organisms/Desktop/RunProgressMeter/__tests__/RunProgressMeter.test.tsx b/app/src/organisms/Desktop/RunProgressMeter/__tests__/RunProgressMeter.test.tsx index 3618b0ce586..928bd2572a9 100644 --- a/app/src/organisms/Desktop/RunProgressMeter/__tests__/RunProgressMeter.test.tsx +++ b/app/src/organisms/Desktop/RunProgressMeter/__tests__/RunProgressMeter.test.tsx @@ -77,7 +77,6 @@ describe('RunProgressMeter', () => { .thenReturn(null) when(useNotifyAllCommandsQuery) .calledWith(NON_DETERMINISTIC_RUN_ID, { - cursor: null, pageLength: 1, }) .thenReturn(mockUseAllCommandsResponseNonDeterministic) diff --git a/app/src/organisms/Desktop/RunProgressMeter/index.tsx b/app/src/organisms/Desktop/RunProgressMeter/index.tsx index 481947e1707..fb158f5d686 100644 --- a/app/src/organisms/Desktop/RunProgressMeter/index.tsx +++ b/app/src/organisms/Desktop/RunProgressMeter/index.tsx @@ -67,7 +67,6 @@ export function RunProgressMeter(props: RunProgressMeterProps): JSX.Element { const runData = runRecord?.data ?? null const { data: mostRecentCommandData } = useNotifyAllCommandsQuery(runId, { - cursor: null, pageLength: 1, }) // This lastRunCommand also includes "fixit" commands. 
diff --git a/app/src/organisms/DropTipWizardFlows/DropTipWizardHeader.tsx b/app/src/organisms/DropTipWizardFlows/DropTipWizardHeader.tsx index e883da67d2b..55363426104 100644 --- a/app/src/organisms/DropTipWizardFlows/DropTipWizardHeader.tsx +++ b/app/src/organisms/DropTipWizardFlows/DropTipWizardHeader.tsx @@ -1,11 +1,9 @@ -import { useState, useEffect } from 'react' import { useTranslation } from 'react-i18next' -import { BEFORE_BEGINNING, BLOWOUT_SUCCESS, DT_ROUTES } from './constants' import { WizardHeader } from '/app/molecules/WizardHeader' import type { DropTipWizardProps } from './DropTipWizard' -import type { DropTipFlowsRoute, DropTipFlowsStep, ErrorDetails } from './types' +import type { ErrorDetails } from './types' type DropTipWizardHeaderProps = DropTipWizardProps & { isExitInitiated: boolean @@ -16,9 +14,6 @@ type DropTipWizardHeaderProps = DropTipWizardProps & { export function DropTipWizardHeader({ confirmExit, - currentStep, - currentRoute, - currentStepIdx, isExitInitiated, isFinalWizardStep, errorDetails, @@ -36,73 +31,14 @@ export function DropTipWizardHeader({ handleCleanUpAndClose, }) - const { totalSteps, currentStepNumber } = useSeenBlowoutSuccess({ - currentStep, - currentRoute, - currentStepIdx, - }) - return ( ) } -interface UseSeenBlowoutSuccessProps { - currentStep: DropTipFlowsStep - currentRoute: DropTipFlowsRoute - currentStepIdx: number -} - -interface UseSeenBlowoutSuccessResult { - currentStepNumber: number | null - totalSteps: number | null -} - -// Calculate the props used for determining step count based on the route. Because blowout and drop tip are separate routes, -// there's a need for state to track whether we've seen blowout, so the step counter is accurate when the drop tip route is active. -export function useSeenBlowoutSuccess({ - currentStep, - currentRoute, - currentStepIdx, -}: UseSeenBlowoutSuccessProps): UseSeenBlowoutSuccessResult { - const [hasSeenBlowoutSuccess, setHasSeenBlowoutSuccess] = useState(false) - - useEffect(() => { - if (currentStep === BLOWOUT_SUCCESS) { - setHasSeenBlowoutSuccess(true) - } else if (currentStep === BEFORE_BEGINNING) { - setHasSeenBlowoutSuccess(false) - } - }, [currentStep]) - - const shouldRenderStepCounter = currentRoute !== DT_ROUTES.BEFORE_BEGINNING - - let totalSteps: null | number - if (!shouldRenderStepCounter) { - totalSteps = null - } else if (currentRoute === DT_ROUTES.BLOWOUT || hasSeenBlowoutSuccess) { - totalSteps = DT_ROUTES.BLOWOUT.length + DT_ROUTES.DROP_TIP.length - } else { - totalSteps = currentRoute.length - } - - let currentStepNumber: null | number - if (!shouldRenderStepCounter) { - currentStepNumber = null - } else if (hasSeenBlowoutSuccess && currentRoute === DT_ROUTES.DROP_TIP) { - currentStepNumber = DT_ROUTES.BLOWOUT.length + currentStepIdx + 1 - } else { - currentStepNumber = currentStepIdx + 1 - } - - return { currentStepNumber, totalSteps } -} - export interface UseWizardExitHeaderProps { isFinalStep: boolean hasInitiatedExit: boolean diff --git a/app/src/organisms/DropTipWizardFlows/__tests__/DropTipWizardHeader.test.tsx b/app/src/organisms/DropTipWizardFlows/__tests__/DropTipWizardHeader.test.tsx index 306b3eff6d1..c5720adf4ab 100644 --- a/app/src/organisms/DropTipWizardFlows/__tests__/DropTipWizardHeader.test.tsx +++ b/app/src/organisms/DropTipWizardFlows/__tests__/DropTipWizardHeader.test.tsx @@ -1,16 +1,14 @@ import type * as React from 'react' import { beforeEach, describe, expect, it, vi } from 'vitest' -import { renderHook, screen } from '@testing-library/react' 
+import { screen } from '@testing-library/react' import { renderWithProviders } from '/app/__testing-utils__' import { i18n } from '/app/i18n' import { mockDropTipWizardContainerProps } from '../__fixtures__' import { useWizardExitHeader, - useSeenBlowoutSuccess, DropTipWizardHeader, } from '../DropTipWizardHeader' -import { DT_ROUTES } from '../constants' import type { Mock } from 'vitest' import type { UseWizardExitHeaderProps } from '../DropTipWizardHeader' @@ -31,22 +29,6 @@ describe('DropTipWizardHeader', () => { it('renders appropriate copy and onClick behavior', () => { render(props) screen.getByText('Drop tips') - screen.getByText('Step 1 / 5') - }) -}) - -describe('useSeenBlowoutSuccess', () => { - it('should not render step counter when currentRoute is BEFORE_BEGINNING', () => { - const { result } = renderHook(() => - useSeenBlowoutSuccess({ - currentStep: 'SOME_STEP' as any, - currentRoute: DT_ROUTES.BEFORE_BEGINNING, - currentStepIdx: 0, - }) - ) - - expect(result.current.totalSteps).toBe(null) - expect(result.current.currentStepNumber).toBe(null) }) }) diff --git a/app/src/organisms/DropTipWizardFlows/hooks/__tests__/errors.test.tsx b/app/src/organisms/DropTipWizardFlows/hooks/__tests__/errors.test.tsx index 08caf96c946..476513706f9 100644 --- a/app/src/organisms/DropTipWizardFlows/hooks/__tests__/errors.test.tsx +++ b/app/src/organisms/DropTipWizardFlows/hooks/__tests__/errors.test.tsx @@ -29,9 +29,9 @@ describe('useDropTipCommandErrors', () => { act(() => { result.current({ - runCommandError: { - errorType: DROP_TIP_SPECIAL_ERROR_TYPES.MUST_HOME_ERROR, - } as any, + type: DROP_TIP_SPECIAL_ERROR_TYPES.MUST_HOME_ERROR, + message: 'remove_the_tips_manually', + header: 'cant_safely_drop_tips', }) }) diff --git a/app/src/organisms/DropTipWizardFlows/hooks/errors.tsx b/app/src/organisms/DropTipWizardFlows/hooks/errors.tsx index c8df4d239a9..1f7d6cf09ff 100644 --- a/app/src/organisms/DropTipWizardFlows/hooks/errors.tsx +++ b/app/src/organisms/DropTipWizardFlows/hooks/errors.tsx @@ -9,8 +9,7 @@ import type { RunCommandError } from '@opentrons/shared-data' import type { ErrorDetails } from '../types' export interface SetRobotErrorDetailsParams { - runCommandError?: RunCommandError - message?: string + message: string | null header?: string type?: RunCommandError['errorType'] } @@ -23,16 +22,8 @@ export function useDropTipCommandErrors( ): (cbProps: SetRobotErrorDetailsParams) => void { const { t } = useTranslation('drop_tip_wizard') - return ({ - runCommandError, - message, - header, - type, - }: SetRobotErrorDetailsParams) => { - if ( - runCommandError?.errorType === - DROP_TIP_SPECIAL_ERROR_TYPES.MUST_HOME_ERROR - ) { + return ({ message, header, type }: SetRobotErrorDetailsParams) => { + if (type === DROP_TIP_SPECIAL_ERROR_TYPES.MUST_HOME_ERROR) { const headerText = t('cant_safely_drop_tips') const messageText = t('remove_the_tips_manually') diff --git a/app/src/organisms/DropTipWizardFlows/hooks/useDropTipCommands.ts b/app/src/organisms/DropTipWizardFlows/hooks/useDropTipCommands.ts index 493b4a77bff..c3162a48f07 100644 --- a/app/src/organisms/DropTipWizardFlows/hooks/useDropTipCommands.ts +++ b/app/src/organisms/DropTipWizardFlows/hooks/useDropTipCommands.ts @@ -2,13 +2,18 @@ import { useState, useEffect } from 'react' import { useDeleteMaintenanceRunMutation } from '@opentrons/react-api-client' -import { DT_ROUTES, MANAGED_PIPETTE_ID } from '../constants' +import { + DROP_TIP_SPECIAL_ERROR_TYPES, + DT_ROUTES, + MANAGED_PIPETTE_ID, +} from '../constants' import { 
getAddressableAreaFromConfig } from '../utils' import { useNotifyDeckConfigurationQuery } from '/app/resources/deck_configuration' import type { CreateCommand, AddressableAreaName, PipetteModelSpecs, + RunCommandError, } from '@opentrons/shared-data' import { FLEX_ROBOT_TYPE } from '@opentrons/shared-data' import type { CommandData, PipetteData } from '@opentrons/api-client' @@ -129,17 +134,25 @@ export function useDropTipCommands({ issuedCommandsType ) - return chainRunCommands( - isFlex - ? [ - ENGAGE_AXES, - UPDATE_ESTIMATORS_EXCEPT_PLUNGERS, - Z_HOME, - moveToAACommand, - ] - : [Z_HOME, moveToAACommand], - false - ) + if (isFlex) { + return chainRunCommands( + [ENGAGE_AXES, UPDATE_ESTIMATORS_EXCEPT_PLUNGERS], + false + ) + .catch(error => { + // If one of the engage/estimator commands fails, we can safely assume it's because the position is + // unknown, so show the special error modal. + throw { + ...error, + errorType: DROP_TIP_SPECIAL_ERROR_TYPES.MUST_HOME_ERROR, + } + }) + .then(() => { + return chainRunCommands([Z_HOME, moveToAACommand], false) + }) + } else { + return chainRunCommands([Z_HOME, moveToAACommand], false) + } }) .then((commandData: CommandData[]) => { const error = commandData[0].data.error @@ -149,12 +162,12 @@ export function useDropTipCommands({ } resolve() }) - .catch(error => { + .catch((error: RunCommandError) => { if (fixitCommandTypeUtils != null && issuedCommandsType === 'fixit') { fixitCommandTypeUtils.errorOverrides.generalFailure() } else { setErrorDetails({ - runCommandError: error, + type: error.errorType ?? null, message: error.detail ? `Error moving to position: ${error.detail}` : 'Error moving to position: invalid addressable area.', @@ -224,51 +237,70 @@ export function useDropTipCommands({ proceed: () => void ): Promise => { return new Promise((resolve, reject) => { - chainRunCommands( - currentRoute === DT_ROUTES.BLOWOUT - ? buildBlowoutCommands(instrumentModelSpecs, isFlex, pipetteId) - : buildDropTipInPlaceCommand(isFlex, pipetteId), - false - ) - .then((commandData: CommandData[]) => { - const error = commandData[0].data.error - if (error != null) { - if ( - fixitCommandTypeUtils != null && - issuedCommandsType === 'fixit' - ) { - if (currentRoute === DT_ROUTES.BLOWOUT) { - fixitCommandTypeUtils.errorOverrides.blowoutFailed() - } else { - fixitCommandTypeUtils.errorOverrides.tipDropFailed() - } - } - - setErrorDetails({ - runCommandError: error, - message: `Error moving to position: ${error.detail}`, - }) - } else { - proceed() - resolve() - } - }) - .catch((error: Error) => { - if (fixitCommandTypeUtils != null && issuedCommandsType === 'fixit') { - if (currentRoute === DT_ROUTES.BLOWOUT) { - fixitCommandTypeUtils.errorOverrides.blowoutFailed() - } else { - fixitCommandTypeUtils.errorOverrides.tipDropFailed() - } - } + const isBlowoutRoute = currentRoute === DT_ROUTES.BLOWOUT + + const handleError = (error: RunCommandError | Error): void => { + if (fixitCommandTypeUtils != null && issuedCommandsType === 'fixit') { + isBlowoutRoute + ? fixitCommandTypeUtils.errorOverrides.blowoutFailed() + : fixitCommandTypeUtils.errorOverrides.tipDropFailed() + } else { + const operation = isBlowoutRoute ? 'blowout' : 'drop tip' + const type = 'errorType' in error ? error.errorType : undefined + const messageDetail = + 'message' in error ? error.message : error.detail setErrorDetails({ - message: `Error issuing ${ - currentRoute === DT_ROUTES.BLOWOUT ? 'blowout' : 'drop tip' - } command: ${error.message}`, + type, + message: + messageDetail != null + ? 
`Error during ${operation}: ${messageDetail}` + : null, }) - resolve() + } + reject(error) + } + + // Throw any errors in the response body if any. + const handleSuccess = (commandData: CommandData[]): void => { + const error = commandData[0].data.error + if (error != null) { + // eslint-disable-next-line @typescript-eslint/no-throw-literal + throw error + } + proceed() + resolve() + } + + // For Flex, we need extra preparation steps + const prepareFlexBlowout = (): Promise => { + return chainRunCommands( + [ENGAGE_AXES, UPDATE_PLUNGER_ESTIMATORS], + false + ).catch(error => { + throw { + ...error, + errorType: DROP_TIP_SPECIAL_ERROR_TYPES.MUST_HOME_ERROR, + } }) + } + + const executeCommands = (): Promise => { + const commands = isBlowoutRoute + ? buildBlowoutCommands(instrumentModelSpecs, isFlex, pipetteId) + : buildDropTipInPlaceCommand(isFlex, pipetteId) + + return chainRunCommands(commands, false) + } + + if (isBlowoutRoute && isFlex) { + prepareFlexBlowout() + .then(executeCommands) + .then(handleSuccess) + .catch(handleError) + } else { + executeCommands().then(handleSuccess).catch(handleError) + } }) } @@ -356,13 +388,10 @@ const buildBlowoutCommands = ( ): CreateCommand[] => isFlex ? [ - ENGAGE_AXES, - UPDATE_PLUNGER_ESTIMATORS, { commandType: 'unsafe/blowOutInPlace', params: { pipetteId: pipetteId ?? MANAGED_PIPETTE_ID, - flowRate: Math.min( specs.defaultBlowOutFlowRate.value, MAXIMUM_BLOWOUT_FLOW_RATE_UL_PER_S @@ -376,7 +405,6 @@ const buildBlowoutCommands = ( commandType: 'blowOutInPlace', params: { pipetteId: pipetteId ?? MANAGED_PIPETTE_ID, - flowRate: specs.defaultBlowOutFlowRate.value, }, }, diff --git a/app/src/organisms/DropTipWizardFlows/steps/BeforeBeginning.tsx b/app/src/organisms/DropTipWizardFlows/steps/BeforeBeginning.tsx index bbcb908ea24..82be17832b4 100644 --- a/app/src/organisms/DropTipWizardFlows/steps/BeforeBeginning.tsx +++ b/app/src/organisms/DropTipWizardFlows/steps/BeforeBeginning.tsx @@ -235,6 +235,10 @@ const CONTAINER_STYLE = css` display: ${DISPLAY_FLEX}; flex-direction: ${DIRECTION_COLUMN}; grid-gap: ${SPACING.spacing16}; + + @media ${RESPONSIVENESS.touchscreenMediaQuerySpecs} { + grid-gap: ${SPACING.spacing8}; + } ` const ODD_MEDIUM_BUTTON_STYLE = css` diff --git a/app/src/organisms/ErrorRecoveryFlows/ErrorRecoveryWizard.tsx b/app/src/organisms/ErrorRecoveryFlows/ErrorRecoveryWizard.tsx index 795b1517ee8..2c6f047f80d 100644 --- a/app/src/organisms/ErrorRecoveryFlows/ErrorRecoveryWizard.tsx +++ b/app/src/organisms/ErrorRecoveryFlows/ErrorRecoveryWizard.tsx @@ -107,10 +107,7 @@ export function ErrorRecoveryComponent( const buildTitleHeading = (): JSX.Element => { const titleText = hasLaunchedRecovery ? 
t('recovery_mode') : t('cancel_run') return ( - + {titleText} ) diff --git a/app/src/organisms/ErrorRecoveryFlows/RecoveryTakeover.tsx b/app/src/organisms/ErrorRecoveryFlows/RecoveryTakeover.tsx index eccd4e972a7..4a28ac24b0c 100644 --- a/app/src/organisms/ErrorRecoveryFlows/RecoveryTakeover.tsx +++ b/app/src/organisms/ErrorRecoveryFlows/RecoveryTakeover.tsx @@ -120,9 +120,17 @@ export function RecoveryTakeoverDesktop({ }: RecoveryTakeoverProps): JSX.Element { const { t } = useTranslation('error_recovery') + const buildTitleHeadingDesktop = (): JSX.Element => { + return ( + + {t('error_on_robot', { robot: robotName })} + + ) + } + return ( diff --git a/app/src/organisms/ErrorRecoveryFlows/__tests__/RecoverySplash.test.tsx b/app/src/organisms/ErrorRecoveryFlows/__tests__/RecoverySplash.test.tsx index 7446ea7464c..901ab22e158 100644 --- a/app/src/organisms/ErrorRecoveryFlows/__tests__/RecoverySplash.test.tsx +++ b/app/src/organisms/ErrorRecoveryFlows/__tests__/RecoverySplash.test.tsx @@ -136,7 +136,7 @@ describe('RecoverySplash', () => { render(props) const primaryBtn = screen.getByRole('button', { - name: 'Launch Recovery Mode', + name: 'Launch recovery mode', }) const secondaryBtn = screen.getByRole('button', { name: 'Cancel run' }) @@ -183,7 +183,7 @@ describe('RecoverySplash', () => { render(props) - clickButtonLabeled('Launch Recovery Mode') + clickButtonLabeled('Launch recovery mode') expect(mockMakeToast).toHaveBeenCalled() }) diff --git a/app/src/organisms/ErrorRecoveryFlows/hooks/__tests__/useCurrentlyRecoveringFrom.test.ts b/app/src/organisms/ErrorRecoveryFlows/hooks/__tests__/useCurrentlyRecoveringFrom.test.ts index 4b177972851..cc27994d671 100644 --- a/app/src/organisms/ErrorRecoveryFlows/hooks/__tests__/useCurrentlyRecoveringFrom.test.ts +++ b/app/src/organisms/ErrorRecoveryFlows/hooks/__tests__/useCurrentlyRecoveringFrom.test.ts @@ -42,9 +42,11 @@ describe('useCurrentlyRecoveringFrom', () => { }, }, }, + isFetching: false, } as any) vi.mocked(useCommandQuery).mockReturnValue({ data: { data: 'mockCommandDetails' }, + isFetching: false, } as any) const { result } = renderHook(() => @@ -53,7 +55,7 @@ describe('useCurrentlyRecoveringFrom', () => { expect(vi.mocked(useNotifyAllCommandsQuery)).toHaveBeenCalledWith( MOCK_RUN_ID, - { cursor: null, pageLength: 0 }, + { pageLength: 0 }, { enabled: false, refetchInterval: 5000 } ) expect(vi.mocked(useCommandQuery)).toHaveBeenCalledWith( @@ -69,8 +71,11 @@ describe('useCurrentlyRecoveringFrom', () => { data: { links: {}, }, + isFetching: false, + } as any) + vi.mocked(useCommandQuery).mockReturnValue({ + isFetching: false, } as any) - vi.mocked(useCommandQuery).mockReturnValue({} as any) const { result } = renderHook(() => useCurrentlyRecoveringFrom(MOCK_RUN_ID, RUN_STATUS_AWAITING_RECOVERY) @@ -94,9 +99,11 @@ describe('useCurrentlyRecoveringFrom', () => { }, }, }, + isFetching: false, } as any) vi.mocked(useCommandQuery).mockReturnValue({ data: { data: 'mockCommandDetails' }, + isFetching: false, } as any) const { result } = renderHook(() => @@ -111,6 +118,91 @@ describe('useCurrentlyRecoveringFrom', () => { expect(result.current).toStrictEqual('mockCommandDetails') }) + it('returns null if all commands query is still fetching', () => { + vi.mocked(useNotifyAllCommandsQuery).mockReturnValue({ + data: { + links: { + currentlyRecoveringFrom: { + meta: { + runId: MOCK_RUN_ID, + commandId: MOCK_COMMAND_ID, + }, + }, + }, + }, + isFetching: true, + } as any) + vi.mocked(useCommandQuery).mockReturnValue({ + data: { data: 'mockCommandDetails' }, 
+ isFetching: false, + } as any) + + const { result } = renderHook(() => + useCurrentlyRecoveringFrom(MOCK_RUN_ID, RUN_STATUS_AWAITING_RECOVERY) + ) + + expect(result.current).toStrictEqual(null) + }) + + it('returns null if command query is still fetching', () => { + vi.mocked(useNotifyAllCommandsQuery).mockReturnValue({ + data: { + links: { + currentlyRecoveringFrom: { + meta: { + runId: MOCK_RUN_ID, + commandId: MOCK_COMMAND_ID, + }, + }, + }, + }, + isFetching: false, + } as any) + vi.mocked(useCommandQuery).mockReturnValue({ + data: { data: 'mockCommandDetails' }, + isFetching: true, + } as any) + + const { result } = renderHook(() => + useCurrentlyRecoveringFrom(MOCK_RUN_ID, RUN_STATUS_AWAITING_RECOVERY) + ) + + expect(result.current).toStrictEqual(null) + }) + + it('resets isReadyToShow when run exits recovery mode', () => { + const { rerender, result } = renderHook( + ({ status }) => useCurrentlyRecoveringFrom(MOCK_RUN_ID, status), + { initialProps: { status: RUN_STATUS_AWAITING_RECOVERY } } + ) + + vi.mocked(useNotifyAllCommandsQuery).mockReturnValue({ + data: { + links: { + currentlyRecoveringFrom: { + meta: { + runId: MOCK_RUN_ID, + commandId: MOCK_COMMAND_ID, + }, + }, + }, + }, + isFetching: false, + } as any) + vi.mocked(useCommandQuery).mockReturnValue({ + data: { data: 'mockCommandDetails' }, + isFetching: false, + } as any) + + rerender({ status: RUN_STATUS_AWAITING_RECOVERY }) + + expect(result.current).toStrictEqual('mockCommandDetails') + + rerender({ status: RUN_STATUS_IDLE } as any) + + expect(result.current).toStrictEqual(null) + }) + it('calls invalidateQueries when the run enters recovery mode', () => { renderHook(() => useCurrentlyRecoveringFrom(MOCK_RUN_ID, RUN_STATUS_AWAITING_RECOVERY) diff --git a/app/src/organisms/ErrorRecoveryFlows/hooks/__tests__/useRecoveryCommands.test.ts b/app/src/organisms/ErrorRecoveryFlows/hooks/__tests__/useRecoveryCommands.test.ts index 21307b8e4e8..ce333c1fb39 100644 --- a/app/src/organisms/ErrorRecoveryFlows/hooks/__tests__/useRecoveryCommands.test.ts +++ b/app/src/organisms/ErrorRecoveryFlows/hooks/__tests__/useRecoveryCommands.test.ts @@ -4,11 +4,13 @@ import { renderHook, act } from '@testing-library/react' import { useResumeRunFromRecoveryMutation, useStopRunMutation, - useUpdateErrorRecoveryPolicy, useResumeRunFromRecoveryAssumingFalsePositiveMutation, } from '@opentrons/react-api-client' -import { useChainRunCommands } from '/app/resources/runs' +import { + useChainRunCommands, + useUpdateRecoveryPolicyWithStrategy, +} from '/app/resources/runs' import { useRecoveryCommands, HOME_PIPETTE_Z_AXES, @@ -80,9 +82,9 @@ describe('useRecoveryCommands', () => { vi.mocked(useChainRunCommands).mockReturnValue({ chainRunCommands: mockChainRunCommands, } as any) - vi.mocked(useUpdateErrorRecoveryPolicy).mockReturnValue({ - mutateAsync: mockUpdateErrorRecoveryPolicy, - } as any) + vi.mocked(useUpdateRecoveryPolicyWithStrategy).mockReturnValue( + mockUpdateErrorRecoveryPolicy as any + ) vi.mocked( useResumeRunFromRecoveryAssumingFalsePositiveMutation ).mockReturnValue({ @@ -361,7 +363,8 @@ describe('useRecoveryCommands', () => { ) expect(mockUpdateErrorRecoveryPolicy).toHaveBeenCalledWith( - expectedPolicyRules + expectedPolicyRules, + 'append' ) }) diff --git a/app/src/organisms/ErrorRecoveryFlows/hooks/__tests__/useRecoveryToasts.test.tsx b/app/src/organisms/ErrorRecoveryFlows/hooks/__tests__/useRecoveryToasts.test.tsx index c9874eb5532..085551f42e7 100644 --- 
a/app/src/organisms/ErrorRecoveryFlows/hooks/__tests__/useRecoveryToasts.test.tsx +++ b/app/src/organisms/ErrorRecoveryFlows/hooks/__tests__/useRecoveryToasts.test.tsx @@ -94,13 +94,13 @@ describe('useRecoveryToasts', () => { result.current.makeSuccessToast() expect(mockMakeToast).toHaveBeenCalledWith( - 'Retrying step 1 succeeded.', + 'test command', 'success', expect.objectContaining({ closeButton: true, disableTimeout: true, displayType: 'desktop', - heading: expect.any(String), + heading: 'Retrying step 1 succeeded.', }) ) }) @@ -209,8 +209,8 @@ describe('useRecoveryFullCommandText', () => { const { result } = renderHook(() => useRecoveryFullCommandText({ robotType: FLEX_ROBOT_TYPE, - stepNumber: 0, - commandTextData: { commands: [TEST_COMMAND] } as any, + stepNumber: 1, + commandTextData: { commands: [TEST_COMMAND, {}] } as any, allRunDefs: [], }) ) @@ -259,7 +259,7 @@ describe('useRecoveryFullCommandText', () => { const { result } = renderHook(() => useRecoveryFullCommandText({ robotType: FLEX_ROBOT_TYPE, - stepNumber: 0, + stepNumber: 1, commandTextData: { commands: [TC_COMMAND], } as any, @@ -279,9 +279,9 @@ describe('useRecoveryFullCommandText', () => { const { result } = renderHook(() => useRecoveryFullCommandText({ robotType: FLEX_ROBOT_TYPE, - stepNumber: 0, + stepNumber: 1, commandTextData: { - commands: [TC_COMMAND], + commands: [TC_COMMAND, {}], } as any, allRunDefs: [], }) diff --git a/app/src/organisms/ErrorRecoveryFlows/hooks/useCurrentlyRecoveringFrom.ts b/app/src/organisms/ErrorRecoveryFlows/hooks/useCurrentlyRecoveringFrom.ts index d0273643b61..38f9e39165a 100644 --- a/app/src/organisms/ErrorRecoveryFlows/hooks/useCurrentlyRecoveringFrom.ts +++ b/app/src/organisms/ErrorRecoveryFlows/hooks/useCurrentlyRecoveringFrom.ts @@ -1,4 +1,4 @@ -import { useEffect } from 'react' +import { useEffect, useState } from 'react' import { useQueryClient } from 'react-query' import { @@ -23,13 +23,15 @@ const VALID_RECOVERY_FETCH_STATUSES = [ ] as Array // Return the `currentlyRecoveringFrom` command returned by the server, if any. -// Otherwise, returns null. +// The command will only be returned after the initial fetches are complete to prevent rendering of stale data. export function useCurrentlyRecoveringFrom( runId: string, runStatus: RunStatus | null ): FailedCommand | null { const queryClient = useQueryClient() const host = useHost() + const [isReadyToShow, setIsReadyToShow] = useState(false) + // There can only be a currentlyRecoveringFrom command when the run is in recovery mode. // In case we're falling back to polling, only enable queries when that is the case. const isRunInRecoveryMode = VALID_RECOVERY_FETCH_STATUSES.includes(runStatus) @@ -38,12 +40,17 @@ export function useCurrentlyRecoveringFrom( useEffect(() => { if (isRunInRecoveryMode) { void queryClient.invalidateQueries([host, 'runs', runId]) + } else { + setIsReadyToShow(false) } }, [isRunInRecoveryMode, host, runId]) - const { data: allCommandsQueryData } = useNotifyAllCommandsQuery( + const { + data: allCommandsQueryData, + isFetching: isAllCommandsFetching, + } = useNotifyAllCommandsQuery( runId, - { cursor: null, pageLength: 0 }, // pageLength 0 because we only care about the links. + { pageLength: 0 }, // pageLength 0 because we only care about the links. 
{ enabled: isRunInRecoveryMode, refetchInterval: ALL_COMMANDS_POLL_MS, @@ -54,7 +61,10 @@ export function useCurrentlyRecoveringFrom( // TODO(mm, 2024-05-21): When the server supports fetching the // currentlyRecoveringFrom command in one step, do that instead of this chained query. - const { data: commandQueryData } = useCommandQuery( + const { + data: commandQueryData, + isFetching: isCommandFetching, + } = useCommandQuery( currentlyRecoveringFromLink?.meta.runId ?? null, currentlyRecoveringFromLink?.meta.commandId ?? null, { @@ -62,5 +72,25 @@ export function useCurrentlyRecoveringFrom( } ) - return isRunInRecoveryMode ? commandQueryData?.data ?? null : null + // Only mark as ready to show when waterfall fetches are complete + useEffect(() => { + if ( + isRunInRecoveryMode && + !isAllCommandsFetching && + !isCommandFetching && + !isReadyToShow + ) { + setIsReadyToShow(true) + } + }, [ + isRunInRecoveryMode, + isAllCommandsFetching, + isCommandFetching, + isReadyToShow, + ]) + + const shouldShowCommand = + isRunInRecoveryMode && isReadyToShow && commandQueryData?.data + + return shouldShowCommand ? commandQueryData.data : null } diff --git a/app/src/organisms/ErrorRecoveryFlows/hooks/useERUtils.ts b/app/src/organisms/ErrorRecoveryFlows/hooks/useERUtils.ts index ecf03e4f56b..72e9bb481bc 100644 --- a/app/src/organisms/ErrorRecoveryFlows/hooks/useERUtils.ts +++ b/app/src/organisms/ErrorRecoveryFlows/hooks/useERUtils.ts @@ -1,3 +1,5 @@ +import { useMemo } from 'react' + import { useInstrumentsQuery } from '@opentrons/react-api-client' import { useRouteUpdateActions } from './useRouteUpdateActions' @@ -13,12 +15,12 @@ import { } from '/app/resources/runs' import { useRecoveryOptionCopy } from './useRecoveryOptionCopy' import { useRecoveryActionMutation } from './useRecoveryActionMutation' -import { useRunningStepCounts } from '/app/resources/protocols/hooks' import { useRecoveryToasts } from './useRecoveryToasts' import { useRecoveryAnalytics } from '/app/redux-resources/analytics' import { useShowDoorInfo } from './useShowDoorInfo' import { useCleanupRecoveryState } from './useCleanupRecoveryState' import { useFailedPipetteUtils } from './useFailedPipetteUtils' +import { getRunningStepCountsFrom } from '/app/resources/protocols' import type { LabwareDefinition2, @@ -102,7 +104,14 @@ export function useERUtils({ pageLength: 999, }) - const stepCounts = useRunningStepCounts(runId, runCommands) + const stepCounts = useMemo( + () => + getRunningStepCountsFrom( + protocolAnalysis?.commands ?? [], + failedCommand?.byRunRecord ?? 
null + ), + [protocolAnalysis != null, failedCommand] + ) const analytics = useRecoveryAnalytics() diff --git a/app/src/organisms/ErrorRecoveryFlows/hooks/useRecoveryCommands.ts b/app/src/organisms/ErrorRecoveryFlows/hooks/useRecoveryCommands.ts index 65bd77eed0b..3e4b20225c5 100644 --- a/app/src/organisms/ErrorRecoveryFlows/hooks/useRecoveryCommands.ts +++ b/app/src/organisms/ErrorRecoveryFlows/hooks/useRecoveryCommands.ts @@ -4,11 +4,13 @@ import head from 'lodash/head' import { useResumeRunFromRecoveryMutation, useStopRunMutation, - useUpdateErrorRecoveryPolicy, useResumeRunFromRecoveryAssumingFalsePositiveMutation, } from '@opentrons/react-api-client' -import { useChainRunCommands } from '/app/resources/runs' +import { + useChainRunCommands, + useUpdateRecoveryPolicyWithStrategy, +} from '/app/resources/runs' import { DEFINED_ERROR_TYPES, ERROR_KINDS, RECOVERY_MAP } from '../constants' import { getErrorKind } from '/app/organisms/ErrorRecoveryFlows/utils' @@ -23,12 +25,7 @@ import type { PrepareToAspirateRunTimeCommand, MoveLabwareParams, } from '@opentrons/shared-data' -import type { - CommandData, - IfMatchType, - RecoveryPolicyRulesParams, - RunAction, -} from '@opentrons/api-client' +import type { CommandData, IfMatchType, RunAction } from '@opentrons/api-client' import type { WellGroup } from '@opentrons/components' import type { FailedCommand, RecoveryRoute, RouteStep } from '../types' import type { UseFailedLabwareUtilsResult } from './useFailedLabwareUtils' @@ -38,6 +35,7 @@ import type { UseRecoveryAnalyticsResult } from '/app/redux-resources/analytics' import type { CurrentRecoveryOptionUtils } from './useRecoveryRouting' import type { ErrorRecoveryFlowsProps } from '..' import type { FailedCommandBySource } from './useRetainedFailedCommandBySource' +import type { UpdateErrorRecoveryPolicyWithStrategy } from '/app/resources/runs' interface UseRecoveryCommandsParams { runId: string @@ -100,11 +98,10 @@ export function useRecoveryCommands({ mutateAsync: resumeRunFromRecoveryAssumingFalsePositive, } = useResumeRunFromRecoveryAssumingFalsePositiveMutation() const { stopRun } = useStopRunMutation() - const { - mutateAsync: updateErrorRecoveryPolicy, - } = useUpdateErrorRecoveryPolicy(runId) + const updateErrorRecoveryPolicy = useUpdateRecoveryPolicyWithStrategy(runId) const { makeSuccessToast } = recoveryToastUtils + // TODO(jh, 11-21-24): Some commands return a 200 with an error body. We should catch these and propagate the error. 
const chainRunRecoveryCommands = useCallback( ( commands: CreateCommand[], @@ -230,10 +227,12 @@ export function useRecoveryCommands({ ifMatch ) - return updateErrorRecoveryPolicy(ignorePolicyRules) + return updateErrorRecoveryPolicy(ignorePolicyRules, 'append') .then(() => Promise.resolve()) - .catch(() => - Promise.reject(new Error('Failed to update recovery policy.')) + .catch((e: Error) => + Promise.reject( + new Error(`Failed to update recovery policy: ${e.message}`) + ) ) } else { void proceedToRouteAndStep(RECOVERY_MAP.ERROR_WHILE_RECOVERING.ROUTE) @@ -420,12 +419,8 @@ export const buildIgnorePolicyRules = ( commandType: FailedCommand['commandType'], errorType: string, ifMatch: IfMatchType -): RecoveryPolicyRulesParams => { - return [ - { - commandType, - errorType, - ifMatch, - }, - ] -} +): UpdateErrorRecoveryPolicyWithStrategy['newPolicy'] => ({ + commandType, + errorType, + ifMatch, +}) diff --git a/app/src/organisms/ErrorRecoveryFlows/hooks/useRecoveryToasts.ts b/app/src/organisms/ErrorRecoveryFlows/hooks/useRecoveryToasts.ts index 6daf6998ae5..9fef84caca9 100644 --- a/app/src/organisms/ErrorRecoveryFlows/hooks/useRecoveryToasts.ts +++ b/app/src/organisms/ErrorRecoveryFlows/hooks/useRecoveryToasts.ts @@ -110,8 +110,9 @@ export function useRecoveryFullCommandText( ): string | null { const { commandTextData, stepNumber } = props + // TODO TOME: I think you are looking one command to far, for some reason. const relevantCmdIdx = stepNumber ?? -1 - const relevantCmd = commandTextData?.commands[relevantCmdIdx] ?? null + const relevantCmd = commandTextData?.commands[relevantCmdIdx - 1] ?? null const { commandText, kind } = useCommandTextString({ ...props, diff --git a/app/src/organisms/ErrorRecoveryFlows/hooks/useRetainedFailedCommandBySource.ts b/app/src/organisms/ErrorRecoveryFlows/hooks/useRetainedFailedCommandBySource.ts index 90afa5851da..5a226fddcf1 100644 --- a/app/src/organisms/ErrorRecoveryFlows/hooks/useRetainedFailedCommandBySource.ts +++ b/app/src/organisms/ErrorRecoveryFlows/hooks/useRetainedFailedCommandBySource.ts @@ -1,4 +1,4 @@ -import { useState, useEffect } from 'react' +import { useState, useLayoutEffect } from 'react' import type { RunTimeCommand } from '@opentrons/shared-data' import type { ErrorRecoveryFlowsProps } from '..' @@ -27,7 +27,7 @@ export function useRetainedFailedCommandBySource( setRetainedFailedCommand, ] = useState(null) - useEffect(() => { + useLayoutEffect(() => { if (failedCommandByRunRecord !== null) { const failedCommandByAnalysis = protocolAnalysis?.commands.find( diff --git a/app/src/organisms/ErrorRecoveryFlows/index.tsx b/app/src/organisms/ErrorRecoveryFlows/index.tsx index 6461ae773fc..68184d71b46 100644 --- a/app/src/organisms/ErrorRecoveryFlows/index.tsx +++ b/app/src/organisms/ErrorRecoveryFlows/index.tsx @@ -1,4 +1,4 @@ -import { useMemo, useState } from 'react' +import { useMemo, useLayoutEffect, useState } from 'react' import { useSelector } from 'react-redux' import { @@ -7,10 +7,12 @@ import { RUN_STATUS_AWAITING_RECOVERY_PAUSED, RUN_STATUS_BLOCKED_BY_OPEN_DOOR, RUN_STATUS_FAILED, + RUN_STATUS_FINISHING, RUN_STATUS_IDLE, RUN_STATUS_PAUSED, RUN_STATUS_RUNNING, RUN_STATUS_STOP_REQUESTED, + RUN_STATUS_STOPPED, RUN_STATUS_SUCCEEDED, } from '@opentrons/api-client' import { @@ -41,10 +43,13 @@ const VALID_ER_RUN_STATUSES: RunStatus[] = [ RUN_STATUS_STOP_REQUESTED, ] +// Effectively statuses that are not an "awaiting-recovery" status OR "stop requested." 
const INVALID_ER_RUN_STATUSES: RunStatus[] = [ RUN_STATUS_RUNNING, RUN_STATUS_PAUSED, RUN_STATUS_BLOCKED_BY_OPEN_DOOR, + RUN_STATUS_FINISHING, + RUN_STATUS_STOPPED, RUN_STATUS_FAILED, RUN_STATUS_SUCCEEDED, RUN_STATUS_IDLE, @@ -52,7 +57,6 @@ const INVALID_ER_RUN_STATUSES: RunStatus[] = [ export interface UseErrorRecoveryResult { isERActive: boolean - /* There is no FailedCommand if the run statis is not AWAITING_RECOVERY. */ failedCommand: FailedCommand | null } @@ -61,44 +65,38 @@ export function useErrorRecoveryFlows( runStatus: RunStatus | null ): UseErrorRecoveryResult { const [isERActive, setIsERActive] = useState(false) - // If client accesses a valid ER runs status besides AWAITING_RECOVERY but accesses it outside of Error Recovery flows, don't show ER. - const [hasSeenAwaitingRecovery, setHasSeenAwaitingRecovery] = useState(false) const failedCommand = useCurrentlyRecoveringFrom(runId, runStatus) - if ( - !hasSeenAwaitingRecovery && - ([ - RUN_STATUS_AWAITING_RECOVERY, - RUN_STATUS_AWAITING_RECOVERY_BLOCKED_BY_OPEN_DOOR, - RUN_STATUS_AWAITING_RECOVERY_PAUSED, - ] as Array).includes(runStatus) - ) { - setHasSeenAwaitingRecovery(true) + // The complexity of this logic exists to persist Error Recovery screens past the server's definition of Error Recovery. + // Ex, show a "cancelling run" modal in Error Recovery flows despite the robot no longer being in a recoverable state. + + const isValidERStatus = ( + status: RunStatus | null, + hasSeenAwaitingRecovery: boolean + ): boolean => { + return ( + status !== null && + (status === RUN_STATUS_AWAITING_RECOVERY || + (VALID_ER_RUN_STATUSES.includes(status) && hasSeenAwaitingRecovery)) + ) } - // Reset recovery mode after the client has exited recovery, otherwise "cancel run" will trigger ER after the first recovery. - else if ( - hasSeenAwaitingRecovery && - runStatus != null && - INVALID_ER_RUN_STATUSES.includes(runStatus) - ) { - setHasSeenAwaitingRecovery(false) - } - - const isValidRunStatus = - runStatus != null && - VALID_ER_RUN_STATUSES.includes(runStatus) && - hasSeenAwaitingRecovery - if (!isERActive && isValidRunStatus && failedCommand != null) { - setIsERActive(true) - } - // Because multiple ER flows may occur per run, disable ER when the status is not "awaiting-recovery" or a - // terminating run status in which we want to persist ER flows. Specific recovery commands cause run status to change. - // See a specific command's docstring for details. - // ER handles a null failedCommand outside the splash screen, so we shouldn't set it false here. - else if (isERActive && !isValidRunStatus) { - setIsERActive(false) - } + // If client accesses a valid ER runs status besides AWAITING_RECOVERY but accesses it outside of Error Recovery flows, + // don't show ER. + useLayoutEffect(() => { + if (runStatus != null) { + const isAwaitingRecovery = + VALID_ER_RUN_STATUSES.includes(runStatus) && + runStatus !== RUN_STATUS_STOP_REQUESTED && + failedCommand != null // Prevents one render cycle of an unknown failed command. 
+ + if (isAwaitingRecovery) { + setIsERActive(isValidERStatus(runStatus, true)) + } else if (INVALID_ER_RUN_STATUSES.includes(runStatus)) { + setIsERActive(isValidERStatus(runStatus, false)) + } + } + }, [runStatus, failedCommand]) return { isERActive, diff --git a/app/src/organisms/ErrorRecoveryFlows/shared/GripperIsHoldingLabware.tsx b/app/src/organisms/ErrorRecoveryFlows/shared/GripperIsHoldingLabware.tsx index 94ef850834c..036f1aff3d0 100644 --- a/app/src/organisms/ErrorRecoveryFlows/shared/GripperIsHoldingLabware.tsx +++ b/app/src/organisms/ErrorRecoveryFlows/shared/GripperIsHoldingLabware.tsx @@ -14,11 +14,11 @@ import { import { RecoverySingleColumnContentWrapper } from './RecoveryContentWrapper' import { RecoveryFooterButtons } from './RecoveryFooterButtons' import { ODD_ONLY, DESKTOP_ONLY, RECOVERY_MAP } from '../constants' +import { RecoveryRadioGroup } from '/app/organisms/ErrorRecoveryFlows/shared/RecoveryRadioGroup' import type { JSX } from 'react' import type { TFunction } from 'i18next' import type { RecoveryContentProps } from '../types' -import { RecoveryRadioGroup } from '/app/organisms/ErrorRecoveryFlows/shared/RecoveryRadioGroup' type HoldingLabwareOption = 'yes' | 'no' export const HOLDING_LABWARE_OPTIONS: HoldingLabwareOption[] = [ diff --git a/app/src/organisms/ErrorRecoveryFlows/shared/GripperReleaseLabware.tsx b/app/src/organisms/ErrorRecoveryFlows/shared/GripperReleaseLabware.tsx index 3e0c24756d2..2c433775a83 100644 --- a/app/src/organisms/ErrorRecoveryFlows/shared/GripperReleaseLabware.tsx +++ b/app/src/organisms/ErrorRecoveryFlows/shared/GripperReleaseLabware.tsx @@ -95,6 +95,6 @@ const ANIMATION_CONTAINER_STYLE = css` const ANIMATION_STYLE = css` @media ${RESPONSIVENESS.touchscreenMediaQuerySpecs} { width: 27rem; - height: 18.75rem; + height: 20.25rem; } ` diff --git a/app/src/organisms/ErrorRecoveryFlows/shared/RecoveryInterventionModal.tsx b/app/src/organisms/ErrorRecoveryFlows/shared/RecoveryInterventionModal.tsx index ccfaa8376ba..82974023805 100644 --- a/app/src/organisms/ErrorRecoveryFlows/shared/RecoveryInterventionModal.tsx +++ b/app/src/organisms/ErrorRecoveryFlows/shared/RecoveryInterventionModal.tsx @@ -63,6 +63,7 @@ const SMALL_MODAL_STYLE = css` padding: ${SPACING.spacing32}; height: 100%; overflow: ${OVERFLOW_HIDDEN}; + user-select: none; } ` const LARGE_MODAL_STYLE = css` @@ -73,5 +74,6 @@ const LARGE_MODAL_STYLE = css` @media ${RESPONSIVENESS.touchscreenMediaQuerySpecs} { height: 100%; overflow: ${OVERFLOW_HIDDEN}; + user-select: none; } ` diff --git a/app/src/organisms/ErrorRecoveryFlows/shared/TwoColLwInfoAndDeck.tsx b/app/src/organisms/ErrorRecoveryFlows/shared/TwoColLwInfoAndDeck.tsx index e378b25d769..00fa95072c1 100644 --- a/app/src/organisms/ErrorRecoveryFlows/shared/TwoColLwInfoAndDeck.tsx +++ b/app/src/organisms/ErrorRecoveryFlows/shared/TwoColLwInfoAndDeck.tsx @@ -102,9 +102,9 @@ export function TwoColLwInfoAndDeck( >['infoProps']['type'] => { switch (selectedRecoveryOption) { case MANUAL_MOVE_AND_SKIP.ROUTE: - case MANUAL_REPLACE_AND_RETRY.ROUTE: return 'location-arrow-location' default: + case MANUAL_REPLACE_AND_RETRY.ROUTE: return 'location' } } diff --git a/app/src/organisms/ErrorRecoveryFlows/shared/__tests__/TwoColLwInfoAndDeck.test.tsx b/app/src/organisms/ErrorRecoveryFlows/shared/__tests__/TwoColLwInfoAndDeck.test.tsx index 15094e5aacb..0629038f800 100644 --- a/app/src/organisms/ErrorRecoveryFlows/shared/__tests__/TwoColLwInfoAndDeck.test.tsx +++ 
b/app/src/organisms/ErrorRecoveryFlows/shared/__tests__/TwoColLwInfoAndDeck.test.tsx @@ -98,7 +98,7 @@ describe('TwoColLwInfoAndDeck', () => { expect(vi.mocked(LeftColumnLabwareInfo)).toHaveBeenCalledWith( expect.objectContaining({ title: 'Manually replace labware on deck', - type: 'location-arrow-location', + type: 'location', bannerText: 'Ensure labware is accurately placed in the slot to prevent further errors.', }), diff --git a/app/src/organisms/InterventionModal/PauseInterventionContent.tsx b/app/src/organisms/InterventionModal/PauseInterventionContent.tsx index 72f09e522b6..bb39eadb698 100644 --- a/app/src/organisms/InterventionModal/PauseInterventionContent.tsx +++ b/app/src/organisms/InterventionModal/PauseInterventionContent.tsx @@ -10,9 +10,8 @@ import { Flex, RESPONSIVENESS, SPACING, - TYPOGRAPHY, useInterval, - LegacyStyledText, + StyledText, } from '@opentrons/components' import { EMPTY_TIMESTAMP } from '/app/resources/runs' @@ -61,20 +60,6 @@ const PAUSE_HEADER_STYLE = css` } ` -const PAUSE_TEXT_STYLE = css` - ${TYPOGRAPHY.h1Default} - @media ${RESPONSIVENESS.touchscreenMediaQuerySpecs} { - ${TYPOGRAPHY.level4HeaderSemiBold} - } -` - -const PAUSE_TIME_STYLE = css` - ${TYPOGRAPHY.h1Default} - @media ${RESPONSIVENESS.touchscreenMediaQuerySpecs} { - ${TYPOGRAPHY.level1Header} - } -` - interface PauseHeaderProps { startedAt: string | null } @@ -95,10 +80,15 @@ function PauseHeader({ startedAt }: PauseHeaderProps): JSX.Element { return ( - + {i18n.format(t('paused_for'), 'capitalize')} - - {runTime} + + + {runTime} + ) } diff --git a/app/src/organisms/InterventionModal/index.tsx b/app/src/organisms/InterventionModal/index.tsx index 4e837b9458a..864bc99a231 100644 --- a/app/src/organisms/InterventionModal/index.tsx +++ b/app/src/organisms/InterventionModal/index.tsx @@ -165,7 +165,7 @@ export function InterventionModal({ } })() - const { iconName, headerTitle, headerTitleOnDevice } = (() => { + const { iconName, headerTitle, headerTitleOnDevice, iconSize } = (() => { switch (command.commandType) { case 'waitForResume': case 'pause': @@ -173,12 +173,14 @@ export function InterventionModal({ iconName: 'pause-circle' as IconName, headerTitle: t('pause_on', { robot_name: robotName }), headerTitleOnDevice: t('pause'), + iconSize: SPACING.spacing32, } case 'moveLabware': return { iconName: 'move-xy-circle' as IconName, headerTitle: t('move_labware_on', { robot_name: robotName }), headerTitleOnDevice: t('move_labware'), + iconSize: SPACING.spacing32, } default: console.warn( @@ -189,6 +191,7 @@ export function InterventionModal({ iconName: null, headerTitle: '', headerTitleOnDevice: '', + iconSize: undefined, } } })() @@ -228,6 +231,7 @@ export function InterventionModal({ iconHeading={{headerTitle}} iconName={iconName} type="intervention-required" + iconSize={iconSize} > {childContent} diff --git a/app/src/organisms/LabwarePositionCheck/CheckItem.tsx b/app/src/organisms/LabwarePositionCheck/CheckItem.tsx index c9050b5dd3f..5d5008554a6 100644 --- a/app/src/organisms/LabwarePositionCheck/CheckItem.tsx +++ b/app/src/organisms/LabwarePositionCheck/CheckItem.tsx @@ -156,6 +156,13 @@ export const CheckItem = (props: CheckItemProps): JSX.Element | null => { t as TFunction, i18n ) + const slotOnlyDisplayLocation = getDisplayLocation( + location, + labwareDefs, + t as TFunction, + i18n, + true + ) const labwareDisplayName = getLabwareDisplayName(labwareDef) let placeItemInstruction: JSX.Element = ( @@ -445,7 +452,7 @@ export const CheckItem = (props: CheckItemProps): JSX.Element | null => { { 
{...props} header={t('prepare_item_in_location', { item: isTiprack ? t('tip_rack') : t('labware'), - location: displayLocation, + location: slotOnlyDisplayLocation, })} body={ { const { moduleData } = props - const { t } = useTranslation('device_details') + const { t, i18n } = useTranslation(['device_details', 'shared']) const StatusLabelProps = { status: 'Idle', @@ -37,6 +37,10 @@ export const AbsorbanceReaderData = ( break } } + const lidDisplayStatus = + moduleData.lidStatus === 'on' + ? i18n.format(t('shared:closed'), 'capitalize') + : i18n.format(t('shared:open'), 'capitalize') return ( <> @@ -46,7 +50,7 @@ export const AbsorbanceReaderData = ( data-testid="abs_module_data" > {t('abs_reader_lid_status', { - status: moduleData.lidStatus === 'on' ? 'closed' : 'open', + status: lidDisplayStatus, })} diff --git a/app/src/organisms/ODD/ProtocolSetup/ProtocolSetupLiquids/index.tsx b/app/src/organisms/ODD/ProtocolSetup/ProtocolSetupLiquids/index.tsx index 551b7e716cd..153315d294b 100644 --- a/app/src/organisms/ODD/ProtocolSetup/ProtocolSetupLiquids/index.tsx +++ b/app/src/organisms/ODD/ProtocolSetup/ProtocolSetupLiquids/index.tsx @@ -153,26 +153,30 @@ export function LiquidsList(props: LiquidsListProps): JSX.Element { - {liquid.displayName} + {liquid.displayName.length > 33 + ? `${liquid.displayName.substring(0, 33)}...` + : liquid.displayName} - + {getTotalVolumePerLiquidId(liquid.id, labwareByLiquidId)}{' '} {MICRO_LITERS} + - {openItem ? ( { it('renders the first air gap screen, continue, and back buttons', () => { render(props) - screen.getByText('Air gap before aspirating') + screen.getByText('Air gap after aspirating') screen.getByTestId('ChildNavigation_Primary_Button') screen.getByText('Enabled') screen.getByText('Disabled') diff --git a/app/src/organisms/ODD/RunningProtocol/CurrentRunningProtocolCommand.tsx b/app/src/organisms/ODD/RunningProtocol/CurrentRunningProtocolCommand.tsx index 2866c3f95dd..92c1d1f5733 100644 --- a/app/src/organisms/ODD/RunningProtocol/CurrentRunningProtocolCommand.tsx +++ b/app/src/organisms/ODD/RunningProtocol/CurrentRunningProtocolCommand.tsx @@ -149,7 +149,6 @@ export function CurrentRunningProtocolCommand({ }: CurrentRunningProtocolCommandProps): JSX.Element | null { const { t } = useTranslation('run_details') const { data: mostRecentCommandData } = useNotifyAllCommandsQuery(runId, { - cursor: null, pageLength: 1, }) diff --git a/app/src/pages/ODD/RobotSettingsDashboard/RobotSettingsList.tsx b/app/src/pages/ODD/RobotSettingsDashboard/RobotSettingsList.tsx index 043b2b8b843..cbc0d68e353 100644 --- a/app/src/pages/ODD/RobotSettingsDashboard/RobotSettingsList.tsx +++ b/app/src/pages/ODD/RobotSettingsDashboard/RobotSettingsList.tsx @@ -200,7 +200,7 @@ export function RobotSettingsList(props: RobotSettingsListProps): JSX.Element { settingName={t('app_settings:error_recovery_mode')} dataTestId="RobotSettingButton_error_recovery_mode" settingInfo={t('app_settings:error_recovery_mode_description')} - iconName="recovery" + iconName="recovery-alt" rightElement={} onClick={toggleERSettings} /> diff --git a/app/src/pages/ODD/RobotSettingsDashboard/__tests__/RobotSettingsDashboard.test.tsx b/app/src/pages/ODD/RobotSettingsDashboard/__tests__/RobotSettingsDashboard.test.tsx index 00b70120809..9fa824ec0b9 100644 --- a/app/src/pages/ODD/RobotSettingsDashboard/__tests__/RobotSettingsDashboard.test.tsx +++ b/app/src/pages/ODD/RobotSettingsDashboard/__tests__/RobotSettingsDashboard.test.tsx @@ -113,7 +113,7 @@ describe('RobotSettingsDashboard', () => { 
screen.getByText('Robot System Version') screen.getByText('Network Settings') screen.getByText('Status LEDs') - screen.getByText('Error Recovery Mode') + screen.getByText('Recovery mode') screen.getByText( 'Control the strip of color lights on the front of the robot.' ) diff --git a/app/src/pages/ODD/RunSummary/index.tsx b/app/src/pages/ODD/RunSummary/index.tsx index 98a53f591d4..86aec6aaf87 100644 --- a/app/src/pages/ODD/RunSummary/index.tsx +++ b/app/src/pages/ODD/RunSummary/index.tsx @@ -147,15 +147,15 @@ export function RunSummary(): JSX.Element { const { closeCurrentRun } = useCloseCurrentRun() // Close the current run only if it's active and then execute the onSuccess callback. Prefer this wrapper over // closeCurrentRun directly, since the callback is swallowed if currentRun is null. - const closeCurrentRunIfValid = (onSuccess?: () => void): void => { + const closeCurrentRunIfValid = (onSettled?: () => void): void => { if (isRunCurrent) { closeCurrentRun({ - onSuccess: () => { - onSuccess?.() + onSettled: () => { + onSettled?.() }, }) } else { - onSuccess?.() + onSettled?.() } } const [showRunFailedModal, setShowRunFailedModal] = useState(false) @@ -240,15 +240,14 @@ export function RunSummary(): JSX.Element { const runSummaryNoFixit = useCurrentRunCommands({ includeFixitCommands: false, pageLength: 1, - cursor: null, }) useEffect(() => { if ( isRunCurrent && runSummaryNoFixit != null && + runSummaryNoFixit.length > 0 && !lastRunCommandPromptedErrorRecovery(runSummaryNoFixit) ) { - console.log('HITTING THIS') void determineTipStatus() } }, [runSummaryNoFixit, isRunCurrent]) diff --git a/app/src/pages/ODD/RunningProtocol/__tests__/RunningProtocol.test.tsx b/app/src/pages/ODD/RunningProtocol/__tests__/RunningProtocol.test.tsx index f98666d3cbd..33d6f79e9a7 100644 --- a/app/src/pages/ODD/RunningProtocol/__tests__/RunningProtocol.test.tsx +++ b/app/src/pages/ODD/RunningProtocol/__tests__/RunningProtocol.test.tsx @@ -146,7 +146,6 @@ describe('RunningProtocol', () => { .thenReturn(mockRobotSideAnalysis) when(vi.mocked(useNotifyAllCommandsQuery)) .calledWith(RUN_ID, { - cursor: null, pageLength: 1, }) .thenReturn(mockUseAllCommandsResponseNonDeterministic) diff --git a/app/src/resources/instruments/useTipAttachmentStatus.ts b/app/src/resources/instruments/useTipAttachmentStatus.ts index ee0d6449ea6..b08d5ea5270 100644 --- a/app/src/resources/instruments/useTipAttachmentStatus.ts +++ b/app/src/resources/instruments/useTipAttachmentStatus.ts @@ -81,7 +81,6 @@ export function useTipAttachmentStatus( getCommands(host as HostConfig, runId, { includeFixitCommands: false, pageLength: 1, - cursor: null, }), ]) .then(([attachedInstruments, currentState, commandsData]) => { diff --git a/app/src/resources/protocols/hooks/useRunningStepCounts.ts b/app/src/resources/protocols/hooks/useRunningStepCounts.ts index 915676ba5ec..f3a7cb5028f 100644 --- a/app/src/resources/protocols/hooks/useRunningStepCounts.ts +++ b/app/src/resources/protocols/hooks/useRunningStepCounts.ts @@ -3,6 +3,7 @@ import { useMostRecentCompletedAnalysis } from '/app/resources/runs' import { useLastRunProtocolCommand } from './useLastRunProtocolCommand' import type { CommandsData } from '@opentrons/api-client' +import { getRunningStepCountsFrom } from '/app/resources/protocols' export interface StepCounts { /* Excludes "fixit" commands. Returns null if the step is not found. */ @@ -35,21 +36,5 @@ export function useRunningStepCounts( commandsData ?? 
null ) - const lastRunAnalysisCommandIndex = analysisCommands.findIndex( - c => c.key === lastRunCommandNoFixit?.key - ) - - const currentStepNumberByAnalysis = - lastRunAnalysisCommandIndex === -1 ? null : lastRunAnalysisCommandIndex + 1 - - const hasRunDiverged = - lastRunCommandNoFixit?.key == null || currentStepNumberByAnalysis == null - - const totalStepCount = !hasRunDiverged ? analysisCommands.length : null - - return { - currentStepNumber: currentStepNumberByAnalysis, - totalStepCount, - hasRunDiverged, - } + return getRunningStepCountsFrom(analysisCommands, lastRunCommandNoFixit) } diff --git a/app/src/resources/protocols/utils.ts b/app/src/resources/protocols/utils.ts index 82d62e8cc0b..cfee1b61eb6 100644 --- a/app/src/resources/protocols/utils.ts +++ b/app/src/resources/protocols/utils.ts @@ -1,4 +1,6 @@ import type { RunTimeCommand } from '@opentrons/shared-data' +import type { RunCommandSummary } from '@opentrons/api-client' +import type { StepCounts } from '/app/resources/protocols/hooks' export function isGripperInCommands(commands: RunTimeCommand[]): boolean { return ( @@ -8,3 +10,27 @@ export function isGripperInCommands(commands: RunTimeCommand[]): boolean { ) ?? false ) } + +// See useRunningStepCounts. +export function getRunningStepCountsFrom( + analysisCommands: RunTimeCommand[], + lastRunProtocolCommand: RunCommandSummary | null +): StepCounts { + const lastRunAnalysisCommandIndex = analysisCommands.findIndex( + c => c.key === lastRunProtocolCommand?.key + ) + + const currentStepNumberByAnalysis = + lastRunAnalysisCommandIndex === -1 ? null : lastRunAnalysisCommandIndex + 1 + + const hasRunDiverged = + lastRunProtocolCommand?.key == null || currentStepNumberByAnalysis == null + + const totalStepCount = !hasRunDiverged ? analysisCommands.length : null + + return { + currentStepNumber: currentStepNumberByAnalysis, + totalStepCount, + hasRunDiverged, + } +} diff --git a/app/src/resources/runs/__tests__/useRunTimestamps.test.ts b/app/src/resources/runs/__tests__/useRunTimestamps.test.ts index 1417be1e96d..cbe714472e9 100644 --- a/app/src/resources/runs/__tests__/useRunTimestamps.test.ts +++ b/app/src/resources/runs/__tests__/useRunTimestamps.test.ts @@ -24,7 +24,7 @@ vi.mock('../useNotifyRunQuery') describe('useRunTimestamps hook', () => { beforeEach(() => { when(useRunCommands) - .calledWith(RUN_ID_2, { cursor: null, pageLength: 1 }, expect.any(Object)) + .calledWith(RUN_ID_2, { pageLength: 1 }, expect.any(Object)) .thenReturn([mockCommand.data as any]) }) diff --git a/app/src/resources/runs/index.ts b/app/src/resources/runs/index.ts index 5c97434dd31..d1d84d3bc1b 100644 --- a/app/src/resources/runs/index.ts +++ b/app/src/resources/runs/index.ts @@ -29,3 +29,4 @@ export * from './useModuleCalibrationStatus' export * from './useProtocolAnalysisErrors' export * from './useLastRunCommand' export * from './useRunStatuses' +export * from './useUpdateRecoveryPolicyWithStrategy' diff --git a/app/src/resources/runs/useLastRunCommand.ts b/app/src/resources/runs/useLastRunCommand.ts index b061b46c7c8..a8ee6e249d3 100644 --- a/app/src/resources/runs/useLastRunCommand.ts +++ b/app/src/resources/runs/useLastRunCommand.ts @@ -18,7 +18,7 @@ export function useLastRunCommand( const runStatus = useRunStatus(runId) const { data: commandsData } = useNotifyAllCommandsQuery( runId, - { cursor: null, pageLength: 1 }, + { pageLength: 1 }, { ...options, refetchInterval: diff --git a/app/src/resources/runs/useNotifyAllCommandsQuery.ts b/app/src/resources/runs/useNotifyAllCommandsQuery.ts index 
12bafb21ef3..e0082d26dd2 100644 --- a/app/src/resources/runs/useNotifyAllCommandsQuery.ts +++ b/app/src/resources/runs/useNotifyAllCommandsQuery.ts @@ -3,23 +3,12 @@ import { useAllCommandsQuery } from '@opentrons/react-api-client' import { useNotifyDataReady } from '../useNotifyDataReady' import type { UseQueryResult } from 'react-query' -import type { - CommandsData, - GetRunCommandsParams, - GetCommandsParams, -} from '@opentrons/api-client' +import type { CommandsData, GetRunCommandsParams } from '@opentrons/api-client' import type { QueryOptionsWithPolling } from '../useNotifyDataReady' -const DEFAULT_PAGE_LENGTH = 30 - -export const DEFAULT_PARAMS: GetCommandsParams = { - cursor: null, - pageLength: DEFAULT_PAGE_LENGTH, -} - export function useNotifyAllCommandsQuery( runId: string | null, - params?: GetRunCommandsParams | null, + params?: GetRunCommandsParams, options: QueryOptionsWithPolling = {} ): UseQueryResult { // Assume the useAllCommandsQuery() response can only change when the command links change. @@ -32,19 +21,8 @@ export function useNotifyAllCommandsQuery( topic: 'robot-server/runs/commands_links', options, }) - const nullCheckedParams = params ?? DEFAULT_PARAMS - - const nullCheckedFixitCommands = params?.includeFixitCommands ?? null - const finalizedNullCheckParams = { - ...nullCheckedParams, - includeFixitCommands: nullCheckedFixitCommands, - } - const httpQueryResult = useAllCommandsQuery( - runId, - finalizedNullCheckParams, - queryOptionsNotify - ) + const httpQueryResult = useAllCommandsQuery(runId, params, queryOptionsNotify) if (shouldRefetch) { void httpQueryResult.refetch() diff --git a/app/src/resources/runs/useRunTimestamps.ts b/app/src/resources/runs/useRunTimestamps.ts index f3ae3f1a803..b19cbe42193 100644 --- a/app/src/resources/runs/useRunTimestamps.ts +++ b/app/src/resources/runs/useRunTimestamps.ts @@ -30,7 +30,7 @@ export function useRunTimestamps(runId: string | null): RunTimestamps { const runCommands = useRunCommands( runId, - { cursor: null, pageLength: 1 }, + { pageLength: 1 }, { enabled: runStatus === RUN_STATUS_SUCCEEDED || diff --git a/app/src/resources/runs/useUpdateRecoveryPolicyWithStrategy.ts b/app/src/resources/runs/useUpdateRecoveryPolicyWithStrategy.ts new file mode 100644 index 00000000000..f26eb507afc --- /dev/null +++ b/app/src/resources/runs/useUpdateRecoveryPolicyWithStrategy.ts @@ -0,0 +1,60 @@ +import { + useHost, + useUpdateErrorRecoveryPolicy, +} from '@opentrons/react-api-client' +import { getErrorRecoveryPolicy } from '@opentrons/api-client' + +import type { + HostConfig, + RecoveryPolicyRulesParams, + UpdateErrorRecoveryPolicyResponse, +} from '@opentrons/api-client' + +/** + * append - Add a new policy rule to the end of the existing recovery policy. 
+ */ +export type UpdatePolicyStrategy = 'append' + +export interface UpdateErrorRecoveryPolicyWithStrategy { + runId: string + newPolicy: RecoveryPolicyRulesParams[number] + strategy: UpdatePolicyStrategy +} + +export function useUpdateRecoveryPolicyWithStrategy( + runId: string +): ( + newPolicy: UpdateErrorRecoveryPolicyWithStrategy['newPolicy'], + strategy: UpdateErrorRecoveryPolicyWithStrategy['strategy'] +) => Promise { + const host = useHost() + + const { + mutateAsync: updateErrorRecoveryPolicy, + } = useUpdateErrorRecoveryPolicy(runId) + + return ( + newPolicy: UpdateErrorRecoveryPolicyWithStrategy['newPolicy'], + strategy: UpdateErrorRecoveryPolicyWithStrategy['strategy'] + ) => + getErrorRecoveryPolicy(host as HostConfig, runId).then(res => { + const existingPolicyRules = res.data.data.policyRules.map(rule => ({ + commandType: rule.matchCriteria.command.commandType, + errorType: rule.matchCriteria.command.error.errorType, + ifMatch: rule.ifMatch, + })) + + const buildUpdatedPolicy = (): RecoveryPolicyRulesParams => { + switch (strategy) { + case 'append': + return [...existingPolicyRules, newPolicy] + default: { + console.error('Unhandled policy strategy, defaulting to append.') + return [...existingPolicyRules, newPolicy] + } + } + } + + return updateErrorRecoveryPolicy(buildUpdatedPolicy()) + }) +} diff --git a/components/src/atoms/InputField/index.tsx b/components/src/atoms/InputField/index.tsx index 06b4ad68533..001374f71ce 100644 --- a/components/src/atoms/InputField/index.tsx +++ b/components/src/atoms/InputField/index.tsx @@ -1,4 +1,4 @@ -import * as React from 'react' +import { forwardRef } from 'react' import styled, { css } from 'styled-components' import { Flex } from '../../primitives' @@ -15,7 +15,15 @@ import { RESPONSIVENESS, SPACING, TYPOGRAPHY } from '../../ui-style-constants' import { Tooltip } from '../Tooltip' import { useHoverTooltip } from '../../tooltips' import { StyledText } from '../StyledText' + +import type { + ChangeEventHandler, + FocusEvent, + MouseEvent, + ReactNode, +} from 'react' import type { IconName } from '../../icons' + export const INPUT_TYPE_NUMBER = 'number' as const export const LEGACY_INPUT_TYPE_TEXT = 'text' as const export const LEGACY_INPUT_TYPE_PASSWORD = 'password' as const @@ -25,7 +33,7 @@ export interface InputFieldProps { /** field is disabled if value is true */ disabled?: boolean /** change handler */ - onChange?: React.ChangeEventHandler + onChange?: ChangeEventHandler /** name of field in form */ name?: string /** optional ID of element */ @@ -33,7 +41,7 @@ export interface InputFieldProps { /** placeholder text */ placeholder?: string /** optional suffix component, appears to the right of input text */ - units?: React.ReactNode + units?: ReactNode /** current value of text in box, defaults to '' */ value?: string | number | null /** if included, InputField will use error style and display error instead of caption */ @@ -50,11 +58,11 @@ export interface InputFieldProps { | typeof LEGACY_INPUT_TYPE_PASSWORD | typeof INPUT_TYPE_NUMBER /** mouse click handler */ - onClick?: (event: React.MouseEvent) => unknown + onClick?: (event: MouseEvent) => unknown /** focus handler */ - onFocus?: (event: React.FocusEvent) => unknown + onFocus?: (event: FocusEvent) => unknown /** blur handler */ - onBlur?: (event: React.FocusEvent) => unknown + onBlur?: (event: FocusEvent) => unknown /** makes input field read-only */ readOnly?: boolean /** html tabindex property */ @@ -88,7 +96,7 @@ export interface InputFieldProps { padding?: string } 
-export const InputField = React.forwardRef( +export const InputField = forwardRef( (props, ref): JSX.Element => { const { placeholder, @@ -246,13 +254,13 @@ export const InputField = React.forwardRef( color: ${props.disabled ? COLORS.grey40 : COLORS.grey50}; font: ${TYPOGRAPHY.bodyTextRegular}; text-align: ${TYPOGRAPHY.textAlignRight}; + white-space: ${NO_WRAP}; @media ${RESPONSIVENESS.touchscreenMediaQuerySpecs} { color: ${props.disabled ? COLORS.grey40 : COLORS.grey50}; font-size: ${TYPOGRAPHY.fontSize22}; font-weight: ${TYPOGRAPHY.fontWeightRegular}; line-height: ${TYPOGRAPHY.lineHeight28}; justify-content: ${textAlign}; - white-space: ${NO_WRAP}; } ` diff --git a/components/src/hardware-sim/BaseDeck/BaseDeck.tsx b/components/src/hardware-sim/BaseDeck/BaseDeck.tsx index fcf71c57ea1..c8f22175952 100644 --- a/components/src/hardware-sim/BaseDeck/BaseDeck.tsx +++ b/components/src/hardware-sim/BaseDeck/BaseDeck.tsx @@ -1,5 +1,4 @@ -import * as React from 'react' - +import { Fragment } from 'react' import { getDeckDefFromRobotType, getModuleDef2, @@ -34,6 +33,7 @@ import { StagingAreaFixture } from './StagingAreaFixture' import { WasteChuteFixture } from './WasteChuteFixture' import { WasteChuteStagingAreaFixture } from './WasteChuteStagingAreaFixture' +import type { ComponentProps, ReactNode } from 'react' import type { Svg } from '../../primitives' import type { CutoutFixtureId, @@ -54,7 +54,7 @@ export interface LabwareOnDeck { wellFill?: WellFill missingTips?: WellGroup /** generic prop to render self-positioned children for each labware */ - labwareChildren?: React.ReactNode + labwareChildren?: ReactNode onLabwareClick?: () => void highlight?: boolean highlightShadow?: boolean @@ -66,9 +66,9 @@ export interface ModuleOnDeck { moduleLocation: ModuleLocation nestedLabwareDef?: LabwareDefinition2 | null nestedLabwareWellFill?: WellFill - innerProps?: React.ComponentProps['innerProps'] + innerProps?: ComponentProps['innerProps'] /** generic prop to render self-positioned children for each module */ - moduleChildren?: React.ReactNode + moduleChildren?: ReactNode onLabwareClick?: () => void highlightLabware?: boolean highlightShadowLabware?: boolean @@ -84,12 +84,12 @@ interface BaseDeckProps { lightFill?: string mediumFill?: string darkFill?: string - children?: React.ReactNode + children?: ReactNode showSlotLabels?: boolean /** whether to make wrapping svg tag animatable via @react-spring/web, defaults to false */ animatedSVG?: boolean /** extra props to pass to svg tag */ - svgProps?: React.ComponentProps + svgProps?: ComponentProps } const LABWARE_OFFSET_DISPLAY_THRESHOLD = 2 @@ -195,7 +195,7 @@ export function BaseDeck(props: BaseDeckProps): JSX.Element { /> ))} {trashBinFixtures.map(fixture => ( - + - + ))} {wasteChuteOnlyFixtures.map(fixture => { if (fixture.cutoutId === WASTE_CHUTE_CUTOUT) { diff --git a/components/src/hardware-sim/Deck/RobotWorkSpace.tsx b/components/src/hardware-sim/Deck/RobotWorkSpace.tsx index bf999139659..5c9663a6ca1 100644 --- a/components/src/hardware-sim/Deck/RobotWorkSpace.tsx +++ b/components/src/hardware-sim/Deck/RobotWorkSpace.tsx @@ -1,8 +1,9 @@ -import * as React from 'react' +import { useRef } from 'react' import { OT2_ROBOT_TYPE } from '@opentrons/shared-data' import { Svg } from '../../primitives' import { DeckFromLayers } from './DeckFromLayers' +import type { ReactNode } from 'react' import type { StyleProps } from '../../primitives' import type { DeckDefinition, DeckSlot } from '@opentrons/shared-data' @@ -17,7 +18,7 @@ export interface 
RobotWorkSpaceRenderProps { export interface RobotWorkSpaceProps extends StyleProps { deckDef?: DeckDefinition viewBox?: string | null - children?: (props: RobotWorkSpaceRenderProps) => React.ReactNode + children?: (props: RobotWorkSpaceRenderProps) => ReactNode deckLayerBlocklist?: string[] // optional boolean to show the OT-2 deck from deck defintion layers showDeckLayers?: boolean @@ -36,7 +37,7 @@ export function RobotWorkSpace(props: RobotWorkSpaceProps): JSX.Element | null { id, ...styleProps } = props - const wrapperRef = React.useRef(null) + const wrapperRef = useRef(null) // NOTE: getScreenCTM in Chrome a DOMMatrix type, // in Firefox the same fn returns a deprecated SVGMatrix. diff --git a/components/src/hooks/useMenuHandleClickOutside.tsx b/components/src/hooks/useMenuHandleClickOutside.tsx index f3e567a25f6..bf93df4a395 100644 --- a/components/src/hooks/useMenuHandleClickOutside.tsx +++ b/components/src/hooks/useMenuHandleClickOutside.tsx @@ -1,24 +1,26 @@ -import * as React from 'react' +import { useState } from 'react' import { COLORS } from '../helix-design-system' import { Overlay } from '../modals' +import type { Dispatch, MouseEventHandler, SetStateAction } from 'react' + interface MenuHandleClickOutside { menuOverlay: JSX.Element - handleOverflowClick: React.MouseEventHandler + handleOverflowClick: MouseEventHandler showOverflowMenu: boolean - setShowOverflowMenu: React.Dispatch> + setShowOverflowMenu: Dispatch> } export function useMenuHandleClickOutside(): MenuHandleClickOutside { - const [showOverflowMenu, setShowOverflowMenu] = React.useState(false) + const [showOverflowMenu, setShowOverflowMenu] = useState(false) - const handleOverflowClick: React.MouseEventHandler = e => { + const handleOverflowClick: MouseEventHandler = e => { e.preventDefault() e.stopPropagation() setShowOverflowMenu(currentShowOverflowMenu => !currentShowOverflowMenu) } - const handleClickOutside: React.MouseEventHandler = e => { + const handleClickOutside: MouseEventHandler = e => { e.preventDefault() e.stopPropagation() setShowOverflowMenu(false) diff --git a/components/src/icons/icon-data.ts b/components/src/icons/icon-data.ts index b5e928005dc..c37534a7df6 100644 --- a/components/src/icons/icon-data.ts +++ b/components/src/icons/icon-data.ts @@ -655,6 +655,11 @@ export const ICON_DATA_BY_NAME: Record< 'M24.4 49.6H34.2V38.6H44.2V29.8H34.2V18.8H24.4V29.8H13.4V38.6H24.4V49.6ZM28.8 72C20.46 69.9 13.575 65.115 8.145 57.645C2.715 50.175 0 41.88 0 32.76V10.8L28.8 0L57.6 10.8V32.76C57.6 41.88 54.885 50.175 49.455 57.645C44.025 65.115 37.14 69.9 28.8 72Z', viewBox: '0 0 58 72', }, + 'recovery-alt': { + path: + 'M14.5 27.2563H17.5V11.4062H14.5V27.2563ZM16 40.4062C11.3333 39.2396 7.5 36.5312 4.5 32.2812C1.5 28.0312 0 23.3729 0 18.3062V6.40625L16 0.40625L32 6.40625V18.3062C32 23.3729 30.5 28.0312 27.5 32.2812C24.5 36.5312 20.6667 39.2396 16 40.4062ZM16 37.3063C19.8333 36.0396 22.9583 33.6479 25.375 30.1313C27.7917 26.6146 29 22.6729 29 18.3062V8.50625L16 3.60625L3 8.50625V18.3062C3 22.6729 4.20833 26.6146 6.625 30.1313C9.04167 33.6479 12.1667 36.0396 16 37.3063Z M23.85 17.9062V20.9062H8V17.9062H23.85Z M17.5 17.9062H14.5V20.9062H17.5V17.9062Z', + viewBox: '0 0 32 41', + }, 'radiobox-blank': { path: 'M12,20A8,8 0 0,1 4,12A8,8 0 0,1 12,4A8,8 0 0,1 20,12A8,8 0 0,1 12,20M12,2A10,10 0 0,0 2,12A10,10 0 0,0 12,22A10,10 0 0,0 22,12A10,10 0 0,0 12,2Z', diff --git a/components/src/organisms/Toolbox/index.tsx b/components/src/organisms/Toolbox/index.tsx index 147b8b0eda2..3064469f247 100644 --- 
a/components/src/organisms/Toolbox/index.tsx +++ b/components/src/organisms/Toolbox/index.tsx @@ -1,4 +1,4 @@ -import * as React from 'react' +import { useEffect, useRef, useState } from 'react' import { Box, Btn, Flex } from '../../primitives' import { ALIGN_CENTER, @@ -53,10 +53,8 @@ export function Toolbox(props: ToolboxProps): JSX.Element { position = POSITION_FIXED, } = props - const slideOutRef = React.useRef(null) - const [isScrolledToBottom, setIsScrolledToBottom] = React.useState( - false - ) + const slideOutRef = useRef(null) + const [isScrolledToBottom, setIsScrolledToBottom] = useState(false) const handleScroll = (): void => { if (slideOutRef.current == null) return const { scrollTop, scrollHeight, clientHeight } = slideOutRef.current @@ -67,7 +65,7 @@ export function Toolbox(props: ToolboxProps): JSX.Element { } } - React.useEffect(() => { + useEffect(() => { handleScroll() }, [slideOutRef]) diff --git a/hardware-testing/hardware_testing/modules/__init__.py b/hardware-testing/hardware_testing/modules/__init__.py new file mode 100644 index 00000000000..abbbb3c6ffc --- /dev/null +++ b/hardware-testing/hardware_testing/modules/__init__.py @@ -0,0 +1 @@ +"""Module QC Scripts.""" diff --git a/hardware-testing/hardware_testing/modules/flex_stacker_evt_qc/driver.py b/hardware-testing/hardware_testing/modules/flex_stacker_evt_qc/driver.py index 443140573bd..afa24d1f315 100644 --- a/hardware-testing/hardware_testing/modules/flex_stacker_evt_qc/driver.py +++ b/hardware-testing/hardware_testing/modules/flex_stacker_evt_qc/driver.py @@ -117,12 +117,17 @@ def __init__(self, port: str, simulating: bool = False) -> None: """Constructor.""" self._simulating = simulating if not self._simulating: - self._serial = serial.Serial(port, baudrate=STACKER_FREQ) + self._serial = serial.Serial(port, baudrate=STACKER_FREQ, timeout=60) - def _send_and_recv(self, msg: str, guard_ret: str = "") -> str: + def _send_and_recv( + self, msg: str, guard_ret: str = "", response_required: bool = True + ) -> str: """Internal utility to send a command and receive the response.""" assert not self._simulating + self._serial.reset_input_buffer() self._serial.write(msg.encode()) + if not response_required: + return "OK\n" ret = self._serial.readline() if guard_ret: if not ret.startswith(guard_ret.encode()): @@ -155,6 +160,12 @@ def set_serial_number(self, sn: str) -> None: return self._send_and_recv(f"M996 {sn}\n", "M996 OK") + def stop_motor(self) -> None: + """Stop motor movement.""" + if self._simulating: + return + self._send_and_recv("M0\n", response_required=False) + def get_limit_switch(self, axis: StackerAxis, direction: Direction) -> bool: """Get limit switch status. 
@@ -243,4 +254,5 @@ def move_to_limit_switch( def __del__(self) -> None: """Close serial port.""" if not self._simulating: + self.stop_motor() self._serial.close() diff --git a/hardware-testing/hardware_testing/modules/flex_stacker_evt_qc/test_estop.py b/hardware-testing/hardware_testing/modules/flex_stacker_evt_qc/test_estop.py index 2a2f24161b7..31df6f1decc 100644 --- a/hardware-testing/hardware_testing/modules/flex_stacker_evt_qc/test_estop.py +++ b/hardware-testing/hardware_testing/modules/flex_stacker_evt_qc/test_estop.py @@ -56,14 +56,23 @@ def run(driver: FlexStacker, report: CSVReport, section: str) -> None: report(section, "trigger-estop", [CSVResult.PASS]) - print("try to move X axis...") - driver.move_in_mm(StackerAxis.X, x_limit.opposite().distance(10)) - print("X should not move") - report( - section, - "x-move-disabled", - [CSVResult.from_bool(driver.get_limit_switch(StackerAxis.X, x_limit))], - ) + print("Check X limit switch...") + limit_switch_triggered = driver.get_limit_switch(StackerAxis.X, x_limit) + if limit_switch_triggered: + report( + section, + "x-move-disabled", + [CSVResult.from_bool(False)], + ) + else: + print("try to move X axis back to the limit switch...") + driver.move_in_mm(StackerAxis.X, x_limit.distance(3)) + print("X should not move") + report( + section, + "x-move-disabled", + [CSVResult.from_bool(not driver.get_limit_switch(StackerAxis.X, x_limit))], + ) print("try to move Z axis...") driver.move_in_mm(StackerAxis.Z, z_limit.opposite().distance(10)) @@ -74,14 +83,23 @@ def run(driver: FlexStacker, report: CSVReport, section: str) -> None: [CSVResult.from_bool(driver.get_limit_switch(StackerAxis.Z, z_limit))], ) - print("try to move L axis...") - driver.move_in_mm(StackerAxis.L, l_limit.opposite().distance(10)) - print("L should not move") - report( - section, - "l-move-disabled", - [CSVResult.from_bool(driver.get_limit_switch(StackerAxis.L, l_limit))], - ) + print("Check L limit switch...") + limit_switch_triggered = driver.get_limit_switch(StackerAxis.L, l_limit) + if limit_switch_triggered: + report( + section, + "l-move-disabled", + [CSVResult.from_bool(False)], + ) + else: + print("try to move L axis back to the limit switch...") + driver.move_in_mm(StackerAxis.L, l_limit.distance(1)) + print("L should not move") + report( + section, + "l-move-disabled", + [CSVResult.from_bool(not driver.get_limit_switch(StackerAxis.L, l_limit))], + ) if not driver._simulating: ui.get_user_ready("Untrigger the E-Stop") diff --git a/hardware-testing/hardware_testing/modules/flex_stacker_evt_qc/utils.py b/hardware-testing/hardware_testing/modules/flex_stacker_evt_qc/utils.py index 2aca90c8886..d30d6eea783 100644 --- a/hardware-testing/hardware_testing/modules/flex_stacker_evt_qc/utils.py +++ b/hardware-testing/hardware_testing/modules/flex_stacker_evt_qc/utils.py @@ -14,7 +14,7 @@ def test_limit_switches_per_direction( direction: Direction, report: CSVReport, section: str, - speed: float = 50.0, + speed: float = 10.0, ) -> None: """Sequence to test the limit switch for one direction.""" ui.print_header(f"{axis} Limit Switch - {direction} direction") @@ -26,7 +26,7 @@ def test_limit_switches_per_direction( # move until the limit switch is reached print(f"moving towards {direction} limit switch...\n") - driver.move_to_limit_switch(axis, direction, MoveParams(max_speed=speed)) + driver.move_to_limit_switch(axis, direction, MoveParams(max_speed_discont=speed)) result = driver.get_limit_switch(axis, direction) opposite_result = not driver.get_limit_switch(axis, 
direction.opposite()) print(f"{direction} switch triggered: {result}") diff --git a/hardware-testing/hardware_testing/scripts/ABRAsairScript.py b/hardware-testing/hardware_testing/scripts/ABRAsairScript.py index 710d3c17578..5f94faad1e8 100644 --- a/hardware-testing/hardware_testing/scripts/ABRAsairScript.py +++ b/hardware-testing/hardware_testing/scripts/ABRAsairScript.py @@ -1,5 +1,4 @@ """ABR Asair Automation Script!""" -import sys import paramiko as pmk import time import json @@ -62,19 +61,27 @@ def run_command_on_ip( print(f"Error running command on {curr_ip}: {e}") -def run(file_name: str) -> List[Any]: +def run() -> List[Any]: """Run asair script module.""" # Load Robot IPs cmd = "nohup python3 -m hardware_testing.scripts.abr_asair_sensor {name} {duration} {frequency}" cd = "cd /opt/opentrons-robot-server && " robot_ips = [] robot_names = [] - with open(file_name) as file: - file_dict = json.load(file) - robot_dict = file_dict.get("ip_address_list") - robot_ips = list(robot_dict.keys()) - robot_names = list(robot_dict.values()) - print("Executing Script on All Robots:") + + robot = input("Enter IP of robot (type 'all' to run on all robots): ") + if robot.lower() == "all": + ip_file = input("Path of IPs.json: ") + with open(ip_file) as file: + file_dict = json.load(file) + robot_dict = file_dict.get("ip_address_list") + robot_ips = list(robot_dict.keys()) + robot_names = list(robot_dict.values()) + else: + robot_name = input("What is the name of the robot? ") + robot_ips.append(robot) + robot_names.append(robot_name) + print("Executing Script on Robot(s):") # Launch the processes for each robot. processes = [] for index in range(len(robot_ips)): @@ -87,8 +94,7 @@ def run(file_name: str) -> List[Any]: if __name__ == "__main__": # Wait for all processes to finish. - file_name = sys.argv[1] - processes = run(file_name) + processes = run() for process in processes: process.start() time.sleep(20) diff --git a/opentrons-ai-client/src/organisms/InstrumentsSection/index.tsx b/opentrons-ai-client/src/organisms/InstrumentsSection/index.tsx index 6f815b45a5d..2986654df24 100644 --- a/opentrons-ai-client/src/organisms/InstrumentsSection/index.tsx +++ b/opentrons-ai-client/src/organisms/InstrumentsSection/index.tsx @@ -84,6 +84,10 @@ export function InstrumentsSection(): JSX.Element | null { name ) ) + .filter(name => { + const specs = getPipetteSpecsV2(name) + return !specs?.displayName?.includes('GEN1') + }) .map(name => ({ value: name, name: getPipetteSpecsV2(name)?.displayName ?? '', diff --git a/opentrons-ai-server/api/domain/config_anthropic.py b/opentrons-ai-server/api/domain/config_anthropic.py index beebc16d5ec..718e8f283c9 100644 --- a/opentrons-ai-server/api/domain/config_anthropic.py +++ b/opentrons-ai-server/api/domain/config_anthropic.py @@ -1,23 +1,50 @@ SYSTEM_PROMPT = """ -You are a friendly and knowledgeable AI assistant specializing in Opentrons protocol development. -You help scientists create and optimize protocols using the Opentrons Python API v2. - -Your key responsibilities: -1. Welcome scientists warmly and understand their protocol needs -2. Generate accurate Python protocols using standard Opentrons labware (see standard-loadname-info.md in ) -3. Provide clear explanations and documentation -4. Flag potential safety or compatibility issues -5. 
Suggest protocol optimizations when appropriate - -Important guidelines: -- Always verify labware compatibility before generating protocols -- Include appropriate error handling in generated code -- Provide clear setup instructions and prerequisites -- Flag any potential safety concerns -- Format code examples using standard Python conventions - -If you encounter requests outside your knowledge of Opentrons capabilities, -ask for clarification rather than making assumptions. +You are an expert AI assistant specializing in Opentrons protocol development, +combining deep knowledge of laboratory automation with practical programming expertise. +Your mission is to help scientists automate their laboratory workflows efficiently and +safely using the Opentrons Python API v2 and provided documents in . + + +- Complete mastery of Opentrons Python API v2 +- Deep understanding of laboratory protocols and liquid handling principles +- Expertise in all Opentrons hardware specifications and limitations +- Comprehensive knowledge of supported labware and their compatibility + + +1. Protocol Development & Optimization + - Generate precise, efficient protocols using provided documentation + - Implement proper tip management and resource calculation before code generation + - Use transfer functions optimally to avoid unnecessary loops + - Validate all variables, well positions, and module compatibility + - Follow best practices for error prevention and handling + - Verify sufficient tips and proper deck layout + - Ensure correct API version compatibility (≥2.16 for Flex features) + +2. + - Welcome scientists warmly and understand their protocol needs + - Maintain a professional yet approachable tone + - Ask clarifying questions when requirements are ambiguous + - Provide rationale for technical decisions and recommendations + - Offer alternatives when requested features aren't possible + - Guide users toward best practices + +3. + - Calculate and validate total tip requirements before protocol generation + - Plan efficient tip usage and replacement strategy + - Include explicit tip tracking in protocols + - Track and optimize reagent usage + - Manage deck space efficiently based on provided layout documentation + - Ensure proper module-labware compatibility + - Verify correct adapter usage for temperature-sensitive labware + +4. + - Verify all variables are defined before use + - Confirm tip rack quantity matches transfer operations + - Validate all well positions exist in specified labware + - Check module-labware compatibility + - Verify correct API version for all features + - Ensure proper slot assignments + - Validate sufficient resources for complete protocol execution """ DOCUMENTS = """ @@ -29,14 +56,15 @@ Follow these instructions to handle the user's prompt: 1. : - a) A request to generate a protocol - b) A question about the Opentrons Python API v2 or about details of protocol - c) A common task (e.g., value changes, OT-2 to Flex conversion, slot correction) - d) An unrelated or unclear request - e) A tool calling. If a user calls simulate protocol explicity, then call. - f) A greeting. Respond kindly. + - A request to generate a protocol + - A question about the Opentrons Python API v2 or about details of protocol + - A common task (e.g., value changes, OT-2 to Flex conversion, slot correction) + - An unrelated or unclear request + - A tool calling. If a user calls simulate protocol explicity, then call. + - A greeting. Respond kindly. 
+ - A protocol type (e.g., serial dilution, before generation see serial_dilution_examples.md in - Note: when you respond you dont need mention the category or the type. + Note: when you respond you do not need mention the category or the type. 2. If the prompt is unrelated or unclear, ask the user for clarification. I'm sorry, but your prompt seems unclear. Could you please provide more details? @@ -65,22 +93,17 @@ b) If any crucial information is missing, ask for clarification: - To generate an accurate protocol, I need more information about [missing elements]. - Please provide details about: - [List of missing elements] + To generate an accurate protocol, I need more information about [missing elements]. + Please provide details about: + [List of missing elements] - c) If all necessary information is available, generate the protocol using the following structure: + c) Generate the protocol using the following structure: + - apiLevel and robotType are required otherwise robot does not run. ```python from opentrons import protocol_api - metadata = {{ - 'protocolName': '[Protocol name based on user prompt]', - 'author': 'AI Assistant', - 'description': '[Brief description based on user prompt]' - }} - requirements = {{ 'robotType': '[Robot type: OT-2(default) for Opentrons OT-2, Flex for Opentrons Flex]', 'apiLevel': '[apiLevel, default: 2.19]' @@ -102,9 +125,12 @@ def run(protocol: protocol_api.ProtocolContext): # For Flex protocols using API version 2.16 or later, load trash bin trash = protocol.load_trash_bin('A3') + # any calculation, setup, liquids + # Protocol steps [Step-by-step protocol commands with comments] - [Please make sure that the transfer function is used with the new_tip parameter correctly] + [Please make sure that the transfer function is used with the new_tip parameter explicitly and correctly] + [Arguments for `new_tip` must be explict all the time, default: `new_tip='once'`] ``` d) Use the `transfer` function to handle iterations over wells and volumes. Provide lists of source and @@ -140,8 +166,9 @@ def run(protocol: protocol_api.ProtocolContext): e) In the end, make sure you show generate well-written protocol with proper short but useful comments. + f) If it is to fix, then just fix and do not simulate. -5. Common model issues to avoid: +5. - Model outputs `p300_multi` instead of `p300_multi_gen2`. - Model outputs `thermocyclerModuleV1` instead of `thermocyclerModuleV2`. - Model outputs `opentrons_flex_96_tiprack_50ul` instead of `opentrons_flex_96_filtertiprack_50ul`. @@ -195,19 +222,21 @@ def run(protocol: protocol_api.ProtocolContext): which only has positions A1-D6, causing a KeyError when trying to reference well 'A7'. - Model tries to close thermocycler before opening it. Attempted to access labware inside a closed thermocycler, the thermocycler must be opened first. - - Required Validation Steps: + - - Verify all variables are defined before use - Confirm tip rack quantity matches transfer count - Validate all well positions exist in labware - Check module-labware compatibility - Verify correct API version for all features used + - Verify apiLevel is defined + - Verify tips are sufficient for the protocol to cover all steps 6. If slots are not defined, refer to deck_layout.md for proper slot definitions. Make sure slots are different for different labware. If the source and destination are not defined, then you define yourself but inform user with your choice, because user may want to change them. 7. 
If the request lacks sufficient information to generate a protocol, use casual_examples.md - as a reference to generate a basic protocol. + as a reference to generate a basic protocol. For serial dilution please refer to serial_dilution_examples.md. Remember to use only the information provided in the . Do not introduce any external information or assumptions. diff --git a/opentrons-ai-server/api/storage/docs/serial_dilution_samples.md b/opentrons-ai-server/api/storage/docs/serial_dilution_examples.md similarity index 52% rename from opentrons-ai-server/api/storage/docs/serial_dilution_samples.md rename to opentrons-ai-server/api/storage/docs/serial_dilution_examples.md index ad9a5ee24be..0fa0ed870e2 100644 --- a/opentrons-ai-server/api/storage/docs/serial_dilution_samples.md +++ b/opentrons-ai-server/api/storage/docs/serial_dilution_examples.md @@ -1,4 +1,508 @@ -# Serial dilution examples +# Tutorial: Creating a Serial Dilution Protocol + +## Introduction + +This tutorial will guide you through creating a Python protocol file from scratch. At the end of this process you'll have a complete protocol that can run on a Flex or an OT-2 robot. + +### What You'll Automate + +The lab task that you'll automate in this tutorial is serial dilution: taking a solution and progressively diluting it by transferring it stepwise across a plate from column 1 to column 12. With just a dozen or so lines of code, you can instruct your robot to perform the hundreds of individual pipetting actions necessary to fill an entire 96-well plate. And all of those liquid transfers will be done automatically, so you'll have more time to do other work in your lab. + +### Hardware and Labware + +Before running a protocol, you'll want to have the right kind of hardware and labware ready for your Flex or OT-2. + +- **Flex users** Most Flex code examples will use a Flex 1-Channel 1000 μL pipette. +- **OT-2 users** You can use either a single-channel or 8-channel pipette for this tutorial. Most OT-2 code examples will use a P300 Single-Channel GEN2 pipette. + +The Flex and OT-2 use similar labware for serial dilution. The tutorial code will use the labware listed in the table below, but as long as you have labware of each type you can modify the code to run with your labware. + +| Labware type | Labware name | API load name | +| ------------- | ------------------------------ | --------------------------------- | +| Reservoir | NEST 12 Well Reservoir 15 mL | `nest_12_reservoir_15ml` | +| Well plate | NEST 96 Well Plate 200 µL Flat | `nest_96_wellplate_200ul_flat` | +| Flex tip rack | Opentrons Flex Tips, 200 µL | `opentrons_flex_96_tiprack_200ul` | +| OT-2 tip rack | Opentrons 96 Tip Rack | `opentrons_96_tiprack_300ul` | + +For the liquids, you can use plain water as the diluent and water dyed with food coloring as the solution. + +## Create a Protocol File + +Let's start from scratch to create your serial dilution protocol. Open up a new file in your editor and start with the line: + +```python +from opentrons import protocol_api +``` + +Throughout this documentation, you'll see protocols that begin with the `import` statement shown above. It identifies your code as an Opentrons protocol. This statement is not required, but including it is a good practice and allows most code editors to provide helpful autocomplete suggestions. + +Everything else in the protocol file is required. Next, you'll specify the version of the API you're using. 
Then comes the core of the protocol: defining a single `run()` function that provides the locations of your labware, states which kind of pipettes you'll use, and finally issues the commands that the robot will perform. + +For this tutorial, you'll write very little Python outside of the `run()` function. But for more complex applications it's worth remembering that your protocol file _is_ a Python script, so any Python code that can run on your robot can be a part of a protocol. + +### Metadata + +Every protocol needs to have a metadata dictionary with information about the protocol. At minimum, you need to specify what version of the API the protocol requires. The scripts for this tutorial were validated against API version 2.16, so specify: + +```python +metadata = {"apiLevel": "2.16"} +``` + +You can include any other information you like in the metadata dictionary. The fields `protocolName`, `description`, and `author` are all displayed in the Opentrons App, so it's a good idea to expand the dictionary to include them: + +```python +metadata = { + "apiLevel": "2.16", + "protocolName": "Serial Dilution Tutorial", + "description": """This protocol is the outcome of following the + Python Protocol API Tutorial located at + https://docs.opentrons.com/v2/tutorial.html. It takes a + solution and progressively dilutes it by transferring it + stepwise across a plate.""", + "author": "New API User" +} +``` + +### Requirements + +The `requirements` code block can appear before _or_ after the `metadata` code block in a Python protocol. It uses the following syntax and accepts two arguments: `robotType` and `apiLevel`. + +Whether you need a `requirements` block depends on your robot model and API version. + +- **Flex:** The `requirements` block is always required. And, the API version does not go in the `metadata` section. The API version belongs in the `requirements`. For example: + +```python +requirements = {"robotType": "Flex", "apiLevel": "2.16"} +``` + +- **OT-2:** The `requirements` block is optional, but including it is a recommended best practice, particularly if you're using API version 2.15 or greater. If you do use it, remember to remove the API version from the `metadata`. For example: + +```python +requirements = {"robotType": "OT-2", "apiLevel": "2.16"} +``` + +### The `run()` function + +Now it's time to actually instruct the Flex or OT-2 how to perform serial dilution. All of this information is contained in a single Python function, which has to be named `run`. This function takes one argument, which is the _protocol context_. Many examples in these docs use the argument name `protocol`, and sometimes they specify the argument's type: + +```python +def run(protocol: protocol_api.ProtocolContext): +``` + +With the protocol context argument named and typed, you can start calling methods on `protocol` to add labware and hardware. + +#### Labware + +For serial dilution, you need to load a tip rack, reservoir, and 96-well plate on the deck of your Flex or OT-2. Loading labware is done with the `load_labware()` method of the protocol context, which takes two arguments: the standard labware name as defined in the Opentrons Labware Library, and the position where you'll place the labware on the robot's deck. 
+ +##### Flex + +Here's how to load the labware on a Flex in slots D1, D2, and D3: + +```python +def run(protocol: protocol_api.ProtocolContext): + tips = protocol.load_labware("opentrons_flex_96_tiprack_200ul", "D1") + reservoir = protocol.load_labware("nest_12_reservoir_15ml", "D2") + plate = protocol.load_labware("nest_96_wellplate_200ul_flat", "D3") +``` + +If you're using a different model of labware, find its name in the Labware Library and replace it in your code. + +##### OT-2 + +Here's how to load the labware on an OT-2 in slots 1, 2, and 3: + +```python +def run(protocol: protocol_api.ProtocolContext): + tips = protocol.load_labware("opentrons_96_tiprack_300ul", 1) + reservoir = protocol.load_labware("nest_12_reservoir_15ml", 2) + plate = protocol.load_labware("nest_96_wellplate_200ul_flat", 3) +``` + +If you're using a different model of labware, find its name in the Labware Library and replace it in your code. + +You may notice that these deck maps don't show where the liquids will be at the start of the protocol. Liquid definitions aren't required in Python protocols, unlike protocols made in Protocol Designer. If you want to identify liquids, see Labeling Liquids in Wells. (Sneak peek: you'll put the diluent in column 1 of the reservoir and the solution in column 2 of the reservoir.) + +#### Trash Bin + +Flex and OT-2 both come with a trash bin for disposing used tips. + +The OT-2 trash bin is fixed in slot 12. Since it can't go anywhere else on the deck, you don't need to write any code to tell the API where it is. + +Flex lets you put a trash bin in multiple locations on the deck. You can even have more than one trash bin, or none at all (if you use the waste chute instead, or if your protocol never trashes any tips). For serial dilution, you'll need to dispose used tips, so you also need to tell the API where the trash container is located on your robot. Loading a trash bin on Flex is done with the `load_trash_bin()` method, which takes one argument: its location. Here's how to load the trash in slot A3: + +```python +trash = protocol.load_trash_bin("A3") +``` + +#### Pipettes + +Next you'll specify what pipette to use in the protocol. Loading a pipette is done with the `load_instrument()` method, which takes three arguments: the name of the pipette, the mount it's installed in, and the tip racks it should use when performing transfers. Load whatever pipette you have installed in your robot by using its standard pipette name. Here's how to load the pipette in the left mount and instantiate it as a variable named `left_pipette`: + +For Flex: + +```python +left_pipette = protocol.load_instrument("flex_1channel_1000", "left", tip_racks=[tips]) +``` + +For OT-2: + +```python +left_pipette = protocol.load_instrument("p300_single_gen2", "left", tip_racks=[tips]) +``` + +Since the pipette is so fundamental to the protocol, it might seem like you should have specified it first. But there's a good reason why pipettes are loaded after labware: you need to have already loaded `tips` in order to tell the pipette to use it. And now you won't have to reference `tips` again in your code — it's assigned to the `left_pipette` and the robot will know to use it when commanded to pick up tips. + +Note: You may notice that the value of `tip_racks` is in brackets, indicating that it's a list. This serial dilution protocol only uses one tip rack, but some protocols require more tips, so you can assign them to a pipette all at once, like `tip_racks=[tips1, tips2]`. 
+ +#### Steps + +Finally, all of your labware and hardware is in place, so it's time to give the robot pipetting steps (also known as commands). The required steps of the serial dilution process break down into three main phases: + +1. Measure out equal amounts of diluent from the reservoir to every well on the plate. +2. Measure out equal amounts of solution from the reservoir into wells in the first column of the plate. +3. Move a portion of the combined liquid from column 1 to 2, then from column 2 to 3, and so on all the way to column 12. + +Thanks to the flexibility of the API's `transfer()` method, which combines many building block commands into one call, each of these phases can be accomplished with a single line of code! You'll just have to write a few more lines of code to repeat the process for as many rows as you want to fill. + +Let's start with the diluent. This phase takes a larger quantity of liquid and spreads it equally to many wells. `transfer()` can handle this all at once, because it accepts either a single well or a list of wells for its source and destination: + +```python +left_pipette.transfer(100, reservoir["A1"], plate.wells()) +``` + +Breaking down these single lines of code shows the power of complex commands. The first argument is the amount to transfer to each destination, 100 µL. The second argument is the source, column 1 of the reservoir (which is still specified with grid-style coordinates as `A1` — a reservoir only has an A row). The third argument is the destination. Here, calling the `wells()` method of `plate` returns a list of _every well_, and the command will apply to all of them. + +In plain English, you've instructed the robot, "For every well on the plate, aspirate 100 µL of fluid from column 1 of the reservoir and dispense it in the well." That's how we understand this line of code as scientists, yet the robot will understand and execute it as nearly 200 discrete actions. + +Now it's time to start mixing in the solution. To do this row by row, nest the commands in a `for` loop: + +```python +for i in range(8): + row = plate.rows()[i] +``` + +Using Python's built-in `range` class is an easy way to repeat this block 8 times, once for each row. This also lets you use the repeat index `i` with `plate.rows()` to keep track of the current row. + +In each row, you first need to add solution. This will be similar to what you did with the diluent, but putting it only in column 1 of the plate. It's best to mix the combined solution and diluent thoroughly, so add the optional `mix_after` argument to `transfer()`: + +```python +left_pipette.transfer(100, reservoir["A2"], row[0], mix_after=(3, 50)) +``` + +As before, the first argument specifies to transfer 100 µL. The second argument is the source, column 2 of the reservoir. The third argument is the destination, the element at index 0 of the current `row`. Since Python lists are zero-indexed, but columns on labware start numbering at 1, this will be well A1 on the first time through the loop, B1 the second time, and so on. The fourth argument specifies to mix 3 times with 50 µL of fluid each time. + +Finally, it's time to dilute the solution down the row. One approach would be to nest another `for` loop here, but instead let's use another feature of the `transfer()` method, taking lists as the source and destination arguments: + +```python +left_pipette.transfer(100, row[:11], row[1:], mix_after=(3, 50)) +``` + +There's some Python shorthand here, so let's unpack it. 
You can get a range of indices from a list using the colon `:` operator, and omitting it at either end means "from the beginning" or "until the end" of the list. So the source is `row[:11]`, from the beginning of the row until its 11th item. And the destination is `row[1:]`, from index 1 (column 2!) until the end. Since both of these lists have 11 items, `transfer()` will _step through them in parallel_, and they're constructed so when the source is 0, the destination is 1; when the source is 1, the destination is 2; and so on. This condenses all of the subsequent transfers down the row into a single line of code. + +All that remains is for the loop to repeat these steps, filling each row down the plate. + +That's it! If you're using a single-channel pipette, you're ready to try out your protocol. + +#### 8-Channel Pipette + +If you're using an 8-channel pipette, you'll need to make a couple tweaks to the single-channel code from above. Most importantly, whenever you target a well in row A of a plate with an 8-channel pipette, it will move its topmost tip to row A, lining itself up over the entire column. + +Thus, when adding the diluent, instead of targeting every well on the plate, you should only target the top row: + +```python +left_pipette.transfer(100, reservoir["A1"], plate.rows()[0]) +``` + +And by accessing an entire column at once, the 8-channel pipette effectively implements the `for` loop in hardware, so you'll need to remove it: + +```python +row = plate.rows()[0] +left_pipette.transfer(100, reservoir["A2"], row[0], mix_after=(3, 50)) +left_pipette.transfer(100, row[:11], row[1:], mix_after=(3, 50)) +``` + +Instead of tracking the current row in the `row` variable, this code sets it to always be row A (index 0). + +## Complete Protocol Examples + +Here are the complete protocols for both single-channel and 8-channel configurations: + +### Single-Channel Protocol (Flex) + +```python +from opentrons import protocol_api + +requirements = {"robotType": "Flex", "apiLevel": "2.16"} + +metadata = { + "protocolName": "Serial Dilution Tutorial", + "description": """This protocol is the outcome of following the + Python Protocol API Tutorial located at + https://docs.opentrons.com/v2/tutorial.html. It takes a + solution and progressively dilutes it by transferring it + stepwise across a plate.""", + "author": "New API User" +} + +def run(protocol: protocol_api.ProtocolContext): + # Load labware + tips = protocol.load_labware("opentrons_flex_96_tiprack_200ul", "D1") + reservoir = protocol.load_labware("nest_12_reservoir_15ml", "D2") + plate = protocol.load_labware("nest_96_wellplate_200ul_flat", "D3") + trash = protocol.load_trash_bin("A3") + + # Load pipette + left_pipette = protocol.load_instrument( + "flex_1channel_1000", + "left", + tip_racks=[tips] + ) + + # Distribute diluent + left_pipette.transfer(100, reservoir["A1"], plate.wells()) + + # Perform serial dilution + for i in range(8): + row = plate.rows()[i] + # Add solution to first well of row + left_pipette.transfer(100, reservoir["A2"], row[0], mix_after=(3, 50)) + # Perform dilution down the row + left_pipette.transfer(100, row[:11], row[1:], mix_after=(3, 50)) +``` + +### 8-Channel Protocol (OT-2) + +```python +from opentrons import protocol_api + +requirements = {"robotType": "OT-2", "apiLevel": "2.16"} + +metadata = { + "protocolName": "Serial Dilution Tutorial", + "description": """This protocol is the outcome of following the + Python Protocol API Tutorial located at + https://docs.opentrons.com/v2/tutorial.html. 
It takes a + solution and progressively dilutes it by transferring it + stepwise across a plate.""", + "author": "New API User" +} + +def run(protocol: protocol_api.ProtocolContext): + # Load labware + tips = protocol.load_labware("opentrons_96_tiprack_300ul", 1) + reservoir = protocol.load_labware("nest_12_reservoir_15ml", 2) + plate = protocol.load_labware("nest_96_wellplate_200ul_flat", 3) + + # Load pipette + left_pipette = protocol.load_instrument( + "p300_single_gen2", + "left", + tip_racks=[tips] + ) + + # Distribute diluent to top row (8-channel will handle all rows) + left_pipette.transfer(100, reservoir["A1"], plate.rows()[0]) + + # Perform serial dilution using only the top row + row = plate.rows()[0] + left_pipette.transfer(100, reservoir["A2"], row[0], mix_after=(3, 50)) + left_pipette.transfer(100, row[:11], row[1:], mix_after=(3, 50)) +``` + +These complete examples show how to implement the entire serial dilution workflow for both Flex and OT-2 robots, using either single-channel or 8-channel pipettes. The protocols will: + +1. Load all necessary labware and pipettes +2. Distribute diluent to all wells +3. Add solution to the first column +4. Perform serial dilution across the plate +5. Mix thoroughly at each step + +The key difference between single-channel and 8-channel implementations is that the 8-channel pipette naturally handles all rows simultaneously, eliminating the need for the row-by-row loop. + + + + + +Write a protocol using the Opentrons Python Protocol API v2 for Opentrons OT-2 robot according to the following description: + +Application: +Other + +Description: +A simple serial dilution + +Pipette mount(s): + +P20 Single-Channel GEN2 is mounted on the left +Modules: + +Labware: + +NEST 12 Well Reservoir 15 mL x 1 +NEST 96 Well Plate 100 µL PCR Full Skirt x 1 +Opentrons OT-2 96 Tip Rack 20 µL x 1 +Liquids: + +Liquid 1: diluent liquid, solution + +Steps: + +Step 1: For every well on the plate, aspirate 100 µL of fluid from column 1 of the reservoir and dispense it in the well plate. Use the same tip for each transfer. + +Step 2: Dilute the solution down the row: + +- Transfer 100ul of diluent from column 2 of the reservoir to the first well of the row. Mix 3 times with 50 ul. Use the same tip for each transfer. +- Perform a serial dilution by transferring 100 µL from each well to the next well in the row (from left to right). Mix 3 times with 50 µL after each transfer. Use the same tip for each transfer. 
+ +```python +from opentrons import protocol_api + +metadata = { + 'protocolName': 'Serial Dilution', + 'author': 'Assistant', + 'description': 'Perform serial dilution with diluent and sample distribution' +} + +requirements = { + "robotType": "OT-2", + "apiLevel": "2.19" +} + +def run(protocol: protocol_api.ProtocolContext): + # Load labware + tiprack = protocol.load_labware('opentrons_96_tiprack_300ul', 1) # Changed to 300ul tips + reservoir = protocol.load_labware('nest_12_reservoir_15ml', 2) + plate = protocol.load_labware('nest_96_wellplate_100ul_pcr_full_skirt', 3) + + # Load P300 pipette instead of P20 + pipette = protocol.load_instrument('p300_single_gen2', 'left', tip_racks=[tiprack]) + + # Step 1: Distribute diluent to all wells using the same tip + pipette.distribute( + 100, + reservoir.columns()[0], + plate.wells(), + new_tip='once' + ) + + # Step 2: Perform serial dilution for each row + for i in range(8): # Process each row + row = plate.rows()[i] # Get current row + + # Transfer solution from reservoir to first well and mix + pipette.transfer( + 100, + reservoir.columns()[1], + row[0], + mix_after=(3, 50), + new_tip='once' + ) + + # Dilute down the row + pipette.transfer( + 100, + row[:11], # Source wells: first 11 wells + row[1:], # Destination wells: last 11 wells + mix_after=(3, 50), + new_tip='once' + ) + +``` + + + + + +Application: +Other + +Description: +A simple serial dilution protocol + +Pipette mount(s): + +P300 8-Channel GEN2 is mounted on the left +Modules: None + +Labware: + +Opentrons OT-2 96 Tip Rack 300 µL x 1 +NEST 12 Well Reservoir 15 mL x 1 +NEST 96 Well Plate 100 µL PCR Full Skirt x 1 +Liquids: + +Liquid 1: diluent, solution + +Steps: + +Step 1: Transfer 100ul of diluent from reservoir first column to all wells of the well plate. Use the same tip for each transfer. +Step 2: Dilute down the row: + +- Transfer 100ul of diluent from second column of reservoir to the first row of the first column of the well plate. Mix 3 times with 50ul. Use the same tip for each transfer. +- Transfer 100ul of solution from each well to the next well in the row. Mix 3 times with 50ul. Use the same tip for each transfer. + +```python +from opentrons import protocol_api + +metadata = { + 'protocolName': 'Serial Dilution Protocol', + 'author': 'Assistant', + 'description': 'A simple serial dilution protocol using multi-channel pipette' +} + +requirements = { + "robotType": "OT-2", + "apiLevel": "2.19" +} + +def run(protocol: protocol_api.ProtocolContext): + # Load labware + tips = protocol.load_labware("opentrons_96_tiprack_300ul", "1") + reservoir = protocol.load_labware("nest_12_reservoir_15ml", "2") + plate = protocol.load_labware("nest_96_wellplate_100ul_pcr_full_skirt", "3") + + # Load pipette + p300_multi = protocol.load_instrument( + "p300_multi_gen2", + "left", + tip_racks=[tips] + ) + + # Step 1: Distribute diluent to all wells + p300_multi.transfer( + 100, + reservoir['A1'], + plate.rows()[0], + new_tip='once' + ) + + # Step 2: Serial dilution along the row + # Transfer from reservoir and mix + p300_multi.transfer( + 100, + reservoir['A2'], + plate.rows()[0][0], + mix_after=(3, 50), + new_tip='once' + ) + + # Perform serial dilution down the row + p300_multi.transfer( + 100, + plate.rows()[0][:11], + plate.rows()[0][1:], + mix_after=(3, 50), + new_tip='once' + ) + +``` + + + + + +# More serial dilution protocol examples ## 1. 
Serial dilution example diff --git a/opentrons-ai-server/api/storage/docs/standard-loadname-info.md b/opentrons-ai-server/api/storage/docs/standard-loadname-info.md index 5ca402ec2f3..f956a95cd6c 100644 --- a/opentrons-ai-server/api/storage/docs/standard-loadname-info.md +++ b/opentrons-ai-server/api/storage/docs/standard-loadname-info.md @@ -570,6 +570,7 @@ Total number of labware: 73 - Well count: 96 - Max volume: 2.4 mL - Well shape: U-bottom + @@ -597,3 +598,38 @@ Total number of labware: 73 - volume: 5–1000 µL + + + + - Loadname: `temperature module` or `tempdeck` + - Introduced in API Version: 2.0 + + +- Loadname: `temperature module gen2` +- Introduced in API Version: 2.3 + + +- Loadname: `magnetic module` or `magdeck` +- Introduced in API Version: 2.0 + + +- Loadname: `magnetic module gen2` +- Introduced in API Version: 2.3 + + +- Loadname: `thermocycler module` or `thermocycler` +- Introduced in API Version: 2.0 + + +- Loadname: `thermocycler module gen2` or `thermocyclerModuleV2` +- Introduced in API Version: 2.13 + + +- Loadname: `heaterShakerModuleV1` +- Introduced in API Version: 2.13 + + +- Loadname: `magneticBlockV1` +- Introduced in API Version: 2.15 + + diff --git a/protocol-designer/src/ProtocolEditor.tsx b/protocol-designer/src/ProtocolEditor.tsx index 7ca14592e86..244d9d264e0 100644 --- a/protocol-designer/src/ProtocolEditor.tsx +++ b/protocol-designer/src/ProtocolEditor.tsx @@ -1,15 +1,22 @@ import { DndProvider } from 'react-dnd' import { HashRouter } from 'react-router-dom' import { HTML5Backend } from 'react-dnd-html5-backend' -import { DIRECTION_COLUMN, Flex, OVERFLOW_AUTO } from '@opentrons/components' +import { + Box, + DIRECTION_COLUMN, + Flex, + OVERFLOW_AUTO, +} from '@opentrons/components' import { PortalRoot as TopPortalRoot } from './components/portals/TopPortal' import { ProtocolRoutes } from './ProtocolRoutes' function ProtocolEditorComponent(): JSX.Element { return ( -
@@ -17,7 +24,7 @@ function ProtocolEditorComponent(): JSX.Element { -
+
) } diff --git a/protocol-designer/src/ProtocolRoutes.tsx b/protocol-designer/src/ProtocolRoutes.tsx index b491ac8ca60..7350aa0a8da 100644 --- a/protocol-designer/src/ProtocolRoutes.tsx +++ b/protocol-designer/src/ProtocolRoutes.tsx @@ -73,7 +73,6 @@ export function ProtocolRoutes(): JSX.Element { onReset={handleReset} > - {showGateModal ? : null} diff --git a/protocol-designer/src/assets/localization/en/alert.json b/protocol-designer/src/assets/localization/en/alert.json index ece60e77570..0f3f439be05 100644 --- a/protocol-designer/src/assets/localization/en/alert.json +++ b/protocol-designer/src/assets/localization/en/alert.json @@ -13,15 +13,15 @@ "title": "Dispense volume will overflow a destination well" }, "BELOW_PIPETTE_MINIMUM_VOLUME": { - "title": "Transfer volume is below pipette minimum ({{min}} uL)", + "title": "Transfer volume is below pipette minimum ({{min}} µL)", "body": "Pipettes cannot accurately handle volumes below their minimum. " }, "BELOW_MIN_AIR_GAP_VOLUME": { - "title": "Air gap volume is below pipette minimum ({{min}} uL)", + "title": "Air gap volume is below pipette minimum ({{min}} µL)", "body": "Pipettes cannot accurately handle volumes below their minimum. " }, "BELOW_MIN_DISPOSAL_VOLUME": { - "title": "Disposal volume is below recommended minimum ({{min}} uL)", + "title": "Disposal volume is below recommended minimum ({{min}} µL)", "body": "For accuracy in multi-dispense Transfers we recommend you use a disposal volume of at least the pipette's minimum. Read more ", "link": "here" } diff --git a/protocol-designer/src/assets/localization/en/create_new_protocol.json b/protocol-designer/src/assets/localization/en/create_new_protocol.json index f2e132728a5..5d81a9ef2fd 100644 --- a/protocol-designer/src/assets/localization/en/create_new_protocol.json +++ b/protocol-designer/src/assets/localization/en/create_new_protocol.json @@ -41,7 +41,7 @@ "trash_required": "A trash entity is required", "trashBin": "Trash Bin", "up_to_3_tipracks": "Up to 3 tip rack types are allowed per pipette", - "vol_label": "{{volume}} uL", + "vol_label": "{{volume}} µL", "wasteChute": "Waste Chute", "which_fixtures": "Which fixtures will you be using?", "which_modules": "Select modules to use in your protocol.", diff --git a/protocol-designer/src/components/StepEditForm/fields/DropTipField/index.tsx b/protocol-designer/src/components/StepEditForm/fields/DropTipField/index.tsx index a17e1804576..00902b75915 100644 --- a/protocol-designer/src/components/StepEditForm/fields/DropTipField/index.tsx +++ b/protocol-designer/src/components/StepEditForm/fields/DropTipField/index.tsx @@ -1,4 +1,4 @@ -import * as React from 'react' +import { useEffect } from 'react' import { useTranslation } from 'react-i18next' import { useSelector } from 'react-redux' import { DropdownField, FormGroup } from '@opentrons/components' @@ -8,13 +8,15 @@ import { } from '../../../../step-forms/selectors' import { getAllTiprackOptions } from '../../../../ui/labware/selectors' import { getEnableReturnTip } from '../../../../feature-flags/selectors' + +import type { ComponentProps, ChangeEvent } from 'react' import type { DropdownOption } from '@opentrons/components' import type { StepFormDropdown } from '../StepFormDropdownField' import styles from '../../StepEditForm.module.css' export function DropTipField( - props: Omit, 'options'> & {} + props: Omit, 'options'> & {} ): JSX.Element { const { value: dropdownItem, @@ -49,7 +51,7 @@ export function DropTipField( if (wasteChute != null) options.push(wasteChuteOption) if 
(trashBin != null) options.push(trashOption) - React.useEffect(() => { + useEffect(() => { if ( additionalEquipment[String(dropdownItem)] == null && labwareEntities[String(dropdownItem)] == null @@ -75,7 +77,7 @@ export function DropTipField( value={dropdownItem ? String(dropdownItem) : null} onBlur={onFieldBlur} onFocus={onFieldFocus} - onChange={(e: React.ChangeEvent) => { + onChange={(e: ChangeEvent) => { updateValue(e.currentTarget.value) }} /> diff --git a/protocol-designer/src/constants.ts b/protocol-designer/src/constants.ts index b28f6ebee35..52fd9db947f 100644 --- a/protocol-designer/src/constants.ts +++ b/protocol-designer/src/constants.ts @@ -1,26 +1,27 @@ import mapValues from 'lodash/mapValues' import { - MAGNETIC_MODULE_TYPE, - TEMPERATURE_MODULE_TYPE, - THERMOCYCLER_MODULE_TYPE, + ABSORBANCE_READER_TYPE, + ABSORBANCE_READER_V1, HEATERSHAKER_MODULE_TYPE, + HEATERSHAKER_MODULE_V1, + MAGNETIC_BLOCK_TYPE, + MAGNETIC_BLOCK_V1, + MAGNETIC_MODULE_TYPE, MAGNETIC_MODULE_V1, MAGNETIC_MODULE_V2, + TEMPERATURE_MODULE_TYPE, TEMPERATURE_MODULE_V1, TEMPERATURE_MODULE_V2, + THERMOCYCLER_MODULE_TYPE, THERMOCYCLER_MODULE_V1, - HEATERSHAKER_MODULE_V1, THERMOCYCLER_MODULE_V2, - MAGNETIC_BLOCK_TYPE, - MAGNETIC_BLOCK_V1, - ABSORBANCE_READER_TYPE, - ABSORBANCE_READER_V1, } from '@opentrons/shared-data' import type { - LabwareDefinition2, + CutoutId, DeckSlot as DeckDefSlot, - ModuleType, + LabwareDefinition2, ModuleModel, + ModuleType, } from '@opentrons/shared-data' import type { DeckSlot, WellVolumes } from './types' @@ -167,3 +168,10 @@ export const DND_TYPES = { // Values for TC fields export const THERMOCYCLER_STATE: 'thermocyclerState' = 'thermocyclerState' export const THERMOCYCLER_PROFILE: 'thermocyclerProfile' = 'thermocyclerProfile' +// Priority for fixtures +export const STAGING_AREA_CUTOUTS_ORDERED: CutoutId[] = [ + 'cutoutB3', + 'cutoutC3', + 'cutoutD3', + 'cutoutA3', +] diff --git a/protocol-designer/src/organisms/Alerts/FormAlerts.tsx b/protocol-designer/src/organisms/Alerts/FormAlerts.tsx index d51d34ff5d8..b6080a05a73 100644 --- a/protocol-designer/src/organisms/Alerts/FormAlerts.tsx +++ b/protocol-designer/src/organisms/Alerts/FormAlerts.tsx @@ -118,7 +118,7 @@ function FormAlertsComponent(props: FormAlertsProps): JSX.Element | null { width="100%" iconMarginLeft={SPACING.spacing4} > - + {data.title} @@ -183,7 +183,6 @@ function FormAlertsComponent(props: FormAlertsProps): JSX.Element | null { return [...formErrors, ...timelineWarnings, ...formWarnings].length > 0 ? 
( {showFormErrors diff --git a/protocol-designer/src/organisms/Alerts/WarningContents.tsx b/protocol-designer/src/organisms/Alerts/WarningContents.tsx index 559d2d957ce..d75222dbaf1 100644 --- a/protocol-designer/src/organisms/Alerts/WarningContents.tsx +++ b/protocol-designer/src/organisms/Alerts/WarningContents.tsx @@ -1,6 +1,4 @@ import { useTranslation } from 'react-i18next' -import { START_TERMINAL_ITEM_ID } from '../../steplist' -import { TerminalItemLink } from './TerminalItemLink' import type { AlertLevel } from './types' @@ -22,7 +20,6 @@ export function WarningContents( {t(`timeline.warning.${warningType}.body`, { defaultValue: '', })} - ) default: diff --git a/protocol-designer/src/organisms/Kitchen/index.tsx b/protocol-designer/src/organisms/Kitchen/index.tsx index 1732ac014a6..770447f9239 100644 --- a/protocol-designer/src/organisms/Kitchen/index.tsx +++ b/protocol-designer/src/organisms/Kitchen/index.tsx @@ -1,5 +1,4 @@ -import * as React from 'react' - +import { useState } from 'react' import { Flex, ALIGN_CENTER, @@ -14,6 +13,7 @@ import { import { uuid } from '../../utils' import { KitchenContext } from './KitchenContext' +import type { ReactNode } from 'react' import type { SnackbarProps, ToastProps, @@ -22,7 +22,7 @@ import type { import type { BakeOptions, MakeSnackbarOptions } from './KitchenContext' interface PantryProps { - children: React.ReactNode + children: ReactNode } /** @@ -32,8 +32,8 @@ interface PantryProps { * @returns */ export function Kitchen({ children }: PantryProps): JSX.Element { - const [toasts, setToasts] = React.useState([]) - const [snackbar, setSnackbar] = React.useState(null) + const [toasts, setToasts] = useState([]) + const [snackbar, setSnackbar] = useState(null) /** * makes toast, rendering it in the Kitchen display container diff --git a/protocol-designer/src/organisms/KnowledgeLink.tsx b/protocol-designer/src/organisms/KnowledgeLink.tsx index 8398539db57..12b460b5c03 100644 --- a/protocol-designer/src/organisms/KnowledgeLink.tsx +++ b/protocol-designer/src/organisms/KnowledgeLink.tsx @@ -5,7 +5,8 @@ interface KnowledgeLinkProps { children: ReactNode } -export const DOC_URL = 'https://docs.opentrons.com/protocol-designer/' +export const DOC_URL = + 'https://insights.opentrons.com/hubfs/Protocol%20Designer%20Instruction%20Manual.pdf' export function KnowledgeLink(props: KnowledgeLinkProps): JSX.Element { const { children } = props diff --git a/protocol-designer/src/organisms/TipPositionModal/ZTipPositionModal.tsx b/protocol-designer/src/organisms/TipPositionModal/ZTipPositionModal.tsx index 23d1540bbfc..2282869ac92 100644 --- a/protocol-designer/src/organisms/TipPositionModal/ZTipPositionModal.tsx +++ b/protocol-designer/src/organisms/TipPositionModal/ZTipPositionModal.tsx @@ -1,4 +1,4 @@ -import * as React from 'react' +import { useState } from 'react' import { createPortal } from 'react-dom' import { useTranslation } from 'react-i18next' import { @@ -53,12 +53,12 @@ export function ZTipPositionModal(props: ZTipPositionModalProps): JSX.Element { wellDepthMm, }) - const [value, setValue] = React.useState( + const [value, setValue] = useState( zValue !== null ? String(zValue) : null ) // in this modal, pristinity hides the OUT_OF_BOUNDS error only. 
- const [isPristine, setPristine] = React.useState(true) + const [isPristine, setPristine] = useState(true) const getMinMaxMmFromBottom = (): { maxMmFromBottom: number diff --git a/protocol-designer/src/organisms/TipPositionModal/index.tsx b/protocol-designer/src/organisms/TipPositionModal/index.tsx index 704ab7e8cf7..e7dfed1a476 100644 --- a/protocol-designer/src/organisms/TipPositionModal/index.tsx +++ b/protocol-designer/src/organisms/TipPositionModal/index.tsx @@ -1,4 +1,4 @@ -import * as React from 'react' +import { useState } from 'react' import { createPortal } from 'react-dom' import { useTranslation } from 'react-i18next' import { @@ -64,7 +64,7 @@ export function TipPositionModal( 'shared', 'application', ]) - const [view, setView] = React.useState<'top' | 'side'>('side') + const [view, setView] = useState<'top' | 'side'>('side') const zSpec = specs.z const ySpec = specs.y const xSpec = specs.x @@ -80,18 +80,18 @@ export function TipPositionModal( wellDepthMm, }) - const [zValue, setZValue] = React.useState( + const [zValue, setZValue] = useState( zSpec?.value == null ? String(defaultMmFromBottom) : String(zSpec?.value) ) - const [yValue, setYValue] = React.useState( + const [yValue, setYValue] = useState( ySpec?.value == null ? null : String(ySpec?.value) ) - const [xValue, setXValue] = React.useState( + const [xValue, setXValue] = useState( xSpec?.value == null ? null : String(xSpec?.value) ) // in this modal, pristinity hides the OUT_OF_BOUNDS error only. - const [isPristine, setPristine] = React.useState(true) + const [isPristine, setPristine] = useState(true) const getMinMaxMmFromBottom = (): { maxMmFromBottom: number minMmFromBottom: number diff --git a/protocol-designer/src/pages/CreateNewProtocolWizard/SelectPipettes.tsx b/protocol-designer/src/pages/CreateNewProtocolWizard/SelectPipettes.tsx index 57f2ec6b4b5..72cc6338526 100644 --- a/protocol-designer/src/pages/CreateNewProtocolWizard/SelectPipettes.tsx +++ b/protocol-designer/src/pages/CreateNewProtocolWizard/SelectPipettes.tsx @@ -1,4 +1,4 @@ -import { useState, useEffect } from 'react' +import { useEffect, useRef, useState } from 'react' import { useTranslation } from 'react-i18next' import styled, { css } from 'styled-components' import { useDispatch, useSelector } from 'react-redux' @@ -91,6 +91,16 @@ export function SelectPipettes(props: WizardTileProps): JSX.Element | null { setPipetteVolume(null) } + const ref = useRef(null) + + const handleScrollToBottom = (): void => { + if (ref.current != null) { + ref.current.scrollIntoView({ + behavior: 'smooth', + block: 'end', + }) + } + } // initialize pipette name once all fields are filled out useEffect(() => { if ( @@ -106,6 +116,10 @@ export function SelectPipettes(props: WizardTileProps): JSX.Element | null { } }, [pipetteType, pipetteGen, pipetteVolume, selectedPipetteName]) + useEffect(() => { + handleScrollToBottom() + }, [pipetteType, pipetteVolume, pipetteGen]) + const noPipette = (pipettesByMount.left.pipetteName == null || pipettesByMount.left.tiprackDefURI == null) && @@ -114,7 +128,8 @@ export function SelectPipettes(props: WizardTileProps): JSX.Element | null { const isDisabled = (page === 'add' && pipettesByMount[defaultMount].tiprackDefURI == null) || - noPipette + noPipette || + selectedValues.length === 0 const targetPipetteMount = pipettesByMount.left.pipetteName == null || @@ -186,6 +201,7 @@ export function SelectPipettes(props: WizardTileProps): JSX.Element | null { flexDirection={DIRECTION_COLUMN} overflowY={OVERFLOW_AUTO} 
gridGap={SPACING.spacing32} + ref={ref} > - {tooltipOnDisabled != null ? ( + {tooltipOnDisabled != null && disabled ? ( {tooltipOnDisabled} ) : null} diff --git a/protocol-designer/src/pages/CreateNewProtocolWizard/__tests__/SelectPipettes.test.tsx b/protocol-designer/src/pages/CreateNewProtocolWizard/__tests__/SelectPipettes.test.tsx index 3dec8676ebf..016a143a4bc 100644 --- a/protocol-designer/src/pages/CreateNewProtocolWizard/__tests__/SelectPipettes.test.tsx +++ b/protocol-designer/src/pages/CreateNewProtocolWizard/__tests__/SelectPipettes.test.tsx @@ -21,6 +21,7 @@ vi.mock('../../../organisms') vi.mock('../../../labware-defs/actions') vi.mock('../utils') const mockLocation = vi.fn() +window.HTMLElement.prototype.scrollIntoView = vi.fn() vi.mock('react-router-dom', async importOriginal => { const actual = await importOriginal() @@ -69,8 +70,8 @@ describe('SelectPipettes', () => { vi.mocked(getLabwareDefsByURI).mockReturnValue({}) vi.mocked(getAllowAllTipracks).mockReturnValue(false) vi.mocked(getTiprackOptions).mockReturnValue({ - 'opentrons/opentrons_flex_96_tiprack_200ul/1': '200uL Flex tipracks', - 'opentrons/opentrons_flex_96_tiprack_1000ul/1': '1000uL Flex tipracks', + 'opentrons/opentrons_flex_96_tiprack_200ul/1': '200µL Flex tipracks', + 'opentrons/opentrons_flex_96_tiprack_1000ul/1': '1000µL Flex tipracks', }) }) @@ -86,11 +87,11 @@ describe('SelectPipettes', () => { fireEvent.click(screen.getByRole('label', { name: '1-Channel' })) screen.getByText('Pipette volume') // select pip volume - fireEvent.click(screen.getByRole('label', { name: '1000 uL' })) + fireEvent.click(screen.getByRole('label', { name: '1000 µL' })) // select tip screen.getByText('Add custom pipette tips') - screen.getByText('200uL Flex tipracks') - fireEvent.click(screen.getByText('1000uL Flex tipracks')) + screen.getByText('200µL Flex tipracks') + fireEvent.click(screen.getByText('1000µL Flex tipracks')) screen.getByRole('button', { name: 'Confirm' }) @@ -103,8 +104,8 @@ describe('SelectPipettes', () => { it('renders the first page of select pipettes for an ot-2', () => { vi.mocked(getTiprackOptions).mockReturnValue({ - 'opentrons/opentrons_96_tiprack_10ul/1': '10uL tipracks', - 'opentrons/opentrons_96_tiprack_300ul/1': '300uL tipracks', + 'opentrons/opentrons_96_tiprack_10ul/1': '10µL tipracks', + 'opentrons/opentrons_96_tiprack_300ul/1': '300µL tipracks', }) const values = { @@ -139,11 +140,11 @@ describe('SelectPipettes', () => { screen.getByText('Pipette volume') // select pip volume - fireEvent.click(screen.getByRole('label', { name: '20 uL' })) + fireEvent.click(screen.getByRole('label', { name: '20 µL' })) // select tip screen.getByText('Add custom pipette tips') - screen.getByText('10uL tipracks') - fireEvent.click(screen.getByText('300uL tipracks')) + screen.getByText('10µL tipracks') + fireEvent.click(screen.getByText('300µL tipracks')) screen.getByText('Add custom pipette tips') // add custom pipette tips diff --git a/protocol-designer/src/pages/CreateNewProtocolWizard/__tests__/utils.test.ts b/protocol-designer/src/pages/CreateNewProtocolWizard/__tests__/utils.test.ts index 02039fb312e..17a46072474 100644 --- a/protocol-designer/src/pages/CreateNewProtocolWizard/__tests__/utils.test.ts +++ b/protocol-designer/src/pages/CreateNewProtocolWizard/__tests__/utils.test.ts @@ -291,12 +291,25 @@ describe('getTrashSlot', () => { const result = getTrashSlot(MOCK_FORM_STATE) expect(result).toBe('cutoutA3') }) - it('should return cutoutA1 when there is a staging area in slot A3', () => { + it('should return 
cutoutA3 when there are 3 or fewer staging areas', () => { MOCK_FORM_STATE = { ...MOCK_FORM_STATE, additionalEquipment: ['stagingArea'], } const result = getTrashSlot(MOCK_FORM_STATE) + expect(result).toBe('cutoutA3') + }) + it('should return cutoutA1 when there are 4 staging areas', () => { + MOCK_FORM_STATE = { + ...MOCK_FORM_STATE, + additionalEquipment: [ + 'stagingArea', + 'stagingArea', + 'stagingArea', + 'stagingArea', + ], + } + const result = getTrashSlot(MOCK_FORM_STATE) expect(result).toBe('cutoutA1') }) }) diff --git a/protocol-designer/src/pages/CreateNewProtocolWizard/index.tsx b/protocol-designer/src/pages/CreateNewProtocolWizard/index.tsx index e0a24fea138..df079c72318 100644 --- a/protocol-designer/src/pages/CreateNewProtocolWizard/index.tsx +++ b/protocol-designer/src/pages/CreateNewProtocolWizard/index.tsx @@ -16,7 +16,6 @@ import { MAGNETIC_BLOCK_TYPE, MAGNETIC_MODULE_TYPE, OT2_ROBOT_TYPE, - STAGING_AREA_CUTOUTS, TEMPERATURE_MODULE_TYPE, THERMOCYCLER_MODULE_TYPE, WASTE_CHUTE_CUTOUT, @@ -29,7 +28,10 @@ import * as labwareDefSelectors from '../../labware-defs/selectors' import * as labwareDefActions from '../../labware-defs/actions' import * as labwareIngredActions from '../../labware-ingred/actions' import { actions as steplistActions } from '../../steplist' -import { INITIAL_DECK_SETUP_STEP_ID } from '../../constants' +import { + INITIAL_DECK_SETUP_STEP_ID, + STAGING_AREA_CUTOUTS_ORDERED, +} from '../../constants' import { actions as stepFormActions } from '../../step-forms' import { createModuleWithNoSlot } from '../../modules' import { @@ -44,7 +46,6 @@ import { SelectModules } from './SelectModules' import { SelectFixtures } from './SelectFixtures' import { AddMetadata } from './AddMetadata' import { getTrashSlot } from './utils' - import type { ThunkDispatch } from 'redux-thunk' import type { NormalizedPipette } from '@opentrons/step-generation' import type { BaseState } from '../../types' @@ -283,10 +284,7 @@ export function CreateNewProtocolWizard(): JSX.Element | null { if (stagingAreas.length > 0) { stagingAreas.forEach((_, index) => { return dispatch( - createDeckFixture( - 'stagingArea', - STAGING_AREA_CUTOUTS.reverse()[index] - ) + createDeckFixture('stagingArea', STAGING_AREA_CUTOUTS_ORDERED[index]) ) }) } diff --git a/protocol-designer/src/pages/CreateNewProtocolWizard/utils.tsx b/protocol-designer/src/pages/CreateNewProtocolWizard/utils.tsx index 90e06a8368e..cbbe3d98a7f 100644 --- a/protocol-designer/src/pages/CreateNewProtocolWizard/utils.tsx +++ b/protocol-designer/src/pages/CreateNewProtocolWizard/utils.tsx @@ -8,7 +8,6 @@ import { MAGNETIC_BLOCK_V1, MAGNETIC_MODULE_V1, MAGNETIC_MODULE_V2, - STAGING_AREA_CUTOUTS, TEMPERATURE_MODULE_V1, TEMPERATURE_MODULE_V2, THERMOCYCLER_MODULE_TYPE, @@ -19,6 +18,7 @@ import { import wasteChuteImage from '../../assets/images/waste_chute.png' import trashBinImage from '../../assets/images/flex_trash_bin.png' import stagingAreaImage from '../../assets/images/staging_area.png' +import { STAGING_AREA_CUTOUTS_ORDERED } from '../../constants' import type { CutoutId, @@ -268,7 +268,9 @@ export const getTrashSlot = (values: WizardFormState): string => { equipment.includes('stagingArea') ) - const cutouts = stagingAreas.map((_, index) => STAGING_AREA_CUTOUTS[index]) + const cutouts = stagingAreas.map( + (_, index) => STAGING_AREA_CUTOUTS_ORDERED[index] + ) const hasWasteChute = additionalEquipment.find(equipment => equipment.includes('wasteChute') ) diff --git 
a/protocol-designer/src/pages/Designer/ProtocolSteps/Timeline/AddStepButton.tsx b/protocol-designer/src/pages/Designer/ProtocolSteps/Timeline/AddStepButton.tsx index b7e070de5c5..974198e0501 100644 --- a/protocol-designer/src/pages/Designer/ProtocolSteps/Timeline/AddStepButton.tsx +++ b/protocol-designer/src/pages/Designer/ProtocolSteps/Timeline/AddStepButton.tsx @@ -1,4 +1,4 @@ -import * as React from 'react' +import { useState } from 'react' import { useDispatch, useSelector } from 'react-redux' import { createPortal } from 'react-dom' import { useTranslation } from 'react-i18next' @@ -38,6 +38,7 @@ import { } from '../../../../components/modals/ConfirmDeleteModal' import { AddStepOverflowButton } from './AddStepOverflowButton' +import type { MouseEvent } from 'react' import type { ThunkDispatch } from 'redux-thunk' import type { BaseState } from '../../../../types' import type { StepType } from '../../../../form-types' @@ -58,19 +59,17 @@ export function AddStepButton(): JSX.Element { ) const isStepCreationDisabled = useSelector(getIsMultiSelectMode) const modules = useSelector(stepFormSelectors.getInitialDeckSetup).modules - const [ - showStepOverflowMenu, - setShowStepOverflowMenu, - ] = React.useState(false) + const [showStepOverflowMenu, setShowStepOverflowMenu] = useState( + false + ) const overflowWrapperRef = useOnClickOutside({ onClickOutside: () => { setShowStepOverflowMenu(false) }, }) - const [ - enqueuedStepType, - setEnqueuedStepType, - ] = React.useState(null) + const [enqueuedStepType, setEnqueuedStepType] = useState( + null + ) const getSupportedSteps = (): Array< Exclude @@ -166,7 +165,7 @@ export function AddStepButton(): JSX.Element { boxShadow="0px 1px 3px rgba(0, 0, 0, 0.2)" backgroundColor={COLORS.white} flexDirection={DIRECTION_COLUMN} - onClick={(e: React.MouseEvent) => { + onClick={(e: MouseEvent) => { e.preventDefault() e.stopPropagation() }} diff --git a/protocol-designer/src/pages/Designer/ProtocolSteps/Timeline/Substep.tsx b/protocol-designer/src/pages/Designer/ProtocolSteps/Timeline/Substep.tsx index 7c62f23140d..7a94454cefc 100644 --- a/protocol-designer/src/pages/Designer/ProtocolSteps/Timeline/Substep.tsx +++ b/protocol-designer/src/pages/Designer/ProtocolSteps/Timeline/Substep.tsx @@ -1,4 +1,4 @@ -import * as React from 'react' +import { memo } from 'react' import { useTranslation } from 'react-i18next' import noop from 'lodash/noop' import { @@ -169,4 +169,4 @@ function SubstepComponent(props: SubstepProps): JSX.Element { ) } -export const Substep = React.memo(SubstepComponent) +export const Substep = memo(SubstepComponent) diff --git a/protocol-designer/src/pages/Designer/__tests__/LiquidsOverflowMenu.test.tsx b/protocol-designer/src/pages/Designer/__tests__/LiquidsOverflowMenu.test.tsx index 24877f61294..45fbebe5494 100644 --- a/protocol-designer/src/pages/Designer/__tests__/LiquidsOverflowMenu.test.tsx +++ b/protocol-designer/src/pages/Designer/__tests__/LiquidsOverflowMenu.test.tsx @@ -1,4 +1,4 @@ -import * as React from 'react' +import { createRef } from 'react' import { describe, it, expect, vi, beforeEach } from 'vitest' import { fireEvent, screen } from '@testing-library/react' import { i18n } from '../../../assets/localization' @@ -7,6 +7,7 @@ import * as labwareIngredActions from '../../../labware-ingred/actions' import { renderWithProviders } from '../../../__testing-utils__' import { LiquidsOverflowMenu } from '../LiquidsOverflowMenu' +import type { ComponentProps } from 'react' import type { NavigateFunction } from 'react-router-dom' const 
mockLocation = vi.fn() @@ -21,20 +22,20 @@ vi.mock('react-router-dom', async importOriginal => { } }) -const render = (props: React.ComponentProps) => { +const render = (props: ComponentProps) => { return renderWithProviders(, { i18nInstance: i18n, })[0] } describe('SlotOverflowMenu', () => { - let props: React.ComponentProps + let props: ComponentProps beforeEach(() => { props = { onClose: vi.fn(), showLiquidsModal: vi.fn(), - overflowWrapperRef: React.createRef(), + overflowWrapperRef: createRef(), } vi.mocked(labwareIngredSelectors.allIngredientNamesIds).mockReturnValue([ { diff --git a/protocol-designer/src/pages/ProtocolOverview/DeckThumbnail.tsx b/protocol-designer/src/pages/ProtocolOverview/DeckThumbnail.tsx index 250feb06df2..f28b850919b 100644 --- a/protocol-designer/src/pages/ProtocolOverview/DeckThumbnail.tsx +++ b/protocol-designer/src/pages/ProtocolOverview/DeckThumbnail.tsx @@ -35,6 +35,7 @@ import type { StagingAreaLocation, TrashCutoutId } from '@opentrons/components' import type { CutoutId, DeckSlotId } from '@opentrons/shared-data' import type { AdditionalEquipmentEntity } from '@opentrons/step-generation' +const RIGHT_COLUMN_FIXTURE_PADDING = 50 // mm const WASTE_CHUTE_SPACE = 30 const OT2_STANDARD_DECK_VIEW_LAYER_BLOCK_LIST: string[] = [ 'calibrationMarkings', @@ -99,6 +100,8 @@ export function DeckThumbnail(props: DeckThumbnailProps): JSX.Element { const filteredAddressableAreas = deckDef.locations.addressableAreas.filter( aa => isAddressableAreaStandardSlot(aa.id, deckDef) ) + const hasRightColumnFixtures = + stagingAreaFixtures.length + wasteChuteFixtures.length > 0 return ( {() => ( <> diff --git a/react-api-client/src/runs/index.ts b/react-api-client/src/runs/index.ts index 5e479ed5093..77b487a84fc 100644 --- a/react-api-client/src/runs/index.ts +++ b/react-api-client/src/runs/index.ts @@ -19,6 +19,7 @@ export { useRunCommandErrors } from './useRunCommandErrors' export * from './useCreateLabwareOffsetMutation' export * from './useCreateLabwareDefinitionMutation' export * from './useUpdateErrorRecoveryPolicy' +export * from './useErrorRecoveryPolicy' export type { UsePlayRunMutationResult } from './usePlayRunMutation' export type { UsePauseRunMutationResult } from './usePauseRunMutation' diff --git a/react-api-client/src/runs/useAllCommandsAsPreSerializedList.ts b/react-api-client/src/runs/useAllCommandsAsPreSerializedList.ts index 4f7e2fdc0e0..c441fcc3553 100644 --- a/react-api-client/src/runs/useAllCommandsAsPreSerializedList.ts +++ b/react-api-client/src/runs/useAllCommandsAsPreSerializedList.ts @@ -14,10 +14,6 @@ import type { } from '@opentrons/api-client' const DEFAULT_PAGE_LENGTH = 30 -export const DEFAULT_PARAMS: GetRunCommandsParams = { - cursor: null, - pageLength: DEFAULT_PAGE_LENGTH, -} export function useAllCommandsAsPreSerializedList( runId: string | null, @@ -25,17 +21,15 @@ export function useAllCommandsAsPreSerializedList( options: UseQueryOptions = {} ): UseQueryResult { const host = useHost() - const nullCheckedParams = params ?? DEFAULT_PARAMS const allOptions: UseQueryOptions = { ...options, enabled: host !== null && runId != null && options.enabled !== false, } - const { cursor, pageLength } = nullCheckedParams - const nullCheckedFixitCommands = params?.includeFixitCommands ?? null - const finalizedNullCheckParams = { - ...nullCheckedParams, - includeFixitCommands: nullCheckedFixitCommands, + const { cursor, pageLength, includeFixitCommands } = params ?? {} + const finalizedParams = { + ...params, + pageLength: params?.pageLength ?? 
DEFAULT_PAGE_LENGTH, } // map undefined values to null to agree with react query caching @@ -50,13 +44,13 @@ export function useAllCommandsAsPreSerializedList( 'getCommandsAsPreSerializedList', cursor, pageLength, - nullCheckedFixitCommands, + includeFixitCommands, ], () => { return getCommandsAsPreSerializedList( host as HostConfig, runId as string, - finalizedNullCheckParams + finalizedParams ).then(response => { const responseData = response.data return { diff --git a/react-api-client/src/runs/useAllCommandsQuery.ts b/react-api-client/src/runs/useAllCommandsQuery.ts index 427e96b554f..a1fd6047fe0 100644 --- a/react-api-client/src/runs/useAllCommandsQuery.ts +++ b/react-api-client/src/runs/useAllCommandsQuery.ts @@ -9,45 +9,30 @@ import type { } from '@opentrons/api-client' const DEFAULT_PAGE_LENGTH = 30 -export const DEFAULT_PARAMS: GetRunCommandsParamsRequest = { - cursor: null, - pageLength: DEFAULT_PAGE_LENGTH, - includeFixitCommands: null, -} export function useAllCommandsQuery( runId: string | null, - params?: GetRunCommandsParamsRequest | null, + params?: GetRunCommandsParamsRequest, options: UseQueryOptions = {} ): UseQueryResult { const host = useHost() - const nullCheckedParams = params ?? DEFAULT_PARAMS - const allOptions: UseQueryOptions = { ...options, enabled: host !== null && runId != null && options.enabled !== false, } - const { cursor, pageLength } = nullCheckedParams - const nullCheckedFixitCommands = params?.includeFixitCommands ?? null - const finalizedNullCheckParams = { - ...nullCheckedParams, - includeFixitCommands: nullCheckedFixitCommands, + + const { cursor, pageLength, includeFixitCommands } = params ?? {} + const finalizedParams = { + ...params, + pageLength: params?.pageLength ?? DEFAULT_PAGE_LENGTH, } const query = useQuery( - [ - host, - 'runs', - runId, - 'commands', - cursor, - pageLength, - finalizedNullCheckParams, - ], + [host, 'runs', runId, 'commands', cursor, pageLength, includeFixitCommands], () => { return getCommands( host as HostConfig, runId as string, - finalizedNullCheckParams + finalizedParams ).then(response => response.data) }, allOptions diff --git a/react-api-client/src/runs/useErrorRecoveryPolicy.ts b/react-api-client/src/runs/useErrorRecoveryPolicy.ts new file mode 100644 index 00000000000..dd625689b83 --- /dev/null +++ b/react-api-client/src/runs/useErrorRecoveryPolicy.ts @@ -0,0 +1,31 @@ +import { useQuery } from 'react-query' + +import { getErrorRecoveryPolicy } from '@opentrons/api-client' + +import { useHost } from '../api' + +import type { UseQueryOptions, UseQueryResult } from 'react-query' +import type { + ErrorRecoveryPolicyResponse, + HostConfig, +} from '@opentrons/api-client' + +export function useErrorRecoveryPolicy( + runId: string, + options: UseQueryOptions = {} +): UseQueryResult { + const host = useHost() + + const query = useQuery( + [host, 'runs', runId, 'errorRecoveryPolicy'], + () => + getErrorRecoveryPolicy(host as HostConfig, runId) + .then(response => response.data) + .catch(e => { + throw e + }), + options + ) + + return query +} diff --git a/react-api-client/src/runs/useRunCommandErrors.ts b/react-api-client/src/runs/useRunCommandErrors.ts index 63dd5875636..ef536d438e0 100644 --- a/react-api-client/src/runs/useRunCommandErrors.ts +++ b/react-api-client/src/runs/useRunCommandErrors.ts @@ -9,10 +9,6 @@ import type { } from '@opentrons/api-client' const DEFAULT_PAGE_LENGTH = 30 -export const DEFAULT_PARAMS: GetCommandsParams = { - cursor: null, - pageLength: DEFAULT_PAGE_LENGTH, -} export function 
useRunCommandErrors( runId: string | null, @@ -20,20 +16,23 @@ export function useRunCommandErrors( options: UseQueryOptions = {} ): UseQueryResult { const host = useHost() - const nullCheckedParams = params ?? DEFAULT_PARAMS - const allOptions: UseQueryOptions = { ...options, enabled: host !== null && runId != null && options.enabled !== false, } - const { cursor, pageLength } = nullCheckedParams + + const { cursor, pageLength } = params ?? {} + const finalizedParams = { + ...params, + pageLength: params?.pageLength ?? DEFAULT_PAGE_LENGTH, + } const query = useQuery( [host, 'runs', runId, 'commandErrors', cursor, pageLength], () => { return getRunCommandErrors( host as HostConfig, runId as string, - nullCheckedParams + finalizedParams ).then(response => response.data) }, allOptions diff --git a/react-api-client/src/runs/useUpdateErrorRecoveryPolicy.ts b/react-api-client/src/runs/useUpdateErrorRecoveryPolicy.ts index 1fa379b1bc5..7e09abc6160 100644 --- a/react-api-client/src/runs/useUpdateErrorRecoveryPolicy.ts +++ b/react-api-client/src/runs/useUpdateErrorRecoveryPolicy.ts @@ -16,7 +16,7 @@ import type { HostConfig, } from '@opentrons/api-client' -export type UseUpdateErrorRecoveryPolicyResponse = UseMutationResult< +export type UseErrorRecoveryPolicyResponse = UseMutationResult< UpdateErrorRecoveryPolicyResponse, AxiosError, RecoveryPolicyRulesParams @@ -37,7 +37,7 @@ export type UseUpdateErrorRecoveryPolicyOptions = UseMutationOptions< export function useUpdateErrorRecoveryPolicy( runId: string, options: UseUpdateErrorRecoveryPolicyOptions = {} -): UseUpdateErrorRecoveryPolicyResponse { +): UseErrorRecoveryPolicyResponse { const host = useHost() const mutation = useMutation< diff --git a/robot-server/pytest.ini b/robot-server/pytest.ini index 211c218295d..51ed89fd0d2 100644 --- a/robot-server/pytest.ini +++ b/robot-server/pytest.ini @@ -4,3 +4,8 @@ markers = ot3_only: Test only functions using the OT3 hardware addopts = --color=yes --strict-markers asyncio_mode = auto + +# Don't allow any new code that uses features removed in SQLAlchemy 2.0. +# We should remove this when we upgrade to SQLAlchemy 2.0. 
+filterwarnings = + error::sqlalchemy.exc.RemovedIn20Warning diff --git a/robot-server/robot_server/data_files/data_files_store.py b/robot-server/robot_server/data_files/data_files_store.py index 28257dbb8d2..e0ef8fefa44 100644 --- a/robot-server/robot_server/data_files/data_files_store.py +++ b/robot-server/robot_server/data_files/data_files_store.py @@ -15,7 +15,7 @@ analysis_csv_rtp_table, run_csv_rtp_table, ) -from robot_server.persistence.tables.schema_7 import DataFileSourceSQLEnum +from robot_server.persistence.tables import DataFileSourceSQLEnum from .models import DataFileSource, FileIdNotFoundError, FileInUseError diff --git a/robot-server/robot_server/persistence/_migrations/_util.py b/robot-server/robot_server/persistence/_migrations/_util.py index b3c44a96af2..b0fd8074966 100644 --- a/robot-server/robot_server/persistence/_migrations/_util.py +++ b/robot-server/robot_server/persistence/_migrations/_util.py @@ -1,10 +1,11 @@ """Shared helpers for migrations.""" -import sqlalchemy - import shutil +import typing from pathlib import Path +import sqlalchemy + from ..database import sqlite_rowid @@ -54,3 +55,43 @@ def copytree_if_exists(src: Path, dst: Path) -> None: shutil.copytree(src=src, dst=dst) except FileNotFoundError: pass + + +def add_column( + engine: sqlalchemy.engine.Engine, + table_name: str, + column: typing.Any, +) -> None: + """Add a column to an existing SQL table, with an `ALTER TABLE` statement. + + Params: + engine: A SQLAlchemy engine to connect to the database. + table_name: The SQL name of the parent table. + column: The SQLAlchemy column object. + + Known limitations: + + - This does not currently support indexes. + - This does not currently support constraints. + - The column will always be added as nullable. Adding non-nullable columns in + SQLite requires an elaborate and sensitive dance that we do not wish to attempt. + https://www.sqlite.org/lang_altertable.html#making_other_kinds_of_table_schema_changes + + To avoid those limitations, instead of this function, consider this: + + 1. Start with an empty database, or drop or rename the current table. + 2. Use SQLAlchemy's `metadata.create_all()` to create an empty table with the new + schema, including the new column. + 3. Copy rows from the old table to the new one, populating the new column + however you please. + """ + column_type = column.type.compile(engine.dialect) + with engine.begin() as transaction: + # todo(mm, 2024-11-25): This text seems like something that SQLAlchemy could generate for us + # (maybe: https://docs.sqlalchemy.org/en/20/core/metadata.html#sqlalchemy.schema.Column.compile), + # and that might help us account for indexes and constraints. 
+ transaction.execute( + sqlalchemy.text( + f"ALTER TABLE {table_name} ADD COLUMN {column.key} {column_type}" + ) + ) diff --git a/robot-server/robot_server/persistence/_migrations/v3_to_v4.py b/robot-server/robot_server/persistence/_migrations/v3_to_v4.py index f5273f5f678..b69eb0e5b4f 100644 --- a/robot-server/robot_server/persistence/_migrations/v3_to_v4.py +++ b/robot-server/robot_server/persistence/_migrations/v3_to_v4.py @@ -9,10 +9,8 @@ from pathlib import Path from contextlib import ExitStack import shutil -from typing import Any - -import sqlalchemy +from ._util import add_column from ..database import sql_engine_ctx from ..file_and_directory_names import DB_FILE from ..tables import schema_4 @@ -35,16 +33,6 @@ def migrate(self, source_dir: Path, dest_dir: Path) -> None: dest_engine = exit_stack.enter_context(sql_engine_ctx(dest_db_file)) schema_4.metadata.create_all(dest_engine) - def add_column( - engine: sqlalchemy.engine.Engine, - table_name: str, - column: Any, - ) -> None: - column_type = column.type.compile(engine.dialect) - engine.execute( - f"ALTER TABLE {table_name} ADD COLUMN {column.key} {column_type}" - ) - add_column( dest_engine, schema_4.analysis_table.name, diff --git a/robot-server/robot_server/persistence/_migrations/v4_to_v5.py b/robot-server/robot_server/persistence/_migrations/v4_to_v5.py index 788723968b2..44b7f9a12d4 100644 --- a/robot-server/robot_server/persistence/_migrations/v4_to_v5.py +++ b/robot-server/robot_server/persistence/_migrations/v4_to_v5.py @@ -9,10 +9,8 @@ from pathlib import Path from contextlib import ExitStack import shutil -from typing import Any - -import sqlalchemy +from ._util import add_column from ..database import sql_engine_ctx from ..file_and_directory_names import DB_FILE from ..tables import schema_5 @@ -35,16 +33,6 @@ def migrate(self, source_dir: Path, dest_dir: Path) -> None: dest_engine = exit_stack.enter_context(sql_engine_ctx(dest_db_file)) schema_5.metadata.create_all(dest_engine) - def add_column( - engine: sqlalchemy.engine.Engine, - table_name: str, - column: Any, - ) -> None: - column_type = column.type.compile(engine.dialect) - engine.execute( - f"ALTER TABLE {table_name} ADD COLUMN {column.key} {column_type}" - ) - add_column( dest_engine, schema_5.protocol_table.name, diff --git a/robot-server/robot_server/persistence/_migrations/v6_to_v7.py b/robot-server/robot_server/persistence/_migrations/v6_to_v7.py index f2c0f2ad93d..8fe63806b8a 100644 --- a/robot-server/robot_server/persistence/_migrations/v6_to_v7.py +++ b/robot-server/robot_server/persistence/_migrations/v6_to_v7.py @@ -11,12 +11,12 @@ from pathlib import Path from contextlib import ExitStack import shutil -from typing import Any import sqlalchemy +from ._util import add_column from ..database import sql_engine_ctx, sqlite_rowid -from ..tables import DataFileSourceSQLEnum, schema_7 +from ..tables import schema_7 from .._folder_migrator import Migration from ..file_and_directory_names import ( @@ -44,16 +44,6 @@ def migrate(self, source_dir: Path, dest_dir: Path) -> None: dest_transaction = exit_stack.enter_context(dest_engine.begin()) - def add_column( - engine: sqlalchemy.engine.Engine, - table_name: str, - column: Any, - ) -> None: - column_type = column.type.compile(engine.dialect) - engine.execute( - f"ALTER TABLE {table_name} ADD COLUMN {column.key} {column_type}" - ) - add_column( dest_engine, schema_7.run_command_table.name, @@ -102,6 +92,6 @@ def _migrate_data_files_table_with_new_source_col( """Add a new 'source' column to data_files table.""" 
    dest_transaction.execute(
        sqlalchemy.update(schema_7.data_files_table).values(
-            {"source": DataFileSourceSQLEnum.UPLOADED}
+            {"source": schema_7.DataFileSourceSQLEnum.UPLOADED}
        )
    )
diff --git a/robot-server/robot_server/persistence/_migrations/v7_to_v8.py b/robot-server/robot_server/persistence/_migrations/v7_to_v8.py
new file mode 100644
index 00000000000..035d92cf045
--- /dev/null
+++ b/robot-server/robot_server/persistence/_migrations/v7_to_v8.py
@@ -0,0 +1,128 @@
+"""Migrate the persistence directory from schema 7 to 8.
+
+Summary of changes from schema 7:
+
+- Adds a new command_error column to store a command's error in the commands table
+- Adds a new command_status column to store a command's status in the commands table
+"""
+
+import json
+from pathlib import Path
+from contextlib import ExitStack
+import shutil
+
+import sqlalchemy
+
+from ._util import add_column
+from ..database import sql_engine_ctx
+from ..tables import schema_8
+from .._folder_migrator import Migration
+
+from ..file_and_directory_names import (
+    DB_FILE,
+)
+from ..tables.schema_8 import CommandStatusSQLEnum
+
+
+class Migration7to8(Migration):  # noqa: D101
+    def migrate(self, source_dir: Path, dest_dir: Path) -> None:
+        """Migrate the persistence directory from schema 7 to 8."""
+        # Copy over all existing directories and files to the new version
+        for item in source_dir.iterdir():
+            if item.is_dir():
+                shutil.copytree(src=item, dst=dest_dir / item.name)
+            else:
+                shutil.copy(src=item, dst=dest_dir / item.name)
+
+        dest_db_file = dest_dir / DB_FILE
+
+        with ExitStack() as exit_stack:
+            dest_engine = exit_stack.enter_context(sql_engine_ctx(dest_db_file))
+
+            dest_transaction = exit_stack.enter_context(dest_engine.begin())
+
+            add_column(
+                dest_engine,
+                schema_8.run_command_table.name,
+                schema_8.run_command_table.c.command_error,
+            )
+
+            add_column(
+                dest_engine,
+                schema_8.run_command_table.name,
+                schema_8.run_command_table.c.command_status,
+            )
+
+            _add_missing_indexes(dest_transaction=dest_transaction)
+
+            _migrate_command_table_with_new_command_error_col_and_command_status(
+                dest_transaction=dest_transaction
+            )
+
+
+def _add_missing_indexes(dest_transaction: sqlalchemy.engine.Connection) -> None:
+    # todo(2024-11-20): Probably add the indexes missing from prior migrations here.
+    # https://opentrons.atlassian.net/browse/EXEC-827
+    index = next(
+        index
+        for index in schema_8.run_command_table.indexes
+        if index.name == "ix_run_run_id_command_status_index_in_run"
+    )
+    index.create(dest_transaction)
+
+
+def _migrate_command_table_with_new_command_error_col_and_command_status(
+    dest_transaction: sqlalchemy.engine.Connection,
+) -> None:
+    """Add new 'command_error' and 'command_status' columns to the run_command table."""
+    commands_table = schema_8.run_command_table
+    select_commands = sqlalchemy.select(commands_table)
+    commands_to_update = []
+    for row in dest_transaction.execute(select_commands).all():
+        data = json.loads(row.command)
+        new_command_error = (
+            # Account for old_row.command["error"] being null.
+ None + if "error" not in row.command or data["error"] is None + else json.dumps(data["error"]) + ) + # parse json as enum + new_command_status = _convert_commands_status_to_sql_command_status( + data["status"] + ) + commands_to_update.append( + { + "_id": row.row_id, + "command_error": new_command_error, + "command_status": new_command_status, + } + ) + + if len(commands_to_update) > 0: + update_commands = ( + sqlalchemy.update(commands_table) + .where(commands_table.c.row_id == sqlalchemy.bindparam("_id")) + .values( + { + "command_error": sqlalchemy.bindparam("command_error"), + "command_status": sqlalchemy.bindparam("command_status"), + } + ) + ) + dest_transaction.execute(update_commands, commands_to_update) + + +def _convert_commands_status_to_sql_command_status( + status: str, +) -> CommandStatusSQLEnum: + match status: + case "queued": + return CommandStatusSQLEnum.QUEUED + case "running": + return CommandStatusSQLEnum.RUNNING + case "failed": + return CommandStatusSQLEnum.FAILED + case "succeeded": + return CommandStatusSQLEnum.SUCCEEDED + case _: + assert False, "command status is unknown" diff --git a/robot-server/robot_server/persistence/file_and_directory_names.py b/robot-server/robot_server/persistence/file_and_directory_names.py index 7074dd6db2f..220a32e7673 100644 --- a/robot-server/robot_server/persistence/file_and_directory_names.py +++ b/robot-server/robot_server/persistence/file_and_directory_names.py @@ -8,7 +8,7 @@ from typing import Final -LATEST_VERSION_DIRECTORY: Final = "7.1" +LATEST_VERSION_DIRECTORY: Final = "8" DECK_CONFIGURATION_FILE: Final = "deck_configuration.json" PROTOCOLS_DIRECTORY: Final = "protocols" diff --git a/robot-server/robot_server/persistence/persistence_directory.py b/robot-server/robot_server/persistence/persistence_directory.py index c6b40ce10ab..1f6a9fb6733 100644 --- a/robot-server/robot_server/persistence/persistence_directory.py +++ b/robot-server/robot_server/persistence/persistence_directory.py @@ -11,7 +11,7 @@ from anyio import Path as AsyncPath, to_thread from ._folder_migrator import MigrationOrchestrator -from ._migrations import up_to_3, v3_to_v4, v4_to_v5, v5_to_v6, v6_to_v7 +from ._migrations import up_to_3, v3_to_v4, v4_to_v5, v5_to_v6, v6_to_v7, v7_to_v8 from .file_and_directory_names import LATEST_VERSION_DIRECTORY _TEMP_PERSISTENCE_DIR_PREFIX: Final = "opentrons-robot-server-" @@ -59,7 +59,8 @@ def make_migration_orchestrator(prepared_root: Path) -> MigrationOrchestrator: # Subdirectory "7" was previously used on our edge branch for an in-dev # schema that was never released to the public. It may be present on # internal robots. - v6_to_v7.Migration6to7(subdirectory=LATEST_VERSION_DIRECTORY), + v6_to_v7.Migration6to7(subdirectory="7.1"), + v7_to_v8.Migration7to8(subdirectory=LATEST_VERSION_DIRECTORY), ], temp_file_prefix="temp-", ) diff --git a/robot-server/robot_server/persistence/tables/__init__.py b/robot-server/robot_server/persistence/tables/__init__.py index fa0129a4ee6..56e149d6dfd 100644 --- a/robot-server/robot_server/persistence/tables/__init__.py +++ b/robot-server/robot_server/persistence/tables/__init__.py @@ -1,7 +1,7 @@ """SQL database schemas.""" # Re-export the latest schema. 
-from .schema_7 import ( +from .schema_8 import ( metadata, protocol_table, analysis_table, @@ -17,6 +17,7 @@ ProtocolKindSQLEnum, BooleanSettingKey, DataFileSourceSQLEnum, + CommandStatusSQLEnum, ) @@ -36,4 +37,5 @@ "ProtocolKindSQLEnum", "BooleanSettingKey", "DataFileSourceSQLEnum", + "CommandStatusSQLEnum", ] diff --git a/robot-server/robot_server/persistence/tables/schema_8.py b/robot-server/robot_server/persistence/tables/schema_8.py new file mode 100644 index 00000000000..c92dd4645c7 --- /dev/null +++ b/robot-server/robot_server/persistence/tables/schema_8.py @@ -0,0 +1,358 @@ +"""v8 of our SQLite schema.""" +import enum +import sqlalchemy + +from robot_server.persistence._utc_datetime import UTCDateTime + +metadata = sqlalchemy.MetaData() + + +class PrimitiveParamSQLEnum(enum.Enum): + """Enum type to store primitive param type.""" + + INT = "int" + FLOAT = "float" + BOOL = "bool" + STR = "str" + + +class ProtocolKindSQLEnum(enum.Enum): + """What kind a stored protocol is.""" + + STANDARD = "standard" + QUICK_TRANSFER = "quick-transfer" + + +class DataFileSourceSQLEnum(enum.Enum): + """The source this data file is from.""" + + UPLOADED = "uploaded" + GENERATED = "generated" + + +class CommandStatusSQLEnum(enum.Enum): + """Command status sql enum.""" + + QUEUED = "queued" + RUNNING = "running" + SUCCEEDED = "succeeded" + FAILED = "failed" + + +protocol_table = sqlalchemy.Table( + "protocol", + metadata, + sqlalchemy.Column( + "id", + sqlalchemy.String, + primary_key=True, + ), + sqlalchemy.Column( + "created_at", + UTCDateTime, + nullable=False, + ), + sqlalchemy.Column("protocol_key", sqlalchemy.String, nullable=True), + sqlalchemy.Column( + "protocol_kind", + sqlalchemy.Enum( + ProtocolKindSQLEnum, + values_callable=lambda obj: [e.value for e in obj], + validate_strings=True, + create_constraint=True, + ), + index=True, + nullable=False, + ), +) + +analysis_table = sqlalchemy.Table( + "analysis", + metadata, + sqlalchemy.Column( + "id", + sqlalchemy.String, + primary_key=True, + ), + sqlalchemy.Column( + "protocol_id", + sqlalchemy.String, + sqlalchemy.ForeignKey("protocol.id"), + index=True, + nullable=False, + ), + sqlalchemy.Column( + "analyzer_version", + sqlalchemy.String, + nullable=False, + ), + sqlalchemy.Column( + "completed_analysis", + # Stores a JSON string. See CompletedAnalysisStore. + sqlalchemy.String, + nullable=False, + ), +) + +analysis_primitive_type_rtp_table = sqlalchemy.Table( + "analysis_primitive_rtp_table", + metadata, + sqlalchemy.Column( + "row_id", + sqlalchemy.Integer, + primary_key=True, + ), + sqlalchemy.Column( + "analysis_id", + sqlalchemy.ForeignKey("analysis.id"), + nullable=False, + ), + sqlalchemy.Column( + "parameter_variable_name", + sqlalchemy.String, + nullable=False, + ), + sqlalchemy.Column( + "parameter_type", + sqlalchemy.Enum( + PrimitiveParamSQLEnum, + values_callable=lambda obj: [e.value for e in obj], + create_constraint=True, + # todo(mm, 2024-09-24): Can we add validate_strings=True here? 
+        ),
+        nullable=False,
+    ),
+    sqlalchemy.Column(
+        "parameter_value",
+        sqlalchemy.String,
+        nullable=False,
+    ),
+)
+
+analysis_csv_rtp_table = sqlalchemy.Table(
+    "analysis_csv_rtp_table",
+    metadata,
+    sqlalchemy.Column(
+        "row_id",
+        sqlalchemy.Integer,
+        primary_key=True,
+    ),
+    sqlalchemy.Column(
+        "analysis_id",
+        sqlalchemy.ForeignKey("analysis.id"),
+        nullable=False,
+    ),
+    sqlalchemy.Column(
+        "parameter_variable_name",
+        sqlalchemy.String,
+        nullable=False,
+    ),
+    sqlalchemy.Column(
+        "file_id",
+        sqlalchemy.ForeignKey("data_files.id"),
+        nullable=True,
+    ),
+)
+
+run_table = sqlalchemy.Table(
+    "run",
+    metadata,
+    sqlalchemy.Column(
+        "id",
+        sqlalchemy.String,
+        primary_key=True,
+    ),
+    sqlalchemy.Column(
+        "created_at",
+        UTCDateTime,
+        nullable=False,
+    ),
+    sqlalchemy.Column(
+        "protocol_id",
+        sqlalchemy.String,
+        sqlalchemy.ForeignKey("protocol.id"),
+        nullable=True,
+    ),
+    sqlalchemy.Column(
+        "state_summary",
+        sqlalchemy.String,
+        nullable=True,
+    ),
+    sqlalchemy.Column("engine_status", sqlalchemy.String, nullable=True),
+    sqlalchemy.Column("_updated_at", UTCDateTime, nullable=True),
+    sqlalchemy.Column(
+        "run_time_parameters",
+        # Stores a JSON string. See RunStore.
+        sqlalchemy.String,
+        nullable=True,
+    ),
+)
+
+action_table = sqlalchemy.Table(
+    "action",
+    metadata,
+    sqlalchemy.Column(
+        "id",
+        sqlalchemy.String,
+        primary_key=True,
+    ),
+    sqlalchemy.Column("created_at", UTCDateTime, nullable=False),
+    sqlalchemy.Column("action_type", sqlalchemy.String, nullable=False),
+    sqlalchemy.Column(
+        "run_id",
+        sqlalchemy.String,
+        sqlalchemy.ForeignKey("run.id"),
+        nullable=False,
+    ),
+)
+
+run_command_table = sqlalchemy.Table(
+    "run_command",
+    metadata,
+    sqlalchemy.Column("row_id", sqlalchemy.Integer, primary_key=True),
+    sqlalchemy.Column(
+        "run_id", sqlalchemy.String, sqlalchemy.ForeignKey("run.id"), nullable=False
+    ),
+    # command_index in commands enumeration
+    sqlalchemy.Column("index_in_run", sqlalchemy.Integer, nullable=False),
+    sqlalchemy.Column("command_id", sqlalchemy.String, nullable=False),
+    sqlalchemy.Column("command", sqlalchemy.String, nullable=False),
+    sqlalchemy.Column(
+        "command_intent",
+        sqlalchemy.String,
+        # nullable=True to match the underlying SQL, which is nullable because of a bug
+        # in the migration that introduced this column. This is not intended to ever be
+        # null in practice.
+        nullable=True,
+    ),
+    sqlalchemy.Column("command_error", sqlalchemy.String, nullable=True),
+    sqlalchemy.Column(
+        "command_status",
+        sqlalchemy.Enum(
+            CommandStatusSQLEnum,
+            values_callable=lambda obj: [e.value for e in obj],
+            validate_strings=True,
+            # todo(mm, 2024-11-20): We want create_constraint=True here. Something
+            # about the way we compare SQL in test_tables.py is making that difficult--
+            # even when we correctly add the constraint in the migration, the SQL
+            # doesn't compare equal to what create_constraint=True here would emit.
+            create_constraint=False,
+        ),
+        # nullable=True because it was easier for the migration to add the column
+        # this way. This is not intended to ever be null in practice.
+        nullable=True,
+    ),
+    sqlalchemy.Index(
+        "ix_run_run_id_command_id",  # An arbitrary name for the index.
+        "run_id",
+        "command_id",
+        unique=True,
+    ),
+    sqlalchemy.Index(
+        "ix_run_run_id_index_in_run",  # An arbitrary name for the index.
+        "run_id",
+        "index_in_run",
+        unique=True,
+    ),
+    sqlalchemy.Index(
+        "ix_run_run_id_command_status_index_in_run",  # An arbitrary name for the index.
+ "run_id", + "command_status", + "index_in_run", + unique=True, + ), +) + +data_files_table = sqlalchemy.Table( + "data_files", + metadata, + sqlalchemy.Column( + "id", + sqlalchemy.String, + primary_key=True, + ), + sqlalchemy.Column( + "name", + sqlalchemy.String, + nullable=False, + ), + sqlalchemy.Column( + "file_hash", + sqlalchemy.String, + nullable=False, + ), + sqlalchemy.Column( + "created_at", + UTCDateTime, + nullable=False, + ), + sqlalchemy.Column( + "source", + sqlalchemy.Enum( + DataFileSourceSQLEnum, + values_callable=lambda obj: [e.value for e in obj], + validate_strings=True, + # create_constraint=False to match the underlying SQL, which omits + # the constraint because of a bug in the migration that introduced this + # column. This is not intended to ever have values other than those in + # DataFileSourceSQLEnum. + create_constraint=False, + ), + # nullable=True to match the underlying SQL, which is nullable because of a bug + # in the migration that introduced this column. This is not intended to ever be + # null in practice. + nullable=True, + ), +) + +run_csv_rtp_table = sqlalchemy.Table( + "run_csv_rtp_table", + metadata, + sqlalchemy.Column( + "row_id", + sqlalchemy.Integer, + primary_key=True, + ), + sqlalchemy.Column( + "run_id", + sqlalchemy.ForeignKey("run.id"), + nullable=False, + ), + sqlalchemy.Column( + "parameter_variable_name", + sqlalchemy.String, + nullable=False, + ), + sqlalchemy.Column( + "file_id", + sqlalchemy.ForeignKey("data_files.id"), + nullable=True, + ), +) + + +class BooleanSettingKey(enum.Enum): + """Keys for boolean settings.""" + + ENABLE_ERROR_RECOVERY = "enable_error_recovery" + + +boolean_setting_table = sqlalchemy.Table( + "boolean_setting", + metadata, + sqlalchemy.Column( + "key", + sqlalchemy.Enum( + BooleanSettingKey, + values_callable=lambda obj: [e.value for e in obj], + validate_strings=True, + create_constraint=True, + ), + primary_key=True, + ), + sqlalchemy.Column( + "value", + sqlalchemy.Boolean, + nullable=False, + ), +) diff --git a/robot-server/robot_server/runs/dependencies.py b/robot-server/robot_server/runs/dependencies.py index 60c96d1e5e6..5f49f04582d 100644 --- a/robot-server/robot_server/runs/dependencies.py +++ b/robot-server/robot_server/runs/dependencies.py @@ -48,6 +48,7 @@ _run_orchestrator_store_accessor = AppStateAccessor[RunOrchestratorStore]( "run_orchestrator_store" ) +_run_data_manager_accessor = AppStateAccessor[RunDataManager]("run_data_manager") _light_control_accessor = AppStateAccessor[LightController]("light_controller") @@ -154,6 +155,7 @@ async def get_is_okay_to_create_maintenance_run( async def get_run_data_manager( + app_state: Annotated[AppState, Depends(get_app_state)], task_runner: Annotated[TaskRunner, Depends(get_task_runner)], run_orchestrator_store: Annotated[ RunOrchestratorStore, Depends(get_run_orchestrator_store) @@ -164,14 +166,20 @@ async def get_run_data_manager( ErrorRecoverySettingStore, Depends(get_error_recovery_setting_store) ], ) -> RunDataManager: - """Get a run data manager to keep track of current/historical run data.""" - return RunDataManager( - run_orchestrator_store=run_orchestrator_store, - run_store=run_store, - error_recovery_setting_store=error_recovery_setting_store, - task_runner=task_runner, - runs_publisher=runs_publisher, - ) + """Get a singleton run data manager to keep track of current/historical run data.""" + run_data_manager = _run_data_manager_accessor.get_from(app_state) + + if run_data_manager is None: + run_data_manager = RunDataManager( + 
run_orchestrator_store=run_orchestrator_store, + run_store=run_store, + error_recovery_setting_store=error_recovery_setting_store, + task_runner=task_runner, + runs_publisher=runs_publisher, + ) + _run_data_manager_accessor.set_on(app_state, run_data_manager) + + return run_data_manager async def get_run_auto_deleter( diff --git a/robot-server/robot_server/runs/router/__init__.py b/robot-server/robot_server/runs/router/__init__.py index 738d0e05952..d7a1640edb8 100644 --- a/robot-server/robot_server/runs/router/__init__.py +++ b/robot-server/robot_server/runs/router/__init__.py @@ -5,6 +5,7 @@ from .commands_router import commands_router from .actions_router import actions_router from .labware_router import labware_router +from .error_recovery_policy_router import error_recovery_policy_router runs_router = APIRouter() @@ -12,5 +13,6 @@ runs_router.include_router(commands_router) runs_router.include_router(actions_router) runs_router.include_router(labware_router) +runs_router.include_router(error_recovery_policy_router) __all__ = ["runs_router"] diff --git a/robot-server/robot_server/runs/router/base_router.py b/robot-server/robot_server/runs/router/base_router.py index 23153669d41..4fb4be2b401 100644 --- a/robot-server/robot_server/runs/router/base_router.py +++ b/robot-server/robot_server/runs/router/base_router.py @@ -75,7 +75,6 @@ get_run_auto_deleter, get_quick_transfer_run_auto_deleter, ) -from ..error_recovery_models import ErrorRecoveryPolicy from robot_server.deck_configuration.fastapi_dependencies import ( get_deck_configuration_store, @@ -439,47 +438,6 @@ async def update_run( ) -@PydanticResponse.wrap_route( - base_router.put, - path="/runs/{runId}/errorRecoveryPolicy", - summary="Set a run's error recovery policy", - description=dedent( - """ - Update how to handle different kinds of command failures. - - For this to have any effect, error recovery must also be enabled globally. - See `PATCH /errorRecovery/settings`. - """ - ), - responses={ - status.HTTP_200_OK: {"model": SimpleEmptyBody}, - status.HTTP_409_CONFLICT: {"model": ErrorBody[RunStopped]}, - }, -) -async def put_error_recovery_policy( - runId: str, - request_body: RequestModel[ErrorRecoveryPolicy], - run_data_manager: Annotated[RunDataManager, Depends(get_run_data_manager)], -) -> PydanticResponse[SimpleEmptyBody]: - """Create run polices. - - Arguments: - runId: Run ID pulled from URL. - request_body: Request body with run policies data. - run_data_manager: Current and historical run data management. - """ - rules = request_body.data.policyRules - try: - run_data_manager.set_error_recovery_rules(run_id=runId, rules=rules) - except RunNotCurrentError as e: - raise RunStopped(detail=str(e)).as_error(status.HTTP_409_CONFLICT) from e - - return await PydanticResponse.create( - content=SimpleEmptyBody.construct(), - status_code=status.HTTP_200_OK, - ) - - @PydanticResponse.wrap_route( base_router.get, path="/runs/{runId}/commandErrors", @@ -527,14 +485,13 @@ async def get_run_commands_error( run_data_manager: Run data retrieval interface. """ try: - all_errors = run_data_manager.get_command_errors(run_id=runId) - total_length = len(all_errors) + all_errors_count = run_data_manager.get_command_errors_count(run_id=runId) if cursor is None: - if len(all_errors) > 0: + if all_errors_count > 0: # Get the most recent error, # which we can find just at the end of the list. 
- cursor = total_length - 1 + cursor = all_errors_count - 1 else: cursor = 0 diff --git a/robot-server/robot_server/runs/router/error_recovery_policy_router.py b/robot-server/robot_server/runs/router/error_recovery_policy_router.py new file mode 100644 index 00000000000..4653d564244 --- /dev/null +++ b/robot-server/robot_server/runs/router/error_recovery_policy_router.py @@ -0,0 +1,97 @@ +"""Router for /runs/{runId}/errorRecoveryPolicy endpoints.""" + + +from textwrap import dedent +from typing import Annotated + +from fastapi import status, APIRouter, Depends + +from robot_server.errors.error_responses import ErrorBody +from robot_server.service.json_api.request import RequestModel +from robot_server.service.json_api.response import ( + PydanticResponse, + SimpleBody, + SimpleEmptyBody, +) + +from .base_router import RunStopped +from ..dependencies import get_run_data_manager +from ..run_data_manager import RunDataManager, RunNotCurrentError +from ..error_recovery_models import ErrorRecoveryPolicy + + +error_recovery_policy_router = APIRouter() + + +@PydanticResponse.wrap_route( + error_recovery_policy_router.put, + path="/runs/{runId}/errorRecoveryPolicy", + summary="Set a run's error recovery policy", + description=dedent( + """ + Update how to handle different kinds of command failures. + + For this to have any effect, error recovery must also be enabled globally. + See `PATCH /errorRecovery/settings`. + """ + ), + responses={ + status.HTTP_200_OK: {"model": SimpleEmptyBody}, + status.HTTP_409_CONFLICT: {"model": ErrorBody[RunStopped]}, + }, +) +async def put_error_recovery_policy( + runId: str, + request_body: RequestModel[ErrorRecoveryPolicy], + run_data_manager: Annotated[RunDataManager, Depends(get_run_data_manager)], +) -> PydanticResponse[SimpleEmptyBody]: + """Set a run's error recovery policy. + + Arguments: + runId: Run ID pulled from URL. + request_body: Request body with the new run policy. + run_data_manager: Current and historical run data management. + """ + rules = request_body.data.policyRules + try: + run_data_manager.set_error_recovery_rules(run_id=runId, rules=rules) + except RunNotCurrentError as e: + raise RunStopped(detail=str(e)).as_error(status.HTTP_409_CONFLICT) from e + + return await PydanticResponse.create( + content=SimpleEmptyBody.construct(), + status_code=status.HTTP_200_OK, + ) + + +@PydanticResponse.wrap_route( + error_recovery_policy_router.get, + path="/runs/{runId}/errorRecoveryPolicy", + summary="Get a run's current error recovery policy", + description="See `PUT /runs/{runId}/errorRecoveryPolicy`.", + responses={ + status.HTTP_200_OK: {"model": SimpleBody[ErrorRecoveryPolicy]}, + status.HTTP_409_CONFLICT: {"model": ErrorBody[RunStopped]}, + }, +) +async def get_error_recovery_policy( + runId: str, + run_data_manager: Annotated[RunDataManager, Depends(get_run_data_manager)], +) -> PydanticResponse[SimpleBody[ErrorRecoveryPolicy]]: + """Get a run's current error recovery policy. + + Arguments: + runId: Run ID pulled from URL. + run_data_manager: Current and historical run data management. 
+ """ + try: + rules = run_data_manager.get_error_recovery_rules(run_id=runId) + except RunNotCurrentError as e: + raise RunStopped(detail=str(e)).as_error(status.HTTP_409_CONFLICT) from e + + return await PydanticResponse.create( + content=SimpleBody.construct( + data=ErrorRecoveryPolicy.construct(policyRules=rules) + ), + status_code=status.HTTP_200_OK, + ) diff --git a/robot-server/robot_server/runs/run_data_manager.py b/robot-server/robot_server/runs/run_data_manager.py index cbbcd022eb6..9999e040523 100644 --- a/robot-server/robot_server/runs/run_data_manager.py +++ b/robot-server/robot_server/runs/run_data_manager.py @@ -15,7 +15,6 @@ CommandErrorSlice, CommandPointer, Command, - ErrorOccurrence, ) from opentrons.protocol_engine.types import ( PrimitiveRunTimeParamValuesType, @@ -144,7 +143,7 @@ class PreSerializedCommandsNotAvailableError(LookupError): class RunDataManager: """Collaborator to manage current and historical run data. - Provides a facade to both an EngineStore (current run) and a RunStore + Provides a facade to both a RunOrchestratorStore (current run) and a RunStore (historical runs). Returns `Run` response models to the router. Args: @@ -162,7 +161,14 @@ def __init__( ) -> None: self._run_orchestrator_store = run_orchestrator_store self._run_store = run_store + self._error_recovery_setting_store = error_recovery_setting_store + # todo(mm, 2024-11-22): Storing the list of error recovery rules is outside the + # responsibilities of this class. It's also clunky for us to store it like this + # because we need to remember to clear it whenever the current run changes. + # This error recovery mapping stuff probably belongs in RunOrchestratorStore. + self._current_run_error_recovery_rules: List[ErrorRecoveryRule] = [] + self._task_runner = task_runner self._runs_publisher = runs_publisher @@ -214,9 +220,10 @@ async def create( ) error_recovery_is_enabled = self._error_recovery_setting_store.get_is_enabled() + self._current_run_error_recovery_rules = _INITIAL_ERROR_RECOVERY_RULES initial_error_recovery_policy = ( error_recovery_mapping.create_error_recovery_policy_from_rules( - _INITIAL_ERROR_RECOVERY_RULES, error_recovery_is_enabled + self._current_run_error_recovery_rules, error_recovery_is_enabled ) ) @@ -368,18 +375,16 @@ async def update(self, run_id: str, current: Optional[bool]) -> Union[Run, BadRu next_current = current if current is False else True if next_current is False: - ( - commands, - state_summary, - parameters, - ) = await self._run_orchestrator_store.clear() + run_result = await self._run_orchestrator_store.clear() + state_summary = run_result.state_summary + parameters = run_result.parameters run_resource: Union[ RunResource, BadRunResource ] = self._run_store.update_run_state( run_id=run_id, - summary=state_summary, - commands=commands, - run_time_parameters=parameters, + summary=run_result.state_summary, + commands=run_result.commands, + run_time_parameters=run_result.parameters, ) self._runs_publisher.publish_pre_serialized_commands_notification(run_id) else: @@ -429,7 +434,7 @@ def get_commands_slice( def get_command_error_slice( self, run_id: str, cursor: int, length: int ) -> CommandErrorSlice: - """Get a slice of run commands. + """Get a slice of run commands errors. Args: run_id: ID of the run. 
@@ -443,9 +448,9 @@ def get_command_error_slice( return self._run_orchestrator_store.get_command_error_slice( cursor=cursor, length=length ) - - # TODO(tz, 8-5-2024): Change this to return to error list from the DB when we implement https://opentrons.atlassian.net/browse/EXEC-655. - raise RunNotCurrentError() + return self._run_store.get_commands_errors_slice( + run_id=run_id, cursor=cursor, length=length + ) def get_current_command(self, run_id: str) -> Optional[CommandPointer]: """Get the "current" command, if any. @@ -504,13 +509,11 @@ def get_command(self, run_id: str, command_id: str) -> Command: return self._run_store.get_command(run_id=run_id, command_id=command_id) - def get_command_errors(self, run_id: str) -> list[ErrorOccurrence]: + def get_command_errors_count(self, run_id: str) -> int: """Get all command errors.""" if run_id == self._run_orchestrator_store.current_run_id: - return self._run_orchestrator_store.get_command_errors() - - # TODO(tz, 8-5-2024): Change this to return the error list from the DB when we implement https://opentrons.atlassian.net/browse/EXEC-655. - raise RunNotCurrentError() + return len(self._run_orchestrator_store.get_command_errors()) + return self._run_store.get_command_errors_count(run_id) def get_nozzle_maps(self, run_id: str) -> Mapping[str, NozzleMapInterface]: """Get current nozzle maps keyed by pipette id.""" @@ -544,7 +547,7 @@ def get_all_commands_as_preserialized_list( def set_error_recovery_rules( self, run_id: str, rules: List[ErrorRecoveryRule] ) -> None: - """Set the run's error recovery policy. + """Set the run's error recovery policy, in robot-server terms. The input rules get combined with the global error recovery enabled/disabled setting, which this method retrieves automatically. @@ -554,11 +557,20 @@ def set_error_recovery_rules( f"Cannot update {run_id} because it is not the current run." ) is_enabled = self._error_recovery_setting_store.get_is_enabled() + self._current_run_error_recovery_rules = rules mapped_policy = error_recovery_mapping.create_error_recovery_policy_from_rules( - rules, is_enabled + self._current_run_error_recovery_rules, is_enabled ) self._run_orchestrator_store.set_error_recovery_policy(policy=mapped_policy) + def get_error_recovery_rules(self, run_id: str) -> List[ErrorRecoveryRule]: + """Get the run's error recovery policy.""" + if run_id != self._run_orchestrator_store.current_run_id: + raise RunNotCurrentError( + f"Cannot get the error recovery policy of {run_id} because it is not the current run." + ) + return self._current_run_error_recovery_rules + def _get_state_summary(self, run_id: str) -> Union[StateSummary, BadStateSummary]: if run_id == self._run_orchestrator_store.current_run_id: return self._run_orchestrator_store.get_state_summary() diff --git a/robot-server/robot_server/runs/run_orchestrator_store.py b/robot-server/robot_server/runs/run_orchestrator_store.py index b4bd757150e..a8ad429db4a 100644 --- a/robot-server/robot_server/runs/run_orchestrator_store.py +++ b/robot-server/robot_server/runs/run_orchestrator_store.py @@ -293,7 +293,9 @@ async def clear(self) -> RunResult: self._run_orchestrator = None return RunResult( - state_summary=run_data, commands=commands, parameters=run_time_parameters + state_summary=run_data, + commands=commands, + parameters=run_time_parameters, ) # todo(mm, 2024-11-15): Are all of these pass-through methods helpful? 
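Before the run_store.py changes below, it may help to see how the new failed-command queries turn an optional, 0-based API cursor into the 1-based row_number() window that SQLite uses. The following standalone sketch is illustrative only: the helper name resolve_error_slice_window is ours, not part of the patch; it simply mirrors the defaulting and clamping arithmetic that get_commands_errors_slice performs in the diff that follows.

# Illustrative sketch only -- not part of the patch. It reproduces the cursor
# arithmetic of RunStore.get_commands_errors_slice(): default to the tail of the
# collection, clamp the 0-based cursor, then shift to SQL's 1-based row_number().
from typing import Optional, Tuple


def resolve_error_slice_window(
    total_failed: int, length: int, cursor: Optional[int]
) -> Tuple[int, int]:
    """Return the 1-based, half-open [start, end) row-number window for a slice."""
    requested = cursor if cursor is not None else total_failed - length
    start_row = max(0, min(requested, total_failed - 1)) + 1
    return start_row, start_row + length


if __name__ == "__main__":
    # 3 failed commands, page length 5, no cursor: the whole collection, rows 1-3.
    print(resolve_error_slice_window(total_failed=3, length=5, cursor=None))  # (1, 6)
    # 10 failed commands, page length 2, no cursor: the last page, rows 9-10.
    print(resolve_error_slice_window(total_failed=10, length=2, cursor=None))  # (9, 11)
    # Explicit cursor 0: the first page, rows 1-2.
    print(resolve_error_slice_window(total_failed=10, length=2, cursor=0))  # (1, 3)
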
diff --git a/robot-server/robot_server/runs/run_store.py b/robot-server/robot_server/runs/run_store.py index 6ab8665c454..0148f20058b 100644 --- a/robot-server/robot_server/runs/run_store.py +++ b/robot-server/robot_server/runs/run_store.py @@ -11,7 +11,14 @@ from pydantic import ValidationError from opentrons.util.helpers import utc_now -from opentrons.protocol_engine import StateSummary, CommandSlice, CommandIntent +from opentrons.protocol_engine import ( + StateSummary, + CommandSlice, + CommandIntent, + ErrorOccurrence, + CommandErrorSlice, + CommandStatus, +) from opentrons.protocol_engine.commands import Command from opentrons.protocol_engine.types import RunTimeParameter @@ -38,6 +45,7 @@ from .action_models import RunAction, RunActionType from .run_models import RunNotFoundError +from ..persistence.tables import CommandStatusSQLEnum log = logging.getLogger(__name__) @@ -179,6 +187,12 @@ def update_run_state( "command_intent": str(command.intent.value) if command.intent else CommandIntent.PROTOCOL, + "command_error": pydantic_to_json(command.error) + if command.error + else None, + "command_status": _convert_commands_status_to_sql_command_status( + command.status + ), }, ) @@ -537,6 +551,107 @@ def get_all_commands_as_preserialized_list( commands_result = transaction.scalars(select_commands).all() return commands_result + def get_command_errors_count(self, run_id: str) -> int: + """Get run commands errors count from the store. + + Args: + run_id: Run ID to pull commands from. + + Returns: + The number of commands errors. + + Raises: + RunNotFoundError: The given run ID was not found. + """ + with self._sql_engine.begin() as transaction: + if not self._run_exists(run_id, transaction): + raise RunNotFoundError(run_id=run_id) + + select_count = sqlalchemy.select(sqlalchemy.func.count()).where( + and_( + run_command_table.c.run_id == run_id, + run_command_table.c.command_status == CommandStatusSQLEnum.FAILED, + ) + ) + errors_count: int = transaction.execute(select_count).scalar_one() + return errors_count + + def get_commands_errors_slice( + self, + run_id: str, + length: int, + cursor: Optional[int], + ) -> CommandErrorSlice: + """Get a slice of run commands errors from the store. + + Args: + run_id: Run ID to pull commands from. + length: Number of commands to return. + cursor: The starting index of the slice in the whole collection. + If `None`, up to `length` elements at the end of the collection will + be returned. + + Returns: + A collection of command errors as well as the actual cursor used and + the total length of the collection. + + Raises: + RunNotFoundError: The given run ID was not found. + """ + with self._sql_engine.begin() as transaction: + if not self._run_exists(run_id, transaction): + raise RunNotFoundError(run_id=run_id) + + select_count = sqlalchemy.select(sqlalchemy.func.count()).where( + and_( + run_command_table.c.run_id == run_id, + run_command_table.c.command_status == CommandStatusSQLEnum.FAILED, + ) + ) + count_result: int = transaction.execute(select_count).scalar_one() + + actual_cursor = cursor if cursor is not None else count_result - length + # Clamp to [0, count_result). + # cursor is 0 based index and row number starts from 1. 
+ actual_cursor = max(0, min(actual_cursor, count_result - 1)) + 1 + select_command_errors = ( + sqlalchemy.select( + sqlalchemy.func.row_number().over().label("row_num"), + run_command_table, + ) + .where( + and_( + run_command_table.c.run_id == run_id, + run_command_table.c.command_status + == CommandStatusSQLEnum.FAILED, + ) + ) + .order_by(run_command_table.c.index_in_run) + .subquery() + ) + + select_slice = ( + sqlalchemy.select(select_command_errors.c.command_error) + .where( + and_( + select_command_errors.c.row_num >= actual_cursor, + select_command_errors.c.row_num < actual_cursor + length, + ) + ) + .order_by(select_command_errors.c.index_in_run) + ) + slice_result = transaction.execute(select_slice).all() + + sliced_commands: List[ErrorOccurrence] = [ + json_to_pydantic(ErrorOccurrence, row.command_error) for row in slice_result + ] + + return CommandErrorSlice( + cursor=actual_cursor, + total_length=count_result, + commands_errors=sliced_commands, + ) + @lru_cache(maxsize=_CACHE_ENTRIES) def get_command(self, run_id: str, command_id: str) -> Command: """Get run command by id. @@ -712,3 +827,17 @@ def _convert_state_to_sql_values( "_updated_at": utc_now(), "run_time_parameters": pydantic_list_to_json(run_time_parameters), } + + +def _convert_commands_status_to_sql_command_status( + status: CommandStatus, +) -> CommandStatusSQLEnum: + match status: + case CommandStatus.QUEUED: + return CommandStatusSQLEnum.QUEUED + case CommandStatus.RUNNING: + return CommandStatusSQLEnum.RUNNING + case CommandStatus.FAILED: + return CommandStatusSQLEnum.FAILED + case CommandStatus.SUCCEEDED: + return CommandStatusSQLEnum.SUCCEEDED diff --git a/robot-server/tests/persistence/test_tables.py b/robot-server/tests/persistence/test_tables.py index 642d2506e93..6363ed8f47f 100644 --- a/robot-server/tests/persistence/test_tables.py +++ b/robot-server/tests/persistence/test_tables.py @@ -18,6 +18,7 @@ schema_5, schema_6, schema_7, + schema_8, ) # The statements that we expect to emit when we create a fresh database. 
@@ -110,6 +111,8 @@ command_id VARCHAR NOT NULL, command VARCHAR NOT NULL, command_intent VARCHAR, + command_error VARCHAR, + command_status VARCHAR(9), PRIMARY KEY (row_id), FOREIGN KEY(run_id) REFERENCES run (id) ) @@ -121,6 +124,9 @@ CREATE UNIQUE INDEX ix_run_run_id_index_in_run ON run_command (run_id, index_in_run) """, """ + CREATE UNIQUE INDEX ix_run_run_id_command_status_index_in_run ON run_command (run_id, command_status, index_in_run) + """, + """ CREATE INDEX ix_protocol_protocol_kind ON protocol (protocol_kind) """, """ @@ -155,7 +161,130 @@ ] -EXPECTED_STATEMENTS_V7 = EXPECTED_STATEMENTS_LATEST +EXPECTED_STATEMENTS_V8 = EXPECTED_STATEMENTS_LATEST + + +EXPECTED_STATEMENTS_V7 = [ + """ + CREATE TABLE protocol ( + id VARCHAR NOT NULL, + created_at DATETIME NOT NULL, + protocol_key VARCHAR, + protocol_kind VARCHAR(14) NOT NULL, + PRIMARY KEY (id), + CONSTRAINT protocolkindsqlenum CHECK (protocol_kind IN ('standard', 'quick-transfer')) + ) + """, + """ + CREATE TABLE analysis ( + id VARCHAR NOT NULL, + protocol_id VARCHAR NOT NULL, + analyzer_version VARCHAR NOT NULL, + completed_analysis VARCHAR NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(protocol_id) REFERENCES protocol (id) + ) + """, + """ + CREATE TABLE analysis_primitive_rtp_table ( + row_id INTEGER NOT NULL, + analysis_id VARCHAR NOT NULL, + parameter_variable_name VARCHAR NOT NULL, + parameter_type VARCHAR(5) NOT NULL, + parameter_value VARCHAR NOT NULL, + PRIMARY KEY (row_id), + FOREIGN KEY(analysis_id) REFERENCES analysis (id), + CONSTRAINT primitiveparamsqlenum CHECK (parameter_type IN ('int', 'float', 'bool', 'str')) + ) + """, + """ + CREATE TABLE analysis_csv_rtp_table ( + row_id INTEGER NOT NULL, + analysis_id VARCHAR NOT NULL, + parameter_variable_name VARCHAR NOT NULL, + file_id VARCHAR, + PRIMARY KEY (row_id), + FOREIGN KEY(analysis_id) REFERENCES analysis (id), + FOREIGN KEY(file_id) REFERENCES data_files (id) + ) + """, + """ + CREATE INDEX ix_analysis_protocol_id ON analysis (protocol_id) + """, + """ + CREATE TABLE run ( + id VARCHAR NOT NULL, + created_at DATETIME NOT NULL, + protocol_id VARCHAR, + state_summary VARCHAR, + engine_status VARCHAR, + _updated_at DATETIME, + run_time_parameters VARCHAR, + PRIMARY KEY (id), + FOREIGN KEY(protocol_id) REFERENCES protocol (id) + ) + """, + """ + CREATE TABLE action ( + id VARCHAR NOT NULL, + created_at DATETIME NOT NULL, + action_type VARCHAR NOT NULL, + run_id VARCHAR NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(run_id) REFERENCES run (id) + ) + """, + """ + CREATE TABLE run_command ( + row_id INTEGER NOT NULL, + run_id VARCHAR NOT NULL, + index_in_run INTEGER NOT NULL, + command_id VARCHAR NOT NULL, + command VARCHAR NOT NULL, + command_intent VARCHAR, + PRIMARY KEY (row_id), + FOREIGN KEY(run_id) REFERENCES run (id) + ) + """, + """ + CREATE UNIQUE INDEX ix_run_run_id_command_id ON run_command (run_id, command_id) + """, + """ + CREATE UNIQUE INDEX ix_run_run_id_index_in_run ON run_command (run_id, index_in_run) + """, + """ + CREATE INDEX ix_protocol_protocol_kind ON protocol (protocol_kind) + """, + """ + CREATE TABLE data_files ( + id VARCHAR NOT NULL, + name VARCHAR NOT NULL, + file_hash VARCHAR NOT NULL, + created_at DATETIME NOT NULL, + source VARCHAR(9), + PRIMARY KEY (id) + ) + """, + """ + CREATE TABLE run_csv_rtp_table ( + row_id INTEGER NOT NULL, + run_id VARCHAR NOT NULL, + parameter_variable_name VARCHAR NOT NULL, + file_id VARCHAR, + PRIMARY KEY (row_id), + FOREIGN KEY(run_id) REFERENCES run (id), + FOREIGN KEY(file_id) REFERENCES data_files (id) + ) 
+ """, + """ + CREATE TABLE boolean_setting ( + "key" VARCHAR(21) NOT NULL, + value BOOLEAN NOT NULL, + PRIMARY KEY ("key"), + CONSTRAINT booleansettingkey CHECK ("key" IN ('enable_error_recovery')) + ) + """, +] EXPECTED_STATEMENTS_V6 = [ @@ -554,6 +683,7 @@ def _normalize_statement(statement: str) -> str: ("metadata", "expected_statements"), [ (latest_metadata, EXPECTED_STATEMENTS_LATEST), + (schema_8.metadata, EXPECTED_STATEMENTS_V8), (schema_7.metadata, EXPECTED_STATEMENTS_V7), (schema_6.metadata, EXPECTED_STATEMENTS_V6), (schema_5.metadata, EXPECTED_STATEMENTS_V5), diff --git a/robot-server/tests/runs/router/test_base_router.py b/robot-server/tests/runs/router/test_base_router.py index aa27d37e66b..ee9b291cc4a 100644 --- a/robot-server/tests/runs/router/test_base_router.py +++ b/robot-server/tests/runs/router/test_base_router.py @@ -25,7 +25,6 @@ from robot_server.data_files.models import DataFileSource from robot_server.errors.error_responses import ApiError -from robot_server.runs.error_recovery_models import ErrorRecoveryPolicy from robot_server.service.json_api import ( RequestModel, SimpleBody, @@ -67,7 +66,6 @@ get_runs, remove_run, update_run, - put_error_recovery_policy, get_run_commands_error, get_current_state, CurrentStateLinks, @@ -709,44 +707,6 @@ async def test_update_to_current_missing( assert exc_info.value.content["errors"][0]["id"] == "RunNotFound" -async def test_create_policies( - decoy: Decoy, mock_run_data_manager: RunDataManager -) -> None: - """It should call RunDataManager create run policies.""" - policies = decoy.mock(cls=ErrorRecoveryPolicy) - await put_error_recovery_policy( - runId="rud-id", - request_body=RequestModel(data=policies), - run_data_manager=mock_run_data_manager, - ) - decoy.verify( - mock_run_data_manager.set_error_recovery_rules( - run_id="rud-id", rules=policies.policyRules - ) - ) - - -async def test_create_policies_raises_not_active_run( - decoy: Decoy, mock_run_data_manager: RunDataManager -) -> None: - """It should raise that the run is not current.""" - policies = decoy.mock(cls=ErrorRecoveryPolicy) - decoy.when( - mock_run_data_manager.set_error_recovery_rules( - run_id="rud-id", rules=policies.policyRules - ) - ).then_raise(RunNotCurrentError()) - with pytest.raises(ApiError) as exc_info: - await put_error_recovery_policy( - runId="rud-id", - request_body=RequestModel(data=policies), - run_data_manager=mock_run_data_manager, - ) - - assert exc_info.value.status_code == 409 - assert exc_info.value.content["errors"][0]["id"] == "RunStopped" - - async def test_get_run_commands_errors( decoy: Decoy, mock_run_data_manager: RunDataManager ) -> None: @@ -759,13 +719,7 @@ async def test_get_run_commands_errors( ) ).then_raise(RunNotCurrentError("oh no!")) - error = pe_errors.ErrorOccurrence( - id="error-id", - errorType="PrettyBadError", - createdAt=datetime(year=2024, month=4, day=4), - detail="Things are not looking good.", - ) - decoy.when(mock_run_data_manager.get_command_errors("run-id")).then_return([error]) + decoy.when(mock_run_data_manager.get_command_errors_count("run-id")).then_return(1) with pytest.raises(ApiError): result = await get_run_commands_error( @@ -787,7 +741,7 @@ async def test_get_run_commands_errors_raises_no_run( createdAt=datetime(year=2024, month=4, day=4), detail="Things are not looking good.", ) - decoy.when(mock_run_data_manager.get_command_errors("run-id")).then_return([error]) + decoy.when(mock_run_data_manager.get_command_errors_count("run-id")).then_return(1) command_error_slice = CommandErrorSlice( 
        cursor=1, total_length=3, commands_errors=[error]
@@ -831,10 +785,7 @@ async def test_get_run_commands_errors_defualt_cursor(
     expected_cursor_result: int,
 ) -> None:
     """It should return a list of all commands errors in a run."""
-    print(error_list)
-    decoy.when(mock_run_data_manager.get_command_errors("run-id")).then_return(
-        error_list
-    )
+    decoy.when(mock_run_data_manager.get_command_errors_count("run-id")).then_return(1)
     command_error_slice = CommandErrorSlice(
         cursor=expected_cursor_result, total_length=3, commands_errors=error_list
diff --git a/robot-server/tests/runs/router/test_error_recovery_policy_router.py b/robot-server/tests/runs/router/test_error_recovery_policy_router.py
new file mode 100644
index 00000000000..efdf4d15f53
--- /dev/null
+++ b/robot-server/tests/runs/router/test_error_recovery_policy_router.py
@@ -0,0 +1,90 @@
+# noqa: D100
+
+import pytest
+from decoy import Decoy
+
+from robot_server.errors.error_responses import ApiError
+from robot_server.runs import error_recovery_models as er_models
+from robot_server.runs.router.error_recovery_policy_router import (
+    get_error_recovery_policy,
+    put_error_recovery_policy,
+)
+from robot_server.runs.run_data_manager import RunDataManager, RunNotCurrentError
+from robot_server.service.json_api.request import RequestModel
+
+
+async def test_put(decoy: Decoy, mock_run_data_manager: RunDataManager) -> None:
+    """It should call RunDataManager to set the run's error recovery policy."""
+    policies = decoy.mock(cls=er_models.ErrorRecoveryPolicy)
+    await put_error_recovery_policy(
+        runId="run-id",
+        request_body=RequestModel(data=policies),
+        run_data_manager=mock_run_data_manager,
+    )
+    decoy.verify(
+        mock_run_data_manager.set_error_recovery_rules(
+            run_id="run-id", rules=policies.policyRules
+        )
+    )
+
+
+async def test_put_raises_not_active_run(
+    decoy: Decoy, mock_run_data_manager: RunDataManager
+) -> None:
+    """It should raise that the run is not current."""
+    policies = decoy.mock(cls=er_models.ErrorRecoveryPolicy)
+    decoy.when(
+        mock_run_data_manager.set_error_recovery_rules(
+            run_id="run-id", rules=policies.policyRules
+        )
+    ).then_raise(RunNotCurrentError())
+    with pytest.raises(ApiError) as exc_info:
+        await put_error_recovery_policy(
+            runId="run-id",
+            request_body=RequestModel(data=policies),
+            run_data_manager=mock_run_data_manager,
+        )
+
+    assert exc_info.value.status_code == 409
+    assert exc_info.value.content["errors"][0]["id"] == "RunStopped"
+
+
+async def test_get(decoy: Decoy, mock_run_data_manager: RunDataManager) -> None:
+    """It should return the run's current error recovery policy."""
+    rule = er_models.ErrorRecoveryRule(
+        matchCriteria=er_models.MatchCriteria(
+            command=er_models.CommandMatcher(
+                commandType="commandType",
+                error=er_models.ErrorMatcher(errorType="errorType"),
+            )
+        ),
+        ifMatch=er_models.ReactionIfMatch.WAIT_FOR_RECOVERY,
+    )
+
+    decoy.when(mock_run_data_manager.get_error_recovery_rules("run-id")).then_return(
+        [rule]
+    )
+    result = await get_error_recovery_policy(
+        runId="run-id",
+        run_data_manager=mock_run_data_manager,
+    )
+    assert result.status_code == 200
+    assert result.content.data == er_models.ErrorRecoveryPolicy(policyRules=[rule])
+
+
+async def test_get_raises_not_active_run(
+    decoy: Decoy, mock_run_data_manager: RunDataManager
+) -> None:
+    """It should raise that the run is not current."""
+    decoy.when(
+        mock_run_data_manager.get_error_recovery_rules(run_id="run-id")
+    ).then_raise(RunNotCurrentError())
+
+    with pytest.raises(ApiError) as exc_info:
+        await get_error_recovery_policy(
+ runId="run-id", + run_data_manager=mock_run_data_manager, + ) + + assert exc_info.value.status_code == 409 + assert exc_info.value.content["errors"][0]["id"] == "RunStopped" diff --git a/robot-server/tests/runs/test_run_data_manager.py b/robot-server/tests/runs/test_run_data_manager.py index d27e1aebaff..a78a7585c52 100644 --- a/robot-server/tests/runs/test_run_data_manager.py +++ b/robot-server/tests/runs/test_run_data_manager.py @@ -935,16 +935,30 @@ def test_get_commands_slice_current_run( assert expected_command_slice == result -def test_get_commands_errors_slice__not_current_run_raises( +def test_get_commands_errors_slice_historical_run( decoy: Decoy, subject: RunDataManager, mock_run_orchestrator_store: RunOrchestratorStore, + mock_run_store: RunStore, ) -> None: """Should get a sliced command error list from engine store.""" + expected_commands_errors_result = [ + ErrorOccurrence.construct(id="error-id") # type: ignore[call-arg] + ] + + command_error_slice = CommandErrorSlice( + cursor=1, total_length=3, commands_errors=expected_commands_errors_result + ) + decoy.when(mock_run_orchestrator_store.current_run_id).then_return("run-not-id") - with pytest.raises(RunNotCurrentError): - subject.get_command_error_slice("run-id", 1, 2) + decoy.when(mock_run_store.get_commands_errors_slice("run-id", 2, 1)).then_return( + command_error_slice + ) + + result = subject.get_command_error_slice("run-id", 1, 2) + + assert command_error_slice == result def test_get_commands_errors_slice_current_run( @@ -1240,7 +1254,7 @@ async def test_get_current_run_labware_definition( ] -async def test_create_policies_raises_run_not_current( +async def test_set_error_recovery_rules_raises_run_not_current( decoy: Decoy, mock_run_orchestrator_store: RunOrchestratorStore, subject: RunDataManager, @@ -1255,7 +1269,7 @@ async def test_create_policies_raises_run_not_current( ) -async def test_create_policies_translates_and_calls_orchestrator( +async def test_set_error_recovery_rules_translates_and_calls_orchestrator( decoy: Decoy, mock_run_orchestrator_store: RunOrchestratorStore, mock_error_recovery_setting_store: ErrorRecoverySettingStore, @@ -1282,6 +1296,34 @@ async def test_create_policies_translates_and_calls_orchestrator( ) +async def test_get_error_recovery_rules( + decoy: Decoy, + mock_run_orchestrator_store: RunOrchestratorStore, + subject: RunDataManager, +) -> None: + """It should return the current run's previously-set list of error recovery rules.""" + # Before there has been any current run, it should raise an exception. + with pytest.raises(RunNotCurrentError): + subject.get_error_recovery_rules(run_id="whatever") + + # While there is a current run, it should return its list of rules. + decoy.when(mock_run_orchestrator_store.current_run_id).then_return( + sentinel.current_run_id + ) + subject.set_error_recovery_rules( + run_id=sentinel.current_run_id, rules=sentinel.input_rules + ) + assert ( + subject.get_error_recovery_rules(run_id=sentinel.current_run_id) + == sentinel.input_rules + ) + + # When the run stops being current, it should go back to raising. 
+ decoy.when(mock_run_orchestrator_store.current_run_id).then_return(None) + with pytest.raises(RunNotCurrentError): + subject.get_error_recovery_rules(run_id="whatever") + + def test_get_nozzle_map_current_run( decoy: Decoy, mock_run_orchestrator_store: RunOrchestratorStore, diff --git a/robot-server/tests/runs/test_run_store.py b/robot-server/tests/runs/test_run_store.py index 17a5c3b252f..ab8e5f10fdf 100644 --- a/robot-server/tests/runs/test_run_store.py +++ b/robot-server/tests/runs/test_run_store.py @@ -36,6 +36,7 @@ CommandSlice, Liquid, EngineStatus, + ErrorOccurrence, ) from opentrons.types import MountType, DeckSlotName @@ -59,7 +60,7 @@ def subject( @pytest.fixture def protocol_commands() -> List[pe_commands.Command]: - """Get a StateSummary value object.""" + """Get protocol commands list.""" return [ pe_commands.WaitForResume( id="pause-1", @@ -99,6 +100,61 @@ def protocol_commands() -> List[pe_commands.Command]: ] +@pytest.fixture +def protocol_commands_errors() -> List[pe_commands.Command]: + """Get protocol commands errors list.""" + return [ + pe_commands.WaitForResume( + id="pause-4", + key="command-key", + status=pe_commands.CommandStatus.SUCCEEDED, + createdAt=datetime(year=2022, month=2, day=2), + params=pe_commands.WaitForResumeParams(message="hey world"), + result=pe_commands.WaitForResumeResult(), + intent=pe_commands.CommandIntent.PROTOCOL, + ), + pe_commands.WaitForResume( + id="pause-1", + key="command-key", + status=pe_commands.CommandStatus.FAILED, + createdAt=datetime(year=2021, month=1, day=1), + params=pe_commands.WaitForResumeParams(message="hello world"), + result=pe_commands.WaitForResumeResult(), + intent=pe_commands.CommandIntent.PROTOCOL, + error=ErrorOccurrence.construct( + id="error-id", + createdAt=datetime(2024, 1, 1), + errorType="blah-blah", + detail="test details", + ), + ), + pe_commands.WaitForResume( + id="pause-2", + key="command-key", + status=pe_commands.CommandStatus.FAILED, + createdAt=datetime(year=2022, month=2, day=2), + params=pe_commands.WaitForResumeParams(message="hey world"), + result=pe_commands.WaitForResumeResult(), + intent=pe_commands.CommandIntent.PROTOCOL, + error=ErrorOccurrence.construct( + id="error-id-2", + createdAt=datetime(2024, 1, 1), + errorType="blah-blah", + detail="test details", + ), + ), + pe_commands.WaitForResume( + id="pause-3", + key="command-key", + status=pe_commands.CommandStatus.SUCCEEDED, + createdAt=datetime(year=2022, month=2, day=2), + params=pe_commands.WaitForResumeParams(message="hey world"), + result=pe_commands.WaitForResumeResult(), + intent=pe_commands.CommandIntent.PROTOCOL, + ), + ] + + @pytest.fixture def state_summary() -> StateSummary: """Get a StateSummary test object.""" @@ -289,6 +345,50 @@ async def test_update_run_state( ) +async def test_update_run_state_command_with_errors( + subject: RunStore, + state_summary: StateSummary, + protocol_commands_errors: List[pe_commands.Command], + run_time_parameters: List[pe_types.RunTimeParameter], + mock_runs_publisher: mock.Mock, +) -> None: + """It should be able to update a run state to the store.""" + commands_with_errors = [ + command + for command in protocol_commands_errors + if command.status == pe_commands.CommandStatus.FAILED + ] + action = RunAction( + actionType=RunActionType.PLAY, + createdAt=datetime(year=2022, month=2, day=2, tzinfo=timezone.utc), + id="action-id", + ) + + subject.insert( + run_id="run-id", + protocol_id=None, + created_at=datetime(year=2021, month=1, day=1, tzinfo=timezone.utc), + ) + + 
subject.update_run_state( + run_id="run-id", + summary=state_summary, + commands=protocol_commands_errors, + run_time_parameters=run_time_parameters, + ) + + subject.insert_action(run_id="run-id", action=action) + command_errors_result = subject.get_commands_errors_slice( + run_id="run-id", + length=5, + cursor=0, + ) + + assert command_errors_result.commands_errors == [ + item.error for item in commands_with_errors + ] + + async def test_insert_and_get_csv_rtp( subject: RunStore, data_files_store: DataFilesStore, diff --git a/shared-data/command/schemas/11.json b/shared-data/command/schemas/11.json index f7b89b96f70..df6ffad33c7 100644 --- a/shared-data/command/schemas/11.json +++ b/shared-data/command/schemas/11.json @@ -82,7 +82,9 @@ "unsafe/placeLabware": "#/definitions/UnsafePlaceLabwareCreate", "robot/moveAxesRelative": "#/definitions/MoveAxesRelativeCreate", "robot/moveAxesTo": "#/definitions/MoveAxesToCreate", - "robot/moveTo": "#/definitions/MoveToCreate" + "robot/moveTo": "#/definitions/MoveToCreate", + "robot/openGripperJaw": "#/definitions/openGripperJawCreate", + "robot/closeGripperJaw": "#/definitions/closeGripperJawCreate" } }, "oneOf": [ @@ -319,6 +321,12 @@ }, { "$ref": "#/definitions/MoveToCreate" + }, + { + "$ref": "#/definitions/openGripperJawCreate" + }, + { + "$ref": "#/definitions/closeGripperJawCreate" } ], "definitions": { @@ -6216,6 +6224,84 @@ } }, "required": ["params"] + }, + "openGripperJawParams": { + "title": "openGripperJawParams", + "description": "Payload required to release a gripper.", + "type": "object", + "properties": {} + }, + "openGripperJawCreate": { + "title": "openGripperJawCreate", + "description": "openGripperJaw command request model.", + "type": "object", + "properties": { + "commandType": { + "title": "Commandtype", + "default": "robot/openGripperJaw", + "enum": ["robot/openGripperJaw"], + "type": "string" + }, + "params": { + "$ref": "#/definitions/openGripperJawParams" + }, + "intent": { + "description": "The reason the command was added. If not specified or `protocol`, the command will be treated as part of the protocol run itself, and added to the end of the existing command queue.\n\nIf `setup`, the command will be treated as part of run setup. A setup command may only be enqueued if the run has not started.\n\nUse setup commands for activities like pre-run calibration checks and module setup, like pre-heating.", + "allOf": [ + { + "$ref": "#/definitions/CommandIntent" + } + ] + }, + "key": { + "title": "Key", + "description": "A key value, unique in this run, that can be used to track the same logical command across multiple runs of the same protocol. If a value is not provided, one will be generated.", + "type": "string" + } + }, + "required": ["params"] + }, + "closeGripperJawParams": { + "title": "closeGripperJawParams", + "description": "Payload required to close a gripper.", + "type": "object", + "properties": { + "force": { + "title": "Force", + "description": "The force the gripper should use to hold the jaws, falls to default if none is provided.", + "type": "number" + } + } + }, + "closeGripperJawCreate": { + "title": "closeGripperJawCreate", + "description": "closeGripperJaw command request model.", + "type": "object", + "properties": { + "commandType": { + "title": "Commandtype", + "default": "robot/closeGripperJaw", + "enum": ["robot/closeGripperJaw"], + "type": "string" + }, + "params": { + "$ref": "#/definitions/closeGripperJawParams" + }, + "intent": { + "description": "The reason the command was added. 
If not specified or `protocol`, the command will be treated as part of the protocol run itself, and added to the end of the existing command queue.\n\nIf `setup`, the command will be treated as part of run setup. A setup command may only be enqueued if the run has not started.\n\nUse setup commands for activities like pre-run calibration checks and module setup, like pre-heating.", + "allOf": [ + { + "$ref": "#/definitions/CommandIntent" + } + ] + }, + "key": { + "title": "Key", + "description": "A key value, unique in this run, that can be used to track the same logical command across multiple runs of the same protocol. If a value is not provided, one will be generated.", + "type": "string" + } + }, + "required": ["params"] } }, "$id": "opentronsCommandSchemaV11", diff --git a/shared-data/python/opentrons_shared_data/pipette/model_constants.py b/shared-data/python/opentrons_shared_data/pipette/model_constants.py index 7d34e0e5f6a..daf9b233e52 100644 --- a/shared-data/python/opentrons_shared_data/pipette/model_constants.py +++ b/shared-data/python/opentrons_shared_data/pipette/model_constants.py @@ -20,6 +20,7 @@ PipetteGenerationType.FLEX: { PipetteChannelType.SINGLE_CHANNEL: RobotMountConfigs(2133.33, 230.15, 80), PipetteChannelType.EIGHT_CHANNEL: RobotMountConfigs(2133.33, 230.15, 80), + PipetteChannelType.EIGHT_CHANNEL_EM: RobotMountConfigs(2133.33, 230.15, 80), PipetteChannelType.NINETY_SIX_CHANNEL: RobotMountConfigs(2133.33, 230.15, 80), }, } diff --git a/shared-data/python/opentrons_shared_data/pipette/mutable_configurations.py b/shared-data/python/opentrons_shared_data/pipette/mutable_configurations.py index 06b31215b65..7e1beb5dd35 100644 --- a/shared-data/python/opentrons_shared_data/pipette/mutable_configurations.py +++ b/shared-data/python/opentrons_shared_data/pipette/mutable_configurations.py @@ -41,7 +41,7 @@ LIQUID_CLASS = LiquidClasses.default -def _edit_non_quirk( +def _edit_non_quirk( # noqa: C901 mutable_config_key: str, new_mutable_value: MutableConfig, base_dict: Dict[str, Any] ) -> None: def _do_edit_non_quirk( @@ -58,6 +58,9 @@ def _do_edit_non_quirk( elif thiskey == "##EACHTIPTYPE##": for key in existing.keys(): _do_edit_non_quirk(new_value, existing[key], restkeys) + elif thiskey == "##EACHTIP##": + for key in existing.keys(): + _do_edit_non_quirk(new_value, existing[key], restkeys) else: _do_edit_non_quirk(new_value, existing[thiskey], restkeys) else: diff --git a/shared-data/python/opentrons_shared_data/pipette/pipette_load_name_conversions.py b/shared-data/python/opentrons_shared_data/pipette/pipette_load_name_conversions.py index f5113cff9e7..865862bfec2 100644 --- a/shared-data/python/opentrons_shared_data/pipette/pipette_load_name_conversions.py +++ b/shared-data/python/opentrons_shared_data/pipette/pipette_load_name_conversions.py @@ -81,6 +81,8 @@ def channels_from_string(channels: str) -> PipetteChannelType: if channels == "96": return PipetteChannelType.NINETY_SIX_CHANNEL elif "multi" in channels: + if "em" in channels: + return PipetteChannelType.EIGHT_CHANNEL_EM return PipetteChannelType.EIGHT_CHANNEL elif channels == "single": return PipetteChannelType.SINGLE_CHANNEL @@ -115,6 +117,8 @@ def get_channel_from_pipette_name(pipette_name_tuple: Tuple[str, ...]) -> str: elif "96" in pipette_name_tuple: return "ninety_six_channel" else: + if "em" in pipette_name_tuple: + return "eight_channel_em" return "eight_channel" @@ -154,7 +158,6 @@ def version_from_generation(pipette_name_tuple: Tuple[str, ...]) -> PipetteVersi ) model_from_pipette_name = 
pipette_name_tuple[0] channel_from_pipette_name = get_channel_from_pipette_name(pipette_name_tuple) - paths_to_validate = ( get_shared_data_root() / "pipette" / "definitions" / "2" / "general" ) @@ -246,7 +249,12 @@ def convert_pipette_name( """ split_pipette_name = name.split("_") - channels = channels_from_string(split_pipette_name[1]) + channels_type = split_pipette_name[1] + if len(split_pipette_name) > 2: + if split_pipette_name[2] == "em": + channels_type = "multi_em" + + channels = channels_from_string(channels_type) if provided_version: version = version_from_string(provided_version) else: diff --git a/shared-data/python/opentrons_shared_data/pipette/types.py b/shared-data/python/opentrons_shared_data/pipette/types.py index c52e57eb20e..685dae89957 100644 --- a/shared-data/python/opentrons_shared_data/pipette/types.py +++ b/shared-data/python/opentrons_shared_data/pipette/types.py @@ -49,6 +49,7 @@ def check_and_return_type( class PipetteChannelType(int, enum.Enum): SINGLE_CHANNEL = 1 EIGHT_CHANNEL = 8 + EIGHT_CHANNEL_EM = 82 NINETY_SIX_CHANNEL = 96 def __str__(self) -> str: @@ -56,6 +57,8 @@ def __str__(self) -> str: return "96" elif self.value == 8: return "multi" + elif self.value == 82: + return "multi_em" else: return "single" diff --git a/shared-data/python/tests/pipette/test_mutable_configurations.py b/shared-data/python/tests/pipette/test_mutable_configurations.py index 40059f6bfe6..38920c473e8 100644 --- a/shared-data/python/tests/pipette/test_mutable_configurations.py +++ b/shared-data/python/tests/pipette/test_mutable_configurations.py @@ -1,4 +1,5 @@ import json +import logging import os from pathlib import Path from typing import Dict, Any, cast, Union, Generator @@ -68,9 +69,8 @@ def test_load_old_overrides_regression( "type": "float", "default": 0.1, } - json.dump( - TMPFILE_DATA, open(override_configuration_path / "P20SV222021040709.json", "w") - ) + with open(override_configuration_path / "P20SV222021040709.json", "w") as f: + json.dump(TMPFILE_DATA, f) configs = mutable_configurations.load_with_mutable_configurations( pipette_definition.PipetteModelVersionType( pipette_type=types.PipetteModelType.p20, @@ -305,3 +305,159 @@ def test_build_mutable_config_using_old_units() -> None: assert ( types.MutableConfig.build(**old_units_config, name="dropTipSpeed") is not None # type: ignore ) + + +@pytest.mark.parametrize( + ("filename", "type", "channels", "version", "file_contents"), + # From https://opentrons.atlassian.net/browse/RQA-3676. + # These could probably be pared down. 
+ [ + ( + "P20MV202020121412.json", + types.PipetteModelType.p20, + types.PipetteChannelType.EIGHT_CHANNEL, + types.PipetteVersionType(2, 0), + '{"model": "p20_multi_v2.0"}', + ), + ( + "P3HSV1318071638.json", + types.PipetteModelType.p300, + types.PipetteChannelType.SINGLE_CHANNEL, + types.PipetteVersionType(1, 3), + '{"dropTipShake": true, "model": "p300_single_v1.3", "quirks": {"dropTipShake": true}, "top": {"value": 30.0, "default": 19.5, "units": "mm", "type": "float", "min": -20, "max": 30}, "pickUpPresses": {"value": 3.0, "default": 3, "units": "presses", "type": "int", "min": 0, "max": 15}}', + ), + ( + "P3HMV212021040004.json", + types.PipetteModelType.p300, + types.PipetteChannelType.EIGHT_CHANNEL, + types.PipetteVersionType(2, 1), + '{"needsUnstick": true, "model": "p300_multi_v2.1"}', + ), + ( + "P20SV202020032604.json", + types.PipetteModelType.p20, + types.PipetteChannelType.SINGLE_CHANNEL, + types.PipetteVersionType(2, 0), + '{"model": "p20_single_v2.0"}', + ), + ( + "P1KSV202019072441.json", + types.PipetteModelType.p1000, + types.PipetteChannelType.SINGLE_CHANNEL, + types.PipetteVersionType(2, 0), + '{"pickupTipShake": true, "model": "p1000_single_v2.0", "quirks": {"pickupTipShake": true}}', + ), + ( + "P3HMV202021011105.json", + types.PipetteModelType.p300, + types.PipetteChannelType.EIGHT_CHANNEL, + types.PipetteVersionType(2, 0), + '{"needsUnstick": true, "model": "p300_multi_v2.0"}', + ), + ( + "P20SV202019072527.json", + types.PipetteModelType.p20, + types.PipetteChannelType.SINGLE_CHANNEL, + types.PipetteVersionType(2, 0), + '{"model": "p20_single_v2.0"}', + ), + ( + "P3HSV202021042602.json", + types.PipetteModelType.p300, + types.PipetteChannelType.SINGLE_CHANNEL, + types.PipetteVersionType(2, 0), + '{"model": "p300_single_v2.0"}', + ), + ( + "P20SV222021030914.json", + types.PipetteModelType.p20, + types.PipetteChannelType.SINGLE_CHANNEL, + types.PipetteVersionType(2, 2), + '{"model": "p20_single_v2.2", "quirks": {}, "pickUpPresses": {"value": 3.0, "default": 1, "units": "presses", "type": "int", "min": 0, "max": 15}}', + ), + ( + "P3HMV202020040801.json", + types.PipetteModelType.p300, + types.PipetteChannelType.EIGHT_CHANNEL, + types.PipetteVersionType(2, 0), + '{"needsUnstick": true, "model": "p300_multi_v2.0", "quirks": {"needsUnstick": true}, "top": {"value": 20.0, "default": 19.5, "units": "mm", "type": "float", "min": -20, "max": 30}, "bottom": {"value": -14.0, "default": -14.5, "units": "mm", "type": "float", "min": -20, "max": 30}, "blowout": {"value": -18.5, "default": -19.0, "units": "mm", "type": "float", "min": -20, "max": 30}, "dropTip": {"value": -20.0, "default": -33.4, "units": "mm", "type": "float", "min": -20, "max": 30}, "pickUpCurrent": {"value": 0.9, "default": 0.8, "units": "amps", "type": "float", "min": 0.1, "max": 2.0}, "pickUpDistance": {"value": 10.0, "default": 11.0, "units": "mm", "type": "float", "min": 0, "max": 10}, "pickUpIncrement": {"value": 0.1, "default": 0.0, "units": "mm", "type": "int", "min": 0, "max": 10}, "pickUpPresses": {"value": 4.0, "default": 1, "units": "presses", "type": "int", "min": 0, "max": 15}, "pickUpSpeed": {"value": 11.0, "default": 10.0, "units": "mm/s", "type": "float", "min": 0.01, "max": 30}, "plungerCurrent": {"value": 1.1, "default": 1.0, "units": "amps", "type": "float", "min": 0.1, "max": 2.0}, "dropTipCurrent": {"value": 1.22, "default": 1.25, "units": "amps", "type": "float", "min": 0.1, "max": 2.0}, "dropTipSpeed": {"value": 8.0, "default": 7.5, "units": "mm/s", "type": "float", "min": 0.01, 
"max": 30}, "tipLength": {"value": 52.0, "default": 51.0, "units": "mm", "type": "float", "min": 0, "max": 100}}', + ), + ( + "P3HMV212021040002.json", + types.PipetteModelType.p300, + types.PipetteChannelType.EIGHT_CHANNEL, + types.PipetteVersionType(2, 1), + '{"needsUnstick": true, "model": "p300_multi_v2.1"}', + ), + ( + "P3HMV1318072625.json", + types.PipetteModelType.p300, + types.PipetteChannelType.EIGHT_CHANNEL, + types.PipetteVersionType(1, 3), + '{"dropTipShake": true, "model": "p300_multi_v1.3", "quirks": {"dropTipShake": true}, "pickUpPresses": {"value": 4.0, "default": 3, "units": "presses", "type": "int", "min": 0, "max": 15}}', + ), + ( + "P50MV1519091757.json", + types.PipetteModelType.p50, + types.PipetteChannelType.EIGHT_CHANNEL, + types.PipetteVersionType(1, 5), + '{"dropTipShake": true, "doubleDropTip": true, "model": "p50_multi_v1.5", "quirks": {"doubleDropTip": true, "dropTipShake": true}}', + ), + ( + "P3HSV202019072224.json", + types.PipetteModelType.p300, + types.PipetteChannelType.SINGLE_CHANNEL, + types.PipetteVersionType(2, 0), + '{"model": "p300_single_v2.0", "quirks": {}}', + ), + ( + "P20MV202019112708.json", + types.PipetteModelType.p20, + types.PipetteChannelType.EIGHT_CHANNEL, + types.PipetteVersionType(2, 0), + '{"model": "p20_multi_v2.0", "quirks": {}}', + ), + ( + "P3HSV202021031503.json", + types.PipetteModelType.p300, + types.PipetteChannelType.SINGLE_CHANNEL, + types.PipetteVersionType(2, 1), + '{"model": "p300_single_v2.1"}', + ), + ( + "P1KSV202020060206.json", + types.PipetteModelType.p1000, + types.PipetteChannelType.SINGLE_CHANNEL, + types.PipetteVersionType(2, 0), + '{"pickupTipShake": true, "model": "p1000_single_v2.0"}', + ), + ( + "P3HMV202021010906.json", + types.PipetteModelType.p300, + types.PipetteChannelType.EIGHT_CHANNEL, + types.PipetteVersionType(2, 0), + '{"needsUnstick": true, "model": "p300_multi_v2.0"}', + ), + ], +) +def test_loading_does_not_log_warnings( + filename: str, + type: types.PipetteModelType, + channels: types.PipetteChannelType, + version: types.PipetteVersionType, + file_contents: str, + caplog: pytest.LogCaptureFixture, + override_configuration_path: Path, +) -> None: + """Make sure load_with_mutable_configurations() doesn't log any exceptions. + + load_with_mutable_configurations() suppresses and logs internal exceptions to + protect its caller, but those are still bugs, and we still want tests to catch them. + """ + model = pipette_definition.PipetteModelVersionType(type, channels, version) + (override_configuration_path / filename).write_text(file_contents) + with caplog.at_level(logging.WARNING): + mutable_configurations.load_with_mutable_configurations( + model, override_configuration_path, Path(filename).stem + ) + assert len(caplog.records) == 0