From cde88cbc35700de1beeffe7b6ed961d6a6ae2ed3 Mon Sep 17 00:00:00 2001 From: rclarke0 Date: Tue, 23 Jul 2024 17:27:33 -0400 Subject: [PATCH 1/3] increase google sheet columns --- abr-testing/abr_testing/automation/google_sheets_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/abr-testing/abr_testing/automation/google_sheets_tool.py b/abr-testing/abr_testing/automation/google_sheets_tool.py index 3a6590fa03b..78cbc17abf6 100644 --- a/abr-testing/abr_testing/automation/google_sheets_tool.py +++ b/abr-testing/abr_testing/automation/google_sheets_tool.py @@ -57,7 +57,7 @@ def open_worksheet(self, tab_number: int) -> Any: def create_worksheet(self, title: str) -> Optional[str]: """Create a worksheet with tab name. Existing spreadsheet needed.""" try: - new_sheet = self.spread_sheet.add_worksheet(title, rows="2500", cols="40") + new_sheet = self.spread_sheet.add_worksheet(title, rows="2500", cols="50") return new_sheet.id except gspread.exceptions.APIError: print("Sheet already exists.") From 31c2e60913d24b5256b086dfbaf561daff548aad Mon Sep 17 00:00:00 2001 From: rclarke0 Date: Thu, 19 Sep 2024 09:51:49 -0400 Subject: [PATCH 2/3] feat(abr-testing): plate reader data collection --- abr-testing/Pipfile | 2 +- abr-testing/Pipfile.lock | 115 +++++++++-------- .../automation/google_sheets_tool.py | 4 +- .../data_collection/abr_google_drive.py | 13 +- .../data_collection/get_run_logs.py | 2 +- .../data_collection/read_robot_logs.py | 53 +++++++- abr-testing/abr_testing/tools/plate_reader.py | 117 ++++++++++++++++++ 7 files changed, 243 insertions(+), 63 deletions(-) create mode 100644 abr-testing/abr_testing/tools/plate_reader.py diff --git a/abr-testing/Pipfile b/abr-testing/Pipfile index c046e523a69..4bcece1c630 100644 --- a/abr-testing/Pipfile +++ b/abr-testing/Pipfile @@ -18,7 +18,7 @@ slackclient = "*" slack-sdk = "*" pandas = "*" pandas-stubs = "*" - +numpy = "*" [dev-packages] atomicwrites = "==1.4.1" diff --git a/abr-testing/Pipfile.lock 
b/abr-testing/Pipfile.lock index 05e3c72eeda..08da1926e92 100644 --- a/abr-testing/Pipfile.lock +++ b/abr-testing/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "b82b82f6cc192a520ccaa5f2c94a2dda16993ef31ebd9e140f85f80b6a96cc6a" + "sha256": "f2a4a8a95be01ccb8425c8069a3dc8e3f932c1f9b36f5b12c838ee9cfc26015e" }, "pipfile-spec": 6, "requires": { @@ -394,12 +394,12 @@ }, "google-api-python-client": { "hashes": [ - "sha256:8b84dde11aaccadc127e4846f5cd932331d804ea324e353131595e3f25376e97", - "sha256:d74da1358f3f2d63daf3c6f26bd96d89652051183bc87cf10a56ceb2a70beb50" + "sha256:41f671be10fa077ee5143ee9f0903c14006d39dc644564f4e044ae96b380bf68", + "sha256:b1e62c9889c5ef6022f11d30d7ef23dc55100300f0e8aaf8aa09e8e92540acad" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==2.145.0" + "version": "==2.146.0" }, "google-auth": { "hashes": [ @@ -456,11 +456,11 @@ }, "idna": { "hashes": [ - "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac", - "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603" + "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", + "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3" ], "markers": "python_version >= '3.6'", - "version": "==3.8" + "version": "==3.10" }, "jsonschema": { "hashes": [ @@ -607,6 +607,7 @@ "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3", "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f" ], + "index": "pypi", "markers": "python_version >= '3.9'", "version": "==1.26.4" }, @@ -709,23 +710,24 @@ }, "protobuf": { "hashes": [ - "sha256:018db9056b9d75eb93d12a9d35120f97a84d9a919bcab11ed56ad2d399d6e8dd", - "sha256:510ed78cd0980f6d3218099e874714cdf0d8a95582e7b059b06cabad855ed0a0", - "sha256:532627e8fdd825cf8767a2d2b94d77e874d5ddb0adefb04b237f7cc296748681", - "sha256:6206afcb2d90181ae8722798dcb56dc76675ab67458ac24c0dd7d75d632ac9bd", - 
"sha256:66c3edeedb774a3508ae70d87b3a19786445fe9a068dd3585e0cefa8a77b83d0", - "sha256:6d7cc9e60f976cf3e873acb9a40fed04afb5d224608ed5c1a105db4a3f09c5b6", - "sha256:853db610214e77ee817ecf0514e0d1d052dff7f63a0c157aa6eabae98db8a8de", - "sha256:d001a73c8bc2bf5b5c1360d59dd7573744e163b3607fa92788b7f3d5fefbd9a5", - "sha256:dde74af0fa774fa98892209992295adbfb91da3fa98c8f67a88afe8f5a349add", - "sha256:dde9fcaa24e7a9654f4baf2a55250b13a5ea701493d904c54069776b99a8216b", - "sha256:eef7a8a2f4318e2cb2dee8666d26e58eaf437c14788f3a2911d0c3da40405ae8" + "sha256:2c69461a7fcc8e24be697624c09a839976d82ae75062b11a0972e41fd2cd9132", + "sha256:35cfcb15f213449af7ff6198d6eb5f739c37d7e4f1c09b5d0641babf2cc0c68f", + "sha256:52235802093bd8a2811abbe8bf0ab9c5f54cca0a751fdd3f6ac2a21438bffece", + "sha256:59379674ff119717404f7454647913787034f03fe7049cbef1d74a97bb4593f0", + "sha256:5e8a95246d581eef20471b5d5ba010d55f66740942b95ba9b872d918c459452f", + "sha256:87317e9bcda04a32f2ee82089a204d3a2f0d3c8aeed16568c7daf4756e4f1fe0", + "sha256:8ddc60bf374785fb7cb12510b267f59067fa10087325b8e1855b898a0d81d276", + "sha256:a8b9403fc70764b08d2f593ce44f1d2920c5077bf7d311fefec999f8c40f78b7", + "sha256:c0ea0123dac3399a2eeb1a1443d82b7afc9ff40241433296769f7da42d142ec3", + "sha256:ca53faf29896c526863366a52a8f4d88e69cd04ec9571ed6082fa117fac3ab36", + "sha256:eeea10f3dc0ac7e6b4933d32db20662902b4ab81bf28df12218aa389e9c2102d" ], "markers": "python_version >= '3.8'", - "version": "==5.28.0" + "version": "==5.28.2" }, "pyasn1": { "hashes": [ + "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034" ], "markers": "python_version >= '3.8'", @@ -733,6 +735,7 @@ }, "pyasn1-modules": { "hashes": [ + "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd", "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c" ], "markers": "python_version >= '3.8'", @@ -917,11 +920,11 @@ }, "setuptools": { "hashes": [ - 
"sha256:5f4c08aa4d3ebcb57a50c33b1b07e94315d7fc7230f7115e47fc99776c8ce308", - "sha256:95b40ed940a1c67eb70fc099094bd6e99c6ee7c23aa2306f4d2697ba7916f9c6" + "sha256:35ab7fd3bcd95e6b7fd704e4a1539513edad446c097797f2985e0e4b960772f2", + "sha256:d59a21b17a275fb872a9c3dae73963160ae079f1049ed956880cd7c09b120538" ], "markers": "python_version >= '3.8'", - "version": "==74.1.2" + "version": "==75.1.0" }, "six": { "hashes": [ @@ -933,12 +936,12 @@ }, "slack-sdk": { "hashes": [ - "sha256:af8fc4ef1d1cbcecd28d01acf6955a3bb5b13d56f0a43a1b1c7e3b212cc5ec5b", - "sha256:f35e85f2847e6c25cf7c2d1df206ca0ad75556263fb592457bf03cca68ef64bb" + "sha256:070eb1fb355c149a5f80fa0be6eeb5f5588e4ddff4dd76acf060454435cb037e", + "sha256:853bb55154115d080cae342c4099f2ccb559a78ae8d0f5109b49842401a920fa" ], "index": "pypi", "markers": "python_version >= '3.6'", - "version": "==3.32.0" + "version": "==3.33.0" }, "slackclient": { "hashes": [ @@ -975,11 +978,11 @@ }, "types-pytz": { "hashes": [ - "sha256:6810c8a1f68f21fdf0f4f374a432487c77645a0ac0b31de4bf4690cf21ad3981", - "sha256:8335d443310e2db7b74e007414e74c4f53b67452c0cb0d228ca359ccfba59659" + "sha256:4433b5df4a6fc587bbed41716d86a5ba5d832b4378e506f40d34bc9c81df2c24", + "sha256:a1eebf57ebc6e127a99d2fa2ba0a88d2b173784ef9b3defcc2004ab6855a44df" ], "markers": "python_version >= '3.8'", - "version": "==2024.1.0.20240417" + "version": "==2024.2.0.20240913" }, "typing-extensions": { "hashes": [ @@ -1007,11 +1010,11 @@ }, "urllib3": { "hashes": [ - "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472", - "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168" + "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", + "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9" ], "markers": "python_version >= '3.8'", - "version": "==2.2.2" + "version": "==2.2.3" }, "wrapt": { "hashes": [ @@ -1485,12 +1488,12 @@ }, "google-api-python-client": { "hashes": [ - 
"sha256:8b84dde11aaccadc127e4846f5cd932331d804ea324e353131595e3f25376e97", - "sha256:d74da1358f3f2d63daf3c6f26bd96d89652051183bc87cf10a56ceb2a70beb50" + "sha256:41f671be10fa077ee5143ee9f0903c14006d39dc644564f4e044ae96b380bf68", + "sha256:b1e62c9889c5ef6022f11d30d7ef23dc55100300f0e8aaf8aa09e8e92540acad" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==2.145.0" + "version": "==2.146.0" }, "google-api-python-client-stubs": { "hashes": [ @@ -1535,11 +1538,11 @@ }, "idna": { "hashes": [ - "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac", - "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603" + "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", + "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3" ], "markers": "python_version >= '3.6'", - "version": "==3.8" + "version": "==3.10" }, "iniconfig": { "hashes": [ @@ -1616,11 +1619,11 @@ }, "platformdirs": { "hashes": [ - "sha256:9e5e27a08aa095dd127b9f2e764d74254f482fef22b0970773bfba79d091ab8c", - "sha256:eb1c8582560b34ed4ba105009a4badf7f6f85768b30126f351328507b2beb617" + "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907", + "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb" ], "markers": "python_version >= '3.8'", - "version": "==4.3.2" + "version": "==4.3.6" }, "pluggy": { "hashes": [ @@ -1640,20 +1643,20 @@ }, "protobuf": { "hashes": [ - "sha256:018db9056b9d75eb93d12a9d35120f97a84d9a919bcab11ed56ad2d399d6e8dd", - "sha256:510ed78cd0980f6d3218099e874714cdf0d8a95582e7b059b06cabad855ed0a0", - "sha256:532627e8fdd825cf8767a2d2b94d77e874d5ddb0adefb04b237f7cc296748681", - "sha256:6206afcb2d90181ae8722798dcb56dc76675ab67458ac24c0dd7d75d632ac9bd", - "sha256:66c3edeedb774a3508ae70d87b3a19786445fe9a068dd3585e0cefa8a77b83d0", - "sha256:6d7cc9e60f976cf3e873acb9a40fed04afb5d224608ed5c1a105db4a3f09c5b6", - 
"sha256:853db610214e77ee817ecf0514e0d1d052dff7f63a0c157aa6eabae98db8a8de", - "sha256:d001a73c8bc2bf5b5c1360d59dd7573744e163b3607fa92788b7f3d5fefbd9a5", - "sha256:dde74af0fa774fa98892209992295adbfb91da3fa98c8f67a88afe8f5a349add", - "sha256:dde9fcaa24e7a9654f4baf2a55250b13a5ea701493d904c54069776b99a8216b", - "sha256:eef7a8a2f4318e2cb2dee8666d26e58eaf437c14788f3a2911d0c3da40405ae8" + "sha256:2c69461a7fcc8e24be697624c09a839976d82ae75062b11a0972e41fd2cd9132", + "sha256:35cfcb15f213449af7ff6198d6eb5f739c37d7e4f1c09b5d0641babf2cc0c68f", + "sha256:52235802093bd8a2811abbe8bf0ab9c5f54cca0a751fdd3f6ac2a21438bffece", + "sha256:59379674ff119717404f7454647913787034f03fe7049cbef1d74a97bb4593f0", + "sha256:5e8a95246d581eef20471b5d5ba010d55f66740942b95ba9b872d918c459452f", + "sha256:87317e9bcda04a32f2ee82089a204d3a2f0d3c8aeed16568c7daf4756e4f1fe0", + "sha256:8ddc60bf374785fb7cb12510b267f59067fa10087325b8e1855b898a0d81d276", + "sha256:a8b9403fc70764b08d2f593ce44f1d2920c5077bf7d311fefec999f8c40f78b7", + "sha256:c0ea0123dac3399a2eeb1a1443d82b7afc9ff40241433296769f7da42d142ec3", + "sha256:ca53faf29896c526863366a52a8f4d88e69cd04ec9571ed6082fa117fac3ab36", + "sha256:eeea10f3dc0ac7e6b4933d32db20662902b4ab81bf28df12218aa389e9c2102d" ], "markers": "python_version >= '3.8'", - "version": "==5.28.0" + "version": "==5.28.2" }, "py": { "hashes": [ @@ -1665,6 +1668,7 @@ }, "pyasn1": { "hashes": [ + "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034" ], "markers": "python_version >= '3.8'", @@ -1672,6 +1676,7 @@ }, "pyasn1-modules": { "hashes": [ + "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd", "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c" ], "markers": "python_version >= '3.8'", @@ -1793,11 +1798,11 @@ }, "urllib3": { "hashes": [ - "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472", - 
"sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168" + "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", + "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9" ], "markers": "python_version >= '3.8'", - "version": "==2.2.2" + "version": "==2.2.3" } } } diff --git a/abr-testing/abr_testing/automation/google_sheets_tool.py b/abr-testing/abr_testing/automation/google_sheets_tool.py index c150eb93f5e..beeb141dc9a 100644 --- a/abr-testing/abr_testing/automation/google_sheets_tool.py +++ b/abr-testing/abr_testing/automation/google_sheets_tool.py @@ -225,8 +225,8 @@ def get_sheet_by_name(self, title: str) -> None: def token_check(self) -> None: """Check if credentials are still valid and refresh if expired.""" - if self.credentials.expired: - self.credentials.refresh() # Refresh the credentials + if self.credentials.access_token_expired: + self.gc.login() # Refresh the credentials def get_row_index_with_value(self, some_string: str, col_num: int) -> Any: """Find row index of string by looking in specific column.""" diff --git a/abr-testing/abr_testing/data_collection/abr_google_drive.py b/abr-testing/abr_testing/data_collection/abr_google_drive.py index f1fe6ad0c8f..d0d281c5ecc 100644 --- a/abr-testing/abr_testing/data_collection/abr_google_drive.py +++ b/abr-testing/abr_testing/data_collection/abr_google_drive.py @@ -7,8 +7,7 @@ from abr_testing.data_collection import read_robot_logs from typing import Set, Dict, Any, Tuple, List, Union from abr_testing.automation import google_drive_tool, google_sheets_tool -from abr_testing.tools import sync_abr_sheet - +from abr_testing.tools import sync_abr_sheet, plate_reader def get_modules(file_results: Dict[str, str]) -> Dict[str, Any]: """Get module IPs and models from run log.""" @@ -17,6 +16,7 @@ def get_modules(file_results: Dict[str, str]) -> Dict[str, Any]: "temperatureModuleV2", "magneticBlockV1", "thermocyclerModuleV2", + "absorbanceReaderV1" ) all_modules 
= {key: "" for key in modList} for module in file_results.get("modules", []): @@ -35,6 +35,7 @@ def create_data_dictionary( issue_url: str, plate: str, accuracy: Any, + hellma_plate_standards: List[Dict[str, Any]] ) -> Tuple[List[List[Any]], List[str], List[List[Any]], List[str]]: """Pull data from run files and format into a dictionary.""" runs_and_robots: List[Any] = [] @@ -113,6 +114,7 @@ def create_data_dictionary( hs_dict = read_robot_logs.hs_commands(file_results) tm_dict = read_robot_logs.temperature_module_commands(file_results) pipette_dict = read_robot_logs.instrument_commands(file_results) + plate_reader_dict = read_robot_logs.plate_reader_commands(file_results, hellma_plate_standards) notes = {"Note1": "", "Jira Link": issue_url} plate_measure = { "Plate Measured": plate, @@ -132,6 +134,7 @@ def create_data_dictionary( **hs_dict, **tm_dict, **tc_dict, + **plate_reader_dict, **pipette_dict, **plate_measure, } @@ -181,6 +184,8 @@ def create_data_dictionary( storage_directory = args.storage_directory[0] google_sheet_name = args.google_sheet_name[0] email = args.email[0] + + try: credentials_path = os.path.join(storage_directory, "credentials.json") except FileNotFoundError: @@ -203,13 +208,15 @@ def create_data_dictionary( missing_runs_from_gs = read_robot_logs.get_unseen_run_ids( run_ids_on_gd, run_ids_on_gs ) + # Read Hellma Files + file_values = plate_reader.read_hellma_plate_files(storage_directory, 101934) # Add missing runs to google sheet ( transposed_runs_and_robots, headers, transposed_runs_and_lpc, headers_lpc, - ) = create_data_dictionary(missing_runs_from_gs, storage_directory, "", "", "") + ) = create_data_dictionary(missing_runs_from_gs, storage_directory, "", "", "", hellma_plate_standards=file_values) start_row = google_sheet.get_index_row() + 1 print(start_row) google_sheet.batch_update_cells(transposed_runs_and_robots, "A", start_row, "0") diff --git a/abr-testing/abr_testing/data_collection/get_run_logs.py 
b/abr-testing/abr_testing/data_collection/get_run_logs.py index cc36acd5760..c3b66d2e19a 100644 --- a/abr-testing/abr_testing/data_collection/get_run_logs.py +++ b/abr-testing/abr_testing/data_collection/get_run_logs.py @@ -47,7 +47,7 @@ def get_run_data(one_run: Any, ip: str) -> Dict[str, Any]: params={"cursor": cursor, "pageLength": page_length}, ) command_data = response.json() - commands.extend(command_data["data"]) + commands.extend(command_data.get("data","")) run["commands"] = commands response = requests.get( f"http://{ip}:31950/runs/{one_run}", headers={"opentrons-version": "3"} diff --git a/abr-testing/abr_testing/data_collection/read_robot_logs.py b/abr-testing/abr_testing/data_collection/read_robot_logs.py index 96182609c49..55561a0b9c7 100644 --- a/abr-testing/abr_testing/data_collection/read_robot_logs.py +++ b/abr-testing/abr_testing/data_collection/read_robot_logs.py @@ -14,6 +14,7 @@ import json import requests import sys +from abr_testing.tools import plate_reader def lpc_data( @@ -167,7 +168,57 @@ def instrument_commands(file_results: Dict[str, Any]) -> Dict[str, float]: } return pipette_dict - +def plate_reader_commands(file_results: Dict[str,Any], hellma_plate_standards: List[Dict[str, Any]])-> Dict[str, float]: + """Plate Reader Command Counts.""" + commandData = file_results.get("commands", "") + move_lid_count: int = 0 + initialize_count: int = 0 + read = "no" + final_result = {} + # Count Number of Reads + read_count, avg_read_time = count_command_in_run_data( + commandData, "absorbanceReader/read", True + ) + # Count Number of Initializations + initialize_count, avg_initialize_time = count_command_in_run_data(commandData, "absorbanceReader/initialize", True) + # Count Number of Lid Movements + for command in commandData: + commandType = command["commandType"] + if commandType == "absorbanceReader/openLid" or commandType == "absorbanceReader/closeLid": + move_lid_count +=1 + elif commandType == "absorbanceReader/read": + read = "yes" + elif 
read == "yes" and commandType == "comment": + result = command["params"].get("message", "") + wavelength = result.split('result: {')[1].split(":")[0] + wavelength_str = wavelength + ": " + rest_of_string = result.split(wavelength_str)[1][:-1] + result_dict = eval(rest_of_string) + result_ndarray = plate_reader.convert_read_dictionary_to_array(result_dict) + for item in hellma_plate_standards: + wavelength_of_interest = item["wavelength"] + if str(wavelength) == str(wavelength_of_interest): + error_cells = plate_reader.check_byonoy_data_accuracy(result_ndarray, item, False) + if len(error_cells[0]) > 0: + percent = (96 - len(error_cells))/96 * 100 + for cell in error_cells: + print("FAIL: Cell " + str(cell) + " out of accuracy spec.") + else: + percent = 100 + print("PASS: All cells at this wavelength and orientation meet accuracy specification") + final_result[wavelength] = percent + input("###########################") + read = "no" + plate_dict = { + "Plate Reader # of Reads": read_count, + "Plate Reader Avg Read Time (sec)": avg_read_time, + "Plate Reader # of Initializations": initialize_count, + "Plate Reader Avg Initialize Time (sec)": avg_initialize_time, + "Plate Reader # of Lid Movements": move_lid_count, + "Plate Reader Result": final_result + } + return plate_dict + def hs_commands(file_results: Dict[str, Any]) -> Dict[str, float]: """Gets total latch engagements, homes, rotations and total on time (sec) for heater shaker.""" # TODO: modify for cases that have more than 1 heater shaker. 
diff --git a/abr-testing/abr_testing/tools/plate_reader.py b/abr-testing/abr_testing/tools/plate_reader.py new file mode 100644 index 00000000000..15283de7922 --- /dev/null +++ b/abr-testing/abr_testing/tools/plate_reader.py @@ -0,0 +1,117 @@ +"""Plate Reader Functions""" +import argparse +import os +import numpy as np +from typing import Dict, Any + +def convert_read_dictionary_to_array(read_data:Dict[str,Any])-> np.ndarray: + """Convert a dictionary of read results to an array + + Converts a dictionary of OD values, as formatted by the Opentrons API's + plate reader read() function, to a 2D numpy.array of shape (8,12) for + further processing. + + read_data: dict + a dictonary of read values with celll numbers for keys, e.g. 'A1' + """ + data = np.empty((8,12)) + for key, value in read_data.items(): + row_index = ord(key[0]) - ord('A') + column_index = int(key[1:]) - 1 + data[row_index][column_index] = value + + return data + +def check_byonoy_data_accuracy(output: Dict[str, Any], cal, flipped:bool): + """Check multiple OD measurements for accuracy + + od_list: list of 2D numpy.array of shape (8,12) + a list of multiple plate readings as returned by read_byonoy_directory_to_list() + cal: namedtuple + 2D numpy.array of shape (8,12) of calibration values, and 1D + numpy.array of tolerances, as returned by read_byonoy_file_to_array + flipped: bool + True if reference plate was rotated 180 degrees for measurment + """ + print("entered analysis") + run_error_cells = [] + cal_data = cal["data"] + cal_tolerance = cal["tolerance"] + # Calculate absolute accuracy tolerances for each cell + # The last two columns have a higher tolerance per the Byonoy datasheet + # because OD>2.0 and wavelength>=450nm on the Hellma plate + accuracy_tolerances = np.zeros((8,12)) + accuracy_tolerances[:,:10] = cal_data[:,:10]*0.01 + cal_tolerance[:10] + 0.01 + accuracy_tolerances[:,10:] = cal_data[:,10:]*0.015 + cal_tolerance[10:] + 0.01 + if (flipped): + within_tolerance = 
np.isclose(output, np.rot90(cal_data, 2), atol=np.rot90(accuracy_tolerances, 2)) + else: + within_tolerance = np.isclose(output, cal_data, atol=accuracy_tolerances) + #print(within_tolerance) + errors = np.where(within_tolerance==False) + error_cells = [(chr(ord('@')+errors[0][i]+1) + str(errors[1][i]+1)) for i in range(0, len(errors[0]))] + run_error_cells.append(error_cells) + return run_error_cells + +def read_byonoy_file_to_array(filename)-> Dict[str, Any]: + """Read a Byonoy endpoint CSV file into a numpy array + + Returns a named tuple with a 2D numpy array of shape (8,12) of OD values + from a Byonoy endpoint CSV file and a 1D numpy array of tolerances (which + are present in the reference plate calibration data files). + + filename: str + absolute path and filename of the CSV file to be read + """ + wavelength = filename.split("nm.csv")[0].split("_")[-1] + with open(filename, 'r') as f: + #print(filename) + + f.seek(0) + file_data = np.genfromtxt(f, usecols=range(1,13), skip_header=1, max_rows=8, delimiter = ',') + #print(file_data.shape, file_data) + + f.seek(0) + file_tolerance = np.genfromtxt(f, usecols=range(1,13), skip_header=9, max_rows=1, delimiter = ',') + #print(file_tolerance.shape, file_tolerance) + + File_Values = {'wavelength': wavelength,'data': file_data, 'tolerance': file_tolerance} + return File_Values + +def read_hellma_plate_files(storage_directory: str, hellma_plate_number: int)-> None: + wavelengths = [405, 450, 490, 650] + file_values = [] + for wave in wavelengths: + file_name = "_".join(["hellma", str(hellma_plate_number), str(wave)]) + file_name_csv = file_name + "nm.csv" + try: + file_path = os.path.join(storage_directory, file_name_csv) + File_Values = read_byonoy_file_to_array(file_path) + file_values.append(File_Values) + except FileNotFoundError: + print(f"Hellma plate {hellma_plate_number} file at {wave} does not exist in this folder.") + continue + return file_values + + + +if __name__ == "__main__": + parser = 
argparse.ArgumentParser(description="Read storage directory with hellma plate files.") + parser.add_argument( + "storage_directory", + metavar="STORAGE_DIRECTORY", + type=str, + nargs=1, + help="Path to storage directory for hellma plate files.", + ) + parser.add_argument( + "hellma_plate_number", + metavar="HELLMA_PLATE_NUMBER", + type=int, + nargs=1, + help="Hellma Plate Number.", + ) + args = parser.parse_args() + storage_directory = args.storage_directory[0] + hellma_plate_number = args.hellma_plate_number[0] + read_hellma_plate_files() \ No newline at end of file From fb7082809c490a6b8670005061f7ce94743bb895 Mon Sep 17 00:00:00 2001 From: rclarke0 Date: Mon, 23 Sep 2024 14:01:25 -0400 Subject: [PATCH 3/3] feat(abr-testing): data recording for plate reader --- abr-testing/Pipfile | 2 +- .../data_collection/abr_google_drive.py | 21 +++- .../data_collection/abr_robot_error.py | 11 +- .../data_collection/get_run_logs.py | 2 +- .../data_collection/read_robot_logs.py | 33 +++-- .../data_collection/single_run_log_reader.py | 11 +- abr-testing/abr_testing/tools/abr_scale.py | 11 +- abr-testing/abr_testing/tools/plate_reader.py | 113 +++++++++++------- 8 files changed, 139 insertions(+), 65 deletions(-) diff --git a/abr-testing/Pipfile b/abr-testing/Pipfile index 4bcece1c630..0ea9e6f76aa 100644 --- a/abr-testing/Pipfile +++ b/abr-testing/Pipfile @@ -18,7 +18,7 @@ slackclient = "*" slack-sdk = "*" pandas = "*" pandas-stubs = "*" -numpy = "*" +numpy = "==1.8.3" [dev-packages] atomicwrites = "==1.4.1" diff --git a/abr-testing/abr_testing/data_collection/abr_google_drive.py b/abr-testing/abr_testing/data_collection/abr_google_drive.py index d0d281c5ecc..4556b62800b 100644 --- a/abr-testing/abr_testing/data_collection/abr_google_drive.py +++ b/abr-testing/abr_testing/data_collection/abr_google_drive.py @@ -9,6 +9,7 @@ from abr_testing.automation import google_drive_tool, google_sheets_tool from abr_testing.tools import sync_abr_sheet, plate_reader + def 
get_modules(file_results: Dict[str, str]) -> Dict[str, Any]: """Get module IPs and models from run log.""" modList = ( @@ -16,7 +17,7 @@ def get_modules(file_results: Dict[str, str]) -> Dict[str, Any]: "temperatureModuleV2", "magneticBlockV1", "thermocyclerModuleV2", - "absorbanceReaderV1" + "absorbanceReaderV1", ) all_modules = {key: "" for key in modList} for module in file_results.get("modules", []): @@ -35,7 +36,7 @@ def create_data_dictionary( issue_url: str, plate: str, accuracy: Any, - hellma_plate_standards: List[Dict[str, Any]] + hellma_plate_standards: List[Dict[str, Any]], ) -> Tuple[List[List[Any]], List[str], List[List[Any]], List[str]]: """Pull data from run files and format into a dictionary.""" runs_and_robots: List[Any] = [] @@ -114,7 +115,9 @@ def create_data_dictionary( hs_dict = read_robot_logs.hs_commands(file_results) tm_dict = read_robot_logs.temperature_module_commands(file_results) pipette_dict = read_robot_logs.instrument_commands(file_results) - plate_reader_dict = read_robot_logs.plate_reader_commands(file_results, hellma_plate_standards) + plate_reader_dict = read_robot_logs.plate_reader_commands( + file_results, hellma_plate_standards + ) notes = {"Note1": "", "Jira Link": issue_url} plate_measure = { "Plate Measured": plate, @@ -184,8 +187,7 @@ def create_data_dictionary( storage_directory = args.storage_directory[0] google_sheet_name = args.google_sheet_name[0] email = args.email[0] - - + try: credentials_path = os.path.join(storage_directory, "credentials.json") except FileNotFoundError: @@ -216,7 +218,14 @@ def create_data_dictionary( headers, transposed_runs_and_lpc, headers_lpc, - ) = create_data_dictionary(missing_runs_from_gs, storage_directory, "", "", "", hellma_plate_standards=file_values) + ) = create_data_dictionary( + missing_runs_from_gs, + storage_directory, + "", + "", + "", + hellma_plate_standards=file_values, + ) start_row = google_sheet.get_index_row() + 1 print(start_row) 
google_sheet.batch_update_cells(transposed_runs_and_robots, "A", start_row, "0") diff --git a/abr-testing/abr_testing/data_collection/abr_robot_error.py b/abr-testing/abr_testing/data_collection/abr_robot_error.py index 9f87f7d4c46..3edf9b315c8 100644 --- a/abr-testing/abr_testing/data_collection/abr_robot_error.py +++ b/abr-testing/abr_testing/data_collection/abr_robot_error.py @@ -13,6 +13,7 @@ import re import pandas as pd from statistics import mean, StatisticsError +from abr_testing.tools import plate_reader def compare_current_trh_to_average( @@ -590,13 +591,21 @@ def get_run_error_info_from_robot( except FileNotFoundError: print("Run file not uploaded.") run_id = os.path.basename(error_run_log).split("_")[1].split(".")[0] + # Get hellma readings + file_values = plate_reader.read_hellma_plate_files(storage_directory, 101934) + ( runs_and_robots, headers, runs_and_lpc, headers_lpc, ) = abr_google_drive.create_data_dictionary( - run_id, error_folder_path, issue_url, "", "" + run_id, + error_folder_path, + issue_url, + "", + "", + hellma_plate_standards=file_values, ) start_row = google_sheet.get_index_row() + 1 diff --git a/abr-testing/abr_testing/data_collection/get_run_logs.py b/abr-testing/abr_testing/data_collection/get_run_logs.py index c3b66d2e19a..3d8eb851197 100644 --- a/abr-testing/abr_testing/data_collection/get_run_logs.py +++ b/abr-testing/abr_testing/data_collection/get_run_logs.py @@ -47,7 +47,7 @@ def get_run_data(one_run: Any, ip: str) -> Dict[str, Any]: params={"cursor": cursor, "pageLength": page_length}, ) command_data = response.json() - commands.extend(command_data.get("data","")) + commands.extend(command_data.get("data", "")) run["commands"] = commands response = requests.get( f"http://{ip}:31950/runs/{one_run}", headers={"opentrons-version": "3"} diff --git a/abr-testing/abr_testing/data_collection/read_robot_logs.py b/abr-testing/abr_testing/data_collection/read_robot_logs.py index 55561a0b9c7..3501a330a70 100644 --- 
a/abr-testing/abr_testing/data_collection/read_robot_logs.py +++ b/abr-testing/abr_testing/data_collection/read_robot_logs.py @@ -168,7 +168,10 @@ def instrument_commands(file_results: Dict[str, Any]) -> Dict[str, float]: } return pipette_dict -def plate_reader_commands(file_results: Dict[str,Any], hellma_plate_standards: List[Dict[str, Any]])-> Dict[str, float]: + +def plate_reader_commands( + file_results: Dict[str, Any], hellma_plate_standards: List[Dict[str, Any]] +) -> Dict[str, object]: """Plate Reader Command Counts.""" commandData = file_results.get("commands", "") move_lid_count: int = 0 @@ -180,17 +183,22 @@ def plate_reader_commands(file_results: Dict[str,Any], hellma_plate_standards: L commandData, "absorbanceReader/read", True ) # Count Number of Initializations - initialize_count, avg_initialize_time = count_command_in_run_data(commandData, "absorbanceReader/initialize", True) + initialize_count, avg_initialize_time = count_command_in_run_data( + commandData, "absorbanceReader/initialize", True + ) # Count Number of Lid Movements for command in commandData: commandType = command["commandType"] - if commandType == "absorbanceReader/openLid" or commandType == "absorbanceReader/closeLid": - move_lid_count +=1 + if ( + commandType == "absorbanceReader/openLid" + or commandType == "absorbanceReader/closeLid" + ): + move_lid_count += 1 elif commandType == "absorbanceReader/read": read = "yes" elif read == "yes" and commandType == "comment": result = command["params"].get("message", "") - wavelength = result.split('result: {')[1].split(":")[0] + wavelength = result.split("result: {")[1].split(":")[0] wavelength_str = wavelength + ": " rest_of_string = result.split(wavelength_str)[1][:-1] result_dict = eval(rest_of_string) @@ -198,14 +206,18 @@ def plate_reader_commands(file_results: Dict[str,Any], hellma_plate_standards: L for item in hellma_plate_standards: wavelength_of_interest = item["wavelength"] if str(wavelength) == str(wavelength_of_interest): - 
error_cells = plate_reader.check_byonoy_data_accuracy(result_ndarray, item, False) + error_cells = plate_reader.check_byonoy_data_accuracy( + result_ndarray, item, False + ) if len(error_cells[0]) > 0: - percent = (96 - len(error_cells))/96 * 100 + percent = (96 - len(error_cells)) / 96 * 100 for cell in error_cells: print("FAIL: Cell " + str(cell) + " out of accuracy spec.") else: percent = 100 - print("PASS: All cells at this wavelength and orientation meet accuracy specification") + print( + f"PASS: {wavelength_of_interest} meet accuracy specification" + ) final_result[wavelength] = percent input("###########################") read = "no" @@ -215,10 +227,11 @@ def plate_reader_commands(file_results: Dict[str,Any], hellma_plate_standards: L "Plate Reader # of Initializations": initialize_count, "Plate Reader Avg Initialize Time (sec)": avg_initialize_time, "Plate Reader # of Lid Movements": move_lid_count, - "Plate Reader Result": final_result + "Plate Reader Result": final_result, } return plate_dict - + + def hs_commands(file_results: Dict[str, Any]) -> Dict[str, float]: """Gets total latch engagements, homes, rotations and total on time (sec) for heater shaker.""" # TODO: modify for cases that have more than 1 heater shaker. 
diff --git a/abr-testing/abr_testing/data_collection/single_run_log_reader.py b/abr-testing/abr_testing/data_collection/single_run_log_reader.py index 5304842b550..39060529c89 100644 --- a/abr-testing/abr_testing/data_collection/single_run_log_reader.py +++ b/abr-testing/abr_testing/data_collection/single_run_log_reader.py @@ -5,6 +5,7 @@ import csv from abr_testing.data_collection import read_robot_logs from abr_testing.data_collection import abr_google_drive +from abr_testing.tools import plate_reader if __name__ == "__main__": parser = argparse.ArgumentParser(description="Read single run log locally saved.") @@ -25,13 +26,21 @@ sys.exit() # Get Runs from Storage and Read Logs run_ids_in_storage = read_robot_logs.get_run_ids_from_storage(run_log_file_path) + # Get hellma readins + file_values = plate_reader.read_hellma_plate_files(run_log_file_path, 101934) + ( runs_and_robots, header, runs_and_lpc, lpc_headers, ) = abr_google_drive.create_data_dictionary( - run_ids_in_storage, run_log_file_path, "", "", "" + run_ids_in_storage, + run_log_file_path, + "", + "", + "", + hellma_plate_standards=file_values, ) transposed_list = list(zip(*runs_and_robots)) # Adds Run to local csv diff --git a/abr-testing/abr_testing/tools/abr_scale.py b/abr-testing/abr_testing/tools/abr_scale.py index d02bf0acfed..0f6c29c3f69 100644 --- a/abr-testing/abr_testing/tools/abr_scale.py +++ b/abr-testing/abr_testing/tools/abr_scale.py @@ -10,6 +10,7 @@ from typing import Any, Tuple import sys import json +from abr_testing.tools import plate_reader def get_protocol_step_as_int( @@ -141,14 +142,20 @@ def get_most_recent_run_and_record( ) # Record run to google sheets. 
print(most_recent_run_id) - + # Read Hellma Files + hellma_file_values = plate_reader.read_hellma_plate_files(storage_directory, 101934) ( runs_and_robots, headers, runs_and_lpc, headers_lpc, ) = abr_google_drive.create_data_dictionary( - most_recent_run_id, storage_directory, "", labware, accuracy + most_recent_run_id, + storage_directory, + "", + labware, + accuracy, + hellma_plate_standards=hellma_file_values, ) google_sheet_abr_data = google_sheets_tool.google_sheet( credentials_path, "ABR-run-data", tab_number=0 diff --git a/abr-testing/abr_testing/tools/plate_reader.py b/abr-testing/abr_testing/tools/plate_reader.py index 15283de7922..3be15fa4268 100644 --- a/abr-testing/abr_testing/tools/plate_reader.py +++ b/abr-testing/abr_testing/tools/plate_reader.py @@ -1,34 +1,38 @@ -"""Plate Reader Functions""" +"""Plate Reader Functions.""" import argparse import os import numpy as np -from typing import Dict, Any +from typing import Dict, Any, List + + +def convert_read_dictionary_to_array(read_data: Dict[str, Any]) -> np.ndarray: + """Convert a dictionary of read results to an array. -def convert_read_dictionary_to_array(read_data:Dict[str,Any])-> np.ndarray: - """Convert a dictionary of read results to an array - Converts a dictionary of OD values, as formatted by the Opentrons API's - plate reader read() function, to a 2D numpy.array of shape (8,12) for + plate reader read() function, to a 2D numpy.array of shape (8,12) for further processing. - + read_data: dict a dictonary of read values with celll numbers for keys, e.g. 
'A1' """ - data = np.empty((8,12)) + data = np.empty((8, 12)) for key, value in read_data.items(): - row_index = ord(key[0]) - ord('A') + row_index = ord(key[0]) - ord("A") column_index = int(key[1:]) - 1 data[row_index][column_index] = value return data -def check_byonoy_data_accuracy(output: Dict[str, Any], cal, flipped:bool): - """Check multiple OD measurements for accuracy - + +def check_byonoy_data_accuracy( + output: Any, cal: Dict[str, np.ndarray], flipped: bool +) -> List[Any]: + """Check multiple OD measurements for accuracy. + od_list: list of 2D numpy.array of shape (8,12) a list of multiple plate readings as returned by read_byonoy_directory_to_list() cal: namedtuple - 2D numpy.array of shape (8,12) of calibration values, and 1D + 2D numpy.array of shape (8,12) of calibration values, and 1D numpy.array of tolerances, as returned by read_byonoy_file_to_array flipped: bool True if reference plate was rotated 180 degrees for measurment @@ -40,45 +44,65 @@ def check_byonoy_data_accuracy(output: Dict[str, Any], cal, flipped:bool): # Calculate absolute accuracy tolerances for each cell # The last two columns have a higher tolerance per the Byonoy datasheet # because OD>2.0 and wavelength>=450nm on the Hellma plate - accuracy_tolerances = np.zeros((8,12)) - accuracy_tolerances[:,:10] = cal_data[:,:10]*0.01 + cal_tolerance[:10] + 0.01 - accuracy_tolerances[:,10:] = cal_data[:,10:]*0.015 + cal_tolerance[10:] + 0.01 - if (flipped): - within_tolerance = np.isclose(output, np.rot90(cal_data, 2), atol=np.rot90(accuracy_tolerances, 2)) + output_array = np.asarray(output) + accuracy_tols = np.zeros((8, 12)) + accuracy_tols[:, :10] = cal_data[:, :10] * 0.01 + cal_tolerance[:10] + 0.01 + accuracy_tols[:, 10:] = cal_data[:, 10:] * 0.015 + cal_tolerance[10:] + 0.01 + if flipped: + within_tolerance = np.isclose( + output_array, + np.rot90(cal_data, 2), + atol=np.rot90(accuracy_tols, 2), + ) # type: ignore else: - within_tolerance = np.isclose(output, cal_data, 
atol=accuracy_tolerances) - #print(within_tolerance) - errors = np.where(within_tolerance==False) - error_cells = [(chr(ord('@')+errors[0][i]+1) + str(errors[1][i]+1)) for i in range(0, len(errors[0]))] - run_error_cells.append(error_cells) + within_tolerance = np.isclose(output_array, cal_data, atol=accuracy_tols) # type: ignore + errors = np.where(~within_tolerance) + error_cells = [ + (chr(ord("@") + errors[0][i] + 1) + str(errors[1][i] + 1)) + for i in range(0, len(errors[0])) + ] + run_error_cells.append(error_cells) return run_error_cells -def read_byonoy_file_to_array(filename)-> Dict[str, Any]: - """Read a Byonoy endpoint CSV file into a numpy array - + +def read_byonoy_file_to_array(filename: str) -> Dict[str, Any]: + """Read a Byonoy endpoint CSV file into a numpy array. + Returns a named tuple with a 2D numpy array of shape (8,12) of OD values - from a Byonoy endpoint CSV file and a 1D numpy array of tolerances (which + from a Byonoy endpoint CSV file and a 1D numpy array of tolerances (which are present in the reference plate calibration data files). 
- + filename: str absolute path and filename of the CSV file to be read """ wavelength = filename.split("nm.csv")[0].split("_")[-1] - with open(filename, 'r') as f: - #print(filename) - + with open(filename, "r") as f: + # print(filename) + f.seek(0) - file_data = np.genfromtxt(f, usecols=range(1,13), skip_header=1, max_rows=8, delimiter = ',') - #print(file_data.shape, file_data) - + file_data = np.genfromtxt( + f, usecols=range(1, 13), skip_header=1, max_rows=8, delimiter="," + ) + # print(file_data.shape, file_data) + f.seek(0) - file_tolerance = np.genfromtxt(f, usecols=range(1,13), skip_header=9, max_rows=1, delimiter = ',') - #print(file_tolerance.shape, file_tolerance) + file_tolerance = np.genfromtxt( + f, usecols=range(1, 13), skip_header=9, max_rows=1, delimiter="," + ) + # print(file_tolerance.shape, file_tolerance) - File_Values = {'wavelength': wavelength,'data': file_data, 'tolerance': file_tolerance} + File_Values = { + "wavelength": wavelength, + "data": file_data, + "tolerance": file_tolerance, + } return File_Values -def read_hellma_plate_files(storage_directory: str, hellma_plate_number: int)-> None: + +def read_hellma_plate_files( + storage_directory: str, hellma_plate_number: int +) -> List[Any]: + """Read hellma files for the following wavelengths.""" wavelengths = [405, 450, 490, 650] file_values = [] for wave in wavelengths: @@ -89,14 +113,17 @@ def read_hellma_plate_files(storage_directory: str, hellma_plate_number: int)-> File_Values = read_byonoy_file_to_array(file_path) file_values.append(File_Values) except FileNotFoundError: - print(f"Hellma plate {hellma_plate_number} file at {wave} does not exist in this folder.") + print( + f"Hellma plate {hellma_plate_number} file at {wave} does not exist in this folder." 
+ ) continue return file_values - - + if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Read storage directory with hellma plate files.") + parser = argparse.ArgumentParser( + description="Read storage directory with hellma plate files." + ) parser.add_argument( "storage_directory", metavar="STORAGE_DIRECTORY", @@ -114,4 +141,4 @@ def read_hellma_plate_files(storage_directory: str, hellma_plate_number: int)-> args = parser.parse_args() storage_directory = args.storage_directory[0] hellma_plate_number = args.hellma_plate_number[0] - read_hellma_plate_files() \ No newline at end of file + read_hellma_plate_files(storage_directory, hellma_plate_number)