Commit 3b8b530
Merge branch 'edge' into fix_support-responsiveness
koji committed Nov 15, 2024
2 parents 823e088 + 0110e17 commit 3b8b530
Showing 413 changed files with 20,287 additions and 7,827 deletions.
30 changes: 23 additions & 7 deletions .github/workflows/analyses-snapshot-test.yaml
@@ -45,12 +45,13 @@ jobs:
timeout-minutes: 15
runs-on: ubuntu-latest
env:
+ BASE_IMAGE_NAME: opentrons-python-base:3.10
ANALYSIS_REF: ${{ github.event.inputs.ANALYSIS_REF || github.head_ref || 'edge' }}
SNAPSHOT_REF: ${{ github.event.inputs.SNAPSHOT_REF || github.head_ref || 'edge' }}
# If we're running because of workflow_dispatch, use the user input to decide
# whether to open a PR on failure. Otherwise, there is no user input,
# so we only open a PR if the PR has the label 'gen-analyses-snapshot-pr'
- OPEN_PR_ON_FAILURE: ${{ (github.event_name == 'workflow_dispatch' && github.events.inputs.OPEN_PR_ON_FAILURE) || ((github.event_name != 'workflow_dispatch') && (contains(github.event.pull_request.labels.*.name, 'gen-analyses-snapshot-pr'))) }}
+ OPEN_PR_ON_FAILURE: ${{ (github.event_name == 'workflow_dispatch' && github.event.inputs.OPEN_PR_ON_FAILURE) || ((github.event_name != 'workflow_dispatch') && (contains(github.event.pull_request.labels.*.name, 'gen-analyses-snapshot-pr'))) }}
PR_TARGET_BRANCH: ${{ github.event.pull_request.base.ref || 'not a pr'}}
steps:
- name: Checkout Repository
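
Note: the OPEN_PR_ON_FAILURE change above fixes a typo in the expression (github.events.inputs becomes github.event.inputs; no "events" context exists, so the input was silently falsy). The intended decision logic, sketched in Python with hypothetical names (not part of the commit):

def open_pr_on_failure(event_name: str, inputs: dict, pr_labels: list) -> bool:
    """Sketch of the OPEN_PR_ON_FAILURE expression (assumed semantics)."""
    if event_name == "workflow_dispatch":
        # Manual runs: honor the user-supplied workflow input.
        return bool(inputs.get("OPEN_PR_ON_FAILURE"))
    # Other triggers: open a PR only when the source PR carries the label.
    return "gen-analyses-snapshot-pr" in pr_labels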
@@ -71,9 +72,24 @@ jobs:
echo "Analyses snapshots match ${{ env.PR_TARGET_BRANCH }} snapshots."
fi
- - name: Docker Build
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3

+ - name: Build base image
+ id: build_base_image
+ uses: docker/build-push-action@v6
+ with:
+ context: analyses-snapshot-testing/citools
+ file: analyses-snapshot-testing/citools/Dockerfile.base
+ push: false
+ load: true
+ tags: ${{ env.BASE_IMAGE_NAME }}
+ cache-from: type=gha
+ cache-to: type=gha,mode=max

+ - name: Build analysis image
working-directory: analyses-snapshot-testing
- run: make build-opentrons-analysis
+ run: make build-opentrons-analysis BASE_IMAGE_NAME=${{ env.BASE_IMAGE_NAME }} ANALYSIS_REF=${{ env.ANALYSIS_REF }} CACHEBUST=${{ github.run_number }}

- name: Set up Python 3.13
uses: actions/setup-python@v5
@@ -112,8 +128,8 @@ jobs:
commit-message: 'fix(analyses-snapshot-testing): heal analyses snapshots'
title: 'fix(analyses-snapshot-testing): heal ${{ env.ANALYSIS_REF }} snapshots'
body: 'This PR was requested on the PR https://github.com/${{ github.repository }}/pull/${{ github.event.pull_request.number }}'
- branch: 'analyses-snapshot-testing/${{ env.ANALYSIS_REF }}-from-${{ env.SNAPSHOT_REF}}'
- base: ${{ env.SNAPSHOT_REF}}
+ branch: 'analyses-snapshot-testing/${{ env.ANALYSIS_REF }}-from-${{ env.SNAPSHOT_REF }}'
+ base: ${{ env.SNAPSHOT_REF }}

- name: Comment on feature PR
if: always() && steps.create_pull_request.outcome == 'success' && github.event_name == 'pull_request'
@@ -135,5 +151,5 @@ jobs:
commit-message: 'fix(analyses-snapshot-testing): heal ${{ env.ANALYSIS_REF }} snapshots'
title: 'fix(analyses-snapshot-testing): heal ${{ env.ANALYSIS_REF }} snapshots'
body: 'The ${{ env.ANALYSIS_REF }} overnight analyses snapshot test is failing. This PR was opened to alert us to the failure.'
- branch: 'analyses-snapshot-testing/${{ env.ANALYSIS_REF }}-from-${{ env.SNAPSHOT_REF}}'
- base: ${{ env.SNAPSHOT_REF}}
+ branch: 'analyses-snapshot-testing/${{ env.ANALYSIS_REF }}-from-${{ env.SNAPSHOT_REF }}'
+ base: ${{ env.SNAPSHOT_REF }}
86 changes: 43 additions & 43 deletions .github/workflows/pd-test-build-deploy.yaml
@@ -79,49 +79,49 @@ jobs:
files: ./coverage/lcov.info
flags: protocol-designer

- # e2e-test:
- #   name: 'pd e2e tests'
- #   needs: ['js-unit-test']
- #   timeout-minutes: 30
- #   strategy:
- #     matrix:
- #       os: ['ubuntu-22.04']
- #   runs-on: '${{ matrix.os }}'
- #   steps:
- #     - uses: 'actions/checkout@v3'
- #       with:
- #         fetch-depth: 0
- #       # https://github.com/actions/checkout/issues/290
- #     - name: 'Fix actions/checkout odd handling of tags'
- #       if: startsWith(github.ref, 'refs/tags')
- #       run: |
- #         git fetch -f origin ${{ github.ref }}:${{ github.ref }}
- #         git checkout ${{ github.ref }}
- #     - uses: 'actions/setup-node@v3'
- #       with:
- #         node-version: '18.19.0'
- #     - name: 'install udev for usb-detection'
- #       if: startsWith(matrix.os, 'ubuntu')
- #       run: |
- #         # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
- #         sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
- #         sudo apt-get update && sudo apt-get install libudev-dev
- #     - name: 'cache yarn cache'
- #       uses: actions/cache@v3
- #       with:
- #         path: |
- #           ${{ github.workspace }}/.yarn-cache
- #           ${{ github.workspace }}/.npm-cache
- #         key: js-${{ secrets.GH_CACHE_VERSION }}-${{ runner.os }}-yarn-${{ hashFiles('yarn.lock') }}
- #         restore-keys: |
- #           js-${{ secrets.GH_CACHE_VERSION }}-${{ runner.os }}-yarn-
- #     - name: 'setup-js'
- #       run: |
- #         npm config set cache ./.npm-cache
- #         yarn config set cache-folder ./.yarn-cache
- #         make setup-js
- #     - name: 'test-e2e'
- #       run: make -C protocol-designer test-e2e
+ e2e-test:
+   name: 'pd e2e tests'
+   needs: ['js-unit-test']
+   timeout-minutes: 30
+   strategy:
+     matrix:
+       os: ['ubuntu-22.04']
+   runs-on: '${{ matrix.os }}'
+   steps:
+     - uses: 'actions/checkout@v3'
+       with:
+         fetch-depth: 0
+       # https://github.com/actions/checkout/issues/290
+     - name: 'Fix actions/checkout odd handling of tags'
+       if: startsWith(github.ref, 'refs/tags')
+       run: |
+         git fetch -f origin ${{ github.ref }}:${{ github.ref }}
+         git checkout ${{ github.ref }}
+     - uses: 'actions/setup-node@v3'
+       with:
+         node-version: '18.19.0'
+     - name: 'install udev for usb-detection'
+       if: startsWith(matrix.os, 'ubuntu')
+       run: |
+         # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+         sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+         sudo apt-get update && sudo apt-get install libudev-dev
+     - name: 'cache yarn cache'
+       uses: actions/cache@v3
+       with:
+         path: |
+           ${{ github.workspace }}/.yarn-cache
+           ${{ github.workspace }}/.npm-cache
+         key: js-${{ secrets.GH_CACHE_VERSION }}-${{ runner.os }}-yarn-${{ hashFiles('yarn.lock') }}
+         restore-keys: |
+           js-${{ secrets.GH_CACHE_VERSION }}-${{ runner.os }}-yarn-
+     - name: 'setup-js'
+       run: |
+         npm config set cache ./.npm-cache
+         yarn config set cache-folder ./.yarn-cache
+         make setup-js
+     - name: 'test-e2e'
+       run: make -C protocol-designer test-e2e
build-pd:
name: 'build protocol designer artifact'
needs: ['js-unit-test']
2 changes: 2 additions & 0 deletions abr-testing/abr_testing/automation/google_drive_tool.py
@@ -56,6 +56,8 @@ def list_folder(self, delete: Any = False, folder: bool = False) -> Set[str]:
else "" # type: ignore
if self.parent_folder
else None,
+ supportsAllDrives=True,
+ includeItemsFromAllDrives=True,
pageSize=1000,
fields="nextPageToken, files(id, name, mimeType)",
pageToken=page_token,
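
For context: in the google-api-python-client Drive v3 API, these two flags are what make files().list return items that live on shared drives. A minimal sketch, assuming creds already holds valid credentials (the query string is hypothetical):

from googleapiclient.discovery import build

service = build("drive", "v3", credentials=creds)  # creds assumed to exist

response = service.files().list(
    q="'FOLDER_ID' in parents",          # hypothetical parent-folder query
    supportsAllDrives=True,              # operate across shared drives
    includeItemsFromAllDrives=True,      # include shared-drive items in results
    pageSize=1000,
    fields="nextPageToken, files(id, name, mimeType)",
).execute()

for f in response.get("files", []):
    print(f["id"], f["name"], f["mimeType"])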
28 changes: 24 additions & 4 deletions abr-testing/abr_testing/data_collection/abr_google_drive.py
@@ -37,12 +37,13 @@ def create_data_dictionary(
plate: str,
accuracy: Any,
hellma_plate_standards: List[Dict[str, Any]],
- ) -> Tuple[List[List[Any]], List[str], List[List[Any]], List[str]]:
+ ) -> Tuple[List[List[Any]], List[str], List[List[Any]], List[str], List[List[Any]]]:
"""Pull data from run files and format into a dictionary."""
runs_and_robots: List[Any] = []
runs_and_lpc: List[Dict[str, Any]] = []
headers: List[str] = []
headers_lpc: List[str] = []
+ list_of_heights: List[List[Any]] = [[], [], [], [], [], [], [], []]
for filename in os.listdir(storage_directory):
file_path = os.path.join(storage_directory, filename)
if file_path.endswith(".json"):
@@ -120,6 +121,9 @@ def create_data_dictionary(
plate_reader_dict = read_robot_logs.plate_reader_commands(
file_results, hellma_plate_standards
)
+ list_of_heights = read_robot_logs.liquid_height_commands(
+     file_results, list_of_heights
+ )
notes = {"Note1": "", "Jira Link": issue_url}
plate_measure = {
"Plate Measured": plate,
@@ -155,7 +159,13 @@ def create_data_dictionary(
print(f"Number of runs read: {num_of_runs_read}")
transposed_runs_and_robots = list(map(list, zip(*runs_and_robots)))
transposed_runs_and_lpc = list(map(list, zip(*runs_and_lpc)))
- return transposed_runs_and_robots, headers, transposed_runs_and_lpc, headers_lpc
+ return (
+     transposed_runs_and_robots,
+     headers,
+     transposed_runs_and_lpc,
+     headers_lpc,
+     list_of_heights,
+ )


def run(
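
A note on the shape of the new fifth return value: list_of_heights is column-oriented, eight parallel lists, one per spreadsheet column, where index i across all eight lists describes one liquid-height reading. That matches how it is later handed whole to batch_update_cells. Illustrative sketch (sample values are invented; column meanings are inferred from liquid_height_commands further down):

list_of_heights = [
    ["DVT1ABR1"],               # robot
    ["run-abc123"],             # run_id
    ["2024-11-15T12:00:00Z"],   # time the comment completed
    ["tiprack"],                # labware type
    ["Opentrons 96 Tip Rack"],  # labware name
    ["1"],                      # slot
    ["A1"],                     # well
    [5.0],                      # liquid height found
]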
@@ -173,7 +183,8 @@ def run(
credentials_path, google_sheet_name, 0
)
# Get run ids on google sheet
- run_ids_on_gs = set(google_sheet.get_column(2))
+ run_ids_on_gs: Set[str] = set(google_sheet.get_column(2))
+
# Get robots on google sheet
# Uploads files that are not in google drive directory
google_drive.upload_missing_files(storage_directory)
@@ -191,6 +202,7 @@ def run(
headers,
transposed_runs_and_lpc,
headers_lpc,
+ list_of_heights,
) = create_data_dictionary(
missing_runs_from_gs,
storage_directory,
@@ -201,7 +213,15 @@ def run(
)
start_row = google_sheet.get_index_row() + 1
google_sheet.batch_update_cells(transposed_runs_and_robots, "A", start_row, "0")

+ # Record Liquid Heights Found
+ google_sheet_ldf = google_sheets_tool.google_sheet(
+     credentials_path, google_sheet_name, 2
+ )
+ google_sheet_ldf.get_row(1)
+ start_row_lhd = google_sheet_ldf.get_index_row() + 1
+ google_sheet_ldf.batch_update_cells(
+     list_of_heights, "A", start_row_lhd, "2075262446"
+ )
# Add LPC to google sheet
google_sheet_lpc = google_sheets_tool.google_sheet(credentials_path, "ABR-LPC", 0)
start_row_lpc = google_sheet_lpc.get_index_row() + 1
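
google_sheets_tool is Opentrons' internal helper and its implementation is not shown in this diff, so the following is an assumption-laden sketch only: a column-oriented batch write like batch_update_cells(list_of_heights, "A", start_row_lhd, ...) could be expressed against the raw Google Sheets v4 API roughly as:

from googleapiclient.discovery import build

def write_columns(creds, spreadsheet_id, sheet_name, columns, start_row):
    """Hypothetical stand-in for batch_update_cells, *not* the internal tool.

    Writes column-oriented data starting at A<start_row> of sheet_name.
    """
    service = build("sheets", "v4", credentials=creds)
    rows = [list(r) for r in zip(*columns)]  # transpose columns -> rows
    service.spreadsheets().values().update(
        spreadsheetId=spreadsheet_id,
        range=f"{sheet_name}!A{start_row}",
        valueInputOption="RAW",
        body={"values": rows},
    ).execute()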
10 changes: 10 additions & 0 deletions abr-testing/abr_testing/data_collection/abr_robot_error.py
@@ -602,6 +602,7 @@ def get_run_error_info_from_robot(
headers,
runs_and_lpc,
headers_lpc,
+ list_of_heights,
) = abr_google_drive.create_data_dictionary(
run_id,
error_folder_path,
@@ -614,6 +615,15 @@ def get_run_error_info_from_robot(
start_row = google_sheet.get_index_row() + 1
google_sheet.batch_update_cells(runs_and_robots, "A", start_row, "0")
print("Wrote run to ABR-run-data")
+ # Record Liquid Heights Found
+ google_sheet_ldf = google_sheets_tool.google_sheet(
+     credentials_path, google_sheet_name, 4
+ )
+ start_row_lhd = google_sheet_ldf.get_index_row() + 1
+ google_sheet_ldf.batch_update_cells(
+     list_of_heights, "A", start_row_lhd, "1795535088"
+ )
+ print("wrote liquid heights found.")
# Add LPC to google sheet
google_sheet_lpc = google_sheets_tool.google_sheet(
credentials_path, "ABR-LPC", 0
36 changes: 36 additions & 0 deletions abr-testing/abr_testing/data_collection/read_robot_logs.py
@@ -213,6 +213,42 @@ def instrument_commands(
return pipette_dict


+ def liquid_height_commands(
+     file_results: Dict[str, Any], all_heights_list: List[List[Any]]
+ ) -> List[List[Any]]:
+     """Record found liquid heights during a protocol."""
+     commandData = file_results.get("commands", "")
+     robot = file_results.get("robot_name", "")
+     run_id = file_results.get("run_id", "")
+     for command in commandData:
+         commandType = command["commandType"]
+         if commandType == "comment":
+             result = command["params"].get("message", "")
+             try:
+                 result_str = "'" + result.split("result: {")[1] + "'"
+                 entries = result_str.split(", (")
+                 comment_time = command["completedAt"]
+                 for entry in entries:
+                     height = float(entry.split(": ")[1].split("'")[0].split("}")[0])
+                     labware_type = str(
+                         entry.split(",")[0].replace("'", "").replace("(", "")
+                     )
+                     well_location = str(entry.split(", ")[1].split(" ")[0])
+                     slot_location = str(entry.split("slot ")[1].split(")")[0])
+                     labware_name = str(entry.split("of ")[1].split(" on")[0])
+                     all_heights_list[0].append(robot)
+                     all_heights_list[1].append(run_id)
+                     all_heights_list[2].append(comment_time)
+                     all_heights_list[3].append(labware_type)
+                     all_heights_list[4].append(labware_name)
+                     all_heights_list[5].append(slot_location)
+                     all_heights_list[6].append(well_location)
+                     all_heights_list[7].append(height)
+             except (IndexError, ValueError):
+                 continue
+     return all_heights_list


def plate_reader_commands(
file_results: Dict[str, Any], hellma_plate_standards: List[Dict[str, Any]]
) -> Dict[str, object]:
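
How the new liquid_height_commands parser above works, as a worked example. The comment-message format is not documented in the diff, so this input string is inferred from the split logic (hypothetical); comments that do not match are skipped by the except (IndexError, ValueError) clause:

message = "Liquid heights result: {(tiprack, A1 of Opentrons 96 Tip Rack on slot 1): 5.0}"

result_str = "'" + message.split("result: {")[1] + "'"
entries = result_str.split(", (")   # one entry per (labware, well) key in the dict repr
entry = entries[0]

float(entry.split(": ")[1].split("'")[0].split("}")[0])   # height       -> 5.0
entry.split(",")[0].replace("'", "").replace("(", "")     # labware type -> "tiprack"
entry.split(", ")[1].split(" ")[0]                        # well         -> "A1"
entry.split("slot ")[1].split(")")[0]                     # slot         -> "1"
entry.split("of ")[1].split(" on")[0]                     # labware name -> "Opentrons 96 Tip Rack"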
@@ -34,6 +34,7 @@
header,
runs_and_lpc,
lpc_headers,
+ list_of_heights,
) = abr_google_drive.create_data_dictionary(
run_ids_in_storage,
run_log_file_path,
@@ -42,6 +43,7 @@
"",
hellma_plate_standards=file_values,
)
print("list_of_heights not recorded.")
transposed_list = list(zip(*runs_and_robots))
# Adds Run to local csv
sheet_location = os.path.join(run_log_file_path, "saved_data.csv")