diff --git a/.codecov.yml b/.codecov.yml
index 1ac3cc70a0d..4eb13688110 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -1,6 +1,5 @@
ignore:
- '**/node_modules'
- - 'protocol-library-kludge'
- 'webpack-config'
- 'hardware-testing'
- '**/*.md'
diff --git a/.eslintignore b/.eslintignore
index 1444a777aa1..8d887bcfc64 100644
--- a/.eslintignore
+++ b/.eslintignore
@@ -5,11 +5,11 @@
**/build/**
**/venv/**
.opentrons_config
-
+**/tsconfig*.json
+**/vite.config.ts
# prettier
**/package.json
**/CHANGELOG.md
-lerna.json
!api/release-notes.md
!app-shell/build/release-notes.md
@@ -25,9 +25,10 @@ storybook-static
api/**
update-server/**
robot-server/**
-notify-server/**
shared-data/python/**
hardware-testing/**
# app-testing don't format the json protocols
app-testing/files
+# app-testing don't format the snapshots
+app-testing/tests/__snapshots__
diff --git a/.eslintrc.js b/.eslintrc.js
index e4d018b52be..6e70df2ff27 100644
--- a/.eslintrc.js
+++ b/.eslintrc.js
@@ -7,9 +7,15 @@ module.exports = {
project: require('path').join(__dirname, 'tsconfig-eslint.json'),
},
- extends: ['standard-with-typescript', 'plugin:react/recommended', 'prettier'],
+ extends: [
+ 'standard-with-typescript',
+ 'plugin:react/recommended',
+ 'prettier',
+ 'plugin:json/recommended',
+ 'plugin:storybook/recommended',
+ ],
- plugins: ['react', 'react-hooks', 'json', 'jest'],
+ plugins: ['react', 'react-hooks', 'json', 'testing-library'],
rules: {
camelcase: 'off',
@@ -32,6 +38,28 @@ module.exports = {
'no-case-declarations': 'warn',
'prefer-regex-literals': 'warn',
'react/prop-types': 'warn',
+
+ // Enforce notification hooks
+ 'no-restricted-imports': [
+ 'error',
+ {
+ paths: [
+ {
+ name: '@opentrons/react-api-client',
+ importNames: [
+ 'useAllRunsQuery',
+ 'useRunQuery',
+ 'useLastRunCommandKey',
+ 'useCurrentMaintenanceRun',
+ 'useDeckConfigurationQuery',
+ 'useAllCommandsAsPreSerializedList',
+ ],
+ message:
+ 'The HTTP hook is deprecated. Utilize the equivalent notification wrapper (useNotifyX) instead.',
+ },
+ ],
+ },
+ ],
},
globals: {},
@@ -50,7 +78,15 @@ module.exports = {
overrides: [
{
files: ['**/*.js'],
- parser: '@babel/eslint-parser',
+ extends: ['plugin:@typescript-eslint/disable-type-checked'],
+ parserOptions: {
+ project: require('path').join(__dirname, 'tsconfig-eslint.json'),
+ },
+ rules: {
+ '@typescript-eslint/no-var-requires': 'off',
+ '@typescript-eslint/explicit-function-return-type': 'warn',
+ '@typescript-eslint/no-unused-vars': 'warn',
+ },
},
{
// TODO(mc, 2021-03-18): remove to default these rules back to errors
@@ -65,6 +101,22 @@ module.exports = {
'@typescript-eslint/no-floating-promises': 'warn',
'@typescript-eslint/no-unnecessary-type-assertion': 'warn',
'@typescript-eslint/no-unnecessary-boolean-literal-compare': 'warn',
+ '@typescript-eslint/no-unsafe-argument': 'warn',
+ '@typescript-eslint/consistent-type-imports': 'warn',
+ '@typescript-eslint/consistent-indexed-object-style': 'warn',
+ '@typescript-eslint/no-confusing-void-expression': 'warn',
+ '@typescript-eslint/ban-types': 'warn',
+ '@typescript-eslint/non-nullable-type-assertion-style': 'warn',
+ '@typescript-eslint/await-thenable': 'warn',
+ '@typescript-eslint/ban-ts-comment': 'warn',
+ '@typescript-eslint/unbound-method': 'warn',
+ '@typescript-eslint/consistent-generic-constructors': 'warn',
+ '@typescript-eslint/no-misused-promises': 'warn',
+ // needed so the css prop can be passed to raw elements (babel adds it at build time for styled-components)
+ 'react/no-unknown-property': [
+ 'error',
+ { ignore: ['css', 'indeterminate'] },
+ ],
},
},
{
@@ -74,25 +126,23 @@ module.exports = {
'**/__mocks__/**.@(js|ts|tsx)',
'**/__utils__/**.@(js|ts|tsx)',
'**/__fixtures__/**.@(js|ts|tsx)',
+ '**/fixtures/**.@(js|ts|tsx)',
'scripts/*.@(js|ts|tsx)',
],
- env: {
- jest: true,
- },
- extends: ['plugin:jest/recommended'],
rules: {
- 'jest/expect-expect': 'off',
- 'jest/no-standalone-expect': 'off',
- 'jest/no-disabled-tests': 'error',
- 'jest/consistent-test-it': 'error',
'@typescript-eslint/consistent-type-assertions': 'off',
'@typescript-eslint/no-var-requires': 'off',
'@typescript-eslint/explicit-function-return-type': 'off',
+ '@typescript-eslint/no-confusing-void-expression': 'warn',
'node/handle-callback-err': 'off',
- // TODO(mc, 2021-01-29): fix these and remove warning overrides
- 'jest/no-deprecated-functions': 'warn',
- 'jest/valid-title': 'warn',
- 'jest/no-conditional-expect': 'warn',
+ },
+ },
+ {
+ files: ['**/__tests__/**test.tsx'],
+ extends: ['plugin:testing-library/react'],
+ rules: {
+ 'testing-library/no-manual-cleanup': 'off',
+ 'testing-library/prefer-screen-queries': 'warn',
},
},
{
@@ -105,6 +155,16 @@ module.exports = {
{
files: ['**/cypress/**'],
extends: ['plugin:cypress/recommended'],
+ rules: {
+ 'cypress/unsafe-to-chain-command': 'warn',
+ },
+ },
+ // Allow HTTP hooks in notification wrappers and tests
+ {
+ files: ['app/src/resources/**', '**/__tests__/**test**'],
+ rules: {
+ 'no-restricted-imports': 'off',
+ },
},
],
}
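
For orientation on the new `no-restricted-imports` rule above: outside the exempted `app/src/resources/**` and test files, importing one of the listed HTTP hooks straight from `@opentrons/react-api-client` now fails lint, and callers are pointed at a `useNotifyX` wrapper instead. A minimal TypeScript sketch of the intended migration; the wrapper name and import path follow the `useNotifyX` convention named in the rule's message and are illustrative assumptions, not taken from this diff.

    // Flagged by the rule: direct import of a deprecated HTTP hook.
    // import { useRunQuery } from '@opentrons/react-api-client'

    // Preferred: the equivalent notification wrapper (hypothetical name and path).
    import { useNotifyRunQuery } from '../resources/runs'

    export function useRunStatus(runId: string): string | undefined {
      const { data: run } = useNotifyRunQuery(runId)
      return run?.data?.status
    }
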
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index f0ee1890dd2..0367b720649 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -8,22 +8,11 @@
*.d.ts @Opentrons/js
/webpack-config @Opentrons/js
-# subprojects - those with clear team ownership should have appropriate notifications
-/protocol-designer @Opentrons/app-and-uis
-/labware-designer @Opentrons/app-and-uis
-/labware-library @Opentrons/app-and-uis
-/protocol-library-kludge @Opentrons/app-and-uis
-/update-server @Opentrons/robot-svcs
-/discovery-client @Opentrons/robot-svcs @Opentrons/app-and-uis
-/shared-data/pipette @Opentrons/embedded-sw
-/shared-data/protocol @Opentrons/robot-svcs @Opentrons/app-and-uis
-/shared-data/module @Opentrons/embedded-sw
-/shared-data/deck @Opentrons/embedded-sw
-/shared-data/labware @Opentrons/embedded-sw
-
# subprojects by language - some subprojects are shared by teams but united by a
# language community (including makefiles and config) so mark them appropriately
/app @Opentrons/js
+/api-client @Opentrons/js
+/react-api-client @Opentrons/js
/app-shell @Opentrons/js
/components @Opentrons/js
/api @Opentrons/py
diff --git a/.github/actions/python/pypi-deploy/action.yaml b/.github/actions/python/pypi-deploy/action.yaml
index 1ce4ff67a1e..e24ab6e7b20 100644
--- a/.github/actions/python/pypi-deploy/action.yaml
+++ b/.github/actions/python/pypi-deploy/action.yaml
@@ -28,7 +28,7 @@ runs:
fi
fi
status=0
- QUIET=1 BUILD_NUMBER=${OT_BUILD} make -C ${{ inputs.project }} clean deploy twine_repository_url=${{ inputs.repository_url }} pypi_username=opentrons pypi_password=${{ inputs.password }} || status=$?
+ CI=1 QUIET=1 BUILD_NUMBER=${OT_BUILD} make -C ${{ inputs.project }} clean deploy twine_repository_url=${{ inputs.repository_url }} pypi_username=__token__ pypi_password=${{ inputs.password }} || status=$?
if [[ ${status} != 0 ]] && [[ ${{ inputs.repository_url }} =~ "test.pypi.org" ]]; then
echo "upload failures allowed to test pypi"
exit 0
diff --git a/.github/actions/python/setup/action.yaml b/.github/actions/python/setup/action.yaml
index 6a6b02d9305..c90563ccd1f 100644
--- a/.github/actions/python/setup/action.yaml
+++ b/.github/actions/python/setup/action.yaml
@@ -14,6 +14,8 @@ runs:
- shell: bash
run: |
if [[ "${OSTYPE}" =~ "linux" ]]; then
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
sudo apt-get update
sudo apt-get install -y --no-install-recommends libsystemd-dev
fi
@@ -25,8 +27,10 @@ runs:
if: ${{ inputs.python-version != 'false' }}
run: echo "OT_VIRTUALENV_VERSION=${{ inputs.python-version }}" >> $GITHUB_ENV
- shell: bash
- run: |
- npm install --global shx@0.3.3
- $OT_PYTHON -m pip install pipenv==2021.5.29
+ run: npm install --global shx@0.3.3
+ - shell: bash
+ run: $OT_PYTHON -m pip install --upgrade pip
+ - shell: bash
+ run: $OT_PYTHON -m pip install --user pipenv==2023.12.1
- shell: bash
- run: 'make -C ${{ inputs.project }} setup'
+ run: 'make -C ${{ inputs.project }} setup || make -C ${{ inputs.project }} setup'
diff --git a/.github/actions/webstack/deploy-to-sandbox/action.yaml b/.github/actions/webstack/deploy-to-sandbox/action.yaml
index e9b2eec7698..43c4716e05e 100644
--- a/.github/actions/webstack/deploy-to-sandbox/action.yaml
+++ b/.github/actions/webstack/deploy-to-sandbox/action.yaml
@@ -15,4 +15,4 @@ runs:
steps:
- shell: bash
run: |
- aws s3 sync ${{ inputs.distPath }} s3://sandbox.${{ inputs.domain }}/${{ inputs.destPrefix }} --acl=public-read
+ aws s3 sync ${{ inputs.distPath }} s3://sandbox.${{ inputs.domain }}/${{ inputs.destPrefix }}
diff --git a/.github/workflows/abr-testing-lint-test.yaml b/.github/workflows/abr-testing-lint-test.yaml
new file mode 100644
index 00000000000..e103c61efdd
--- /dev/null
+++ b/.github/workflows/abr-testing-lint-test.yaml
@@ -0,0 +1,59 @@
+# This workflow runs test and lint on branch pushes that touch the abr-testing
+# project or its dependencies.
+
+name: 'abr-testing lint/test'
+on:
+ push:
+ paths:
+ - 'Makefile'
+ - 'abr-testing/**'
+ - 'scripts/**/*.mk'
+ - 'scripts/**/*.py'
+ - '.github/workflows/abr-testing-lint-test.yaml'
+ - '.github/actions/python/**'
+ branches:
+ - 'edge'
+ tags-ignore:
+ - '*'
+ pull_request:
+ paths:
+ - 'abr-testing/**'
+ - 'scripts/**/*.mk'
+ - 'scripts/**/*.py'
+ - '.github/workflows/abr-testing-lint-test.yaml'
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-${{ github.ref_name != 'edge' || github.run_id}}-${{ github.ref_type != 'tag' || github.run_id }}
+ cancel-in-progress: true
+
+defaults:
+ run:
+ shell: bash
+
+jobs:
+ lint-test:
+ runs-on: 'windows-latest'
+ steps:
+ - name: Checkout opentrons repo
+ uses: 'actions/checkout@v3'
+ with:
+ fetch-depth: 0
+ - name: Setup Node
+ uses: 'actions/setup-node@v3'
+ with:
+ node-version: '12'
+ - name: Setup Python
+ uses: 'actions/setup-python@v4'
+ with:
+ python-version: '3.10'
+ - name: Set up abr-testing project
+ uses: './.github/actions/python/setup'
+ with:
+ project: 'abr-testing'
+ - name: lint
+ run:
+ make -C abr-testing lint
+ - name: test
+ run:
+ make -C abr-testing test
diff --git a/.github/workflows/analyses-snapshot-test.yaml b/.github/workflows/analyses-snapshot-test.yaml
new file mode 100644
index 00000000000..1cef60e5f56
--- /dev/null
+++ b/.github/workflows/analyses-snapshot-test.yaml
@@ -0,0 +1,72 @@
+name: Analyses Snapshot Test
+
+on:
+ workflow_dispatch:
+ inputs:
+ TARGET:
+ description: 'Target branch or tag'
+ required: true
+ default: 'edge'
+ TEST_SOURCE:
+ description: 'Target for the test code'
+ required: true
+ default: 'edge'
+ schedule:
+ - cron: '26 7 * * *' # 7:26 AM UTC
+
+jobs:
+ build-and-test:
+ timeout-minutes: 15
+ runs-on: ubuntu-latest
+ env:
+ TARGET: ${{ github.event.inputs.TARGET || 'edge' }}
+ TEST_SOURCE: ${{ github.event.inputs.TEST_SOURCE || 'edge' }}
+
+ steps:
+ - name: Checkout Repository
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.TEST_SOURCE }}
+
+ - name: Docker Build
+ working-directory: app-testing
+ run: make build-opentrons-analysis
+
+ - name: Set up Python 3.12
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+ cache: 'pipenv'
+ cache-dependency-path: app-testing/Pipfile.lock
+
+ - name: Setup Python Dependencies
+ working-directory: app-testing
+ run: make setup
+
+ - name: Run Test
+ id: run_test
+ working-directory: app-testing
+ run: make snapshot-test
+
+ - name: Upload Report
+ if: '!cancelled()'
+ uses: actions/upload-artifact@v4
+ with:
+ name: test-report
+ path: app-testing/results/
+
+ - name: Handle Test Failure
+ if: failure()
+ working-directory: app-testing
+ run: make snapshot-test-update
+
+ - name: Create Snapshot Update Request
+ if: failure()
+ uses: peter-evans/create-pull-request@v5
+ with:
+ commit-message: 'fix(app-testing): snapshot failure capture'
+ title: 'fix(app-testing): snapshot failure capture'
+ body: 'This PR is an automated snapshot update request. Please review the changes and merge if they are acceptable, or find your bug and fix it.'
+ branch: 'app-testing/${{ env.TARGET }}-from-${{ env.TEST_SOURCE }}'
+ base: ${{ env.TEST_SOURCE }}
+
diff --git a/.github/workflows/api-test-lint-deploy.yaml b/.github/workflows/api-test-lint-deploy.yaml
index f4547dc9bdd..5143c6e8021 100644
--- a/.github/workflows/api-test-lint-deploy.yaml
+++ b/.github/workflows/api-test-lint-deploy.yaml
@@ -56,10 +56,10 @@ jobs:
fetch-depth: 0
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: 'actions/setup-python@v4'
with:
- python-version: '3.7'
+ python-version: '3.10'
- uses: './.github/actions/python/setup'
with:
@@ -75,17 +75,13 @@ jobs:
os: ['windows-2022', 'ubuntu-22.04', 'macos-latest']
# TODO(mc, 2022-02-24): expand this matrix to 3.8 and 3.9,
# preferably in a nightly cronjob on edge or something
- python: ['3.7', '3.10']
+ python: ['3.10']
with-ot-hardware: ['true', 'false']
exclude:
- os: 'windows-2022'
with-ot-hardware: 'true'
- os: 'macos-latest'
with-ot-hardware: 'true'
- - os: 'macos-latest'
- python: '3.10'
- - python: '3.10'
- with-ot-hardware: 'true'
runs-on: '${{ matrix.os }}'
steps:
- uses: 'actions/checkout@v3'
@@ -99,7 +95,7 @@ jobs:
git checkout ${{ github.ref }}
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: 'actions/setup-python@v4'
with:
python-version: ${{ matrix.python }}
@@ -149,10 +145,10 @@ jobs:
git checkout ${{ github.ref }}
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: 'actions/setup-python@v4'
with:
- python-version: '3.7'
+ python-version: '3.10'
- name: 'set complex environment variables'
uses: actions/github-script@v6
with:
@@ -169,11 +165,11 @@ jobs:
with:
project: 'api'
repository_url: 'https://test.pypi.org/legacy/'
- password: '${{ secrets.OT_TEST_PYPI_PASSWORD }}'
+ password: '${{ secrets.TEST_PYPI_DEPLOY_TOKEN_OPENTRONS }}'
- if: startsWith(env.OT_TAG, 'v')
name: 'upload to real pypi'
uses: './.github/actions/python/pypi-deploy'
with:
project: 'api'
repository_url: 'https://upload.pypi.org/legacy/'
- password: '${{ secrets.OT_PYPI_PASSWORD }}'
+ password: '${{ secrets.PYPI_DEPLOY_TOKEN_OPENTRONS }}'
diff --git a/.github/workflows/app-test-build-deploy.yaml b/.github/workflows/app-test-build-deploy.yaml
index dc93eae9c9a..878a875bdfc 100644
--- a/.github/workflows/app-test-build-deploy.yaml
+++ b/.github/workflows/app-test-build-deploy.yaml
@@ -11,7 +11,6 @@ on:
- 'app-shell-odd/**/*'
- 'components/**/*'
- 'shared-data/**/*'
- - 'webpack-config/**/*'
- 'discovery-client/**/*'
- '*.js'
- 'scripts/**/*'
@@ -32,7 +31,6 @@ on:
- 'app-shell-odd/**/*'
- 'components/**/*'
- 'shared-data/**/*'
- - 'webpack-config/**/*'
- 'discovery-client/**/*'
- '*.js'
- '*.json'
@@ -61,9 +59,12 @@ jobs:
- uses: 'actions/checkout@v3'
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'install udev'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'set complex environment variables'
id: 'set-vars'
uses: actions/github-script@v6
@@ -110,17 +111,18 @@ jobs:
git checkout ${{ github.ref }}
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: actions/setup-python@v4
with:
python-version: '3.10'
- - name: 'downgrade npm version'
- run: npm install -g npm@6
- name: check make version
run: make --version
- name: 'install libudev and libsystemd'
if: startsWith(matrix.os, 'ubuntu')
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'set complex environment variables'
id: 'set-vars'
uses: actions/github-script@v6
@@ -141,7 +143,7 @@ jobs:
yarn config set cache-folder ${{ github.workspace }}/.yarn-cache
make setup-js
- name: 'test native(er) packages'
- run: make test-js-internal tests="app-shell/src app-shell-odd/src discovery-client/src" cov_opts="--coverage=true --ci=true --collectCoverageFrom='(app-shell|app-shell-odd| discovery-client)/src/**/*.(js|ts|tsx)'"
+ run: make test-js-internal tests="app-shell/src app-shell-odd/src discovery-client/src" cov_opts="--coverage=true"
- name: 'Upload coverage report'
uses: 'codecov/codecov-action@v3'
with:
@@ -243,17 +245,18 @@ jobs:
git checkout ${{ github.ref }}
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: actions/setup-python@v4
with:
python-version: '3.10'
- - name: 'downgrade npm version'
- run: npm install -g npm@6
- name: check make version
run: make --version
- name: 'install libudev and libsystemd'
if: startsWith(matrix.os, 'ubuntu')
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'set complex environment variables'
id: 'set-vars'
uses: actions/github-script@v6
@@ -293,7 +296,7 @@ jobs:
OT_APP_DEPLOY_FOLDER: ${{ steps.project.outputs.folder }}
run: |
- make -C app-shell dist-${{ matrix.os }}
+ make -C app-shell dist-${{ matrix.os }} USE_HARD_LINKS=false
- name: 'upload github artifact'
if: matrix.target == 'desktop'
@@ -427,9 +430,12 @@ jobs:
git checkout ${{ github.ref }}
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'install udev'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'set complex environment variables'
id: 'set-vars'
uses: actions/github-script@v6
@@ -443,7 +449,6 @@ jobs:
path: |
${{ github.workspace }}/.npm-cache/_prebuild
${{ github.workspace }}/.yarn-cache
- key: js-${{ secrets.GH_CACHE_VERSION }}-${{ runner.os }}-yarn-${{ hashFiles('yarn.lock') }}
- name: 'setup-js'
run: |
npm config set cache ${{ github.workspace }}/.npm-cache
diff --git a/.github/workflows/app-testing-lint.yaml b/.github/workflows/app-testing-lint.yaml
index ec9b45bfe6c..446cea74306 100644
--- a/.github/workflows/app-testing-lint.yaml
+++ b/.github/workflows/app-testing-lint.yaml
@@ -19,20 +19,18 @@ jobs:
lint:
name: 'app-testing lint'
timeout-minutes: 5
- runs-on: 'ubuntu-22.04'
+ runs-on: 'ubuntu-latest'
steps:
- name: Checkout opentrons repo
- uses: 'actions/checkout@v3'
+ uses: 'actions/checkout@v4'
- name: Setup Python
- uses: 'actions/setup-python@v4'
+ uses: 'actions/setup-python@v5'
with:
- python-version: '3.11'
+ python-version: '3.12'
cache: 'pipenv'
cache-dependency-path: app-testing/Pipfile.lock
- - name: Install Pipenv
- run: pip install -U pipenv
- - name: Pipenv Install
+ - name: Setup
id: install
working-directory: ./app-testing
run: make setup
diff --git a/.github/workflows/components-test-build-deploy.yaml b/.github/workflows/components-test-build-deploy.yaml
index 60d3f19fc4e..7d4f2f5f49a 100644
--- a/.github/workflows/components-test-build-deploy.yaml
+++ b/.github/workflows/components-test-build-deploy.yaml
@@ -8,14 +8,12 @@ on:
- 'Makefile'
- 'components/**'
- 'app/**/*.stories.@(js|jsx|ts|tsx)'
- - 'webpack-config/**'
- 'package.json'
- '.github/workflows/components-test-build-deploy.yaml'
push:
paths:
- 'components/**'
- 'app/**/*.stories.@(js|jsx|ts|tsx)'
- - 'webpack-config/**'
- 'package.json'
- '.github/workflows/components-test-build-deploy.yaml'
branches:
@@ -44,9 +42,12 @@ jobs:
- uses: 'actions/checkout@v3'
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'install udev for usb-detection'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'cache yarn cache'
uses: actions/cache@v3
with:
@@ -59,7 +60,6 @@ jobs:
- name: 'setup-js'
run: |
npm config set cache ./.npm-cache
- yarn config set cache-folder ./.yarn-cache
make setup-js
- name: 'run components unit tests'
run: make -C components test-cov
@@ -69,7 +69,7 @@ jobs:
files: ./coverage/lcov.info
flags: components
- build-components:
+ build-components-storybook:
name: 'build components artifact'
runs-on: 'ubuntu-22.04'
if: github.event_name != 'pull_request'
@@ -78,9 +78,12 @@ jobs:
- uses: 'actions/checkout@v3'
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'install udev for usb-detection'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'cache yarn cache'
uses: actions/cache@v3
with:
@@ -102,11 +105,33 @@ jobs:
with:
name: 'components-artifact'
path: storybook-static
+
+ determine-build-type:
+ runs-on: 'ubuntu-latest'
+ name: 'Determine build type'
+ outputs:
+ type: ${{steps.determine-build-type.outputs.type}}
+ steps:
+ - id: determine-build-type
+ run: |
+ echo "Determining build type for event ${{github.event_type}} and ref ${{github.ref}}"
+ if [ "${{ format('{0}', github.ref == 'refs/heads/edge') }}" = "true" ] ; then
+ echo "storybook s3 builds for edge"
+ echo 'type=storybook' >> $GITHUB_OUTPUT
+ elif [ "${{ format('{0}', startsWith(github.ref, 'refs/tags/components')) }}" = "true" ] ; then
+ echo "publish builds for components tags"
+ echo 'type=publish' >> $GITHUB_OUTPUT
+ else
+ echo "No build for ref ${{github.ref}} and event ${{github.event_type}}"
+ echo 'type=none' >> $GITHUB_OUTPUT
+ fi
+
deploy-components:
- name: 'deploy components artifact to S3'
+ name: 'deploy components storybook artifact to S3'
runs-on: 'ubuntu-22.04'
- needs: ['js-unit-test', 'build-components']
- if: github.event_name != 'pull_request'
+ needs:
+ ['js-unit-test', 'build-components-storybook', 'determine-build-type']
+ if: needs.determine-build-type.outputs.type != 'none'
steps:
- uses: 'actions/checkout@v3'
# https://github.com/actions/checkout/issues/290
@@ -117,7 +142,7 @@ jobs:
git checkout ${{ github.ref }}
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'set complex environment variables'
id: 'set-vars'
uses: actions/github-script@v6
@@ -137,3 +162,52 @@ jobs:
AWS_DEFAULT_REGION: us-east-2
run: |
aws s3 sync ./dist s3://opentrons-components/${{ env.OT_BRANCH}} --acl public-read
+
+ publish-components:
+ name: 'publish components package to npm'
+ runs-on: 'ubuntu-latest'
+ needs: ['js-unit-test', 'determine-build-type']
+ if: needs.determine-build-type.outputs.type == 'publish'
+ steps:
+ - uses: 'actions/checkout@v3'
+ # https://github.com/actions/checkout/issues/290
+ - name: 'Fix actions/checkout odd handling of tags'
+ if: startsWith(github.ref, 'refs/tags')
+ run: |
+ git fetch -f origin ${{ github.ref }}:${{ github.ref }}
+ git checkout ${{ github.ref }}
+ - uses: 'actions/setup-node@v3'
+ with:
+ node-version: '18.19.0'
+ registry-url: 'https://registry.npmjs.org'
+ - name: 'install udev for usb-detection'
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
+ - name: 'setup-js'
+ run: |
+ npm config set cache ./.npm-cache
+ yarn config set cache-folder ./.yarn-cache
+ make setup-js
+ - name: 'build typescript'
+ run: make build-ts
+ - name: 'build library'
+ run: |
+ make -C components lib
+ # replace package.json stub version number with version from tag
+ - name: 'set version number'
+ run: |
+ npm install -g json
+ VERSION_STRING=$(echo ${{ github.ref }} | sed 's/refs\/tags\/components@//')
+ json -I -f ./components/package.json -e "this.version=\"$VERSION_STRING\""
+ json -I -f ./components/package.json -e "this.dependencies['@opentrons/shared-data']=\"$VERSION_STRING\""
+ - uses: 'actions/setup-node@v3'
+ with:
+ node-version: '18.19.0'
+ registry-url: 'https://registry.npmjs.org'
+ - name: 'publish to npm registry'
+ env:
+ NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
+ run: |
+ cd ./components && echo "//registry.npmjs.org/:_authToken=${NODE_AUTH_TOKEN}" > ./.npmrc && npm publish --access public
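
The 'set version number' step above replaces the package.json stub version with the version encoded in the pushed components@ tag before publishing. A hypothetical Node/TypeScript equivalent of that shell and sed step, shown only to make the transformation explicit; the file path and helper name are illustrative, not part of the workflow.

    import { readFileSync, writeFileSync } from 'fs'

    // 'refs/tags/components@1.2.3' -> '1.2.3'
    function versionFromRef(ref: string): string {
      return ref.replace(/^refs\/tags\/components@/, '')
    }

    const version = versionFromRef(process.env.GITHUB_REF ?? '')
    const pkgPath = './components/package.json'
    const pkg = JSON.parse(readFileSync(pkgPath, 'utf8'))
    pkg.version = version
    pkg.dependencies['@opentrons/shared-data'] = version
    writeFileSync(pkgPath, JSON.stringify(pkg, null, 2) + '\n')
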
diff --git a/.github/workflows/docs-build.yaml b/.github/workflows/docs-build.yaml
index b954f7d1433..08b1c2b76cf 100644
--- a/.github/workflows/docs-build.yaml
+++ b/.github/workflows/docs-build.yaml
@@ -51,10 +51,10 @@ jobs:
git checkout ${{ github.ref }}
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: 'actions/setup-python@v3'
with:
- python-version: '3.7'
+ python-version: '3.10'
- uses: './.github/actions/python/setup'
with:
project: 'api'
diff --git a/.github/workflows/g-code-confirm-tests.yaml b/.github/workflows/g-code-confirm-tests.yaml
index 151ae13c3d1..146fa96b9a2 100644
--- a/.github/workflows/g-code-confirm-tests.yaml
+++ b/.github/workflows/g-code-confirm-tests.yaml
@@ -49,7 +49,7 @@ jobs:
node-version: '12'
- uses: 'actions/setup-python@v3'
with:
- python-version: '3.7'
+ python-version: '3.10'
- uses: './.github/actions/python/setup'
with:
project: 'g-code-testing'
diff --git a/.github/workflows/g-code-testing-lint-test.yaml b/.github/workflows/g-code-testing-lint-test.yaml
index 3678e5c4a20..e174bc7ac52 100644
--- a/.github/workflows/g-code-testing-lint-test.yaml
+++ b/.github/workflows/g-code-testing-lint-test.yaml
@@ -1,5 +1,5 @@
# This workflow runs test and lint on branch pushes that touch the
-# notify-server project or its dependencies
+# g-code-testing project or its dependencies
name: 'G-Code Testing Lint & Test'
@@ -46,10 +46,13 @@ jobs:
with:
fetch-depth: 0
- name: 'install udev'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'set complex environment variables'
id: 'set-vars'
uses: actions/github-script@v6
@@ -59,7 +62,7 @@ jobs:
buildComplexEnvVars(core, context)
- uses: 'actions/setup-python@v4'
with:
- python-version: '3.7'
+ python-version: '3.10'
- uses: './.github/actions/python/setup'
with:
project: 'g-code-testing'
diff --git a/.github/workflows/hardware-lint-test.yaml b/.github/workflows/hardware-lint-test.yaml
index 453adba1269..f5e701ea883 100644
--- a/.github/workflows/hardware-lint-test.yaml
+++ b/.github/workflows/hardware-lint-test.yaml
@@ -55,7 +55,7 @@ jobs:
- name: Setup Python
uses: 'actions/setup-python@v4'
with:
- python-version: '3.7'
+ python-version: '3.10'
- name: Setup Hardware Project
uses: './.github/actions/python/setup'
diff --git a/.github/workflows/hardware-testing-protocols.yaml b/.github/workflows/hardware-testing-protocols.yaml
index e962e3811e4..ee59d2dc25c 100644
--- a/.github/workflows/hardware-testing-protocols.yaml
+++ b/.github/workflows/hardware-testing-protocols.yaml
@@ -50,7 +50,7 @@ jobs:
- name: Setup Python
uses: 'actions/setup-python@v4'
with:
- python-version: '3.7'
+ python-version: '3.10'
- name: Setup Project
uses: './.github/actions/python/setup'
diff --git a/.github/workflows/hardware-testing.yaml b/.github/workflows/hardware-testing.yaml
index 3df5507d2b1..6977194ca2a 100644
--- a/.github/workflows/hardware-testing.yaml
+++ b/.github/workflows/hardware-testing.yaml
@@ -52,7 +52,7 @@ jobs:
- name: Setup Python
uses: 'actions/setup-python@v4'
with:
- python-version: '3.7'
+ python-version: '3.10'
- name: Setup Project
uses: './.github/actions/python/setup'
diff --git a/.github/workflows/http-docs-build.yaml b/.github/workflows/http-docs-build.yaml
index e9ad64d75e5..6294eeb2172 100644
--- a/.github/workflows/http-docs-build.yaml
+++ b/.github/workflows/http-docs-build.yaml
@@ -51,10 +51,10 @@ jobs:
git checkout ${{ github.ref }}
- uses: 'actions/setup-python@v3'
with:
- python-version: '3.7'
+ python-version: '3.10'
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: './.github/actions/python/setup'
with:
project: 'robot-server'
diff --git a/.github/workflows/js-check.yaml b/.github/workflows/js-check.yaml
index 53bcaa05781..8a02c1823ba 100644
--- a/.github/workflows/js-check.yaml
+++ b/.github/workflows/js-check.yaml
@@ -45,7 +45,7 @@ jobs:
- uses: 'actions/checkout@v3'
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'set complex environment variables'
id: 'set-vars'
uses: actions/github-script@v6
@@ -54,7 +54,10 @@ jobs:
const { buildComplexEnvVars } = require(`${process.env.GITHUB_WORKSPACE}/.github/workflows/utils.js`)
buildComplexEnvVars(core, context)
- name: 'install libudev for usb-detection'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'cache yarn cache'
uses: actions/cache@v3
with:
@@ -88,4 +91,4 @@ jobs:
if: always() && steps.setup-js.outcome == 'success'
run: make lint-css
- name: 'test scripts'
- run: yarn jest scripts
+ run: yarn vitest scripts
diff --git a/.github/workflows/ll-test-build-deploy.yaml b/.github/workflows/ll-test-build-deploy.yaml
index 75e907af97f..140537593e2 100644
--- a/.github/workflows/ll-test-build-deploy.yaml
+++ b/.github/workflows/ll-test-build-deploy.yaml
@@ -8,15 +8,15 @@ on:
- 'labware-library/**'
- 'shared-data/labware/**'
- 'components/**'
- - 'webpack-config/**'
- 'package.json'
- '.github/workflows/ll-test-build-deploy.yaml'
+ - '.github/actions/webstack/deploy-to-sandbox/**'
+ - '.github/workflows/utils.js'
push:
paths:
- 'labware-library/**'
- 'shared-data/labware/**'
- 'components/**'
- - 'webpack-config/**'
- 'package.json'
- '.github/workflows/ll-test-build-deploy.yaml'
branches:
@@ -45,7 +45,7 @@ jobs:
- uses: 'actions/checkout@v3'
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
# https://github.com/actions/checkout/issues/290
- name: 'Fix actions/checkout odd handling of tags'
if: startsWith(github.ref, 'refs/tags')
@@ -53,7 +53,10 @@ jobs:
git fetch -f origin ${{ github.ref }}:${{ github.ref }}
git checkout ${{ github.ref }}
- name: 'install libudev for usb-detection'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'cache yarn cache'
uses: actions/cache@v3
with:
@@ -91,9 +94,12 @@ jobs:
git checkout ${{ github.ref }}
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'install libudev for usb-detection'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'cache yarn cache'
uses: actions/cache@v3
with:
@@ -116,6 +122,7 @@ jobs:
build-ll:
name: 'build labware library artifact'
needs: ['js-unit-test']
+ timeout-minutes: 30
runs-on: 'ubuntu-20.04'
if: github.event_name != 'pull_request'
steps:
@@ -130,9 +137,12 @@ jobs:
git checkout ${{ github.ref }}
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'install libudev for usb-detection'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'cache yarn cache'
uses: actions/cache@v3
with:
@@ -173,9 +183,12 @@ jobs:
git checkout ${{ github.ref }}
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'install udev for usb-detection'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'set complex environment variables'
id: 'set-vars'
uses: actions/github-script@v6
diff --git a/.github/workflows/notify-server-lint-test.yaml b/.github/workflows/notify-server-lint-test.yaml
deleted file mode 100644
index dde68a9f0fa..00000000000
--- a/.github/workflows/notify-server-lint-test.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-# This workflow runs test and lint on branch pushes that touch the
-# notify-server project or its dependencies
-
-name: 'Notify server lint/test'
-
-on:
- # Most of the time, we run on pull requests, which lets us handle external PRs
- push:
- paths:
- - 'Makefile'
- - 'notify-server/**/*'
- - '.github/workflows/notify-server-lint-test.yaml'
- - 'api/**/*'
- - 'hardware/**/*'
- - 'scripts/**/*.mk'
- - 'scripts/**/*.py'
- - '.github/actions/python/**/*'
- branches:
- - 'edge'
- - 'release'
- - '*hotfix*'
- tags-ignore:
- - '*'
- pull_request:
- paths:
- - 'Makefile'
- - 'notify-server/**/*'
- - 'api/**/*'
- - 'hardware/**/*'
- - 'scripts/**/*.mk'
- - 'scripts/**/*.py'
- - '.github/actions/python/**/*'
- workflow_dispatch:
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-${{ github.ref_name != 'edge' || github.run_id}}-${{ github.ref_type != 'tag' || github.run_id }}
- cancel-in-progress: true
-
-defaults:
- run:
- shell: bash
-
-jobs:
- lint-test:
- name: 'notify server package linting and tests'
- timeout-minutes: 20
- runs-on: 'ubuntu-22.04'
- steps:
- - uses: 'actions/checkout@v3'
- with:
- fetch-depth: 0
- - uses: 'actions/setup-node@v3'
- with:
- node-version: '16'
- - uses: 'actions/setup-python@v4'
- with:
- python-version: '3.7'
-
- - uses: './.github/actions/python/setup'
- with:
- project: 'notify-server'
- - name: Lint
- run: make -C notify-server lint
- - name: Test
- run: make -C notify-server test-cov
- - name: 'Upload coverage report'
- uses: 'codecov/codecov-action@v3'
- with:
- files: ./notify-server/coverage.xml
- flags: notify-server
diff --git a/.github/workflows/opentrons-ai-client-test-build-deploy.yaml b/.github/workflows/opentrons-ai-client-test-build-deploy.yaml
new file mode 100644
index 00000000000..2f569d9bf78
--- /dev/null
+++ b/.github/workflows/opentrons-ai-client-test-build-deploy.yaml
@@ -0,0 +1,81 @@
+# Run tests, build the app, and deploy it cross platform
+
+name: 'OpentronsAI client test, build, and deploy'
+
+# TODO(kk, 04/16/2024): add build and deploy task
+
+on:
+ push:
+ paths:
+ - 'Makefile'
+ - 'opentrons-ai-client/**/*'
+ - 'components/**/*'
+ - '*.js'
+ - '*.json'
+ - 'yarn.lock'
+ - '.github/workflows/opentrons-ai-client-test-build-deploy.yaml'
+ - '.github/workflows/utils.js'
+ branches:
+ - '**'
+ tags:
+ - 'v*'
+ - 'ot3@*'
+ pull_request:
+ paths:
+ - 'Makefile'
+ - 'opentrons-ai-client/**/*'
+ - 'components/**/*'
+ - '*.js'
+ - '*.json'
+ - 'yarn.lock'
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-${{ github.ref_name != 'edge' || github.run_id}}-${{ github.ref_type != 'tag' || github.run_id }}
+ cancel-in-progress: true
+
+env:
+ CI: true
+
+jobs:
+ js-unit-test:
+ runs-on: 'ubuntu-22.04'
+ name: 'opentrons ai frontend unit tests'
+ timeout-minutes: 60
+ steps:
+ - uses: 'actions/checkout@v3'
+ - uses: 'actions/setup-node@v3'
+ with:
+ node-version: '18.19.0'
+ - name: 'install udev'
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
+ - name: 'set complex environment variables'
+ id: 'set-vars'
+ uses: actions/github-script@v6
+ with:
+ script: |
+ const { buildComplexEnvVars } = require(`${process.env.GITHUB_WORKSPACE}/.github/workflows/utils.js`)
+ buildComplexEnvVars(core, context)
+ - name: 'cache yarn cache'
+ uses: actions/cache@v3
+ with:
+ path: |
+ ${{ github.workspace }}/.npm-cache/_prebuild
+ ${{ github.workspace }}/.yarn-cache
+ key: js-${{ secrets.GH_CACHE_VERSION }}-${{ runner.os }}-yarn-${{ hashFiles('yarn.lock') }}
+ - name: 'setup-js'
+ run: |
+ npm config set cache ${{ github.workspace }}/.npm-cache
+ yarn config set cache-folder ${{ github.workspace }}/.yarn-cache
+ make setup-js
+ - name: 'test frontend packages'
+ run: |
+ make -C opentrons-ai-client test-cov
+ - name: 'Upload coverage report'
+ uses: codecov/codecov-action@v3
+ with:
+ files: ./coverage/lcov.info
+ flags: opentrons-ai-client
diff --git a/.github/workflows/pd-test-build-deploy.yaml b/.github/workflows/pd-test-build-deploy.yaml
index 566496257b9..9f23419da94 100644
--- a/.github/workflows/pd-test-build-deploy.yaml
+++ b/.github/workflows/pd-test-build-deploy.yaml
@@ -9,7 +9,6 @@ on:
- 'step-generation/**'
- 'shared-data/**'
- 'components/**'
- - 'webpack-config/**'
- 'package.json'
- '.github/workflows/pd-test-build-deploy.yaml'
push:
@@ -18,7 +17,6 @@ on:
- 'step-generation/**'
- 'shared-data/**'
- 'components/**'
- - 'webpack-config/**'
- 'package.json'
- '.github/workflows/pd-test-build-deploy.yaml'
branches:
@@ -53,9 +51,12 @@ jobs:
git checkout ${{ github.ref }}
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'install udev for usb-detection'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'cache yarn cache'
uses: actions/cache@v2
with:
@@ -98,10 +99,13 @@ jobs:
git checkout ${{ github.ref }}
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'install udev for usb-detection'
if: startsWith(matrix.os, 'ubuntu')
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'cache yarn cache'
uses: actions/cache@v3
with:
@@ -135,9 +139,12 @@ jobs:
git checkout ${{ github.ref }}
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'install udev for usb-detection'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'cache yarn cache'
uses: actions/cache@v3
with:
@@ -145,8 +152,6 @@ jobs:
${{ github.workspace }}/.yarn-cache
${{ github.workspace }}/.npm-cache
key: js-${{ secrets.GH_CACHE_VERSION }}-${{ runner.os }}-yarn-${{ hashFiles('yarn.lock') }}
- restore-keys: |
- js-${{ secrets.GH_CACHE_VERSION }}-${{ runner.os }}-yarn-
- name: 'setup-js'
run: |
npm config set cache ./.npm-cache
@@ -178,9 +183,12 @@ jobs:
git checkout ${{ github.ref }}
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'install udev for usb-detection'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'set complex environment variables'
id: 'set-vars'
uses: actions/github-script@v6
diff --git a/.github/workflows/performance-metrics-test-lint.yaml b/.github/workflows/performance-metrics-test-lint.yaml
new file mode 100644
index 00000000000..e57df828caf
--- /dev/null
+++ b/.github/workflows/performance-metrics-test-lint.yaml
@@ -0,0 +1,54 @@
+# This workflow runs test and lint on pull requests that touch anything in the performance-metrics directory
+
+name: 'performance-metrics test & lint'
+
+on:
+ pull_request:
+ paths:
+ - 'performance-metrics/**'
+ - '.github/workflows/performance-metrics-test-lint.yaml'
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+ cancel-in-progress: true
+
+defaults:
+ run:
+ shell: bash
+
+jobs:
+ lint:
+ name: 'performance-metrics test & lint'
+ timeout-minutes: 5
+ runs-on: 'ubuntu-latest'
+ steps:
+ - name: Checkout opentrons repo
+ uses: 'actions/checkout@v4'
+
+ - name: Setup Python
+ uses: 'actions/setup-python@v5'
+ with:
+ python-version: '3.10'
+ cache: 'pipenv'
+ cache-dependency-path: performance-metrics/Pipfile.lock
+
+ - name: 'Install Python deps'
+ uses: './.github/actions/python/setup'
+ with:
+ project: 'performance-metrics'
+
+ - name: Setup
+ id: install
+ working-directory: ./performance-metrics
+ run: make setup
+
+ - name: Test
+ if: always() && (steps.install.outcome == 'success' || steps.install.outcome == 'skipped')
+ working-directory: ./performance-metrics
+ run: make test
+
+ - name: Lint
+ if: always() && (steps.install.outcome == 'success' || steps.install.outcome == 'skipped')
+ working-directory: ./performance-metrics
+ run: make lint
diff --git a/.github/workflows/react-api-client-test.yaml b/.github/workflows/react-api-client-test.yaml
index d3fc398e7a0..a8f5ed959b2 100644
--- a/.github/workflows/react-api-client-test.yaml
+++ b/.github/workflows/react-api-client-test.yaml
@@ -39,9 +39,12 @@ jobs:
- uses: 'actions/checkout@v3'
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'install libudev for usb-detection'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'cache yarn cache'
uses: actions/cache@v3
with:
diff --git a/.github/workflows/robot-server-lint-test.yaml b/.github/workflows/robot-server-lint-test.yaml
index d199b09aaa2..96d1969121b 100644
--- a/.github/workflows/robot-server-lint-test.yaml
+++ b/.github/workflows/robot-server-lint-test.yaml
@@ -14,7 +14,6 @@ on:
- 'server-utils/**/*'
- '!shared-data/js/**/*'
- 'robot-server/**/*'
- - 'notify-server/**/*'
- 'scripts/**/*.mk'
- 'scripts/**/*.py'
- '.github/workflows/robot-server-lint-test.yaml'
@@ -34,7 +33,6 @@ on:
- 'server-utils/**/*'
- '!shared-data/js/**/*'
- 'robot-server/**/*'
- - 'notify-server/**/*'
- 'scripts/**/*.mk'
- 'scripts/**/*.py'
- '.github/workflows/robot-server-lint-test.yaml'
@@ -63,10 +61,10 @@ jobs:
fetch-depth: 0
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: 'actions/setup-python@v4'
with:
- python-version: '3.7'
+ python-version: '3.10'
- uses: './.github/actions/python/setup'
with:
diff --git a/.github/workflows/server-utils-lint-test.yaml b/.github/workflows/server-utils-lint-test.yaml
index c7039652bf9..240d9e0bd25 100644
--- a/.github/workflows/server-utils-lint-test.yaml
+++ b/.github/workflows/server-utils-lint-test.yaml
@@ -46,10 +46,10 @@ jobs:
fetch-depth: 0
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: 'actions/setup-python@v4'
with:
- python-version: '3.7'
+ python-version: '3.10'
- uses: './.github/actions/python/setup'
with:
@@ -67,10 +67,10 @@ jobs:
fetch-depth: 0
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: 'actions/setup-python@v4'
with:
- python-version: '3.7'
+ python-version: '3.10'
- uses: './.github/actions/python/setup'
with:
project: 'server-utils'
diff --git a/.github/workflows/shared-data-test-lint-deploy.yaml b/.github/workflows/shared-data-test-lint-deploy.yaml
index 690cb3fd8c5..39cc4cd30e4 100644
--- a/.github/workflows/shared-data-test-lint-deploy.yaml
+++ b/.github/workflows/shared-data-test-lint-deploy.yaml
@@ -19,6 +19,8 @@ on:
- '*hotfix*'
tags:
- 'v*'
+ - 'shared-data*'
+ - 'components*'
pull_request:
paths:
- 'Makefile'
@@ -49,10 +51,10 @@ jobs:
fetch-depth: 0
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: 'actions/setup-python@v3'
with:
- python-version: '3.7'
+ python-version: '3.10'
- uses: './.github/actions/python/setup'
with:
@@ -69,10 +71,8 @@ jobs:
os: ['windows-2022', 'ubuntu-22.04', 'macos-latest']
# TODO(mc, 2022-02-24): expand this matrix to 3.8 and 3.9,
# preferably in a nightly cronjob on edge or something
- python: ['3.7', '3.10']
- exclude:
- - os: 'macos-latest'
- python: '3.10'
+ python: ['3.10']
+
runs-on: '${{ matrix.os }}'
steps:
- uses: 'actions/checkout@v3'
@@ -80,10 +80,13 @@ jobs:
fetch-depth: 0
- name: 'install udev for usb-detection'
if: startsWith(matrix.os, 'ubuntu')
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- uses: 'actions/setup-node@v1'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: 'actions/setup-python@v4'
with:
python-version: ${{ matrix.python }}
@@ -115,9 +118,12 @@ jobs:
- uses: 'actions/checkout@v3'
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'install udev'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'cache yarn cache'
uses: actions/cache@v3
with:
@@ -127,7 +133,7 @@ jobs:
key: js-${{ secrets.GH_CACHE_VERSION }}-${{ runner.os }}-yarn-${{ hashFiles('yarn.lock') }}
restore-keys: |
js-${{ secrets.GH_CACHE_VERSION }}-${{ runner.os }}-yarn-
- - name: 'setup-js'
+ - name: 'js deps'
run: |
npm config set cache ./.npm-cache
yarn config set cache-folder ./.yarn-cache
@@ -157,12 +163,15 @@ jobs:
git checkout ${{ github.ref }}
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'install udev for usb-detection'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- uses: 'actions/setup-python@v4'
with:
- python-version: '3.7'
+ python-version: '3.10'
- uses: './.github/actions/python/setup'
with:
project: 'shared-data/python'
@@ -179,11 +188,89 @@ jobs:
with:
project: 'shared-data/python'
repository_url: 'https://test.pypi.org/legacy/'
- password: '${{ secrets.OT_TEST_PYPI_PASSWORD }}'
+ password: '${{ secrets.TEST_PYPI_DEPLOY_TOKEN_OPENTRONS_SHARED_DATA }}'
- if: startsWith(env.OT_TAG, 'v')
name: 'upload to pypi'
uses: './.github/actions/python/pypi-deploy'
with:
project: 'shared-data/python'
repository_url: 'https://upload.pypi.org/legacy/'
- password: '${{ secrets.OT_PYPI_PASSWORD }}'
+ password: '${{ secrets.PYPI_DEPLOY_TOKEN_OPENTRONS_SHARED_DATA }}'
+
+ publish-switch:
+ runs-on: 'ubuntu-latest'
+ name: 'Determine whether or not to publish artifacts'
+ outputs:
+ should_publish: ${{steps.publish-switch.outputs.should_publish}}
+ steps:
+ - id: publish-switch
+ run: |
+ echo "Determining whether to publish artifacts for event ${{github.event_type}} and ref ${{github.ref}}"
+ if [ "${{ format('{0}', startsWith(github.ref, 'refs/tags/shared-data')) }}" = "true" ] ; then
+ echo "Publishing builds for shared-data@ tags"
+ echo 'should_publish=true' >> $GITHUB_OUTPUT
+ elif [ "${{ format('{0}', startsWith(github.ref, 'refs/tags/components')) }}" = "true" ] ; then
+ echo "Publishing builds for components@ tags"
+ echo 'should_publish=true' >> $GITHUB_OUTPUT
+ else
+ echo "No publish for ref ${{github.ref}} and event ${{github.event_type}}"
+ echo 'should_publish=false' >> $GITHUB_OUTPUT
+ fi
+
+ publish-to-npm:
+ name: 'publish shared-data package to npm'
+ runs-on: 'ubuntu-latest'
+ needs: ['js-test', 'publish-switch']
+ if: needs.publish-switch.outputs.should_publish == 'true'
+ steps:
+ - uses: 'actions/checkout@v3'
+ # https://github.com/actions/checkout/issues/290
+ - name: 'Fix actions/checkout odd handling of tags'
+ if: startsWith(github.ref, 'refs/tags')
+ run: |
+ git fetch -f origin ${{ github.ref }}:${{ github.ref }}
+ git checkout ${{ github.ref }}
+ - uses: 'actions/setup-node@v3'
+ with:
+ node-version: '18.19.0'
+ registry-url: 'https://registry.npmjs.org'
+ - name: 'install udev for usb-detection'
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
+ - name: 'cache yarn cache'
+ uses: actions/cache@v3
+ with:
+ path: |
+ ${{ github.workspace }}/.yarn-cache
+ ${{ github.workspace }}/.npm-cache
+ key: js-${{ secrets.GH_CACHE_VERSION }}-${{ runner.os }}-yarn-${{ hashFiles('yarn.lock') }}
+ restore-keys: |
+ js-${{ secrets.GH_CACHE_VERSION }}-${{ runner.os }}-yarn-
+ - name: 'js deps'
+ run: |
+ npm config set cache ./.npm-cache
+ yarn config set cache-folder ./.yarn-cache
+ make setup-js
+ - name: 'build typescript'
+ run: make build-ts
+ - name: 'build library'
+ run: |
+ make -C shared-data lib-js
+ # replace package.json stub version number with version from tag
+ - name: 'set version number'
+ run: |
+ npm install -g json
+ VERSION_STRING=$(echo ${{ github.ref }} | sed -E 's/refs\/tags\/(components|shared-data)@//')
+ json -I -f ./shared-data/package.json -e "this.version=\"$VERSION_STRING\""
+ cd ./shared-data
+ - uses: 'actions/setup-node@v3'
+ with:
+ node-version: '18.19.0'
+ registry-url: 'https://registry.npmjs.org'
+ - name: 'publish to npm registry'
+ env:
+ NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
+ run: |
+ cd ./shared-data && echo "//registry.npmjs.org/:_authToken=${NODE_AUTH_TOKEN}" > ./.npmrc && npm publish --access public
diff --git a/.github/workflows/step-generation-test.yaml b/.github/workflows/step-generation-test.yaml
index 6836e50dd02..7ac65f3997e 100644
--- a/.github/workflows/step-generation-test.yaml
+++ b/.github/workflows/step-generation-test.yaml
@@ -7,14 +7,12 @@ on:
paths:
- 'step-generation/**'
- 'shared-data/**'
- - 'webpack-config/**'
- 'package.json'
- '.github/workflows/step-generation-test.yaml'
push:
paths:
- 'step-generation/**'
- 'shared-data/**'
- - 'webpack-config/**'
- 'package.json'
- '.github/workflows/step-generation-test.yaml'
branches:
@@ -40,9 +38,12 @@ jobs:
- uses: 'actions/checkout@v3'
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'install udev for usb-detection'
- run: sudo apt-get update && sudo apt-get install libudev-dev
+ run: |
+ # WORKAROUND: Remove microsoft debian repo due to https://github.com/microsoft/linux-package-repositories/issues/130. Remove line below after it is resolved
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get update && sudo apt-get install libudev-dev
- name: 'cache yarn cache'
uses: actions/cache@v3
with:
diff --git a/.github/workflows/system-server-lint-test.yaml b/.github/workflows/system-server-lint-test.yaml
index 0fecec47e83..720ca905bd7 100644
--- a/.github/workflows/system-server-lint-test.yaml
+++ b/.github/workflows/system-server-lint-test.yaml
@@ -48,10 +48,10 @@ jobs:
fetch-depth: 0
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: 'actions/setup-python@v4'
with:
- python-version: '3.7'
+ python-version: '3.10'
- uses: './.github/actions/python/setup'
with:
@@ -69,10 +69,10 @@ jobs:
fetch-depth: 0
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: 'actions/setup-python@v4'
with:
- python-version: '3.7'
+ python-version: '3.10'
- uses: './.github/actions/python/setup'
with:
project: 'system-server'
diff --git a/.github/workflows/tag-releases.yaml b/.github/workflows/tag-releases.yaml
index 120c1c462df..864f1e45b36 100644
--- a/.github/workflows/tag-releases.yaml
+++ b/.github/workflows/tag-releases.yaml
@@ -24,7 +24,7 @@ jobs:
fetch-depth: 0
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- name: 'cache yarn cache'
uses: actions/cache@v3
with:
@@ -37,6 +37,7 @@ jobs:
npm config set cache ${{ github.workspace }}/.npm-cache
yarn config set cache-folder ${{ github.workspace }}/.yarn-cache
yarn install
+
- name: 'create release'
run: |
node ./scripts/deploy/create-release.js ${{ github.token }} ${{ github.ref_name }} --deploy
diff --git a/.github/workflows/update-server-lint-test.yaml b/.github/workflows/update-server-lint-test.yaml
index fe908f9f9e6..b4d1435838f 100644
--- a/.github/workflows/update-server-lint-test.yaml
+++ b/.github/workflows/update-server-lint-test.yaml
@@ -46,10 +46,10 @@ jobs:
fetch-depth: 0
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: 'actions/setup-python@v4'
with:
- python-version: '3.7'
+ python-version: '3.10'
- uses: './.github/actions/python/setup'
with:
@@ -67,10 +67,10 @@ jobs:
fetch-depth: 0
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: 'actions/setup-python@v4'
with:
- python-version: '3.7'
+ python-version: '3.10'
- uses: './.github/actions/python/setup'
with:
project: 'update-server'
diff --git a/.github/workflows/usb-bridge-lint-test.yaml b/.github/workflows/usb-bridge-lint-test.yaml
index 1ee0d62c53d..2888291871a 100644
--- a/.github/workflows/usb-bridge-lint-test.yaml
+++ b/.github/workflows/usb-bridge-lint-test.yaml
@@ -46,10 +46,10 @@ jobs:
fetch-depth: 0
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: 'actions/setup-python@v4'
with:
- python-version: '3.7'
+ python-version: '3.10'
- uses: './.github/actions/python/setup'
with:
@@ -67,10 +67,10 @@ jobs:
fetch-depth: 0
- uses: 'actions/setup-node@v3'
with:
- node-version: '16'
+ node-version: '18.19.0'
- uses: 'actions/setup-python@v4'
with:
- python-version: '3.7'
+ python-version: '3.10'
- uses: './.github/actions/python/setup'
with:
project: 'usb-bridge'
diff --git a/.gitignore b/.gitignore
index 084de769a00..24c7debdd80 100755
--- a/.gitignore
+++ b/.gitignore
@@ -126,7 +126,6 @@ calibrations/
api/pyproject.toml
robot-server/pyproject.toml
update-server/pyproject.toml
-notify-server/pyproject.toml
shared-data/python/pyproject.toml
hardware/pyproject.toml
@@ -135,7 +134,6 @@ hardware/pyproject.toml
# file
api/LICENSE
update-server/LICENSE
-notify-server/LICENSE
shared-data/python/LICENSE
shared-data/LICENSE
robot-server/LICENSE
@@ -161,3 +159,5 @@ opentrons-robot-app.tar.gz
# asdf versions file
.tool-versions
+mock_dir
+.eslintcache
diff --git a/api/src/opentrons/commands/__init__.py b/.npmrc
similarity index 100%
rename from api/src/opentrons/commands/__init__.py
rename to .npmrc
diff --git a/.nvmrc b/.nvmrc
index 19c7bdba7b1..3c032078a4a 100644
--- a/.nvmrc
+++ b/.nvmrc
@@ -1 +1 @@
-16
\ No newline at end of file
+18
diff --git a/.storybook/main.js b/.storybook/main.js
index 38a7dd4d638..985486d5d4e 100644
--- a/.storybook/main.js
+++ b/.storybook/main.js
@@ -1,20 +1,22 @@
-'use strict'
-
-const { baseConfig } = require('@opentrons/webpack-config')
-
module.exports = {
- webpackFinal: config => ({
- ...config,
- module: { ...config.module, rules: baseConfig.module.rules },
- plugins: [...config.plugins, ...baseConfig.plugins],
- }),
stories: [
'../components/**/*.stories.@(js|jsx|ts|tsx)',
'../app/**/*.stories.@(js|jsx|ts|tsx)',
+ '../opentrons-ai-client/**/*.stories.@(js|jsx|ts|tsx)',
],
+
addons: [
'@storybook/addon-links',
'@storybook/addon-essentials',
'storybook-addon-pseudo-states',
],
+
+ framework: {
+ name: '@storybook/react-vite',
+ options: {},
+ },
+
+ docs: {
+ autodocs: true,
+ },
}
diff --git a/.storybook/preview.js b/.storybook/preview.js
deleted file mode 100644
index d8537e57827..00000000000
--- a/.storybook/preview.js
+++ /dev/null
@@ -1,36 +0,0 @@
-import React from 'react'
-import { I18nextProvider } from 'react-i18next'
-import { GlobalStyle } from '../app/src/atoms/GlobalStyle'
-import { i18n } from '../app/src/i18n'
-
-export const customViewports = {
- onDeviceDisplay: {
- name: 'Touchscreen',
- type: 'tablet',
- styles: {
- width: '1024px',
- height: '600px',
- },
- },
-}
-
-export const parameters = {
- actions: { argTypesRegex: '^on[A-Z].*' },
- viewport: { viewports: customViewports },
- options: {
- storySort: {
- method: 'alphabetical',
- order: ['Design Tokens', 'Library', 'App', 'ODD'],
- },
- },
-}
-
-// Global decorator to apply the styles to all stories
-export const decorators = [
- Story => (
-
-
-
-
- ),
-]
diff --git a/.storybook/preview.jsx b/.storybook/preview.jsx
new file mode 100644
index 00000000000..32864c9abcb
--- /dev/null
+++ b/.storybook/preview.jsx
@@ -0,0 +1,36 @@
+import React from 'react'
+import { I18nextProvider } from 'react-i18next'
+import { GlobalStyle } from '../app/src/atoms/GlobalStyle'
+import { i18n } from '../app/src/i18n'
+
+export const customViewports = {
+ onDeviceDisplay: {
+ name: 'Touchscreen',
+ type: 'tablet',
+ styles: {
+ width: '1024px',
+ height: '600px',
+ },
+ },
+}
+
+export const parameters = {
+ actions: { argTypesRegex: '^on[A-Z].*' },
+ viewport: { viewports: customViewports },
+ options: {
+ storySort: {
+ method: 'alphabetical',
+ order: ['Design Tokens', 'Library', 'App', 'ODD', 'AI'],
+ },
+ },
+}
+
+// Global decorator to apply the styles to all stories
+export const decorators = [
+ Story => (
+
+
+
+
+ ),
+]
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index e4460fecda9..3c426ab4e14 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -160,7 +160,7 @@ You will need the following tools installed to develop on the Opentrons platform
- git
- curl
- ssh
-- Python v3.7
+- Python v3.10
- Node.js v16
- [Yarn 1][yarn]
@@ -171,7 +171,7 @@ See [DEV_SETUP.md](./DEV_SETUP.md) for our recommended development setup guides
We use:
- [pytest][] to test Python
-- [Jest][jest] to test JavaScript
+- [Vitest][vitest] to test JavaScript
- To run tests in watch mode, you should also install [watchman][]
- [Cypress.io][cypress] for end to end UI testing
@@ -199,7 +199,7 @@ make test-js watch=true
make test-js cover=false
# update snapshot tests
-# https://jestjs.io/docs/en/snapshot-testing
+# https://vitest.dev/guide/snapshot.html
make test-js updateSnapshot=true
```
@@ -217,7 +217,7 @@ make check-js
```
[pytest]: https://docs.pytest.org/en/latest/
-[jest]: https://jestjs.io/
+[vitest]: https://vitest.dev/
[watchman]: https://facebook.github.io/watchman/
[cypress]: https://www.cypress.io/
@@ -291,7 +291,7 @@ JavaScript dependencies are installed by [yarn][]. When calling yarn, you should
A development dependency is any dependency that is used only to help manage the project. Examples of development dependencies would be:
- Build tools (webpack, babel)
-- Testing/linting/checking tools (jest, typescript, eslint)
+- Testing/linting/checking tools (vitest, typescript, eslint)
- Libraries used only in support scripts (aws, express)
To add a development dependency:
diff --git a/Config.in b/Config.in
index d477c20edab..6c607c38e30 100644
--- a/Config.in
+++ b/Config.in
@@ -3,7 +3,6 @@ source "$BR2_EXTERNAL_OPENTRONS_MONOREPO_PATH/api/Config.in"
source "$BR2_EXTERNAL_OPENTRONS_MONOREPO_PATH/update-server/Config.in"
source "$BR2_EXTERNAL_OPENTRONS_MONOREPO_PATH/robot-server/Config.in"
source "$BR2_EXTERNAL_OPENTRONS_MONOREPO_PATH/shared-data/python/Config.in"
-source "$BR2_EXTERNAL_OPENTRONS_MONOREPO_PATH/notify-server/Config.in"
source "$BR2_EXTERNAL_OPENTRONS_MONOREPO_PATH/system-server/Config.in"
source "$BR2_EXTERNAL_OPENTRONS_MONOREPO_PATH/server-utils/Config.in"
-source "$BR2_EXTERNAL_OPENTRONS_MONOREPO_PATH/hardware/Config.in"
\ No newline at end of file
+source "$BR2_EXTERNAL_OPENTRONS_MONOREPO_PATH/hardware/Config.in"
diff --git a/DEV_SETUP.md b/DEV_SETUP.md
index 9cb07992c5a..238f2c7fda3 100644
--- a/DEV_SETUP.md
+++ b/DEV_SETUP.md
@@ -12,8 +12,8 @@ You will need the following tools installed to develop on the Opentrons platform
- git
- curl
- ssh
-- Python v3.7
-- Node.js v16
+- Python v3.10
+- Node.js v18
### macOS
@@ -82,10 +82,10 @@ Close and re-open your terminal to confirm `nvs` is installed.
nvs --version
```
-Now we can use nvs to install Node.js v16 and switch on `auto` mode, which will make sure Node.js v16 is used any time we're in the `opentrons` project directory.
+Now we can use `nvs` to install the currently required Node.js version set in `.nvmrc`. The `auto` command selects the correct version of Node.js any time we're in the `opentrons` project directory. Without `auto`, we would have to manually run `use` or `install` each time we work on the project.
```shell
-nvs add 16
+nvs add 18
nvs auto on
```
@@ -124,10 +124,10 @@ Close and re-open your terminal to verify that `pyenv` is installed
pyenv --version
```
-Now, install the required version of Python. Use the latest available version of `3.7.x`, which is `3.7.15` at the time of writing.
+Now, install the required version of Python. Use the latest available version of `3.10.x`, which is `3.10.13` at the time of writing.
```shell
-pyenv install 3.7.15
+pyenv install 3.10.13
```
If your `pyenv` command isn't working, confirm that your shell is set up properly. If you print out the contents of `~/.zprofile` and `~/.zshrc`, you should see something similar to the following:
@@ -148,7 +148,7 @@ eval "$(pyenv init -)"
# ...
```
-#### 3. Install `jpeg` if on ARM Mac (M1)
+#### 3. Install `jpeg` if on ARM Mac (M1/M2/M3)
`/hardware` depends on the Python library Pillow. On ARM Macs, `pip` will build Pillow from source, which requires [jpeg](https://formulae.brew.sh/formula/jpeg) to be installed.
@@ -198,15 +198,15 @@ cd ./opentrons
Once you are inside the repository for the first time, you should do two things:
1. Confirm that `nvs` selected the proper version of Node.js to use
-2. Tell `pyenv` to use Python 3.7
-3. Run `python --version` to confirm your chosen version. If you get the incorrect version and you're using an Apple silicon Mac, try running `eval "$(pyenv init --path)"` and then `pyenv local 3.7.15`. Then check `python --version` again.
+2. Tell `pyenv` to use Python 3.10
+3. Run `python --version` to confirm your chosen version. If you get the incorrect version and you're using an Apple silicon Mac, try running `eval "$(pyenv init --path)"` and then `pyenv local 3.10.13`. Then check `python --version` again.
```shell
-# confirm Node v16
+# confirm Node v18
node --version
# set Python version, and confirm
-pyenv local 3.7.15
+pyenv local 3.10.13
python --version
```
@@ -216,6 +216,12 @@ Once you've confirmed you're running the correct versions of Node.js and Python,
npm install --global yarn@1
```
+If you are using [Corepack][], you can install `yarn` via `corepack`.
+
+```shell
+corepack enable
+```
+
Finally, you need to download and install all of our various development dependencies. **This step will take several minutes** the first time you run it!
```shell
@@ -236,3 +242,4 @@ Once `make setup` completes, you're ready to start developing! Check out our gen
[yarn]: https://classic.yarnpkg.com/
[pipenv]: https://github.com/pypa/pipenv
[contributing guide]: ./CONTRIBUTING.md
+[corepack]: https://github.com/nodejs/corepack
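A note on the Corepack path added above: if Yarn is managed through Corepack instead of a global npm install, the classic Yarn 1 binary can be pinned explicitly. A minimal sketch, assuming the Corepack bundled with the Node.js 18 toolchain selected by `nvs` (the exact 1.22.x patch version shown is illustrative):

```shell
# enable Corepack's package-manager shims (bundled with Node.js >= 16.10)
corepack enable

# pin and activate classic Yarn 1 (illustrative patch version)
corepack prepare yarn@1.22.19 --activate

# confirm the shim resolves to a 1.x release
yarn --version
```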
diff --git a/Dockerfile b/Dockerfile
index 60bd111736c..6bc38b9bab5 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,7 +1,7 @@
FROM ubuntu as base
ENV TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
-RUN apt-get update && apt-get install --yes python3 pip pkg-config libsystemd-dev
+RUN apt-get update && apt-get install --yes python3 pip pkg-config libsystemd-dev git
FROM base as builder
COPY scripts scripts
@@ -9,21 +9,20 @@ COPY LICENSE LICENSE
COPY shared-data shared-data
+COPY server-utils/setup.py server-utils/setup.py
+COPY server-utils/server_utils server-utils/server_utils
+
COPY api/MANIFEST.in api/MANIFEST.in
COPY api/setup.py api/setup.py
COPY api/pypi-readme.rst api/pypi-readme.rst
COPY api/src/opentrons api/src/opentrons
-COPY notify-server/setup.py notify-server/setup.py
-COPY notify-server/README.rst notify-server/README.rst
-COPY notify-server/notify_server notify-server/notify_server
-
COPY robot-server/setup.py robot-server/setup.py
COPY robot-server/robot_server robot-server/robot_server
RUN cd shared-data/python && python3 setup.py bdist_wheel -d /dist/
+RUN cd server-utils && python3 setup.py bdist_wheel -d /dist/
RUN cd api && python3 setup.py bdist_wheel -d /dist/
-RUN cd notify-server && python3 setup.py bdist_wheel -d /dist/
RUN cd robot-server && python3 setup.py bdist_wheel -d /dist/
FROM base
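To verify the Dockerfile changes locally, a minimal sketch (the image tag is illustrative; per the diff, the builder stage produces wheels for shared-data, server-utils, the api package, and robot-server):

```shell
# build from the repository root; git is now installed in the base image
docker build -t opentrons-robot-server:local .
```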
diff --git a/Makefile b/Makefile
index fa8f42e5b15..c24e2751137 100755
--- a/Makefile
+++ b/Makefile
@@ -13,7 +13,6 @@ COMPONENTS_DIR := components
DISCOVERY_CLIENT_DIR := discovery-client
G_CODE_TESTING_DIR := g-code-testing
LABWARE_LIBRARY_DIR := labware-library
-NOTIFY_SERVER_DIR := notify-server
PROTOCOL_DESIGNER_DIR := protocol-designer
SHARED_DATA_DIR := shared-data
UPDATE_SERVER_DIR := update-server
@@ -26,7 +25,7 @@ HARDWARE_DIR := hardware
USB_BRIDGE_DIR := usb-bridge
NODE_USB_BRIDGE_CLIENT_DIR := usb-bridge/node-client
-PYTHON_DIRS := $(API_DIR) $(UPDATE_SERVER_DIR) $(NOTIFY_SERVER_DIR) $(ROBOT_SERVER_DIR) $(SERVER_UTILS_DIR) $(SHARED_DATA_DIR)/python $(G_CODE_TESTING_DIR) $(HARDWARE_DIR) $(USB_BRIDGE_DIR)
+PYTHON_DIRS := $(API_DIR) $(UPDATE_SERVER_DIR) $(ROBOT_SERVER_DIR) $(SERVER_UTILS_DIR) $(SHARED_DATA_DIR)/python $(G_CODE_TESTING_DIR) $(HARDWARE_DIR) $(USB_BRIDGE_DIR)
# This may be set as an environment variable (and is by CI tasks that upload
# to test pypi) to add a .dev extension to the python package versions. If
@@ -48,26 +47,32 @@ endif
# run at usage (=), not on makefile parse (:=)
# todo(mm, 2021-03-17): Deduplicate with scripts/python.mk.
-usb_host=$(shell yarn run -s discovery find -i 169.254)
+usb_host=$(shell yarn -s discovery find -i 169.254)
# install all project dependencies
.PHONY: setup
setup: setup-js setup-py
+# Both the python and JS setup targets depend on a minimal python setup so they can create
+# virtual envs using pipenv.
+.PHONY: setup-py-toolchain
+setup-py-toolchain:
+ $(OT_PYTHON) -m pip install --upgrade pip
+ $(OT_PYTHON) -m pip install pipenv==2023.12.1
+
# front-end dependecies handled by yarn
.PHONY: setup-js
setup-js:
+setup-js: setup-py-toolchain
yarn config set network-timeout 60000
yarn
$(MAKE) -C $(APP_SHELL_DIR) setup
$(MAKE) -C $(APP_SHELL_ODD_DIR) setup
- $(MAKE) -C $(SHARED_DATA_DIR) setup-js
PYTHON_SETUP_TARGETS := $(addsuffix -py-setup, $(PYTHON_DIRS))
.PHONY: setup-py
-setup-py:
- $(OT_PYTHON) -m pip install pipenv==2021.5.29
+setup-py: setup-py-toolchain
$(MAKE) $(PYTHON_SETUP_TARGETS)
@@ -141,8 +146,6 @@ push:
sleep 1
$(MAKE) -C $(SERVER_UTILS_DIR) push
sleep 1
- $(MAKE) -C $(NOTIFY_SERVER_DIR) push
- sleep 1
$(MAKE) -C $(SYSTEM_SERVER_DIR) push
sleep 1
$(MAKE) -C $(ROBOT_SERVER_DIR) push
@@ -157,7 +160,6 @@ push-ot3:
$(MAKE) -C $(HARDWARE_DIR) push-no-restart-ot3
$(MAKE) -C $(API_DIR) push-no-restart-ot3
$(MAKE) -C $(SERVER_UTILS_DIR) push-ot3
- $(MAKE) -C $(NOTIFY_SERVER_DIR) push-ot3
$(MAKE) -C $(ROBOT_SERVER_DIR) push-ot3
$(MAKE) -C $(SYSTEM_SERVER_DIR) push-ot3
$(MAKE) -C $(UPDATE_SERVER_DIR) push-ot3
@@ -194,7 +196,6 @@ test-py: test-py-windows
$(MAKE) -C $(UPDATE_SERVER_DIR) test
$(MAKE) -C $(ROBOT_SERVER_DIR) test
$(MAKE) -C $(SERVER_UTILS_DIR) test
- $(MAKE) -C $(NOTIFY_SERVER_DIR) test
$(MAKE) -C $(G_CODE_TESTING_DIR) test
$(MAKE) -C $(USB_BRIDGE_DIR) test
@@ -214,10 +215,17 @@ lint-py: $(PYTHON_LINT_TARGETS)
$(MAKE) -C $* lint
.PHONY: lint-js
-lint-js:
- yarn eslint --quiet=$(quiet) ".*.@(js|ts|tsx)" "**/*.@(js|ts|tsx)"
+lint-js: lint-js-eslint lint-js-prettier
+
+.PHONY: lint-js-eslint
+lint-js-eslint:
+ yarn eslint --quiet=$(quiet) --ignore-pattern "node_modules/" --cache ".*.@(js|ts|tsx)" "**/*.@(js|ts|tsx)"
+
+.PHONY: lint-js-prettier
+lint-js-prettier:
yarn prettier --ignore-path .eslintignore --check $(FORMAT_FILE_GLOB)
+
.PHONY: lint-json
lint-json:
yarn eslint --max-warnings 0 --ext .json .
@@ -263,8 +271,8 @@ circular-dependencies-js:
.PHONY: test-js-internal
test-js-internal:
- yarn jest $(tests) $(test_opts) $(cov_opts)
+ yarn vitest $(tests) $(test_opts) $(cov_opts)
.PHONY: test-js-%
test-js-%:
- $(MAKE) test-js-internal tests="$(if $(tests),$(foreach test,$(tests),$*/$(test)),$*)" test_opts="$(test_opts)" cov_opts="$(cov_opts)"
\ No newline at end of file
+ $(MAKE) test-js-internal tests="$(if $(tests),$(foreach test,$(tests),$*/$(test)),$*)" test_opts="$(test_opts)" cov_opts="$(cov_opts)"
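With `lint-js` split into separate ESLint and Prettier targets above, the checks can also be run independently from the repository root. A usage sketch using only the targets defined in this Makefile (the project name in the pattern target is illustrative):

```shell
# cached ESLint pass only
make lint-js-eslint

# Prettier check only
make lint-js-prettier

# both checks, as CI runs them
make lint-js

# run the Vitest suite for a single project via the test-js-% pattern target
make test-js-components
```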
diff --git a/RELEASING.md b/RELEASING.md
index 38629cd6fc8..9aa79245644 100644
--- a/RELEASING.md
+++ b/RELEASING.md
@@ -1,175 +1,152 @@
# Releasing Software (for Opentrons developers)
-Below you will find instructions for release processes for projects within our monorepo. The main goal of our process is to
-neatly document any changes that may happen during QA, such as bug fixes, and separate production concerns from our development branch.
+Below you will find instructions for the release processes for projects within this monorepo.
## Releasing Robot Software Stacks
-The app and API projects are currently versioned together to ensure interoperability.
+### Overview
-1. Ensure you have a release created in GitHub for the robot stack you're releasing - buildroot for ot-2, oe-core for ot-3 - with all the changes you want in this release, if any. If there are no system changes, you don't have to create a new release; the last tag in the system repo is used for release builds.
+The robot release process has 3 main outputs:
-2. Checkout `edge` and make a release branch, without any new changes. The branch name should match `release_*` to make it clear this is a release.
+- Opentrons App
+- OT-2 system package
+- Flex system package
- ```shell
- git checkout edge
- git pull
- git checkout -b release_${version}
- git push --set-upstream origin release_${version}
- ```
+The robot software stack is composed of the following repositories:
-3. Open a PR into `release` for your release branch; this should contain all the changes that were in `edge` and not yet `release`. This PR will stick around for the duration of the release process, as QA-discovered bugs will have their fixes merged to this PR.
+- [opentrons]("https://github.com/Opentrons/opentrons") (this repository)
+- [opentrons_modules]("https://github.com/Opentrons/opentrons-modules") (module firmware)
+- [oe_core]("https://github.com/Opentrons/oe-core") (Flex OS)
+- [ot3_firmware]("https://github.com/Opentrons/ot3-firmware") (Flex firmware)
+- [buildroot]("https://github.com/Opentrons/buildroot") (OT-2 OS)
- Part of what should happen in this branch is soliciting input and changes for the user-facing release notes at `app-shell/build/release-notes.md` for the app and `api/release-notes.md` for the robot software. Any changes should be done in a PR just like a QA bug. You should have final approval before the alpha process concludes.
+```mermaid
+flowchart LR
+ subgraph Shared ["Shared Repositories"]
+ opentrons["Opentrons/opentrons" ]
+ opentrons_modules["Opentrons/opentrons-modules" ]
+ end
-4. Check out and pull your release branch locally and create a tag for a new alpha version (since this is in QA). The alpha version should end with an `-alpha.N` prerelease tag, where `N` goes from 0 up over the course of the QA process. You don't need a PR or a commit to create a new version; the presence of the tag is all that you need. Let's call the alpha version you're about to create `${alphaVersion}`:
+ subgraph Flex ["Flex Only"]
+ oe_core["Opentrons/oe-core"]
+ ot3_firmware["Opentrons/ot3-firmware" ]
+ end
- ```shell
- git checkout release_${version}
- git pull
- git tag -a v${alphaVersion} -m 'chore(release): ${alphaVersion}'
- ```
+ subgraph OT2 ["OT-2 Only"]
+ buildroot["Opentrons/buildroot" ]
+ end
-5. Review the tag with `git show v${alphaVersion}`. Double check that the commit displayed is the one you want - it should probably be the latest commit in your release branch, and you should double check that with the Github web UI. If the tag looks good, push it - this starts the build process. This is a release candidate that will undergo QA.
+ OT2Build["OT-2 System Package"]
+ opentrons --> OT2Build
+ buildroot --> OT2Build
- ```shell
- git push origin v${alphaVersion}
- ```
+ App["Opentrons App"]
+ opentrons --> App
- Changelogs for the release are automatically generated when the tag is pushed and sent to the release page in github.
+ FlexBuild["Flex System Package"]
+ opentrons --> FlexBuild
+ oe_core --> FlexBuild
+ ot3_firmware --> FlexBuild
+ opentrons_modules --> OT2Build
+ opentrons_modules --> FlexBuild
+```
-6. Run QA on this release. If issues are found, create PRs targeted on the release branch. To create new alpha releases, repeat steps 4-6.
+These are all versioned and released together. These assets are produced in 2 possible channels:
-7. Once QA is a pass, do a final check that the release notes are good and wordsmithed, and then do a NORMAL MERGE into `release`. Do NOT squash or rebase; do NOT yet push a tag. This should be done from your local command line (and will succeed as long as the release PR is reviewed and status checks have passed):
+- Release (External facing releases - stable, beta, alpha)
+- Internal Release (Internal facing releases - stable, beta, alpha)
- ```shell
- # note: make sure you have pulled the latest changes for branch
- # release_${version} locally before merging into release
- git checkout release_${version}
- git pull
- git checkout release
- git pull
+> [!TIP]
+> Using `git config remote.origin.tagOpt --tags` ensures that when you fetch and pull, you get all the tags from the origin remote.
- git merge --ff-only release_${version}
- git push origin release
- ```
+### Steps to release the changes in `edge`
-8. Make a tag for the release. This tag will have the actual target release version, no alpha prerelease tags involved. It should be the same as the `${version}` part of your release branch:
+1. Checkout `edge` and make a chore release branch, without any new changes. The branch name should match `chore_release-${version}`.
```shell
- git tag -a v${version} -m 'chore(release): ${version}'
- git show v${version}
- ```
-
- The `git show` should reveal that the tag is on what was, pre-merge, the last commit of your release branch and is, post-merge, the last commit of `release`. You should double-check this with the github web UI.
-
- Once the tag looks good, you can push it:
-
- ```shell
- git push origin v${version}
+ git switch edge
+ git pull
+ git switch -c chore_release-${version}
+ git push --set-upstream origin chore_release-${version}
```
- The tag push will kick off release builds and deploy the results to customers. It will also create a release page where those builds and automatically generated in-depth changelogs will be posted.
-
-9. Ensure all deploy jobs succeeded:
-
- - The Opentrons App should be prompting people to update to the new version.
- - https://pypi.org/project/opentrons/ should be showing the new version.
-
-10. Release the Python Protocol API docs for this version (see below under Releasing Web Projects).
-
-11. Update the download links on https://opentrons.com/ot-app/. That page is defined in an Opentrons private repository.
-
-12. Open a PR of `release` into `edge`. Give the PR a name like `chore(release): Merge changes from ${version} into edge`. Once it passes, on the command line merge it into `edge`:
-
- ```shell
- git checkout edge
- git pull
- git merge --no-ff release
- ```
-
-13. Use the PR title for the merge commit title. You can then `git push origin edge`, which will succeed as long as the PR is approved and status checks pass.
-
-## Releasing Robot Software Stack Hotfixes
-
-1. Ensure you have a system release created in GitHub (buildroot for OT2, oe-core for OT3) with all the changes you want to see, if any. If there aren't any, you don't have to create a new release; by default, the last tag is used for release builds.
+2. Open a PR targeting `release` from `chore_release-${version}`; this should contain all the changes that were in `edge` and not yet in `release`. This PR will not be merged in GitHub. Apply the `DO NOT MERGE` label. When we are ready, approval and passing checks on this PR allow us to bypass the branch protection on `release` that prevents direct pushes. Step 8 resolves this PR.
-2. Checkout `release` and make a release branch, without any new changes. The branch name should be `hotfix_${version}` to make it clear this is a hotfix.
+3. Evaluate changes in our dependent repositories. If there have been changes to `opentrons-modules`, `oe-core`, `ot3-firmware`, or `buildroot`, ensure that the changes are in the correct branches. Tags will need to be pushed to repositories with changes. Further exact tagging instructions for each of the repositories are TODO.
- ```shell
- git checkout release
- git pull
- git checkout -b hotfix_${version}
- git push --set-upstream origin hotfix_${version}
- ```
+4. Check out and pull `chore_release-${version}` locally. Create a tag for a new alpha version. The alpha versions end with an `-alpha.N` prerelease tag, where `N` increments by 1 from 0 over the course of the QA process. You don't need a PR or a commit to create a new version. Pushing tags in the formats prescribed here is what triggers the release process. Let's call the alpha version you're about to create `${alphaVersion}`:
-3. Target the hotfix PRs on this branch.
+> [!IMPORTANT]
+> Use an annotated tag (`-a`) with a message (`-m`) for all tags.
-4. Wordsmith the release notes in `app-shell/build/release-notes.md` and `api/release-notes.md` in a PR that uses the `chore` commit type.
+```shell
+git switch chore_release-${version}
+git pull
+git tag -a v${alphaVersion} -m 'chore(release): ${alphaVersion}'
+```
-5. Once the fixes and release notes have been merged into the hotfix branch, bump to an alpha version to begin qa by creating and pushing a tag. Let's call the new alpha version `${alphaVersion}`:
+5. Review the tag with `git log v${alphaVersion} --oneline -n10`. Double-check that the commit displayed is the one you want - it should probably be the latest commit in your release branch - and verify it in the GitHub web UI. If the tag looks good, push it - this starts the build process. This is a release candidate that will undergo QA. Changelogs for the release are automatically generated when the tag is pushed and posted to the release page on GitHub.
```shell
- git checkout hotfix_${version}
- git pull
- git tag -a v${alphaVersion} -m 'chore(release): ${alphaVersion}'
- git show v${alphaVersion}
+ git push origin v${alphaVersion}
```
-6. Inspect the created tag and then push it:
+6. Run QA on this release. If issues are found, create PRs targeting `chore_release-${version}`. To create a new alpha release, repeat steps 4-6.
- ```shell
- git show v${alphaVersion}
- ```
+7. Once QA is complete, do a final check that the release notes are finished and proofread.
- The `git show` command should reveal that the tag points to the latest commit of the hotfix branch. You should verify this with the github web UI.
+8. We are ready to `git merge --ff-only` `chore_release-${version}` into `release`.
- ```shell
- git push v${alphaVersion}
- ```
-
-7. QA the release build. If there are problems discovered, do normal PR processes to merge the further changes into the hotfix branch. Once issues are fixed, repeat steps 5-7 with a new alpha version.
+> [!CAUTION]
+> Do **NOT** squash or rebase.
+> Do **NOT** push a tag yet.
-8. Once QA is a pass, do a NORMAL MERGE into `release`. Do NOT squash or rebase. This should be done from your local command line (and will succeed as long as the release PR is reviewed and status checks have passed):
+This should be done from your local command line. Here we make use of the PR in step 2 to bypass the branch protection on `release`. The PR checks must be passing and the PR must have approval:
- ```shell
- # note: make sure you have pulled the latest changes for branch
- # release_${version} locally before merging into release
- git checkout hotfix_${version}
- git pull
- git checkout release
- git pull
- git merge --ff-only release_${version}
- git push origin release
- ```
+```shell
+git switch chore_release-${version}
+git pull
+git checkout release
+git pull
+# now do the merge
+git merge --ff-only chore_release-${version}
+git push origin release
+```
-9. Tag the release with its full target version, which we'll call `${version}` since it's no longer an alpha:
+9. Make a tag for the release. This tag will have the actual target release version, no alpha prerelease tags involved. It should be the same as the `${version}` part of your release branch:
```shell
git tag -a v${version} -m 'chore(release): ${version}'
- git show v${version}
+ git log v${version} --oneline -n10
```
- The `git show` command should reveal that the tag points to the most recent commit of the `release` branch, which should be the most recent commit on the hotfix branch you just merged. You should verify this with the Github web UI.
+ The `git log` should reveal that the tag is on what was, pre-merge, the last commit of your release branch and is, post-merge, the last commit of `release`. You should double-check this with the github web UI.
- Once the tag looks good, push it:
+ Once the tag looks good, you can push it. The tag push will kick off release builds and deploy the results to customers. It will also create a release page where those builds and automatically generated in-depth changelogs will be posted.
```shell
git push origin v${version}
```
- Pushing the tag will create release builds and a github release page with the in-depth changelogs.
+10. Ensure package deployments succeed by validating the version in our release dockets. The examples below are for the Release channel. The Internal Release channel looks a little different but is similar and documented elsewhere.
-10. Ensure all deploy jobs succeeded:
+- Flex
+- OT-2
+- App Stable
+ - Windows
+ -
+ -
+- App Alpha
+ - Windows
+ -
+ -
+- Python `opentrons` package
+- Python `opentrons-shared-data` package
+- The Opentrons App should be prompting people to update to the new version given their current channel.
- - The Opentrons App should be prompting people to update to the new version.
- - https://pypi.org/project/opentrons/ should be showing the new version.
+11. Release the Python Protocol API docs for this version (see below under Releasing Web Projects).
-11. Update the download links on https://opentrons.com/ot-app/. That page is defined in an Opentrons private repository.
-
-12. Release the Python Protocol API docs for this version (see below under Releasing Web Projects)
-
-13. Open a PR of `release` into `edge`. Give the PR a name like `chore(release): Merge changes from ${version} into edge`. Once it passes, on the command line merge it into `edge`:
+12. Open a PR of `release` into `edge`. Give the PR a name like `chore(release): Merge changes from ${version} into edge`. Once it passes and has approval, on the command line merge it into `edge`:
```shell
git checkout edge
@@ -177,11 +154,17 @@ The app and API projects are currently versioned together to ensure interoperabi
git merge --no-ff release
```
-14. Use the PR title for the merge commit title. You can then `git push origin edge`, which will succeed as long as the PR is approved and status checks pass.
+13. Use the PR title for the merge commit title. You can then `git push origin edge`, which will succeed as long as the PR is approved and status checks pass.
+
+## Releasing Robot Software Stack Isolated Changes
+
+If critical bugfixes or isolated features need to be released, the process is the same as above, but the `chore_release-${version}` branch is not created from `edge`. We would likely base the `chore_release-${version}` branch on `release`, then create bug fix PRs targeting `chore_release-${version}`, cherry-pick commits, and/or merge in a feature branch.
### tag usage
-We specify the version of a release artifact through a specifically-formatted git tag. We consider our monorepo to support several projects: robot stack, ot3, protocol-designer, etc. Tags look like this:
+We specify the version of a release artifact through a specifically-formatted git tag. We consider our monorepo to support several projects: robot stack, ot3, protocol-designer, etc.
+
+#### Tags look like this:
```shell
${projectPrefix}${projectVersion}
@@ -189,9 +172,11 @@ ${projectPrefix}${projectVersion}
`${projectPrefix}` is the project name plus `@` for everything but robot stack, where it is `v`.
-For instance, the tag for 6.2.1-alpha.3 of the robot stack is `v6.2.1-alpha.3`.
-The tag for 4.0.0 of protocol designer is `protocol-designer@4.0.0`.
-The tag for 0.1.2-beta.1 of ot3 is `ot3@0.1.2-beta.1`.
+##### Examples
+
+- the tag for 6.2.1-alpha.3 of the robot stack is `v6.2.1-alpha.3`
+- the tag for 0.1.2-beta.1 of an internal release of the robot stack is `ot3@0.1.2-beta.1`
+- the tag for 4.0.0 of protocol designer is `protocol-designer@4.0.0`
Versions follow [semver.inc][semver-inc]. QA is done on alpha builds, and only alpha tags should be pushed until you're ready to release the project.
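As a concrete sketch of the tag format above, cutting a hypothetical 4.0.0 release of protocol designer (the version is illustrative) follows the same annotated-tag flow described for the robot stack:

```shell
# annotated tag in the ${projectPrefix}${projectVersion} format
git tag -a protocol-designer@4.0.0 -m 'chore(release): protocol-designer 4.0.0'

# inspect before pushing; pushing the tag is what triggers the release build
git log protocol-designer@4.0.0 --oneline -n10
git push origin protocol-designer@4.0.0
```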
diff --git a/__mocks__/electron-store.js b/__mocks__/electron-store.js
index 84ed5f7b822..e4a3ed72bf2 100644
--- a/__mocks__/electron-store.js
+++ b/__mocks__/electron-store.js
@@ -1,6 +1,26 @@
// mock electron-store
'use strict'
+import { vi } from 'vitest'
-module.exports = jest.createMockFromModule(
- '../app-shell/node_modules/electron-store'
-)
+// By default this mocks the config dir. If you need other behavior, you can
+// override this mock (see app-shell/src/__tests__/discovery.test.ts for an example)
+const Store = vi.fn(function () {
+ this.store = vi.fn(() => {
+ return {}
+ })
+ this.get = vi.fn(property => {
+ return {}
+ })
+ this.onDidChange = vi.fn()
+})
+
+// eslint-disable-next-line import/no-default-export
+export default Store
+
+// const Store = vi.fn(function () {
+// this.store = vi.fn(() => migrate(DEFAULTS_V12))
+// this.get = vi.fn(property => {
+// return this.store()[property]
+// })
+// this.onDidChange = vi.fn()
+// })
diff --git a/__mocks__/electron-updater.js b/__mocks__/electron-updater.js
index d5b9fdac857..4eec2944593 100644
--- a/__mocks__/electron-updater.js
+++ b/__mocks__/electron-updater.js
@@ -1,6 +1,6 @@
// mock electron-updater
'use strict'
-
+import { vi } from 'vitest'
const EventEmitter = require('events')
const autoUpdater = new EventEmitter()
@@ -13,12 +13,12 @@ module.exports.__mockReset = () => {
currentVersion: { version: '0.0.0-mock' },
channel: null,
- checkForUpdates: jest.fn(),
- checkForUpdatesAndNotify: jest.fn(),
- downloadUpdate: jest.fn(),
- getFeedURL: jest.fn(),
- setFeedURL: jest.fn(),
- quitAndInstall: jest.fn(),
+ checkForUpdates: vi.fn(),
+ checkForUpdatesAndNotify: vi.fn(),
+ downloadUpdate: vi.fn(),
+ getFeedURL: vi.fn(),
+ setFeedURL: vi.fn(),
+ quitAndInstall: vi.fn(),
})
}
diff --git a/__mocks__/electron.js b/__mocks__/electron.js
index 31d7bcec3e0..66159d8e654 100644
--- a/__mocks__/electron.js
+++ b/__mocks__/electron.js
@@ -1,24 +1,25 @@
// mock electron module
-'use strict'
+// 'use strict'
+import { vi } from 'vitest'
module.exports = {
app: {
getPath: () => '__mock-app-path__',
- once: jest.fn(),
+ once: vi.fn(),
},
ipcRenderer: {
- on: jest.fn(),
- send: jest.fn(),
+ on: vi.fn(),
+ send: vi.fn(),
},
dialog: {
// https://electronjs.org/docs/api/dialog#dialogshowopendialogbrowserwindow-options
- showOpenDialog: jest.fn(),
+ showOpenDialog: vi.fn(),
},
shell: {
- trashItem: jest.fn(),
- openPath: jest.fn(),
+ trashItem: vi.fn(),
+ openPath: vi.fn(),
},
}
diff --git a/abr-testing/.flake8 b/abr-testing/.flake8
new file mode 100644
index 00000000000..cc618b04ba2
--- /dev/null
+++ b/abr-testing/.flake8
@@ -0,0 +1,24 @@
+[flake8]
+
+# set line-length for future black support
+# https://github.com/psf/black/blob/master/docs/compatible_configs.md
+max-line-length = 100
+
+# max cyclomatic complexity
+# NOTE: (andy s) increasing this from 9 to 15 b/c test scripts often handle all logic in main
+max-complexity = 15
+
+extend-ignore =
+ # ignore E203 because black might reformat it
+ E203,
+ # do not require type annotations for self nor cls
+ ANN101,
+ ANN102
+
+# configure flake8-docstrings
+# https://pypi.org/project/flake8-docstrings/
+docstring-convention = google
+
+noqa-require-code = true
+
+# per-file-ignores =
diff --git a/abr-testing/Makefile b/abr-testing/Makefile
new file mode 100644
index 00000000000..d9ec6bdbb31
--- /dev/null
+++ b/abr-testing/Makefile
@@ -0,0 +1,80 @@
+include ../scripts/push.mk
+include ../scripts/python.mk
+
+SHX := npx shx
+
+ot_project := $(OPENTRONS_PROJECT)
+project_rs_default = $(if $(ot_project),$(ot_project),robot-stack)
+project_ir_default = $(if $(ot_project),$(ot_project),ot3)
+
+package_name = abr_testing
+package_version = $(call python_package_version,abr-testing,$(project_rs_default))
+wheel_file = dist/$(call python_get_wheelname,abr-testing,$(project_rs_default),$(package_name),$(BUILD_NUMBER))
+sdist_file = dist/$(call python_get_sdistname,abr-testing,$(project_rs_default),$(package_name))
+
+tests ?= tests
+test_opts ?=
+
+# Host key location for robot
+ssh_key ?= $(default_ssh_key)
+# Other SSH args for robot
+ssh_opts ?= $(default_ssh_opts)
+# Helper to safely bundle ssh options
+ssh_helper = $(if $(ssh_key),-i $(ssh_key)) $(ssh_opts)
+ssh_helper_ot3 = $(ssh_helper) -o HostkeyAlgorithms=+ssh-rsa -o PubkeyAcceptedAlgorithms=+ssh-rsa
+
+# Source discovery
+# For the python sources
+ot_py_sources := $(filter %.py,$(shell $(SHX) find abr_testing/))
+ot_sources := $(ot_py_sources)
+
+# Defined separately from the clean target so the wheel file doesn't have to
+# depend on a PHONY target
+clean_cmd = $(SHX) rm -rf build dist .coverage coverage.xml '*.egg-info' '**/__pycache__' '**/*.pyc' '**/.mypy_cache'
+
+
+.PHONY: all
+all: clean sdist wheel
+
+.PHONY: setup
+setup:
+ $(pipenv) sync $(pipenv_opts)
+ $(pipenv) run pip freeze
+
+.PHONY: teardown
+teardown:
+ $(pipenv) --rm
+
+.PHONY: clean
+clean:
+ $(clean_cmd)
+
+.PHONY: wheel
+wheel: export OPENTRONS_PROJECT=$(project_rs_default)
+wheel:
+ rm -rf dist/*.whl
+ $(python) setup.py $(wheel_opts) bdist_wheel
+ $(SHX) rm -rf build
+ $(SHX) ls dist
+
+.PHONY: sdist
+sdist: export OPENTRONS_PROJECT=$(project_rs_default)
+sdist:
+ $(clean_cmd)
+ $(python) setup.py sdist
+ $(SHX) rm -rf build
+ $(SHX) ls dist
+
+.PHONY: lint
+lint:
+ $(python) -m mypy abr_testing tests
+ $(python) -m black --check abr_testing tests setup.py
+ $(python) -m flake8 abr_testing tests setup.py
+
+.PHONY: format
+format:
+ $(python) -m black abr_testing tests setup.py
+
+.PHONY: test
+test:
+ @echo "No tests yet"
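A usage sketch for the new `abr-testing` package's targets, run from the monorepo root (these are the targets defined in the Makefile above; `setup` assumes pipenv is already installed, for example via the repository's top-level `make setup`):

```shell
# create the project's virtual environment with pipenv
make -C abr-testing setup

# mypy, black --check, and flake8, as defined in the lint target
make -C abr-testing lint

# build the wheel and sdist into abr-testing/dist/
make -C abr-testing
```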
diff --git a/abr-testing/Pipfile b/abr-testing/Pipfile
new file mode 100644
index 00000000000..3d27109a634
--- /dev/null
+++ b/abr-testing/Pipfile
@@ -0,0 +1,34 @@
+[[source]]
+url = "https://pypi.org/simple"
+verify_ssl = true
+name = "pypi"
+
+[packages]
+abr-testing = { editable = true, path = "." }
+google-api-python-client = "==2.41.0"
+httplib2 = "==0.22.0"
+types-httplib2 = "*"
+oauth2client = "==4.1.3"
+gspread = "==6.0.2"
+hardware-testing = {editable = true, path = "../hardware-testing"}
+opentrons-shared-data = {editable = true, path = "./../shared-data/python"}
+opentrons-hardware = {editable = true, path = "./../hardware", extras=['FLEX']}
+opentrons = {editable = true, path = "./../api", extras=['flex-hardware']}
+
+[dev-packages]
+atomicwrites = "==1.4.1"
+colorama = "==0.4.4"
+pytest = "==7.1.1"
+pytest-cov = "==2.10.1"
+mypy = "==1.8.0"
+black = "==22.3.0"
+flake8 = "~=3.9.0"
+flake8-annotations = "~=2.6.2"
+flake8-docstrings = "~=1.6.0"
+flake8-noqa = "~=1.2.1"
+requests = "==2.27.1"
+types-requests = "==2.25.6"
+google-api-python-client-stubs = "*"
+
+[requires]
+python_version = "3.10"
diff --git a/abr-testing/Pipfile.lock b/abr-testing/Pipfile.lock
new file mode 100644
index 00000000000..08de33841dc
--- /dev/null
+++ b/abr-testing/Pipfile.lock
@@ -0,0 +1,1265 @@
+{
+ "_meta": {
+ "hash": {
+ "sha256": "fadb441a49bb18c215a9ba514e2cb28ebf29db0cdb7e7239462ae71f3599ff25"
+ },
+ "pipfile-spec": 6,
+ "requires": {
+ "python_version": "3.10"
+ },
+ "sources": [
+ {
+ "name": "pypi",
+ "url": "https://pypi.org/simple",
+ "verify_ssl": true
+ }
+ ]
+ },
+ "default": {
+ "abr-testing": {
+ "editable": true,
+ "path": "."
+ },
+ "aionotify": {
+ "hashes": [
+ "sha256:385e1becfaac2d9f4326673033d53912ef9565b6febdedbec593ee966df392c6",
+ "sha256:64b702ad0eb115034533f9f62730a9253b79f5ff0fbf3d100c392124cdf12507"
+ ],
+ "version": "==0.2.0"
+ },
+ "anyio": {
+ "hashes": [
+ "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780",
+ "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==3.7.1"
+ },
+ "attrs": {
+ "hashes": [
+ "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30",
+ "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==23.2.0"
+ },
+ "cachetools": {
+ "hashes": [
+ "sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945",
+ "sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==5.3.3"
+ },
+ "certifi": {
+ "hashes": [
+ "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f",
+ "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==2024.2.2"
+ },
+ "charset-normalizer": {
+ "hashes": [
+ "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027",
+ "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087",
+ "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786",
+ "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8",
+ "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09",
+ "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185",
+ "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574",
+ "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e",
+ "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519",
+ "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898",
+ "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269",
+ "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3",
+ "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f",
+ "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6",
+ "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8",
+ "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a",
+ "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73",
+ "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc",
+ "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714",
+ "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2",
+ "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc",
+ "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce",
+ "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d",
+ "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e",
+ "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6",
+ "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269",
+ "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96",
+ "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d",
+ "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a",
+ "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4",
+ "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77",
+ "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d",
+ "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0",
+ "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed",
+ "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068",
+ "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac",
+ "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25",
+ "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8",
+ "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab",
+ "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26",
+ "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2",
+ "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db",
+ "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f",
+ "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5",
+ "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99",
+ "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c",
+ "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d",
+ "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811",
+ "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa",
+ "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a",
+ "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03",
+ "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b",
+ "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04",
+ "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c",
+ "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001",
+ "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458",
+ "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389",
+ "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99",
+ "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985",
+ "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537",
+ "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238",
+ "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f",
+ "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d",
+ "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796",
+ "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a",
+ "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143",
+ "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8",
+ "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c",
+ "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5",
+ "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5",
+ "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711",
+ "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4",
+ "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6",
+ "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c",
+ "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7",
+ "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4",
+ "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b",
+ "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae",
+ "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12",
+ "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c",
+ "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae",
+ "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8",
+ "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887",
+ "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b",
+ "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4",
+ "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f",
+ "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5",
+ "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33",
+ "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519",
+ "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"
+ ],
+ "markers": "python_full_version >= '3.7.0'",
+ "version": "==3.3.2"
+ },
+ "click": {
+ "hashes": [
+ "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28",
+ "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==8.1.7"
+ },
+ "colorama": {
+ "hashes": [
+ "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44",
+ "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"
+ ],
+ "markers": "platform_system == 'Windows'",
+ "version": "==0.4.6"
+ },
+ "exceptiongroup": {
+ "hashes": [
+ "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14",
+ "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"
+ ],
+ "markers": "python_version < '3.11'",
+ "version": "==1.2.0"
+ },
+ "google-api-core": {
+ "hashes": [
+ "sha256:5a63aa102e0049abe85b5b88cb9409234c1f70afcda21ce1e40b285b9629c1d6",
+ "sha256:62d97417bfc674d6cef251e5c4d639a9655e00c45528c4364fbfebb478ce72a9"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.18.0"
+ },
+ "google-api-python-client": {
+ "hashes": [
+ "sha256:ce25fc21cf0649a1cbf42583e78d5fd7648ff2700e7b89b945209149ba913adc",
+ "sha256:facbe8e25ea9d07241299bf7704f53dec154ad3dc52fec2ea23ca6d6e5f6b392"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.6'",
+ "version": "==2.41.0"
+ },
+ "google-auth": {
+ "hashes": [
+ "sha256:672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360",
+ "sha256:d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.29.0"
+ },
+ "google-auth-httplib2": {
+ "hashes": [
+ "sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05",
+ "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d"
+ ],
+ "version": "==0.2.0"
+ },
+ "google-auth-oauthlib": {
+ "hashes": [
+ "sha256:292d2d3783349f2b0734a0a0207b1e1e322ac193c2c09d8f7c613fb7cc501ea8",
+ "sha256:297c1ce4cb13a99b5834c74a1fe03252e1e499716718b190f56bcb9c4abc4faf"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==1.2.0"
+ },
+ "googleapis-common-protos": {
+ "hashes": [
+ "sha256:17ad01b11d5f1d0171c06d3ba5c04c54474e883b66b949722b4938ee2694ef4e",
+ "sha256:ae45f75702f7c08b541f750854a678bd8f534a1a6bace6afe975f1d0a82d6632"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==1.63.0"
+ },
+ "gspread": {
+ "hashes": [
+ "sha256:0238ba43f3bd45e7fa96fd206e9ceb73b03c2896eb143d7f4373c6d0cfe6fddf",
+ "sha256:0982beeb07fa3ec4482a3aaa96ca13a1e6b427a0aca4058beab4cdc33c0cbb64"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.8'",
+ "version": "==6.0.2"
+ },
+ "hardware-testing": {
+ "editable": true,
+ "path": "../hardware-testing"
+ },
+ "httplib2": {
+ "hashes": [
+ "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc",
+ "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
+ "version": "==0.22.0"
+ },
+ "idna": {
+ "hashes": [
+ "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca",
+ "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"
+ ],
+ "markers": "python_version >= '3.5'",
+ "version": "==3.6"
+ },
+ "jsonschema": {
+ "hashes": [
+ "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d",
+ "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==4.17.3"
+ },
+ "numpy": {
+ "hashes": [
+ "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b",
+ "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818",
+ "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20",
+ "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0",
+ "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010",
+ "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a",
+ "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea",
+ "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c",
+ "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71",
+ "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110",
+ "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be",
+ "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a",
+ "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a",
+ "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5",
+ "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed",
+ "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd",
+ "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c",
+ "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e",
+ "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0",
+ "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c",
+ "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a",
+ "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b",
+ "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0",
+ "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6",
+ "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2",
+ "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a",
+ "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30",
+ "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218",
+ "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5",
+ "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07",
+ "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2",
+ "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4",
+ "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764",
+ "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef",
+ "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3",
+ "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"
+ ],
+ "markers": "python_version >= '3.9'",
+ "version": "==1.26.4"
+ },
+ "oauth2client": {
+ "hashes": [
+ "sha256:b8a81cc5d60e2d364f0b1b98f958dbd472887acaf1a5b05e21c28c31a2d6d3ac",
+ "sha256:d486741e451287f69568a4d26d70d9acd73a2bbfa275746c535b4209891cccc6"
+ ],
+ "index": "pypi",
+ "version": "==4.1.3"
+ },
+ "oauthlib": {
+ "hashes": [
+ "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca",
+ "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==3.2.2"
+ },
+ "opentrons": {
+ "editable": true,
+ "extras": [
+ "flex-hardware"
+ ],
+ "markers": "python_version >= '3.8'",
+ "path": "./../api"
+ },
+ "opentrons-hardware": {
+ "editable": true,
+ "extras": [
+ "FLEX"
+ ],
+ "path": "./../hardware"
+ },
+ "opentrons-shared-data": {
+ "editable": true,
+ "markers": "python_version >= '3.8'",
+ "path": "./../shared-data/python"
+ },
+ "packaging": {
+ "hashes": [
+ "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5",
+ "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==24.0"
+ },
+ "proto-plus": {
+ "hashes": [
+ "sha256:89075171ef11988b3fa157f5dbd8b9cf09d65fffee97e29ce403cd8defba19d2",
+ "sha256:a829c79e619e1cf632de091013a4173deed13a55f326ef84f05af6f50ff4c82c"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==1.23.0"
+ },
+ "protobuf": {
+ "hashes": [
+ "sha256:19b270aeaa0099f16d3ca02628546b8baefe2955bbe23224aaf856134eccf1e4",
+ "sha256:209ba4cc916bab46f64e56b85b090607a676f66b473e6b762e6f1d9d591eb2e8",
+ "sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c",
+ "sha256:7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d",
+ "sha256:c053062984e61144385022e53678fbded7aea14ebb3e0305ae3592fb219ccfa4",
+ "sha256:d4198877797a83cbfe9bffa3803602bbe1625dc30d8a097365dbc762e5790faa",
+ "sha256:e3c97a1555fd6388f857770ff8b9703083de6bf1f9274a002a332d65fbb56c8c",
+ "sha256:e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019",
+ "sha256:f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9",
+ "sha256:f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c",
+ "sha256:f4f118245c4a087776e0a8408be33cf09f6c547442c00395fbfb116fac2f8ac2"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==4.25.3"
+ },
+ "pyasn1": {
+ "hashes": [
+ "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c",
+ "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==0.6.0"
+ },
+ "pyasn1-modules": {
+ "hashes": [
+ "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6",
+ "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==0.4.0"
+ },
+ "pydantic": {
+ "hashes": [
+ "sha256:08b6ec0917c30861e3fe71a93be1648a2aa4f62f866142ba21670b24444d7fd8",
+ "sha256:0fbb503bbbbab0c588ed3cd21975a1d0d4163b87e360fec17a792f7d8c4ff29f",
+ "sha256:1245f4f61f467cb3dfeced2b119afef3db386aec3d24a22a1de08c65038b255f",
+ "sha256:13e86a19dca96373dcf3190fcb8797d40a6f12f154a244a8d1e8e03b8f280593",
+ "sha256:21efacc678a11114c765eb52ec0db62edffa89e9a562a94cbf8fa10b5db5c046",
+ "sha256:23d47a4b57a38e8652bcab15a658fdb13c785b9ce217cc3a729504ab4e1d6bc9",
+ "sha256:24a7679fab2e0eeedb5a8924fc4a694b3bcaac7d305aeeac72dd7d4e05ecbebf",
+ "sha256:282613a5969c47c83a8710cc8bfd1e70c9223feb76566f74683af889faadc0ea",
+ "sha256:336709883c15c050b9c55a63d6c7ff09be883dbc17805d2b063395dd9d9d0022",
+ "sha256:412ab4a3f6dbd2bf18aefa9f79c7cca23744846b31f1d6555c2ee2b05a2e14ca",
+ "sha256:466669501d08ad8eb3c4fecd991c5e793c4e0bbd62299d05111d4f827cded64f",
+ "sha256:46f17b832fe27de7850896f3afee50ea682220dd218f7e9c88d436788419dca6",
+ "sha256:49a46a0994dd551ec051986806122767cf144b9702e31d47f6d493c336462597",
+ "sha256:4ae57b4d8e3312d486e2498d42aed3ece7b51848336964e43abbf9671584e67f",
+ "sha256:53e3819bd20a42470d6dd0fe7fc1c121c92247bca104ce608e609b59bc7a77ee",
+ "sha256:596f12a1085e38dbda5cbb874d0973303e34227b400b6414782bf205cc14940c",
+ "sha256:646b2b12df4295b4c3148850c85bff29ef6d0d9621a8d091e98094871a62e5c7",
+ "sha256:798a3d05ee3b71967844a1164fd5bdb8c22c6d674f26274e78b9f29d81770c4e",
+ "sha256:7f4fcec873f90537c382840f330b90f4715eebc2bc9925f04cb92de593eae054",
+ "sha256:82d886bd3c3fbeaa963692ef6b643159ccb4b4cefaf7ff1617720cbead04fd1d",
+ "sha256:8e3a76f571970fcd3c43ad982daf936ae39b3e90b8a2e96c04113a369869dc87",
+ "sha256:8ee853cd12ac2ddbf0ecbac1c289f95882b2d4482258048079d13be700aa114c",
+ "sha256:9d578ac4bf7fdf10ce14caba6f734c178379bd35c486c6deb6f49006e1ba78a7",
+ "sha256:a42c7d17706911199798d4c464b352e640cab4351efe69c2267823d619a937e5",
+ "sha256:aad4e10efa5474ed1a611b6d7f0d130f4aafadceb73c11d9e72823e8f508e663",
+ "sha256:ad8c2bc677ae5f6dbd3cf92f2c7dc613507eafe8f71719727cbc0a7dec9a8c01",
+ "sha256:bc3ed06ab13660b565eed80887fcfbc0070f0aa0691fbb351657041d3e874efe",
+ "sha256:bfb113860e9288d0886e3b9e49d9cf4a9d48b441f52ded7d96db7819028514cc",
+ "sha256:c37c28449752bb1f47975d22ef2882d70513c546f8f37201e0fec3a97b816eee",
+ "sha256:c66609e138c31cba607d8e2a7b6a5dc38979a06c900815495b2d90ce6ded35b4",
+ "sha256:d604be0f0b44d473e54fdcb12302495fe0467c56509a2f80483476f3ba92b33c",
+ "sha256:d986e115e0b39604b9eee3507987368ff8148222da213cd38c359f6f57b3b347",
+ "sha256:dba49d52500c35cfec0b28aa8b3ea5c37c9df183ffc7210b10ff2a415c125c4a",
+ "sha256:e897c9f35281f7889873a3e6d6b69aa1447ceb024e8495a5f0d02ecd17742a7f",
+ "sha256:f9f674b5c3bebc2eba401de64f29948ae1e646ba2735f884d1594c5f675d6f2a",
+ "sha256:fa7790e94c60f809c95602a26d906eba01a0abee9cc24150e4ce2189352deb1b"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==1.10.14"
+ },
+ "pyparsing": {
+ "hashes": [
+ "sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad",
+ "sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"
+ ],
+ "markers": "python_version >= '3.1'",
+ "version": "==3.1.2"
+ },
+ "pyrsistent": {
+ "hashes": [
+ "sha256:0724c506cd8b63c69c7f883cc233aac948c1ea946ea95996ad8b1380c25e1d3f",
+ "sha256:09848306523a3aba463c4b49493a760e7a6ca52e4826aa100ee99d8d39b7ad1e",
+ "sha256:0f3b1bcaa1f0629c978b355a7c37acd58907390149b7311b5db1b37648eb6958",
+ "sha256:21cc459636983764e692b9eba7144cdd54fdec23ccdb1e8ba392a63666c60c34",
+ "sha256:2e14c95c16211d166f59c6611533d0dacce2e25de0f76e4c140fde250997b3ca",
+ "sha256:2e2c116cc804d9b09ce9814d17df5edf1df0c624aba3b43bc1ad90411487036d",
+ "sha256:4021a7f963d88ccd15b523787d18ed5e5269ce57aa4037146a2377ff607ae87d",
+ "sha256:4c48f78f62ab596c679086084d0dd13254ae4f3d6c72a83ffdf5ebdef8f265a4",
+ "sha256:4f5c2d012671b7391803263419e31b5c7c21e7c95c8760d7fc35602353dee714",
+ "sha256:58b8f6366e152092194ae68fefe18b9f0b4f89227dfd86a07770c3d86097aebf",
+ "sha256:59a89bccd615551391f3237e00006a26bcf98a4d18623a19909a2c48b8e986ee",
+ "sha256:5cdd7ef1ea7a491ae70d826b6cc64868de09a1d5ff9ef8d574250d0940e275b8",
+ "sha256:6288b3fa6622ad8a91e6eb759cfc48ff3089e7c17fb1d4c59a919769314af224",
+ "sha256:6d270ec9dd33cdb13f4d62c95c1a5a50e6b7cdd86302b494217137f760495b9d",
+ "sha256:79ed12ba79935adaac1664fd7e0e585a22caa539dfc9b7c7c6d5ebf91fb89054",
+ "sha256:7d29c23bdf6e5438c755b941cef867ec2a4a172ceb9f50553b6ed70d50dfd656",
+ "sha256:8441cf9616d642c475684d6cf2520dd24812e996ba9af15e606df5f6fd9d04a7",
+ "sha256:881bbea27bbd32d37eb24dd320a5e745a2a5b092a17f6debc1349252fac85423",
+ "sha256:8c3aba3e01235221e5b229a6c05f585f344734bd1ad42a8ac51493d74722bbce",
+ "sha256:a14798c3005ec892bbada26485c2eea3b54109cb2533713e355c806891f63c5e",
+ "sha256:b14decb628fac50db5e02ee5a35a9c0772d20277824cfe845c8a8b717c15daa3",
+ "sha256:b318ca24db0f0518630e8b6f3831e9cba78f099ed5c1d65ffe3e023003043ba0",
+ "sha256:c1beb78af5423b879edaf23c5591ff292cf7c33979734c99aa66d5914ead880f",
+ "sha256:c55acc4733aad6560a7f5f818466631f07efc001fd023f34a6c203f8b6df0f0b",
+ "sha256:ca52d1ceae015859d16aded12584c59eb3825f7b50c6cfd621d4231a6cc624ce",
+ "sha256:cae40a9e3ce178415040a0383f00e8d68b569e97f31928a3a8ad37e3fde6df6a",
+ "sha256:e78d0c7c1e99a4a45c99143900ea0546025e41bb59ebc10182e947cf1ece9174",
+ "sha256:ef3992833fbd686ee783590639f4b8343a57f1f75de8633749d984dc0eb16c86",
+ "sha256:f058a615031eea4ef94ead6456f5ec2026c19fb5bd6bfe86e9665c4158cf802f",
+ "sha256:f5ac696f02b3fc01a710427585c855f65cd9c640e14f52abe52020722bb4906b",
+ "sha256:f920385a11207dc372a028b3f1e1038bb244b3ec38d448e6d8e43c6b3ba20e98",
+ "sha256:fed2c3216a605dc9a6ea50c7e84c82906e3684c4e80d2908208f662a6cbf9022"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==0.20.0"
+ },
+ "pyserial": {
+ "hashes": [
+ "sha256:3c77e014170dfffbd816e6ffc205e9842efb10be9f58ec16d3e8675b4925cddb",
+ "sha256:c4451db6ba391ca6ca299fb3ec7bae67a5c55dde170964c7a14ceefec02f2cf0"
+ ],
+ "version": "==3.5"
+ },
+ "python-can": {
+ "hashes": [
+ "sha256:6ad50f4613289f3c4d276b6d2ac8901d776dcb929994cce93f55a69e858c595f",
+ "sha256:7eea9b81b0ff908000a825db024313f622895bd578e8a17433e0474cd7d2da83"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==4.2.2"
+ },
+ "pywin32": {
+ "hashes": [
+ "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d",
+ "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65",
+ "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e",
+ "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b",
+ "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4",
+ "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040",
+ "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a",
+ "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36",
+ "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8",
+ "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e",
+ "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802",
+ "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a",
+ "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407",
+ "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"
+ ],
+ "markers": "platform_system == 'Windows' and platform_python_implementation == 'CPython'",
+ "version": "==306"
+ },
+ "requests": {
+ "hashes": [
+ "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f",
+ "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.31.0"
+ },
+ "requests-oauthlib": {
+ "hashes": [
+ "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36",
+ "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9"
+ ],
+ "markers": "python_version >= '3.4'",
+ "version": "==2.0.0"
+ },
+ "rsa": {
+ "hashes": [
+ "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7",
+ "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"
+ ],
+ "markers": "python_version >= '3.6' and python_version < '4'",
+ "version": "==4.9"
+ },
+ "setuptools": {
+ "hashes": [
+ "sha256:0ff4183f8f42cd8fa3acea16c45205521a4ef28f73c6391d8a25e92893134f2e",
+ "sha256:c21c49fb1042386df081cb5d86759792ab89efca84cf114889191cd09aacc80c"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==69.2.0"
+ },
+ "six": {
+ "hashes": [
+ "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
+ "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
+ "version": "==1.16.0"
+ },
+ "sniffio": {
+ "hashes": [
+ "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2",
+ "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==1.3.1"
+ },
+ "strenum": {
+ "hashes": [
+ "sha256:878fb5ab705442070e4dd1929bb5e2249511c0bcf2b0eeacf3bcd80875c82eff",
+ "sha256:a30cda4af7cc6b5bf52c8055bc4bf4b2b6b14a93b574626da33df53cf7740659"
+ ],
+ "version": "==0.4.15"
+ },
+ "types-httplib2": {
+ "hashes": [
+ "sha256:1eda99fea18ec8a1dc1a725ead35b889d0836fec1b11ae6f1fe05440724c1d15",
+ "sha256:8cd706fc81f0da32789a4373a28df6f39e9d5657d1281db4d2fd22ee29e83661"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.8'",
+ "version": "==0.22.0.20240310"
+ },
+ "typing-extensions": {
+ "hashes": [
+ "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475",
+ "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==4.10.0"
+ },
+ "uritemplate": {
+ "hashes": [
+ "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0",
+ "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==4.1.1"
+ },
+ "urllib3": {
+ "hashes": [
+ "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d",
+ "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==2.2.1"
+ },
+ "wrapt": {
+ "hashes": [
+ "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc",
+ "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81",
+ "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09",
+ "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e",
+ "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca",
+ "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0",
+ "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb",
+ "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487",
+ "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40",
+ "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c",
+ "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060",
+ "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202",
+ "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41",
+ "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9",
+ "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b",
+ "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664",
+ "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d",
+ "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362",
+ "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00",
+ "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc",
+ "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1",
+ "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267",
+ "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956",
+ "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966",
+ "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1",
+ "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228",
+ "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72",
+ "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d",
+ "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292",
+ "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0",
+ "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0",
+ "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36",
+ "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c",
+ "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5",
+ "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f",
+ "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73",
+ "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b",
+ "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2",
+ "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593",
+ "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39",
+ "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389",
+ "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf",
+ "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf",
+ "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89",
+ "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c",
+ "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c",
+ "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f",
+ "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440",
+ "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465",
+ "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136",
+ "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b",
+ "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8",
+ "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3",
+ "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8",
+ "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6",
+ "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e",
+ "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f",
+ "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c",
+ "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e",
+ "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8",
+ "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2",
+ "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020",
+ "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35",
+ "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d",
+ "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3",
+ "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537",
+ "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809",
+ "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d",
+ "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a",
+ "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==1.16.0"
+ }
+ },
+ "develop": {
+ "atomicwrites": {
+ "hashes": [
+ "sha256:81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11"
+ ],
+ "index": "pypi",
+ "version": "==1.4.1"
+ },
+ "attrs": {
+ "hashes": [
+ "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30",
+ "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==23.2.0"
+ },
+ "black": {
+ "hashes": [
+ "sha256:06f9d8846f2340dfac80ceb20200ea5d1b3f181dd0556b47af4e8e0b24fa0a6b",
+ "sha256:10dbe6e6d2988049b4655b2b739f98785a884d4d6b85bc35133a8fb9a2233176",
+ "sha256:2497f9c2386572e28921fa8bec7be3e51de6801f7459dffd6e62492531c47e09",
+ "sha256:30d78ba6bf080eeaf0b7b875d924b15cd46fec5fd044ddfbad38c8ea9171043a",
+ "sha256:328efc0cc70ccb23429d6be184a15ce613f676bdfc85e5fe8ea2a9354b4e9015",
+ "sha256:35020b8886c022ced9282b51b5a875b6d1ab0c387b31a065b84db7c33085ca79",
+ "sha256:5795a0375eb87bfe902e80e0c8cfaedf8af4d49694d69161e5bd3206c18618bb",
+ "sha256:5891ef8abc06576985de8fa88e95ab70641de6c1fca97e2a15820a9b69e51b20",
+ "sha256:637a4014c63fbf42a692d22b55d8ad6968a946b4a6ebc385c5505d9625b6a464",
+ "sha256:67c8301ec94e3bcc8906740fe071391bce40a862b7be0b86fb5382beefecd968",
+ "sha256:6d2fc92002d44746d3e7db7cf9313cf4452f43e9ea77a2c939defce3b10b5c82",
+ "sha256:6ee227b696ca60dd1c507be80a6bc849a5a6ab57ac7352aad1ffec9e8b805f21",
+ "sha256:863714200ada56cbc366dc9ae5291ceb936573155f8bf8e9de92aef51f3ad0f0",
+ "sha256:9b542ced1ec0ceeff5b37d69838106a6348e60db7b8fdd245294dc1d26136265",
+ "sha256:a6342964b43a99dbc72f72812bf88cad8f0217ae9acb47c0d4f141a6416d2d7b",
+ "sha256:ad4efa5fad66b903b4a5f96d91461d90b9507a812b3c5de657d544215bb7877a",
+ "sha256:bc58025940a896d7e5356952228b68f793cf5fcb342be703c3a2669a1488cb72",
+ "sha256:cc1e1de68c8e5444e8f94c3670bb48a2beef0e91dddfd4fcc29595ebd90bb9ce",
+ "sha256:cee3e11161dde1b2a33a904b850b0899e0424cc331b7295f2a9698e79f9a69a0",
+ "sha256:e3556168e2e5c49629f7b0f377070240bd5511e45e25a4497bb0073d9dda776a",
+ "sha256:e8477ec6bbfe0312c128e74644ac8a02ca06bcdb8982d4ee06f209be28cdf163",
+ "sha256:ee8f1f7228cce7dffc2b464f07ce769f478968bfb3dd1254a4c2eeed84928aad",
+ "sha256:fd57160949179ec517d32ac2ac898b5f20d68ed1a9c977346efbac9c2f1e779d"
+ ],
+ "index": "pypi",
+ "markers": "python_full_version >= '3.6.2'",
+ "version": "==22.3.0"
+ },
+ "cachetools": {
+ "hashes": [
+ "sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945",
+ "sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==5.3.3"
+ },
+ "certifi": {
+ "hashes": [
+ "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f",
+ "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==2024.2.2"
+ },
+ "charset-normalizer": {
+ "hashes": [
+ "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027",
+ "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087",
+ "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786",
+ "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8",
+ "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09",
+ "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185",
+ "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574",
+ "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e",
+ "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519",
+ "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898",
+ "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269",
+ "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3",
+ "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f",
+ "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6",
+ "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8",
+ "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a",
+ "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73",
+ "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc",
+ "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714",
+ "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2",
+ "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc",
+ "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce",
+ "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d",
+ "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e",
+ "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6",
+ "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269",
+ "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96",
+ "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d",
+ "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a",
+ "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4",
+ "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77",
+ "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d",
+ "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0",
+ "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed",
+ "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068",
+ "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac",
+ "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25",
+ "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8",
+ "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab",
+ "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26",
+ "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2",
+ "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db",
+ "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f",
+ "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5",
+ "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99",
+ "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c",
+ "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d",
+ "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811",
+ "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa",
+ "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a",
+ "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03",
+ "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b",
+ "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04",
+ "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c",
+ "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001",
+ "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458",
+ "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389",
+ "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99",
+ "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985",
+ "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537",
+ "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238",
+ "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f",
+ "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d",
+ "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796",
+ "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a",
+ "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143",
+ "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8",
+ "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c",
+ "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5",
+ "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5",
+ "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711",
+ "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4",
+ "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6",
+ "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c",
+ "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7",
+ "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4",
+ "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b",
+ "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae",
+ "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12",
+ "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c",
+ "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae",
+ "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8",
+ "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887",
+ "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b",
+ "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4",
+ "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f",
+ "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5",
+ "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33",
+ "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519",
+ "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"
+ ],
+ "markers": "python_full_version >= '3.7.0'",
+ "version": "==3.3.2"
+ },
+ "click": {
+ "hashes": [
+ "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28",
+ "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==8.1.7"
+ },
+ "colorama": {
+ "hashes": [
+ "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44",
+ "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"
+ ],
+ "markers": "platform_system == 'Windows'",
+ "version": "==0.4.6"
+ },
+ "coverage": {
+ "hashes": [
+ "sha256:00838a35b882694afda09f85e469c96367daa3f3f2b097d846a7216993d37f4c",
+ "sha256:0513b9508b93da4e1716744ef6ebc507aff016ba115ffe8ecff744d1322a7b63",
+ "sha256:09c3255458533cb76ef55da8cc49ffab9e33f083739c8bd4f58e79fecfe288f7",
+ "sha256:09ef9199ed6653989ebbcaacc9b62b514bb63ea2f90256e71fea3ed74bd8ff6f",
+ "sha256:09fa497a8ab37784fbb20ab699c246053ac294d13fc7eb40ec007a5043ec91f8",
+ "sha256:0f9f50e7ef2a71e2fae92774c99170eb8304e3fdf9c8c3c7ae9bab3e7229c5cf",
+ "sha256:137eb07173141545e07403cca94ab625cc1cc6bc4c1e97b6e3846270e7e1fea0",
+ "sha256:1f384c3cc76aeedce208643697fb3e8437604b512255de6d18dae3f27655a384",
+ "sha256:201bef2eea65e0e9c56343115ba3814e896afe6d36ffd37bab783261db430f76",
+ "sha256:38dd60d7bf242c4ed5b38e094baf6401faa114fc09e9e6632374388a404f98e7",
+ "sha256:3b799445b9f7ee8bf299cfaed6f5b226c0037b74886a4e11515e569b36fe310d",
+ "sha256:3ea79bb50e805cd6ac058dfa3b5c8f6c040cb87fe83de10845857f5535d1db70",
+ "sha256:40209e141059b9370a2657c9b15607815359ab3ef9918f0196b6fccce8d3230f",
+ "sha256:41c9c5f3de16b903b610d09650e5e27adbfa7f500302718c9ffd1c12cf9d6818",
+ "sha256:54eb8d1bf7cacfbf2a3186019bcf01d11c666bd495ed18717162f7eb1e9dd00b",
+ "sha256:598825b51b81c808cb6f078dcb972f96af96b078faa47af7dfcdf282835baa8d",
+ "sha256:5fc1de20b2d4a061b3df27ab9b7c7111e9a710f10dc2b84d33a4ab25065994ec",
+ "sha256:623512f8ba53c422fcfb2ce68362c97945095b864cda94a92edbaf5994201083",
+ "sha256:690db6517f09336559dc0b5f55342df62370a48f5469fabf502db2c6d1cffcd2",
+ "sha256:69eb372f7e2ece89f14751fbcbe470295d73ed41ecd37ca36ed2eb47512a6ab9",
+ "sha256:73bfb9c09951125d06ee473bed216e2c3742f530fc5acc1383883125de76d9cd",
+ "sha256:742a76a12aa45b44d236815d282b03cfb1de3b4323f3e4ec933acfae08e54ade",
+ "sha256:7c95949560050d04d46b919301826525597f07b33beba6187d04fa64d47ac82e",
+ "sha256:8130a2aa2acb8788e0b56938786c33c7c98562697bf9f4c7d6e8e5e3a0501e4a",
+ "sha256:8a2b2b78c78293782fd3767d53e6474582f62443d0504b1554370bde86cc8227",
+ "sha256:8ce1415194b4a6bd0cdcc3a1dfbf58b63f910dcb7330fe15bdff542c56949f87",
+ "sha256:9ca28a302acb19b6af89e90f33ee3e1906961f94b54ea37de6737b7ca9d8827c",
+ "sha256:a4cdc86d54b5da0df6d3d3a2f0b710949286094c3a6700c21e9015932b81447e",
+ "sha256:aa5b1c1bfc28384f1f53b69a023d789f72b2e0ab1b3787aae16992a7ca21056c",
+ "sha256:aadacf9a2f407a4688d700e4ebab33a7e2e408f2ca04dbf4aef17585389eff3e",
+ "sha256:ae71e7ddb7a413dd60052e90528f2f65270aad4b509563af6d03d53e979feafd",
+ "sha256:b14706df8b2de49869ae03a5ccbc211f4041750cd4a66f698df89d44f4bd30ec",
+ "sha256:b1a93009cb80730c9bca5d6d4665494b725b6e8e157c1cb7f2db5b4b122ea562",
+ "sha256:b2991665420a803495e0b90a79233c1433d6ed77ef282e8e152a324bbbc5e0c8",
+ "sha256:b2c5edc4ac10a7ef6605a966c58929ec6c1bd0917fb8c15cb3363f65aa40e677",
+ "sha256:b4d33f418f46362995f1e9d4f3a35a1b6322cb959c31d88ae56b0298e1c22357",
+ "sha256:b91cbc4b195444e7e258ba27ac33769c41b94967919f10037e6355e998af255c",
+ "sha256:c74880fc64d4958159fbd537a091d2a585448a8f8508bf248d72112723974cbd",
+ "sha256:c901df83d097649e257e803be22592aedfd5182f07b3cc87d640bbb9afd50f49",
+ "sha256:cac99918c7bba15302a2d81f0312c08054a3359eaa1929c7e4b26ebe41e9b286",
+ "sha256:cc4f1358cb0c78edef3ed237ef2c86056206bb8d9140e73b6b89fbcfcbdd40e1",
+ "sha256:ccd341521be3d1b3daeb41960ae94a5e87abe2f46f17224ba5d6f2b8398016cf",
+ "sha256:ce4b94265ca988c3f8e479e741693d143026632672e3ff924f25fab50518dd51",
+ "sha256:cf271892d13e43bc2b51e6908ec9a6a5094a4df1d8af0bfc360088ee6c684409",
+ "sha256:d5ae728ff3b5401cc320d792866987e7e7e880e6ebd24433b70a33b643bb0384",
+ "sha256:d71eec7d83298f1af3326ce0ff1d0ea83c7cb98f72b577097f9083b20bdaf05e",
+ "sha256:d898fe162d26929b5960e4e138651f7427048e72c853607f2b200909794ed978",
+ "sha256:d89d7b2974cae412400e88f35d86af72208e1ede1a541954af5d944a8ba46c57",
+ "sha256:dfa8fe35a0bb90382837b238fff375de15f0dcdb9ae68ff85f7a63649c98527e",
+ "sha256:e0be5efd5127542ef31f165de269f77560d6cdef525fffa446de6f7e9186cfb2",
+ "sha256:fdfafb32984684eb03c2d83e1e51f64f0906b11e64482df3c5db936ce3839d48",
+ "sha256:ff7687ca3d7028d8a5f0ebae95a6e4827c5616b31a4ee1192bdfde697db110d4"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==7.4.4"
+ },
+ "flake8": {
+ "hashes": [
+ "sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b",
+ "sha256:bf8fd333346d844f616e8d47905ef3a3384edae6b4e9beb0c5101e25e3110907"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
+ "version": "==3.9.2"
+ },
+ "flake8-annotations": {
+ "hashes": [
+ "sha256:0d6cd2e770b5095f09689c9d84cc054c51b929c41a68969ea1beb4b825cac515",
+ "sha256:d10c4638231f8a50c0a597c4efce42bd7b7d85df4f620a0ddaca526138936a4f"
+ ],
+ "index": "pypi",
+ "markers": "python_full_version >= '3.6.1' and python_full_version < '4.0.0'",
+ "version": "==2.6.2"
+ },
+ "flake8-docstrings": {
+ "hashes": [
+ "sha256:99cac583d6c7e32dd28bbfbef120a7c0d1b6dde4adb5a9fd441c4227a6534bde",
+ "sha256:9fe7c6a306064af8e62a055c2f61e9eb1da55f84bb39caef2b84ce53708ac34b"
+ ],
+ "index": "pypi",
+ "version": "==1.6.0"
+ },
+ "flake8-noqa": {
+ "hashes": [
+ "sha256:26d92ca6b72dec732d294e587a2bdeb66dab01acc609ed6a064693d6baa4e789",
+ "sha256:445618162e0bbae1b9d983326d4e39066c5c6de71ba0c444ca2d4d1fa5b2cdb7"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.7'",
+ "version": "==1.2.9"
+ },
+ "google-api-core": {
+ "hashes": [
+ "sha256:5a63aa102e0049abe85b5b88cb9409234c1f70afcda21ce1e40b285b9629c1d6",
+ "sha256:62d97417bfc674d6cef251e5c4d639a9655e00c45528c4364fbfebb478ce72a9"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.18.0"
+ },
+ "google-api-python-client": {
+ "hashes": [
+ "sha256:ce25fc21cf0649a1cbf42583e78d5fd7648ff2700e7b89b945209149ba913adc",
+ "sha256:facbe8e25ea9d07241299bf7704f53dec154ad3dc52fec2ea23ca6d6e5f6b392"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.6'",
+ "version": "==2.41.0"
+ },
+ "google-api-python-client-stubs": {
+ "hashes": [
+ "sha256:1e3dd295fc9759d6bdc27edffb074465375c3373c8437488893d9c2eeefcfa10",
+ "sha256:38766c0216ee01ac22b624bd88683fa093de3a52eaf4770a73d27b8493099cd5"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.7' and python_version < '4.0'",
+ "version": "==1.9.0"
+ },
+ "google-auth": {
+ "hashes": [
+ "sha256:672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360",
+ "sha256:d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.29.0"
+ },
+ "google-auth-httplib2": {
+ "hashes": [
+ "sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05",
+ "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d"
+ ],
+ "version": "==0.2.0"
+ },
+ "googleapis-common-protos": {
+ "hashes": [
+ "sha256:17ad01b11d5f1d0171c06d3ba5c04c54474e883b66b949722b4938ee2694ef4e",
+ "sha256:ae45f75702f7c08b541f750854a678bd8f534a1a6bace6afe975f1d0a82d6632"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==1.63.0"
+ },
+ "httplib2": {
+ "hashes": [
+ "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc",
+ "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
+ "version": "==0.22.0"
+ },
+ "idna": {
+ "hashes": [
+ "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca",
+ "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"
+ ],
+ "markers": "python_version >= '3.5'",
+ "version": "==3.6"
+ },
+ "iniconfig": {
+ "hashes": [
+ "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3",
+ "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.0.0"
+ },
+ "mccabe": {
+ "hashes": [
+ "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42",
+ "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"
+ ],
+ "version": "==0.6.1"
+ },
+ "mypy": {
+ "hashes": [
+ "sha256:06e1eac8d99bd404ed8dd34ca29673c4346e76dd8e612ea507763dccd7e13c7a",
+ "sha256:2ee3dbc53d4df7e6e3b1c68ac6a971d3a4fb2852bf10a05fda228721dd44fae1",
+ "sha256:4bc460e43b7785f78862dab78674e62ec3cd523485baecfdf81a555ed29ecfa0",
+ "sha256:64e1f6af81c003f85f0dfed52db632817dabb51b65c0318ffbf5ff51995bbb08",
+ "sha256:6e35d764784b42c3e256848fb8ed1d4292c9fc0098413adb28d84974c095b279",
+ "sha256:6ee196b1d10b8b215e835f438e06965d7a480f6fe016eddbc285f13955cca659",
+ "sha256:756fad8b263b3ba39e4e204ee53042671b660c36c9017412b43af210ddee7b08",
+ "sha256:77f8fcf7b4b3cc0c74fb33ae54a4cd00bb854d65645c48beccf65fa10b17882c",
+ "sha256:794f385653e2b749387a42afb1e14c2135e18daeb027e0d97162e4b7031210f8",
+ "sha256:8ad21d4c9d3673726cf986ea1d0c9fb66905258709550ddf7944c8f885f208be",
+ "sha256:8e8e49aa9cc23aa4c926dc200ce32959d3501c4905147a66ce032f05cb5ecb92",
+ "sha256:9f362470a3480165c4c6151786b5379351b790d56952005be18bdbdd4c7ce0ae",
+ "sha256:a16a0145d6d7d00fbede2da3a3096dcc9ecea091adfa8da48fa6a7b75d35562d",
+ "sha256:ad77c13037d3402fbeffda07d51e3f228ba078d1c7096a73759c9419ea031bf4",
+ "sha256:b6ede64e52257931315826fdbfc6ea878d89a965580d1a65638ef77cb551f56d",
+ "sha256:c9e0efb95ed6ca1654951bd5ec2f3fa91b295d78bf6527e026529d4aaa1e0c30",
+ "sha256:ce65f70b14a21fdac84c294cde75e6dbdabbcff22975335e20827b3b94bdbf49",
+ "sha256:d1debb09043e1f5ee845fa1e96d180e89115b30e47c5d3ce53bc967bab53f62d",
+ "sha256:e178eaffc3c5cd211a87965c8c0df6da91ed7d258b5fc72b8e047c3771317ddb",
+ "sha256:e1acf62a8c4f7c092462c738aa2c2489e275ed386320c10b2e9bff31f6f7e8d6",
+ "sha256:e53773073c864d5f5cec7f3fc72fbbcef65410cde8cc18d4f7242dea60dac52e",
+ "sha256:eb3978b191b9fa0488524bb4ffedf2c573340e8c2b4206fc191d44c7093abfb7",
+ "sha256:f64d2ce043a209a297df322eb4054dfbaa9de9e8738291706eaafda81ab2b362",
+ "sha256:fa38f82f53e1e7beb45557ff167c177802ba7b387ad017eab1663d567017c8ee"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.7'",
+ "version": "==0.981"
+ },
+ "mypy-extensions": {
+ "hashes": [
+ "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d",
+ "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"
+ ],
+ "markers": "python_version >= '3.5'",
+ "version": "==1.0.0"
+ },
+ "packaging": {
+ "hashes": [
+ "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5",
+ "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==24.0"
+ },
+ "pathspec": {
+ "hashes": [
+ "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08",
+ "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==0.12.1"
+ },
+ "platformdirs": {
+ "hashes": [
+ "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068",
+ "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==4.2.0"
+ },
+ "pluggy": {
+ "hashes": [
+ "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981",
+ "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==1.4.0"
+ },
+ "proto-plus": {
+ "hashes": [
+ "sha256:89075171ef11988b3fa157f5dbd8b9cf09d65fffee97e29ce403cd8defba19d2",
+ "sha256:a829c79e619e1cf632de091013a4173deed13a55f326ef84f05af6f50ff4c82c"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==1.23.0"
+ },
+ "protobuf": {
+ "hashes": [
+ "sha256:19b270aeaa0099f16d3ca02628546b8baefe2955bbe23224aaf856134eccf1e4",
+ "sha256:209ba4cc916bab46f64e56b85b090607a676f66b473e6b762e6f1d9d591eb2e8",
+ "sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c",
+ "sha256:7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d",
+ "sha256:c053062984e61144385022e53678fbded7aea14ebb3e0305ae3592fb219ccfa4",
+ "sha256:d4198877797a83cbfe9bffa3803602bbe1625dc30d8a097365dbc762e5790faa",
+ "sha256:e3c97a1555fd6388f857770ff8b9703083de6bf1f9274a002a332d65fbb56c8c",
+ "sha256:e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019",
+ "sha256:f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9",
+ "sha256:f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c",
+ "sha256:f4f118245c4a087776e0a8408be33cf09f6c547442c00395fbfb116fac2f8ac2"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==4.25.3"
+ },
+ "py": {
+ "hashes": [
+ "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719",
+ "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
+ "version": "==1.11.0"
+ },
+ "pyasn1": {
+ "hashes": [
+ "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c",
+ "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==0.6.0"
+ },
+ "pyasn1-modules": {
+ "hashes": [
+ "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6",
+ "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==0.4.0"
+ },
+ "pycodestyle": {
+ "hashes": [
+ "sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068",
+ "sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
+ "version": "==2.7.0"
+ },
+ "pydocstyle": {
+ "hashes": [
+ "sha256:118762d452a49d6b05e194ef344a55822987a462831ade91ec5c06fd2169d019",
+ "sha256:7ce43f0c0ac87b07494eb9c0b462c0b73e6ff276807f204d6b53edc72b7e44e1"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==6.3.0"
+ },
+ "pyflakes": {
+ "hashes": [
+ "sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3",
+ "sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
+ "version": "==2.3.1"
+ },
+ "pyparsing": {
+ "hashes": [
+ "sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad",
+ "sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"
+ ],
+ "markers": "python_version >= '3.1'",
+ "version": "==3.1.2"
+ },
+ "pytest": {
+ "hashes": [
+ "sha256:841132caef6b1ad17a9afde46dc4f6cfa59a05f9555aae5151f73bdf2820ca63",
+ "sha256:92f723789a8fdd7180b6b06483874feca4c48a5c76968e03bb3e7f806a1869ea"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.7'",
+ "version": "==7.1.1"
+ },
+ "pytest-cov": {
+ "hashes": [
+ "sha256:45ec2d5182f89a81fc3eb29e3d1ed3113b9e9a873bcddb2a71faaab066110191",
+ "sha256:47bd0ce14056fdd79f93e1713f88fad7bdcc583dcd7783da86ef2f085a0bb88e"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
+ "version": "==2.10.1"
+ },
+ "requests": {
+ "hashes": [
+ "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f",
+ "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.31.0"
+ },
+ "rsa": {
+ "hashes": [
+ "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7",
+ "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"
+ ],
+ "markers": "python_version >= '3.6' and python_version < '4'",
+ "version": "==4.9"
+ },
+ "snowballstemmer": {
+ "hashes": [
+ "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1",
+ "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"
+ ],
+ "version": "==2.2.0"
+ },
+ "tomli": {
+ "hashes": [
+ "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc",
+ "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"
+ ],
+ "markers": "python_version < '3.11'",
+ "version": "==2.0.1"
+ },
+ "types-requests": {
+ "hashes": [
+ "sha256:a5a305b43ea57bf64d6731f89816946a405b591eff6de28d4c0fd58422cee779",
+ "sha256:e21541c0f55c066c491a639309159556dd8c5833e49fcde929c4c47bdb0002ee"
+ ],
+ "index": "pypi",
+ "version": "==2.25.6"
+ },
+ "typing-extensions": {
+ "hashes": [
+ "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475",
+ "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==4.10.0"
+ },
+ "uritemplate": {
+ "hashes": [
+ "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0",
+ "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==4.1.1"
+ },
+ "urllib3": {
+ "hashes": [
+ "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d",
+ "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==2.2.1"
+ }
+ }
+}
diff --git a/abr-testing/README.md b/abr-testing/README.md
new file mode 100644
index 00000000000..3e6733f727b
--- /dev/null
+++ b/abr-testing/README.md
@@ -0,0 +1,5 @@
+# ABR Testing Library
+
+A Python package containing tools that work with sets of Flex robots to perform cohort, application-based reliability testing and data tracking.
+
+Most of the code here is intended to run on the computer that drives the testing, so it must be kept compatible with Windows.
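+
+As a quick orientation, below is a minimal sketch (not a tested recipe) of how the Google Drive and Google Sheets helpers in `abr_testing.automation` are typically wired together. Every argument shown, including the credentials path, Drive folder ID, email, spreadsheet name, and local path, is a placeholder rather than a real value.
+
+```python
+from abr_testing.automation import google_drive_tool, google_sheets_tool
+
+# Placeholders: substitute your own credentials file, Drive folder ID, email, and sheet name.
+drive = google_drive_tool.google_drive(
+    "credentials.json", "<drive-folder-id>", "me@example.com"
+)
+drive.upload_missing_files("C:\\abr-data\\run-logs")
+
+sheet = google_sheets_tool.google_sheet("credentials.json", "<spreadsheet-name>", 0)
+sheet.write_to_row(["2024/03/20 10:00:00", "DVT1ABR1", "OK"])
+```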
diff --git a/abr-testing/abr_testing/__init__.py b/abr-testing/abr_testing/__init__.py
new file mode 100644
index 00000000000..6fe572495c8
--- /dev/null
+++ b/abr-testing/abr_testing/__init__.py
@@ -0,0 +1 @@
+"""The package holding code for ABR testing."""
diff --git a/abr-testing/abr_testing/automation/__init__.py b/abr-testing/abr_testing/automation/__init__.py
new file mode 100644
index 00000000000..d476965e5a9
--- /dev/null
+++ b/abr-testing/abr_testing/automation/__init__.py
@@ -0,0 +1 @@
+"""Tools for Google API Interaction."""
diff --git a/abr-testing/abr_testing/automation/google_drive_tool.py b/abr-testing/abr_testing/automation/google_drive_tool.py
new file mode 100644
index 00000000000..dbaba0d1ecc
--- /dev/null
+++ b/abr-testing/abr_testing/automation/google_drive_tool.py
@@ -0,0 +1,191 @@
+"""Google Drive Tool."""
+import os
+import io
+import json
+import sys
+from typing import Set, Any, Optional, List, Dict
+import webbrowser
+import mimetypes
+from oauth2client.service_account import ServiceAccountCredentials # type: ignore[import]
+import googleapiclient # type: ignore[import]
+from googleapiclient.discovery import build
+from googleapiclient.http import MediaFileUpload
+from googleapiclient.http import MediaIoBaseDownload
+
+"""Google Drive Tool.
+
+This module requires a credentials.json file before getting started.
+Retrieve from https://console.cloud.google.com/apis/credentials."""
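+
+# A minimal, hypothetical usage sketch (the folder ID, email, and local path below
+# are placeholders, and "credentials.json" is assumed to be in the working directory):
+#
+#     drive = google_drive("credentials.json", "<drive-folder-id>", "me@example.com")
+#     drive.upload_missing_files("C:\\abr-data\\run-logs")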
+
+
+class google_drive:
+ """Google Drive Tool."""
+
+ def __init__(self, credentials: Any, folder_name: str, email: str) -> None:
+ """Connects to google drive via credentials file."""
+ try:
+ self.scope = ["https://www.googleapis.com/auth/drive"]
+ self.credentials = ServiceAccountCredentials.from_json_keyfile_name(
+ credentials, self.scope
+ )
+ self.drive_service = build("drive", "v3", credentials=self.credentials)
+ self.parent_folder = folder_name
+ self.email = email
+ except json.decoder.JSONDecodeError:
+ print("Error! Get file: https://console.cloud.google.com/apis/credentials")
+ sys.exit()
+
+    def list_folder(self, delete: bool = False) -> Set[str]:
+ """List folders and files in Google Drive."""
+ file_names = set()
+ page_token: str = ""
+ while True:
+ results = (
+ self.drive_service.files()
+ .list(
+ q=f"'{self.parent_folder}' in parents and trashed=false"
+ if self.parent_folder
+ else "" # type: ignore
+ if self.parent_folder
+ else None,
+ pageSize=1000,
+ fields="nextPageToken, files(id, name, mimeType)",
+ pageToken=page_token,
+ )
+ .execute()
+ )
+ items = results.get("files", [])
+ if not items:
+ break
+ for item in items:
+ item_name = item["name"]
+ if delete:
+ self.delete_files(item["id"])
+ file_names.add(item_name)
+ page_token = results.get("nextPageToken", "")
+ if len(page_token) < 1:
+ break
+ if not file_names:
+ print("No folders or files found in Google Drive.")
+ return file_names
+
+ def delete_files(self, file_or_folder_id: str) -> None:
+ """Delete a file or folder in Google Drive by ID."""
+ try:
+ self.drive_service.files().delete(fileId=file_or_folder_id).execute()
+ print(f"Successfully deleted file/folder with ID: {file_or_folder_id}")
+ except Exception as e:
+ print(f"Error deleting file/folder with ID: {file_or_folder_id}")
+ print(f"Error details: {str(e)}")
+
+ def upload_file(self, file_path: str) -> str:
+ """Upload file to Google Drive."""
+ file_metadata = {
+ "name": os.path.basename(file_path),
+ "mimeType": str(mimetypes.guess_type(file_path)[0]),
+ "parents": [self.parent_folder],
+ }
+ media = MediaFileUpload(file_path, resumable=True)
+
+ uploaded_file = (
+ self.drive_service.files()
+ .create(body=file_metadata, media_body=media, fields="id") # type: ignore
+ .execute()
+ )
+ return uploaded_file["id"]
+
+ def upload_missing_files(self, storage_directory: str) -> None:
+ """Upload missing files to Google Drive."""
+ # Read .json files.
+ google_drive_files = self.list_folder()
+ google_drive_files_json = [
+ file for file in google_drive_files if file.endswith(".json")
+ ]
+ # Read local directory.
+ local_files_json = set(
+ file for file in os.listdir(storage_directory) if file.endswith(".json")
+ )
+ missing_files = local_files_json - set(google_drive_files_json)
+ # Upload missing files.
+ uploaded_files = []
+ for file in missing_files:
+ file_path = os.path.join(storage_directory, file)
+            uploaded_file_id = self.upload_file(file_path)
+ uploaded_files.append(
+ {"name": os.path.basename(file_path), "id": uploaded_file_id}
+ )
+ try:
+ self.share_permissions(uploaded_file_id)
+ except googleapiclient.errors.HttpError:
+ continue
+
+ # Fetch the updated file list after all are uploaded
+        files = self.list_folder()
+
+        file_names = list(files)
+ for uploaded_file in uploaded_files:
+ this_name = uploaded_file["name"]
+ if this_name in file_names:
+ print(
+ f"File '{this_name}' was successfully uploaded with ID: {uploaded_file['id']}"
+ )
+ else:
+ print(f"File '{this_name}' was not found after uploading.")
+ print(f"{len(files)} item(s) in Google Drive")
+
+ def open_folder(self) -> Optional[str]:
+ """Open folder in web browser."""
+ folder_metadata = (
+ self.drive_service.files()
+ .get(fileId=self.parent_folder, fields="webViewLink")
+ .execute()
+ )
+ folder_link = folder_metadata.get("webViewLink")
+ if folder_link:
+ print(f"Folder link: {folder_link}")
+ webbrowser.open(
+ folder_link
+ ) # Open the folder link in the default web browser
+ else:
+ print("Folder link not found.")
+ return folder_link
+
+ def share_permissions(self, file_id: str) -> None:
+ """Share permissions with self."""
+ new_permission = {
+ "type": "user",
+ "role": "writer",
+ "emailAddress": self.email,
+ }
+ self.drive_service.permissions().create(
+ fileId=file_id, body=new_permission, transferOwnership=False # type: ignore
+ ).execute()
+
+ def download_files(
+ self, files_to_download: List[Dict[str, Any]], save_directory: str
+ ) -> None:
+ """Download files to a specified directory."""
+ for file in files_to_download:
+            file_id = file["id"]
+            file_name = file["name"]
+            file_path = os.path.join(save_directory, file_name)
+            request = self.drive_service.files().get_media(fileId=file_id)  # type: ignore[attr-defined]
+ fh = io.FileIO(file_path, "wb")
+ downloader = MediaIoBaseDownload(fh, request)
+ done = False
+ while done is False:
+ status, done = downloader.next_chunk()
+ print(f"Downloading {file_name}... {int(status.progress() * 100)}%")
+
+ def search_folder(self, search_strings: List[str], folder_id: str) -> List[Any]:
+ """Search folder for files containing string from list."""
+ files_found = []
+ for search_string in search_strings:
+ query = f"'{folder_id}' in parents and name contains '{search_string}'"
+ response = (
+ self.drive_service.files()
+ .list(q=query, fields="files(id,name)")
+ .execute()
+ )
+ files_found.extend(response.get("files", []))
+ return files_found
diff --git a/abr-testing/abr_testing/automation/google_sheets_tool.py b/abr-testing/abr_testing/automation/google_sheets_tool.py
new file mode 100644
index 00000000000..e132422a482
--- /dev/null
+++ b/abr-testing/abr_testing/automation/google_sheets_tool.py
@@ -0,0 +1,143 @@
+"""Google Sheet Tool."""
+import gspread # type: ignore[import]
+import socket
+import httplib2
+import time as t
+import sys
+from datetime import datetime
+from oauth2client.service_account import ServiceAccountCredentials # type: ignore[import]
+from typing import Dict, List, Any, Tuple
+
+"""Google Sheets Tool.
+
+This module requires a credentials.json file before getting started.
+Retrieve from https://console.cloud.google.com/apis/credentials.
+"""
+
+
+class google_sheet:
+ """Google Sheets Tool."""
+
+ def __init__(self, credentials: Any, file_name: str, tab_number: int) -> None:
+ """Connects to google sheet via credentials file."""
+ try:
+ self.scope = [
+ "https://spreadsheets.google.com/feeds",
+ "https://www.googleapis.com/auth/drive",
+ ]
+ self.credentials = ServiceAccountCredentials.from_json_keyfile_name(
+ credentials, self.scope
+ )
+ self.gc = gspread.authorize(self.credentials)
+ self.file_name = file_name
+ self.tab_number = tab_number
+ self.spread_sheet = self.open_google_sheet()
+ self.worksheet = self.open_worksheet(self.tab_number)
+ self.row_index = 1
+ print(f"Connected to google sheet: {self.file_name}")
+ except gspread.exceptions.APIError:
+ print("ERROR: Check google sheet name. Check credentials file.")
+ sys.exit()
+
+ def open_google_sheet(self) -> Any:
+ """Open Google Spread Sheet."""
+ sheet = self.gc.open(self.file_name)
+ return sheet
+
+ def open_worksheet(self, tab_number: int) -> Any:
+ """Open individual worksheet within a googlesheet."""
+ return self.spread_sheet.get_worksheet(tab_number)
+
+    def create_worksheet(self, tab_name: str) -> None:
+ """Create a worksheet with tab name. Existing spreadsheet needed."""
+ try:
+ self.spread_sheet.add_worksheet(tab_name, rows="1000", cols="26")
+ except gspread.exceptions.APIError:
+ print("Work Sheet already exists")
+
+ def write_header(self, header: List) -> None:
+ """Write Header to first row if not present."""
+ header_list = self.worksheet.row_values(1)
+ if header_list != header:
+ self.worksheet.insert_row(header, self.row_index)
+
+ def write_to_row(self, data: List) -> None:
+ """Write data into a row in a List[] format."""
+ try:
+ self.row_index += 1
+ data = [
+ item.strftime("%Y/%m/%d %H:%M:%S")
+ if isinstance(item, datetime)
+ else item
+ for item in data
+ ]
+ self.worksheet.insert_row(data, index=self.row_index)
+        except socket.gaierror:
+            pass
+        except httplib2.ServerNotFoundError:
+            print("UNABLE TO CONNECT TO SERVER!!, CHECK CONNECTION")
+        except gspread.exceptions.APIError:
+            # Write quota exceeded; back off and retry once.
+            print("Write quota exceeded. Waiting 30 sec before writing.")
+            t.sleep(30)
+            self.worksheet.insert_row(data, index=self.row_index)
+        except Exception as error:
+            # Catch-all must come last so the specific handlers above stay reachable.
+            print(error.__traceback__)
+
+ def delete_row(self, row_index: int) -> None:
+ """Delete Row from google sheet."""
+ self.worksheet.delete_rows(row_index)
+
+ def update_cell(
+ self, row: int, column: int, single_data: Any
+ ) -> Tuple[int, int, Any]:
+ """Update ONE individual cell according to a row and column."""
+ self.worksheet.update_cell(row, column, single_data)
+ return row, column, single_data
+
+ def get_all_data(self) -> Dict[str, Any]:
+ """Get all the Data recorded from worksheet."""
+ return self.worksheet.get_all_records()
+
+    def get_column(self, column_number: int) -> List[str]:
+ """Get all values in column."""
+ return self.worksheet.col_values(column_number)
+
+ def get_index_row(self) -> int:
+ """Check for the next available row to write too."""
+ row_index = len(self.get_column(1))
+ print(f"Row Index: {row_index} recorded on google sheet.")
+ return row_index
+
+ def update_row_index(self) -> None:
+ """Update self.row_index instance variable."""
+ self.row_index = self.get_index_row()
+
+    def get_all_sheets(self) -> List[Any]:
+        """List all worksheet (tab) objects in the spreadsheet."""
+ worksheets = self.spread_sheet.worksheets()
+ return worksheets
+
+    def get_sheet_by_name(self, title: str) -> Any:
+ """Reference sheet by name."""
+ try:
+ worksheet = self.spread_sheet.worksheet(title)
+ return worksheet
+ except gspread.exceptions.WorksheetNotFound:
+ raise gspread.exceptions.WorksheetNotFound(
+ "Worksheet does not exist!!, Use create_worksheet() function first."
+ )
+
+ def token_check(self) -> None:
+ """Check if still credentials are still logged in."""
+ if self.credentials.access_token_expired:
+ self.gc.login()
+
+ def get_row_index_with_value(self, some_string: str, col_num: int) -> Any:
+ """Find row index of string by looking in specific column."""
+ cell = self.worksheet.find(some_string, in_column=col_num)
+ try:
+ row_index = int(cell.row)
+ except AttributeError:
+ print("Row not found.")
+ return None
+ return row_index
diff --git a/abr-testing/abr_testing/automation/jira_tool.py b/abr-testing/abr_testing/automation/jira_tool.py
new file mode 100644
index 00000000000..5c0a2556dfb
--- /dev/null
+++ b/abr-testing/abr_testing/automation/jira_tool.py
@@ -0,0 +1,165 @@
+"""JIRA Ticket Creator."""
+
+import requests
+from requests.auth import HTTPBasicAuth
+import json
+import webbrowser
+import argparse
+from typing import List
+
+
+class JiraTicket:
+ """Connects to JIRA ticket site."""
+
+ def __init__(self, url: str, api_token: str, email: str) -> None:
+ """Connect to jira."""
+ self.url = url
+ self.api_token = api_token
+ self.email = email
+ self.auth = HTTPBasicAuth(email, api_token)
+ self.headers = {
+ "Accept": "application/json",
+ "Content-Type": "application/json",
+ }
+
+ def issues_on_board(self, board_id: str) -> List[str]:
+ """Print Issues on board."""
+ response = requests.get(
+ f"{self.url}/rest/agile/1.0/board/{board_id}/issue",
+ headers=self.headers,
+ auth=self.auth,
+ )
+ response.raise_for_status()
+ try:
+ board_data = response.json()
+ all_issues = board_data["issues"]
+ except json.JSONDecodeError as e:
+ print("Error decoding json: ", e)
+ issue_ids = []
+ for i in all_issues:
+ issue_id = i.get("id")
+ issue_ids.append(issue_id)
+ return issue_ids
+
+ def open_issue(self, issue_key: str) -> str:
+ """Open issue on web browser."""
+ url = f"{self.url}/browse/{issue_key}"
+ print(f"Opening at {url}.")
+ webbrowser.open(url)
+ return url
+
+ def create_ticket(
+ self,
+ summary: str,
+ description: str,
+ project_key: str,
+ reporter_id: str,
+ issue_type: str,
+ priority: str,
+ components: list,
+ affects_versions: str,
+ robot: str,
+ ) -> str:
+ """Create ticket."""
+ data = {
+ "fields": {
+ "project": {"id": "10273", "key": project_key},
+ "issuetype": {"name": issue_type},
+ "summary": summary,
+ "reporter": {"id": reporter_id},
+ "parent": {"key": robot},
+ "priority": {"name": priority},
+ "components": [{"name": component} for component in components],
+ "versions": [{"name": affects_versions}],
+ "description": {
+ "content": [
+ {
+ "content": [{"text": description, "type": "text"}],
+ "type": "paragraph",
+ }
+ ],
+ "type": "doc",
+ "version": 1,
+ }
+ # Include other required fields as needed
+ }
+ }
+ issue_key = ""
+ response_str = ""
+ try:
+ response = requests.post(
+ f"{self.url}/rest/api/3/issue/",
+ headers=self.headers,
+ auth=self.auth,
+ json=data,
+ )
+ response.raise_for_status()
+ response_str = str(response.content)
+ issue_url = response.json().get("self")
+ issue_key = response.json().get("key")
+ print(f"issue key {issue_key}")
+ print(f"issue url{issue_url}")
+ if issue_key is None:
+ print("Error: Could not create issue. No key returned.")
+ except requests.exceptions.HTTPError:
+ print(f"HTTP error occurred. Response content: {response_str}")
+ except json.JSONDecodeError:
+ print(f"JSON decoding error occurred. Response content: {response_str}")
+ return issue_key
+
+ def post_attachment_to_ticket(self, issue_id: str, attachment_path: str) -> None:
+ """Add an attachment to a ticket."""
+ # TODO: Ensure that file is actually uploaded.
+ # Jira rejects attachment uploads unless the XSRF check is disabled.
+ attachment_headers = {"Accept": "application/json", "X-Atlassian-Token": "no-check"}
+ with open(attachment_path, "rb") as attachment:
+ try:
+ response = requests.post(
+ f"{self.url}/rest/api/3/issue/{issue_id}/attachments",
+ headers=attachment_headers,
+ auth=self.auth,
+ files={"file": attachment},
+ )
+ print(response)
+ except requests.exceptions.RequestException as e:
+ print(f"Error posting attachment to {issue_id}: {e}.")
+
+
+if __name__ == "__main__":
+ """Create ticket for specified robot."""
+ parser = argparse.ArgumentParser(description="Pulls run logs from ABR robots.")
+ parser.add_argument(
+ "jira_api_token",
+ metavar="JIRA_API_TOKEN",
+ type=str,
+ nargs=1,
+ help="JIRA API Token. Get from https://id.atlassian.com/manage-profile/security.",
+ )
+ parser.add_argument(
+ "email",
+ metavar="EMAIL",
+ type=str,
+ nargs=1,
+ help="Email connected to JIRA account.",
+ )
+ # TODO: write function to get reporter_id from email.
+ parser.add_argument(
+ "reporter_id",
+ metavar="REPORTER_ID",
+ type=str,
+ nargs=1,
+ help="JIRA Reporter ID.",
+ )
+ # TODO: improve help comment on jira board id.
+ parser.add_argument(
+ "board_id",
+ metavar="BOARD_ID",
+ type=str,
+ nargs=1,
+ help="JIRA Board ID. RABR is 217",
+ )
+ args = parser.parse_args()
+ url = "https://opentrons.atlassian.net"
+ api_token = args.jira_api_token[0]
+ email = args.email[0]
+ board_id = args.board_id[0]
+ reporter_id = args.reporter_id[0]
+ ticket = JiraTicket(url, api_token, email)
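+ # Follow-up sketch (not executed here): the client created above could, for
+ # example, list the board's issues with issue_ids = ticket.issues_on_board(board_id).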
diff --git a/abr-testing/abr_testing/data_collection/__init__.py b/abr-testing/abr_testing/data_collection/__init__.py
new file mode 100644
index 00000000000..307d25f4ea4
--- /dev/null
+++ b/abr-testing/abr_testing/data_collection/__init__.py
@@ -0,0 +1 @@
+"""Collect run logs and upload."""
diff --git a/abr-testing/abr_testing/data_collection/abr_calibration_logs.py b/abr-testing/abr_testing/data_collection/abr_calibration_logs.py
new file mode 100644
index 00000000000..11f37e8ab95
--- /dev/null
+++ b/abr-testing/abr_testing/data_collection/abr_calibration_logs.py
@@ -0,0 +1,208 @@
+"""Get Calibration logs from robots."""
+from typing import Dict, Any, List, Union
+import argparse
+import os
+import json
+import sys
+import time as t
+from abr_testing.data_collection import read_robot_logs
+from abr_testing.automation import google_drive_tool, google_sheets_tool
+
+
+def check_for_duplicates(
+ sheet_location: str,
+ google_sheet: Any,
+ col_1: int,
+ col_2: int,
+ row: List[str],
+ headers: List[str],
+) -> Union[List[str], None]:
+ """Check google sheet for duplicates."""
+ t.sleep(5)
+ serials = google_sheet.get_column(col_1)
+ modify_dates = google_sheet.get_column(col_2)
+ # Check for calibration time stamp.
+ if row[-1] is not None:
+ if len(row[-1]) > 0:
+ for serial, modify_date in zip(serials, modify_dates):
+ if row[col_1 - 1] == serial and row[col_2 - 1] == modify_date:
+ print(
+ f"Skipped row for instrument {serial}. Already on Google Sheet."
+ )
+ return None
+ read_robot_logs.write_to_sheets(sheet_location, google_sheet, row, headers)
+ print(f"Writing calibration for: {row[7]}")
+ return row
+
+
+def upload_calibration_offsets(
+ calibration: Dict[str, Any], storage_directory: str
+) -> None:
+ """Upload calibration data to google_sheet."""
+ # Common Headers
+ headers_beg = list(calibration.keys())[:4]
+ headers_end = list(["X", "Y", "Z", "lastModified"])
+ # INSTRUMENT SHEET
+ instrument_headers = (
+ headers_beg + list(calibration["Instruments"][0].keys())[:7] + headers_end
+ )
+ local_instrument_file = google_sheet_name + "-Instruments"
+ instrument_sheet_location = read_robot_logs.create_abr_data_sheet(
+ storage_directory, local_instrument_file, instrument_headers
+ )
+ # INSTRUMENTS DATA
+ instruments = calibration["Instruments"]
+ for instrument in range(len(instruments)):
+ one_instrument = instruments[instrument]
+ x = one_instrument["data"]["calibratedOffset"]["offset"].get("x", "")
+ y = one_instrument["data"]["calibratedOffset"]["offset"].get("y", "")
+ z = one_instrument["data"]["calibratedOffset"]["offset"].get("z", "")
+ modified = one_instrument["data"]["calibratedOffset"].get("last_modified", "")
+ instrument_row = (
+ list(calibration.values())[:4]
+ + list(one_instrument.values())[:7]
+ + list([x, y, z, modified])
+ )
+ check_for_duplicates(
+ instrument_sheet_location,
+ google_sheet_instruments,
+ 8,
+ 15,
+ instrument_row,
+ instrument_headers,
+ )
+
+ # MODULE SHEET
+ if len(calibration.get("Modules", "")) > 0:
+ module_headers = (
+ headers_beg + list(calibration["Modules"][0].keys())[:7] + headers_end
+ )
+ local_modules_file = google_sheet_name + "-Modules"
+ modules_sheet_location = read_robot_logs.create_abr_data_sheet(
+ storage_directory, local_modules_file, module_headers
+ )
+ # MODULES DATA
+ modules = calibration["Modules"]
+ for module in range(len(modules)):
+ one_module = modules[module]
+ x = one_module["moduleOffset"]["offset"].get("x", "")
+ y = one_module["moduleOffset"]["offset"].get("y", "")
+ z = one_module["moduleOffset"]["offset"].get("z", "")
+ modified = one_module["moduleOffset"].get("last_modified", "")
+ module_row = (
+ list(calibration.values())[:4]
+ + list(one_module.values())[:7]
+ + list([x, y, z, modified])
+ )
+ check_for_duplicates(
+ modules_sheet_location,
+ google_sheet_modules,
+ 8,
+ 15,
+ module_row,
+ module_headers,
+ )
+ # DECK SHEET
+ local_deck_file = google_sheet_name + "-Deck"
+ deck_headers = headers_beg + list(["pipetteCalibratedWith", "Slot"]) + headers_end
+ deck_sheet_location = read_robot_logs.create_abr_data_sheet(
+ storage_directory, local_deck_file, deck_headers
+ )
+ # DECK DATA
+ deck = calibration["Deck"]
+ slots = ["D3", "D1", "A1"]
+ deck_modified = deck["data"].get("lastModified", "")
+ pipette_calibrated_with = deck["data"].get("pipetteCalibratedWith", "")
+ for i in range(len(deck["data"]["matrix"])):
+ coords = deck["data"]["matrix"][i]
+ x = coords[0]
+ y = coords[1]
+ z = coords[2]
+ deck_row = list(calibration.values())[:4] + list(
+ [pipette_calibrated_with, slots[i], x, y, z, deck_modified]
+ )
+ check_for_duplicates(
+ deck_sheet_location, google_sheet_deck, 6, 10, deck_row, deck_headers
+ )
+
+
+if __name__ == "__main__":
+ """Get calibration logs."""
+ parser = argparse.ArgumentParser(
+ description="Pulls calibration logs from ABR robots."
+ )
+ parser.add_argument(
+ "storage_directory",
+ metavar="STORAGE_DIRECTORY",
+ type=str,
+ nargs=1,
+ help="Path to long term storage directory for run logs.",
+ )
+ parser.add_argument(
+ "folder_name",
+ metavar="FOLDER_NAME",
+ type=str,
+ nargs=1,
+ help="Google Drive folder name. Open desired folder and copy string after drive/folders/.",
+ )
+ parser.add_argument(
+ "google_sheet_name",
+ metavar="GOOGLE_SHEET_NAME",
+ type=str,
+ nargs=1,
+ help="Google sheet name.",
+ )
+ parser.add_argument(
+ "email", metavar="EMAIL", type=str, nargs=1, help="opentrons gmail."
+ )
+ parser.add_argument(
+ "ip_or_all",
+ metavar="IP_OR_ALL",
+ type=str,
+ nargs=1,
+ help="Enter 'ALL' to read IPs.json or type full IP address of 1 robot.",
+ )
+ args = parser.parse_args()
+ storage_directory = args.storage_directory[0]
+ folder_name = args.folder_name[0]
+ google_sheet_name = args.google_sheet_name[0]
+ ip_or_all = args.ip_or_all[0]
+ email = args.email[0]
+ # Connect to google drive.
+ credentials_path = os.path.join(storage_directory, "credentials.json")
+ if not os.path.exists(credentials_path):
+ print(f"Add credentials.json file to: {storage_directory}.")
+ sys.exit()
+ google_drive = google_drive_tool.google_drive(credentials_path, folder_name, email)
+ # Connect to google sheet
+ google_sheet_instruments = google_sheets_tool.google_sheet(
+ credentials_path, google_sheet_name, 0
+ )
+ google_sheet_modules = google_sheets_tool.google_sheet(
+ credentials_path, google_sheet_name, 1
+ )
+ google_sheet_deck = google_sheets_tool.google_sheet(
+ credentials_path, google_sheet_name, 2
+ )
+ ip_json_file = os.path.join(storage_directory, "IPs.json")
+ try:
+ ip_file = json.load(open(ip_json_file))
+ except FileNotFoundError:
+ print(f"Add .json file with robot IPs to: {storage_directory}.")
+ sys.exit()
+
+ if ip_or_all == "ALL":
+ ip_address_list = ip_file["ip_address_list"]
+ for ip in ip_address_list:
+ saved_file_path, calibration = read_robot_logs.get_calibration_offsets(
+ ip, storage_directory
+ )
+ upload_calibration_offsets(calibration, storage_directory)
+ else:
+ saved_file_path, calibration = read_robot_logs.get_calibration_offsets(
+ ip_or_all, storage_directory
+ )
+ upload_calibration_offsets(calibration, storage_directory)
+
+ google_drive.upload_missing_files(storage_directory)
diff --git a/abr-testing/abr_testing/data_collection/abr_google_drive.py b/abr-testing/abr_testing/data_collection/abr_google_drive.py
new file mode 100644
index 00000000000..f8a2dc8fa4f
--- /dev/null
+++ b/abr-testing/abr_testing/data_collection/abr_google_drive.py
@@ -0,0 +1,186 @@
+"""Read ABR run logs from google drive."""
+import argparse
+import os
+import sys
+import json
+from datetime import datetime, timedelta
+from abr_testing.data_collection import read_robot_logs
+from typing import Set, Dict, Any, Tuple, List, Union
+from abr_testing.automation import google_drive_tool, google_sheets_tool
+
+
+def get_modules(file_results: Dict[str, str]) -> Dict[str, Any]:
+ """Get module IPs and models from run log."""
+ modList = (
+ "heaterShakerModuleV1",
+ "temperatureModuleV2",
+ "magneticBlockV1",
+ "thermocyclerModuleV2",
+ )
+ all_modules = {key: "" for key in modList}
+ for module in file_results.get("modules", []):
+ if isinstance(module, dict) and module.get("model") in modList:
+ try:
+ all_modules[module["model"]] = module["serialNumber"]
+ except KeyError:
+ all_modules[module["model"]] = "EMPTYSN"
+
+ return all_modules
+
+
+def create_data_dictionary(
+ runs_to_save: Union[Set[str], str],
+ storage_directory: str,
+ issue_url: str,
+) -> Tuple[Dict[Any, Dict[str, Any]], List[str]]:
+ """Pull data from run files and format into a dictionary."""
+ runs_and_robots: Dict[Any, Dict[str, Any]] = {}
+ headers: List[str] = []
+ for filename in os.listdir(storage_directory):
+ file_path = os.path.join(storage_directory, filename)
+ if file_path.endswith(".json"):
+ with open(file_path) as file:
+ file_results = json.load(file)
+ else:
+ continue
+ if not isinstance(file_results, dict):
+ continue
+ run_id = file_results.get("run_id", "NaN")
+ if run_id in runs_to_save:
+ robot = file_results.get("robot_name")
+ protocol_name = file_results["protocol"]["metadata"].get("protocolName", "")
+ software_version = file_results.get("API_Version", "")
+ left_pipette = file_results.get("left", "")
+ right_pipette = file_results.get("right", "")
+ extension = file_results.get("extension", "")
+ (
+ num_of_errors,
+ error_type,
+ error_code,
+ error_instrument,
+ error_level,
+ ) = read_robot_logs.get_error_info(file_results)
+
+ all_modules = get_modules(file_results)
+
+ start_time_str, complete_time_str, start_date, run_time_min = (
+ "",
+ "",
+ "",
+ 0.0,
+ )
+ try:
+ start_time = datetime.strptime(
+ file_results.get("startedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
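+ # Shift UTC timestamps to the lab's local time (UTC-5), matching the timezone
+ # adjustment used by the ABR environment sensor script.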
+ adjusted_start_time = start_time - timedelta(hours=5)
+ start_date = str(adjusted_start_time.date())
+ start_time_str = str(adjusted_start_time).split("+")[0]
+ complete_time = datetime.strptime(
+ file_results.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ adjusted_complete_time = complete_time - timedelta(hours=5)
+ complete_time_str = str(adjusted_complete_time).split("+")[0]
+ run_time = complete_time - start_time
+ run_time_min = run_time.total_seconds() / 60
+ except ValueError:
+ pass # Handle datetime parsing errors if necessary
+
+ if run_time_min > 0:
+ row = {
+ "Robot": robot,
+ "Run_ID": run_id,
+ "Protocol_Name": protocol_name,
+ "Software Version": software_version,
+ "Date": start_date,
+ "Start_Time": start_time_str,
+ "End_Time": complete_time_str,
+ "Run_Time (min)": run_time_min,
+ "Errors": num_of_errors,
+ "Error_Code": error_code,
+ "Error_Type": error_type,
+ "Error_Instrument": error_instrument,
+ "Error_Level": error_level,
+ "Left Mount": left_pipette,
+ "Right Mount": right_pipette,
+ "Extension": extension,
+ }
+ tc_dict = read_robot_logs.thermocycler_commands(file_results)
+ hs_dict = read_robot_logs.hs_commands(file_results)
+ tm_dict = read_robot_logs.temperature_module_commands(file_results)
+ notes = {"Note1": "", "Jira Link": issue_url}
+ row_2 = {
+ **row,
+ **all_modules,
+ **notes,
+ **hs_dict,
+ **tm_dict,
+ **tc_dict,
+ }
+ headers = list(row_2.keys())
+ runs_and_robots[run_id] = row_2
+ else:
+ continue
+ return runs_and_robots, headers
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Read run logs on google drive.")
+ parser.add_argument(
+ "storage_directory",
+ metavar="STORAGE_DIRECTORY",
+ type=str,
+ nargs=1,
+ help="Path to long term storage directory for run logs.",
+ )
+ parser.add_argument(
+ "folder_name",
+ metavar="FOLDER_NAME",
+ type=str,
+ nargs=1,
+ help="Google Drive folder name. Open desired folder and copy string after drive/folders/.",
+ )
+ parser.add_argument(
+ "google_sheet_name",
+ metavar="GOOGLE_SHEET_NAME",
+ type=str,
+ nargs=1,
+ help="Google sheet name.",
+ )
+ parser.add_argument(
+ "email", metavar="EMAIL", type=str, nargs=1, help="opentrons gmail."
+ )
+ args = parser.parse_args()
+ folder_name = args.folder_name[0]
+ storage_directory = args.storage_directory[0]
+ google_sheet_name = args.google_sheet_name[0]
+ email = args.email[0]
+ credentials_path = os.path.join(storage_directory, "credentials.json")
+ if not os.path.exists(credentials_path):
+ print(f"Add credentials.json file to: {storage_directory}.")
+ sys.exit()
+ google_drive = google_drive_tool.google_drive(credentials_path, folder_name, email)
+ # Get run ids on google sheet
+ google_sheet = google_sheets_tool.google_sheet(
+ credentials_path, google_sheet_name, 0
+ )
+ google_sheet_lpc = google_sheets_tool.google_sheet(credentials_path, "ABR-LPC", 0)
+
+ run_ids_on_gs = google_sheet.get_column(2)
+ run_ids_on_gs = set(run_ids_on_gs)
+
+ # Uploads files that are not in google drive directory
+ google_drive.upload_missing_files(storage_directory)
+
+ # Run ids in google_drive_folder
+ run_ids_on_gd = read_robot_logs.get_run_ids_from_google_drive(google_drive)
+ missing_runs_from_gs = read_robot_logs.get_unseen_run_ids(
+ run_ids_on_gd, run_ids_on_gs
+ )
+ # Add missing runs to google sheet
+ runs_and_robots, headers = create_data_dictionary(
+ missing_runs_from_gs, storage_directory, ""
+ )
+ read_robot_logs.write_to_local_and_google_sheet(
+ runs_and_robots, storage_directory, google_sheet_name, google_sheet, headers
+ )
diff --git a/abr-testing/abr_testing/data_collection/abr_lpc.py b/abr-testing/abr_testing/data_collection/abr_lpc.py
new file mode 100644
index 00000000000..dd880d09c37
--- /dev/null
+++ b/abr-testing/abr_testing/data_collection/abr_lpc.py
@@ -0,0 +1 @@
+"""Get Unique LPC Values from Run logs."""
diff --git a/abr-testing/abr_testing/data_collection/abr_robot_error.py b/abr-testing/abr_testing/data_collection/abr_robot_error.py
new file mode 100644
index 00000000000..231b8077eed
--- /dev/null
+++ b/abr-testing/abr_testing/data_collection/abr_robot_error.py
@@ -0,0 +1,216 @@
+"""Create ticket for robot with error."""
+from typing import List, Tuple
+from abr_testing.data_collection import read_robot_logs, abr_google_drive, get_run_logs
+import requests
+import argparse
+from abr_testing.automation import jira_tool, google_sheets_tool, google_drive_tool
+import shutil
+import os
+import subprocess
+import json
+import sys
+import gspread # type: ignore[import]
+
+
+def get_error_runs_from_robot(ip: str) -> List[str]:
+ """Get runs that have errors from robot."""
+ error_run_ids = []
+ response = requests.get(
+ f"http://{ip}:31950/runs", headers={"opentrons-version": "3"}
+ )
+ run_data = response.json()
+ run_list = run_data["data"]
+ for run in run_list:
+ run_id = run["id"]
+ num_of_errors = len(run["errors"])
+ if not run["current"] and num_of_errors > 0:
+ error_run_ids.append(run_id)
+ return error_run_ids
+
+
+def get_error_info_from_robot(
+ ip: str, one_run: str, storage_directory: str
+) -> Tuple[str, str, str, List[str], str, str]:
+ """Get error information from robot to fill out ticket."""
+ description = dict()
+ # get run information
+ results = get_run_logs.get_run_data(one_run, ip)
+ # save run information to local directory as .json file
+ saved_file_path = read_robot_logs.save_run_log_to_json(
+ ip, results, storage_directory
+ )
+ # Error Printout
+ (
+ num_of_errors,
+ error_type,
+ error_code,
+ error_instrument,
+ error_level,
+ ) = read_robot_logs.get_error_info(results)
+ # JIRA Ticket Fields
+ failure_level = "Level " + str(error_level) + " Failure"
+ components = [failure_level, "Flex-RABR"]
+ affects_version = results["API_Version"]
+ parent = results.get("robot_name", "")
+ print(parent)
+ summary = parent + "_" + str(one_run) + "_" + str(error_code) + "_" + error_type
+ # Description of error
+ description["protocol_name"] = results["protocol"]["metadata"].get(
+ "protocolName", ""
+ )
+ description["error"] = " ".join([error_code, error_type, error_instrument])
+ description["protocol_step"] = list(results["commands"])[-1]
+ description["right_mount"] = results.get("right", "No attachment")
+ description["left_mount"] = results.get("left", "No attachment")
+ description["gripper"] = results.get("extension", "No attachment")
+ all_modules = abr_google_drive.get_modules(results)
+ whole_description = {**description, **all_modules}
+ whole_description_str = (
+ "{"
+ + "\n".join("{!r}: {!r},".format(k, v) for k, v in whole_description.items())
+ + "}"
+ )
+
+ return (
+ summary,
+ parent,
+ affects_version,
+ components,
+ whole_description_str,
+ saved_file_path,
+ )
+
+
+if __name__ == "__main__":
+ """Create ticket for specified robot."""
+ parser = argparse.ArgumentParser(description="Pulls run logs from ABR robots.")
+ parser.add_argument(
+ "storage_directory",
+ metavar="STORAGE_DIRECTORY",
+ type=str,
+ nargs=1,
+ help="Path to long term storage directory for run logs.",
+ )
+ parser.add_argument(
+ "jira_api_token",
+ metavar="JIRA_API_TOKEN",
+ type=str,
+ nargs=1,
+ help="JIRA API Token. Get from https://id.atlassian.com/manage-profile/security.",
+ )
+ parser.add_argument(
+ "email",
+ metavar="EMAIL",
+ type=str,
+ nargs=1,
+ help="Email connected to JIRA account.",
+ )
+ # TODO: write function to get reporter_id from email.
+ parser.add_argument(
+ "reporter_id",
+ metavar="REPORTER_ID",
+ type=str,
+ nargs=1,
+ help="JIRA Reporter ID.",
+ )
+ # TODO: improve help comment on jira board id.
+ parser.add_argument(
+ "board_id",
+ metavar="BOARD_ID",
+ type=str,
+ nargs=1,
+ help="JIRA Board ID. RABR is 217",
+ )
+ args = parser.parse_args()
+ storage_directory = args.storage_directory[0]
+ ip = str(input("Enter Robot IP: "))
+ url = "https://opentrons.atlassian.net"
+ api_token = args.jira_api_token[0]
+ email = args.email[0]
+ board_id = args.board_id[0]
+ reporter_id = args.reporter_id[0]
+ ticket = jira_tool.JiraTicket(url, api_token, email)
+ try:
+ error_runs = get_error_runs_from_robot(ip)
+ except requests.exceptions.InvalidURL:
+ print("Invalid IP address.")
+ sys.exit()
+ one_run = error_runs[-1] # Most recent run with error.
+ (
+ summary,
+ robot,
+ affects_version,
+ components,
+ whole_description_str,
+ run_log_file_path,
+ ) = get_error_info_from_robot(ip, one_run, storage_directory)
+ # Get Calibration Data
+ saved_file_path_calibration, calibration = read_robot_logs.get_calibration_offsets(
+ ip, storage_directory
+ )
+ file_paths = read_robot_logs.get_logs(storage_directory, ip)
+ print(f"Making ticket for run: {one_run} on robot {robot}.")
+ # TODO: make this an argument, or see if board_id can be used instead.
+ project_key = "RABR"
+ parent_key = project_key + "-" + robot[-1]
+ # TODO: read board to see if ticket for run id already exists.
+ # CREATE TICKET
+ issue_key = ticket.create_ticket(
+ summary,
+ whole_description_str,
+ project_key,
+ reporter_id,
+ "Bug",
+ "Medium",
+ components,
+ affects_version,
+ parent_key,
+ )
+ # OPEN TICKET
+ issue_url = ticket.open_issue(issue_key)
+ # MOVE FILES TO ERROR FOLDER.
+ error_files = [saved_file_path_calibration, run_log_file_path] + file_paths
+ error_folder_path = os.path.join(storage_directory, issue_key)
+ os.makedirs(error_folder_path, exist_ok=True)
+ for source_file in error_files:
+ destination_file = os.path.join(
+ error_folder_path, os.path.basename(source_file)
+ )
+ shutil.move(source_file, destination_file)
+ # OPEN FOLDER DIRECTORY
+ subprocess.Popen(["explorer", error_folder_path])
+ # CONNECT TO GOOGLE DRIVE
+ credentials_path = os.path.join(storage_directory, "credentials.json")
+ google_sheet_name = "ABR-run-data"
+ try:
+ google_drive = google_drive_tool.google_drive(
+ credentials_path,
+ "1Cvej0eadFOTZr9ILRXJ0Wg65ymOtxL4m",
+ "rhyann.clarke@opentrons.ocm",
+ )
+ print("Connected to google drive.")
+ except json.decoder.JSONDecodeError:
+ print(
+ "Credential file is damaged. Get from https://console.cloud.google.com/apis/credentials"
+ )
+ sys.exit()
+ # CONNECT TO GOOGLE SHEET
+ try:
+ google_sheet = google_sheets_tool.google_sheet(
+ credentials_path, google_sheet_name, 0
+ )
+ print(f"Connected to google sheet: {google_sheet_name}")
+ except gspread.exceptions.APIError:
+ print("ERROR: Check google sheet name. Check credentials file.")
+ sys.exit()
+ # WRITE ERRORED RUN TO GOOGLE SHEET
+ error_run_log = os.path.join(error_folder_path, os.path.basename(run_log_file_path))
+ google_drive.upload_file(error_run_log)
+ run_id = os.path.basename(error_run_log).split("_")[1].split(".")[0]
+ runs_and_robots, headers = abr_google_drive.create_data_dictionary(
+ run_id, error_folder_path, issue_url
+ )
+ read_robot_logs.write_to_local_and_google_sheet(
+ runs_and_robots, storage_directory, google_sheet_name, google_sheet, headers
+ )
+ print("Wrote run to ABR-run-data")
diff --git a/abr-testing/abr_testing/data_collection/error_levels.csv b/abr-testing/abr_testing/data_collection/error_levels.csv
new file mode 100644
index 00000000000..c2f54c9f09e
--- /dev/null
+++ b/abr-testing/abr_testing/data_collection/error_levels.csv
@@ -0,0 +1,52 @@
+Prefix,Error Code,Description,Categories,Level of Failure,
+1,1000,Communication Error,Hardware communication failed,2,
+1,1001,Canbus Communication Error,Hardware communication failed,2,
+1,1002,Internal USB Communication Error,Hardware communication failed,2,
+1,1003,Module Communication Error,Hardware communication failed,2,
+1,1004,Command Timed Out,Hardware communication failed,3,
+1,1005,Firmware Update Failed,Hardware communication failed,2,
+1,1006,Internal Message Format Error,Hardware communication failed,2,
+1,1007,Canbus Configuration Error,Hardware communication failed,2,
+1,1008,Canbus Bus Error,Hardware communication failed,2,
+2,2000,Robotics Control Error,A Robot Action Failed,3,
+2,2001,Motion Failed,A Robot Action Failed,4,
+2,2002,Homing Failed,A Robot Action Failed,4,
+2,2003,Stall or Collision Detected,A Robot Action Failed,3,
+2,2004,Motion Planning Failed,A Robot Action Failed,3,
+2,2005,Position Estimation Invalid,A Robot Action Failed,3,
+2,2006,Move Condition Not Met,A Robot Action Failed,3,
+2,2007,Calibration Structure Not Found,A Robot Action Failed,4,
+2,2008,Edge Not Found,A Robot Action Failed,3,
+2,2009,Early Capacitive Sense Trigger,A Robot Action Failed,4,
+2,2010,Inaccurate Non Contact Sweep,A Robot Action Failed,3,
+2,2011,Misaligned Gantry,A Robot Action Failed,3,
+2,2012,Unmatched Tip Presence States,A Robot Action Failed,4,
+2,2013,Position Unknown,A Robot Action Failed,4,
+2,2014,Execution Cancelled,A Robot Action Failed,4,
+2,2015,Failed Gripper Pickup Error,A Robot Action Failed,3,
+3,3000,Robotics Interaction Error,A Robot Interaction Failed,3,
+3,3001,Labware Dropped,A Robot Interaction Failed,4,
+3,3002,Labware Not Picked Up,A Robot Interaction Failed,4,
+3,3003,Tip Pickup Failed,A Robot Interaction Failed,4,
+3,3004,Tip Drop Failed,A Robot Interaction Failed,4,
+3,3005,Unexpected Tip Removal,A Robot Interaction Failed,4,
+3,3006,Pipette Overpressure,A Robot Interaction Failed,3,
+3,3008,E-Stop Activated,A Robot Interaction Failed,5,Not an error
+3,3009,E-Stop Not Present,A Robot Interaction Failed,5,
+3,3010,Pipette Not Present,A Robot Interaction Failed,5,
+3,3011,Gripper Not Present,A Robot Interaction Failed,5,
+3,3012,Unexpected Tip Attach,A Robot Interaction Failed,4,
+3,3013,Firmware Update Required,A Robot Interaction Failed,5,Not an error
+3,3014,Invalid ID Actuator,A Robot Interaction Failed,3,
+3,3015,Module Not Present,A Robot Interaction Failed,5,Not an error
+3,3016,Invalid Instrument Data,A Robot Interaction Failed,3,
+3,3017,Invalid Liquid Class Name,A Robot Interaction Failed,5,Not an error
+3,3018,Tip Detector Not Found,A Robot Interaction Failed,3,
+4,4000,General Error,A Software Error Occurred,4,How severe does a general error get?
+4,4001,Robot In Use,A Software Error Occurred,5,Not an error
+4,4002,API Removed,A Software Error Occurred,5,Used an old app on a new robot
+4,4003,Not Supported On Robot Type,A Software Error Occurred,5,Not an error
+4,4004,Command Precondition Violated,A Software Error Occurred,5,Not an error
+4,4005,Command Parameter Limit Violated,A Software Error Occurred,5,Not an error
+4,4006,Invalid Protocol Data,A Software Error Occurred,5,Not an error
+4,4007,API Misconfiguration,A Software Error Occurred,5,Not an error
\ No newline at end of file
diff --git a/abr-testing/abr_testing/data_collection/error_levels.py b/abr-testing/abr_testing/data_collection/error_levels.py
new file mode 100644
index 00000000000..2eb3d6f9d93
--- /dev/null
+++ b/abr-testing/abr_testing/data_collection/error_levels.py
@@ -0,0 +1,8 @@
+"""ABR Error Levels.
+
+This module exposes the path to the CSV file that categorizes error codes by failure level.
+"""
+
+import os
+
+ERROR_LEVELS_PATH = os.path.join(os.path.dirname(__file__), "error_levels.csv")
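+# Consumers (for example read_robot_logs.get_error_info) read this CSV to map an
+# error code to its "Level of Failure" column.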
diff --git a/abr-testing/abr_testing/data_collection/get_run_logs.py b/abr-testing/abr_testing/data_collection/get_run_logs.py
new file mode 100644
index 00000000000..70b0e3f680a
--- /dev/null
+++ b/abr-testing/abr_testing/data_collection/get_run_logs.py
@@ -0,0 +1,143 @@
+"""ABR Run Log Pull."""
+from typing import Set, Dict, Any
+import argparse
+import os
+import json
+import requests
+import sys
+from abr_testing.data_collection import read_robot_logs
+from abr_testing.automation import google_drive_tool
+
+
+def get_run_ids_from_robot(ip: str) -> Set[str]:
+ """Get all completed runs from each robot."""
+ run_ids = set()
+ response = requests.get(
+ f"http://{ip}:31950/runs", headers={"opentrons-version": "3"}
+ )
+ run_data = response.json()
+ run_list = run_data["data"]
+ for run in run_list:
+ run_id = run["id"]
+ if not run["current"]:
+ run_ids.add(run_id)
+ return run_ids
+
+
+def get_run_data(one_run: Any, ip: str) -> Dict[str, Any]:
+ """Use http requests to get command, health, and protocol data from robot."""
+ response = requests.get(
+ f"http://{ip}:31950/runs/{one_run}/commands",
+ headers={"opentrons-version": "3"},
+ params={"cursor": 0, "pageLength": 0},
+ )
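+ # The first request uses pageLength 0 only to learn the total command count;
+ # the loop below then pages through the commands 100 at a time.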
+ data = response.json()
+ command_count = data["meta"]["totalLength"]
+ page_length = 100
+ commands = list()
+ run = dict()
+ for cursor in range(0, command_count, page_length):
+ response = requests.get(
+ f"http://{ip}:31950/runs/{one_run}/commands",
+ headers={"opentrons-version": "3"},
+ params={"cursor": cursor, "pageLength": page_length},
+ )
+ command_data = response.json()
+ commands.extend(command_data["data"])
+ run["commands"] = commands
+ response = requests.get(
+ f"http://{ip}:31950/runs/{one_run}", headers={"opentrons-version": "3"}
+ )
+ run_meta_data = response.json()
+ protocol_id = run_meta_data["data"]["protocolId"]
+ run.update(run_meta_data["data"])
+ response = requests.get(
+ f"http://{ip}:31950/protocols/{protocol_id}", headers={"opentrons-version": "3"}
+ )
+ protocol_data = response.json()
+ run["protocol"] = protocol_data["data"]
+ response = requests.get(
+ f"http://{ip}:31950/health", headers={"opentrons-version": "3"}
+ )
+ health_data = response.json()
+ run["robot_name"] = health_data.get("name", "")
+ run["API_Version"] = health_data.get("api_version", "")
+ run["robot_serial"] = health_data.get("robot_serial", "")
+ run["run_id"] = one_run
+
+ # Instruments Attached
+ response = requests.get(
+ f"http://{ip}:31950/instruments", headers={"opentrons-version": "3"}
+ )
+ instrument_data = response.json()
+ for instrument in instrument_data["data"]:
+ run[instrument["mount"]] = instrument["serialNumber"]
+ return run
+
+
+def save_runs(runs_to_save: Set[str], ip: str, storage_directory: str) -> Set[str]:
+ """Saves runs to user given storage directory."""
+ saved_file_paths = set()
+ for a_run in runs_to_save:
+ data = get_run_data(a_run, ip)
+ saved_file_path = read_robot_logs.save_run_log_to_json(
+ ip, data, storage_directory
+ )
+ saved_file_paths.add(saved_file_path)
+ print(f"Saved {len(runs_to_save)} run(s) from robot with IP address {ip}.")
+ return saved_file_paths
+
+
+def get_all_run_logs(storage_directory: str) -> None:
+ """GET ALL RUN LOGS.
+
+ Connect to each ABR robot to read run log data.
+ Read each robot's list of unique run log IDs and compare them to all IDs in storage.
+ For any ID that is not in storage, download the run log and save it to storage.
+ """
+ ip_json_file = os.path.join(storage_directory, "IPs.json")
+ try:
+ ip_file = json.load(open(ip_json_file))
+ except FileNotFoundError:
+ print(f"Add .json file with robot IPs to: {storage_directory}.")
+ sys.exit()
+ ip_address_list = ip_file["ip_address_list"]
+ runs_from_storage = read_robot_logs.get_run_ids_from_google_drive(google_drive)
+ for ip in ip_address_list:
+ runs = get_run_ids_from_robot(ip)
+ runs_to_save = read_robot_logs.get_unseen_run_ids(runs, runs_from_storage)
+ save_runs(runs_to_save, ip, storage_directory)
+ google_drive.upload_missing_files(storage_directory)
+
+
+if __name__ == "__main__":
+ """Get run logs."""
+ parser = argparse.ArgumentParser(description="Pulls run logs from ABR robots.")
+ parser.add_argument(
+ "storage_directory",
+ metavar="STORAGE_DIRECTORY",
+ type=str,
+ nargs=1,
+ help="Path to long term storage directory for run logs.",
+ )
+ parser.add_argument(
+ "folder_name",
+ metavar="FOLDER_NAME",
+ type=str,
+ nargs=1,
+ help="Google Drive folder name. Open desired folder and copy string after drive/folders/.",
+ )
+ parser.add_argument(
+ "email", metavar="EMAIL", type=str, nargs=1, help="opentrons gmail."
+ )
+ args = parser.parse_args()
+ storage_directory = args.storage_directory[0]
+ folder_name = args.folder_name[0]
+ email = args.email[0]
+ credentials_path = os.path.join(storage_directory, "credentials.json")
+ if not os.path.exists(credentials_path):
+ print(f"Add credentials.json file to: {storage_directory}.")
+ sys.exit()
+ google_drive = google_drive_tool.google_drive(credentials_path, folder_name, email)
+ get_all_run_logs(storage_directory)
diff --git a/abr-testing/abr_testing/data_collection/module_ramp_rates.py b/abr-testing/abr_testing/data_collection/module_ramp_rates.py
new file mode 100644
index 00000000000..2155e79dc21
--- /dev/null
+++ b/abr-testing/abr_testing/data_collection/module_ramp_rates.py
@@ -0,0 +1,148 @@
+"""Get ramp rates of modules."""
+from abr_testing.automation import google_sheets_tool
+from abr_testing.data_collection import read_robot_logs
+import argparse
+import os
+import sys
+import json
+from datetime import datetime
+from typing import Dict, Any
+import requests
+
+
+def ramp_rate(file_results: Dict[str, Any]) -> Dict[int, float]:
+ """Get ramp rates."""
+ i = 0
+ commands = file_results["commands"]
+ for command in commands:
+ commandType = command["commandType"]
+ if (
+ commandType == "thermocycler/setTargetBlockTemperature"
+ or commandType == "temperatureModule/setTargetTemperature"
+ or commandType == "heaterShaker/setTargetTemperature"
+ ):
+ temp = command["params"].get("celsius", 0.0)
+ if (
+ commandType == "thermocycler/waitForBlockTemperature"
+ or commandType == "temperatureModule/waitForTemperature"
+ or commandType == "heaterShaker/waitForTemperature"
+ ):
+ start_time = datetime.strptime(
+ command.get("startedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ end_time = datetime.strptime(
+ command.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ duration = (end_time - start_time).total_seconds()
+ i += 1
+ temps_and_durations[duration] = temp
+ ramp_rates = {}
+ times = list(temps_and_durations.keys())
+ for i in range(len(times) - 1):
+ time1 = times[i]
+ time2 = times[i + 1]
+ temp1 = temps_and_durations[time1]
+ temp2 = temps_and_durations[time2]
+ ramp_rate = (temp2 - temp1) / (time2)
+ ramp_rates[i] = ramp_rate
+ return ramp_rates
+
+
+if __name__ == "__main__":
+ # SCRIPT ARGUMENTS
+ parser = argparse.ArgumentParser(description="Read run logs on google drive.")
+ parser.add_argument(
+ "storage_directory",
+ metavar="STORAGE_DIRECTORY",
+ type=str,
+ nargs=1,
+ help="Path to long term storage directory for run logs.",
+ )
+ parser.add_argument(
+ "google_sheet_name",
+ metavar="GOOGLE_SHEET_NAME",
+ type=str,
+ nargs=1,
+ help="Google sheet name.",
+ )
+ parser.add_argument(
+ "email", metavar="EMAIL", type=str, nargs=1, help="opentrons gmail."
+ )
+ args = parser.parse_args()
+ storage_directory = args.storage_directory[0]
+ google_sheet_name = args.google_sheet_name[0]
+ # FIND CREDENTIALS FILE
+ credentials_path = os.path.join(storage_directory, "credentials.json")
+ if not os.path.exists(credentials_path):
+ print(f"Add credentials.json file to: {storage_directory}.")
+ sys.exit()
+ # CONNECT TO GOOGLE SHEET
+ google_sheet = google_sheets_tool.google_sheet(
+ credentials_path, google_sheet_name, 1
+ )
+ run_ids_on_sheet = google_sheet.get_column(2)
+ runs_and_robots = {}
+ for filename in os.listdir(storage_directory):
+ file_path = os.path.join(storage_directory, filename)
+ if file_path.endswith(".json"):
+ with open(file_path) as file:
+ file_results = json.load(file)
+ else:
+ continue
+ # CHECK if file is ramp rate run
+ run_id = file_results.get("run_id", None)
+ temps_and_durations: Dict[float, float] = dict()
+ if run_id is not None and run_id not in run_ids_on_sheet:
+
+ ramp_rates = ramp_rate(file_results)
+ protocol_name = file_results["protocol"]["metadata"].get("protocolName", "")
+ if "Ramp Rate" in protocol_name:
+ ip = filename.split("_")[0]
+ if len(ramp_rates) > 1:
+ cooling_ramp_rate = abs(min(ramp_rates.values()))
+ heating_ramp_rate = abs(max(ramp_rates.values()))
+ start_time = datetime.strptime(
+ file_results.get("startedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ start_date = str(start_time.date())
+ module_serial_number = file_results["modules"][0].get(
+ "serialNumber", "NaN"
+ )
+ try:
+ response = requests.get(
+ f"http://{ip}:31950/modules",
+ headers={"opentrons-version": "3"},
+ )
+ modules = response.json()
+ firmwareVersion = "NaN"
+ for module in modules["data"]:
+ if module["serialNumber"] == module_serial_number:
+ firmwareVersion = module["firmwareVersion"]
+ except requests.exceptions.ConnectionError:
+ firmwareVersion = "NaN"
+ row = {
+ "Robot": file_results.get("robot_name", ""),
+ "Run_ID": run_id,
+ "Protocol_Name": file_results["protocol"]["metadata"].get(
+ "protocolName", ""
+ ),
+ "Software Version": file_results.get("API_Version", ""),
+ "Firmware Version": firmwareVersion,
+ "Date": start_date,
+ "Serial Number": module_serial_number,
+ "Approx. Average Heating Ramp Rate (C/s)": heating_ramp_rate,
+ "Approx. Average Cooling Ramp Rate (C/s)": cooling_ramp_rate,
+ }
+ headers = list(row.keys())
+ runs_and_robots[run_id] = row
+ read_robot_logs.write_to_local_and_google_sheet(
+ runs_and_robots,
+ storage_directory,
+ google_sheet_name,
+ google_sheet,
+ headers,
+ )
+ else:
+ continue
diff --git a/abr-testing/abr_testing/data_collection/read_robot_logs.py b/abr-testing/abr_testing/data_collection/read_robot_logs.py
new file mode 100644
index 00000000000..7539e913057
--- /dev/null
+++ b/abr-testing/abr_testing/data_collection/read_robot_logs.py
@@ -0,0 +1,498 @@
+"""ABR Read Robot Logs.
+
+This library has functions to download logs from robots, extract the wanted information,
+and upload it to a google sheet, using credentials and the google_sheets_tool module
+saved in a local directory.
+"""
+import csv
+from datetime import datetime
+import os
+from abr_testing.data_collection.error_levels import ERROR_LEVELS_PATH
+from typing import List, Dict, Any, Tuple, Set
+import time as t
+import json
+import requests
+import sys
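+# Minimal usage sketch (paths and values below are illustrative only):
+#   headers = ["Robot", "Run_ID"]
+#   csv_path = create_abr_data_sheet("/data/abr", "ABR-run-data", headers)
+#   write_to_sheets(csv_path, google_sheet, ["DVT1ABR1", "abc123"], headers)
+# where google_sheet is a google_sheets_tool.google_sheet instance.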
+
+
+def lpc_data(file_results: Dict[str, Any], protocol_info: Dict) -> List[Dict[str, Any]]:
+ """Get labware offsets from one run log."""
+ offsets = file_results.get("labwareOffsets", "")
+ all_offsets: List[Dict[str, Any]] = []
+ if len(offsets) > 0:
+ for offset in offsets:
+ labware_type = offset.get("definitionUri", "")
+ slot = offset["location"].get("slotName", "")
+ module_location = offset["location"].get("moduleModel", "")
+ adapter = offset["location"].get("definitionUri", "")
+ x_offset = offset["vector"].get("x", 0.0)
+ y_offset = offset["vector"].get("y", 0.0)
+ z_offset = offset["vector"].get("z", 0.0)
+ created_at = offset.get("createdAt", "")
+ row = {
+ "createdAt": created_at,
+ "Labware Type": labware_type,
+ "Slot": slot,
+ "Module": module_location,
+ "Adapter": adapter,
+ "X": x_offset,
+ "Y": y_offset,
+ "Z": z_offset,
+ }
+ row2 = {**protocol_info, **row}
+ all_offsets.append(row2)
+ return all_offsets
+
+
+def command_time(command: Dict[str, str]) -> Tuple[float, float]:
+ """Calculate total create and complete time per command."""
+ try:
+ create_time = datetime.strptime(
+ command.get("createdAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ start_time = datetime.strptime(
+ command.get("startedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ complete_time = datetime.strptime(
+ command.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ create_to_start = (start_time - create_time).total_seconds()
+ start_to_complete = (complete_time - start_time).total_seconds()
+ except ValueError:
+ create_to_start = 0
+ start_to_complete = 0
+ return create_to_start, start_to_complete
+
+
+def hs_commands(file_results: Dict[str, Any]) -> Dict[str, float]:
+ """Gets total latch engagements, homes, rotations and total on time (sec) for heater shaker."""
+ # TODO: modify for cases that have more than 1 heater shaker.
+ commandData = file_results.get("commands", "")
+ hs_latch_count: float = 0.0
+ hs_temp: float = 0.0
+ hs_home_count: float = 0.0
+ hs_speed: float = 0.0
+ hs_rotations: Dict[str, float] = dict()
+ hs_temps: Dict[float, float] = dict()
+ temp_time = None
+ shake_time = None
+ deactivate_time = None
+ for command in commandData:
+ commandType = command["commandType"]
+ # Heatershaker
+ # Latch count
+ if (
+ commandType == "heaterShaker/closeLabwareLatch"
+ or commandType == "heaterShaker/openLabwareLatch"
+ ):
+ hs_latch_count += 1
+ # Home count
+ elif commandType == "heaterShaker/deactivateShaker":
+ hs_home_count += 1
+ shake_deactivate_time = datetime.strptime(
+ command.get("startedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ if shake_time is not None and shake_deactivate_time > shake_time:
+ shake_duration = (shake_deactivate_time - shake_time).total_seconds()
+ hs_rotations[hs_speed] = hs_rotations.get(hs_speed, 0.0) + (
+ (hs_speed * shake_duration) / 60
+ )
+ elif commandType == "heaterShaker/deactivateHeater":
+ deactivate_time = datetime.strptime(
+ command.get("startedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ if temp_time is not None and deactivate_time > temp_time:
+ temp_duration = (deactivate_time - temp_time).total_seconds()
+ hs_temps[hs_temp] = hs_temps.get(hs_temp, 0.0) + temp_duration
+ # of Rotations
+ elif commandType == "heaterShaker/setAndWaitForShakeSpeed":
+ hs_speed = command["params"]["rpm"]
+ shake_time = datetime.strptime(
+ command.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ # On Time
+ elif commandType == "heaterShaker/setTargetTemperature":
+ # if heater shaker temp is not deactivated.
+ hs_temp = command["params"]["celsius"]
+ temp_time = datetime.strptime(
+ command.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ if temp_time is not None and deactivate_time is None:
+ # If heater shaker module is not deactivated, protocol completedAt time stamp used.
+ protocol_end = datetime.strptime(
+ file_results.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ temp_duration = (protocol_end - temp_time).total_seconds()
+ hs_temps[hs_temp] = hs_temps.get(hs_temp, 0.0) + temp_duration
+ hs_latch_sets = hs_latch_count / 2 # one set of open/close
+ hs_total_rotations = sum(hs_rotations.values())
+ hs_total_temp_time = sum(hs_temps.values())
+ hs_dict = {
+ "Heatershaker # of Latch Open/Close": hs_latch_sets,
+ "Heatershaker # of Homes": hs_home_count,
+ "Heatershaker # of Rotations": hs_total_rotations,
+ "Heatershaker Temp On Time (sec)": hs_total_temp_time,
+ }
+ return hs_dict
+
+
+def temperature_module_commands(file_results: Dict[str, Any]) -> Dict[str, float]:
+ """Get # of temp changes and total temp on time for temperature module from run log."""
+ # TODO: modify for cases that have more than 1 temperature module.
+ tm_temp_change = 0
+ tm_temps: Dict[str, float] = dict()
+ temp_time = None
+ deactivate_time = None
+ commandData = file_results.get("commands", "")
+ for command in commandData:
+ commandType = command["commandType"]
+ if commandType == "temperatureModule/setTargetTemperature":
+ tm_temp = command["params"]["celsius"]
+ tm_temp_change += 1
+ if commandType == "temperatureModule/waitForTemperature":
+ temp_time = datetime.strptime(
+ command.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ if commandType == "temperatureModule/deactivate":
+ deactivate_time = datetime.strptime(
+ command.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ if temp_time is not None and deactivate_time > temp_time:
+ temp_duration = (deactivate_time - temp_time).total_seconds()
+ tm_temps[tm_temp] = tm_temps.get(tm_temp, 0.0) + temp_duration
+ if temp_time is not None and deactivate_time is None:
+ # If temperature module is not deactivated, protocol completedAt time stamp used.
+ protocol_end = datetime.strptime(
+ file_results.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ temp_duration = (protocol_end - temp_time).total_seconds()
+ tm_temps[tm_temp] = tm_temps.get(tm_temp, 0.0) + temp_duration
+ tm_total_temp_time = sum(tm_temps.values())
+ tm_dict = {
+ "Temp Module # of Temp Changes": tm_temp_change,
+ "Temp Module Temp On Time (sec)": tm_total_temp_time,
+ }
+ return tm_dict
+
+
+def thermocycler_commands(file_results: Dict[str, Any]) -> Dict[str, float]:
+ """Counts # of lid engagements, temp changes, and temp sustaining mins."""
+ # TODO: modify for cases that have more than 1 thermocycler.
+ commandData = file_results.get("commands", "")
+ lid_engagements: float = 0.0
+ block_temp_changes: float = 0.0
+ lid_temp_changes: float = 0.0
+ lid_temps: Dict[str, float] = dict()
+ block_temps: Dict[str, float] = dict()
+ lid_on_time = None
+ lid_off_time = None
+ block_on_time = None
+ block_off_time = None
+ for command in commandData:
+ commandType = command["commandType"]
+ if (
+ commandType == "thermocycler/openLid"
+ or commandType == "thermocycler/closeLid"
+ ):
+ lid_engagements += 1
+ if commandType == "thermocycler/setTargetBlockTemperature":
+ block_temp = command["params"]["celsius"]
+ block_temp_changes += 1
+ block_on_time = datetime.strptime(
+ command.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ if commandType == "thermocycler/setTargetLidTemperature":
+ lid_temp_changes += 1
+ lid_temp = command["params"]["celsius"]
+ lid_on_time = datetime.strptime(
+ command.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ if commandType == "thermocycler/deactivateLid":
+ lid_off_time = datetime.strptime(
+ command.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ if lid_on_time is not None and lid_off_time > lid_on_time:
+ lid_duration = (lid_off_time - lid_on_time).total_seconds()
+ lid_temps[lid_temp] = lid_temps.get(lid_temp, 0.0) + lid_duration
+ if commandType == "thermocycler/deactivateBlock":
+ block_off_time = datetime.strptime(
+ command.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ if block_on_time is not None and block_off_time > block_on_time:
+ block_duration = (block_off_time - block_on_time).total_seconds()
+ block_temps[block_temp] = (
+ block_temps.get(block_temp, 0.0) + block_duration
+ )
+ if commandType == "thermocycler/runProfile":
+ profile = command["params"]["profile"]
+ total_changes = len(profile)
+ block_temp_changes += total_changes
+ for cycle in profile:
+ block_temp = cycle["celsius"]
+ block_time = cycle["holdSeconds"]
+ block_temps[block_temp] = block_temps.get(block_temp, 0.0) + block_time
+ if block_on_time is not None and block_off_time is None:
+ # If thermocycler block not deactivated protocol completedAt time stamp used
+ protocol_end = datetime.strptime(
+ file_results.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ temp_duration = (protocol_end - block_on_time).total_seconds()
+ block_temps[block_temp] = block_temps.get(block_temp, 0.0) + temp_duration
+ if lid_on_time is not None and lid_off_time is None:
+ # If thermocycler lid not deactivated protocol completedAt time stamp used
+ protocol_end = datetime.strptime(
+ file_results.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
+ )
+ temp_duration = (protocol_end - lid_on_time).total_seconds()
+ lid_temps[lid_temp] = lid_temps.get(lid_temp, 0.0) + temp_duration
+
+ block_total_time = sum(block_temps.values())
+ lid_total_time = sum(lid_temps.values())
+ lid_sets = lid_engagements / 2
+ tc_dict = {
+ "Thermocycler # of Lid Open/Close": lid_sets,
+ "Thermocycler Block # of Temp Changes": block_temp_changes,
+ "Thermocycler Block Temp On Time (sec)": block_total_time,
+ "Thermocycler Lid # of Temp Changes": lid_temp_changes,
+ "Thermocycler Lid Temp On Time (sec)": lid_total_time,
+ }
+
+ return tc_dict
+
+
+def create_abr_data_sheet(
+ storage_directory: str, file_name: str, headers: List[str]
+) -> str:
+ """Creates csv file to log ABR data."""
+ file_name_csv = file_name + ".csv"
+ sheet_location = os.path.join(storage_directory, file_name_csv)
+ if os.path.exists(sheet_location):
+ return sheet_location
+ else:
+ with open(sheet_location, "w") as csvfile:
+ writer = csv.DictWriter(csvfile, fieldnames=headers)
+ writer.writeheader()
+ print(f"Created file. Located: {sheet_location}.")
+ return sheet_location
+
+
+def get_error_info(file_results: Dict[str, Any]) -> Tuple[int, str, str, str, str]:
+ """Determines if errors exist in run log and documents them."""
+ error_levels = []
+ # Read error levels file
+ with open(ERROR_LEVELS_PATH, "r") as error_file:
+ error_levels = list(csv.reader(error_file))
+ num_of_errors = len(file_results["errors"])
+ if num_of_errors == 0:
+ error_type = ""
+ error_code = ""
+ error_instrument = ""
+ error_level = ""
+ return 0, error_type, error_code, error_instrument, error_level
+ commands_of_run: List[Dict[str, Any]] = file_results.get("commands", [])
+ run_command_error: Dict[str, Any] = commands_of_run[-1]
+ error_str: int = len(run_command_error.get("error", ""))
+ if error_str > 1:
+ error_type = run_command_error["error"].get("errorType", "")
+ error_code = run_command_error["error"].get("errorCode", "")
+ try:
+ # Instrument Error
+ error_instrument = run_command_error["error"]["errorInfo"]["node"]
+ except KeyError:
+ # Module
+ error_instrument = run_command_error["error"]["errorInfo"].get("port", "")
+ else:
+ error_type = file_results["errors"][0]["errorType"]
+ error_code = file_results["errors"][0]["errorCode"]
+ error_instrument = file_results["errors"][0]["detail"]
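+ # error_levels.csv rows are [Prefix, Error Code, Description, Category, Level of Failure].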
+ error_level = ""
+ for error in error_levels:
+ code_error = error[1]
+ if code_error == error_code:
+ error_level = error[4]
+
+ return num_of_errors, error_type, error_code, error_instrument, error_level
+
+
+def write_to_local_and_google_sheet(
+ runs_and_robots: Dict[Any, Dict[str, Any]],
+ storage_directory: str,
+ file_name: str,
+ google_sheet: Any,
+ header: List[str],
+) -> None:
+ """Write data dictionary to google sheet and local csv."""
+ sheet_location = os.path.join(storage_directory, file_name)
+ file_exists = os.path.exists(sheet_location) and os.path.getsize(sheet_location) > 0
+ list_of_runs = list(runs_and_robots.keys())
+ with open(sheet_location, "a", newline="") as f:
+ writer = csv.writer(f)
+ if not file_exists:
+ writer.writerow(header)
+ for run in range(len(list_of_runs)):
+ row = runs_and_robots[list_of_runs[run]].values()
+ row_list = list(row)
+ writer.writerow(row_list)
+ google_sheet.write_header(header)
+ google_sheet.token_check()
+ google_sheet.update_row_index()
+ google_sheet.write_to_row(row_list)
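+ # Brief pause between row writes to avoid hammering the Google Sheets API.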
+ t.sleep(3)
+
+
+def read_abr_data_sheet(
+ storage_directory: str, file_name_csv: str, google_sheet: Any
+) -> Set[str]:
+ """Reads current run sheet to determine what new run data should be added."""
+ print(file_name_csv)
+ sheet_location = os.path.join(storage_directory, file_name_csv)
+ runs_in_sheet = set()
+ # Read the CSV file
+ with open(sheet_location, "r") as csv_start:
+ data = csv.DictReader(csv_start)
+ headers = data.fieldnames
+ if headers is not None:
+ for row in data:
+ run_id = row[headers[1]]
+ runs_in_sheet.add(run_id)
+ print(f"There are {str(len(runs_in_sheet))} runs documented in the ABR sheet.")
+ # Read Google Sheet
+ google_sheet.token_check()
+ google_sheet.write_header(headers)
+ google_sheet.update_row_index()
+ return runs_in_sheet
+
+
+def get_run_ids_from_storage(storage_directory: str) -> Set[str]:
+ """Read all files in storage directory, extracts run id, adds to set."""
+ os.makedirs(storage_directory, exist_ok=True)
+ list_of_files = os.listdir(storage_directory)
+ run_ids = set()
+ for this_file in list_of_files:
+ read_file = os.path.join(storage_directory, this_file)
+ if read_file.endswith(".json"):
+ file_results = json.load(open(read_file))
+ run_id = file_results.get("run_id", "")
+ if len(run_id) > 0:
+ run_ids.add(run_id)
+ return run_ids
+
+
+def get_unseen_run_ids(runs: Set[str], runs_from_storage: Set[str]) -> Set[str]:
+ """Subtracts runs from storage from current runs being read."""
+ runs_to_save = runs - runs_from_storage
+ return runs_to_save
+
+
+def save_run_log_to_json(
+ ip: str, results: Dict[str, Any], storage_directory: str
+) -> str:
+ """Save run log to local json file."""
+ data_file_name = ip + "_" + results["run_id"] + ".json"
+ saved_file_path = os.path.join(storage_directory, data_file_name)
+ json.dump(results, open(saved_file_path, mode="w"))
+ return saved_file_path
+
+
+def get_run_ids_from_google_drive(google_drive: Any) -> Set[str]:
+ """Get run ids in google drive folder."""
+ # Run ids in google_drive_folder
+ file_names = google_drive.list_folder()
+ run_ids_on_gd = set()
+ for file in file_names:
+ if file.endswith(".json") and "_" in file:
+ file_id = file.split(".json")[0].split("_")[1]
+ run_ids_on_gd.add(file_id)
+ return run_ids_on_gd
+
+
+def write_to_sheets(
+ sheet_location: str, google_sheet: Any, row_list: List[Any], headers: List[str]
+) -> None:
+ """Write list to google sheet and csv."""
+ with open(sheet_location, "a", newline="") as f:
+ writer = csv.writer(f)
+ writer.writerow(row_list)
+ # Read Google Sheet
+ google_sheet.token_check()
+ google_sheet.write_header(headers)
+ google_sheet.update_row_index()
+ google_sheet.write_to_row(row_list)
+ t.sleep(5)
+
+
+def get_calibration_offsets(
+ ip: str, storage_directory: str
+) -> Tuple[str, Dict[str, Any]]:
+ """Connect to robot via ip and get calibration data."""
+ calibration = dict()
+ # Robot Information [Name, Software Version]
+ try:
+ response = requests.get(
+ f"http://{ip}:31950/health", headers={"opentrons-version": "3"}
+ )
+ print(f"Connected to {ip}")
+ except Exception:
+ print(f"ERROR: Failed to read IP address: {ip}")
+ sys.exit()
+ health_data = response.json()
+ robot_name = health_data.get("name", "")
+ api_version = health_data.get("api_version", "")
+ pull_date_timestamp = datetime.now()
+ date = pull_date_timestamp.date().isoformat()
+ file_date = str(pull_date_timestamp).replace(":", "").split(".")[0]
+ calibration["Robot"] = robot_name
+ calibration["Software Version"] = api_version
+ calibration["Pull Date"] = date
+ calibration["Pull Timestamp"] = pull_date_timestamp.isoformat()
+ calibration["run_id"] = "calibration" + "_" + file_date
+ # Calibration [Instruments, modules, deck]
+ response = requests.get(
+ f"http://{ip}:31950/instruments",
+ headers={"opentrons-version": "3"},
+ params={"cursor": 0, "pageLength": 0},
+ )
+ instruments: Dict[str, Any] = response.json()
+ calibration["Instruments"] = instruments.get("data", "")
+ response = requests.get(
+ f"http://{ip}:31950/modules",
+ headers={"opentrons-version": "3"},
+ params={"cursor": 0, "pageLength": 0},
+ )
+ modules: Dict[str, Any] = response.json()
+ calibration["Modules"] = modules.get("data", "")
+ response = requests.get(
+ f"http://{ip}:31950/calibration/status",
+ headers={"opentrons-version": "3"},
+ params={"cursor": 0, "pageLength": 0},
+ )
+ deck: Dict[str, Any] = response.json()
+ calibration["Deck"] = deck.get("deckCalibration", "")
+ save_name = ip + "_calibration.json"
+ saved_file_path = os.path.join(storage_directory, save_name)
+ json.dump(calibration, open(saved_file_path, mode="w"))
+ return saved_file_path, calibration
+
+
+def get_logs(storage_directory: str, ip: str) -> List[str]:
+ """Get Robot logs."""
+ log_types = ["api.log", "server.log", "serial.log", "touchscreen.log"]
+ all_paths = []
+ for log_type in log_types:
+ try:
+ response = requests.get(
+ f"http://{ip}:31950/logs/{log_type}",
+ headers={"log_identifier": log_type},
+ params={"records": 5000},
+ )
+ response.raise_for_status()
+ log_data = response.text
+ log_name = ip + "_" + log_type.split(".")[0] + ".json"
+ file_path = os.path.join(storage_directory, log_name)
+ with open(file_path, mode="w", encoding="utf-8") as file:
+ json.dump(log_data, file)
+ except requests.exceptions.RequestException:
+ print(f"Request exception. Did not save {log_type}")
+ continue
+ all_paths.append(file_path)
+ return all_paths
diff --git a/abr-testing/abr_testing/data_collection/single_run_log_reader.py b/abr-testing/abr_testing/data_collection/single_run_log_reader.py
new file mode 100644
index 00000000000..df078929338
--- /dev/null
+++ b/abr-testing/abr_testing/data_collection/single_run_log_reader.py
@@ -0,0 +1,51 @@
+"""Reads single run log retrieved by get_run_logs.py and saves to local csv."""
+import argparse
+import sys
+import os
+import csv
+from abr_testing.data_collection import read_robot_logs
+from abr_testing.data_collection import abr_google_drive
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Read single run log locally saved.")
+ parser.add_argument(
+ "run_log_file_path",
+ metavar="RUN_LOG_FILE_PATH",
+ type=str,
+ nargs=1,
+ help="Folder path that holds individual run logs of interest.",
+ )
+ parser.add_argument(
+ "google_sheet_name",
+ metavar="GOOGLE_SHEET_NAME",
+ type=str,
+ nargs=1,
+ help="Google sheet name.",
+ )
+ args = parser.parse_args()
+ run_log_file_path = args.run_log_file_path[0]
+ google_sheet_name = args.google_sheet_name[0]
+
+ try:
+ credentials_path = os.path.join(run_log_file_path, "credentials.json")
+ except FileNotFoundError:
+ print(f"Add credentials.json file to: {run_log_file_path}.")
+ sys.exit()
+ # Get Runs from Storage and Read Logs
+ run_ids_in_storage = read_robot_logs.get_run_ids_from_storage(run_log_file_path)
+ runs_and_robots, header = abr_google_drive.create_data_dictionary(
+ run_ids_in_storage, run_log_file_path, ""
+ )
+ list_of_runs = list(runs_and_robots.keys())
+ # Adds Run to local csv
+ sheet_location = os.path.join(run_log_file_path, "saved_data.csv")
+ file_exists = os.path.exists(sheet_location) and os.path.getsize(sheet_location) > 0
+ with open(sheet_location, "a", newline="") as f:
+ writer = csv.writer(f)
+ if not file_exists:
+ writer.writerow(header)
+ for run in list_of_runs:
+ # Add new row
+ row = runs_and_robots[run].values()
+ row_list = list(row)
+ writer.writerow(row_list)
diff --git a/notify-server/notify_server/py.typed b/abr-testing/abr_testing/py.typed
similarity index 100%
rename from notify-server/notify_server/py.typed
rename to abr-testing/abr_testing/py.typed
diff --git a/abr-testing/abr_testing/tools/__init__.py b/abr-testing/abr_testing/tools/__init__.py
new file mode 100644
index 00000000000..7890dc32616
--- /dev/null
+++ b/abr-testing/abr_testing/tools/__init__.py
@@ -0,0 +1 @@
+"""ABR tools."""
diff --git a/abr-testing/abr_testing/tools/abr_asair_sensor.py b/abr-testing/abr_testing/tools/abr_asair_sensor.py
new file mode 100644
index 00000000000..eef69329436
--- /dev/null
+++ b/abr-testing/abr_testing/tools/abr_asair_sensor.py
@@ -0,0 +1,115 @@
+"""ABR Temperature Humidity Sensors."""
+
+from hardware_testing import data # type: ignore[import]
+from hardware_testing.drivers import asair_sensor # type: ignore[import]
+import datetime
+import time as t
+from typing import List
+import argparse
+from abr_testing.automation import google_sheets_tool
+
+
+class _ABRAsairSensor:
+ def __init__(self, robot: str, duration: int, frequency: int) -> None:
+ try:
+ credentials_path = "/var/lib/jupyter/notebooks/abr.json"
+ except FileNotFoundError:
+ print("Make sure credentials file is in jupyter notebook.")
+ test_name = "ABR-Environment-Monitoring"
+ run_id = data.create_run_id()
+ file_name = data.create_file_name(test_name, run_id, robot)
+ sensor = asair_sensor.BuildAsairSensor(False, True)
+ print(sensor)
+ env_data = sensor.get_reading()
+ header = [
+ "Robot",
+ "Timestamp",
+ "Date",
+ "Time",
+ "Temp (oC)",
+ "Relative Humidity (%)",
+ ]
+ header_str = ",".join(header) + "\n"
+ data.append_data_to_file(test_name, run_id, file_name, header_str)
+ # Upload to google has passed
+ try:
+ google_sheet = google_sheets_tool.google_sheet(
+ credentials_path, "ABR Ambient Conditions", tab_number=0
+ )
+ print("Connected to the google sheet.")
+ except FileNotFoundError:
+ print(
+ "There are no google sheets credentials. Make sure credentials in jupyter notebook."
+ )
+ results_list: List = []
+ start_time = datetime.datetime.now()
+ while True:
+ env_data = sensor.get_reading()
+ timestamp = datetime.datetime.now()
+ # Time adjustment for ABR robot timezone
+ new_timestamp = timestamp - datetime.timedelta(hours=5)
+ date = new_timestamp.date()
+ time = new_timestamp.time()
+ temp = env_data.temperature
+ print(temp)
+ rh = env_data.relative_humidity
+ print(rh)
+ row = [
+ robot,
+ str(new_timestamp),
+ str(date),
+ str(time),
+ temp,
+ rh,
+ ]
+
+ results_list.append(row)
+ # Check if duration elapsed
+ elapsed_time = datetime.datetime.now() - start_time
+ if elapsed_time.total_seconds() >= duration * 60:
+ break
+ # write to google sheet
+ try:
+ google_sheet.token_check()
+ google_sheet.write_header(header)
+ google_sheet.update_row_index()
+ google_sheet.write_to_row(row)
+ print("Wrote row")
+ except RuntimeError:
+ print("Did not write row.")
+ # Delay for desired frequency minutes before the next iteration
+ t.sleep(frequency * 60) # seconds
+
+ # Upload to robot testing data folder
+ for sublist in results_list:
+ row_str = ", ".join(map(str, sublist)) + "\n" # type: str
+ save_file_path = data.append_data_to_file(
+ test_name, run_id, file_name, row_str
+ )
+ print(f"Saved to robot: f{save_file_path}.")
+ print(
+ f"Done. Ran for {duration} minutes and collected every {frequency} minutes."
+ )
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Starts Temp/RH Sensor.")
+ parser.add_argument(
+ "robot", metavar="ROBOT", type=str, nargs=1, help="ABR Robot Name"
+ )
+ parser.add_argument(
+ "duration",
+ metavar="DURATION",
+ type=int,
+ nargs=1,
+ help="Duration (min) to run sensor for.",
+ )
+ parser.add_argument(
+ "frequency",
+ metavar="FREQUENCY",
+ type=int,
+ nargs=1,
+ help="How frequently to record temp/rh (min for.",
+ )
+ args = parser.parse_args()
+ _ABRAsairSensor(args.robot[0], args.duration[0], args.frequency[0])
diff --git a/abr-testing/abr_testing/tools/abr_scale.py b/abr-testing/abr_testing/tools/abr_scale.py
new file mode 100644
index 00000000000..75c887d4ecc
--- /dev/null
+++ b/abr-testing/abr_testing/tools/abr_scale.py
@@ -0,0 +1,106 @@
+"""ABR Scale Reader."""
+import os
+import datetime
+from hardware_testing.drivers import find_port, list_ports_and_select # type: ignore[import]
+from hardware_testing.drivers.radwag import RadwagScale # type: ignore[import]
+import argparse
+from abr_testing.data_collection import read_robot_logs
+from abr_testing.automation import google_sheets_tool
+
+
+if __name__ == "__main__":
+ # Adds Arguments
+ parser = argparse.ArgumentParser(description="Record stable mass for labware.")
+ parser.add_argument(
+ "storage_directory",
+ metavar="STORAGE_DIRECTORY",
+ type=str,
+ nargs=1,
+ help="Path to long term storage directory for scale .csvs.",
+ )
+ parser.add_argument(
+ "file_name",
+ metavar="FILE_NAME",
+ type=str,
+ nargs=1,
+ help="Name of google sheet and local csv to save data to.",
+ )
+ parser.add_argument("robot", metavar="ROBOT", type=str, nargs=1, help="Robot name.")
+ parser.add_argument(
+ "labware_name",
+ metavar="LABWARE_NAME",
+ type=str,
+ nargs=1,
+ help="Name of labware.",
+ )
+ parser.add_argument(
+ "protocol_step",
+ metavar="PROTOCOL_STEP",
+ type=str,
+ nargs=1,
+ help="1 for empty plate, 2 for filled plate, 3 for end of protocol.",
+ )
+ args = parser.parse_args()
+ robot = args.robot[0]
+ labware = args.labware_name[0]
+ protocol_step = args.protocol_step[0]
+ storage_directory = args.storage_directory[0]
+ file_name = args.file_name[0]
+ file_name_csv = file_name + ".csv"
+ # find port using known VID:PID, then connect
+ vid, pid = RadwagScale.vid_pid()
+ try:
+ scale = RadwagScale.create(port=find_port(vid=vid, pid=pid))
+ except RuntimeError:
+ device = list_ports_and_select()
+ scale = RadwagScale.create(device)
+ scale.connect()
+ grams = 0.0
+ is_stable = False
+ # Set up csv sheet
+ headers = ["Robot", "Date", "Timestamp", "Labware", "Mass (g)", "Measurement Step"]
+ sheet_location = read_robot_logs.create_abr_data_sheet(
+ storage_directory, file_name, headers
+ )
+ # Set up google sheet
+ try:
+ credentials_path = os.path.join(storage_directory, "credentials.json")
+ google_sheet = google_sheets_tool.google_sheet(
+ credentials_path, file_name, tab_number=0
+ )
+ print("Connected to google sheet.")
+ except FileNotFoundError:
+ print("No google sheets credentials. Add credentials to storage notebook.")
+
+ # Scale Loop
+ grams, is_stable = scale.read_mass()
+ grams, is_stable = scale.read_mass()
+ is_stable = False
+ break_all = False
+ while is_stable is False:
+ grams, is_stable = scale.read_mass()
+ grams, is_stable = scale.read_mass()
+ print(f"Scale reading: grams={grams}, is_stable={is_stable}")
+ time_now = datetime.datetime.now()
+ date = str(time_now.date())
+ row = [robot, date, str(time_now), labware, grams, protocol_step]
+ row_list = list(row)
+ while is_stable is True:
+ print("is stable")
+ read_robot_logs.write_to_sheets(
+ sheet_location, google_sheet, row_list, headers
+ )
+ is_stable = False
+ y_or_no = input("Do you want to weigh another sample? (Y/N): ")
+ if y_or_no == "Y":
+ # Uses same storage directory and file.
+ grams, is_stable = scale.read_mass()
+ is_stable = False
+ robot = input("Robot: ")
+ labware = input("Labware: ")
+ protocol_step = input("Measurement Step (1,2,3): ")
+ grams, is_stable = scale.read_mass()
+ elif y_or_no == "N":
+ break_all = True
+ if break_all:
+ break
diff --git a/abr-testing/abr_testing/tools/query_and_download.py b/abr-testing/abr_testing/tools/query_and_download.py
new file mode 100644
index 00000000000..320b99b333e
--- /dev/null
+++ b/abr-testing/abr_testing/tools/query_and_download.py
@@ -0,0 +1,48 @@
+"""Download files from google drive based off string search."""
+from abr_testing.automation import google_drive_tool
+import argparse
+import os
+import json
+import sys
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Download files based off title search."
+ )
+ parser.add_argument(
+ "storage_directory",
+ metavar="STORAGE_DIRECTORY",
+ type=str,
+ nargs=1,
+ help="Path save downloaded files. Contains .json file with query words.",
+ )
+ parser.add_argument(
+ "folder_name",
+ metavar="FOLDER_NAME",
+ type=str,
+ nargs=1,
+ help="Google Drive folder name. Open desired folder and copy string after drive/folders/.",
+ )
+ parser.add_argument(
+ "email", metavar="EMAIL", type=str, nargs=1, help="opentrons gmail."
+ )
+ args = parser.parse_args()
+ folder_name = args.folder_name[0]
+ email = args.email[0]
+ storage_directory = args.storage_directory[0]
+
+ search_file_path = os.path.join(storage_directory, "search_words.json")
+ try:
+ search_file = json.load(open(search_file_path))
+ except FileNotFoundError:
+ print("Add .json file with search words formatted in a list.")
+ try:
+ credentials_path = os.path.join(storage_directory, "credentials.json")
+ except FileNotFoundError:
+ print(f"Add credentials.json file to: {storage_directory}.")
+ sys.exit()
+ google_drive = google_drive_tool.google_drive(credentials_path, folder_name, email)
+ print("Connected to google drive.")
+ search_lists = search_file["search_words"]
+ found_files = google_drive.search_folder(search_lists, folder_name)
+ google_drive.download_files(found_files, storage_directory)
diff --git a/abr-testing/mypy.ini b/abr-testing/mypy.ini
new file mode 100644
index 00000000000..eeb271520a5
--- /dev/null
+++ b/abr-testing/mypy.ini
@@ -0,0 +1,6 @@
+[mypy]
+show_error_codes = True
+strict = False
+
+[mypy-can.*]
+ignore_missing_imports = True
diff --git a/abr-testing/setup.py b/abr-testing/setup.py
new file mode 100644
index 00000000000..5c5edb49993
--- /dev/null
+++ b/abr-testing/setup.py
@@ -0,0 +1,33 @@
+"""Setup script."""
+
+import os
+import sys
+
+from setuptools import setup, find_packages
+
+HERE = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.join(HERE, "..", "scripts"))
+from python_build_utils import normalize_version # noqa: E402
+
+
+def _get_version() -> None:
+ buildno = os.getenv("BUILD_NUMBER")
+ project = os.getenv("OPENTRONS_PROJECT", "ot3")
+ git_dir = os.getenv("OPENTRONS_GIT_DIR", None)
+ if buildno:
+ normalize_opts = {"extra_tag": buildno}
+ else:
+ normalize_opts = {}
+ return normalize_version("abr-testing", project, git_dir=git_dir, **normalize_opts)
+
+
+setup(
+ name="abr_testing",
+ version=_get_version(),
+ packages=find_packages(where=".", exclude=["tests.*", "tests"]),
+ url="",
+ license="",
+ author="opentrons",
+ author_email="engineering@opentrons.com",
+ description="tools for running application-based reliability tests.",
+)
diff --git a/abr-testing/tests/conftest.py b/abr-testing/tests/conftest.py
new file mode 100644
index 00000000000..dc52d11c3a7
--- /dev/null
+++ b/abr-testing/tests/conftest.py
@@ -0,0 +1 @@
+"""Pytest integrations."""
diff --git a/api-client/package.json b/api-client/package.json
index 650bdcc4e25..daefb4c8991 100644
--- a/api-client/package.json
+++ b/api-client/package.json
@@ -4,16 +4,13 @@
"description": "Opentrons robot API client for Node.js and the browser",
"version": "0.0.0-dev",
"license": "Apache-2.0",
- "main": "dist/api-client.js",
- "module": "dist/api-client.mjs",
+ "main": "src/index.ts",
"types": "lib/index.d.ts",
"source": "src/index.ts",
- "browser": {
- "./dist/api-client.js": "./dist/api-client.browser.js",
- "./dist/api-client.mjs": "./dist/api-client.browser.mjs"
- },
"dependencies": {
"@opentrons/shared-data": "link:../shared-data",
- "axios": "^0.21.1"
+ "@types/lodash": "^4.14.191",
+ "axios": "^0.21.1",
+ "lodash": "4.17.21"
}
}
diff --git a/api-client/src/calibration/types.ts b/api-client/src/calibration/types.ts
index 645f904d45b..c14ce57e64a 100644
--- a/api-client/src/calibration/types.ts
+++ b/api-client/src/calibration/types.ts
@@ -8,7 +8,7 @@ export interface PipOffsetDeletionParams {
export interface TipLengthDeletionParams {
calType: 'tipLength'
- tiprack_hash: string
+ tiprack_uri: string
pipette_id: string
}
export type DeleteCalRequestParams =
@@ -93,7 +93,7 @@ export interface TipLengthCalibration {
source: CalibrationSourceType
status: IndividualCalibrationHealthStatus
id: string
- uri?: string | null
+ uri: string
}
export interface AllTipLengthCalibrations {
diff --git a/api-client/src/deck_configuration/__stubs__/index.ts b/api-client/src/deck_configuration/__stubs__/index.ts
deleted file mode 100644
index 2197c25baaa..00000000000
--- a/api-client/src/deck_configuration/__stubs__/index.ts
+++ /dev/null
@@ -1,73 +0,0 @@
-import { v4 as uuidv4 } from 'uuid'
-
-import {
- STAGING_AREA_LOAD_NAME,
- STANDARD_SLOT_LOAD_NAME,
- TRASH_BIN_LOAD_NAME,
- WASTE_CHUTE_LOAD_NAME,
-} from '@opentrons/shared-data'
-
-import type { Fixture } from '@opentrons/shared-data'
-
-export const DECK_CONFIG_STUB: { [fixtureLocation: string]: Fixture } = {
- cutoutA1: {
- fixtureLocation: 'cutoutA1',
- loadName: STANDARD_SLOT_LOAD_NAME,
- fixtureId: uuidv4(),
- },
- cutoutB1: {
- fixtureLocation: 'cutoutB1',
- loadName: STANDARD_SLOT_LOAD_NAME,
- fixtureId: uuidv4(),
- },
- cutoutC1: {
- fixtureLocation: 'cutoutC1',
- loadName: STANDARD_SLOT_LOAD_NAME,
- fixtureId: uuidv4(),
- },
- cutoutD1: {
- fixtureLocation: 'cutoutD1',
- loadName: STANDARD_SLOT_LOAD_NAME,
- fixtureId: uuidv4(),
- },
- cutoutA2: {
- fixtureLocation: 'cutoutA2',
- loadName: STANDARD_SLOT_LOAD_NAME,
- fixtureId: uuidv4(),
- },
- cutoutB2: {
- fixtureLocation: 'cutoutB2',
- loadName: STANDARD_SLOT_LOAD_NAME,
- fixtureId: uuidv4(),
- },
- cutoutC2: {
- fixtureLocation: 'cutoutC2',
- loadName: STANDARD_SLOT_LOAD_NAME,
- fixtureId: uuidv4(),
- },
- cutoutD2: {
- fixtureLocation: 'cutoutD2',
- loadName: STANDARD_SLOT_LOAD_NAME,
- fixtureId: uuidv4(),
- },
- cutoutA3: {
- fixtureLocation: 'cutoutA3',
- loadName: TRASH_BIN_LOAD_NAME,
- fixtureId: uuidv4(),
- },
- cutoutB3: {
- fixtureLocation: 'cutoutB3',
- loadName: STANDARD_SLOT_LOAD_NAME,
- fixtureId: uuidv4(),
- },
- cutoutC3: {
- fixtureLocation: 'cutoutC3',
- loadName: STAGING_AREA_LOAD_NAME,
- fixtureId: uuidv4(),
- },
- cutoutD3: {
- fixtureLocation: 'cutoutD3',
- loadName: WASTE_CHUTE_LOAD_NAME,
- fixtureId: uuidv4(),
- },
-}
diff --git a/api-client/src/deck_configuration/createDeckConfiguration.ts b/api-client/src/deck_configuration/createDeckConfiguration.ts
deleted file mode 100644
index 09a2f3b73d7..00000000000
--- a/api-client/src/deck_configuration/createDeckConfiguration.ts
+++ /dev/null
@@ -1,29 +0,0 @@
-// import { POST, request } from '../request'
-import { DECK_CONFIG_STUB } from './__stubs__'
-
-import type { DeckConfiguration } from '@opentrons/shared-data'
-// import type { ResponsePromise } from '../request'
-import type { HostConfig } from '../types'
-
-// TODO(bh, 2023-09-26): uncomment and remove deck config stub when backend api is ready
-// export function createDeckConfiguration(
-// config: HostConfig,
-// data: DeckConfiguration
-// ): ResponsePromise {
-// return request(
-// POST,
-// `/deck_configuration`,
-// { data },
-// config
-// )
-// }
-
-export function createDeckConfiguration(
- config: HostConfig,
- data: DeckConfiguration
-): Promise<{ data: DeckConfiguration }> {
- data.forEach(fixture => {
- DECK_CONFIG_STUB[fixture.fixtureLocation] = fixture
- })
- return Promise.resolve({ data: Object.values(DECK_CONFIG_STUB) })
-}
diff --git a/api-client/src/deck_configuration/deleteDeckConfiguration.ts b/api-client/src/deck_configuration/deleteDeckConfiguration.ts
deleted file mode 100644
index e3689f01559..00000000000
--- a/api-client/src/deck_configuration/deleteDeckConfiguration.ts
+++ /dev/null
@@ -1,30 +0,0 @@
-// import { DELETE, request } from '../request'
-import { DECK_CONFIG_STUB } from './__stubs__'
-
-import type { Fixture } from '@opentrons/shared-data'
-// import type { ResponsePromise } from '../request'
-import type { EmptyResponse, HostConfig } from '../types'
-
-// TODO(bh, 2023-09-26): uncomment and remove deck config stub when backend api is ready
-// export function deleteDeckConfiguration(
-// config: HostConfig,
-// data: Fixture
-// ): ResponsePromise {
-// const { fixtureLocation, ...rest } = data
-// return request }>(
-// DELETE,
-// `/deck_configuration/${fixtureLocation}`,
-// { data: rest },
-// config
-// )
-// }
-
-export function deleteDeckConfiguration(
- config: HostConfig,
- data: Fixture
-): Promise {
- const { fixtureLocation } = data
- // eslint-disable-next-line @typescript-eslint/no-dynamic-delete
- delete DECK_CONFIG_STUB[fixtureLocation]
- return Promise.resolve({ data: null })
-}
diff --git a/api-client/src/deck_configuration/getDeckConfiguration.ts b/api-client/src/deck_configuration/getDeckConfiguration.ts
index bc8c556e255..900f5e381e9 100644
--- a/api-client/src/deck_configuration/getDeckConfiguration.ts
+++ b/api-client/src/deck_configuration/getDeckConfiguration.ts
@@ -1,19 +1,16 @@
-// import { GET, request } from '../request'
-import { DECK_CONFIG_STUB } from './__stubs__'
+import { GET, request } from '../request'
-import type { DeckConfiguration } from '@opentrons/shared-data'
-// import type { ResponsePromise } from '../request'
+import type { ResponsePromise } from '../request'
import type { HostConfig } from '../types'
-
-// TODO(bh, 2023-09-26): uncomment and remove deck config stub when backend api is ready
-// export function getDeckConfiguration(
-// config: HostConfig
-// ): ResponsePromise {
-// return request(GET, `/deck_configuration`, null, config)
-// }
+import type { DeckConfigurationResponse } from './types'
export function getDeckConfiguration(
config: HostConfig
-): Promise<{ data: DeckConfiguration }> {
- return Promise.resolve({ data: Object.values(DECK_CONFIG_STUB) })
+): ResponsePromise {
+ return request(
+ GET,
+ `/deck_configuration`,
+ null,
+ config
+ )
}
diff --git a/api-client/src/deck_configuration/index.ts b/api-client/src/deck_configuration/index.ts
index c22cba0ae78..3da16feea96 100644
--- a/api-client/src/deck_configuration/index.ts
+++ b/api-client/src/deck_configuration/index.ts
@@ -1,4 +1,7 @@
-export { createDeckConfiguration } from './createDeckConfiguration'
-export { deleteDeckConfiguration } from './deleteDeckConfiguration'
export { getDeckConfiguration } from './getDeckConfiguration'
export { updateDeckConfiguration } from './updateDeckConfiguration'
+
+export type {
+ DeckConfigurationResponse,
+ UpdateDeckConfigurationRequest,
+} from './types'
diff --git a/api-client/src/deck_configuration/types.ts b/api-client/src/deck_configuration/types.ts
new file mode 100644
index 00000000000..8ed7db78658
--- /dev/null
+++ b/api-client/src/deck_configuration/types.ts
@@ -0,0 +1,14 @@
+import type { DeckConfiguration } from '@opentrons/shared-data'
+
+export interface UpdateDeckConfigurationRequest {
+ data: {
+ cutoutFixtures: DeckConfiguration
+ }
+}
+
+export interface DeckConfigurationResponse {
+ data: {
+ cutoutFixtures: DeckConfiguration
+ lastModifiedAt: string
+ }
+}
diff --git a/api-client/src/deck_configuration/updateDeckConfiguration.ts b/api-client/src/deck_configuration/updateDeckConfiguration.ts
index a02fb1af4b0..236aef59904 100644
--- a/api-client/src/deck_configuration/updateDeckConfiguration.ts
+++ b/api-client/src/deck_configuration/updateDeckConfiguration.ts
@@ -1,32 +1,21 @@
-import { v4 as uuidv4 } from 'uuid'
+import { PUT, request } from '../request'
-// import { PATCH, request } from '../request'
-import { DECK_CONFIG_STUB } from './__stubs__'
-
-import type { Fixture } from '@opentrons/shared-data'
-// import type { ResponsePromise } from '../request'
+import type { DeckConfiguration } from '@opentrons/shared-data'
+import type { ResponsePromise } from '../request'
import type { HostConfig } from '../types'
-
-// TODO(bh, 2023-09-26): uncomment and remove deck config stub when backend api is ready
-// export function updateDeckConfiguration(
-// config: HostConfig,
-// data: Omit
-// ): ResponsePromise {
-// const { fixtureLocation, ...rest } = data
-// return request }>(
-// PATCH,
-// `/deck_configuration/${fixtureLocation}`,
-// { data: rest },
-// config
-// )
-// }
+import type {
+ DeckConfigurationResponse,
+ UpdateDeckConfigurationRequest,
+} from './types'
export function updateDeckConfiguration(
config: HostConfig,
- data: Omit
-): Promise<{ data: Fixture }> {
- const { fixtureLocation } = data
- const fixtureId = uuidv4()
- DECK_CONFIG_STUB[fixtureLocation] = { ...data, fixtureId }
- return Promise.resolve({ data: DECK_CONFIG_STUB[fixtureLocation] })
+ deckConfig: DeckConfiguration
+): ResponsePromise {
+ return request(
+ PUT,
+ '/deck_configuration',
+ { data: { cutoutFixtures: deckConfig } },
+ config
+ )
}
diff --git a/api-client/src/maintenance_runs/createMaintenanceRunAction.ts b/api-client/src/maintenance_runs/createMaintenanceRunAction.ts
deleted file mode 100644
index 27c0a5bb47d..00000000000
--- a/api-client/src/maintenance_runs/createMaintenanceRunAction.ts
+++ /dev/null
@@ -1,20 +0,0 @@
-import { POST, request } from '../request'
-
-import type { ResponsePromise } from '../request'
-import type { HostConfig } from '../types'
-import type { MaintenanceRunAction, MaintenanceRunActionType } from './types'
-
-export interface CreateMaintenanceRunActionData {
- actionType: MaintenanceRunActionType
-}
-
-export function createMaintenanceRunAction(
- config: HostConfig,
- maintenanceRunId: string,
- data: CreateMaintenanceRunActionData
-): ResponsePromise {
- return request<
- MaintenanceRunAction,
- { data: CreateMaintenanceRunActionData }
- >(POST, `/maintenance_runs/${maintenanceRunId}/actions`, { data }, config)
-}
diff --git a/api-client/src/maintenance_runs/createMaintenanceRunLabwareDefinition.ts b/api-client/src/maintenance_runs/createMaintenanceRunLabwareDefinition.ts
index 5e5e875caa0..85615b01849 100644
--- a/api-client/src/maintenance_runs/createMaintenanceRunLabwareDefinition.ts
+++ b/api-client/src/maintenance_runs/createMaintenanceRunLabwareDefinition.ts
@@ -3,7 +3,7 @@ import { POST, request } from '../request'
import type { ResponsePromise } from '../request'
import type { HostConfig } from '../types'
import type { LabwareDefinitionSummary } from './types'
-import { LabwareDefinition2 } from '@opentrons/shared-data'
+import type { LabwareDefinition2 } from '@opentrons/shared-data'
export function createMaintenanceRunLabwareDefinition(
config: HostConfig,
diff --git a/api-client/src/maintenance_runs/index.ts b/api-client/src/maintenance_runs/index.ts
index 2dd20325652..1f48025cd4d 100644
--- a/api-client/src/maintenance_runs/index.ts
+++ b/api-client/src/maintenance_runs/index.ts
@@ -2,7 +2,6 @@ export { getMaintenanceRun } from './getMaintenanceRun'
export { deleteMaintenanceRun } from './deleteMaintenanceRun'
export { createMaintenanceRun } from './createMaintenanceRun'
export { createMaintenanceCommand } from './createMaintenanceCommand'
-export { createMaintenanceRunAction } from './createMaintenanceRunAction'
export { createMaintenanceRunLabwareDefinition } from './createMaintenanceRunLabwareDefinition'
export { getCurrentMaintenanceRun } from './getCurrentMaintenanceRun'
diff --git a/api-client/src/maintenance_runs/types.ts b/api-client/src/maintenance_runs/types.ts
index cda8f8fa0b5..6696e3ba072 100644
--- a/api-client/src/maintenance_runs/types.ts
+++ b/api-client/src/maintenance_runs/types.ts
@@ -4,37 +4,19 @@ import type {
LoadedModule,
LoadedPipette,
} from '@opentrons/shared-data'
-import type { RunCommandSummary, LabwareOffsetCreateData } from '../runs'
-
-export const ENGINE_STATUS_IDLE = 'idle' as const
-export const ENGINE_STATUS_RUNNING = 'running' as const
-export const ENGINE_STATUS_PAUSE_REQUESTED = 'pause-requested' as const
-export const ENGINE_STATUS_PAUSED = 'paused'
-export const ENGINE_STATUS_STOP_REQUESTED = 'stop-requested' as const
-export const ENGINE_STATUS_STOPPED = 'stopped' as const
-export const ENGINE_STATUS_FAILED = 'failed' as const
-export const ENGINE_STATUS_FINISHING = 'finishing' as const
-export const ENGINE_STATUS_SUCCEEDED = 'succeeded' as const
-export const ENGINE_STATUS_BLOCKED_BY_OPEN_DOOR = 'blocked-by-open-door' as const
-
-export type EngineStatus =
- | typeof ENGINE_STATUS_IDLE
- | typeof ENGINE_STATUS_RUNNING
- | typeof ENGINE_STATUS_PAUSE_REQUESTED
- | typeof ENGINE_STATUS_PAUSED
- | typeof ENGINE_STATUS_STOP_REQUESTED
- | typeof ENGINE_STATUS_STOPPED
- | typeof ENGINE_STATUS_FAILED
- | typeof ENGINE_STATUS_FINISHING
- | typeof ENGINE_STATUS_SUCCEEDED
- | typeof ENGINE_STATUS_BLOCKED_BY_OPEN_DOOR
+import type {
+ RunCommandSummary,
+ LabwareOffsetCreateData,
+ RunStatus,
+ RunAction,
+} from '../runs'
export interface MaintenanceRunData {
id: string
createdAt: string
- status: EngineStatus
+ status: RunStatus
current: boolean
- actions: MaintenanceRunAction[]
+ actions: RunAction[]
errors: MaintenanceRunError[]
pipettes: LoadedPipette[]
modules: LoadedModule[]
@@ -48,25 +30,6 @@ export interface MaintenanceRun {
data: MaintenanceRunData
}
-export const MAINTENANCE_RUN_ACTION_TYPE_PLAY: 'play' = 'play'
-export const MAINTENANCE_RUN_ACTION_TYPE_PAUSE: 'pause' = 'pause'
-export const MAINTENANCE_RUN_ACTION_TYPE_STOP: 'stop' = 'stop'
-
-export type MaintenanceRunActionType =
- | typeof MAINTENANCE_RUN_ACTION_TYPE_PLAY
- | typeof MAINTENANCE_RUN_ACTION_TYPE_PAUSE
- | typeof MAINTENANCE_RUN_ACTION_TYPE_STOP
-
-export interface MaintenanceRunAction {
- id: string
- createdAt: string
- actionType: MaintenanceRunActionType
-}
-
-export interface MaintenanceCreateRunActionData {
- actionType: MaintenanceRunActionType
-}
-
export interface MaintenanceCommandData {
data: RunCommandSummary
}
diff --git a/api-client/src/protocols/__fixtures__/simpleAnalysisFile.json b/api-client/src/protocols/__fixtures__/simpleAnalysisFile.json
index df8fcad1d98..bb6aacccd6e 100644
--- a/api-client/src/protocols/__fixtures__/simpleAnalysisFile.json
+++ b/api-client/src/protocols/__fixtures__/simpleAnalysisFile.json
@@ -3936,5 +3936,59 @@
"description": "",
"displayColor": "#b925ff"
}
- ]
+ ],
+ "runTimeParameters": [
+ {
+ "type": "int",
+ "displayName": "number of samples",
+ "variableName": "num_samples",
+ "description": "How many samples do you want to run?",
+ "value": 96,
+ "min": 1,
+ "max": 96,
+ "default": 96
+ },
+ {
+ "type": "float",
+ "displayName": "samples volume",
+ "variableName": "vol_sample",
+ "description": "What sample volume are you using?",
+ "value": 10.0,
+ "min": 1,
+ "max": 20.0,
+ "default": 10.0
+ },
+ {
+ "displayName": "Additional mix for reagent 2?",
+ "variableName": "extra_mix",
+ "description": "When on, we do an extra mix for reagent 2.",
+ "type": "bool",
+ "default": false,
+ "value": false
+ },
+ {
+ "displayName": "Number of PCR Cycles",
+ "variableName": "real_mode",
+ "description": "Cycle map",
+ "type": "int",
+ "unit": "cycles",
+ "default": 15,
+ "value": 15,
+ "choices": [
+ {
+ "displayName": "1 & 10ng (15 cycles)",
+ "value": 15
+ },
+ {
+ "displayName": "100ng (15 cycles)",
+ "value": 15
+ },
+ {
+ "displayName": "1ug (10 cycles)",
+ "value": 10
+ }
+ ]
+ }
+ ],
+ "robotType": "OT-2 Standard"
}
diff --git a/api-client/src/protocols/__tests__/utils.test.ts b/api-client/src/protocols/__tests__/utils.test.ts
index c9edcae0068..8be565de451 100644
--- a/api-client/src/protocols/__tests__/utils.test.ts
+++ b/api-client/src/protocols/__tests__/utils.test.ts
@@ -1,3 +1,4 @@
+import { describe, expect, it } from 'vitest'
import {
parsePipetteEntity,
parseInitialPipetteNamesByMount,
@@ -261,7 +262,7 @@ describe('parseInitialLoadedLabwareByAdapter', () => {
})
})
describe('parseInitialLoadedLabwareBySlot', () => {
- it('returns only labware loaded in slots', () => {
+ it('returns labware loaded in slots', () => {
const expected = {
2: mockRunTimeCommands.find(
c =>
@@ -282,6 +283,48 @@ describe('parseInitialLoadedLabwareBySlot', () => {
expected
)
})
+ it('returns labware loaded in addressable areas', () => {
+ const mockAddressableAreaLoadedLabwareCommand = ([
+ {
+ id: 'commands.LOAD_LABWARE-3',
+ createdAt: '2022-04-01T15:46:01.745870+00:00',
+ commandType: 'loadLabware',
+ key: 'commands.LOAD_LABWARE-3',
+ status: 'succeeded',
+ params: {
+ location: {
+ addressableAreaName: 'D4',
+ },
+ loadName: 'nest_96_wellplate_100ul_pcr_full_skirt',
+ namespace: 'opentrons',
+ version: 1,
+ labwareId: null,
+ displayName: 'NEST 96 Well Plate 100 µL PCR Full Skirt',
+ },
+ result: {
+ labwareId: 'labware-3',
+ definition: {},
+ offsetId: null,
+ },
+ error: null,
+ startedAt: '2022-04-01T15:46:01.745870+00:00',
+ completedAt: '2022-04-01T15:46:01.745870+00:00',
+ },
+ ] as any) as RunTimeCommand[]
+
+ const expected = {
+ D4: mockAddressableAreaLoadedLabwareCommand.find(
+ c =>
+ c.commandType === 'loadLabware' &&
+ typeof c.params.location === 'object' &&
+ 'addressableAreaName' in c.params?.location &&
+ c.params?.location?.addressableAreaName === 'D4'
+ ),
+ }
+ expect(
+ parseInitialLoadedLabwareBySlot(mockAddressableAreaLoadedLabwareCommand)
+ ).toEqual(expected)
+ })
})
describe('parseInitialLoadedLabwareByModuleId', () => {
it('returns only labware loaded in modules', () => {
diff --git a/api-client/src/protocols/createProtocol.ts b/api-client/src/protocols/createProtocol.ts
index 64593d1a953..a4f9961b9c9 100644
--- a/api-client/src/protocols/createProtocol.ts
+++ b/api-client/src/protocols/createProtocol.ts
@@ -2,15 +2,24 @@ import { POST, request } from '../request'
import type { ResponsePromise } from '../request'
import type { HostConfig } from '../types'
import type { Protocol } from './types'
+import type { RunTimeParameterCreateData } from '../runs'
export function createProtocol(
config: HostConfig,
files: File[],
- protocolKey?: string
+ protocolKey?: string,
+ runTimeParameterValues?: RunTimeParameterCreateData
): ResponsePromise {
const formData = new FormData()
- files.forEach(file => formData.append('files', file, file.name))
+ files.forEach(file => {
+ formData.append('files', file, file.name)
+ })
if (protocolKey != null) formData.append('key', protocolKey)
+ if (runTimeParameterValues != null)
+ formData.append(
+ 'runTimeParameterValues',
+ JSON.stringify(runTimeParameterValues)
+ )
  return request<Protocol, FormData>(POST, '/protocols', formData, config)
}
diff --git a/api-client/src/protocols/createProtocolAnalysis.ts b/api-client/src/protocols/createProtocolAnalysis.ts
new file mode 100644
index 00000000000..81ab83c11af
--- /dev/null
+++ b/api-client/src/protocols/createProtocolAnalysis.ts
@@ -0,0 +1,28 @@
+import { POST, request } from '../request'
+
+import type { ProtocolAnalysisSummary } from '@opentrons/shared-data'
+import type { ResponsePromise } from '../request'
+import type { HostConfig } from '../types'
+import type { RunTimeParameterCreateData } from '../runs'
+
+interface CreateProtocolAnalysisData {
+ runTimeParameterValues: RunTimeParameterCreateData
+ forceReAnalyze: boolean
+}
+
+export function createProtocolAnalysis(
+ config: HostConfig,
+ protocolKey: string,
+ runTimeParameterValues?: RunTimeParameterCreateData,
+ forceReAnalyze?: boolean
+): ResponsePromise {
+ const data = {
+ runTimeParameterValues: runTimeParameterValues ?? {},
+ forceReAnalyze: forceReAnalyze ?? false,
+ }
+ const response = request<
+ ProtocolAnalysisSummary[],
+ { data: CreateProtocolAnalysisData }
+ >(POST, `/protocols/${protocolKey}/analyses`, { data }, config)
+ return response
+}
diff --git a/api-client/src/protocols/index.ts b/api-client/src/protocols/index.ts
index 6febd0795cf..f035fa000e1 100644
--- a/api-client/src/protocols/index.ts
+++ b/api-client/src/protocols/index.ts
@@ -3,6 +3,7 @@ export { getProtocolAnalyses } from './getProtocolAnalyses'
export { getProtocolAnalysisAsDocument } from './getProtocolAnalysisAsDocument'
export { deleteProtocol } from './deleteProtocol'
export { createProtocol } from './createProtocol'
+export { createProtocolAnalysis } from './createProtocolAnalysis'
export { getProtocols } from './getProtocols'
export { getProtocolIds } from './getProtocolIds'
diff --git a/api-client/src/protocols/utils.ts b/api-client/src/protocols/utils.ts
index 3ed44a9053a..5b7eef86be9 100644
--- a/api-client/src/protocols/utils.ts
+++ b/api-client/src/protocols/utils.ts
@@ -1,14 +1,12 @@
// set of functions that parse details out of a protocol record and it's internals
import reduce from 'lodash/reduce'
-import { COLORS } from '@opentrons/components/src/ui-style-constants'
-import { getLabwareDefURI } from '@opentrons/shared-data'
+import { getLabwareDefURI, DEFAULT_LIQUID_COLORS } from '@opentrons/shared-data'
import type {
Liquid,
LoadedLabware,
LoadedModule,
LoadedPipette,
- LoadFixtureRunTimeCommand,
LoadLabwareRunTimeCommand,
LoadLiquidRunTimeCommand,
LoadModuleRunTimeCommand,
@@ -16,7 +14,6 @@ import type {
ModuleModel,
PipetteName,
RunTimeCommand,
- AddressableAreaName,
} from '@opentrons/shared-data'
interface PipetteNamesByMount {
@@ -118,11 +115,14 @@ export function parseInitialLoadedLabwareBySlot(
return reduce(
loadLabwareCommandsReversed,
(acc, command) => {
- if (
- typeof command.params.location === 'object' &&
- 'slotName' in command.params.location
- ) {
- return { ...acc, [command.params.location.slotName]: command }
+ if (typeof command.params.location === 'object') {
+ let slot: string
+ if ('slotName' in command.params.location) {
+ slot = command.params.location.slotName
+ } else if ('addressableAreaName' in command.params.location) {
+ slot = command.params.location.addressableAreaName
+ } else return acc
+ return { ...acc, [slot]: command }
} else {
return acc
}
@@ -226,83 +226,6 @@ export function parseInitialLoadedModulesBySlot(
)
}
-export interface LoadedFixturesBySlot {
- [slotName: string]: LoadFixtureRunTimeCommand
-}
-// TODO(bh, 2023-11-09): remove this util, there will be no loadFixture command
-export function parseInitialLoadedFixturesByCutout(
- commands: RunTimeCommand[]
-): LoadedFixturesBySlot {
- const loadFixtureCommandsReversed = commands
- .filter(
- (command): command is LoadFixtureRunTimeCommand =>
- command.commandType === 'loadFixture'
- )
- .reverse()
- return reduce(
- loadFixtureCommandsReversed,
- (acc, command) => ({ ...acc, [command.params.location.cutout]: command }),
- {}
- )
-}
-
-export function parseAllAddressableAreas(
- commands: RunTimeCommand[]
-): AddressableAreaName[] {
- return commands.reduce((acc, command) => {
- if (
- command.commandType === 'moveLabware' &&
- command.params.newLocation !== 'offDeck' &&
- 'slotName' in command.params.newLocation &&
- !acc.includes(command.params.newLocation.slotName as AddressableAreaName)
- ) {
- return [
- ...acc,
- command.params.newLocation.slotName as AddressableAreaName,
- ]
- } else if (
- command.commandType === 'moveLabware' &&
- command.params.newLocation !== 'offDeck' &&
- 'addressableAreaName' in command.params.newLocation &&
- !acc.includes(
- command.params.newLocation.addressableAreaName as AddressableAreaName
- )
- ) {
- return [
- ...acc,
- command.params.newLocation.addressableAreaName as AddressableAreaName,
- ]
- } else if (
- (command.commandType === 'loadLabware' ||
- command.commandType === 'loadModule') &&
- command.params.location !== 'offDeck' &&
- 'slotName' in command.params.location &&
- !acc.includes(command.params.location.slotName as AddressableAreaName)
- ) {
- return [...acc, command.params.location.slotName as AddressableAreaName]
- } else if (
- command.commandType === 'loadLabware' &&
- command.params.location !== 'offDeck' &&
- 'addressableAreaName' in command.params.location &&
- !acc.includes(
- command.params.location.addressableAreaName as AddressableAreaName
- )
- ) {
- return [
- ...acc,
- command.params.location.addressableAreaName as AddressableAreaName,
- ]
- }
- // TODO(BC, 11/6/23): once moveToAddressableArea command exists add it back here
- // else if (command.commandType === 'moveToAddressableArea') {
- // ...
- // }
- else {
- return acc
- }
- }, [])
-}
-
export interface LiquidsById {
[liquidId: string]: {
displayName: string
@@ -330,7 +253,7 @@ export function parseLiquidsInLoadOrder(
...liquid,
displayColor:
liquid.displayColor ??
- COLORS.liquidColors[index % COLORS.liquidColors.length],
+ DEFAULT_LIQUID_COLORS[index % DEFAULT_LIQUID_COLORS.length],
}
})
@@ -352,10 +275,12 @@ interface LabwareLiquidInfo {
volumeByWell: { [well: string]: number }
}
+/** @deprecated instead use LabwareByLiquidId from components/src/hardware-sim/ProtocolDeck/types */
export interface LabwareByLiquidId {
[liquidId: string]: LabwareLiquidInfo[]
}
+/** @deprecated instead use getLabwareInfoByLiquidId from components/src/hardware-sim/ProtocolDeck/utils */
export function parseLabwareInfoByLiquidId(
commands: RunTimeCommand[]
): LabwareByLiquidId {
diff --git a/api-client/src/robot/getRobotSettings.ts b/api-client/src/robot/getRobotSettings.ts
new file mode 100644
index 00000000000..ffe0014fcb0
--- /dev/null
+++ b/api-client/src/robot/getRobotSettings.ts
@@ -0,0 +1,11 @@
+import { GET, request } from '../request'
+
+import type { ResponsePromise } from '../request'
+import type { HostConfig } from '../types'
+import type { RobotSettingsResponse } from './types'
+
+export function getRobotSettings(
+ config: HostConfig
+): ResponsePromise {
+ return request(GET, '/settings', null, config)
+}
diff --git a/api-client/src/robot/index.ts b/api-client/src/robot/index.ts
index 96ef28165b0..55052d7b7c8 100644
--- a/api-client/src/robot/index.ts
+++ b/api-client/src/robot/index.ts
@@ -3,11 +3,18 @@ export { getEstopStatus } from './getEstopStatus'
export { acknowledgeEstopDisengage } from './acknowledgeEstopDisengage'
export { getLights } from './getLights'
export { setLights } from './setLights'
+export { getRobotSettings } from './getRobotSettings'
+export { updateRobotSetting } from './updateRobotSetting'
+
export type {
DoorStatus,
EstopPhysicalStatus,
EstopState,
EstopStatus,
Lights,
+ RobotSettings,
+ RobotSettingsField,
+ RobotSettingsResponse,
SetLightsData,
+ UpdateRobotSettingRequest,
} from './types'
diff --git a/api-client/src/robot/types.ts b/api-client/src/robot/types.ts
index 00d887b9c4e..41ef7f1281e 100644
--- a/api-client/src/robot/types.ts
+++ b/api-client/src/robot/types.ts
@@ -27,3 +27,23 @@ export interface Lights {
export interface SetLightsData {
on: boolean
}
+
+export interface RobotSettingsField {
+ id: string
+ title: string
+ description: string
+ value: boolean | null
+ restart_required?: boolean
+}
+
+export type RobotSettings = RobotSettingsField[]
+
+export interface UpdateRobotSettingRequest {
+ id: string
+ value: boolean | null
+}
+
+export interface RobotSettingsResponse {
+ settings: RobotSettings
+ links?: { restart?: string }
+}
diff --git a/api-client/src/robot/updateRobotSetting.ts b/api-client/src/robot/updateRobotSetting.ts
new file mode 100644
index 00000000000..a5775abaeee
--- /dev/null
+++ b/api-client/src/robot/updateRobotSetting.ts
@@ -0,0 +1,18 @@
+import { POST, request } from '../request'
+
+import type { ResponsePromise } from '../request'
+import type { HostConfig } from '../types'
+import type { RobotSettingsResponse, UpdateRobotSettingRequest } from './types'
+
+export function updateRobotSetting(
+ config: HostConfig,
+ id: string,
+ value: boolean
+): ResponsePromise {
+ return request(
+ POST,
+ '/settings',
+ { id, value },
+ config
+ )
+}
diff --git a/api-client/src/runs/commands/getCommandsAsPreSerializedList.ts b/api-client/src/runs/commands/getCommandsAsPreSerializedList.ts
new file mode 100644
index 00000000000..420f984b280
--- /dev/null
+++ b/api-client/src/runs/commands/getCommandsAsPreSerializedList.ts
@@ -0,0 +1,22 @@
+import { GET, request } from '../../request'
+
+import type { ResponsePromise } from '../../request'
+import type { HostConfig } from '../../types'
+import type {
+ CommandsAsPreSerializedListData,
+ GetCommandsParams,
+} from './types'
+
+export function getCommandsAsPreSerializedList(
+ config: HostConfig,
+ runId: string,
+ params: GetCommandsParams
+): ResponsePromise {
+ return request(
+ GET,
+ `/runs/${runId}/commandsAsPreSerializedList`,
+ null,
+ config,
+ params
+ )
+}
diff --git a/api-client/src/runs/commands/types.ts b/api-client/src/runs/commands/types.ts
index acea40e1880..d0b443b297a 100644
--- a/api-client/src/runs/commands/types.ts
+++ b/api-client/src/runs/commands/types.ts
@@ -34,6 +34,12 @@ export interface CommandsData {
links: CommandsLinks
}
+export interface CommandsAsPreSerializedListData {
+ data: string[]
+ meta: GetCommandsParams & { totalLength: number }
+ links: CommandsLinks
+}
+
export interface CreateCommandParams {
waitUntilComplete?: boolean
timeout?: number
diff --git a/api-client/src/runs/constants.ts b/api-client/src/runs/constants.ts
new file mode 100644
index 00000000000..9f0d8293ef6
--- /dev/null
+++ b/api-client/src/runs/constants.ts
@@ -0,0 +1,11 @@
+import {
+ RUN_STATUS_FAILED,
+ RUN_STATUS_STOPPED,
+ RUN_STATUS_SUCCEEDED,
+} from './types'
+
+export const RUN_STATUSES_TERMINAL = [
+ RUN_STATUS_SUCCEEDED,
+ RUN_STATUS_FAILED,
+ RUN_STATUS_STOPPED,
+]
diff --git a/api-client/src/runs/createRun.ts b/api-client/src/runs/createRun.ts
index 285802d85b2..7f0fb1ad72d 100644
--- a/api-client/src/runs/createRun.ts
+++ b/api-client/src/runs/createRun.ts
@@ -2,11 +2,16 @@ import { POST, request } from '../request'
import type { ResponsePromise } from '../request'
import type { HostConfig } from '../types'
-import type { Run, LabwareOffsetCreateData } from './types'
+import type {
+ Run,
+ LabwareOffsetCreateData,
+ RunTimeParameterCreateData,
+} from './types'
export interface CreateRunData {
protocolId?: string
labwareOffsets?: LabwareOffsetCreateData[]
+ runTimeParameterValues?: RunTimeParameterCreateData
}
export function createRun(
diff --git a/api-client/src/runs/index.ts b/api-client/src/runs/index.ts
index fa38dade02f..01653713c81 100644
--- a/api-client/src/runs/index.ts
+++ b/api-client/src/runs/index.ts
@@ -7,9 +7,10 @@ export { createCommand } from './commands/createCommand'
export { createLiveCommand } from './commands/createLiveCommand'
export { getCommand } from './commands/getCommand'
export { getCommands } from './commands/getCommands'
+export { getCommandsAsPreSerializedList } from './commands/getCommandsAsPreSerializedList'
export { createRunAction } from './createRunAction'
export * from './createLabwareOffset'
export * from './createLabwareDefinition'
-
+export * from './constants'
export * from './types'
export type { CreateRunData } from './createRun'
diff --git a/api-client/src/runs/types.ts b/api-client/src/runs/types.ts
index 319cb568d3a..761a60a8112 100644
--- a/api-client/src/runs/types.ts
+++ b/api-client/src/runs/types.ts
@@ -1,11 +1,13 @@
import type {
+ Liquid,
LoadedLabware,
LoadedModule,
LoadedPipette,
ModuleModel,
RunTimeCommand,
+ RunTimeParameter,
} from '@opentrons/shared-data'
-import type { ResourceLink } from '../types'
+import type { ResourceLink, ErrorDetails } from '../types'
export * from './commands/types'
export const RUN_STATUS_IDLE = 'idle' as const
@@ -18,6 +20,7 @@ export const RUN_STATUS_FAILED = 'failed' as const
export const RUN_STATUS_FINISHING = 'finishing' as const
export const RUN_STATUS_SUCCEEDED = 'succeeded' as const
export const RUN_STATUS_BLOCKED_BY_OPEN_DOOR = 'blocked-by-open-door' as const
+export const RUN_STATUS_AWAITING_RECOVERY = 'awaiting-recovery' as const
export type RunStatus =
| typeof RUN_STATUS_IDLE
@@ -30,8 +33,9 @@ export type RunStatus =
| typeof RUN_STATUS_FINISHING
| typeof RUN_STATUS_SUCCEEDED
| typeof RUN_STATUS_BLOCKED_BY_OPEN_DOOR
+ | typeof RUN_STATUS_AWAITING_RECOVERY
-export interface RunData {
+export interface LegacyGoodRunData {
id: string
createdAt: string
completedAt?: string
@@ -43,10 +47,25 @@ export interface RunData {
pipettes: LoadedPipette[]
labware: LoadedLabware[]
modules: LoadedModule[]
+ liquids: Liquid[]
protocolId?: string
labwareOffsets?: LabwareOffset[]
+ runTimeParameters: RunTimeParameter[]
}
+export interface KnownGoodRunData extends LegacyGoodRunData {
+ ok: true
+}
+
+export interface KnownInvalidRunData extends LegacyGoodRunData {
+ ok: false
+ dataError: ErrorDetails
+}
+
+export type GoodRunData = KnownGoodRunData | LegacyGoodRunData
+
+export type RunData = GoodRunData | KnownInvalidRunData
+
export interface VectorOffset {
x: number
y: number
@@ -80,11 +99,14 @@ export interface Runs {
export const RUN_ACTION_TYPE_PLAY: 'play' = 'play'
export const RUN_ACTION_TYPE_PAUSE: 'pause' = 'pause'
export const RUN_ACTION_TYPE_STOP: 'stop' = 'stop'
+export const RUN_ACTION_TYPE_RESUME_FROM_RECOVERY: 'resume-from-recovery' =
+ 'resume-from-recovery'
export type RunActionType =
| typeof RUN_ACTION_TYPE_PLAY
| typeof RUN_ACTION_TYPE_PAUSE
| typeof RUN_ACTION_TYPE_STOP
+ | typeof RUN_ACTION_TYPE_RESUME_FROM_RECOVERY
export interface RunAction {
id: string
@@ -107,6 +129,10 @@ export interface LabwareOffsetCreateData {
vector: VectorOffset
}
+export interface RunTimeParameterCreateData {
+ [key: string]: string | boolean | number
+}
+
export interface CommandData {
data: RunTimeCommand
}
diff --git a/api-client/src/subsystems/types.ts b/api-client/src/subsystems/types.ts
index 14f45324f62..564d59b21b2 100644
--- a/api-client/src/subsystems/types.ts
+++ b/api-client/src/subsystems/types.ts
@@ -6,6 +6,7 @@ export type Subsystem =
| 'pipette_right'
| 'gripper'
| 'rear_panel'
+ | 'hepa_uv'
type UpdateStatus = 'queued' | 'updating' | 'done'
export interface SubsystemUpdateProgressData {
diff --git a/api-client/src/system/__tests__/utils.test.ts b/api-client/src/system/__tests__/utils.test.ts
new file mode 100644
index 00000000000..3121c061a59
--- /dev/null
+++ b/api-client/src/system/__tests__/utils.test.ts
@@ -0,0 +1,20 @@
+import { describe, expect, it } from 'vitest'
+import { sanitizeFileName } from '../utils'
+
+describe('sanitizeFileName', () => {
+ it('returns original alphanumeric file name', () => {
+ expect(sanitizeFileName('an0ther_otie_logo.png')).toEqual(
+ 'an0ther_otie_logo.png'
+ )
+ })
+
+ it('sanitizes a file name', () => {
+ expect(
+ sanitizeFileName(
+ `otie's birthday/party - (& the bouncy castle cost ~$100,000).jpeg`
+ )
+ ).toEqual(
+ 'otie_s_birthday_party_-____the_bouncy_castle_cost___100_000_.jpeg'
+ )
+ })
+})
diff --git a/api-client/src/system/createSplash.ts b/api-client/src/system/createSplash.ts
new file mode 100644
index 00000000000..abaa280b226
--- /dev/null
+++ b/api-client/src/system/createSplash.ts
@@ -0,0 +1,26 @@
+import { POST, request } from '../request'
+import { sanitizeFileName } from './utils'
+import type { ResponsePromise } from '../request'
+import type { HostConfig } from '../types'
+
+export function createSplash(
+ config: HostConfig,
+ file: File
+): ResponsePromise {
+ // sanitize file name to ensure no spaces or special characters
+ const newFileName = sanitizeFileName(file.name)
+ const renamedFile = new File([file], newFileName, {
+ type: 'image/png',
+ })
+
+ const formData = new FormData()
+ formData.append('file', renamedFile)
+
+ // eslint-disable-next-line @typescript-eslint/no-invalid-void-type
+ return request(
+ POST,
+ '/system/oem_mode/upload_splash',
+ formData,
+ config
+ )
+}
diff --git a/api-client/src/system/index.ts b/api-client/src/system/index.ts
index 025a303a5b5..4dc86594d2c 100644
--- a/api-client/src/system/index.ts
+++ b/api-client/src/system/index.ts
@@ -1,4 +1,6 @@
export { createAuthorization } from './createAuthorization'
export { createRegistration } from './createRegistration'
+export { createSplash } from './createSplash'
export { getConnections } from './getConnections'
export * from './types'
+export * from './utils'
diff --git a/api-client/src/system/utils.ts b/api-client/src/system/utils.ts
new file mode 100644
index 00000000000..cc0eea11130
--- /dev/null
+++ b/api-client/src/system/utils.ts
@@ -0,0 +1,3 @@
+export function sanitizeFileName(fileName: string): string {
+ return fileName.replace(/[^a-zA-Z0-9-.]/gi, '_')
+}
diff --git a/api/.flake8 b/api/.flake8
index 7cf00cb00ec..ee1a726e611 100644
--- a/api/.flake8
+++ b/api/.flake8
@@ -14,6 +14,9 @@ extend-ignore =
ANN102
# do not require docstring for __init__, put them on the class
D107,
+ # Don't forbid the function signature from being mentioned in the first line of the
+ # docstring. It tends to raise false positives when referring to other functions.
+ D402,
# configure flake8-docstrings
# https://pypi.org/project/flake8-docstrings/
@@ -30,7 +33,7 @@ per-file-ignores =
src/opentrons/simulate.py:ANN,D
src/opentrons/types.py:ANN,D
src/opentrons/calibration_storage/*:ANN,D
- src/opentrons/commands/*:D
+ src/opentrons/legacy_commands/*:D
src/opentrons/config/*:ANN,D
src/opentrons/drivers/*:ANN,D
src/opentrons/hardware_control/*:ANN,D
@@ -48,7 +51,7 @@ per-file-ignores =
tests/opentrons/test_types.py:ANN,D
tests/opentrons/conftest.py:ANN,D
tests/opentrons/calibration_storage/*:ANN,D
- tests/opentrons/commands/*:ANN,D
+ tests/opentrons/legacy_commands/*:ANN,D
tests/opentrons/config/*:ANN,D
tests/opentrons/data/*:ANN,D
tests/opentrons/drivers/*:ANN,D
diff --git a/api/Makefile b/api/Makefile
index c6e78d04939..d43ff4b11e7 100755
--- a/api/Makefile
+++ b/api/Makefile
@@ -19,7 +19,7 @@ sphinx_build_allow_warnings := $(pipenv) run sphinx-build
ot_project := $(OPENTRONS_PROJECT)
project_rs_default = $(if $(ot_project),$(ot_project),robot-stack)
-project_ot3_default = $(if $(ot_project),$(ot_project),ot3)
+project_ir_default = $(if $(ot_project),$(ot_project),ot3)
# Find the version of the wheel from git using a helper script. We
# use python here so we can use the same version normalization that will be
@@ -27,10 +27,10 @@ project_ot3_default = $(if $(ot_project),$(ot_project),ot3)
wheel_file = dist/$(call python_get_wheelname,api,$(project_rs_default),opentrons,$(BUILD_NUMBER))
# Find the version of the sdist file from git using a helper script.
-sdist_file = dist/$(call python_get_sdistname,api,$(project_ot3_default),opentrons)
+sdist_file = dist/$(call python_get_sdistname,api,$(project_rs_default),opentrons)
# Find the branch, sha, version that will be used to update the VERSION.json file
-version_file = $(call python_get_git_version,api,$(project_ot3_default),opentrons)
+version_file = $(call python_get_git_version,api,$(project_rs_default),opentrons)
# These variables are for simulating python protocols
sim_log_level ?= info
@@ -100,7 +100,7 @@ wheel:
.PHONY: sdist
-sdist: export OPENTRONS_PROJECT=$(project_ot3_default)
+sdist: export OPENTRONS_PROJECT=$(project_rs_default)
sdist:
$(clean_sdist_cmd)
$(python) setup.py sdist
@@ -130,14 +130,11 @@ format:
$(python) -m black src tests setup.py
docs/build/html/v%: docs/v%
- $(sphinx_build_allow_warnings) -b html -d docs/build/doctrees -n $< $@
-# sphinx wont automatically do this because it's only in a template
- $(SHX) cp docs/img/lightbulb.jpg $@/_images/
+ $(sphinx_build) -b html -d docs/build/doctrees -n $< $@
docs/build/html/hardware: docs/hardware src/opentrons/hardware_control
$(sphinx_build_allow_warnings) -b html -d docs/build/doctrees -n $< $@
$(SHX) mkdir $@/_images/
- $(SHX) cp docs/img/lightbulb.jpg $@/_images/
docs/dist/v%: docs/build/html/v%
$(SHX) mkdir -p $@
@@ -202,8 +199,8 @@ emulator:
-$(python) -m opentrons.hardware_control.emulation.app
.PHONY: deploy
-deploy: wheel
- $(call python_upload_package,$(twine_auth_args),$(twine_repository_url),$(wheel_file))
+deploy: wheel sdist
+ $(call python_upload_package,$(twine_auth_args),$(twine_repository_url),$(wheel_file),$(sdist_file))
# User must currently specify host, e.g.: `make term host=169.254.202.176`
.PHONY: term
diff --git a/api/Pipfile b/api/Pipfile
index ac63c870096..c59151f98e6 100755
--- a/api/Pipfile
+++ b/api/Pipfile
@@ -3,42 +3,50 @@ url = "https://pypi.python.org/simple"
verify_ssl = true
name = "pypi"
+[packages]
+jsonschema = "==4.17.3"
+pydantic = "==1.10.12"
+anyio = "==3.7.1"
+opentrons-shared-data = { editable = true, path = "../shared-data/python" }
+opentrons = { editable = true, path = "." }
+opentrons-hardware = { editable = true, path = "./../hardware", extras=["FLEX"] }
+numpy = "==1.22.3"
+packaging = "==21.3"
+
[dev-packages]
# atomicwrites and colorama are pytest dependencies on windows,
# spec'd here to force lockfile inclusion
# https://github.com/pypa/pipenv/issues/4408#issuecomment-668324177
-atomicwrites = { version = "==1.4.0", sys_platform = "== 'win32'" }
-colorama = { version = "==0.4.4", sys_platform = "== 'win32'" }
-coverage = "==5.1"
-mypy = "==0.910"
+atomicwrites = { version = "==1.4.0", markers="sys_platform=='win32'" }
+colorama = { version = "==0.4.4", markers="sys_platform=='win32'" }
+coverage = "==7.4.1"
+mypy = "==1.8.0"
numpydoc = "==0.9.1"
-pytest = "==7.0.1"
-pytest-asyncio = "~=0.18"
-pytest-cov = "==2.10.1"
+pytest = "==7.4.4"
+pytest-asyncio = "~=0.23.0"
+pytest-cov = "==4.1.0"
pytest-lazy-fixture = "==0.6.3"
-pytest-xdist = "~=2.2.1"
+pytest-xdist = "~=2.5.0"
sphinx = "==5.0.1"
-twine = "==4.0.2"
-wheel = "==0.30.0"
-typeguard = "==2.12.1"
+twine = "==4.0.0"
+wheel = "==0.37.0"
+typeguard = "==4.1.5"
sphinx-substitution-extensions = "==2020.9.30.0"
sphinxext-opengraph = "==0.8.1"
sphinx-tabs = ">=3.4.1,<4"
-mock = "~=4.0.2"
-flake8 = "~=3.9.0"
-flake8-annotations = "~=2.6.2"
-flake8-docstrings = "~=1.6.0"
-flake8-noqa = "~=1.2.1"
-decoy = "~=1.11"
+mock = "==5.1.0"
+flake8 = "==7.0.0"
+flake8-annotations = "~=3.0.1"
+flake8-docstrings = "~=1.7.0"
+flake8-noqa = "~=1.4.0"
+decoy = "==2.1.1"
black = "==22.3.0"
-types-mock = "==4.0.1"
+types-mock = "~=5.1.0"
types-setuptools = "==57.0.2"
-opentrons-shared-data = { editable = true, path = "../shared-data/python" }
-opentrons = { editable = true, path = "." }
-opentrons-hardware = { editable = true, path = "./../hardware", extras= ["FLEX"] }
# specify typing-extensions explicitly to force lockfile inclusion on Python >= 3.8
typing-extensions = ">=4.0.0,<5"
pytest-profiling = "~=1.7.0"
# TODO(mc, 2022-03-31): upgrade sphinx, remove this subdep pin
jinja2 = ">=2.3,<3.1"
-hypothesis = ">=6.36,<7"
+hypothesis = "==6.96.1"
+performance-metrics = {file = "../performance-metrics", editable = true}
diff --git a/api/Pipfile.lock b/api/Pipfile.lock
index 6a0090887a1..3687d72d921 100644
--- a/api/Pipfile.lock
+++ b/api/Pipfile.lock
@@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
- "sha256": "e53961988b9cfe876088bf31ed01f5e145545b9b6d014eac255b723c262a245f"
+ "sha256": "e1976802bb86d5fd07ba0b89826c63033785fb7d3a45bebcf638cd3555bdc1ec"
},
"pipfile-spec": 6,
"requires": {},
@@ -13,16 +13,7 @@
}
]
},
- "default": {},
- "develop": {
- "aenum": {
- "hashes": [
- "sha256:27b1710b9d084de6e2e695dab78fe9f269de924b51ae2850170ee7e1ca6288a5",
- "sha256:8cbd76cd18c4f870ff39b24284d3ea028fbe8731a58df3aa581e434c575b9559",
- "sha256:e0dfaeea4c2bd362144b87377e2c61d91958c5ed0b4daf89cb6f45ae23af6288"
- ],
- "version": "==3.1.15"
- },
+ "default": {
"aionotify": {
"hashes": [
"sha256:385e1becfaac2d9f4326673033d53912ef9565b6febdedbec593ee966df392c6",
@@ -30,21 +21,384 @@
],
"version": "==0.2.0"
},
- "alabaster": {
+ "anyio": {
+ "hashes": [
+ "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780",
+ "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.7'",
+ "version": "==3.7.1"
+ },
+ "attrs": {
+ "hashes": [
+ "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30",
+ "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==23.2.0"
+ },
+ "click": {
+ "hashes": [
+ "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28",
+ "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==8.1.7"
+ },
+ "exceptiongroup": {
+ "hashes": [
+ "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad",
+ "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"
+ ],
+ "markers": "python_version < '3.11'",
+ "version": "==1.2.1"
+ },
+ "idna": {
"hashes": [
- "sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3",
- "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"
+ "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc",
+ "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"
],
+ "markers": "python_version >= '3.5'",
+ "version": "==3.7"
+ },
+ "jsonschema": {
+ "hashes": [
+ "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d",
+ "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.7'",
+ "version": "==4.17.3"
+ },
+ "msgpack": {
+ "hashes": [
+ "sha256:00e073efcba9ea99db5acef3959efa45b52bc67b61b00823d2a1a6944bf45982",
+ "sha256:0726c282d188e204281ebd8de31724b7d749adebc086873a59efb8cf7ae27df3",
+ "sha256:0ceea77719d45c839fd73abcb190b8390412a890df2f83fb8cf49b2a4b5c2f40",
+ "sha256:114be227f5213ef8b215c22dde19532f5da9652e56e8ce969bf0a26d7c419fee",
+ "sha256:13577ec9e247f8741c84d06b9ece5f654920d8365a4b636ce0e44f15e07ec693",
+ "sha256:1876b0b653a808fcd50123b953af170c535027bf1d053b59790eebb0aeb38950",
+ "sha256:1ab0bbcd4d1f7b6991ee7c753655b481c50084294218de69365f8f1970d4c151",
+ "sha256:1cce488457370ffd1f953846f82323cb6b2ad2190987cd4d70b2713e17268d24",
+ "sha256:26ee97a8261e6e35885c2ecd2fd4a6d38252246f94a2aec23665a4e66d066305",
+ "sha256:3528807cbbb7f315bb81959d5961855e7ba52aa60a3097151cb21956fbc7502b",
+ "sha256:374a8e88ddab84b9ada695d255679fb99c53513c0a51778796fcf0944d6c789c",
+ "sha256:376081f471a2ef24828b83a641a02c575d6103a3ad7fd7dade5486cad10ea659",
+ "sha256:3923a1778f7e5ef31865893fdca12a8d7dc03a44b33e2a5f3295416314c09f5d",
+ "sha256:4916727e31c28be8beaf11cf117d6f6f188dcc36daae4e851fee88646f5b6b18",
+ "sha256:493c5c5e44b06d6c9268ce21b302c9ca055c1fd3484c25ba41d34476c76ee746",
+ "sha256:505fe3d03856ac7d215dbe005414bc28505d26f0c128906037e66d98c4e95868",
+ "sha256:5845fdf5e5d5b78a49b826fcdc0eb2e2aa7191980e3d2cfd2a30303a74f212e2",
+ "sha256:5c330eace3dd100bdb54b5653b966de7f51c26ec4a7d4e87132d9b4f738220ba",
+ "sha256:5dbf059fb4b7c240c873c1245ee112505be27497e90f7c6591261c7d3c3a8228",
+ "sha256:5e390971d082dba073c05dbd56322427d3280b7cc8b53484c9377adfbae67dc2",
+ "sha256:5fbb160554e319f7b22ecf530a80a3ff496d38e8e07ae763b9e82fadfe96f273",
+ "sha256:64d0fcd436c5683fdd7c907eeae5e2cbb5eb872fafbc03a43609d7941840995c",
+ "sha256:69284049d07fce531c17404fcba2bb1df472bc2dcdac642ae71a2d079d950653",
+ "sha256:6a0e76621f6e1f908ae52860bdcb58e1ca85231a9b0545e64509c931dd34275a",
+ "sha256:73ee792784d48aa338bba28063e19a27e8d989344f34aad14ea6e1b9bd83f596",
+ "sha256:74398a4cf19de42e1498368c36eed45d9528f5fd0155241e82c4082b7e16cffd",
+ "sha256:7938111ed1358f536daf311be244f34df7bf3cdedb3ed883787aca97778b28d8",
+ "sha256:82d92c773fbc6942a7a8b520d22c11cfc8fd83bba86116bfcf962c2f5c2ecdaa",
+ "sha256:83b5c044f3eff2a6534768ccfd50425939e7a8b5cf9a7261c385de1e20dcfc85",
+ "sha256:8db8e423192303ed77cff4dce3a4b88dbfaf43979d280181558af5e2c3c71afc",
+ "sha256:9517004e21664f2b5a5fd6333b0731b9cf0817403a941b393d89a2f1dc2bd836",
+ "sha256:95c02b0e27e706e48d0e5426d1710ca78e0f0628d6e89d5b5a5b91a5f12274f3",
+ "sha256:99881222f4a8c2f641f25703963a5cefb076adffd959e0558dc9f803a52d6a58",
+ "sha256:9ee32dcb8e531adae1f1ca568822e9b3a738369b3b686d1477cbc643c4a9c128",
+ "sha256:a22e47578b30a3e199ab067a4d43d790249b3c0587d9a771921f86250c8435db",
+ "sha256:b5505774ea2a73a86ea176e8a9a4a7c8bf5d521050f0f6f8426afe798689243f",
+ "sha256:bd739c9251d01e0279ce729e37b39d49a08c0420d3fee7f2a4968c0576678f77",
+ "sha256:d16a786905034e7e34098634b184a7d81f91d4c3d246edc6bd7aefb2fd8ea6ad",
+ "sha256:d3420522057ebab1728b21ad473aa950026d07cb09da41103f8e597dfbfaeb13",
+ "sha256:d56fd9f1f1cdc8227d7b7918f55091349741904d9520c65f0139a9755952c9e8",
+ "sha256:d661dc4785affa9d0edfdd1e59ec056a58b3dbb9f196fa43587f3ddac654ac7b",
+ "sha256:dfe1f0f0ed5785c187144c46a292b8c34c1295c01da12e10ccddfc16def4448a",
+ "sha256:e1dd7839443592d00e96db831eddb4111a2a81a46b028f0facd60a09ebbdd543",
+ "sha256:e2872993e209f7ed04d963e4b4fbae72d034844ec66bc4ca403329db2074377b",
+ "sha256:e2f879ab92ce502a1e65fce390eab619774dda6a6ff719718069ac94084098ce",
+ "sha256:e3aa7e51d738e0ec0afbed661261513b38b3014754c9459508399baf14ae0c9d",
+ "sha256:e532dbd6ddfe13946de050d7474e3f5fb6ec774fbb1a188aaf469b08cf04189a",
+ "sha256:e6b7842518a63a9f17107eb176320960ec095a8ee3b4420b5f688e24bf50c53c",
+ "sha256:e75753aeda0ddc4c28dce4c32ba2f6ec30b1b02f6c0b14e547841ba5b24f753f",
+ "sha256:eadb9f826c138e6cf3c49d6f8de88225a3c0ab181a9b4ba792e006e5292d150e",
+ "sha256:ed59dd52075f8fc91da6053b12e8c89e37aa043f8986efd89e61fae69dc1b011",
+ "sha256:ef254a06bcea461e65ff0373d8a0dd1ed3aa004af48839f002a0c994a6f72d04",
+ "sha256:f3709997b228685fe53e8c433e2df9f0cdb5f4542bd5114ed17ac3c0129b0480",
+ "sha256:f51bab98d52739c50c56658cc303f190785f9a2cd97b823357e7aeae54c8f68a",
+ "sha256:f9904e24646570539a8950400602d66d2b2c492b9010ea7e965025cb71d0c86d",
+ "sha256:f9af38a89b6a5c04b7d18c492c8ccf2aee7048aff1ce8437c4683bb5a1df893d"
+ ],
+ "markers": "platform_system != 'Windows'",
+ "version": "==1.0.8"
+ },
+ "numpy": {
+ "hashes": [
+ "sha256:07a8c89a04997625236c5ecb7afe35a02af3896c8aa01890a849913a2309c676",
+ "sha256:08d9b008d0156c70dc392bb3ab3abb6e7a711383c3247b410b39962263576cd4",
+ "sha256:201b4d0552831f7250a08d3b38de0d989d6f6e4658b709a02a73c524ccc6ffce",
+ "sha256:2c10a93606e0b4b95c9b04b77dc349b398fdfbda382d2a39ba5a822f669a0123",
+ "sha256:3ca688e1b9b95d80250bca34b11a05e389b1420d00e87a0d12dc45f131f704a1",
+ "sha256:48a3aecd3b997bf452a2dedb11f4e79bc5bfd21a1d4cc760e703c31d57c84b3e",
+ "sha256:568dfd16224abddafb1cbcce2ff14f522abe037268514dd7e42c6776a1c3f8e5",
+ "sha256:5bfb1bb598e8229c2d5d48db1860bcf4311337864ea3efdbe1171fb0c5da515d",
+ "sha256:639b54cdf6aa4f82fe37ebf70401bbb74b8508fddcf4797f9fe59615b8c5813a",
+ "sha256:8251ed96f38b47b4295b1ae51631de7ffa8260b5b087808ef09a39a9d66c97ab",
+ "sha256:92bfa69cfbdf7dfc3040978ad09a48091143cffb778ec3b03fa170c494118d75",
+ "sha256:97098b95aa4e418529099c26558eeb8486e66bd1e53a6b606d684d0c3616b168",
+ "sha256:a3bae1a2ed00e90b3ba5f7bd0a7c7999b55d609e0c54ceb2b076a25e345fa9f4",
+ "sha256:c34ea7e9d13a70bf2ab64a2532fe149a9aced424cd05a2c4ba662fd989e3e45f",
+ "sha256:dbc7601a3b7472d559dc7b933b18b4b66f9aa7452c120e87dfb33d02008c8a18",
+ "sha256:e7927a589df200c5e23c57970bafbd0cd322459aa7b1ff73b7c2e84d6e3eae62",
+ "sha256:f8c1f39caad2c896bc0018f699882b345b2a63708008be29b1f355ebf6f933fe",
+ "sha256:f950f8845b480cffe522913d35567e29dd381b0dc7e4ce6a4a9f9156417d2430",
+ "sha256:fade0d4f4d292b6f39951b6836d7a3c7ef5b2347f3c420cd9820a1d90d794802",
+ "sha256:fdf3c08bce27132395d3c3ba1503cac12e17282358cb4bddc25cc46b0aca07aa"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.8'",
+ "version": "==1.22.3"
+ },
+ "opentrons": {
+ "editable": true,
+ "markers": "python_version >= '3.10'",
+ "path": "."
+ },
+ "opentrons-hardware": {
+ "editable": true,
+ "extras": [
+ "FLEX"
+ ],
+ "path": "./../hardware"
+ },
+ "opentrons-shared-data": {
+ "editable": true,
+ "markers": "python_version >= '3.10'",
+ "path": "../shared-data/python"
+ },
+ "packaging": {
+ "hashes": [
+ "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb",
+ "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"
+ ],
+ "index": "pypi",
"markers": "python_version >= '3.6'",
- "version": "==0.7.13"
+ "version": "==21.3"
},
- "anyio": {
+ "pydantic": {
"hashes": [
- "sha256:929a6852074397afe1d989002aa96d457e3e1e5441357c60d03e7eea0e65e1b0",
- "sha256:ae57a67583e5ff8b4af47666ff5651c3732d45fd26c929253748e796af860374"
+ "sha256:0fe8a415cea8f340e7a9af9c54fc71a649b43e8ca3cc732986116b3cb135d303",
+ "sha256:1289c180abd4bd4555bb927c42ee42abc3aee02b0fb2d1223fb7c6e5bef87dbe",
+ "sha256:1eb2085c13bce1612da8537b2d90f549c8cbb05c67e8f22854e201bde5d98a47",
+ "sha256:2031de0967c279df0d8a1c72b4ffc411ecd06bac607a212892757db7462fc494",
+ "sha256:2a7bac939fa326db1ab741c9d7f44c565a1d1e80908b3797f7f81a4f86bc8d33",
+ "sha256:2d5a58feb9a39f481eda4d5ca220aa8b9d4f21a41274760b9bc66bfd72595b86",
+ "sha256:2f9a6fab5f82ada41d56b0602606a5506aab165ca54e52bc4545028382ef1c5d",
+ "sha256:2fcfb5296d7877af406ba1547dfde9943b1256d8928732267e2653c26938cd9c",
+ "sha256:549a8e3d81df0a85226963611950b12d2d334f214436a19537b2efed61b7639a",
+ "sha256:598da88dfa127b666852bef6d0d796573a8cf5009ffd62104094a4fe39599565",
+ "sha256:5d1197e462e0364906cbc19681605cb7c036f2475c899b6f296104ad42b9f5fb",
+ "sha256:69328e15cfda2c392da4e713443c7dbffa1505bc9d566e71e55abe14c97ddc62",
+ "sha256:6a9dfa722316f4acf4460afdf5d41d5246a80e249c7ff475c43a3a1e9d75cf62",
+ "sha256:6b30bcb8cbfccfcf02acb8f1a261143fab622831d9c0989707e0e659f77a18e0",
+ "sha256:6c076be61cd0177a8433c0adcb03475baf4ee91edf5a4e550161ad57fc90f523",
+ "sha256:771735dc43cf8383959dc9b90aa281f0b6092321ca98677c5fb6125a6f56d58d",
+ "sha256:795e34e6cc065f8f498c89b894a3c6da294a936ee71e644e4bd44de048af1405",
+ "sha256:87afda5539d5140cb8ba9e8b8c8865cb5b1463924d38490d73d3ccfd80896b3f",
+ "sha256:8fb2aa3ab3728d950bcc885a2e9eff6c8fc40bc0b7bb434e555c215491bcf48b",
+ "sha256:a1fcb59f2f355ec350073af41d927bf83a63b50e640f4dbaa01053a28b7a7718",
+ "sha256:a5e7add47a5b5a40c49b3036d464e3c7802f8ae0d1e66035ea16aa5b7a3923ed",
+ "sha256:a73f489aebd0c2121ed974054cb2759af8a9f747de120acd2c3394cf84176ccb",
+ "sha256:ab26038b8375581dc832a63c948f261ae0aa21f1d34c1293469f135fa92972a5",
+ "sha256:b0d191db0f92dfcb1dec210ca244fdae5cbe918c6050b342d619c09d31eea0cc",
+ "sha256:b749a43aa51e32839c9d71dc67eb1e4221bb04af1033a32e3923d46f9effa942",
+ "sha256:b7ccf02d7eb340b216ec33e53a3a629856afe1c6e0ef91d84a4e6f2fb2ca70fe",
+ "sha256:ba5b2e6fe6ca2b7e013398bc7d7b170e21cce322d266ffcd57cca313e54fb246",
+ "sha256:ba5c4a8552bff16c61882db58544116d021d0b31ee7c66958d14cf386a5b5350",
+ "sha256:c79e6a11a07da7374f46970410b41d5e266f7f38f6a17a9c4823db80dadf4303",
+ "sha256:ca48477862372ac3770969b9d75f1bf66131d386dba79506c46d75e6b48c1e09",
+ "sha256:dea7adcc33d5d105896401a1f37d56b47d443a2b2605ff8a969a0ed5543f7e33",
+ "sha256:e0a16d274b588767602b7646fa05af2782576a6cf1022f4ba74cbb4db66f6ca8",
+ "sha256:e4129b528c6baa99a429f97ce733fff478ec955513630e61b49804b6cf9b224a",
+ "sha256:e5f805d2d5d0a41633651a73fa4ecdd0b3d7a49de4ec3fadf062fe16501ddbf1",
+ "sha256:ef6c96b2baa2100ec91a4b428f80d8f28a3c9e53568219b6c298c1125572ebc6",
+ "sha256:fdbdd1d630195689f325c9ef1a12900524dceb503b00a987663ff4f58669b93d"
],
- "markers": "python_full_version >= '3.6.2'",
- "version": "==3.3.0"
+ "index": "pypi",
+ "markers": "python_version >= '3.7'",
+ "version": "==1.10.12"
+ },
+ "pyparsing": {
+ "hashes": [
+ "sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad",
+ "sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"
+ ],
+ "markers": "python_full_version >= '3.6.8'",
+ "version": "==3.1.2"
+ },
+ "pyrsistent": {
+ "hashes": [
+ "sha256:0724c506cd8b63c69c7f883cc233aac948c1ea946ea95996ad8b1380c25e1d3f",
+ "sha256:09848306523a3aba463c4b49493a760e7a6ca52e4826aa100ee99d8d39b7ad1e",
+ "sha256:0f3b1bcaa1f0629c978b355a7c37acd58907390149b7311b5db1b37648eb6958",
+ "sha256:21cc459636983764e692b9eba7144cdd54fdec23ccdb1e8ba392a63666c60c34",
+ "sha256:2e14c95c16211d166f59c6611533d0dacce2e25de0f76e4c140fde250997b3ca",
+ "sha256:2e2c116cc804d9b09ce9814d17df5edf1df0c624aba3b43bc1ad90411487036d",
+ "sha256:4021a7f963d88ccd15b523787d18ed5e5269ce57aa4037146a2377ff607ae87d",
+ "sha256:4c48f78f62ab596c679086084d0dd13254ae4f3d6c72a83ffdf5ebdef8f265a4",
+ "sha256:4f5c2d012671b7391803263419e31b5c7c21e7c95c8760d7fc35602353dee714",
+ "sha256:58b8f6366e152092194ae68fefe18b9f0b4f89227dfd86a07770c3d86097aebf",
+ "sha256:59a89bccd615551391f3237e00006a26bcf98a4d18623a19909a2c48b8e986ee",
+ "sha256:5cdd7ef1ea7a491ae70d826b6cc64868de09a1d5ff9ef8d574250d0940e275b8",
+ "sha256:6288b3fa6622ad8a91e6eb759cfc48ff3089e7c17fb1d4c59a919769314af224",
+ "sha256:6d270ec9dd33cdb13f4d62c95c1a5a50e6b7cdd86302b494217137f760495b9d",
+ "sha256:79ed12ba79935adaac1664fd7e0e585a22caa539dfc9b7c7c6d5ebf91fb89054",
+ "sha256:7d29c23bdf6e5438c755b941cef867ec2a4a172ceb9f50553b6ed70d50dfd656",
+ "sha256:8441cf9616d642c475684d6cf2520dd24812e996ba9af15e606df5f6fd9d04a7",
+ "sha256:881bbea27bbd32d37eb24dd320a5e745a2a5b092a17f6debc1349252fac85423",
+ "sha256:8c3aba3e01235221e5b229a6c05f585f344734bd1ad42a8ac51493d74722bbce",
+ "sha256:a14798c3005ec892bbada26485c2eea3b54109cb2533713e355c806891f63c5e",
+ "sha256:b14decb628fac50db5e02ee5a35a9c0772d20277824cfe845c8a8b717c15daa3",
+ "sha256:b318ca24db0f0518630e8b6f3831e9cba78f099ed5c1d65ffe3e023003043ba0",
+ "sha256:c1beb78af5423b879edaf23c5591ff292cf7c33979734c99aa66d5914ead880f",
+ "sha256:c55acc4733aad6560a7f5f818466631f07efc001fd023f34a6c203f8b6df0f0b",
+ "sha256:ca52d1ceae015859d16aded12584c59eb3825f7b50c6cfd621d4231a6cc624ce",
+ "sha256:cae40a9e3ce178415040a0383f00e8d68b569e97f31928a3a8ad37e3fde6df6a",
+ "sha256:e78d0c7c1e99a4a45c99143900ea0546025e41bb59ebc10182e947cf1ece9174",
+ "sha256:ef3992833fbd686ee783590639f4b8343a57f1f75de8633749d984dc0eb16c86",
+ "sha256:f058a615031eea4ef94ead6456f5ec2026c19fb5bd6bfe86e9665c4158cf802f",
+ "sha256:f5ac696f02b3fc01a710427585c855f65cd9c640e14f52abe52020722bb4906b",
+ "sha256:f920385a11207dc372a028b3f1e1038bb244b3ec38d448e6d8e43c6b3ba20e98",
+ "sha256:fed2c3216a605dc9a6ea50c7e84c82906e3684c4e80d2908208f662a6cbf9022"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==0.20.0"
+ },
+ "pyserial": {
+ "hashes": [
+ "sha256:3c77e014170dfffbd816e6ffc205e9842efb10be9f58ec16d3e8675b4925cddb",
+ "sha256:c4451db6ba391ca6ca299fb3ec7bae67a5c55dde170964c7a14ceefec02f2cf0"
+ ],
+ "version": "==3.5"
+ },
+ "python-can": {
+ "hashes": [
+ "sha256:6ad50f4613289f3c4d276b6d2ac8901d776dcb929994cce93f55a69e858c595f",
+ "sha256:7eea9b81b0ff908000a825db024313f622895bd578e8a17433e0474cd7d2da83"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==4.2.2"
+ },
+ "setuptools": {
+ "hashes": [
+ "sha256:54faa7f2e8d2d11bcd2c07bed282eef1046b5c080d1c32add737d7b5817b1ad4",
+ "sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==70.0.0"
+ },
+ "sniffio": {
+ "hashes": [
+ "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2",
+ "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==1.3.1"
+ },
+ "typing-extensions": {
+ "hashes": [
+ "sha256:6024b58b69089e5a89c347397254e35f1bf02a907728ec7fee9bf0fe837d203a",
+ "sha256:915f5e35ff76f56588223f15fdd5938f9a1cf9195c0de25130c627e4d597f6d1"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==4.12.1"
+ },
+ "wrapt": {
+ "hashes": [
+ "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc",
+ "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81",
+ "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09",
+ "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e",
+ "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca",
+ "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0",
+ "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb",
+ "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487",
+ "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40",
+ "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c",
+ "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060",
+ "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202",
+ "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41",
+ "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9",
+ "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b",
+ "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664",
+ "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d",
+ "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362",
+ "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00",
+ "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc",
+ "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1",
+ "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267",
+ "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956",
+ "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966",
+ "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1",
+ "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228",
+ "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72",
+ "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d",
+ "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292",
+ "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0",
+ "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0",
+ "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36",
+ "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c",
+ "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5",
+ "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f",
+ "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73",
+ "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b",
+ "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2",
+ "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593",
+ "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39",
+ "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389",
+ "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf",
+ "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf",
+ "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89",
+ "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c",
+ "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c",
+ "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f",
+ "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440",
+ "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465",
+ "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136",
+ "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b",
+ "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8",
+ "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3",
+ "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8",
+ "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6",
+ "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e",
+ "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f",
+ "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c",
+ "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e",
+ "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8",
+ "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2",
+ "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020",
+ "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35",
+ "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d",
+ "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3",
+ "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537",
+ "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809",
+ "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d",
+ "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a",
+ "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==1.16.0"
+ }
+ },
+ "develop": {
+ "alabaster": {
+ "hashes": [
+ "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65",
+ "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92"
+ ],
+ "markers": "python_version >= '3.9'",
+ "version": "==0.7.16"
},
"atomicwrites": {
"hashes": [
@@ -56,19 +410,27 @@
},
"attrs": {
"hashes": [
- "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04",
- "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"
+ "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30",
+ "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"
],
"markers": "python_version >= '3.7'",
- "version": "==23.1.0"
+ "version": "==23.2.0"
},
"babel": {
"hashes": [
- "sha256:04c3e2d28d2b7681644508f836be388ae49e0cfe91465095340395b60d00f210",
- "sha256:fbfcae1575ff78e26c7449136f1abbefc3c13ce542eeb13d43d50d8b047216ec"
+ "sha256:08706bdad8d0a3413266ab61bd6c34d0c28d6e1e7badf40a2cebe67644e2e1fb",
+ "sha256:8daf0e265d05768bc6c7a314cf1321e9a123afc328cc635c18622a2f30a04413"
],
- "markers": "python_version >= '3.7'",
- "version": "==2.13.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==2.15.0"
+ },
+ "backports.tarfile": {
+ "hashes": [
+ "sha256:77e284d754527b01fb1e6fa8a1afe577858ebe4e9dad8919e34c862cb399bc34",
+ "sha256:d75e02c268746e1b8144c278978b6e98e85de6ad16f8e4b0844a154557eca991"
+ ],
+ "markers": "python_version < '3.12'",
+ "version": "==1.2.0"
},
"black": {
"hashes": [
@@ -97,119 +459,112 @@
"sha256:fd57160949179ec517d32ac2ac898b5f20d68ed1a9c977346efbac9c2f1e779d"
],
"index": "pypi",
+ "markers": "python_full_version >= '3.6.2'",
"version": "==22.3.0"
},
- "bleach": {
- "hashes": [
- "sha256:1a1a85c1595e07d8db14c5f09f09e6433502c51c595970edc090551f0db99414",
- "sha256:33c16e3353dbd13028ab4799a0f89a83f113405c766e9c122df8a06f5b85b3f4"
- ],
- "markers": "python_version >= '3.7'",
- "version": "==6.0.0"
- },
"certifi": {
"hashes": [
- "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082",
- "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"
+ "sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516",
+ "sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56"
],
"markers": "python_version >= '3.6'",
- "version": "==2023.7.22"
+ "version": "==2024.6.2"
},
"charset-normalizer": {
"hashes": [
- "sha256:02673e456dc5ab13659f85196c534dc596d4ef260e4d86e856c3b2773ce09843",
- "sha256:02af06682e3590ab952599fbadac535ede5d60d78848e555aa58d0c0abbde786",
- "sha256:03680bb39035fbcffe828eae9c3f8afc0428c91d38e7d61aa992ef7a59fb120e",
- "sha256:0570d21da019941634a531444364f2482e8db0b3425fcd5ac0c36565a64142c8",
- "sha256:09c77f964f351a7369cc343911e0df63e762e42bac24cd7d18525961c81754f4",
- "sha256:0d3d5b7db9ed8a2b11a774db2bbea7ba1884430a205dbd54a32d61d7c2a190fa",
- "sha256:1063da2c85b95f2d1a430f1c33b55c9c17ffaf5e612e10aeaad641c55a9e2b9d",
- "sha256:12ebea541c44fdc88ccb794a13fe861cc5e35d64ed689513a5c03d05b53b7c82",
- "sha256:153e7b6e724761741e0974fc4dcd406d35ba70b92bfe3fedcb497226c93b9da7",
- "sha256:15b26ddf78d57f1d143bdf32e820fd8935d36abe8a25eb9ec0b5a71c82eb3895",
- "sha256:1872d01ac8c618a8da634e232f24793883d6e456a66593135aeafe3784b0848d",
- "sha256:187d18082694a29005ba2944c882344b6748d5be69e3a89bf3cc9d878e548d5a",
- "sha256:1b2919306936ac6efb3aed1fbf81039f7087ddadb3160882a57ee2ff74fd2382",
- "sha256:232ac332403e37e4a03d209a3f92ed9071f7d3dbda70e2a5e9cff1c4ba9f0678",
- "sha256:23e8565ab7ff33218530bc817922fae827420f143479b753104ab801145b1d5b",
- "sha256:24817cb02cbef7cd499f7c9a2735286b4782bd47a5b3516a0e84c50eab44b98e",
- "sha256:249c6470a2b60935bafd1d1d13cd613f8cd8388d53461c67397ee6a0f5dce741",
- "sha256:24a91a981f185721542a0b7c92e9054b7ab4fea0508a795846bc5b0abf8118d4",
- "sha256:2502dd2a736c879c0f0d3e2161e74d9907231e25d35794584b1ca5284e43f596",
- "sha256:250c9eb0f4600361dd80d46112213dff2286231d92d3e52af1e5a6083d10cad9",
- "sha256:278c296c6f96fa686d74eb449ea1697f3c03dc28b75f873b65b5201806346a69",
- "sha256:2935ffc78db9645cb2086c2f8f4cfd23d9b73cc0dc80334bc30aac6f03f68f8c",
- "sha256:2f4a0033ce9a76e391542c182f0d48d084855b5fcba5010f707c8e8c34663d77",
- "sha256:30a85aed0b864ac88309b7d94be09f6046c834ef60762a8833b660139cfbad13",
- "sha256:380c4bde80bce25c6e4f77b19386f5ec9db230df9f2f2ac1e5ad7af2caa70459",
- "sha256:3ae38d325b512f63f8da31f826e6cb6c367336f95e418137286ba362925c877e",
- "sha256:3b447982ad46348c02cb90d230b75ac34e9886273df3a93eec0539308a6296d7",
- "sha256:3debd1150027933210c2fc321527c2299118aa929c2f5a0a80ab6953e3bd1908",
- "sha256:4162918ef3098851fcd8a628bf9b6a98d10c380725df9e04caf5ca6dd48c847a",
- "sha256:468d2a840567b13a590e67dd276c570f8de00ed767ecc611994c301d0f8c014f",
- "sha256:4cc152c5dd831641e995764f9f0b6589519f6f5123258ccaca8c6d34572fefa8",
- "sha256:542da1178c1c6af8873e143910e2269add130a299c9106eef2594e15dae5e482",
- "sha256:557b21a44ceac6c6b9773bc65aa1b4cc3e248a5ad2f5b914b91579a32e22204d",
- "sha256:5707a746c6083a3a74b46b3a631d78d129edab06195a92a8ece755aac25a3f3d",
- "sha256:588245972aca710b5b68802c8cad9edaa98589b1b42ad2b53accd6910dad3545",
- "sha256:5adf257bd58c1b8632046bbe43ee38c04e1038e9d37de9c57a94d6bd6ce5da34",
- "sha256:619d1c96099be5823db34fe89e2582b336b5b074a7f47f819d6b3a57ff7bdb86",
- "sha256:63563193aec44bce707e0c5ca64ff69fa72ed7cf34ce6e11d5127555756fd2f6",
- "sha256:67b8cc9574bb518ec76dc8e705d4c39ae78bb96237cb533edac149352c1f39fe",
- "sha256:6a685067d05e46641d5d1623d7c7fdf15a357546cbb2f71b0ebde91b175ffc3e",
- "sha256:70f1d09c0d7748b73290b29219e854b3207aea922f839437870d8cc2168e31cc",
- "sha256:750b446b2ffce1739e8578576092179160f6d26bd5e23eb1789c4d64d5af7dc7",
- "sha256:7966951325782121e67c81299a031f4c115615e68046f79b85856b86ebffc4cd",
- "sha256:7b8b8bf1189b3ba9b8de5c8db4d541b406611a71a955bbbd7385bbc45fcb786c",
- "sha256:7f5d10bae5d78e4551b7be7a9b29643a95aded9d0f602aa2ba584f0388e7a557",
- "sha256:805dfea4ca10411a5296bcc75638017215a93ffb584c9e344731eef0dcfb026a",
- "sha256:81bf654678e575403736b85ba3a7867e31c2c30a69bc57fe88e3ace52fb17b89",
- "sha256:82eb849f085624f6a607538ee7b83a6d8126df6d2f7d3b319cb837b289123078",
- "sha256:85a32721ddde63c9df9ebb0d2045b9691d9750cb139c161c80e500d210f5e26e",
- "sha256:86d1f65ac145e2c9ed71d8ffb1905e9bba3a91ae29ba55b4c46ae6fc31d7c0d4",
- "sha256:86f63face3a527284f7bb8a9d4f78988e3c06823f7bea2bd6f0e0e9298ca0403",
- "sha256:8eaf82f0eccd1505cf39a45a6bd0a8cf1c70dcfc30dba338207a969d91b965c0",
- "sha256:93aa7eef6ee71c629b51ef873991d6911b906d7312c6e8e99790c0f33c576f89",
- "sha256:96c2b49eb6a72c0e4991d62406e365d87067ca14c1a729a870d22354e6f68115",
- "sha256:9cf3126b85822c4e53aa28c7ec9869b924d6fcfb76e77a45c44b83d91afd74f9",
- "sha256:9fe359b2e3a7729010060fbca442ca225280c16e923b37db0e955ac2a2b72a05",
- "sha256:a0ac5e7015a5920cfce654c06618ec40c33e12801711da6b4258af59a8eff00a",
- "sha256:a3f93dab657839dfa61025056606600a11d0b696d79386f974e459a3fbc568ec",
- "sha256:a4b71f4d1765639372a3b32d2638197f5cd5221b19531f9245fcc9ee62d38f56",
- "sha256:aae32c93e0f64469f74ccc730a7cb21c7610af3a775157e50bbd38f816536b38",
- "sha256:aaf7b34c5bc56b38c931a54f7952f1ff0ae77a2e82496583b247f7c969eb1479",
- "sha256:abecce40dfebbfa6abf8e324e1860092eeca6f7375c8c4e655a8afb61af58f2c",
- "sha256:abf0d9f45ea5fb95051c8bfe43cb40cda383772f7e5023a83cc481ca2604d74e",
- "sha256:ac71b2977fb90c35d41c9453116e283fac47bb9096ad917b8819ca8b943abecd",
- "sha256:ada214c6fa40f8d800e575de6b91a40d0548139e5dc457d2ebb61470abf50186",
- "sha256:b09719a17a2301178fac4470d54b1680b18a5048b481cb8890e1ef820cb80455",
- "sha256:b1121de0e9d6e6ca08289583d7491e7fcb18a439305b34a30b20d8215922d43c",
- "sha256:b3b2316b25644b23b54a6f6401074cebcecd1244c0b8e80111c9a3f1c8e83d65",
- "sha256:b3d9b48ee6e3967b7901c052b670c7dda6deb812c309439adaffdec55c6d7b78",
- "sha256:b5bcf60a228acae568e9911f410f9d9e0d43197d030ae5799e20dca8df588287",
- "sha256:b8f3307af845803fb0b060ab76cf6dd3a13adc15b6b451f54281d25911eb92df",
- "sha256:c2af80fb58f0f24b3f3adcb9148e6203fa67dd3f61c4af146ecad033024dde43",
- "sha256:c350354efb159b8767a6244c166f66e67506e06c8924ed74669b2c70bc8735b1",
- "sha256:c5a74c359b2d47d26cdbbc7845e9662d6b08a1e915eb015d044729e92e7050b7",
- "sha256:c71f16da1ed8949774ef79f4a0260d28b83b3a50c6576f8f4f0288d109777989",
- "sha256:d47ecf253780c90ee181d4d871cd655a789da937454045b17b5798da9393901a",
- "sha256:d7eff0f27edc5afa9e405f7165f85a6d782d308f3b6b9d96016c010597958e63",
- "sha256:d97d85fa63f315a8bdaba2af9a6a686e0eceab77b3089af45133252618e70884",
- "sha256:db756e48f9c5c607b5e33dd36b1d5872d0422e960145b08ab0ec7fd420e9d649",
- "sha256:dc45229747b67ffc441b3de2f3ae5e62877a282ea828a5bdb67883c4ee4a8810",
- "sha256:e0fc42822278451bc13a2e8626cf2218ba570f27856b536e00cfa53099724828",
- "sha256:e39c7eb31e3f5b1f88caff88bcff1b7f8334975b46f6ac6e9fc725d829bc35d4",
- "sha256:e46cd37076971c1040fc8c41273a8b3e2c624ce4f2be3f5dfcb7a430c1d3acc2",
- "sha256:e5c1502d4ace69a179305abb3f0bb6141cbe4714bc9b31d427329a95acfc8bdd",
- "sha256:edfe077ab09442d4ef3c52cb1f9dab89bff02f4524afc0acf2d46be17dc479f5",
- "sha256:effe5406c9bd748a871dbcaf3ac69167c38d72db8c9baf3ff954c344f31c4cbe",
- "sha256:f0d1e3732768fecb052d90d62b220af62ead5748ac51ef61e7b32c266cac9293",
- "sha256:f5969baeaea61c97efa706b9b107dcba02784b1601c74ac84f2a532ea079403e",
- "sha256:f8888e31e3a85943743f8fc15e71536bda1c81d5aa36d014a3c0c44481d7db6e",
- "sha256:fc52b79d83a3fe3a360902d3f5d79073a993597d48114c29485e9431092905d8"
- ],
- "markers": "python_version >= '3.7'",
- "version": "==3.3.0"
+ "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027",
+ "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087",
+ "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786",
+ "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8",
+ "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09",
+ "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185",
+ "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574",
+ "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e",
+ "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519",
+ "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898",
+ "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269",
+ "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3",
+ "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f",
+ "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6",
+ "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8",
+ "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a",
+ "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73",
+ "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc",
+ "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714",
+ "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2",
+ "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc",
+ "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce",
+ "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d",
+ "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e",
+ "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6",
+ "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269",
+ "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96",
+ "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d",
+ "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a",
+ "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4",
+ "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77",
+ "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d",
+ "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0",
+ "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed",
+ "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068",
+ "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac",
+ "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25",
+ "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8",
+ "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab",
+ "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26",
+ "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2",
+ "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db",
+ "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f",
+ "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5",
+ "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99",
+ "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c",
+ "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d",
+ "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811",
+ "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa",
+ "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a",
+ "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03",
+ "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b",
+ "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04",
+ "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c",
+ "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001",
+ "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458",
+ "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389",
+ "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99",
+ "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985",
+ "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537",
+ "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238",
+ "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f",
+ "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d",
+ "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796",
+ "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a",
+ "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143",
+ "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8",
+ "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c",
+ "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5",
+ "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5",
+ "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711",
+ "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4",
+ "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6",
+ "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c",
+ "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7",
+ "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4",
+ "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b",
+ "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae",
+ "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12",
+ "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c",
+ "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae",
+ "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8",
+ "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887",
+ "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b",
+ "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4",
+ "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f",
+ "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5",
+ "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33",
+ "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519",
+ "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"
+ ],
+ "markers": "python_full_version >= '3.7.0'",
+ "version": "==3.3.2"
},
"click": {
"hashes": [
@@ -227,58 +582,134 @@
"markers": "sys_platform == 'win32'",
"version": "==0.4.4"
},
+ "contourpy": {
+ "hashes": [
+ "sha256:00e5388f71c1a0610e6fe56b5c44ab7ba14165cdd6d695429c5cd94021e390b2",
+ "sha256:10a37ae557aabf2509c79715cd20b62e4c7c28b8cd62dd7d99e5ed3ce28c3fd9",
+ "sha256:11959f0ce4a6f7b76ec578576a0b61a28bdc0696194b6347ba3f1c53827178b9",
+ "sha256:187fa1d4c6acc06adb0fae5544c59898ad781409e61a926ac7e84b8f276dcef4",
+ "sha256:1a07fc092a4088ee952ddae19a2b2a85757b923217b7eed584fdf25f53a6e7ce",
+ "sha256:1cac0a8f71a041aa587410424ad46dfa6a11f6149ceb219ce7dd48f6b02b87a7",
+ "sha256:1d59e739ab0e3520e62a26c60707cc3ab0365d2f8fecea74bfe4de72dc56388f",
+ "sha256:2855c8b0b55958265e8b5888d6a615ba02883b225f2227461aa9127c578a4922",
+ "sha256:2e785e0f2ef0d567099b9ff92cbfb958d71c2d5b9259981cd9bee81bd194c9a4",
+ "sha256:309be79c0a354afff9ff7da4aaed7c3257e77edf6c1b448a779329431ee79d7e",
+ "sha256:39f3ecaf76cd98e802f094e0d4fbc6dc9c45a8d0c4d185f0f6c2234e14e5f75b",
+ "sha256:457499c79fa84593f22454bbd27670227874cd2ff5d6c84e60575c8b50a69619",
+ "sha256:49e70d111fee47284d9dd867c9bb9a7058a3c617274900780c43e38d90fe1205",
+ "sha256:4c75507d0a55378240f781599c30e7776674dbaf883a46d1c90f37e563453480",
+ "sha256:4c863140fafc615c14a4bf4efd0f4425c02230eb8ef02784c9a156461e62c965",
+ "sha256:4d8908b3bee1c889e547867ca4cdc54e5ab6be6d3e078556814a22457f49423c",
+ "sha256:5b9eb0ca724a241683c9685a484da9d35c872fd42756574a7cfbf58af26677fd",
+ "sha256:6022cecf8f44e36af10bd9118ca71f371078b4c168b6e0fab43d4a889985dbb5",
+ "sha256:6150ffa5c767bc6332df27157d95442c379b7dce3a38dff89c0f39b63275696f",
+ "sha256:62828cada4a2b850dbef89c81f5a33741898b305db244904de418cc957ff05dc",
+ "sha256:7b4182299f251060996af5249c286bae9361fa8c6a9cda5efc29fe8bfd6062ec",
+ "sha256:94b34f32646ca0414237168d68a9157cb3889f06b096612afdd296003fdd32fd",
+ "sha256:9ce6889abac9a42afd07a562c2d6d4b2b7134f83f18571d859b25624a331c90b",
+ "sha256:9cffe0f850e89d7c0012a1fb8730f75edd4320a0a731ed0c183904fe6ecfc3a9",
+ "sha256:a12a813949e5066148712a0626895c26b2578874e4cc63160bb007e6df3436fe",
+ "sha256:a1eea9aecf761c661d096d39ed9026574de8adb2ae1c5bd7b33558af884fb2ce",
+ "sha256:a31f94983fecbac95e58388210427d68cd30fe8a36927980fab9c20062645609",
+ "sha256:ac58bdee53cbeba2ecad824fa8159493f0bf3b8ea4e93feb06c9a465d6c87da8",
+ "sha256:af3f4485884750dddd9c25cb7e3915d83c2db92488b38ccb77dd594eac84c4a0",
+ "sha256:b33d2bc4f69caedcd0a275329eb2198f560b325605810895627be5d4b876bf7f",
+ "sha256:b59c0ffceff8d4d3996a45f2bb6f4c207f94684a96bf3d9728dbb77428dd8cb8",
+ "sha256:bb6834cbd983b19f06908b45bfc2dad6ac9479ae04abe923a275b5f48f1a186b",
+ "sha256:bd3db01f59fdcbce5b22afad19e390260d6d0222f35a1023d9adc5690a889364",
+ "sha256:bd7c23df857d488f418439686d3b10ae2fbf9bc256cd045b37a8c16575ea1040",
+ "sha256:c2528d60e398c7c4c799d56f907664673a807635b857df18f7ae64d3e6ce2d9f",
+ "sha256:d31a63bc6e6d87f77d71e1abbd7387ab817a66733734883d1fc0021ed9bfa083",
+ "sha256:d4492d82b3bc7fbb7e3610747b159869468079fe149ec5c4d771fa1f614a14df",
+ "sha256:ddcb8581510311e13421b1f544403c16e901c4e8f09083c881fab2be80ee31ba",
+ "sha256:e1d59258c3c67c865435d8fbeb35f8c59b8bef3d6f46c1f29f6123556af28445",
+ "sha256:eb3315a8a236ee19b6df481fc5f997436e8ade24a9f03dfdc6bd490fea20c6da",
+ "sha256:ef2b055471c0eb466033760a521efb9d8a32b99ab907fc8358481a1dd29e3bd3",
+ "sha256:ef5adb9a3b1d0c645ff694f9bca7702ec2c70f4d734f9922ea34de02294fdf72",
+ "sha256:f32c38afb74bd98ce26de7cc74a67b40afb7b05aae7b42924ea990d51e4dac02",
+ "sha256:fe0ccca550bb8e5abc22f530ec0466136379c01321fd94f30a22231e8a48d985"
+ ],
+ "markers": "python_version >= '3.9'",
+ "version": "==1.2.1"
+ },
"coverage": {
- "hashes": [
- "sha256:00f1d23f4336efc3b311ed0d807feb45098fc86dee1ca13b3d6768cdab187c8a",
- "sha256:01333e1bd22c59713ba8a79f088b3955946e293114479bbfc2e37d522be03355",
- "sha256:0cb4be7e784dcdc050fc58ef05b71aa8e89b7e6636b99967fadbdba694cf2b65",
- "sha256:0e61d9803d5851849c24f78227939c701ced6704f337cad0a91e0972c51c1ee7",
- "sha256:1601e480b9b99697a570cea7ef749e88123c04b92d84cedaa01e117436b4a0a9",
- "sha256:2742c7515b9eb368718cd091bad1a1b44135cc72468c731302b3d641895b83d1",
- "sha256:2d27a3f742c98e5c6b461ee6ef7287400a1956c11421eb574d843d9ec1f772f0",
- "sha256:402e1744733df483b93abbf209283898e9f0d67470707e3c7516d84f48524f55",
- "sha256:5c542d1e62eece33c306d66fe0a5c4f7f7b3c08fecc46ead86d7916684b36d6c",
- "sha256:5f2294dbf7875b991c381e3d5af2bcc3494d836affa52b809c91697449d0eda6",
- "sha256:6402bd2fdedabbdb63a316308142597534ea8e1895f4e7d8bf7476c5e8751fef",
- "sha256:66460ab1599d3cf894bb6baee8c684788819b71a5dc1e8fa2ecc152e5d752019",
- "sha256:782caea581a6e9ff75eccda79287daefd1d2631cc09d642b6ee2d6da21fc0a4e",
- "sha256:79a3cfd6346ce6c13145731d39db47b7a7b859c0272f02cdb89a3bdcbae233a0",
- "sha256:7a5bdad4edec57b5fb8dae7d3ee58622d626fd3a0be0dfceda162a7035885ecf",
- "sha256:8fa0cbc7ecad630e5b0f4f35b0f6ad419246b02bc750de7ac66db92667996d24",
- "sha256:a027ef0492ede1e03a8054e3c37b8def89a1e3c471482e9f046906ba4f2aafd2",
- "sha256:a3f3654d5734a3ece152636aad89f58afc9213c6520062db3978239db122f03c",
- "sha256:a82b92b04a23d3c8a581fc049228bafde988abacba397d57ce95fe95e0338ab4",
- "sha256:acf3763ed01af8410fc36afea23707d4ea58ba7e86a8ee915dfb9ceff9ef69d0",
- "sha256:adeb4c5b608574a3d647011af36f7586811a2c1197c861aedb548dd2453b41cd",
- "sha256:b83835506dfc185a319031cf853fa4bb1b3974b1f913f5bb1a0f3d98bdcded04",
- "sha256:bb28a7245de68bf29f6fb199545d072d1036a1917dca17a1e75bbb919e14ee8e",
- "sha256:bf9cb9a9fd8891e7efd2d44deb24b86d647394b9705b744ff6f8261e6f29a730",
- "sha256:c317eaf5ff46a34305b202e73404f55f7389ef834b8dbf4da09b9b9b37f76dd2",
- "sha256:dbe8c6ae7534b5b024296464f387d57c13caa942f6d8e6e0346f27e509f0f768",
- "sha256:de807ae933cfb7f0c7d9d981a053772452217df2bf38e7e6267c9cbf9545a796",
- "sha256:dead2ddede4c7ba6cb3a721870f5141c97dc7d85a079edb4bd8d88c3ad5b20c7",
- "sha256:dec5202bfe6f672d4511086e125db035a52b00f1648d6407cc8e526912c0353a",
- "sha256:e1ea316102ea1e1770724db01998d1603ed921c54a86a2efcb03428d5417e489",
- "sha256:f90bfc4ad18450c80b024036eaf91e4a246ae287701aaa88eaebebf150868052"
+ "extras": [
+ "toml"
+ ],
+ "hashes": [
+ "sha256:0193657651f5399d433c92f8ae264aff31fc1d066deee4b831549526433f3f61",
+ "sha256:02f2edb575d62172aa28fe00efe821ae31f25dc3d589055b3fb64d51e52e4ab1",
+ "sha256:0491275c3b9971cdbd28a4595c2cb5838f08036bca31765bad5e17edf900b2c7",
+ "sha256:077d366e724f24fc02dbfe9d946534357fda71af9764ff99d73c3c596001bbd7",
+ "sha256:10e88e7f41e6197ea0429ae18f21ff521d4f4490aa33048f6c6f94c6045a6a75",
+ "sha256:18e961aa13b6d47f758cc5879383d27b5b3f3dcd9ce8cdbfdc2571fe86feb4dd",
+ "sha256:1a78b656a4d12b0490ca72651fe4d9f5e07e3c6461063a9b6265ee45eb2bdd35",
+ "sha256:1ed4b95480952b1a26d863e546fa5094564aa0065e1e5f0d4d0041f293251d04",
+ "sha256:23b27b8a698e749b61809fb637eb98ebf0e505710ec46a8aa6f1be7dc0dc43a6",
+ "sha256:23f5881362dcb0e1a92b84b3c2809bdc90db892332daab81ad8f642d8ed55042",
+ "sha256:32a8d985462e37cfdab611a6f95b09d7c091d07668fdc26e47a725ee575fe166",
+ "sha256:3468cc8720402af37b6c6e7e2a9cdb9f6c16c728638a2ebc768ba1ef6f26c3a1",
+ "sha256:379d4c7abad5afbe9d88cc31ea8ca262296480a86af945b08214eb1a556a3e4d",
+ "sha256:3cacfaefe6089d477264001f90f55b7881ba615953414999c46cc9713ff93c8c",
+ "sha256:3e3424c554391dc9ef4a92ad28665756566a28fecf47308f91841f6c49288e66",
+ "sha256:46342fed0fff72efcda77040b14728049200cbba1279e0bf1188f1f2078c1d70",
+ "sha256:536d609c6963c50055bab766d9951b6c394759190d03311f3e9fcf194ca909e1",
+ "sha256:5d6850e6e36e332d5511a48a251790ddc545e16e8beaf046c03985c69ccb2676",
+ "sha256:6008adeca04a445ea6ef31b2cbaf1d01d02986047606f7da266629afee982630",
+ "sha256:64e723ca82a84053dd7bfcc986bdb34af8d9da83c521c19d6b472bc6880e191a",
+ "sha256:6b00e21f86598b6330f0019b40fb397e705135040dbedc2ca9a93c7441178e74",
+ "sha256:6d224f0c4c9c98290a6990259073f496fcec1b5cc613eecbd22786d398ded3ad",
+ "sha256:6dceb61d40cbfcf45f51e59933c784a50846dc03211054bd76b421a713dcdf19",
+ "sha256:7ac8f8eb153724f84885a1374999b7e45734bf93a87d8df1e7ce2146860edef6",
+ "sha256:85ccc5fa54c2ed64bd91ed3b4a627b9cce04646a659512a051fa82a92c04a448",
+ "sha256:869b5046d41abfea3e381dd143407b0d29b8282a904a19cb908fa24d090cc018",
+ "sha256:8bdb0285a0202888d19ec6b6d23d5990410decb932b709f2b0dfe216d031d218",
+ "sha256:8dfc5e195bbef80aabd81596ef52a1277ee7143fe419efc3c4d8ba2754671756",
+ "sha256:8e738a492b6221f8dcf281b67129510835461132b03024830ac0e554311a5c54",
+ "sha256:918440dea04521f499721c039863ef95433314b1db00ff826a02580c1f503e45",
+ "sha256:9641e21670c68c7e57d2053ddf6c443e4f0a6e18e547e86af3fad0795414a628",
+ "sha256:9d2f9d4cc2a53b38cabc2d6d80f7f9b7e3da26b2f53d48f05876fef7956b6968",
+ "sha256:a07f61fc452c43cd5328b392e52555f7d1952400a1ad09086c4a8addccbd138d",
+ "sha256:a3277f5fa7483c927fe3a7b017b39351610265308f5267ac6d4c2b64cc1d8d25",
+ "sha256:a4a3907011d39dbc3e37bdc5df0a8c93853c369039b59efa33a7b6669de04c60",
+ "sha256:aeb2c2688ed93b027eb0d26aa188ada34acb22dceea256d76390eea135083950",
+ "sha256:b094116f0b6155e36a304ff912f89bbb5067157aff5f94060ff20bbabdc8da06",
+ "sha256:b8ffb498a83d7e0305968289441914154fb0ef5d8b3157df02a90c6695978295",
+ "sha256:b9bb62fac84d5f2ff523304e59e5c439955fb3b7f44e3d7b2085184db74d733b",
+ "sha256:c61f66d93d712f6e03369b6a7769233bfda880b12f417eefdd4f16d1deb2fc4c",
+ "sha256:ca6e61dc52f601d1d224526360cdeab0d0712ec104a2ce6cc5ccef6ed9a233bc",
+ "sha256:ca7b26a5e456a843b9b6683eada193fc1f65c761b3a473941efe5a291f604c74",
+ "sha256:d12c923757de24e4e2110cf8832d83a886a4cf215c6e61ed506006872b43a6d1",
+ "sha256:d17bbc946f52ca67adf72a5ee783cd7cd3477f8f8796f59b4974a9b59cacc9ee",
+ "sha256:dfd1e1b9f0898817babf840b77ce9fe655ecbe8b1b327983df485b30df8cc011",
+ "sha256:e0860a348bf7004c812c8368d1fc7f77fe8e4c095d661a579196a9533778e156",
+ "sha256:f2f5968608b1fe2a1d00d01ad1017ee27efd99b3437e08b83ded9b7af3f6f766",
+ "sha256:f3771b23bb3675a06f5d885c3630b1d01ea6cac9e84a01aaf5508706dba546c5",
+ "sha256:f68ef3660677e6624c8cace943e4765545f8191313a07288a53d3da188bd8581",
+ "sha256:f86f368e1c7ce897bf2457b9eb61169a44e2ef797099fb5728482b8d69f3f016",
+ "sha256:f90515974b39f4dea2f27c0959688621b46d96d5a626cf9c53dbc653a895c05c",
+ "sha256:fe558371c1bdf3b8fa03e097c523fb9645b8730399c14fe7721ee9c9e2a545d3"
],
"index": "pypi",
- "version": "==5.1"
+ "markers": "python_version >= '3.8'",
+ "version": "==7.4.1"
},
"cycler": {
"hashes": [
- "sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3",
- "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"
+ "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30",
+ "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"
],
- "markers": "python_version >= '3.6'",
- "version": "==0.11.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==0.12.1"
},
"decoy": {
"hashes": [
- "sha256:57327a6ec24c4f4804d978f9c770cb0ff778d2ed751a45ffc61226bf10fc9f90",
- "sha256:dea3634ed92eca686f71e66dfd43350adc1a96c814fb5492a08d3c251c531149"
+ "sha256:575bdbe81afb4c152cd99a34568a9aa4369461f79d6172c678279c5d5585befe",
+ "sha256:7ddcc08b8ce991f7705cee76fae9061dcb17352e0a1ca2d9a0d4a0306ebd51cd"
],
"index": "pypi",
- "version": "==1.11.3"
+ "markers": "python_version >= '3.7' and python_version < '4.0'",
+ "version": "==2.1.1"
},
"docutils": {
"hashes": [
@@ -290,59 +721,103 @@
},
"exceptiongroup": {
"hashes": [
- "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9",
- "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"
+ "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad",
+ "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"
],
"markers": "python_version < '3.11'",
- "version": "==1.1.3"
+ "version": "==1.2.1"
},
"execnet": {
"hashes": [
- "sha256:88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41",
- "sha256:cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af"
+ "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc",
+ "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"
],
- "markers": "python_version >= '3.7'",
- "version": "==2.0.2"
+ "markers": "python_version >= '3.8'",
+ "version": "==2.1.1"
},
"flake8": {
"hashes": [
- "sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b",
- "sha256:bf8fd333346d844f616e8d47905ef3a3384edae6b4e9beb0c5101e25e3110907"
+ "sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132",
+ "sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3"
],
"index": "pypi",
- "version": "==3.9.2"
+ "markers": "python_full_version >= '3.8.1'",
+ "version": "==7.0.0"
},
"flake8-annotations": {
"hashes": [
- "sha256:0d6cd2e770b5095f09689c9d84cc054c51b929c41a68969ea1beb4b825cac515",
- "sha256:d10c4638231f8a50c0a597c4efce42bd7b7d85df4f620a0ddaca526138936a4f"
+ "sha256:af78e3216ad800d7e144745ece6df706c81b3255290cbf870e54879d495e8ade",
+ "sha256:ff37375e71e3b83f2a5a04d443c41e2c407de557a884f3300a7fa32f3c41cb0a"
],
"index": "pypi",
- "version": "==2.6.2"
+ "markers": "python_full_version >= '3.8.1'",
+ "version": "==3.0.1"
},
"flake8-docstrings": {
"hashes": [
- "sha256:99cac583d6c7e32dd28bbfbef120a7c0d1b6dde4adb5a9fd441c4227a6534bde",
- "sha256:9fe7c6a306064af8e62a055c2f61e9eb1da55f84bb39caef2b84ce53708ac34b"
+ "sha256:4c8cc748dc16e6869728699e5d0d685da9a10b0ea718e090b1ba088e67a941af",
+ "sha256:51f2344026da083fc084166a9353f5082b01f72901df422f74b4d953ae88ac75"
],
"index": "pypi",
- "version": "==1.6.0"
+ "markers": "python_version >= '3.7'",
+ "version": "==1.7.0"
},
"flake8-noqa": {
"hashes": [
- "sha256:26d92ca6b72dec732d294e587a2bdeb66dab01acc609ed6a064693d6baa4e789",
- "sha256:445618162e0bbae1b9d983326d4e39066c5c6de71ba0c444ca2d4d1fa5b2cdb7"
+ "sha256:4465e16a19be433980f6f563d05540e2e54797eb11facb9feb50fed60624dc45",
+ "sha256:771765ab27d1efd157528379acd15131147f9ae578a72d17fb432ca197881243"
],
"index": "pypi",
- "version": "==1.2.9"
+ "markers": "python_version >= '3.7'",
+ "version": "==1.4.0"
},
"fonttools": {
"hashes": [
- "sha256:2bb244009f9bf3fa100fc3ead6aeb99febe5985fa20afbfbaa2f8946c2fbdaf1",
- "sha256:820466f43c8be8c3009aef8b87e785014133508f0de64ec469e4efb643ae54fb"
- ],
- "markers": "python_version >= '3.7'",
- "version": "==4.38.0"
+ "sha256:099634631b9dd271d4a835d2b2a9e042ccc94ecdf7e2dd9f7f34f7daf333358d",
+ "sha256:0c555e039d268445172b909b1b6bdcba42ada1cf4a60e367d68702e3f87e5f64",
+ "sha256:1e677bfb2b4bd0e5e99e0f7283e65e47a9814b0486cb64a41adf9ef110e078f2",
+ "sha256:2367d47816cc9783a28645bc1dac07f8ffc93e0f015e8c9fc674a5b76a6da6e4",
+ "sha256:28d072169fe8275fb1a0d35e3233f6df36a7e8474e56cb790a7258ad822b6fd6",
+ "sha256:31f0e3147375002aae30696dd1dc596636abbd22fca09d2e730ecde0baad1d6b",
+ "sha256:3e0ad3c6ea4bd6a289d958a1eb922767233f00982cf0fe42b177657c86c80a8f",
+ "sha256:45b4afb069039f0366a43a5d454bc54eea942bfb66b3fc3e9a2c07ef4d617380",
+ "sha256:4a2a6ba400d386e904fd05db81f73bee0008af37799a7586deaa4aef8cd5971e",
+ "sha256:4f520d9ac5b938e6494f58a25c77564beca7d0199ecf726e1bd3d56872c59749",
+ "sha256:52a6e0a7a0bf611c19bc8ec8f7592bdae79c8296c70eb05917fd831354699b20",
+ "sha256:5a4788036201c908079e89ae3f5399b33bf45b9ea4514913f4dbbe4fac08efe0",
+ "sha256:6b4f04b1fbc01a3569d63359f2227c89ab294550de277fd09d8fca6185669fa4",
+ "sha256:715b41c3e231f7334cbe79dfc698213dcb7211520ec7a3bc2ba20c8515e8a3b5",
+ "sha256:73121a9b7ff93ada888aaee3985a88495489cc027894458cb1a736660bdfb206",
+ "sha256:74ae2441731a05b44d5988d3ac2cf784d3ee0a535dbed257cbfff4be8bb49eb9",
+ "sha256:7d6166192dcd925c78a91d599b48960e0a46fe565391c79fe6de481ac44d20ac",
+ "sha256:7f193f060391a455920d61684a70017ef5284ccbe6023bb056e15e5ac3de11d1",
+ "sha256:907fa0b662dd8fc1d7c661b90782ce81afb510fc4b7aa6ae7304d6c094b27bce",
+ "sha256:93156dd7f90ae0a1b0e8871032a07ef3178f553f0c70c386025a808f3a63b1f4",
+ "sha256:93bc9e5aaa06ff928d751dc6be889ff3e7d2aa393ab873bc7f6396a99f6fbb12",
+ "sha256:95db0c6581a54b47c30860d013977b8a14febc206c8b5ff562f9fe32738a8aca",
+ "sha256:973d030180eca8255b1bce6ffc09ef38a05dcec0e8320cc9b7bcaa65346f341d",
+ "sha256:9cd7a6beec6495d1dffb1033d50a3f82dfece23e9eb3c20cd3c2444d27514068",
+ "sha256:9fe9096a60113e1d755e9e6bda15ef7e03391ee0554d22829aa506cdf946f796",
+ "sha256:a209d2e624ba492df4f3bfad5996d1f76f03069c6133c60cd04f9a9e715595ec",
+ "sha256:a239afa1126b6a619130909c8404070e2b473dd2b7fc4aacacd2e763f8597fea",
+ "sha256:ba9f09ff17f947392a855e3455a846f9855f6cf6bec33e9a427d3c1d254c712f",
+ "sha256:bb7273789f69b565d88e97e9e1da602b4ee7ba733caf35a6c2affd4334d4f005",
+ "sha256:bd5bc124fae781a4422f61b98d1d7faa47985f663a64770b78f13d2c072410c2",
+ "sha256:bff98816cb144fb7b85e4b5ba3888a33b56ecef075b0e95b95bcd0a5fbf20f06",
+ "sha256:c4ee5a24e281fbd8261c6ab29faa7fd9a87a12e8c0eed485b705236c65999109",
+ "sha256:c93ed66d32de1559b6fc348838c7572d5c0ac1e4a258e76763a5caddd8944002",
+ "sha256:d1a24f51a3305362b94681120c508758a88f207fa0a681c16b5a4172e9e6c7a9",
+ "sha256:d8f191a17369bd53a5557a5ee4bab91d5330ca3aefcdf17fab9a497b0e7cff7a",
+ "sha256:daaef7390e632283051e3cf3e16aff2b68b247e99aea916f64e578c0449c9c68",
+ "sha256:e40013572bfb843d6794a3ce076c29ef4efd15937ab833f520117f8eccc84fd6",
+ "sha256:eceef49f457253000e6a2d0f7bd08ff4e9fe96ec4ffce2dbcb32e34d9c1b8161",
+ "sha256:ee595d7ba9bba130b2bec555a40aafa60c26ce68ed0cf509983e0f12d88674fd",
+ "sha256:ef50ec31649fbc3acf6afd261ed89d09eb909b97cc289d80476166df8438524d",
+ "sha256:fa1f3e34373aa16045484b4d9d352d4c6b5f9f77ac77a178252ccbc851e8b2ee",
+ "sha256:fca66d9ff2ac89b03f5aa17e0b21a97c21f3491c46b583bb131eb32c7bab33af"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==4.53.0"
},
"gprof2dot": {
"hashes": [
@@ -354,19 +829,20 @@
},
"hypothesis": {
"hashes": [
- "sha256:5ce05bc70aa4f20114effaf3375dc8b51d09a04026a0cf89d4514fc0b69f6304",
- "sha256:e9a9ff3dc3f3eebbf214d6852882ac96ad72023f0e9770139fd3d3c1b87673e2"
+ "sha256:848ea0952f0bdfd02eac59e41b03f1cbba8fa2cffeffa8db328bbd6cfe159974",
+ "sha256:955a57e56be4607c81c17ca53e594af54aadeed91e07b88bb7f84e8208ea7739"
],
"index": "pypi",
- "version": "==6.79.4"
+ "markers": "python_version >= '3.8'",
+ "version": "==6.96.1"
},
"idna": {
"hashes": [
- "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4",
- "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"
+ "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc",
+ "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"
],
"markers": "python_version >= '3.5'",
- "version": "==3.4"
+ "version": "==3.7"
},
"imagesize": {
"hashes": [
@@ -378,19 +854,11 @@
},
"importlib-metadata": {
"hashes": [
- "sha256:8a8a81bcf996e74fee46f0d16bd3eaa382a7eb20fd82445c3ad11f4090334116",
- "sha256:dd0173e8f150d6815e098fd354f6414b0f079af4644ddfe90c71e2fc6174346d"
+ "sha256:30962b96c0c223483ed6cc7280e7f0199feb01a0e40cfae4d4450fc6fab1f570",
+ "sha256:b78938b926ee8d5f020fc4772d487045805a55ddbad2ecf21c6d60938dc7fcd2"
],
- "markers": "python_version < '3.10' and python_version < '3.12'",
- "version": "==4.13.0"
- },
- "importlib-resources": {
- "hashes": [
- "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6",
- "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"
- ],
- "markers": "python_version < '3.9'",
- "version": "==5.12.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==7.1.0"
},
"iniconfig": {
"hashes": [
@@ -402,11 +870,27 @@
},
"jaraco.classes": {
"hashes": [
- "sha256:2353de3288bc6b82120752201c6b1c1a14b058267fa424ed5ce5984e3b922158",
- "sha256:89559fa5c1d3c34eff6f631ad80bb21f378dbcbb35dd161fd2c6b93f5be2f98a"
+ "sha256:47a024b51d0239c0dd8c8540c6c7f484be3b8fcf0b2d85c13825780d3b3f3acd",
+ "sha256:f662826b6bed8cace05e7ff873ce0f9283b5c924470fe664fff1c2f00f581790"
],
- "markers": "python_version >= '3.7'",
- "version": "==3.2.3"
+ "markers": "python_version >= '3.8'",
+ "version": "==3.4.0"
+ },
+ "jaraco.context": {
+ "hashes": [
+ "sha256:3e16388f7da43d384a1a7cd3452e72e14732ac9fe459678773a3608a812bf266",
+ "sha256:c2f67165ce1f9be20f32f650f25d8edfc1646a8aeee48ae06fb35f90763576d2"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==5.3.0"
+ },
+ "jaraco.functools": {
+ "hashes": [
+ "sha256:3b24ccb921d6b593bdceb56ce14799204f473976e2a9d4b15b04d0f2c2326664",
+ "sha256:d33fa765374c0611b52f8b3a795f8900869aa88c84769d4d1746cd68fb28c3e8"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==4.0.1"
},
"jinja2": {
"hashes": [
@@ -414,22 +898,25 @@
"sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7"
],
"index": "pypi",
+ "markers": "python_version >= '3.6'",
"version": "==3.0.3"
},
"jsonschema": {
"hashes": [
- "sha256:5f9c0a719ca2ce14c5de2fd350a64fd2d13e8539db29836a86adc990bb1a068f",
- "sha256:8d4a2b7b6c2237e0199c8ea1a6d3e05bf118e289ae2b9d7ba444182a2959560d"
+ "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d",
+ "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"
],
- "version": "==3.0.2"
+ "index": "pypi",
+ "markers": "python_version >= '3.7'",
+ "version": "==4.17.3"
},
"keyring": {
"hashes": [
- "sha256:3d44a48fa9a254f6c72879d7c88604831ebdaac6ecb0b214308b02953502c510",
- "sha256:bc402c5e501053098bcbd149c4ddbf8e36c6809e572c2d098d4961e88d4c270d"
+ "sha256:2458681cdefc0dbc0b7eb6cf75d0b98e59f9ad9b2d4edd319d18f68bdca95e50",
+ "sha256:daaffd42dbda25ddafb1ad5fec4024e5bbcfe424597ca1ca452b299861e49f1b"
],
- "markers": "python_version >= '3.7'",
- "version": "==24.1.1"
+ "markers": "python_version >= '3.8'",
+ "version": "==25.2.1"
},
"kiwisolver": {
"hashes": [
@@ -543,125 +1030,119 @@
},
"markdown-it-py": {
"hashes": [
- "sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30",
- "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"
+ "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1",
+ "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"
],
- "markers": "python_version >= '3.7'",
- "version": "==2.2.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==3.0.0"
},
"markupsafe": {
"hashes": [
- "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e",
- "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e",
- "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431",
- "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686",
- "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c",
- "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559",
- "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc",
- "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb",
- "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939",
- "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c",
- "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0",
- "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4",
- "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9",
- "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575",
- "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba",
- "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d",
- "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd",
- "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3",
- "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00",
- "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155",
- "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac",
- "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52",
- "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f",
- "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8",
- "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b",
- "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007",
- "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24",
- "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea",
- "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198",
- "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0",
- "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee",
- "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be",
- "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2",
- "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1",
- "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707",
- "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6",
- "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c",
- "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58",
- "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823",
- "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779",
- "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636",
- "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c",
- "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad",
- "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee",
- "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc",
- "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2",
- "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48",
- "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7",
- "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e",
- "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b",
- "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa",
- "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5",
- "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e",
- "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb",
- "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9",
- "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57",
- "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc",
- "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc",
- "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2",
- "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"
+ "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf",
+ "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff",
+ "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f",
+ "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3",
+ "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532",
+ "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f",
+ "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617",
+ "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df",
+ "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4",
+ "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906",
+ "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f",
+ "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4",
+ "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8",
+ "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371",
+ "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2",
+ "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465",
+ "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52",
+ "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6",
+ "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169",
+ "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad",
+ "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2",
+ "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0",
+ "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029",
+ "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f",
+ "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a",
+ "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced",
+ "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5",
+ "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c",
+ "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf",
+ "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9",
+ "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb",
+ "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad",
+ "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3",
+ "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1",
+ "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46",
+ "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc",
+ "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a",
+ "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee",
+ "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900",
+ "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5",
+ "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea",
+ "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f",
+ "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5",
+ "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e",
+ "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a",
+ "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f",
+ "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50",
+ "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a",
+ "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b",
+ "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4",
+ "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff",
+ "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2",
+ "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46",
+ "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b",
+ "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf",
+ "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5",
+ "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5",
+ "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab",
+ "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd",
+ "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"
],
"markers": "python_version >= '3.7'",
- "version": "==2.1.3"
+ "version": "==2.1.5"
},
"matplotlib": {
"hashes": [
- "sha256:0bcdfcb0f976e1bac6721d7d457c17be23cf7501f977b6a38f9d38a3762841f7",
- "sha256:1e64ac9be9da6bfff0a732e62116484b93b02a0b4d4b19934fb4f8e7ad26ad6a",
- "sha256:22227c976ad4dc8c5a5057540421f0d8708c6560744ad2ad638d48e2984e1dbc",
- "sha256:2886cc009f40e2984c083687251821f305d811d38e3df8ded414265e4583f0c5",
- "sha256:2e6d184ebe291b9e8f7e78bbab7987d269c38ea3e062eace1fe7d898042ef804",
- "sha256:3211ba82b9f1518d346f6309df137b50c3dc4421b4ed4815d1d7eadc617f45a1",
- "sha256:339cac48b80ddbc8bfd05daae0a3a73414651a8596904c2a881cfd1edb65f26c",
- "sha256:35a8ad4dddebd51f94c5d24bec689ec0ec66173bf614374a1244c6241c1595e0",
- "sha256:3b4fa56159dc3c7f9250df88f653f085068bcd32dcd38e479bba58909254af7f",
- "sha256:43e9d3fa077bf0cc95ded13d331d2156f9973dce17c6f0c8b49ccd57af94dbd9",
- "sha256:57f1b4e69f438a99bb64d7f2c340db1b096b41ebaa515cf61ea72624279220ce",
- "sha256:5c096363b206a3caf43773abebdbb5a23ea13faef71d701b21a9c27fdcef72f4",
- "sha256:6bb93a0492d68461bd458eba878f52fdc8ac7bdb6c4acdfe43dba684787838c2",
- "sha256:6ea6aef5c4338e58d8d376068e28f80a24f54e69f09479d1c90b7172bad9f25b",
- "sha256:6fe807e8a22620b4cd95cfbc795ba310dc80151d43b037257250faf0bfcd82bc",
- "sha256:73dd93dc35c85dece610cca8358003bf0760d7986f70b223e2306b4ea6d1406b",
- "sha256:839d47b8ead7ad9669aaacdbc03f29656dc21f0d41a6fea2d473d856c39c8b1c",
- "sha256:874df7505ba820e0400e7091199decf3ff1fde0583652120c50cd60d5820ca9a",
- "sha256:879c7e5fce4939c6aa04581dfe08d57eb6102a71f2e202e3314d5fbc072fd5a0",
- "sha256:94ff86af56a3869a4ae26a9637a849effd7643858a1a04dd5ee50e9ab75069a7",
- "sha256:99482b83ebf4eb6d5fc6813d7aacdefdd480f0d9c0b52dcf9f1cc3b2c4b3361a",
- "sha256:9ab29589cef03bc88acfa3a1490359000c18186fc30374d8aa77d33cc4a51a4a",
- "sha256:9befa5954cdbc085e37d974ff6053da269474177921dd61facdad8023c4aeb51",
- "sha256:a206a1b762b39398efea838f528b3a6d60cdb26fe9d58b48265787e29cd1d693",
- "sha256:ab8d26f07fe64f6f6736d635cce7bfd7f625320490ed5bfc347f2cdb4fae0e56",
- "sha256:b28de401d928890187c589036857a270a032961411934bdac4cf12dde3d43094",
- "sha256:b428076a55fb1c084c76cb93e68006f27d247169f056412607c5c88828d08f88",
- "sha256:bf618a825deb6205f015df6dfe6167a5d9b351203b03fab82043ae1d30f16511",
- "sha256:c995f7d9568f18b5db131ab124c64e51b6820a92d10246d4f2b3f3a66698a15b",
- "sha256:cd45a6f3e93a780185f70f05cf2a383daed13c3489233faad83e81720f7ede24",
- "sha256:d2484b350bf3d32cae43f85dcfc89b3ed7bd2bcd781ef351f93eb6fb2cc483f9",
- "sha256:d62880e1f60e5a30a2a8484432bcb3a5056969dc97258d7326ad465feb7ae069",
- "sha256:dacddf5bfcec60e3f26ec5c0ae3d0274853a258b6c3fc5ef2f06a8eb23e042be",
- "sha256:f3840c280ebc87a48488a46f760ea1c0c0c83fcf7abbe2e6baf99d033fd35fd8",
- "sha256:f814504e459c68118bf2246a530ed953ebd18213dc20e3da524174d84ed010b2"
- ],
- "markers": "python_version >= '3.7'",
- "version": "==3.5.3"
+ "sha256:1c13f041a7178f9780fb61cc3a2b10423d5e125480e4be51beaf62b172413b67",
+ "sha256:232ce322bfd020a434caaffbd9a95333f7c2491e59cfc014041d95e38ab90d1c",
+ "sha256:493e9f6aa5819156b58fce42b296ea31969f2aab71c5b680b4ea7a3cb5c07d94",
+ "sha256:50bac6e4d77e4262c4340d7a985c30912054745ec99756ce213bfbc3cb3808eb",
+ "sha256:606e3b90897554c989b1e38a258c626d46c873523de432b1462f295db13de6f9",
+ "sha256:6209e5c9aaccc056e63b547a8152661324404dd92340a6e479b3a7f24b42a5d0",
+ "sha256:6485ac1f2e84676cff22e693eaa4fbed50ef5dc37173ce1f023daef4687df616",
+ "sha256:6addbd5b488aedb7f9bc19f91cd87ea476206f45d7116fcfe3d31416702a82fa",
+ "sha256:72f9322712e4562e792b2961971891b9fbbb0e525011e09ea0d1f416c4645661",
+ "sha256:7a6769f58ce51791b4cb8b4d7642489df347697cd3e23d88266aaaee93b41d9a",
+ "sha256:8080d5081a86e690d7688ffa542532e87f224c38a6ed71f8fbed34dd1d9fedae",
+ "sha256:843cbde2f0946dadd8c5c11c6d91847abd18ec76859dc319362a0964493f0ba6",
+ "sha256:8aac397d5e9ec158960e31c381c5ffc52ddd52bd9a47717e2a694038167dffea",
+ "sha256:8f65c9f002d281a6e904976007b2d46a1ee2bcea3a68a8c12dda24709ddc9106",
+ "sha256:90df07db7b599fe7035d2f74ab7e438b656528c68ba6bb59b7dc46af39ee48ef",
+ "sha256:9bb0189011785ea794ee827b68777db3ca3f93f3e339ea4d920315a0e5a78d54",
+ "sha256:a0e47eda4eb2614300fc7bb4657fced3e83d6334d03da2173b09e447418d499f",
+ "sha256:abc9d838f93583650c35eca41cfcec65b2e7cb50fd486da6f0c49b5e1ed23014",
+ "sha256:ac24233e8f2939ac4fd2919eed1e9c0871eac8057666070e94cbf0b33dd9c338",
+ "sha256:b12ba985837e4899b762b81f5b2845bd1a28f4fdd1a126d9ace64e9c4eb2fb25",
+ "sha256:b7a2a253d3b36d90c8993b4620183b55665a429da8357a4f621e78cd48b2b30b",
+ "sha256:c7064120a59ce6f64103c9cefba8ffe6fba87f2c61d67c401186423c9a20fd35",
+ "sha256:c89ee9314ef48c72fe92ce55c4e95f2f39d70208f9f1d9db4e64079420d8d732",
+ "sha256:cc4ccdc64e3039fc303defd119658148f2349239871db72cd74e2eeaa9b80b71",
+ "sha256:ce1edd9f5383b504dbc26eeea404ed0a00656c526638129028b758fd43fc5f10",
+ "sha256:ecd79298550cba13a43c340581a3ec9c707bd895a6a061a78fa2524660482fc0",
+ "sha256:f51c4c869d4b60d769f7b4406eec39596648d9d70246428745a681c327a8ad30",
+ "sha256:fb44f53af0a62dc80bba4443d9b27f2fde6acfdac281d95bc872dc148a6509cc"
+ ],
+ "markers": "python_version >= '3.9'",
+ "version": "==3.8.4"
},
"mccabe": {
"hashes": [
- "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42",
- "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"
+ "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325",
+ "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"
],
- "version": "==0.6.1"
+ "markers": "python_version >= '3.6'",
+ "version": "==0.7.0"
},
"mdurl": {
"hashes": [
@@ -673,92 +1154,110 @@
},
"mock": {
"hashes": [
- "sha256:122fcb64ee37cfad5b3f48d7a7d51875d7031aaf3d8be7c42e2bee25044eee62",
- "sha256:7d3fbbde18228f4ff2f1f119a45cdffa458b4c0dee32eb4d2bb2f82554bac7bc"
+ "sha256:18c694e5ae8a208cdb3d2c20a993ca1a7b0efa258c247a1e565150f477f83744",
+ "sha256:5e96aad5ccda4718e0a229ed94b2024df75cc2d55575ba5762d31f5767b8767d"
],
"index": "pypi",
- "version": "==4.0.3"
+ "markers": "python_version >= '3.6'",
+ "version": "==5.1.0"
},
"more-itertools": {
"hashes": [
- "sha256:cabaa341ad0389ea83c17a94566a53ae4c9d07349861ecb14dc6d0345cf9ac5d",
- "sha256:d2bc7f02446e86a68911e58ded76d6561eea00cddfb2a91e7019bbb586c799f3"
+ "sha256:686b06abe565edfab151cb8fd385a05651e1fdf8f0a14191e4439283421f8684",
+ "sha256:8fccb480c43d3e99a00087634c06dd02b0d50fbf088b380de5a41a015ec239e1"
],
- "markers": "python_version >= '3.7'",
- "version": "==9.1.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==10.2.0"
},
"mypy": {
"hashes": [
- "sha256:088cd9c7904b4ad80bec811053272986611b84221835e079be5bcad029e79dd9",
- "sha256:0aadfb2d3935988ec3815952e44058a3100499f5be5b28c34ac9d79f002a4a9a",
- "sha256:119bed3832d961f3a880787bf621634ba042cb8dc850a7429f643508eeac97b9",
- "sha256:1a85e280d4d217150ce8cb1a6dddffd14e753a4e0c3cf90baabb32cefa41b59e",
- "sha256:3c4b8ca36877fc75339253721f69603a9c7fdb5d4d5a95a1a1b899d8b86a4de2",
- "sha256:3e382b29f8e0ccf19a2df2b29a167591245df90c0b5a2542249873b5c1d78212",
- "sha256:42c266ced41b65ed40a282c575705325fa7991af370036d3f134518336636f5b",
- "sha256:53fd2eb27a8ee2892614370896956af2ff61254c275aaee4c230ae771cadd885",
- "sha256:704098302473cb31a218f1775a873b376b30b4c18229421e9e9dc8916fd16150",
- "sha256:7df1ead20c81371ccd6091fa3e2878559b5c4d4caadaf1a484cf88d93ca06703",
- "sha256:866c41f28cee548475f146aa4d39a51cf3b6a84246969f3759cb3e9c742fc072",
- "sha256:a155d80ea6cee511a3694b108c4494a39f42de11ee4e61e72bc424c490e46457",
- "sha256:adaeee09bfde366d2c13fe6093a7df5df83c9a2ba98638c7d76b010694db760e",
- "sha256:b6fb13123aeef4a3abbcfd7e71773ff3ff1526a7d3dc538f3929a49b42be03f0",
- "sha256:b94e4b785e304a04ea0828759172a15add27088520dc7e49ceade7834275bedb",
- "sha256:c0df2d30ed496a08de5daed2a9ea807d07c21ae0ab23acf541ab88c24b26ab97",
- "sha256:c6c2602dffb74867498f86e6129fd52a2770c48b7cd3ece77ada4fa38f94eba8",
- "sha256:ceb6e0a6e27fb364fb3853389607cf7eb3a126ad335790fa1e14ed02fba50811",
- "sha256:d9dd839eb0dc1bbe866a288ba3c1afc33a202015d2ad83b31e875b5905a079b6",
- "sha256:e4dab234478e3bd3ce83bac4193b2ecd9cf94e720ddd95ce69840273bf44f6de",
- "sha256:ec4e0cd079db280b6bdabdc807047ff3e199f334050db5cbb91ba3e959a67504",
- "sha256:ecd2c3fe726758037234c93df7e98deb257fd15c24c9180dacf1ef829da5f921",
- "sha256:ef565033fa5a958e62796867b1df10c40263ea9ded87164d67572834e57a174d"
+ "sha256:028cf9f2cae89e202d7b6593cd98db6759379f17a319b5faf4f9978d7084cdc6",
+ "sha256:2afecd6354bbfb6e0160f4e4ad9ba6e4e003b767dd80d85516e71f2e955ab50d",
+ "sha256:2b5b6c721bd4aabaadead3a5e6fa85c11c6c795e0c81a7215776ef8afc66de02",
+ "sha256:42419861b43e6962a649068a61f4a4839205a3ef525b858377a960b9e2de6e0d",
+ "sha256:42c6680d256ab35637ef88891c6bd02514ccb7e1122133ac96055ff458f93fc3",
+ "sha256:485a8942f671120f76afffff70f259e1cd0f0cfe08f81c05d8816d958d4577d3",
+ "sha256:4c886c6cce2d070bd7df4ec4a05a13ee20c0aa60cb587e8d1265b6c03cf91da3",
+ "sha256:4e6d97288757e1ddba10dd9549ac27982e3e74a49d8d0179fc14d4365c7add66",
+ "sha256:4ef4be7baf08a203170f29e89d79064463b7fc7a0908b9d0d5114e8009c3a259",
+ "sha256:51720c776d148bad2372ca21ca29256ed483aa9a4cdefefcef49006dff2a6835",
+ "sha256:52825b01f5c4c1c4eb0db253ec09c7aa17e1a7304d247c48b6f3599ef40db8bd",
+ "sha256:538fd81bb5e430cc1381a443971c0475582ff9f434c16cd46d2c66763ce85d9d",
+ "sha256:5c1538c38584029352878a0466f03a8ee7547d7bd9f641f57a0f3017a7c905b8",
+ "sha256:6ff8b244d7085a0b425b56d327b480c3b29cafbd2eff27316a004f9a7391ae07",
+ "sha256:7178def594014aa6c35a8ff411cf37d682f428b3b5617ca79029d8ae72f5402b",
+ "sha256:720a5ca70e136b675af3af63db533c1c8c9181314d207568bbe79051f122669e",
+ "sha256:7f1478736fcebb90f97e40aff11a5f253af890c845ee0c850fe80aa060a267c6",
+ "sha256:855fe27b80375e5c5878492f0729540db47b186509c98dae341254c8f45f42ae",
+ "sha256:8963b83d53ee733a6e4196954502b33567ad07dfd74851f32be18eb932fb1cb9",
+ "sha256:9261ed810972061388918c83c3f5cd46079d875026ba97380f3e3978a72f503d",
+ "sha256:99b00bc72855812a60d253420d8a2eae839b0afa4938f09f4d2aa9bb4654263a",
+ "sha256:ab3c84fa13c04aeeeabb2a7f67a25ef5d77ac9d6486ff33ded762ef353aa5592",
+ "sha256:afe3fe972c645b4632c563d3f3eff1cdca2fa058f730df2b93a35e3b0c538218",
+ "sha256:d19c413b3c07cbecf1f991e2221746b0d2a9410b59cb3f4fb9557f0365a1a817",
+ "sha256:df9824ac11deaf007443e7ed2a4a26bebff98d2bc43c6da21b2b64185da011c4",
+ "sha256:e46f44b54ebddbeedbd3d5b289a893219065ef805d95094d16a0af6630f5d410",
+ "sha256:f5ac9a4eeb1ec0f1ccdc6f326bcdb464de5f80eb07fb38b5ddd7b0de6bc61e55"
],
"index": "pypi",
- "version": "==0.910"
+ "markers": "python_version >= '3.8'",
+ "version": "==1.8.0"
},
"mypy-extensions": {
"hashes": [
- "sha256:c8b707883a96efe9b4bb3aaf0dcc07e7e217d7d8368eec4db4049ee9e142f4fd"
+ "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d",
+ "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"
],
- "markers": "python_version >= '2.7'",
- "version": "==0.4.4"
+ "markers": "python_version >= '3.5'",
+ "version": "==1.0.0"
+ },
+ "nh3": {
+ "hashes": [
+ "sha256:0316c25b76289cf23be6b66c77d3608a4fdf537b35426280032f432f14291b9a",
+ "sha256:1a814dd7bba1cb0aba5bcb9bebcc88fd801b63e21e2450ae6c52d3b3336bc911",
+ "sha256:1aa52a7def528297f256de0844e8dd680ee279e79583c76d6fa73a978186ddfb",
+ "sha256:22c26e20acbb253a5bdd33d432a326d18508a910e4dcf9a3316179860d53345a",
+ "sha256:40015514022af31975c0b3bca4014634fa13cb5dc4dbcbc00570acc781316dcc",
+ "sha256:40d0741a19c3d645e54efba71cb0d8c475b59135c1e3c580f879ad5514cbf028",
+ "sha256:551672fd71d06cd828e282abdb810d1be24e1abb7ae2543a8fa36a71c1006fe9",
+ "sha256:66f17d78826096291bd264f260213d2b3905e3c7fae6dfc5337d49429f1dc9f3",
+ "sha256:85cdbcca8ef10733bd31f931956f7fbb85145a4d11ab9e6742bbf44d88b7e351",
+ "sha256:a3f55fabe29164ba6026b5ad5c3151c314d136fd67415a17660b4aaddacf1b10",
+ "sha256:b4427ef0d2dfdec10b641ed0bdaf17957eb625b2ec0ea9329b3d28806c153d71",
+ "sha256:ba73a2f8d3a1b966e9cdba7b211779ad8a2561d2dba9674b8a19ed817923f65f",
+ "sha256:c21bac1a7245cbd88c0b0e4a420221b7bfa838a2814ee5bb924e9c2f10a1120b",
+ "sha256:c551eb2a3876e8ff2ac63dff1585236ed5dfec5ffd82216a7a174f7c5082a78a",
+ "sha256:c790769152308421283679a142dbdb3d1c46c79c823008ecea8e8141db1a2062",
+ "sha256:d7a25fd8c86657f5d9d576268e3b3767c5cd4f42867c9383618be8517f0f022a"
+ ],
+ "version": "==0.2.17"
},
"numpy": {
"hashes": [
- "sha256:1dbe1c91269f880e364526649a52eff93ac30035507ae980d2fed33aaee633ac",
- "sha256:357768c2e4451ac241465157a3e929b265dfac85d9214074985b1786244f2ef3",
- "sha256:3820724272f9913b597ccd13a467cc492a0da6b05df26ea09e78b171a0bb9da6",
- "sha256:4391bd07606be175aafd267ef9bea87cf1b8210c787666ce82073b05f202add1",
- "sha256:4aa48afdce4660b0076a00d80afa54e8a97cd49f457d68a4342d188a09451c1a",
- "sha256:58459d3bad03343ac4b1b42ed14d571b8743dc80ccbf27444f266729df1d6f5b",
- "sha256:5c3c8def4230e1b959671eb959083661b4a0d2e9af93ee339c7dada6759a9470",
- "sha256:5f30427731561ce75d7048ac254dbe47a2ba576229250fb60f0fb74db96501a1",
- "sha256:643843bcc1c50526b3a71cd2ee561cf0d8773f062c8cbaf9ffac9fdf573f83ab",
- "sha256:67c261d6c0a9981820c3a149d255a76918278a6b03b6a036800359aba1256d46",
- "sha256:67f21981ba2f9d7ba9ade60c9e8cbaa8cf8e9ae51673934480e45cf55e953673",
- "sha256:6aaf96c7f8cebc220cdfc03f1d5a31952f027dda050e5a703a0d1c396075e3e7",
- "sha256:7c4068a8c44014b2d55f3c3f574c376b2494ca9cc73d2f1bd692382b6dffe3db",
- "sha256:7c7e5fa88d9ff656e067876e4736379cc962d185d5cd808014a8a928d529ef4e",
- "sha256:7f5ae4f304257569ef3b948810816bc87c9146e8c446053539947eedeaa32786",
- "sha256:82691fda7c3f77c90e62da69ae60b5ac08e87e775b09813559f8901a88266552",
- "sha256:8737609c3bbdd48e380d463134a35ffad3b22dc56295eff6f79fd85bd0eeeb25",
- "sha256:9f411b2c3f3d76bba0865b35a425157c5dcf54937f82bbeb3d3c180789dd66a6",
- "sha256:a6be4cb0ef3b8c9250c19cc122267263093eee7edd4e3fa75395dfda8c17a8e2",
- "sha256:bcb238c9c96c00d3085b264e5c1a1207672577b93fa666c3b14a45240b14123a",
- "sha256:bf2ec4b75d0e9356edea834d1de42b31fe11f726a81dfb2c2112bc1eaa508fcf",
- "sha256:d136337ae3cc69aa5e447e78d8e1514be8c3ec9b54264e680cf0b4bd9011574f",
- "sha256:d4bf4d43077db55589ffc9009c0ba0a94fa4908b9586d6ccce2e0b164c86303c",
- "sha256:d6a96eef20f639e6a97d23e57dd0c1b1069a7b4fd7027482a4c5c451cd7732f4",
- "sha256:d9caa9d5e682102453d96a0ee10c7241b72859b01a941a397fd965f23b3e016b",
- "sha256:dd1c8f6bd65d07d3810b90d02eba7997e32abbdf1277a481d698969e921a3be0",
- "sha256:e31f0bb5928b793169b87e3d1e070f2342b22d5245c755e2b81caa29756246c3",
- "sha256:ecb55251139706669fdec2ff073c98ef8e9a84473e51e716211b41aa0f18e656",
- "sha256:ee5ec40fdd06d62fe5d4084bef4fd50fd4bb6bfd2bf519365f569dc470163ab0",
- "sha256:f17e562de9edf691a42ddb1eb4a5541c20dd3f9e65b09ded2beb0799c0cf29bb",
- "sha256:fdffbfb6832cd0b300995a2b08b8f6fa9f6e856d562800fea9182316d99c4e8e"
- ],
- "markers": "python_version < '3.11' and python_version >= '3.7'",
- "version": "==1.21.6"
+ "sha256:07a8c89a04997625236c5ecb7afe35a02af3896c8aa01890a849913a2309c676",
+ "sha256:08d9b008d0156c70dc392bb3ab3abb6e7a711383c3247b410b39962263576cd4",
+ "sha256:201b4d0552831f7250a08d3b38de0d989d6f6e4658b709a02a73c524ccc6ffce",
+ "sha256:2c10a93606e0b4b95c9b04b77dc349b398fdfbda382d2a39ba5a822f669a0123",
+ "sha256:3ca688e1b9b95d80250bca34b11a05e389b1420d00e87a0d12dc45f131f704a1",
+ "sha256:48a3aecd3b997bf452a2dedb11f4e79bc5bfd21a1d4cc760e703c31d57c84b3e",
+ "sha256:568dfd16224abddafb1cbcce2ff14f522abe037268514dd7e42c6776a1c3f8e5",
+ "sha256:5bfb1bb598e8229c2d5d48db1860bcf4311337864ea3efdbe1171fb0c5da515d",
+ "sha256:639b54cdf6aa4f82fe37ebf70401bbb74b8508fddcf4797f9fe59615b8c5813a",
+ "sha256:8251ed96f38b47b4295b1ae51631de7ffa8260b5b087808ef09a39a9d66c97ab",
+ "sha256:92bfa69cfbdf7dfc3040978ad09a48091143cffb778ec3b03fa170c494118d75",
+ "sha256:97098b95aa4e418529099c26558eeb8486e66bd1e53a6b606d684d0c3616b168",
+ "sha256:a3bae1a2ed00e90b3ba5f7bd0a7c7999b55d609e0c54ceb2b076a25e345fa9f4",
+ "sha256:c34ea7e9d13a70bf2ab64a2532fe149a9aced424cd05a2c4ba662fd989e3e45f",
+ "sha256:dbc7601a3b7472d559dc7b933b18b4b66f9aa7452c120e87dfb33d02008c8a18",
+ "sha256:e7927a589df200c5e23c57970bafbd0cd322459aa7b1ff73b7c2e84d6e3eae62",
+ "sha256:f8c1f39caad2c896bc0018f699882b345b2a63708008be29b1f355ebf6f933fe",
+ "sha256:f950f8845b480cffe522913d35567e29dd381b0dc7e4ce6a4a9f9156417d2430",
+ "sha256:fade0d4f4d292b6f39951b6836d7a3c7ef5b2347f3c420cd9820a1d90d794802",
+ "sha256:fdf3c08bce27132395d3c3ba1503cac12e17282358cb4bddc25cc46b0aca07aa"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.8'",
+ "version": "==1.22.3"
},
"numpydoc": {
"hashes": [
@@ -767,129 +1266,130 @@
"index": "pypi",
"version": "==0.9.1"
},
- "opentrons": {
- "editable": true,
- "path": "."
- },
- "opentrons-hardware": {
- "editable": true,
- "path": "./../hardware"
- },
"opentrons-shared-data": {
"editable": true,
- "path": "./../shared-data/python"
+ "markers": "python_version >= '3.10'",
+ "path": "../shared-data/python"
},
"packaging": {
"hashes": [
- "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5",
- "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"
+ "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb",
+ "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"
],
- "markers": "python_version >= '3.7'",
- "version": "==23.2"
+ "index": "pypi",
+ "markers": "python_version >= '3.6'",
+ "version": "==21.3"
},
"pathspec": {
"hashes": [
- "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20",
- "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"
+ "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08",
+ "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"
],
- "markers": "python_version >= '3.7'",
- "version": "==0.11.2"
+ "markers": "python_version >= '3.8'",
+ "version": "==0.12.1"
+ },
+ "performance-metrics": {
+ "editable": true,
+ "file": "../performance-metrics"
},
"pillow": {
"hashes": [
- "sha256:07999f5834bdc404c442146942a2ecadd1cb6292f5229f4ed3b31e0a108746b1",
- "sha256:0852ddb76d85f127c135b6dd1f0bb88dbb9ee990d2cd9aa9e28526c93e794fba",
- "sha256:1781a624c229cb35a2ac31cc4a77e28cafc8900733a864870c49bfeedacd106a",
- "sha256:1e7723bd90ef94eda669a3c2c19d549874dd5badaeefabefd26053304abe5799",
- "sha256:229e2c79c00e85989a34b5981a2b67aa079fd08c903f0aaead522a1d68d79e51",
- "sha256:22baf0c3cf0c7f26e82d6e1adf118027afb325e703922c8dfc1d5d0156bb2eeb",
- "sha256:252a03f1bdddce077eff2354c3861bf437c892fb1832f75ce813ee94347aa9b5",
- "sha256:2dfaaf10b6172697b9bceb9a3bd7b951819d1ca339a5ef294d1f1ac6d7f63270",
- "sha256:322724c0032af6692456cd6ed554bb85f8149214d97398bb80613b04e33769f6",
- "sha256:35f6e77122a0c0762268216315bf239cf52b88865bba522999dc38f1c52b9b47",
- "sha256:375f6e5ee9620a271acb6820b3d1e94ffa8e741c0601db4c0c4d3cb0a9c224bf",
- "sha256:3ded42b9ad70e5f1754fb7c2e2d6465a9c842e41d178f262e08b8c85ed8a1d8e",
- "sha256:432b975c009cf649420615388561c0ce7cc31ce9b2e374db659ee4f7d57a1f8b",
- "sha256:482877592e927fd263028c105b36272398e3e1be3269efda09f6ba21fd83ec66",
- "sha256:489f8389261e5ed43ac8ff7b453162af39c3e8abd730af8363587ba64bb2e865",
- "sha256:54f7102ad31a3de5666827526e248c3530b3a33539dbda27c6843d19d72644ec",
- "sha256:560737e70cb9c6255d6dcba3de6578a9e2ec4b573659943a5e7e4af13f298f5c",
- "sha256:5671583eab84af046a397d6d0ba25343c00cd50bce03787948e0fff01d4fd9b1",
- "sha256:5ba1b81ee69573fe7124881762bb4cd2e4b6ed9dd28c9c60a632902fe8db8b38",
- "sha256:5d4ebf8e1db4441a55c509c4baa7a0587a0210f7cd25fcfe74dbbce7a4bd1906",
- "sha256:60037a8db8750e474af7ffc9faa9b5859e6c6d0a50e55c45576bf28be7419705",
- "sha256:608488bdcbdb4ba7837461442b90ea6f3079397ddc968c31265c1e056964f1ef",
- "sha256:6608ff3bf781eee0cd14d0901a2b9cc3d3834516532e3bd673a0a204dc8615fc",
- "sha256:662da1f3f89a302cc22faa9f14a262c2e3951f9dbc9617609a47521c69dd9f8f",
- "sha256:7002d0797a3e4193c7cdee3198d7c14f92c0836d6b4a3f3046a64bd1ce8df2bf",
- "sha256:763782b2e03e45e2c77d7779875f4432e25121ef002a41829d8868700d119392",
- "sha256:77165c4a5e7d5a284f10a6efaa39a0ae8ba839da344f20b111d62cc932fa4e5d",
- "sha256:7c9af5a3b406a50e313467e3565fc99929717f780164fe6fbb7704edba0cebbe",
- "sha256:7ec6f6ce99dab90b52da21cf0dc519e21095e332ff3b399a357c187b1a5eee32",
- "sha256:833b86a98e0ede388fa29363159c9b1a294b0905b5128baf01db683672f230f5",
- "sha256:84a6f19ce086c1bf894644b43cd129702f781ba5751ca8572f08aa40ef0ab7b7",
- "sha256:8507eda3cd0608a1f94f58c64817e83ec12fa93a9436938b191b80d9e4c0fc44",
- "sha256:85ec677246533e27770b0de5cf0f9d6e4ec0c212a1f89dfc941b64b21226009d",
- "sha256:8aca1152d93dcc27dc55395604dcfc55bed5f25ef4c98716a928bacba90d33a3",
- "sha256:8d935f924bbab8f0a9a28404422da8af4904e36d5c33fc6f677e4c4485515625",
- "sha256:8f36397bf3f7d7c6a3abdea815ecf6fd14e7fcd4418ab24bae01008d8d8ca15e",
- "sha256:91ec6fe47b5eb5a9968c79ad9ed78c342b1f97a091677ba0e012701add857829",
- "sha256:965e4a05ef364e7b973dd17fc765f42233415974d773e82144c9bbaaaea5d089",
- "sha256:96e88745a55b88a7c64fa49bceff363a1a27d9a64e04019c2281049444a571e3",
- "sha256:99eb6cafb6ba90e436684e08dad8be1637efb71c4f2180ee6b8f940739406e78",
- "sha256:9adf58f5d64e474bed00d69bcd86ec4bcaa4123bfa70a65ce72e424bfb88ed96",
- "sha256:9b1af95c3a967bf1da94f253e56b6286b50af23392a886720f563c547e48e964",
- "sha256:a0aa9417994d91301056f3d0038af1199eb7adc86e646a36b9e050b06f526597",
- "sha256:a0f9bb6c80e6efcde93ffc51256d5cfb2155ff8f78292f074f60f9e70b942d99",
- "sha256:a127ae76092974abfbfa38ca2d12cbeddcdeac0fb71f9627cc1135bedaf9d51a",
- "sha256:aaf305d6d40bd9632198c766fb64f0c1a83ca5b667f16c1e79e1661ab5060140",
- "sha256:aca1c196f407ec7cf04dcbb15d19a43c507a81f7ffc45b690899d6a76ac9fda7",
- "sha256:ace6ca218308447b9077c14ea4ef381ba0b67ee78d64046b3f19cf4e1139ad16",
- "sha256:b416f03d37d27290cb93597335a2f85ed446731200705b22bb927405320de903",
- "sha256:bf548479d336726d7a0eceb6e767e179fbde37833ae42794602631a070d630f1",
- "sha256:c1170d6b195555644f0616fd6ed929dfcf6333b8675fcca044ae5ab110ded296",
- "sha256:c380b27d041209b849ed246b111b7c166ba36d7933ec6e41175fd15ab9eb1572",
- "sha256:c446d2245ba29820d405315083d55299a796695d747efceb5717a8b450324115",
- "sha256:c830a02caeb789633863b466b9de10c015bded434deb3ec87c768e53752ad22a",
- "sha256:cb841572862f629b99725ebaec3287fc6d275be9b14443ea746c1dd325053cbd",
- "sha256:cfa4561277f677ecf651e2b22dc43e8f5368b74a25a8f7d1d4a3a243e573f2d4",
- "sha256:cfcc2c53c06f2ccb8976fb5c71d448bdd0a07d26d8e07e321c103416444c7ad1",
- "sha256:d3c6b54e304c60c4181da1c9dadf83e4a54fd266a99c70ba646a9baa626819eb",
- "sha256:d3d403753c9d5adc04d4694d35cf0391f0f3d57c8e0030aac09d7678fa8030aa",
- "sha256:d9c206c29b46cfd343ea7cdfe1232443072bbb270d6a46f59c259460db76779a",
- "sha256:e49eb4e95ff6fd7c0c402508894b1ef0e01b99a44320ba7d8ecbabefddcc5569",
- "sha256:f8286396b351785801a976b1e85ea88e937712ee2c3ac653710a4a57a8da5d9c",
- "sha256:f8fc330c3370a81bbf3f88557097d1ea26cd8b019d6433aa59f71195f5ddebbf",
- "sha256:fbd359831c1657d69bb81f0db962905ee05e5e9451913b18b831febfe0519082",
- "sha256:fe7e1c262d3392afcf5071df9afa574544f28eac825284596ac6db56e6d11062",
- "sha256:fed1e1cf6a42577953abbe8e6cf2fe2f566daebde7c34724ec8803c4c0cda579"
- ],
- "markers": "python_version >= '3.7'",
- "version": "==9.5.0"
+ "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c",
+ "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2",
+ "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb",
+ "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d",
+ "sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa",
+ "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3",
+ "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1",
+ "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a",
+ "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd",
+ "sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8",
+ "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999",
+ "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599",
+ "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936",
+ "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375",
+ "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d",
+ "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b",
+ "sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60",
+ "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572",
+ "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3",
+ "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced",
+ "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f",
+ "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b",
+ "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19",
+ "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f",
+ "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d",
+ "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383",
+ "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795",
+ "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355",
+ "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57",
+ "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09",
+ "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b",
+ "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462",
+ "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf",
+ "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f",
+ "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a",
+ "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad",
+ "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9",
+ "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d",
+ "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45",
+ "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994",
+ "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d",
+ "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338",
+ "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463",
+ "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451",
+ "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591",
+ "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c",
+ "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd",
+ "sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32",
+ "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9",
+ "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf",
+ "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5",
+ "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828",
+ "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3",
+ "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5",
+ "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2",
+ "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b",
+ "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2",
+ "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475",
+ "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3",
+ "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb",
+ "sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef",
+ "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015",
+ "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002",
+ "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170",
+ "sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84",
+ "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57",
+ "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f",
+ "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27",
+ "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==10.3.0"
},
"pkginfo": {
"hashes": [
- "sha256:4b7a555a6d5a22169fcc9cf7bfd78d296b0361adad412a346c1226849af5e546",
- "sha256:8fd5896e8718a4372f0ea9cc9d96f6417c9b986e23a4d116dda26b62cc29d046"
+ "sha256:6d4998d1cd42c297af72cc0eab5f5bab1d356fb8a55b828fa914173f8bc1ba05",
+ "sha256:dba885aa82e31e80d615119874384923f4e011c2a39b0c4b7104359e36cb7087"
],
- "markers": "python_version >= '3.6'",
- "version": "==1.9.6"
+ "markers": "python_version >= '3.8'",
+ "version": "==1.11.0"
},
"platformdirs": {
"hashes": [
- "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3",
- "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e"
+ "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee",
+ "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"
],
- "markers": "python_version >= '3.7'",
- "version": "==3.11.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==4.2.2"
},
"pluggy": {
"hashes": [
- "sha256:c2fd55a7d7a3863cba1a013e4e2414658b1d07b6bc57b3919e0c63c9abb99849",
- "sha256:d12f0c4b579b15f5e054301bb226ee85eeeba08ffec228092f8defbaa3a4c4b3"
+ "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1",
+ "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"
],
- "markers": "python_version >= '3.7'",
- "version": "==1.2.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==1.5.0"
},
"py": {
"hashes": [
@@ -901,39 +1401,54 @@
},
"pycodestyle": {
"hashes": [
- "sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068",
- "sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef"
+ "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f",
+ "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
- "version": "==2.7.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==2.11.1"
},
"pydantic": {
"hashes": [
- "sha256:021ea0e4133e8c824775a0cfe098677acf6fa5a3cbf9206a376eed3fc09302cd",
- "sha256:05ddfd37c1720c392f4e0d43c484217b7521558302e7069ce8d318438d297739",
- "sha256:05ef5246a7ffd2ce12a619cbb29f3307b7c4509307b1b49f456657b43529dc6f",
- "sha256:10e5622224245941efc193ad1d159887872776df7a8fd592ed746aa25d071840",
- "sha256:18b5ea242dd3e62dbf89b2b0ec9ba6c7b5abaf6af85b95a97b00279f65845a23",
- "sha256:234a6c19f1c14e25e362cb05c68afb7f183eb931dd3cd4605eafff055ebbf287",
- "sha256:244ad78eeb388a43b0c927e74d3af78008e944074b7d0f4f696ddd5b2af43c62",
- "sha256:26464e57ccaafe72b7ad156fdaa4e9b9ef051f69e175dbbb463283000c05ab7b",
- "sha256:41b542c0b3c42dc17da70554bc6f38cbc30d7066d2c2815a94499b5684582ecb",
- "sha256:4a03cbbe743e9c7247ceae6f0d8898f7a64bb65800a45cbdc52d65e370570820",
- "sha256:4be75bebf676a5f0f87937c6ddb061fa39cbea067240d98e298508c1bda6f3f3",
- "sha256:54cd5121383f4a461ff7644c7ca20c0419d58052db70d8791eacbbe31528916b",
- "sha256:589eb6cd6361e8ac341db97602eb7f354551482368a37f4fd086c0733548308e",
- "sha256:8621559dcf5afacf0069ed194278f35c255dc1a1385c28b32dd6c110fd6531b3",
- "sha256:8b223557f9510cf0bfd8b01316bf6dd281cf41826607eada99662f5e4963f316",
- "sha256:99a9fc39470010c45c161a1dc584997f1feb13f689ecf645f59bb4ba623e586b",
- "sha256:a7c6002203fe2c5a1b5cbb141bb85060cbff88c2d78eccbc72d97eb7022c43e4",
- "sha256:a83db7205f60c6a86f2c44a61791d993dff4b73135df1973ecd9eed5ea0bda20",
- "sha256:ac8eed4ca3bd3aadc58a13c2aa93cd8a884bcf21cb019f8cfecaae3b6ce3746e",
- "sha256:e710876437bc07bd414ff453ac8ec63d219e7690128d925c6e82889d674bb505",
- "sha256:ea5cb40a3b23b3265f6325727ddfc45141b08ed665458be8c6285e7b85bd73a1",
- "sha256:fec866a0b59f372b7e776f2d7308511784dace622e0992a0b59ea3ccee0ae833"
- ],
- "markers": "python_full_version >= '3.6.1'",
- "version": "==1.8.2"
+ "sha256:0fe8a415cea8f340e7a9af9c54fc71a649b43e8ca3cc732986116b3cb135d303",
+ "sha256:1289c180abd4bd4555bb927c42ee42abc3aee02b0fb2d1223fb7c6e5bef87dbe",
+ "sha256:1eb2085c13bce1612da8537b2d90f549c8cbb05c67e8f22854e201bde5d98a47",
+ "sha256:2031de0967c279df0d8a1c72b4ffc411ecd06bac607a212892757db7462fc494",
+ "sha256:2a7bac939fa326db1ab741c9d7f44c565a1d1e80908b3797f7f81a4f86bc8d33",
+ "sha256:2d5a58feb9a39f481eda4d5ca220aa8b9d4f21a41274760b9bc66bfd72595b86",
+ "sha256:2f9a6fab5f82ada41d56b0602606a5506aab165ca54e52bc4545028382ef1c5d",
+ "sha256:2fcfb5296d7877af406ba1547dfde9943b1256d8928732267e2653c26938cd9c",
+ "sha256:549a8e3d81df0a85226963611950b12d2d334f214436a19537b2efed61b7639a",
+ "sha256:598da88dfa127b666852bef6d0d796573a8cf5009ffd62104094a4fe39599565",
+ "sha256:5d1197e462e0364906cbc19681605cb7c036f2475c899b6f296104ad42b9f5fb",
+ "sha256:69328e15cfda2c392da4e713443c7dbffa1505bc9d566e71e55abe14c97ddc62",
+ "sha256:6a9dfa722316f4acf4460afdf5d41d5246a80e249c7ff475c43a3a1e9d75cf62",
+ "sha256:6b30bcb8cbfccfcf02acb8f1a261143fab622831d9c0989707e0e659f77a18e0",
+ "sha256:6c076be61cd0177a8433c0adcb03475baf4ee91edf5a4e550161ad57fc90f523",
+ "sha256:771735dc43cf8383959dc9b90aa281f0b6092321ca98677c5fb6125a6f56d58d",
+ "sha256:795e34e6cc065f8f498c89b894a3c6da294a936ee71e644e4bd44de048af1405",
+ "sha256:87afda5539d5140cb8ba9e8b8c8865cb5b1463924d38490d73d3ccfd80896b3f",
+ "sha256:8fb2aa3ab3728d950bcc885a2e9eff6c8fc40bc0b7bb434e555c215491bcf48b",
+ "sha256:a1fcb59f2f355ec350073af41d927bf83a63b50e640f4dbaa01053a28b7a7718",
+ "sha256:a5e7add47a5b5a40c49b3036d464e3c7802f8ae0d1e66035ea16aa5b7a3923ed",
+ "sha256:a73f489aebd0c2121ed974054cb2759af8a9f747de120acd2c3394cf84176ccb",
+ "sha256:ab26038b8375581dc832a63c948f261ae0aa21f1d34c1293469f135fa92972a5",
+ "sha256:b0d191db0f92dfcb1dec210ca244fdae5cbe918c6050b342d619c09d31eea0cc",
+ "sha256:b749a43aa51e32839c9d71dc67eb1e4221bb04af1033a32e3923d46f9effa942",
+ "sha256:b7ccf02d7eb340b216ec33e53a3a629856afe1c6e0ef91d84a4e6f2fb2ca70fe",
+ "sha256:ba5b2e6fe6ca2b7e013398bc7d7b170e21cce322d266ffcd57cca313e54fb246",
+ "sha256:ba5c4a8552bff16c61882db58544116d021d0b31ee7c66958d14cf386a5b5350",
+ "sha256:c79e6a11a07da7374f46970410b41d5e266f7f38f6a17a9c4823db80dadf4303",
+ "sha256:ca48477862372ac3770969b9d75f1bf66131d386dba79506c46d75e6b48c1e09",
+ "sha256:dea7adcc33d5d105896401a1f37d56b47d443a2b2605ff8a969a0ed5543f7e33",
+ "sha256:e0a16d274b588767602b7646fa05af2782576a6cf1022f4ba74cbb4db66f6ca8",
+ "sha256:e4129b528c6baa99a429f97ce733fff478ec955513630e61b49804b6cf9b224a",
+ "sha256:e5f805d2d5d0a41633651a73fa4ecdd0b3d7a49de4ec3fadf062fe16501ddbf1",
+ "sha256:ef6c96b2baa2100ec91a4b428f80d8f28a3c9e53568219b6c298c1125572ebc6",
+ "sha256:fdbdd1d630195689f325c9ef1a12900524dceb503b00a987663ff4f58669b93d"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.7'",
+ "version": "==1.10.12"
},
"pydocstyle": {
"hashes": [
@@ -945,91 +1460,92 @@
},
"pyflakes": {
"hashes": [
- "sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3",
- "sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db"
+ "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f",
+ "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
- "version": "==2.3.1"
+ "markers": "python_version >= '3.8'",
+ "version": "==3.2.0"
},
"pygments": {
"hashes": [
- "sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692",
- "sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29"
+ "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199",
+ "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"
],
- "markers": "python_version >= '3.7'",
- "version": "==2.16.1"
+ "markers": "python_version >= '3.8'",
+ "version": "==2.18.0"
},
"pyparsing": {
"hashes": [
- "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb",
- "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"
+ "sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad",
+ "sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"
],
"markers": "python_full_version >= '3.6.8'",
- "version": "==3.1.1"
+ "version": "==3.1.2"
},
"pyrsistent": {
"hashes": [
- "sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8",
- "sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440",
- "sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a",
- "sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c",
- "sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3",
- "sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393",
- "sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9",
- "sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da",
- "sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf",
- "sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64",
- "sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a",
- "sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3",
- "sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98",
- "sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2",
- "sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8",
- "sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf",
- "sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc",
- "sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7",
- "sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28",
- "sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2",
- "sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b",
- "sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a",
- "sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64",
- "sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19",
- "sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1",
- "sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9",
- "sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c"
- ],
- "markers": "python_version >= '3.7'",
- "version": "==0.19.3"
- },
- "pyserial": {
- "hashes": [
- "sha256:3c77e014170dfffbd816e6ffc205e9842efb10be9f58ec16d3e8675b4925cddb",
- "sha256:c4451db6ba391ca6ca299fb3ec7bae67a5c55dde170964c7a14ceefec02f2cf0"
- ],
- "version": "==3.5"
+ "sha256:0724c506cd8b63c69c7f883cc233aac948c1ea946ea95996ad8b1380c25e1d3f",
+ "sha256:09848306523a3aba463c4b49493a760e7a6ca52e4826aa100ee99d8d39b7ad1e",
+ "sha256:0f3b1bcaa1f0629c978b355a7c37acd58907390149b7311b5db1b37648eb6958",
+ "sha256:21cc459636983764e692b9eba7144cdd54fdec23ccdb1e8ba392a63666c60c34",
+ "sha256:2e14c95c16211d166f59c6611533d0dacce2e25de0f76e4c140fde250997b3ca",
+ "sha256:2e2c116cc804d9b09ce9814d17df5edf1df0c624aba3b43bc1ad90411487036d",
+ "sha256:4021a7f963d88ccd15b523787d18ed5e5269ce57aa4037146a2377ff607ae87d",
+ "sha256:4c48f78f62ab596c679086084d0dd13254ae4f3d6c72a83ffdf5ebdef8f265a4",
+ "sha256:4f5c2d012671b7391803263419e31b5c7c21e7c95c8760d7fc35602353dee714",
+ "sha256:58b8f6366e152092194ae68fefe18b9f0b4f89227dfd86a07770c3d86097aebf",
+ "sha256:59a89bccd615551391f3237e00006a26bcf98a4d18623a19909a2c48b8e986ee",
+ "sha256:5cdd7ef1ea7a491ae70d826b6cc64868de09a1d5ff9ef8d574250d0940e275b8",
+ "sha256:6288b3fa6622ad8a91e6eb759cfc48ff3089e7c17fb1d4c59a919769314af224",
+ "sha256:6d270ec9dd33cdb13f4d62c95c1a5a50e6b7cdd86302b494217137f760495b9d",
+ "sha256:79ed12ba79935adaac1664fd7e0e585a22caa539dfc9b7c7c6d5ebf91fb89054",
+ "sha256:7d29c23bdf6e5438c755b941cef867ec2a4a172ceb9f50553b6ed70d50dfd656",
+ "sha256:8441cf9616d642c475684d6cf2520dd24812e996ba9af15e606df5f6fd9d04a7",
+ "sha256:881bbea27bbd32d37eb24dd320a5e745a2a5b092a17f6debc1349252fac85423",
+ "sha256:8c3aba3e01235221e5b229a6c05f585f344734bd1ad42a8ac51493d74722bbce",
+ "sha256:a14798c3005ec892bbada26485c2eea3b54109cb2533713e355c806891f63c5e",
+ "sha256:b14decb628fac50db5e02ee5a35a9c0772d20277824cfe845c8a8b717c15daa3",
+ "sha256:b318ca24db0f0518630e8b6f3831e9cba78f099ed5c1d65ffe3e023003043ba0",
+ "sha256:c1beb78af5423b879edaf23c5591ff292cf7c33979734c99aa66d5914ead880f",
+ "sha256:c55acc4733aad6560a7f5f818466631f07efc001fd023f34a6c203f8b6df0f0b",
+ "sha256:ca52d1ceae015859d16aded12584c59eb3825f7b50c6cfd621d4231a6cc624ce",
+ "sha256:cae40a9e3ce178415040a0383f00e8d68b569e97f31928a3a8ad37e3fde6df6a",
+ "sha256:e78d0c7c1e99a4a45c99143900ea0546025e41bb59ebc10182e947cf1ece9174",
+ "sha256:ef3992833fbd686ee783590639f4b8343a57f1f75de8633749d984dc0eb16c86",
+ "sha256:f058a615031eea4ef94ead6456f5ec2026c19fb5bd6bfe86e9665c4158cf802f",
+ "sha256:f5ac696f02b3fc01a710427585c855f65cd9c640e14f52abe52020722bb4906b",
+ "sha256:f920385a11207dc372a028b3f1e1038bb244b3ec38d448e6d8e43c6b3ba20e98",
+ "sha256:fed2c3216a605dc9a6ea50c7e84c82906e3684c4e80d2908208f662a6cbf9022"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==0.20.0"
},
"pytest": {
"hashes": [
- "sha256:9ce3ff477af913ecf6321fe337b93a2c0dcf2a0a1439c43f5452112c1e4280db",
- "sha256:e30905a0c131d3d94b89624a1cc5afec3e0ba2fbdb151867d8e0ebd49850f171"
+ "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280",
+ "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"
],
"index": "pypi",
- "version": "==7.0.1"
+ "markers": "python_version >= '3.7'",
+ "version": "==7.4.4"
},
"pytest-asyncio": {
"hashes": [
- "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d",
- "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"
+ "sha256:009b48127fbe44518a547bddd25611551b0e43ccdbf1e67d12479f569832c20b",
+ "sha256:5f5c72948f4c49e7db4f29f2521d4031f1c27f86e57b046126654083d4770268"
],
"index": "pypi",
- "version": "==0.21.1"
+ "markers": "python_version >= '3.8'",
+ "version": "==0.23.7"
},
"pytest-cov": {
"hashes": [
- "sha256:45ec2d5182f89a81fc3eb29e3d1ed3113b9e9a873bcddb2a71faaab066110191",
- "sha256:47bd0ce14056fdd79f93e1713f88fad7bdcc583dcd7783da86ef2f085a0bb88e"
+ "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6",
+ "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"
],
"index": "pypi",
- "version": "==2.10.1"
+ "markers": "python_version >= '3.7'",
+ "version": "==4.1.0"
},
"pytest-forked": {
"hashes": [
@@ -1059,50 +1575,36 @@
},
"pytest-xdist": {
"hashes": [
- "sha256:2447a1592ab41745955fb870ac7023026f20a5f0bfccf1b52a879bd193d46450",
- "sha256:718887296892f92683f6a51f25a3ae584993b06f7076ce1e1fd482e59a8220a2"
+ "sha256:4580deca3ff04ddb2ac53eba39d76cb5dd5edeac050cb6fbc768b0dd712b4edf",
+ "sha256:6fe5c74fec98906deb8f2d2b616b5c782022744978e7bd4695d39c8f42d0ce65"
],
"index": "pypi",
- "version": "==2.2.1"
- },
- "python-can": {
- "hashes": [
- "sha256:2d3c223b7adc4dd46ce258d4a33b7e0dbb6c339e002faa40ee4a69d5fdce9449"
- ],
- "markers": "python_version >= '2.7'",
- "version": "==3.3.4"
+ "markers": "python_version >= '3.6'",
+ "version": "==2.5.0"
},
"python-dateutil": {
"hashes": [
- "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86",
- "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"
+ "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3",
+ "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'",
- "version": "==2.8.2"
- },
- "pytz": {
- "hashes": [
- "sha256:7b4fddbeb94a1eba4b557da24f19fdf9db575192544270a9101d8509f9f43d7b",
- "sha256:ce42d816b81b68506614c11e8937d3aa9e41007ceb50bfdcb0749b921bf646c7"
- ],
- "markers": "python_version < '3.9'",
- "version": "==2023.3.post1"
+ "version": "==2.9.0.post0"
},
"readme-renderer": {
"hashes": [
- "sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273",
- "sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343"
+ "sha256:1818dd28140813509eeed8d62687f7cd4f7bad90d4db586001c5dc09d4fde311",
+ "sha256:19db308d86ecd60e5affa3b2a98f017af384678c63c88e5d4556a380e674f3f9"
],
- "markers": "python_version >= '3.7'",
- "version": "==37.3"
+ "markers": "python_version >= '3.8'",
+ "version": "==43.0"
},
"requests": {
"hashes": [
- "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f",
- "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"
+ "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760",
+ "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"
],
- "markers": "python_version >= '3.7'",
- "version": "==2.31.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==2.32.3"
},
"requests-toolbelt": {
"hashes": [
@@ -1122,11 +1624,11 @@
},
"rich": {
"hashes": [
- "sha256:2b38e2fe9ca72c9a00170a1a2d20c63c790d0e10ef1fe35eba76e1e7b1d7d245",
- "sha256:5c14d22737e6d5084ef4771b62d5d4363165b403455a30a1c8ca39dc7b644bef"
+ "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222",
+ "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"
],
- "markers": "python_version >= '3.7'",
- "version": "==13.6.0"
+ "markers": "python_full_version >= '3.7.0'",
+ "version": "==13.7.1"
},
"six": {
"hashes": [
@@ -1136,14 +1638,6 @@
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'",
"version": "==1.16.0"
},
- "sniffio": {
- "hashes": [
- "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101",
- "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"
- ],
- "markers": "python_version >= '3.7'",
- "version": "==1.3.0"
- },
"snowballstemmer": {
"hashes": [
"sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1",
@@ -1164,6 +1658,7 @@
"sha256:f4da1187785a5bc7312cc271b0e867a93946c319d106363e102936a3d9857306"
],
"index": "pypi",
+ "markers": "python_version >= '3.6'",
"version": "==5.0.1"
},
"sphinx-prompt": {
@@ -1182,35 +1677,36 @@
},
"sphinx-tabs": {
"hashes": [
- "sha256:7cea8942aeccc5d01a995789c01804b787334b55927f29b36ba16ed1e7cb27c6",
- "sha256:d2a09f9e8316e400d57503f6df1c78005fdde220e5af589cc79d493159e1b832"
+ "sha256:92cc9473e2ecf1828ca3f6617d0efc0aa8acb06b08c56ba29d1413f2f0f6cf09",
+ "sha256:ba9d0c1e3e37aaadd4b5678449eb08176770e0fc227e769b6ce747df3ceea531"
],
"index": "pypi",
- "version": "==3.4.1"
+ "markers": "python_version ~= '3.7'",
+ "version": "==3.4.5"
},
"sphinxcontrib-applehelp": {
"hashes": [
- "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a",
- "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58"
+ "sha256:c40a4f96f3776c4393d933412053962fac2b84f4c99a7982ba42e09576a70619",
+ "sha256:cb61eb0ec1b61f349e5cc36b2028e9e7ca765be05e49641c97241274753067b4"
],
- "markers": "python_version >= '3.5'",
- "version": "==1.0.2"
+ "markers": "python_version >= '3.9'",
+ "version": "==1.0.8"
},
"sphinxcontrib-devhelp": {
"hashes": [
- "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e",
- "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"
+ "sha256:6485d09629944511c893fa11355bda18b742b83a2b181f9a009f7e500595c90f",
+ "sha256:9893fd3f90506bc4b97bdb977ceb8fbd823989f4316b28c3841ec128544372d3"
],
- "markers": "python_version >= '3.5'",
- "version": "==1.0.2"
+ "markers": "python_version >= '3.9'",
+ "version": "==1.0.6"
},
"sphinxcontrib-htmlhelp": {
"hashes": [
- "sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07",
- "sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2"
+ "sha256:0dc87637d5de53dd5eec3a6a01753b1ccf99494bd756aafecd74b4fa9e729015",
+ "sha256:393f04f112b4d2f53d93448d4bce35842f62b307ccdc549ec1585e950bc35e04"
],
- "markers": "python_version >= '3.6'",
- "version": "==2.0.0"
+ "markers": "python_version >= '3.9'",
+ "version": "==2.0.5"
},
"sphinxcontrib-jsmath": {
"hashes": [
@@ -1222,19 +1718,19 @@
},
"sphinxcontrib-qthelp": {
"hashes": [
- "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72",
- "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"
+ "sha256:053dedc38823a80a7209a80860b16b722e9e0209e32fea98c90e4e6624588ed6",
+ "sha256:e2ae3b5c492d58fcbd73281fbd27e34b8393ec34a073c792642cd8e529288182"
],
- "markers": "python_version >= '3.5'",
- "version": "==1.0.3"
+ "markers": "python_version >= '3.9'",
+ "version": "==1.0.7"
},
"sphinxcontrib-serializinghtml": {
"hashes": [
- "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd",
- "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"
+ "sha256:326369b8df80a7d2d8d7f99aa5ac577f51ea51556ed974e7716cfd4fca3f6cb7",
+ "sha256:93f3f5dc458b91b192fe10c397e324f262cf163d79f3282c158e8436a2c4511f"
],
- "markers": "python_version >= '3.5'",
- "version": "==1.1.5"
+ "markers": "python_version >= '3.9'",
+ "version": "==1.1.10"
},
"sphinxext-opengraph": {
"hashes": [
@@ -1242,16 +1738,9 @@
"sha256:64fe993d4974c65202d1c8f1c986abb559154a814a6378f9d3aaf8c7c9bd62bc"
],
"index": "pypi",
+ "markers": "python_version >= '3.7'",
"version": "==0.8.1"
},
- "toml": {
- "hashes": [
- "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b",
- "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"
- ],
- "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2'",
- "version": "==0.10.2"
- },
"tomli": {
"hashes": [
"sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc",
@@ -1262,63 +1751,30 @@
},
"twine": {
"hashes": [
- "sha256:929bc3c280033347a00f847236564d1c52a3e61b1ac2516c97c48f3ceab756d8",
- "sha256:9e102ef5fdd5a20661eb88fad46338806c3bd32cf1db729603fe3697b1bc83c8"
+ "sha256:6f7496cf14a3a8903474552d5271c79c71916519edb42554f23f42a8563498a9",
+ "sha256:817aa0c0bdc02a5ebe32051e168e23c71a0608334e624c793011f120dbbc05b7"
],
"index": "pypi",
- "version": "==4.0.2"
- },
- "typed-ast": {
- "hashes": [
- "sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace",
- "sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff",
- "sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266",
- "sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528",
- "sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6",
- "sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808",
- "sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4",
- "sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363",
- "sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341",
- "sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04",
- "sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41",
- "sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e",
- "sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3",
- "sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899",
- "sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805",
- "sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c",
- "sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c",
- "sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39",
- "sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a",
- "sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3",
- "sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7",
- "sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f",
- "sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075",
- "sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0",
- "sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40",
- "sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428",
- "sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927",
- "sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3",
- "sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f",
- "sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65"
- ],
- "markers": "python_version < '3.8' and implementation_name == 'cpython' and python_version < '3.8'",
- "version": "==1.4.3"
+ "markers": "python_version >= '3.7'",
+ "version": "==4.0.0"
},
"typeguard": {
"hashes": [
- "sha256:c2af8b9bdd7657f4bd27b45336e7930171aead796711bc4cfc99b4731bb9d051",
- "sha256:cc15ef2704c9909ef9c80e19c62fb8468c01f75aad12f651922acf4dbe822e02"
+ "sha256:8923e55f8873caec136c892c3bed1f676eae7be57cdb94819281b3d3bc9c0953",
+ "sha256:ea0a113bbc111bcffc90789ebb215625c963411f7096a7e9062d4e4630c155fd"
],
"index": "pypi",
- "version": "==2.12.1"
+ "markers": "python_version >= '3.8'",
+ "version": "==4.1.5"
},
"types-mock": {
"hashes": [
- "sha256:1a470543be8de673e2ea14739622de3bfb8c9b10429f50338ba9ca1e868c15e9",
- "sha256:1ad09970f4f5ec45a138ab1e88d032f010e851bccef7765b34737ed390bbc5c8"
+ "sha256:5281a645d72e827d70043e3cc144fe33b1c003db084f789dc203aa90e812a5a4",
+ "sha256:d586a01d39ad919d3ddcd73de6cde73ca7f3c69707219f722d1b8d7733641ad7"
],
"index": "pypi",
- "version": "==4.0.1"
+ "markers": "python_version >= '3.8'",
+ "version": "==5.1.0.20240425"
},
"types-setuptools": {
"hashes": [
@@ -1330,123 +1786,36 @@
},
"typing-extensions": {
"hashes": [
- "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36",
- "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"
+ "sha256:6024b58b69089e5a89c347397254e35f1bf02a907728ec7fee9bf0fe837d203a",
+ "sha256:915f5e35ff76f56588223f15fdd5938f9a1cf9195c0de25130c627e4d597f6d1"
],
- "index": "pypi",
- "version": "==4.7.1"
+ "markers": "python_version >= '3.8'",
+ "version": "==4.12.1"
},
"urllib3": {
"hashes": [
- "sha256:7a7c7003b000adf9e7ca2a377c9688bbc54ed41b985789ed576570342a375cd2",
- "sha256:b19e1a85d206b56d7df1d5e683df4a7725252a964e3993648dd0fb5a1c157564"
+ "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d",
+ "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"
],
- "markers": "python_version >= '3.7'",
- "version": "==2.0.6"
- },
- "webencodings": {
- "hashes": [
- "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78",
- "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"
- ],
- "version": "==0.5.1"
+ "markers": "python_version >= '3.8'",
+ "version": "==2.2.1"
},
"wheel": {
"hashes": [
- "sha256:9515fe0a94e823fd90b08d22de45d7bde57c90edce705b22f5e1ecf7e1b653c8",
- "sha256:e721e53864f084f956f40f96124a74da0631ac13fbbd1ba99e8e2b5e9cafdf64"
+ "sha256:21014b2bd93c6d0034b6ba5d35e4eb284340e09d63c59aef6fc14b0f346146fd",
+ "sha256:e2ef7239991699e3355d54f8e968a21bb940a1dbf34a4d226741e64462516fad"
],
"index": "pypi",
- "version": "==0.30.0"
- },
- "wrapt": {
- "hashes": [
- "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0",
- "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420",
- "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a",
- "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c",
- "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079",
- "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923",
- "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f",
- "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1",
- "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8",
- "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86",
- "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0",
- "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364",
- "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e",
- "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c",
- "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e",
- "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c",
- "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727",
- "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff",
- "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e",
- "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29",
- "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7",
- "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72",
- "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475",
- "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a",
- "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317",
- "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2",
- "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd",
- "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640",
- "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98",
- "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248",
- "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e",
- "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d",
- "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec",
- "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1",
- "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e",
- "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9",
- "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92",
- "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb",
- "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094",
- "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46",
- "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29",
- "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd",
- "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705",
- "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8",
- "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975",
- "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb",
- "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e",
- "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b",
- "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418",
- "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019",
- "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1",
- "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba",
- "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6",
- "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2",
- "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3",
- "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7",
- "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752",
- "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416",
- "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f",
- "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1",
- "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc",
- "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145",
- "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee",
- "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a",
- "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7",
- "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b",
- "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653",
- "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0",
- "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90",
- "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29",
- "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6",
- "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034",
- "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09",
- "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559",
- "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"
- ],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
- "version": "==1.15.0"
+ "version": "==0.37.0"
},
"zipp": {
"hashes": [
- "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b",
- "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"
+ "sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091",
+ "sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f"
],
- "markers": "python_version < '3.10'",
- "version": "==3.15.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==3.19.1"
}
}
}
diff --git a/api/docs/img/Flex-and-OT-2-decks.svg b/api/docs/img/Flex-and-OT-2-decks.svg
deleted file mode 100644
index d8615b34752..00000000000
--- a/api/docs/img/Flex-and-OT-2-decks.svg
+++ /dev/null
@@ -1,225 +0,0 @@
-
diff --git a/api/docs/img/OT-2-deck.svg b/api/docs/img/OT-2-deck.svg
new file mode 100644
index 00000000000..36d15a77d1b
--- /dev/null
+++ b/api/docs/img/OT-2-deck.svg
@@ -0,0 +1,94 @@
+
+
+
diff --git a/api/docs/img/flex-deck.svg b/api/docs/img/flex-deck.svg
new file mode 100644
index 00000000000..50681b9e01d
--- /dev/null
+++ b/api/docs/img/flex-deck.svg
@@ -0,0 +1,477 @@
+
+
+
diff --git a/api/docs/img/lightbulb.jpg b/api/docs/img/lightbulb.jpg
deleted file mode 100644
index 41f67329264..00000000000
Binary files a/api/docs/img/lightbulb.jpg and /dev/null differ
diff --git a/api/docs/v1/atomic_commands.rst b/api/docs/v1/atomic_commands.rst
index c629188b2af..0c942b9259a 100644
--- a/api/docs/v1/atomic_commands.rst
+++ b/api/docs/v1/atomic_commands.rst
@@ -22,19 +22,19 @@ This section demonstrates the options available for controlling tips
'''
from opentrons import labware, instruments, robot
- tiprack = labware.load('opentrons_96_tiprack_300ul', '2')
+ tiprack = labware.load("opentrons_96_tiprack_300ul", "2")
- pipette = instruments.P300_Single(mount='left')
+ pipette = instruments.P300_Single(mount="left")
Pick Up Tip
===========
-Before any liquid handling can be done, your pipette must have a tip on it. The command ``pick_up_tip()`` will move the pipette over to the specified tip, the press down into it to create a vacuum seal. The below example picks up the tip at location ``'A1'``.
+Before any liquid handling can be done, your pipette must have a tip on it. The command ``pick_up_tip()`` will move the pipette over to the specified tip, then press down into it to create a vacuum seal. The example below picks up the tip at location ``"A1"``.
.. code-block:: python
- pipette.pick_up_tip(tiprack.wells('A1'))
+ pipette.pick_up_tip(tiprack.wells("A1"))
Drop Tip
===========
@@ -44,24 +44,24 @@ If no location is specified, it will go to the fixed trash location on the deck.
.. code-block:: python
- pipette.drop_tip(tiprack.wells('A1'))
+ pipette.drop_tip(tiprack.wells("A1"))
Instead of returning a tip to the tip rack, we can also drop it in an alternative trash container besides the fixed trash on the deck.
.. code-block:: python
- trash = labware.load('trash-box', '1')
- pipette.pick_up_tip(tiprack.wells('A2'))
+ trash = labware.load("trash-box", "1")
+ pipette.pick_up_tip(tiprack.wells("A2"))
pipette.drop_tip(trash)
Return Tip
===========
-When we need to return the tip to its originating location on the tip rack, we can simply call ``return_tip()``. The example below will automatically return the tip to ``'A3'`` on the tip rack.
+When we need to return the tip to its originating location on the tip rack, we can simply call ``return_tip()``. The example below will automatically return the tip to ``"A3"`` on the tip rack.
.. code-block:: python
- pipette.pick_up_tip(tiprack.wells('A3'))
+ pipette.pick_up_tip(tiprack.wells("A3"))
pipette.return_tip()
@@ -80,9 +80,9 @@ If no location is specified, the pipette will move to the next available tip by
'''
from opentrons import labware, instruments, robot
- trash = labware.load('trash-box', '1')
- tip_rack_1 = containers.load('opentrons_96_tiprack_300ul', '2')
- tip_rack_2 = containers.load('opentrons_96_tiprack_300ul', '3')
+ trash = labware.load("trash-box", "1")
+ tip_rack_1 = containers.load("opentrons_96_tiprack_300ul", "2")
+ tip_rack_2 = containers.load("opentrons_96_tiprack_300ul", "3")
Attach Tip Rack to Pipette
--------------------------
@@ -95,7 +95,7 @@ Multiple tip racks are can be attached with the option ``tip_racks=[RACK_1, RACK
.. code-block:: python
- pipette = instruments.P300_Single(mount='left',
+ pipette = instruments.P300_Single(mount="left",
tip_racks=[tip_rack_1, tip_rack_2],
trash_container=trash)
@@ -147,18 +147,18 @@ If you plan to change out tipracks during the protocol run, you must reset tip t
Select Starting Tip
-------------------
-Calls to ``pick_up_tip()`` will by default start at the attached tip rack's ``'A1'`` location in order of tipracks listed. If you however want to start automatic tip iterating at a different tip, you can use ``start_at_tip()``.
+Calls to ``pick_up_tip()`` will by default start at the attached tip rack's ``"A1"`` location, in the order the tip racks are listed. If you instead want to start automatic tip iteration at a different tip, you can use ``start_at_tip()``.
.. code-block:: python
- pipette.start_at_tip(tip_rack_1.well('C3'))
+ pipette.start_at_tip(tip_rack_1.well("C3"))
pipette.pick_up_tip() # pick up C3 from "tip_rack_1"
pipette.return_tip()
Get Current Tip
---------------
-Get the source location of the pipette's current tip by calling ``current_tip()``. If the tip was from the ``'A1'`` position on our tip rack, ``current_tip()`` will return that position.
+Get the source location of the pipette's current tip by calling ``current_tip()``. If the tip was from the ``"A1"`` position on our tip rack, ``current_tip()`` will return that position.
.. code-block:: python
@@ -191,8 +191,8 @@ Please note that the default now for pipette aspirate and dispense location is a
'''
Examples in this section expect the following:
'''
- plate = labware.load('96-flat', '1')
- pipette = instruments.P300_Single(mount='left')
+ plate = labware.load("96-flat", "1")
+ pipette = instruments.P300_Single(mount="left")
pipette.pick_up_tip()
@@ -203,11 +203,11 @@ To aspirate is to pull liquid up into the pipette's tip. When calling aspirate o
.. code-block:: python
- pipette.aspirate(50, plate.wells('A1')) # aspirate 50uL from plate:A1
+ pipette.aspirate(50, plate.wells("A1")) # aspirate 50uL from plate:A1
Now our pipette's tip is holding 50uL.
-We can also simply specify how many microliters to aspirate, and not mention a location. The pipette in this circumstance will aspirate from it's current location (which we previously set as ``plate.wells('A1'))``.
+We can also simply specify how many microliters to aspirate without mentioning a location. In this case, the pipette will aspirate from its current location (which we previously set as ``plate.wells("A1")``).
.. code-block:: python
@@ -219,7 +219,7 @@ We can also specify only the location to aspirate from. If we do not tell the pi
.. code-block:: python
- pipette.aspirate(plate.wells('A2')) # aspirate until pipette fills from plate:A2
+ pipette.aspirate(plate.wells("A2")) # aspirate until pipette fills from plate:A2
Dispense
@@ -229,11 +229,11 @@ To dispense is to push out liquid from the pipette's tip. It's usage in the Open
.. code-block:: python
- pipette.dispense(50, plate.wells('B1')) # dispense 50uL to plate:B1
+ pipette.dispense(50, plate.wells("B1")) # dispense 50uL to plate:B1
pipette.dispense(50) # dispense 50uL to current position
- pipette.dispense(plate.wells('B2')) # dispense until pipette empties to plate:B2
+ pipette.dispense(plate.wells("B2")) # dispense until pipette empties to plate:B2
-That final dispense without specifying a micoliter amount will dispense all remaining liquids in the tip to ``plate.wells('B2')``, and now our pipette is empty.
+That final dispense without specifying a microliter amount will dispense all remaining liquid in the tip to ``plate.wells("B2")``, and now our pipette is empty.
Blow Out
========
@@ -245,7 +245,7 @@ When calling ``blow_out()`` on a pipette, we have the option to specify a locati
.. code-block:: python
pipette.blow_out() # blow out in current location
- pipette.blow_out(plate.wells('B3')) # blow out in current plate:B3
+ pipette.blow_out(plate.wells("B3")) # blow out in current plate:B3
Touch Tip
@@ -259,7 +259,7 @@ When calling ``touch_tip()`` on a pipette, we have the option to specify a locat
pipette.touch_tip() # touch tip within current location
pipette.touch_tip(v_offset=-2) # touch tip 2mm below the top of the current location
- pipette.touch_tip(plate.wells('B1')) # touch tip within plate:B1
+ pipette.touch_tip(plate.wells("B1")) # touch tip within plate:B1
Mix
@@ -271,7 +271,7 @@ The mix command takes three arguments: ``mix(repetitions, volume, location)``
.. code-block:: python
- pipette.mix(4, 100, plate.wells('A2')) # mix 4 times, 100uL, in plate:A2
+ pipette.mix(4, 100, plate.wells("A2")) # mix 4 times, 100uL, in plate:A2
pipette.mix(3, 50) # mix 3 times, 50uL, in current location
pipette.mix(2) # mix 2 times, pipette's max volume, in current location
@@ -283,7 +283,7 @@ Some liquids need an extra amount of air in the pipette's tip to prevent it from
.. code-block:: python
- pipette.aspirate(100, plate.wells('B4'))
+ pipette.aspirate(100, plate.wells("B4"))
pipette.air_gap(20)
pipette.drop_tip()
@@ -297,10 +297,10 @@ Some liquids need an extra amount of air in the pipette's tip to prevent it from
'''
Examples in this section expect the following
'''
- tiprack = labware.load('opentrons_96_tiprack_300ul', '1')
- plate = labware.load('96-flat', '2')
+ tiprack = labware.load("opentrons_96_tiprack_300ul", "1")
+ plate = labware.load("96-flat", "2")
- pipette = instruments.P300_Single(mount='right', tip_racks=[tiprack])
+ pipette = instruments.P300_Single(mount="right", tip_racks=[tiprack])
Controlling Speed
=================
@@ -316,10 +316,10 @@ using our `set_flow_rate` function. This can be called at any time during the pr
'''
Examples in this section expect the following
'''
- tiprack = labware.load('opentrons_96_tiprack_300ul', '1')
- plate = labware.load('96-flat', '2')
+ tiprack = labware.load("opentrons_96_tiprack_300ul", "1")
+ plate = labware.load("96-flat", "2")
- pipette = instruments.P300_Single(mount='right', tip_racks=[tiprack])
+ pipette = instruments.P300_Single(mount="right", tip_racks=[tiprack])
pipette.set_flow_rate(aspirate=50, dispense=100)
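Because flow rates can be changed at any point in the protocol, one illustrative pattern (the specific values here are only an example, not defaults) is to slow the pipette down while handling a viscous reagent and then restore faster rates afterwards:

.. code-block:: python

    # slow down for a viscous reagent
    pipette.set_flow_rate(aspirate=25, dispense=50)
    pipette.transfer(100, plate.wells("A1"), plate.wells("B1"))

    # speed back up for aqueous liquids
    pipette.set_flow_rate(aspirate=150, dispense=300)
    pipette.transfer(100, plate.wells("A2"), plate.wells("B2"))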
@@ -341,35 +341,35 @@ For example, we can move to the first tip in our tip rack:
.. code-block:: python
- pipette.move_to(tiprack.wells('A1'))
+ pipette.move_to(tiprack.wells("A1"))
You can also specify at what height you would like the robot to move to inside of a location using ``top()`` and ``bottom()`` methods on that location.
.. code-block:: python
- pipette.move_to(plate.wells('A1').bottom()) # move to the bottom of well A1
- pipette.move_to(plate.wells('A1').top()) # move to the top of well A1
- pipette.move_to(plate.wells('A1').bottom(2)) # move to 2mm above the bottom of well A1
- pipette.move_to(plate.wells('A1').top(-2)) # move to 2mm below the top of well A1
+ pipette.move_to(plate.wells("A1").bottom()) # move to the bottom of well A1
+ pipette.move_to(plate.wells("A1").top()) # move to the top of well A1
+ pipette.move_to(plate.wells("A1").bottom(2)) # move to 2mm above the bottom of well A1
+ pipette.move_to(plate.wells("A1").top(-2)) # move to 2mm below the top of well A1
-The above commands will cause the robot's head to first move upwards, then over to above the target location, then finally downwards until the target location is reached. If instead you would like the robot to move in a straight line to the target location, you can set the movement strategy to ``'direct'``.
+The above commands will cause the robot's head to first move upwards, then over to above the target location, then finally downwards until the target location is reached. If instead you would like the robot to move in a straight line to the target location, you can set the movement strategy to ``"direct"``.
.. code-block:: python
- pipette.move_to(plate.wells('A1'), strategy='direct')
+ pipette.move_to(plate.wells("A1"), strategy="direct")
.. note::
- Moving with ``strategy='direct'`` will run the risk of colliding with things on your deck. Be very careful when using this option.
+ Moving with ``strategy="direct"`` will run the risk of colliding with things on your deck. Be very careful when using this option.
-Usually the ``strategy='direct'`` option is useful when moving inside of a well. Take a look at the below sequence of movements, which first move the head to a well, and use 'direct' movements inside that well, then finally move on to a different well.
+Usually the ``strategy="direct"`` option is useful when moving inside of a well. Take a look at the below sequence of movements, which first move the head to a well, and use "direct" movements inside that well, then finally move on to a different well.
.. code-block:: python
- pipette.move_to(plate.wells('A1'))
- pipette.move_to(plate.wells('A1').bottom(1), strategy='direct')
- pipette.move_to(plate.wells('A1').top(-2), strategy='direct')
- pipette.move_to(plate.wells('A1'))
+ pipette.move_to(plate.wells("A1"))
+ pipette.move_to(plate.wells("A1").bottom(1), strategy="direct")
+ pipette.move_to(plate.wells("A1").top(-2), strategy="direct")
+ pipette.move_to(plate.wells("A1"))
Delay
=====
diff --git a/api/docs/v1/complex_commands.rst b/api/docs/v1/complex_commands.rst
index 7c30bffe51a..1106408384d 100644
--- a/api/docs/v1/complex_commands.rst
+++ b/api/docs/v1/complex_commands.rst
@@ -12,12 +12,12 @@ The examples below will use the following set-up:
from opentrons import robot, labware, instruments
- plate = labware.load('96-flat', '1')
+ plate = labware.load("96-flat", "1")
- tiprack = labware.load('opentrons_96_tiprack_300ul', '2')
+ tiprack = labware.load("opentrons_96_tiprack_300ul", "2")
pipette = instruments.P300_Single(
- mount='left',
+ mount="left",
tip_racks=[tiprack])
You could simulate the protocol using our protocol simulator, which can be installed by following the instructions `here. `_
@@ -33,11 +33,11 @@ For transferring with a multi-channel, please refer to the :ref:`multi-channel-l
Basic
-----
-The example below will transfer 100 uL from well ``'A1'`` to well ``'B1'``, automatically picking up a new tip and then disposing it when finished.
+The example below will transfer 100 uL from well ``"A1"`` to well ``"B1"``, automatically picking up a new tip and then disposing of it when finished.
.. code-block:: python
- pipette.transfer(100, plate.wells('A1'), plate.wells('B1'))
+ pipette.transfer(100, plate.wells("A1"), plate.wells("B1"))
Transfer commands will automatically create entire series of ``aspirate()``, ``dispense()``, and other ``Pipette`` commands.
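For the single transfer above, the generated series is roughly equivalent to the following atomic commands (a sketch assuming the default tip behavior of picking up one tip and trashing it at the end):

.. code-block:: python

    pipette.pick_up_tip()
    pipette.aspirate(100, plate.wells("A1"))
    pipette.dispense(100, plate.wells("B1"))
    pipette.drop_tip()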
@@ -49,7 +49,7 @@ Volumes larger than the pipette's ``max_volume`` will automatically divide into
.. code-block:: python
- pipette.transfer(700, plate.wells('A2'), plate.wells('B2'))
+ pipette.transfer(700, plate.wells("A2"), plate.wells("B2"))
will have the steps...
@@ -72,7 +72,7 @@ Transfer commands are most useful when moving liquid between multiple wells.
.. code-block:: python
- pipette.transfer(100, plate.cols('1'), plate.cols('2'))
+ pipette.transfer(100, plate.cols("1"), plate.cols("2"))
will have the steps...
@@ -105,7 +105,7 @@ You can transfer from a single source to multiple destinations, and the other wa
.. code-block:: python
- pipette.transfer(100, plate.wells('A1'), plate.cols('2'))
+ pipette.transfer(100, plate.wells("A1"), plate.cols("2"))
will have the steps...
@@ -141,8 +141,8 @@ What happens if, for example, you tell your pipette to transfer from 2 source we
pipette.transfer(
100,
- plate.wells('A1', 'A2'),
- plate.wells('B1', 'B2', 'B3', 'B4'))
+ plate.wells("A1", "A2"),
+ plate.wells("B1", "B2", "B3", "B4"))
will have the steps...
@@ -169,8 +169,8 @@ Instead of applying a single volume amount to all source/destination wells, you
pipette.transfer(
[20, 40, 60],
- plate.wells('A1'),
- plate.wells('B1', 'B2', 'B3'))
+ plate.wells("A1"),
+ plate.wells("B1", "B2", "B3"))
will have the steps...
@@ -196,8 +196,8 @@ Create a linear gradient between a start and ending volume (uL). The start and e
pipette.transfer(
(100, 30),
- plate.wells('A1'),
- plate.cols('2'))
+ plate.wells("A1"),
+ plate.cols("2"))
will have the steps...
@@ -238,7 +238,7 @@ Volumes going to the same destination well are combined within the same tip, so
.. code-block:: python
- pipette.consolidate(30, plate.cols('2'), plate.wells('A1'))
+ pipette.consolidate(30, plate.cols("2"), plate.wells("A1"))
will have the steps...
@@ -262,7 +262,7 @@ If there are multiple destination wells, the pipette will never combine their vo
.. code-block:: python
- pipette.consolidate(30, plate.cols('1'), plate.wells('A1', 'A2'))
+ pipette.consolidate(30, plate.cols("1"), plate.wells("A1", "A2"))
will have the steps...
@@ -291,7 +291,7 @@ Volumes from the same source well are combined within the same tip, so that one
.. code-block:: python
- pipette.distribute(55, plate.wells('A1'), plate.rows('A'))
+ pipette.distribute(55, plate.wells("A1"), plate.rows("A"))
will have the steps...
@@ -326,7 +326,7 @@ If there are multiple source wells, the pipette will never combine their volumes
.. code-block:: python
- pipette.distribute(30, plate.wells('A1', 'A2'), plate.rows('A'))
+ pipette.distribute(30, plate.wells("A1", "A2"), plate.rows("A"))
will have the steps...
@@ -362,8 +362,8 @@ When dispensing multiple times from the same tip, it is recommended to aspirate
pipette.distribute(
30,
- plate.wells('A1', 'A2'),
- plate.cols('2'),
+ plate.wells("A1", "A2"),
+ plate.cols("2"),
disposal_vol=10) # include extra liquid to make dispenses more accurate
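Here ``disposal_vol=10`` means each aspirate draws an extra 10 uL beyond the sum of the dispenses it will perform, and that extra liquid is blown out rather than delivered to a destination well. Sketching a single aspirate/dispense cycle only to illustrate the arithmetic (the exact grouping of destination wells is decided by the command itself):

.. code-block:: python

    pipette.pick_up_tip()
    pipette.aspirate(70, plate.wells("A1"))           # two 30 uL dispenses + 10 uL disposal volume
    pipette.dispense(30, plate.cols("2").wells("A"))
    pipette.dispense(30, plate.cols("2").wells("B"))
    pipette.blow_out()                                # discard the 10 uL disposal volume
    pipette.drop_tip()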
@@ -406,9 +406,9 @@ The pipette can optionally get a new tip at the beginning of each aspirate, to h
pipette.transfer(
100,
- plate.wells('A1', 'A2', 'A3'),
- plate.wells('B1', 'B2', 'B3'),
- new_tip='always') # always pick up a new tip
+ plate.wells("A1", "A2", "A3"),
+ plate.wells("B1", "B2", "B3"),
+ new_tip="always") # always pick up a new tip
will have the steps...
@@ -440,9 +440,9 @@ For scenarios where you instead are calling ``pick_up_tip()`` and ``drop_tip()``
...
pipette.transfer(
100,
- plate.wells('A1', 'A2', 'A3'),
- plate.wells('B1', 'B2', 'B3'),
- new_tip='never') # never pick up or drop a tip
+ plate.wells("A1", "A2", "A3"),
+ plate.wells("B1", "B2", "B3"),
+ new_tip="never") # never pick up or drop a tip
...
pipette.drop_tip()
@@ -473,9 +473,9 @@ The default behavior of complex commands is to use one tip:
pipette.transfer(
100,
- plate.wells('A1', 'A2', 'A3'),
- plate.wells('B1', 'B2', 'B3'),
- new_tip='once') # use one tip (default behavior)
+ plate.wells("A1", "A2", "A3"),
+ plate.wells("B1", "B2", "B3"),
+ new_tip="once") # use one tip (default behavior)
will have the steps...
@@ -500,8 +500,8 @@ By default, the transfer command will drop the pipette's tips in the trash conta
pipette.transfer(
100,
- plate.wells('A1'),
- plate.wells('B1'),
+ plate.wells("A1"),
+ plate.wells("B1"),
trash=False) # do not trash tip
@@ -525,8 +525,8 @@ A touch-tip can be performed after every aspirate and dispense by setting ``touc
pipette.transfer(
100,
- plate.wells('A1'),
- plate.wells('A2'),
+ plate.wells("A1"),
+ plate.wells("A2"),
touch_tip=True) # touch tip to each well's edge
@@ -551,8 +551,8 @@ A blow-out can be performed after every dispense that leaves the tip empty by se
pipette.transfer(
100,
- plate.wells('A1'),
- plate.wells('A2'),
+ plate.wells("A1"),
+ plate.wells("A2"),
blow_out=True) # blow out droplets when tip is empty
@@ -576,8 +576,8 @@ A mix can be performed before every aspirate by setting ``mix_before=``. The val
pipette.transfer(
100,
- plate.wells('A1'),
- plate.wells('A2'),
+ plate.wells("A1"),
+ plate.wells("A2"),
mix_before=(2, 50), # mix 2 times with 50uL before aspirating
mix_after=(3, 75)) # mix 3 times with 75uL after dispensing
@@ -613,8 +613,8 @@ An air gap can be performed after every aspirate by setting ``air_gap=int``, whe
pipette.transfer(
100,
- plate.wells('A1'),
- plate.wells('A2'),
+ plate.wells("A1"),
+ plate.wells("A2"),
air_gap=20) # add 20uL of air after each aspirate
@@ -648,14 +648,14 @@ We will be using the code-block below to perform our examples.
from opentrons import robot, labware, instruments
- plate_96 = labware.load('96-flat', '1')
- plate_384 = labware.load('384-plate', '3')
- trough = labware.load('trough-12row', '4')
+ plate_96 = labware.load("96-flat", "1")
+ plate_384 = labware.load("384-plate", "3")
+ trough = labware.load("trough-12row", "4")
- tiprack = labware.load('opentrons_96_tiprack_300ul', '2')
+ tiprack = labware.load("opentrons_96_tiprack_300ul", "2")
multi_pipette = instruments.P300_Multi(
- mount='left',
+ mount="left",
tip_racks=[tiprack])
Transfer in a 96 Well Plate
@@ -666,7 +666,7 @@ following:
.. code-block:: python
- multi_pipette.transfer(50, plate_96.columns('1'), plate_96.columns('2', to='12'))
+ multi_pipette.transfer(50, plate_96.columns("1"), plate_96.columns("2", to="12"))
will have the steps
@@ -702,7 +702,7 @@ or
.. code-block:: python
- multi_pipette.transfer(50, plate_96.wells('A1'), plate_96.columns('2', to='12'))
+ multi_pipette.transfer(50, plate_96.wells("A1"), plate_96.columns("2", to="12"))
will have the steps
@@ -740,14 +740,14 @@ will have the steps
.. code-block:: python
- multi_pipette.transfer(50, plate_96.wells('A1'), plate_96.wells())
+ multi_pipette.transfer(50, plate_96.wells("A1"), plate_96.wells())
The multi-channel would visit **every** well in the plate and dispense liquid
outside of the plate boundaries so be careful!
.. code-block:: python
- multi_pipette.transfer(50, plate_96.wells('A1'), plate_96.rows('A'))
+ multi_pipette.transfer(50, plate_96.wells("A1"), plate_96.rows("A"))
In this scenario, the multi-channel would only visit the first column of the plate.
@@ -755,8 +755,8 @@ will have the steps
Transfer in a 384 Well Plate
----------------------------
-In a 384 Well plate, there are 2 sets of 'columns' that the multi-channel can
-dispense into ['A1', 'C1'...'A2', 'C2'...] and ['B1', 'D1'...'B2', 'D2'].
+In a 384 Well plate, there are 2 sets of "columns" that the multi-channel can
+dispense into ["A1", "C1"..."A2", "C2"...] and ["B1", "D1"..."B2", "D2"].
If you want to transfer to a 384 well plate in order, you can do:
@@ -764,9 +764,9 @@ If you want to transfer to a 384 well plate in order, you can do:
alternating_wells = []
for row in plate_384.rows():
- alternating_wells.append(row.wells('A'))
- alternating_wells.append(row.wells('B'))
- multi_pipette.transfer(50, trough.wells('A1'), alternating_wells)
+ alternating_wells.append(row.wells("A"))
+ alternating_wells.append(row.wells("B"))
+ multi_pipette.transfer(50, trough.wells("A1"), alternating_wells)
or you can choose to dispense by row first, moving first through row A
@@ -774,5 +774,5 @@ and then through row B of the 384 well plate.
.. code-block:: python
- list_of_wells = [for well in plate_384.rows('A')] + [for well in plate_384.rows('B')]
- multi_pipette.transfer(50, trough.wells('A1'), list_of_wells)
+    list_of_wells = [well for well in plate_384.rows("A")] + [well for well in plate_384.rows("B")]
+ multi_pipette.transfer(50, trough.wells("A1"), list_of_wells)
diff --git a/api/docs/v1/examples.rst b/api/docs/v1/examples.rst
index 89c86401f60..d5775c41a8d 100644
--- a/api/docs/v1/examples.rst
+++ b/api/docs/v1/examples.rst
@@ -10,14 +10,14 @@ All examples on this page assume the following labware and pipette:
from opentrons import robot, labware, instruments
- plate = labware.load('96-flat', '1')
- trough = labware.load('trough-12row', '2')
+ plate = labware.load("96-flat", "1")
+ trough = labware.load("trough-12row", "2")
- tiprack_1 = labware.load('opentrons_96_tiprack_300ul', '3')
- tiprack_2 = labware.load('opentrons_96_tiprack_300ul', '4')
+ tiprack_1 = labware.load("opentrons_96_tiprack_300ul", "3")
+ tiprack_2 = labware.load("opentrons_96_tiprack_300ul", "4")
p300 = instruments.P300_Single(
- mount='left',
+ mount="left",
tip_racks=[tiprack_2])
******************************
@@ -30,15 +30,15 @@ Moving 100uL from one well to another:
.. code-block:: python
- p300.transfer(100, plate.wells('A1'), plate.wells('B1'))
+ p300.transfer(100, plate.wells("A1"), plate.wells("B1"))
If you prefer not to use the ``.transfer()`` command, the following pipette commands will create the same results:
.. code-block:: python
p300.pick_up_tip()
- p300.aspirate(100, plate.wells('A1'))
- p300.dispense(100, plate.wells('A1'))
+ p300.aspirate(100, plate.wells("A1"))
+    p300.dispense(100, plate.wells("B1"))
p300.return_tip()
******************************
@@ -74,7 +74,7 @@ The Opentrons liquid handler can do some things that a human cannot do with a pi
for well in trough.wells():
p300.aspirate(35, well).air_gap(10)
- p300.dispense(plate.wells('A1'))
+ p300.dispense(plate.wells("A1"))
p300.return_tip()
@@ -88,7 +88,7 @@ This example first spreads a dilutent to all wells of a plate. It then dilutes 8
.. code-block:: python
- p300.distribute(50, trough.wells('A12'), plate.wells()) # dilutent
+ p300.distribute(50, trough.wells("A12"), plate.wells()) # dilutent
# loop through each row
for i in range(8):
@@ -98,11 +98,11 @@ This example first spreads a dilutent to all wells of a plate. It then dilutes 8
row = plate.rows(i)
# transfer 30uL of source to first well in column
- p300.transfer(30, source, column.wells('1'))
+ p300.transfer(30, source, column.wells("1"))
# dilute the sample down the column
p300.transfer(
- 30, row.wells('1', to='11'), row.wells('2', to='12'),
+ 30, row.wells("1", to="11"), row.wells("2", to="12"),
mix_after=(3, 25))
******************************
@@ -131,7 +131,7 @@ Deposit various volumes of liquids into the same plate of wells, and automatical
89, 90, 91, 92, 93, 94, 95, 96
]
- p300.distribute(water_volumes, trough.wells('A12'), plate)
+ p300.distribute(water_volumes, trough.wells("A12"), plate)
The final volumes can also be read from a CSV, and opened by your protocol.
@@ -155,7 +155,7 @@ The final volumes can also be read from a CSV, and opened by your protocol.
# open file with absolute path (will be different depending on operating system)
# file paths on Windows look more like 'C:\\path\\to\\your\\csv_file.csv'
- with open('/path/to/your/csv_file.csv') as my_file:
+ with open("/path/to/your/csv_file.csv") as my_file:
# save all volumes from CSV file into a list
volumes = []
@@ -163,11 +163,11 @@ The final volumes can also be read from a CSV, and opened by your protocol.
# loop through each line (the plate's columns)
for l in my_file.read().splitlines():
# loop through each comma-separated value (the plate's rows)
- for v in l.split(','):
+ for v in l.split(","):
volumes.append(float(v)) # save the volume
# distribute those volumes to the plate
- p300.distribute(volumes, trough.wells('A1'), plate.wells())
+ p300.distribute(volumes, trough.wells("A1"), plate.wells())
@@ -183,16 +183,16 @@ This example shows how to deposit liquid around the edge of a well using
.. code-block:: python
p300.pick_up_tip()
- p300.aspirate(200, trough.wells('A1'))
+ p300.aspirate(200, trough.wells("A1"))
# rotate around the edge of the well, dropping 20ul at a time
theta = 0.0
while p300.current_volume > 0:
# we can move around a circle with radius (r) and theta (degrees)
- well_edge = plate.wells('B1').from_center(r=1.0, theta=theta, h=0.9)
+ well_edge = plate.wells("B1").from_center(r=1.0, theta=theta, h=0.9)
# combine a Well with a Vector in a tuple
- destination = (plate.wells('B1'), well_edge)
- p300.move_to(destination, strategy='direct') # move straight there
+ destination = (plate.wells("B1"), well_edge)
+ p300.move_to(destination, strategy="direct") # move straight there
p300.dispense(20)
theta += 0.314
diff --git a/api/docs/v1/hardware_control.rst b/api/docs/v1/hardware_control.rst
index 58ba29a6957..9781d6aaf49 100644
--- a/api/docs/v1/hardware_control.rst
+++ b/api/docs/v1/hardware_control.rst
@@ -22,16 +22,16 @@ The robot module can be thought of as the parent for all aspects of the Opentron
'''
from opentrons import robot, labware, instruments
- plate = labware.load('96-flat', 'B1', 'my-plate')
- tiprack = labware.load('opentrons_96_tiprack_300ul', 'A1', 'my-rack')
+ plate = labware.load("96-flat", "B1", "my-plate")
+ tiprack = labware.load("opentrons_96_tiprack_300ul", "A1", "my-rack")
- pipette = instruments.P300_Single(mount='left', tip_racks=[tiprack])
+ pipette = instruments.P300_Single(mount="left", tip_racks=[tiprack])
User-Specified Pause
====================
-This will pause your protocol at a specific step. You can resume by pressing 'resume' in your OT App.
+This will pause your protocol at a specific step. You can resume by pressing "resume" in your OT App.
.. code-block:: python
@@ -42,7 +42,7 @@ Head Speed
The speed of the robot's motors can be set using ``robot.head_speed()``. The units are all millimeters-per-second (mm/sec). The ``x``, ``y``, ``z``, ``a``, ``b``, ``c`` parameters set the maximum speed of the corresponding axis on Smoothie.
-'x': lateral motion, 'y': front to back motion, 'z': vertical motion of the left mount, 'a': vertical motion of the right mount, 'b': plunger motor for the left pipette, 'c': plunger motor for the right pipette.
+"x": lateral motion, "y": front to back motion, "z": vertical motion of the left mount, "a": vertical motion of the right mount, "b": plunger motor for the left pipette, "c": plunger motor for the right pipette.
The ``combined_speed`` parameter sets the speed across all axes to either the specified value or the axis max, whichever is lower. Defaults are specified by ``DEFAULT_MAX_SPEEDS`` in `robot_configs.py`__.
@@ -51,7 +51,7 @@ __ https://github.com/Opentrons/opentrons/blob/edge/api/src/opentrons/config/rob
.. code-block:: python
max_speed_per_axis = {
- 'x': 600, 'y': 400, 'z': 125, 'a': 125, 'b': 50, 'c': 50}
+ "x": 600, "y": 400, "z": 125, "a": 125, "b": 50, "c": 50}
robot.head_speed(
combined_speed=max(max_speed_per_axis.values()),
**max_speed_per_axis)
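If only an overall cap is needed, ``combined_speed`` can be passed on its own; each axis then moves at the lower of that value and its own maximum (the value below is illustrative):

.. code-block:: python

    robot.head_speed(combined_speed=100)  # cap every axis at 100 mm/sec, or its own max if lower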
@@ -65,7 +65,7 @@ You can `home` the robot by calling ``home()``. You can also specify axes. The r
.. code-block:: python
robot.home() # home the robot on all axes
- robot.home('z') # home the Z axis only
+ robot.home("z") # home the Z axis only
Commands
========
@@ -76,8 +76,8 @@ __ https://docs.python.org/3.5/tutorial/datastructures.html#more-on-lists
.. code-block:: python
- pipette.pick_up_tip(tiprack.wells('A1'))
- pipette.drop_tip(tiprack.wells('A1'))
+ pipette.pick_up_tip(tiprack.wells("A1"))
+ pipette.drop_tip(tiprack.wells("A1"))
for c in robot.commands():
print(c)
@@ -97,11 +97,11 @@ We can erase the robot command history by calling ``robot.clear_commands()``. An
.. code-block:: python
robot.clear_commands()
- pipette.pick_up_tip(tiprack['A1'])
- print('There is', len(robot.commands()), 'command')
+ pipette.pick_up_tip(tiprack["A1"])
+ print("There is", len(robot.commands()), "command")
robot.clear_commands()
- print('There are now', len(robot.commands()), 'commands')
+ print("There are now", len(robot.commands()), "commands")
will print out...
@@ -119,10 +119,10 @@ You can add a custom message to the list of command descriptions you see when ru
robot.clear_commands()
- pipette.pick_up_tip(tiprack['A1'])
+ pipette.pick_up_tip(tiprack["A1"])
robot.comment("Hello, just picked up tip A1")
- pipette.pick_up_tip(tiprack['A1'])
+ pipette.pick_up_tip(tiprack["A1"])
robot.comment("Goodbye, just dropped tip A1")
for c in robot.commands():
diff --git a/api/docs/v1/index.rst b/api/docs/v1/index.rst
index 0e62e948c7f..0c82b900185 100644
--- a/api/docs/v1/index.rst
+++ b/api/docs/v1/index.rst
@@ -36,7 +36,7 @@ Overview
How it Looks
++++++++++++
-The design goal of the Opentrons API is to make code readable and easy to understand. For example, below is a short set of instruction to transfer from well ``'A1'`` to well ``'B1'`` that even a computer could understand:
+The design goal of the Opentrons API is to make code readable and easy to understand. For example, below is a short set of instructions to transfer from well ``"A1"`` to well ``"B1"`` that even a computer could understand:
.. code-block:: none
@@ -44,12 +44,12 @@ The design goal of the Opentrons API is to make code readable and easy to unders
This protocol is by me; it’s called Opentrons Protocol Tutorial and is used for demonstrating the Opentrons API
- Add a 96 well plate, and place it in slot '2' of the robot deck
- Add a 200uL tip rack, and place it in slot '1' of the robot deck
+ Add a 96 well plate, and place it in slot "2" of the robot deck
+ Add a 200uL tip rack, and place it in slot "1" of the robot deck
Add a single-channel 300uL pipette to the left mount, and tell it to use that tip rack
- Transfer 100uL from the plate's 'A1' well to it's 'B2' well
+    Transfer 100uL from the plate's "A1" well to its "B2" well
If we were to rewrite this with the Opentrons API, it would look like the following:
@@ -60,20 +60,20 @@ If we were to rewrite this with the Opentrons API, it would look like the follow
# metadata
metadata = {
- 'protocolName': 'My Protocol',
- 'author': 'Name ',
- 'description': 'Simple protocol to get started using OT2',
+ "protocolName": "My Protocol",
+ "author": "Name ",
+ "description": "Simple protocol to get started using OT2",
}
# labware
- plate = labware.load('96-flat', '2')
- tiprack = labware.load('opentrons_96_tiprack_300ul', '1')
+ plate = labware.load("96-flat", "2")
+ tiprack = labware.load("opentrons_96_tiprack_300ul", "1")
# pipettes
- pipette = instruments.P300_Single(mount='left', tip_racks=[tiprack])
+ pipette = instruments.P300_Single(mount="left", tip_racks=[tiprack])
# commands
- pipette.transfer(100, plate.wells('A1'), plate.wells('B2'))
+ pipette.transfer(100, plate.wells("A1"), plate.wells("B2"))
How it's Organized
@@ -118,21 +118,21 @@ Labware
While the imports section is usually the same across protocols, the labware section is different depending on the tip racks, well plates, troughs, or tubes you're using on the robot.
-Each labware is given a type (ex: ``'96-flat'``), and the slot on the robot it will be placed (ex: ``'2'``).
+Each labware is given a type (ex: ``"96-flat"``), and the slot on the robot it will be placed (ex: ``"2"``).
From the example above, the "labware" section looked like:
.. code-block:: python
- plate = labware.load('96-flat', '2')
- tiprack = labware.load('opentrons_96_tiprack_300ul', '1')
+ plate = labware.load("96-flat", "2")
+ tiprack = labware.load("opentrons_96_tiprack_300ul", "1")
.. _index-pipettes:
Pipettes
^^^^^^^^
-Next, pipettes are created and attached to a specific mount on the OT-2 (``'left'`` or ``'right'``).
+Next, pipettes are created and attached to a specific mount on the OT-2 (``"left"`` or ``"right"``).
There are other parameters for pipettes, but the most important are the tip rack(s) it will use during the protocol.
@@ -140,7 +140,7 @@ From the example above, the "pipettes" section looked like:
.. code-block:: python
- pipette = instruments.P300_Single(mount='left', tip_racks=[tiprack])
+ pipette = instruments.P300_Single(mount="left", tip_racks=[tiprack])
.. _index-commands:
@@ -155,7 +155,7 @@ From the example above, the "commands" section looked like:
.. code-block:: python
- pipette.transfer(100, plate.wells('A1'), plate.wells('B1'))
+ pipette.transfer(100, plate.wells("A1"), plate.wells("B1"))
******************
diff --git a/api/docs/v1/labware.rst b/api/docs/v1/labware.rst
index 0d45e079ebd..bdc4ab06245 100644
--- a/api/docs/v1/labware.rst
+++ b/api/docs/v1/labware.rst
@@ -65,7 +65,7 @@ To tell the robot what labware will be on the deck for your protocol, use
# ...
- tiprack = labware.load('opentrons_96_tiprack_300ul', slot='1')
+ tiprack = labware.load("opentrons_96_tiprack_300ul", slot="1")
**********************
@@ -89,16 +89,16 @@ labware in a certain slot.
.. code-block:: python
- my_labware = labware.load('usascientific_12_reservoir_22ml', slot='1')
+ my_labware = labware.load("usascientific_12_reservoir_22ml", slot="1")
A third optional argument can be used to give a labware a nickname for display
in the Opentrons App.
.. code-block:: python
- my_labware = labware.load('usascientific_12_reservoir_22ml',
- slot='2',
- label='any-name-you-want')
+ my_labware = labware.load("usascientific_12_reservoir_22ml",
+ slot="2",
+ label="any-name-you-want")
Sometimes, you may need to place a labware on top of something else on the
@@ -108,9 +108,9 @@ deck, like modules. For this, you should use the ``share`` parameter.
from opentrons import labware, modules
- td = modules.load('tempdeck', slot='1')
- plate = labware.load('opentrons_96_aluminumblock_biorad_wellplate_200ul',
- slot='1',
+ td = modules.load("tempdeck", slot="1")
+ plate = labware.load("opentrons_96_aluminumblock_biorad_wellplate_200ul",
+ slot="1",
share=True)
To specify the version of the labware definition to use, you can use the ``version``
@@ -120,16 +120,16 @@ parameter:
from opentrons import labware
block1 = labware.load(
- 'opentrons_96_aluminumblock_biorad_wellplate_200ul',
- slot='1',
+ "opentrons_96_aluminumblock_biorad_wellplate_200ul",
+ slot="1",
version=2) # version 2 of the aluminum block definition
block2 = labware.load(
- 'opentrons_96_aluminumblock_biorad_wellplate_200ul',
- slot='2',
+ "opentrons_96_aluminumblock_biorad_wellplate_200ul",
+ slot="2",
version=1) # version 1 of the aluminum block definition
block3 = labware.load(
- 'opentrons_96_aluminumblock_biorad_wellplate_200ul',
- slot='2') # if you don't specify version, version 1 is used
+ "opentrons_96_aluminumblock_biorad_wellplate_200ul",
+ slot="2") # if you don't specify version, version 1 is used
Create
@@ -147,7 +147,7 @@ regularly-spaced columns and rows.
.. code-block:: python
- custom_plate_name = 'custom_18_wellplate_200ul'
+ custom_plate_name = "custom_18_wellplate_200ul"
if custom_plate_name not in labware.list():
labware.create(
@@ -158,7 +158,7 @@ regularly-spaced columns and rows.
depth=10, # depth (mm) of each well
volume=200) # volume (µL) of each well
- custom_plate = labware.load(custom_plate_name, slot='3')
+ custom_plate = labware.load(custom_plate_name, slot="3")
for well in custom_plate.wells():
print(well)
@@ -202,7 +202,7 @@ If you would like to delete a labware you have already added to the database
from opentrons.data_storage import database
- database.delete_container('custom_18_wellplate_200ul')
+ database.delete_container("custom_18_wellplate_200ul")
.. Note::
There is some specialty labware that will require you to specify the
@@ -239,8 +239,8 @@ transfer liquids to and from.
The OT-2 deck and labware are all set up with the same coordinate system
-- Lettered rows ``['A']-['END']``
-- Numbered columns ``['1']-['END']``.
+- Lettered rows ``["A"]-["END"]``
+- Numbered columns ``["1"]-["END"]``.
.. image:: ../img/well_iteration/Well_Iteration.png
@@ -251,7 +251,7 @@ The OT-2 deck and labware are all set up with the same coordinate system
'''
from opentrons import labware
- plate = labware.load('corning_24_wellplate_3.4ml_flat', slot='1')
+ plate = labware.load("corning_24_wellplate_3.4ml_flat", slot="1")
Wells by Name
-------------
@@ -262,8 +262,8 @@ well as an argument, and will return the well at that location.
.. code-block:: python
- a1 = plate.wells('A1')
- d6 = plate.wells('D6')
+ a1 = plate.wells("A1")
+ d6 = plate.wells("D6")
Wells by Index
--------------
@@ -289,8 +289,8 @@ Columns and Rows
A labware's wells are organized within a series of columns and rows, which are
also labelled on standard labware. In the API, rows are given letter names
-(``'A'`` through ``'D'`` for example) and go left to right, while columns are
-given numbered names (``'1'`` through ``'6'`` for example) and go from front to
+(``"A"`` through ``"D"`` for example) and go left to right, while columns are
+given numbered names (``"1"`` through ``"6"`` for example) and go from front to
back.
You can access a specific row or column by using the ``rows()`` and
@@ -299,8 +299,8 @@ or column.
.. code-block:: python
- row = plate.rows('A')
- column = plate.columns('1')
+ row = plate.rows("A")
+ column = plate.columns("1")
print('Column "1" has', len(column), 'wells')
print('Row "A" has', len(row), 'wells')
@@ -314,16 +314,16 @@ will print out...
The ``rows()`` or ``cols()`` methods can be used in combination with the
``wells()`` method to access wells within that row or column. In the example
-below, both lines refer to well ``'A1'``.
+below, both lines refer to well ``"A1"``.
.. code-block:: python
- plate.cols('1').wells('A')
- plate.rows('A').wells('1')
+ plate.cols("1").wells("A")
+ plate.rows("A").wells("1")
.. Tip::
The example above works but is a little convoluted. If you can, always get
- individual wells like A1 with ``wells('A1')`` or ``wells(0)``
+ individual wells like A1 with ``wells("A1")`` or ``wells(0)``
Multiple Wells
@@ -344,7 +344,7 @@ liquid's source and/or destination. Or, we can get a group of wells and loop
'''
from opentrons import labware
- plate = labware.load('corning_24_wellplate_3.4ml_flat', slot='1')
+ plate = labware.load("corning_24_wellplate_3.4ml_flat", slot="1")
Wells
-----
Here is an example of accessing a list of wells, each specified by name:
.. code-block:: python
- w = plate.wells('A1', 'B2', 'C3', 'D4')
+ w = plate.wells("A1", "B2", "C3", "D4")
print(w)
@@ -371,7 +371,7 @@ iterated through:
.. code-block:: python
- for w in plate.wells('A1', 'B2', 'C3', 'D4'):
+ for w in plate.wells("A1", "B2", "C3", "D4"):
print(w)
will print out...
@@ -392,7 +392,7 @@ the ``to=`` argument is the last well.
.. code-block:: python
- for w in plate.wells('A1', to='D1'):
+ for w in plate.wells("A1", to="D1"):
print(w)
will print out...
@@ -410,7 +410,7 @@ starting position is allowed:
.. code-block:: python
- for w in plate.wells('D1', to='A1'):
+ for w in plate.wells("D1", to="A1"):
print(w)
will print out...
@@ -427,11 +427,11 @@ Wells Length
Another way you can create a list of wells is by specifying the length of the
well list you need, including the starting well. The example below will
-return 4 wells, starting at well ``'A1'``:
+return 4 wells, starting at well ``"A1"``:
.. code-block:: python
- for w in plate.wells('A1', length=4):
+ for w in plate.wells("A1", length=4):
print(w)
will print out...
@@ -453,7 +453,7 @@ Here is an example of iterating through rows:
.. code-block:: python
- for r in plate.rows('A', length=3):
+ for r in plate.rows("A", length=3):
print(r)
will print out...
@@ -468,7 +468,7 @@ And here is an example of iterating through columns:
.. code-block:: python
- for c in plate.cols('1', to='6'):
+ for c in plate.cols("1", to="6"):
print(c)
will print out...
@@ -511,7 +511,7 @@ The API's labware are also prepared to take string values for the slice's
.. code-block:: python
- for w in plate['A1':'A2':2]:
+ for w in plate["A1":"A2":2]:
print(w)
will print out...
@@ -523,7 +523,7 @@ will print out...
.. code-block:: python
- for w in plate.rows['B']['1'::2]:
+ for w in plate.rows["B"]["1"::2]:
print(w)
will print out...
diff --git a/api/docs/v1/modules.rst b/api/docs/v1/modules.rst
index 1090287b781..23749419c91 100644
--- a/api/docs/v1/modules.rst
+++ b/api/docs/v1/modules.rst
@@ -18,7 +18,7 @@ within a protocol. To do this, you call:
from opentrons import modules
- module = modules.load('Module Name', slot)
+ module = modules.load("Module Name", slot)
Above, `Module Name` represents either `tempdeck` or `magdeck`.
@@ -29,7 +29,7 @@ To add a labware onto a given module, you will need to use the `share=True` call
from opentrons import labware
- labware = labware.load('96-flat', slot, share=True)
+ labware = labware.load("96-flat", slot, share=True)
Where slot is the same slot in which you loaded your module.
@@ -49,7 +49,7 @@ be done like the following:
robot.connect()
robot.discover_modules()
- module = modules.load('Module Name', slot)
+ module = modules.load("Module Name", slot)
... etc
Checking the status of your Module
@@ -60,7 +60,7 @@ Both modules have the ability to check what state they are currently in. To do t
from opentrons import modules
- module = modules.load('Module Name', slot)
+ module = modules.load("Module Name", slot)
status = module.status
For the temperature module this will return a string stating whether it's `heating`, `cooling`, `holding at target` or `idle`.
@@ -84,8 +84,8 @@ To set the temperature module to a given temperature in degrees celsius do the f
from opentrons import modules, labware
- module = modules.load('tempdeck', slot)
- plate = labware.load('96-flat', slot, share=True)
+ module = modules.load("tempdeck", slot)
+ plate = labware.load("96-flat", slot, share=True)
module.set_temperature(4)
@@ -99,8 +99,8 @@ This function will pause your protocol until your target temperature is reached.
from opentrons import modules, labware
- module = modules.load('tempdeck', slot)
- plate = labware.load('96-flat', slot, share=True)
+ module = modules.load("tempdeck", slot)
+ plate = labware.load("96-flat", slot, share=True)
module.set_temperature(4)
module.wait_for_temp()
@@ -120,8 +120,8 @@ You can read the current real-time temperature of the module by the following:
from opentrons import modules, labware
- module = modules.load('tempdeck', slot)
- plate = labware.load('96-flat', slot, share=True)
+ module = modules.load("tempdeck", slot)
+ plate = labware.load("96-flat", slot, share=True)
temperature = module.temperature
@@ -135,8 +135,8 @@ We can read the target temperature of the module by the following:
from opentrons import modules, labware
- module = modules.load('tempdeck', slot)
- plate = labware.load('96-flat', slot, share=True)
+ module = modules.load("tempdeck", slot)
+ plate = labware.load("96-flat", slot, share=True)
temperature = module.target
@@ -152,8 +152,8 @@ or cooling phase again.
from opentrons import modules, labware
- module = modules.load('tempdeck', slot)
- plate = labware.load('96-flat', slot, share=True)
+ module = modules.load("tempdeck", slot)
+ plate = labware.load("96-flat", slot, share=True)
module.set_temperature(4)
module.wait_for_temp()
@@ -192,8 +192,8 @@ Engage
from opentrons import modules, labware
- module = modules.load('magdeck', slot)
- plate = labware.load('biorad-hardshell-96-PCR', slot, share=True)
+ module = modules.load("magdeck", slot)
+ plate = labware.load("biorad-hardshell-96-PCR", slot, share=True)
module.engage()
@@ -213,8 +213,8 @@ You can also use a custom height parameter with engage():
from opentrons import modules, labware
- module = modules.load('magdeck', slot)
- plate = labware.load('96-deep-well', slot, share=True)
+ module = modules.load("magdeck", slot)
+ plate = labware.load("96-deep-well", slot, share=True)
module.engage(height=12)
@@ -232,8 +232,8 @@ Disengage
from opentrons import modules, labware
- module = modules.load('magdeck', slot)
- plate = labware.load('biorad-hardshell-96-PCR', slot, share=True)
+ module = modules.load("magdeck", slot)
+ plate = labware.load("biorad-hardshell-96-PCR", slot, share=True)
module.engage()
## OTHER PROTOCOL ACTIONS
diff --git a/api/docs/v1/pipettes.rst b/api/docs/v1/pipettes.rst
index 1e4d59eaf2d..72dc73d349f 100644
--- a/api/docs/v1/pipettes.rst
+++ b/api/docs/v1/pipettes.rst
@@ -36,12 +36,12 @@ They are as follows:
For every pipette type you are using in a protocol, you must use one of the
model names specified above and call it out as ``instruments.(Model Name)``.
-You must also specify a mount. The mount can be either ``'left'`` or ``'right'``.
+You must also specify a mount. The mount can be either ``"left"`` or ``"right"``.
In this example, we are using a Single-Channel 300uL pipette.
.. code-block:: python
- pipette = instruments.P300_Single(mount='left')
+ pipette = instruments.P300_Single(mount="left")
Pipette GEN2 Backwards Compatibility
====================================
@@ -72,7 +72,7 @@ The speeds at which the pipette will aspirate and dispense can be set through ``
.. code-block:: python
pipette = instruments.P300_Single(
- mount='right',
+ mount="right",
aspirate_flow_rate=200,
dispense_flow_rate=600,
blow_out_flow_rate=600)
@@ -89,7 +89,7 @@ varying defaults depending on the model.
.. code-block:: python
pipette = instruments.P10_Single(
- mount='right',
+ mount="right",
min_volume=2,
max_volume=8)
diff --git a/api/docs/v1/writing.rst b/api/docs/v1/writing.rst
index 675ccbaf6e1..df2ac455f52 100644
--- a/api/docs/v1/writing.rst
+++ b/api/docs/v1/writing.rst
@@ -115,7 +115,7 @@ This also provides an entrypoint to use the Opentrons simulation package from ot
from opentrons.simulate import simulate, format_runlog
# read the file
- protocol_file = open('/path/to/protocol.py')
+ protocol_file = open("/path/to/protocol.py")
# simulate() the protocol, keeping the runlog
runlog, _bundle = simulate(protocol_file)
# print the runlog
diff --git a/api/docs/v2/adapting_ot2_flex.rst b/api/docs/v2/adapting_ot2_flex.rst
index c92534c18c7..6b788273d9b 100644
--- a/api/docs/v2/adapting_ot2_flex.rst
+++ b/api/docs/v2/adapting_ot2_flex.rst
@@ -20,7 +20,7 @@ Flex requires you to specify an ``apiLevel`` of 2.15 or higher. If your OT-2 pro
.. note::
Consult the :ref:`list of changes in API versions ` to see what effect raising the ``apiLevel`` will have. If you increased it by multiple minor versions to get your protocol running on Flex, make sure that your protocol isn't using removed commands or commands whose behavior has changed in a way that may affect your scientific results.
-You also need to specify ``'robotType': 'Flex'``. If you omit ``robotType`` in the ``requirements`` dictionary, the API will assume the protocol is designed for the OT-2.
+You also need to specify ``"robotType": "Flex"``. If you omit ``robotType`` in the ``requirements`` dictionary, the API will assume the protocol is designed for the OT-2.
.. tabs::
@@ -34,7 +34,7 @@ You also need to specify ``'robotType': 'Flex'``. If you omit ``robotType`` in t
metadata = {
"protocolName": "My Protocol",
"description": "This protocol uses the OT-2",
- "apiLevel": "2.14"
+ "apiLevel": "|apiLevel|"
}
.. tab:: Updated Flex code
@@ -84,6 +84,13 @@ This example converts OT-2 code that uses a P300 Single-Channel GEN2 pipette and
"flex_1channel_1000", "left", tip_racks[tips]
)
+Trash Container
+===============
+
+OT-2 protocols always have a :py:obj:`.fixed_trash` in slot 12. In Flex protocols specifying API version 2.16 or later, you need to :ref:`load a trash bin `. Put it in slot A3 to match the physical position of the OT-2 fixed trash::
+
+ trash = protocol.load_trash_bin("A3")
+
Deck Slot Labels
================
diff --git a/api/docs/v2/basic_commands/liquids.rst b/api/docs/v2/basic_commands/liquids.rst
index 00e81449a40..06f5dfd3bf8 100644
--- a/api/docs/v2/basic_commands/liquids.rst
+++ b/api/docs/v2/basic_commands/liquids.rst
@@ -18,14 +18,14 @@ To draw liquid up into a pipette tip, call the :py:meth:`.InstrumentContext.aspi
.. code-block:: python
pipette.pick_up_tip()
- pipette.aspirate(200, plate['A1'])
+ pipette.aspirate(200, plate["A1"])
-If the pipette doesn't move, you can specify an additional aspiration action without including a location. To demonstrate, this code snippet pauses the protocol, automatically resumes it, and aspirates a second time from ``plate['A1']``).
+If the pipette doesn't move, you can specify an additional aspiration action without including a location. To demonstrate, this code snippet pauses the protocol, automatically resumes it, and aspirates a second time from ``plate["A1"]``.
.. code-block:: python
pipette.pick_up_tip()
- pipette.aspirate(200, plate['A1'])
+ pipette.aspirate(200, plate["A1"])
protocol.delay(seconds=5) # pause for 5 seconds
pipette.aspirate(100) # aspirate 100 µL at current position
@@ -36,16 +36,16 @@ Aspirate by Well or Location
The :py:meth:`~.InstrumentContext.aspirate` method includes a ``location`` parameter that accepts either a :py:class:`.Well` or a :py:class:`~.types.Location`.
-If you specify a well, like ``plate['A1']``, the pipette will aspirate from a default position 1 mm above the bottom center of that well. To change the default clearance, first set the ``aspirate`` attribute of :py:obj:`.well_bottom_clearance`::
+If you specify a well, like ``plate["A1"]``, the pipette will aspirate from a default position 1 mm above the bottom center of that well. To change the default clearance, first set the ``aspirate`` attribute of :py:obj:`.well_bottom_clearance`::
pipette.pick_up_tip
pipette.well_bottom_clearance.aspirate = 2 # tip is 2 mm above well bottom
- pipette.aspirate(200, plate['A1'])
+ pipette.aspirate(200, plate["A1"])
You can also aspirate from a location along the center vertical axis within a well using the :py:meth:`.Well.top` and :py:meth:`.Well.bottom` methods. These methods move the pipette to a specified distance relative to the top or bottom center of a well::
pipette.pick_up_tip()
- depth = plate['A1'].bottom(z=2) # tip is 2 mm above well bottom
+ depth = plate["A1"].bottom(z=2) # tip is 2 mm above well bottom
pipette.aspirate(200, depth)
See also:
@@ -60,7 +60,7 @@ Aspiration Flow Rates
Flex and OT-2 pipettes aspirate at :ref:`default flow rates ` measured in µL/s. Specifying the ``rate`` parameter multiplies the flow rate by that value. As a best practice, don't set the flow rate higher than 3x the default. For example, this code causes the pipette to aspirate at twice its normal rate::
- pipette.aspirate(200, plate['A1'], rate=2.0)
+ pipette.aspirate(200, plate["A1"], rate=2.0)
.. versionadded:: 2.0
@@ -73,13 +73,20 @@ To dispense liquid from a pipette tip, call the :py:meth:`.InstrumentContext.dis
.. code-block:: python
- pipette.dispense(200, plate['B1'])
+ pipette.dispense(200, plate["B1"])
+
+.. note::
+ In API version 2.16 and earlier, you could pass a ``volume`` argument to ``dispense()`` greater than what was aspirated into the pipette. In this case, the API would ignore ``volume`` and dispense the pipette's :py:obj:`~.InstrumentContext.current_volume`. The robot *would not* move the plunger lower as a result.
+
+ In version 2.17 and later, passing such values raises an error.
+
+ To move the plunger a small extra amount, add a :ref:`push out `. Or to move it a large amount, use :ref:`blow out `.
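+
+   For example, under these assumptions (a tip attached and ``plate`` already loaded), this sketch raises an error in API version 2.17 and later because it tries to dispense more liquid than the tip holds::
+
+      pipette.aspirate(100, plate["A1"])
+      pipette.dispense(150, plate["B1"])  # error in 2.17 and later; dispenses only 100 µL in 2.16 and earlier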
If the pipette doesn’t move, you can specify an additional dispense action without including a location. To demonstrate, this code snippet pauses the protocol, automatically resumes it, and dispenses a second time into well B1.
.. code-block:: python
- pipette.dispense(100, plate['B1'])
+ pipette.dispense(100, plate["B1"])
protocol.delay(seconds=5) # pause for 5 seconds
pipette.dispense(100) # dispense 100 µL at current position
@@ -88,14 +95,14 @@ Dispense by Well or Location
The :py:meth:`~.InstrumentContext.dispense` method includes a ``location`` parameter that accepts either a :py:class:`.Well` or a :py:class:`~.types.Location`.
-If you specify a well, like ``plate['B1']``, the pipette will dispense from a default position 1 mm above the bottom center of that well. To change the default clearance, you would call :py:obj:`.well_bottom_clearance`::
+If you specify a well, like ``plate["B1"]``, the pipette will dispense from a default position 1 mm above the bottom center of that well. To change the default clearance, you would call :py:obj:`.well_bottom_clearance`::
pipette.well_bottom_clearance.dispense=2 # tip is 2 mm above well bottom
- pipette.dispense(200, plate['B1'])
+ pipette.dispense(200, plate["B1"])
You can also dispense from a location along the center vertical axis within a well using the :py:meth:`.Well.top` and :py:meth:`.Well.bottom` methods. These methods move the pipette to a specified distance relative to the top or bottom center of a well::
- depth = plate['B1'].bottom(z=2) # tip is 2 mm above well bottom
+ depth = plate["B1"].bottom(z=2) # tip is 2 mm above well bottom
pipette.dispense(200, depth)
See also:
@@ -109,12 +116,26 @@ Dispense Flow Rates
Flex and OT-2 pipettes dispense at :ref:`default flow rates ` measured in µL/s. Adding a number to the ``rate`` parameter multiplies the flow rate by that value. As a best practice, don't set the flow rate higher than 3x the default. For example, this code causes the pipette to dispense at twice its normal rate::
- pipette.dispense(200, plate['B1'], rate=2.0)
-
-.. Removing the 2 notes here from the original. Covered by new revisions.
+ pipette.dispense(200, plate["B1"], rate=2.0)
.. versionadded:: 2.0
+.. _push-out-dispense:
+
+Push Out After Dispense
+-----------------------
+
+The optional ``push_out`` parameter of ``dispense()`` helps ensure all liquid leaves the tip. Use ``push_out`` for applications that require moving the pipette plunger lower than the default, without performing a full :ref:`blow out `.
+
+For example, this dispense action moves the plunger the equivalent of an additional 5 µL beyond where it would stop if ``push_out`` was set to zero or omitted::
+
+ pipette.pick_up_tip()
+ pipette.aspirate(100, plate["A1"])
+ pipette.dispense(100, plate["B1"], push_out=5)
+ pipette.drop_tip()
+
+.. versionadded:: 2.15
+
.. _new-blow-out:
.. _blow-out:
@@ -128,13 +149,15 @@ To blow an extra amount of air through the pipette's tip, call the :py:meth:`.In
You can also specify a particular well as the blowout location::
- pipette.blow_out(plate['B1'])
+ pipette.blow_out(plate["B1"])
-Many protocols use the trash bin for blowing out the pipette. You can specify the trash bin as the blowout location by using the :py:obj:`.ProtocolContext.fixed_trash` property::
+Many protocols use a trash container for blowing out the pipette. You can specify the pipette's current trash container as the blowout location by using the :py:obj:`.InstrumentContext.trash_container` property::
- pipette.blow_out(protocol.fixed_trash['A1'])
+ pipette.blow_out(pipette.trash_container)
.. versionadded:: 2.0
+.. versionchanged:: 2.16
+ Added support for ``TrashBin`` and ``WasteChute`` locations.
.. _touch-tip:
@@ -152,7 +175,7 @@ These optional location arguments give you control over where the tip will touch
This example demonstrates touching the tip in a specific well::
- pipette.touch_tip(plate['B1'])
+ pipette.touch_tip(plate["B1"])
This example uses an offset to set the touch tip location 2 mm below the top of the current well::
@@ -160,7 +183,7 @@ This example uses an offset to set the touch tip location 2mm below the top of t
This example moves the pipette to 75% of the well's total radius and 2 mm below the top of the well::
- pipette.touch_tip(plate['B1'],
+ pipette.touch_tip(plate["B1"],
radius=0.75,
v_offset=-2)
@@ -178,7 +201,7 @@ Touch speed controls how fast the pipette moves in mm/s during a touch tip step.
This example specifies a well location and sets the speed to 20 mm/s::
- pipette.touch_tip(plate['B1'], speed=20)
+ pipette.touch_tip(plate["B1"], speed=20)
This example uses the current well and sets the speed to 80 mm/s::
@@ -202,7 +225,7 @@ This example draws 100 µL from the current well and mixes it three times::
This example draws 100 µL from well B1 and mixes it three times::
- pipette.mix(3, 100, plate['B1'])
+ pipette.mix(3, 100, plate["B1"])
This example draws an amount equal to the pipette's maximum rated volume and mixes it three times::
diff --git a/api/docs/v2/basic_commands/pipette_tips.rst b/api/docs/v2/basic_commands/pipette_tips.rst
index c3fb7de569d..f4871540391 100644
--- a/api/docs/v2/basic_commands/pipette_tips.rst
+++ b/api/docs/v2/basic_commands/pipette_tips.rst
@@ -28,9 +28,9 @@ This simple statement works because the variable ``tiprack_1`` in the sample pro
If you omit the ``tip_rack`` argument from the ``pipette`` variable, the API will raise an error. You must pass in the tip rack's location to ``pick_up_tip`` like this::
- pipette.pick_up_tip(tiprack_1['A1'])
+ pipette.pick_up_tip(tiprack_1["A1"])
pipette.drop_tip()
- pipette.pick_up_tip(tiprack_1['B1'])
+ pipette.pick_up_tip(tiprack_1["B1"])
If coding the location of each tip seems inefficient or tedious, try using a ``for`` loop to automate a sequential tip pick up process. When using a loop, the API keeps track of tips and manages tip pickup for you. But ``pick_up_tip`` is still a powerful feature. It gives you direct control over tip use when that’s important in your protocol.
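+
+For example, here is a minimal sketch, assuming ``pipette`` was loaded with ``tip_racks=[tiprack_1]`` and ``plate`` is a loaded well plate. On each pass through the loop, the API picks up the next unused tip for you::
+
+    for well in ["A1", "B1", "C1"]:
+        pipette.pick_up_tip()  # next available tip, tracked by the API
+        pipette.aspirate(100, plate[well])
+        pipette.dispense(100, plate["D1"])
+        pipette.drop_tip()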
@@ -77,7 +77,7 @@ For a more advanced "real-world" example, review the :ref:`off-deck location pro
Dropping a Tip
==============
-To drop a tip in the trash bin, call the :py:meth:`~.InstrumentContext.drop_tip` method with no arguments::
+To drop a tip in the pipette's trash container, call the :py:meth:`~.InstrumentContext.drop_tip` method with no arguments::
pipette.pick_up_tip()
@@ -86,7 +86,7 @@ You can also specify where to drop the tip by passing in a location. For example
pipette.pick_up_tip() # picks up tip from rack location A1
pipette.drop_tip() # drops tip in trash bin
pipette.pick_up_tip() # picks up tip from rack location B1
- pipette.drop_tip(tiprack['A1']) # drops tip in rack location A1
+ pipette.drop_tip(tiprack["A1"]) # drops tip in rack location A1
.. versionadded:: 2.0
@@ -99,6 +99,13 @@ To return a tip to its original location, call the :py:meth:`~.InstrumentContext
pipette.return_tip()
+.. versionadded:: 2.0
+
+.. note::
+ You can't return tips with a pipette that's configured to use :ref:`partial tip pickup `. This restriction ensures that the pipette has clear access to unused tips. For example, a 96-channel pipette in column configuration can't reach column 2 unless column 1 is empty.
+
+ If you call ``return_tip()`` while using partial tip pickup, the API will raise an error. Use ``drop_tip()`` to dispose of the tips instead.
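+
+   For example, while a 96-channel pipette is in column configuration, a sketch like this disposes of tips without returning them::
+
+      pipette.pick_up_tip()
+      pipette.drop_tip()  # use drop_tip() instead of return_tip() during partial tip pickup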
+
Working With Used Tips
======================
@@ -108,7 +115,7 @@ Currently, the API considers tips as "used" after being picked up. For example,
pipette.return_tip() # drops tip in rack location A1
pipette.pick_up_tip() # picks up tip from rack location B1
pipette.drop_tip() # drops tip in trash bin
- pipette.pick_up_tip(tiprack_1['A1']) # picks up tip from rack location A1
+ pipette.pick_up_tip(tiprack_1["A1"]) # picks up tip from rack location A1
Early API versions treated returned tips as unused items. They could be picked up again without an explicit argument. For example::
diff --git a/api/docs/v2/basic_commands/utilities.rst b/api/docs/v2/basic_commands/utilities.rst
index 1117d0d0b97..b1593785d31 100644
--- a/api/docs/v2/basic_commands/utilities.rst
+++ b/api/docs/v2/basic_commands/utilities.rst
@@ -30,7 +30,7 @@ Pause Until Resumed
Call the :py:meth:`.ProtocolContext.pause` method to stop a protocol at a specific step. Unlike a delay, :py:meth:`~.ProtocolContext.pause` does not restart your protocol automatically. To resume, you'll respond to a prompt on the touchscreen or in the Opentrons App. This method also lets you specify an optional message that provides on-screen or in-app instructions on how to proceed. This example inserts a pause and includes a brief message::
- protocol.pause('Remember to get more pipette tips')
+ protocol.pause("Remember to get more pipette tips")
.. versionadded:: 2.0
@@ -47,12 +47,12 @@ To home the gantry, call :py:meth:`.ProtocolContext.home`::
To home a specific pipette's Z axis and plunger, call :py:meth:`.InstrumentContext.home`::
- pipette = protocol.load_instrument('flex_1channel_1000', 'right')
+ pipette = protocol.load_instrument("flex_1channel_1000", "right")
pipette.home()
To home a specific pipette's plunger only, you can call :py:meth:`.InstrumentContext.home_plunger`::
- pipette = protocol.load_instrument('flex_1channel_1000', 'right')
+ pipette = protocol.load_instrument("flex_1channel_1000", "right")
pipette.home_plunger()
.. versionadded:: 2.0
@@ -62,7 +62,7 @@ Comment
Call the :py:meth:`.ProtocolContext.comment` method if you want to write and display a brief message in the Opentrons App during a protocol run::
- protocol.comment('Hello, world!')
+ protocol.comment("Hello, world!")
.. versionadded:: 2.0
diff --git a/api/docs/v2/complex_commands/sources_destinations.rst b/api/docs/v2/complex_commands/sources_destinations.rst
index 597f20787ea..3c8e725a9aa 100644
--- a/api/docs/v2/complex_commands/sources_destinations.rst
+++ b/api/docs/v2/complex_commands/sources_destinations.rst
@@ -53,7 +53,7 @@ The following table summarizes the source and destination restrictions for each
- **Source:** Any number of wells.
- **Destination:** Exactly one well.
-A single well can be passed by itself or as a list with one item: ``source=plate['A1']`` and ``source=[plate['A1']]`` are equivalent.
+A single well can be passed by itself or as a list with one item: ``source=plate["A1"]`` and ``source=[plate["A1"]]`` are equivalent.
The section on :ref:`many-to-many transfers ` below covers how ``transfer()`` works when specifying sources and destinations of different sizes. However, if they don't meet the even divisibility requirement, the API will raise an error. You can work around such situations by making multiple calls to ``transfer()`` in sequence or by using a :ref:`list of volumes ` to skip certain wells.
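+
+For example, this sketch (with hypothetical ``pipette`` and ``plate`` variables) uses a list of volumes to skip the middle destination well by assigning it a volume of 0::
+
+    pipette.transfer(
+        volume=[50, 0, 50],
+        source=plate["A1"],
+        dest=[plate["B1"], plate["B2"], plate["B3"]],
+    )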
@@ -103,7 +103,7 @@ Aspirating and Dispensing
See :ref:`complex-tip-refilling` below for cases where the total amount to be dispensed is greater than the capacity of the tip.
.. figure:: ../../img/complex_commands/robot_distribute.png
- :name: Transfer
+ :name: Distribute
:scale: 35%
:align: center
@@ -120,7 +120,7 @@ See :ref:`complex-tip-refilling` below for cases where the total amount to be di
See :ref:`complex-tip-refilling` below for cases where the total amount to be aspirated is greater than the capacity of the tip.
.. figure:: ../../img/complex_commands/robot_consolidate.png
- :name: Transfer
+ :name: Consolidate
:scale: 35%
:align: center
diff --git a/api/docs/v2/conf.py b/api/docs/v2/conf.py
index 29524982522..480cc2ed872 100644
--- a/api/docs/v2/conf.py
+++ b/api/docs/v2/conf.py
@@ -96,13 +96,10 @@
# setup the code block substitution extension to auto-update apiLevel
extensions += ['sphinx-prompt', 'sphinx_substitution_extensions']
-# get the max API level
-from opentrons.protocol_api import MAX_SUPPORTED_VERSION # noqa
-max_apiLevel = str(MAX_SUPPORTED_VERSION)
-
# use rst_prolog to hold the subsitution
+# update the apiLevel value whenever a new minor version is released
rst_prolog = f"""
-.. |apiLevel| replace:: {max_apiLevel}
+.. |apiLevel| replace:: 2.17
.. |release| replace:: {release}
"""
@@ -427,3 +424,25 @@
# -- Options for tabs -----------------------------------------------------
sphinx_tabs_disable_tab_closing = True
+
+# -- Suppress autodoc warnings --------------------------------------------
+
+# Ignore warnings for deliberately missing/undocumented things that appear
+# in automatically generated type signatures.
+#
+# The goal here is to pass through any warnings for bad targets of MANUALLY
+# created links.
+nitpick_ignore_regex = [
+ ("py:class", r".*Optional\[.*"), # any Optional with bad members
+ ("py:class", r".*commands\.types.*"),
+ ("py:class", r".*hardware_control.*"),
+ ("py:class", r".*legacy_broker.*"),
+ ("py:class", r".*protocol_api\.core.*"),
+ ("py:class", r".*api_support.*"),
+ ("py:class", r".*duration\.estimator.*"),
+ ("py:class", r".*protocols\.types.*"),
+ ("py:class", r".*protocol_api\.deck.*"),
+ ("py:class", r".*protocol_api\.config.*"),
+ ("py:class", r".*opentrons_shared_data.*"),
+ ("py:class", r'.*AbstractLabware|APIVersion|LabwareLike|LoadedCoreMap|ModuleTypes|NoneType|OffDeckType|ProtocolCore|WellCore'), # laundry list of not fully qualified things
+]
diff --git a/api/docs/v2/deck_slots.rst b/api/docs/v2/deck_slots.rst
index 83781227e3c..2c38e70755f 100644
--- a/api/docs/v2/deck_slots.rst
+++ b/api/docs/v2/deck_slots.rst
@@ -1,33 +1,31 @@
:og:description: How to specify deck slots in the Python Protocol API.
-..
- Allow concise cross-referencing to ProtocolContext.load_labware() et. al., without barfing out the whole import path.
-.. py:currentmodule:: opentrons.protocol_api
-
-
.. _deck-slots:
**********
Deck Slots
**********
-When you load an item onto the robot's deck, like with :py:obj:`ProtocolContext.load_labware()` or :py:obj:`ProtocolContext.load_module()`, you need to specify which slot to put it in. The API accepts values that correspond to the physical deck slot labels on an OT-2 or Flex robot.
+Deck slots are where you place hardware items on the deck surface of your Opentrons robot. In the API, you load the corresponding items into your protocol with methods like :py:obj:`.ProtocolContext.load_labware`, :py:obj:`.ProtocolContext.load_module`, or :py:obj:`.ProtocolContext.load_trash_bin`. When you call these methods, you need to specify which slot to load the item in.
Physical Deck Labels
====================
-The Opentrons Flex uses a coordinate labeling system for slots A1 (back left) through D3 (front right).
+Flex uses a coordinate labeling system for slots A1 (back left) through D4 (front right). Columns 1 through 3 are in the *working area* and are accessible by pipettes and the gripper. Column 4 is in the *staging area* and is only accessible by the gripper. For more information on staging area slots, see :ref:`deck-configuration` below.
+
+.. image:: ../img/flex-deck.svg
+ :width: 80%
-The Opentrons OT-2 uses a numeric labeling system for slots 1 (front left) through 11 (back center). The back right slot is occupied by the fixed trash.
+OT-2 uses a numeric labeling system for slots 1 (front left) through 11 (back center). The back right slot is occupied by the fixed trash.
-.. image:: ../img/Flex-and-OT-2-decks.svg
- :width: 100%
+.. image:: ../img/OT-2-deck.svg
+ :width: 55%
API Deck Labels
===============
-Specify a slot in either the Flex or OT-2 format:
+The API accepts values that correspond to the physical deck slot labels on a Flex or OT-2 robot. Specify a slot in either format:
* A coordinate like ``"A1"``. This format must be a string.
* A number like ``"10"`` or ``10``. This format can be a string or an integer.
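+
+For example, both lines in this sketch refer to the same OT-2 deck slot; only the type of the ``location`` value differs (the labware name is just an illustration)::
+
+    plate = protocol.load_labware("nest_96_wellplate_200ul_flat", location="5")  # slot as a string
+    plate = protocol.load_labware("nest_96_wellplate_200ul_flat", location=5)    # slot as an integer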
@@ -83,3 +81,113 @@ The correspondence between deck labels is based on the relative locations of the
- 3
.. TODO staging slots and error handling of A4–D4 in OT-2 protocols
+
+Slots A4, B4, C4, and D4 on Flex have no equivalent on OT-2.
+
+.. _deck-configuration:
+
+Deck Configuration
+==================
+
+A Flex running robot system version 7.1.0 or higher lets you specify its deck configuration on the touchscreen or in the Opentrons App. This tells the robot the positions of unpowered *deck fixtures*: items that replace standard deck slots. The following table lists currently supported deck fixtures and their allowed deck locations.
+
+.. list-table::
+ :header-rows: 1
+
+ * - Fixture
+ - Slots
+ * - Staging area slots
+ - A3–D3
+ * - Trash bin
+ - A1–D1, A3–D3
+ * - Waste chute
+ - D3
+
+Which fixtures you need to configure depends on both the load methods you call and the effects of other methods in your protocol. The following sections explain how to configure each type of fixture.
+
+.. _configure-staging-area-slots:
+
+Staging Area Slots
+------------------
+
+Slots A4 through D4 are the staging area slots. Pipettes can't reach the staging area, but these slots are always available in the API for loading and moving labware. Using a slot in column 4 as the ``location`` argument of :py:meth:`~.ProtocolContext.load_labware` or the ``new_location`` argument of :py:meth:`.move_labware` will require the corresponding staging area slot in the robot's deck configuration::
+
+ plate_1 = protocol.load_labware(
+ load_name="corning_96_wellplate_360ul_flat", location="C3"
+ ) # no staging slots required
+ plate_2 = protocol.load_labware(
+ load_name="corning_96_wellplate_360ul_flat", location="D4"
+ ) # one staging slot required
+ protocol.move_labware(
+ labware=plate_1, new_location="C4"
+ ) # two staging slots required
+
+.. versionadded:: 2.16
+
+Since staging area slots also include a standard deck slot in column 3, they are physically incompatible with powered modules in the same row of column 3. For example, if you try to load a module in C3 and labware in C4, the API will raise an error::
+
+ temp_mod = protocol.load_module(
+ module_name="temperature module gen2",
+ location="C3"
+ )
+ staging_plate = protocol.load_labware(
+ load_name="corning_96_wellplate_360ul_flat", location="C4"
+ ) # deck conflict error
+
+It is possible to use slot D4 along with the waste chute. See the :ref:`Waste Chute ` section below for details.
+
+.. _configure-trash-bin:
+
+Trash Bin
+---------
+
+In version 2.15 of the API, Flex can only have a single trash bin in slot A3. You do not have to (and cannot) load the trash in version 2.15 protocols.
+
+Starting in API version 2.16, you must load trash bin fixtures in your protocol in order to use them. Use :py:meth:`.load_trash_bin` to load a movable trash bin. This example loads a single bin in the default location::
+
+ default_trash = protocol.load_trash_bin(location = "A3")
+
+.. versionadded:: 2.16
+
+.. note::
+ The :py:class:`.TrashBin` class doesn't have any callable methods, so you don't have to save the result of ``load_trash_bin()`` to a variable, especially if your protocol only loads a single trash container. Being able to reference the trash bin by name is useful when dealing with multiple trash containers.
+
+Call ``load_trash_bin()`` multiple times to add more than one bin. See :ref:`pipette-trash-containers` for more information on using pipettes with multiple trash bins.
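+
+For example, this sketch loads two trash bins, one on each side of the working area::
+
+    left_trash = protocol.load_trash_bin("A1")
+    right_trash = protocol.load_trash_bin("A3")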
+
+.. _configure-waste-chute:
+
+Waste Chute
+-----------
+
+The waste chute accepts various materials from Flex pipettes or the Flex Gripper and uses gravity to transport them outside of the robot for disposal. Pipettes can dispose of liquid or drop tips into the chute. The gripper can drop tip racks and other labware into the chute.
+
+To use the waste chute, first use :py:meth:`.load_waste_chute` to load it in slot D3::
+
+ chute = protocol.load_waste_chute()
+
+.. versionadded:: 2.16
+
+The ``load_waste_chute()`` method takes no arguments, since D3 is the only valid location for the chute. However, there are multiple variant configurations of the waste chute, depending on how other methods in your protocol use it.
+
+The waste chute is installed either on a standard deck plate adapter or on a deck plate adapter with a staging area. If any :py:meth:`~.ProtocolContext.load_labware` or :py:meth:`.move_labware` calls in your protocol reference slot D4, you have to use the deck plate adapter with a staging area.
+
+The waste chute has a removable cover with a narrow opening, which helps prevent aerosols and droplets from contaminating the working area. 1- and 8-channel pipettes can dispense liquid, blow out, or drop tips through the opening in the cover. Any of the following actions requires you to remove the cover.
+
+ - :py:meth:`.dispense`, :py:meth:`.blow_out`, or :py:meth:`.drop_tip` with a 96-channel pipette.
+ - :py:meth:`.move_labware` with the chute as ``new_location`` and ``use_gripper=True``.
+
+If your protocol *does not* call any of these methods, your deck configuration should include the cover.
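+
+For example, in this sketch (assuming ``chute``, ``pipette``, and ``tip_rack`` are already loaded), the first step works through the opening in the cover, while the second requires the cover to be removed::
+
+    pipette.drop_tip(chute)  # an 8-channel pipette can drop tips through the cover opening
+    protocol.move_labware(labware=tip_rack, new_location=chute, use_gripper=True)  # cover must be removed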
+
+In total, there are four possible deck configurations for the waste chute.
+ - Waste chute only
+ - Waste chute with cover
+ - Waste chute with staging area slot
+ - Waste chute with staging area slot and cover
+
+Deck Conflicts
+==============
+
+A deck conflict check occurs when preparing to run a Python protocol on a Flex running robot system version 7.1.0 or higher. The Opentrons App and touchscreen will prevent you from starting the protocol run until any conflicts are resolved. You can resolve them in one of two ways:
+
+ - Physically move hardware around the deck, and update the deck configuration.
+ - Alter your protocol to work with the current deck configuration, and resend the protocol to your Flex.
diff --git a/api/docs/v2/example_protocols/dilution_tutorial.py b/api/docs/v2/example_protocols/dilution_tutorial.py
index 9ed16902095..a7d38c53eb4 100644
--- a/api/docs/v2/example_protocols/dilution_tutorial.py
+++ b/api/docs/v2/example_protocols/dilution_tutorial.py
@@ -1,24 +1,24 @@
from opentrons import protocol_api
metadata = {
- 'apiLevel': '2.15',
- 'protocolName': 'Serial Dilution Tutorial – OT-2 single-channel',
- 'description': '''This protocol is the outcome of following the
+ "apiLevel": "2.16",
+ "protocolName": "Serial Dilution Tutorial – OT-2 single-channel",
+ "description": """This protocol is the outcome of following the
Python Protocol API Tutorial located at
https://docs.opentrons.com/v2/tutorial.html. It takes a
solution and progressively dilutes it by transferring it
- stepwise across a plate.''',
- 'author': 'New API User'
+ stepwise across a plate.""",
+ "author": "New API User"
}
def run(protocol: protocol_api.ProtocolContext):
- tips = protocol.load_labware('opentrons_96_tiprack_300ul', 1)
- reservoir = protocol.load_labware('nest_12_reservoir_15ml', 2)
- plate = protocol.load_labware('nest_96_wellplate_200ul_flat', 3)
- left_pipette = protocol.load_instrument('p300_single_gen2', 'left', tip_racks=[tips])
+ tips = protocol.load_labware("opentrons_96_tiprack_300ul", 1)
+ reservoir = protocol.load_labware("nest_12_reservoir_15ml", 2)
+ plate = protocol.load_labware("nest_96_wellplate_200ul_flat", 3)
+ left_pipette = protocol.load_instrument("p300_single_gen2", "left", tip_racks=[tips])
# distribute diluent
- left_pipette.transfer(100, reservoir['A1'], plate.wells())
+ left_pipette.transfer(100, reservoir["A1"], plate.wells())
# loop through each row
for i in range(8):
@@ -27,7 +27,7 @@ def run(protocol: protocol_api.ProtocolContext):
row = plate.rows()[i]
# transfer solution to first well in column
- left_pipette.transfer(100, reservoir['A2'], row[0], mix_after=(3, 50))
+ left_pipette.transfer(100, reservoir["A2"], row[0], mix_after=(3, 50))
# dilute the sample down the row
left_pipette.transfer(100, row[:11], row[1:], mix_after=(3, 50))
\ No newline at end of file
diff --git a/api/docs/v2/example_protocols/dilution_tutorial_flex.py b/api/docs/v2/example_protocols/dilution_tutorial_flex.py
index 04f03388db0..bc3cad10dd7 100644
--- a/api/docs/v2/example_protocols/dilution_tutorial_flex.py
+++ b/api/docs/v2/example_protocols/dilution_tutorial_flex.py
@@ -1,28 +1,29 @@
from opentrons import protocol_api
metadata = {
- 'protocolName': 'Serial Dilution Tutorial – Flex 1-channel',
- 'description': '''This protocol is the outcome of following the
+ "protocolName": "Serial Dilution Tutorial – Flex 1-channel",
+ "description": """This protocol is the outcome of following the
Python Protocol API Tutorial located at
https://docs.opentrons.com/v2/tutorial.html. It takes a
solution and progressively dilutes it by transferring it
- stepwise across a plate.''',
- 'author': 'New API User'
+ stepwise across a plate.""",
+ "author": "New API User"
}
requirements = {
- 'robotType': 'Flex',
- 'apiLevel': '2.15'
+ "robotType": "Flex",
+ "apiLevel": "2.16"
}
def run(protocol: protocol_api.ProtocolContext):
- tips = protocol.load_labware('opentrons_flex_96_tiprack_200ul', 'D1')
- reservoir = protocol.load_labware('nest_12_reservoir_15ml', 'D2')
- plate = protocol.load_labware('nest_96_wellplate_200ul_flat', 'D3')
- left_pipette = protocol.load_instrument('flex_1channel_1000', 'left', tip_racks=[tips])
+ tips = protocol.load_labware("opentrons_flex_96_tiprack_200ul", "D1")
+ reservoir = protocol.load_labware("nest_12_reservoir_15ml", "D2")
+ plate = protocol.load_labware("nest_96_wellplate_200ul_flat", "D3")
+ trash = protocol.load_trash_bin("A3")
+ left_pipette = protocol.load_instrument("flex_1channel_1000", "left", tip_racks=[tips])
# distribute diluent
- left_pipette.transfer(100, reservoir['A1'], plate.wells())
+ left_pipette.transfer(100, reservoir["A1"], plate.wells())
# loop through each row
for i in range(8):
@@ -31,7 +32,7 @@ def run(protocol: protocol_api.ProtocolContext):
row = plate.rows()[i]
# transfer solution to first well in column
- left_pipette.transfer(100, reservoir['A2'], row[0], mix_after=(3, 50))
+ left_pipette.transfer(100, reservoir["A2"], row[0], mix_after=(3, 50))
# dilute the sample down the row
left_pipette.transfer(100, row[:11], row[1:], mix_after=(3, 50))
\ No newline at end of file
diff --git a/api/docs/v2/example_protocols/dilution_tutorial_multi.py b/api/docs/v2/example_protocols/dilution_tutorial_multi.py
index dee28ff86ff..a121d345a58 100644
--- a/api/docs/v2/example_protocols/dilution_tutorial_multi.py
+++ b/api/docs/v2/example_protocols/dilution_tutorial_multi.py
@@ -1,24 +1,24 @@
from opentrons import protocol_api
metadata = {
- 'apiLevel': '2.15',
- 'protocolName': 'Serial Dilution Tutorial – OT-2 8-channel',
- 'description': '''This protocol is the outcome of following the
+ "apiLevel": "2.16",
+ "protocolName": "Serial Dilution Tutorial – OT-2 8-channel",
+ "description": """This protocol is the outcome of following the
Python Protocol API Tutorial located at
https://docs.opentrons.com/v2/tutorial.html. It takes a
solution and progressively dilutes it by transferring it
- stepwise across a plate.''',
- 'author': 'New API User'
+ stepwise across a plate.""",
+ "author": "New API User"
}
def run(protocol: protocol_api.ProtocolContext):
- tips = protocol.load_labware('opentrons_96_tiprack_300ul', 1)
- reservoir = protocol.load_labware('nest_12_reservoir_15ml', 2)
- plate = protocol.load_labware('nest_96_wellplate_200ul_flat', 3)
- left_pipette = protocol.load_instrument('p300_multi_gen2', 'right', tip_racks=[tips])
+ tips = protocol.load_labware("opentrons_96_tiprack_300ul", 1)
+ reservoir = protocol.load_labware("nest_12_reservoir_15ml", 2)
+ plate = protocol.load_labware("nest_96_wellplate_200ul_flat", 3)
+ left_pipette = protocol.load_instrument("p300_multi_gen2", "right", tip_racks=[tips])
# distribute diluent
- left_pipette.transfer(100, reservoir['A1'], plate.rows()[0])
+ left_pipette.transfer(100, reservoir["A1"], plate.rows()[0])
# no loop, 8-channel pipette
@@ -26,7 +26,7 @@ def run(protocol: protocol_api.ProtocolContext):
row = plate.rows()[0]
# transfer solution to first well in column
- left_pipette.transfer(100, reservoir['A2'], row[0], mix_after=(3, 50))
+ left_pipette.transfer(100, reservoir["A2"], row[0], mix_after=(3, 50))
# dilute the sample down the row
left_pipette.transfer(100, row[:11], row[1:], mix_after=(3, 50))
\ No newline at end of file
diff --git a/api/docs/v2/example_protocols/dilution_tutorial_multi_flex.py b/api/docs/v2/example_protocols/dilution_tutorial_multi_flex.py
index 704eff20218..21f659db62c 100644
--- a/api/docs/v2/example_protocols/dilution_tutorial_multi_flex.py
+++ b/api/docs/v2/example_protocols/dilution_tutorial_multi_flex.py
@@ -1,36 +1,37 @@
from opentrons import protocol_api
metadata = {
- 'protocolName': 'Serial Dilution Tutorial – Flex 8-channel',
- 'description': '''This protocol is the outcome of following the
+ "protocolName": "Serial Dilution Tutorial – Flex 8-channel",
+ "description": """This protocol is the outcome of following the
Python Protocol API Tutorial located at
https://docs.opentrons.com/v2/tutorial.html. It takes a
solution and progressively dilutes it by transferring it
- stepwise across a plate.''',
- 'author': 'New API User'
+ stepwise across a plate.""",
+ "author": "New API User"
}
requirements = {
- 'robotType': 'Flex',
- 'apiLevel': '2.15'
+ "robotType": "Flex",
+ "apiLevel": "2.16"
}
def run(protocol: protocol_api.ProtocolContext):
- tips = protocol.load_labware('opentrons_96_tiprack_300ul', 'D1')
- reservoir = protocol.load_labware('nest_12_reservoir_15ml', 'D2')
- plate = protocol.load_labware('nest_96_wellplate_200ul_flat', 'D3')
- left_pipette = protocol.load_instrument('flex_8channel_1000', 'right', tip_racks=[tips])
+ tips = protocol.load_labware("opentrons_96_tiprack_300ul", "D1")
+ reservoir = protocol.load_labware("nest_12_reservoir_15ml", "D2")
+ plate = protocol.load_labware("nest_96_wellplate_200ul_flat", "D3")
+ trash = protocol.load_trash_bin("A3")
+ left_pipette = protocol.load_instrument("flex_8channel_1000", "right", tip_racks=[tips])
- # distribute diluent
- left_pipette.transfer(100, reservoir['A1'], plate.rows()[0])
+ # distribute diluent
+ left_pipette.transfer(100, reservoir["A1"], plate.rows()[0])
- # no loop, 8-channel pipette
+ # no loop, 8-channel pipette
- # save the destination row to a variable
- row = plate.rows()[0]
+ # save the destination row to a variable
+ row = plate.rows()[0]
- # transfer solution to first well in column
- left_pipette.transfer(100, reservoir['A2'], row[0], mix_after=(3, 50))
+ # transfer solution to first well in column
+ left_pipette.transfer(100, reservoir["A2"], row[0], mix_after=(3, 50))
- # dilute the sample down the row
- left_pipette.transfer(100, row[:11], row[1:], mix_after=(3, 50))
\ No newline at end of file
+ # dilute the sample down the row
+ left_pipette.transfer(100, row[:11], row[1:], mix_after=(3, 50))
\ No newline at end of file
diff --git a/api/docs/v2/index.rst b/api/docs/v2/index.rst
index c64af1c1082..5e29296241d 100644
--- a/api/docs/v2/index.rst
+++ b/api/docs/v2/index.rst
@@ -36,7 +36,7 @@ Getting Started
If you want to **dive right into code**, take a look at our :ref:`new-examples` and the comprehensive :ref:`protocol-api-reference`.
-When you're ready to **try out a protocol**, download the `Opentrons App `_, import the protocol file, and run it on your robot.
+When you're ready to **try out a protocol**, download the `Opentrons App `__, import the protocol file, and run it on your robot.
.. _overview-section-v2:
@@ -81,6 +81,7 @@ For example, if we wanted to transfer liquid from well A1 to well B1 on a plate,
tiprack = protocol.load_labware(
"opentrons_flex_96_tiprack_200ul", location="D2"
)
+ trash = protocol.load_trash_bin(location="A3")
# pipettes
left_pipette = protocol.load_instrument(
@@ -123,7 +124,7 @@ For example, if we wanted to transfer liquid from well A1 to well B1 on a plate,
}
# requirements
- requirements = {"robotType": "OT-2", "apiLevel": "2.14"}
+ requirements = {"robotType": "OT-2", "apiLevel": "|apiLevel|"}
# protocol run function
def run(protocol: protocol_api.ProtocolContext):
@@ -149,7 +150,7 @@ For example, if we wanted to transfer liquid from well A1 to well B1 on a plate,
This example proceeds completely linearly. Following it line-by-line, you can see that it has the following effects:
1. Gives the name, contact information, and a brief description for the protocol.
- 2. Indicates the protocol should run on an OT-2 robot, using API version 2.14.
+ 2. Indicates the protocol should run on an OT-2 robot, using API version |apiLevel|.
3. Tells the robot that there is:
a. A 96-well flat plate in slot 1.
b. A rack of 300 µL tips in slot 2.
@@ -170,17 +171,17 @@ More Resources
Opentrons App
+++++++++++++
-The `Opentrons App `_ is the easiest way to run your Python protocols. The app `supports `_ the latest versions of macOS, Windows, and Ubuntu.
+The `Opentrons App `__ is the easiest way to run your Python protocols. The app runs on the latest versions of macOS, Windows, and Ubuntu.
Support
+++++++
-Questions about setting up your robot, using Opentrons software, or troubleshooting? Check out our `support articles `_ or `get in touch directly `_ with Opentrons Support.
+Questions about setting up your robot, using Opentrons software, or troubleshooting? Check out our `support articles `_ or `contact Opentrons Support directly `_.
Custom Protocol Service
+++++++++++++++++++++++
-Don't have the time or resources to write your own protocols? The `Opentrons Custom Protocols `_ service can get you set up in as little as a week.
+Don't have the time or resources to write your own protocols? Our `custom protocol development service `_ can get you set up in two weeks.
Contributing
++++++++++++
diff --git a/api/docs/v2/modules/heater_shaker.rst b/api/docs/v2/modules/heater_shaker.rst
index f7d1f1c2081..b56d0dcfb28 100644
--- a/api/docs/v2/modules/heater_shaker.rst
+++ b/api/docs/v2/modules/heater_shaker.rst
@@ -101,8 +101,8 @@ You can use these standalone adapter definitions to load Opentrons verified or c
For example, these commands load a well plate on top of the flat bottom adapter::
- hs_adapter = hs_mod.load_adapter('opentrons_96_flat_bottom_adapter')
- hs_plate = hs_adapter.load_labware('nest_96_wellplate_200ul_flat')
+ hs_adapter = hs_mod.load_adapter("opentrons_96_flat_bottom_adapter")
+ hs_plate = hs_adapter.load_labware("nest_96_wellplate_200ul_flat")
.. versionadded:: 2.15
The ``load_adapter()`` method.
@@ -183,8 +183,8 @@ To pipette while the Heater-Shaker is heating, use :py:meth:`~.HeaterShakerConte
hs_mod.set_target_temperature(75)
pipette.pick_up_tip()
- pipette.aspirate(50, plate['A1'])
- pipette.dispense(50, plate['B1'])
+ pipette.aspirate(50, plate["A1"])
+ pipette.dispense(50, plate["B1"])
pipette.drop_tip()
hs_mod.wait_for_temperature()
protocol.delay(minutes=1)
@@ -199,8 +199,8 @@ Additionally, if you want to pipette while the module holds a temperature for a
hs_mod.set_and_wait_for_temperature(75)
start_time = time.monotonic() # set reference time
pipette.pick_up_tip()
- pipette.aspirate(50, plate['A1'])
- pipette.dispense(50, plate['B1'])
+ pipette.aspirate(50, plate["A1"])
+ pipette.dispense(50, plate["B1"])
pipette.drop_tip()
# delay for the difference between now and 60 seconds after the reference time
protocol.delay(max(0, start_time+60 - time.monotonic()))
diff --git a/api/docs/v2/modules/magnetic_module.rst b/api/docs/v2/modules/magnetic_module.rst
index db12df7b58c..ae293811927 100644
--- a/api/docs/v2/modules/magnetic_module.rst
+++ b/api/docs/v2/modules/magnetic_module.rst
@@ -19,10 +19,10 @@ The examples in this section apply to an OT-2 with a Magnetic Module GEN2 loaded
def run(protocol: protocol_api.ProtocolContext):
mag_mod = protocol.load_module(
- module_name='magnetic module gen2',
- location='6')
+ module_name="magnetic module gen2",
+ location="6")
plate = mag_mod.load_labware(
- name='nest_96_wellplate_100ul_pcr_full_skirt')
+ name="nest_96_wellplate_100ul_pcr_full_skirt")
.. versionadded:: 2.3
diff --git a/api/docs/v2/modules/multiple_same_type.rst b/api/docs/v2/modules/multiple_same_type.rst
index ce44065487a..386f8d7f281 100644
--- a/api/docs/v2/modules/multiple_same_type.rst
+++ b/api/docs/v2/modules/multiple_same_type.rst
@@ -28,12 +28,12 @@ When working with multiple modules of the same type, load them in your protocol
def run(protocol: protocol_api.ProtocolContext):
# Load Temperature Module 1 in deck slot D1 on USB port 2
temperature_module_1 = protocol.load_module(
- module_name='temperature module gen2',
+ module_name="temperature module gen2",
location="D1")
# Load Temperature Module 2 in deck slot C1 on USB port 6
temperature_module_2 = protocol.load_module(
- module_name='temperature module gen2',
+ module_name="temperature module gen2",
location="C1")
The Temperature Modules are connected as shown here:
@@ -46,21 +46,23 @@ When working with multiple modules of the same type, load them in your protocol
In this example, ``temperature_module_1`` loads first because it's connected to USB port 1. ``temperature_module_2`` loads next because it's connected to USB port 3.
.. code-block:: python
+ :substitutions:
- from opentrons import protocol_api
+ from opentrons import protocol_api
- metadata = { 'apiLevel': '2.14'}
+ metadata = {"apiLevel": "|apiLevel|"}
- def run(protocol: protocol_api.ProtocolContext):
- # Load Temperature Module 1 in deck slot C1 on USB port 1
- temperature_module_1 = protocol.load_module(
- load_name='temperature module gen2',
- location="1")
- # Load Temperature Module 2 in deck slot D3 on USB port 2
- temperature_module_2 = protocol.load_module(
- load_name='temperature module gen2',
- location="3")
+ def run(protocol: protocol_api.ProtocolContext):
+ # Load Temperature Module 1 in deck slot C1 on USB port 1
+ temperature_module_1 = protocol.load_module(
+ load_name="temperature module gen2", location="1"
+ )
+
+ # Load Temperature Module 2 in deck slot D3 on USB port 2
+ temperature_module_2 = protocol.load_module(
+ load_name="temperature module gen2", location="3"
+ )
The Temperature Modules are connected as shown here:
diff --git a/api/docs/v2/modules/setup.rst b/api/docs/v2/modules/setup.rst
index b538ceeff3a..c6badd82954 100644
--- a/api/docs/v2/modules/setup.rst
+++ b/api/docs/v2/modules/setup.rst
@@ -22,16 +22,16 @@ Use :py:meth:`.ProtocolContext.load_module` to load a module.
from opentrons import protocol_api
- requirements = {'robotType': 'Flex', 'apiLevel': '|apiLevel|'}
+ requirements = {"robotType": "Flex", "apiLevel": "|apiLevel|"}
def run(protocol: protocol_api.ProtocolContext):
# Load a Heater-Shaker Module GEN1 in deck slot D1.
heater_shaker = protocol.load_module(
- module_name='heaterShakerModuleV1', location='D1')
+ module_name="heaterShakerModuleV1", location="D1")
# Load a Temperature Module GEN2 in deck slot D3.
temperature_module = protocol.load_module(
- module_name='temperature module gen2', location='D3')
+ module_name="temperature module gen2", location="D3")
After the ``load_module()`` method loads the modules into your protocol, it returns the :py:class:`~opentrons.protocol_api.HeaterShakerContext` and :py:class:`~opentrons.protocol_api.TemperatureModuleContext` objects.
@@ -42,16 +42,16 @@ Use :py:meth:`.ProtocolContext.load_module` to load a module.
from opentrons import protocol_api
- metadata = {'apiLevel': '2.14'}
+ metadata = {"apiLevel": "|apiLevel|"}
def run(protocol: protocol_api.ProtocolContext):
# Load a Magnetic Module GEN2 in deck slot 1.
magnetic_module = protocol.load_module(
- module_name='magnetic module gen2', location=1)
+ module_name="magnetic module gen2", location=1)
# Load a Temperature Module GEN1 in deck slot 3.
temperature_module = protocol.load_module(
- module_name='temperature module', location=3)
+ module_name="temperature module", location=3)
After the ``load_module()`` method loads the modules into your protocol, it returns the :py:class:`~opentrons.protocol_api.MagneticModuleContext` and :py:class:`~opentrons.protocol_api.TemperatureModuleContext` objects.
diff --git a/api/docs/v2/modules/temperature_module.rst b/api/docs/v2/modules/temperature_module.rst
index 60e02879795..5debe628a95 100644
--- a/api/docs/v2/modules/temperature_module.rst
+++ b/api/docs/v2/modules/temperature_module.rst
@@ -43,10 +43,10 @@ You can use these standalone adapter definitions to load Opentrons verified or c
For example, these commands load a PCR plate on top of the 96-well block::
temp_adapter = temp_mod.load_adapter(
- 'opentrons_96_well_aluminum_block'
+ "opentrons_96_well_aluminum_block"
)
temp_plate = temp_adapter.load_labware(
- 'nest_96_wellplate_100ul_pcr_full_skirt'
+ "nest_96_wellplate_100ul_pcr_full_skirt"
)
.. versionadded:: 2.15
@@ -81,7 +81,7 @@ You can use these combination labware definitions to load various types of tubes
For example, this command loads the 24-well block with generic 2 mL tubes::
temp_tubes = temp_mod.load_labware(
- 'opentrons_24_aluminumblock_generic_2ml_screwcap'
+ "opentrons_24_aluminumblock_generic_2ml_screwcap"
)
.. versionadded:: 2.0
@@ -137,9 +137,9 @@ If you need to confirm in software whether the Temperature Module is holding at
.. code-block:: python
temp_mod.set_temperature(celsius=90)
- temp_mod.status # 'holding at target'
+ temp_mod.status # "holding at target"
temp_mod.deactivate()
- temp_mod.status # 'idle'
+ temp_mod.status # "idle"
If you don't need to use the status value in your code, and you have physical access to the module, you can read its status and temperature from the LED and display on the module.
diff --git a/api/docs/v2/modules/thermocycler.rst b/api/docs/v2/modules/thermocycler.rst
index ca8fcd772b8..9322e0a96f0 100644
--- a/api/docs/v2/modules/thermocycler.rst
+++ b/api/docs/v2/modules/thermocycler.rst
@@ -14,8 +14,8 @@ The examples in this section will use a Thermocycler Module GEN2 loaded as follo
.. code-block:: python
- tc_mod = protocol.load_module(module_name='thermocyclerModuleV2')
- plate = tc_mod.load_labware(name='nest_96_wellplate_100ul_pcr_full_skirt')
+ tc_mod = protocol.load_module(module_name="thermocyclerModuleV2")
+ plate = tc_mod.load_labware(name="nest_96_wellplate_100ul_pcr_full_skirt")
.. versionadded:: 2.13
@@ -105,8 +105,8 @@ For example, this profile commands the Thermocycler to reach 10 °C and hold for
.. code-block:: python
profile = [
- {'temperature':10, 'hold_time_seconds':30},
- {'temperature':60, 'hold_time_seconds':45}
+ {"temperature":10, "hold_time_seconds":30},
+ {"temperature":60, "hold_time_seconds":45}
]
Once you have written the steps of your profile, execute it with :py:meth:`~.ThermocyclerContext.execute_profile`. This function executes your profile steps multiple times depending on the ``repetitions`` parameter. It also takes a ``block_max_volume`` parameter, which is the same as that of the :py:meth:`~.ThermocyclerContext.set_block_temperature` function.
@@ -116,9 +116,9 @@ For instance, a PCR prep protocol might define and execute a profile like this:
.. code-block:: python
profile = [
- {'temperature':95, 'hold_time_seconds':30},
- {'temperature':57, 'hold_time_seconds':30},
- {'temperature':72, 'hold_time_seconds':60}
+ {"temperature":95, "hold_time_seconds":30},
+ {"temperature":57, "hold_time_seconds":30},
+ {"temperature":72, "hold_time_seconds":60}
]
tc_mod.execute_profile(steps=profile, repetitions=20, block_max_volume=32)
diff --git a/api/docs/v2/moving_labware.rst b/api/docs/v2/moving_labware.rst
index a1d94449132..7dc67f1921a 100644
--- a/api/docs/v2/moving_labware.rst
+++ b/api/docs/v2/moving_labware.rst
@@ -17,8 +17,8 @@ Use the :py:meth:`.ProtocolContext.move_labware` method to initiate a move, rega
:substitutions:
def run(protocol: protocol_api.ProtocolContext):
- plate = protocol.load_labware('nest_96_wellplate_200ul_flat', 'D1')
- protocol.move_labware(labware=plate, new_location='D2')
+ plate = protocol.load_labware("nest_96_wellplate_200ul_flat", "D1")
+ protocol.move_labware(labware=plate, new_location="D2")
.. versionadded:: 2.15
@@ -26,11 +26,12 @@ The required arguments of ``move_labware()`` are the ``labware`` you want to mov
When the move step is complete, the API updates the labware's location, so you can move the plate multiple times::
- protocol.move_labware(labware=plate, new_location='D2')
- protocol.move_labware(labware=plate, new_location='D3')
+ protocol.move_labware(labware=plate, new_location="D2")
+ protocol.move_labware(labware=plate, new_location="D3")
For the first move, the API knows to find the plate in its initial load location, slot D1. For the second move, the API knows to find the plate in D2.
+.. _automatic-manual-moves:
Automatic vs Manual Moves
=========================
@@ -45,23 +46,23 @@ The ``use_gripper`` parameter of :py:meth:`~.ProtocolContext.move_labware` deter
.. code-block:: python
def run(protocol: protocol_api.ProtocolContext):
- plate = protocol.load_labware('nest_96_wellplate_200ul_flat', 'D1')
+ plate = protocol.load_labware("nest_96_wellplate_200ul_flat", "D1")
# have the gripper move the plate from D1 to D2
- protocol.move_labware(labware=plate, new_location='D2', use_gripper=True)
+ protocol.move_labware(labware=plate, new_location="D2", use_gripper=True)
# pause to move the plate manually from D2 to D3
- protocol.move_labware(labware=plate, new_location='D3', use_gripper=False)
+ protocol.move_labware(labware=plate, new_location="D3", use_gripper=False)
# pause to move the plate manually from D3 to C1
- protocol.move_labware(labware=plate, new_location='C1')
+ protocol.move_labware(labware=plate, new_location="C1")
.. versionadded:: 2.15
.. note::
Don't add a ``pause()`` command before ``move_labware()``. When ``use_gripper`` is unset or ``False``, the protocol pauses when it reaches the movement step. The Opentrons App or the touchscreen on Flex shows an animation of the labware movement that you need to perform manually. The protocol only resumes when you press **Confirm and resume**.
-The above example is a complete and valid ``run()`` function. You don't have to load the gripper as an instrument, and there is no ``InstrumentContext`` for the gripper. All you have to do to specify that a protocol requires the gripper is to include at least one ``move_labware()`` command with ``use_labware=True``.
+The above example is a complete and valid ``run()`` function. You don't have to load the gripper as an instrument, and there is no ``InstrumentContext`` for the gripper. All you have to do to specify that a protocol requires the gripper is to include at least one ``move_labware()`` command with ``use_gripper=True``.
If you attempt to use the gripper to move labware in an OT-2 protocol, the API will raise an error.
@@ -129,6 +130,22 @@ Also note the ``hs_mod.open_labware_latch()`` command in the above example. To m
If the labware is inaccessible, the API will raise an error.
+Movement into the Waste Chute
+=============================
+
+Move used tip racks and well plates to the waste chute to dispose of them. This requires you to first :ref:`configure the waste chute ` in your protocol. Then use the loaded :py:class:`.WasteChute` object as the value of ``new_location``::
+
+ chute = protocol.load_waste_chute()
+ protocol.move_labware(
+ labware=plate, new_location=chute, use_gripper=True
+ )
+
+.. versionadded:: 2.16
+
+This will pick up ``plate`` from its current location and drop it into the chute.
+
+Always specify ``use_gripper=True`` when moving labware into the waste chute. The chute is not designed for manual movement. You can still manually move labware to other locations, including off-deck, with the chute installed.
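+
+For example, as a minimal sketch (assuming a second labware, ``tiprack_2``, is loaded elsewhere in the protocol and that deck slot C2 is empty), a gripper move into the chute can be followed by a manual move of other labware::
+
+    # the gripper drops the plate into the waste chute
+    protocol.move_labware(labware=plate, new_location=chute, use_gripper=True)
+    # manual moves of other labware are still allowed while the chute is installed
+    protocol.move_labware(labware=tiprack_2, new_location="C2")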
+
.. _off-deck-location:
The Off-Deck Location
@@ -144,31 +161,34 @@ Remove labware from the deck to perform tasks like retrieving samples or discard
Moving labware off-deck always requires user intervention, because the gripper can't reach outside of the robot. Omit the ``use_gripper`` parameter or explicitly set it to ``False``. If you try to move labware off-deck with ``use_gripper=True``, the API will raise an error.
-You can also load labware off-deck, in preparation for a ``move_labware()`` command that brings it `onto` the deck. For example, you could assign two tip racks to a pipette — one on-deck, and one off-deck — and then swap out the first rack for the second one::
-
- from opentrons import protocol_api
-
- metadata = {"apiLevel": "2.15", "protocolName": "Tip rack replacement"}
- requirements = {"robotType": "OT-2"}
-
-
- def run(protocol: protocol_api.ProtocolContext):
- tips1 = protocol.load_labware("opentrons_96_tiprack_1000ul", 1)
- # load another tip rack but don't put it in a slot yet
- tips2 = protocol.load_labware(
- "opentrons_96_tiprack_1000ul", protocol_api.OFF_DECK
- )
- pipette = protocol.load_instrument(
- "p1000_single_gen2", "left", tip_racks=[tips1, tips2]
- )
- # use all the on-deck tips
- for i in range(96):
+You can also load labware off-deck, in preparation for a ``move_labware()`` command that brings it *onto* the deck. For example, you could assign two tip racks to a pipette — one on-deck, and one off-deck — and then swap out the first rack for the second one:
+
+ .. code-block:: python
+ :substitutions:
+
+ from opentrons import protocol_api
+
+ metadata = {"apiLevel": "|apiLevel|", "protocolName": "Tip rack replacement"}
+ requirements = {"robotType": "OT-2"}
+
+
+ def run(protocol: protocol_api.ProtocolContext):
+ tips1 = protocol.load_labware("opentrons_96_tiprack_1000ul", 1)
+ # load another tip rack but don't put it in a slot yet
+ tips2 = protocol.load_labware(
+ "opentrons_96_tiprack_1000ul", protocol_api.OFF_DECK
+ )
+ pipette = protocol.load_instrument(
+ "p1000_single_gen2", "left", tip_racks=[tips1, tips2]
+ )
+ # use all the on-deck tips
+ for i in range(96):
+ pipette.pick_up_tip()
+ pipette.drop_tip()
+ # pause to move the spent tip rack off-deck
+ protocol.move_labware(labware=tips1, new_location=protocol_api.OFF_DECK)
+ # pause to move the fresh tip rack on-deck
+ protocol.move_labware(labware=tips2, new_location=1)
pipette.pick_up_tip()
- pipette.drop_tip()
- # pause to move the spent tip rack off-deck
- protocol.move_labware(labware=tips1, new_location=protocol_api.OFF_DECK)
- # pause to move the fresh tip rack on-deck
- protocol.move_labware(labware=tips2, new_location=1)
- pipette.pick_up_tip()
Using the off-deck location to remove or replace labware lets you continue your workflow in a single protocol, rather than needing to end a protocol, reset the deck, and start a new protocol run.
diff --git a/api/docs/v2/new_advanced_running.rst b/api/docs/v2/new_advanced_running.rst
index 6af07555148..5a867c0d172 100644
--- a/api/docs/v2/new_advanced_running.rst
+++ b/api/docs/v2/new_advanced_running.rst
@@ -32,7 +32,7 @@ Rather than writing a ``run`` function and embedding commands within it, start
:substitutions:
import opentrons.execute
- protocol = opentrons.execute.get_protocol_api('|apiLevel|')
+ protocol = opentrons.execute.get_protocol_api("|apiLevel|")
protocol.home()
The first command you execute should always be :py:meth:`~opentrons.protocol_api.ProtocolContext.home`. If you try to execute other commands first, you will get a ``MustHomeError``. (When running protocols through the Opentrons App, the robot homes automatically.)
@@ -57,7 +57,7 @@ Since a typical protocol only `defines` the ``run`` function but doesn't `call`
.. code-block:: python
:substitutions:
- protocol = opentrons.execute.get_protocol_api('|apiLevel|')
+ protocol = opentrons.execute.get_protocol_api("|apiLevel|")
run(protocol) # your protocol will now run
.. _using_lpc:
@@ -145,6 +145,7 @@ Command Line
------------
.. TODO update with separate links to OT-2 and Flex setup, when new Flex process is in manual or on help site
+
The robot's command line is accessible either by going to **New > Terminal** in Jupyter or `via SSH `_.
To execute a protocol from the robot's command line, copy the protocol file to the robot with ``scp`` and then run the protocol with ``opentrons_execute``:
diff --git a/api/docs/v2/new_examples.rst b/api/docs/v2/new_examples.rst
index c68a39fcbf0..85b4f75ea41 100644
--- a/api/docs/v2/new_examples.rst
+++ b/api/docs/v2/new_examples.rst
@@ -15,13 +15,13 @@ These sample protocols are designed for anyone using an Opentrons Flex or OT-2 l
# This code uses named arguments
tiprack_1 = protocol.load_labware(
- load_name='opentrons_flex_96_tiprack_200ul',
- location='D2')
+ load_name="opentrons_flex_96_tiprack_200ul",
+ location="D2")
# This code uses positional arguments
- tiprack_1 = protocol.load_labware('opentrons_flex_96_tiprack_200ul','D2')
+ tiprack_1 = protocol.load_labware("opentrons_flex_96_tiprack_200ul", "D2")
-Both examples instantiate the variable ``tiprack_1`` with a Flex tip rack, but the former is more explicit. It shows the parameter name and its value together (e.g. ``location='D2'``), which may be helpful when you're unsure about what's going on in a protocol code sample.
+Both examples instantiate the variable ``tiprack_1`` with a Flex tip rack, but the former is more explicit. It shows the parameter name and its value together (e.g. ``location="D2"``), which may be helpful when you're unsure about what's going on in a protocol code sample.
Python developers with more experience should feel free to ignore the code styling used here and work with these examples as you like.
@@ -91,6 +91,8 @@ This code only loads the instruments and labware listed above, and performs no o
reservoir = protocol.load_labware(
load_name="usascientific_12_reservoir_22ml", location="D1"
)
+ # load trash bin in deck slot A3
+ trash = protocol.load_trash_bin(location="A3")
# Put protocol commands here
.. tab:: OT-2
@@ -100,7 +102,7 @@ This code only loads the instruments and labware listed above, and performs no o
from opentrons import protocol_api
- metadata = {'apiLevel': '2.14'}
+ metadata = {"apiLevel": "|apiLevel|"}
def run(protocol: protocol_api.ProtocolContext):
# load tip rack in deck slot 3
@@ -142,24 +144,25 @@ This protocol uses some :ref:`building block commands ` to t
from opentrons import protocol_api
- requirements = {'robotType': 'Flex', 'apiLevel':'|apiLevel|'}
+ requirements = {"robotType": "Flex", "apiLevel":"|apiLevel|"}
def run(protocol: protocol_api.ProtocolContext):
plate = protocol.load_labware(
- load_name='corning_96_wellplate_360ul_flat',
- location='D1')
+ load_name="corning_96_wellplate_360ul_flat",
+ location="D1")
tiprack_1 = protocol.load_labware(
- load_name='opentrons_flex_96_tiprack_200ul',
- location='D2')
- pipette_1 = protocol.load_instrument(
- instrument_name='flex_1channel_1000',
- mount='left',
+ load_name="opentrons_flex_96_tiprack_200ul",
+ location="D2")
+ trash = protocol.load_trash_bin("A3")
+ pipette = protocol.load_instrument(
+ instrument_name="flex_1channel_1000",
+ mount="left",
tip_racks=[tiprack_1])
- pipette_1.pick_up_tip()
- pipette_1.aspirate(100, plate['A1'])
- pipette_1.dispense(100, plate['B1'])
- pipette_1.drop_tip()
+ pipette.pick_up_tip()
+ pipette.aspirate(100, plate["A1"])
+ pipette.dispense(100, plate["B1"])
+ pipette.drop_tip()
.. tab:: OT-2
@@ -168,29 +171,29 @@ This protocol uses some :ref:`building block commands ` to t
from opentrons import protocol_api
- metadata = {'apiLevel': '2.14'}
+ metadata = {"apiLevel": "|apiLevel|"}
def run(protocol: protocol_api.ProtocolContext):
plate = protocol.load_labware(
- load_name='corning_96_wellplate_360ul_flat',
+ load_name="corning_96_wellplate_360ul_flat",
location=1)
tiprack_1 = protocol.load_labware(
- load_name='opentrons_96_tiprack_300ul',
+ load_name="opentrons_96_tiprack_300ul",
location=2)
p300 = protocol.load_instrument(
- instrument_name='p300_single',
- mount='left',
+ instrument_name="p300_single",
+ mount="left",
tip_racks=[tiprack_1])
p300.pick_up_tip()
- p300.aspirate(100, plate['A1'])
- p300.dispense(100, plate['B1'])
+ p300.aspirate(100, plate["A1"])
+ p300.dispense(100, plate["B1"])
p300.drop_tip()
Advanced Method
---------------
-This protocol accomplishes the same thing as the previous example, but does it a little more efficiently. Notice how it uses the :py:meth:`.InstrumentContext.transfer` method to move liquid between well plates. The source and destination well arguments (e.g., ``plate['A1'], plate['B1']``) are part of ``transfer()`` method parameters. You don't need separate calls to ``aspirate`` or ``dispense`` here.
+This protocol accomplishes the same thing as the previous example, but does it a little more efficiently. Notice how it uses the :py:meth:`.InstrumentContext.transfer` method to move liquid between well plates. The source and destination well arguments (e.g., ``plate["A1"], plate["B1"]``) are part of ``transfer()`` method parameters. You don't need separate calls to ``aspirate`` or ``dispense`` here.
.. tabs::
@@ -201,21 +204,22 @@ This protocol accomplishes the same thing as the previous example, but does it a
from opentrons import protocol_api
- requirements = {'robotType': 'Flex', 'apiLevel': '|apiLevel|'}
+ requirements = {"robotType": "Flex", "apiLevel": "|apiLevel|"}
def run(protocol: protocol_api.ProtocolContext):
plate = protocol.load_labware(
- load_name='corning_96_wellplate_360ul_flat',
- location='D1')
+ load_name="corning_96_wellplate_360ul_flat",
+ location="D1")
tiprack_1 = protocol.load_labware(
- load_name='opentrons_flex_96_tiprack_200ul',
- location='D2')
- pipette_1 = protocol.load_instrument(
- instrument_name='flex_1channel_1000',
- mount='left',
+ load_name="opentrons_flex_96_tiprack_200ul",
+ location="D2")
+ trash = protocol.load_trash_bin("A3")
+ pipette = protocol.load_instrument(
+ instrument_name="flex_1channel_1000",
+ mount="left",
tip_racks=[tiprack_1])
# transfer 100 µL from well A1 to well B1
- pipette_1.transfer(100, plate['A1'], plate['B1'])
+ pipette.transfer(100, plate["A1"], plate["B1"])
.. tab:: OT-2
@@ -224,21 +228,21 @@ This protocol accomplishes the same thing as the previous example, but does it a
from opentrons import protocol_api
- metadata = {'apiLevel': '2.14'}
+ metadata = {"apiLevel": "|apiLevel|"}
def run(protocol: protocol_api.ProtocolContext):
plate = protocol.load_labware(
- load_name='corning_96_wellplate_360ul_flat',
+ load_name="corning_96_wellplate_360ul_flat",
location=1)
tiprack_1 = protocol.load_labware(
- load_name='opentrons_96_tiprack_300ul',
+ load_name="opentrons_96_tiprack_300ul",
location=2)
p300 = protocol.load_instrument(
- instrument_name='p300_single',
- mount='left',
+ instrument_name="p300_single",
+ mount="left",
tip_racks=[tiprack_1])
# transfer 100 µL from well A1 to well B1
- p300.transfer(100, plate['A1'], plate['B1'])
+ p300.transfer(100, plate["A1"], plate["B1"])
Loops
@@ -257,21 +261,22 @@ When used in a protocol, loops automate repetitive steps such as aspirating and
from opentrons import protocol_api
- requirements = {'robotType': 'Flex', 'apiLevel':'|apiLevel|'}
+ requirements = {"robotType": "Flex", "apiLevel":"|apiLevel|"}
def run(protocol: protocol_api.ProtocolContext):
plate = protocol.load_labware(
- load_name='corning_96_wellplate_360ul_flat',
- location='D1')
+ load_name="corning_96_wellplate_360ul_flat",
+ location="D1")
tiprack_1 = protocol.load_labware(
- load_name='opentrons_flex_96_tiprack_200ul',
- location='D2')
+ load_name="opentrons_flex_96_tiprack_200ul",
+ location="D2")
reservoir = protocol.load_labware(
- load_name='usascientific_12_reservoir_22ml',
- location='D3')
- pipette_1 = protocol.load_instrument(
- instrument_name='flex_1channel_1000',
- mount='left',
+ load_name="usascientific_12_reservoir_22ml",
+ location="D3")
+ trash = protocol.load_trash_bin("A3")
+ pipette = protocol.load_instrument(
+ instrument_name="flex_1channel_1000",
+ mount="left",
tip_racks=[tiprack_1])
# distribute 20 µL from reservoir:A1 -> plate:row:1
@@ -279,7 +284,7 @@ When used in a protocol, loops automate repetitive steps such as aspirating and
# etc...
# range() starts at 0 and stops before 8, creating a range of 0-7
for i in range(8):
- pipette_1.distribute(200, reservoir.wells()[i], plate.rows()[i])
+ pipette.distribute(200, reservoir.wells()[i], plate.rows()[i])
.. tab:: OT-2
@@ -288,21 +293,21 @@ When used in a protocol, loops automate repetitive steps such as aspirating and
from opentrons import protocol_api
- metadata = {'apiLevel': '2.14'}
+ metadata = {"apiLevel": "|apiLevel|"}
def run(protocol: protocol_api.ProtocolContext):
plate = protocol.load_labware(
- load_name='corning_96_wellplate_360ul_flat',
+ load_name="corning_96_wellplate_360ul_flat",
location=1)
tiprack_1 = protocol.load_labware(
- load_name='opentrons_96_tiprack_300ul',
+ load_name="opentrons_96_tiprack_300ul",
location=2)
reservoir = protocol.load_labware(
- load_name='usascientific_12_reservoir_22ml',
+ load_name="usascientific_12_reservoir_22ml",
location=4)
p300 = protocol.load_instrument(
- instrument_name='p300_single',
- mount='left',
+ instrument_name="p300_single",
+ mount="left",
tip_racks=[tiprack_1])
# distribute 20 µL from reservoir:A1 -> plate:row:1
@@ -328,33 +333,34 @@ Opentrons electronic pipettes can do some things that a human cannot do with a p
from opentrons import protocol_api
- requirements = {'robotType': 'Flex', 'apiLevel':'|apiLevel|'}
+ requirements = {"robotType": "Flex", "apiLevel":"|apiLevel|"}
def run(protocol: protocol_api.ProtocolContext):
plate = protocol.load_labware(
- load_name='corning_96_wellplate_360ul_flat',
- location='D1')
+ load_name="corning_96_wellplate_360ul_flat",
+ location="D1")
tiprack_1 = protocol.load_labware(
- load_name='opentrons_flex_96_tiprack_200ul',
- location='D2')
+ load_name="opentrons_flex_96_tiprack_1000ul",
+ location="D2")
reservoir = protocol.load_labware(
- load_name='usascientific_12_reservoir_22ml',
- location='D3')
- pipette_1 = protocol.load_instrument(
- instrument_name='flex_1channel_1000',
- mount='left',
+ load_name="usascientific_12_reservoir_22ml",
+ location="D3")
+ trash = protocol.load_trash_bin("A3")
+ pipette = protocol.load_instrument(
+ instrument_name="flex_1channel_1000",
+ mount="left",
tip_racks=[tiprack_1])
- pipette_1.pick_up_tip()
+ pipette.pick_up_tip()
# aspirate from the first 5 wells
- for well in reservoir.wells()[:4]:
- pipette_1.aspirate(volume=35, location=well)
- pipette_1.air_gap(10)
-
- pipette_1.dispense(225, plate['A1'])
+ for well in reservoir.wells()[:5]:
+ pipette.aspirate(volume=35, location=well)
+ pipette.air_gap(10)
- pipette_1.return_tip()
+ pipette.dispense(225, plate["A1"])
+
+ pipette.return_tip()
.. tab:: OT-2
@@ -363,31 +369,31 @@ Opentrons electronic pipettes can do some things that a human cannot do with a p
from opentrons import protocol_api
- metadata = {'apiLevel': '2.14'}
+ metadata = {"apiLevel": "|apiLevel|"}
def run(protocol: protocol_api.ProtocolContext):
plate = protocol.load_labware(
- load_name='corning_96_wellplate_360ul_flat',
+ load_name="corning_96_wellplate_360ul_flat",
location=1)
tiprack_1 = protocol.load_labware(
- load_name='opentrons_96_tiprack_300ul',
+ load_name="opentrons_96_tiprack_300ul",
location=2)
reservoir = protocol.load_labware(
- load_name='usascientific_12_reservoir_22ml',
+ load_name="usascientific_12_reservoir_22ml",
location=3)
p300 = protocol.load_instrument(
- instrument_name='p300_single',
- mount='right',
+ instrument_name="p300_single",
+ mount="right",
tip_racks=[tiprack_1])
p300.pick_up_tip()
# aspirate from the first 5 wells
- for well in reservoir.wells()[:4]:
+ for well in reservoir.wells()[:5]:
p300.aspirate(volume=35, location=well)
p300.air_gap(10)
- p300.dispense(225, plate['A1'])
+ p300.dispense(225, plate["A1"])
p300.return_tip()
@@ -407,27 +413,28 @@ This protocol dispenses diluent to all wells of a Corning 96-well plate. Next, i
from opentrons import protocol_api
- requirements = {'robotType': 'Flex', 'apiLevel': '|apiLevel|'}
+ requirements = {"robotType": "Flex", "apiLevel": "|apiLevel|"}
def run(protocol: protocol_api.ProtocolContext):
plate = protocol.load_labware(
- load_name='corning_96_wellplate_360ul_flat',
- location='D1')
+ load_name="corning_96_wellplate_360ul_flat",
+ location="D1")
tiprack_1 = protocol.load_labware(
- load_name='opentrons_flex_96_tiprack_200ul',
- location='D2')
+ load_name="opentrons_flex_96_tiprack_200ul",
+ location="D2")
tiprack_2 = protocol.load_labware(
- load_name='opentrons_flex_96_tiprack_200ul',
- location='D3')
+ load_name="opentrons_flex_96_tiprack_200ul",
+ location="D3")
reservoir = protocol.load_labware(
- load_name='usascientific_12_reservoir_22ml',
- location='C1')
- pipette_1 = protocol.load_instrument(
- instrument_name='flex_1channel_1000',
- mount='left',
+ load_name="usascientific_12_reservoir_22ml",
+ location="C1")
+ trash = protocol.load_trash_bin("A3")
+ pipette = protocol.load_instrument(
+ instrument_name="flex_1channel_1000",
+ mount="left",
tip_racks=[tiprack_1, tiprack_2])
# Dispense diluent
- pipette_1.distribute(50, reservoir['A12'], plate.wells())
+ pipette.distribute(50, reservoir["A12"], plate.wells())
# loop through each row
for i in range(8):
@@ -436,10 +443,10 @@ This protocol dispenses diluent to all wells of a Corning 96-well plate. Next, i
row = plate.rows()[i]
# transfer 30 µL of source to first well in column
- pipette_1.transfer(30, source, row[0], mix_after=(3, 25))
+ pipette.transfer(30, source, row[0], mix_after=(3, 25))
# dilute the sample down the column
- pipette_1.transfer(
+ pipette.transfer(
30, row[:11], row[1:],
mix_after=(3, 25))
@@ -450,27 +457,27 @@ This protocol dispenses diluent to all wells of a Corning 96-well plate. Next, i
from opentrons import protocol_api
- metadata = {'apiLevel': '2.14'}
+ metadata = {"apiLevel": "|apiLevel|"}
def run(protocol: protocol_api.ProtocolContext):
plate = protocol.load_labware(
- load_name='corning_96_wellplate_360ul_flat',
+ load_name="corning_96_wellplate_360ul_flat",
location=1)
tiprack_1 = protocol.load_labware(
- load_name='opentrons_96_tiprack_300ul',
+ load_name="opentrons_96_tiprack_300ul",
location=2)
tiprack_2 = protocol.load_labware(
- load_name='opentrons_96_tiprack_300ul',
+ load_name="opentrons_96_tiprack_300ul",
location=3)
reservoir = protocol.load_labware(
- load_name='usascientific_12_reservoir_22ml',
+ load_name="usascientific_12_reservoir_22ml",
location=4)
p300 = protocol.load_instrument(
- instrument_name='p300_single',
- mount='right',
+ instrument_name="p300_single",
+ mount="right",
tip_racks=[tiprack_1, tiprack_2])
# Dispense diluent
- p300.distribute(50, reservoir['A12'], plate.wells())
+ p300.distribute(50, reservoir["A12"], plate.wells())
# loop through each row
for i in range(8):
@@ -503,24 +510,25 @@ This protocol dispenses different volumes of liquids to a well plate and automat
from opentrons import protocol_api
- requirements = {'robotType': 'Flex', 'apiLevel': '2.15'}
+ requirements = {"robotType": "Flex", "apiLevel": "|apiLevel|"}
def run(protocol: protocol_api.ProtocolContext):
plate = protocol.load_labware(
- load_name='corning_96_wellplate_360ul_flat',
- location='D1')
+ load_name="corning_96_wellplate_360ul_flat",
+ location="D1")
tiprack_1 = protocol.load_labware(
- load_name='opentrons_flex_96_tiprack_200ul',
- location='D2')
+ load_name="opentrons_flex_96_tiprack_200ul",
+ location="D2")
tiprack_2 = protocol.load_labware(
- load_name='opentrons_flex_96_tiprack_200ul',
- location='D3')
+ load_name="opentrons_flex_96_tiprack_200ul",
+ location="D3")
reservoir = protocol.load_labware(
- load_name='usascientific_12_reservoir_22ml',
- location='C1')
- pipette_1 = protocol.load_instrument(
- instrument_name='flex_1channel_1000',
- mount='right',
+ load_name="usascientific_12_reservoir_22ml",
+ location="C1")
+ trash = protocol.load_trash_bin("A3")
+ pipette = protocol.load_instrument(
+ instrument_name="flex_1channel_1000",
+ mount="right",
tip_racks=[tiprack_1, tiprack_2])
# Volume amounts are for demonstration purposes only
@@ -539,7 +547,7 @@ This protocol dispenses different volumes of liquids to a well plate and automat
89, 90, 91, 92, 93, 94, 95, 96
]
- pipette_1.distribute(water_volumes, reservoir['A12'], plate.wells())
+ pipette.distribute(water_volumes, reservoir["A12"], plate.wells())
.. tab:: OT-2
@@ -547,24 +555,24 @@ This protocol dispenses different volumes of liquids to a well plate and automat
:substitutions:
from opentrons import protocol_api
- metadata = {'apiLevel': '2.14'}
+ metadata = {"apiLevel": "|apiLevel|"}
def run(protocol: protocol_api.ProtocolContext):
plate = protocol.load_labware(
- load_name='corning_96_wellplate_360ul_flat',
+ load_name="corning_96_wellplate_360ul_flat",
location=1)
tiprack_1 = protocol.load_labware(
- load_name='opentrons_96_tiprack_300ul',
+ load_name="opentrons_96_tiprack_300ul",
location=2)
tiprack_2 = protocol.load_labware(
- load_name='opentrons_96_tiprack_300ul',
+ load_name="opentrons_96_tiprack_300ul",
location=3)
reservoir = protocol.load_labware(
- load_name='usascientific_12_reservoir_22ml',
+ load_name="usascientific_12_reservoir_22ml",
location=4)
p300 = protocol.load_instrument(
- instrument_name='p300_single',
- mount='right',
+ instrument_name="p300_single",
+ mount="right",
tip_racks=[tiprack_1, tiprack_2])
# Volume amounts are for demonstration purposes only
@@ -583,4 +591,4 @@ This protocol dispenses different volumes of liquids to a well plate and automat
89, 90, 91, 92, 93, 94, 95, 96
]
- p300.distribute(water_volumes, reservoir['A12'], plate.wells())
+ p300.distribute(water_volumes, reservoir["A12"], plate.wells())
diff --git a/api/docs/v2/new_labware.rst b/api/docs/v2/new_labware.rst
index 2a850308d3e..50428d4a232 100644
--- a/api/docs/v2/new_labware.rst
+++ b/api/docs/v2/new_labware.rst
@@ -10,7 +10,7 @@ Labware are the durable or consumable items that you work with, reuse, or discar
.. note::
- Code snippets use coordinate deck slot locations (e.g. ``'D1'``, ``'D2'``), like those found on Flex. If you have an OT-2 and are using API version 2.14 or earlier, replace the coordinate with its numeric OT-2 equivalent. For example, slot D1 on Flex corresponds to slot 1 on an OT-2. See :ref:`deck-slots` for more information.
+ Code snippets use coordinate deck slot locations (e.g. ``"D1"``, ``"D2"``), like those found on Flex. If you have an OT-2 and are using API version 2.14 or earlier, replace the coordinate with its numeric OT-2 equivalent. For example, slot D1 on Flex corresponds to slot 1 on an OT-2. See :ref:`deck-slots` for more information.
*************
Labware Types
@@ -32,6 +32,8 @@ After you've created your labware, save it as a ``.json`` file and add it to the
If other people need to use your custom labware definition, they must also add it to their Opentrons App.
+.. _loading-labware:
+
***************
Loading Labware
***************
@@ -60,14 +62,14 @@ Similar to the code sample in :ref:`overview-section-v2`, here's how you use the
.. code-block:: python
#Flex
- tiprack = protocol.load_labware('opentrons_flex_96_tiprack_200ul', 'D1')
- plate = protocol.load_labware('corning_96_wellplate_360ul_flat', 'D2')
+ tiprack = protocol.load_labware("opentrons_flex_96_tiprack_200ul", "D1")
+ plate = protocol.load_labware("corning_96_wellplate_360ul_flat", "D2")
.. code-block:: python
#OT-2
- tiprack = protocol.load_labware('opentrons_96_tiprack_300ul', '1')
- plate = protocol.load_labware('corning_96_wellplate_360ul_flat', '2')
+ tiprack = protocol.load_labware("opentrons_96_tiprack_300ul", "1")
+ plate = protocol.load_labware("corning_96_wellplate_360ul_flat", "2")
.. versionadded:: 2.0
@@ -80,9 +82,9 @@ When the ``load_labware`` method loads labware into your protocol, it returns a
The ``load_labware`` method includes an optional ``label`` argument. You can use it to identify labware with a descriptive name. If used, the label value is displayed in the Opentrons App. For example::
tiprack = protocol.load_labware(
- load_name='corning_96_wellplate_360ul_flat',
- location='D1',
- label='any-name-you-want')
+        load_name="opentrons_flex_96_tiprack_200ul",
+ location="D1",
+ label="any-name-you-want")
.. _labware-on-adapters:
@@ -98,9 +100,9 @@ Loading Separately
The ``load_adapter()`` method is available on ``ProtocolContext`` and module contexts. It behaves similarly to ``load_labware()``, requiring the load name and location for the desired adapter. Load a module, adapter, and labware with separate calls to specify each layer of the physical stack of components individually::
- hs_mod = protocol.load_module('heaterShakerModuleV1', 'D1')
- hs_adapter = hs_mod.load_adapter('opentrons_96_flat_bottom_adapter')
- hs_plate = hs_adapter.load_labware('nest_96_wellplate_200ul_flat')
+ hs_mod = protocol.load_module("heaterShakerModuleV1", "D1")
+ hs_adapter = hs_mod.load_adapter("opentrons_96_flat_bottom_adapter")
+ hs_plate = hs_adapter.load_labware("nest_96_wellplate_200ul_flat")
.. versionadded:: 2.15
The ``load_adapter()`` method.
@@ -111,8 +113,8 @@ Loading Together
Use the ``adapter`` argument of ``load_labware()`` to load an adapter at the same time as labware. For example, to load the same 96-well plate and adapter from the previous section at once::
hs_plate = hs_mod.load_labware(
- name='nest_96_wellplate_200ul_flat',
- adapter='opentrons_96_flat_bottom_adapter'
+ name="nest_96_wellplate_200ul_flat",
+ adapter="opentrons_96_flat_bottom_adapter"
)
.. versionadded:: 2.15
@@ -121,7 +123,7 @@ Use the ``adapter`` argument of ``load_labware()`` to load an adapter at the sam
The API also has some "combination" labware definitions, which treat the adapter and labware as a unit::
hs_combo = hs_mod.load_labware(
- 'opentrons_96_flat_bottom_adapter_nest_wellplate_200ul_flat'
+ "opentrons_96_flat_bottom_adapter_nest_wellplate_200ul_flat"
)
Loading labware this way prevents you from :ref:`moving the labware ` onto or off of the adapter, so it's less flexible than loading the two separately. Avoid using combination definitions unless your protocol specifies an ``apiLevel`` of 2.14 or lower.
@@ -137,9 +139,9 @@ Well Ordering
You need to select which wells to transfer liquids to and from over the course of a protocol.
-Rows of wells on a labware have labels that are capital letters starting with A. For instance, an 96-well plate has 8 rows, labeled ``'A'`` through ``'H'``.
+Rows of wells on a labware have labels that are capital letters starting with A. For instance, a 96-well plate has 8 rows, labeled ``"A"`` through ``"H"``.
-Columns of wells on a labware have labels that are numbers starting with 1. For instance, a 96-well plate has columns ``'1'`` through ``'12'``.
+Columns of wells on a labware have labels that are numbers starting with 1. For instance, a 96-well plate has columns ``"1"`` through ``"12"``.
All well-accessing functions start with the well at the top left corner of the labware. The ending well is in the bottom right. The order of travel from top left to bottom right depends on which function you use.
@@ -149,7 +151,7 @@ The code in this section assumes that ``plate`` is a 24-well plate. For example:
.. code-block:: python
- plate = protocol.load_labware('corning_24_wellplate_3.4ml_flat', location='D1')
+ plate = protocol.load_labware("corning_24_wellplate_3.4ml_flat", location="D1")
.. _well-accessor-methods:
@@ -176,28 +178,30 @@ The API provides many different ways to access wells inside labware. Different m
- ``[[labware:A1, labware:B1...], [labware:A2, labware:B2...]]``
* - :py:meth:`.Labware.wells_by_name`
- Dictionary with well names as keys.
- - ``{'A1': labware:A1, 'B1': labware:B1}``
+ - ``{"A1": labware:A1, "B1": labware:B1}``
* - :py:meth:`.Labware.rows_by_name`
- Dictionary with row names as keys.
- - ``{'A': [labware:A1, labware:A2...], 'B': [labware:B1, labware:B2...]}``
+ - ``{"A": [labware:A1, labware:A2...], "B": [labware:B1, labware:B2...]}``
* - :py:meth:`.Labware.columns_by_name`
- Dictionary with column names as keys.
- - ``{'1': [labware:A1, labware:B1...], '2': [labware:A2, labware:B2...]}``
+ - ``{"1": [labware:A1, labware:B1...], "2": [labware:A2, labware:B2...]}``
Accessing Individual Wells
==========================
+.. _well-dictionary-access:
+
Dictionary Access
-----------------
-The simplest way to refer to a single well is by its name, like A1 or D6. :py:meth:`.Labware.wells_by_name` accomplishes this. This is such a common task that the API also has an equivalent shortcut: dictionary indexing.
+The simplest way to refer to a single well is by its :py:obj:`.well_name`, like A1 or D6. Referencing a particular key in the result of :py:meth:`.Labware.wells_by_name` accomplishes this. This is such a common task that the API also has an equivalent shortcut: dictionary indexing.
.. code-block:: python
- a1 = plate.wells_by_name()['A1']
- d6 = plate['D6'] # dictionary indexing
+ a1 = plate.wells_by_name()["A1"]
+ d6 = plate["D6"] # dictionary indexing
-If a well does not exist in the labware, such as ``plate['H12']`` on a 24-well plate, the API will raise a ``KeyError``. In contrast, it would be a valid reference on a standard 96-well plate.
+If a well does not exist in the labware, such as ``plate["H12"]`` on a 24-well plate, the API will raise a ``KeyError``. In contrast, it would be a valid reference on a standard 96-well plate.
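+
+As a minimal sketch (assuming ``plate`` is the 24-well plate loaded above), you can check :py:meth:`.Labware.wells_by_name` before indexing if you want to guard against a ``KeyError``::
+
+    # "H12" is not a key in the 24-well plate's wells_by_name() dictionary
+    if "H12" in plate.wells_by_name():
+        well = plate["H12"]
+    else:
+        protocol.comment("H12 is not a well in this labware.")
+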
.. versionadded:: 2.0
@@ -227,9 +231,9 @@ Use :py:meth:`.Labware.rows_by_name` to access a specific row of wells or :py:m
.. code-block:: python
- row_dict = plate.rows_by_name()['A']
+ row_dict = plate.rows_by_name()["A"]
row_list = plate.rows()[0] # equivalent to the line above
- column_dict = plate.columns_by_name()['1']
+ column_dict = plate.columns_by_name()["1"]
column_list = plate.columns()[0] # equivalent to the line above
print('Column "1" has', len(column_dict), 'wells') # Column "1" has 4 wells
@@ -237,15 +241,15 @@ Use :py:meth:`.Labware.rows_by_name` to access a specific row of wells or :py:m
Since these methods return either lists or dictionaries, you can iterate through them as you would regular Python data structures.
-For example, to transfer 50 µL of liquid from the first well of a reservoir to each of the wells of row ``'A'`` on a plate::
+For example, to transfer 50 µL of liquid from the first well of a reservoir to each of the wells of row ``"A"`` on a plate::
for well in plate.rows()[0]:
- pipette.transfer(reservoir['A1'], well, 50)
+        pipette.transfer(50, reservoir["A1"], well)
Equivalently, using ``rows_by_name``::
- for well in plate.rows_by_name()['A'].values():
- pipette.transfer(reservoir['A1'], well, 50)
+    for well in plate.rows_by_name()["A"]:
+        pipette.transfer(50, reservoir["A1"], well)
.. versionadded:: 2.0
@@ -331,8 +335,8 @@ Use :py:attr:`.Well.depth` to get the distance in mm between the very top of the
.. code-block:: python
:substitutions:
- plate = protocol.load_labware('corning_96_wellplate_360ul_flat', 'D1')
- depth = plate['A1'].depth # 10.67
+ plate = protocol.load_labware("corning_96_wellplate_360ul_flat", "D1")
+ depth = plate["A1"].depth # 10.67
Diameter
========
@@ -342,8 +346,8 @@ Use :py:attr:`.Well.diameter` to get the diameter of a given well in mm. Since d
.. code-block:: python
:substitutions:
- plate = protocol.load_labware('corning_96_wellplate_360ul_flat', 'D1')
- diameter = plate['A1'].diameter # 6.86
+ plate = protocol.load_labware("corning_96_wellplate_360ul_flat", "D1")
+ diameter = plate["A1"].diameter # 6.86
Length
======
@@ -353,8 +357,8 @@ Use :py:attr:`.Well.length` to get the length of a given well in mm. Length is d
.. code-block:: python
:substitutions:
- plate = protocol.load_labware('nest_12_reservoir_15ml', 'D1')
- length = plate['A1'].length # 8.2
+ plate = protocol.load_labware("nest_12_reservoir_15ml", "D1")
+ length = plate["A1"].length # 8.2
Width
@@ -366,8 +370,8 @@ Use :py:attr:`.Well.width` to get the width of a given well in mm. Width is defi
.. code-block:: python
:substitutions:
- plate = protocol.load_labware('nest_12_reservoir_15ml', 'D1')
- width = plate['A1'].width # 71.2
+ plate = protocol.load_labware("nest_12_reservoir_15ml", "D1")
+ width = plate["A1"].width # 71.2
.. versionadded:: 2.9
diff --git a/api/docs/v2/new_modules.rst b/api/docs/v2/new_modules.rst
index 5bd9912213f..956a2bc7989 100644
--- a/api/docs/v2/new_modules.rst
+++ b/api/docs/v2/new_modules.rst
@@ -33,5 +33,5 @@ Pages in this section of the documentation cover:
.. note::
- Throughout these pages, most code examples use coordinate deck slot locations (e.g. ``'D1'``, ``'D2'``), like those found on Flex. If you have an OT-2 and are using API version 2.14 or earlier, replace the coordinate with its numeric OT-2 equivalent. For example, slot D1 on Flex corresponds to slot 1 on an OT-2. See :ref:`deck-slots` for more information.
+ Throughout these pages, most code examples use coordinate deck slot locations (e.g. ``"D1"``, ``"D2"``), like those found on Flex. If you have an OT-2 and are using API version 2.14 or earlier, replace the coordinate with its numeric OT-2 equivalent. For example, slot D1 on Flex corresponds to slot 1 on an OT-2. See :ref:`deck-slots` for more information.
diff --git a/api/docs/v2/new_pipette.rst b/api/docs/v2/new_pipette.rst
index 9fafc2e5c95..dfeee1ed169 100644
--- a/api/docs/v2/new_pipette.rst
+++ b/api/docs/v2/new_pipette.rst
@@ -2,471 +2,27 @@
.. _new-pipette:
-########
+********
Pipettes
-########
+********
-When writing a protocol, you must inform the Protocol API about the pipettes you will be using on your robot. The :py:meth:`.ProtocolContext.load_instrument` function provides this information and returns an :py:class:`.InstrumentContext` object.
-
-For information about liquid handling, see :ref:`v2-atomic-commands` and :ref:`v2-complex-commands`.
-
-.. _new-create-pipette:
-
-Loading Pipettes
-================
-
-As noted above, you call the :py:meth:`~.ProtocolContext.load_instrument` method to load a pipette. This method also requires the :ref:`pipette's API load name `, its left or right mount position, and (optionally) a list of associated tip racks. Even if you don't use the pipette anywhere else in your protocol, the Opentrons App and the robot won't let you start the protocol run until all pipettes loaded by ``load_instrument()`` are attached properly.
-
-Loading Flex 1- and 8-Channel Pipettes
---------------------------------------
-
-This code sample loads a Flex 1-Channel Pipette in the left mount and a Flex 8-Channel Pipette in the right mount. Both pipettes are 1000 µL. Each pipette uses its own 1000 µL tip rack.
-
-.. code-block:: Python
- :substitutions:
-
- from opentrons import protocol_api
-
- requirements = {'robotType': 'Flex', 'apiLevel':'|apiLevel|'}
-
- def run(protocol: protocol_api.ProtocolContext):
- tiprack1 = protocol.load_labware(
- load_name='opentrons_flex_96_tiprack_1000ul', location='D1')
- tiprack2 = protocol.load_labware(
- load_name='opentrons_flex_96_tiprack_1000ul', location='C1')
- left = protocol.load_instrument(
- instrument_name='flex_1channel_1000',
- mount='left',
- tip_racks=[tiprack1])
- right = protocol.load_instrument(
- instrument_name='flex_8channel_1000',
- mount='right',
- tip_racks=[tiprack2])
-
-If you're writing a protocol that uses the Flex Gripper, you might think that this would be the place in your protocol to declare that. However, the gripper doesn't require ``load_instrument``! Whether your gripper requires a protocol is determined by the presence of :py:meth:`.ProtocolContext.move_labware` commands. See :ref:`moving-labware` for more details.
-
-Loading a Flex 96-Channel Pipette
----------------------------------
-
-This code sample loads the Flex 96-Channel Pipette. Because of its size, the Flex 96-Channel Pipette requires the left *and* right pipette mounts. You cannot use this pipette with 1- or 8-Channel Pipette in the same protocol or when these instruments are attached to the robot. To load the 96-Channel Pipette, specify its position as ``mount='left'`` as shown here:
-
-.. code-block:: python
-
- def run(protocol: protocol_api.ProtocolContext):
- left = protocol.load_instrument(
- instrument_name='flex_96channel_1000', mount='left')
-
-.. versionadded:: 2.15
-
-Loading OT-2 Pipettes
----------------------
-
-This code sample loads a P1000 Single-Channel GEN2 pipette in the left mount and a P300 Single-Channel GEN2 pipette in the right mount. Each pipette uses its own 1000 µL tip rack.
-
-.. code-block:: python
- :substitutions:
-
- from opentrons import protocol_api
-
- metadata = {'apiLevel': '2.14'}
-
- def run(protocol: protocol_api.ProtocolContext):
- tiprack1 = protocol.load_labware(
- load_name='opentrons_96_tiprack_1000ul', location=1)
- tiprack2 = protocol.load_labware(
- load_name='opentrons_96_tiprack_1000ul', location=2)
- left = protocol.load_instrument(
- instrument_name='p1000_single_gen2',
- mount='left',
- tip_racks=[tiprack1])
- right = protocol.load_instrument(
- instrument_name='p300_multi_gen2',
- mount='right',
- tip_racks=[tiprack1])
-
-.. versionadded:: 2.0
-
-.. _new-multichannel-pipettes:
-
-Multi-Channel Pipettes
-======================
-
-All building block and advanced commands work with single- and multi-channel pipettes.
-
-To keep the interface to the Opentrons API consistent between single- and multi-channel pipettes, commands treat the *backmost channel* (furthest from the door) of a multi-channel pipette as the location of the pipette. Location arguments to building block and advanced commands are specified for the backmost channel.
-
-Also, this means that offset changes (such as :py:meth:`.Well.top` or :py:meth:`.Well.bottom`) can be applied to the single specified well, and each pipette channel will be at the same position relative to the well that it is over.
-
-Finally, because there is only one motor in a multi-channel pipette, these pipettes always aspirate and dispense on all channels simultaneously.
-
-8-Channel, 96-Well Plate Example
---------------------------------
-
-To demonstrate these concepts, let's write a protocol that uses a Flex 8-Channel Pipette and a 96-well plate. We'll then aspirate and dispense a liquid to different locations on the same well plate. To start, let's load a pipette in the right mount and add our labware.
-
-.. code-block:: python
- :substitutions:
-
- from opentrons import protocol_api
-
- requirements = {'robotType': 'Flex', 'apiLevel':'|apiLevel|'}
-
- def run(protocol: protocol_api.ProtocolContext):
- # Load a tiprack for 1000 µL tips
- tiprack1 = protocol.load_labware(
- load_name='opentrons_flex_96_tiprack_1000ul', location='D1')
- # Load a 96-well plate
- plate = protocol.load_labware(
- load_name='corning_96_wellplate_360ul_flat', location='C1')
- # Load an 8-channel pipette on the right mount
- right = protocol.load_instrument(
- instrument_name='flex_8channel_1000',
- mount='right',
- tip_racks=[tiprack1])
-
-After loading our instruments and labware, let's tell the robot to pick up a pipette tip from location ``A1`` in ``tiprack1``::
-
- right.pick_up_tip()
-
-With the backmost pipette channel above location A1 on the tip rack, all eight channels are above the eight tip rack wells in column 1.
-
-After picking up a tip, let's tell the robot to aspirate 300 µL from the well plate at location ``A2``::
-
- right.aspirate(volume=300, location=plate['A2'])
-
-With the backmost pipette tip above location A2 on the well plate, all eight channels are above the eight wells in column 2.
-
-Finally, let's tell the robot to dispense 300 µL into the well plate at location ``A3``::
-
- right.dispense(volume=300, location=plate['A3'].top())
-
-With the backmost pipette tip above location A3, all eight channels are above the eight wells in column 3. The pipette will dispense liquid into all the wells simultaneously.
-
-8-Channel, 384-Well Plate Example
----------------------------------
-
-In general, you should specify wells in the first row of a well plate when using multi-channel pipettes. An exception to this rule is when using 384-well plates. The greater well density means the nozzles of a multi-channel pipette can only access every other well in a column. Specifying well A1 accesses every other well starting with the first (rows A, C, E, G, I, K, M, and O). Similarly, specifying well B1 also accesses every other well, but starts with the second (rows B, D, F, H, J, L, N, and P).
-
-To demonstrate these concepts, let's write a protocol that uses a Flex 8-Channel Pipette and a 384-well plate. We'll then aspirate and dispense a liquid to different locations on the same well plate. To start, let's load a pipette in the right mount and add our labware.
-
-.. code-block:: python
-
- def run(protocol: protocol_api.ProtocolContext):
- # Load a tiprack for 200 µL tips
- tiprack1 = protocol.load_labware(
- load_name='opentrons_flex_96_tiprack_200ul', location="D1")
- # Load a well plate
- plate = protocol.load_labware(
- load_name='corning_384_wellplate_112ul_flat', location="D2")
- # Load an 8-channel pipette on the right mount
- right = protocol.load_instrument(
- instrument_name='flex_8channel_1000',
- mount='right',
- tip_racks=[tiprack1])
-
-
-After loading our instruments and labware, let's tell the robot to pick up a pipette tip from location ``A1`` in ``tiprack1``::
-
- right.pick_up_tip()
-
-With the backmost pipette channel above location A1 on the tip rack, all eight channels are above the eight tip rack wells in column 1.
-
-After picking up a tip, let's tell the robot to aspirate 100 µL from the well plate at location ``A1``::
-
- right.aspirate(volume=100, location=plate['A1'])
-
-The eight pipette channels will only aspirate from every other well in the column: A1, C1, E1, G1, I1, K1, M1, and O1.
-
-Finally, let's tell the robot to dispense 100 µL into the well plate at location ``B1``::
-
- right.dispense(volume=100, location=plate['B1'])
-
-The eight pipette channels will only dispense into every other well in the column: B1, D1, F1, H1, J1, L1, N1, and P1.
-
-.. _pipette-tip-racks:
-
-Adding Tip Racks
-================
-
-The ``load_instrument()`` method includes the optional argument ``tip_racks``. This parameter accepts a list of tip rack labware objects, which lets you to specify as many tip racks as you want. The advantage of using ``tip_racks`` is twofold. First, associating tip racks with your pipette allows for automatic tip tracking throughout your protocol. Second, it removes the need to specify tip locations in the :py:meth:`.InstrumentContext.pick_up_tip` method. For example, let's start by loading loading some labware and instruments like this::
-
- def run(protocol: protocol_api.ProtocolContext):
- tiprack_left = protocol.load_labware(
- load_name='opentrons_flex_96_tiprack_200ul', location='D1')
- tiprack_right = protocol.load_labware(
- load_name='opentrons_flex_96_tiprack_200ul', location='D2')
- left_pipette = protocol.load_instrument(
- instrument_name='flex_8channel_1000', mount='left')
- right_pipette = protocol.load_instrument(
- instrument_name='flex_8channel_1000',
- mount='right',
- tip_racks=[tiprack_right])
-
-Let's pick up a tip with the left pipette. We need to specify the location as an argument of ``pick_up_tip()``, since we loaded the left pipette without a ``tip_racks`` argument.
-
-.. code-block:: python
-
- left_pipette.pick_up_tip(tiprack_left['A1'])
- left_pipette.drop_tip()
-
-But now you have to specify ``tiprack_left`` every time you call ``pick_up_tip``, which means you're doing all your own tip tracking::
-
- left_pipette.pick_up_tip(tiprack_left['A2'])
- left_pipette.drop_tip()
- left_pipette.pick_up_tip(tiprack_left['A3'])
- left_pipette.drop_tip()
-
-However, because you specified a tip rack location for the right pipette, the robot will automatically pick up from location ``A1`` of its associated tiprack::
-
- right_pipette.pick_up_tip()
- right_pipette.drop_tip()
-
-Additional calls to ``pick_up_tip`` will automatically progress through the tips in the right rack::
-
- right_pipette.pick_up_tip() # picks up from A2
- right_pipette.drop_tip()
- right_pipette.pick_up_tip() # picks up from A3
- right_pipette.drop_tip()
-
-See also, :ref:`v2-atomic-commands` and :ref:`v2-complex-commands`.
-
-.. versionadded:: 2.0
-
-.. _new-pipette-models:
-
-API Load Names
-==============
-
-The pipette's API load name (``instrument_name``) is the first parameter of the ``load_instrument()`` method. It tells your robot which attached pipette you're going to use in a protocol. The tables below list the API load names for the currently available Flex and OT-2 pipettes.
-
-.. tabs::
-
- .. tab:: Flex Pipettes
-
- +-------------------------+---------------+-------------------------+
- | Pipette Model | Volume (µL) | API Load Name |
- +=========================+===============+===+=====================+
- | Flex 1-Channel Pipette | 1–50 | ``flex_1channel_50`` |
- + +---------------+-------------------------+
- | | 5–1000 | ``flex_1channel_1000`` |
- +-------------------------+---------------+-------------------------+
- | Flex 8-Channel Pipette | 1–50 | ``flex_8channel_50`` |
- + +---------------+-------------------------+
- | | 5–1000 | ``flex_8channel_1000`` |
- +-------------------------+---------------+-------------------------+
- | Flex 96-Channel Pipette | 5–1000 | ``flex_96channel_1000`` |
- +-------------------------+---------------+-------------------------+
-
- .. tab:: OT-2 Pipettes
-
- +-----------------------------+--------------------+-----------------------+
- | Pipette Model | Volume (µL) | API Load Name |
- +=============================+====================+=======================+
- | P20 Single-Channel GEN2 | 1-20 | ``p20_single_gen2`` |
- +-----------------------------+ +-----------------------+
- | P20 Multi-Channel GEN2 | | ``p20_multi_gen2`` |
- +-----------------------------+--------------------+-----------------------+
- | P300 Single-Channel GEN2 | 20-300 | ``p300_single_gen2`` |
- +-----------------------------+ +-----------------------+
- | P300 Multi-Channel GEN2 | | ``p300_multi_gen2`` |
- +-----------------------------+--------------------+-----------------------+
- | P1000 Single-Channel GEN2 | 100-1000 | ``p1000_single_gen2`` |
- +-----------------------------+--------------------+-----------------------+
-
- See the OT-2 Pipette Generations section below if you're using GEN1 pipettes on an OT-2. The GEN1 family includes the P10, P50, and P300 single- and multi-channel pipettes, along with the P1000 single-chanel model.
-
-
-OT-2 Pipette Generations
-========================
-
-The OT-2 works with the GEN1 and GEN2 pipette models. The newer GEN2 pipettes have different volume ranges than the older GEN1 pipettes. With some exceptions, the volume ranges for GEN2 pipettes overlap those used by the GEN1 models. If your protocol specifies a GEN1 pipette, but you have a GEN2 pipette with a compatible volume range, you can still run your protocol. The OT-2 will consider the GEN2 pipette to have the same minimum volume as the GEN1 pipette. The following table lists the volume compatibility between the GEN2 and GEN1 pipettes.
-
-.. list-table::
- :header-rows: 1
-
- * - GEN2 Pipette
- - GEN1 Pipette
- - GEN1 Volume
- * - P20 Single-Channel GEN2
- - P10 Single-Channel GEN1
- - 1-10 µL
- * - P20 Multi-Channel GEN2
- - P10 Multi-Channel GEN1
- - 1-10 µL
- * - P300 Single-Channel GEN2
- - P300 Single-Channel GEN1
- - 30-300 µL
- * - P300 Multi-Channel GEN2
- - P300 Multi-Channel GEN1
- - 20-200 µL
- * - P1000 Single-Channel GEN2
- - P1000 Single-Channel GEN1
- - 100-1000 µL
-
-The single- and multi-channel P50 GEN1 pipettes are the exceptions here. If your protocol uses a P50 GEN1 pipette, there is no backward compatibility with a related GEN2 pipette. To replace a P50 GEN1 with a corresponding GEN2 pipette, edit your protocol to load a P20 Single-Channel GEN2 (for volumes below 20 µL) or a P300 Single-Channel GEN2 (for volumes between 20 and 50 µL).
-
-.. _pipette-volume-modes:
-
-Volume Modes
-============
-
-The Flex 1-Channel 50 µL and Flex 8-Channel 50 µL pipettes must operate in a low-volume mode to accurately dispense very small volumes of liquid. Set the volume mode by calling :py:meth:`.InstrumentContext.configure_for_volume` with the amount of liquid you plan to aspirate, in µL::
-
- pipette50.configure_for_volume(1)
- pipette50.pick_up_tip()
- pipette50.aspirate(1, plate["A1"])
+.. toctree::
+ pipettes/loading
+ pipettes/characteristics
+ pipettes/partial_tip_pickup
+ pipettes/volume_modes
-.. versionadded:: 2.15
+Opentrons pipettes are configurable devices used to move liquids throughout the working area during the execution of protocols. Flex and OT-2 each have their own pipettes, which are available for use in the Python API.
-Passing different values to ``configure_for_volume()`` changes the minimum and maximum volume of Flex 50 µL pipettes as follows:
+Pages in this section of the documentation cover:
-.. list-table::
- :header-rows: 1
- :widths: 2 3 3
-
- * - Value
- - Minimum Volume (µL)
- - Maximum Volume (µL)
- * - 1–4.9
- - 1
- - 30
- * - 5–50
- - 5
- - 50
-
-.. note::
- The pipette must not contain liquid when you call ``configure_for_volume()``, or the API will raise an error.
-
- Also, if the pipette is in a well location that may contain liquid, it will move upward to ensure it is not immersed in liquid before changing its mode. Calling ``configure_for_volume()`` *before* ``pick_up_tip()`` helps to avoid this situation.
-
-In a protocol that handles many different volumes, it's a good practice to call ``configure_for_volume()`` once for each :py:meth:`.transfer` or :py:meth:`.aspirate`, specifying the volume that you are about to handle. When operating with a list of volumes, nest ``configure_for_volume()`` inside a ``for`` loop to ensure that the pipette is properly configured for each volume:
-
-.. code-block:: python
-
- volumes = [1, 2, 3, 4, 1, 5, 2, 8]
- sources = plate.columns()[0]
- destinations = plate.columns()[1]
- for i in range(8):
- pipette50.configure_for_volume(volumes[i])
- pipette50.pick_up_tip()
- pipette50.aspirate(volume=volumes[i], location=sources[i])
- pipette50.dispense(location=destinations[i])
- pipette50.drop_tip()
-
-If you know that all your liquid handling will take place in a specific mode, then you can call ``configure_for_volume()`` just once with a representative volume. Or if all the volumes correspond to the pipette's default mode, you don't have to call ``configure_for_volume()`` at all.
-
-
-.. _new-plunger-flow-rates:
-
-Pipette Flow Rates
-==================
-
-Measured in µL/s, the flow rate determines how much liquid a pipette can aspirate, dispense, and blow out. Opentrons pipettes have their own default flow rates. The API lets you change the flow rate on a loaded :py:class:`.InstrumentContext` by altering the :py:obj:`.InstrumentContext.flow_rate` properties listed below.
-
-* Aspirate: ``InstrumentContext.flow_rate.aspirate``
-* Dispense: ``InstrumentContext.flow_rate.dispense``
-* Blow out: ``InstrumentContext.flow_rate.blow_out``
-
-These flow rate properties operate independently. This means you can specify different flow rates for each property within the same protocol. For example, let's load a simple protocol and set different flow rates for the attached pipette.
-
-.. code-block:: python
-
- def run(protocol: protocol_api.ProtocolContext):
- tiprack1 = protocol.load_labware(
- load_name='opentrons_flex_96_tiprack_1000ul', location='D1')
- pipette = protocol.load_instrument(
- instrument_name='flex_1channel_1000',
- mount='left',
- tip_racks=[tiprack1])
- plate = protocol.load_labware(
- load_name='corning_96_wellplate_360ul_flat', location='D3')
- pipette.pick_up_tip()
+ - :ref:`Loading pipettes ` into your protocol.
+ - :ref:`Pipette characteristics `, such as how fast they can move liquid and how they move around the deck.
+ - The :ref:`partial tip pickup ` configuration for the Flex 96-Channel Pipette, which uses only 8 channels for pipetting. Full and partial tip pickup can be combined in a single protocol.
+ - The :ref:`volume modes ` of Flex 50 µL pipettes, which must operate in low-volume mode to accurately dispense very small volumes of liquid.
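+
+For example, as a minimal sketch of the last point (assuming ``pipette50`` is a loaded Flex 50 µL pipette and ``plate`` is a loaded well plate), switching to low-volume mode before handling a 1 µL volume looks like this::
+
+    # set the volume mode before picking up a tip
+    pipette50.configure_for_volume(1)
+    pipette50.pick_up_tip()
+    pipette50.aspirate(1, plate["A1"])
+
+The dedicated pages in the list above cover each of these topics in more detail.
+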
-Let's tell the robot to aspirate, dispense, and blow out the liquid using default flow rates. Notice how you don't need to specify a ``flow_rate`` attribute to use the defaults::
-
- pipette.aspirate(200, plate['A1']) # 160 µL/s
- pipette.dispense(200, plate['A2']) # 160 µL/s
- pipette.blow_out() # 80 µL/s
-
-Now let's change the flow rates for each action::
-
- pipette.flow_rate.aspirate = 50
- pipette.flow_rate.dispense = 100
- pipette.flow_rate.blow_out = 75
- pipette.aspirate(200, plate['A1']) # 50 µL/s
- pipette.dispense(200, plate['A2']) # 100 µL/s
- pipette.blow_out() # 75 µL/s
-
-These flow rates will remain in effect until you change the ``flow_rate`` attribute again *or* call ``configure_for_volume()``. Calling ``configure_for_volume()`` always resets all pipette flow rates to the defaults for the mode that it sets.
-
-.. TODO add mode ranges and flow defaults to sections below
-
-.. note::
- In API version 2.13 and earlier, :py:obj:`.InstrumentContext.speed` offered similar functionality to ``.flow_rate``. It attempted to set the plunger speed in mm/s. Due to technical limitations, that speed could only be approximate. You must use ``.flow_rate`` in version 2.14 and later, and you should consider replacing older code that sets ``.speed``.
-
-.. versionadded:: 2.0
-
-
-Flex Pipette Flow Rates
------------------------
-
-The default flow rates for Flex pipettes depend on the maximum volume of the pipette and the capacity of the currently attached tip. For each pipette–tip configuration, the default flow rate is the same for aspirate, dispense, and blowout actions.
-
-.. list-table::
- :header-rows: 1
-
- * - Pipette Model
- - Tip Capacity (µL)
- - Flow Rate (µL/s)
- * - 50 µL (1- and 8-channel)
- - All capacities
- - 57
- * - 1000 µL (1-, 8-, and 96-channel)
- - 50
- - 478
- * - 1000 µL (1-, 8-, and 96-channel)
- - 200
- - 716
- * - 1000 µL (1-, 8-, and 96-channel)
- - 1000
- - 716
-
-
-Additionally, all Flex pipettes have a well bottom clearance of 1 mm for aspirate and dispense actions.
-
-.. _ot2-flow-rates:
+For information about liquid handling, see :ref:`v2-atomic-commands` and :ref:`v2-complex-commands`.
-OT-2 Pipette Flow Rates
------------------------
-The following table provides data on the default aspirate, dispense, and blowout flow rates (in µL/s) for OT-2 GEN2 pipettes. Default flow rates are the same across all three actions.
-.. list-table::
- :header-rows: 1
- * - Pipette Model
- - Volume (µL)
- - Flow Rates (µL/s)
- * - P20 Single-Channel GEN2
- - 1–20
- -
- * API v2.6 or higher: 7.56
- * API v2.5 or lower: 3.78
- * - P300 Single-Channel GEN2
- - 20–300
- -
- * API v2.6 or higher: 92.86
- * API v2.5 or lower: 46.43
- * - P1000 Single-Channel GEN2
- - 100–1000
- -
- * API v2.6 or higher: 274.7
- * API v2.5 or lower: 137.35
- * - P20 Multi-Channel GEN2
- - 1–20
- - 7.6
- * - P300 Multi-Channel GEN2
- - 20–300
- - 94
-
-Additionally, all OT-2 GEN2 pipettes have a default head speed of 400 mm/s and a well bottom clearance of 1 mm for aspirate and dispense actions.
diff --git a/api/docs/v2/new_protocol_api.rst b/api/docs/v2/new_protocol_api.rst
index 9ddd0ca6407..3bd6ac38658 100644
--- a/api/docs/v2/new_protocol_api.rst
+++ b/api/docs/v2/new_protocol_api.rst
@@ -2,41 +2,54 @@
.. _protocol-api-reference:
+***********************
API Version 2 Reference
-=======================
+***********************
.. _protocol_api-protocols-and-instruments:
-Protocols and Instruments
--------------------------
+Protocols
+=========
.. module:: opentrons.protocol_api
.. autoclass:: opentrons.protocol_api.ProtocolContext
:members:
- :exclude-members: location_cache, cleanup, clear_commands, load_waste_chute
+ :exclude-members: location_cache, cleanup, clear_commands, params
+Instruments
+===========
.. autoclass:: opentrons.protocol_api.InstrumentContext
:members:
:exclude-members: delay
-.. autoclass:: opentrons.protocol_api.Liquid
-
.. _protocol-api-labware:
-Labware and Wells
------------------
+Labware
+=======
.. autoclass:: opentrons.protocol_api.Labware
:members:
:exclude-members: next_tip, use_tips, previous_tip, return_tips
+..
+ The trailing ()s at the end of TrashBin and WasteChute here hide the __init__()
+ signatures, since users should never construct these directly.
+
+.. autoclass:: opentrons.protocol_api.TrashBin()
+
+.. autoclass:: opentrons.protocol_api.WasteChute()
+
+Wells and Liquids
+=================
.. autoclass:: opentrons.protocol_api.Well
:members:
:exclude-members: geometry
+.. autoclass:: opentrons.protocol_api.Liquid
+
.. _protocol-api-modules:
Modules
--------
+=======
.. autoclass:: opentrons.protocol_api.HeaterShakerContext
:members:
@@ -66,8 +79,8 @@ Modules
.. _protocol-api-types:
-Useful Types and Definitions
-----------------------------
+Useful Types
+============
..
The opentrons.types module contains a mixture of public Protocol API things and private internal things.
@@ -80,7 +93,7 @@ Useful Types and Definitions
:no-value:
Executing and Simulating Protocols
-----------------------------------
+==================================
.. automodule:: opentrons.execute
:members:
diff --git a/api/docs/v2/pipettes/characteristics.rst b/api/docs/v2/pipettes/characteristics.rst
new file mode 100644
index 00000000000..a91f58d55e2
--- /dev/null
+++ b/api/docs/v2/pipettes/characteristics.rst
@@ -0,0 +1,278 @@
+:og:description: Details on Opentrons pipette movement and flow rates.
+
+.. _pipette-characteristics:
+
+***********************
+Pipette Characteristics
+***********************
+
+Each Opentrons pipette has different capabilities, which you'll want to take advantage of in your protocols. This page covers some fundamental pipette characteristics.
+
+:ref:`new-multichannel-pipettes` gives examples of how multi-channel pipettes move around the deck by using just one of their channels as a reference point. Taking this into account is important for commanding your pipettes to perform actions in the correct locations.
+
+:ref:`new-plunger-flow-rates` discusses how quickly each type of pipette can handle liquids. The defaults are designed to operate quickly, based on the pipette's hardware and assuming that you're handling aqueous liquids. You can speed up or slow down a pipette's flow rate to suit your protocol's needs.
+
+Finally, the volume ranges of pipettes affect what you can do with them. The volume ranges for current pipettes are listed on the :ref:`Loading Pipettes <loading-pipettes>` page. The :ref:`ot2-pipette-generations` section of this page describes how the API behaves when running protocols that specify older OT-2 pipettes.
+
+.. _new-multichannel-pipettes:
+
+Multi-Channel Movement
+======================
+
+All :ref:`building block <v2-atomic-commands>` and :ref:`complex commands <v2-complex-commands>` work with single- and multi-channel pipettes.
+
+To keep the protocol API consistent when using single- and multi-channel pipettes, commands treat the back left channel of a multi-channel pipette as its *primary channel*. Location arguments of pipetting commands use the primary channel. The :py:meth:`.InstrumentContext.configure_nozzle_layout` method can change the pipette's primary channel, using its ``start`` parameter. See :ref:`partial-tip-pickup` for more information.
+
+With a pipette's default settings, you can generally access the wells indicated in the table below. Moving to any other well may cause the pipette to crash.
+
+.. list-table::
+ :header-rows: 1
+
+ * - Channels
+ - 96-well plate
+ - 384-well plate
+ * - 1
+ - Any well, A1–H12
+ - Any well, A1–P24
+ * - 8
+ - A1–A12
+ - A1–B24
+ * - 96
+ - A1 only
+ - A1–B2
+
+Also, you should apply any location offset, such as :py:meth:`.Well.top` or :py:meth:`.Well.bottom`, to the well accessed by the primary channel. Since all of the pipette's channels move together, each channel will have the same offset relative to the well that it is over.
+
+Finally, because each multi-channel pipette has only one motor, they always aspirate and dispense on all channels simultaneously.
+
+8-Channel, 96-Well Plate Example
+--------------------------------
+
+To demonstrate these concepts, let's write a protocol that uses a Flex 8-Channel Pipette and a 96-well plate. We'll then aspirate and dispense a liquid to different locations on the same well plate. To start, let's load a pipette in the right mount and add our labware.
+
+.. code-block:: python
+ :substitutions:
+
+ from opentrons import protocol_api
+
+ requirements = {"robotType": "Flex", "apiLevel":"|apiLevel|"}
+
+ def run(protocol: protocol_api.ProtocolContext):
+ # Load a tiprack for 1000 µL tips
+ tiprack1 = protocol.load_labware(
+ load_name="opentrons_flex_96_tiprack_1000ul", location="D1")
+ # Load a 96-well plate
+ plate = protocol.load_labware(
+ load_name="corning_96_wellplate_360ul_flat", location="C1")
+ # Load an 8-channel pipette on the right mount
+ right = protocol.load_instrument(
+ instrument_name="flex_8channel_1000",
+ mount="right",
+ tip_racks=[tiprack1])
+
+After loading our instruments and labware, let's tell the robot to pick up a pipette tip from location ``A1`` in ``tiprack1``::
+
+ right.pick_up_tip()
+
+With the backmost pipette channel above location A1 on the tip rack, all eight channels are above the eight tip rack wells in column 1.
+
+After picking up a tip, let's tell the robot to aspirate 300 µL from the well plate at location ``A2``::
+
+ right.aspirate(volume=300, location=plate["A2"])
+
+With the backmost pipette tip above location A2 on the well plate, all eight channels are above the eight wells in column 2.
+
+Finally, let's tell the robot to dispense 300 µL into the well plate at location ``A3``::
+
+ right.dispense(volume=300, location=plate["A3"].top())
+
+With the backmost pipette tip above location A3, all eight channels are above the eight wells in column 3. The pipette will dispense liquid into all the wells simultaneously.
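+
+As noted above, any location offset applies to the well beneath the primary channel, and every channel shares that offset. Here's a brief sketch, reusing the same pipette and plate, that offsets the aspirate and dispense positions (the volumes are only illustrative)::
+
+    # all 8 channels aspirate 2 mm above the bottoms of wells A4–H4
+    right.aspirate(volume=100, location=plate["A4"].bottom(z=2))
+    # all 8 channels dispense 2 mm below the tops of wells A5–H5
+    right.dispense(volume=100, location=plate["A5"].top(z=-2))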
+
+8-Channel, 384-Well Plate Example
+---------------------------------
+
+In general, you should specify wells in the first row of a well plate when using multi-channel pipettes. An exception to this rule is when using 384-well plates. The greater well density means the nozzles of a multi-channel pipette can only access every other well in a column. Specifying well A1 accesses every other well starting with the first (rows A, C, E, G, I, K, M, and O). Similarly, specifying well B1 also accesses every other well, but starts with the second (rows B, D, F, H, J, L, N, and P).
+
+To demonstrate these concepts, let's write a protocol that uses a Flex 8-Channel Pipette and a 384-well plate. We'll then aspirate and dispense a liquid to different locations on the same well plate. To start, let's load a pipette in the right mount and add our labware.
+
+.. code-block:: python
+
+ def run(protocol: protocol_api.ProtocolContext):
+ # Load a tiprack for 200 µL tips
+ tiprack1 = protocol.load_labware(
+ load_name="opentrons_flex_96_tiprack_200ul", location="D1")
+ # Load a well plate
+ plate = protocol.load_labware(
+ load_name="corning_384_wellplate_112ul_flat", location="D2")
+ # Load an 8-channel pipette on the right mount
+ right = protocol.load_instrument(
+ instrument_name="flex_8channel_1000",
+ mount="right",
+ tip_racks=[tiprack1])
+
+
+After loading our instruments and labware, let's tell the robot to pick up a pipette tip from location ``A1`` in ``tiprack1``::
+
+ right.pick_up_tip()
+
+With the backmost pipette channel above location A1 on the tip rack, all eight channels are above the eight tip rack wells in column 1.
+
+After picking up a tip, let's tell the robot to aspirate 100 µL from the well plate at location ``A1``::
+
+ right.aspirate(volume=100, location=plate["A1"])
+
+The eight pipette channels will only aspirate from every other well in the column: A1, C1, E1, G1, I1, K1, M1, and O1.
+
+Finally, let's tell the robot to dispense 100 µL into the well plate at location ``B1``::
+
+ right.dispense(volume=100, location=plate["B1"])
+
+The eight pipette channels will only dispense into every other well in the column: B1, D1, F1, H1, J1, L1, N1, and P1.
+
+
+.. _new-plunger-flow-rates:
+
+Pipette Flow Rates
+==================
+
+Measured in µL/s, the flow rate determines how much liquid a pipette can aspirate, dispense, and blow out. Opentrons pipettes have their own default flow rates. The API lets you change the flow rate on a loaded :py:class:`.InstrumentContext` by altering the :py:obj:`.InstrumentContext.flow_rate` properties listed below.
+
+* Aspirate: ``InstrumentContext.flow_rate.aspirate``
+* Dispense: ``InstrumentContext.flow_rate.dispense``
+* Blow out: ``InstrumentContext.flow_rate.blow_out``
+
+These flow rate properties operate independently. This means you can specify different flow rates for each property within the same protocol. For example, let's load a simple protocol and set different flow rates for the attached pipette.
+
+.. code-block:: python
+
+ def run(protocol: protocol_api.ProtocolContext):
+ tiprack1 = protocol.load_labware(
+ load_name="opentrons_flex_96_tiprack_1000ul", location="D1")
+ pipette = protocol.load_instrument(
+ instrument_name="flex_1channel_1000",
+ mount="left",
+ tip_racks=[tiprack1])
+ plate = protocol.load_labware(
+ load_name="corning_96_wellplate_360ul_flat", location="D3")
+ pipette.pick_up_tip()
+
+Let's tell the robot to aspirate, dispense, and blow out the liquid using default flow rates. Notice how you don't need to specify a ``flow_rate`` attribute to use the defaults::
+
+ pipette.aspirate(200, plate["A1"]) # 160 µL/s
+ pipette.dispense(200, plate["A2"]) # 160 µL/s
+ pipette.blow_out() # 80 µL/s
+
+Now let's change the flow rates for each action::
+
+ pipette.flow_rate.aspirate = 50
+ pipette.flow_rate.dispense = 100
+ pipette.flow_rate.blow_out = 75
+ pipette.aspirate(200, plate["A1"]) # 50 µL/s
+ pipette.dispense(200, plate["A2"]) # 100 µL/s
+ pipette.blow_out() # 75 µL/s
+
+These flow rates will remain in effect until you change the ``flow_rate`` attribute again *or* call ``configure_for_volume()``. Calling ``configure_for_volume()`` always resets all pipette flow rates to the defaults for the mode that it sets.
+
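+For example, here's a minimal sketch of that reset, assuming a Flex 50 µL pipette loaded as ``pipette50`` (a name not used elsewhere on this page)::
+
+    pipette50.flow_rate.aspirate = 10  # custom aspirate flow rate
+    pipette50.configure_for_volume(1)  # switches to low-volume mode and resets all flow rates
+    pipette50.flow_rate.aspirate = 10  # reapply the custom rate after changing modes
+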
+.. TODO add mode ranges and flow defaults to sections below
+
+.. note::
+ In API version 2.13 and earlier, :py:obj:`.InstrumentContext.speed` offered similar functionality to ``.flow_rate``. It attempted to set the plunger speed in mm/s. Due to technical limitations, that speed could only be approximate. You must use ``.flow_rate`` in version 2.14 and later, and you should consider replacing older code that sets ``.speed``.
+
+.. versionadded:: 2.0
+
+
+Flex Pipette Flow Rates
+-----------------------
+
+The default flow rates for Flex pipettes depend on the maximum volume of the pipette and the capacity of the currently attached tip. For each pipette–tip configuration, the default flow rate is the same for aspirate, dispense, and blowout actions.
+
+.. list-table::
+ :header-rows: 1
+
+ * - Pipette Model
+ - Tip Capacity (µL)
+ - Flow Rate (µL/s)
+ * - 50 µL (1- and 8-channel)
+ - All capacities
+ - 57
+ * - 1000 µL (1-, 8-, and 96-channel)
+ - 50
+ - 478
+ * - 1000 µL (1-, 8-, and 96-channel)
+ - 200
+ - 716
+ * - 1000 µL (1-, 8-, and 96-channel)
+ - 1000
+ - 716
+
+
+Additionally, all Flex pipettes have a well bottom clearance of 1 mm for aspirate and dispense actions.
+
+.. _ot2-flow-rates:
+
+OT-2 Pipette Flow Rates
+-----------------------
+
+The following table provides data on the default aspirate, dispense, and blowout flow rates (in µL/s) for OT-2 GEN2 pipettes. Default flow rates are the same across all three actions.
+
+.. list-table::
+ :header-rows: 1
+
+ * - Pipette Model
+ - Volume (µL)
+ - Flow Rates (µL/s)
+ * - P20 Single-Channel GEN2
+ - 1–20
+ -
+ * API v2.6 or higher: 7.56
+ * API v2.5 or lower: 3.78
+ * - P300 Single-Channel GEN2
+ - 20–300
+ -
+ * API v2.6 or higher: 92.86
+ * API v2.5 or lower: 46.43
+ * - P1000 Single-Channel GEN2
+ - 100–1000
+ -
+ * API v2.6 or higher: 274.7
+ * API v2.5 or lower: 137.35
+ * - P20 Multi-Channel GEN2
+ - 1–20
+ - 7.6
+ * - P300 Multi-Channel GEN2
+ - 20–300
+ - 94
+
+Additionally, all OT-2 GEN2 pipettes have a default head speed of 400 mm/s and a well bottom clearance of 1 mm for aspirate and dispense actions.
+
+.. _ot2-pipette-generations:
+
+OT-2 Pipette Generations
+========================
+
+The OT-2 works with the GEN1 and GEN2 pipette models. The newer GEN2 pipettes have different volume ranges than the older GEN1 pipettes. With some exceptions, the volume ranges for GEN2 pipettes overlap those used by the GEN1 models. If your protocol specifies a GEN1 pipette, but you have a GEN2 pipette with a compatible volume range, you can still run your protocol. The OT-2 will consider the GEN2 pipette to have the same minimum volume as the GEN1 pipette. The following table lists the volume compatibility between the GEN2 and GEN1 pipettes.
+
+.. list-table::
+ :header-rows: 1
+
+ * - GEN2 Pipette
+ - GEN1 Pipette
+ - GEN1 Volume
+ * - P20 Single-Channel GEN2
+ - P10 Single-Channel GEN1
+ - 1-10 µL
+ * - P20 Multi-Channel GEN2
+ - P10 Multi-Channel GEN1
+ - 1-10 µL
+ * - P300 Single-Channel GEN2
+ - P300 Single-Channel GEN1
+ - 30-300 µL
+ * - P300 Multi-Channel GEN2
+ - P300 Multi-Channel GEN1
+ - 20-200 µL
+ * - P1000 Single-Channel GEN2
+ - P1000 Single-Channel GEN1
+ - 100-1000 µL
+
+The single- and multi-channel P50 GEN1 pipettes are the exceptions here. If your protocol uses a P50 GEN1 pipette, there is no backward compatibility with a related GEN2 pipette. To replace a P50 GEN1 with a corresponding GEN2 pipette, edit your protocol to load a P20 Single-Channel GEN2 (for volumes below 20 µL) or a P300 Single-Channel GEN2 (for volumes between 20 and 50 µL).
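+
+For example, here is a minimal sketch of the replacement load call for the 20–50 µL range. The ``tiprack`` variable stands in for whichever 300 µL tip rack your protocol already loads::
+
+    # P300 Single-Channel GEN2 covers the 20-50 µL volumes of a P50 GEN1
+    pipette = protocol.load_instrument(
+        instrument_name="p300_single_gen2",
+        mount="left",
+        tip_racks=[tiprack])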
+
diff --git a/api/docs/v2/pipettes/loading.rst b/api/docs/v2/pipettes/loading.rst
new file mode 100644
index 00000000000..72a13ce3409
--- /dev/null
+++ b/api/docs/v2/pipettes/loading.rst
@@ -0,0 +1,217 @@
+:og:description: How to load Opentrons pipettes and add tip racks to them in a Python protocol.
+
+.. _new-create-pipette:
+.. _loading-pipettes:
+
+****************
+Loading Pipettes
+****************
+
+When writing a protocol, you must inform the Protocol API about the pipettes you will be using on your robot. The :py:meth:`.ProtocolContext.load_instrument` function provides this information and returns an :py:class:`.InstrumentContext` object.
+
+As noted above, you call the :py:meth:`~.ProtocolContext.load_instrument` method to load a pipette. This method also requires the :ref:`pipette's API load name <new-pipette-models>`, its left or right mount position, and (optionally) a list of associated tip racks. Even if you don't use the pipette anywhere else in your protocol, the Opentrons App and the robot won't let you start the protocol run until all pipettes loaded by ``load_instrument()`` are attached properly.
+
+.. _new-pipette-models:
+
+API Load Names
+==============
+
+The pipette's API load name (``instrument_name``) is the first parameter of the ``load_instrument()`` method. It tells your robot which attached pipette you're going to use in a protocol. The tables below list the API load names for the currently available Flex and OT-2 pipettes.
+
+.. tabs::
+
+ .. tab:: Flex Pipettes
+
+ +-------------------------+---------------+-------------------------+
+ | Pipette Model | Volume (µL) | API Load Name |
+        +=========================+===============+=========================+
+ | Flex 1-Channel Pipette | 1–50 | ``flex_1channel_50`` |
+ + +---------------+-------------------------+
+ | | 5–1000 | ``flex_1channel_1000`` |
+ +-------------------------+---------------+-------------------------+
+ | Flex 8-Channel Pipette | 1–50 | ``flex_8channel_50`` |
+ + +---------------+-------------------------+
+ | | 5–1000 | ``flex_8channel_1000`` |
+ +-------------------------+---------------+-------------------------+
+ | Flex 96-Channel Pipette | 5–1000 | ``flex_96channel_1000`` |
+ +-------------------------+---------------+-------------------------+
+
+ .. tab:: OT-2 Pipettes
+
+ +-----------------------------+--------------------+-----------------------+
+ | Pipette Model | Volume (µL) | API Load Name |
+ +=============================+====================+=======================+
+ | P20 Single-Channel GEN2 | 1-20 | ``p20_single_gen2`` |
+ +-----------------------------+ +-----------------------+
+ | P20 Multi-Channel GEN2 | | ``p20_multi_gen2`` |
+ +-----------------------------+--------------------+-----------------------+
+ | P300 Single-Channel GEN2 | 20-300 | ``p300_single_gen2`` |
+ +-----------------------------+ +-----------------------+
+ | P300 Multi-Channel GEN2 | | ``p300_multi_gen2`` |
+ +-----------------------------+--------------------+-----------------------+
+ | P1000 Single-Channel GEN2 | 100-1000 | ``p1000_single_gen2`` |
+ +-----------------------------+--------------------+-----------------------+
+
+        See the :ref:`OT-2 Pipette Generations <ot2-pipette-generations>` section if you're using GEN1 pipettes on an OT-2. The GEN1 family includes the P10, P50, and P300 single- and multi-channel pipettes, along with the P1000 single-channel model.
+
+Loading Flex 1- and 8-Channel Pipettes
+======================================
+
+This code sample loads a Flex 1-Channel Pipette in the left mount and a Flex 8-Channel Pipette in the right mount. Both pipettes are 1000 µL. Each pipette uses its own 1000 µL tip rack.
+
+.. code-block:: Python
+ :substitutions:
+
+ from opentrons import protocol_api
+
+ requirements = {"robotType": "Flex", "apiLevel":"|apiLevel|"}
+
+ def run(protocol: protocol_api.ProtocolContext):
+ tiprack1 = protocol.load_labware(
+ load_name="opentrons_flex_96_tiprack_1000ul", location="D1")
+ tiprack2 = protocol.load_labware(
+ load_name="opentrons_flex_96_tiprack_1000ul", location="C1")
+ left = protocol.load_instrument(
+ instrument_name="flex_1channel_1000",
+ mount="left",
+ tip_racks=[tiprack1])
+ right = protocol.load_instrument(
+ instrument_name="flex_8channel_1000",
+ mount="right",
+ tip_racks=[tiprack2])
+
+If you're writing a protocol that uses the Flex Gripper, you might think that this would be the place in your protocol to declare that. However, the gripper doesn't require ``load_instrument``! Whether your protocol requires the gripper is determined by the presence of :py:meth:`.ProtocolContext.move_labware` commands. See :ref:`moving-labware` for more details.
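+
+For example, a protocol containing a labware movement like the following sketch will require the gripper at run time (moving ``tiprack2`` to slot D2 here is purely illustrative)::
+
+    # any move_labware() call with use_gripper=True makes the gripper required
+    protocol.move_labware(
+        labware=tiprack2, new_location="D2", use_gripper=True)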
+
+Loading a Flex 96-Channel Pipette
+=================================
+
+This code sample loads the Flex 96-Channel Pipette. Because of its size, the Flex 96-Channel Pipette requires the left *and* right pipette mounts. You cannot use this pipette with a 1- or 8-channel pipette in the same protocol, or when those instruments are attached to the robot. Load the 96-channel pipette as follows:
+
+.. code-block:: python
+
+ def run(protocol: protocol_api.ProtocolContext):
+ pipette = protocol.load_instrument(
+ instrument_name="flex_96channel_1000"
+ )
+
+In protocols specifying API version 2.15, also include ``mount="left"`` as a parameter of ``load_instrument()``.
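+
+In that case, the call looks like this sketch::
+
+    # API version 2.15 only: the 96-channel pipette still needs an explicit mount
+    pipette = protocol.load_instrument(
+        instrument_name="flex_96channel_1000", mount="left")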
+
+.. versionadded:: 2.15
+.. versionchanged:: 2.16
+ The ``mount`` parameter is optional.
+
+Loading OT-2 Pipettes
+=====================
+
+This code sample loads a P1000 Single-Channel GEN2 pipette in the left mount and a P300 Single-Channel GEN2 pipette in the right mount. Each pipette uses its own tip rack: a 1000 µL rack for the P1000 and a 300 µL rack for the P300.
+
+.. code-block:: python
+ :substitutions:
+
+ from opentrons import protocol_api
+
+ metadata = {"apiLevel": "|apiLevel|"}
+
+ def run(protocol: protocol_api.ProtocolContext):
+ tiprack1 = protocol.load_labware(
+ load_name="opentrons_96_tiprack_1000ul", location=1)
+ tiprack2 = protocol.load_labware(
+            load_name="opentrons_96_tiprack_300ul", location=2)
+ left = protocol.load_instrument(
+ instrument_name="p1000_single_gen2",
+ mount="left",
+ tip_racks=[tiprack1])
+ right = protocol.load_instrument(
+            instrument_name="p300_single_gen2",
+ mount="right",
+            tip_racks=[tiprack2])
+
+.. versionadded:: 2.0
+
+.. _pipette-tip-racks:
+
+Adding Tip Racks
+================
+
+The ``load_instrument()`` method includes the optional argument ``tip_racks``. This parameter accepts a list of tip rack labware objects, which lets you specify as many tip racks as you want. You can also edit a pipette's tip racks after loading it by setting its :py:obj:`.InstrumentContext.tip_racks` property.
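+
+For example, here's a one-line sketch of swapping in a different set of racks after loading (``pipette`` and ``tiprack3`` are illustrative names, not labware loaded elsewhere on this page)::
+
+    pipette.tip_racks = [tiprack3]  # tip tracking now draws from tiprack3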
+
+.. note::
+ Some methods, like :py:meth:`.configure_nozzle_layout`, reset a pipette's tip racks. See :ref:`partial-tip-pickup` for more information.
+
+The advantage of using ``tip_racks`` is twofold. First, associating tip racks with your pipette allows for automatic tip tracking throughout your protocol. Second, it removes the need to specify tip locations in the :py:meth:`.InstrumentContext.pick_up_tip` method. For example, let's start by loading some labware and instruments like this::
+
+ def run(protocol: protocol_api.ProtocolContext):
+ tiprack_left = protocol.load_labware(
+ load_name="opentrons_flex_96_tiprack_200ul", location="D1")
+ tiprack_right = protocol.load_labware(
+ load_name="opentrons_flex_96_tiprack_200ul", location="D2")
+ left_pipette = protocol.load_instrument(
+ instrument_name="flex_8channel_1000", mount="left")
+ right_pipette = protocol.load_instrument(
+ instrument_name="flex_8channel_1000",
+ mount="right",
+ tip_racks=[tiprack_right])
+
+Let's pick up a tip with the left pipette. We need to specify the location as an argument of ``pick_up_tip()``, since we loaded the left pipette without a ``tip_racks`` argument.
+
+.. code-block:: python
+
+ left_pipette.pick_up_tip(tiprack_left["A1"])
+ left_pipette.drop_tip()
+
+But now you have to specify ``tiprack_left`` every time you call ``pick_up_tip``, which means you're doing all your own tip tracking::
+
+ left_pipette.pick_up_tip(tiprack_left["A2"])
+ left_pipette.drop_tip()
+ left_pipette.pick_up_tip(tiprack_left["A3"])
+ left_pipette.drop_tip()
+
+However, because you specified a tip rack location for the right pipette, the robot will automatically pick up from location ``A1`` of its associated tiprack::
+
+ right_pipette.pick_up_tip()
+ right_pipette.drop_tip()
+
+Additional calls to ``pick_up_tip`` will automatically progress through the tips in the right rack::
+
+ right_pipette.pick_up_tip() # picks up from A2
+ right_pipette.drop_tip()
+ right_pipette.pick_up_tip() # picks up from A3
+ right_pipette.drop_tip()
+
+.. versionadded:: 2.0
+
+See also :ref:`v2-atomic-commands` and :ref:`v2-complex-commands`.
+
+.. _pipette-trash-containers:
+
+Adding Trash Containers
+=======================
+
+The API automatically assigns a :py:obj:`.trash_container` to pipettes, if one is available in your protocol. The ``trash_container`` is where the pipette will dispose of tips when you call :py:meth:`.drop_tip` with no arguments. You can change the trash container if you don't want to use the default.
+
+One example of when you might want to change the trash container is a Flex protocol that goes through a lot of tips. In a case where the protocol uses two pipettes, you could load two trash bins and assign one to each pipette::
+
+ left_pipette = protocol.load_instrument(
+ instrument_name="flex_8channel_1000", mount="left"
+ )
+ right_pipette = protocol.load_instrument(
+ instrument_name="flex_8channel_50", mount="right"
+ )
+    left_trash = protocol.load_trash_bin("A3")
+    right_trash = protocol.load_trash_bin("B3")
+ left_pipette.trash_container = left_trash
+ right_pipette.trash_container = right_trash
+
+Another example is a Flex protocol that uses a waste chute. Say you want to use the chute only for disposing of labware, and you want the pipette to drop tips in a trash bin. You could rely on load order to make the trash bin the pipette's ``trash_container``, or you can guarantee it by setting the property after all the load commands::
+
+ pipette = protocol.load_instrument(
+ instrument_name="flex_1channel_1000",
+ mount="left"
+ )
+ chute = protocol.load_waste_chute() # default because loaded first
+ trash = protocol.load_trash_bin("A3")
+ pipette.trash_container = trash # overrides default
+
+.. versionadded:: 2.0
+.. versionchanged:: 2.16
+ Added support for ``TrashBin`` and ``WasteChute`` objects.
diff --git a/api/docs/v2/pipettes/partial_tip_pickup.rst b/api/docs/v2/pipettes/partial_tip_pickup.rst
new file mode 100644
index 00000000000..a1e78fed570
--- /dev/null
+++ b/api/docs/v2/pipettes/partial_tip_pickup.rst
@@ -0,0 +1,189 @@
+:og:description: How to change the number of tips an Opentrons pipette uses.
+
+.. _partial-tip-pickup:
+
+******************
+Partial Tip Pickup
+******************
+
+The 96-channel pipette occupies both pipette mounts on Flex, so it's not possible to attach another pipette at the same time. Partial tip pickup lets you perform some of the same actions that you would be able to perform with a second pipette. As of version 2.16 of the API, you can configure the 96-channel pipette to pick up a single column of tips, similar to the behavior of an 8-channel pipette.
+
+Nozzle Layout
+=============
+
+Use the :py:meth:`.configure_nozzle_layout` method to choose how many tips the 96-channel pipette will pick up. The method's ``style`` parameter accepts special layout constants. You must import these constants at the top of your protocol, or you won't be able to configure the pipette for partial tip pickup.
+
+At minimum, import the API from the ``opentrons`` package::
+
+ from opentrons import protocol_api
+
+Then when you call ``configure_nozzle_layout`` later in your protocol, you can set ``style=protocol_api.COLUMN``.
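+
+For instance, a minimal sketch of that call on an already-loaded 96-channel pipette (here named ``pipette``, matching the example below)::
+
+    pipette.configure_nozzle_layout(
+        style=protocol_api.COLUMN,
+        start="A12")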
+
+For greater convenience, also import the individual layout constants that you plan to use in your protocol::
+
+ from opentrons.protocol_api import COLUMN, ALL
+
+Then when you call ``configure_nozzle_layout`` later in your protocol, you can set ``style=COLUMN``.
+
+Here is the start of a protocol that performs both imports, loads a 96-channel pipette, and sets it to pick up a single column of tips.
+
+.. code-block:: python
+ :substitutions:
+
+ from opentrons import protocol_api
+ from opentrons.protocol_api import COLUMN, ALL
+
+ requirements = {"robotType": "Flex", "apiLevel": "|apiLevel|"}
+
+ def run(protocol: protocol_api.ProtocolContext):
+ column_rack = protocol.load_labware(
+ load_name="opentrons_flex_96_tiprack_1000ul",
+ location="D3"
+ )
+ trash = protocol.load_trash_bin("A3")
+ pipette = protocol.load_instrument("flex_96channel_1000")
+ pipette.configure_nozzle_layout(
+ style=COLUMN,
+ start="A12",
+ tip_racks=[column_rack]
+ )
+
+.. versionadded:: 2.16
+
+Let's unpack some of the details of this code.
+
+First, we've given a special name to the tip rack, ``column_rack``. You can name your tip racks whatever you like, but if you're performing full pickup and partial pickup in the same protocol, you'll need to keep them separate. See :ref:`partial-tip-rack-adapters` below.
+
+Next, we load the 96-channel pipette. Note that :py:meth:`.load_instrument` only has a single argument. The 96-channel pipette occupies both mounts, so there is no need to specify a ``mount``. The ``tip_racks`` argument is always optional, but declaring it here would have no effect, because every call to ``configure_nozzle_layout()`` resets the pipette's :py:obj:`.InstrumentContext.tip_racks` property.
+
+Finally, we configure the nozzle layout, with three arguments.
+
+ - The ``style`` parameter directly accepts the ``COLUMN`` constant, since we imported it at the top of the protocol.
+ - The ``start`` parameter accepts a nozzle name, representing the back-left nozzle in the layout, as a string. ``"A12"`` tells the pipette to use its rightmost column of nozzles for pipetting.
+ - The ``tip_racks`` parameter tells the pipette which racks to use for tip tracking, just like :ref:`adding tip racks <pipette-tip-racks>` when loading a pipette.
+
+In this configuration, pipetting actions will use a single column::
+
+ # configured in COLUMN mode
+ pipette.pick_up_tip() # picks up A1-H1 from tip rack
+ pipette.drop_tip()
+ pipette.pick_up_tip() # picks up A2-H2 from tip rack
+
+.. warning::
+
+ :py:meth:`.InstrumentContext.pick_up_tip` always accepts a ``location`` argument, regardless of nozzle configuration. Do not pass a value that would lead the pipette to line up over more unused tips than specified by the current layout. For example, setting ``COLUMN`` layout and then calling ``pipette.pick_up_tip(tip_rack["A2"])`` on a full tip rack will lead to unexpected pipetting behavior and potential crashes.
+
+.. _partial-tip-rack-adapters:
+
+Tip Rack Adapters
+=================
+
+You can use both partial and full tip pickup in the same protocol. This requires having some tip racks directly on the deck, and some tip racks in the tip rack adapter.
+
+Do not use a tip rack adapter when performing partial tip pickup. Instead, place the tip rack on the deck. During partial tip pickup, the 96-channel pipette lowers onto the tip rack in a horizontally offset position. If the tip rack were in its adapter, the pipette would collide with the adapter's posts, which protrude above the top of the tip rack. If you configure the pipette for partial pickup and then call ``pick_up_tip()`` on a tip rack that's loaded onto an adapter, the API will raise an error.
+
+On the other hand, you must use the tip rack adapter for full tip pickup. If the 96-channel pipette is in a full layout, either by default or by configuring ``style=ALL``, and you then call ``pick_up_tip()`` on a tip rack that's not in an adapter, the API will raise an error.
+
+When switching between full and partial pickup, you may want to organize your tip racks into lists, depending on whether they're loaded on adapters or not.
+
+.. code-block:: python
+
+ tips_1 = protocol.load_labware(
+ "opentrons_flex_96_tiprack_1000ul", "C1"
+ )
+ tips_2 = protocol.load_labware(
+ "opentrons_flex_96_tiprack_1000ul", "D1"
+ )
+ tips_3 = protocol.load_labware(
+ "opentrons_flex_96_tiprack_1000ul", "C3",
+ adapter="opentrons_flex_96_tiprack_adapter"
+ )
+ tips_4 = protocol.load_labware(
+ "opentrons_flex_96_tiprack_1000ul", "D3",
+ adapter="opentrons_flex_96_tiprack_adapter"
+ )
+
+ partial_tip_racks = [tips_1, tips_2]
+ full_tip_racks = [tips_3, tips_4]
+
+Now, when you configure the nozzle layout, you can reference the appropriate list as the value of ``tip_racks``::
+
+ pipette.configure_nozzle_layout(
+ style=COLUMN,
+ start="A12",
+ tip_racks=partial_tip_racks
+ )
+ # partial pipetting commands go here
+
+ pipette.configure_nozzle_layout(
+ style=ALL,
+ tip_racks=full_tip_racks
+ )
+ pipette.pick_up_tip() # picks up full rack in C1
+
+This keeps tip tracking consistent across each type of pickup. And it reduces the risk of errors due to the incorrect presence or absence of a tip rack adapter.
+
+
+Tip Pickup and Conflicts
+========================
+
+During partial tip pickup, the 96-channel pipette moves into spaces above adjacent slots. To avoid crashes, the API prevents you from performing partial tip pickup when there is tall labware in these spaces. The current nozzle layout determines which labware can safely occupy adjacent slots.
+
+The API will raise errors for potential labware crashes when using a column nozzle configuration. Nevertheless, it's a good idea to do the following when working with partial tip pickup:
+
+ - Plan your deck layout carefully. Make a diagram and visualize everywhere the pipette will travel.
+ - Simulate your protocol and compare the run preview to your expectations of where the pipette will travel.
+ - Perform a dry run with only tip racks on the deck. Have the Emergency Stop Pendant handy in case you see an impending crash.
+
+For column pickup, Opentrons recommends using the nozzles in column 12 of the pipette.
+
+Using Column 12
+---------------
+
+The examples in this section use a 96-channel pipette configured to pick up tips with column 12::
+
+ pipette.configure_nozzle_layout(
+ style=COLUMN,
+ start="A12",
+ )
+
+When using column 12, the pipette overhangs space to the left of wherever it is picking up tips or pipetting. For this reason, it's a good idea to organize tip racks front to back on the deck. If you place them side by side, the rack to the right will be inaccessible. For example, let's load three tip racks in the front left corner of the deck::
+
+ tips_C1 = protocol.load_labware("opentrons_flex_96_tiprack_1000ul", "C1")
+ tips_D1 = protocol.load_labware("opentrons_flex_96_tiprack_1000ul", "D1")
+ tips_D2 = protocol.load_labware("opentrons_flex_96_tiprack_1000ul", "D2")
+
+Now the pipette will be able to access the racks in column 1 only. ``pick_up_tip(tips_D2["A1"])`` will raise an error due to the tip rack immediately to its left, in slot D1. There are a couple of ways to avoid this error:
+
+ - Load the tip rack in a different slot, with no tall labware to its left.
+ - Use all the tips in slot D1 first, and then use :py:meth:`.move_labware` to make space for the pipette before picking up tips from D2.
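+
+For the second option, a rough sketch might look like this (it assumes a gripper is attached and that slot B3 is empty)::
+
+    # after emptying the rack in D1, move it out of the way
+    protocol.move_labware(
+        labware=tips_D1, new_location="B3", use_gripper=True)
+    # no tall labware remains to the left of slot D2
+    pipette.pick_up_tip(tips_D2["A1"])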
+
+You would get a similar error trying to aspirate from or dispense into a well plate in slot D3, since there is a tip rack to the left.
+
+.. tip::
+
+ When using column 12 for partial tip pickup and pipetting, generally organize your deck with the shortest labware on the left side of the deck, and the tallest labware on the right side.
+
+Using Column 1
+--------------
+
+If your application can't accommodate a deck layout that works well with column 12, you can configure the 96-channel pipette to pick up tips with column 1::
+
+ pipette.configure_nozzle_layout(
+ style=COLUMN,
+ start="A1",
+ )
+
+The major drawback of this configuration, compared to using column 12, is that tip tracking is not available with column 1. You must always specify a ``location`` parameter for :py:meth:`.pick_up_tip`. This *requires careful tip tracking* so you don't place the pipette over more than a single column of unused tips at once. You can write some additional code to manage valid tip pickup locations, like this::
+
+ tip_rack = protocol.load_labware("opentrons_flex_96_tiprack_1000ul", "C1")
+ pipette.configure_nozzle_layout(style=COLUMN, start="A1")
+ row_a = tip_rack.rows()[0]
+ pipette.pick_up_tip(row_a.pop()) # pick up A12-H12
+ pipette.drop_tip()
+ pipette.pick_up_tip(row_a.pop()) # pick up A11-H11
+ pipette.drop_tip()
+
+This code first constructs a list of all the wells in row A of the tip rack. Then, when picking up a tip, instead of referencing one of those wells directly, the ``location`` is set to ``row_a.pop()``. This uses the `built-in pop method `_ to get the last item from the list and remove it from the list. If you keep using this approach to pick up tips, you'll get an error once the tip rack is empty — not from the API, but from Python itself, since you're trying to ``pop`` an item from an empty list.
+
+Additionally, you can't access the rightmost columns in labware in column 3, since they are beyond the movement limit of the pipette. The exact number of inaccessible columns varies by labware type. Any well that is within 29 mm of the right edge of the slot may be inaccessible in a column 1 configuration. Call ``configure_nozzle_layout()`` again to switch to a column 12 layout if you need to pipette in that area.
diff --git a/api/docs/v2/pipettes/volume_modes.rst b/api/docs/v2/pipettes/volume_modes.rst
new file mode 100644
index 00000000000..af1bc71fa51
--- /dev/null
+++ b/api/docs/v2/pipettes/volume_modes.rst
@@ -0,0 +1,51 @@
+:og:description: How to work with very small volumes of liquid on Opentrons Flex.
+
+.. _pipette-volume-modes:
+
+Volume Modes
+============
+
+The Flex 1-Channel 50 µL and Flex 8-Channel 50 µL pipettes must operate in a low-volume mode to accurately dispense very small volumes of liquid. Set the volume mode by calling :py:meth:`.InstrumentContext.configure_for_volume` with the amount of liquid you plan to aspirate, in µL::
+
+ pipette50.configure_for_volume(1)
+ pipette50.pick_up_tip()
+ pipette50.aspirate(1, plate["A1"])
+
+.. versionadded:: 2.15
+
+Passing different values to ``configure_for_volume()`` changes the minimum and maximum volume of Flex 50 µL pipettes as follows:
+
+.. list-table::
+ :header-rows: 1
+ :widths: 2 3 3
+
+ * - Value
+ - Minimum Volume (µL)
+ - Maximum Volume (µL)
+ * - 1–4.9
+ - 1
+ - 30
+ * - 5–50
+ - 5
+ - 50
+
+.. note::
+ The pipette must not contain liquid when you call ``configure_for_volume()``, or the API will raise an error.
+
+ Also, if the pipette is in a well location that may contain liquid, it will move upward to ensure it is not immersed in liquid before changing its mode. Calling ``configure_for_volume()`` *before* ``pick_up_tip()`` helps to avoid this situation.
+
+In a protocol that handles many different volumes, it's a good practice to call ``configure_for_volume()`` once for each :py:meth:`.transfer` or :py:meth:`.aspirate`, specifying the volume that you are about to handle. When operating with a list of volumes, nest ``configure_for_volume()`` inside a ``for`` loop to ensure that the pipette is properly configured for each volume:
+
+.. code-block:: python
+
+ volumes = [1, 2, 3, 4, 1, 5, 2, 8]
+ sources = plate.columns()[0]
+ destinations = plate.columns()[1]
+ for i in range(8):
+ pipette50.configure_for_volume(volumes[i])
+ pipette50.pick_up_tip()
+ pipette50.aspirate(volume=volumes[i], location=sources[i])
+ pipette50.dispense(location=destinations[i])
+ pipette50.drop_tip()
+
+If you know that all your liquid handling will take place in a specific mode, then you can call ``configure_for_volume()`` just once with a representative volume. Or if all the volumes correspond to the pipette's default mode, you don't have to call ``configure_for_volume()`` at all.
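+
+For example, a short sketch of the single-call approach, reusing the ``pipette50`` and ``plate`` names from above::
+
+    pipette50.configure_for_volume(2)  # low-volume mode for every transfer below
+    pipette50.pick_up_tip()
+    pipette50.aspirate(2, plate["A1"])
+    pipette50.dispense(2, plate["B1"])
+    pipette50.drop_tip()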
diff --git a/api/docs/v2/robot_position.rst b/api/docs/v2/robot_position.rst
index 2fbba1dab8a..8b2ed762e71 100644
--- a/api/docs/v2/robot_position.rst
+++ b/api/docs/v2/robot_position.rst
@@ -28,14 +28,14 @@ Let's look at the :py:meth:`.Well.top` method. It returns a position level with
.. code-block:: python
- plate['A1'].top() # the top center of the well
+ plate["A1"].top() # the top center of the well
This is a good position to use for a :ref:`blow out operation ` or an activity where you don't want the tip to contact the liquid. In addition, you can adjust the height of this position with the optional argument ``z``, which is measured in mm. Positive ``z`` numbers move the position up, negative ``z`` numbers move it down.
.. code-block:: python
- plate['A1'].top(z=1) # 1 mm above the top center of the well
- plate['A1'].top(z=-1) # 1 mm below the top center of the well
+ plate["A1"].top(z=1) # 1 mm above the top center of the well
+ plate["A1"].top(z=-1) # 1 mm below the top center of the well
.. versionadded:: 2.0
@@ -46,14 +46,14 @@ Let's look at the :py:meth:`.Well.bottom` method. It returns a position level wi
.. code-block:: python
- plate['A1'].bottom() # the bottom center of the well
+ plate["A1"].bottom() # the bottom center of the well
This is a good position for :ref:`aspirating liquid ` or an activity where you want the tip to contact the liquid. Similar to the ``Well.top()`` method, you can adjust the height of this position with the optional argument ``z``, which is measured in mm. Positive ``z`` numbers move the position up, negative ``z`` numbers move it down.
.. code-block:: python
- plate['A1'].bottom(z=1) # 1 mm above the bottom center of the well
- plate['A1'].bottom(z=-1) # 1 mm below the bottom center of the well
+ plate["A1"].bottom(z=1) # 1 mm above the bottom center of the well
+ plate["A1"].bottom(z=-1) # 1 mm below the bottom center of the well
# this may be dangerous!
.. warning::
@@ -73,7 +73,7 @@ Let's look at the :py:meth:`.Well.center` method. It returns a position centered
.. code-block:: python
- plate['A1'].center() # the vertical and horizontal center of the well
+ plate["A1"].center() # the vertical and horizontal center of the well
.. versionadded:: 2.0
@@ -90,22 +90,22 @@ If you need to change the aspiration or dispensing height for multiple operation
Modifying these attributes will affect all subsequent aspirate and dispense actions performed by the attached pipette, even those executed as part of a :py:meth:`.transfer` operation. This snippet from a sample protocol demonstrates how to work with and change the default clearance::
# aspirate 1 mm above the bottom of the well (default)
- pipette.aspirate(50, plate['A1'])
+ pipette.aspirate(50, plate["A1"])
# dispense 1 mm above the bottom of the well (default)
- pipette.dispense(50, plate['A1'])
+ pipette.dispense(50, plate["A1"])
# change clearance for aspiration to 2 mm
pipette.well_bottom_clearance.aspirate = 2
# aspirate 2 mm above the bottom of the well
- pipette.aspirate(50, plate['A1'])
+ pipette.aspirate(50, plate["A1"])
# still dispensing 1 mm above the bottom
- pipette.dispense(50, plate['A1'])
+ pipette.dispense(50, plate["A1"])
- pipette.aspirate(50, plate['A1'])
+ pipette.aspirate(50, plate["A1"])
# change clearance for dispensing to 10 mm
pipette.well_bottom_clearance.dispense = 10
# dispense high above the well
- pipette.dispense(50, plate['A1'])
+ pipette.dispense(50, plate["A1"])
.. versionadded:: 2.0
@@ -116,6 +116,8 @@ All positions relative to labware are adjusted automatically based on labware of
You should only adjust labware offsets in your Python code if you plan to run your protocol in Jupyter Notebook or from the command line. See :ref:`using_lpc` in the Advanced Control article for information.
+.. _protocol-api-deck-coords:
+
Position Relative to the Deck
=============================
@@ -144,17 +146,17 @@ The :py:meth:`~.InstrumentContext.move_to` method requires the :py:class:`.Locat
.. code-block:: python
- pipette.move_to(plate['A1']) # error; can't move to a well itself
- pipette.move_to(plate['A1'].bottom()) # move to the bottom of well A1
- pipette.move_to(plate['A1'].top()) # move to the top of well A1
- pipette.move_to(plate['A1'].bottom(z=2)) # move to 2 mm above the bottom of well A1
- pipette.move_to(plate['A1'].top(z=-2)) # move to 2 mm below the top of well A1
+ pipette.move_to(plate["A1"]) # error; can't move to a well itself
+ pipette.move_to(plate["A1"].bottom()) # move to the bottom of well A1
+ pipette.move_to(plate["A1"].top()) # move to the top of well A1
+ pipette.move_to(plate["A1"].bottom(z=2)) # move to 2 mm above the bottom of well A1
+ pipette.move_to(plate["A1"].top(z=-2)) # move to 2 mm below the top of well A1
When using ``move_to()``, by default the pipette will move in an arc: first upwards, then laterally to a position above the target location, and finally downwards to the target location. If you have a reason for doing so, you can force the pipette to move in a straight line to the target location:
.. code-block:: python
- pipette.move_to(plate['A1'].top(), force_direct=True)
+ pipette.move_to(plate["A1"].top(), force_direct=True)
.. warning::
@@ -162,10 +164,10 @@ When using ``move_to()``, by default the pipette will move in an arc: first upwa
Small, direct movements can be useful for working inside of a well, without having the tip exit and re-enter the well. This code sample demonstrates how to move the pipette to a well, make direct movements inside that well, and then move on to a different well::
- pipette.move_to(plate['A1'].top())
- pipette.move_to(plate['A1'].bottom(1), force_direct=True)
- pipette.move_to(plate['A1'].top(-2), force_direct=True)
- pipette.move_to(plate['A2'].top())
+ pipette.move_to(plate["A1"].top())
+ pipette.move_to(plate["A1"].bottom(1), force_direct=True)
+ pipette.move_to(plate["A1"].top(-2), force_direct=True)
+ pipette.move_to(plate["A2"].top())
.. versionadded:: 2.0
@@ -185,7 +187,7 @@ When instructing the robot to move, it's important to consider the difference be
This distinction is important for the :py:meth:`.Location.move` method, which operates on a location, takes a point as an argument, and outputs an updated location. To use this method, include ``from opentrons import types`` at the start of your protocol. The ``move()`` method does not mutate the location it is called on, so to perform an action at the updated location, use it as an argument of another method or save it to a variable. For example::
# get the location at the center of well A1
- center_location = plate['A1'].center()
+ center_location = plate["A1"].center()
# get a location 1 mm right, 1 mm back, and 1 mm up from the center of well A1
adjusted_location = center_location.move(types.Point(x=1, y=1, z=1))
@@ -203,11 +205,11 @@ This distinction is important for the :py:meth:`.Location.move` method, which op
.. code-block:: python
# the following are equivalent
- pipette.move_to(plate['A1'].bottom(z=2))
- pipette.move_to(plate['A1'].bottom().move(types.Point(z=2)))
+ pipette.move_to(plate["A1"].bottom(z=2))
+ pipette.move_to(plate["A1"].bottom().move(types.Point(z=2)))
# adjust along the y-axis
- pipette.move_to(plate['A1'].bottom().move(types.Point(y=2)))
+ pipette.move_to(plate["A1"].bottom().move(types.Point(y=2)))
.. versionadded:: 2.0
@@ -217,6 +219,9 @@ Movement Speeds
In addition to instructing the robot where to move a pipette, you can also control the speed at which it moves. Speed controls can be applied either to all pipette motions or to movement along a particular axis.
+.. note::
+ Like all mechanical systems, Opentrons robots have resonant frequencies that depend on their construction and current configuration. It's possible to set a speed that causes your robot to resonate, producing louder sounds than typical operation. This is safe, but if you find it annoying, increase or decrease the speed slightly.
+
.. _gantry_speed:
Gantry Speed
@@ -225,9 +230,9 @@ Gantry Speed
The robot's gantry usually moves as fast as it can given its construction. The default speed for Flex varies between 300 and 350 mm/s. The OT-2 default is 400 mm/s. However, some experiments or liquids may require slower movements. In this case, you can reduce the gantry speed for a specific pipette by setting :py:obj:`.InstrumentContext.default_speed` like this::
- pipette.move_to(plate['A1'].top()) # move to the first well at default speed
+ pipette.move_to(plate["A1"].top()) # move to the first well at default speed
pipette.default_speed = 100 # reduce pipette speed
- pipette.move_to(plate['D6'].top()) # move to the last well at the slower speed
+ pipette.move_to(plate["D6"].top()) # move to the last well at the slower speed
.. warning::
@@ -247,10 +252,10 @@ In addition to controlling the overall gantry speed, you can set speed limits fo
.. code-block:: python
:substitutions:
- protocol.max_speeds['x'] = 50 # limit x-axis to 50 mm/s
- del protocol.max_speeds['x'] # reset x-axis limit
- protocol.max_speeds['a'] = 10 # limit a-axis to 10 mm/s
- protocol.max_speeds['a'] = None # reset a-axis limit
+ protocol.max_speeds["x"] = 50 # limit x-axis to 50 mm/s
+ del protocol.max_speeds["x"] # reset x-axis limit
+ protocol.max_speeds["a"] = 10 # limit a-axis to 10 mm/s
+ protocol.max_speeds["a"] = None # reset a-axis limit
Note that ``max_speeds`` can't set limits for the pipette plunger axes (``b`` and ``c``); instead, set the flow rates or plunger speeds as described in :ref:`new-plunger-flow-rates`.
diff --git a/api/docs/v2/tutorial.rst b/api/docs/v2/tutorial.rst
index 5f22ac49155..473ad6e40c0 100644
--- a/api/docs/v2/tutorial.rst
+++ b/api/docs/v2/tutorial.rst
@@ -2,30 +2,29 @@
.. _tutorial:
-########
+********
Tutorial
-########
+********
-************
Introduction
-************
+============
-This tutorial will guide you through creating a Python protocol file from scratch. At the end of this process you’ll have a complete protocol that can run on a Flex or an OT-2 robot. If you don’t have a Flex or an OT-2 (or if you’re away from your lab, or if your robot is in use), you can use the same file to simulate the protocol on your computer instead.
+This tutorial will guide you through creating a Python protocol file from scratch. At the end of this process you'll have a complete protocol that can run on a Flex or an OT-2 robot. If you don’t have a Flex or an OT-2 (or if you’re away from your lab, or if your robot is in use), you can use the same file to simulate the protocol on your computer instead.
-What You’ll Automate
-^^^^^^^^^^^^^^^^^^^^
+What You'll Automate
+--------------------
-The lab task that you’ll automate in this tutorial is `serial dilution`: taking a solution and progressively diluting it by transferring it stepwise across a plate from column 1 to column 12. With just a dozen or so lines of code, you can instruct your robot to perform the hundreds of individual pipetting actions necessary to fill an entire 96-well plate. And all of those liquid transfers will be done automatically, so you’ll have more time to do other work in your lab.
+The lab task that you'll automate in this tutorial is `serial dilution`: taking a solution and progressively diluting it by transferring it stepwise across a plate from column 1 to column 12. With just a dozen or so lines of code, you can instruct your robot to perform the hundreds of individual pipetting actions necessary to fill an entire 96-well plate. And all of those liquid transfers will be done automatically, so you’ll have more time to do other work in your lab.
Before You Begin
-^^^^^^^^^^^^^^^^
+----------------
You're going to write some Python code, but you don't need to be a Python expert to get started writing Opentrons protocols. You should know some basic Python syntax, like how it uses `indentation `_ to group blocks of code, dot notation for `calling methods `_, and the format of `lists `_ and `dictionaries `_. You’ll also be using `common control structures `_ like ``if`` statements and ``for`` loops.
To run your code, make sure that you've installed `Python 3 `_ and the `pip package installer `_. You should write your code in your favorite plaintext editor or development environment and save it in a file with a ``.py`` extension, like ``dilution-tutorial.py``.
Hardware and Labware
-^^^^^^^^^^^^^^^^^^^^
+--------------------
Before running a protocol, you’ll want to have the right kind of hardware and labware ready for your Flex or OT-2.
@@ -57,9 +56,8 @@ The Flex and OT-2 use similar labware for serial dilution. The tutorial code wil
For the liquids, you can use plain water as the diluent and water dyed with food coloring as the solution.
-**********************
Create a Protocol File
-**********************
+======================
Let’s start from scratch to create your serial dilution protocol. Open up a new file in your editor and start with the line:
@@ -76,28 +74,27 @@ For this tutorial, you’ll write very little Python outside of the ``run()`` fu
.. _tutorial-metadata:
Metadata
-^^^^^^^^
+--------
-Every protocol needs to have a metadata dictionary with information about the protocol. At minimum, you need to specify what :ref:`version of the API ` the protocol requires. The `scripts `_ for this tutorial were validated against API version 2.15, so specify:
+Every protocol needs to have a metadata dictionary with information about the protocol. At minimum, you need to specify what :ref:`version of the API ` the protocol requires. The `scripts `_ for this tutorial were validated against API version 2.16, so specify:
.. code-block:: python
- metadata = {'apiLevel': '2.15'}
+ metadata = {"apiLevel": "2.16"}
You can include any other information you like in the metadata dictionary. The fields ``protocolName``, ``description``, and ``author`` are all displayed in the Opentrons App, so it’s a good idea to expand the dictionary to include them:
.. code-block:: python
- :substitutions:
metadata = {
- 'apiLevel': '2.15',
- 'protocolName': 'Serial Dilution Tutorial',
- 'description': '''This protocol is the outcome of following the
+ "apiLevel": "2.16",
+ "protocolName": "Serial Dilution Tutorial",
+ "description": """This protocol is the outcome of following the
Python Protocol API Tutorial located at
https://docs.opentrons.com/v2/tutorial.html. It takes a
solution and progressively dilutes it by transferring it
- stepwise across a plate.''',
- 'author': 'New API User'
+ stepwise across a plate.""",
+ "author": "New API User"
}
Note, if you have a Flex, or are using an OT-2 with API v2.15 (or higher), we recommend adding a ``requirements`` section to your code. See the Requirements section below.
@@ -105,7 +102,7 @@ Note, if you have a Flex, or are using an OT-2 with API v2.15 (or higher), we re
.. _tutorial-requirements:
Requirements
-^^^^^^^^^^^^
+------------
The ``requirements`` code block can appear before *or* after the ``metadata`` code block in a Python protocol. It uses the following syntax and accepts two arguments: ``robotType`` and ``apiLevel``.
@@ -114,16 +111,18 @@ Whether you need a ``requirements`` block depends on your robot model and API ve
- **Flex:** The ``requirements`` block is always required. And, the API version does not go in the ``metadata`` section. The API version belongs in the ``requirements``. For example::
- requirements = {"robotType": "Flex", "apiLevel": "2.15"}
+ requirements = {"robotType": "Flex", "apiLevel": "2.16"}
- **OT-2:** The ``requirements`` block is optional, but including it is a recommended best practice, particularly if you’re using API version 2.15 or greater. If you do use it, remember to remove the API version from the ``metadata``. For example::
- requirements = {"robotType": "OT-2", "apiLevel": "2.15"}
+ requirements = {"robotType": "OT-2", "apiLevel": "2.16"}
With the metadata and requirements defined, you can move on to creating the ``run()`` function for your protocol.
+.. _run-function:
+
The ``run()`` function
-^^^^^^^^^^^^^^^^^^^^^^
+----------------------
Now it’s time to actually instruct the Flex or OT-2 how to perform serial dilution. All of this information is contained in a single Python function, which has to be named ``run``. This function takes one argument, which is the *protocol context*. Many examples in these docs use the argument name ``protocol``, and sometimes they specify the argument’s type:
@@ -134,7 +133,7 @@ Now it’s time to actually instruct the Flex or OT-2 how to perform serial dilu
With the protocol context argument named and typed, you can start calling methods on ``protocol`` to add labware and hardware.
Labware
--------
+^^^^^^^
For serial dilution, you need to load a tip rack, reservoir, and 96-well plate on the deck of your Flex or OT-2. Loading labware is done with the :py:meth:`~.ProtocolContext.load_labware` method of the protocol context, which takes two arguments: the standard labware name as defined in the `Opentrons Labware Library `_, and the position where you'll place the labware on the robot's deck.
@@ -145,12 +144,11 @@ For serial dilution, you need to load a tip rack, reservoir, and 96-well plate o
Here’s how to load the labware on a Flex in slots D1, D2, and D3 (repeating the ``def`` statement from above to show proper indenting):
.. code-block:: python
- :substitutions:
def run(protocol: protocol_api.ProtocolContext):
- tips = protocol.load_labware('opentrons_flex_96_tiprack_200ul', 'D1')
- reservoir = protocol.load_labware('nest_12_reservoir_15ml', 'D2')
- plate = protocol.load_labware('nest_96_wellplate_200ul_flat', 'D3')
+ tips = protocol.load_labware("opentrons_flex_96_tiprack_200ul", "D1")
+ reservoir = protocol.load_labware("nest_12_reservoir_15ml", "D2")
+ plate = protocol.load_labware("nest_96_wellplate_200ul_flat", "D3")
If you’re using a different model of labware, find its name in the Labware Library and replace it in your code.
@@ -167,12 +165,11 @@ For serial dilution, you need to load a tip rack, reservoir, and 96-well plate o
Here’s how to load the labware on an OT-2 in slots 1, 2, and 3 (repeating the ``def`` statement from above to show proper indenting):
.. code-block:: python
- :substitutions:
def run(protocol: protocol_api.ProtocolContext):
- tips = protocol.load_labware('opentrons_96_tiprack_300ul', 1)
- reservoir = protocol.load_labware('nest_12_reservoir_15ml', 2)
- plate = protocol.load_labware('nest_96_wellplate_200ul_flat', 3)
+ tips = protocol.load_labware("opentrons_96_tiprack_300ul", 1)
+ reservoir = protocol.load_labware("nest_12_reservoir_15ml", 2)
+ plate = protocol.load_labware("nest_96_wellplate_200ul_flat", 3)
If you’re using a different model of labware, find its name in the Labware Library and replace it in your code.
@@ -186,20 +183,32 @@ For serial dilution, you need to load a tip rack, reservoir, and 96-well plate o
You may notice that these deck maps don't show where the liquids will be at the start of the protocol. Liquid definitions aren’t required in Python protocols, unlike protocols made in `Protocol Designer `_. If you want to identify liquids, see `Labeling Liquids in Wells `_. (Sneak peek: you’ll put the diluent in column 1 of the reservoir and the solution in column 2 of the reservoir.)
+Trash Bin
+^^^^^^^^^
+
+Flex and OT-2 both come with a trash bin for disposing of used tips.
+
+The OT-2 trash bin is fixed in slot 12. Since it can't go anywhere else on the deck, you don't need to write any code to tell the API where it is. Skip ahead to the Pipettes section below.
+
+Flex lets you put a :ref:`trash bin ` in multiple locations on the deck. You can even have more than one trash bin, or none at all (if you use the :ref:`waste chute ` instead, or if your protocol never trashes any tips). For serial dilution, you’ll need to dispose of used tips, so you also need to tell the API where the trash container is located on your robot. Loading a trash bin on Flex is done with the :py:meth:`.load_trash_bin` method, which takes one argument: its location. Here's how to load the trash in slot A3::
+
+ trash = protocol.load_trash_bin("A3")
+
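+If your Flex uses the waste chute instead of a trash bin, you would load that instead. Here's a minimal sketch (assuming API version 2.16 or higher; the waste chute has a fixed deck location, so :py:meth:`.load_waste_chute` takes no slot argument)::
+
+    chute = protocol.load_waste_chute()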
+
Pipettes
---------
+^^^^^^^^
Next you’ll specify what pipette to use in the protocol. Loading a pipette is done with the :py:meth:`.load_instrument` method, which takes three arguments: the name of the pipette, the mount it’s installed in, and the tip racks it should use when performing transfers. Load whatever pipette you have installed in your robot by using its :ref:`standard pipette name `. Here’s how to load the pipette in the left mount and instantiate it as a variable named ``left_pipette``:
.. code-block:: python
# Flex
- left_pipette = protocol.load_instrument('flex_1channel_1000', 'left', tip_racks=[tips])
+ left_pipette = protocol.load_instrument("flex_1channel_1000", "left", tip_racks=[tips])
.. code-block:: python
# OT-2
- left_pipette = protocol.load_instrument('p300_single_gen2', 'left', tip_racks=[tips])
+ left_pipette = protocol.load_instrument("p300_single_gen2", "left", tip_racks=[tips])
Since the pipette is so fundamental to the protocol, it might seem like you should have specified it first. But there’s a good reason why pipettes are loaded after labware: you need to have already loaded ``tips`` in order to tell the pipette to use it. And now you won’t have to reference ``tips`` again in your code — it’s assigned to the ``left_pipette`` and the robot will know to use it when commanded to pick up tips.
@@ -210,7 +219,7 @@ Since the pipette is so fundamental to the protocol, it might seem like you shou
.. _tutorial-commands:
Commands
---------
+^^^^^^^^
Finally, all of your labware and hardware is in place, so it’s time to give the robot pipetting commands. The required steps of the serial dilution process break down into three main phases:
@@ -224,7 +233,7 @@ Let’s start with the diluent. This phase takes a larger quantity of liquid and
.. code-block:: python
- left_pipette.transfer(100, reservoir['A1'], plate.wells())
+ left_pipette.transfer(100, reservoir["A1"], plate.wells())
Breaking down these single lines of code shows the power of :ref:`complex commands `. The first argument is the amount to transfer to each destination, 100 µL. The second argument is the source, column 1 of the reservoir (which is still specified with grid-style coordinates as ``A1`` — a reservoir only has an A row). The third argument is the destination. Here, calling the :py:meth:`.wells` method of ``plate`` returns a list of *every well*, and the command will apply to all of them.
@@ -253,7 +262,7 @@ In each row, you first need to add solution. This will be similar to what you di
.. code-block:: python
- left_pipette.transfer(100, reservoir['A2'], row[0], mix_after(3, 50))
+    left_pipette.transfer(100, reservoir["A2"], row[0], mix_after=(3, 50))
As before, the first argument specifies to transfer 100 µL. The second argument is the source, column 2 of the reservoir. The third argument is the destination, the element at index 0 of the current ``row``. Since Python lists are zero-indexed, but columns on labware start numbering at 1, this will be well A1 on the first time through the loop, B1 the second time, and so on. The fourth argument specifies to mix 3 times with 50 µL of fluid each time.
@@ -285,7 +294,7 @@ All that remains is for the loop to repeat these steps, filling each row down th
That’s it! If you’re using a single-channel pipette, you’re ready to try out your protocol.
8-Channel Pipette
------------------
+^^^^^^^^^^^^^^^^^
If you’re using an 8-channel pipette, you’ll need to make a couple tweaks to the single-channel code from above. Most importantly, whenever you target a well in row A of a plate with an 8-channel pipette, it will move its topmost tip to row A, lining itself up over the entire column.
@@ -293,21 +302,20 @@ Thus, when adding the diluent, instead of targeting every well on the plate, you
.. code-block:: python
- left_pipette.transfer(100, reservoir['A1'], plate.rows()[0])
+ left_pipette.transfer(100, reservoir["A1"], plate.rows()[0])
And by accessing an entire column at once, the 8-channel pipette effectively implements the ``for`` loop in hardware, so you’ll need to remove it:
.. code-block:: python
row = plate.rows()[0]
- left_pipette.transfer(100, reservoir['A2'], row[0], mix_after=(3, 50))
+ left_pipette.transfer(100, reservoir["A2"], row[0], mix_after=(3, 50))
left_pipette.transfer(100, row[:11], row[1:], mix_after=(3, 50))
Instead of tracking the current row in the ``row`` variable, this code sets it to always be row A (index 0).
-*****************
Try Your Protocol
-*****************
+=================
There are two ways to try out your protocol: simulation on your computer, or a live run on a Flex or OT-2. Even if you plan to run your protocol on a robot, it’s a good idea to check the simulation output first.
@@ -321,10 +329,9 @@ If you get any errors in simulation, or you don't get the outcome you expected w
.. _tutorial-simulate:
In Simulation
-^^^^^^^^^^^^^
-.. suggest linking to pip install rather than just using text in ``code`` format. Help reader find resource
+-------------
-Simulation doesn’t require having a robot connected to your computer. You just need to install the `Opentrons Python module `_ from Pip (``pip install opentrons``). This will give you access to the ``opentrons_simulate`` command-line utility (``opentrons_simulate.exe`` on Windows).
+Simulation doesn’t require having a robot connected to your computer. You just need to install the `Opentrons Python module `_ using pip (``pip install opentrons``). This will give you access to the ``opentrons_simulate`` command-line utility (``opentrons_simulate.exe`` on Windows).
To see a text preview of the steps your Flex or OT-2 will take, use the change directory (``cd``) command to navigate to the location of your saved protocol file and run:
@@ -343,7 +350,7 @@ The ``-e`` flag estimates duration, and ``-o nothing`` suppresses printing the r
If that’s too long, you can always cancel your run partway through or modify ``for i in range(8)`` to loop through fewer rows.
On a Robot
-^^^^^^^^^^
+----------
The simplest way to run your protocol on a Flex or OT-2 is to use the `Opentrons App `_. When you first launch the Opentrons App, you will see the Protocols screen. (Click **Protocols** in the left sidebar to access it at any other time.) Click **Import** in the top right corner to reveal the Import a Protocol pane. Then click **Choose File** and find your protocol in the system file picker, or drag and drop your protocol file into the well.
@@ -360,8 +367,7 @@ When it’s all done, check the results of your serial dilution procedure — yo
:align: center
:alt: An overhead view of a well plate on the metal OT-2 deck, with dark blue liquid in the leftmost column smoothly transitioning to very light blue in the rightmost column.
-**********
Next Steps
-**********
+==========
This tutorial has relied heavily on the ``transfer()`` method, but there's much more that the Python Protocol API can do. Many advanced applications use :ref:`building block commands ` for finer control over the robot. These commands let you aspirate and dispense separately, add air gaps, blow out excess liquid, move the pipette to any location, and more. For protocols that use :ref:`Opentrons hardware modules `, there are methods to control their behavior. And all of the API's classes and methods are catalogued in the :ref:`API Reference `.
diff --git a/api/docs/v2/versioning.rst b/api/docs/v2/versioning.rst
index f635a84812f..5819bee4b47 100644
--- a/api/docs/v2/versioning.rst
+++ b/api/docs/v2/versioning.rst
@@ -30,11 +30,11 @@ You must specify the API version you are targeting in your Python protocol. In a
from opentrons import protocol_api
metadata = {
- 'apiLevel': '|apiLevel|',
- 'author': 'A. Biologist'}
+ "apiLevel": "|apiLevel|",
+ "author": "A. Biologist"}
def run(protocol: protocol_api.ProtocolContext):
- protocol.comment('Hello, world!')
+ protocol.comment("Hello, world!")
From version 2.15 onward, you can specify ``apiLevel`` in the ``requirements`` dictionary instead:
@@ -43,11 +43,11 @@ From version 2.15 onward, you can specify ``apiLevel`` in the ``requirements`` d
from opentrons import protocol_api
- metadata = {'author': 'A. Biologist'}
- requirements = {'apiLevel': '2.15', 'robotType': 'Flex'}
+ metadata = {"author": "A. Biologist"}
+ requirements = {"apiLevel": "|apiLevel|", "robotType": "Flex"}
def run(protocol: protocol_api.ProtocolContext):
- protocol.comment('Hello, Flex!')
+ protocol.comment("Hello, Flex!")
Choose only one of these places to specify ``apiLevel``. If you put it in neither or both places, you will not be able to simulate or run your protocol.
@@ -59,6 +59,8 @@ When choosing an API level, consider what features you need and how widely you p
On the one hand, using the highest available version will give your protocol access to all the latest :ref:`features and fixes `. On the other hand, using the lowest possible version lets the protocol work on a wider range of robot software versions. For example, a protocol that uses the Heater-Shaker and specifies version 2.13 of the API should work equally well on a robot running version 6.1.0 or 6.2.0 of the robot software. Specifying version 2.14 would limit the protocol to robots running 6.2.0 or higher.
+.. _max-version:
+
Maximum Supported Versions
==========================
@@ -66,9 +68,9 @@ The maximum supported API version for your robot is listed in the Opentrons App
If you upload a protocol that specifies a higher API level than the maximum supported, your robot won't be able to analyze or run your protocol. You can increase the maximum supported version by updating your robot software and Opentrons App.
-Opentrons robots running the latest software (7.0.0) support the following version ranges:
+Opentrons robots running the latest software (7.2.0) support the following version ranges:
- * **Flex:** version 2.15.
+ * **Flex:** version 2.15–|apiLevel|.
* **OT-2:** versions 2.0–|apiLevel|.
@@ -82,6 +84,10 @@ This table lists the correspondence between Protocol API versions and robot soft
+-------------+------------------------------+
| API Version | Introduced in Robot Software |
+=============+==============================+
+| 2.17 | 7.2.0 |
++-------------+------------------------------+
+| 2.16 | 7.1.0 |
++-------------+------------------------------+
| 2.15 | 7.0.0 |
+-------------+------------------------------+
| 2.14 | 6.3.0 |
@@ -122,6 +128,33 @@ This table lists the correspondence between Protocol API versions and robot soft
Changes in API Versions
=======================
+Version 2.17
+------------
+
+- :py:meth:`.dispense` now raises an error if you try to dispense more than :py:obj:`.InstrumentContext.current_volume`.
+
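+For example, here is a sketch of code that now errors (``pipette`` and ``plate`` are placeholder names, not part of the changelog):
+
+.. code-block:: python
+
+    pipette.aspirate(50, plate["A1"])
+    pipette.dispense(100, plate["A2"])  # raises an error in 2.17: only 50 µL is held
+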
+Version 2.16
+------------
+
+This version introduces new features for Flex and adds and improves methods for aspirating and dispensing. Note that when updating Flex protocols to version 2.16, you *must* load a trash container before dropping tips.
+
+- New features
+
+ - Use :py:meth:`.configure_nozzle_layout` to pick up a single column of tips with the 96-channel pipette. See :ref:`Partial Tip Pickup `.
+ - Specify the trash containers attached to your Flex with :py:meth:`.load_waste_chute` and :py:meth:`.load_trash_bin`.
+  - Dispense, blow out, drop tips, and dispose of labware in the waste chute. Disposing of labware requires the gripper and calling :py:meth:`.move_labware` with ``use_gripper=True``.
+ - Perform actions in staging area slots by referencing slots A4 through D4. See :ref:`deck-slots`.
+ - Explicitly command a pipette to :py:meth:`.prepare_to_aspirate`. The API usually prepares pipettes to aspirate automatically, but this is useful for certain applications, like pre-wetting routines.
+
+- Improved features
+
+ - :py:meth:`.aspirate`, :py:meth:`.dispense`, and :py:meth:`.mix` will not move any liquid when called with ``volume=0``.
+
+- Other changes
+
+ - :py:obj:`.ProtocolContext.fixed_trash` and :py:obj:`.InstrumentContext.trash_container` now return :py:class:`.TrashBin` objects instead of :py:class:`.Labware` objects.
+ - Flex will no longer automatically drop tips in the trash at the end of a protocol. You can add a :py:meth:`.drop_tip()` command to your protocol or use the Opentrons App to drop the tips.
+
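+For example, here is a minimal sketch of a Flex protocol updated for this version (the labware, slot, and pipette choices are illustrative, not prescribed by the changelog):
+
+.. code-block:: python
+
+    requirements = {"robotType": "Flex", "apiLevel": "2.16"}
+
+    def run(protocol):
+        # Load a trash bin before any tips are dropped.
+        trash = protocol.load_trash_bin("A3")
+        tips = protocol.load_labware("opentrons_flex_96_tiprack_200ul", "D1")
+        pipette = protocol.load_instrument("flex_1channel_1000", "left", tip_racks=[tips])
+        pipette.pick_up_tip()
+        # ... pipetting steps ...
+        # Tips are no longer dropped automatically at the end of the protocol.
+        pipette.drop_tip()
+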
Version 2.15
------------
@@ -149,13 +182,13 @@ This version introduces support for the Opentrons Flex robot, instruments, modul
- Use coordinates or numbers to specify :ref:`deck slots `. These formats match physical labels on Flex and OT-2, but you can use either system, regardless of ``robotType``.
- - The new :py:meth:`.load_adapter` method lets you load adapters and labware separately on modules, and lets you load adapters directly in deck slots. See :ref:`labware-on-adapters`.
+ - The new module context ``load_adapter()`` methods let you load adapters and labware separately on modules, and :py:meth:`.ProtocolContext.load_adapter` lets you load adapters directly in deck slots. See :ref:`labware-on-adapters`.
- Move labware manually using :py:meth:`.move_labware`, without having to stop your protocol.
- Manual labware moves support moving to or from the new :py:obj:`~.protocol_api.OFF_DECK` location (outside of the robot).
- - :py:meth:`.load_labware` also accepts :py:obj:`~.protocol_api.OFF_DECK` as a location. This lets you prepare labware to be moved onto the deck later in a protocol.
+ - :py:meth:`.ProtocolContext.load_labware` also accepts :py:obj:`~.protocol_api.OFF_DECK` as a location. This lets you prepare labware to be moved onto the deck later in a protocol.
- The new ``push_out`` parameter of the :py:meth:`.dispense` method helps ensure that the pipette dispenses all of its liquid when working with very small volumes.
@@ -211,10 +244,10 @@ If you specify an API version of ``2.13`` or lower, your protocols will continue
because the plunger's speed is a stepwise function of the volume.
Use :py:attr:`.InstrumentContext.flow_rate` to set the flow rate in µL/s, instead.
- - ``ModuleContext.load_labware_object`` was removed as an unnecessary internal method.
+ - ``load_labware_object()`` was removed from module contexts as an unnecessary internal method.
- - ``ModuleContext.geometry`` was removed in favor of
- :py:attr:`.ModuleContext.model` and :py:attr:`.ModuleContext.type`
+ - ``geometry`` was removed from module contexts in favor of
+ ``model`` and ``type`` attributes.
- ``Well.geometry`` was removed as unnecessary.
@@ -304,7 +337,7 @@ Version 2.8
Version 2.7
-----------
-- Added :py:meth:`.InstrumentContext.pair_with`, an experimental feature for moving both pipettes simultaneously.
+- Added ``InstrumentContext.pair_with()``, an experimental feature for moving both pipettes simultaneously.
.. note::
diff --git a/api/mypy.ini b/api/mypy.ini
index 56b8435855c..6cbbea90d34 100644
--- a/api/mypy.ini
+++ b/api/mypy.ini
@@ -4,7 +4,7 @@ show_error_codes = True
warn_unused_configs = True
strict = True
# TODO(mc, 2021-09-12): work through and remove these exclusions
-exclude = tests/opentrons/(hardware_control/test_.*py|hardware_control/integration/|hardware_control/emulation/|hardware_control/modules/|protocols/advanced_control/|protocols/api_support/|protocols/duration/|protocols/execution/|protocols/fixtures/|protocols/geometry/)
+exclude = tests/opentrons/(hardware_control/test_(?!(ot3|module_control)).*py|hardware_control/integration/|hardware_control/emulation/|hardware_control/modules/|protocols/advanced_control/|protocols/api_support/|protocols/duration/|protocols/execution/|protocols/fixtures/|protocols/geometry/)
[pydantic-mypy]
init_forbid_extra = True
diff --git a/api/release-notes-internal.md b/api/release-notes-internal.md
index 426958f061b..353df2e8833 100644
--- a/api/release-notes-internal.md
+++ b/api/release-notes-internal.md
@@ -2,19 +2,75 @@ For more details about this release, please see the full [technical change log][
[technical change log]: https://github.com/Opentrons/opentrons/releases
+## Internal Release 1.5.0-alpha.1
+
+This internal release is from the `edge` branch to contain rapid dev on new features for 7.3.0. This release is for internal testing purposes and if used may require a factory reset of the robot to return to a stable version.
+
+
+
+---
+
+## Internal Release 1.5.0-alpha.0
+
+This internal release is from the `edge` branch to contain rapid dev on new features for 7.3.0. This release is for internal testing purposes and if used may require a factory reset of the robot to return to a stable version.
+
+
+
---
-# Internal Release 1.0.0
+## Internal Release 1.4.0-alpha.1
+
+This internal release is from the `edge` branch to contain rapid dev on new features for 7.3.0. This release is for internal testing purposes and if used may require a factory reset of the robot to return to a stable version.
+
+This release is primarily to unblock Flex runs. That fix is in
+
+### All changes
+
+
+
+---
+
+## Internal Release 1.4.0-alpha.0
+
+This internal release is from the `edge` branch to contain rapid dev on new features for 7.3.0. This release is for internal testing purposes and if used may require a factory reset of the robot to return to a stable version.
+
+
+
+---
+
+## Internal Release 1.3.0-alpha.0
+
+This internal release is from the `edge` branch to contain rapid dev on new features for 7.3.0. This release is for internal testing purposes and if used may require a factory reset of the robot to return to a stable version.
+
+
+
+---
+
+# Internal Release 1.1.0
## New Stuff In This Release
-- Fixed an issue where the robot wasn't actually checking for updates; you will now correctly get prompted to update your robot from the settings tab of the ODD when an update is available, including during onboarding
-- You can update the robot by putting a system update (ot3-system.zip) on a flash drive and plugging it in the front USB port, then going to robot settings
-- Support for 96-channel pipettes in protocols
-- Early provisional support for deck configuration and trash chutes in protocols
+This is a tracking internal release coming off of the edge branch to contain rapid dev on new features for 7.1.0. Features will change drastically between successive alphas even over the course of the day. For this reason, these release notes will not be in their usual depth.
+
+The biggest new features, however, are:
+- There is a new protocol API version, 2.16, which changes how the default trash is loaded and gates features like partial tip pickup and waste chute usage:
+  - Protocols do not load a trash by default. To load the normal trash, load ``opentrons_1_trash_3200ml_fixed`` in slot ``A3``.
+  - You can also load it in any other edge slot (columns 1 and 3).
+  - Protocols can load trash chutes; the details of exactly how this works are still in flux.
+  - Protocols can configure their 96- and 8-channel pipettes to pick up only a subset of tips using ``configure_nozzle_layout``.
+- Support for json protocol V8 and command V8, which adds JSON protocol support for the above features.
+- ODD support for rendering the above features in protocols
+- ODD support for configuring the loaded deck fixtures like trash chutes
+- Labware position check now uses the calibration probe (the same one used for pipette and module calibration) instead of a tip; this should increase the accuracy of LPC.
+- Support for P1000S v3.6
+- Updated liquid handling functions for all 96 channel pipettes
+
+## Known Issues
+- The ``MoveToAddressableArea`` command will no-op. This means that commands that use the movable trash bin will not actually move to the trash bin, although they will still analyze successfully.
+- The deck configuration on the robot is not persistent. This means that between boots of the robot, you must PUT a deck configuration to the robot via HTTP.
-## Big Things That Don't Work Yet So Don't Report Bugs About Them
+## Other Changes
-### Robot Control
-- Pipette partial tip pickup is present but not fully validated or developed yet. Partial tip pickup on 96 channel pipettes will not use correct motion parameters; using the front channel of a pipette in partial tip pickup does not work.
+- The protocol engine no longer allows loading an item into a location (deck slot, module, or adapter) that is already occupied. Previously there were gaps in these checks in the API, and it was possible to write HTTP/JSON protocols (not generated by Protocol Designer) that loaded multiple items into a single location. Protocols were most likely exploiting this loophole to perform labware movement prior to DSM support. They should now use the correct labware movement API instead, as sketched below.
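+
+  A sketch of the Python-level call (``plate`` is a placeholder; JSON protocols use the equivalent ``moveLabware`` command):
+
+  ```python
+  # Move labware with the supported API instead of re-loading it in a new location.
+  protocol.move_labware(labware=plate, new_location="C2", use_gripper=True)
+  ```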
diff --git a/api/release-notes.md b/api/release-notes.md
index 5326fa3f4b9..737b4063c9c 100644
--- a/api/release-notes.md
+++ b/api/release-notes.md
@@ -4,14 +4,132 @@ log][]. For a list of currently known issues, please see the [Opentrons issue tr
[technical change log]: https://github.com/Opentrons/opentrons/releases
[opentrons issue tracker]: https://github.com/Opentrons/opentrons/issues?q=is%3Aopen+is%3Aissue+label%3Abug
+---
+
+## Opentrons Robot Software Changes in 7.3.0
+
+Welcome to the v7.3.0 release of the Opentrons robot software!
+
+### New Features
+
+- Runtime parameters: read, write, and use parameters in Python protocol runs.
+
+### Improved Features
+
+- Automatic tip tracking is now available for all nozzle configurations.
+- Flex no longer shows unnecessary pipette calibration warnings.
+- Python protocols can once again set labware offsets outside of Labware Position Check.
+
+### Changed Features
+
+- Calling `GET /runs/{id}/commands` for a JSON protocol no longer returns a full list of queued commands. Use protocol analysis to get a full list of commands.
+
+### Bug Fixes
+
+- Fixed an edge case where capitalizing part of a labware load name could cause unexpected behavior or collisions.
+- Fixed Python packages installed on the OT-2 with `pip` not being found by `import` statements.
+
+---
+
+## Opentrons Robot Software Changes in 7.2.2
+
+Welcome to the v7.2.2 release of the Opentrons robot software!
+
+### Improved Features
+
+- Improved the low-volume performance of recently produced Flex 96-Channel Pipettes.
+
+### Bug Fixes
+
+- Restores the ability to use the speaker and camera on OT-2.
+- Restores the ability to use the camera on Flex.
+
+---
+
+## Opentrons Robot Software Changes in 7.2.1
+
+Welcome to the v7.2.1 release of the Opentrons robot software!
+
+### Bug Fixes
+
+- Fixed an issue where OT-2 tip length calibrations created before v4.1.0 would cause a "missing calibration data" error that you could only resolve by resetting calibration.
+- Fixed collision prediction being too conservative in certain conditions on Flex, leading to errors even when collisions wouldn't take place.
+- Flex now properly homes after an instrument collision.
+- `opentrons_simulate` now outputs entries for commands that drop tips in the default trash container in protocols that specify Python API version 2.16 or newer.
+
+---
+
+## Opentrons Robot Software Changes in 7.2.0
+
+Welcome to the v7.2.0 release of the Opentrons robot software!
+
+This update may take longer than usual if your robot has a lot of long protocols and runs stored on it. Allow *approximately 20 minutes* for your robot to restart. This delay will only happen once.
+
+If you don't care about preserving your labware offsets and run history, you can avoid the delay by clearing your runs and protocols before starting this update. Go to **Robot Settings** > **Device Reset** and select **Clear protocol run history**.
+
+### Improved Features
+
+- The robot software now runs Python 3.10. Many built-in Python packages were updated to match. If you have installed your own Python packages on the robot, re-install them to ensure compatibility.
+- Added error handling when dispensing. The `/runs/commands`, `/maintenance_runs/commands`, and `/protocols` HTTP API endpoints now return an error if you try to dispense more than you've aspirated.
+- Improved performance of the `/runs/commands` endpoints. They are now significantly faster when requesting a small number of commands from a stored run.
+
+### Bug Fixes
+
+- The OT-2 now consistently applies tip length calibration. There used to be a height discrepancy between Labware Position Check and protocol runs. If you previously compensated for the inconsistent pipette height with labware offsets, re-run Labware Position Check to avoid pipette crashes.
+- The OT-2 now accurately calculates the position of the Thermocycler. If you previously compensated for the incorrect position with labware offsets, re-run Labware Position Check to avoid pipette crashes.
+- The Flex Gripper will no longer pick up large labware that could collide with tips held by an adjoining pipette.
+- Flex now properly configures itself when connected by Ethernet directly to a computer.
+
+### Removals
+
+- Removed the `notify_server` Python package and `/notifications/subscribe` WebSocket endpoint, as they were never fully used. (See pull request [#14280](https://github.com/Opentrons/opentrons/pull/14280) for details.)
+
+### Known Issues
+
+- Downgrading an OT-2 to an earlier software version will delete tip length calibrations created with version 7.2.0. If you need to downgrade, re-run all pipette calibrations afterward.
+
+---
+
+## Opentrons Robot Software Changes in 7.1.1
+
+Welcome to the v7.1.1 release of the Opentrons robot software!
+
+### Bug Fixes
+
+- Fixed an issue with the pipette definition for Flex 1-Channel 1000 µL pipettes.
+
+---
+
+## Opentrons Robot Software Changes in 7.1.0
+
+Welcome to the v7.1.0 release of the Opentrons robot software! This release includes support for deck configuration on Opentrons Flex, partial tip pickup with the Flex 96-Channel Pipette, and other improvements.
+
+### New Features
+
+- Pick up either a column of 8 tips or all 96 tips with the Flex 96-Channel Pipette.
+- Specify the deck configuration of Flex, including the movable trash bin, waste chute, and staging area slots.
+- Use the Flex Gripper to drop labware into the waste chute, or use Flex pipettes to dispense liquid or drop tips into the waste chute.
+- Manually prepare a pipette for aspiration, when required for your application.
+
+### Improved Features
+
+- The Ethernet port on Flex now supports direct connection to a computer.
+- Improves aspirate, dispense, and mix behavior with volumes set to zero.
+- The `opentrons_simulate` command-line tool now works with all Python API versions.
+
+### Known Issues
+
+JSON protocols created or modified with Protocol Designer v6.0.0 or higher can't be simulated with `opentrons_simulate`.
+
+---
## Opentrons Robot Software Changes in 7.0.2
-The 7.0.2 hotfix release does not contain any changes to the robot software
+The 7.0.2 hotfix release does not contain any changes to the robot software.
### Known Issues
-JSON protocols created or modified with Protocol Designer v6.0.0 or higher can't be simulated with the `opentrons_simulate` command-line tool
+JSON protocols created or modified with Protocol Designer v6.0.0 or higher can't be simulated with the `opentrons_simulate` command-line tool.
---
diff --git a/api/setup.py b/api/setup.py
index 869305ca284..2e1fa5380f8 100755
--- a/api/setup.py
+++ b/api/setup.py
@@ -46,9 +46,6 @@ def get_version():
"Intended Audience :: Science/Research",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.7",
- "Programming Language :: Python :: 3.8",
- "Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering",
]
@@ -61,14 +58,15 @@ def get_version():
INSTALL_REQUIRES = [
f"opentrons-shared-data=={VERSION}",
"aionotify==0.2.0",
- "anyio==3.3.0",
- "jsonschema==3.0.2",
- "numpy>=1.15.1,<2",
- "pydantic==1.8.2",
- "pyserial==3.5",
+ "anyio>=3.6.1,<4.0.0",
+ "jsonschema>=3.0.1,<4.18.0",
+ "numpy>=1.20.0,<2",
+ "pydantic>=1.10.9,<2.0.0",
+ "pyserial>=3.5",
"typing-extensions>=4.0.0,<5",
"click>=8.0.0,<9",
'importlib-metadata >= 1.0 ; python_version < "3.8"',
+ "packaging>=21.0",
]
EXTRAS = {
@@ -88,7 +86,7 @@ def read(*parts):
if __name__ == "__main__":
setup(
- python_requires=">=3.7",
+ python_requires=">=3.10",
name=DISTNAME,
description=DESCRIPTION,
license=LICENSE,
diff --git a/api/src/opentrons/__init__.py b/api/src/opentrons/__init__.py
index 7240dcfbb76..ac4e0c54262 100755
--- a/api/src/opentrons/__init__.py
+++ b/api/src/opentrons/__init__.py
@@ -10,6 +10,7 @@
API as HardwareAPI,
ThreadManager,
ThreadManagedHardware,
+ types as hw_types,
)
from opentrons.config import (
@@ -118,6 +119,7 @@ async def _create_thread_manager() -> ThreadManagedHardware:
use_usb_bus=ff.rear_panel_integration(),
threadmanager_nonblocking=True,
status_bar_enabled=ff.status_bar_enabled(),
+ feature_flags=hw_types.HardwareFeatureFlags.build_from_ff(),
)
else:
thread_manager = ThreadManager(
@@ -125,6 +127,7 @@ async def _create_thread_manager() -> ThreadManagedHardware:
threadmanager_nonblocking=True,
port=_get_motor_control_serial_port(),
firmware=_find_smoothie_file(),
+ feature_flags=hw_types.HardwareFeatureFlags.build_from_ff(),
)
try:
diff --git a/api/src/opentrons/_version.py b/api/src/opentrons/_version.py
index 385f024a111..6b26eac4ce5 100644
--- a/api/src/opentrons/_version.py
+++ b/api/src/opentrons/_version.py
@@ -9,7 +9,7 @@
import importlib_metadata as metadata # type: ignore[no-redef]
try:
- version: str = metadata.version("opentrons") # type: ignore[attr-defined]
+ version: str = metadata.version("opentrons")
except Exception as e:
logging.warning(
"Could not determine version for opentrons, may be dev install, using 0.0.0-dev"
diff --git a/api/src/opentrons/calibration_storage/__init__.py b/api/src/opentrons/calibration_storage/__init__.py
index 80b389223a8..1ddbfdd1582 100644
--- a/api/src/opentrons/calibration_storage/__init__.py
+++ b/api/src/opentrons/calibration_storage/__init__.py
@@ -1,6 +1,11 @@
from .ot3 import gripper_offset
from .ot2 import mark_bad_calibration
+from .deck_configuration import (
+ serialize_deck_configuration,
+ deserialize_deck_configuration,
+)
+
# TODO these functions are only used in robot server. We should think about moving them and/or
# abstracting it away from a robot specific function. We should also check if the tip rack
# definition information is still needed.
@@ -32,6 +37,9 @@
"save_robot_belt_attitude",
"get_robot_belt_attitude",
"delete_robot_belt_attitude",
+ # deck configuration functions
+ "serialize_deck_configuration",
+ "deserialize_deck_configuration",
# functions only used in robot server
"_save_custom_tiprack_definition",
"get_custom_tiprack_definition_for_tlc",
diff --git a/api/src/opentrons/calibration_storage/deck_configuration.py b/api/src/opentrons/calibration_storage/deck_configuration.py
new file mode 100644
index 00000000000..a627fce73c9
--- /dev/null
+++ b/api/src/opentrons/calibration_storage/deck_configuration.py
@@ -0,0 +1,62 @@
+from datetime import datetime
+from typing import List, Optional, Tuple
+
+import pydantic
+
+from .types import CutoutFixturePlacement
+from . import file_operators as io
+
+
+class _CutoutFixturePlacementModel(pydantic.BaseModel):
+ cutoutId: str
+ cutoutFixtureId: str
+ opentronsModuleSerialNumber: Optional[str]
+
+
+class _DeckConfigurationModel(pydantic.BaseModel):
+ """The on-filesystem representation of a deck configuration."""
+
+ cutoutFixtures: List[_CutoutFixturePlacementModel]
+ lastModified: datetime
+
+
+def serialize_deck_configuration(
+ cutout_fixture_placements: List[CutoutFixturePlacement], last_modified: datetime
+) -> bytes:
+ """Serialize a deck configuration for storing on the filesystem."""
+ data = _DeckConfigurationModel.construct(
+ cutoutFixtures=[
+ _CutoutFixturePlacementModel.construct(
+ cutoutId=e.cutout_id,
+ cutoutFixtureId=e.cutout_fixture_id,
+ opentronsModuleSerialNumber=e.opentrons_module_serial_number,
+ )
+ for e in cutout_fixture_placements
+ ],
+ lastModified=last_modified,
+ )
+ return io.serialize_pydantic_model(data)
+
+
+# TODO(mm, 2023-11-21): If the data is corrupt, we should propagate the underlying error.
+# And there should be an enumerated "corrupt storage" error in shared-data.
+def deserialize_deck_configuration(
+ serialized: bytes,
+) -> Optional[Tuple[List[CutoutFixturePlacement], datetime]]:
+ """Deserialize bytes previously serialized by `serialize_deck_configuration()`.
+
+ Returns a tuple `(deck_configuration, last_modified_time)`, or `None` if the data is corrupt.
+ """
+ parsed = io.deserialize_pydantic_model(serialized, _DeckConfigurationModel)
+ if parsed is None:
+ return None
+ else:
+ cutout_fixture_placements = [
+ CutoutFixturePlacement(
+ cutout_id=e.cutoutId,
+ cutout_fixture_id=e.cutoutFixtureId,
+ opentrons_module_serial_number=e.opentronsModuleSerialNumber,
+ )
+ for e in parsed.cutoutFixtures
+ ]
+ return cutout_fixture_placements, parsed.lastModified
diff --git a/api/src/opentrons/calibration_storage/file_operators.py b/api/src/opentrons/calibration_storage/file_operators.py
index 3ec91cb25b5..70c16297ecd 100644
--- a/api/src/opentrons/calibration_storage/file_operators.py
+++ b/api/src/opentrons/calibration_storage/file_operators.py
@@ -5,15 +5,20 @@
module, except in the special case of v2 labware support in
the v1 API.
"""
-import json
import datetime
+import json
+import logging
import typing
-from pydantic import BaseModel
from pathlib import Path
+import pydantic
+
from .encoder_decoder import DateTimeEncoder, DateTimeDecoder
+_log = logging.getLogger(__name__)
+
+
DecoderType = typing.Type[json.JSONDecoder]
EncoderType = typing.Type[json.JSONEncoder]
@@ -27,8 +32,9 @@ def delete_file(path: Path) -> None:
pass
+# TODO: This is private but used by other files.
def _remove_json_files_in_directories(p: Path) -> None:
- """Delete json file by the path"""
+ """Delete .json files in the given directory and its subdirectories."""
for item in p.iterdir():
if item.is_dir():
_remove_json_files_in_directories(item)
@@ -47,12 +53,12 @@ def _assert_last_modified_value(calibration_dict: typing.Dict[str, typing.Any])
def read_cal_file(
- filepath: Path, decoder: DecoderType = DateTimeDecoder
+ file_path: Path, decoder: DecoderType = DateTimeDecoder
) -> typing.Dict[str, typing.Any]:
"""
Function used to read data from a file
- :param filepath: path to look for data at
+ :param file_path: path to look for data at
:param decoder: if there is any specialized decoder needed.
The default decoder is the date time decoder.
:return: Data from the file
@@ -63,7 +69,7 @@ def read_cal_file(
# This can be done when the labware endpoints
# are refactored to grab tip length calibration
# from the correct locations.
- with open(filepath, "r") as f:
+ with open(file_path, "r", encoding="utf-8") as f:
calibration_data = typing.cast(
typing.Dict[str, typing.Any],
json.load(f, cls=decoder),
@@ -76,22 +82,61 @@ def read_cal_file(
def save_to_file(
- directorypath: Path,
+ directory_path: Path,
+ # todo(mm, 2023-11-15): This file_name argument does not include the file
+ # extension, which is inconsistent with read_cal_file(). The two should match.
file_name: str,
- data: typing.Union[BaseModel, typing.Dict[str, typing.Any], typing.Any],
+ data: typing.Union[pydantic.BaseModel, typing.Dict[str, typing.Any], typing.Any],
encoder: EncoderType = DateTimeEncoder,
) -> None:
"""
Function used to save data to a file
- :param filepath: path to save data at
- :param data: data to save
+ :param directory_path: path to the directory in which to save the data
+ :param file_name: name of the file within the directory, *without the extension*.
+ :param data: The data to save. Either a Pydantic model, or a JSON-like dict to pass to
+ `json.dumps()`. If you're storing a Pydantic model, prefer `save_pydantic_model_to_file()`
+ and `read_pydantic_model_from_file()` for new code.
:param encoder: if there is any specialized encoder needed.
The default encoder is the date time encoder.
"""
- directorypath.mkdir(parents=True, exist_ok=True)
- filepath = directorypath / f"{file_name}.json"
+ directory_path.mkdir(parents=True, exist_ok=True)
+ file_path = directory_path / f"{file_name}.json"
json_data = (
- data.json() if isinstance(data, BaseModel) else json.dumps(data, cls=encoder)
+ data.json()
+ if isinstance(data, pydantic.BaseModel)
+ else json.dumps(data, cls=encoder)
)
- filepath.write_text(json_data, encoding="utf-8")
+ file_path.write_text(json_data, encoding="utf-8")
+
+
+def serialize_pydantic_model(data: pydantic.BaseModel) -> bytes:
+ """Safely serialize data from a Pydantic model into a form suitable for storing on disk."""
+ return data.json(by_alias=True).encode("utf-8")
+
+
+_ModelT = typing.TypeVar("_ModelT", bound=pydantic.BaseModel)
+
+
+# TODO(mm, 2023-11-20): We probably want to distinguish "missing file" from "corrupt file."
+# The caller needs to deal with those cases separately because the appropriate action depends on
+# context. For example, when running protocols through robot-server, if the file is corrupt, it's
+# safe-ish to fall back to a default because the Opentrons App will let the user confirm everything
+# before starting the run. But when running protocols through the non-interactive
+# `opentrons_execute`, we don't want it to silently use default data if the file is corrupt.
+def deserialize_pydantic_model(
+ serialized: bytes,
+ model: typing.Type[_ModelT],
+) -> typing.Optional[_ModelT]:
+ """Safely read bytes from `serialize_pydantic_model()` back into a Pydantic model.
+
+ Returns `None` if the file is missing or corrupt.
+ """
+ try:
+ return model.parse_raw(serialized)
+ except json.JSONDecodeError:
+ _log.warning("Data is not valid JSON.", exc_info=True)
+ return None
+ except pydantic.ValidationError:
+ _log.warning(f"Data is malformed as a {model}.", exc_info=True)
+ return None
diff --git a/api/src/opentrons/calibration_storage/helpers.py b/api/src/opentrons/calibration_storage/helpers.py
index 011c3401bd3..b4cc6afe777 100644
--- a/api/src/opentrons/calibration_storage/helpers.py
+++ b/api/src/opentrons/calibration_storage/helpers.py
@@ -5,7 +5,7 @@
labware calibration to its designated file location.
"""
import json
-from typing import Any, Union, List, Dict, TYPE_CHECKING, cast
+from typing import Any, Union, List, Dict, TYPE_CHECKING, cast, Tuple
from dataclasses import is_dataclass, asdict
@@ -18,10 +18,7 @@
from opentrons_shared_data.pipette.dev_types import LabwareUri
-DictionaryFactoryType = Union[List, Dict]
-
-
-def dict_filter_none(data: DictionaryFactoryType) -> Dict[str, Any]:
+def dict_filter_none(data: List[Tuple[str, Any]]) -> Dict[str, Any]:
"""
Helper function to filter out None keys from a dataclass
before saving to file.
diff --git a/api/src/opentrons/calibration_storage/ot2/deck_attitude.py b/api/src/opentrons/calibration_storage/ot2/deck_attitude.py
index 3f85ad25c17..8edd2e52662 100644
--- a/api/src/opentrons/calibration_storage/ot2/deck_attitude.py
+++ b/api/src/opentrons/calibration_storage/ot2/deck_attitude.py
@@ -79,7 +79,7 @@ def get_robot_deck_attitude() -> Optional[v1.DeckCalibrationModel]:
pass
except (json.JSONDecodeError, ValidationError):
log.warning(
- "Deck calibration is malformed. Please factory reset your calibrations."
+ "Deck calibration is malformed. Please factory reset your calibrations.",
+ exc_info=True,
)
- pass
return None
diff --git a/api/src/opentrons/calibration_storage/ot2/models/v1.py b/api/src/opentrons/calibration_storage/ot2/models/v1.py
index d70f5731d41..585700c84c5 100644
--- a/api/src/opentrons/calibration_storage/ot2/models/v1.py
+++ b/api/src/opentrons/calibration_storage/ot2/models/v1.py
@@ -32,8 +32,12 @@ class TipLengthModel(BaseModel):
default_factory=CalibrationStatus,
description="The status of the calibration data.",
)
- uri: typing.Union[LabwareUri, Literal[""]] = Field(
- ..., description="The tiprack URI associated with the tip length data."
+ # Old data may have a `uri` field, replaced later by `definitionHash`.
+ # uri: typing.Union[LabwareUri, Literal[""]] = Field(
+ # ..., description="The tiprack URI associated with the tip length data."
+ # )
+ definitionHash: str = Field(
+ ..., description="The tiprack hash associated with the tip length data."
)
@validator("tipLength")
@@ -51,7 +55,7 @@ class DeckCalibrationModel(BaseModel):
attitude: types.AttitudeMatrix = Field(
..., description="Attitude matrix for deck found from calibration."
)
- last_modified: datetime = Field(
+ last_modified: typing.Optional[datetime] = Field(
default=None, description="The last time this deck was calibrated."
)
source: types.SourceType = Field(
diff --git a/api/src/opentrons/calibration_storage/ot2/pipette_offset.py b/api/src/opentrons/calibration_storage/ot2/pipette_offset.py
index ac09a736b4e..a4175b90545 100644
--- a/api/src/opentrons/calibration_storage/ot2/pipette_offset.py
+++ b/api/src/opentrons/calibration_storage/ot2/pipette_offset.py
@@ -92,7 +92,8 @@ def get_pipette_offset(
return None
except (json.JSONDecodeError, ValidationError):
log.warning(
- f"Malformed calibrations for {pipette_id} on {mount}. Please factory reset your calibrations."
+ f"Malformed calibrations for {pipette_id} on {mount}. Please factory reset your calibrations.",
+ exc_info=True,
)
# TODO: Delete the bad calibration here maybe?
return None
diff --git a/api/src/opentrons/calibration_storage/ot2/tip_length.py b/api/src/opentrons/calibration_storage/ot2/tip_length.py
index eca8f723f09..8b5e5369805 100644
--- a/api/src/opentrons/calibration_storage/ot2/tip_length.py
+++ b/api/src/opentrons/calibration_storage/ot2/tip_length.py
@@ -7,6 +7,7 @@
from opentrons import config
from .. import file_operators as io, helpers, types as local_types
+from opentrons_shared_data.pipette.dev_types import LabwareUri
from opentrons.protocols.api_support.constants import OPENTRONS_NAMESPACE
from opentrons.util.helpers import utc_now
@@ -22,9 +23,9 @@
# Get Tip Length Calibration
-def _conver_tip_length_model_to_dict(
- to_dict: typing.Dict[str, v1.TipLengthModel]
-) -> typing.Dict[str, typing.Any]:
+def _convert_tip_length_model_to_dict(
+ to_dict: typing.Dict[LabwareUri, v1.TipLengthModel]
+) -> typing.Dict[LabwareUri, typing.Any]:
# This is a workaround since pydantic doesn't have a nice way to
# add encoders when converting to a dict.
dict_of_tip_lengths = {}
@@ -35,23 +36,44 @@ def _conver_tip_length_model_to_dict(
def tip_lengths_for_pipette(
pipette_id: str,
-) -> typing.Dict[str, v1.TipLengthModel]:
- tip_lengths = {}
+) -> typing.Dict[LabwareUri, v1.TipLengthModel]:
try:
tip_length_filepath = config.get_tip_length_cal_path() / f"{pipette_id}.json"
all_tip_lengths_for_pipette = io.read_cal_file(tip_length_filepath)
- for tiprack, data in all_tip_lengths_for_pipette.items():
- try:
- tip_lengths[tiprack] = v1.TipLengthModel(**data)
- except (json.JSONDecodeError, ValidationError):
- log.warning(
- f"Tip length calibration is malformed for {tiprack} on {pipette_id}"
- )
- pass
- return tip_lengths
except FileNotFoundError:
log.debug(f"Tip length calibrations not found for {pipette_id}")
- return tip_lengths
+ return {}
+ except json.JSONDecodeError:
+ log.warning(
+ f"Tip length calibration is malformed for {pipette_id}", exc_info=True
+ )
+ return {}
+
+ tip_lengths: typing.Dict[LabwareUri, v1.TipLengthModel] = {}
+
+ for tiprack_identifier, data in all_tip_lengths_for_pipette.items():
+ # We normally key these calibrations by their tip rack URI,
+ # but older software had them keyed by their tip rack hash.
+ # Migrate from the old format, if necessary.
+ tiprack_identifier_is_uri = "/" in tiprack_identifier
+ if not tiprack_identifier_is_uri:
+ data["definitionHash"] = tiprack_identifier
+ uri = data.pop("uri", None)
+ if uri is None:
+ # We don't have a way to migrate old records without a URI,
+ # so skip over them.
+ continue
+ else:
+ tiprack_identifier = uri
+
+ try:
+ tip_lengths[LabwareUri(tiprack_identifier)] = v1.TipLengthModel(**data)
+ except ValidationError:
+ log.warning(
+ f"Tip length calibration is malformed for {tiprack_identifier} on {pipette_id}",
+ exc_info=True,
+ )
+ return tip_lengths
def load_tip_length_calibration(
@@ -64,10 +86,10 @@ def load_tip_length_calibration(
:param pip_id: pipette you are using
:param definition: full definition of the tiprack
"""
- labware_hash = helpers.hash_labware_def(definition)
+ labware_uri = helpers.uri_from_definition(definition)
load_name = definition["parameters"]["loadName"]
try:
- return tip_lengths_for_pipette(pip_id)[labware_hash]
+ return tip_lengths_for_pipette(pip_id)[labware_uri]
except KeyError as e:
raise local_types.TipLengthCalNotFound(
f"Tip length of {load_name} has not been "
@@ -89,16 +111,16 @@ def get_all_tip_length_calibrations() -> typing.List[v1.TipLengthCalibration]:
if filepath.stem == "index":
continue
tip_lengths = tip_lengths_for_pipette(filepath.stem)
- for tiprack_hash, tip_length in tip_lengths.items():
+ for tiprack_uri, tip_length in tip_lengths.items():
all_tip_lengths_available.append(
v1.TipLengthCalibration(
pipette=filepath.stem,
- tiprack=tiprack_hash,
+ tiprack=tip_length.definitionHash,
tipLength=tip_length.tipLength,
lastModified=tip_length.lastModified,
source=tip_length.source,
status=tip_length.status,
- uri=tip_length.uri,
+ uri=tiprack_uri,
)
)
return all_tip_lengths_available
@@ -129,28 +151,45 @@ def get_custom_tiprack_definition_for_tlc(labware_uri: str) -> "LabwareDefinitio
# Delete Tip Length Calibration
-def delete_tip_length_calibration(tiprack: str, pipette_id: str) -> None:
+def delete_tip_length_calibration(
+ pipette_id: str,
+ tiprack_uri: typing.Optional[LabwareUri] = None,
+ tiprack_hash: typing.Optional[str] = None,
+) -> None:
"""
- Delete tip length calibration based on tiprack hash and
- pipette serial number
+ Delete tip length calibration based on an optional tiprack uri or
+ tiprack hash and pipette serial number.
- :param tiprack: tiprack hash
+ :param tiprack_uri: tiprack uri
+    :param tiprack_hash: tiprack hash
:param pipette: pipette serial number
"""
tip_lengths = tip_lengths_for_pipette(pipette_id)
-
- if tiprack in tip_lengths:
+ tip_length_dir = config.get_tip_length_cal_path()
+ if tiprack_uri in tip_lengths:
# maybe make modify and delete same file?
- del tip_lengths[tiprack]
- tip_length_dir = config.get_tip_length_cal_path()
+ del tip_lengths[tiprack_uri]
+
+ if tip_lengths:
+ dict_of_tip_lengths = _convert_tip_length_model_to_dict(tip_lengths)
+ io.save_to_file(tip_length_dir, pipette_id, dict_of_tip_lengths)
+ else:
+ io.delete_file(tip_length_dir / f"{pipette_id}.json")
+ elif tiprack_hash and any(tiprack_hash in v.dict() for v in tip_lengths.values()):
+        # NOTE this is for backwards compatibility only
+ # TODO delete this check once the tip_length DELETE router
+ # no longer depends on a tiprack hash
+ for k, v in tip_lengths.items():
+ if tiprack_hash in v.dict():
+ tip_lengths.pop(k)
if tip_lengths:
- dict_of_tip_lengths = _conver_tip_length_model_to_dict(tip_lengths)
+ dict_of_tip_lengths = _convert_tip_length_model_to_dict(tip_lengths)
io.save_to_file(tip_length_dir, pipette_id, dict_of_tip_lengths)
else:
io.delete_file(tip_length_dir / f"{pipette_id}.json")
else:
raise local_types.TipLengthCalNotFound(
- f"Tip length for hash {tiprack} has not been "
+ f"Tip length for uri {tiprack_uri} and hash {tiprack_hash} has not been "
f"calibrated for this pipette: {pipette_id} and cannot"
"be loaded"
)
@@ -176,7 +215,7 @@ def create_tip_length_data(
cal_status: typing.Optional[
typing.Union[local_types.CalibrationStatus, v1.CalibrationStatus]
] = None,
-) -> typing.Dict[str, v1.TipLengthModel]:
+) -> typing.Dict[LabwareUri, v1.TipLengthModel]:
"""
Function to correctly format tip length data.
@@ -197,13 +236,13 @@ def create_tip_length_data(
lastModified=utc_now(),
source=local_types.SourceType.user,
status=cal_status_model,
- uri=labware_uri,
+ definitionHash=labware_hash,
)
if not definition.get("namespace") == OPENTRONS_NAMESPACE:
_save_custom_tiprack_definition(labware_uri, definition)
- data = {labware_hash: tip_length_data}
+ data = {labware_uri: tip_length_data}
return data
@@ -220,7 +259,7 @@ def _save_custom_tiprack_definition(
def save_tip_length_calibration(
pip_id: str,
- tip_length_cal: typing.Dict[str, v1.TipLengthModel],
+ tip_length_cal: typing.Dict[LabwareUri, v1.TipLengthModel],
) -> None:
"""
Function used to save tip length calibration to file.
@@ -235,5 +274,5 @@ def save_tip_length_calibration(
all_tip_lengths.update(tip_length_cal)
- dict_of_tip_lengths = _conver_tip_length_model_to_dict(all_tip_lengths)
+ dict_of_tip_lengths = _convert_tip_length_model_to_dict(all_tip_lengths)
io.save_to_file(tip_length_dir_path, pip_id, dict_of_tip_lengths)
diff --git a/api/src/opentrons/calibration_storage/ot3/deck_attitude.py b/api/src/opentrons/calibration_storage/ot3/deck_attitude.py
index 8f779e4338a..6187459d461 100644
--- a/api/src/opentrons/calibration_storage/ot3/deck_attitude.py
+++ b/api/src/opentrons/calibration_storage/ot3/deck_attitude.py
@@ -77,7 +77,7 @@ def get_robot_belt_attitude() -> Optional[v1.BeltCalibrationModel]:
pass
except (json.JSONDecodeError, ValidationError):
log.warning(
- "Belt calibration is malformed. Please factory reset your calibrations."
+ "Belt calibration is malformed. Please factory reset your calibrations.",
+ exc_info=True,
)
- pass
return None
diff --git a/api/src/opentrons/calibration_storage/ot3/module_offset.py b/api/src/opentrons/calibration_storage/ot3/module_offset.py
index 800ab8380e6..b9a030d1208 100644
--- a/api/src/opentrons/calibration_storage/ot3/module_offset.py
+++ b/api/src/opentrons/calibration_storage/ot3/module_offset.py
@@ -108,7 +108,8 @@ def get_module_offset(
return None
except (json.JSONDecodeError, ValidationError):
log.warning(
- f"Malformed calibrations for {module_id} on slot {slot}. Please factory reset your calibrations."
+ f"Malformed calibrations for {module_id} on slot {slot}. Please factory reset your calibrations.",
+ exc_info=True,
)
return None
@@ -130,7 +131,8 @@ def load_all_module_offsets() -> List[v1.ModuleOffsetModel]:
)
except (json.JSONDecodeError, ValidationError):
log.warning(
- f"Malformed module calibrations for {file}. Please factory reset your calibrations."
+ f"Malformed module calibrations for {file}. Please factory reset your calibrations.",
+ exc_info=True,
)
continue
return calibrations
diff --git a/api/src/opentrons/calibration_storage/ot3/pipette_offset.py b/api/src/opentrons/calibration_storage/ot3/pipette_offset.py
index fcd53bbbf3e..a1e6e1090db 100644
--- a/api/src/opentrons/calibration_storage/ot3/pipette_offset.py
+++ b/api/src/opentrons/calibration_storage/ot3/pipette_offset.py
@@ -89,6 +89,7 @@ def get_pipette_offset(
return None
except (json.JSONDecodeError, ValidationError):
log.warning(
- f"Malformed calibrations for {pipette_id} on {mount}. Please factory reset your calibrations."
+ f"Malformed calibrations for {pipette_id} on {mount}. Please factory reset your calibrations.",
+ exc_info=True,
)
return None
diff --git a/api/src/opentrons/calibration_storage/types.py b/api/src/opentrons/calibration_storage/types.py
index 03aacab252a..bd80af33719 100644
--- a/api/src/opentrons/calibration_storage/types.py
+++ b/api/src/opentrons/calibration_storage/types.py
@@ -34,3 +34,12 @@ class UriDetails:
namespace: str
load_name: str
version: int
+
+
+# TODO(mm, 2023-11-20): Deduplicate this with similar types in robot_server
+# and opentrons.protocol_engine.
+@dataclass
+class CutoutFixturePlacement:
+ cutout_fixture_id: str
+ cutout_id: str
+ opentrons_module_serial_number: typing.Optional[str]
diff --git a/api/src/opentrons/cli/analyze.py b/api/src/opentrons/cli/analyze.py
index 9f5af67c584..96784a340d7 100644
--- a/api/src/opentrons/cli/analyze.py
+++ b/api/src/opentrons/cli/analyze.py
@@ -1,13 +1,29 @@
"""Opentrons analyze CLI."""
import click
-from anyio import run, Path as AsyncPath
+from anyio import run
+from contextlib import contextmanager
+from dataclasses import dataclass
from datetime import datetime, timezone
from pathlib import Path
from pydantic import BaseModel
-from typing import Any, Dict, List, Optional, Sequence, Union
-from typing_extensions import Literal
+from typing import (
+ Any,
+ Dict,
+ List,
+ Optional,
+ Sequence,
+ Union,
+ Literal,
+ Callable,
+ IO,
+ TypeVar,
+ Iterator,
+)
+import logging
+import sys
+from opentrons.protocol_engine.types import RunTimeParameter
from opentrons.protocols.api_support.types import APIVersion
from opentrons.protocol_reader import (
ProtocolReader,
@@ -15,8 +31,9 @@
ProtocolType,
JsonProtocolConfig,
ProtocolFilesInvalidError,
+ ProtocolSource,
)
-from opentrons.protocol_runner import create_simulating_runner
+from opentrons.protocol_runner import create_simulating_runner, RunResult
from opentrons.protocol_engine import (
Command,
ErrorOccurrence,
@@ -27,6 +44,15 @@
)
from opentrons_shared_data.robot.dev_types import RobotType
+from opentrons.util.performance_helpers import track_analysis
+
+OutputKind = Literal["json", "human-json"]
+
+
+@dataclass(frozen=True)
+class _Output:
+ to_file: IO[bytes]
+ kind: OutputKind
@click.command()
@@ -38,16 +64,108 @@
)
@click.option(
"--json-output",
- help="Return analysis results as machine-readable JSON.",
- type=click.Path(path_type=AsyncPath),
+ help="Return analysis results as machine-readable JSON. Specify --json-output=- to use stdout, but be aware that Python protocols may contain print() which will make the output JSON invalid.",
+ type=click.File(mode="wb"),
+)
+@click.option(
+ "--human-json-output",
+ help="Return analysis results as JSON, formatted for human eyes. Specify --human-json-output=- to use stdout, but be aware that Python protocols may contain print() which will make the output JSON invalid.",
+ type=click.File(mode="wb"),
+)
+@click.option(
+ "--check",
+ help="Fail (via exit code) if the protocol had an error. If not specified, always succeed.",
+ is_flag=True,
+ default=False,
+)
+@click.option(
+ "--log-output",
+ help="Where to send logs. Can be a path, - for stdout, or stderr for stderr.",
+ default="stderr",
+ type=str,
)
-def analyze(files: Sequence[Path], json_output: Optional[Path]) -> None:
+@click.option(
+ "--log-level",
+ help="Level of logs to capture.",
+ type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR"], case_sensitive=False),
+ default="WARNING",
+)
+def analyze(
+ files: Sequence[Path],
+ json_output: Optional[IO[bytes]],
+ human_json_output: Optional[IO[bytes]],
+ log_output: str,
+ log_level: str,
+ check: bool,
+) -> int:
"""Analyze a protocol.
You can use `opentrons analyze` to get a protocol's expected
equipment and commands.
"""
- run(_analyze, files, json_output)
+ outputs = _get_outputs(json=json_output, human_json=human_json_output)
+ if not outputs and not check:
+ raise click.UsageError(
+ message="Please specify at least --check or one of the output options."
+ )
+
+ try:
+ with _capture_logs(log_output, log_level):
+ sys.exit(run(_analyze, files, outputs, check))
+ except click.ClickException:
+ raise
+ except Exception as e:
+ raise click.ClickException(str(e))
+
+
+@contextmanager
+def _capture_logs_to_stream(stream: IO[str]) -> Iterator[None]:
+ handler = logging.StreamHandler(stream)
+ logging.getLogger().addHandler(handler)
+ try:
+ yield
+ finally:
+ logging.getLogger().removeHandler(handler)
+
+
+@contextmanager
+def _capture_logs_to_file(filepath: Path) -> Iterator[None]:
+ handler = logging.FileHandler(filepath, mode="w")
+ logging.getLogger().addHandler(handler)
+ try:
+ yield
+ finally:
+ logging.getLogger().removeHandler(handler)
+
+
+@contextmanager
+def _capture_logs(write_to: str, log_level: str) -> Iterator[None]:
+ try:
+ level = getattr(logging, log_level)
+ except AttributeError:
+ raise click.ClickException(f"No such log level {log_level}")
+ logging.getLogger().setLevel(level)
+ if write_to in ("-", "stdout"):
+ with _capture_logs_to_stream(sys.stdout):
+ yield
+ elif write_to == "stderr":
+ with _capture_logs_to_stream(sys.stderr):
+ yield
+ else:
+ with _capture_logs_to_file(Path(write_to)):
+ yield
+
+
+def _get_outputs(
+ json: Optional[IO[bytes]],
+ human_json: Optional[IO[bytes]],
+) -> List[_Output]:
+ outputs: List[_Output] = []
+ if json:
+ outputs.append(_Output(to_file=json, kind="json"))
+ if human_json:
+ outputs.append(_Output(to_file=human_json, kind="human-json"))
+ return outputs
def _get_input_files(files_and_dirs: Sequence[Path]) -> List[Path]:
@@ -62,12 +180,37 @@ def _get_input_files(files_and_dirs: Sequence[Path]) -> List[Path]:
return results
+R = TypeVar("R")
+
+
+def _call_for_output_of_kind(
+ kind: OutputKind, outputs: Sequence[_Output], fn: Callable[[IO[bytes]], R]
+) -> Optional[R]:
+ for output in outputs:
+ if output.kind == kind:
+ return fn(output.to_file)
+ return None
+
+
+def _get_return_code(analysis: RunResult) -> int:
+ if analysis.state_summary.errors:
+ return -1
+ return 0
+
+
+@track_analysis
+async def _do_analyze(protocol_source: ProtocolSource) -> RunResult:
+ runner = await create_simulating_runner(
+ robot_type=protocol_source.robot_type, protocol_config=protocol_source.config
+ )
+ return await runner.run(deck_configuration=[], protocol_source=protocol_source)
+
+
async def _analyze(
- files_and_dirs: Sequence[Path],
- json_output: Optional[AsyncPath],
-) -> None:
+ files_and_dirs: Sequence[Path], outputs: Sequence[_Output], check: bool
+) -> int:
input_files = _get_input_files(files_and_dirs)
-
try:
protocol_source = await ProtocolReader().read_saved(
files=input_files,
@@ -76,46 +219,52 @@ async def _analyze(
except ProtocolFilesInvalidError as error:
raise click.ClickException(str(error))
- runner = await create_simulating_runner(
- robot_type=protocol_source.robot_type, protocol_config=protocol_source.config
- )
- analysis = await runner.run(protocol_source)
-
- if json_output:
- results = AnalyzeResults.construct(
- createdAt=datetime.now(tz=timezone.utc),
- files=[
- ProtocolFile.construct(name=f.path.name, role=f.role)
- for f in protocol_source.files
- ],
- config=(
- JsonConfig.construct(
- schemaVersion=protocol_source.config.schema_version
- )
- if isinstance(protocol_source.config, JsonProtocolConfig)
- else PythonConfig.construct(
- apiVersion=protocol_source.config.api_version
- )
- ),
- metadata=protocol_source.metadata,
- robotType=protocol_source.robot_type,
- commands=analysis.commands,
- errors=analysis.state_summary.errors,
- labware=analysis.state_summary.labware,
- pipettes=analysis.state_summary.pipettes,
- modules=analysis.state_summary.modules,
- liquids=analysis.state_summary.liquids,
- )
+ analysis = await _do_analyze(protocol_source)
+ return_code = _get_return_code(analysis)
- await json_output.write_text(
- results.json(exclude_none=True),
- encoding="utf-8",
- )
+ if not outputs:
+ return return_code
+
+ results = AnalyzeResults.construct(
+ createdAt=datetime.now(tz=timezone.utc),
+ files=[
+ ProtocolFile.construct(name=f.path.name, role=f.role)
+ for f in protocol_source.files
+ ],
+ config=(
+ JsonConfig.construct(schemaVersion=protocol_source.config.schema_version)
+ if isinstance(protocol_source.config, JsonProtocolConfig)
+ else PythonConfig.construct(apiVersion=protocol_source.config.api_version)
+ ),
+ metadata=protocol_source.metadata,
+ robotType=protocol_source.robot_type,
+ runTimeParameters=analysis.parameters,
+ commands=analysis.commands,
+ errors=analysis.state_summary.errors,
+ labware=analysis.state_summary.labware,
+ pipettes=analysis.state_summary.pipettes,
+ modules=analysis.state_summary.modules,
+ liquids=analysis.state_summary.liquids,
+ )
+ _call_for_output_of_kind(
+ "json",
+ outputs,
+ lambda to_file: to_file.write(
+ results.json(exclude_none=True).encode("utf-8"),
+ ),
+ )
+ _call_for_output_of_kind(
+ "human-json",
+ outputs,
+ lambda to_file: to_file.write(
+ results.json(exclude_none=True, indent=2).encode("utf-8")
+ ),
+ )
+ if check:
+ return return_code
else:
- raise click.UsageError(
- "Currently, this tool only supports JSON mode. Use `--json-output`."
- )
+ return 0
class ProtocolFile(BaseModel):
@@ -145,11 +294,18 @@ class AnalyzeResults(BaseModel):
See robot-server's analysis models for field documentation.
"""
+ # We want to unify this local analysis model with the one that robot-server returns.
+ # Until that happens, we need to keep these fields in sync manually.
+
+ # Fields that are currently unique to this local analysis module, missing from robot-server:
createdAt: datetime
files: List[ProtocolFile]
config: Union[JsonConfig, PythonConfig]
metadata: Dict[str, Any]
+
+ # Fields that should match robot-server:
robotType: RobotType
+ runTimeParameters: List[RunTimeParameter]
commands: List[Command]
labware: List[LoadedLabware]
pipettes: List[LoadedPipette]
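The rewritten CLI fans a single serialized analysis out to any combination of `--json-output` and `--human-json-output` via the `_Output` dataclass and `_call_for_output_of_kind`. A standalone sketch of that dispatch pattern, using in-memory buffers and a fabricated payload in place of the real `AnalyzeResults` model:

```python
# Standalone sketch of the output-dispatch pattern above; the payload is
# fabricated and BytesIO stands in for the files click opens in "wb" mode.
import io
import json
from dataclasses import dataclass
from typing import IO, Callable, List, Literal, Optional, Sequence, TypeVar

OutputKind = Literal["json", "human-json"]
R = TypeVar("R")


@dataclass(frozen=True)
class _Output:
    to_file: IO[bytes]
    kind: OutputKind


def _call_for_output_of_kind(
    kind: OutputKind, outputs: Sequence[_Output], fn: Callable[[IO[bytes]], R]
) -> Optional[R]:
    for output in outputs:
        if output.kind == kind:
            return fn(output.to_file)
    return None


results = {"errors": [], "commands": ["home", "aspirate"]}  # fabricated payload
outputs: List[_Output] = [
    _Output(to_file=io.BytesIO(), kind="json"),
    _Output(to_file=io.BytesIO(), kind="human-json"),
]

# Compact JSON for machines, indented JSON for humans -- same data either way.
_call_for_output_of_kind(
    "json", outputs, lambda f: f.write(json.dumps(results).encode("utf-8"))
)
_call_for_output_of_kind(
    "human-json",
    outputs,
    lambda f: f.write(json.dumps(results, indent=2).encode("utf-8")),
)

for out in outputs:
    print(out.kind, out.to_file.getvalue()[:40])  # BytesIO-specific peek
```

This shape lets `_analyze` build one `results` object and write every requested output without knowing which flags were passed.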
diff --git a/api/src/opentrons/commands/helpers.py b/api/src/opentrons/commands/helpers.py
deleted file mode 100644
index 96d41ed3f6a..00000000000
--- a/api/src/opentrons/commands/helpers.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from typing import List, Union
-
-from opentrons.protocol_api.labware import Well
-from opentrons.types import Location
-
-
-CommandLocation = Union[Location, Well]
-
-
-def listify(
- location: Union[CommandLocation, List[CommandLocation]]
-) -> List[CommandLocation]:
- if isinstance(location, list):
- try:
- return listify(location[0])
- except IndexError:
- # TODO(mc, 2021-10-20): this looks like a bug; should this
- # return an empty list, instead?
- return [location] # type: ignore[list-item]
- else:
- return [location]
-
-
-def _stringify_new_loc(loc: CommandLocation) -> str:
- if isinstance(loc, Location):
- if loc.labware.is_empty:
- return str(loc.point)
- else:
- return repr(loc.labware)
- elif isinstance(loc, Well):
- return str(loc)
- else:
- raise TypeError(loc)
-
-
-def stringify_location(location: Union[CommandLocation, List[CommandLocation]]) -> str:
- loc_str_list = [_stringify_new_loc(loc) for loc in listify(location)]
- return ", ".join(loc_str_list)
diff --git a/api/src/opentrons/commands/types.py b/api/src/opentrons/commands/types.py
deleted file mode 100755
index 912ad1cc29d..00000000000
--- a/api/src/opentrons/commands/types.py
+++ /dev/null
@@ -1,829 +0,0 @@
-from __future__ import annotations
-
-from typing_extensions import Literal, Final, TypedDict
-from typing import Optional, List, Sequence, TYPE_CHECKING, Union
-from opentrons.hardware_control.modules import ThermocyclerStep
-
-if TYPE_CHECKING:
- from opentrons.protocol_api import InstrumentContext
- from opentrons.protocol_api.labware import Well
-
-from opentrons.types import Location
-
-
-# type for subscriptions
-COMMAND: Final = "command"
-
-# Robot #
-
-DELAY: Final = "command.DELAY"
-HOME: Final = "command.HOME"
-PAUSE: Final = "command.PAUSE"
-RESUME: Final = "command.RESUME"
-COMMENT: Final = "command.COMMENT"
-
-# Pipette #
-
-ASPIRATE: Final = "command.ASPIRATE"
-DISPENSE: Final = "command.DISPENSE"
-MIX: Final = "command.MIX"
-CONSOLIDATE: Final = "command.CONSOLIDATE"
-DISTRIBUTE: Final = "command.DISTRIBUTE"
-TRANSFER: Final = "command.TRANSFER"
-PICK_UP_TIP: Final = "command.PICK_UP_TIP"
-DROP_TIP: Final = "command.DROP_TIP"
-BLOW_OUT: Final = "command.BLOW_OUT"
-AIR_GAP: Final = "command.AIR_GAP"
-TOUCH_TIP: Final = "command.TOUCH_TIP"
-RETURN_TIP: Final = "command.RETURN_TIP"
-MOVE_TO: Final = "command.MOVE_TO"
-
-# Modules #
-
-HEATER_SHAKER_SET_TARGET_TEMPERATURE: Final = (
- "command.HEATER_SHAKER_SET_TARGET_TEMPERATURE"
-)
-HEATER_SHAKER_WAIT_FOR_TEMPERATURE: Final = "command.HEATER_SHAKER_WAIT_FOR_TEMPERATURE"
-HEATER_SHAKER_SET_AND_WAIT_FOR_SHAKE_SPEED: Final = (
- "command.HEATER_SHAKER_SET_AND_WAIT_FOR_SHAKE_SPEED"
-)
-HEATER_SHAKER_OPEN_LABWARE_LATCH: Final = "command.HEATER_SHAKER_OPEN_LABWARE_LATCH"
-HEATER_SHAKER_CLOSE_LABWARE_LATCH: Final = "command.HEATER_SHAKER_CLOSE_LABWARE_LATCH"
-HEATER_SHAKER_DEACTIVATE_SHAKER: Final = "command.HEATER_SHAKER_DEACTIVATE_SHAKER"
-HEATER_SHAKER_DEACTIVATE_HEATER: Final = "command.HEATER_SHAKER_DEACTIVATE_HEATER"
-
-MAGDECK_CALIBRATE: Final = "command.MAGDECK_CALIBRATE"
-MAGDECK_DISENGAGE: Final = "command.MAGDECK_DISENGAGE"
-MAGDECK_ENGAGE: Final = "command.MAGDECK_ENGAGE"
-
-TEMPDECK_DEACTIVATE: Final = "command.TEMPDECK_DEACTIVATE"
-TEMPDECK_SET_TEMP: Final = "command.TEMPDECK_SET_TEMP"
-TEMPDECK_AWAIT_TEMP: Final = "command.TEMPDECK_AWAIT_TEMP"
-
-THERMOCYCLER_OPEN: Final = "command.THERMOCYCLER_OPEN"
-THERMOCYCLER_CLOSE: Final = "command.THERMOCYCLER_CLOSE"
-THERMOCYCLER_SET_BLOCK_TEMP: Final = "command.THERMOCYCLER_SET_BLOCK_TEMP"
-THERMOCYCLER_EXECUTE_PROFILE: Final = "command.THERMOCYCLER_EXECUTE_PROFILE"
-THERMOCYCLER_DEACTIVATE: Final = "command.THERMOCYCLER_DEACTIVATE"
-THERMOCYCLER_WAIT_FOR_HOLD: Final = "command.THERMOCYCLER_WAIT_FOR_HOLD"
-THERMOCYCLER_WAIT_FOR_TEMP: Final = "command.THERMOCYCLER_WAIT_FOR_TEMP"
-THERMOCYCLER_WAIT_FOR_LID_TEMP: Final = "command.THERMOCYCLER_WAIT_FOR_LID_TEMP"
-THERMOCYCLER_SET_LID_TEMP: Final = "command.THERMOCYCLER_SET_LID_TEMP"
-THERMOCYCLER_DEACTIVATE_LID: Final = "command.THERMOCYCLER_DEACTIVATE_LID"
-THERMOCYCLER_DEACTIVATE_BLOCK: Final = "command.THERMOCYCLER_DEACTIVATE_BLOCK"
-
-
-class TextOnlyPayload(TypedDict):
- text: str
-
-
-class MultiLocationPayload(TypedDict):
- locations: Sequence[Union[Location, Well]]
-
-
-class OptionalMultiLocationPayload(TypedDict):
- locations: Optional[Sequence[Union[Location, Well]]]
-
-
-class SingleInstrumentPayload(TypedDict):
- instrument: InstrumentContext
-
-
-class MultiInstrumentPayload(TypedDict):
- instruments: Sequence[InstrumentContext]
-
-
-class CommentCommandPayload(TextOnlyPayload):
- pass
-
-
-class CommentCommand(TypedDict):
- name: Literal["command.COMMENT"]
- payload: CommentCommandPayload
-
-
-class DelayCommandPayload(TextOnlyPayload):
- minutes: float
- seconds: float
-
-
-class DelayCommand(TypedDict):
- name: Literal["command.DELAY"]
- payload: DelayCommandPayload
-
-
-class PauseCommandPayload(TextOnlyPayload):
- userMessage: Optional[str]
-
-
-class PauseCommand(TypedDict):
- name: Literal["command.PAUSE"]
- payload: PauseCommandPayload
-
-
-class ResumeCommandPayload(TextOnlyPayload):
- pass
-
-
-class ResumeCommand(TypedDict):
- name: Literal["command.RESUME"]
- payload: ResumeCommandPayload
-
-
-class HeaterShakerSetTargetTemperaturePayload(TextOnlyPayload):
- pass
-
-
-class HeaterShakerSetTargetTemperatureCommand(TypedDict):
- name: Literal["command.HEATER_SHAKER_SET_TARGET_TEMPERATURE"]
- payload: HeaterShakerSetTargetTemperaturePayload
-
-
-class HeaterShakerWaitForTemperaturePayload(TextOnlyPayload):
- pass
-
-
-class HeaterShakerWaitForTemperatureCommand(TypedDict):
- name: Literal["command.HEATER_SHAKER_WAIT_FOR_TEMPERATURE"]
- payload: HeaterShakerWaitForTemperaturePayload
-
-
-class HeaterShakerSetAndWaitForShakeSpeedPayload(TextOnlyPayload):
- pass
-
-
-class HeaterShakerSetAndWaitForShakeSpeedCommand(TypedDict):
- name: Literal["command.HEATER_SHAKER_SET_AND_WAIT_FOR_SHAKE_SPEED"]
- payload: HeaterShakerSetAndWaitForShakeSpeedPayload
-
-
-class HeaterShakerOpenLabwareLatchPayload(TextOnlyPayload):
- pass
-
-
-class HeaterShakerOpenLabwareLatchCommand(TypedDict):
- name: Literal["command.HEATER_SHAKER_OPEN_LABWARE_LATCH"]
- payload: HeaterShakerOpenLabwareLatchPayload
-
-
-class HeaterShakerCloseLabwareLatchPayload(TextOnlyPayload):
- pass
-
-
-class HeaterShakerCloseLabwareLatchCommand(TypedDict):
- name: Literal["command.HEATER_SHAKER_CLOSE_LABWARE_LATCH"]
- payload: HeaterShakerCloseLabwareLatchPayload
-
-
-class HeaterShakerDeactivateShakerPayload(TextOnlyPayload):
- pass
-
-
-class HeaterShakerDeactivateShakerCommand(TypedDict):
- name: Literal["command.HEATER_SHAKER_DEACTIVATE_SHAKER"]
- payload: HeaterShakerDeactivateShakerPayload
-
-
-class HeaterShakerDeactivateHeaterPayload(TextOnlyPayload):
- pass
-
-
-class HeaterShakerDeactivateHeaterCommand(TypedDict):
- name: Literal["command.HEATER_SHAKER_DEACTIVATE_HEATER"]
- payload: HeaterShakerDeactivateHeaterPayload
-
-
-class MagdeckEngageCommandPayload(TextOnlyPayload):
- pass
-
-
-class MagdeckEngageCommand(TypedDict):
- name: Literal["command.MAGDECK_ENGAGE"]
- payload: MagdeckEngageCommandPayload
-
-
-class MagdeckDisengageCommandPayload(TextOnlyPayload):
- pass
-
-
-class MagdeckDisengageCommand(TypedDict):
- name: Literal["command.MAGDECK_DISENGAGE"]
- payload: MagdeckDisengageCommandPayload
-
-
-class MagdeckCalibrateCommandPayload(TextOnlyPayload):
- pass
-
-
-class MagdeckCalibrateCommand(TypedDict):
- name: Literal["command.MAGDECK_CALIBRATE"]
- payload: MagdeckCalibrateCommandPayload
-
-
-class TempdeckSetTempCommandPayload(TextOnlyPayload):
- celsius: float
-
-
-class TempdeckSetTempCommand(TypedDict):
- name: Literal["command.TEMPDECK_SET_TEMP"]
- payload: TempdeckSetTempCommandPayload
-
-
-class TempdeckAwaitTempCommandPayload(TextOnlyPayload):
- celsius: float
-
-
-class TempdeckAwaitTempCommand(TypedDict):
- name: Literal["command.TEMPDECK_AWAIT_TEMP"]
- payload: TempdeckAwaitTempCommandPayload
-
-
-class TempdeckDeactivateCommandPayload(TextOnlyPayload):
- pass
-
-
-class TempdeckDeactivateCommand(TypedDict):
- name: Literal["command.TEMPDECK_DEACTIVATE"]
- payload: TempdeckDeactivateCommandPayload
-
-
-class ThermocyclerOpenCommandPayload(TextOnlyPayload):
- pass
-
-
-class ThermocyclerOpenCommand(TypedDict):
- name: Literal["command.THERMOCYCLER_OPEN"]
- payload: ThermocyclerOpenCommandPayload
-
-
-class ThermocyclerSetBlockTempCommandPayload(TextOnlyPayload):
- temperature: float
- hold_time: Optional[float]
-
-
-class ThermocyclerSetBlockTempCommand(TypedDict):
- name: Literal["command.THERMOCYCLER_SET_BLOCK_TEMP"]
- payload: ThermocyclerSetBlockTempCommandPayload
-
-
-class ThermocyclerExecuteProfileCommandPayload(TextOnlyPayload):
- steps: List[ThermocyclerStep]
-
-
-class ThermocyclerExecuteProfileCommand(TypedDict):
- name: Literal["command.THERMOCYCLER_EXECUTE_PROFILE"]
- payload: ThermocyclerExecuteProfileCommandPayload
-
-
-class ThermocyclerWaitForHoldCommandPayload(TextOnlyPayload):
- pass
-
-
-class ThermocyclerWaitForHoldCommand(TypedDict):
- name: Literal["command.THERMOCYCLER_WAIT_FOR_HOLD"]
- payload: ThermocyclerWaitForHoldCommandPayload
-
-
-class ThermocyclerWaitForTempCommandPayload(TextOnlyPayload):
- pass
-
-
-class ThermocyclerWaitForTempCommand(TypedDict):
- name: Literal["command.THERMOCYCLER_WAIT_FOR_TEMP"]
- payload: ThermocyclerWaitForTempCommandPayload
-
-
-class ThermocyclerSetLidTempCommandPayload(TextOnlyPayload):
- pass
-
-
-class ThermocyclerSetLidTempCommand(TypedDict):
- name: Literal["command.THERMOCYCLER_SET_LID_TEMP"]
- payload: ThermocyclerSetLidTempCommandPayload
-
-
-class ThermocyclerDeactivateLidCommandPayload(TextOnlyPayload):
- pass
-
-
-class ThermocyclerDeactivateLidCommand(TypedDict):
- name: Literal["command.THERMOCYCLER_DEACTIVATE_LID"]
- payload: ThermocyclerDeactivateLidCommandPayload
-
-
-class ThermocyclerDeactivateBlockCommandPayload(TextOnlyPayload):
- pass
-
-
-class ThermocyclerDeactivateBlockCommand(TypedDict):
- name: Literal["command.THERMOCYCLER_DEACTIVATE_BLOCK"]
- payload: ThermocyclerDeactivateBlockCommandPayload
-
-
-class ThermocyclerDeactivateCommandPayload(TextOnlyPayload):
- pass
-
-
-class ThermocyclerDeactivateCommand(TypedDict):
- name: Literal["command.THERMOCYCLER_DEACTIVATE"]
- payload: ThermocyclerDeactivateCommandPayload
-
-
-class ThermocyclerWaitForLidTempCommandPayload(TextOnlyPayload):
- pass
-
-
-class ThermocyclerWaitForLidTempCommand(TypedDict):
- name: Literal["command.THERMOCYCLER_WAIT_FOR_LID_TEMP"]
- payload: ThermocyclerWaitForLidTempCommandPayload
-
-
-class ThermocyclerCloseCommandPayload(TextOnlyPayload):
- pass
-
-
-class ThermocyclerCloseCommand(TypedDict):
- name: Literal["command.THERMOCYCLER_CLOSE"]
- payload: ThermocyclerCloseCommandPayload
-
-
-class HomeCommandPayload(TextOnlyPayload):
- axis: str
-
-
-class HomeCommand(TypedDict):
- name: Literal["command.HOME"]
- payload: HomeCommandPayload
-
-
-class AspirateDispenseCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
- location: Location
- volume: float
- rate: float
-
-
-class AspirateCommand(TypedDict):
- name: Literal["command.ASPIRATE"]
- payload: AspirateDispenseCommandPayload
-
-
-class DispenseCommand(TypedDict):
- name: Literal["command.DISPENSE"]
- payload: AspirateDispenseCommandPayload
-
-
-class ConsolidateCommandPayload(
- TextOnlyPayload, MultiLocationPayload, SingleInstrumentPayload
-):
- volume: Union[float, List[float]]
- source: List[Union[Location, Well]]
- dest: Union[Location, Well]
-
-
-class ConsolidateCommand(TypedDict):
- name: Literal["command.CONSOLIDATE"]
- payload: ConsolidateCommandPayload
-
-
-class DistributeCommandPayload(
- TextOnlyPayload, MultiLocationPayload, SingleInstrumentPayload
-):
- volume: Union[float, List[float]]
- source: Union[Location, Well]
- dest: List[Union[Location, Well]]
-
-
-class DistributeCommand(TypedDict):
- name: Literal["command.DISTRIBUTE"]
- payload: DistributeCommandPayload
-
-
-class TransferCommandPayload(
- TextOnlyPayload, MultiLocationPayload, SingleInstrumentPayload
-):
- volume: Union[float, List[float]]
- source: List[Union[Location, Well]]
- dest: List[Union[Location, Well]]
-
-
-class TransferCommand(TypedDict):
- name: Literal["command.TRANSFER"]
- payload: TransferCommandPayload
-
-
-class MixCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
- location: Union[None, Location, Well]
- volume: float
- repetitions: int
-
-
-class MixCommand(TypedDict):
- name: Literal["command.MIX"]
- payload: MixCommandPayload
-
-
-class BlowOutCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
- location: Optional[Location]
-
-
-class BlowOutCommand(TypedDict):
- name: Literal["command.BLOW_OUT"]
- payload: BlowOutCommandPayload
-
-
-class TouchTipCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
- pass
-
-
-class TouchTipCommand(TypedDict):
- name: Literal["command.TOUCH_TIP"]
- payload: TouchTipCommandPayload
-
-
-class AirGapCommandPayload(TextOnlyPayload):
- pass
-
-
-class AirGapCommand(TypedDict):
- name: Literal["command.AIR_GAP"]
- payload: AirGapCommandPayload
-
-
-class ReturnTipCommandPayload(TextOnlyPayload):
- pass
-
-
-class ReturnTipCommand(TypedDict):
- name: Literal["command.RETURN_TIP"]
- payload: ReturnTipCommandPayload
-
-
-class PickUpTipCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
- location: Well
-
-
-class PickUpTipCommand(TypedDict):
- name: Literal["command.PICK_UP_TIP"]
- payload: PickUpTipCommandPayload
-
-
-class DropTipCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
- location: Well
-
-
-class DropTipCommand(TypedDict):
- name: Literal["command.DROP_TIP"]
- payload: DropTipCommandPayload
-
-
-class MoveToCommand(TypedDict):
- name: Literal["command.MOVE_TO"]
- payload: MoveToCommandPayload
-
-
-class MoveToCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
- location: Location
-
-
-Command = Union[
- DropTipCommand,
- PickUpTipCommand,
- ReturnTipCommand,
- AirGapCommand,
- TouchTipCommand,
- BlowOutCommand,
- MixCommand,
- TransferCommand,
- DistributeCommand,
- ConsolidateCommand,
- DispenseCommand,
- AspirateCommand,
- HomeCommand,
- HeaterShakerSetTargetTemperatureCommand,
- HeaterShakerWaitForTemperatureCommand,
- HeaterShakerSetAndWaitForShakeSpeedCommand,
- HeaterShakerOpenLabwareLatchCommand,
- HeaterShakerCloseLabwareLatchCommand,
- HeaterShakerDeactivateShakerCommand,
- HeaterShakerDeactivateHeaterCommand,
- ThermocyclerCloseCommand,
- ThermocyclerWaitForLidTempCommand,
- ThermocyclerDeactivateCommand,
- ThermocyclerDeactivateBlockCommand,
- ThermocyclerDeactivateLidCommand,
- ThermocyclerSetLidTempCommand,
- ThermocyclerWaitForTempCommand,
- ThermocyclerWaitForHoldCommand,
- ThermocyclerExecuteProfileCommand,
- ThermocyclerSetBlockTempCommand,
- ThermocyclerOpenCommand,
- TempdeckDeactivateCommand,
- TempdeckAwaitTempCommand,
- TempdeckSetTempCommand,
- MagdeckCalibrateCommand,
- MagdeckDisengageCommand,
- MagdeckEngageCommand,
- ResumeCommand,
- PauseCommand,
- DelayCommand,
- CommentCommand,
- MoveToCommand,
-]
-
-
-CommandPayload = Union[
- CommentCommandPayload,
- ResumeCommandPayload,
- HeaterShakerSetTargetTemperaturePayload,
- HeaterShakerWaitForTemperaturePayload,
- HeaterShakerSetAndWaitForShakeSpeedPayload,
- HeaterShakerOpenLabwareLatchPayload,
- HeaterShakerCloseLabwareLatchPayload,
- HeaterShakerDeactivateShakerPayload,
- HeaterShakerDeactivateHeaterPayload,
- MagdeckEngageCommandPayload,
- MagdeckDisengageCommandPayload,
- MagdeckCalibrateCommandPayload,
- ThermocyclerOpenCommandPayload,
- ThermocyclerWaitForHoldCommandPayload,
- ThermocyclerWaitForTempCommandPayload,
- ThermocyclerSetLidTempCommandPayload,
- ThermocyclerDeactivateLidCommandPayload,
- ThermocyclerDeactivateBlockCommandPayload,
- ThermocyclerDeactivateCommandPayload,
- ThermocyclerWaitForLidTempCommand,
- ThermocyclerCloseCommandPayload,
- AirGapCommandPayload,
- ReturnTipCommandPayload,
- DropTipCommandPayload,
- PickUpTipCommandPayload,
- TouchTipCommandPayload,
- BlowOutCommandPayload,
- MixCommandPayload,
- TransferCommandPayload,
- DistributeCommandPayload,
- ConsolidateCommandPayload,
- AspirateDispenseCommandPayload,
- HomeCommandPayload,
- ThermocyclerExecuteProfileCommandPayload,
- ThermocyclerSetBlockTempCommandPayload,
- TempdeckAwaitTempCommandPayload,
- TempdeckSetTempCommandPayload,
- PauseCommandPayload,
- DelayCommandPayload,
- MoveToCommandPayload,
-]
-
-
-MessageSequenceId = Union[Literal["before"], Literal["after"]]
-
-
-CommandMessageFields = TypedDict(
- "CommandMessageFields",
- {"$": MessageSequenceId, "id": str, "error": Optional[Exception]},
-)
-
-
-class MoveToMessage(CommandMessageFields, MoveToCommand):
- pass
-
-
-class DropTipMessage(CommandMessageFields, DropTipCommand):
- pass
-
-
-class PickUpTipMessage(CommandMessageFields, PickUpTipCommand):
- pass
-
-
-class ReturnTipMessage(CommandMessageFields, ReturnTipCommand):
- pass
-
-
-class AirGapMessage(CommandMessageFields, AirGapCommand):
- pass
-
-
-class TouchTipMessage(CommandMessageFields, TouchTipCommand):
- pass
-
-
-class BlowOutMessage(CommandMessageFields, BlowOutCommand):
- pass
-
-
-class MixMessage(CommandMessageFields, MixCommand):
- pass
-
-
-class TransferMessage(CommandMessageFields, TransferCommand):
- pass
-
-
-class DistributeMessage(CommandMessageFields, DistributeCommand):
- pass
-
-
-class ConsolidateMessage(CommandMessageFields, ConsolidateCommand):
- pass
-
-
-class DispenseMessage(CommandMessageFields, DispenseCommand):
- pass
-
-
-class AspirateMessage(CommandMessageFields, AspirateCommand):
- pass
-
-
-class HomeMessage(CommandMessageFields, HomeCommand):
- pass
-
-
-class HeaterShakerSetTargetTemperatureMessage(
- CommandMessageFields, HeaterShakerSetTargetTemperatureCommand
-):
- pass
-
-
-class HeaterShakerWaitForTemperatureMessage(
- CommandMessageFields, HeaterShakerWaitForTemperatureCommand
-):
- pass
-
-
-class HeaterShakerSetAndWaitForShakeSpeedMessage(
- CommandMessageFields, HeaterShakerSetAndWaitForShakeSpeedCommand
-):
- pass
-
-
-class HeaterShakerOpenLabwareLatchMessage(
- CommandMessageFields, HeaterShakerOpenLabwareLatchCommand
-):
- pass
-
-
-class HeaterShakerCloseLabwareLatchMessage(
- CommandMessageFields, HeaterShakerCloseLabwareLatchCommand
-):
- pass
-
-
-class HeaterShakerDeactivateShakerMessage(
- CommandMessageFields, HeaterShakerDeactivateShakerCommand
-):
- pass
-
-
-class HeaterShakerDeactivateHeaterMessage(
- CommandMessageFields, HeaterShakerDeactivateHeaterCommand
-):
- pass
-
-
-class ThermocyclerCloseMessage(CommandMessageFields, ThermocyclerCloseCommand):
- pass
-
-
-class ThermocyclerWaitForLidTempMessage(
- CommandMessageFields, ThermocyclerWaitForLidTempCommand
-):
- pass
-
-
-class ThermocyclerDeactivateMessage(
- CommandMessageFields, ThermocyclerDeactivateCommand
-):
- pass
-
-
-class ThermocyclerDeactivateBlockMessage(
- CommandMessageFields, ThermocyclerDeactivateBlockCommand
-):
- pass
-
-
-class ThermocyclerDeactivateLidMessage(
- CommandMessageFields, ThermocyclerDeactivateLidCommand
-):
- pass
-
-
-class ThermocyclerSetLidTempMessage(
- CommandMessageFields, ThermocyclerSetLidTempCommand
-):
- pass
-
-
-class ThermocyclerWaitForTempMessage(
- CommandMessageFields, ThermocyclerWaitForTempCommand
-):
- pass
-
-
-class ThermocyclerWaitForHoldMessage(
- CommandMessageFields, ThermocyclerWaitForHoldCommand
-):
- pass
-
-
-class ThermocyclerExecuteProfileMessage(
- CommandMessageFields, ThermocyclerExecuteProfileCommand
-):
- pass
-
-
-class ThermocyclerSetBlockTempMessage(
- CommandMessageFields, ThermocyclerSetBlockTempCommand
-):
- pass
-
-
-class ThermocyclerOpenMessage(CommandMessageFields, ThermocyclerOpenCommand):
- pass
-
-
-class TempdeckDeactivateMessage(CommandMessageFields, TempdeckDeactivateCommand):
- pass
-
-
-class TempdeckAwaitTempMessage(CommandMessageFields, TempdeckAwaitTempCommand):
- pass
-
-
-class TempdeckSetTempMessage(CommandMessageFields, TempdeckSetTempCommand):
- pass
-
-
-class MagdeckCalibrateMessage(CommandMessageFields, MagdeckCalibrateCommand):
- pass
-
-
-class MagdeckDisengageMessage(CommandMessageFields, MagdeckDisengageCommand):
- pass
-
-
-class MagdeckEngageMessage(CommandMessageFields, MagdeckEngageCommand):
- pass
-
-
-class ResumeMessage(CommandMessageFields, ResumeCommand):
- pass
-
-
-class PauseMessage(CommandMessageFields, PauseCommand):
- pass
-
-
-class DelayMessage(CommandMessageFields, DelayCommand):
- pass
-
-
-class CommentMessage(CommandMessageFields, CommentCommand):
- pass
-
-
-CommandMessage = Union[
- DropTipMessage,
- PickUpTipMessage,
- ReturnTipMessage,
- AirGapMessage,
- TouchTipMessage,
- BlowOutMessage,
- MixMessage,
- TransferMessage,
- DistributeMessage,
- ConsolidateMessage,
- DispenseMessage,
- AspirateMessage,
- HomeMessage,
- HeaterShakerSetTargetTemperatureMessage,
- HeaterShakerWaitForTemperatureMessage,
- HeaterShakerSetAndWaitForShakeSpeedMessage,
- HeaterShakerOpenLabwareLatchMessage,
- HeaterShakerCloseLabwareLatchMessage,
- HeaterShakerDeactivateShakerMessage,
- HeaterShakerDeactivateHeaterMessage,
- ThermocyclerCloseMessage,
- ThermocyclerWaitForLidTempMessage,
- ThermocyclerDeactivateMessage,
- ThermocyclerDeactivateBlockMessage,
- ThermocyclerDeactivateLidMessage,
- ThermocyclerSetLidTempMessage,
- ThermocyclerWaitForTempMessage,
- ThermocyclerWaitForHoldMessage,
- ThermocyclerExecuteProfileMessage,
- ThermocyclerSetBlockTempMessage,
- ThermocyclerOpenMessage,
- TempdeckSetTempMessage,
- TempdeckDeactivateMessage,
- MagdeckEngageMessage,
- MagdeckDisengageMessage,
- MagdeckCalibrateMessage,
- CommentMessage,
- DelayMessage,
- PauseMessage,
- ResumeMessage,
- MoveToMessage,
-]
diff --git a/api/src/opentrons/config/__init__.py b/api/src/opentrons/config/__init__.py
index 6429ae154fb..a4571521211 100644
--- a/api/src/opentrons/config/__init__.py
+++ b/api/src/opentrons/config/__init__.py
@@ -184,7 +184,7 @@ class ConfigElement(NamedTuple):
"Deck Calibration",
Path("deck_calibration.json"),
ConfigElementType.FILE,
- "The file storing the deck calibration",
+ "The file storing the deck calibration. Superseded in v4 by robot_calibration_dir.",
),
ConfigElement(
"log_dir",
@@ -284,6 +284,13 @@ class ConfigElement(NamedTuple):
ConfigElementType.DIR,
"The dir where module calibration is stored",
),
+ ConfigElement(
+ "performance_metrics_dir",
+ "Performance Metrics Directory",
+ Path("performance_metrics_data"),
+ ConfigElementType.DIR,
+ "The dir where performance metrics are stored",
+ ),
)
#: The available configuration file elements to modify. All of these can be
#: changed by editing opentrons.json, where the keys are the name elements,
@@ -602,3 +609,7 @@ def get_tip_length_cal_path() -> Path:
def get_custom_tiprack_def_path() -> Path:
return get_opentrons_path("custom_tiprack_dir")
+
+
+def get_performance_metrics_data_dir() -> Path:
+ return get_opentrons_path("performance_metrics_dir")
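`get_performance_metrics_data_dir` follows the same pattern as the other path getters: resolve a `ConfigElement` by name through `get_opentrons_path`. A hedged usage sketch, assuming this branch of the `opentrons` package is importable; the `mkdir` call is an illustrative convenience, not behavior shown in this diff:

```python
# Hedged usage sketch; the mkdir step is an assumption for the example.
from pathlib import Path

from opentrons.config import get_performance_metrics_data_dir


def prepare_metrics_dir() -> Path:
    data_dir = get_performance_metrics_data_dir()
    data_dir.mkdir(parents=True, exist_ok=True)  # assumed convenience step
    return data_dir


print(prepare_metrics_dir())
```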
diff --git a/api/src/opentrons/config/advanced_settings.py b/api/src/opentrons/config/advanced_settings.py
index 97629fcd2e9..f65b5824eb1 100644
--- a/api/src/opentrons/config/advanced_settings.py
+++ b/api/src/opentrons/config/advanced_settings.py
@@ -17,7 +17,6 @@
)
from opentrons.config import CONFIG, ARCHITECTURE, SystemArchitecture
-from opentrons.system import log_control
from opentrons_shared_data.robot.dev_types import RobotTypeEnum
if TYPE_CHECKING:
@@ -109,22 +108,6 @@ def __init__(self) -> None:
robot_type=[RobotTypeEnum.OT2, RobotTypeEnum.FLEX],
)
- async def on_change(self, value: Optional[bool]) -> None:
- """Special side effect for this setting"""
- if ARCHITECTURE == SystemArchitecture.BUILDROOT:
- code, stdout, stderr = await log_control.set_syslog_level(
- "emerg" if value else "info"
- )
- if code != 0:
- log.error(
- f"Could not set log control: {code}: stdout={stdout}"
- f" stderr={stderr}"
- )
- raise SettingException(
- f"Failed to set log upstreaming: {code}", "log-config-failure"
- )
- await super().on_change(value)
-
class Setting(NamedTuple):
value: Optional[bool]
@@ -176,19 +159,6 @@ class Setting(NamedTuple):
robot_type=[RobotTypeEnum.OT2],
default_true_on_robot_types=[RobotTypeEnum.FLEX],
),
- SettingDefinition(
- _id="disableFastProtocolUpload",
- title="Use older protocol analysis method",
- description=(
- "Use an older, slower method of analyzing uploaded protocols. "
- "This changes how the OT-2 validates your protocol during the upload "
- "step, but does not affect how your protocol actually runs. "
- "Opentrons Support might ask you to change this setting if you encounter "
- "problems with the newer, faster protocol analysis method."
- ),
- restart_required=False,
- robot_type=[RobotTypeEnum.OT2, RobotTypeEnum.FLEX],
- ),
SettingDefinition(
_id="enableOT3HardwareController",
title="Enable experimental OT-3 hardware controller",
@@ -232,16 +202,41 @@ class Setting(NamedTuple):
),
SettingDefinition(
_id="disableOverpressureDetection",
- title="Disable overpressure detection on pipettes.",
- description="This setting disables overpressure detection on pipettes, do not turn this feature off unless recommended.",
+ title="Disable Flex pipette pressure sensing.",
+ description="When this setting is on, Flex will continue its activities regardless of pressure changes inside the pipette. Do not turn this setting on unless you are intentionally causing pressures over 8 kPa inside the pipette air channel.",
+ robot_type=[RobotTypeEnum.FLEX],
+ ),
+ SettingDefinition(
+ _id="enableErrorRecoveryExperiments",
+ title="Enable error recovery experiments",
+ description=(
+ "Do not enable."
+ " This is an Opentrons internal setting to experiment with"
+ " in-development error recovery features."
+ " This will interfere with your protocol runs,"
+ " corrupt your robot's storage,"
+ " bring misfortune and pestilence upon you and your livestock, etc."
+ ),
robot_type=[RobotTypeEnum.FLEX],
+ internal_only=True,
),
SettingDefinition(
- _id="disableTipPresenceDetection",
- title="Disable tip presence detection on pipettes.",
- description="This setting disables tip presence detection on pipettes, do not turn this feature off unless recommended.",
+ _id="enableOEMMode",
+ title="Enable OEM Mode",
+ description="This setting anonymizes Opentrons branding in the ODD app.",
robot_type=[RobotTypeEnum.FLEX],
),
+ SettingDefinition(
+ _id="enablePerformanceMetrics",
+ title="Enable performance metrics",
+ description=(
+ "Do not enable."
+ " This is an Opentrons internal setting to collect performance metrics."
+ " Do not turn this on unless you are playing with the performance metrics system."
+ ),
+ robot_type=[RobotTypeEnum.OT2, RobotTypeEnum.FLEX],
+ internal_only=True,
+ ),
]
if (
@@ -683,6 +678,54 @@ def _migrate28to29(previous: SettingsMap) -> SettingsMap:
return newmap
+def _migrate29to30(previous: SettingsMap) -> SettingsMap:
+ """Migrate to version 30 of the feature flags file.
+
+ - Removes the disableTipPresenceDetection flag.
+ """
+ return {k: v for k, v in previous.items() if "disableTipPresenceDetection" != k}
+
+
+def _migrate30to31(previous: SettingsMap) -> SettingsMap:
+ """Migrate to version 31 of the feature flags file.
+
+ - Adds the enableErrorRecoveryExperiments config element.
+ """
+ newmap = {k: v for k, v in previous.items()}
+ newmap["enableErrorRecoveryExperiments"] = None
+ return newmap
+
+
+def _migrate31to32(previous: SettingsMap) -> SettingsMap:
+ """Migrate to version 32 of the feature flags file.
+
+ - Adds the enableOEMMode config element.
+ """
+ newmap = {k: v for k, v in previous.items()}
+ newmap["enableOEMMode"] = None
+ return newmap
+
+
+def _migrate32to33(previous: SettingsMap) -> SettingsMap:
+ """Migrate to version 33 of the feature flags file.
+
+ - Adds the enablePerformanceMetrics config element.
+ """
+ newmap = {k: v for k, v in previous.items()}
+ newmap["enablePerformanceMetrics"] = None
+ return newmap
+
+
+def _migrate33to34(previous: SettingsMap) -> SettingsMap:
+ """Migrate to version 34 of the feature flags file.
+
+ - Removes disableFastProtocolUpload
+ """
+ removals = ["disableFastProtocolUpload"]
+ newmap = {k: v for k, v in previous.items() if k not in removals}
+ return newmap
+
+
_MIGRATIONS = [
_migrate0to1,
_migrate1to2,
@@ -713,6 +756,11 @@ def _migrate28to29(previous: SettingsMap) -> SettingsMap:
_migrate26to27,
_migrate27to28,
_migrate28to29,
+ _migrate29to30,
+ _migrate30to31,
+ _migrate31to32,
+ _migrate32to33,
+ _migrate33to34,
]
"""
List of all migrations to apply, indexed by (version - 1). See _migrate below
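The five new `_migrateNtoM` functions extend `_MIGRATIONS`, which the module comment describes as applied in order and indexed by `(version - 1)`. A standalone sketch of folding a settings map through such a chain; the `migrate` driver below is a simplified stand-in for the module's real machinery, and only two of the new steps are reproduced:

```python
# Standalone sketch of folding a settings map through an ordered migration
# chain. The migrate() driver is a simplified stand-in, not the real _migrate.
from typing import Callable, Dict, List, Optional

SettingsMap = Dict[str, Optional[bool]]


def _migrate32to33(previous: SettingsMap) -> SettingsMap:
    newmap = dict(previous)
    newmap["enablePerformanceMetrics"] = None
    return newmap


def _migrate33to34(previous: SettingsMap) -> SettingsMap:
    removals = ["disableFastProtocolUpload"]
    return {k: v for k, v in previous.items() if k not in removals}


MIGRATIONS: List[Callable[[SettingsMap], SettingsMap]] = [
    _migrate32to33,
    _migrate33to34,
]


def migrate(settings: SettingsMap, version: int) -> SettingsMap:
    # Apply every migration newer than the file's current version, in order.
    for step in MIGRATIONS[version:]:
        settings = step(settings)
    return settings


old = {"disableFastProtocolUpload": True, "enableOEMMode": None}
print(migrate(old, version=0))
# {'enableOEMMode': None, 'enablePerformanceMetrics': None}
```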
diff --git a/api/src/opentrons/config/containers/default-containers.json b/api/src/opentrons/config/containers/default-containers.json
deleted file mode 100644
index 44824a024a4..00000000000
--- a/api/src/opentrons/config/containers/default-containers.json
+++ /dev/null
@@ -1,27097 +0,0 @@
-{
- "containers": {
- "temperature-plate": {
- "origin-offset": {
- "x": 11.24,
- "y": 14.34,
- "z": 97
- },
- "locations":{}
- },
-
- "tube-rack-5ml-96": {
- "locations": {
- "A1": {
- "y": 0,
- "x": 0,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "B1": {
- "y": 0,
- "x": 18,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "C1": {
- "y": 0,
- "x": 36,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "D1": {
- "y": 0,
- "x": 54,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "E1": {
- "y": 0,
- "x": 72,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "F1": {
- "y": 0,
- "x": 90,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "G1": {
- "y": 0,
- "x": 108,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "H1": {
- "y": 0,
- "x": 126,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
-
- "A2": {
- "y": 18,
- "x": 0,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "B2": {
- "y": 18,
- "x": 18,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "C2": {
- "y": 18,
- "x": 36,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "D2": {
- "y": 18,
- "x": 54,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "E2": {
- "y": 18,
- "x": 72,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "F2": {
- "y": 18,
- "x": 90,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "G2": {
- "y": 18,
- "x": 108,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "H2": {
- "y": 18,
- "x": 126,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
-
- "A3": {
- "y": 36,
- "x": 0,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "B3": {
- "y": 36,
- "x": 18,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "C3": {
- "y": 36,
- "x": 36,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "D3": {
- "y": 36,
- "x": 54,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "E3": {
- "y": 36,
- "x": 72,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "F3": {
- "y": 36,
- "x": 90,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "G3": {
- "y": 36,
- "x": 108,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "H3": {
- "y": 36,
- "x": 126,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
-
- "A4": {
- "y": 54,
- "x": 0,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "B4": {
- "y": 54,
- "x": 18,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "C4": {
- "y": 54,
- "x": 36,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "D4": {
- "y": 54,
- "x": 54,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "E4": {
- "y": 54,
- "x": 72,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "F4": {
- "y": 54,
- "x": 90,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "G4": {
- "y": 54,
- "x": 108,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "H4": {
- "y": 54,
- "x": 126,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
-
- "A5": {
- "y": 72,
- "x": 0,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "B5": {
- "y": 72,
- "x": 18,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "C5": {
- "y": 72,
- "x": 36,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "D5": {
- "y": 72,
- "x": 54,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "E5": {
- "y": 72,
- "x": 72,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "F5": {
- "y": 72,
- "x": 90,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "G5": {
- "y": 72,
- "x": 108,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "H5": {
- "y": 72,
- "x": 126,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
-
- "A6": {
- "y": 90,
- "x": 0,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "B6": {
- "y": 90,
- "x": 18,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "C6": {
- "y": 90,
- "x": 36,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "D6": {
- "y": 90,
- "x": 54,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "E6": {
- "y": 90,
- "x": 72,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "F6": {
- "y": 90,
- "x": 90,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "G6": {
- "y": 90,
- "x": 108,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "H6": {
- "y": 90,
- "x": 126,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
-
- "A7": {
- "y": 108,
- "x": 0,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "B7": {
- "y": 108,
- "x": 18,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "C7": {
- "y": 108,
- "x": 36,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "D7": {
- "y": 108,
- "x": 54,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "E7": {
- "y": 108,
- "x": 72,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "F7": {
- "y": 108,
- "x": 90,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "G7": {
- "y": 108,
- "x": 108,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "H7": {
- "y": 108,
- "x": 126,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
-
- "A8": {
- "y": 126,
- "x": 0,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "B8": {
- "y": 126,
- "x": 18,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "C8": {
- "y": 126,
- "x": 36,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "D8": {
- "y": 126,
- "x": 54,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "E8": {
- "y": 126,
- "x": 72,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "F8": {
- "y": 126,
- "x": 90,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "G8": {
- "y": 126,
- "x": 108,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "H8": {
- "y": 126,
- "x": 126,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
-
- "A9": {
- "y": 144,
- "x": 0,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "B9": {
- "y": 144,
- "x": 18,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "C9": {
- "y": 144,
- "x": 36,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "D9": {
- "y": 144,
- "x": 54,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "E9": {
- "y": 144,
- "x": 72,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "F9": {
- "y": 144,
- "x": 90,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "G9": {
- "y": 144,
- "x": 108,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "H9": {
- "y": 144,
- "x": 126,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
-
- "A10": {
- "y": 162,
- "x": 0,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "B10": {
- "y": 162,
- "x": 18,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "C10": {
- "y": 162,
- "x": 36,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "D10": {
- "y": 162,
- "x": 54,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "E10": {
- "y": 162,
- "x": 72,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "F10": {
- "y": 162,
- "x": 90,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "G10": {
- "y": 162,
- "x": 108,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "H10": {
- "y": 162,
- "x": 126,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
-
- "A11": {
- "y": 180,
- "x": 0,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "B11": {
- "y": 180,
- "x": 18,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "C11": {
- "y": 180,
- "x": 36,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "D11": {
- "y": 180,
- "x": 54,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "E11": {
- "y": 180,
- "x": 72,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "F11": {
- "y": 180,
- "x": 90,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "G11": {
- "y": 180,
- "x": 108,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "H11": {
- "y": 180,
- "x": 126,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
-
- "A12": {
- "y": 198,
- "x": 0,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "B12": {
- "y": 198,
- "x": 18,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "C12": {
- "y": 198,
- "x": 36,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "D12": {
- "y": 198,
- "x": 54,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "E12": {
- "y": 198,
- "x": 72,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "F12": {
- "y": 198,
- "x": 90,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "G12": {
- "y": 198,
- "x": 108,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- },
- "H12": {
- "y": 198,
- "x": 126,
- "z": 0,
- "depth": 72,
- "diameter": 15,
- "total-liquid-volume": 5000
- }
-
- }
- },
-
- "tube-rack-2ml-9x9": {
- "locations": {
- "A1": {
- "y": 0,
- "x": 0,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "B1": {
- "y": 0,
- "x": 14.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "C1": {
- "y": 0,
- "x": 29.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "D1": {
- "y": 0,
- "x": 44.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "E1": {
- "y": 0,
- "x": 59,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "F1": {
- "y": 0,
- "x": 73.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "G1": {
- "y": 0,
- "x": 88.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "H1": {
- "y": 0,
- "x": 103.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "I1": {
- "y": 0,
- "x": 118,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
-
- "A2": {
- "y": 14.75,
- "x": 0,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "B2": {
- "y": 14.75,
- "x": 14.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "C2": {
- "y": 14.75,
- "x": 29.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "D2": {
- "y": 14.75,
- "x": 44.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "E2": {
- "y": 14.75,
- "x": 59,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "F2": {
- "y": 14.75,
- "x": 73.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "G2": {
- "y": 14.75,
- "x": 88.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "H2": {
- "y": 14.75,
- "x": 103.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "I2": {
- "y": 14.75,
- "x": 118,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
-
- "A3": {
- "y": 29.5,
- "x": 0,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "B3": {
- "y": 29.5,
- "x": 14.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "C3": {
- "y": 29.5,
- "x": 29.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "D3": {
- "y": 29.5,
- "x": 44.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "E3": {
- "y": 29.5,
- "x": 59,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "F3": {
- "y": 29.5,
- "x": 73.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "G3": {
- "y": 29.5,
- "x": 88.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "H3": {
- "y": 29.5,
- "x": 103.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "I3": {
- "y": 29.5,
- "x": 118,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
-
- "A4": {
- "y": 44.25,
- "x": 0,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "B4": {
- "y": 44.25,
- "x": 14.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "C4": {
- "y": 44.25,
- "x": 29.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "D4": {
- "y": 44.25,
- "x": 44.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "E4": {
- "y": 44.25,
- "x": 59,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "F4": {
- "y": 44.25,
- "x": 73.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "G4": {
- "y": 44.25,
- "x": 88.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "H4": {
- "y": 44.25,
- "x": 103.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "I4": {
- "y": 44.25,
- "x": 118,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
-
- "A5": {
- "y": 59,
- "x": 0,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "B5": {
- "y": 59,
- "x": 14.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "C5": {
- "y": 59,
- "x": 29.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "D5": {
- "y": 59,
- "x": 44.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "E5": {
- "y": 59,
- "x": 59,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "F5": {
- "y": 59,
- "x": 73.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "G5": {
- "y": 59,
- "x": 88.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "H5": {
- "y": 59,
- "x": 103.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "I5": {
- "y": 59,
- "x": 118,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
-
- "A6": {
- "y": 73.75,
- "x": 0,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "B6": {
- "y": 73.75,
- "x": 14.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "C6": {
- "y": 73.75,
- "x": 29.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "D6": {
- "y": 73.75,
- "x": 44.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "E6": {
- "y": 73.75,
- "x": 59,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "F6": {
- "y": 73.75,
- "x": 73.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "G6": {
- "y": 73.75,
- "x": 88.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "H6": {
- "y": 73.75,
- "x": 103.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "I6": {
- "y": 73.75,
- "x": 118,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
-
- "A7": {
- "y": 88.5,
- "x": 0,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "B7": {
- "y": 88.5,
- "x": 14.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "C7": {
- "y": 88.5,
- "x": 29.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "D7": {
- "y": 88.5,
- "x": 44.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "E7": {
- "y": 88.5,
- "x": 59,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "F7": {
- "y": 88.5,
- "x": 73.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "G7": {
- "y": 88.5,
- "x": 88.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "H7": {
- "y": 88.5,
- "x": 103.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "I7": {
- "y": 88.5,
- "x": 118,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
-
- "A8": {
- "y": 103.25,
- "x": 0,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "B8": {
- "y": 103.25,
- "x": 14.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "C8": {
- "y": 103.25,
- "x": 29.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "D8": {
- "y": 103.25,
- "x": 44.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "E8": {
- "y": 103.25,
- "x": 59,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "F8": {
- "y": 103.25,
- "x": 73.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "G8": {
- "y": 103.25,
- "x": 88.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "H8": {
- "y": 103.25,
- "x": 103.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "I8": {
- "y": 103.25,
- "x": 118,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
-
- "A9": {
- "y": 118,
- "x": 0,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "B9": {
- "y": 118,
- "x": 14.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "C9": {
- "y": 118,
- "x": 29.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "D9": {
- "y": 118,
- "x": 44.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "E9": {
- "y": 118,
- "x": 59,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "F9": {
- "y": 118,
- "x": 73.75,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "G9": {
- "y": 118,
- "x": 88.5,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "H9": {
- "y": 118,
- "x": 103.25,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- },
- "I9": {
- "y": 118,
- "x": 118,
- "z": 0,
- "depth": 45,
- "diameter": 10,
- "total-liquid-volume": 2000
- }
- }
- },
- "96-well-plate-20mm": {
- "origin-offset": {
- "x": 11.24,
- "y": 14.34
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 0,
- "depth": 20.2,
- "diameter": 5.46,
- "total-liquid-volume": 300
- }
- }
- },
- "6-well-plate": {
- "origin-offset": {
- "x": 23.16,
- "y": 24.76
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 17.4,
- "diameter": 22.5,
- "total-liquid-volume": 16800
- },
- "B1": {
- "x": 39.12,
- "y": 0,
- "z": 0,
- "depth": 17.4,
- "diameter": 22.5,
- "total-liquid-volume": 16800
- },
- "A2": {
- "x": 0,
- "y": 39.12,
- "z": 0,
- "depth": 17.4,
- "diameter": 22.5,
- "total-liquid-volume": 16800
- },
- "B2": {
- "x": 39.12,
- "y": 39.12,
- "z": 0,
- "depth": 17.4,
- "diameter": 22.5,
- "total-liquid-volume": 16800
- },
- "A3": {
- "x": 0,
- "y": 78.24,
- "z": 0,
- "depth": 17.4,
- "diameter": 22.5,
- "total-liquid-volume": 16800
- },
- "B3": {
- "x": 39.12,
- "y": 78.24,
- "z": 0,
- "depth": 17.4,
- "diameter": 22.5,
- "total-liquid-volume": 16800
- }
- }
- },
- "12-well-plate": {
- "origin-offset": {
- "x": 16.79,
- "y": 24.94
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 17.53,
- "diameter": 22.5,
- "total-liquid-volume": 6900
- },
- "B1": {
- "x": 26.01,
- "y": 0,
- "z": 0,
- "depth": 17.53,
- "diameter": 22.5,
- "total-liquid-volume": 6900
- },
- "C1": {
- "x": 52.02,
- "y": 0,
- "z": 0,
- "depth": 17.53,
- "diameter": 22.5,
- "total-liquid-volume": 6900
- },
- "A2": {
- "x": 0,
- "y": 26.01,
- "z": 0,
- "depth": 17.53,
- "diameter": 22.5,
- "total-liquid-volume": 6900
- },
- "B2": {
- "x": 26.01,
- "y": 26.01,
- "z": 0,
- "depth": 17.53,
- "diameter": 22.5,
- "total-liquid-volume": 6900
- },
- "C2": {
- "x": 52.02,
- "y": 26.01,
- "z": 0,
- "depth": 17.53,
- "diameter": 22.5,
- "total-liquid-volume": 6900
- },
- "A3": {
- "x": 0,
- "y": 52.02,
- "z": 0,
- "depth": 17.53,
- "diameter": 22.5,
- "total-liquid-volume": 6900
- },
- "B3": {
- "x": 26.01,
- "y": 52.02,
- "z": 0,
- "depth": 17.53,
- "diameter": 22.5,
- "total-liquid-volume": 6900
- },
- "C3": {
- "x": 52.02,
- "y": 52.02,
- "z": 0,
- "depth": 17.53,
- "diameter": 22.5,
- "total-liquid-volume": 6900
- },
- "A4": {
- "x": 0,
- "y": 78.03,
- "z": 0,
- "depth": 17.53,
- "diameter": 22.5,
- "total-liquid-volume": 6900
- },
- "B4": {
- "x": 26.01,
- "y": 78.03,
- "z": 0,
- "depth": 17.53,
- "diameter": 22.5,
- "total-liquid-volume": 6900
- },
- "C4": {
- "x": 52.02,
- "y": 78.03,
- "z": 0,
- "depth": 17.53,
- "diameter": 22.5,
- "total-liquid-volume": 6900
- }
- }
- },
- "24-well-plate": {
- "origin-offset": {
- "x": 13.67,
- "y": 15
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "B1": {
- "x": 19.3,
- "y": 0,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "C1": {
- "x": 38.6,
- "y": 0,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "D1": {
- "x": 57.9,
- "y": 0,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "A2": {
- "x": 0,
- "y": 19.3,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "B2": {
- "x": 19.3,
- "y": 19.3,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "C2": {
- "x": 38.6,
- "y": 19.3,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "D2": {
- "x": 57.9,
- "y": 19.3,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "A3": {
- "x": 0,
- "y": 38.6,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "B3": {
- "x": 19.3,
- "y": 38.6,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "C3": {
- "x": 38.6,
- "y": 38.6,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "D3": {
- "x": 57.9,
- "y": 38.6,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "A4": {
- "x": 0,
- "y": 57.9,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "B4": {
- "x": 19.3,
- "y": 57.9,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "C4": {
- "x": 38.6,
- "y": 57.9,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "D4": {
- "x": 57.9,
- "y": 57.9,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "A5": {
- "x": 0,
- "y": 77.2,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "B5": {
- "x": 19.3,
- "y": 77.2,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "C5": {
- "x": 38.6,
- "y": 77.2,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "D5": {
- "x": 57.9,
- "y": 77.2,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "A6": {
- "x": 0,
- "y": 96.5,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "B6": {
- "x": 19.3,
- "y": 96.5,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "C6": {
- "x": 38.6,
- "y": 96.5,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- },
- "D6": {
- "x": 57.9,
- "y": 96.5,
- "z": 0,
- "depth": 17.4,
- "diameter": 16,
- "total-liquid-volume": 1900
- }
- }
- },
- "48-well-plate": {
- "origin-offset": {
- "x": 10.08,
- "y": 18.16
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "B1": {
- "x": 13.08,
- "y": 0,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "C1": {
- "x": 26.16,
- "y": 0,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "D1": {
- "x": 39.24,
- "y": 0,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "E1": {
- "x": 52.32,
- "y": 0,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "F1": {
- "x": 65.4,
- "y": 0,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "A2": {
- "x": 0,
- "y": 13.08,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "B2": {
- "x": 13.08,
- "y": 13.08,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "C2": {
- "x": 26.16,
- "y": 13.08,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "D2": {
- "x": 39.24,
- "y": 13.08,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "E2": {
- "x": 52.32,
- "y": 13.08,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "F2": {
- "x": 65.4,
- "y": 13.08,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "A3": {
- "x": 0,
- "y": 26.16,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "B3": {
- "x": 13.08,
- "y": 26.16,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "C3": {
- "x": 26.16,
- "y": 26.16,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "D3": {
- "x": 39.24,
- "y": 26.16,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "E3": {
- "x": 52.32,
- "y": 26.16,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "F3": {
- "x": 65.4,
- "y": 26.16,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "A4": {
- "x": 0,
- "y": 39.24,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "B4": {
- "x": 13.08,
- "y": 39.24,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "C4": {
- "x": 26.16,
- "y": 39.24,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "D4": {
- "x": 39.24,
- "y": 39.24,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "E4": {
- "x": 52.32,
- "y": 39.24,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "F4": {
- "x": 65.4,
- "y": 39.24,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "A5": {
- "x": 0,
- "y": 52.32,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "B5": {
- "x": 13.08,
- "y": 52.32,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "C5": {
- "x": 26.16,
- "y": 52.32,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "D5": {
- "x": 39.24,
- "y": 52.32,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "E5": {
- "x": 52.32,
- "y": 52.32,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "F5": {
- "x": 65.4,
- "y": 52.32,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "A6": {
- "x": 0,
- "y": 65.4,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "B6": {
- "x": 13.08,
- "y": 65.4,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "C6": {
- "x": 26.16,
- "y": 65.4,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "D6": {
- "x": 39.24,
- "y": 65.4,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "E6": {
- "x": 52.32,
- "y": 65.4,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "F6": {
- "x": 65.4,
- "y": 65.4,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "A7": {
- "x": 0,
- "y": 78.48,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "B7": {
- "x": 13.08,
- "y": 78.48,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "C7": {
- "x": 26.16,
- "y": 78.48,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "D7": {
- "x": 39.24,
- "y": 78.48,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "E7": {
- "x": 52.32,
- "y": 78.48,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "F7": {
- "x": 65.4,
- "y": 78.48,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "A8": {
- "x": 0,
- "y": 91.56,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "B8": {
- "x": 13.08,
- "y": 91.56,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "C8": {
- "x": 26.16,
- "y": 91.56,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "D8": {
- "x": 39.24,
- "y": 91.56,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "E8": {
- "x": 52.32,
- "y": 91.56,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- },
- "F8": {
- "x": 65.4,
- "y": 91.56,
- "z": 0,
- "depth": 17.4,
- "diameter": 11.25,
- "total-liquid-volume": 950
- }
- }
- },
- "trough-1row-25ml": {
- "locations": {
- "A1": {
- "x": 42.75,
- "y": 63.875,
- "z": 0,
- "depth": 26,
- "diameter": 10,
- "total-liquid-volume": 25000
- }
- }
- },
- "trough-1row-test": {
- "locations": {
- "A1": {
- "x": 42.75,
- "y": 63.875,
- "z": 0,
- "depth": 26,
- "diameter": 10,
- "total-liquid-volume": 25000
- }
- }
- },
- "hampton-1ml-deep-block": {
- "origin-offset": {
- "x": 11.24,
- "y": 14.34
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 0,
- "depth": 38,
- "diameter": 7.5,
- "total-liquid-volume": 1000
- }
- }
- },
-
- "rigaku-compact-crystallization-plate": {
- "origin-offset": {
- "x": 9,
- "y": 11
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
-
- "A2": {
- "x": 0,
- "y": 9,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
-
- "A3": {
- "x": 0,
- "y": 18,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
-
- "A4": {
- "x": 0,
- "y": 27,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
-
- "A5": {
- "x": 0,
- "y": 36,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
-
- "A6": {
- "x": 0,
- "y": 45,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
-
- "A7": {
- "x": 0,
- "y": 54,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
-
- "A8": {
- "x": 0,
- "y": 63,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
-
- "A9": {
- "x": 0,
- "y": 72,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
-
- "A10": {
- "x": 0,
- "y": 81,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
-
- "A11": {
- "x": 0,
- "y": 90,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
-
- "A12": {
- "x": 0,
- "y": 99,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 0,
- "depth": 2.5,
- "diameter": 2,
- "total-liquid-volume": 6
- },
-
- "A13": {
- "x": 3.5,
- "y": 3.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "B13": {
- "x": 12.5,
- "y": 3.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "C13": {
- "x": 21.5,
- "y": 3.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "D13": {
- "x": 30.5,
- "y": 3.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "E13": {
- "x": 39.5,
- "y": 3.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "F13": {
- "x": 48.5,
- "y": 3.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "G13": {
- "x": 57.5,
- "y": 3.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "H13": {
- "x": 66.5,
- "y": 3.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
-
- "A14": {
- "x": 3.5,
- "y": 12.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "B14": {
- "x": 12.5,
- "y": 12.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "C14": {
- "x": 21.5,
- "y": 12.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "D14": {
- "x": 30.5,
- "y": 12.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "E14": {
- "x": 39.5,
- "y": 12.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "F14": {
- "x": 48.5,
- "y": 12.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "G14": {
- "x": 57.5,
- "y": 12.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "H14": {
- "x": 66.5,
- "y": 12.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
-
- "A15": {
- "x": 3.5,
- "y": 21.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "B15": {
- "x": 12.5,
- "y": 21.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "C15": {
- "x": 21.5,
- "y": 21.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "D15": {
- "x": 30.5,
- "y": 21.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "E15": {
- "x": 39.5,
- "y": 21.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "F15": {
- "x": 48.5,
- "y": 21.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "G15": {
- "x": 57.5,
- "y": 21.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "H15": {
- "x": 66.5,
- "y": 21.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
-
- "A16": {
- "x": 3.5,
- "y": 30.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "B16": {
- "x": 12.5,
- "y": 30.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "C16": {
- "x": 21.5,
- "y": 30.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "D16": {
- "x": 30.5,
- "y": 30.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "E16": {
- "x": 39.5,
- "y": 30.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "F16": {
- "x": 48.5,
- "y": 30.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "G16": {
- "x": 57.5,
- "y": 30.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "H16": {
- "x": 66.5,
- "y": 30.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
-
- "A17": {
- "x": 3.5,
- "y": 39.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "B17": {
- "x": 12.5,
- "y": 39.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "C17": {
- "x": 21.5,
- "y": 39.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "D17": {
- "x": 30.5,
- "y": 39.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "E17": {
- "x": 39.5,
- "y": 39.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "F17": {
- "x": 48.5,
- "y": 39.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "G17": {
- "x": 57.5,
- "y": 39.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "H17": {
- "x": 66.5,
- "y": 39.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
-
- "A18": {
- "x": 3.5,
- "y": 48.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "B18": {
- "x": 12.5,
- "y": 48.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "C18": {
- "x": 21.5,
- "y": 48.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "D18": {
- "x": 30.5,
- "y": 48.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "E18": {
- "x": 39.5,
- "y": 48.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "F18": {
- "x": 48.5,
- "y": 48.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "G18": {
- "x": 57.5,
- "y": 48.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "H18": {
- "x": 66.5,
- "y": 48.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
-
- "A19": {
- "x": 3.5,
- "y": 57.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "B19": {
- "x": 12.5,
- "y": 57.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "C19": {
- "x": 21.5,
- "y": 57.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "D19": {
- "x": 30.5,
- "y": 57.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "E19": {
- "x": 39.5,
- "y": 57.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "F19": {
- "x": 48.5,
- "y": 57.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "G19": {
- "x": 57.5,
- "y": 57.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "H19": {
- "x": 66.5,
- "y": 57.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
-
- "A20": {
- "x": 3.5,
- "y": 66.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "B20": {
- "x": 12.5,
- "y": 66.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "C20": {
- "x": 21.5,
- "y": 66.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "D20": {
- "x": 30.5,
- "y": 66.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "E20": {
- "x": 39.5,
- "y": 66.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "F20": {
- "x": 48.5,
- "y": 66.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "G20": {
- "x": 57.5,
- "y": 66.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "H20": {
- "x": 66.5,
- "y": 66.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
-
- "A21": {
- "x": 3.5,
- "y": 75.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "B21": {
- "x": 12.5,
- "y": 75.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "C21": {
- "x": 21.5,
- "y": 75.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "D21": {
- "x": 30.5,
- "y": 75.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "E21": {
- "x": 39.5,
- "y": 75.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "F21": {
- "x": 48.5,
- "y": 75.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "G21": {
- "x": 57.5,
- "y": 75.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "H21": {
- "x": 66.5,
- "y": 75.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
-
- "A22": {
- "x": 3.5,
- "y": 84.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "B22": {
- "x": 12.5,
- "y": 84.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "C22": {
- "x": 21.5,
- "y": 84.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "D22": {
- "x": 30.5,
- "y": 84.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "E22": {
- "x": 39.5,
- "y": 84.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "F22": {
- "x": 48.5,
- "y": 84.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "G22": {
- "x": 57.5,
- "y": 84.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "H22": {
- "x": 66.5,
- "y": 84.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
-
- "A23": {
- "x": 3.5,
- "y": 93.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "B23": {
- "x": 12.5,
- "y": 93.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "C23": {
- "x": 21.5,
- "y": 93.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "D23": {
- "x": 30.5,
- "y": 93.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "E23": {
- "x": 39.5,
- "y": 93.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "F23": {
- "x": 48.5,
- "y": 93.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "G23": {
- "x": 57.5,
- "y": 93.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "H23": {
- "x": 66.5,
- "y": 93.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
-
- "A24": {
- "x": 3.5,
- "y": 102.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "B24": {
- "x": 12.5,
- "y": 102.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "C24": {
- "x": 21.5,
- "y": 102.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "D24": {
- "x": 30.5,
- "y": 102.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "E24": {
- "x": 39.5,
- "y": 102.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "F24": {
- "x": 48.5,
- "y": 102.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "G24": {
- "x": 57.5,
- "y": 102.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- },
- "H24": {
- "x": 66.5,
- "y": 102.5,
- "z": -6,
- "depth": 6.5,
- "diameter": 2.5,
- "total-liquid-volume": 300
- }
- }
- },
- "alum-block-pcr-strips": {
-
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "A2": {
- "x": 0,
- "y": 117,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "B2": {
- "x": 9,
- "y": 117,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "C2": {
- "x": 18,
- "y": 117,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "D2": {
- "x": 27,
- "y": 117,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "E2": {
- "x": 36,
- "y": 117,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "F2": {
- "x": 45,
- "y": 117,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "G2": {
- "x": 54,
- "y": 117,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "H2": {
- "x": 63,
- "y": 117,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- }
- }
- },
- "T75-flask": {
- "origin-offset": {
- "x": 42.75,
- "y": 63.875
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 163,
- "diameter": 25,
- "total-liquid-volume": 75000
- }
- }
- },
- "T25-flask": {
- "origin-offset": {
- "x": 42.75,
- "y": 63.875
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 99,
- "diameter": 18,
- "total-liquid-volume": 25000
- }
- }
- },
- "magdeck": {
- "origin-offset": {
- "x": 0,
- "y": 0
- },
- "locations": {
- "A1": {
- "x": 63.9,
- "y": 42.8,
- "z": 0,
- "depth": 82.25
- }
- }
- },
- "tempdeck": {
- "origin-offset": {
- "x": 0,
- "y": 0
- },
- "locations": {
- "A1": {
- "x": 63.9,
- "y": 42.8,
- "z": 0,
- "depth": 80.09
- }
- }
- },
- "trash-box": {
- "origin-offset": {
- "x": 42.75,
- "y": 63.875
- },
- "locations": {
- "A1": {
- "x": 60,
- "y": 45,
- "z": 0,
- "depth": 40
- }
- }
- },
- "fixed-trash": {
- "origin-offset": {
- "x": 0,
- "y": 0
- },
- "locations": {
- "A1": {
- "x": 80,
- "y": 80,
- "z": 5,
- "depth": 58
- }
- }
- },
- "tall-fixed-trash": {
- "origin-offset": {
- "x": 0,
- "y": 0
- },
- "locations": {
- "A1": {
- "x": 80,
- "y": 80,
- "z": 5,
- "depth": 80
- }
- }
- },
- "wheaton_vial_rack": {
- "origin-offset": {
- "x": 9,
- "y": 9
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "B1": {
- "x": 35,
- "y": 0,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "C1": {
- "x": 70,
- "y": 0,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "D1": {
- "x": 105,
- "y": 0,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "E1": {
- "x": 140,
- "y": 0,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "F1": {
- "x": 175,
- "y": 0,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "G1": {
- "x": 210,
- "y": 0,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "H1": {
- "x": 245,
- "y": 0,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "I1": {
- "x": 280,
- "y": 0,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "J1": {
- "x": 315,
- "y": 0,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
-
- "A2": {
- "x": 0,
- "y": 35,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "B2": {
- "x": 35,
- "y": 35,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "C2": {
- "x": 70,
- "y": 35,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "D2": {
- "x": 105,
- "y": 35,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "E2": {
- "x": 140,
- "y": 35,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "F2": {
- "x": 175,
- "y": 35,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "G2": {
- "x": 210,
- "y": 35,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "H2": {
- "x": 245,
- "y": 35,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "I2": {
- "x": 280,
- "y": 35,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "J2": {
- "x": 315,
- "y": 35,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
-
- "A3": {
- "x": 0,
- "y": 70,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "B3": {
- "x": 35,
- "y": 70,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "C3": {
- "x": 70,
- "y": 70,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "D3": {
- "x": 105,
- "y": 70,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "E3": {
- "x": 140,
- "y": 70,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "F3": {
- "x": 175,
- "y": 70,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "G3": {
- "x": 210,
- "y": 70,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "H3": {
- "x": 245,
- "y": 70,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "I3": {
- "x": 280,
- "y": 70,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "J3": {
- "x": 315,
- "y": 70,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
-
- "A4": {
- "x": 0,
- "y": 105,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "B4": {
- "x": 35,
- "y": 105,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "C4": {
- "x": 70,
- "y": 105,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "D4": {
- "x": 105,
- "y": 105,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "E4": {
- "x": 140,
- "y": 105,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "F4": {
- "x": 175,
- "y": 105,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "G4": {
- "x": 210,
- "y": 105,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "H4": {
- "x": 245,
- "y": 105,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "I4": {
- "x": 280,
- "y": 105,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "J4": {
- "x": 315,
- "y": 105,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
-
- "A5": {
- "x": 0,
- "y": 140,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "B5": {
- "x": 35,
- "y": 140,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "C5": {
- "x": 70,
- "y": 140,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "D5": {
- "x": 105,
- "y": 140,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "E5": {
- "x": 140,
- "y": 140,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "F5": {
- "x": 175,
- "y": 140,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "G5": {
- "x": 210,
- "y": 140,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "H5": {
- "x": 245,
- "y": 140,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "I5": {
- "x": 280,
- "y": 140,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- },
- "J5": {
- "x": 315,
- "y": 140,
- "z": 0,
- "depth": 95,
- "diameter": 18,
- "total-liquid-volume": 2000
- }
- }
- },
- "tube-rack-80well": {
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B1": {
- "x": 13.2,
- "y": 0,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C1": {
- "x": 26.5,
- "y": 0,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D1": {
- "x": 39.7,
- "y": 0,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "E1": {
- "x": 52.9,
- "y": 0,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A2": {
- "x": 0,
- "y": 13.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B2": {
- "x": 13.2,
- "y": 13.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C2": {
- "x": 26.5,
- "y": 13.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D2": {
- "x": 39.7,
- "y": 13.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "E2": {
- "x": 52.9,
- "y": 13.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A3": {
- "x": 0,
- "y": 26.5,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B3": {
- "x": 13.2,
- "y": 26.5,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C3": {
- "x": 26.5,
- "y": 26.5,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D3": {
- "x": 39.7,
- "y": 26.5,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "E3": {
- "x": 52.9,
- "y": 26.5,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A4": {
- "x": 0,
- "y": 39.7,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B4": {
- "x": 13.2,
- "y": 39.7,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C4": {
- "x": 26.5,
- "y": 39.7,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D4": {
- "x": 39.7,
- "y": 39.7,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "E4": {
- "x": 52.9,
- "y": 39.7,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A5": {
- "x": 0,
- "y": 52.9,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B5": {
- "x": 13.2,
- "y": 52.9,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C5": {
- "x": 26.5,
- "y": 52.9,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D5": {
- "x": 39.7,
- "y": 52.9,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "E5": {
- "x": 52.9,
- "y": 52.9,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A6": {
- "x": 0,
- "y": 66.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B6": {
- "x": 13.2,
- "y": 66.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C6": {
- "x": 26.5,
- "y": 66.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D6": {
- "x": 39.7,
- "y": 66.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "E6": {
- "x": 52.9,
- "y": 66.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A7": {
- "x": 0,
- "y": 79.4,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B7": {
- "x": 13.2,
- "y": 79.4,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C7": {
- "x": 26.5,
- "y": 79.4,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D7": {
- "x": 39.7,
- "y": 79.4,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "E7": {
- "x": 52.9,
- "y": 79.4,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A8": {
- "x": 0,
- "y": 92.6,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B8": {
- "x": 13.2,
- "y": 92.6,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C8": {
- "x": 26.5,
- "y": 92.6,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D8": {
- "x": 39.7,
- "y": 92.6,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "E8": {
- "x": 52.9,
- "y": 92.6,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A9": {
- "x": 0,
- "y": 105.8,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B9": {
- "x": 13.2,
- "y": 105.8,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C9": {
- "x": 26.5,
- "y": 105.8,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D9": {
- "x": 39.7,
- "y": 105.8,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "E9": {
- "x": 52.9,
- "y": 105.8,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A10": {
- "x": 0,
- "y": 119.1,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B10": {
- "x": 13.2,
- "y": 119.1,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C10": {
- "x": 26.5,
- "y": 119.1,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D10": {
- "x": 39.7,
- "y": 119.1,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "E10": {
- "x": 52.9,
- "y": 119.1,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A11": {
- "x": 0,
- "y": 132.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B11": {
- "x": 13.2,
- "y": 132.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C11": {
- "x": 26.5,
- "y": 132.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D11": {
- "x": 39.7,
- "y": 132.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "E11": {
- "x": 52.9,
- "y": 132.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A12": {
- "x": 0,
- "y": 145.5,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B12": {
- "x": 13.2,
- "y": 145.5,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C12": {
- "x": 26.5,
- "y": 145.5,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D12": {
- "x": 39.7,
- "y": 145.5,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "E12": {
- "x": 52.9,
- "y": 145.5,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A13": {
- "x": 0,
- "y": 158.8,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B13": {
- "x": 13.2,
- "y": 158.8,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C13": {
- "x": 26.5,
- "y": 158.8,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D13": {
- "x": 39.7,
- "y": 158.8,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "E13": {
- "x": 52.9,
- "y": 158.8,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A14": {
- "x": 0,
- "y": 172,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B14": {
- "x": 13.2,
- "y": 172,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C14": {
- "x": 26.5,
- "y": 172,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D14": {
- "x": 39.7,
- "y": 172,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "E14": {
- "x": 52.9,
- "y": 172,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A15": {
- "x": 0,
- "y": 185.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B15": {
- "x": 13.2,
- "y": 185.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C15": {
- "x": 26.5,
- "y": 185.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D15": {
- "x": 39.7,
- "y": 185.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "E15": {
- "x": 52.9,
- "y": 185.2,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A16": {
- "x": 0,
- "y": 198.4,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B16": {
- "x": 13.2,
- "y": 198.4,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C16": {
- "x": 26.5,
- "y": 198.4,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D16": {
- "x": 39.7,
- "y": 198.4,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "E16": {
- "x": 52.9,
- "y": 198.4,
- "z": 0,
- "depth": 35,
- "diameter": 6,
- "total-liquid-volume": 2000
- }
- }
- },
- "point": {
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 0,
- "total-liquid-volume": 1
- }
- }
- },
- "tiprack-10ul": {
- "origin-offset": {
- "x": 14.24,
- "y": 14.54
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 0,
- "depth": 56,
- "diameter": 3.5
- }
- }
- },
-
- "tiprack-10ul-H": {
- "origin-offset": {
- "x": 11.24,
- "y": 14.34
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 0,
- "depth": 60,
- "diameter": 6.4
- }
- }
- },
-
- "tiprack-200ul": {
- "origin-offset": {
- "x": 11.24,
- "y": 14.34
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 0,
- "diameter": 3.5,
- "depth": 60
- }
- }
- },
- "opentrons-tiprack-10ul": {
- "origin-offset": {
- "x": 10.77,
- "y": 11.47
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 25.46,
- "diameter": 3.5,
- "depth": 39.2
- }
- }
- },
- "opentrons-tiprack-300ul": {
- "origin-offset": {
- "x": 12.59,
- "y": 14.85
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 6,
- "diameter": 3.5,
- "depth": 60
- }
- }
- },
-
- "tiprack-1000ul": {
- "origin-offset": {
- "x": 11.24,
- "y": 14.34
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 0,
- "diameter": 6.4,
- "depth": 101.0
- }
- }
- },
- "opentrons-tiprack-1000ul": {
- "origin-offset": {
- "x": 8.5,
- "y": 11.18
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 0,
- "diameter": 7.62,
- "depth": 98.07
- }
- }
- },
- "tube-rack-.75ml": {
- "origin-offset": {
- "x": 13.5,
- "y": 15,
- "z": 55
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "B1": {
- "x": 19.5,
- "y": 0,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "C1": {
- "x": 39,
- "y": 0,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "D1": {
- "x": 58.5,
- "y": 0,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "A2": {
- "x": 0,
- "y": 19.5,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "B2": {
- "x": 19.5,
- "y": 19.5,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "C2": {
- "x": 39,
- "y": 19.5,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "D2": {
- "x": 58.5,
- "y": 19.5,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "A3": {
- "x": 0,
- "y": 39,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "B3": {
- "x": 19.5,
- "y": 39,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "C3": {
- "x": 39,
- "y": 39,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "D3": {
- "x": 58.5,
- "y": 39,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "A4": {
- "x": 0,
- "y": 58.5,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "B4": {
- "x": 19.5,
- "y": 58.5,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "C4": {
- "x": 39,
- "y": 58.5,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "D4": {
- "x": 58.5,
- "y": 58.5,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "A5": {
- "x": 0,
- "y": 78,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "B5": {
- "x": 19.5,
- "y": 78,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "C5": {
- "x": 39,
- "y": 78,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "D5": {
- "x": 58.5,
- "y": 78,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "A6": {
- "x": 0,
- "y": 97.5,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "B6": {
- "x": 19.5,
- "y": 97.5,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "C6": {
- "x": 39,
- "y": 97.5,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- },
- "D6": {
- "x": 58.5,
- "y": 97.5,
- "z": 0,
- "depth": 20,
- "diameter": 6,
- "total-liquid-volume": 750
- }
- }
- },
-
- "tube-rack-2ml": {
- "origin-offset": {
- "x": 13,
- "y": 16,
- "z": 52
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B1": {
- "x": 19.5,
- "y": 0,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C1": {
- "x": 39,
- "y": 0,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D1": {
- "x": 58.5,
- "y": 0,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A2": {
- "x": 0,
- "y": 19.5,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B2": {
- "x": 19.5,
- "y": 19.5,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C2": {
- "x": 39,
- "y": 19.5,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D2": {
- "x": 58.5,
- "y": 19.5,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A3": {
- "x": 0,
- "y": 39,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B3": {
- "x": 19.5,
- "y": 39,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C3": {
- "x": 39,
- "y": 39,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D3": {
- "x": 58.5,
- "y": 39,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A4": {
- "x": 0,
- "y": 58.5,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B4": {
- "x": 19.5,
- "y": 58.5,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C4": {
- "x": 39,
- "y": 58.5,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D4": {
- "x": 58.5,
- "y": 58.5,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A5": {
- "x": 0,
- "y": 78,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B5": {
- "x": 19.5,
- "y": 78,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C5": {
- "x": 39,
- "y": 78,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D5": {
- "x": 58.5,
- "y": 78,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "A6": {
- "x": 0,
- "y": 97.5,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "B6": {
- "x": 19.5,
- "y": 97.5,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "C6": {
- "x": 39,
- "y": 97.5,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- },
- "D6": {
- "x": 58.5,
- "y": 97.5,
- "z": 0,
- "depth": 40,
- "diameter": 6,
- "total-liquid-volume": 2000
- }
- }
- },
-
- "tube-rack-15_50ml": {
- "origin-offset": {
- "x": 11,
- "y": 19
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 5,
- "depth": 115,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "B1": {
- "x": 31.5,
- "y": 0,
- "z": 5,
- "depth": 115,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "C1": {
- "x": 63,
- "y": 0,
- "z": 5,
- "depth": 115,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "A2": {
- "x": 0,
- "y": 22.7,
- "z": 5,
- "depth": 115,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "B2": {
- "x": 31.5,
- "y": 22.7,
- "z": 5,
- "depth": 115,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "C2": {
- "x": 63,
- "y": 22.7,
- "z": 0,
- "depth": 115,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "A3": {
- "x": 5.9,
- "y": 51.26,
- "z": 5,
- "depth": 115,
- "diameter": 30,
- "total-liquid-volume": 50000
- },
- "B3": {
- "x": 51.26,
- "y": 51.26,
- "z": 5,
- "depth": 115,
- "diameter": 30,
- "total-liquid-volume": 50000
- },
- "A4": {
- "x": 5.9,
- "y": 87.1,
- "z": 5,
- "depth": 115,
- "diameter": 30,
- "total-liquid-volume": 50000
- },
- "B4": {
- "x": 51.26,
- "y": 87.1,
- "z": 5,
- "depth": 115,
- "diameter": 30,
- "total-liquid-volume": 50000
- }
- }
- },
-
- "trough-12row": {
- "origin-offset": {
- "x": 7.75,
- "y": 14.34
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 8,
- "depth": 38,
- "total-liquid-volume": 22000
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 8,
- "depth": 38,
- "total-liquid-volume": 22000
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 8,
- "depth": 38,
- "total-liquid-volume": 22000
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 8,
- "depth": 38,
- "total-liquid-volume": 22000
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 8,
- "depth": 38,
- "total-liquid-volume": 22000
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 8,
- "depth": 38,
- "total-liquid-volume": 22000
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 8,
- "depth": 38,
- "total-liquid-volume": 22000
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 8,
- "depth": 38,
- "total-liquid-volume": 22000
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 8,
- "depth": 38,
- "total-liquid-volume": 22000
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 8,
- "depth": 38,
- "total-liquid-volume": 22000
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 8,
- "depth": 38,
- "total-liquid-volume": 22000
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 8,
- "depth": 38,
- "total-liquid-volume": 22000
- }
- }
- },
-
- "trough-12row-short": {
- "origin-offset": {
- "x": 7.75,
- "y": 14.34
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 20,
- "total-liquid-volume": 22000
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 0,
- "depth": 20,
- "total-liquid-volume": 22000
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 0,
- "depth": 20,
- "total-liquid-volume": 22000
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 0,
- "depth": 20,
- "total-liquid-volume": 22000
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 0,
- "depth": 20,
- "total-liquid-volume": 22000
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 0,
- "depth": 20,
- "total-liquid-volume": 22000
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 0,
- "depth": 20,
- "total-liquid-volume": 22000
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 0,
- "depth": 20,
- "total-liquid-volume": 22000
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 0,
- "depth": 20,
- "total-liquid-volume": 22000
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 0,
- "depth": 20,
- "total-liquid-volume": 22000
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 0,
- "depth": 20,
- "total-liquid-volume": 22000
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 0,
- "depth": 20,
- "total-liquid-volume": 22000
- }
- }
- },
-
- "24-vial-rack": {
- "origin-offset": {
- "x": 13.67,
- "y": 16
- },
- "locations": {
- "A1": {
- "x": 0.0,
- "total-liquid-volume": 3400,
- "y": 0.0,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "A2": {
- "x": 0.0,
- "total-liquid-volume": 3400,
- "y": 19.3,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "A3": {
- "x": 0.0,
- "total-liquid-volume": 3400,
- "y": 38.6,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "A4": {
- "x": 0.0,
- "total-liquid-volume": 3400,
- "y": 57.9,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "A5": {
- "x": 0.0,
- "total-liquid-volume": 3400,
- "y": 77.2,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "A6": {
- "x": 0.0,
- "total-liquid-volume": 3400,
- "y": 96.5,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "B1": {
- "x": 19.3,
- "total-liquid-volume": 3400,
- "y": 0.0,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "B2": {
- "x": 19.3,
- "total-liquid-volume": 3400,
- "y": 19.3,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "B3": {
- "x": 19.3,
- "total-liquid-volume": 3400,
- "y": 38.6,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "B4": {
- "x": 19.3,
- "total-liquid-volume": 3400,
- "y": 57.9,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "B5": {
- "x": 19.3,
- "total-liquid-volume": 3400,
- "y": 77.2,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "B6": {
- "x": 19.3,
- "total-liquid-volume": 3400,
- "y": 96.5,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "C1": {
- "x": 38.6,
- "total-liquid-volume": 3400,
- "y": 0.0,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "C2": {
- "x": 38.6,
- "total-liquid-volume": 3400,
- "y": 19.3,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "C3": {
- "x": 38.6,
- "total-liquid-volume": 3400,
- "y": 38.6,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "C4": {
- "x": 38.6,
- "total-liquid-volume": 3400,
- "y": 57.9,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "C5": {
- "x": 38.6,
- "total-liquid-volume": 3400,
- "y": 77.2,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "C6": {
- "x": 38.6,
- "total-liquid-volume": 3400,
- "y": 96.5,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "D1": {
- "x": 57.9,
- "total-liquid-volume": 3400,
- "y": 0.0,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "D2": {
- "x": 57.9,
- "total-liquid-volume": 3400,
- "y": 19.3,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "D3": {
- "x": 57.9,
- "total-liquid-volume": 3400,
- "y": 38.6,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "D4": {
- "x": 57.9,
- "total-liquid-volume": 3400,
- "y": 57.9,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "D5": {
- "x": 57.9,
- "total-liquid-volume": 3400,
- "y": 77.2,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- },
- "D6": {
- "x": 57.9,
- "total-liquid-volume": 3400,
- "y": 96.5,
- "depth": 16.2,
- "z": 0,
- "diameter": 15.62
- }
- }
- },
-
- "96-deep-well": {
- "origin-offset": {
- "x": 11.24,
- "y": 14.34
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 0,
- "depth": 33.5,
- "diameter": 7.5,
- "total-liquid-volume": 2000
- }
- }
- },
-
- "96-PCR-tall": {
- "origin-offset": {
- "x": 11.24,
- "y": 14.34
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 0,
- "depth": 15.4,
- "diameter": 6.4,
- "total-liquid-volume": 300
- }
- }
- },
-
- "96-PCR-flat": {
- "origin-offset": {
- "x": 11.24,
- "y": 14.34
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 0,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 300
- }
- }
- },
-
- "biorad-hardshell-96-PCR": {
- "origin-offset": {
- "x": 18.24,
- "y": 13.63
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 4.25,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 300
- }
- }
- },
-
- "96-flat": {
- "origin-offset": {
- "x": 17.64,
- "y": 14.34
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 3.85,
- "depth": 10.5,
- "diameter": 6.4,
- "total-liquid-volume": 400
- }
- }
- },
-
- "PCR-strip-tall": {
- "origin-offset": {
- "x": 11.24,
- "y": 14.34
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 0,
- "depth": 19.5,
- "diameter": 6.4,
- "total-liquid-volume": 280
- }
- }
- },
-
- "384-plate": {
- "origin-offset": {
- "x": 9,
- "y": 12.13
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B1": {
- "x": 4.5,
- "y": 0,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C1": {
- "x": 9,
- "y": 0,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D1": {
- "x": 13.5,
- "y": 0,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E1": {
- "x": 18,
- "y": 0,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F1": {
- "x": 22.5,
- "y": 0,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G1": {
- "x": 27,
- "y": 0,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H1": {
- "x": 31.5,
- "y": 0,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I1": {
- "x": 36,
- "y": 0,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J1": {
- "x": 40.5,
- "y": 0,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K1": {
- "x": 45,
- "y": 0,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L1": {
- "x": 49.5,
- "y": 0,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M1": {
- "x": 54,
- "y": 0,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N1": {
- "x": 58.5,
- "y": 0,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O1": {
- "x": 63,
- "y": 0,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P1": {
- "x": 67.5,
- "y": 0,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A2": {
- "x": 0,
- "y": 4.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B2": {
- "x": 4.5,
- "y": 4.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C2": {
- "x": 9,
- "y": 4.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D2": {
- "x": 13.5,
- "y": 4.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E2": {
- "x": 18,
- "y": 4.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F2": {
- "x": 22.5,
- "y": 4.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G2": {
- "x": 27,
- "y": 4.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H2": {
- "x": 31.5,
- "y": 4.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I2": {
- "x": 36,
- "y": 4.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J2": {
- "x": 40.5,
- "y": 4.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K2": {
- "x": 45,
- "y": 4.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L2": {
- "x": 49.5,
- "y": 4.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M2": {
- "x": 54,
- "y": 4.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N2": {
- "x": 58.5,
- "y": 4.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O2": {
- "x": 63,
- "y": 4.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P2": {
- "x": 67.5,
- "y": 4.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A3": {
- "x": 0,
- "y": 9,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B3": {
- "x": 4.5,
- "y": 9,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C3": {
- "x": 9,
- "y": 9,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D3": {
- "x": 13.5,
- "y": 9,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E3": {
- "x": 18,
- "y": 9,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F3": {
- "x": 22.5,
- "y": 9,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G3": {
- "x": 27,
- "y": 9,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H3": {
- "x": 31.5,
- "y": 9,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I3": {
- "x": 36,
- "y": 9,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J3": {
- "x": 40.5,
- "y": 9,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K3": {
- "x": 45,
- "y": 9,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L3": {
- "x": 49.5,
- "y": 9,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M3": {
- "x": 54,
- "y": 9,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N3": {
- "x": 58.5,
- "y": 9,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O3": {
- "x": 63,
- "y": 9,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P3": {
- "x": 67.5,
- "y": 9,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A4": {
- "x": 0,
- "y": 13.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B4": {
- "x": 4.5,
- "y": 13.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C4": {
- "x": 9,
- "y": 13.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D4": {
- "x": 13.5,
- "y": 13.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E4": {
- "x": 18,
- "y": 13.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F4": {
- "x": 22.5,
- "y": 13.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G4": {
- "x": 27,
- "y": 13.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H4": {
- "x": 31.5,
- "y": 13.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I4": {
- "x": 36,
- "y": 13.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J4": {
- "x": 40.5,
- "y": 13.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K4": {
- "x": 45,
- "y": 13.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L4": {
- "x": 49.5,
- "y": 13.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M4": {
- "x": 54,
- "y": 13.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N4": {
- "x": 58.5,
- "y": 13.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O4": {
- "x": 63,
- "y": 13.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P4": {
- "x": 67.5,
- "y": 13.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A5": {
- "x": 0,
- "y": 18,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B5": {
- "x": 4.5,
- "y": 18,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C5": {
- "x": 9,
- "y": 18,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D5": {
- "x": 13.5,
- "y": 18,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E5": {
- "x": 18,
- "y": 18,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F5": {
- "x": 22.5,
- "y": 18,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G5": {
- "x": 27,
- "y": 18,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H5": {
- "x": 31.5,
- "y": 18,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I5": {
- "x": 36,
- "y": 18,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J5": {
- "x": 40.5,
- "y": 18,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K5": {
- "x": 45,
- "y": 18,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L5": {
- "x": 49.5,
- "y": 18,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M5": {
- "x": 54,
- "y": 18,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N5": {
- "x": 58.5,
- "y": 18,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O5": {
- "x": 63,
- "y": 18,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P5": {
- "x": 67.5,
- "y": 18,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A6": {
- "x": 0,
- "y": 22.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B6": {
- "x": 4.5,
- "y": 22.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C6": {
- "x": 9,
- "y": 22.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D6": {
- "x": 13.5,
- "y": 22.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E6": {
- "x": 18,
- "y": 22.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F6": {
- "x": 22.5,
- "y": 22.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G6": {
- "x": 27,
- "y": 22.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H6": {
- "x": 31.5,
- "y": 22.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I6": {
- "x": 36,
- "y": 22.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J6": {
- "x": 40.5,
- "y": 22.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K6": {
- "x": 45,
- "y": 22.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L6": {
- "x": 49.5,
- "y": 22.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M6": {
- "x": 54,
- "y": 22.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N6": {
- "x": 58.5,
- "y": 22.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O6": {
- "x": 63,
- "y": 22.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P6": {
- "x": 67.5,
- "y": 22.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A7": {
- "x": 0,
- "y": 27,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B7": {
- "x": 4.5,
- "y": 27,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C7": {
- "x": 9,
- "y": 27,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D7": {
- "x": 13.5,
- "y": 27,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E7": {
- "x": 18,
- "y": 27,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F7": {
- "x": 22.5,
- "y": 27,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G7": {
- "x": 27,
- "y": 27,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H7": {
- "x": 31.5,
- "y": 27,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I7": {
- "x": 36,
- "y": 27,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J7": {
- "x": 40.5,
- "y": 27,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K7": {
- "x": 45,
- "y": 27,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L7": {
- "x": 49.5,
- "y": 27,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M7": {
- "x": 54,
- "y": 27,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N7": {
- "x": 58.5,
- "y": 27,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O7": {
- "x": 63,
- "y": 27,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P7": {
- "x": 67.5,
- "y": 27,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A8": {
- "x": 0,
- "y": 31.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B8": {
- "x": 4.5,
- "y": 31.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C8": {
- "x": 9,
- "y": 31.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D8": {
- "x": 13.5,
- "y": 31.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E8": {
- "x": 18,
- "y": 31.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F8": {
- "x": 22.5,
- "y": 31.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G8": {
- "x": 27,
- "y": 31.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H8": {
- "x": 31.5,
- "y": 31.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I8": {
- "x": 36,
- "y": 31.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J8": {
- "x": 40.5,
- "y": 31.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K8": {
- "x": 45,
- "y": 31.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L8": {
- "x": 49.5,
- "y": 31.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M8": {
- "x": 54,
- "y": 31.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N8": {
- "x": 58.5,
- "y": 31.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O8": {
- "x": 63,
- "y": 31.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P8": {
- "x": 67.5,
- "y": 31.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A9": {
- "x": 0,
- "y": 36,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B9": {
- "x": 4.5,
- "y": 36,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C9": {
- "x": 9,
- "y": 36,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D9": {
- "x": 13.5,
- "y": 36,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E9": {
- "x": 18,
- "y": 36,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F9": {
- "x": 22.5,
- "y": 36,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G9": {
- "x": 27,
- "y": 36,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H9": {
- "x": 31.5,
- "y": 36,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I9": {
- "x": 36,
- "y": 36,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J9": {
- "x": 40.5,
- "y": 36,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K9": {
- "x": 45,
- "y": 36,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L9": {
- "x": 49.5,
- "y": 36,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M9": {
- "x": 54,
- "y": 36,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N9": {
- "x": 58.5,
- "y": 36,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O9": {
- "x": 63,
- "y": 36,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P9": {
- "x": 67.5,
- "y": 36,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A10": {
- "x": 0,
- "y": 40.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B10": {
- "x": 4.5,
- "y": 40.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C10": {
- "x": 9,
- "y": 40.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D10": {
- "x": 13.5,
- "y": 40.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E10": {
- "x": 18,
- "y": 40.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F10": {
- "x": 22.5,
- "y": 40.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G10": {
- "x": 27,
- "y": 40.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H10": {
- "x": 31.5,
- "y": 40.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I10": {
- "x": 36,
- "y": 40.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J10": {
- "x": 40.5,
- "y": 40.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K10": {
- "x": 45,
- "y": 40.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L10": {
- "x": 49.5,
- "y": 40.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M10": {
- "x": 54,
- "y": 40.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N10": {
- "x": 58.5,
- "y": 40.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O10": {
- "x": 63,
- "y": 40.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P10": {
- "x": 67.5,
- "y": 40.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A11": {
- "x": 0,
- "y": 45,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B11": {
- "x": 4.5,
- "y": 45,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C11": {
- "x": 9,
- "y": 45,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D11": {
- "x": 13.5,
- "y": 45,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E11": {
- "x": 18,
- "y": 45,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F11": {
- "x": 22.5,
- "y": 45,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G11": {
- "x": 27,
- "y": 45,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H11": {
- "x": 31.5,
- "y": 45,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I11": {
- "x": 36,
- "y": 45,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J11": {
- "x": 40.5,
- "y": 45,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K11": {
- "x": 45,
- "y": 45,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L11": {
- "x": 49.5,
- "y": 45,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M11": {
- "x": 54,
- "y": 45,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N11": {
- "x": 58.5,
- "y": 45,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O11": {
- "x": 63,
- "y": 45,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P11": {
- "x": 67.5,
- "y": 45,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A12": {
- "x": 0,
- "y": 49.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B12": {
- "x": 4.5,
- "y": 49.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C12": {
- "x": 9,
- "y": 49.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D12": {
- "x": 13.5,
- "y": 49.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E12": {
- "x": 18,
- "y": 49.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F12": {
- "x": 22.5,
- "y": 49.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G12": {
- "x": 27,
- "y": 49.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H12": {
- "x": 31.5,
- "y": 49.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I12": {
- "x": 36,
- "y": 49.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J12": {
- "x": 40.5,
- "y": 49.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K12": {
- "x": 45,
- "y": 49.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L12": {
- "x": 49.5,
- "y": 49.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M12": {
- "x": 54,
- "y": 49.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N12": {
- "x": 58.5,
- "y": 49.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O12": {
- "x": 63,
- "y": 49.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P12": {
- "x": 67.5,
- "y": 49.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A13": {
- "x": 0,
- "y": 54,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B13": {
- "x": 4.5,
- "y": 54,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C13": {
- "x": 9,
- "y": 54,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D13": {
- "x": 13.5,
- "y": 54,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E13": {
- "x": 18,
- "y": 54,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F13": {
- "x": 22.5,
- "y": 54,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G13": {
- "x": 27,
- "y": 54,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H13": {
- "x": 31.5,
- "y": 54,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I13": {
- "x": 36,
- "y": 54,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J13": {
- "x": 40.5,
- "y": 54,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K13": {
- "x": 45,
- "y": 54,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L13": {
- "x": 49.5,
- "y": 54,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M13": {
- "x": 54,
- "y": 54,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N13": {
- "x": 58.5,
- "y": 54,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O13": {
- "x": 63,
- "y": 54,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P13": {
- "x": 67.5,
- "y": 54,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A14": {
- "x": 0,
- "y": 58.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B14": {
- "x": 4.5,
- "y": 58.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C14": {
- "x": 9,
- "y": 58.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D14": {
- "x": 13.5,
- "y": 58.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E14": {
- "x": 18,
- "y": 58.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F14": {
- "x": 22.5,
- "y": 58.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G14": {
- "x": 27,
- "y": 58.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H14": {
- "x": 31.5,
- "y": 58.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I14": {
- "x": 36,
- "y": 58.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J14": {
- "x": 40.5,
- "y": 58.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K14": {
- "x": 45,
- "y": 58.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L14": {
- "x": 49.5,
- "y": 58.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M14": {
- "x": 54,
- "y": 58.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N14": {
- "x": 58.5,
- "y": 58.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O14": {
- "x": 63,
- "y": 58.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P14": {
- "x": 67.5,
- "y": 58.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A15": {
- "x": 0,
- "y": 63,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B15": {
- "x": 4.5,
- "y": 63,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C15": {
- "x": 9,
- "y": 63,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D15": {
- "x": 13.5,
- "y": 63,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E15": {
- "x": 18,
- "y": 63,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F15": {
- "x": 22.5,
- "y": 63,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G15": {
- "x": 27,
- "y": 63,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H15": {
- "x": 31.5,
- "y": 63,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I15": {
- "x": 36,
- "y": 63,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J15": {
- "x": 40.5,
- "y": 63,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K15": {
- "x": 45,
- "y": 63,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L15": {
- "x": 49.5,
- "y": 63,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M15": {
- "x": 54,
- "y": 63,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N15": {
- "x": 58.5,
- "y": 63,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O15": {
- "x": 63,
- "y": 63,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P15": {
- "x": 67.5,
- "y": 63,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A16": {
- "x": 0,
- "y": 67.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B16": {
- "x": 4.5,
- "y": 67.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C16": {
- "x": 9,
- "y": 67.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D16": {
- "x": 13.5,
- "y": 67.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E16": {
- "x": 18,
- "y": 67.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F16": {
- "x": 22.5,
- "y": 67.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G16": {
- "x": 27,
- "y": 67.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H16": {
- "x": 31.5,
- "y": 67.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I16": {
- "x": 36,
- "y": 67.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J16": {
- "x": 40.5,
- "y": 67.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K16": {
- "x": 45,
- "y": 67.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L16": {
- "x": 49.5,
- "y": 67.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M16": {
- "x": 54,
- "y": 67.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N16": {
- "x": 58.5,
- "y": 67.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O16": {
- "x": 63,
- "y": 67.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P16": {
- "x": 67.5,
- "y": 67.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A17": {
- "x": 0,
- "y": 72,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B17": {
- "x": 4.5,
- "y": 72,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C17": {
- "x": 9,
- "y": 72,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D17": {
- "x": 13.5,
- "y": 72,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E17": {
- "x": 18,
- "y": 72,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F17": {
- "x": 22.5,
- "y": 72,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G17": {
- "x": 27,
- "y": 72,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H17": {
- "x": 31.5,
- "y": 72,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I17": {
- "x": 36,
- "y": 72,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J17": {
- "x": 40.5,
- "y": 72,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K17": {
- "x": 45,
- "y": 72,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L17": {
- "x": 49.5,
- "y": 72,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M17": {
- "x": 54,
- "y": 72,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N17": {
- "x": 58.5,
- "y": 72,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O17": {
- "x": 63,
- "y": 72,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P17": {
- "x": 67.5,
- "y": 72,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A18": {
- "x": 0,
- "y": 76.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B18": {
- "x": 4.5,
- "y": 76.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C18": {
- "x": 9,
- "y": 76.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D18": {
- "x": 13.5,
- "y": 76.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E18": {
- "x": 18,
- "y": 76.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F18": {
- "x": 22.5,
- "y": 76.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G18": {
- "x": 27,
- "y": 76.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H18": {
- "x": 31.5,
- "y": 76.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I18": {
- "x": 36,
- "y": 76.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J18": {
- "x": 40.5,
- "y": 76.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K18": {
- "x": 45,
- "y": 76.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L18": {
- "x": 49.5,
- "y": 76.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M18": {
- "x": 54,
- "y": 76.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N18": {
- "x": 58.5,
- "y": 76.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O18": {
- "x": 63,
- "y": 76.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P18": {
- "x": 67.5,
- "y": 76.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A19": {
- "x": 0,
- "y": 81,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B19": {
- "x": 4.5,
- "y": 81,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C19": {
- "x": 9,
- "y": 81,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D19": {
- "x": 13.5,
- "y": 81,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E19": {
- "x": 18,
- "y": 81,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F19": {
- "x": 22.5,
- "y": 81,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G19": {
- "x": 27,
- "y": 81,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H19": {
- "x": 31.5,
- "y": 81,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I19": {
- "x": 36,
- "y": 81,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J19": {
- "x": 40.5,
- "y": 81,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K19": {
- "x": 45,
- "y": 81,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L19": {
- "x": 49.5,
- "y": 81,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M19": {
- "x": 54,
- "y": 81,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N19": {
- "x": 58.5,
- "y": 81,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O19": {
- "x": 63,
- "y": 81,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P19": {
- "x": 67.5,
- "y": 81,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A20": {
- "x": 0,
- "y": 85.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B20": {
- "x": 4.5,
- "y": 85.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C20": {
- "x": 9,
- "y": 85.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D20": {
- "x": 13.5,
- "y": 85.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E20": {
- "x": 18,
- "y": 85.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F20": {
- "x": 22.5,
- "y": 85.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G20": {
- "x": 27,
- "y": 85.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H20": {
- "x": 31.5,
- "y": 85.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I20": {
- "x": 36,
- "y": 85.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J20": {
- "x": 40.5,
- "y": 85.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K20": {
- "x": 45,
- "y": 85.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L20": {
- "x": 49.5,
- "y": 85.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M20": {
- "x": 54,
- "y": 85.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N20": {
- "x": 58.5,
- "y": 85.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O20": {
- "x": 63,
- "y": 85.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P20": {
- "x": 67.5,
- "y": 85.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A21": {
- "x": 0,
- "y": 90,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B21": {
- "x": 4.5,
- "y": 90,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C21": {
- "x": 9,
- "y": 90,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D21": {
- "x": 13.5,
- "y": 90,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E21": {
- "x": 18,
- "y": 90,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F21": {
- "x": 22.5,
- "y": 90,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G21": {
- "x": 27,
- "y": 90,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H21": {
- "x": 31.5,
- "y": 90,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I21": {
- "x": 36,
- "y": 90,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J21": {
- "x": 40.5,
- "y": 90,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K21": {
- "x": 45,
- "y": 90,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L21": {
- "x": 49.5,
- "y": 90,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M21": {
- "x": 54,
- "y": 90,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N21": {
- "x": 58.5,
- "y": 90,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O21": {
- "x": 63,
- "y": 90,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P21": {
- "x": 67.5,
- "y": 90,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A22": {
- "x": 0,
- "y": 94.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B22": {
- "x": 4.5,
- "y": 94.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C22": {
- "x": 9,
- "y": 94.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D22": {
- "x": 13.5,
- "y": 94.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E22": {
- "x": 18,
- "y": 94.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F22": {
- "x": 22.5,
- "y": 94.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G22": {
- "x": 27,
- "y": 94.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H22": {
- "x": 31.5,
- "y": 94.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I22": {
- "x": 36,
- "y": 94.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J22": {
- "x": 40.5,
- "y": 94.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K22": {
- "x": 45,
- "y": 94.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L22": {
- "x": 49.5,
- "y": 94.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M22": {
- "x": 54,
- "y": 94.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N22": {
- "x": 58.5,
- "y": 94.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O22": {
- "x": 63,
- "y": 94.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P22": {
- "x": 67.5,
- "y": 94.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A23": {
- "x": 0,
- "y": 99,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B23": {
- "x": 4.5,
- "y": 99,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C23": {
- "x": 9,
- "y": 99,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D23": {
- "x": 13.5,
- "y": 99,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E23": {
- "x": 18,
- "y": 99,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F23": {
- "x": 22.5,
- "y": 99,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G23": {
- "x": 27,
- "y": 99,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H23": {
- "x": 31.5,
- "y": 99,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I23": {
- "x": 36,
- "y": 99,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J23": {
- "x": 40.5,
- "y": 99,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K23": {
- "x": 45,
- "y": 99,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L23": {
- "x": 49.5,
- "y": 99,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M23": {
- "x": 54,
- "y": 99,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N23": {
- "x": 58.5,
- "y": 99,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O23": {
- "x": 63,
- "y": 99,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P23": {
- "x": 67.5,
- "y": 99,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A24": {
- "x": 0,
- "y": 103.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B24": {
- "x": 4.5,
- "y": 103.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C24": {
- "x": 9,
- "y": 103.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D24": {
- "x": 13.5,
- "y": 103.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E24": {
- "x": 18,
- "y": 103.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F24": {
- "x": 22.5,
- "y": 103.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G24": {
- "x": 27,
- "y": 103.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H24": {
- "x": 31.5,
- "y": 103.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I24": {
- "x": 36,
- "y": 103.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J24": {
- "x": 40.5,
- "y": 103.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K24": {
- "x": 45,
- "y": 103.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L24": {
- "x": 49.5,
- "y": 103.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M24": {
- "x": 54,
- "y": 103.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N24": {
- "x": 58.5,
- "y": 103.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O24": {
- "x": 63,
- "y": 103.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P24": {
- "x": 67.5,
- "y": 103.5,
- "z": 0,
- "depth": 9.5,
- "diameter": 3.1,
- "total-liquid-volume": 55
- }
- }
- },
-
- "MALDI-plate": {
- "origin-offset": {
- "x": 9,
- "y": 12
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B1": {
- "x": 4.5,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C1": {
- "x": 9,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D1": {
- "x": 13.5,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E1": {
- "x": 18,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F1": {
- "x": 22.5,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G1": {
- "x": 27,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H1": {
- "x": 31.5,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I1": {
- "x": 36,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J1": {
- "x": 40.5,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K1": {
- "x": 45,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L1": {
- "x": 49.5,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M1": {
- "x": 54,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N1": {
- "x": 58.5,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O1": {
- "x": 63,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P1": {
- "x": 67.5,
- "y": 0,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A2": {
- "x": 0,
- "y": 4.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B2": {
- "x": 4.5,
- "y": 4.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C2": {
- "x": 9,
- "y": 4.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D2": {
- "x": 13.5,
- "y": 4.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E2": {
- "x": 18,
- "y": 4.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F2": {
- "x": 22.5,
- "y": 4.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G2": {
- "x": 27,
- "y": 4.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H2": {
- "x": 31.5,
- "y": 4.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I2": {
- "x": 36,
- "y": 4.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J2": {
- "x": 40.5,
- "y": 4.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K2": {
- "x": 45,
- "y": 4.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L2": {
- "x": 49.5,
- "y": 4.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M2": {
- "x": 54,
- "y": 4.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N2": {
- "x": 58.5,
- "y": 4.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O2": {
- "x": 63,
- "y": 4.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P2": {
- "x": 67.5,
- "y": 4.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A3": {
- "x": 0,
- "y": 9,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B3": {
- "x": 4.5,
- "y": 9,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C3": {
- "x": 9,
- "y": 9,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D3": {
- "x": 13.5,
- "y": 9,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E3": {
- "x": 18,
- "y": 9,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F3": {
- "x": 22.5,
- "y": 9,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G3": {
- "x": 27,
- "y": 9,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H3": {
- "x": 31.5,
- "y": 9,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I3": {
- "x": 36,
- "y": 9,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J3": {
- "x": 40.5,
- "y": 9,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K3": {
- "x": 45,
- "y": 9,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L3": {
- "x": 49.5,
- "y": 9,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M3": {
- "x": 54,
- "y": 9,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N3": {
- "x": 58.5,
- "y": 9,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O3": {
- "x": 63,
- "y": 9,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P3": {
- "x": 67.5,
- "y": 9,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A4": {
- "x": 0,
- "y": 13.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B4": {
- "x": 4.5,
- "y": 13.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C4": {
- "x": 9,
- "y": 13.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D4": {
- "x": 13.5,
- "y": 13.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E4": {
- "x": 18,
- "y": 13.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F4": {
- "x": 22.5,
- "y": 13.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G4": {
- "x": 27,
- "y": 13.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H4": {
- "x": 31.5,
- "y": 13.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I4": {
- "x": 36,
- "y": 13.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J4": {
- "x": 40.5,
- "y": 13.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K4": {
- "x": 45,
- "y": 13.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L4": {
- "x": 49.5,
- "y": 13.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M4": {
- "x": 54,
- "y": 13.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N4": {
- "x": 58.5,
- "y": 13.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O4": {
- "x": 63,
- "y": 13.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P4": {
- "x": 67.5,
- "y": 13.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A5": {
- "x": 0,
- "y": 18,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B5": {
- "x": 4.5,
- "y": 18,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C5": {
- "x": 9,
- "y": 18,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D5": {
- "x": 13.5,
- "y": 18,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E5": {
- "x": 18,
- "y": 18,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F5": {
- "x": 22.5,
- "y": 18,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G5": {
- "x": 27,
- "y": 18,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H5": {
- "x": 31.5,
- "y": 18,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I5": {
- "x": 36,
- "y": 18,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J5": {
- "x": 40.5,
- "y": 18,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K5": {
- "x": 45,
- "y": 18,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L5": {
- "x": 49.5,
- "y": 18,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M5": {
- "x": 54,
- "y": 18,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N5": {
- "x": 58.5,
- "y": 18,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O5": {
- "x": 63,
- "y": 18,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P5": {
- "x": 67.5,
- "y": 18,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A6": {
- "x": 0,
- "y": 22.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B6": {
- "x": 4.5,
- "y": 22.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C6": {
- "x": 9,
- "y": 22.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D6": {
- "x": 13.5,
- "y": 22.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E6": {
- "x": 18,
- "y": 22.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F6": {
- "x": 22.5,
- "y": 22.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G6": {
- "x": 27,
- "y": 22.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H6": {
- "x": 31.5,
- "y": 22.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I6": {
- "x": 36,
- "y": 22.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J6": {
- "x": 40.5,
- "y": 22.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K6": {
- "x": 45,
- "y": 22.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L6": {
- "x": 49.5,
- "y": 22.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M6": {
- "x": 54,
- "y": 22.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N6": {
- "x": 58.5,
- "y": 22.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O6": {
- "x": 63,
- "y": 22.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P6": {
- "x": 67.5,
- "y": 22.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A7": {
- "x": 0,
- "y": 27,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B7": {
- "x": 4.5,
- "y": 27,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C7": {
- "x": 9,
- "y": 27,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D7": {
- "x": 13.5,
- "y": 27,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E7": {
- "x": 18,
- "y": 27,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F7": {
- "x": 22.5,
- "y": 27,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G7": {
- "x": 27,
- "y": 27,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H7": {
- "x": 31.5,
- "y": 27,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I7": {
- "x": 36,
- "y": 27,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J7": {
- "x": 40.5,
- "y": 27,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K7": {
- "x": 45,
- "y": 27,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L7": {
- "x": 49.5,
- "y": 27,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M7": {
- "x": 54,
- "y": 27,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N7": {
- "x": 58.5,
- "y": 27,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O7": {
- "x": 63,
- "y": 27,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P7": {
- "x": 67.5,
- "y": 27,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A8": {
- "x": 0,
- "y": 31.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B8": {
- "x": 4.5,
- "y": 31.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C8": {
- "x": 9,
- "y": 31.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D8": {
- "x": 13.5,
- "y": 31.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E8": {
- "x": 18,
- "y": 31.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F8": {
- "x": 22.5,
- "y": 31.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G8": {
- "x": 27,
- "y": 31.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H8": {
- "x": 31.5,
- "y": 31.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I8": {
- "x": 36,
- "y": 31.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J8": {
- "x": 40.5,
- "y": 31.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K8": {
- "x": 45,
- "y": 31.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L8": {
- "x": 49.5,
- "y": 31.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M8": {
- "x": 54,
- "y": 31.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N8": {
- "x": 58.5,
- "y": 31.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O8": {
- "x": 63,
- "y": 31.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P8": {
- "x": 67.5,
- "y": 31.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A9": {
- "x": 0,
- "y": 36,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B9": {
- "x": 4.5,
- "y": 36,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C9": {
- "x": 9,
- "y": 36,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D9": {
- "x": 13.5,
- "y": 36,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E9": {
- "x": 18,
- "y": 36,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F9": {
- "x": 22.5,
- "y": 36,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G9": {
- "x": 27,
- "y": 36,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H9": {
- "x": 31.5,
- "y": 36,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I9": {
- "x": 36,
- "y": 36,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J9": {
- "x": 40.5,
- "y": 36,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K9": {
- "x": 45,
- "y": 36,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L9": {
- "x": 49.5,
- "y": 36,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M9": {
- "x": 54,
- "y": 36,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N9": {
- "x": 58.5,
- "y": 36,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O9": {
- "x": 63,
- "y": 36,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P9": {
- "x": 67.5,
- "y": 36,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A10": {
- "x": 0,
- "y": 40.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B10": {
- "x": 4.5,
- "y": 40.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C10": {
- "x": 9,
- "y": 40.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D10": {
- "x": 13.5,
- "y": 40.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E10": {
- "x": 18,
- "y": 40.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F10": {
- "x": 22.5,
- "y": 40.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G10": {
- "x": 27,
- "y": 40.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H10": {
- "x": 31.5,
- "y": 40.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I10": {
- "x": 36,
- "y": 40.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J10": {
- "x": 40.5,
- "y": 40.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K10": {
- "x": 45,
- "y": 40.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L10": {
- "x": 49.5,
- "y": 40.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M10": {
- "x": 54,
- "y": 40.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N10": {
- "x": 58.5,
- "y": 40.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O10": {
- "x": 63,
- "y": 40.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P10": {
- "x": 67.5,
- "y": 40.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A11": {
- "x": 0,
- "y": 45,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B11": {
- "x": 4.5,
- "y": 45,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C11": {
- "x": 9,
- "y": 45,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D11": {
- "x": 13.5,
- "y": 45,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E11": {
- "x": 18,
- "y": 45,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F11": {
- "x": 22.5,
- "y": 45,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G11": {
- "x": 27,
- "y": 45,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H11": {
- "x": 31.5,
- "y": 45,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I11": {
- "x": 36,
- "y": 45,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J11": {
- "x": 40.5,
- "y": 45,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K11": {
- "x": 45,
- "y": 45,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L11": {
- "x": 49.5,
- "y": 45,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M11": {
- "x": 54,
- "y": 45,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N11": {
- "x": 58.5,
- "y": 45,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O11": {
- "x": 63,
- "y": 45,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P11": {
- "x": 67.5,
- "y": 45,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A12": {
- "x": 0,
- "y": 49.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B12": {
- "x": 4.5,
- "y": 49.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C12": {
- "x": 9,
- "y": 49.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D12": {
- "x": 13.5,
- "y": 49.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E12": {
- "x": 18,
- "y": 49.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F12": {
- "x": 22.5,
- "y": 49.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G12": {
- "x": 27,
- "y": 49.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H12": {
- "x": 31.5,
- "y": 49.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I12": {
- "x": 36,
- "y": 49.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J12": {
- "x": 40.5,
- "y": 49.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K12": {
- "x": 45,
- "y": 49.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L12": {
- "x": 49.5,
- "y": 49.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M12": {
- "x": 54,
- "y": 49.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N12": {
- "x": 58.5,
- "y": 49.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O12": {
- "x": 63,
- "y": 49.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P12": {
- "x": 67.5,
- "y": 49.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A13": {
- "x": 0,
- "y": 54,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B13": {
- "x": 4.5,
- "y": 54,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C13": {
- "x": 9,
- "y": 54,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D13": {
- "x": 13.5,
- "y": 54,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E13": {
- "x": 18,
- "y": 54,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F13": {
- "x": 22.5,
- "y": 54,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G13": {
- "x": 27,
- "y": 54,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H13": {
- "x": 31.5,
- "y": 54,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I13": {
- "x": 36,
- "y": 54,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J13": {
- "x": 40.5,
- "y": 54,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K13": {
- "x": 45,
- "y": 54,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L13": {
- "x": 49.5,
- "y": 54,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M13": {
- "x": 54,
- "y": 54,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N13": {
- "x": 58.5,
- "y": 54,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O13": {
- "x": 63,
- "y": 54,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P13": {
- "x": 67.5,
- "y": 54,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A14": {
- "x": 0,
- "y": 58.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B14": {
- "x": 4.5,
- "y": 58.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C14": {
- "x": 9,
- "y": 58.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D14": {
- "x": 13.5,
- "y": 58.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E14": {
- "x": 18,
- "y": 58.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F14": {
- "x": 22.5,
- "y": 58.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G14": {
- "x": 27,
- "y": 58.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H14": {
- "x": 31.5,
- "y": 58.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I14": {
- "x": 36,
- "y": 58.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J14": {
- "x": 40.5,
- "y": 58.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K14": {
- "x": 45,
- "y": 58.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L14": {
- "x": 49.5,
- "y": 58.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M14": {
- "x": 54,
- "y": 58.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N14": {
- "x": 58.5,
- "y": 58.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O14": {
- "x": 63,
- "y": 58.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P14": {
- "x": 67.5,
- "y": 58.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A15": {
- "x": 0,
- "y": 63,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B15": {
- "x": 4.5,
- "y": 63,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C15": {
- "x": 9,
- "y": 63,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D15": {
- "x": 13.5,
- "y": 63,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E15": {
- "x": 18,
- "y": 63,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F15": {
- "x": 22.5,
- "y": 63,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G15": {
- "x": 27,
- "y": 63,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H15": {
- "x": 31.5,
- "y": 63,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I15": {
- "x": 36,
- "y": 63,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J15": {
- "x": 40.5,
- "y": 63,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K15": {
- "x": 45,
- "y": 63,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L15": {
- "x": 49.5,
- "y": 63,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M15": {
- "x": 54,
- "y": 63,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N15": {
- "x": 58.5,
- "y": 63,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O15": {
- "x": 63,
- "y": 63,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P15": {
- "x": 67.5,
- "y": 63,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A16": {
- "x": 0,
- "y": 67.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B16": {
- "x": 4.5,
- "y": 67.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C16": {
- "x": 9,
- "y": 67.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D16": {
- "x": 13.5,
- "y": 67.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E16": {
- "x": 18,
- "y": 67.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F16": {
- "x": 22.5,
- "y": 67.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G16": {
- "x": 27,
- "y": 67.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H16": {
- "x": 31.5,
- "y": 67.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I16": {
- "x": 36,
- "y": 67.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J16": {
- "x": 40.5,
- "y": 67.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K16": {
- "x": 45,
- "y": 67.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L16": {
- "x": 49.5,
- "y": 67.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M16": {
- "x": 54,
- "y": 67.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N16": {
- "x": 58.5,
- "y": 67.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O16": {
- "x": 63,
- "y": 67.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P16": {
- "x": 67.5,
- "y": 67.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A17": {
- "x": 0,
- "y": 72,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B17": {
- "x": 4.5,
- "y": 72,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C17": {
- "x": 9,
- "y": 72,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D17": {
- "x": 13.5,
- "y": 72,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E17": {
- "x": 18,
- "y": 72,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F17": {
- "x": 22.5,
- "y": 72,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G17": {
- "x": 27,
- "y": 72,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H17": {
- "x": 31.5,
- "y": 72,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I17": {
- "x": 36,
- "y": 72,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J17": {
- "x": 40.5,
- "y": 72,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K17": {
- "x": 45,
- "y": 72,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L17": {
- "x": 49.5,
- "y": 72,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M17": {
- "x": 54,
- "y": 72,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N17": {
- "x": 58.5,
- "y": 72,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O17": {
- "x": 63,
- "y": 72,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P17": {
- "x": 67.5,
- "y": 72,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A18": {
- "x": 0,
- "y": 76.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B18": {
- "x": 4.5,
- "y": 76.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C18": {
- "x": 9,
- "y": 76.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D18": {
- "x": 13.5,
- "y": 76.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E18": {
- "x": 18,
- "y": 76.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F18": {
- "x": 22.5,
- "y": 76.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G18": {
- "x": 27,
- "y": 76.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H18": {
- "x": 31.5,
- "y": 76.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I18": {
- "x": 36,
- "y": 76.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J18": {
- "x": 40.5,
- "y": 76.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K18": {
- "x": 45,
- "y": 76.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L18": {
- "x": 49.5,
- "y": 76.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M18": {
- "x": 54,
- "y": 76.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N18": {
- "x": 58.5,
- "y": 76.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O18": {
- "x": 63,
- "y": 76.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P18": {
- "x": 67.5,
- "y": 76.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A19": {
- "x": 0,
- "y": 81,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B19": {
- "x": 4.5,
- "y": 81,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C19": {
- "x": 9,
- "y": 81,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D19": {
- "x": 13.5,
- "y": 81,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E19": {
- "x": 18,
- "y": 81,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F19": {
- "x": 22.5,
- "y": 81,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G19": {
- "x": 27,
- "y": 81,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H19": {
- "x": 31.5,
- "y": 81,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I19": {
- "x": 36,
- "y": 81,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J19": {
- "x": 40.5,
- "y": 81,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K19": {
- "x": 45,
- "y": 81,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L19": {
- "x": 49.5,
- "y": 81,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M19": {
- "x": 54,
- "y": 81,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N19": {
- "x": 58.5,
- "y": 81,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O19": {
- "x": 63,
- "y": 81,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P19": {
- "x": 67.5,
- "y": 81,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A20": {
- "x": 0,
- "y": 85.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B20": {
- "x": 4.5,
- "y": 85.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C20": {
- "x": 9,
- "y": 85.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D20": {
- "x": 13.5,
- "y": 85.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E20": {
- "x": 18,
- "y": 85.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F20": {
- "x": 22.5,
- "y": 85.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G20": {
- "x": 27,
- "y": 85.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H20": {
- "x": 31.5,
- "y": 85.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I20": {
- "x": 36,
- "y": 85.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J20": {
- "x": 40.5,
- "y": 85.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K20": {
- "x": 45,
- "y": 85.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L20": {
- "x": 49.5,
- "y": 85.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M20": {
- "x": 54,
- "y": 85.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N20": {
- "x": 58.5,
- "y": 85.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O20": {
- "x": 63,
- "y": 85.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P20": {
- "x": 67.5,
- "y": 85.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A21": {
- "x": 0,
- "y": 90,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B21": {
- "x": 4.5,
- "y": 90,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C21": {
- "x": 9,
- "y": 90,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D21": {
- "x": 13.5,
- "y": 90,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E21": {
- "x": 18,
- "y": 90,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F21": {
- "x": 22.5,
- "y": 90,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G21": {
- "x": 27,
- "y": 90,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H21": {
- "x": 31.5,
- "y": 90,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I21": {
- "x": 36,
- "y": 90,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J21": {
- "x": 40.5,
- "y": 90,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K21": {
- "x": 45,
- "y": 90,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L21": {
- "x": 49.5,
- "y": 90,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M21": {
- "x": 54,
- "y": 90,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N21": {
- "x": 58.5,
- "y": 90,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O21": {
- "x": 63,
- "y": 90,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P21": {
- "x": 67.5,
- "y": 90,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A22": {
- "x": 0,
- "y": 94.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B22": {
- "x": 4.5,
- "y": 94.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C22": {
- "x": 9,
- "y": 94.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D22": {
- "x": 13.5,
- "y": 94.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E22": {
- "x": 18,
- "y": 94.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F22": {
- "x": 22.5,
- "y": 94.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G22": {
- "x": 27,
- "y": 94.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H22": {
- "x": 31.5,
- "y": 94.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I22": {
- "x": 36,
- "y": 94.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J22": {
- "x": 40.5,
- "y": 94.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K22": {
- "x": 45,
- "y": 94.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L22": {
- "x": 49.5,
- "y": 94.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M22": {
- "x": 54,
- "y": 94.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N22": {
- "x": 58.5,
- "y": 94.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O22": {
- "x": 63,
- "y": 94.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P22": {
- "x": 67.5,
- "y": 94.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A23": {
- "x": 0,
- "y": 99,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B23": {
- "x": 4.5,
- "y": 99,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C23": {
- "x": 9,
- "y": 99,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D23": {
- "x": 13.5,
- "y": 99,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E23": {
- "x": 18,
- "y": 99,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F23": {
- "x": 22.5,
- "y": 99,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G23": {
- "x": 27,
- "y": 99,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H23": {
- "x": 31.5,
- "y": 99,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I23": {
- "x": 36,
- "y": 99,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J23": {
- "x": 40.5,
- "y": 99,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K23": {
- "x": 45,
- "y": 99,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L23": {
- "x": 49.5,
- "y": 99,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M23": {
- "x": 54,
- "y": 99,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N23": {
- "x": 58.5,
- "y": 99,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O23": {
- "x": 63,
- "y": 99,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P23": {
- "x": 67.5,
- "y": 99,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "A24": {
- "x": 0,
- "y": 103.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "B24": {
- "x": 4.5,
- "y": 103.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "C24": {
- "x": 9,
- "y": 103.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "D24": {
- "x": 13.5,
- "y": 103.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "E24": {
- "x": 18,
- "y": 103.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "F24": {
- "x": 22.5,
- "y": 103.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "G24": {
- "x": 27,
- "y": 103.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "H24": {
- "x": 31.5,
- "y": 103.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "I24": {
- "x": 36,
- "y": 103.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "J24": {
- "x": 40.5,
- "y": 103.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "K24": {
- "x": 45,
- "y": 103.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "L24": {
- "x": 49.5,
- "y": 103.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "M24": {
- "x": 54,
- "y": 103.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "N24": {
- "x": 58.5,
- "y": 103.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "O24": {
- "x": 63,
- "y": 103.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- },
- "P24": {
- "x": 67.5,
- "y": 103.5,
- "z": 0,
- "depth": 0,
- "diameter": 3.1,
- "total-liquid-volume": 55
- }
- }
- },
-
- "48-vial-plate": {
- "origin-offset": {
- "x": 10.5,
- "y": 18
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "B1": {
- "x": 13,
- "y": 0,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "C1": {
- "x": 26,
- "y": 0,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "D1": {
- "x": 39,
- "y": 0,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "E1": {
- "x": 52,
- "y": 0,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "F1": {
- "x": 65,
- "y": 0,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
-
- "A2": {
- "x": 0,
- "y": 13,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "B2": {
- "x": 13,
- "y": 13,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "C2": {
- "x": 26,
- "y": 13,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "D2": {
- "x": 39,
- "y": 13,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "E2": {
- "x": 52,
- "y": 13,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "F2": {
- "x": 65,
- "y": 13,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
-
- "A3": {
- "x": 0,
- "y": 26,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "B3": {
- "x": 13,
- "y": 26,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "C3": {
- "x": 26,
- "y": 26,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "D3": {
- "x": 39,
- "y": 26,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "E3": {
- "x": 52,
- "y": 26,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "F3": {
- "x": 65,
- "y": 26,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
-
- "A4": {
- "x": 0,
- "y": 39,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "B4": {
- "x": 13,
- "y": 39,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "C4": {
- "x": 26,
- "y": 39,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "D4": {
- "x": 39,
- "y": 39,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "E4": {
- "x": 52,
- "y": 39,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "F4": {
- "x": 65,
- "y": 39,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
-
- "A5": {
- "x": 0,
- "y": 52,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "B5": {
- "x": 13,
- "y": 52,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "C5": {
- "x": 26,
- "y": 52,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "D5": {
- "x": 39,
- "y": 52,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "E5": {
- "x": 52,
- "y": 52,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "F5": {
- "x": 65,
- "y": 52,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
-
- "A6": {
- "x": 0,
- "y": 65,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "B6": {
- "x": 13,
- "y": 65,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "C6": {
- "x": 26,
- "y": 65,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "D6": {
- "x": 39,
- "y": 65,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "E6": {
- "x": 52,
- "y": 65,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "F6": {
- "x": 65,
- "y": 65,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
-
- "A7": {
- "x": 0,
- "y": 78,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "B7": {
- "x": 13,
- "y": 78,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "C7": {
- "x": 26,
- "y": 78,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "D7": {
- "x": 39,
- "y": 78,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "E7": {
- "x": 52,
- "y": 78,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "F7": {
- "x": 65,
- "y": 78,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
-
- "A8": {
- "x": 0,
- "y": 91,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "B8": {
- "x": 13,
- "y": 91,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "C8": {
- "x": 26,
- "y": 91,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "D8": {
- "x": 39,
- "y": 91,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "E8": {
- "x": 52,
- "y": 91,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- },
- "F8": {
- "x": 65,
- "y": 91,
- "z": 0,
- "depth": 30,
- "diameter": 11.5,
- "total-liquid-volume": 2000
- }
- }
- },
-
- "e-gelgol": {
- "origin-offset": {
- "x": 11.24,
- "y": 14.34
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 0,
- "depth": 2,
- "diameter": 1,
- "total-liquid-volume": 2
- }
- }
- },
-
- "5ml-3x4": {
- "origin-offset": {
- "x": 18,
- "y": 19
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 55,
- "diameter": 14,
- "total-liquid-volume": 50000
- },
- "B1": {
- "x": 25,
- "y": 0,
- "z": 0,
- "depth": 55,
- "diameter": 14,
- "total-liquid-volume": 50000
- },
- "C1": {
- "x": 50,
- "y": 0,
- "z": 0,
- "depth": 55,
- "diameter": 14,
- "total-liquid-volume": 50000
- },
- "A2": {
- "x": 0,
- "y": 30,
- "z": 0,
- "depth": 55,
- "diameter": 14,
- "total-liquid-volume": 50000
- },
- "B2": {
- "x": 25,
- "y": 30,
- "z": 0,
- "depth": 55,
- "diameter": 14,
- "total-liquid-volume": 50000
- },
- "C2": {
- "x": 50,
- "y": 30,
- "z": 0,
- "depth": 55,
- "diameter": 14,
- "total-liquid-volume": 50000
- },
- "A3": {
- "x": 0,
- "y": 60,
- "z": 0,
- "depth": 55,
- "diameter": 14,
- "total-liquid-volume": 50000
- },
- "B3": {
- "x": 25,
- "y": 60,
- "z": 0,
- "depth": 55,
- "diameter": 14,
- "total-liquid-volume": 50000
- },
- "C3": {
- "x": 50,
- "y": 60,
- "z": 0,
- "depth": 55,
- "diameter": 14,
- "total-liquid-volume": 50000
- },
- "A4": {
- "x": 0,
- "y": 90,
- "z": 0,
- "depth": 55,
- "diameter": 14,
- "total-liquid-volume": 50000
- },
- "B4": {
- "x": 25,
- "y": 90,
- "z": 0,
- "depth": 55,
- "diameter": 14,
- "total-liquid-volume": 50000
- },
- "C4": {
- "x": 50,
- "y": 90,
- "z": 0,
- "depth": 55,
- "diameter": 14,
- "total-liquid-volume": 50000
- }
- }
- },
-
- "small_vial_rack_16x45": {
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "B1": {
- "x": 40,
- "y": 0,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "C1": {
- "x": 80,
- "y": 0,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "D1": {
- "x": 120,
- "y": 0,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "A2": {
- "x": 0,
- "y": 20,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "B2": {
- "x": 40,
- "y": 20,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "C2": {
- "x": 80,
- "y": 20,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "D2": {
- "x": 120,
- "y": 20,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "A3": {
- "x": 0,
- "y": 40,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "B3": {
- "x": 40,
- "y": 40,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "C3": {
- "x": 80,
- "y": 40,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "D3": {
- "x": 120,
- "y": 40,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "A4": {
- "x": 0,
- "y": 60,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "B4": {
- "x": 40,
- "y": 60,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "C4": {
- "x": 80,
- "y": 60,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "D4": {
- "x": 120,
- "y": 60,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "A5": {
- "x": 0,
- "y": 80,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "B5": {
- "x": 40,
- "y": 80,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "C5": {
- "x": 80,
- "y": 80,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "D5": {
- "x": 120,
- "y": 80,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "A6": {
- "x": 0,
- "y": 100,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "B6": {
- "x": 40,
- "y": 100,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "C6": {
- "x": 80,
- "y": 100,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- },
- "D6": {
- "x": 120,
- "y": 100,
- "z": 0,
- "depth": 45,
- "diameter": 16,
- "total-liquid-volume": 10000
- }
- }
- },
-
- "opentrons-tuberack-15ml": {
- "origin-offset": {
- "x": 34.375,
- "y": 13.5
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "B1": {
- "x": 25,
- "y": 0,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "C1": {
- "x": 50,
- "y": 0,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "A2": {
- "x": 0,
- "y": 25,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "B2": {
- "x": 25,
- "y": 25,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "C2": {
- "x": 50,
- "y": 25,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "A3": {
- "x": 0,
- "y": 50,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "B3": {
- "x": 25,
- "y": 50,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "C3": {
- "x": 50,
- "y": 50,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "A4": {
- "x": 0,
- "y": 75,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "B4": {
- "x": 25,
- "y": 75,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "C4": {
- "x": 50,
- "y": 75,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "A5": {
- "x": 0,
- "y": 100,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "B5": {
- "x": 25,
- "y": 100,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 17,
- "total-liquid-volume": 15000
- },
- "C5": {
- "x": 50,
- "y": 100,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 17,
- "total-liquid-volume": 15000
- }
- }
- },
-
- "opentrons-tuberack-50ml": {
- "origin-offset": {
- "x": 39.875,
- "y": 37
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 6.95,
- "depth": 112.85,
- "diameter": 17,
- "total-liquid-volume": 50000
- },
- "B1": {
- "x": 35,
- "y": 0,
- "z": 6.95,
- "depth": 112.85,
- "diameter": 17,
- "total-liquid-volume": 50000
- },
- "A2": {
- "x": 0,
- "y": 35,
- "z": 6.95,
- "depth": 112.85,
- "diameter": 17,
- "total-liquid-volume": 50000
- },
- "B2": {
- "x": 35,
- "y": 35,
- "z": 6.95,
- "depth": 112.85,
- "diameter": 17,
- "total-liquid-volume": 50000
- },
- "A3": {
- "x": 0,
- "y": 70,
- "z": 6.95,
- "depth": 112.85,
- "diameter": 17,
- "total-liquid-volume": 50000
- },
- "B3": {
- "x": 35,
- "y": 70,
- "z": 6.95,
- "depth": 112.85,
- "diameter": 17,
- "total-liquid-volume": 50000
- }
- }
- },
-
- "opentrons-tuberack-15_50ml": {
- "origin-offset": {
- "x": 32.75,
- "y": 14.875
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 14.5,
- "total-liquid-volume": 15000
- },
- "B1": {
- "x": 25,
- "y": 0,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 14.5,
- "total-liquid-volume": 15000
- },
- "C1": {
- "x": 50,
- "y": 0,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 14.5,
- "total-liquid-volume": 15000
- },
- "A2": {
- "x": 0,
- "y": 25,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 14.5,
- "total-liquid-volume": 15000
- },
- "B2": {
- "x": 25,
- "y": 25,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 14.5,
- "total-liquid-volume": 15000
- },
- "C2": {
- "x": 50,
- "y": 25,
- "z": 6.78,
- "depth": 117.98,
- "diameter": 14.5,
- "total-liquid-volume": 15000
- },
- "A3": {
- "x": 18.25,
- "y": 57.5,
- "z": 6.95,
- "depth": 112.85,
- "diameter": 26.45,
- "total-liquid-volume": 50000
- },
- "B3": {
- "x": 53.25,
- "y": 57.5,
- "z": 6.95,
- "depth": 112.85,
- "diameter": 26.45,
- "total-liquid-volume": 50000
- },
- "A4": {
- "x": 18.25,
- "y": 92.5,
- "z": 6.95,
- "depth": 112.85,
- "diameter": 26.45,
- "total-liquid-volume": 50000
- },
- "B4": {
- "x": 53.25,
- "y": 92.5,
- "z": 6.95,
- "depth": 112.85,
- "diameter": 26.45,
- "total-liquid-volume": 50000
- }
- }
- },
-
- "opentrons-tuberack-2ml-eppendorf": {
- "origin-offset": {
- "x": 21.07,
- "y": 18.21
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B1": {
- "x": 19.28,
- "y": 0,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C1": {
- "x": 38.56,
- "y": 0,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D1": {
- "x": 57.84,
- "y": 0,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "A2": {
- "x": 0,
- "y": 19.89,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B2": {
- "x": 19.28,
- "y": 19.89,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C2": {
- "x": 38.56,
- "y": 19.89,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D2": {
- "x": 57.84,
- "y": 19.89,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "A3": {
- "x": 0,
- "y": 39.78,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B3": {
- "x": 19.28,
- "y": 39.78,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C3": {
- "x": 38.56,
- "y": 39.78,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D3": {
- "x": 57.84,
- "y": 39.78,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "A4": {
- "x": 0,
- "y": 59.67,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B4": {
- "x": 19.28,
- "y": 59.67,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C4": {
- "x": 38.56,
- "y": 59.67,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D4": {
- "x": 57.84,
- "y": 59.67,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "A5": {
- "x": 0,
- "y": 79.56,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B5": {
- "x": 19.28,
- "y": 79.56,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C5": {
- "x": 38.56,
- "y": 79.56,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D5": {
- "x": 57.84,
- "y": 79.56,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "A6": {
- "x": 0,
- "y": 99.45,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B6": {
- "x": 19.28,
- "y": 99.45,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C6": {
- "x": 38.56,
- "y": 99.45,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D6": {
- "x": 57.84,
- "y": 99.45,
- "z": 43.3,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- }
- }
- },
-
- "opentrons-tuberack-2ml-screwcap": {
- "origin-offset": {
- "x": 21.07,
- "y": 18.21
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "B1": {
- "x": 19.28,
- "y": 0,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "C1": {
- "x": 38.56,
- "y": 0,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "D1": {
- "x": 57.84,
- "y": 0,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "A2": {
- "x": 0,
- "y": 19.89,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "B2": {
- "x": 19.28,
- "y": 19.89,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "C2": {
- "x": 38.56,
- "y": 19.89,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "D2": {
- "x": 57.84,
- "y": 19.89,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "A3": {
- "x": 0,
- "y": 39.78,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "B3": {
- "x": 19.28,
- "y": 39.78,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "C3": {
- "x": 38.56,
- "y": 39.78,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "D3": {
- "x": 57.84,
- "y": 39.78,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "A4": {
- "x": 0,
- "y": 59.67,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "B4": {
- "x": 19.28,
- "y": 59.67,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "C4": {
- "x": 38.56,
- "y": 59.67,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "D4": {
- "x": 57.84,
- "y": 59.67,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "A5": {
- "x": 0,
- "y": 79.56,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "B5": {
- "x": 19.28,
- "y": 79.56,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "C5": {
- "x": 38.56,
- "y": 79.56,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "D5": {
- "x": 57.84,
- "y": 79.56,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "A6": {
- "x": 0,
- "y": 99.45,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "B6": {
- "x": 19.28,
- "y": 99.45,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "C6": {
- "x": 38.56,
- "y": 99.45,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- },
- "D6": {
- "x": 57.84,
- "y": 99.45,
- "z": 45.2,
- "depth": 42,
- "diameter": 8.5,
- "total-liquid-volume": 2000
- }
- }
- },
-
- "opentrons-tuberack-1.5ml-eppendorf": {
- "origin-offset": {
- "x": 21.07,
- "y": 18.21
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "B1": {
- "x": 19.28,
- "y": 0,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "C1": {
- "x": 38.56,
- "y": 0,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "D1": {
- "x": 57.84,
- "y": 0,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "A2": {
- "x": 0,
- "y": 19.89,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "B2": {
- "x": 19.28,
- "y": 19.89,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "C2": {
- "x": 38.56,
- "y": 19.89,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "D2": {
- "x": 57.84,
- "y": 19.89,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "A3": {
- "x": 0,
- "y": 39.78,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "B3": {
- "x": 19.28,
- "y": 39.78,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "C3": {
- "x": 38.56,
- "y": 39.78,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "D3": {
- "x": 57.84,
- "y": 39.78,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "A4": {
- "x": 0,
- "y": 59.67,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "B4": {
- "x": 19.28,
- "y": 59.67,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "C4": {
- "x": 38.56,
- "y": 59.67,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "D4": {
- "x": 57.84,
- "y": 59.67,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "A5": {
- "x": 0,
- "y": 79.56,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "B5": {
- "x": 19.28,
- "y": 79.56,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "C5": {
- "x": 38.56,
- "y": 79.56,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "D5": {
- "x": 57.84,
- "y": 79.56,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "A6": {
- "x": 0,
- "y": 99.45,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "B6": {
- "x": 19.28,
- "y": 99.45,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "C6": {
- "x": 38.56,
- "y": 99.45,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- },
- "D6": {
- "x": 57.84,
- "y": 99.45,
- "z": 43.3,
- "depth": 37.0,
- "diameter": 9,
- "total-liquid-volume": 1500
- }
- }
- },
-
- "opentrons-aluminum-block-2ml-eppendorf": {
- "origin-offset": {
- "x": 25.88,
- "y": 20.75
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B1": {
- "x": 17.25,
- "y": 0,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C1": {
- "x": 34.5,
- "y": 0,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D1": {
- "x": 51.75,
- "y": 0,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "A2": {
- "x": 0,
- "y": 17.25,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B2": {
- "x": 17.25,
- "y": 17.25,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C2": {
- "x": 34.5,
- "y": 17.25,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D2": {
- "x": 51.75,
- "y": 17.25,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "A3": {
- "x": 0,
- "y": 34.5,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B3": {
- "x": 17.25,
- "y": 34.5,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C3": {
- "x": 34.5,
- "y": 34.5,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D3": {
- "x": 51.75,
- "y": 34.5,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "A4": {
- "x": 0,
- "y": 51.75,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B4": {
- "x": 17.25,
- "y": 51.75,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C4": {
- "x": 34.5,
- "y": 51.75,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D4": {
- "x": 51.75,
- "y": 51.75,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "A5": {
- "x": 0,
- "y": 69,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B5": {
- "x": 17.25,
- "y": 69,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C5": {
- "x": 34.5,
- "y": 69,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D5": {
- "x": 51.75,
- "y": 69,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "A6": {
- "x": 0,
- "y": 86.25,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B6": {
- "x": 17.25,
- "y": 86.25,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C6": {
- "x": 34.5,
- "y": 86.25,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D6": {
- "x": 51.75,
- "y": 86.25,
- "z": 5.5,
- "depth": 38.5,
- "diameter": 9,
- "total-liquid-volume": 2000
- }
- }
- },
- "opentrons-aluminum-block-2ml-screwcap": {
- "origin-offset": {
- "x": 25.88,
- "y": 20.75
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B1": {
- "x": 17.25,
- "y": 0,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C1": {
- "x": 34.5,
- "y": 0,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D1": {
- "x": 51.75,
- "y": 0,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "A2": {
- "x": 0,
- "y": 17.25,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B2": {
- "x": 17.25,
- "y": 17.25,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C2": {
- "x": 34.5,
- "y": 17.25,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D2": {
- "x": 51.75,
- "y": 17.25,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "A3": {
- "x": 0,
- "y": 34.5,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B3": {
- "x": 17.25,
- "y": 34.5,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C3": {
- "x": 34.5,
- "y": 34.5,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D3": {
- "x": 51.75,
- "y": 34.5,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "A4": {
- "x": 0,
- "y": 51.75,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B4": {
- "x": 17.25,
- "y": 51.75,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C4": {
- "x": 34.5,
- "y": 51.75,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D4": {
- "x": 51.75,
- "y": 51.75,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "A5": {
- "x": 0,
- "y": 69,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B5": {
- "x": 17.25,
- "y": 69,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C5": {
- "x": 34.5,
- "y": 69,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D5": {
- "x": 51.75,
- "y": 69,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "A6": {
- "x": 0,
- "y": 86.25,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "B6": {
- "x": 17.25,
- "y": 86.25,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "C6": {
- "x": 34.5,
- "y": 86.25,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- },
- "D6": {
- "x": 51.75,
- "y": 86.25,
- "z": 6.5,
- "depth": 42,
- "diameter": 9,
- "total-liquid-volume": 2000
- }
- }
- },
- "opentrons-aluminum-block-96-PCR-plate": {
- "origin-offset": {
- "x": 17.25,
- "y": 13.38
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 7.44,
- "depth": 14.81,
- "diameter": 5.4,
- "total-liquid-volume": 200
- }
- }
- },
- "opentrons-aluminum-block-PCR-strips-200ul": {
- "origin-offset": {
- "x": 17.25,
- "y": 13.38
- },
- "locations": {
- "A1": {
- "x": 0,
- "y": 0,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B1": {
- "x": 9,
- "y": 0,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C1": {
- "x": 18,
- "y": 0,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D1": {
- "x": 27,
- "y": 0,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E1": {
- "x": 36,
- "y": 0,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F1": {
- "x": 45,
- "y": 0,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G1": {
- "x": 54,
- "y": 0,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H1": {
- "x": 63,
- "y": 0,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A2": {
- "x": 0,
- "y": 9,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B2": {
- "x": 9,
- "y": 9,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C2": {
- "x": 18,
- "y": 9,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D2": {
- "x": 27,
- "y": 9,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E2": {
- "x": 36,
- "y": 9,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F2": {
- "x": 45,
- "y": 9,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G2": {
- "x": 54,
- "y": 9,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H2": {
- "x": 63,
- "y": 9,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A3": {
- "x": 0,
- "y": 18,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B3": {
- "x": 9,
- "y": 18,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C3": {
- "x": 18,
- "y": 18,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D3": {
- "x": 27,
- "y": 18,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E3": {
- "x": 36,
- "y": 18,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F3": {
- "x": 45,
- "y": 18,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G3": {
- "x": 54,
- "y": 18,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H3": {
- "x": 63,
- "y": 18,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A4": {
- "x": 0,
- "y": 27,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B4": {
- "x": 9,
- "y": 27,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C4": {
- "x": 18,
- "y": 27,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D4": {
- "x": 27,
- "y": 27,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E4": {
- "x": 36,
- "y": 27,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F4": {
- "x": 45,
- "y": 27,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G4": {
- "x": 54,
- "y": 27,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H4": {
- "x": 63,
- "y": 27,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A5": {
- "x": 0,
- "y": 36,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B5": {
- "x": 9,
- "y": 36,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C5": {
- "x": 18,
- "y": 36,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D5": {
- "x": 27,
- "y": 36,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E5": {
- "x": 36,
- "y": 36,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F5": {
- "x": 45,
- "y": 36,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G5": {
- "x": 54,
- "y": 36,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H5": {
- "x": 63,
- "y": 36,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A6": {
- "x": 0,
- "y": 45,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B6": {
- "x": 9,
- "y": 45,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C6": {
- "x": 18,
- "y": 45,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D6": {
- "x": 27,
- "y": 45,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E6": {
- "x": 36,
- "y": 45,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F6": {
- "x": 45,
- "y": 45,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G6": {
- "x": 54,
- "y": 45,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H6": {
- "x": 63,
- "y": 45,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A7": {
- "x": 0,
- "y": 54,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B7": {
- "x": 9,
- "y": 54,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C7": {
- "x": 18,
- "y": 54,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D7": {
- "x": 27,
- "y": 54,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E7": {
- "x": 36,
- "y": 54,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F7": {
- "x": 45,
- "y": 54,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G7": {
- "x": 54,
- "y": 54,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H7": {
- "x": 63,
- "y": 54,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A8": {
- "x": 0,
- "y": 63,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B8": {
- "x": 9,
- "y": 63,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C8": {
- "x": 18,
- "y": 63,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D8": {
- "x": 27,
- "y": 63,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E8": {
- "x": 36,
- "y": 63,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F8": {
- "x": 45,
- "y": 63,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G8": {
- "x": 54,
- "y": 63,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H8": {
- "x": 63,
- "y": 63,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A9": {
- "x": 0,
- "y": 72,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B9": {
- "x": 9,
- "y": 72,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C9": {
- "x": 18,
- "y": 72,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D9": {
- "x": 27,
- "y": 72,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E9": {
- "x": 36,
- "y": 72,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F9": {
- "x": 45,
- "y": 72,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G9": {
- "x": 54,
- "y": 72,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H9": {
- "x": 63,
- "y": 72,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A10": {
- "x": 0,
- "y": 81,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B10": {
- "x": 9,
- "y": 81,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C10": {
- "x": 18,
- "y": 81,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D10": {
- "x": 27,
- "y": 81,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E10": {
- "x": 36,
- "y": 81,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F10": {
- "x": 45,
- "y": 81,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G10": {
- "x": 54,
- "y": 81,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H10": {
- "x": 63,
- "y": 81,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A11": {
- "x": 0,
- "y": 90,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B11": {
- "x": 9,
- "y": 90,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C11": {
- "x": 18,
- "y": 90,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D11": {
- "x": 27,
- "y": 90,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E11": {
- "x": 36,
- "y": 90,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F11": {
- "x": 45,
- "y": 90,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G11": {
- "x": 54,
- "y": 90,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H11": {
- "x": 63,
- "y": 90,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "A12": {
- "x": 0,
- "y": 99,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "B12": {
- "x": 9,
- "y": 99,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "C12": {
- "x": 18,
- "y": 99,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "D12": {
- "x": 27,
- "y": 99,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "E12": {
- "x": 36,
- "y": 99,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "F12": {
- "x": 45,
- "y": 99,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "G12": {
- "x": 54,
- "y": 99,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- },
- "H12": {
- "x": 63,
- "y": 99,
- "z": 4.19,
- "depth": 20.30,
- "diameter": 5.4,
- "total-liquid-volume": 300
- }
- }
- }
- }
-}
diff --git a/api/src/opentrons/config/defaults_ot3.py b/api/src/opentrons/config/defaults_ot3.py
index 5ac94affe45..0b2499feaab 100644
--- a/api/src/opentrons/config/defaults_ot3.py
+++ b/api/src/opentrons/config/defaults_ot3.py
@@ -1,8 +1,8 @@
-from typing import Any, Dict, cast, List, Iterable, Tuple
+from typing import Any, Dict, cast, List, Iterable, Tuple, Optional
from typing_extensions import Final
from dataclasses import asdict
-from opentrons.hardware_control.types import OT3AxisKind
+from opentrons.hardware_control.types import OT3AxisKind, InstrumentProbeType
from .types import (
OT3Config,
ByGantryLoad,
@@ -15,8 +15,10 @@
LiquidProbeSettings,
ZSenseSettings,
EdgeSenseSettings,
+ OutputOptions,
)
+
DEFAULT_PIPETTE_OFFSET = [0.0, 0.0, 0.0]
DEFAULT_MODULE_OFFSET = [0.0, 0.0, 0.0]
@@ -28,11 +30,11 @@
plunger_speed=5,
sensor_threshold_pascals=40,
expected_liquid_height=110,
- log_pressure=True,
+ output_option=OutputOptions.stream_to_csv,
aspirate_while_sensing=False,
auto_zero_sensor=True,
num_baseline_reads=10,
- data_file="/var/pressure_sensor_data.csv",
+ data_files={InstrumentProbeType.PRIMARY: "/data/pressure_sensor_data.csv"},
)
DEFAULT_CALIBRATION_SETTINGS: Final[OT3CalibrationSettings] = OT3CalibrationSettings(
@@ -75,7 +77,6 @@
DEFAULT_LEFT_MOUNT_OFFSET: Final[Offset] = (-13.5, -60.5, 255.675)
DEFAULT_RIGHT_MOUNT_OFFSET: Final[Offset] = (40.5, -60.5, 255.675)
DEFAULT_GRIPPER_MOUNT_OFFSET: Final[Offset] = (84.55, -12.75, 93.85)
-DEFAULT_Z_RETRACT_DISTANCE: Final = 2
DEFAULT_SAFE_HOME_DISTANCE: Final = 5
DEFAULT_CALIBRATION_AXIS_MAX_SPEED: Final = 30
@@ -193,6 +194,49 @@
)
+def _build_output_option_with_default(
+ from_conf: Any, default: OutputOptions
+) -> OutputOptions:
+ if from_conf is None:
+ return default
+ else:
+ if isinstance(from_conf, OutputOptions):
+ return from_conf
+ else:
+ try:
+ enumval = OutputOptions[from_conf]
+ except KeyError: # not an enum entry
+ return default
+ else:
+ return enumval
+
+
+def _build_log_files_with_default(
+ from_conf: Any,
+ default: Optional[Dict[InstrumentProbeType, str]],
+) -> Optional[Dict[InstrumentProbeType, str]]:
+ print(f"from_conf {from_conf} default {default}")
+ if not isinstance(from_conf, dict):
+ if default is None:
+ return None
+ else:
+ return {k: v for k, v in default.items()}
+ else:
+ validated: Dict[InstrumentProbeType, str] = {}
+ for k, v in from_conf.items():
+ if isinstance(k, InstrumentProbeType):
+ validated[k] = v
+ else:
+ try:
+ enumval = InstrumentProbeType[k]
+ except KeyError: # not an enum entry
+ pass
+ else:
+ validated[enumval] = v
+ print(f"result {validated}")
+ return validated
+
+
def _build_dict_with_default(
from_conf: Any,
default: Dict[OT3AxisKind, float],
@@ -277,6 +321,17 @@ def _build_default_cap_pass(
def _build_default_liquid_probe(
from_conf: Any, default: LiquidProbeSettings
) -> LiquidProbeSettings:
+ output_option = _build_output_option_with_default(
+ from_conf.get("output_option", None), default.output_option
+ )
+ data_files: Optional[Dict[InstrumentProbeType, str]] = None
+ if (
+ output_option is OutputOptions.sync_buffer_to_csv
+ or output_option is OutputOptions.stream_to_csv
+ ):
+ data_files = _build_log_files_with_default(
+ from_conf.get("data_files", {}), default.data_files
+ )
return LiquidProbeSettings(
starting_mount_height=from_conf.get(
"starting_mount_height", default.starting_mount_height
@@ -291,7 +346,7 @@ def _build_default_liquid_probe(
expected_liquid_height=from_conf.get(
"expected_liquid_height", default.expected_liquid_height
),
- log_pressure=from_conf.get("log_pressure", default.log_pressure),
+ output_option=from_conf.get("output_option", default.output_option),
aspirate_while_sensing=from_conf.get(
"aspirate_while_sensing", default.aspirate_while_sensing
),
@@ -301,7 +356,7 @@ def _build_default_liquid_probe(
num_baseline_reads=from_conf.get(
"num_baseline_reads", default.num_baseline_reads
),
- data_file=from_conf.get("data_file", default.data_file),
+ data_files=data_files,
)
@@ -381,9 +436,6 @@ def build_with_defaults(robot_settings: Dict[str, Any]) -> OT3Config:
DEFAULT_RUN_CURRENT,
),
),
- z_retract_distance=robot_settings.get(
- "z_retract_distance", DEFAULT_Z_RETRACT_DISTANCE
- ),
safe_home_distance=robot_settings.get(
"safe_home_distance", DEFAULT_SAFE_HOME_DISTANCE
),
@@ -414,7 +466,7 @@ def build_with_defaults(robot_settings: Dict[str, Any]) -> OT3Config:
def serialize(config: OT3Config) -> Dict[str, Any]:
def _build_dict(pairs: Iterable[Tuple[Any, Any]]) -> Dict[str, Any]:
def _normalize_key(key: Any) -> Any:
- if isinstance(key, OT3AxisKind):
+ if isinstance(key, OT3AxisKind) or isinstance(key, InstrumentProbeType):
return key.name
return key
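
Note on the two new helpers above: both follow the same coerce-or-fall-back shape — accept whatever the on-disk settings happened to contain (an enum member, a member name as a string, or junk) and return a validated value, substituting the default on failure. A minimal self-contained sketch of that pattern, using a stand-in enum rather than the real OutputOptions/InstrumentProbeType types:

from enum import Enum
from typing import Any


class Color(Enum):  # stand-in for OutputOptions / InstrumentProbeType
    red = 1
    blue = 2


def coerce_enum_with_default(raw: Any, default: Color) -> Color:
    """Accept an enum member or its name; otherwise fall back to the default."""
    if isinstance(raw, Color):
        return raw
    try:
        return Color[raw]  # lookup by member name, e.g. "red"
    except (KeyError, TypeError):  # unknown name or non-string input
        return default


assert coerce_enum_with_default("blue", Color.red) is Color.blue
assert coerce_enum_with_default(Color.red, Color.blue) is Color.red
assert coerce_enum_with_default("magenta", Color.red) is Color.red
assert coerce_enum_with_default(None, Color.red) is Color.red
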
diff --git a/api/src/opentrons/config/feature_flags.py b/api/src/opentrons/config/feature_flags.py
index 5bf289a49d2..719c0dc43f3 100644
--- a/api/src/opentrons/config/feature_flags.py
+++ b/api/src/opentrons/config/feature_flags.py
@@ -24,12 +24,6 @@ def enable_door_safety_switch(robot_type: RobotTypeEnum) -> bool:
return advs.get_setting_with_env_overload("enableDoorSafetySwitch", robot_type)
-def disable_fast_protocol_upload() -> bool:
- return advs.get_setting_with_env_overload(
- "disableFastProtocolUpload", RobotTypeEnum.FLEX
- )
-
-
def enable_ot3_hardware_controller() -> bool:
"""Get whether to use the OT-3 hardware controller."""
@@ -65,15 +59,22 @@ def status_bar_enabled() -> bool:
)
-def tip_presence_detection_enabled() -> bool:
- """Whether tip presence is enabled on the Flex"""
- return not advs.get_setting_with_env_overload(
- "disableTipPresenceDetection", RobotTypeEnum.FLEX
- )
-
-
def require_estop() -> bool:
"""Whether the OT3 should allow gantry movements with no Estop plugged in."""
return not advs.get_setting_with_env_overload(
"estopNotRequired", RobotTypeEnum.FLEX
)
+
+
+def enable_error_recovery_experiments() -> bool:
+ return advs.get_setting_with_env_overload(
+ "enableErrorRecoveryExperiments", RobotTypeEnum.FLEX
+ )
+
+
+def enable_performance_metrics(robot_type: RobotTypeEnum) -> bool:
+ return advs.get_setting_with_env_overload("enablePerformanceMetrics", robot_type)
+
+
+def oem_mode_enabled() -> bool:
+ return advs.get_setting_with_env_overload("enableOEMMode", RobotTypeEnum.FLEX)
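
The three flags added here follow the existing pattern: read an advanced setting for the given robot type and let an environment variable override it. As a rough standalone approximation of what advs.get_setting_with_env_overload does — the OT_API_FF_ prefix and the dict-based store below are assumptions for illustration, not the real implementation in opentrons.config.advanced_settings:

import os
from typing import Dict


def setting_with_env_overload_sketch(setting: str, stored: Dict[str, bool]) -> bool:
    # hypothetical env var name; the real helper also takes a RobotTypeEnum
    env_name = f"OT_API_FF_{setting}"
    if env_name in os.environ:
        return os.environ[env_name].lower() in ("1", "true", "yes", "on")
    return stored.get(setting, False)


os.environ["OT_API_FF_enableOEMMode"] = "true"
assert setting_with_env_overload_sketch("enableOEMMode", {}) is True
assert setting_with_env_overload_sketch("enablePerformanceMetrics", {}) is False
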
diff --git a/api/src/opentrons/config/reset.py b/api/src/opentrons/config/reset.py
index 2e71c69aa45..eac5cf26982 100644
--- a/api/src/opentrons/config/reset.py
+++ b/api/src/opentrons/config/reset.py
@@ -35,6 +35,7 @@ class ResetOptionId(str, Enum):
boot_scripts = "bootScripts"
deck_calibration = "deckCalibration"
+ deck_configuration = "deckConfiguration"
pipette_offset = "pipetteOffsetCalibrations"
gripper_offset = "gripperOffsetCalibrations"
tip_length_calibrations = "tipLengthCalibrations"
@@ -50,6 +51,7 @@ class ResetOptionId(str, Enum):
ResetOptionId.pipette_offset,
ResetOptionId.tip_length_calibrations,
ResetOptionId.runs_history,
+ ResetOptionId.deck_configuration,
ResetOptionId.authorized_keys,
]
_FLEX_RESET_OPTIONS = [
@@ -58,6 +60,7 @@ class ResetOptionId(str, Enum):
ResetOptionId.gripper_offset,
ResetOptionId.runs_history,
ResetOptionId.on_device_display,
+ ResetOptionId.deck_configuration,
ResetOptionId.module_calibration,
ResetOptionId.authorized_keys,
]
@@ -82,8 +85,8 @@ class ResetOptionId(str, Enum):
name="Tip Length Calibrations",
description="Clear tip length calibrations (will also clear pipette offset)",
),
- # TODO(mm, 2022-05-23): runs_history and on_device_display are robot-server things,
- # and are not concepts known to this package (the `opentrons` library).
+ # TODO(mm, 2022-05-23): runs_history, on_device_display, and deck_configuration are
+ # robot-server things, and are not concepts known to this package (the `opentrons` library).
# This option is defined here only as a convenience for robot-server.
# Find a way to split things up and define this in robot-server instead.
ResetOptionId.runs_history: CommonResetOption(
@@ -94,6 +97,10 @@ class ResetOptionId(str, Enum):
name="On-Device Display Configuration",
description="Clear the configuration of the on-device display (touchscreen)",
),
+ ResetOptionId.deck_configuration: CommonResetOption(
+ name="Deck Configuration",
+ description="Clear deck configuration",
+ ),
ResetOptionId.module_calibration: CommonResetOption(
name="Module Calibrations", description="Clear module offset calibrations"
),
diff --git a/api/src/opentrons/config/robot_configs.py b/api/src/opentrons/config/robot_configs.py
index d30109dc697..bcb6d6076da 100755
--- a/api/src/opentrons/config/robot_configs.py
+++ b/api/src/opentrons/config/robot_configs.py
@@ -148,7 +148,7 @@ def _load_json(filename: Union[str, Path]) -> Dict[str, Any]:
log.warning("{0} not found. Loading defaults".format(filename))
res = {}
except json.decoder.JSONDecodeError:
- log.warning("{0} is corrupt. Loading defaults".format(filename))
+ log.warning("{0} is corrupt. Loading defaults".format(filename), exc_info=True)
res = {}
return cast(Dict[str, Any], res)
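
The only functional change here is exc_info=True, which tells the logging module to append the active exception's traceback to the warning, so a corrupt settings file reports where the JSON parse actually failed instead of just the filename. Standalone illustration, not Opentrons-specific:

import json
import logging

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger(__name__)

try:
    json.loads("{not valid json")
except json.decoder.JSONDecodeError:
    # exc_info=True appends the traceback, including the JSONDecodeError
    # message with the line/column where parsing failed
    log.warning("settings file is corrupt. Loading defaults", exc_info=True)
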
diff --git a/api/src/opentrons/config/types.py b/api/src/opentrons/config/types.py
index 9e4076e1ee0..f13d5a5e6e3 100644
--- a/api/src/opentrons/config/types.py
+++ b/api/src/opentrons/config/types.py
@@ -1,8 +1,8 @@
from enum import Enum
from dataclasses import dataclass, asdict, fields
-from typing import Dict, Tuple, TypeVar, Generic, List, cast
+from typing import Dict, Tuple, TypeVar, Generic, List, cast, Optional
from typing_extensions import TypedDict, Literal
-from opentrons.hardware_control.types import OT3AxisKind
+from opentrons.hardware_control.types import OT3AxisKind, InstrumentProbeType
class AxisDict(TypedDict):
@@ -116,6 +116,16 @@ class ZSenseSettings:
pass_settings: CapacitivePassSettings
+# int-valued enum so it can be json serializable
+class OutputOptions(int, Enum):
+ """Specifies where we should report sensor data to during a sensor pass."""
+
+ stream_to_csv = 0x1
+ sync_buffer_to_csv = 0x2
+ can_bus_only = 0x4
+ sync_only = 0x8
+
+
@dataclass
class LiquidProbeSettings:
starting_mount_height: float
@@ -125,11 +135,11 @@ class LiquidProbeSettings:
plunger_speed: float
sensor_threshold_pascals: float
expected_liquid_height: float
- log_pressure: bool
+ output_option: OutputOptions
aspirate_while_sensing: bool
auto_zero_sensor: bool
num_baseline_reads: int
- data_file: str
+ data_files: Optional[Dict[InstrumentProbeType, str]]
@dataclass(frozen=True)
@@ -174,7 +184,6 @@ class OT3Config:
log_level: str
motion_settings: OT3MotionSettings
current_settings: OT3CurrentSettings
- z_retract_distance: float
safe_home_distance: float
deck_transform: OT3Transform
carriage_offset: Offset
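
OutputOptions gives its members power-of-two values, but because it subclasses int/Enum rather than enum.Flag, members are single values compared with `is`/`in` (as _build_default_liquid_probe does in defaults_ot3.py above) rather than OR'd together. A small sketch of that distinction, assuming combined destinations are not needed today:

from enum import Enum, Flag


class OutputOptions(int, Enum):  # copied from the definition above
    stream_to_csv = 0x1
    sync_buffer_to_csv = 0x2
    can_bus_only = 0x4
    sync_only = 0x8


selected = OutputOptions.sync_buffer_to_csv
writes_csv = selected in (OutputOptions.stream_to_csv, OutputOptions.sync_buffer_to_csv)
assert writes_csv


# If combined destinations were ever wanted, enum.Flag members can be OR'd:
class OutputFlags(Flag):
    stream_to_csv = 0x1
    sync_buffer_to_csv = 0x2
    can_bus_only = 0x4


combined = OutputFlags.stream_to_csv | OutputFlags.can_bus_only
assert combined & OutputFlags.can_bus_only
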
diff --git a/api/src/opentrons/drivers/asyncio/communication/async_serial.py b/api/src/opentrons/drivers/asyncio/communication/async_serial.py
index 9910b73215f..8d2db0ddda7 100644
--- a/api/src/opentrons/drivers/asyncio/communication/async_serial.py
+++ b/api/src/opentrons/drivers/asyncio/communication/async_serial.py
@@ -7,7 +7,7 @@
from typing import Optional, AsyncGenerator, Union
from typing_extensions import Literal
-from serial import Serial, serial_for_url # type: ignore[import]
+from serial import Serial, serial_for_url # type: ignore[import-untyped]
TimeoutProperties = Union[Literal["write_timeout"], Literal["timeout"]]
diff --git a/api/src/opentrons/drivers/heater_shaker/simulator.py b/api/src/opentrons/drivers/heater_shaker/simulator.py
index ae90bc33027..8844d069cfa 100644
--- a/api/src/opentrons/drivers/heater_shaker/simulator.py
+++ b/api/src/opentrons/drivers/heater_shaker/simulator.py
@@ -1,4 +1,4 @@
-from typing import Dict
+from typing import Dict, Optional
from opentrons.util.async_helpers import ensure_yield
from opentrons.drivers.heater_shaker.abstract import AbstractHeaterShakerDriver
from opentrons.drivers.types import Temperature, RPM, HeaterShakerLabwareLatchStatus
@@ -7,12 +7,13 @@
class SimulatingDriver(AbstractHeaterShakerDriver):
DEFAULT_TEMP = 23
- def __init__(self) -> None:
+ def __init__(self, serial_number: Optional[str] = None) -> None:
self._labware_latch_state = HeaterShakerLabwareLatchStatus.IDLE_UNKNOWN
self._current_temperature = self.DEFAULT_TEMP
self._temperature = Temperature(current=self.DEFAULT_TEMP, target=None)
self._rpm = RPM(current=0, target=None)
self._homing_status = True
+ self._serial_number = serial_number
@ensure_yield
async def connect(self) -> None:
@@ -83,7 +84,7 @@ async def deactivate(self) -> None:
@ensure_yield
async def get_device_info(self) -> Dict[str, str]:
return {
- "serial": "dummySerialHS",
+ "serial": self._serial_number if self._serial_number else "dummySerialHS",
"model": "dummyModelHS",
"version": "dummyVersionHS",
}
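
The same optional-serial pattern is applied to every simulating module driver in this diff (heater/shaker here; mag deck, temp deck, and thermocycler below): take an optional serial_number at construction and fall back to the old dummy value when none is provided. Condensed stand-alone sketch of the pattern:

from typing import Dict, Optional


class SimulatingModuleDriver:
    """Stand-in showing the serial-number fallback; not a real driver."""

    def __init__(self, serial_number: Optional[str] = None) -> None:
        self._serial_number = serial_number

    def get_device_info(self) -> Dict[str, str]:
        return {
            # use the configured serial when provided, else the legacy dummy value
            "serial": self._serial_number if self._serial_number else "dummySerial",
            "model": "dummyModel",
            "version": "dummyVersion",
        }


assert SimulatingModuleDriver("HS20240101A01").get_device_info()["serial"] == "HS20240101A01"
assert SimulatingModuleDriver().get_device_info()["serial"] == "dummySerial"
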
diff --git a/api/src/opentrons/drivers/mag_deck/simulator.py b/api/src/opentrons/drivers/mag_deck/simulator.py
index 1b8bc545bf4..303711ce6c2 100644
--- a/api/src/opentrons/drivers/mag_deck/simulator.py
+++ b/api/src/opentrons/drivers/mag_deck/simulator.py
@@ -11,9 +11,12 @@
class SimulatingDriver(AbstractMagDeckDriver):
- def __init__(self, sim_model: Optional[str] = None) -> None:
+ def __init__(
+ self, sim_model: Optional[str] = None, serial_number: Optional[str] = None
+ ) -> None:
self._height = 0.0
self._model = MAG_DECK_MODELS[sim_model] if sim_model else "mag_deck_v1.1"
+ self._serial_number = serial_number
@ensure_yield
async def probe_plate(self) -> None:
@@ -30,7 +33,7 @@ async def move(self, location: float) -> None:
@ensure_yield
async def get_device_info(self) -> Dict[str, str]:
return {
- "serial": "dummySerialMD",
+ "serial": self._serial_number if self._serial_number else "dummySerialMD",
"model": self._model,
"version": "dummyVersionMD",
}
diff --git a/api/src/opentrons/drivers/rpi_drivers/gpio.py b/api/src/opentrons/drivers/rpi_drivers/gpio.py
index 69eb2d49b41..d692fa1f795 100755
--- a/api/src/opentrons/drivers/rpi_drivers/gpio.py
+++ b/api/src/opentrons/drivers/rpi_drivers/gpio.py
@@ -7,7 +7,7 @@
from . import RevisionPinsError
from .types import gpio_group, PinDir, GPIOPin
-import gpiod # type: ignore[import]
+import gpiod # type: ignore[import-not-found]
"""
Raspberry Pi GPIO control module
diff --git a/api/src/opentrons/drivers/rpi_drivers/interfaces.py b/api/src/opentrons/drivers/rpi_drivers/interfaces.py
index 3923b250a27..f3986ae78d7 100644
--- a/api/src/opentrons/drivers/rpi_drivers/interfaces.py
+++ b/api/src/opentrons/drivers/rpi_drivers/interfaces.py
@@ -1,12 +1,15 @@
-from typing import List
+from typing import List, Union
from typing_extensions import Protocol
-from opentrons.hardware_control.modules.types import ModuleAtPort
+from opentrons.hardware_control.modules.types import (
+ ModuleAtPort,
+ SimulatingModuleAtPort,
+)
class USBDriverInterface(Protocol):
def match_virtual_ports(
self,
- virtual_port: List[ModuleAtPort],
- ) -> List[ModuleAtPort]:
+ virtual_port: Union[List[ModuleAtPort], List[SimulatingModuleAtPort]],
+ ) -> Union[List[ModuleAtPort], List[SimulatingModuleAtPort]]:
...
diff --git a/api/src/opentrons/drivers/rpi_drivers/usb.py b/api/src/opentrons/drivers/rpi_drivers/usb.py
index 499284368e0..04ee5496c4a 100644
--- a/api/src/opentrons/drivers/rpi_drivers/usb.py
+++ b/api/src/opentrons/drivers/rpi_drivers/usb.py
@@ -8,9 +8,12 @@
import subprocess
import re
import os
-from typing import List
+from typing import List, Union
-from opentrons.hardware_control.modules.types import ModuleAtPort
+from opentrons.hardware_control.modules.types import (
+ ModuleAtPort,
+ SimulatingModuleAtPort,
+)
from opentrons.hardware_control.types import BoardRevision
from .interfaces import USBDriverInterface
@@ -79,8 +82,8 @@ def _read_usb_bus(self) -> List[USBPort]:
def match_virtual_ports(
self,
- virtual_ports: List[ModuleAtPort],
- ) -> List[ModuleAtPort]:
+ virtual_ports: Union[List[ModuleAtPort], List[SimulatingModuleAtPort]],
+ ) -> Union[List[ModuleAtPort], List[SimulatingModuleAtPort]]:
"""
Match Virtual Ports
@@ -89,7 +92,7 @@ def match_virtual_ports(
the physical usb port information.
The virtual port path looks something like:
dev/ot_module_[MODULE NAME]
- :param virtual_ports: A list of ModuleAtPort objects
+ :param virtual_ports: A list of ModuleAtPort or SimulatingModuleAtPort objects
that hold the name and virtual port of each module
connected to the robot.
diff --git a/api/src/opentrons/drivers/rpi_drivers/usb_simulator.py b/api/src/opentrons/drivers/rpi_drivers/usb_simulator.py
index d3931c00fdd..be7cec2e48e 100644
--- a/api/src/opentrons/drivers/rpi_drivers/usb_simulator.py
+++ b/api/src/opentrons/drivers/rpi_drivers/usb_simulator.py
@@ -4,15 +4,19 @@
A class to convert info from the usb bus into a
more readable format.
"""
-from typing import List
+from typing import List, Union
-from opentrons.hardware_control.modules.types import ModuleAtPort
+from opentrons.hardware_control.modules.types import (
+ ModuleAtPort,
+ SimulatingModuleAtPort,
+)
from .interfaces import USBDriverInterface
class USBBusSimulator(USBDriverInterface):
def match_virtual_ports(
- self, virtual_port: List[ModuleAtPort]
- ) -> List[ModuleAtPort]:
+ self,
+ virtual_port: Union[List[ModuleAtPort], List[SimulatingModuleAtPort]],
+ ) -> Union[List[ModuleAtPort], List[SimulatingModuleAtPort]]:
return virtual_port
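
match_virtual_ports now accepts either a list of ModuleAtPort or a list of SimulatingModuleAtPort and hands back the same kind of list. Side note, not what the code above does: when the return type should track the argument type exactly, a constrained TypeVar can express that more tightly than a Union of the two list types. Sketch with stand-in classes:

from typing import List, TypeVar


class ModuleAtPort:  # stand-ins for the real dataclasses
    ...


class SimulatingModuleAtPort:
    ...


PortList = TypeVar("PortList", List[ModuleAtPort], List[SimulatingModuleAtPort])


def match_virtual_ports(virtual_ports: PortList) -> PortList:
    # a simulator just echoes its input, like USBBusSimulator above
    return virtual_ports
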
diff --git a/api/src/opentrons/drivers/serial_communication.py b/api/src/opentrons/drivers/serial_communication.py
index 854921c35a6..9e2ee465504 100755
--- a/api/src/opentrons/drivers/serial_communication.py
+++ b/api/src/opentrons/drivers/serial_communication.py
@@ -1,12 +1,12 @@
from typing import List, Optional, Iterator
-import serial # type: ignore[import]
+import serial # type: ignore[import-untyped]
from serial import Serial
-from serial.tools import list_ports # type: ignore[import]
+from serial.tools import list_ports # type: ignore[import-untyped]
import contextlib
import logging
-from serial.tools.list_ports_common import ListPortInfo # type: ignore[import]
+from serial.tools.list_ports_common import ListPortInfo # type: ignore[import-untyped]
log = logging.getLogger(__name__)
diff --git a/api/src/opentrons/drivers/smoothie_drivers/driver_3_0.py b/api/src/opentrons/drivers/smoothie_drivers/driver_3_0.py
index 41bd554682c..c43f8d3f13d 100755
--- a/api/src/opentrons/drivers/smoothie_drivers/driver_3_0.py
+++ b/api/src/opentrons/drivers/smoothie_drivers/driver_3_0.py
@@ -18,7 +18,7 @@
from math import isclose
from opentrons.drivers.serial_communication import get_ports_by_name
-from serial.serialutil import SerialException # type: ignore[import]
+from serial.serialutil import SerialException # type: ignore[import-untyped]
from opentrons.drivers.smoothie_drivers.connection import SmoothieConnection
from opentrons.drivers.smoothie_drivers.constants import (
@@ -1667,7 +1667,7 @@ async def unstick_axes(
self.push_active_current()
self.set_active_current(
{
- ax: self._config.high_current["default"][ax] # type: ignore[misc]
+ ax: self._config.high_current["default"][ax] # type: ignore[literal-required]
for ax in axes
}
)
@@ -1848,7 +1848,7 @@ async def hard_halt(self) -> None:
await asyncio.sleep(0.25)
self.run_flag.set()
- async def update_firmware( # noqa: C901
+ async def update_firmware(
self,
filename: str,
loop: Optional[asyncio.AbstractEventLoop] = None,
@@ -1896,8 +1896,8 @@ async def update_firmware( # noqa: C901
"stdout": asyncio.subprocess.PIPE,
"stderr": asyncio.subprocess.PIPE,
}
- if loop:
- kwargs["loop"] = loop
+ # if loop:
+ # kwargs["loop"] = loop
log.info(update_cmd)
before = time()
proc = await asyncio.create_subprocess_shell(update_cmd, **kwargs)
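
Context for commenting out the loop kwarg: the explicit loop parameter was deprecated across asyncio's high-level helpers in Python 3.8 and removed in 3.10, so create_subprocess_shell always uses the running loop now. A trimmed sketch of the call as it effectively runs today (update_cmd is a placeholder, not the real smoothie update command):

import asyncio


async def run_update(update_cmd: str) -> int:
    # no loop= argument: the helper picks up the running event loop itself
    proc = await asyncio.create_subprocess_shell(
        update_cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    await proc.communicate()
    return proc.returncode or 0


# example usage with a harmless placeholder command:
# asyncio.run(run_update("echo pretend-firmware-update"))
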
diff --git a/api/src/opentrons/drivers/smoothie_drivers/simulator.py b/api/src/opentrons/drivers/smoothie_drivers/simulator.py
index ad1197f4aa5..4db3c28adf7 100644
--- a/api/src/opentrons/drivers/smoothie_drivers/simulator.py
+++ b/api/src/opentrons/drivers/smoothie_drivers/simulator.py
@@ -57,11 +57,11 @@ async def update_pipette_config(
- endstop debounce M365.2 (NOT for zprobe debounce)
- retract from endstop distance M365.3
"""
- pass
+ return {}
@property
def current(self) -> Dict[str, float]:
- pass
+ return {}
@property
def speed(self) -> None:
diff --git a/api/src/opentrons/drivers/temp_deck/simulator.py b/api/src/opentrons/drivers/temp_deck/simulator.py
index efce88ea234..09a4f791e01 100644
--- a/api/src/opentrons/drivers/temp_deck/simulator.py
+++ b/api/src/opentrons/drivers/temp_deck/simulator.py
@@ -11,10 +11,13 @@
class SimulatingDriver(AbstractTempDeckDriver):
- def __init__(self, sim_model: Optional[str] = None):
+ def __init__(
+ self, sim_model: Optional[str] = None, serial_number: Optional[str] = None
+ ):
self._temp = Temperature(target=None, current=0)
self._port: Optional[str] = None
self._model = TEMP_DECK_MODELS[sim_model] if sim_model else "temp_deck_v1.1"
+ self._serial_number = serial_number
@ensure_yield
async def set_temperature(self, celsius: float) -> None:
@@ -48,7 +51,7 @@ async def enter_programming_mode(self) -> None:
@ensure_yield
async def get_device_info(self) -> Dict[str, str]:
return {
- "serial": "dummySerialTD",
+ "serial": self._serial_number if self._serial_number else "dummySerialTD",
"model": self._model,
"version": "dummyVersionTD",
}
diff --git a/api/src/opentrons/drivers/thermocycler/simulator.py b/api/src/opentrons/drivers/thermocycler/simulator.py
index 4a92bb12587..302391a988d 100644
--- a/api/src/opentrons/drivers/thermocycler/simulator.py
+++ b/api/src/opentrons/drivers/thermocycler/simulator.py
@@ -10,7 +10,9 @@
class SimulatingDriver(AbstractThermocyclerDriver):
DEFAULT_TEMP = 23
- def __init__(self, model: Optional[str] = None) -> None:
+ def __init__(
+ self, model: Optional[str] = None, serial_number: Optional[str] = None
+ ) -> None:
self._ramp_rate: Optional[float] = None
self._lid_status = ThermocyclerLidStatus.OPEN
self._lid_temperature = Temperature(current=self.DEFAULT_TEMP, target=None)
@@ -18,6 +20,7 @@ def __init__(self, model: Optional[str] = None) -> None:
current=self.DEFAULT_TEMP, target=None, hold=None
)
self._model = model if model else "thermocyclerModuleV1"
+ self._serial_number = serial_number
def model(self) -> str:
return self._model
@@ -103,7 +106,7 @@ async def deactivate_all(self) -> None:
@ensure_yield
async def get_device_info(self) -> Dict[str, str]:
return {
- "serial": "dummySerialTC",
+ "serial": self._serial_number if self._serial_number else "dummySerialTC",
"model": "dummyModelTC",
"version": "dummyVersionTC",
}
diff --git a/api/src/opentrons/execute.py b/api/src/opentrons/execute.py
index bd450db8086..e851d8a44f0 100644
--- a/api/src/opentrons/execute.py
+++ b/api/src/opentrons/execute.py
@@ -28,19 +28,20 @@
from opentrons import protocol_api, __version__, should_use_ot3
-from opentrons.commands import types as command_types
+from opentrons.legacy_commands import types as command_types
from opentrons.hardware_control import (
API as OT2API,
ThreadManagedHardware,
ThreadManager,
)
+from opentrons.hardware_control.types import HardwareFeatureFlags
from opentrons.protocols import parse
from opentrons.protocols.api_support.deck_type import (
guess_from_global_config as guess_deck_type_from_global_config,
should_load_fixed_trash,
- should_load_fixed_trash_for_python_protocol,
+ should_load_fixed_trash_labware_for_python_protocol,
)
from opentrons.protocols.api_support.types import APIVersion
from opentrons.protocols.execution import execute as execute_apiv2
@@ -332,7 +333,7 @@ def execute( # noqa: C901
'text': string_command_text,
# The rest of this struct is
# command-dependent; see
- # opentrons.commands.commands.
+ # opentrons.legacy_commands.commands.
}
}
@@ -360,6 +361,7 @@ def execute( # noqa: C901
stack_logger = logging.getLogger("opentrons")
stack_logger.propagate = propagate_logs
stack_logger.setLevel(getattr(logging, log_level.upper(), logging.WARNING))
+ # TODO(mm, 2023-11-20): We should restore the original log settings when we're done.
# TODO(mm, 2023-10-02): Switch this truthy check to `is not None`
# to match documented behavior.
@@ -538,7 +540,9 @@ def _create_live_context_pe(
config=_get_protocol_engine_config(),
drop_tips_after_run=False,
post_run_hardware_state=PostRunHardwareState.STAY_ENGAGED_IN_PLACE,
- load_fixed_trash=should_load_fixed_trash_for_python_protocol(api_version),
+ load_fixed_trash=should_load_fixed_trash_labware_for_python_protocol(
+ api_version
+ ),
)
)
@@ -596,7 +600,9 @@ def _run_file_non_pe(
context.home()
try:
- execute_apiv2.run_protocol(protocol, context)
+ # TODO (spp, 2024-03-18): use true run-time param overrides once enabled
+ # for cli protocol simulation/ execution
+ execute_apiv2.run_protocol(protocol, context, run_time_param_overrides=None)
finally:
context.cleanup()
@@ -627,7 +633,10 @@ async def run(protocol_source: ProtocolSource) -> None:
try:
# TODO(mm, 2023-06-30): This will home and drop tips at the end, which is not how
# things have historically behaved with PAPIv2.13 and older or JSONv5 and older.
- result = await protocol_runner.run(protocol_source)
+ result = await protocol_runner.run(
+ deck_configuration=entrypoint_util.get_deck_configuration(),
+ protocol_source=protocol_source,
+ )
finally:
unsubscribe()
@@ -653,6 +662,8 @@ def _get_protocol_engine_config() -> Config:
# We deliberately omit ignore_pause=True because, in the current implementation of
# opentrons.protocol_api.core.engine, that would incorrectly make
# ProtocolContext.is_simulating() return True.
+ use_simulated_deck_config=True,
+        # TODO: the above is not correct here; it should use the robot's actual deck configuration.
)
@@ -668,9 +679,15 @@ def _get_global_hardware_controller(robot_type: RobotType) -> ThreadManagedHardw
# Conditional import because this isn't installed on OT-2s.
from opentrons.hardware_control.ot3api import OT3API
- _THREAD_MANAGED_HW = ThreadManager(OT3API.build_hardware_controller)
+ _THREAD_MANAGED_HW = ThreadManager(
+ OT3API.build_hardware_controller,
+ feature_flags=HardwareFeatureFlags.build_from_ff(),
+ )
else:
- _THREAD_MANAGED_HW = ThreadManager(OT2API.build_hardware_controller)
+ _THREAD_MANAGED_HW = ThreadManager(
+ OT2API.build_hardware_controller,
+ feature_flags=HardwareFeatureFlags.build_from_ff(),
+ )
return _THREAD_MANAGED_HW
diff --git a/api/src/opentrons/hardware_control/__init__.py b/api/src/opentrons/hardware_control/__init__.py
index 356923f1aff..b49f1462249 100644
--- a/api/src/opentrons/hardware_control/__init__.py
+++ b/api/src/opentrons/hardware_control/__init__.py
@@ -13,24 +13,29 @@
from .api import API
from .pause_manager import PauseManager
from .backends import Controller, Simulator
-from .types import CriticalPoint, ExecutionState
+from .types import CriticalPoint, ExecutionState, OT3Mount
from .constants import DROP_TIP_RELEASE_DISTANCE
from .thread_manager import ThreadManager
from .execution_manager import ExecutionManager
from .threaded_async_lock import ThreadedAsyncLock, ThreadedAsyncForbidden
-from .protocols import HardwareControlInterface
+from .protocols import HardwareControlInterface, FlexHardwareControlInterface
from .instruments import AbstractInstrument, Gripper
from typing import Union
from .ot3_calibration import OT3Transforms
from .robot_calibration import RobotCalibration
+from opentrons.config.types import RobotConfig, OT3Config
+
+from opentrons.types import Mount
# TODO (lc 12-05-2022) We should 1. figure out if we need
# to globally export a class that is strictly used in the hardware controller
# and 2. how to properly export an ot2 and ot3 pipette.
from .instruments.ot2.pipette import Pipette
-OT2HardwareControlAPI = HardwareControlInterface[RobotCalibration]
-OT3HardwareControlAPI = HardwareControlInterface[OT3Transforms]
+OT2HardwareControlAPI = HardwareControlInterface[RobotCalibration, Mount, RobotConfig]
+OT3HardwareControlAPI = FlexHardwareControlInterface[
+ OT3Transforms, Union[Mount, OT3Mount], OT3Config
+]
HardwareControlAPI = Union[OT2HardwareControlAPI, OT3HardwareControlAPI]
ThreadManagedHardware = ThreadManager[HardwareControlAPI]
@@ -55,4 +60,6 @@
"ThreadedAsyncForbidden",
"ThreadManagedHardware",
"SyncHardwareAPI",
+ "OT2HardwareControlAPI",
+ "OT3HardwareControlAPI",
]
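
The aliases now carry three type parameters — calibration transform, mount type, and config type — so the OT-2 flavor pins Mount/RobotConfig while the Flex flavor allows Union[Mount, OT3Mount] with OT3Config. A toy sketch of parametrizing one generic interface two ways (placeholder classes, not the real protocol):

from typing import Generic, TypeVar, Union

CalT = TypeVar("CalT")
MountT = TypeVar("MountT")
ConfT = TypeVar("ConfT")


class ControlInterface(Generic[CalT, MountT, ConfT]):
    """Placeholder for HardwareControlInterface."""


class RobotCalibration: ...
class OT3Transforms: ...
class Mount: ...
class OT3Mount: ...
class RobotConfig: ...
class OT3Config: ...


OT2API = ControlInterface[RobotCalibration, Mount, RobotConfig]
FlexAPI = ControlInterface[OT3Transforms, Union[Mount, OT3Mount], OT3Config]
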
diff --git a/api/src/opentrons/hardware_control/__main__.py b/api/src/opentrons/hardware_control/__main__.py
index 31f3462a05d..ff41b88312b 100644
--- a/api/src/opentrons/hardware_control/__main__.py
+++ b/api/src/opentrons/hardware_control/__main__.py
@@ -14,6 +14,7 @@
from typing import Optional, Dict, Any
from . import API
+from .types import HardwareFeatureFlags
from opentrons.config import robot_configs as rc
from opentrons.config.types import RobotConfig
@@ -45,7 +46,9 @@ async def arun(
:param port: Optional smoothie port override
"""
rconf = config or rc.load()
- hc = await API.build_hardware_controller(rconf, port) # noqa: F841
+ hc = await API.build_hardware_controller( # noqa: F841
+ config=rconf, port=port, feature_flags=HardwareFeatureFlags.build_from_ff()
+ )
def run(config: Optional[RobotConfig] = None, port: Optional[str] = None) -> None:
diff --git a/api/src/opentrons/hardware_control/adapters.py b/api/src/opentrons/hardware_control/adapters.py
index 330734fc6f0..4497da88bf8 100644
--- a/api/src/opentrons/hardware_control/adapters.py
+++ b/api/src/opentrons/hardware_control/adapters.py
@@ -2,13 +2,12 @@
"""
import asyncio
import functools
-from typing import Generic, TypeVar, Callable, Any, cast
+from typing import Generic, TypeVar, Callable, Any, cast, Awaitable
from .protocols import AsyncioConfigurable
WrappedObj = TypeVar("WrappedObj", bound=AsyncioConfigurable, covariant=True)
WrappedReturn = TypeVar("WrappedReturn")
-WrappedFunc = TypeVar("WrappedFunc", bound=Callable[..., WrappedReturn])
# TODO: BC 2020-02-25 instead of overwriting __get_attribute__ in this class
@@ -54,7 +53,7 @@ def __repr__(self) -> str:
@staticmethod
def call_coroutine_sync(
loop: asyncio.AbstractEventLoop,
- to_call: WrappedFunc,
+ to_call: Callable[..., Awaitable[WrappedReturn]],
*args: Any,
**kwargs: Any,
) -> WrappedReturn:
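
call_coroutine_sync is retyped to take Callable[..., Awaitable[WrappedReturn]] directly; the removed WrappedFunc TypeVar referenced another TypeVar inside its bound, which the type system does not support, so it never constrained anything. A minimal standalone sketch of the synchronous-call-into-a-loop pattern this adapter wraps (a dedicated loop thread; nothing Opentrons-specific):

import asyncio
import threading
from typing import Any, Awaitable, Callable, TypeVar

R = TypeVar("R")

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()


def call_coroutine_sync(
    lp: asyncio.AbstractEventLoop,
    to_call: Callable[..., Awaitable[R]],
    *args: Any,
    **kwargs: Any,
) -> R:
    # schedule the coroutine on the loop's thread and block for its result
    future = asyncio.run_coroutine_threadsafe(to_call(*args, **kwargs), lp)
    return future.result()


async def double(value: int) -> int:
    await asyncio.sleep(0)
    return value * 2


assert call_coroutine_sync(loop, double, 21) == 42
loop.call_soon_threadsafe(loop.stop)
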
diff --git a/api/src/opentrons/hardware_control/api.py b/api/src/opentrons/hardware_control/api.py
index f3f70c16b9a..718d0d8796a 100644
--- a/api/src/opentrons/hardware_control/api.py
+++ b/api/src/opentrons/hardware_control/api.py
@@ -57,6 +57,7 @@
EstopState,
SubSystem,
SubSystemState,
+ HardwareFeatureFlags,
)
from . import modules
from .robot_calibration import (
@@ -87,7 +88,7 @@ class API(
# of methods that are present in the protocol will call the (empty,
# do-nothing) methods in the protocol. This will happily make all the
# tests fail.
- HardwareControlInterface[RobotCalibration],
+ HardwareControlInterface[RobotCalibration, top_types.Mount, RobotConfig],
):
"""This API is the primary interface to the hardware controller.
@@ -111,6 +112,7 @@ def __init__(
backend: Union[Controller, Simulator],
loop: asyncio.AbstractEventLoop,
config: RobotConfig,
+ feature_flags: Optional[HardwareFeatureFlags] = None,
) -> None:
"""Initialize an API instance.
@@ -122,6 +124,8 @@ def __init__(
self._config = config
self._backend = backend
self._loop = loop
+ # If no feature flag set is defined, we will use the default values
+ self._feature_flags = feature_flags or HardwareFeatureFlags()
self._callbacks: Set[HardwareEventHandler] = set()
# {'X': 0.0, 'Y': 0.0, 'Z': 0.0, 'A': 0.0, 'B': 0.0, 'C': 0.0}
@@ -163,13 +167,14 @@ def _update_door_state(self, door_state: DoorState) -> None:
def _reset_last_mount(self) -> None:
self._last_moved_mount = None
- @classmethod # noqa: C901
- async def build_hardware_controller(
+ @classmethod
+ async def build_hardware_controller( # noqa: C901
cls,
config: Union[RobotConfig, OT3Config, None] = None,
port: Optional[str] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
firmware: Optional[Tuple[pathlib.Path, str]] = None,
+ feature_flags: Optional[HardwareFeatureFlags] = None,
) -> "API":
"""Build a hardware controller that will actually talk to hardware.
@@ -221,7 +226,12 @@ async def blink() -> None:
mod_log.error(msg)
raise RuntimeError(msg)
- api_instance = cls(backend, loop=checked_loop, config=checked_config)
+ api_instance = cls(
+ backend,
+ loop=checked_loop,
+ config=checked_config,
+ feature_flags=feature_flags,
+ )
await api_instance.cache_instruments()
module_controls = await AttachedModulesControl.build(
api_instance, board_revision=backend.board_revision
@@ -245,10 +255,11 @@ async def build_hardware_simulator(
attached_instruments: Optional[
Dict[top_types.Mount, Dict[str, Optional[str]]]
] = None,
- attached_modules: Optional[List[str]] = None,
+ attached_modules: Optional[Dict[str, List[str]]] = None,
config: Optional[Union[RobotConfig, OT3Config]] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
strict_attached_instruments: bool = True,
+ feature_flags: Optional[HardwareFeatureFlags] = None,
) -> "API":
"""Build a simulating hardware controller.
@@ -260,7 +271,7 @@ async def build_hardware_simulator(
attached_instruments = {}
if None is attached_modules:
- attached_modules = []
+ attached_modules = {}
checked_loop = use_or_initialize_loop(loop)
if isinstance(config, RobotConfig):
@@ -274,7 +285,12 @@ async def build_hardware_simulator(
checked_loop,
strict_attached_instruments,
)
- api_instance = cls(backend, loop=checked_loop, config=checked_config)
+ api_instance = cls(
+ backend,
+ loop=checked_loop,
+ config=checked_config,
+ feature_flags=feature_flags,
+ )
await api_instance.cache_instruments()
module_controls = await AttachedModulesControl.build(
api_instance, board_revision=backend.board_revision
@@ -331,6 +347,7 @@ def fw_version(self) -> str:
def board_revision(self) -> str:
return str(self._backend.board_revision)
+ @property
def attached_subsystems(self) -> Dict[SubSystem, SubSystemState]:
return {}
@@ -410,8 +427,13 @@ async def update_firmware(
firmware_file, checked_loop, explicit_modeset
)
+ def has_gripper(self) -> bool:
+ return False
+
async def cache_instruments(
- self, require: Optional[Dict[top_types.Mount, PipetteName]] = None
+ self,
+ require: Optional[Dict[top_types.Mount, PipetteName]] = None,
+ skip_if_would_block: bool = False,
) -> None:
"""
Scan the attached instruments, take necessary configuration actions,
@@ -435,6 +457,7 @@ async def cache_instruments(
req_instr,
pip_id,
pip_offset_cal,
+ self._feature_flags.use_old_aspiration_functions,
)
self._attached_instruments[mount] = p
if req_instr and p:
@@ -598,6 +621,7 @@ async def _do_plunger_home(
home_flagged_axes=False,
)
+ @ExecutionManagerProvider.wait_for_running
async def home_plunger(self, mount: top_types.Mount) -> None:
"""
Home the plunger motor for a mount, and then return it to the 'bottom'
@@ -740,7 +764,7 @@ async def move_to(
top_types.Point(0, 0, 0),
)
- await self._cache_and_maybe_retract_mount(mount)
+ await self.prepare_for_mount_movement(mount)
await self._move(target_position, speed=speed, max_speeds=max_speeds)
async def move_axes(
@@ -800,7 +824,7 @@ async def move_rel(
detail={"mount": str(mount), "unhomed_axes": str(unhomed)},
)
- await self._cache_and_maybe_retract_mount(mount)
+ await self.prepare_for_mount_movement(mount)
await self._move(
target_position,
speed=speed,
@@ -820,6 +844,9 @@ async def _cache_and_maybe_retract_mount(self, mount: top_types.Mount) -> None:
await self.retract(self._last_moved_mount, 10)
self._last_moved_mount = mount
+ async def prepare_for_mount_movement(self, mount: top_types.Mount) -> None:
+ await self._cache_and_maybe_retract_mount(mount)
+
@ExecutionManagerProvider.wait_for_running
async def _move(
self,
@@ -888,11 +915,11 @@ def engaged_axes(self) -> Dict[Axis, bool]:
async def disengage_axes(self, which: List[Axis]) -> None:
await self._backend.disengage_axes([ot2_axis_to_string(ax) for ax in which])
+ @ExecutionManagerProvider.wait_for_running
async def _fast_home(self, axes: Sequence[str], margin: float) -> Dict[str, float]:
converted_axes = "".join(axes)
return await self._backend.fast_home(converted_axes, margin)
- @ExecutionManagerProvider.wait_for_running
async def retract(self, mount: top_types.Mount, margin: float = 10) -> None:
"""Pull the specified mount up to its home position.
@@ -900,7 +927,6 @@ async def retract(self, mount: top_types.Mount, margin: float = 10) -> None:
"""
await self.retract_axis(Axis.by_mount(mount), margin)
- @ExecutionManagerProvider.wait_for_running
async def retract_axis(self, axis: Axis, margin: float = 10) -> None:
"""Pull the specified axis up to its home position.
@@ -961,6 +987,14 @@ async def update_config(self, **kwargs: Any) -> None:
"""
self._config = replace(self._config, **kwargs)
+ @property
+ def hardware_feature_flags(self) -> HardwareFeatureFlags:
+ return self._feature_flags
+
+ @hardware_feature_flags.setter
+ def hardware_feature_flags(self, feature_flags: HardwareFeatureFlags) -> None:
+ self._feature_flags = feature_flags
+
async def update_deck_calibration(self, new_transform: RobotCalibration) -> None:
pass
@@ -1178,9 +1212,9 @@ async def drop_tip(self, mount: top_types.Mount, home_after: bool = True) -> Non
home_flagged_axes=False,
)
if move.home_after:
- smoothie_pos = await self._backend.fast_home(
- [ot2_axis_to_string(ax) for ax in move.home_axes],
- move.home_after_safety_margin,
+ smoothie_pos = await self._fast_home(
+ axes=[ot2_axis_to_string(ax) for ax in move.home_axes],
+ margin=move.home_after_safety_margin,
)
self._current_position = deck_from_machine(
machine_pos=self._axis_map_from_string_map(smoothie_pos),
diff --git a/api/src/opentrons/hardware_control/backends/controller.py b/api/src/opentrons/hardware_control/backends/controller.py
index 5525dce3105..f35d6092134 100644
--- a/api/src/opentrons/hardware_control/backends/controller.py
+++ b/api/src/opentrons/hardware_control/backends/controller.py
@@ -19,7 +19,7 @@
from pathlib import Path
try:
- import aionotify # type: ignore[import]
+ import aionotify # type: ignore[import-untyped]
except (OSError, ModuleNotFoundError):
aionotify = None
@@ -111,7 +111,12 @@ def _build_event_watcher() -> aionotify.Watcher:
watcher.watch(
alias="modules",
path="/dev",
- flags=(aionotify.Flags.CREATE | aionotify.Flags.DELETE),
+ flags=(
+ aionotify.Flags.CREATE
+ | aionotify.Flags.DELETE
+ | aionotify.Flags.MOVED_FROM
+ | aionotify.Flags.MOVED_TO
+ ),
)
return watcher
diff --git a/api/src/opentrons/hardware_control/estop_state.py b/api/src/opentrons/hardware_control/backends/estop_state.py
similarity index 96%
rename from api/src/opentrons/hardware_control/estop_state.py
rename to api/src/opentrons/hardware_control/backends/estop_state.py
index 2c8884dcb26..d421af6a77a 100644
--- a/api/src/opentrons/hardware_control/estop_state.py
+++ b/api/src/opentrons/hardware_control/backends/estop_state.py
@@ -13,6 +13,7 @@
EstopAttachLocation,
EstopStateNotification,
HardwareEventHandler,
+ HardwareEventUnsubscriber,
)
@@ -51,10 +52,12 @@ def __del__(self) -> None:
if self._detector is not None:
self._detector.remove_listener(self.detector_listener)
- def add_listener(self, listener: HardwareEventHandler) -> None:
+ def add_listener(self, listener: HardwareEventHandler) -> HardwareEventUnsubscriber:
"""Add a hardware event listener for estop event changes."""
if listener not in self._listeners:
self._listeners.append(listener)
+ return lambda: self.remove_listener(listener)
+ return lambda: None
def remove_listener(self, listener: HardwareEventHandler) -> None:
"""Remove an existing hardware event listener for estop detector changes."""
diff --git a/api/src/opentrons/hardware_control/backends/flex_protocol.py b/api/src/opentrons/hardware_control/backends/flex_protocol.py
new file mode 100644
index 00000000000..7bd2969de6b
--- /dev/null
+++ b/api/src/opentrons/hardware_control/backends/flex_protocol.py
@@ -0,0 +1,446 @@
+import asyncio
+from contextlib import asynccontextmanager
+from typing import (
+ Protocol,
+ Dict,
+ Optional,
+ List,
+ Mapping,
+ AsyncIterator,
+ Sequence,
+ Tuple,
+ Set,
+ TypeVar,
+)
+from opentrons_shared_data.pipette.dev_types import (
+ PipetteName,
+)
+from opentrons.config.types import GantryLoad, OutputOptions
+from opentrons.hardware_control.types import (
+ BoardRevision,
+ Axis,
+ OT3Mount,
+ OT3AxisMap,
+ InstrumentProbeType,
+ MotorStatus,
+ UpdateStatus,
+ SubSystem,
+ SubSystemState,
+ TipStateType,
+ GripperJawState,
+ HardwareFeatureFlags,
+ EstopOverallStatus,
+ EstopState,
+ HardwareEventHandler,
+ HardwareEventUnsubscriber,
+ HepaFanState,
+ HepaUVState,
+ StatusBarState,
+)
+from opentrons.hardware_control.module_control import AttachedModulesControl
+from ..dev_types import OT3AttachedInstruments
+from .types import HWStopCondition
+
+Cls = TypeVar("Cls")
+
+
+class FlexBackend(Protocol):
+ """Flex backend mypy protocol."""
+
+ async def get_serial_number(self) -> Optional[str]:
+ ...
+
+ @asynccontextmanager
+ def restore_system_constraints(self) -> AsyncIterator[None]:
+ ...
+
+ def update_constraints_for_gantry_load(self, gantry_load: GantryLoad) -> None:
+ ...
+
+ def update_constraints_for_calibration_with_gantry_load(
+ self,
+ gantry_load: GantryLoad,
+ ) -> None:
+ ...
+
+ def update_constraints_for_plunger_acceleration(
+ self, mount: OT3Mount, acceleration: float, gantry_load: GantryLoad
+ ) -> None:
+ ...
+
+ @property
+ def initialized(self) -> bool:
+ """True when the hardware controller has initialized and is ready."""
+ ...
+
+ @initialized.setter
+ def initialized(self, value: bool) -> None:
+ ...
+
+ @property
+ def gear_motor_position(self) -> Optional[float]:
+ ...
+
+ @property
+ def board_revision(self) -> BoardRevision:
+ """Get the board revision"""
+ ...
+
+ @property
+ def module_controls(self) -> AttachedModulesControl:
+ """Get the module controls."""
+ ...
+
+ @module_controls.setter
+ def module_controls(self, module_controls: AttachedModulesControl) -> None:
+ """Set the module controls"""
+ ...
+
+ async def update_to_default_current_settings(self, gantry_load: GantryLoad) -> None:
+ ...
+
+ def update_feature_flags(self, feature_flags: HardwareFeatureFlags) -> None:
+ """Update the hardware feature flags used by the hardware controller."""
+ ...
+
+ async def update_motor_status(self) -> None:
+ """Retreieve motor and encoder status and position from all present devices"""
+ ...
+
+ async def update_motor_estimation(self, axes: Sequence[Axis]) -> None:
+ """Update motor position estimation for commanded axes, and update cache of data."""
+ # Simulate conditions as if there are no stalls, aka do nothing
+ ...
+
+ def _get_motor_status(
+ self, axes: Sequence[Axis]
+ ) -> Dict[Axis, Optional[MotorStatus]]:
+ ...
+
+ def get_invalid_motor_axes(self, axes: Sequence[Axis]) -> List[Axis]:
+ """Get axes that currently do not have the motor-ok flag."""
+ ...
+
+ def get_invalid_encoder_axes(self, axes: Sequence[Axis]) -> List[Axis]:
+ """Get axes that currently do not have the encoder-ok flag."""
+ ...
+
+ def check_motor_status(self, axes: Sequence[Axis]) -> bool:
+ ...
+
+ def check_encoder_status(self, axes: Sequence[Axis]) -> bool:
+ ...
+
+ async def update_position(self) -> OT3AxisMap[float]:
+ """Get the current position."""
+ ...
+
+ async def update_encoder_position(self) -> OT3AxisMap[float]:
+ """Get the encoder current position."""
+ ...
+
+ async def liquid_probe(
+ self,
+ mount: OT3Mount,
+ max_z_distance: float,
+ mount_speed: float,
+ plunger_speed: float,
+ threshold_pascals: float,
+ output_format: OutputOptions = OutputOptions.can_bus_only,
+ data_files: Optional[Dict[InstrumentProbeType, str]] = None,
+ auto_zero_sensor: bool = True,
+ num_baseline_reads: int = 10,
+ probe: InstrumentProbeType = InstrumentProbeType.PRIMARY,
+ ) -> float:
+ ...
+
+ async def move(
+ self,
+ origin: Dict[Axis, float],
+ target: Dict[Axis, float],
+ speed: float,
+ stop_condition: HWStopCondition = HWStopCondition.none,
+ nodes_in_moves_only: bool = True,
+ ) -> None:
+ """Move to a position.
+
+ Args:
+            origin: Map of axis to its current position.
+            target: Map of axis to the position to move to.
+            speed: The speed to move at.
+            stop_condition: Optional hardware stop condition for the move.
+
+ Returns:
+ None
+ """
+ ...
+
+ async def home(
+ self, axes: Sequence[Axis], gantry_load: GantryLoad
+ ) -> OT3AxisMap[float]:
+ """Home axes.
+
+ Args:
+ axes: Optional list of axes.
+
+ Returns:
+ Homed position.
+ """
+ ...
+
+ async def gripper_grip_jaw(
+ self,
+ duty_cycle: float,
+ expected_displacement: float,
+ stop_condition: HWStopCondition = HWStopCondition.none,
+ stay_engaged: bool = True,
+ ) -> None:
+ """Move gripper inward."""
+ ...
+
+ async def gripper_home_jaw(self, duty_cycle: float) -> None:
+ """Move gripper outward."""
+ ...
+
+ async def gripper_hold_jaw(
+ self,
+ encoder_position_um: int,
+ ) -> None:
+ ...
+
+ async def get_jaw_state(self) -> GripperJawState:
+ """Get the state of the gripper jaw."""
+ ...
+
+ async def tip_action(
+ self, origin: Dict[Axis, float], targets: List[Tuple[Dict[Axis, float], float]]
+ ) -> None:
+ ...
+
+ async def home_tip_motors(
+ self,
+ distance: float,
+ velocity: float,
+ back_off: bool = True,
+ ) -> None:
+ ...
+
+ async def get_attached_instruments(
+ self, expected: Mapping[OT3Mount, PipetteName]
+ ) -> Mapping[OT3Mount, OT3AttachedInstruments]:
+ """Get attached instruments.
+
+ Args:
+ expected: Which mounts are expected.
+
+ Returns:
+ A map of mount to pipette name.
+ """
+ ...
+
+ async def get_limit_switches(self) -> OT3AxisMap[bool]:
+ """Get the state of the gantry's limit switches on each axis."""
+ ...
+
+ async def set_active_current(self, axis_currents: OT3AxisMap[float]) -> None:
+ """Set the active current.
+
+ Args:
+ axis_currents: Axes' currents
+
+ Returns:
+ None
+ """
+ ...
+
+ @asynccontextmanager
+ def motor_current(
+ self,
+ run_currents: Optional[OT3AxisMap[float]] = None,
+ hold_currents: Optional[OT3AxisMap[float]] = None,
+ ) -> AsyncIterator[None]:
+ """Save the current."""
+ ...
+
+ @asynccontextmanager
+ def restore_z_r_run_current(self) -> AsyncIterator[None]:
+ """
+ Temporarily restore the active current ONLY when homing or
+ retracting the Z_R axis while the 96-channel is attached.
+ """
+ ...
+
+ @asynccontextmanager
+ def increase_z_l_hold_current(self) -> AsyncIterator[None]:
+ """
+ Temporarily increase the hold current when engaging the Z_L axis
+ while the 96-channel is attached
+ """
+ ...
+
+ async def watch(self, loop: asyncio.AbstractEventLoop) -> None:
+ ...
+
+ @property
+ def axis_bounds(self) -> OT3AxisMap[Tuple[float, float]]:
+ """Get the axis bounds."""
+ ...
+
+ @property
+ def fw_version(self) -> Dict[SubSystem, int]:
+ """Get the firmware version."""
+ ...
+
+ def axis_is_present(self, axis: Axis) -> bool:
+ ...
+
+ @property
+ def update_required(self) -> bool:
+ ...
+
+ def update_firmware(
+ self,
+ subsystems: Set[SubSystem],
+ force: bool = False,
+ ) -> AsyncIterator[UpdateStatus]:
+ """Updates the firmware on the OT3."""
+ ...
+
+ def engaged_axes(self) -> OT3AxisMap[bool]:
+ """Get engaged axes."""
+ ...
+
+ async def update_engaged_axes(self) -> None:
+ """Update engaged axes."""
+ ...
+
+ async def is_motor_engaged(self, axis: Axis) -> bool:
+ """Check if axis is enabled."""
+ ...
+
+ async def disengage_axes(self, axes: List[Axis]) -> None:
+ """Disengage axes."""
+ ...
+
+ async def engage_axes(self, axes: List[Axis]) -> None:
+ """Engage axes."""
+ ...
+
+ async def set_lights(self, button: Optional[bool], rails: Optional[bool]) -> None:
+ """Set the light states."""
+ ...
+
+ async def get_lights(self) -> Dict[str, bool]:
+ """Get the light state."""
+ ...
+
+ def pause(self) -> None:
+ """Pause the controller activity."""
+ ...
+
+ def resume(self) -> None:
+ """Resume the controller activity."""
+ ...
+
+ async def halt(self) -> None:
+ """Halt the motors."""
+ ...
+
+ async def probe(self, axis: Axis, distance: float) -> OT3AxisMap[float]:
+ """Probe."""
+ ...
+
+ async def clean_up(self) -> None:
+ """Clean up."""
+ ...
+
+ @staticmethod
+ def home_position() -> OT3AxisMap[float]:
+ ...
+
+ async def capacitive_probe(
+ self,
+ mount: OT3Mount,
+ moving: Axis,
+ distance_mm: float,
+ speed_mm_per_s: float,
+ sensor_threshold_pf: float,
+ probe: InstrumentProbeType,
+ ) -> bool:
+ ...
+
+ async def capacitive_pass(
+ self,
+ mount: OT3Mount,
+ moving: Axis,
+ distance_mm: float,
+ speed_mm_per_s: float,
+ probe: InstrumentProbeType,
+ ) -> List[float]:
+ ...
+
+ @property
+ def subsystems(self) -> Dict[SubSystem, SubSystemState]:
+ ...
+
+ async def get_tip_status(
+ self, mount: OT3Mount, ht_operation_sensor: Optional[InstrumentProbeType] = None
+ ) -> TipStateType:
+ ...
+
+ def current_tip_state(self, mount: OT3Mount) -> Optional[bool]:
+ ...
+
+ async def update_tip_detector(self, mount: OT3Mount, sensor_count: int) -> None:
+ ...
+
+ async def teardown_tip_detector(self, mount: OT3Mount) -> None:
+ ...
+
+ async def set_status_bar_state(self, state: StatusBarState) -> None:
+ ...
+
+ async def set_status_bar_enabled(self, enabled: bool) -> None:
+ ...
+
+ def get_status_bar_state(self) -> StatusBarState:
+ ...
+
+ @property
+ def estop_status(self) -> EstopOverallStatus:
+ ...
+
+ def estop_acknowledge_and_clear(self) -> EstopOverallStatus:
+ ...
+
+ def get_estop_state(self) -> EstopState:
+ ...
+
+ def add_estop_callback(self, cb: HardwareEventHandler) -> HardwareEventUnsubscriber:
+ ...
+
+ def check_gripper_position_within_bounds(
+ self,
+ expected_grip_width: float,
+ grip_width_uncertainty_wider: float,
+ grip_width_uncertainty_narrower: float,
+ jaw_width: float,
+ max_allowed_grip_error: float,
+ hard_limit_lower: float,
+ hard_limit_upper: float,
+ ) -> None:
+ ...
+
+ async def set_hepa_fan_state(self, fan_on: bool, duty_cycle: int) -> bool:
+ """Sets the state and duty cycle of the Hepa/UV module."""
+ ...
+
+ async def get_hepa_fan_state(self) -> Optional[HepaFanState]:
+ ...
+
+ async def set_hepa_uv_state(self, light_on: bool, uv_duration_s: int) -> bool:
+ """Sets the state and duration (seconds) of the UV light for the Hepa/UV module."""
+ ...
+
+ async def get_hepa_uv_state(self) -> Optional[HepaUVState]:
+ ...
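
FlexBackend is a typing.Protocol, so a backend can satisfy it purely structurally by implementing the methods; OT3Controller below also subclasses it explicitly, which Protocol permits. Toy sketch of both styles against a two-method protocol:

from typing import Optional, Protocol


class Backend(Protocol):
    async def get_serial_number(self) -> Optional[str]:
        ...

    def pause(self) -> None:
        ...


class StructuralBackend:  # no inheritance; matches the protocol by shape
    async def get_serial_number(self) -> Optional[str]:
        return "SIM-0001"

    def pause(self) -> None:
        pass


class InheritingBackend(Backend):  # explicit subclassing is also allowed
    async def get_serial_number(self) -> Optional[str]:
        return None

    def pause(self) -> None:
        pass


def use(backend: Backend) -> None:
    backend.pause()


use(StructuralBackend())
use(InheritingBackend())
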
diff --git a/api/src/opentrons/hardware_control/backends/ot3controller.py b/api/src/opentrons/hardware_control/backends/ot3controller.py
index 9718e298dfd..9a22a3e2e13 100644
--- a/api/src/opentrons/hardware_control/backends/ot3controller.py
+++ b/api/src/opentrons/hardware_control/backends/ot3controller.py
@@ -6,6 +6,7 @@
from functools import wraps
import logging
from copy import deepcopy
+from numpy import isclose
from typing import (
Any,
Awaitable,
@@ -14,7 +15,6 @@
List,
Optional,
Tuple,
- TYPE_CHECKING,
Sequence,
AsyncIterator,
cast,
@@ -23,9 +23,10 @@
Iterator,
KeysView,
Union,
+ Mapping,
)
-from opentrons.config.types import OT3Config, GantryLoad
-from opentrons.config import gripper_config, feature_flags as ff
+from opentrons.config.types import OT3Config, GantryLoad, OutputOptions
+from opentrons.config import gripper_config
from .ot3utils import (
axis_convert,
create_move_group,
@@ -46,11 +47,14 @@
map_pipette_type_to_sensor_id,
moving_axes_in_move_group,
gripper_jaw_state_from_fw,
+ get_system_constraints,
+ get_system_constraints_for_calibration,
+ get_system_constraints_for_plunger_acceleration,
)
from .tip_presence_manager import TipPresenceManager
try:
- import aionotify # type: ignore[import]
+ import aionotify # type: ignore[import-untyped]
except (OSError, ModuleNotFoundError):
aionotify = None
@@ -67,20 +71,22 @@
from opentrons_hardware.drivers.eeprom import EEPROMDriver, EEPROMData
from opentrons_hardware.hardware_control.move_group_runner import MoveGroupRunner
from opentrons_hardware.hardware_control.motion_planning import (
- Move,
- Coordinates,
+ MoveManager,
+ MoveTarget,
+ ZeroLengthMoveError,
)
from opentrons_hardware.hardware_control.estop.detector import (
EstopDetector,
)
-from opentrons.hardware_control.estop_state import EstopStateMachine
+from opentrons.hardware_control.backends.estop_state import EstopStateMachine
from opentrons_hardware.hardware_control.motor_enable_disable import (
set_enable_motor,
set_disable_motor,
set_enable_tip_motor,
set_disable_tip_motor,
+ get_motor_enabled,
)
from opentrons_hardware.hardware_control.motor_position_status import (
get_motor_position,
@@ -127,8 +133,13 @@
SubSystemState,
SubSystem,
TipStateType,
- EstopState,
GripperJawState,
+ HardwareFeatureFlags,
+ EstopOverallStatus,
+ EstopAttachLocation,
+ EstopState,
+ HardwareEventHandler,
+ HardwareEventUnsubscriber,
)
from opentrons.hardware_control.errors import (
InvalidPipetteName,
@@ -159,6 +170,12 @@
from opentrons_hardware.hardware_control.gripper_settings import (
get_gripper_jaw_state,
)
+from opentrons_hardware.hardware_control.hepa_uv_settings import (
+ set_hepa_fan_state as set_hepa_fan_state_fw,
+ get_hepa_fan_state as get_hepa_fan_state_fw,
+ set_hepa_uv_state as set_hepa_uv_state_fw,
+ get_hepa_uv_state as get_hepa_uv_state_fw,
+)
from opentrons_hardware.drivers.gpio import OT3GPIO, RemoteOT3GPIO
from opentrons_shared_data.pipette.dev_types import PipetteName
@@ -173,16 +190,21 @@
EStopNotPresentError,
PipetteOverpressureError,
FirmwareUpdateRequiredError,
+ FailedGripperPickupError,
)
from .subsystem_manager import SubsystemManager
-if TYPE_CHECKING:
- from ..dev_types import (
- AttachedPipette,
- AttachedGripper,
- OT3AttachedInstruments,
- )
+from ..dev_types import (
+ AttachedPipette,
+ AttachedGripper,
+ OT3AttachedInstruments,
+)
+from ..types import HepaFanState, HepaUVState, StatusBarState
+
+from .types import HWStopCondition
+from .flex_protocol import FlexBackend
+from .status_bar_state import StatusBarStateController
log = logging.getLogger(__name__)
@@ -211,7 +233,7 @@ def requires_estop(func: Wrapped) -> Wrapped:
@wraps(func)
async def wrapper(self: OT3Controller, *args: Any, **kwargs: Any) -> Any:
state = self._estop_state_machine.state
- if state == EstopState.NOT_PRESENT and ff.require_estop():
+ if state == EstopState.NOT_PRESENT and self._feature_flags.require_estop:
raise EStopNotPresentError(
message="An Estop must be plugged in to move the robot."
)
@@ -228,7 +250,7 @@ async def wrapper(self: OT3Controller, *args: Any, **kwargs: Any) -> Any:
return cast(Wrapped, wrapper)
-class OT3Controller:
+class OT3Controller(FlexBackend):
"""OT3 Hardware Controller Backend."""
_initialized: bool
@@ -238,10 +260,15 @@ class OT3Controller:
_encoder_position: Dict[NodeId, float]
_motor_status: Dict[NodeId, MotorStatus]
_subsystem_manager: SubsystemManager
+ _engaged_axes: OT3AxisMap[bool]
@classmethod
async def build(
- cls, config: OT3Config, use_usb_bus: bool = False, check_updates: bool = True
+ cls,
+ config: OT3Config,
+ use_usb_bus: bool = False,
+ check_updates: bool = True,
+ feature_flags: Optional[HardwareFeatureFlags] = None,
) -> OT3Controller:
"""Create the OT3Controller instance.
@@ -262,7 +289,11 @@ async def build(
)
raise e
inst = cls(
- config, driver=driver, usb_driver=usb_driver, check_updates=check_updates
+ config,
+ driver=driver,
+ usb_driver=usb_driver,
+ check_updates=check_updates,
+ feature_flags=feature_flags,
)
await inst._subsystem_manager.start()
return inst
@@ -274,6 +305,7 @@ def __init__(
usb_driver: Optional[SerialUsbDriver] = None,
eeprom_driver: Optional[EEPROMDriver] = None,
check_updates: bool = True,
+ feature_flags: Optional[HardwareFeatureFlags] = None,
) -> None:
"""Construct.
@@ -288,6 +320,7 @@ def __init__(
self._drivers = self._build_system_hardware(
self._messenger, usb_driver, eeprom_driver
)
+ self._feature_flags = feature_flags or HardwareFeatureFlags()
self._usb_messenger = self._drivers.usb_messenger
self._gpio_dev = self._drivers.gpio_dev
self._subsystem_manager = SubsystemManager(
@@ -303,9 +336,12 @@ def __init__(
self._gear_motor_position: Dict[NodeId, float] = {}
self._encoder_position = self._get_home_position()
self._motor_status = {}
+ self._engaged_axes = {}
self._check_updates = check_updates
self._initialized = False
self._status_bar = status_bar.StatusBar(messenger=self._usb_messenger)
+ self._status_bar_controller = StatusBarStateController(self._status_bar)
+
try:
self._event_watcher = self._build_event_watcher()
except AttributeError:
@@ -315,6 +351,46 @@ def __init__(
)
self._current_settings: Optional[OT3AxisMap[CurrentConfig]] = None
self._tip_presence_manager = TipPresenceManager(self._messenger)
+ self._move_manager = MoveManager(
+ constraints=get_system_constraints(
+ self._configuration.motion_settings, GantryLoad.LOW_THROUGHPUT
+ )
+ )
+
+ @asynccontextmanager
+ async def restore_system_constraints(self) -> AsyncIterator[None]:
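+ """Let callers temporarily override the motion constraints; the previous constraints are restored on exit."""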
+ old_system_constraints = deepcopy(self._move_manager.get_constraints())
+ try:
+ yield
+ finally:
+ self._move_manager.update_constraints(old_system_constraints)
+ log.debug(f"Restore previous system constraints: {old_system_constraints}")
+
+ def update_constraints_for_calibration_with_gantry_load(
+ self,
+ gantry_load: GantryLoad,
+ ) -> None:
+ self._move_manager.update_constraints(
+ get_system_constraints_for_calibration(
+ self._configuration.motion_settings, gantry_load
+ )
+ )
+ log.debug(
+ f"Set system constraints for calibration: {self._move_manager.get_constraints()}"
+ )
+
+ def update_constraints_for_gantry_load(self, gantry_load: GantryLoad) -> None:
+ self._move_manager.update_constraints(
+ get_system_constraints(self._configuration.motion_settings, gantry_load)
+ )
+
+ def update_constraints_for_plunger_acceleration(
+ self, mount: OT3Mount, acceleration: float, gantry_load: GantryLoad
+ ) -> None:
+ new_constraints = get_system_constraints_for_plunger_acceleration(
+ self._configuration.motion_settings, gantry_load, mount, acceleration
+ )
+ self._move_manager.update_constraints(new_constraints)
async def get_serial_number(self) -> Optional[str]:
if not self.initialized:
@@ -383,8 +459,8 @@ def _build_system_hardware(
)
@property
- def gear_motor_position(self) -> Dict[NodeId, float]:
- return self._gear_motor_position
+ def gear_motor_position(self) -> Optional[float]:
+ return self._gear_motor_position.get(NodeId.pipette_left, None)
def _motor_nodes(self) -> Set[NodeId]:
"""Get a list of the motor controller nodes of all attached and ok devices."""
@@ -408,6 +484,10 @@ async def update_to_default_current_settings(self, gantry_load: GantryLoad) -> N
self._current_settings = self.get_current_settings(gantry_load)
await self.set_default_currents()
+ def update_feature_flags(self, feature_flags: HardwareFeatureFlags) -> None:
+ """Update the hardware feature flags used by the hardware controller."""
+ self._feature_flags = feature_flags
+
async def update_motor_status(self) -> None:
"""Retreieve motor and encoder status and position from all present nodes"""
motor_nodes = self._motor_nodes()
@@ -515,7 +595,7 @@ def _handle_motor_status_response(
# "encoder_ok" flag staying set (it will only be False if the motor axis has not been
# homed since a power cycle)
motor_ok_latch = (
- (not ff.stall_detection_enabled())
+ (not self._feature_flags.stall_detection_enabled)
and ((axis in self._motor_status) and self._motor_status[axis].motor_ok)
and self._motor_status[axis].encoder_ok
)
@@ -532,9 +612,10 @@ def _handle_motor_status_response(
@requires_estop
async def move(
self,
- origin: Coordinates[Axis, float],
- moves: List[Move[Axis]],
- stop_condition: MoveStopCondition = MoveStopCondition.none,
+ origin: Dict[Axis, float],
+ target: Dict[Axis, float],
+ speed: float,
+ stop_condition: HWStopCondition = HWStopCondition.none,
nodes_in_moves_only: bool = True,
) -> None:
"""Move to a position.
@@ -553,6 +634,17 @@ async def move(
Returns:
None
"""
+ move_target = MoveTarget.build(position=target, max_speed=speed)
+ try:
+ _, movelist = self._move_manager.plan_motion(
+ origin=origin, target_list=[move_target]
+ )
+ except ZeroLengthMoveError as zme:
+ log.warning(f"Not moving because move was zero length {str(zme)}")
+ return
+ moves = movelist[0]
+ log.info(f"move: machine {target} from {origin} requires {moves}")
+
ordered_nodes = self._motor_nodes()
if nodes_in_moves_only:
moving_axes = {
@@ -560,12 +652,17 @@ async def move(
}
ordered_nodes = ordered_nodes.intersection(moving_axes)
- group = create_move_group(origin, moves, ordered_nodes, stop_condition)
+ group = create_move_group(
+ origin, moves, ordered_nodes, MoveStopCondition[stop_condition.name]
+ )
move_group, _ = group
runner = MoveGroupRunner(
move_groups=[move_group],
- ignore_stalls=True if not ff.stall_detection_enabled() else False,
+ ignore_stalls=not self._feature_flags.stall_detection_enabled,
)
+
mounts_moving = [
k
for k in moving_axes_in_move_group(move_group)
@@ -714,44 +811,66 @@ async def home_tip_motors(
runner = MoveGroupRunner(
move_groups=[move_group],
- ignore_stalls=True if not ff.stall_detection_enabled() else False,
+ ignore_stalls=not self._feature_flags.stall_detection_enabled,
)
- positions = await runner.run(can_messenger=self._messenger)
- if NodeId.pipette_left in positions:
- self._gear_motor_position = {
- NodeId.pipette_left: positions[NodeId.pipette_left].motor_position
- }
- else:
- log.debug("no position returned from NodeId.pipette_left")
+ try:
+ positions = await runner.run(can_messenger=self._messenger)
+ if NodeId.pipette_left in positions:
+ self._gear_motor_position = {
+ NodeId.pipette_left: positions[NodeId.pipette_left].motor_position
+ }
+ else:
+ log.debug("no position returned from NodeId.pipette_left")
+ self._gear_motor_position = {}
+ except Exception as e:
+ log.error("Clearing tip motor position due to failed movement")
+ self._gear_motor_position = {}
+ raise e
async def tip_action(
- self,
- moves: List[Move[Axis]],
+ self, origin: Dict[Axis, float], targets: List[Tuple[Dict[Axis, float], float]]
) -> None:
- move_group = create_tip_action_group(moves, [NodeId.pipette_left], "clamp")
+ move_targets = [
+ MoveTarget.build(target_pos, speed) for target_pos, speed in targets
+ ]
+ _, moves = self._move_manager.plan_motion(
+ origin=origin, target_list=move_targets
+ )
+ move_group = create_tip_action_group(moves[0], [NodeId.pipette_left], "clamp")
runner = MoveGroupRunner(
move_groups=[move_group],
- ignore_stalls=True if not ff.stall_detection_enabled() else False,
+ ignore_stalls=not self._feature_flags.stall_detection_enabled,
)
- positions = await runner.run(can_messenger=self._messenger)
- if NodeId.pipette_left in positions:
- self._gear_motor_position = {
- NodeId.pipette_left: positions[NodeId.pipette_left].motor_position
- }
- else:
- log.debug("no position returned from NodeId.pipette_left")
+ try:
+ positions = await runner.run(can_messenger=self._messenger)
+ if NodeId.pipette_left in positions:
+ self._gear_motor_position = {
+ NodeId.pipette_left: positions[NodeId.pipette_left].motor_position
+ }
+ else:
+ log.debug("no position returned from NodeId.pipette_left")
+ self._gear_motor_position = {}
+ except Exception as e:
+ log.error("Clearing tip motor position due to failed movement")
+ self._gear_motor_position = {}
+ raise e
@requires_update
@requires_estop
async def gripper_grip_jaw(
self,
duty_cycle: float,
- stop_condition: MoveStopCondition = MoveStopCondition.none,
+ expected_displacement: float, # not used on real hardware
+ stop_condition: HWStopCondition = HWStopCondition.none,
stay_engaged: bool = True,
) -> None:
move_group = create_gripper_jaw_grip_group(
- duty_cycle, stop_condition, stay_engaged
+ duty_cycle, MoveStopCondition[stop_condition.name], stay_engaged
)
runner = MoveGroupRunner(move_groups=[move_group])
positions = await runner.run(can_messenger=self._messenger)
@@ -859,7 +978,7 @@ def _generate_attached_instrs(
)
async def get_attached_instruments(
- self, expected: Dict[OT3Mount, PipetteName]
+ self, expected: Mapping[OT3Mount, PipetteName]
) -> Dict[OT3Mount, OT3AttachedInstruments]:
"""Get attached instruments.
@@ -938,8 +1057,8 @@ async def set_hold_current(self, axis_currents: OT3AxisMap[float]) -> None:
@asynccontextmanager
async def motor_current(
self,
- run_currents: OT3AxisMap[float] = {},
- hold_currents: OT3AxisMap[float] = {},
+ run_currents: Optional[OT3AxisMap[float]] = None,
+ hold_currents: Optional[OT3AxisMap[float]] = None,
) -> AsyncIterator[None]:
"""Update and restore current."""
assert self._current_settings
@@ -982,13 +1101,36 @@ async def restore_z_r_run_current(self) -> AsyncIterator[None]:
{Axis.Z_R: high_throughput_settings[Axis.Z_R].run_current}
)
+ @asynccontextmanager
+ async def increase_z_l_hold_current(self) -> AsyncIterator[None]:
+ """
+ Temporarily increase the hold current when engaging the Z_L axis
+ while the 96-channel is attached
+ """
+ assert self._current_settings
+ high_throughput_settings = deepcopy(self._current_settings)
+ await self.set_hold_current(
+ {Axis.Z_L: high_throughput_settings[Axis.Z_L].run_current}
+ )
+ try:
+ yield
+ finally:
+ await self.set_hold_current(
+ {Axis.Z_L: high_throughput_settings[Axis.Z_L].hold_current}
+ )
+
@staticmethod
def _build_event_watcher() -> aionotify.Watcher:
watcher = aionotify.Watcher()
watcher.watch(
alias="modules",
path="/dev",
- flags=(aionotify.Flags.CREATE | aionotify.Flags.DELETE),
+ flags=(
+ aionotify.Flags.CREATE
+ | aionotify.Flags.DELETE
+ | aionotify.Flags.MOVED_FROM
+ | aionotify.Flags.MOVED_TO
+ ),
)
return watcher
@@ -999,9 +1141,10 @@ async def _handle_watch_event(self) -> None:
log.debug("incomplete read error when quitting watcher")
return
if event is not None:
+ flags = aionotify.Flags.parse(event.flags)
+ log.debug(f"aionotify: {flags} {event.name}")
if "ot_module" in event.name:
event_name = event.name
- flags = aionotify.Flags.parse(event.flags)
event_description = AionotifyEvent.build(event_name, flags)
await self.module_controls.handle_module_appearance(event_description)
@@ -1017,37 +1160,58 @@ async def watch(self, loop: asyncio.AbstractEventLoop) -> None:
def axis_bounds(self) -> OT3AxisMap[Tuple[float, float]]:
"""Get the axis bounds."""
# TODO (AL, 2021-11-18): The bounds need to be defined
- phony_bounds = (0, 10000)
return {
- Axis.Z_L: phony_bounds,
- Axis.Z_R: phony_bounds,
- Axis.P_L: phony_bounds,
- Axis.P_R: phony_bounds,
- Axis.X: phony_bounds,
- Axis.Y: phony_bounds,
- Axis.Z_G: phony_bounds,
- Axis.Q: phony_bounds,
+ Axis.Z_L: (0, 300),
+ Axis.Z_R: (0, 300),
+ Axis.P_L: (0, 200),
+ Axis.P_R: (0, 200),
+ Axis.X: (0, 550),
+ Axis.Y: (0, 550),
+ Axis.Z_G: (0, 300),
+ Axis.Q: (0, 200),
}
def engaged_axes(self) -> OT3AxisMap[bool]:
"""Get engaged axes."""
- return {}
+ return self._engaged_axes
+
+ async def update_engaged_axes(self) -> None:
+ """Update engaged axes."""
+ motor_nodes = self._motor_nodes()
+ results = await get_motor_enabled(self._messenger, motor_nodes)
+ for node, status in results.items():
+ self._engaged_axes[node_to_axis(node)] = status
+
+ async def is_motor_engaged(self, axis: Axis) -> bool:
+ node = axis_to_node(axis)
+ result = await get_motor_enabled(self._messenger, {node})
+ engaged = result[node]
+ self._engaged_axes.update({axis: engaged})
+ return engaged
async def disengage_axes(self, axes: List[Axis]) -> None:
"""Disengage axes."""
if Axis.Q in axes:
await set_disable_tip_motor(self._messenger, {axis_to_node(Axis.Q)})
- nodes = {axis_to_node(ax) for ax in axes if ax is not Axis.Q}
- if len(nodes) > 0:
- await set_disable_motor(self._messenger, nodes)
+ self._engaged_axes[Axis.Q] = False
+ axes = [ax for ax in axes if ax is not Axis.Q]
+
+ if len(axes) > 0:
+ await set_disable_motor(self._messenger, {axis_to_node(ax) for ax in axes})
+ for ax in axes:
+ self._engaged_axes[ax] = False
async def engage_axes(self, axes: List[Axis]) -> None:
"""Engage axes."""
if Axis.Q in axes:
await set_enable_tip_motor(self._messenger, {axis_to_node(Axis.Q)})
- nodes = {axis_to_node(ax) for ax in axes if ax is not Axis.Q}
- if len(nodes) > 0:
- await set_enable_motor(self._messenger, nodes)
+ self._engaged_axes[Axis.Q] = True
+ axes = [ax for ax in axes if ax is not Axis.Q]
+
+ if len(axes) > 0:
+ await set_enable_motor(self._messenger, {axis_to_node(ax) for ax in axes})
+ for ax in axes:
+ self._engaged_axes[ax] = True
@requires_update
async def set_lights(self, button: Optional[bool], rails: Optional[bool]) -> None:
@@ -1085,7 +1249,6 @@ async def probe(self, axis: Axis, distance: float) -> OT3AxisMap[float]:
async def clean_up(self) -> None:
"""Clean up."""
-
try:
loop = asyncio.get_event_loop()
except RuntimeError:
@@ -1094,6 +1257,15 @@ async def clean_up(self) -> None:
if hasattr(self, "_event_watcher"):
if loop.is_running() and self._event_watcher:
self._event_watcher.close()
+
+ messenger = getattr(self, "_messenger", None)
+ if messenger:
+ await messenger.stop()
+
+ usb_messenger = getattr(self, "_usb_messenger", None)
+ if usb_messenger:
+ await usb_messenger.stop()
+
return None
@staticmethod
@@ -1139,7 +1311,7 @@ def _axis_map_to_present_nodes(
@asynccontextmanager
async def _monitor_overpressure(self, mounts: List[NodeId]) -> AsyncIterator[None]:
msg = "The pressure sensor on the {} mount has exceeded operational limits."
- if ff.overpressure_detection_enabled() and mounts:
+ if self._feature_flags.overpressure_detection_enabled and mounts:
tools_with_id = map_pipette_type_to_sensor_id(
mounts, self._subsystem_manager.device_info
)
@@ -1178,30 +1350,56 @@ async def liquid_probe(
mount_speed: float,
plunger_speed: float,
threshold_pascals: float,
- log_pressure: bool = True,
+ output_option: OutputOptions = OutputOptions.can_bus_only,
+ data_files: Optional[Dict[InstrumentProbeType, str]] = None,
auto_zero_sensor: bool = True,
num_baseline_reads: int = 10,
probe: InstrumentProbeType = InstrumentProbeType.PRIMARY,
- ) -> Dict[NodeId, float]:
+ ) -> float:
+ if output_option == OutputOptions.sync_buffer_to_csv:
+ assert (
+ self._subsystem_manager.device_info[
+ SubSystem.of_mount(mount)
+ ].revision.tertiary
+ == "1"
+ )
head_node = axis_to_node(Axis.by_mount(mount))
tool = sensor_node_for_pipette(OT3Mount(mount.value))
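+ # Decode the requested output destinations from the OutputOptions bit flags.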
+ csv_output = bool(output_option.value & OutputOptions.stream_to_csv.value)
+ sync_buffer_output = bool(
+ output_option.value & OutputOptions.sync_buffer_to_csv.value
+ )
+ can_bus_only_output = bool(
+ output_option.value & OutputOptions.can_bus_only.value
+ )
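+ # Re-key any data file paths from probe type to the sensor id expected by the firmware helper.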
+ data_files_transposed = (
+ None
+ if data_files is None
+ else {
+ sensor_id_for_instrument(probe): data_files[probe]
+ for probe in data_files.keys()
+ }
+ )
positions = await liquid_probe(
- self._messenger,
- tool,
- head_node,
- max_z_distance,
- plunger_speed,
- mount_speed,
- threshold_pascals,
- log_pressure,
- auto_zero_sensor,
- num_baseline_reads,
- sensor_id_for_instrument(probe),
+ messenger=self._messenger,
+ tool=tool,
+ head_node=head_node,
+ max_z_distance=max_z_distance,
+ plunger_speed=plunger_speed,
+ mount_speed=mount_speed,
+ threshold_pascals=threshold_pascals,
+ csv_output=csv_output,
+ sync_buffer_output=sync_buffer_output,
+ can_bus_only_output=can_bus_only_output,
+ data_files=data_files_transposed,
+ auto_zero_sensor=auto_zero_sensor,
+ num_baseline_reads=num_baseline_reads,
+ sensor_id=sensor_id_for_instrument(probe),
)
for node, point in positions.items():
self._position.update({node: point.motor_position})
self._encoder_position.update({node: point.encoder_position})
- return self._position
+ return self._position[axis_to_node(Axis.by_mount(mount))]
async def capacitive_probe(
self,
@@ -1301,9 +1499,6 @@ def _door_listener(msg: BinaryMessageDefinition) -> None:
),
)
- def status_bar_interface(self) -> status_bar.StatusBar:
- return self._status_bar
-
async def build_estop_detector(self) -> bool:
"""Must be called to set up the estop detector & state machine."""
if self._drivers.usb_messenger is None:
@@ -1314,11 +1509,6 @@ async def build_estop_detector(self) -> bool:
self._estop_state_machine.subscribe_to_detector(self._estop_detector)
return True
- @property
- def estop_state_machine(self) -> EstopStateMachine:
- """Accessor for the API to get the state machine, if it exists."""
- return self._estop_state_machine
-
@property
def tip_presence_manager(self) -> TipPresenceManager:
return self._tip_presence_manager
@@ -1331,8 +1521,148 @@ async def update_tip_detector(self, mount: OT3Mount, sensor_count: int) -> None:
async def teardown_tip_detector(self, mount: OT3Mount) -> None:
await self._tip_presence_manager.clear_detector(mount)
- async def get_tip_status(self, mount: OT3Mount) -> TipStateType:
- return await self.tip_presence_manager.get_tip_status(mount)
+ async def get_tip_status(
+ self,
+ mount: OT3Mount,
+ follow_singular_sensor: Optional[InstrumentProbeType] = None,
+ ) -> TipStateType:
+ return await self.tip_presence_manager.get_tip_status(
+ mount, follow_singular_sensor
+ )
def current_tip_state(self, mount: OT3Mount) -> Optional[bool]:
return self.tip_presence_manager.current_tip_state(mount)
+
+ async def set_status_bar_state(self, state: StatusBarState) -> None:
+ await self._status_bar_controller.set_status_bar_state(state)
+
+ async def set_status_bar_enabled(self, enabled: bool) -> None:
+ await self._status_bar_controller.set_enabled(enabled)
+
+ def get_status_bar_state(self) -> StatusBarState:
+ return self._status_bar_controller.get_current_state()
+
+ @property
+ def estop_status(self) -> EstopOverallStatus:
+ return EstopOverallStatus(
+ state=self._estop_state_machine.state,
+ left_physical_state=self._estop_state_machine.get_physical_status(
+ EstopAttachLocation.LEFT
+ ),
+ right_physical_state=self._estop_state_machine.get_physical_status(
+ EstopAttachLocation.RIGHT
+ ),
+ )
+
+ def estop_acknowledge_and_clear(self) -> EstopOverallStatus:
+ """Attempt to acknowledge an Estop event and clear the status.
+
+ Returns the estop status after the clear attempt."""
+ self._estop_state_machine.acknowledge_and_clear()
+ return self.estop_status
+
+ def get_estop_state(self) -> EstopState:
+ return self._estop_state_machine.state
+
+ def add_estop_callback(self, cb: HardwareEventHandler) -> HardwareEventUnsubscriber:
+ return self._estop_state_machine.add_listener(cb)
+
+ def check_gripper_position_within_bounds(
+ self,
+ expected_grip_width: float,
+ grip_width_uncertainty_wider: float,
+ grip_width_uncertainty_narrower: float,
+ jaw_width: float,
+ max_allowed_grip_error: float,
+ hard_limit_lower: float,
+ hard_limit_upper: float,
+ ) -> None:
+ """
+ Check if the gripper is at the expected location.
+
+ While this check may not seem like it belongs in the backend, it lives here
+ because it needs to behave differently when we're simulating.
+ """
+ expected_gripper_position_min = (
+ expected_grip_width - grip_width_uncertainty_narrower
+ )
+ expected_gripper_position_max = (
+ expected_grip_width + grip_width_uncertainty_wider
+ )
+ current_gripper_position = jaw_width
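+ # A jaw width at either hard limit means the jaws closed fully on nothing or could not close at all.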
+ if isclose(current_gripper_position, hard_limit_lower):
+ raise FailedGripperPickupError(
+ message="Failed to grip: jaws all the way closed",
+ details={
+ "failure-type": "jaws-all-the-way-closed",
+ "actual-jaw-width": current_gripper_position,
+ },
+ )
+ if isclose(current_gripper_position, hard_limit_upper):
+ raise FailedGripperPickupError(
+ message="Failed to grip: jaws all the way open",
+ details={
+ "failure-type": "jaws-all-the-way-open",
+ "actual-jaw-width": current_gripper_position,
+ },
+ )
+ if (
+ current_gripper_position - expected_gripper_position_min
+ < -max_allowed_grip_error
+ ):
+ raise FailedGripperPickupError(
+ message="Failed to grip: jaws closed too far",
+ details={
+ "failure-type": "jaws-more-closed-than-expected",
+ "lower-bound-labware-width": expected_grip_width
+ - grip_width_uncertainty_narrower,
+ "actual-jaw-width": current_gripper_position,
+ },
+ )
+ if (
+ current_gripper_position - expected_gripper_position_max
+ > max_allowed_grip_error
+ ):
+ raise FailedGripperPickupError(
+ message="Failed to grip: jaws could not close far enough",
+ details={
+ "failure-type": "jaws-more-open-than-expected",
+ "upper-bound-labware-width": expected_grip_width
+ - grip_width_uncertainty_narrower,
+ "actual-jaw-width": current_gripper_position,
+ },
+ )
+
+ async def set_hepa_fan_state(self, fan_on: bool, duty_cycle: int) -> bool:
+ return await set_hepa_fan_state_fw(self._messenger, fan_on, duty_cycle)
+
+ async def get_hepa_fan_state(self) -> Optional[HepaFanState]:
+ res = await get_hepa_fan_state_fw(self._messenger)
+ return (
+ HepaFanState(
+ fan_on=res.fan_on,
+ duty_cycle=res.duty_cycle,
+ )
+ if res
+ else None
+ )
+
+ async def set_hepa_uv_state(self, light_on: bool, uv_duration_s: int) -> bool:
+ return await set_hepa_uv_state_fw(self._messenger, light_on, uv_duration_s)
+
+ async def get_hepa_uv_state(self) -> Optional[HepaUVState]:
+ res = await get_hepa_uv_state_fw(self._messenger)
+ return (
+ HepaUVState(
+ light_on=res.uv_light_on,
+ uv_duration_s=res.uv_duration_s,
+ remaining_time_s=res.remaining_time_s,
+ )
+ if res
+ else None
+ )
+
+ def _update_tip_state(self, mount: OT3Mount, status: bool) -> None:
+ """This is something we only use in the simulator.
+ It is required so that PE simulations using ot3api don't break."""
+ pass
diff --git a/api/src/opentrons/hardware_control/backends/ot3simulator.py b/api/src/opentrons/hardware_control/backends/ot3simulator.py
index da111472c19..e0c8fe1bc89 100644
--- a/api/src/opentrons/hardware_control/backends/ot3simulator.py
+++ b/api/src/opentrons/hardware_control/backends/ot3simulator.py
@@ -17,38 +17,16 @@
Mapping,
)
-from opentrons.config.types import OT3Config, GantryLoad
+from opentrons.config.types import OT3Config, GantryLoad, OutputOptions
from opentrons.config import gripper_config
-from .ot3utils import (
- axis_convert,
- create_move_group,
- get_current_settings,
- node_to_axis,
- axis_to_node,
- create_gripper_jaw_hold_group,
- create_gripper_jaw_grip_group,
- create_gripper_jaw_home_group,
- NODEID_SUBSYSTEM,
- motor_nodes,
- target_to_subsystem,
-)
-from opentrons_hardware.firmware_bindings.constants import (
- NodeId,
- SensorId,
- FirmwareTarget,
-)
-from opentrons_hardware.hardware_control.motion_planning import (
- Move,
- Coordinates,
-)
-from opentrons.hardware_control.estop_state import EstopStateMachine
-from opentrons_hardware.drivers.eeprom import EEPROMData
from opentrons.hardware_control.module_control import AttachedModulesControl
from opentrons.hardware_control import modules
from opentrons.hardware_control.types import (
BoardRevision,
Axis,
+ HepaFanState,
+ HepaUVState,
OT3Mount,
OT3AxisMap,
CurrentConfig,
@@ -60,9 +38,14 @@
SubSystemState,
TipStateType,
GripperJawState,
+ HardwareFeatureFlags,
+ StatusBarState,
+ EstopOverallStatus,
+ EstopState,
+ EstopPhysicalStatus,
+ HardwareEventHandler,
+ HardwareEventUnsubscriber,
)
-from opentrons_hardware.hardware_control.motion import MoveStopCondition
-from opentrons_hardware.hardware_control import status_bar
from opentrons_shared_data.pipette.dev_types import PipetteName, PipetteModel
from opentrons_shared_data.pipette import (
@@ -71,7 +54,6 @@
)
from opentrons_shared_data.gripper.gripper_definition import GripperModel
from opentrons.hardware_control.dev_types import (
- InstrumentHardwareConfigs,
PipetteSpec,
GripperSpec,
AttachedPipette,
@@ -79,25 +61,54 @@
OT3AttachedInstruments,
)
from opentrons.util.async_helpers import ensure_yield
+from .types import HWStopCondition
+from .flex_protocol import FlexBackend
+
log = logging.getLogger(__name__)
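+ # Simulator-only lookup from motion axes to the subsystem that nominally drives them.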
+AXIS_TO_SUBSYSTEM = {
+ Axis.X: SubSystem.gantry_x,
+ Axis.Y: SubSystem.gantry_y,
+ Axis.Z_L: SubSystem.head,
+ Axis.Z_R: SubSystem.head,
+ Axis.Z_G: SubSystem.gripper,
+ Axis.G: SubSystem.gripper,
+ Axis.P_L: SubSystem.pipette_left,
+ Axis.P_R: SubSystem.pipette_right,
+}
+
+
+def coalesce_move_segments(
+ origin: Dict[Axis, float], targets: List[Dict[Axis, float]]
+) -> Dict[Axis, float]:
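+ """Add each target's per-axis values onto the origin and return the accumulated position."""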
+ for target in targets:
+ for axis, increment in target.items():
+ origin[axis] += increment
+ return origin
-class OT3Simulator:
+
+def axis_pad(positions: Dict[Axis, float], default_value: float) -> Dict[Axis, float]:
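+ """Return a map covering every node axis, filling any missing axis with default_value."""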
+ return {ax: positions.get(ax, default_value) for ax in Axis.node_axes()}
+
+
+class OT3Simulator(FlexBackend):
"""OT3 Hardware Controller Backend."""
- _position: Dict[NodeId, float]
- _encoder_position: Dict[NodeId, float]
- _motor_status: Dict[NodeId, MotorStatus]
+ _position: Dict[Axis, float]
+ _encoder_position: Dict[Axis, float]
+ _motor_status: Dict[Axis, MotorStatus]
+ _engaged_axes: Dict[Axis, bool]
@classmethod
async def build(
cls,
attached_instruments: Dict[OT3Mount, Dict[str, Optional[str]]],
- attached_modules: List[str],
+ attached_modules: Dict[str, List[str]],
config: OT3Config,
loop: asyncio.AbstractEventLoop,
strict_attached_instruments: bool = True,
+ feature_flags: Optional[HardwareFeatureFlags] = None,
) -> OT3Simulator:
"""Create the OT3Simulator instance.
@@ -113,15 +124,17 @@ async def build(
config,
loop,
strict_attached_instruments,
+ feature_flags,
)
def __init__(
self,
attached_instruments: Dict[OT3Mount, Dict[str, Optional[str]]],
- attached_modules: List[str],
+ attached_modules: Dict[str, List[str]],
config: OT3Config,
loop: asyncio.AbstractEventLoop,
strict_attached_instruments: bool = True,
+ feature_flags: Optional[HardwareFeatureFlags] = None,
) -> None:
"""Construct.
@@ -136,8 +149,9 @@ def __init__(
self._update_required = False
self._initialized = False
self._lights = {"button": False, "rails": False}
- self._estop_state_machine = EstopStateMachine(detector=None)
- self._gear_motor_position: Dict[NodeId, float] = {}
+ self._gear_motor_position: Dict[Axis, float] = {}
+ self._engaged_axes: Dict[Axis, bool] = {}
+ self._feature_flags = feature_flags or HardwareFeatureFlags()
def _sanitize_attached_instrument(
mount: OT3Mount, passed_ai: Optional[Dict[str, Optional[str]]] = None
@@ -177,26 +191,53 @@ def _sanitize_attached_instrument(
self._position = self._get_home_position()
self._encoder_position = self._get_home_position()
self._motor_status = {}
- nodes = set((NodeId.head_l, NodeId.head_r, NodeId.gantry_x, NodeId.gantry_y))
+ axes = set((Axis.Z_L, Axis.Z_R, Axis.X, Axis.Y))
if self._attached_instruments[OT3Mount.LEFT].get("model", None):
- nodes.add(NodeId.pipette_left)
+ axes.add(Axis.P_L)
if self._attached_instruments[OT3Mount.RIGHT].get("model", None):
- nodes.add(NodeId.pipette_right)
+ axes.add(Axis.P_R)
if self._attached_instruments.get(
OT3Mount.GRIPPER
) and self._attached_instruments[OT3Mount.GRIPPER].get("model", None):
- nodes.add(NodeId.gripper)
- self._present_nodes = nodes
+ axes.update((Axis.G, Axis.Z_G))
+ self._present_axes = axes
self._current_settings: Optional[OT3AxisMap[CurrentConfig]] = None
self._sim_jaw_state = GripperJawState.HOMED_READY
self._sim_tip_state: Dict[OT3Mount, Optional[bool]] = {
mount: False if self._attached_instruments[mount] else None
for mount in [OT3Mount.LEFT, OT3Mount.RIGHT]
}
+ self._sim_gantry_load = GantryLoad.LOW_THROUGHPUT
+ self._sim_status_bar_state = StatusBarState.IDLE
+ self._sim_estop_state = EstopState.DISENGAGED
+ self._sim_estop_left_state = EstopPhysicalStatus.DISENGAGED
+ self._sim_estop_right_state = EstopPhysicalStatus.DISENGAGED
async def get_serial_number(self) -> Optional[str]:
return "simulator"
+ @asynccontextmanager
+ async def restore_system_constraints(self) -> AsyncIterator[None]:
+ log.debug("Simulating saving system constraints")
+ try:
+ yield
+ finally:
+ log.debug("Simulating restoring system constraints")
+
+ def update_constraints_for_gantry_load(self, gantry_load: GantryLoad) -> None:
+ self._sim_gantry_load = gantry_load
+
+ def update_constraints_for_calibration_with_gantry_load(
+ self,
+ gantry_load: GantryLoad,
+ ) -> None:
+ self._sim_gantry_load = gantry_load
+
+ def update_constraints_for_plunger_acceleration(
+ self, mount: OT3Mount, acceleration: float, gantry_load: GantryLoad
+ ) -> None:
+ self._sim_gantry_load = gantry_load
+
@property
def initialized(self) -> bool:
"""True when the hardware controller has initialized and is ready."""
@@ -207,12 +248,8 @@ def initialized(self, value: bool) -> None:
self._initialized = value
@property
- def eeprom_data(self) -> EEPROMData:
- return EEPROMData()
-
- @property
- def gear_motor_position(self) -> Dict[NodeId, float]:
- return self._gear_motor_position
+ def gear_motor_position(self) -> Optional[float]:
+ return self._gear_motor_position.get(Axis.Q, None)
@property
def board_revision(self) -> BoardRevision:
@@ -233,11 +270,13 @@ def module_controls(self, module_controls: AttachedModulesControl) -> None:
@ensure_yield
async def update_to_default_current_settings(self, gantry_load: GantryLoad) -> None:
- self._current_settings = get_current_settings(
- self._configuration.current_settings, gantry_load
- )
+ self._gantry_load = gantry_load
- def _handle_motor_status_update(self, response: Dict[NodeId, float]) -> None:
+ def update_feature_flags(self, feature_flags: HardwareFeatureFlags) -> None:
+ """Update the hardware feature flags used by the hardware controller."""
+ self._feature_flags = feature_flags
+
+ def _handle_motor_status_update(self, response: Dict[Axis, float]) -> None:
self._position.update(response)
self._encoder_position.update(response)
self._motor_status.update(
@@ -246,27 +285,27 @@ def _handle_motor_status_update(self, response: Dict[NodeId, float]) -> None:
@ensure_yield
async def update_motor_status(self) -> None:
- """Retreieve motor and encoder status and position from all present nodes"""
+ """Retreieve motor and encoder status and position from all present devices"""
if not self._motor_status:
# Simulate condition at boot, status would not be ok
self._motor_status.update(
- (node, MotorStatus(False, False)) for node in self._present_nodes
+ (axis, MotorStatus(False, False)) for axis in self._present_axes
)
else:
self._motor_status.update(
- (node, MotorStatus(True, True)) for node in self._present_nodes
+ (axis, MotorStatus(True, True)) for axis in self._present_axes
)
@ensure_yield
async def update_motor_estimation(self, axes: Sequence[Axis]) -> None:
- """Update motor position estimation for commanded nodes, and update cache of data."""
+ """Update motor position estimation for commanded axes, and update cache of data."""
# Simulate conditions as if there are no stalls, aka do nothing
return None
def _get_motor_status(
self, axes: Sequence[Axis]
) -> Dict[Axis, Optional[MotorStatus]]:
- return {ax: self._motor_status.get(axis_to_node(ax)) for ax in axes}
+ return {ax: self._motor_status.get(ax) for ax in axes}
def get_invalid_motor_axes(self, axes: Sequence[Axis]) -> List[Axis]:
"""Get axes that currently do not have the motor-ok flag."""
@@ -292,17 +331,11 @@ def check_encoder_status(self, axes: Sequence[Axis]) -> bool:
async def update_position(self) -> OT3AxisMap[float]:
"""Get the current position."""
- return axis_convert(self._position, 0.0)
+ return axis_pad(self._position, 0.0)
async def update_encoder_position(self) -> OT3AxisMap[float]:
"""Get the encoder current position."""
- return axis_convert(self._encoder_position, 0.0)
-
- @asynccontextmanager
- async def monitor_overpressure(
- self, mount: OT3Mount, sensor_id: SensorId = SensorId.S0
- ) -> AsyncIterator[None]:
- yield
+ return axis_pad(self._encoder_position, 0.0)
@ensure_yield
async def liquid_probe(
@@ -312,25 +345,27 @@ async def liquid_probe(
mount_speed: float,
plunger_speed: float,
threshold_pascals: float,
- log_pressure: bool = True,
+ output_format: OutputOptions = OutputOptions.can_bus_only,
+ data_files: Optional[Dict[InstrumentProbeType, str]] = None,
auto_zero_sensor: bool = True,
num_baseline_reads: int = 10,
probe: InstrumentProbeType = InstrumentProbeType.PRIMARY,
- ) -> Dict[NodeId, float]:
-
- head_node = axis_to_node(Axis.by_mount(mount))
+ ) -> float:
+ z_axis = Axis.by_mount(mount)
pos = self._position
- pos[head_node] += max_z_distance
+ pos[z_axis] += max_z_distance
self._position.update(pos)
self._encoder_position.update(pos)
- return self._position
+ return self._position[z_axis]
@ensure_yield
async def move(
self,
- origin: Coordinates[Axis, float],
- moves: List[Move[Axis]],
- stop_condition: MoveStopCondition = MoveStopCondition.none,
+ origin: Dict[Axis, float],
+ target: Dict[Axis, float],
+ speed: Optional[float] = None,
+ stop_condition: HWStopCondition = HWStopCondition.none,
+ nodes_in_moves_only: bool = True,
) -> None:
"""Move to a position.
@@ -343,9 +378,10 @@ async def move(
Returns:
None
"""
- _, final_positions = create_move_group(origin, moves, self._present_nodes)
- self._position.update(final_positions)
- self._encoder_position.update(final_positions)
+ for ax in origin:
+ self._engaged_axes[ax] = True
+ self._position.update(target)
+ self._encoder_position.update(target)
@ensure_yield
async def home(
@@ -360,30 +396,32 @@ async def home(
Homed position.
"""
if axes:
- homed = [axis_to_node(a) for a in axes]
+ homed = axes
else:
- homed = list(self._position.keys())
+ homed = list(self._position.keys())
for h in homed:
self._position[h] = self._get_home_position()[h]
self._motor_status[h] = MotorStatus(True, True)
- return axis_convert(self._position, 0.0)
+ self._engaged_axes[h] = True
+ return axis_pad(self._position, 0.0)
@ensure_yield
async def gripper_grip_jaw(
self,
duty_cycle: float,
- stop_condition: MoveStopCondition = MoveStopCondition.none,
+ expected_displacement: float,
+ stop_condition: HWStopCondition = HWStopCondition.none,
stay_engaged: bool = True,
) -> None:
"""Move gripper inward."""
- _ = create_gripper_jaw_grip_group(duty_cycle, stop_condition, stay_engaged)
self._sim_jaw_state = GripperJawState.GRIPPING
+ self._encoder_position[Axis.G] = expected_displacement
@ensure_yield
async def gripper_home_jaw(self, duty_cycle: float) -> None:
"""Move gripper outward."""
- _ = create_gripper_jaw_home_group(duty_cycle)
- self._motor_status[NodeId.gripper_g] = MotorStatus(True, True)
+ self._motor_status[Axis.G] = MotorStatus(True, True)
+ self._encoder_position[Axis.G] = self._get_home_position()[Axis.G]
self._sim_jaw_state = GripperJawState.HOMED_READY
@ensure_yield
@@ -391,8 +429,7 @@ async def gripper_hold_jaw(
self,
encoder_position_um: int,
) -> None:
- _ = create_gripper_jaw_hold_group(encoder_position_um)
- self._encoder_position[NodeId.gripper_g] = encoder_position_um / 1000.0
+ self._encoder_position[Axis.G] = encoder_position_um / 1000.0
self._sim_jaw_state = GripperJawState.HOLDING
async def get_jaw_state(self) -> GripperJawState:
@@ -400,10 +437,12 @@ async def get_jaw_state(self) -> GripperJawState:
return self._sim_jaw_state
async def tip_action(
- self,
- moves: List[Move[Axis]],
+ self, origin: Dict[Axis, float], targets: List[Tuple[Dict[Axis, float], float]]
) -> None:
- pass
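+ # Simulate the gear motor move by summing the commanded segments onto the origin position.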
+ self._gear_motor_position.update(
+ coalesce_move_segments(origin, [target[0] for target in targets])
+ )
+ await asyncio.sleep(0)
async def home_tip_motors(
self,
@@ -416,7 +455,7 @@ async def home_tip_motors(
def _attached_to_mount(
self, mount: OT3Mount, expected_instr: Optional[PipetteName]
) -> OT3AttachedInstruments:
- init_instr = self._attached_instruments.get(mount, {"model": None, "id": None}) # type: ignore
+ init_instr = self._attached_instruments.get(mount, {"model": None, "id": None})
if mount is OT3Mount.GRIPPER:
return self._attached_gripper_to_mount(cast(GripperSpec, init_instr))
return self._attached_pipette_to_mount(
@@ -467,13 +506,20 @@ def _attached_pipette_to_mount(
),
"id": None,
}
- if found_model and expected_instr or found_model:
+ if found_model and init_instr["id"] is not None:
# Instrument detected matches instrument expected (note:
# "instrument detected" means passed as an argument to the
# constructor of this class)
# OR Instrument detected and no expected instrument specified
- converted_name = pipette_load_name.convert_pipette_model(found_model)
+
+ found_model_version = ""
+ if found_model.find("flex") > -1:
+ found_model = found_model.replace("_flex", "") # type: ignore
+ found_model_version = f"{init_instr['id'][4]}.{init_instr['id'][5]}"
+ converted_name = pipette_load_name.convert_pipette_model(
+ found_model, found_model_version
+ )
return {
"config": load_pipette_data.load_definition(
converted_name.pipette_type,
@@ -534,8 +580,8 @@ async def set_active_current(self, axis_currents: OT3AxisMap[float]) -> None:
@asynccontextmanager
async def motor_current(
self,
- run_currents: OT3AxisMap[float] = {},
- hold_currents: OT3AxisMap[float] = {},
+ run_currents: Optional[OT3AxisMap[float]] = None,
+ hold_currents: Optional[OT3AxisMap[float]] = None,
) -> AsyncIterator[None]:
"""Save the current."""
yield
@@ -548,12 +594,26 @@ async def restore_z_r_run_current(self) -> AsyncIterator[None]:
"""
yield
+ @asynccontextmanager
+ async def increase_z_l_hold_current(self) -> AsyncIterator[None]:
+ """
+ Temporarily increase the hold current when engaging the Z_L axis
+ while the 96-channel is attached
+ """
+ yield
+
@ensure_yield
async def watch(self, loop: asyncio.AbstractEventLoop) -> None:
- new_mods_at_ports = [
- modules.ModuleAtPort(port=f"/dev/ot_module_sim_{mod}{str(idx)}", name=mod)
- for idx, mod in enumerate(self._stubbed_attached_modules)
- ]
+ new_mods_at_ports = []
+ for mod, serials in self._stubbed_attached_modules.items():
+ for serial in serials:
+ new_mods_at_ports.append(
+ modules.SimulatingModuleAtPort(
+ port=f"/dev/ot_module_sim_{mod}{str(serial)}",
+ name=mod,
+ serial_number=serial,
+ )
+ )
await self.module_controls.register_modules(new_mods_at_ports=new_mods_at_ports)
@property
@@ -575,18 +635,10 @@ def axis_bounds(self) -> OT3AxisMap[Tuple[float, float]]:
@property
def fw_version(self) -> Dict[SubSystem, int]:
"""Get the firmware version."""
- return {
- NODEID_SUBSYSTEM[node.application_for()]: 0 for node in self._present_nodes
- }
+ return {AXIS_TO_SUBSYSTEM[axis]: 0 for axis in self._present_axes}
def axis_is_present(self, axis: Axis) -> bool:
- try:
- return axis_to_node(axis) in motor_nodes(
- cast(Set[FirmwareTarget], self._present_nodes)
- )
- except KeyError:
- # Currently unhandled axis
- return False
+ return axis in self._present_axes
@property
def update_required(self) -> bool:
@@ -611,16 +663,29 @@ async def update_firmware(
def engaged_axes(self) -> OT3AxisMap[bool]:
"""Get engaged axes."""
- return {}
+ return self._engaged_axes
+
+ async def update_engaged_axes(self) -> None:
+ """Update engaged axes."""
+ return None
+
+ async def is_motor_engaged(self, axis: Axis) -> bool:
+ if axis not in self._engaged_axes.keys():
+ return False
+ return self._engaged_axes[axis]
@ensure_yield
async def disengage_axes(self, axes: List[Axis]) -> None:
"""Disengage axes."""
+ for ax in axes:
+ self._engaged_axes.update({ax: False})
return None
@ensure_yield
async def engage_axes(self, axes: List[Axis]) -> None:
"""Engage axes."""
+ for ax in axes:
+ self._engaged_axes.update({ax: True})
return None
@ensure_yield
@@ -658,31 +723,22 @@ async def clean_up(self) -> None:
"""Clean up."""
pass
- @ensure_yield
- async def configure_mount(
- self, mount: OT3Mount, config: InstrumentHardwareConfigs
- ) -> None:
- """Configure a mount."""
- return None
-
@staticmethod
- def _get_home_position() -> Dict[NodeId, float]:
+ def _get_home_position() -> Dict[Axis, float]:
return {
- NodeId.head_l: 0,
- NodeId.head_r: 0,
- NodeId.gantry_x: 0,
- NodeId.gantry_y: 0,
- NodeId.pipette_left: 0,
- NodeId.pipette_right: 0,
- NodeId.gripper_z: 0,
- NodeId.gripper_g: 0,
+ Axis.Z_L: 0,
+ Axis.Z_R: 0,
+ Axis.X: 0,
+ Axis.Y: 0,
+ Axis.P_L: 0,
+ Axis.P_R: 0,
+ Axis.Z_G: 0,
+ Axis.G: 0,
}
@staticmethod
def home_position() -> OT3AxisMap[float]:
- return {
- node_to_axis(k): v for k, v in OT3Simulator._get_home_position().items()
- }
+ return OT3Simulator._get_home_position()
@ensure_yield
async def capacitive_probe(
@@ -694,7 +750,7 @@ async def capacitive_probe(
sensor_threshold_pf: float,
probe: InstrumentProbeType,
) -> bool:
- self._position[axis_to_node(moving)] += distance_mm
+ self._position[moving] += distance_mm
return True
@ensure_yield
@@ -706,21 +762,13 @@ async def capacitive_pass(
speed_mm_per_s: float,
probe: InstrumentProbeType,
) -> List[float]:
- self._position[axis_to_node(moving)] += distance_mm
+ self._position[moving] += distance_mm
return []
- @ensure_yield
- async def connect_usb_to_rear_panel(self) -> None:
- """Connect to rear panel over usb."""
- return None
-
- def status_bar_interface(self) -> status_bar.StatusBar:
- return status_bar.StatusBar(None)
-
@property
def subsystems(self) -> Dict[SubSystem, SubSystemState]:
return {
- target_to_subsystem(target): SubSystemState(
+ AXIS_TO_SUBSYSTEM[axis]: SubSystemState(
ok=True,
current_fw_version=1,
next_fw_version=1,
@@ -729,15 +777,14 @@ def subsystems(self) -> Dict[SubSystem, SubSystemState]:
pcba_revision="A1",
update_state=None,
)
- for target in self._present_nodes
+ for axis in self._present_axes
}
- @property
- def estop_state_machine(self) -> EstopStateMachine:
- """Return an estop state machine locked in the "disengaged" state."""
- return self._estop_state_machine
-
- async def get_tip_status(self, mount: OT3Mount) -> TipStateType:
+ async def get_tip_status(
+ self,
+ mount: OT3Mount,
+ follow_singular_sensor: Optional[InstrumentProbeType] = None,
+ ) -> TipStateType:
return TipStateType(self._sim_tip_state[mount])
def current_tip_state(self, mount: OT3Mount) -> Optional[bool]:
@@ -748,3 +795,67 @@ async def update_tip_detector(self, mount: OT3Mount, sensor_count: int) -> None:
async def teardown_tip_detector(self, mount: OT3Mount) -> None:
pass
+
+ async def set_status_bar_state(self, state: StatusBarState) -> None:
+ self._sim_status_bar_state = state
+ await asyncio.sleep(0)
+
+ async def set_status_bar_enabled(self, enabled: bool) -> None:
+ await asyncio.sleep(0)
+
+ def get_status_bar_state(self) -> StatusBarState:
+ return self._sim_status_bar_state
+
+ @property
+ def estop_status(self) -> EstopOverallStatus:
+ return EstopOverallStatus(
+ state=self._sim_estop_state,
+ left_physical_state=self._sim_estop_left_state,
+ right_physical_state=self._sim_estop_right_state,
+ )
+
+ def estop_acknowledge_and_clear(self) -> EstopOverallStatus:
+ """Attempt to acknowledge an Estop event and clear the status.
+
+ Returns the estop status after the clear attempt."""
+ self._sim_estop_state = EstopState.DISENGAGED
+ self._sim_estop_left_state = EstopPhysicalStatus.DISENGAGED
+ self._sim_estop_right_state = EstopPhysicalStatus.DISENGAGED
+ return self.estop_status
+
+ def get_estop_state(self) -> EstopState:
+ return self._sim_estop_state
+
+ def add_estop_callback(self, cb: HardwareEventHandler) -> HardwareEventUnsubscriber:
+ return lambda: None
+
+ def check_gripper_position_within_bounds(
+ self,
+ expected_grip_width: float,
+ grip_width_uncertainty_wider: float,
+ grip_width_uncertainty_narrower: float,
+ jaw_width: float,
+ max_allowed_grip_error: float,
+ hard_limit_lower: float,
+ hard_limit_upper: float,
+ ) -> None:
+ # This is a (pretty bad) simulation of the gripper actually gripping something,
+ # but it should work.
+ self._encoder_position[Axis.G] = (hard_limit_upper - jaw_width) / 2
+
+ async def set_hepa_fan_state(self, fan_on: bool, duty_cycle: int) -> bool:
+ return False
+
+ async def get_hepa_fan_state(self) -> Optional[HepaFanState]:
+ return None
+
+ async def set_hepa_uv_state(self, light_on: bool, timeout_s: int) -> bool:
+ return False
+
+ async def get_hepa_uv_state(self) -> Optional[HepaUVState]:
+ return None
+
+ def _update_tip_state(self, mount: OT3Mount, status: bool) -> None:
+ """This is something we only use in the simulator.
+ It is required so that PE simulations using ot3api don't break."""
+ self._sim_tip_state[mount] = status
diff --git a/api/src/opentrons/hardware_control/backends/ot3utils.py b/api/src/opentrons/hardware_control/backends/ot3utils.py
index 2b1d50f5ade..a9108c2365e 100644
--- a/api/src/opentrons/hardware_control/backends/ot3utils.py
+++ b/api/src/opentrons/hardware_control/backends/ot3utils.py
@@ -80,6 +80,7 @@
SubSystem.pipette_left: NodeId.pipette_left,
SubSystem.pipette_right: NodeId.pipette_right,
SubSystem.gripper: NodeId.gripper,
+ SubSystem.hepa_uv: NodeId.hepa_uv,
}
NODEID_SUBSYSTEM = {node: subsystem for subsystem, node in SUBSYSTEM_NODEID.items()}
@@ -105,16 +106,7 @@ def axis_nodes() -> List["NodeId"]:
def node_axes() -> List[Axis]:
- return [
- Axis.X,
- Axis.Y,
- Axis.Z_L,
- Axis.Z_R,
- Axis.P_L,
- Axis.P_R,
- Axis.Z_G,
- Axis.G,
- ]
+ return Axis.node_axes()
def home_axes() -> List[Axis]:
@@ -359,8 +351,13 @@ def motor_nodes(devices: Set[FirmwareTarget]) -> Set[NodeId]:
NodeId.head_bootloader,
NodeId.gripper_bootloader,
}
+ hepa_uv_nodes = {
+ NodeId.hepa_uv,
+ NodeId.hepa_uv_bootloader,
+ }
# remove any bootloader nodes
motor_nodes -= bootloader_nodes
+ motor_nodes -= hepa_uv_nodes
# filter out usb nodes
return {NodeId(target) for target in motor_nodes if target in NodeId}
@@ -547,6 +544,7 @@ def sensor_node_for_pipette(mount: OT3Mount) -> PipetteProbeTarget:
_instr_sensor_id_lookup: Dict[InstrumentProbeType, SensorId] = {
InstrumentProbeType.PRIMARY: SensorId.S0,
InstrumentProbeType.SECONDARY: SensorId.S1,
+ InstrumentProbeType.BOTH: SensorId.BOTH,
}
diff --git a/api/src/opentrons/hardware_control/backends/simulator.py b/api/src/opentrons/hardware_control/backends/simulator.py
index d8bca2db353..4066afa4bb5 100644
--- a/api/src/opentrons/hardware_control/backends/simulator.py
+++ b/api/src/opentrons/hardware_control/backends/simulator.py
@@ -49,7 +49,7 @@ class Simulator:
async def build(
cls,
attached_instruments: Dict[types.Mount, Dict[str, Optional[str]]],
- attached_modules: List[str],
+ attached_modules: Dict[str, List[str]],
config: RobotConfig,
loop: asyncio.AbstractEventLoop,
strict_attached_instruments: bool = True,
@@ -105,7 +105,7 @@ async def build(
def __init__(
self,
attached_instruments: Dict[types.Mount, Dict[str, Optional[str]]],
- attached_modules: List[str],
+ attached_modules: Dict[str, List[str]],
config: RobotConfig,
loop: asyncio.AbstractEventLoop,
gpio_chardev: GPIODriverLike,
@@ -332,10 +332,16 @@ def set_active_current(self, axis_currents: Dict[Axis, float]) -> None:
@ensure_yield
async def watch(self) -> None:
- new_mods_at_ports = [
- modules.ModuleAtPort(port=f"/dev/ot_module_sim_{mod}{str(idx)}", name=mod)
- for idx, mod in enumerate(self._stubbed_attached_modules)
- ]
+ new_mods_at_ports = []
+ for mod, serials in self._stubbed_attached_modules.items():
+ for serial in serials:
+ new_mods_at_ports.append(
+ modules.SimulatingModuleAtPort(
+ port=f"/dev/ot_module_sim_{mod}{str(serial)}",
+ name=mod,
+ serial_number=serial,
+ )
+ )
await self.module_controls.register_modules(new_mods_at_ports=new_mods_at_ports)
@contextmanager
diff --git a/api/src/opentrons/hardware_control/status_bar_state.py b/api/src/opentrons/hardware_control/backends/status_bar_state.py
similarity index 99%
rename from api/src/opentrons/hardware_control/status_bar_state.py
rename to api/src/opentrons/hardware_control/backends/status_bar_state.py
index b38a709be86..616fec2ff3a 100644
--- a/api/src/opentrons/hardware_control/status_bar_state.py
+++ b/api/src/opentrons/hardware_control/backends/status_bar_state.py
@@ -1,4 +1,4 @@
-from .types import StatusBarState
+from opentrons.hardware_control.types import StatusBarState
from opentrons_hardware.hardware_control import status_bar
from opentrons_hardware.firmware_bindings.binary_constants import (
LightAnimationType,
diff --git a/api/src/opentrons/hardware_control/backends/tip_presence_manager.py b/api/src/opentrons/hardware_control/backends/tip_presence_manager.py
index efe2a863c7b..f2401d23f69 100644
--- a/api/src/opentrons/hardware_control/backends/tip_presence_manager.py
+++ b/api/src/opentrons/hardware_control/backends/tip_presence_manager.py
@@ -3,7 +3,7 @@
from typing import cast, Callable, Optional, List, Set
from typing_extensions import TypedDict, Literal
-from opentrons.hardware_control.types import TipStateType, OT3Mount
+from opentrons.hardware_control.types import TipStateType, OT3Mount, InstrumentProbeType
from opentrons_hardware.drivers.can_bus import CanMessenger
from opentrons_hardware.firmware_bindings.constants import NodeId
@@ -14,8 +14,11 @@
from opentrons_shared_data.errors.exceptions import (
TipDetectorNotFound,
UnmatchedTipPresenceStates,
+ GeneralError,
)
+from .ot3utils import sensor_id_for_instrument
+
log = logging.getLogger(__name__)
TipListener = Callable[[OT3Mount, bool], None]
@@ -108,12 +111,27 @@ def _handle_tip_update(
def current_tip_state(self, mount: OT3Mount) -> Optional[bool]:
state = self._last_state[self._get_key(mount)]
- if state is None:
- log.warning(f"Tip state for {mount} is unknown")
return state
@staticmethod
- def _get_tip_presence(results: List[tip_types.TipNotification]) -> TipStateType:
+ def _get_tip_presence(
+ results: List[tip_types.TipNotification],
+ follow_singular_sensor: Optional[InstrumentProbeType] = None,
+ ) -> TipStateType:
+ """
+ follow_singular_sensor can be used to specify that we only care about
+ the status of one tip presence sensor on a high-throughput pipette;
+ the other sensor is allowed to report a different state.
+ """
+ if follow_singular_sensor:
+ target_sensor_id = sensor_id_for_instrument(follow_singular_sensor)
+ for r in results:
+ if r.sensor == target_sensor_id:
+ return TipStateType(r.presence)
+ # raise an error if requested sensor response isn't found
+ raise GeneralError(
+ message=f"Requested status for sensor {follow_singular_sensor} not found."
+ )
# more than one sensor reported, we have to check if their states match
if len(set(r.presence for r in results)) > 1:
raise UnmatchedTipPresenceStates(
@@ -121,9 +139,15 @@ def _get_tip_presence(results: List[tip_types.TipNotification]) -> TipStateType:
)
return TipStateType(results[0].presence)
- async def get_tip_status(self, mount: OT3Mount) -> TipStateType:
+ async def get_tip_status(
+ self,
+ mount: OT3Mount,
+ follow_singular_sensor: Optional[InstrumentProbeType] = None,
+ ) -> TipStateType:
detector = self.get_detector(mount)
- return self._get_tip_presence(await detector.request_tip_status())
+ return self._get_tip_presence(
+ await detector.request_tip_status(), follow_singular_sensor
+ )
def get_detector(self, mount: OT3Mount) -> TipDetector:
detector = self._detectors[self._get_key(mount)]
diff --git a/api/src/opentrons/hardware_control/backends/types.py b/api/src/opentrons/hardware_control/backends/types.py
new file mode 100644
index 00000000000..e29001abee9
--- /dev/null
+++ b/api/src/opentrons/hardware_control/backends/types.py
@@ -0,0 +1,14 @@
+"""backends.types - wrapper types for api/backend interaction"""
+
+from enum import Enum, auto
+
+
+class HWStopCondition(Enum):
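+ """Stop conditions the hardware API can request; the names mirror the firmware MoveStopCondition values."""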
+ none = auto()
+ limit_switch = auto()
+ sync_line = auto()
+ encoder_position = auto()
+ gripper_force = auto()
+ stall = auto()
+ ignore_stalls = auto()
+ limit_switch_backoff = auto()
diff --git a/api/src/opentrons/hardware_control/dev_types.py b/api/src/opentrons/hardware_control/dev_types.py
index 0c4e5ae4ef7..e6122bf86aa 100644
--- a/api/src/opentrons/hardware_control/dev_types.py
+++ b/api/src/opentrons/hardware_control/dev_types.py
@@ -19,6 +19,7 @@
from opentrons_shared_data.pipette.pipette_definition import (
PipetteConfigurations,
SupportedTipsDefinition,
+ PipetteBoundingBoxOffsetDefinition,
)
from opentrons_shared_data.gripper import (
GripperModel,
@@ -83,6 +84,7 @@ class PipetteDict(InstrumentDict):
tip_length: float
working_volume: float
tip_overlap: Dict[str, float]
+ versioned_tip_overlap: Dict[str, Dict[str, float]]
available_volume: float
return_tip_height: float
default_aspirate_flow_rates: Dict[str, float]
@@ -95,7 +97,8 @@ class PipetteDict(InstrumentDict):
has_tip: bool
default_push_out_volume: Optional[float]
supported_tips: Dict[PipetteTipType, SupportedTipsDefinition]
- current_nozzle_map: Optional[NozzleMap]
+ pipette_bounding_box_offsets: PipetteBoundingBoxOffsetDefinition
+ current_nozzle_map: NozzleMap
class PipetteStateDict(TypedDict):
diff --git a/api/src/opentrons/hardware_control/execution_manager.py b/api/src/opentrons/hardware_control/execution_manager.py
index 5ad0f45912c..7e4f570933f 100644
--- a/api/src/opentrons/hardware_control/execution_manager.py
+++ b/api/src/opentrons/hardware_control/execution_manager.py
@@ -1,6 +1,16 @@
import asyncio
import functools
-from typing import Set, TypeVar, Type, cast, Callable, Any, Awaitable, overload
+from typing import (
+ Set,
+ TypeVar,
+ Type,
+ cast,
+ Callable,
+ Any,
+ Coroutine,
+ ParamSpec,
+ Concatenate,
+)
from .types import ExecutionState
from opentrons_shared_data.errors.exceptions import ExecutionCancelledError
@@ -64,7 +74,9 @@ async def wait_for_is_running(self) -> None:
async with self._condition:
if self._state == ExecutionState.PAUSED:
await self._condition.wait()
- if self._state == ExecutionState.CANCELLED:
+ # type-ignore needed because this is a reentrant function and narrowing cannot
+ # apply
+ if self._state == ExecutionState.CANCELLED: # type: ignore[comparison-overlap]
raise ExecutionCancelledError
elif self._state == ExecutionState.CANCELLED:
raise ExecutionCancelledError
@@ -72,14 +84,9 @@ async def wait_for_is_running(self) -> None:
pass
-DecoratedReturn = TypeVar("DecoratedReturn")
-DecoratedMethodReturningValue = TypeVar(
- "DecoratedMethodReturningValue", bound=Callable[..., Awaitable[DecoratedReturn]]
-)
-DecoratedMethodNoReturn = TypeVar(
- "DecoratedMethodNoReturn", bound=Callable[..., Awaitable[None]]
-)
SubclassInstance = TypeVar("SubclassInstance", bound="ExecutionManagerProvider")
+DecoratedMethodParams = ParamSpec("DecoratedMethodParams")
+DecoratedReturn = TypeVar("DecoratedReturn")
class ExecutionManagerProvider:
@@ -106,31 +113,22 @@ def taskify_movement_execution(self, cancellable: bool) -> None:
def execution_manager(self) -> ExecutionManager:
return self._execution_manager
- @overload
@classmethod
def wait_for_running(
- cls: Type[SubclassInstance], decorated: DecoratedMethodReturningValue
- ) -> DecoratedMethodReturningValue:
- ...
-
- @overload
- @classmethod
- def wait_for_running(
- cls: Type[SubclassInstance], decorated: DecoratedMethodNoReturn
- ) -> DecoratedMethodNoReturn:
- ...
-
- # this type ignore and the overloads are because mypy requires that a function
- # whose signature declares it returns None not have a return statement, whereas
- # this function's implementation relies on python having None actually be the
- # thing you return, and it's mad at that
- @classmethod # type: ignore
- def wait_for_running(
- cls: Type[SubclassInstance], decorated: DecoratedMethodReturningValue
- ) -> DecoratedMethodReturningValue:
+ cls: Type["ExecutionManagerProvider"],
+ decorated: Callable[
+ Concatenate[SubclassInstance, DecoratedMethodParams],
+ Coroutine[Any, Any, DecoratedReturn],
+ ],
+ ) -> Callable[
+ Concatenate[SubclassInstance, DecoratedMethodParams],
+ Coroutine[Any, Any, DecoratedReturn],
+ ]:
@functools.wraps(decorated)
async def replace(
- inst: SubclassInstance, *args: Any, **kwargs: Any
+ inst: SubclassInstance,
+ *args: DecoratedMethodParams.args,
+ **kwargs: DecoratedMethodParams.kwargs,
) -> DecoratedReturn:
if not inst._em_simulate:
await inst.execution_manager.wait_for_is_running()
@@ -147,7 +145,13 @@ async def replace(
else:
return await decorated(inst, *args, **kwargs)
- return cast(DecoratedMethodReturningValue, replace)
+ return cast(
+ Callable[
+ Concatenate[SubclassInstance, DecoratedMethodParams],
+ Coroutine[Any, Any, DecoratedReturn],
+ ],
+ replace,
+ )
async def do_delay(self, duration_s: float) -> None:
if not self._em_simulate:
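
The `ParamSpec`/`Concatenate` rewrite of `wait_for_running` can be illustrated with a smaller decorator that preserves an async method's signature for the type checker. This is a sketch of the typing pattern only (Python 3.10+), not the real `ExecutionManagerProvider`:

import asyncio
import functools
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar

P = ParamSpec("P")
R = TypeVar("R")


class Provider:
    simulate = False

    async def _gate(self) -> None:
        # Stand-in for execution_manager.wait_for_is_running().
        await asyncio.sleep(0)

    @classmethod
    def gated(
        cls,
        decorated: Callable[Concatenate["Provider", P], Coroutine[Any, Any, R]],
    ) -> Callable[Concatenate["Provider", P], Coroutine[Any, Any, R]]:
        # Concatenate keeps `self` explicit while ParamSpec forwards the rest of
        # the signature, so callers retain full checking of *args/**kwargs.
        @functools.wraps(decorated)
        async def wrapper(inst: "Provider", *args: P.args, **kwargs: P.kwargs) -> R:
            if not inst.simulate:
                await inst._gate()
            return await decorated(inst, *args, **kwargs)

        return wrapper


class Thing(Provider):
    @Provider.gated
    async def move(self, distance: float) -> float:
        return distance * 2


print(asyncio.run(Thing().move(1.5)))  # 3.0
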
diff --git a/api/src/opentrons/hardware_control/instruments/instrument_abc.py b/api/src/opentrons/hardware_control/instruments/instrument_abc.py
index 46b4810e22f..1411c1b2b1d 100644
--- a/api/src/opentrons/hardware_control/instruments/instrument_abc.py
+++ b/api/src/opentrons/hardware_control/instruments/instrument_abc.py
@@ -12,11 +12,13 @@ class AbstractInstrument(ABC, Generic[InstrumentConfig]):
"""Defines the common methods of an instrument."""
@property
+ @abstractmethod
def model(self) -> str:
"""Return model of the instrument."""
...
@property
+ @abstractmethod
def config(self) -> InstrumentConfig:
"""Instrument config in dataclass format."""
...
diff --git a/api/src/opentrons/hardware_control/instruments/ot2/instrument_calibration.py b/api/src/opentrons/hardware_control/instruments/ot2/instrument_calibration.py
index f296f390a38..f2f8a7fc426 100644
--- a/api/src/opentrons/hardware_control/instruments/ot2/instrument_calibration.py
+++ b/api/src/opentrons/hardware_control/instruments/ot2/instrument_calibration.py
@@ -17,6 +17,11 @@
LabwareDefinition as TypeDictLabwareDef,
)
+# These type aliases aid typechecking in tests that work the same on this and
+# the hardware_control.instruments.ot3 variant
+SourceType = types.SourceType
+CalibrationStatus = types.CalibrationStatus
+
@dataclass
class PipetteOffsetByPipetteMount:
@@ -25,8 +30,8 @@ class PipetteOffsetByPipetteMount:
"""
offset: Point
- source: types.SourceType
- status: types.CalibrationStatus
+ source: SourceType
+ status: CalibrationStatus
tiprack: typing.Optional[str] = None
uri: typing.Optional[str] = None
last_modified: typing.Optional[datetime] = None
@@ -44,15 +49,15 @@ class PipetteOffsetCalibration:
tiprack: str
uri: str
last_modified: datetime
- source: types.SourceType
- status: types.CalibrationStatus
+ source: SourceType
+ status: CalibrationStatus
@dataclass
class TipLengthCalibration:
tip_length: float
- source: types.SourceType
- status: types.CalibrationStatus
+ source: SourceType
+ status: CalibrationStatus
pipette: str
tiprack: str
last_modified: datetime
@@ -65,8 +70,8 @@ def load_pipette_offset(
# load default if pipette offset data do not exist
pip_cal_obj = PipetteOffsetByPipetteMount(
offset=Point(*default_pipette_offset()),
- source=types.SourceType.default,
- status=types.CalibrationStatus(),
+ source=SourceType.default,
+ status=CalibrationStatus(),
)
# TODO this can be removed once we switch to using
# ot3 pipette types in the ot3 hardware controller.
@@ -125,18 +130,15 @@ def load_tip_length_for_pipette(
pipette_id, tiprack
)
- # TODO (lc 09-26-2022) We shouldn't have to do a hash twice. We should figure out what
- # information we actually need from the labware definition and pass it into
- # the `load_tip_length_calibration` function.
- tiprack_hash = helpers.hash_labware_def(tiprack)
+ tiprack_uri = helpers.uri_from_definition(tiprack)
return TipLengthCalibration(
tip_length=tip_length_data.tipLength,
source=tip_length_data.source,
pipette=pipette_id,
- tiprack=tiprack_hash,
+ tiprack=tip_length_data.definitionHash,
last_modified=tip_length_data.lastModified,
- uri=tip_length_data.uri,
+ uri=tiprack_uri,
status=types.CalibrationStatus(
markedAt=tip_length_data.status.markedAt,
markedBad=tip_length_data.status.markedBad,
diff --git a/api/src/opentrons/hardware_control/instruments/ot2/pipette.py b/api/src/opentrons/hardware_control/instruments/ot2/pipette.py
index f5b9aa3fd29..be126f8513f 100644
--- a/api/src/opentrons/hardware_control/instruments/ot2/pipette.py
+++ b/api/src/opentrons/hardware_control/instruments/ot2/pipette.py
@@ -12,7 +12,8 @@
PlungerPositions,
MotorConfigurations,
SupportedTipsDefinition,
- TipHandlingConfigurations,
+ PickUpTipConfigurations,
+ DropTipConfigurations,
PipetteModelVersionType,
PipetteNameType,
PipetteLiquidPropertiesDefinition,
@@ -25,18 +26,19 @@
InvalidLiquidClassName,
CommandPreconditionViolated,
)
+from opentrons_shared_data.pipette.ul_per_mm import (
+ piecewise_volume_conversion,
+ PIPETTING_FUNCTION_FALLBACK_VERSION,
+ PIPETTING_FUNCTION_LATEST_VERSION,
+)
from opentrons.types import Point, Mount
-from opentrons.config import robot_configs, feature_flags as ff
+from opentrons.config import robot_configs
from opentrons.config.types import RobotConfig
from opentrons.drivers.types import MoveSplit
from ..instrument_abc import AbstractInstrument
-from ..instrument_helpers import (
- piecewise_volume_conversion,
- PIPETTING_FUNCTION_FALLBACK_VERSION,
- PIPETTING_FUNCTION_LATEST_VERSION,
-)
+
from .instrument_calibration import (
PipetteOffsetByPipetteMount,
load_pipette_offset,
@@ -74,7 +76,9 @@ class Pipette(AbstractInstrument[PipetteConfigurations]):
control API. Its only purpose is to gather state.
"""
- DictType = Dict[str, Union[str, float, bool]]
+ DictType = Dict[
+ str, Union[str, float, bool]
+    ]  # spp: as_dict() has value items that aren't Union[str, float, bool].
#: The type of this data class as a dict
def __init__(
@@ -82,6 +86,7 @@ def __init__(
config: PipetteConfigurations,
pipette_offset_cal: PipetteOffsetByPipetteMount,
pipette_id: Optional[str] = None,
+ use_old_aspiration_functions: bool = False,
) -> None:
self._config = config
self._config_as_dict = config.dict()
@@ -109,10 +114,7 @@ def __init__(
)
self._nozzle_offset = self._config.nozzle_offset
self._nozzle_manager = (
- nozzle_manager.NozzleConfigurationManager.build_from_nozzlemap(
- self._config.nozzle_map,
- self._config.partial_tip_configurations.per_tip_pickup_current,
- )
+ nozzle_manager.NozzleConfigurationManager.build_from_config(self._config)
)
self._current_volume = 0.0
self._working_volume = float(self._liquid_class.max_volume)
@@ -146,9 +148,9 @@ def __init__(
self._active_tip_settings.default_blowout_flowrate.default
)
- self._tip_overlap_lookup = self._liquid_class.tip_overlap_dictionary
+ self._tip_overlap_lookup = self._liquid_class.versioned_tip_overlap_dictionary
- if ff.use_old_aspiration_functions():
+ if use_old_aspiration_functions:
self._pipetting_function_version = PIPETTING_FUNCTION_FALLBACK_VERSION
else:
self._pipetting_function_version = PIPETTING_FUNCTION_LATEST_VERSION
@@ -214,7 +216,7 @@ def pipette_offset(self) -> PipetteOffsetByPipetteMount:
return self._pipette_offset
@property
- def tip_overlap(self) -> Dict[str, float]:
+ def tip_overlap(self) -> Dict[str, Dict[str, float]]:
return self._tip_overlap_lookup
@property
@@ -230,17 +232,15 @@ def plunger_motor_current(self) -> MotorConfigurations:
return self._config.plunger_motor_configurations
@property
- def pick_up_configurations(self) -> TipHandlingConfigurations:
+ def pick_up_configurations(self) -> PickUpTipConfigurations:
return self._config.pick_up_tip_configurations
@pick_up_configurations.setter
- def pick_up_configurations(
- self, pick_up_configs: TipHandlingConfigurations
- ) -> None:
+ def pick_up_configurations(self, pick_up_configs: PickUpTipConfigurations) -> None:
self._pick_up_configurations = pick_up_configs
@property
- def drop_configurations(self) -> TipHandlingConfigurations:
+ def drop_configurations(self) -> DropTipConfigurations:
return self._config.drop_tip_configurations
@property
@@ -290,12 +290,9 @@ def reset_state(self) -> None:
self.active_tip_settings.default_blowout_flowrate.default
)
- self._tip_overlap_lookup = self.liquid_class.tip_overlap_dictionary
+ self._tip_overlap_lookup = self.liquid_class.versioned_tip_overlap_dictionary
self._nozzle_manager = (
- nozzle_manager.NozzleConfigurationManager.build_from_nozzlemap(
- self._config.nozzle_map,
- self._config.partial_tip_configurations.per_tip_pickup_current,
- )
+ nozzle_manager.NozzleConfigurationManager.build_from_config(self._config)
)
def reset_pipette_offset(self, mount: Mount, to_default: bool) -> None:
@@ -516,7 +513,6 @@ def remove_tip(self) -> None:
Remove the tip from the pipette (effectively updates the pipette's
critical point)
"""
- assert self.has_tip
self._has_tip = False
self._current_tip_length = 0.0
@@ -575,7 +571,8 @@ def as_dict(self) -> "Pipette.DictType":
"default_dispense_flow_rates": self.dispense_flow_rates_lookup,
"tip_length": self.current_tip_length,
"return_tip_height": self.active_tip_settings.default_return_tip_height,
- "tip_overlap": self.tip_overlap,
+ "tip_overlap": self.tip_overlap["v0"],
+ "versioned_tip_overlap": self.tip_overlap,
"back_compat_names": self._config.pipette_backcompat_names,
"supported_tips": self.liquid_class.supported_tips,
}
@@ -598,6 +595,7 @@ def _reload_and_check_skip(
new_config: PipetteConfigurations,
attached_instr: Pipette,
pipette_offset: PipetteOffsetByPipetteMount,
+ use_old_aspiration_functions: bool,
) -> Tuple[Pipette, bool]:
# Once we have determined that the new and attached pipettes
# are similar enough that we might skip, see if the configs
@@ -618,7 +616,12 @@ def _reload_and_check_skip(
changed.add(k)
if changed.intersection(RECONFIG_KEYS):
# Something has changed that requires reconfig
- p = Pipette(new_config, pipette_offset, attached_instr._pipette_id)
+ p = Pipette(
+ new_config,
+ pipette_offset,
+ attached_instr._pipette_id,
+ use_old_aspiration_functions,
+ )
p.act_as(attached_instr.acting_as)
return p, False
# Good to skip
@@ -631,6 +634,7 @@ def load_from_config_and_check_skip(
requested: Optional[PipetteName],
serial: Optional[str],
pipette_offset: PipetteOffsetByPipetteMount,
+ use_old_aspiration_functions: bool,
) -> Tuple[Optional[Pipette], bool]:
"""
Given the pipette config for an attached pipette (if any) freshly read
@@ -658,16 +662,23 @@ def load_from_config_and_check_skip(
# configured to the request
if requested == str(attached.acting_as):
# similar enough to check
- return _reload_and_check_skip(config, attached, pipette_offset)
+ return _reload_and_check_skip(
+ config, attached, pipette_offset, use_old_aspiration_functions
+ )
else:
# if there is no request, make sure that the old pipette
# did not have backcompat applied
if str(attached.acting_as) == attached.name:
# similar enough to check
- return _reload_and_check_skip(config, attached, pipette_offset)
+ return _reload_and_check_skip(
+ config, attached, pipette_offset, use_old_aspiration_functions
+ )
if config:
- return Pipette(config, pipette_offset, serial), False
+ return (
+ Pipette(config, pipette_offset, serial, use_old_aspiration_functions),
+ False,
+ )
else:
return None, False
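
The switch to `versioned_tip_overlap_dictionary` changes the lookup from a flat per-tiprack map to a version-keyed map; the `as_dict()` entries above expose both the legacy "v0" view and the full table. A sketch of the shape, with invented values that are not taken from any real pipette definition:

from typing import Dict

# Version key -> (tiprack URI -> overlap in mm); values are illustrative only.
versioned_tip_overlap: Dict[str, Dict[str, float]] = {
    "v0": {"default": 10.5, "opentrons/opentrons_96_tiprack_300ul/1": 10.5},
    "v1": {"default": 10.5, "opentrons/opentrons_96_tiprack_300ul/1": 9.8},
}

legacy_view = versioned_tip_overlap["v0"]   # what the "tip_overlap" dict entry still reports
full_table = versioned_tip_overlap          # what "versioned_tip_overlap" reports
print(legacy_view["default"], len(full_table))
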
diff --git a/api/src/opentrons/hardware_control/instruments/ot2/pipette_handler.py b/api/src/opentrons/hardware_control/instruments/ot2/pipette_handler.py
index 67596cea790..e1d87f499a9 100644
--- a/api/src/opentrons/hardware_control/instruments/ot2/pipette_handler.py
+++ b/api/src/opentrons/hardware_control/instruments/ot2/pipette_handler.py
@@ -212,6 +212,7 @@ def get_attached_instrument(self, mount: MountType) -> PipetteDict:
"blow_out_flow_rate",
"working_volume",
"tip_overlap",
+ "versioned_tip_overlap",
"available_volume",
"return_tip_height",
"default_aspirate_flow_rates",
@@ -242,8 +243,6 @@ def get_attached_instrument(self, mount: MountType) -> PipetteDict:
instr, instr.blow_out_flow_rate, "dispense"
)
result["ready_to_aspirate"] = instr.ready_to_aspirate
- # TODO (12-5-2022) figure out why this is using default aspirate flow rate
- # rather than default dispense flow rate.
result["default_blow_out_speeds"] = {
alvl: self.plunger_speed(instr, fr, "blowout")
for alvl, fr in instr.blow_out_flow_rates_lookup.items()
@@ -257,6 +256,9 @@ def get_attached_instrument(self, mount: MountType) -> PipetteDict:
alvl: self.plunger_speed(instr, fr, "aspirate")
for alvl, fr in instr.aspirate_flow_rates_lookup.items()
}
+ result[
+ "pipette_bounding_box_offsets"
+ ] = instr.config.pipette_bounding_box_offsets
return cast(PipetteDict, result)
@property
@@ -619,7 +621,12 @@ def plan_check_dispense( # type: ignore[no-untyped-def]
else:
disp_vol = volume
- # Ensure we don't dispense more than the current volume
+ # Ensure we don't dispense more than the current volume.
+ #
+ # This clamping is inconsistent with plan_check_aspirate(), which asserts
+ # that its input is in bounds instead of clamping it. This is left to avoid
+ # disturbing Python protocols with apiLevel <= 2.13. In newer Python protocols,
+ # the Protocol Engine layer applies its own bounds checking.
disp_vol = min(instrument.current_volume, disp_vol)
if disp_vol == 0:
@@ -766,28 +773,31 @@ def plan_check_pick_up_tip( # type: ignore[no-untyped-def]
if instrument.has_tip:
raise UnexpectedTipAttachError("pick_up_tip", instrument.name, mount.name)
self._ihp_log.debug(f"Picking up tip on {mount.name}")
-
+ tip_count = instrument.nozzle_manager.current_configuration.tip_count
if presses is None or presses < 0:
- checked_presses = instrument.pick_up_configurations.presses
+ checked_presses = instrument.pick_up_configurations.press_fit.presses
else:
checked_presses = presses
if not increment or increment < 0:
- check_incr = instrument.pick_up_configurations.increment
+ check_incr = instrument.pick_up_configurations.press_fit.increment
else:
check_incr = increment
- pick_up_speed = instrument.pick_up_configurations.speed
+ pick_up_speed = instrument.pick_up_configurations.press_fit.speed_by_tip_count[
+ tip_count
+ ]
+ pick_up_distance = (
+ instrument.pick_up_configurations.press_fit.distance_by_tip_count[tip_count]
+ )
def build_presses() -> Iterator[Tuple[float, float]]:
# Press the nozzle into the tip number of times,
# moving further by mm after each press
for i in range(checked_presses):
# move nozzle down into the tip
- press_dist = (
- -1.0 * instrument.pick_up_configurations.distance
- + -1.0 * check_incr * i
- )
+
+ press_dist = -1.0 * pick_up_distance + -1.0 * check_incr * i
# move nozzle back up
backup_dist = -press_dist
yield (press_dist, backup_dist)
@@ -808,7 +818,9 @@ def add_tip_to_instr() -> None:
current={
Axis.by_mount(
mount
- ): instrument.nozzle_manager.get_tip_configuration_current()
+ ): instrument.pick_up_configurations.press_fit.current_by_tip_count[
+ tip_count
+ ]
},
speed=pick_up_speed,
relative_down=top_types.Point(0, 0, press_dist),
@@ -817,9 +829,7 @@ def add_tip_to_instr() -> None:
for press_dist, backup_dist in build_presses()
],
shake_off_list=self._build_pickup_shakes(instrument),
- retract_target=instrument.pick_up_configurations.distance
- + check_incr * checked_presses
- + 2,
+ retract_target=pick_up_distance + check_incr * checked_presses + 2,
),
add_tip_to_instr,
)
@@ -837,7 +847,9 @@ def add_tip_to_instr() -> None:
current={
Axis.by_mount(
mount
- ): instrument.nozzle_manager.get_tip_configuration_current()
+ ): instrument.pick_up_configurations.press_fit.current_by_tip_count[
+ instrument.nozzle_manager.current_configuration.tip_count
+ ]
},
speed=pick_up_speed,
relative_down=top_types.Point(0, 0, press_dist),
@@ -846,9 +858,7 @@ def add_tip_to_instr() -> None:
for press_dist, backup_dist in build_presses()
],
shake_off_list=self._build_pickup_shakes(instrument),
- retract_target=instrument.pick_up_configurations.distance
- + check_incr * checked_presses
- + 2,
+ retract_target=pick_up_distance + check_incr * checked_presses + 2,
),
add_tip_to_instr,
)
@@ -923,9 +933,13 @@ def plan_check_drop_tip( # type: ignore[no-untyped-def]
):
instrument = self.get_pipette(mount)
+ if not instrument.drop_configurations.plunger_eject:
+ raise CommandPreconditionViolated(
+ f"Pipette {instrument.name} on {mount.name} has no plunger eject configuration"
+ )
bottom = instrument.plunger_positions.bottom
droptip = instrument.plunger_positions.drop_tip
- speed = instrument.drop_configurations.speed
+ speed = instrument.drop_configurations.plunger_eject.speed
shakes: List[Tuple[top_types.Point, Optional[float]]] = []
if Quirks.dropTipShake in instrument.config.quirks:
diameter = instrument.current_tiprack_diameter
@@ -941,7 +955,11 @@ def _remove_tips() -> None:
bottom,
droptip,
{Axis.of_plunger(mount): instrument.plunger_motor_current.run},
- {Axis.of_plunger(mount): instrument.drop_configurations.current},
+ {
+ Axis.of_plunger(
+ mount
+ ): instrument.drop_configurations.plunger_eject.current
+ },
speed,
home_after,
(Axis.of_plunger(mount),),
@@ -971,7 +989,7 @@ def _remove_tips() -> None:
{
Axis.of_main_tool_actuator(
mount
- ): instrument.drop_configurations.current
+ ): instrument.drop_configurations.plunger_eject.current
},
speed,
home_after,
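
The press-fit changes above replace single `distance`/`speed`/`current` values with per-tip-count lookups. A standalone sketch of how `build_presses()` now derives its motion plan, using toy configuration values rather than real pipette data:

from typing import Dict, Iterator, Tuple

# Assumed per-tip-count tables, mirroring press_fit.*_by_tip_count (values invented).
distance_by_tip_count: Dict[int, float] = {1: 11.0, 8: 13.0}
speed_by_tip_count: Dict[int, float] = {1: 5.0, 8: 10.0}
current_by_tip_count: Dict[int, float] = {1: 0.1, 8: 0.8}


def build_presses(
    tip_count: int, presses: int, increment: float
) -> Iterator[Tuple[float, float]]:
    pick_up_distance = distance_by_tip_count[tip_count]
    for i in range(presses):
        # Each press goes a little deeper than the last, then backs out the same amount.
        press_dist = -1.0 * pick_up_distance + -1.0 * increment * i
        yield press_dist, -press_dist


for down, up in build_presses(tip_count=8, presses=3, increment=0.5):
    print(f"down {down:+.2f} mm, back up {up:+.2f} mm, "
          f"at {speed_by_tip_count[8]} mm/s, {current_by_tip_count[8]} A")
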
diff --git a/api/src/opentrons/hardware_control/instruments/ot3/gripper.py b/api/src/opentrons/hardware_control/instruments/ot3/gripper.py
index 3eb3c863522..ba49ea7d5e7 100644
--- a/api/src/opentrons/hardware_control/instruments/ot3/gripper.py
+++ b/api/src/opentrons/hardware_control/instruments/ot3/gripper.py
@@ -3,7 +3,7 @@
""" Classes and functions for gripper state tracking
"""
import logging
-from typing import Any, Optional, Set, Dict, Tuple
+from typing import Any, Optional, Set, Dict, Tuple, Final
from opentrons.types import Point
from opentrons.config import gripper_config
@@ -22,7 +22,10 @@
)
from ..instrument_abc import AbstractInstrument
from opentrons.hardware_control.dev_types import AttachedGripper, GripperDict
-from opentrons_shared_data.errors.exceptions import CommandPreconditionViolated
+from opentrons_shared_data.errors.exceptions import (
+ CommandPreconditionViolated,
+ MotionFailedError,
+)
from opentrons_shared_data.gripper import (
GripperDefinition,
@@ -30,8 +33,9 @@
Geometry,
)
-RECONFIG_KEYS = {"quirks"}
+RECONFIG_KEYS = {"quirks", "grip_force_profile"}
+MAX_ACCEPTABLE_JAW_DISPLACEMENT: Final = 20
mod_log = logging.getLogger(__name__)
@@ -48,6 +52,7 @@ def __init__(
config: GripperDefinition,
gripper_cal_offset: GripperCalibrationOffset,
gripper_id: str,
+ jaw_max_offset: Optional[float] = None,
) -> None:
self._config = config
self._model = config.model
@@ -79,6 +84,7 @@ def __init__(
self._log.info(
f"loaded: {self._model}, gripper offset: {self._calibration_offset}"
)
+ self._jaw_max_offset = jaw_max_offset
@property
def grip_force_profile(self) -> GripForceProfile:
@@ -101,10 +107,21 @@ def remove_probe(self) -> None:
assert self.attached_probe
self._attached_probe = None
+ @property
+ def max_allowed_grip_error(self) -> float:
+ return self._geometry.max_allowed_grip_error
+
+ @property
+ def max_jaw_width(self) -> float:
+ return self._config.geometry.jaw_width["max"] + (self._jaw_max_offset or 0)
+
+ @property
+ def min_jaw_width(self) -> float:
+ return self._config.geometry.jaw_width["min"]
+
@property
def jaw_width(self) -> float:
- jaw_max = self.geometry.jaw_width["max"]
- return jaw_max - (self.current_jaw_displacement * 2.0)
+ return self.max_jaw_width - (self.current_jaw_displacement * 2.0)
@property
def current_jaw_displacement(self) -> float:
@@ -113,7 +130,7 @@ def current_jaw_displacement(self) -> float:
@current_jaw_displacement.setter
def current_jaw_displacement(self, mm: float) -> None:
- max_mm = self._max_jaw_displacement() + 2.0
+ max_mm = self.max_jaw_displacement() + 2.0
if mm > max_mm:
self._log.warning(
f"jaw displacement {round(mm, 1)} mm exceeds max expected value: "
@@ -133,9 +150,9 @@ def default_idle_force(self) -> float:
def default_home_force(self) -> float:
return self.grip_force_profile.default_home_force
- def _max_jaw_displacement(self) -> float:
+ def max_jaw_displacement(self) -> float:
geometry = self._config.geometry
- return (geometry.jaw_width["max"] - geometry.jaw_width["min"]) / 2
+ return (self.max_jaw_width - geometry.jaw_width["min"]) / 2
@property
def state(self) -> GripperJawState:
@@ -163,6 +180,41 @@ def gripper_id(self) -> str:
def reload_configurations(self) -> None:
return None
+ def update_jaw_open_position_from_closed_position(
+ self, jaw_at_closed: float
+ ) -> None:
+ """Update the estimation of the jaw position at open based on reading it at closed.
+
+ This is necessary because the gripper jaw has a well-defined positional hard stop
+        when fully closed and empty but _not_ when open. The open position can vary from
+        unit to unit. This can be calibrated out by reading the encoder position when the
+        jaw is closed and then shifting the logical open position so that the logical
+        closed position matches the configured value.
+ """
+ jaw_min = self._config.geometry.jaw_width["min"]
+ jaw_nominal_max = self._config.geometry.jaw_width["max"]
+ if (
+ abs((jaw_at_closed * 2) - (jaw_nominal_max - jaw_min))
+ > MAX_ACCEPTABLE_JAW_DISPLACEMENT
+ ):
+ raise MotionFailedError(
+ message="Gripper jaw calibration out of bounds",
+ detail={
+ "type": "gripper-jaw-calibration-out-of-bounds",
+ "actual-displacement": str(jaw_at_closed * 2),
+ "nominal-displacement": str(jaw_nominal_max - jaw_min),
+ },
+ )
+
+ self._jaw_max_offset = jaw_min - (jaw_nominal_max - (jaw_at_closed * 2))
+ self._log.info(
+ f"Gripper max jaw offset is now {self._jaw_max_offset} from input position {jaw_at_closed}"
+ )
+
+ @property
+ def has_jaw_width_calibration(self) -> bool:
+ return self._jaw_max_offset is not None
+
def reset_offset(self, to_default: bool) -> None:
"""Tempoarily reset the gripper offsets to default values."""
if to_default:
@@ -220,6 +272,7 @@ def critical_point(self, cp_override: Optional[CriticalPoint] = None) -> Point:
self._front_calibration_pin_offset
+ Point(*self._calibration_offset.offset)
+ Point(y=self.current_jaw_displacement)
+ - Point(y=(self._jaw_max_offset or 0))
)
elif cp == CriticalPoint.GRIPPER_REAR_CALIBRATION_PIN:
self.check_calibration_pin_location_is_accurate()
@@ -227,6 +280,7 @@ def critical_point(self, cp_override: Optional[CriticalPoint] = None) -> Point:
self._rear_calibration_pin_offset
+ Point(*self._calibration_offset.offset)
- Point(y=self.current_jaw_displacement)
+ + Point(y=(self._jaw_max_offset or 0))
)
else:
raise InvalidCriticalPoint(cp.name, "gripper")
@@ -272,11 +326,13 @@ def _reload_gripper(
changed.add(k)
if changed.intersection(RECONFIG_KEYS):
# Something has changed that requires reconfig
+        # we should recalibrate the jaw as well
return (
Gripper(
new_config,
cal_offset,
attached_instr._gripper_id,
+ None,
),
False,
)
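
The jaw-width calibration math in `update_jaw_open_position_from_closed_position` can be checked by hand; the geometry numbers below are invented, not real gripper configuration values:

MAX_ACCEPTABLE_JAW_DISPLACEMENT = 20  # mm, from the constant added above

# Invented geometry: nominal jaw width range from the gripper definition.
jaw_min, jaw_nominal_max = 60.0, 92.0

# Encoder reading of per-side displacement when the jaw is driven fully closed.
jaw_at_closed = 16.5

actual_travel = jaw_at_closed * 2            # 33.0 mm of total closure observed
nominal_travel = jaw_nominal_max - jaw_min   # 32.0 mm expected from the config
assert abs(actual_travel - nominal_travel) <= MAX_ACCEPTABLE_JAW_DISPLACEMENT

# Same expression as the diff: shift the logical open position so that the
# logical closed position matches the configured minimum width.
jaw_max_offset = jaw_min - (jaw_nominal_max - actual_travel)   # +1.0 mm
max_jaw_width = jaw_nominal_max + jaw_max_offset               # 93.0 mm
max_jaw_displacement = (max_jaw_width - jaw_min) / 2           # 16.5 mm == jaw_at_closed
print(jaw_max_offset, max_jaw_width, max_jaw_displacement)
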
diff --git a/api/src/opentrons/hardware_control/instruments/ot3/gripper_handler.py b/api/src/opentrons/hardware_control/instruments/ot3/gripper_handler.py
index cf2ba55e23d..e327306c19f 100644
--- a/api/src/opentrons/hardware_control/instruments/ot3/gripper_handler.py
+++ b/api/src/opentrons/hardware_control/instruments/ot3/gripper_handler.py
@@ -51,6 +51,7 @@ def reset_gripper(self) -> None:
og_gripper.config,
load_gripper_calibration_offset(og_gripper.gripper_id),
og_gripper.gripper_id,
+ og_gripper._jaw_max_offset,
)
self._gripper = new_gripper
diff --git a/api/src/opentrons/hardware_control/instruments/ot3/instrument_calibration.py b/api/src/opentrons/hardware_control/instruments/ot3/instrument_calibration.py
index d42ae38b779..b7eae1aa1fc 100644
--- a/api/src/opentrons/hardware_control/instruments/ot3/instrument_calibration.py
+++ b/api/src/opentrons/hardware_control/instruments/ot3/instrument_calibration.py
@@ -21,7 +21,12 @@
)
from opentrons.hardware_control.types import OT3Mount
-PIPETTE_OFFSET_CONSISTENCY_LIMIT: Final = 1.5
+PIPETTE_OFFSET_CONSISTENCY_LIMIT: Final = 4.0
+
+# These type aliases aid typechecking in tests that work the same on this and
+# the hardware_control.instruments.ot2 variant
+SourceType = cal_top_types.SourceType
+CalibrationStatus = cal_top_types.CalibrationStatus
@dataclass
@@ -41,8 +46,8 @@ class PipetteOffsetByPipetteMount:
"""
offset: Point
- source: cal_top_types.SourceType
- status: cal_top_types.CalibrationStatus
+ source: SourceType
+ status: CalibrationStatus
last_modified: typing.Optional[datetime] = None
@@ -63,8 +68,8 @@ class GripperCalibrationOffset:
"""
offset: Point
- source: cal_top_types.SourceType
- status: cal_top_types.CalibrationStatus
+ source: SourceType
+ status: CalibrationStatus
last_modified: typing.Optional[datetime] = None
@@ -74,8 +79,8 @@ def load_pipette_offset(
# load default if pipette offset data do not exist
pip_cal_obj = PipetteOffsetByPipetteMount(
offset=Point(*default_pipette_offset()),
- source=cal_top_types.SourceType.default,
- status=cal_top_types.CalibrationStatus(),
+ source=SourceType.default,
+ status=CalibrationStatus(),
)
# TODO this can be removed once we switch to using
# ot3 pipette types in the ot3 hardware controller.
@@ -90,7 +95,7 @@ def load_pipette_offset(
offset=pip_offset_data.offset,
last_modified=pip_offset_data.lastModified,
source=pip_offset_data.source,
- status=cal_top_types.CalibrationStatus(
+ status=CalibrationStatus(
markedAt=pip_offset_data.status.markedAt,
markedBad=pip_offset_data.status.markedBad,
source=pip_offset_data.status.source,
@@ -118,8 +123,8 @@ def load_gripper_calibration_offset(
# load default if gripper offset data do not exist
grip_cal_obj = GripperCalibrationOffset(
offset=Point(*default_gripper_calibration_offset()),
- source=cal_top_types.SourceType.default,
- status=cal_top_types.CalibrationStatus(),
+ source=SourceType.default,
+ status=CalibrationStatus(),
)
if gripper_id and ff.enable_ot3_hardware_controller():
grip_offset_data = gripper_offset.get_gripper_calibration_offset(gripper_id)
diff --git a/api/src/opentrons/hardware_control/instruments/ot3/pipette.py b/api/src/opentrons/hardware_control/instruments/ot3/pipette.py
index 1f6dd0b4b59..9372cf90ae1 100644
--- a/api/src/opentrons/hardware_control/instruments/ot3/pipette.py
+++ b/api/src/opentrons/hardware_control/instruments/ot3/pipette.py
@@ -5,13 +5,15 @@
from opentrons.types import Point
-from opentrons.config import feature_flags as ff
from opentrons_shared_data.pipette.pipette_definition import (
PipetteConfigurations,
PlungerPositions,
MotorConfigurations,
SupportedTipsDefinition,
- TipHandlingConfigurations,
+ PickUpTipConfigurations,
+ PressFitPickUpTipConfiguration,
+ CamActionPickUpTipConfiguration,
+ DropTipConfigurations,
PlungerHomingConfigurations,
PipetteNameType,
PipetteModelVersionType,
@@ -23,12 +25,12 @@
CommandPreconditionViolated,
PythonException,
)
-from ..instrument_abc import AbstractInstrument
-from ..instrument_helpers import (
+from opentrons_shared_data.pipette.ul_per_mm import (
piecewise_volume_conversion,
PIPETTING_FUNCTION_FALLBACK_VERSION,
PIPETTING_FUNCTION_LATEST_VERSION,
)
+from ..instrument_abc import AbstractInstrument
from .instrument_calibration import (
save_pipette_offset_calibration,
load_pipette_offset,
@@ -65,6 +67,7 @@ def __init__(
config: PipetteConfigurations,
pipette_offset_cal: PipetteOffsetByPipetteMount,
pipette_id: Optional[str] = None,
+ use_old_aspiration_functions: bool = False,
) -> None:
self._config = config
self._config_as_dict = config.dict()
@@ -95,10 +98,7 @@ def __init__(
)
self._nozzle_offset = self._config.nozzle_offset
self._nozzle_manager = (
- nozzle_manager.NozzleConfigurationManager.build_from_nozzlemap(
- self._config.nozzle_map,
- self._config.partial_tip_configurations.per_tip_pickup_current,
- )
+ nozzle_manager.NozzleConfigurationManager.build_from_config(self._config)
)
self._current_volume = 0.0
self._working_volume = float(self._liquid_class.max_volume)
@@ -133,9 +133,9 @@ def __init__(
)
self._flow_acceleration = self._active_tip_settings.default_flow_acceleration
- self._tip_overlap_lookup = self._liquid_class.tip_overlap_dictionary
+ self._tip_overlap_lookup = self._liquid_class.versioned_tip_overlap_dictionary
- if ff.use_old_aspiration_functions():
+ if use_old_aspiration_functions:
self._pipetting_function_version = PIPETTING_FUNCTION_FALLBACK_VERSION
else:
self._pipetting_function_version = PIPETTING_FUNCTION_LATEST_VERSION
@@ -161,7 +161,7 @@ def backlash_distance(self) -> float:
return self._backlash_distance
@property
- def tip_overlap(self) -> Dict[str, float]:
+ def tip_overlap(self) -> Dict[str, Dict[str, float]]:
return self._tip_overlap_lookup
@property
@@ -185,13 +185,11 @@ def plunger_motor_current(self) -> MotorConfigurations:
return self._plunger_motor_current
@property
- def pick_up_configurations(self) -> TipHandlingConfigurations:
+ def pick_up_configurations(self) -> PickUpTipConfigurations:
return self._pick_up_configurations
@pick_up_configurations.setter
- def pick_up_configurations(
- self, pick_up_configs: TipHandlingConfigurations
- ) -> None:
+ def pick_up_configurations(self, pick_up_configs: PickUpTipConfigurations) -> None:
self._pick_up_configurations = pick_up_configs
@property
@@ -199,7 +197,7 @@ def plunger_homing_configurations(self) -> PlungerHomingConfigurations:
return self._plunger_homing_configurations
@property
- def drop_configurations(self) -> TipHandlingConfigurations:
+ def drop_configurations(self) -> DropTipConfigurations:
return self._drop_configurations
@property
@@ -256,12 +254,9 @@ def reset_state(self) -> None:
)
self._flow_acceleration = self._active_tip_settings.default_flow_acceleration
- self._tip_overlap_lookup = self.liquid_class.tip_overlap_dictionary
+ self._tip_overlap_lookup = self.liquid_class.versioned_tip_overlap_dictionary
self._nozzle_manager = (
- nozzle_manager.NozzleConfigurationManager.build_from_nozzlemap(
- self._config.nozzle_map,
- self._config.partial_tip_configurations.per_tip_pickup_current,
- )
+ nozzle_manager.NozzleConfigurationManager.build_from_config(self._config)
)
def reset_pipette_offset(self, mount: OT3Mount, to_default: bool) -> None:
@@ -489,7 +484,6 @@ def remove_tip(self) -> None:
Remove the tip from the pipette (effectively updates the pipette's
critical point)
"""
- assert self.has_tip_length
self._current_tip_length = 0.0
self._has_tip_length = False
@@ -510,14 +504,6 @@ def tip_presence_responses(self) -> int:
# TODO: put this in shared-data
return 2 if self.channels > 8 else 1
- @property
- def connect_tiprack_distance_mm(self) -> float:
- return self._config.connect_tiprack_distance_mm
-
- @property
- def end_tip_action_retract_distance_mm(self) -> float:
- return self._config.end_tip_action_retract_distance_mm
-
# Cache max is chosen somewhat arbitrarily. With a float is input we don't
# want this to unbounded.
@functools.lru_cache(maxsize=100)
@@ -574,7 +560,8 @@ def as_dict(self) -> "Pipette.DictType":
"default_flow_acceleration": self.active_tip_settings.default_flow_acceleration,
"tip_length": self.current_tip_length,
"return_tip_height": self.active_tip_settings.default_return_tip_height,
- "tip_overlap": self.tip_overlap,
+ "tip_overlap": self.tip_overlap["v0"],
+ "versioned_tip_overlap": self.tip_overlap,
"back_compat_names": self._config.pipette_backcompat_names,
"supported_tips": self.liquid_class.supported_tips,
}
@@ -669,14 +656,40 @@ def set_tip_type(self, tip_type: pip_types.PipetteTipType) -> None:
self._flow_acceleration = self._active_tip_settings.default_flow_acceleration
self._fallback_tip_length = self._active_tip_settings.default_tip_length
- self._tip_overlap_lookup = self.liquid_class.tip_overlap_dictionary
+ self._tip_overlap_lookup = self.liquid_class.versioned_tip_overlap_dictionary
self._working_volume = min(tip_type.value, self.liquid_class.max_volume)
+ def get_pick_up_configuration_for_tip_count(
+ self, count: int
+ ) -> Union[CamActionPickUpTipConfiguration, PressFitPickUpTipConfiguration]:
+ for config in (
+ self._config.pick_up_tip_configurations.press_fit,
+ self._config.pick_up_tip_configurations.cam_action,
+ ):
+ if not config:
+ continue
+
+ if isinstance(config, PressFitPickUpTipConfiguration) and all(
+ [
+ config.speed_by_tip_count.get(count),
+ config.distance_by_tip_count.get(count),
+ config.current_by_tip_count.get(count),
+ ]
+ ):
+ return config
+ elif config.current_by_tip_count.get(count) is not None:
+ return config
+
+ raise CommandPreconditionViolated(
+ message=f"No pick up tip configuration for {count} tips",
+ )
+
def _reload_and_check_skip(
new_config: PipetteConfigurations,
attached_instr: Pipette,
pipette_offset: PipetteOffsetByPipetteMount,
+ use_old_aspiration_functions: bool,
) -> Tuple[Pipette, bool]:
# Once we have determined that the new and attached pipettes
# are similar enough that we might skip, see if the configs
@@ -695,7 +708,12 @@ def _reload_and_check_skip(
changed.add(k)
if changed.intersection("quirks"):
# Something has changed that requires reconfig
- p = Pipette(new_config, pipette_offset, attached_instr._pipette_id)
+ p = Pipette(
+ new_config,
+ pipette_offset,
+ attached_instr._pipette_id,
+ use_old_aspiration_functions,
+ )
p.act_as(attached_instr.acting_as)
return p, False
# Good to skip, just need to update calibration offset and update_info
@@ -709,6 +727,7 @@ def load_from_config_and_check_skip(
requested: Optional[PipetteName],
serial: Optional[str],
pipette_offset: PipetteOffsetByPipetteMount,
+ use_old_aspiration_functions: bool,
) -> Tuple[Optional[Pipette], bool]:
"""
Given the pipette config for an attached pipette (if any) freshly read
@@ -740,6 +759,7 @@ def load_from_config_and_check_skip(
config,
attached,
pipette_offset,
+ use_old_aspiration_functions,
)
else:
# if there is no request, make sure that the old pipette
@@ -750,9 +770,13 @@ def load_from_config_and_check_skip(
config,
attached,
pipette_offset,
+ use_old_aspiration_functions,
)
if config:
- return Pipette(config, pipette_offset, serial), False
+ return (
+ Pipette(config, pipette_offset, serial, use_old_aspiration_functions),
+ False,
+ )
else:
return None, False
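
A reduced sketch of the selection rule in `get_pick_up_configuration_for_tip_count`: use the press-fit config when it has speed/distance/current entries for the requested tip count, otherwise fall back to any config with a current entry. The toy dataclasses stand in for the shared-data models and the values are invented:

from dataclasses import dataclass, field
from typing import Dict, Optional, Union


@dataclass
class PressFit:            # stand-in for PressFitPickUpTipConfiguration
    speed_by_tip_count: Dict[int, float] = field(default_factory=dict)
    distance_by_tip_count: Dict[int, float] = field(default_factory=dict)
    current_by_tip_count: Dict[int, float] = field(default_factory=dict)


@dataclass
class CamAction:           # stand-in for CamActionPickUpTipConfiguration
    current_by_tip_count: Dict[int, float] = field(default_factory=dict)


def config_for_tip_count(
    press_fit: Optional[PressFit], cam_action: Optional[CamAction], count: int
) -> Union[PressFit, CamAction]:
    for config in (press_fit, cam_action):
        if not config:
            continue
        if isinstance(config, PressFit) and all(
            [
                config.speed_by_tip_count.get(count),
                config.distance_by_tip_count.get(count),
                config.current_by_tip_count.get(count),
            ]
        ):
            return config
        elif config.current_by_tip_count.get(count) is not None:
            return config
    raise ValueError(f"no pick up tip configuration for {count} tips")


press = PressFit({1: 5.0, 8: 10.0}, {1: 11.0, 8: 13.0}, {1: 0.1, 8: 0.8})
cam = CamAction({96: 1.5})
print(type(config_for_tip_count(press, cam, 8)).__name__)    # PressFit
print(type(config_for_tip_count(press, cam, 96)).__name__)   # CamAction
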
diff --git a/api/src/opentrons/hardware_control/instruments/ot3/pipette_handler.py b/api/src/opentrons/hardware_control/instruments/ot3/pipette_handler.py
index 36b41e3e816..70d62156181 100644
--- a/api/src/opentrons/hardware_control/instruments/ot3/pipette_handler.py
+++ b/api/src/opentrons/hardware_control/instruments/ot3/pipette_handler.py
@@ -22,6 +22,8 @@
)
from opentrons_shared_data.pipette.pipette_definition import (
liquid_class_for_volume_between_default_and_defaultlowvolume,
+ PressFitPickUpTipConfiguration,
+ CamActionPickUpTipConfiguration,
)
from opentrons import types as top_types
@@ -226,6 +228,7 @@ def get_attached_instrument(self, mount: OT3Mount) -> PipetteDict:
"blow_out_flow_rate",
"working_volume",
"tip_overlap",
+ "versioned_tip_overlap",
"available_volume",
"return_tip_height",
"default_aspirate_flow_rates",
@@ -274,6 +277,9 @@ def get_attached_instrument(self, mount: OT3Mount) -> PipetteDict:
result[
"default_push_out_volume"
] = instr.active_tip_settings.default_push_out_volume
+ result[
+ "pipette_bounding_box_offsets"
+ ] = instr.config.pipette_bounding_box_offsets
return cast(PipetteDict, result)
@property
@@ -595,7 +601,12 @@ def plan_check_dispense(
else:
disp_vol = volume
- # Ensure we don't dispense more than the current volume
+ # Ensure we don't dispense more than the current volume.
+ #
+ # This clamping is inconsistent with plan_check_aspirate(), which asserts
+ # that its input is in bounds instead of clamping it. This is to match a quirk
+ # of the OT-2 version of this class. Protocol Engine does its own clamping,
+ # so we don't expect this to trigger in practice.
disp_vol = min(instrument.current_volume, disp_vol)
is_full_dispense = numpy.isclose(instrument.current_volume - disp_vol, 0)
@@ -729,7 +740,7 @@ def build_one_shake() -> List[Tuple[top_types.Point, Optional[float]]]:
return []
- def plan_ht_pick_up_tip(self) -> TipActionSpec:
+ def plan_ht_pick_up_tip(self, tip_count: int) -> TipActionSpec:
# Prechecks: ready for pickup tip and press/increment are valid
mount = OT3Mount.LEFT
instrument = self.get_pipette(mount)
@@ -737,25 +748,32 @@ def plan_ht_pick_up_tip(self) -> TipActionSpec:
raise UnexpectedTipAttachError("pick_up_tip", instrument.name, mount.name)
self._ihp_log.debug(f"Picking up tip on {mount.name}")
+ pick_up_config = instrument.get_pick_up_configuration_for_tip_count(tip_count)
+ if not isinstance(pick_up_config, CamActionPickUpTipConfiguration):
+ raise CommandPreconditionViolated(
+ f"Low-throughput pick up tip got wrong config for {instrument.name} on {mount.name}"
+ )
+
tip_motor_moves = self._build_tip_motor_moves(
- prep_move_dist=instrument.pick_up_configurations.prep_move_distance,
- clamp_move_dist=instrument.pick_up_configurations.distance,
- prep_move_speed=instrument.pick_up_configurations.prep_move_speed,
- clamp_move_speed=instrument.pick_up_configurations.speed,
+ prep_move_dist=pick_up_config.prep_move_distance,
+ clamp_move_dist=pick_up_config.distance,
+ prep_move_speed=pick_up_config.prep_move_speed,
+ clamp_move_speed=pick_up_config.speed,
plunger_current=instrument.plunger_motor_current.run,
- tip_motor_current=instrument.nozzle_manager.get_tip_configuration_current(),
+ tip_motor_current=pick_up_config.current_by_tip_count[tip_count],
)
return TipActionSpec(
tip_action_moves=tip_motor_moves,
shake_off_moves=[],
- z_distance_to_tiprack=(-1 * instrument.connect_tiprack_distance_mm),
- ending_z_retract_distance=instrument.end_tip_action_retract_distance_mm,
+ z_distance_to_tiprack=(-1 * pick_up_config.connect_tiprack_distance_mm),
+ ending_z_retract_distance=instrument.config.end_tip_action_retract_distance_mm,
)
def plan_lt_pick_up_tip(
self,
mount: OT3Mount,
+ tip_count: int,
presses: Optional[int],
increment: Optional[float],
) -> TipActionSpec:
@@ -765,17 +783,22 @@ def plan_lt_pick_up_tip(
raise UnexpectedTipAttachError("pick_up_tip", instrument.name, mount.name)
self._ihp_log.debug(f"Picking up tip on {mount.name}")
+ pick_up_config = instrument.get_pick_up_configuration_for_tip_count(tip_count)
+ if not isinstance(pick_up_config, PressFitPickUpTipConfiguration):
+ raise CommandPreconditionViolated(
+ f"Low-throughput pick up tip got wrong config for {instrument.name} on {mount.name}"
+ )
if presses is None or presses < 0:
- checked_presses = instrument.pick_up_configurations.presses
+ checked_presses = pick_up_config.presses
else:
checked_presses = presses
if not increment or increment < 0:
- check_incr = instrument.pick_up_configurations.increment
+ check_incr = pick_up_config.increment
else:
check_incr = increment
- pick_up_speed = instrument.pick_up_configurations.speed
+ pick_up_speed = pick_up_config.speed_by_tip_count[tip_count]
def build_presses() -> List[TipActionMoveSpec]:
# Press the nozzle into the tip number of times,
@@ -784,7 +807,7 @@ def build_presses() -> List[TipActionMoveSpec]:
for i in range(checked_presses):
# move nozzle down into the tip
press_dist = (
- -1.0 * instrument.pick_up_configurations.distance
+ -1.0 * pick_up_config.distance_by_tip_count[tip_count]
+ -1.0 * check_incr * i
)
press_moves.append(
@@ -792,9 +815,9 @@ def build_presses() -> List[TipActionMoveSpec]:
distance=press_dist,
speed=pick_up_speed,
currents={
- Axis.by_mount(
- mount
- ): instrument.nozzle_manager.get_tip_configuration_current()
+ Axis.by_mount(mount): pick_up_config.current_by_tip_count[
+ tip_count
+ ]
},
)
)
@@ -840,15 +863,17 @@ def plan_lt_drop_tip(
mount: OT3Mount,
) -> TipActionSpec:
instrument = self.get_pipette(mount)
-
+ config = instrument.drop_configurations.plunger_eject
+ if not config:
+ raise CommandPreconditionViolated(
+ f"No plunger-eject drop tip configurations for {instrument.name} on {mount.name}"
+ )
drop_seq = [
TipActionMoveSpec(
distance=instrument.plunger_positions.drop_tip,
- speed=instrument.drop_configurations.speed,
+ speed=config.speed,
currents={
- Axis.of_main_tool_actuator(
- mount
- ): instrument.drop_configurations.current,
+ Axis.of_main_tool_actuator(mount): config.current,
},
),
TipActionMoveSpec(
@@ -870,14 +895,19 @@ def plan_lt_drop_tip(
def plan_ht_drop_tip(self) -> TipActionSpec:
mount = OT3Mount.LEFT
instrument = self.get_pipette(mount)
+ config = instrument.drop_configurations.cam_action
+ if not config:
+ raise CommandPreconditionViolated(
+ f"No cam-action drop tip configurations for {instrument.name} on {mount.name}"
+ )
drop_seq = self._build_tip_motor_moves(
- prep_move_dist=instrument.drop_configurations.prep_move_distance,
- clamp_move_dist=instrument.drop_configurations.distance,
- prep_move_speed=instrument.drop_configurations.prep_move_speed,
- clamp_move_speed=instrument.drop_configurations.speed,
+ prep_move_dist=config.prep_move_distance,
+ clamp_move_dist=config.distance,
+ prep_move_speed=config.prep_move_speed,
+ clamp_move_speed=config.speed,
plunger_current=instrument.plunger_motor_current.run,
- tip_motor_current=instrument.drop_configurations.current,
+ tip_motor_current=config.current,
)
return TipActionSpec(
diff --git a/api/src/opentrons/hardware_control/module_control.py b/api/src/opentrons/hardware_control/module_control.py
index d7c5f391ea1..6fda6b7cdc5 100644
--- a/api/src/opentrons/hardware_control/module_control.py
+++ b/api/src/opentrons/hardware_control/module_control.py
@@ -15,6 +15,8 @@
save_module_calibration_offset,
)
from opentrons.hardware_control.modules.types import ModuleType
+from opentrons.hardware_control.modules import SimulatingModuleAtPort
+
from opentrons.types import Point
from .types import AionotifyEvent, BoardRevision, OT3Mount
from . import modules
@@ -26,7 +28,15 @@
log = logging.getLogger(__name__)
-MODULE_PORT_REGEX = re.compile("|".join(modules.MODULE_TYPE_BY_NAME.keys()), re.I)
+MODULE_PORT_REGEX = re.compile(
+ # add a negative lookbehind to suppress matches on OT-2 tempfiles udev creates
+ r"(? modules.AbstractModule:
return await modules.build(
port=port,
@@ -87,10 +98,14 @@ async def build_module(
hw_control_loop=self._api.loop,
execution_manager=self._api._execution_manager,
sim_model=sim_model,
+ sim_serial_number=sim_serial_number,
)
async def unregister_modules(
- self, mods_at_ports: List[modules.ModuleAtPort]
+ self,
+ mods_at_ports: Union[
+ List[modules.ModuleAtPort], List[modules.SimulatingModuleAtPort]
+ ],
) -> None:
"""
De-register Modules.
@@ -120,7 +135,9 @@ async def unregister_modules(
async def register_modules(
self,
- new_mods_at_ports: Optional[List[modules.ModuleAtPort]] = None,
+ new_mods_at_ports: Optional[
+ Union[List[modules.ModuleAtPort], List[modules.SimulatingModuleAtPort]]
+ ] = None,
removed_mods_at_ports: Optional[List[modules.ModuleAtPort]] = None,
) -> None:
"""
@@ -146,6 +163,9 @@ async def register_modules(
port=mod.port,
usb_port=mod.usb_port,
type=modules.MODULE_TYPE_BY_NAME[mod.name],
+ sim_serial_number=mod.serial_number
+ if isinstance(mod, SimulatingModuleAtPort)
+ else None,
)
self._available_modules.append(new_instance)
log.info(
@@ -183,7 +203,7 @@ def get_module_at_port(port: str) -> Optional[modules.ModuleAtPort]:
"""
match = MODULE_PORT_REGEX.search(port)
if match:
- name = match.group().lower()
+ name = match.group(1).lower()
if name not in modules.MODULE_TYPE_BY_NAME:
log.warning(f"Unexpected module connected: {name} on {port}")
return None
@@ -205,10 +225,10 @@ async def handle_module_appearance(self, event: AionotifyEvent) -> None:
new_modules = None
removed_modules = None
if maybe_module_at_port is not None:
- if hasattr(event.flags, "DELETE"):
+ if hasattr(event.flags, "DELETE") or hasattr(event.flags, "MOVED_FROM"):
removed_modules = [maybe_module_at_port]
log.info(f"Module Removed: {maybe_module_at_port}")
- elif hasattr(event.flags, "CREATE"):
+ elif hasattr(event.flags, "CREATE") or hasattr(event.flags, "MOVED_TO"):
new_modules = [maybe_module_at_port]
log.info(f"Module Added: {maybe_module_at_port}")
try:
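
The `handle_module_appearance` change treats udev rename events like add/remove events. A toy sketch of the flag branching; the fake flag objects below are illustrative assumptions, not the real aionotify event types:

from types import SimpleNamespace
from typing import Optional


def classify(event_flags: SimpleNamespace) -> Optional[str]:
    # MOVED_FROM/MOVED_TO are emitted when udev renames a device node, so they
    # are handled the same way as DELETE/CREATE.
    if hasattr(event_flags, "DELETE") or hasattr(event_flags, "MOVED_FROM"):
        return "removed"
    if hasattr(event_flags, "CREATE") or hasattr(event_flags, "MOVED_TO"):
        return "added"
    return None


print(classify(SimpleNamespace(MOVED_TO=True)))    # added
print(classify(SimpleNamespace(MOVED_FROM=True)))  # removed
print(classify(SimpleNamespace(MODIFY=True)))      # None
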
diff --git a/api/src/opentrons/hardware_control/modules/__init__.py b/api/src/opentrons/hardware_control/modules/__init__.py
index 4a8208dce49..dd8c531bdb1 100644
--- a/api/src/opentrons/hardware_control/modules/__init__.py
+++ b/api/src/opentrons/hardware_control/modules/__init__.py
@@ -11,6 +11,7 @@
BundledFirmware,
UpdateError,
ModuleAtPort,
+ SimulatingModuleAtPort,
ModuleType,
ModuleModel,
TemperatureStatus,
@@ -33,6 +34,7 @@
"BundledFirmware",
"UpdateError",
"ModuleAtPort",
+ "SimulatingModuleAtPort",
"HeaterShaker",
"ModuleType",
"ModuleModel",
diff --git a/api/src/opentrons/hardware_control/modules/heater_shaker.py b/api/src/opentrons/hardware_control/modules/heater_shaker.py
index d4a8fb11d94..09ac06ea5f2 100644
--- a/api/src/opentrons/hardware_control/modules/heater_shaker.py
+++ b/api/src/opentrons/hardware_control/modules/heater_shaker.py
@@ -49,6 +49,7 @@ async def build(
poll_interval_seconds: Optional[float] = None,
simulating: bool = False,
sim_model: Optional[str] = None,
+ sim_serial_number: Optional[str] = None,
) -> "HeaterShaker":
"""
Build a HeaterShaker
@@ -71,7 +72,7 @@ async def build(
driver = await HeaterShakerDriver.create(port=port, loop=hw_control_loop)
poll_interval_seconds = poll_interval_seconds or POLL_PERIOD
else:
- driver = SimulatingDriver()
+ driver = SimulatingDriver(serial_number=sim_serial_number)
poll_interval_seconds = poll_interval_seconds or SIMULATING_POLL_PERIOD
reader = HeaterShakerReader(driver=driver)
diff --git a/api/src/opentrons/hardware_control/modules/magdeck.py b/api/src/opentrons/hardware_control/modules/magdeck.py
index e195716882a..07c0f2ffb5c 100644
--- a/api/src/opentrons/hardware_control/modules/magdeck.py
+++ b/api/src/opentrons/hardware_control/modules/magdeck.py
@@ -53,13 +53,16 @@ async def build(
poll_interval_seconds: Optional[float] = None,
simulating: bool = False,
sim_model: Optional[str] = None,
+ sim_serial_number: Optional[str] = None,
) -> "MagDeck":
"""Factory function."""
driver: AbstractMagDeckDriver
if not simulating:
driver = await MagDeckDriver.create(port=port, loop=hw_control_loop)
else:
- driver = SimulatingDriver(sim_model=sim_model)
+ driver = SimulatingDriver(
+ sim_model=sim_model, serial_number=sim_serial_number
+ )
mod = cls(
port=port,
diff --git a/api/src/opentrons/hardware_control/modules/mod_abc.py b/api/src/opentrons/hardware_control/modules/mod_abc.py
index 48d7f79e4b2..b41315e6815 100644
--- a/api/src/opentrons/hardware_control/modules/mod_abc.py
+++ b/api/src/opentrons/hardware_control/modules/mod_abc.py
@@ -2,9 +2,8 @@
import asyncio
import logging
import re
-from pkg_resources import parse_version
-from typing import ClassVar, Mapping, Optional, cast, TypeVar
-
+from typing import ClassVar, Mapping, Optional, TypeVar, cast
+from packaging.version import InvalidVersion, parse, Version
from opentrons.config import IS_ROBOT, ROBOT_FIRMWARE_DIR
from opentrons.drivers.rpi_drivers.types import USBPort
@@ -16,6 +15,19 @@
TaskPayload = TypeVar("TaskPayload")
+def parse_fw_version(version: str) -> Version:
+ try:
+ device_version = parse(version)
+ # This is a patch for older versions of packaging - they would try and parse old
+ # kidns of versions and return a LegacyVersion object. We can't check for that
+ # explicitly because they removed it in modern versions of packaging.
+ if not isinstance(device_version, Version):
+ raise InvalidVersion()
+ except InvalidVersion:
+ device_version = parse("v0.0.0")
+ return cast(Version, device_version)
+
+
class AbstractModule(abc.ABC):
"""Defines the common methods of a module."""
@@ -32,6 +44,7 @@ async def build(
poll_interval_seconds: Optional[float] = None,
simulating: bool = False,
sim_model: Optional[str] = None,
+ sim_serial_number: Optional[str] = None,
) -> "AbstractModule":
"""Modules should always be created using this factory.
@@ -87,9 +100,9 @@ def get_bundled_fw(self) -> Optional[BundledFirmware]:
def has_available_update(self) -> bool:
"""Return whether a newer firmware file is available"""
if self.device_info and self._bundled_fw:
- device_version = parse_version(self.device_info["version"])
- available_version = parse_version(self._bundled_fw.version)
- return cast(bool, available_version > device_version)
+ device_version = parse_fw_version(self.device_info["version"])
+ available_version = parse_fw_version(self._bundled_fw.version)
+ return available_version > device_version
return False
async def wait_for_is_running(self) -> None:
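
The new `parse_fw_version` helper can be exercised on its own; this sketch assumes only the `packaging` library, which the diff already imports:

from packaging.version import InvalidVersion, Version, parse


def parse_fw_version(version: str) -> Version:
    # Mirrors the helper above: anything unparseable collapses to 0.0.0 so that
    # firmware-version comparisons never raise.
    try:
        parsed = parse(version)
        if not isinstance(parsed, Version):
            raise InvalidVersion(version)
    except InvalidVersion:
        parsed = parse("0.0.0")
    return parsed


assert parse_fw_version("v1.0.4") < parse_fw_version("v1.1.0")
assert parse_fw_version("not-a-version") == Version("0.0.0")
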
diff --git a/api/src/opentrons/hardware_control/modules/tempdeck.py b/api/src/opentrons/hardware_control/modules/tempdeck.py
index 261d40ea026..afcc4d64636 100644
--- a/api/src/opentrons/hardware_control/modules/tempdeck.py
+++ b/api/src/opentrons/hardware_control/modules/tempdeck.py
@@ -39,6 +39,7 @@ async def build(
poll_interval_seconds: Optional[float] = None,
simulating: bool = False,
sim_model: Optional[str] = None,
+ sim_serial_number: Optional[str] = None,
) -> "TempDeck":
"""
Build a TempDeck
@@ -60,7 +61,9 @@ async def build(
driver = await TempDeckDriver.create(port=port, loop=hw_control_loop)
poll_interval_seconds = poll_interval_seconds or TEMP_POLL_INTERVAL_SECS
else:
- driver = SimulatingDriver(sim_model=sim_model)
+ driver = SimulatingDriver(
+ sim_model=sim_model, serial_number=sim_serial_number
+ )
poll_interval_seconds = poll_interval_seconds or SIM_TEMP_POLL_INTERVAL_SECS
reader = TempDeckReader(driver=driver)
diff --git a/api/src/opentrons/hardware_control/modules/thermocycler.py b/api/src/opentrons/hardware_control/modules/thermocycler.py
index fe333d37849..f93cd61ded9 100644
--- a/api/src/opentrons/hardware_control/modules/thermocycler.py
+++ b/api/src/opentrons/hardware_control/modules/thermocycler.py
@@ -63,6 +63,7 @@ async def build(
poll_interval_seconds: Optional[float] = None,
simulating: bool = False,
sim_model: Optional[str] = None,
+ sim_serial_number: Optional[str] = None,
) -> "Thermocycler":
"""
Build and connect to a Thermocycler
@@ -87,7 +88,7 @@ async def build(
)
poll_interval_seconds = poll_interval_seconds or POLLING_FREQUENCY_SEC
else:
- driver = SimulatingDriver(model=sim_model)
+ driver = SimulatingDriver(model=sim_model, serial_number=sim_serial_number)
poll_interval_seconds = poll_interval_seconds or SIM_POLLING_FREQUENCY_SEC
reader = ThermocyclerReader(driver=driver)
diff --git a/api/src/opentrons/hardware_control/modules/types.py b/api/src/opentrons/hardware_control/modules/types.py
index 653b0b08e4f..eb8054a87ee 100644
--- a/api/src/opentrons/hardware_control/modules/types.py
+++ b/api/src/opentrons/hardware_control/modules/types.py
@@ -64,6 +64,22 @@ def from_model(cls, model: ModuleModel) -> ModuleType:
if isinstance(model, MagneticBlockModel):
return cls.MAGNETIC_BLOCK
+ @classmethod
+ def to_module_fixture_id(cls, module_type: ModuleType) -> str:
+ if module_type == ModuleType.THERMOCYCLER:
+ # Thermocyclers are "loaded" in B1 only
+ return "thermocyclerModuleV2Front"
+ if module_type == ModuleType.TEMPERATURE:
+ return "temperatureModuleV2"
+ if module_type == ModuleType.HEATER_SHAKER:
+ return "heaterShakerModuleV1"
+ if module_type == ModuleType.MAGNETIC_BLOCK:
+ return "magneticBlockV1"
+ else:
+ raise ValueError(
+ f"Module Type {module_type} does not have a related fixture ID."
+ )
+
class MagneticModuleModel(str, Enum):
MAGNETIC_V1: str = "magneticModuleV1"
@@ -103,13 +119,18 @@ def module_model_from_string(model_string: str) -> ModuleModel:
raise ValueError(f"No such module model {model_string}")
-@dataclass
+@dataclass(kw_only=True)
class ModuleAtPort:
port: str
name: str
usb_port: USBPort = USBPort(name="", port_number=0)
+@dataclass(kw_only=True)
+class SimulatingModuleAtPort(ModuleAtPort):
+ serial_number: str
+
+
class BundledFirmware(NamedTuple):
"""Represents a versioned firmware file, generally bundled into the fs"""
diff --git a/api/src/opentrons/hardware_control/modules/update.py b/api/src/opentrons/hardware_control/modules/update.py
index 519bb9bbf2a..51c7d1cd32a 100644
--- a/api/src/opentrons/hardware_control/modules/update.py
+++ b/api/src/opentrons/hardware_control/modules/update.py
@@ -3,7 +3,7 @@
import os
from pathlib import Path
from glob import glob
-from typing import Any, AsyncGenerator, Dict, Tuple, Optional, Union
+from typing import Any, AsyncGenerator, Dict, Tuple, Union
from .types import UpdateError
from .mod_abc import AbstractModule
from opentrons.hardware_control.threaded_async_lock import ThreadedAsyncLock
@@ -23,7 +23,6 @@ async def protect_update_transition() -> AsyncGenerator[None, None]:
async def update_firmware(
module: AbstractModule,
firmware_file: Union[str, Path],
- loop: Optional[asyncio.AbstractEventLoop],
) -> None:
"""Apply update of given firmware file to given module.
@@ -34,7 +33,6 @@ async def update_firmware(
kwargs: Dict[str, Any] = {
"stdout": asyncio.subprocess.PIPE,
"stderr": asyncio.subprocess.PIPE,
- "loop": loop,
}
successful, res = await module.bootloader()(
flash_port_or_dfu_serial, str(firmware_file), kwargs
diff --git a/api/src/opentrons/hardware_control/modules/utils.py b/api/src/opentrons/hardware_control/modules/utils.py
index 56a47f977da..0c213ead6a1 100644
--- a/api/src/opentrons/hardware_control/modules/utils.py
+++ b/api/src/opentrons/hardware_control/modules/utils.py
@@ -42,6 +42,7 @@ async def build(
hw_control_loop: asyncio.AbstractEventLoop,
execution_manager: ExecutionManager,
sim_model: Optional[str] = None,
+ sim_serial_number: Optional[str] = None,
) -> AbstractModule:
return await _MODULE_CLS_BY_TYPE[type].build(
port=port,
@@ -50,6 +51,7 @@ async def build(
hw_control_loop=hw_control_loop,
execution_manager=execution_manager,
sim_model=sim_model,
+ sim_serial_number=sim_serial_number,
)
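
The `sim_serial_number` plumbing follows one pattern across all the module builders: thread an optional serial number through the factory down to the simulating driver. A stripped-down sketch with toy classes; none of these are the real drivers or module classes:

import asyncio
from typing import Optional


class SimulatingDriver:
    def __init__(self, serial_number: Optional[str] = None) -> None:
        # Simulated modules report this instead of a hardware-derived serial.
        self.serial_number = serial_number or "dummySerial"


class SimModule:
    def __init__(self, driver: Optional[SimulatingDriver]) -> None:
        self._driver = driver

    @classmethod
    async def build(
        cls, simulating: bool, sim_serial_number: Optional[str] = None
    ) -> "SimModule":
        driver = SimulatingDriver(serial_number=sim_serial_number) if simulating else None
        return cls(driver)


mod = asyncio.run(SimModule.build(simulating=True, sim_serial_number="fake-serial-1"))
assert mod._driver is not None
print(mod._driver.serial_number)   # fake-serial-1
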
diff --git a/api/src/opentrons/hardware_control/nozzle_manager.py b/api/src/opentrons/hardware_control/nozzle_manager.py
index 781d2c55bc8..a25e5e57319 100644
--- a/api/src/opentrons/hardware_control/nozzle_manager.py
+++ b/api/src/opentrons/hardware_control/nozzle_manager.py
@@ -1,17 +1,43 @@
-from typing import Dict, List, Optional, Any, Sequence
-from typing_extensions import Final
+from typing import Dict, List, Optional, Any, Sequence, Iterator, Tuple, cast
from dataclasses import dataclass
from collections import OrderedDict
from enum import Enum
+from itertools import chain
from opentrons.hardware_control.types import CriticalPoint
from opentrons.types import Point
-from opentrons_shared_data.errors import (
- ErrorCodes,
- GeneralError,
+from opentrons_shared_data.pipette.pipette_definition import (
+ PipetteGeometryDefinition,
+ PipetteRowDefinition,
)
+from opentrons_shared_data.errors import ErrorCodes, GeneralError, PythonException
-INTERNOZZLE_SPACING = 9
+MAXIMUM_NOZZLE_COUNT = 24
+
+
+def _nozzle_names_by_row(rows: List[PipetteRowDefinition]) -> Iterator[str]:
+ for row in rows:
+ for nozzle in row.ordered_nozzles:
+ yield nozzle
+
+
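+# Helpers: map a nozzle name to the index of the row/column that contains it.
+# A missing nozzle raises KeyError, which NozzleMap.build converts into
+# IncompatibleNozzleConfiguration.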
+def _row_or_col_index_for_nozzle(
+ row_or_col: "OrderedDict[str, List[str]]", nozzle: str
+) -> int:
+ for index, row_or_col_contents in enumerate(row_or_col.values()):
+ if nozzle in row_or_col_contents:
+ return index
+ raise KeyError(nozzle)
+
+
+def _row_col_indices_for_nozzle(
+ rows: "OrderedDict[str, List[str]]",
+ cols: "OrderedDict[str, List[str]]",
+ nozzle: str,
+) -> Tuple[int, int]:
+ return _row_or_col_index_for_nozzle(rows, nozzle), _row_or_col_index_for_nozzle(
+ cols, nozzle
+ )
class NozzleConfigurationType(Enum):
@@ -24,174 +50,248 @@ class NozzleConfigurationType(Enum):
COLUMN = "COLUMN"
ROW = "ROW"
- QUADRANT = "QUADRANT"
SINGLE = "SINGLE"
FULL = "FULL"
+ SUBRECT = "SUBRECT"
@classmethod
def determine_nozzle_configuration(
cls,
- nozzle_difference: Point,
- physical_nozzlemap_length: int,
- current_nozzlemap_length: int,
+ physical_rows: "OrderedDict[str, List[str]]",
+ current_rows: "OrderedDict[str, List[str]]",
+ physical_cols: "OrderedDict[str, List[str]]",
+ current_cols: "OrderedDict[str, List[str]]",
) -> "NozzleConfigurationType":
"""
Determine the nozzle configuration based on the starting and
ending nozzle.
-
- :param nozzle_difference: the difference between the back
- left and front right nozzle
- :param physical_nozzlemap_length: integer representing the
- length of the default physical configuration of the pipette.
- :param current_nozzlemap_length: integer representing the
- length of the current physical configuration of the pipette.
- :return : nozzle configuration type
"""
- if physical_nozzlemap_length == current_nozzlemap_length:
+ if physical_rows == current_rows and physical_cols == current_cols:
return NozzleConfigurationType.FULL
-
- if nozzle_difference == Point(0, 0, 0):
+ if len(current_rows) == 1 and len(current_cols) == 1:
return NozzleConfigurationType.SINGLE
- elif nozzle_difference[0] == 0:
- return NozzleConfigurationType.COLUMN
- elif nozzle_difference[1] == 0:
+ if len(current_rows) == 1:
return NozzleConfigurationType.ROW
- else:
- return NozzleConfigurationType.QUADRANT
+ if len(current_cols) == 1:
+ return NozzleConfigurationType.COLUMN
+ return NozzleConfigurationType.SUBRECT
@dataclass
class NozzleMap:
"""
- Nozzle Map.
+ A NozzleMap instance represents a specific configuration of active nozzles on a pipette.
+
+ It exposes properties of the configuration like the configuration's front-right, front-left,
+ back-left and starting nozzles as well as a map of all the nozzles active in the configuration.
- A data store class that can build
- and store nozzle configurations
- based on the physical default
- nozzle map of the pipette and
- the requested starting/ending tips.
+ Because NozzleMaps represent configurations directly, the properties of the NozzleMap may not
+ match the properties of the physical pipette. For instance, a NozzleMap for a single channel
+ configuration of an 8-channel pipette - say, A1 only - will have its front left, front right,
+ and active channels all be A1, while the physical configuration would have the front right
+ channel be H1.
"""
- back_left: str
- front_right: str
starting_nozzle: str
+ #: The nozzle at which automated operations that count nozzles should start
+ # These are really ordered dicts, but we can't annotate them as such (even in quotes) because
+ # pydantic needs to evaluate the annotations to generate serdes code, so please only assign ordered dicts here
map_store: Dict[str, Point]
+ #: A map of all of the nozzles active in this configuration
+ rows: Dict[str, List[str]]
+ #: A map of all the rows active in this configuration
+ columns: Dict[str, List[str]]
+ #: A map of all the columns active in this configuration
configuration: NozzleConfigurationType
+ #: The kind of configuration this is
+
+ full_instrument_map_store: Dict[str, Point]
+ #: A map of all of the nozzles of an instrument
+ full_instrument_rows: Dict[str, List[str]]
+ #: A map of all the rows of an instrument
def __str__(self) -> str:
return f"back_left_nozzle: {self.back_left} front_right_nozzle: {self.front_right} configuration: {self.configuration}"
+ @property
+ def back_left(self) -> str:
+ """The backest, leftest (i.e. back if it's a column, left if it's a row) nozzle of the configuration.
+
+ Note: This is the value relevant for this particular configuration, and it may not represent the back left nozzle
+ of the underlying physical pipette. For instance, the back-left nozzle of a configuration representing nozzles
+ D7 to H12 of a 96-channel pipette is D7, which is not the back-left nozzle of the physical pipette (A1).
+ """
+ return next(iter(self.rows.values()))[0]
+
+ @property
+ def front_right(self) -> str:
+ """The frontest, rightest (i.e. front if it's a column, right if it's a row) nozzle of the configuration.
+
+ Note: This is the value relevant for this configuration, not the physical pipette. See the note on back_left.
+ """
+ return next(reversed(list(self.rows.values())))[-1]
+
+ @property
+ def full_instrument_back_left(self) -> str:
+ """The backest, leftest (i.e. back if it's a column, left if it's a row) nozzle of the full instrument.
+
+ Note: This value represents the back left nozzle of the underlying physical pipette. For instance,
+ the back-left nozzle of a 96-Channel pipette is A1.
+ """
+ return next(iter(self.full_instrument_rows.values()))[0]
+
+ @property
+ def full_instrument_front_right(self) -> str:
+ """The frontest, rightest (i.e. front if it's a column, right if it's a row) nozzle of the full instrument.
+
+ Note: This value represents the front right nozzle of the physical pipette. See the note on full_instrument_back_left.
+ """
+ return next(reversed(list(self.full_instrument_rows.values())))[-1]
+
@property
def starting_nozzle_offset(self) -> Point:
+ """The position of the starting nozzle."""
return self.map_store[self.starting_nozzle]
@property
def xy_center_offset(self) -> Point:
+ """The position of the geometrical center of all nozzles in the configuration.
+
+ Note: This is the value relevant for this configuration, not the physical pipette. See the note on back_left.
+ """
difference = self.map_store[self.front_right] - self.map_store[self.back_left]
return self.map_store[self.back_left] + Point(
difference[0] / 2, difference[1] / 2, 0
)
+ @property
+ def instrument_xy_center_offset(self) -> Point:
+ """The position of the geometrical center of all nozzles for the entire instrument.
+
+ Note: This value reflects the center of all the nozzles of the physical pipette,
+ i.e. the same as for a full configuration.
+ """
+ difference = (
+ self.full_instrument_map_store[self.full_instrument_front_right]
+ - self.full_instrument_map_store[self.full_instrument_back_left]
+ )
+ return self.full_instrument_map_store[self.full_instrument_back_left] + Point(
+ difference[0] / 2, difference[1] / 2, 0
+ )
+
+ @property
+ def y_center_offset(self) -> Point:
+ """The position in the center of the primary column of the map."""
+ front_left = next(reversed(list(self.rows.values())))[0]
+ difference = self.map_store[front_left] - self.map_store[self.back_left]
+ return self.map_store[self.back_left] + Point(0, difference[1] / 2, 0)
+
@property
def front_nozzle_offset(self) -> Point:
+ """The offset for the front_left nozzle."""
# front left-most nozzle of the 96 channel in a given configuration
# and front nozzle of the 8 channel
- if self.starting_nozzle == self.front_right:
- return self.map_store[self.front_right]
- map_store_list = list(self.map_store.values())
- starting_idx = map_store_list.index(self.map_store[self.back_left])
- difference = self.map_store[self.back_left] - self.map_store[self.front_right]
- y_rows_length = int(difference[1] // INTERNOZZLE_SPACING)
- return map_store_list[starting_idx + y_rows_length]
+ front_left = next(iter(self.columns.values()))[-1]
+ return self.map_store[front_left]
+
+ @property
+ def front_right_nozzle_offset(self) -> Point:
+ """The offset for the front_right nozzle."""
+ # Front-right-most nozzle of the 96 channel in a given configuration
+ # and Front-most nozzle of the 8-channel
+ return self.map_store[self.front_right]
+
+ @property
+ def back_left_nozzle_offset(self) -> Point:
+ """The offset for the back_left nozzle."""
+ # Back-left-most nozzle of the 96-channel in a given configuration
+ # and back-most nozzle of the 8-channel
+ return self.map_store[self.back_left]
@property
def tip_count(self) -> int:
+ """The total number of active nozzles in the configuration, and thus the number of tips that will be picked up."""
return len(self.map_store)
@classmethod
def build(
cls,
- physical_nozzle_map: Dict[str, Point],
+ physical_nozzles: "OrderedDict[str, Point]",
+ physical_rows: "OrderedDict[str, List[str]]",
+ physical_columns: "OrderedDict[str, List[str]]",
starting_nozzle: str,
back_left_nozzle: str,
front_right_nozzle: str,
- origin_nozzle: Optional[str] = None,
) -> "NozzleMap":
- difference = (
- physical_nozzle_map[front_right_nozzle]
- - physical_nozzle_map[back_left_nozzle]
- )
- x_columns_length = int(abs(difference[0] // INTERNOZZLE_SPACING)) + 1
- y_rows_length = int(abs(difference[1] // INTERNOZZLE_SPACING)) + 1
-
- map_store_list = list(physical_nozzle_map.items())
-
- if origin_nozzle:
- origin_difference = (
- physical_nozzle_map[back_left_nozzle]
- - physical_nozzle_map[origin_nozzle]
+ try:
+ back_left_row_index, back_left_column_index = _row_col_indices_for_nozzle(
+ physical_rows, physical_columns, back_left_nozzle
)
- starting_col = int(abs(origin_difference[0] // INTERNOZZLE_SPACING))
- else:
- starting_col = 0
+ except KeyError as e:
+ raise IncompatibleNozzleConfiguration(
+ message=f"No entry for back left nozzle {e} in pipette",
+ wrapping=[PythonException(e)],
+ ) from e
+ try:
+ (
+ front_right_row_index,
+ front_right_column_index,
+ ) = _row_col_indices_for_nozzle(
+ physical_rows, physical_columns, front_right_nozzle
+ )
+ except KeyError as e:
+ raise IncompatibleNozzleConfiguration(
+ message=f"No entry for front right nozzle {e} in pipette",
+ wrapping=[PythonException(e)],
+ ) from e
+
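+ # Slice the physical row and column maps down to the rectangle bounded (inclusively)
+ # by the requested back-left and front-right nozzles.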
+ correct_rows_with_all_columns = list(physical_rows.items())[
+ back_left_row_index : front_right_row_index + 1
+ ]
+ correct_rows = [
+ (
+ row_name,
+ row_entries[back_left_column_index : front_right_column_index + 1],
+ )
+ for row_name, row_entries in correct_rows_with_all_columns
+ ]
+ rows = OrderedDict(correct_rows)
+ correct_columns_with_all_rows = list(physical_columns.items())[
+ back_left_column_index : front_right_column_index + 1
+ ]
+ correct_columns = [
+ (col_name, col_entries[back_left_row_index : front_right_row_index + 1])
+ for col_name, col_entries in correct_columns_with_all_rows
+ ]
+ columns = OrderedDict(correct_columns)
+
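+ # Collect the offsets of the selected nozzles, iterating row by row so map_store keeps reading order.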
map_store = OrderedDict(
- {
- k: v
- for i in range(x_columns_length)
- for k, v in map_store_list[
- (i + starting_col) * 8 : y_rows_length * ((i + starting_col) + 1)
- ]
- }
+ (nozzle, physical_nozzles[nozzle]) for nozzle in chain(*rows.values())
)
+
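+ # Only partial (non-FULL) layouts are capped at MAXIMUM_NOZZLE_COUNT active nozzles.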
+ if (
+ NozzleConfigurationType.determine_nozzle_configuration(
+ physical_rows, rows, physical_columns, columns
+ )
+ != NozzleConfigurationType.FULL
+ ):
+ if len(rows) * len(columns) > MAXIMUM_NOZZLE_COUNT:
+ raise IncompatibleNozzleConfiguration(
+ f"Partial Nozzle Layouts may not be configured to contain more than {MAXIMUM_NOZZLE_COUNT} channels."
+ )
+
return cls(
- back_left=back_left_nozzle,
- front_right=front_right_nozzle,
starting_nozzle=starting_nozzle,
map_store=map_store,
+ rows=rows,
+ full_instrument_map_store=physical_nozzles,
+ full_instrument_rows=physical_rows,
+ columns=columns,
configuration=NozzleConfigurationType.determine_nozzle_configuration(
- difference, len(physical_nozzle_map), len(map_store)
+ physical_rows, rows, physical_columns, columns
),
)
- @staticmethod
- def validate_nozzle_configuration(
- back_left_nozzle: str,
- front_right_nozzle: str,
- default_configuration: "NozzleMap",
- current_configuration: Optional["NozzleMap"] = None,
- ) -> None:
- """
- Validate nozzle configuration.
- """
- if back_left_nozzle > front_right_nozzle:
- raise IncompatibleNozzleConfiguration(
- message=f"Back left nozzle {back_left_nozzle} provided is not to the back or left of {front_right_nozzle}.",
- detail={
- "current_nozzle_configuration": current_configuration,
- "requested_back_left_nozzle": back_left_nozzle,
- "requested_front_right_nozzle": front_right_nozzle,
- },
- )
- if not default_configuration.map_store.get(back_left_nozzle):
- raise IncompatibleNozzleConfiguration(
- message=f"Starting nozzle {back_left_nozzle} does not exist in the nozzle map.",
- detail={
- "current_nozzle_configuration": current_configuration,
- "requested_back_left_nozzle": back_left_nozzle,
- "requested_front_right_nozzle": front_right_nozzle,
- },
- )
-
- if not default_configuration.map_store.get(front_right_nozzle):
- raise IncompatibleNozzleConfiguration(
- message=f"Ending nozzle {front_right_nozzle} does not exist in the nozzle map.",
- detail={
- "current_nozzle_configuration": current_configuration,
- "requested_back_left_nozzle": back_left_nozzle,
- "requested_front_right_nozzle": front_right_nozzle,
- },
- )
-
class IncompatibleNozzleConfiguration(GeneralError):
"""Error raised if nozzle configuration is incompatible with the currently loaded pipette."""
@@ -215,33 +315,39 @@ class NozzleConfigurationManager:
def __init__(
self,
nozzle_map: NozzleMap,
- pick_up_current_map: Dict[int, float],
) -> None:
self._physical_nozzle_map = nozzle_map
self._current_nozzle_configuration = nozzle_map
- self._pick_up_current_map: Final[Dict[int, float]] = pick_up_current_map
@classmethod
- def build_from_nozzlemap(
- cls,
- nozzle_map: Dict[str, List[float]],
- pick_up_current_map: Dict[int, float],
+ def build_from_config(
+ cls, pipette_geometry: PipetteGeometryDefinition
) -> "NozzleConfigurationManager":
-
- sorted_nozzlemap = list(nozzle_map.keys())
- sorted_nozzlemap.sort(key=lambda x: int(x[1::]))
- nozzle_map_ordereddict: Dict[str, Point] = OrderedDict(
- {k: Point(*nozzle_map[k]) for k in sorted_nozzlemap}
+ sorted_nozzle_map = OrderedDict(
+ (
+ (k, Point(*pipette_geometry.nozzle_map[k]))
+ for k in _nozzle_names_by_row(pipette_geometry.ordered_rows)
+ )
+ )
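+ # Row and column lookups keyed by their labels, in the order given by the geometry definition.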
+ sorted_rows = OrderedDict(
+ (entry.key, entry.ordered_nozzles)
+ for entry in pipette_geometry.ordered_rows
+ )
+ sorted_cols = OrderedDict(
+ (entry.key, entry.ordered_nozzles)
+ for entry in pipette_geometry.ordered_columns
)
- first_nozzle = next(iter(list(nozzle_map_ordereddict.keys())))
- last_nozzle = next(reversed(list(nozzle_map_ordereddict.keys())))
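+ # Default to the full instrument: back-left of the first row through front-right of the last row.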
+ back_left = next(iter(sorted_rows.values()))[0]
+ front_right = next(reversed(list(sorted_rows.values())))[-1]
starting_nozzle_config = NozzleMap.build(
- nozzle_map_ordereddict,
- starting_nozzle=first_nozzle,
- back_left_nozzle=first_nozzle,
- front_right_nozzle=last_nozzle,
+ physical_nozzles=sorted_nozzle_map,
+ physical_rows=sorted_rows,
+ physical_columns=sorted_cols,
+ starting_nozzle=back_left,
+ back_left_nozzle=back_left,
+ front_right_nozzle=front_right,
)
- return cls(starting_nozzle_config, pick_up_current_map)
+ return cls(starting_nozzle_config)
@property
def starting_nozzle_offset(self) -> Point:
@@ -260,37 +366,38 @@ def update_nozzle_configuration(
front_right_nozzle: str,
starting_nozzle: Optional[str] = None,
) -> None:
- if (
- back_left_nozzle == self._physical_nozzle_map.back_left
- and front_right_nozzle == self._physical_nozzle_map.front_right
- ):
- self._current_nozzle_configuration = self._physical_nozzle_map
- else:
- NozzleMap.validate_nozzle_configuration(
- back_left_nozzle,
- front_right_nozzle,
- self._physical_nozzle_map,
- self._current_nozzle_configuration,
- )
-
- self._current_nozzle_configuration = NozzleMap.build(
- self._physical_nozzle_map.map_store,
- starting_nozzle=starting_nozzle or back_left_nozzle,
- back_left_nozzle=back_left_nozzle,
- front_right_nozzle=front_right_nozzle,
- origin_nozzle=self._physical_nozzle_map.starting_nozzle,
- )
+ self._current_nozzle_configuration = NozzleMap.build(
+ # these casts are because of pydantic in the protocol engine (see above)
+ physical_nozzles=cast(
+ "OrderedDict[str, Point]", self._physical_nozzle_map.map_store
+ ),
+ physical_rows=cast(
+ "OrderedDict[str, List[str]]", self._physical_nozzle_map.rows
+ ),
+ physical_columns=cast(
+ "OrderedDict[str, List[str]]", self._physical_nozzle_map.columns
+ ),
+ starting_nozzle=starting_nozzle or back_left_nozzle,
+ back_left_nozzle=back_left_nozzle,
+ front_right_nozzle=front_right_nozzle,
+ )
- def get_tip_configuration_current(self) -> float:
- return self._pick_up_current_map[self._current_nozzle_configuration.tip_count]
+ def get_tip_count(self) -> int:
+ return self._current_nozzle_configuration.tip_count
def critical_point_with_tip_length(
self,
cp_override: Optional[CriticalPoint],
tip_length: float = 0.0,
) -> Point:
- if cp_override == CriticalPoint.XY_CENTER:
+ if cp_override == CriticalPoint.INSTRUMENT_XY_CENTER:
+ current_nozzle = (
+ self._current_nozzle_configuration.instrument_xy_center_offset
+ )
+ elif cp_override == CriticalPoint.XY_CENTER:
current_nozzle = self._current_nozzle_configuration.xy_center_offset
+ elif cp_override == CriticalPoint.Y_CENTER:
+ current_nozzle = self._current_nozzle_configuration.y_center_offset
elif cp_override == CriticalPoint.FRONT_NOZZLE:
current_nozzle = self._current_nozzle_configuration.front_nozzle_offset
else:
diff --git a/api/src/opentrons/hardware_control/ot3_calibration.py b/api/src/opentrons/hardware_control/ot3_calibration.py
index 6bca60bfed3..e49b4de171f 100644
--- a/api/src/opentrons/hardware_control/ot3_calibration.py
+++ b/api/src/opentrons/hardware_control/ot3_calibration.py
@@ -9,7 +9,7 @@
from enum import Enum
from math import floor, copysign
from logging import getLogger
-from opentrons.util.linal import solve_attitude, SolvePoints
+from opentrons.util.linal import solve_attitude, SolvePoints, DoubleMatrix
from .types import OT3Mount, Axis, GripperProbe
from opentrons.types import Point
@@ -42,12 +42,12 @@
)
from opentrons.config.robot_configs import (
default_ot3_deck_calibration,
- defaults_ot3,
)
+from opentrons.config import defaults_ot3
from .util import DeckTransformState
if TYPE_CHECKING:
- from .ot3api import OT3API
+ from opentrons.hardware_control import OT3HardwareControlAPI
LOG = getLogger(__name__)
@@ -123,7 +123,7 @@ def _verify_height(
async def _verify_edge_pos(
- hcapi: OT3API,
+ hcapi: OT3HardwareControlAPI,
mount: OT3Mount,
search_axis: Union[Literal[Axis.X, Axis.Y]],
found_edge: Point,
@@ -177,7 +177,7 @@ def critical_edge_offset(
async def find_edge_binary(
- hcapi: OT3API,
+ hcapi: OT3HardwareControlAPI,
mount: OT3Mount,
slot_edge_nominal: Point,
search_axis: Union[Literal[Axis.X, Axis.Y]],
@@ -272,7 +272,7 @@ async def find_edge_binary(
async def find_slot_center_binary(
- hcapi: OT3API,
+ hcapi: OT3HardwareControlAPI,
mount: OT3Mount,
estimated_center: Point,
raise_verify_error: bool = True,
@@ -337,7 +337,7 @@ async def find_slot_center_binary(
async def find_calibration_structure_height(
- hcapi: OT3API,
+ hcapi: OT3HardwareControlAPI,
mount: OT3Mount,
nominal_center: Point,
probe: InstrumentProbeType = InstrumentProbeType.PRIMARY,
@@ -365,7 +365,7 @@ async def find_calibration_structure_height(
async def _probe_deck_at(
- api: OT3API,
+ api: OT3HardwareControlAPI,
mount: OT3Mount,
target: Point,
settings: CapacitivePassSettings,
@@ -390,7 +390,7 @@ async def _probe_deck_at(
async def find_axis_center(
- hcapi: OT3API,
+ hcapi: OT3HardwareControlAPI,
mount: OT3Mount,
minus_edge_nominal: Point,
plus_edge_nominal: Point,
@@ -477,7 +477,7 @@ def _edges_from_data(
# an N-sample rolling average. by inverting the sign of half the kernel, which is
# why we need it to be even, we do the same thing but while also taking a finite
# difference.
- average_difference_kernel = np.concatenate( # type: ignore
+ average_difference_kernel = np.concatenate(
(
np.full(average_width_samples // 2, 1 / average_width_samples),
np.full(average_width_samples // 2, -1 / average_width_samples),
@@ -530,7 +530,7 @@ def _edges_from_data(
async def find_slot_center_noncontact(
- hcapi: OT3API, mount: OT3Mount, estimated_center: Point
+ hcapi: OT3HardwareControlAPI, mount: OT3Mount, estimated_center: Point
) -> Point:
NONCONTACT_INTERVAL_MM: float = 0.1
travel_center = estimated_center + Point(0, 0, NONCONTACT_INTERVAL_MM)
@@ -552,7 +552,7 @@ async def find_slot_center_noncontact(
async def find_calibration_structure_center(
- hcapi: OT3API,
+ hcapi: OT3HardwareControlAPI,
mount: OT3Mount,
nominal_center: Point,
method: CalibrationMethod = CalibrationMethod.BINARY_SEARCH,
@@ -574,7 +574,7 @@ async def find_calibration_structure_center(
async def _calibrate_mount(
- hcapi: OT3API,
+ hcapi: OT3HardwareControlAPI,
mount: OT3Mount,
slot: int = SLOT_CENTER,
method: CalibrationMethod = CalibrationMethod.BINARY_SEARCH,
@@ -641,7 +641,7 @@ async def _calibrate_mount(
async def find_calibration_structure_position(
- hcapi: OT3API,
+ hcapi: OT3HardwareControlAPI,
mount: OT3Mount,
nominal_center: Point,
method: CalibrationMethod = CalibrationMethod.BINARY_SEARCH,
@@ -673,7 +673,7 @@ async def find_calibration_structure_position(
async def find_slot_center_binary_from_nominal_center(
- hcapi: OT3API,
+ hcapi: OT3HardwareControlAPI,
mount: OT3Mount,
slot: int,
) -> Tuple[Point, Point]:
@@ -698,7 +698,7 @@ async def find_slot_center_binary_from_nominal_center(
async def _determine_transform_matrix(
- hcapi: OT3API,
+ hcapi: OT3HardwareControlAPI,
mount: OT3Mount,
) -> Tuple[types.AttitudeMatrix, Dict[str, Any]]:
"""
@@ -750,7 +750,7 @@ def gripper_pin_offsets_mean(front: Point, rear: Point) -> Point:
async def calibrate_gripper_jaw(
- hcapi: OT3API,
+ hcapi: OT3HardwareControlAPI,
probe: GripperProbe,
slot: int = 5,
method: CalibrationMethod = CalibrationMethod.BINARY_SEARCH,
@@ -788,7 +788,7 @@ async def calibrate_gripper_jaw(
async def calibrate_gripper(
- hcapi: OT3API, offset_front: Point, offset_rear: Point
+ hcapi: OT3HardwareControlAPI, offset_front: Point, offset_rear: Point
) -> Point:
"""Calibrate gripper."""
offset = gripper_pin_offsets_mean(front=offset_front, rear=offset_rear)
@@ -798,7 +798,7 @@ async def calibrate_gripper(
async def find_pipette_offset(
- hcapi: OT3API,
+ hcapi: OT3HardwareControlAPI,
mount: Literal[OT3Mount.LEFT, OT3Mount.RIGHT],
slot: int = 5,
method: CalibrationMethod = CalibrationMethod.BINARY_SEARCH,
@@ -829,7 +829,7 @@ async def find_pipette_offset(
async def calibrate_pipette(
- hcapi: OT3API,
+ hcapi: OT3HardwareControlAPI,
mount: Literal[OT3Mount.LEFT, OT3Mount.RIGHT],
slot: int = 5,
method: CalibrationMethod = CalibrationMethod.BINARY_SEARCH,
@@ -852,7 +852,7 @@ async def calibrate_pipette(
async def calibrate_module(
- hcapi: OT3API,
+ hcapi: OT3HardwareControlAPI,
mount: OT3Mount,
slot: str,
module_id: str,
@@ -907,7 +907,7 @@ async def calibrate_module(
async def calibrate_belts(
- hcapi: OT3API,
+ hcapi: OT3HardwareControlAPI,
mount: OT3Mount,
pipette_id: str,
) -> Tuple[types.AttitudeMatrix, Dict[str, Any]]:
@@ -952,9 +952,11 @@ def apply_machine_transform(
-------
Attitude matrix with regards to machine coordinate system.
"""
- belt_attitude_arr = np.array(belt_attitude)
- machine_transform_arr = np.array(defaults_ot3.DEFAULT_MACHINE_TRANSFORM)
- deck_attitude_arr = np.dot(belt_attitude_arr, machine_transform_arr) # type: ignore[no-untyped-call]
+ belt_attitude_arr: DoubleMatrix = np.array(belt_attitude)
+ machine_transform_arr: DoubleMatrix = np.array(
+ defaults_ot3.DEFAULT_MACHINE_TRANSFORM
+ )
+ deck_attitude_arr = np.dot(belt_attitude_arr, machine_transform_arr)
deck_attitude = deck_attitude_arr.round(4).tolist()
return deck_attitude # type: ignore[no-any-return]
@@ -991,9 +993,9 @@ def validate_attitude_deck_calibration(
TODO(pm, 5/9/2023): As with the OT2, expand on this method,
or create another method to diagnose bad instrument offset data
"""
- curr_cal = np.array(deck_cal.attitude)
+ curr_cal: DoubleMatrix = np.array(deck_cal.attitude)
row, _ = curr_cal.shape
- rank: int = np.linalg.matrix_rank(curr_cal) # type: ignore
+ rank: int = np.linalg.matrix_rank(curr_cal)
if row != rank:
# Check that the matrix is non-singular
return DeckTransformState.SINGULARITY
@@ -1005,7 +1007,7 @@ def validate_attitude_deck_calibration(
return DeckTransformState.OK
-def delete_belt_calibration_data(hcapi: OT3API) -> None:
+def delete_belt_calibration_data(hcapi: OT3HardwareControlAPI) -> None:
delete_robot_belt_attitude()
hcapi.reset_deck_calibration()
diff --git a/api/src/opentrons/hardware_control/ot3api.py b/api/src/opentrons/hardware_control/ot3api.py
index 1842d26152d..21c3f70dab7 100644
--- a/api/src/opentrons/hardware_control/ot3api.py
+++ b/api/src/opentrons/hardware_control/ot3api.py
@@ -4,7 +4,6 @@
from functools import partial, lru_cache, wraps
from dataclasses import replace
import logging
-from copy import deepcopy
from collections import OrderedDict
from typing import (
AsyncIterator,
@@ -34,9 +33,6 @@
pipette_load_name_conversions as pipette_load_name,
)
from opentrons_shared_data.robot.dev_types import RobotType
-from opentrons_shared_data.errors.exceptions import (
- StallOrCollisionDetectedError,
-)
from opentrons import types as top_types
from opentrons.config import robot_configs
@@ -48,14 +44,7 @@
LiquidProbeSettings,
)
from opentrons.drivers.rpi_drivers.types import USBPort, PortGroup
-from opentrons_hardware.hardware_control.motion_planning import (
- Move,
- MoveManager,
- MoveTarget,
- ZeroLengthMoveError,
-)
from opentrons.hardware_control.nozzle_manager import NozzleConfigurationType
-from opentrons_hardware.hardware_control.motion import MoveStopCondition
from opentrons_shared_data.errors.exceptions import (
EnumeratedError,
PythonException,
@@ -75,15 +64,7 @@
GripperCalibrationOffset,
PipetteOffsetSummary,
)
-from .backends.ot3controller import OT3Controller
-from .backends.ot3simulator import OT3Simulator
-from .backends.ot3utils import (
- axis_convert,
- get_system_constraints,
- get_system_constraints_for_calibration,
- get_system_constraints_for_plunger_acceleration,
-)
-from .backends.errors import SubsystemUpdating
+
from .execution_manager import ExecutionManagerProvider
from .pause_manager import PauseManager
from .module_control import AttachedModulesControl
@@ -95,6 +76,8 @@
HardwareEvent,
HardwareEventHandler,
HardwareAction,
+ HepaFanState,
+ HepaUVState,
MotionChecks,
SubSystem,
PauseType,
@@ -109,9 +92,9 @@
SubSystemState,
TipStateType,
EstopOverallStatus,
- EstopAttachLocation,
EstopStateNotification,
EstopState,
+ HardwareFeatureFlags,
FailedTipStateCheck,
)
from .errors import (
@@ -120,7 +103,7 @@
from . import modules
from .ot3_calibration import OT3Transforms, OT3RobotCalibrationProvider
-from .protocols import HardwareControlInterface
+from .protocols import FlexHardwareControlInterface
# TODO (lc 09/15/2022) We should update our pipette handler to reflect OT-3 properties
# in a follow-up PR.
@@ -154,10 +137,12 @@
InstrumentDict,
GripperDict,
)
+from .backends.types import HWStopCondition
+from .backends.flex_protocol import FlexBackend
+from .backends.ot3simulator import OT3Simulator
+from .backends.errors import SubsystemUpdating
-from .status_bar_state import StatusBarStateController
-
mod_log = logging.getLogger(__name__)
AXES_IN_HOMING_ORDER: Tuple[Axis, Axis, Axis, Axis, Axis, Axis, Axis, Axis, Axis] = (
@@ -196,7 +181,9 @@ class OT3API(
# of methods that are present in the protocol will call the (empty,
# do-nothing) methods in the protocol. This will happily make all the
# tests fail.
- HardwareControlInterface[OT3Transforms],
+ FlexHardwareControlInterface[
+ OT3Transforms, Union[top_types.Mount, OT3Mount], OT3Config
+ ],
):
"""This API is the primary interface to the hardware controller.
@@ -217,9 +204,10 @@ class OT3API(
def __init__(
self,
- backend: Union[OT3Simulator, OT3Controller],
+ backend: FlexBackend,
loop: asyncio.AbstractEventLoop,
config: OT3Config,
+ feature_flags: HardwareFeatureFlags,
) -> None:
"""Initialize an API instance.
@@ -235,7 +223,8 @@ def __init__(
def estop_cb(event: HardwareEvent) -> None:
self._update_estop_state(event)
- backend.estop_state_machine.add_listener(estop_cb)
+ self._feature_flags = feature_flags
+ backend.add_estop_callback(estop_cb)
self._callbacks: Set[HardwareEventHandler] = set()
# {'X': 0.0, 'Y': 0.0, 'Z': 0.0, 'A': 0.0, 'B': 0.0, 'C': 0.0}
@@ -251,21 +240,32 @@ def estop_cb(event: HardwareEvent) -> None:
self._motion_lock = asyncio.Lock()
self._door_state = DoorState.CLOSED
self._pause_manager = PauseManager()
- self._gantry_load = GantryLoad.LOW_THROUGHPUT
- self._move_manager = MoveManager(
- constraints=get_system_constraints(
- self._config.motion_settings, self._gantry_load
- )
- )
- self._status_bar_controller = StatusBarStateController(
- self._backend.status_bar_interface()
- )
-
self._pipette_handler = OT3PipetteHandler({m: None for m in OT3Mount})
self._gripper_handler = GripperHandler(gripper=None)
+ self._gantry_load = GantryLoad.LOW_THROUGHPUT
+ self._configured_since_update = True
OT3RobotCalibrationProvider.__init__(self, self._config)
ExecutionManagerProvider.__init__(self, isinstance(backend, OT3Simulator))
+ def is_idle_mount(self, mount: Union[top_types.Mount, OT3Mount]) -> bool:
+ """Only the gripper mount or the 96-channel pipette mount would be idle
+ (disengaged).
+
+ If gripper mount is NOT the last moved mount, it's idle.
+ If a 96-channel pipette is attached, the mount is idle if it's not
+ the last moved mount.
+ """
+ realmount = OT3Mount.from_mount(mount)
+ if realmount == OT3Mount.GRIPPER or (
+ realmount == OT3Mount.LEFT
+ and self._gantry_load == GantryLoad.HIGH_THROUGHPUT
+ ):
+ ax = Axis.by_mount(realmount)
+ if ax in self.engaged_axes.keys():
+ return not self.engaged_axes[ax]
+
+ return False
+
@property
def door_state(self) -> DoorState:
return self._door_state
@@ -281,42 +281,28 @@ def gantry_load(self) -> GantryLoad:
async def set_gantry_load(self, gantry_load: GantryLoad) -> None:
mod_log.info(f"Setting gantry load to {gantry_load}")
self._gantry_load = gantry_load
- self._move_manager.update_constraints(
- get_system_constraints(self._config.motion_settings, gantry_load)
- )
+ self._backend.update_constraints_for_gantry_load(gantry_load)
await self._backend.update_to_default_current_settings(gantry_load)
async def get_serial_number(self) -> Optional[str]:
return await self._backend.get_serial_number()
async def set_system_constraints_for_calibration(self) -> None:
- self._move_manager.update_constraints(
- get_system_constraints_for_calibration(
- self._config.motion_settings, self._gantry_load
- )
- )
- mod_log.debug(
- f"Set system constraints for calibration: {self._move_manager.get_constraints()}"
+ self._backend.update_constraints_for_calibration_with_gantry_load(
+ self._gantry_load
)
async def set_system_constraints_for_plunger_acceleration(
self, mount: OT3Mount, acceleration: float
) -> None:
- new_constraints = get_system_constraints_for_plunger_acceleration(
- self._config.motion_settings, self._gantry_load, mount, acceleration
+ self._backend.update_constraints_for_plunger_acceleration(
+ mount, acceleration, self._gantry_load
)
- self._move_manager.update_constraints(new_constraints)
@contextlib.asynccontextmanager
async def restore_system_constrants(self) -> AsyncIterator[None]:
- old_system_constraints = deepcopy(self._move_manager.get_constraints())
- try:
+ async with self._backend.restore_system_constraints():
yield
- finally:
- self._move_manager.update_constraints(old_system_constraints)
- mod_log.debug(
- f"Restore previous system constraints: {old_system_constraints}"
- )
def _update_door_state(self, door_state: DoorState) -> None:
mod_log.info(f"Updating the window switch status: {door_state}")
@@ -378,18 +364,33 @@ async def build_hardware_controller(
use_usb_bus: bool = False,
update_firmware: bool = True,
status_bar_enabled: bool = True,
+ feature_flags: Optional[HardwareFeatureFlags] = None,
) -> "OT3API":
"""Build an ot3 hardware controller."""
checked_loop = use_or_initialize_loop(loop)
+ if feature_flags is None:
+ # If no feature flag set is defined, we will use the default values
+ feature_flags = HardwareFeatureFlags()
if not isinstance(config, OT3Config):
checked_config = robot_configs.load_ot3()
else:
checked_config = config
+ from .backends.ot3controller import OT3Controller
+
backend = await OT3Controller.build(
- checked_config, use_usb_bus, check_updates=update_firmware
+ checked_config,
+ use_usb_bus,
+ check_updates=update_firmware,
+ feature_flags=feature_flags,
+ )
+
+ api_instance = cls(
+ backend,
+ loop=checked_loop,
+ config=checked_config,
+ feature_flags=feature_flags,
)
- api_instance = cls(backend, loop=checked_loop, config=checked_config)
await api_instance.set_status_bar_enabled(status_bar_enabled)
module_controls = await AttachedModulesControl.build(
api_instance, board_revision=backend.board_revision
@@ -412,24 +413,28 @@ async def build_hardware_simulator(
Dict[OT3Mount, Dict[str, Optional[str]]],
Dict[top_types.Mount, Dict[str, Optional[str]]],
] = None,
- attached_modules: Optional[List[str]] = None,
+ attached_modules: Optional[Dict[str, List[str]]] = None,
config: Union[RobotConfig, OT3Config, None] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
strict_attached_instruments: bool = True,
+ feature_flags: Optional[HardwareFeatureFlags] = None,
) -> "OT3API":
"""Build a simulating hardware controller.
This method may be used both on a real robot and on dev machines.
Multiple simulating hardware controllers may be active at one time.
"""
+ if feature_flags is None:
+ feature_flags = HardwareFeatureFlags()
- checked_modules = attached_modules or []
+ checked_modules = attached_modules or {}
checked_loop = use_or_initialize_loop(loop)
if not isinstance(config, OT3Config):
checked_config = robot_configs.load_ot3()
else:
checked_config = config
+
backend = await OT3Simulator.build(
{OT3Mount.from_mount(k): v for k, v in attached_instruments.items()}
if attached_instruments
@@ -438,8 +443,14 @@ async def build_hardware_simulator(
checked_config,
checked_loop,
strict_attached_instruments,
+ feature_flags,
+ )
+ api_instance = cls(
+ backend,
+ loop=checked_loop,
+ config=checked_config,
+ feature_flags=feature_flags,
)
- api_instance = cls(backend, loop=checked_loop, config=checked_config)
await api_instance.cache_instruments()
module_controls = await AttachedModulesControl.build(
api_instance, board_revision=backend.board_revision
@@ -497,20 +508,29 @@ async def update_firmware(
) -> AsyncIterator[UpdateStatus]:
"""Start the firmware update for one or more subsystems and return update progress iterator."""
subsystems = subsystems or set()
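+ # Disengage the mount Z axes belonging to any subsystem that is about to be updated.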
+ if SubSystem.head in subsystems:
+ await self.disengage_axes([Axis.Z_L, Axis.Z_R])
+ if SubSystem.gripper in subsystems:
+ await self.disengage_axes([Axis.Z_G])
# start the updates and yield the progress
- try:
- async for update_status in self._backend.update_firmware(subsystems, force):
- yield update_status
- except SubsystemUpdating as e:
- raise UpdateOngoingError(e.msg) from e
- except EnumeratedError:
- raise
- except BaseException as e:
- mod_log.exception("Firmware update failed")
- raise FirmwareUpdateFailedError(
- message="Update failed because of uncaught error",
- wrapping=[PythonException(e)],
- ) from e
+ async with self._motion_lock:
+ try:
+ async for update_status in self._backend.update_firmware(
+ subsystems, force
+ ):
+ yield update_status
+ except SubsystemUpdating as e:
+ raise UpdateOngoingError(e.msg) from e
+ except EnumeratedError:
+ raise
+ except BaseException as e:
+ mod_log.exception("Firmware update failed")
+ raise FirmwareUpdateFailedError(
+ message="Update failed because of uncaught error",
+ wrapping=[PythonException(e)],
+ ) from e
+ finally:
+ self._configured_since_update = False
# Incidentals (i.e. not motion) API
@@ -537,13 +557,13 @@ async def identify(self, duration_s: int = 5) -> None:
await self.set_lights(button=True)
async def set_status_bar_state(self, state: StatusBarState) -> None:
- await self._status_bar_controller.set_status_bar_state(state)
+ await self._backend.set_status_bar_state(state)
async def set_status_bar_enabled(self, enabled: bool) -> None:
- await self._status_bar_controller.set_enabled(enabled)
+ await self._backend.set_status_bar_enabled(enabled)
def get_status_bar_state(self) -> StatusBarState:
- return self._status_bar_controller.get_current_state()
+ return self._backend.get_status_bar_state()
@ExecutionManagerProvider.wait_for_running
async def delay(self, duration_s: float) -> None:
@@ -600,6 +620,7 @@ async def cache_pipette(
req_instr,
pip_id,
pip_offset_cal,
+ self._feature_flags.use_old_aspiration_functions,
)
self._pipette_handler.hardware_instruments[mount] = p
# TODO (lc 12-5-2022) Properly support backwards compatibility
@@ -631,16 +652,21 @@ def get_all_attached_instr(self) -> Dict[OT3Mount, Optional[InstrumentDict]]:
# TODO (spp, 2023-01-31): add unit tests
async def cache_instruments(
- self, require: Optional[Dict[top_types.Mount, PipetteName]] = None
+ self,
+ require: Optional[Dict[top_types.Mount, PipetteName]] = None,
+ skip_if_would_block: bool = False,
) -> None:
"""
Scan the attached instruments, take necessary configuration actions,
and set up hardware controller internal state if necessary.
"""
- skip_configure = await self._cache_instruments(require)
- if not skip_configure:
- self._log.info("Instrument model cache updated, reconfiguring")
- await self._configure_instruments()
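+ # Bail out without waiting if the caller allows skipping and another task currently holds the motion lock.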
+ if skip_if_would_block and self._motion_lock.locked():
+ return
+ async with self._motion_lock:
+ skip_configure = await self._cache_instruments(require)
+ if not skip_configure or not self._configured_since_update:
+ self._log.info("Reconfiguring instrument cache")
+ await self._configure_instruments()
async def _cache_instruments( # noqa: C901
self, require: Optional[Dict[top_types.Mount, PipetteName]] = None
@@ -660,11 +686,11 @@ async def _cache_instruments( # noqa: C901
# We should also check version here once we're comfortable.
if not pipette_load_name.supported_pipette(name):
raise RuntimeError(f"{name} is not a valid pipette name")
- async with self._motion_lock:
- # we're not actually checking the required instrument except in the context
- # of simulation and it feels like a lot of work for this function
- # actually be doing.
- found = await self._backend.get_attached_instruments(checked_require)
+
+ # we're not actually checking the required instrument except in the context
+ # of simulation and it feels like a lot of work for this function
+ # actually be doing.
+ found = await self._backend.get_attached_instruments(checked_require)
if OT3Mount.GRIPPER in found.keys():
# Is now a gripper, ask if it's ok to skip
@@ -710,8 +736,9 @@ async def _cache_instruments( # noqa: C901
async def _configure_instruments(self) -> None:
"""Configure instruments"""
await self.set_gantry_load(self._gantry_load_from_instruments())
- await self.refresh_positions()
- await self.reset_tip_detectors()
+ await self.refresh_positions(acquire_lock=False)
+ await self.reset_tip_detectors(False)
+ self._configured_since_update = True
async def reset_tip_detectors(
self,
@@ -844,18 +871,26 @@ async def home_z(
axes = list(Axis.ot3_mount_axes())
await self.home(axes)
- async def home_gripper_jaw(self) -> None:
- """
- Home the jaw of the gripper.
- """
- try:
- gripper = self._gripper_handler.get_gripper()
- self._log.info("Homing gripper jaw.")
-
- dc = self._gripper_handler.get_duty_cycle_by_grip_force(
- gripper.default_home_force
+ async def _do_home_and_maybe_calibrate_gripper_jaw(self) -> None:
+ gripper = self._gripper_handler.get_gripper()
+ self._log.info("Homing gripper jaw.")
+ dc = self._gripper_handler.get_duty_cycle_by_grip_force(
+ gripper.default_home_force
+ )
+ await self._ungrip(duty_cycle=dc)
+ if not gripper.has_jaw_width_calibration:
+ self._log.info("Calibrating gripper jaw.")
+ await self._grip(
+ duty_cycle=dc, expected_displacement=gripper.max_jaw_displacement()
)
+ jaw_at_closed = (await self._cache_encoder_position())[Axis.G]
+ gripper.update_jaw_open_position_from_closed_position(jaw_at_closed)
await self._ungrip(duty_cycle=dc)
+
+ async def home_gripper_jaw(self) -> None:
+ """Home the jaw of the gripper."""
+ try:
+ await self._do_home_and_maybe_calibrate_gripper_jaw()
except GripperNotPresentError:
pass
@@ -879,33 +914,38 @@ async def home_gear_motors(self) -> None:
GantryLoad.HIGH_THROUGHPUT
][OT3AxisKind.Q]
+ max_distance = self._backend.axis_bounds[Axis.Q][1]
# if position is not known, move toward limit switch at a constant velocity
- if not any(self._backend.gear_motor_position):
+ if self._backend.gear_motor_position is None:
await self._backend.home_tip_motors(
- distance=self._backend.axis_bounds[Axis.Q][1],
+ distance=max_distance,
velocity=homing_velocity,
)
return
- current_pos_float = axis_convert(self._backend.gear_motor_position, 0.0)[
- Axis.P_L
- ]
+ current_pos_float = self._backend.gear_motor_position or 0.0
+
+ # We filter out distances greater than `max_distance` because, if the tip motor was stopped during
+ # a slow-home motion, the reported position may be stuck at an enormously large value.
+ if (
+ current_pos_float > self._config.safe_home_distance
+ and current_pos_float < max_distance
+ ):
- if current_pos_float > self._config.safe_home_distance:
- fast_home_moves = self._build_moves(
- {Axis.Q: current_pos_float}, {Axis.Q: self._config.safe_home_distance}
- )
# move toward home until a safe distance
- await self._backend.tip_action(moves=fast_home_moves[0])
+ await self._backend.tip_action(
+ origin={Axis.Q: current_pos_float},
+ targets=[({Axis.Q: self._config.safe_home_distance}, 400)],
+ )
# update current position
- current_pos_float = axis_convert(self._backend.gear_motor_position, 0.0)[
- Axis.P_L
- ]
+ current_pos_float = self._backend.gear_motor_position or 0.0
# move until the limit switch is triggered, with no acceleration
await self._backend.home_tip_motors(
- distance=(current_pos_float + self._config.safe_home_distance),
+ distance=min(
+ current_pos_float + self._config.safe_home_distance, max_distance
+ ),
velocity=homing_velocity,
)
@@ -953,9 +993,11 @@ async def current_position_ot3(
OT3Mount.from_mount(mount), self._current_position, critical_point
)
- async def refresh_positions(self) -> None:
+ async def refresh_positions(self, acquire_lock: bool = True) -> None:
"""Request and update both the motor and encoder positions from backend."""
- async with self._motion_lock:
+ async with contextlib.AsyncExitStack() as stack:
+ if acquire_lock:
+ await stack.enter_async_context(self._motion_lock)
await self._backend.update_motor_status()
await self._cache_current_position()
await self._cache_encoder_position()
@@ -1002,6 +1044,12 @@ def _assert_encoder_ok(self, axes: Sequence[Axis]) -> None:
detail={"axes": axes_str},
)
+ def motor_status_ok(self, axis: Axis) -> bool:
+ return self._backend.check_motor_status([axis])
+
+ def encoder_status_ok(self, axis: Axis) -> bool:
+ return self._backend.check_encoder_status([axis])
+
async def encoder_current_position(
self,
mount: Union[top_types.Mount, OT3Mount],
@@ -1068,9 +1116,7 @@ def _effector_pos_from_carriage_pos(
plunger_ax: carriage_position[plunger_ax],
}
if self._gantry_load == GantryLoad.HIGH_THROUGHPUT:
- effector_pos[Axis.Q] = axis_convert(self._backend.gear_motor_position, 0.0)[
- Axis.P_L
- ]
+ effector_pos[Axis.Q] = self._backend.gear_motor_position or 0.0
return effector_pos
@@ -1140,7 +1186,7 @@ async def move_to(
else:
checked_max = None
- await self._cache_and_maybe_retract_mount(realmount)
+ await self.prepare_for_mount_movement(realmount)
await self._move(
target_position,
speed=speed,
@@ -1251,7 +1297,8 @@ async def move_rel(
checked_max: Optional[OT3AxisMap[float]] = max_speeds
else:
checked_max = None
- await self._cache_and_maybe_retract_mount(realmount)
+
+ await self.prepare_for_mount_movement(realmount)
await self._move(
target_position,
speed=speed,
@@ -1261,19 +1308,53 @@ async def move_rel(
)
async def _cache_and_maybe_retract_mount(self, mount: OT3Mount) -> None:
- """Retract the 'other' mount if necessary
+ """Retract the 'other' mount if necessary.
If `mount` does not match the value in :py:attr:`_last_moved_mount`
(and :py:attr:`_last_moved_mount` exists) then retract the mount
in :py:attr:`_last_moved_mount`. Also unconditionally update
:py:attr:`_last_moved_mount` to contain `mount`.
+
+ Disengage the 96-channel and gripper mount if retracted. Re-engage
+ the 96-channel or gripper mount if it is about to move.
"""
- if mount != self._last_moved_mount and self._last_moved_mount:
- await self.retract(self._last_moved_mount, 10)
- if mount != OT3Mount.GRIPPER:
+ last_moved = self._last_moved_mount
+ # if gripper exists and it's not the moving mount, it should retract
+ if (
+ self.has_gripper()
+ and mount != OT3Mount.GRIPPER
+ and not self.is_idle_mount(OT3Mount.GRIPPER)
+ ):
+ await self.retract(OT3Mount.GRIPPER, 10)
+ await self.disengage_axes([Axis.Z_G])
await self.idle_gripper()
+
+ # if 96-channel pipette is attached and not being moved, it should retract
+ if (
+ mount != OT3Mount.LEFT
+ and self._gantry_load == GantryLoad.HIGH_THROUGHPUT
+ and not self.is_idle_mount(OT3Mount.LEFT)
+ ):
+ await self.retract(OT3Mount.LEFT, 10)
+ await self.disengage_axes([Axis.Z_L])
+
+ # if the last moved mount is not covered by either of the above scenarios,
+ # simply retract the last moved mount
+ if last_moved and not self.is_idle_mount(last_moved) and mount != last_moved:
+ await self.retract(last_moved, 10)
+
+ # finally, home the current left/gripper mount to prepare for movement
+ if self.is_idle_mount(mount):
+ await self.home_z(mount)
self._last_moved_mount = mount
+ async def prepare_for_mount_movement(
+ self, mount: Union[top_types.Mount, OT3Mount]
+ ) -> None:
+ """Retract the idle mount if necessary."""
+ realmount = OT3Mount.from_mount(mount)
+ await self._cache_and_maybe_retract_mount(realmount)
+
async def idle_gripper(self) -> None:
"""Move gripper to its idle, gripped position."""
try:
@@ -1286,20 +1367,35 @@ async def idle_gripper(self) -> None:
except GripperNotPresentError:
pass
- def _build_moves(
+ def raise_error_if_gripper_pickup_failed(
self,
- origin: Dict[Axis, float],
- target: Dict[Axis, float],
- speed: Optional[float] = None,
- ) -> List[List[Move[Axis]]]:
- """Build move with Move Manager with machine positions."""
- # TODO: (2022-02-10) Use actual max speed for MoveTarget
- checked_speed = speed or 400
- move_target = MoveTarget.build(position=target, max_speed=checked_speed)
- _, moves = self._move_manager.plan_motion(
- origin=origin, target_list=[move_target]
+ expected_grip_width: float,
+ grip_width_uncertainty_wider: float,
+ grip_width_uncertainty_narrower: float,
+ ) -> None:
+ """Ensure that a gripper pickup succeeded.
+
+ The labware width is the width of the labware at the point of the grip, as closely as it is known.
+ The uncertainty values should be specified to handle the case where the labware definition does not
+ provide that information.
+
+ Both values should be positive; their directional sense is determined by which argument they are passed as.
+ """
+ # check if the gripper is at an acceptable position after attempting to
+ # pick up labware
+ gripper = self._gripper_handler.get_gripper()
+ self._backend.check_gripper_position_within_bounds(
+ expected_grip_width,
+ grip_width_uncertainty_wider,
+ grip_width_uncertainty_narrower,
+ gripper.jaw_width,
+ gripper.max_allowed_grip_error,
+ gripper.max_jaw_width,
+ gripper.min_jaw_width,
)
- return moves
+
+ def gripper_jaw_can_home(self) -> bool:
+ return self._gripper_handler.is_ready_for_jaw_home()
@ExecutionManagerProvider.wait_for_running
async def _move(
@@ -1326,27 +1422,17 @@ async def _move(
if ax in Axis.gantry_axes()
}
check_motion_bounds(to_check, target_position, bounds, check_bounds)
-
+ self._log.info(f"Move: deck {target_position} becomes machine {machine_pos}")
origin = await self._backend.update_position()
- try:
- moves = self._build_moves(origin, machine_pos, speed)
- except ZeroLengthMoveError as zero_length_error:
- self._log.info(f"{str(zero_length_error)}, ignoring")
- return
- self._log.info(
- f"move: deck {target_position} becomes machine {machine_pos} from {origin} "
- f"requiring {moves}"
- )
async with contextlib.AsyncExitStack() as stack:
if acquire_lock:
await stack.enter_async_context(self._motion_lock)
try:
await self._backend.move(
origin,
- moves[0],
- MoveStopCondition.stall
- if expect_stalls
- else MoveStopCondition.none,
+ machine_pos,
+ speed or 400.0,
+ HWStopCondition.stall if expect_stalls else HWStopCondition.none,
)
except Exception:
self._log.exception("Move failed")
@@ -1373,9 +1459,6 @@ async def _set_plunger_current_and_home(
if encoder_ok and motor_ok:
if origin[axis] - target_pos[axis] > self._config.safe_home_distance:
target_pos[axis] += self._config.safe_home_distance
- moves = self._build_moves(
- origin, target_pos, instr.config.plunger_homing_configurations.speed
- )
async with self._backend.motor_current(
run_currents={
axis: instr.config.plunger_homing_configurations.current
@@ -1383,8 +1466,9 @@ async def _set_plunger_current_and_home(
):
await self._backend.move(
origin,
- moves[0],
- MoveStopCondition.none,
+ target_pos,
+ instr.config.plunger_homing_configurations.speed,
+ HWStopCondition.none,
)
await self._backend.home([axis], self.gantry_load)
else:
@@ -1401,6 +1485,22 @@ async def _retrieve_home_position(
target_pos = {axis: self._backend.home_position()[axis]}
return origin_pos, target_pos
+ async def _enable_before_update_estimation(self, axis: Axis) -> None:
+ enabled = await self._backend.is_motor_engaged(axis)
+
+ if not enabled:
+ if axis == Axis.Z_L and self.gantry_load == GantryLoad.HIGH_THROUGHPUT:
+ # we're here if the left mount has been idle and the brake is engaged
+ # we want to temporarily increase its hold current to prevent the z
+ # stage from dropping when switching off the ebrake
+ async with self._backend.increase_z_l_hold_current():
+ await self.engage_axes([axis])
+ else:
+ await self.engage_axes([axis])
+
+ # now that motor is enabled, we can update position estimation
+ await self._update_position_estimation([axis])
+
@_adjust_high_throughput_z_current
async def _home_axis(self, axis: Axis) -> None:
"""
@@ -1422,20 +1522,23 @@ async def _home_axis(self, axis: Axis) -> None:
assert axis not in [Axis.G, Axis.Q]
encoder_ok = self._backend.check_encoder_status([axis])
- motor_ok = self._backend.check_motor_status([axis])
-
if encoder_ok:
- # ensure stepper position can be updated after boot
- await self.engage_axes([axis])
- await self._update_position_estimation([axis])
- # refresh motor and encoder statuses after position estimation update
- motor_ok = self._backend.check_motor_status([axis])
- encoder_ok = self._backend.check_encoder_status([axis])
+ # enable motor (if needed) and update estimation
+ await self._enable_before_update_estimation(axis)
+
+ # refresh motor status after position estimation update
+ motor_ok = self._backend.check_motor_status([axis])
if Axis.to_kind(axis) == OT3AxisKind.P:
await self._set_plunger_current_and_home(axis, motor_ok, encoder_ok)
return
+ # TODO: (ba, 2024-04-19): We need to explicitly engage the axis and enable
+ # the motor when we are attempting to move. This should already be
+ # happening, but something in the firmware is either not enabling the motor
+ # or disabling it.
+ await self.engage_axes([axis])
+
# we can move to safe home distance!
if encoder_ok and motor_ok:
origin, target_pos = await self._retrieve_home_position(axis)
@@ -1449,19 +1552,12 @@ async def _home_axis(self, axis: Axis) -> None:
axis_home_dist = 20.0
if origin[axis] - target_pos[axis] > axis_home_dist:
target_pos[axis] += axis_home_dist
- moves = self._build_moves(origin, target_pos)
- try:
- await self._backend.move(
- origin,
- moves[0],
- MoveStopCondition.none,
- )
- except StallOrCollisionDetectedError:
- self._log.warning(
- f"Stall on {axis} during fast home, encoder may have missed an overflow"
- )
- await self.refresh_positions()
-
+ await self._backend.move(
+ origin,
+ target_pos,
+ speed=400,
+ stop_condition=HWStopCondition.none,
+ )
await self._backend.home([axis], self.gantry_load)
else:
# both stepper and encoder positions are invalid, must home
@@ -1469,25 +1565,21 @@ async def _home_axis(self, axis: Axis) -> None:
async def _home(self, axes: Sequence[Axis]) -> None:
"""Home one axis at a time."""
- async with self._motion_lock:
- for axis in axes:
- try:
- if axis == Axis.G:
- await self.home_gripper_jaw()
- elif axis == Axis.Q:
- await self._backend.home([axis], self.gantry_load)
- else:
- await self._home_axis(axis)
- except ZeroLengthMoveError:
- self._log.info(f"{axis} already at home position, skip homing")
- continue
- except BaseException as e:
- self._log.exception(f"Homing failed: {e}")
- self._current_position.clear()
- raise
+ for axis in axes:
+ try:
+ if axis == Axis.G:
+ await self.home_gripper_jaw()
+ elif axis == Axis.Q:
+ await self._backend.home([axis], self.gantry_load)
else:
- await self._cache_current_position()
- await self._cache_encoder_position()
+ await self._home_axis(axis)
+ except BaseException as e:
+ self._log.exception(f"Homing failed: {e}")
+ self._current_position.clear()
+ raise
+ else:
+ await self._cache_current_position()
+ await self._cache_encoder_position()
@ExecutionManagerProvider.wait_for_running
async def home(
@@ -1518,7 +1610,8 @@ async def home(
if (ax in checked_axes and self._backend.axis_is_present(ax))
]
self._log.info(f"home was called with {axes} generating sequence {home_seq}")
- await self._home(home_seq)
+ async with self._motion_lock:
+ await self._home(home_seq)
def get_engaged_axes(self) -> Dict[Axis, bool]:
"""Which axes are engaged and holding."""
@@ -1562,20 +1655,24 @@ async def retract_axis(self, axis: Axis) -> None:
motor_ok = self._backend.check_motor_status([axis])
encoder_ok = self._backend.check_encoder_status([axis])
- if motor_ok and encoder_ok:
- # we can move to the home position without checking the limit switch
- origin = await self._backend.update_position()
- target_pos = {axis: self._backend.home_position()[axis]}
- try:
- moves = self._build_moves(origin, target_pos)
- await self._backend.move(origin, moves[0], MoveStopCondition.none)
- except ZeroLengthMoveError:
- self._log.info(f"{axis} already at home position, skip retract")
- else:
- # home the axis
- await self._home_axis(axis)
- await self._cache_current_position()
- await self._cache_encoder_position()
+ async with self._motion_lock:
+ if motor_ok and encoder_ok:
+ # TODO: (ba, 2024-04-19): We need to explictly engage the axis and enable
+ # the motor when we are attempting to move. This should be already
+ # happening but something on the firmware is either not enabling the motor or
+ # disabling the motor.
+ await self.engage_axes([axis])
+
+ # we can move to the home position without checking the limit switch
+ origin = await self._backend.update_position()
+ target_pos = {axis: self._backend.home_position()[axis]}
+ await self._backend.move(origin, target_pos, 400, HWStopCondition.none)
+ else:
+ # home the axis
+ await self._home_axis(axis)
+
+ await self._cache_current_position()
+ await self._cache_encoder_position()
# Gantry/frame (i.e. not pipette) config API
@property
@@ -1613,12 +1710,25 @@ async def update_config(self, **kwargs: Any) -> None:
"""Update values of the robot's configuration."""
self._config = replace(self._config, **kwargs)
+ @property
+ def hardware_feature_flags(self) -> HardwareFeatureFlags:
+ return self._feature_flags
+
+ @hardware_feature_flags.setter
+ def hardware_feature_flags(self, feature_flags: HardwareFeatureFlags) -> None:
+ self._feature_flags = feature_flags
+ self._backend.update_feature_flags(self._feature_flags)
+
@ExecutionManagerProvider.wait_for_running
- async def _grip(self, duty_cycle: float, stay_engaged: bool = True) -> None:
+ async def _grip(
+ self, duty_cycle: float, expected_displacement: float, stay_engaged: bool = True
+ ) -> None:
"""Move the gripper jaw inward to close."""
try:
await self._backend.gripper_grip_jaw(
- duty_cycle=duty_cycle, stay_engaged=stay_engaged
+ duty_cycle=duty_cycle,
+ expected_displacement=expected_displacement,
+ stay_engaged=stay_engaged,
)
await self._cache_encoder_position()
self._gripper_handler.set_jaw_state(await self._backend.get_jaw_state())
@@ -1662,7 +1772,11 @@ async def grip(
dc = self._gripper_handler.get_duty_cycle_by_grip_force(
force_newtons or self._gripper_handler.get_gripper().default_grip_force
)
- await self._grip(duty_cycle=dc, stay_engaged=stay_engaged)
+ await self._grip(
+ duty_cycle=dc,
+ expected_displacement=self._gripper_handler.get_gripper().max_jaw_displacement(),
+ stay_engaged=stay_engaged,
+ )
async def ungrip(self, force_newtons: Optional[float] = None) -> None:
"""
@@ -1840,7 +1954,6 @@ async def dispense(
mount: Union[top_types.Mount, OT3Mount],
volume: Optional[float] = None,
rate: float = 1.0,
- # TODO (tz, 8-24-24): add implementation https://opentrons.atlassian.net/browse/RET-1373
push_out: Optional[float] = None,
) -> None:
"""
@@ -1943,16 +2056,14 @@ async def _high_throughput_check_tip(self) -> AsyncIterator[None]:
if self._backend.gear_motor_position is None:
await self.home_gear_motors()
- tip_motor_pos_float = axis_convert(self._backend.gear_motor_position, 0.0)[
- Axis.of_main_tool_actuator(OT3Mount.LEFT)
- ]
+ tip_motor_pos_float = self._backend.gear_motor_position or 0.0
# only move tip motors if they are not already below the sensor
if tip_motor_pos_float < tip_presence_check_target:
- clamp_moves = self._build_moves(
- {Axis.Q: tip_motor_pos_float}, {Axis.Q: tip_presence_check_target}
+ await self._backend.tip_action(
+ origin={Axis.Q: tip_motor_pos_float},
+ targets=[({Axis.Q: tip_presence_check_target}, 400)],
)
- await self._backend.tip_action(moves=clamp_moves[0])
try:
yield
finally:
@@ -1960,25 +2071,34 @@ async def _high_throughput_check_tip(self) -> AsyncIterator[None]:
async def get_tip_presence_status(
self,
- mount: OT3Mount,
+ mount: Union[top_types.Mount, OT3Mount],
+ follow_singular_sensor: Optional[InstrumentProbeType] = None,
) -> TipStateType:
"""
Check tip presence status. If a high throughput pipette is present,
move the tip motors down before checking the sensor status.
"""
- async with contextlib.AsyncExitStack() as stack:
- if (
- mount == OT3Mount.LEFT
- and self._gantry_load == GantryLoad.HIGH_THROUGHPUT
- ):
- await stack.enter_async_context(self._high_throughput_check_tip())
- result = await self._backend.get_tip_status(mount)
- return result
+ async with self._motion_lock:
+ real_mount = OT3Mount.from_mount(mount)
+ async with contextlib.AsyncExitStack() as stack:
+ if (
+ real_mount == OT3Mount.LEFT
+ and self._gantry_load == GantryLoad.HIGH_THROUGHPUT
+ ):
+ await stack.enter_async_context(self._high_throughput_check_tip())
+ result = await self._backend.get_tip_status(
+ real_mount, follow_singular_sensor
+ )
+ return result
async def verify_tip_presence(
- self, mount: OT3Mount, expected: TipStateType
+ self,
+ mount: Union[top_types.Mount, OT3Mount],
+ expected: TipStateType,
+ follow_singular_sensor: Optional[InstrumentProbeType] = None,
) -> None:
- status = await self.get_tip_presence_status(mount)
+ real_mount = OT3Mount.from_mount(mount)
+ status = await self.get_tip_presence_status(real_mount, follow_singular_sensor)
if status != expected:
raise FailedTipStateCheck(expected, status.value)
@@ -1987,14 +2107,17 @@ async def _force_pick_up_tip(
) -> None:
for press in pipette_spec.tip_action_moves:
async with self._backend.motor_current(run_currents=press.currents):
- target_down = target_position_from_relative(
+ target = target_position_from_relative(
mount, top_types.Point(z=press.distance), self._current_position
)
- await self._move(target_down, speed=press.speed, expect_stalls=True)
- if press.distance < 0:
- # we expect a stall has happened during a downward movement into the tiprack, so
- # we want to update the motor estimation
- await self._update_position_estimation([Axis.by_mount(mount)])
+ if press.distance < 0:
+ # we expect a stall has happened during a downward movement into the tiprack, so
+ # we want to update the motor estimation
+ await self._move(target, speed=press.speed, expect_stalls=True)
+ await self._update_position_estimation([Axis.by_mount(mount)])
+ else:
+ # we should not ignore stalls that happen during the retract part of the routine
+ await self._move(target, speed=press.speed, expect_stalls=False)
async def _tip_motor_action(
self, mount: OT3Mount, pipette_spec: List[TipActionMoveSpec]
@@ -2004,27 +2127,18 @@ async def _tip_motor_action(
currents = pipette_spec[0].currents
# Move to pickup position
async with self._backend.motor_current(run_currents=currents):
- if not any(self._backend.gear_motor_position):
+ if self._backend.gear_motor_position is None:
# home gear motor if position not known
await self.home_gear_motors()
- pipette_axis = Axis.of_main_tool_actuator(mount)
- gear_origin_float = axis_convert(self._backend.gear_motor_position, 0.0)[
- pipette_axis
- ]
+ gear_origin_float = self._backend.gear_motor_position or 0.0
move_targets = [
- MoveTarget.build(
- position={Axis.Q: move_segment.distance},
- max_speed=move_segment.speed or 400,
- )
+ ({Axis.Q: move_segment.distance}, move_segment.speed or 400)
for move_segment in pipette_spec
]
-
- _, moves = self._move_manager.plan_motion(
- origin={Axis.Q: gear_origin_float}, target_list=move_targets
+ await self._backend.tip_action(
+ origin={Axis.Q: gear_origin_float}, targets=move_targets
)
- await self._backend.tip_action(moves=moves[0])
-
await self.home_gear_motors()
async def pick_up_tip(
@@ -2042,6 +2156,8 @@ async def pick_up_tip(
def add_tip_to_instr() -> None:
instrument.add_tip(tip_length=tip_length)
instrument.set_current_volume(0)
+ if isinstance(self._backend, OT3Simulator):
+ self._backend._update_tip_state(realmount, True)
await self._move_to_plunger_bottom(realmount, rate=1.0)
if (
@@ -2049,7 +2165,9 @@ def add_tip_to_instr() -> None:
and instrument.nozzle_manager.current_configuration.configuration
== NozzleConfigurationType.FULL
):
- spec = self._pipette_handler.plan_ht_pick_up_tip()
+ spec = self._pipette_handler.plan_ht_pick_up_tip(
+ instrument.nozzle_manager.current_configuration.tip_count
+ )
if spec.z_distance_to_tiprack:
await self.move_rel(
realmount, top_types.Point(z=spec.z_distance_to_tiprack)
@@ -2057,7 +2175,10 @@ def add_tip_to_instr() -> None:
await self._tip_motor_action(realmount, spec.tip_action_moves)
else:
spec = self._pipette_handler.plan_lt_pick_up_tip(
- realmount, presses, increment
+ realmount,
+ instrument.nozzle_manager.current_configuration.tip_count,
+ presses,
+ increment,
)
await self._force_pick_up_tip(realmount, spec)
@@ -2137,6 +2258,9 @@ def _remove_tips() -> None:
await self._home([Axis.by_mount(mount)])
_remove_tips()
+ # call this in case we're simulating
+ if isinstance(self._backend, OT3Simulator):
+ self._backend._update_tip_state(realmount, False)
async def clean_up(self) -> None:
"""Get the API ready to stop cleanly."""
@@ -2214,7 +2338,7 @@ def reset_instrument(
self._pipette_handler.reset_instrument(checked_mount)
def get_instrument_offset(
- self, mount: OT3Mount
+ self, mount: Union[top_types.Mount, OT3Mount]
) -> Union[GripperCalibrationOffset, PipetteOffsetSummary, None]:
"""Get instrument calibration data."""
# TODO (spp, 2023-04-19): We haven't introduced a 'calibration_offset' key in
@@ -2223,11 +2347,13 @@ def get_instrument_offset(
# to be a part of the dict, this getter can be updated to fetch pipette offset
# from the dict, or just remove this getter entirely.
- if mount == OT3Mount.GRIPPER:
+ ot3_mount = OT3Mount.from_mount(mount)
+
+ if ot3_mount == OT3Mount.GRIPPER:
gripper_dict = self._gripper_handler.get_gripper_dict()
return gripper_dict["calibration_offset"] if gripper_dict else None
else:
- return self._pipette_handler.get_instrument_offset(mount=mount)
+ return self._pipette_handler.get_instrument_offset(mount=ot3_mount)
async def reset_instrument_offset(
self, mount: Union[top_types.Mount, OT3Mount], to_default: bool = True
@@ -2366,7 +2492,7 @@ def get_instrument_max_height(
OT3Mount.from_mount(mount), carriage_pos, critical_point
)
- return pos_at_home[Axis.by_mount(mount)] - self._config.z_retract_distance
+ return pos_at_home[Axis.by_mount(mount)]
async def update_nozzle_configuration_for_mount(
self,
@@ -2474,7 +2600,8 @@ async def liquid_probe(
probe_settings.mount_speed,
(probe_settings.plunger_speed * plunger_direction),
probe_settings.sensor_threshold_pascals,
- probe_settings.log_pressure,
+ probe_settings.output_option,
+ probe_settings.data_files,
probe_settings.auto_zero_sensor,
probe_settings.num_baseline_reads,
probe=probe if probe else InstrumentProbeType.PRIMARY,
@@ -2492,29 +2619,6 @@ async def capacitive_probe(
retract_after: bool = True,
probe: Optional[InstrumentProbeType] = None,
) -> Tuple[float, bool]:
- """Determine the position of something using the capacitive sensor.
-
- This function orchestrates detecting the position of a collision between the
- capacitive probe on the tool on the specified mount, and some fixed element
- of the robot.
-
- When calling this function, the mount's probe critical point should already
- be aligned in the probe axis with the item to be probed.
-
- It will move the mount's probe critical point to a small distance behind
- the expected position of the element (which is target_pos, in deck coordinates,
- in the axis to be probed) while running the tool's capacitive sensor. When the
- sensor senses contact, the mount stops.
-
- This function moves away and returns the sensed position.
-
- This sensed position can be used in several ways, including
- - To get an absolute position in deck coordinates of whatever was
- targeted, if something was guaranteed to be physically present.
- - To detect whether a collision occured at all. If this function
- returns a value far enough past the anticipated position, then it indicates
- there was no material there.
- """
if moving_axis not in [
Axis.X,
Axis.Y,
@@ -2619,22 +2723,32 @@ def attached_subsystems(self) -> Dict[SubSystem, SubSystemState]:
@property
def estop_status(self) -> EstopOverallStatus:
- return EstopOverallStatus(
- state=self._backend.estop_state_machine.state,
- left_physical_state=self._backend.estop_state_machine.get_physical_status(
- EstopAttachLocation.LEFT
- ),
- right_physical_state=self._backend.estop_state_machine.get_physical_status(
- EstopAttachLocation.RIGHT
- ),
- )
+ return self._backend.estop_status
def estop_acknowledge_and_clear(self) -> EstopOverallStatus:
"""Attempt to acknowledge an Estop event and clear the status.
Returns the estop status after clearing the status."""
- self._backend.estop_state_machine.acknowledge_and_clear()
+ self._backend.estop_acknowledge_and_clear()
return self.estop_status
def get_estop_state(self) -> EstopState:
- return self._backend.estop_state_machine.state
+ return self._backend.get_estop_state()
+
+ async def set_hepa_fan_state(
+ self, turn_on: bool = False, duty_cycle: int = 75
+ ) -> bool:
+ """Sets the state and duty cycle of the Hepa/UV module."""
+ return await self._backend.set_hepa_fan_state(turn_on, duty_cycle)
+
+ async def get_hepa_fan_state(self) -> Optional[HepaFanState]:
+ return await self._backend.get_hepa_fan_state()
+
+ async def set_hepa_uv_state(
+ self, turn_on: bool = False, uv_duration_s: int = 900
+ ) -> bool:
+ """Sets the state and duration (seconds) of the UV light for the Hepa/UV module."""
+ return await self._backend.set_hepa_uv_state(turn_on, uv_duration_s)
+
+ async def get_hepa_uv_state(self) -> Optional[HepaUVState]:
+ return await self._backend.get_hepa_uv_state()
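
A minimal usage sketch of the new Hepa/UV accessors added above, assuming `api` is an OT3API instance with a Hepa/UV module attached; the duty cycle and duration values are just the defaults from the signatures:

```python
from typing import Optional

from opentrons.hardware_control.types import HepaFanState, HepaUVState


async def exercise_hepa_module(api) -> None:
    # Turn the fan on at half duty cycle, then read back what the backend reports.
    await api.set_hepa_fan_state(turn_on=True, duty_cycle=50)
    fan: Optional[HepaFanState] = await api.get_hepa_fan_state()
    if fan is not None:
        print(f"fan on={fan.fan_on} duty={fan.duty_cycle}%")

    # Schedule the default 900-second UV cycle and check the remaining time.
    await api.set_hepa_uv_state(turn_on=True, uv_duration_s=900)
    uv: Optional[HepaUVState] = await api.get_hepa_uv_state()
    if uv is not None:
        print(f"uv on={uv.light_on}, {uv.remaining_time_s}s remaining")
```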
diff --git a/api/src/opentrons/hardware_control/poller.py b/api/src/opentrons/hardware_control/poller.py
index 76dd40157a1..27224bebaef 100644
--- a/api/src/opentrons/hardware_control/poller.py
+++ b/api/src/opentrons/hardware_control/poller.py
@@ -3,6 +3,7 @@
import logging
from abc import ABC, abstractmethod
from typing import AsyncGenerator, List, Optional
+from opentrons_shared_data.errors.exceptions import ModuleCommunicationError
log = logging.getLogger(__name__)
@@ -48,6 +49,8 @@ async def stop(self) -> None:
async with self._use_read_lock():
task.cancel()
await asyncio.gather(task, return_exceptions=True)
+ for waiter in self._poll_waiters:
+ waiter.cancel(msg="Module was removed")
async def wait_next_poll(self) -> None:
"""Wait for the next poll to complete.
@@ -56,6 +59,9 @@ async def wait_next_poll(self) -> None:
the next complete read. If a read raises an exception,
it will be passed through to `wait_next_poll`.
"""
+ if not self._poll_forever_task or self._poll_forever_task.done():
+ raise ModuleCommunicationError(message="Module was removed")
+
poll_future = asyncio.get_running_loop().create_future()
self._poll_waiters.append(poll_future)
await poll_future
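
A sketch of how a caller is expected to handle the new failure mode: `wait_next_poll()` now raises instead of hanging once the poll task has stopped, and `stop()` cancels any in-flight waiters.

```python
import asyncio

from opentrons_shared_data.errors.exceptions import ModuleCommunicationError


async def wait_for_fresh_reading(poller) -> bool:
    """Return True if a new poll completed, False if the module went away."""
    try:
        await poller.wait_next_poll()
        return True
    except ModuleCommunicationError:
        # Raised when the poll task is missing or already done (module removed).
        return False
    except asyncio.CancelledError:
        # stop() cancels pending waiters with msg="Module was removed".
        raise
```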
diff --git a/api/src/opentrons/hardware_control/protocols/__init__.py b/api/src/opentrons/hardware_control/protocols/__init__.py
index d4250a5d589..e47b54dba2c 100644
--- a/api/src/opentrons/hardware_control/protocols/__init__.py
+++ b/api/src/opentrons/hardware_control/protocols/__init__.py
@@ -1,12 +1,14 @@
"""Typing protocols describing a hardware controller."""
-from typing_extensions import Protocol
+from typing_extensions import Protocol, Type
+
+from opentrons.hardware_control.types import Axis
from .module_provider import ModuleProvider
from .hardware_manager import HardwareManager
from .chassis_accessory_manager import ChassisAccessoryManager
from .event_sourcer import EventSourcer
from .liquid_handler import LiquidHandler
-from .calibratable import Calibratable, CalibrationType
+from .calibratable import Calibratable
from .configurable import Configurable
from .motion_controller import MotionController
from .instrument_configurer import InstrumentConfigurer
@@ -14,18 +16,31 @@
from .asyncio_configurable import AsyncioConfigurable
from .stoppable import Stoppable
from .simulatable import Simulatable
+from .identifiable import Identifiable
+from .gripper_controller import GripperController
+from .flex_calibratable import FlexCalibratable
+from .flex_instrument_configurer import FlexInstrumentConfigurer
+
+from .types import (
+ CalibrationType,
+ MountArgType,
+ ConfigType,
+ OT2RobotType,
+ FlexRobotType,
+)
class HardwareControlInterface(
ModuleProvider,
ExecutionControllable,
- LiquidHandler[CalibrationType],
+ LiquidHandler[CalibrationType, MountArgType, ConfigType],
ChassisAccessoryManager,
HardwareManager,
AsyncioConfigurable,
Stoppable,
Simulatable,
- Protocol[CalibrationType],
+ Identifiable[Type[OT2RobotType]],
+ Protocol[CalibrationType, MountArgType, ConfigType],
):
"""A mypy protocol for a hardware controller.
@@ -41,11 +56,44 @@ class HardwareControlInterface(
however, they can satisfy protocols.
"""
- ...
+ def get_robot_type(self) -> Type[OT2RobotType]:
+ return OT2RobotType
+
+
+class FlexHardwareControlInterface(
+ ModuleProvider,
+ ExecutionControllable,
+ LiquidHandler[CalibrationType, MountArgType, ConfigType],
+ ChassisAccessoryManager,
+ HardwareManager,
+ AsyncioConfigurable,
+ Stoppable,
+ Simulatable,
+ GripperController,
+ FlexCalibratable,
+ FlexInstrumentConfigurer[MountArgType],
+ Identifiable[Type[FlexRobotType]],
+ Protocol[CalibrationType, MountArgType, ConfigType],
+):
+ """A mypy protocol for a hardware controller with Flex-specific extensions.
+
+ The interface for the Flex controller is mostly in-line with the OT-2 interface,
+ with some additional functionality and parameterization not supported on the OT-2.
+ """
+
+ def get_robot_type(self) -> Type[FlexRobotType]:
+ return FlexRobotType
+
+ def motor_status_ok(self, axis: Axis) -> bool:
+ ...
+
+ def encoder_status_ok(self, axis: Axis) -> bool:
+ ...
__all__ = [
"HardwareControlAPI",
+ "FlexHardwareControlInterface",
"Simulatable",
"Stoppable",
"AsyncioConfigurable",
@@ -59,4 +107,6 @@ class HardwareControlInterface(
"ChassisAccessoryManager",
"HardwareManager",
"ModuleProvider",
+ "Identifiable",
+ "FlexCalibratable",
]
diff --git a/api/src/opentrons/hardware_control/protocols/calibratable.py b/api/src/opentrons/hardware_control/protocols/calibratable.py
index 8c8bb65be42..530765d8249 100644
--- a/api/src/opentrons/hardware_control/protocols/calibratable.py
+++ b/api/src/opentrons/hardware_control/protocols/calibratable.py
@@ -1,10 +1,8 @@
from typing_extensions import Protocol
-from typing import TypeVar
+from .types import CalibrationType
from ..util import DeckTransformState
-CalibrationType = TypeVar("CalibrationType")
-
class Calibratable(Protocol[CalibrationType]):
"""Protocol specifying calibration information"""
diff --git a/api/src/opentrons/hardware_control/protocols/configurable.py b/api/src/opentrons/hardware_control/protocols/configurable.py
index 5bf7486620f..8e880d524ad 100644
--- a/api/src/opentrons/hardware_control/protocols/configurable.py
+++ b/api/src/opentrons/hardware_control/protocols/configurable.py
@@ -1,29 +1,39 @@
-from typing import Union, Dict, Any
+from typing import Dict, Any
from typing_extensions import Protocol
-from opentrons.config.types import RobotConfig, OT3Config
+from .types import ConfigType
+from opentrons.hardware_control.types import HardwareFeatureFlags
-class Configurable(Protocol):
+class Configurable(Protocol[ConfigType]):
"""Protocol specifying hardware control configuration."""
- def get_config(self) -> Union[RobotConfig, OT3Config]:
+ def get_config(self) -> ConfigType:
"""Get the robot's configuration object.
:returns .RobotConfig: The object.
"""
...
- def set_config(self, config: Union[RobotConfig, OT3Config]) -> None:
+ def set_config(self, config: ConfigType) -> None:
"""Replace the currently-loaded config"""
...
@property
- def config(self) -> Union[RobotConfig, OT3Config]:
+ def hardware_feature_flags(self) -> HardwareFeatureFlags:
+ ...
+
+ @hardware_feature_flags.setter
+ def hardware_feature_flags(self, feature_flags: HardwareFeatureFlags) -> None:
+ """Replace the currently-configured hardware feature flags."""
+ ...
+
+ @property
+ def config(self) -> ConfigType:
...
@config.setter
- def config(self, config: Union[RobotConfig, OT3Config]) -> None:
+ def config(self, config: ConfigType) -> None:
...
async def update_config(self, **kwargs: Dict[str, Any]) -> None:
diff --git a/api/src/opentrons/hardware_control/protocols/flex_calibratable.py b/api/src/opentrons/hardware_control/protocols/flex_calibratable.py
new file mode 100644
index 00000000000..d424f3bc654
--- /dev/null
+++ b/api/src/opentrons/hardware_control/protocols/flex_calibratable.py
@@ -0,0 +1,99 @@
+from typing import Optional, Tuple, List, AsyncIterator, Union
+import contextlib
+from typing_extensions import Protocol
+
+from opentrons import types as top_types
+from opentrons.config.types import (
+ CapacitivePassSettings,
+)
+from opentrons.hardware_control.types import (
+ Axis,
+ OT3Mount,
+ InstrumentProbeType,
+ GripperProbe,
+)
+from opentrons.hardware_control.instruments.ot3.instrument_calibration import (
+ GripperCalibrationOffset,
+ PipetteOffsetSummary,
+)
+from opentrons.hardware_control.modules.module_calibration import (
+ ModuleCalibrationOffset,
+)
+
+
+class FlexCalibratable(Protocol):
+ """Calibration extensions for Flex hardware."""
+
+ async def capacitive_probe(
+ self,
+ mount: OT3Mount,
+ moving_axis: Axis,
+ target_pos: float,
+ pass_settings: CapacitivePassSettings,
+ retract_after: bool = True,
+ probe: Optional[InstrumentProbeType] = None,
+ ) -> Tuple[float, bool]:
+ """Determine the position of something using the capacitive sensor.
+
+ This function orchestrates detecting the position of a collision between the
+ capacitive probe on the tool on the specified mount, and some fixed element
+ of the robot.
+
+ When calling this function, the mount's probe critical point should already
+ be aligned in the probe axis with the item to be probed.
+
+ It will move the mount's probe critical point to a small distance behind
+ the expected position of the element (which is target_pos, in deck coordinates,
+ in the axis to be probed) while running the tool's capacitive sensor. When the
+ sensor senses contact, the mount stops.
+
+ This function moves away and returns the sensed position.
+
+ This sensed position can be used in several ways, including
+ - To get an absolute position in deck coordinates of whatever was
+ targeted, if something was guaranteed to be physically present.
+ - To detect whether a collision occurred at all. If this function
+ returns a value far enough past the anticipated position, then it indicates
+ there was no material there.
+ """
+ ...
+
+ async def capacitive_sweep(
+ self,
+ mount: OT3Mount,
+ moving_axis: Axis,
+ begin: top_types.Point,
+ end: top_types.Point,
+ speed_mm_s: float,
+ ) -> List[float]:
+ ...
+
+ # Note that there is a default implementation of this function to allow for
+ # the asynccontextmanager decorator to propagate properly.
+ @contextlib.asynccontextmanager
+ async def restore_system_constrants(self) -> AsyncIterator[None]:
+ yield
+
+ async def set_system_constraints_for_calibration(self) -> None:
+ ...
+
+ async def reset_instrument_offset(
+ self, mount: Union[top_types.Mount, OT3Mount], to_default: bool = True
+ ) -> None:
+ ...
+
+ def add_gripper_probe(self, probe: GripperProbe) -> None:
+ ...
+
+ def remove_gripper_probe(self) -> None:
+ ...
+
+ async def save_instrument_offset(
+ self, mount: Union[top_types.Mount, OT3Mount], delta: top_types.Point
+ ) -> Union[GripperCalibrationOffset, PipetteOffsetSummary]:
+ ...
+
+ async def save_module_offset(
+ self, module_id: str, mount: OT3Mount, slot: str, offset: top_types.Point
+ ) -> Optional[ModuleCalibrationOffset]:
+ ...
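
A hypothetical caller of `capacitive_probe` as documented above, assuming `api` satisfies `FlexCalibratable` and `pass_settings` comes from the robot's calibration configuration; the returned boolean is read here as "contact was detected", per the docstring's note about detecting whether a collision occurred:

```python
from opentrons.config.types import CapacitivePassSettings
from opentrons.hardware_control.types import Axis, OT3Mount


async def find_deck_surface(
    api, expected_deck_z: float, pass_settings: CapacitivePassSettings
) -> float:
    """Probe downward in Z toward an expected height and return the sensed position."""
    found_z, contact_made = await api.capacitive_probe(
        OT3Mount.LEFT,
        Axis.Z_L,
        expected_deck_z,
        pass_settings,
        retract_after=True,
    )
    if not contact_made:
        # A reading far enough past the anticipated position means nothing was there.
        raise RuntimeError(f"no surface detected near z={expected_deck_z}")
    return found_z
```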
diff --git a/api/src/opentrons/hardware_control/protocols/flex_instrument_configurer.py b/api/src/opentrons/hardware_control/protocols/flex_instrument_configurer.py
new file mode 100644
index 00000000000..9b156f0dffa
--- /dev/null
+++ b/api/src/opentrons/hardware_control/protocols/flex_instrument_configurer.py
@@ -0,0 +1,52 @@
+"""Flex-specific extensions to instrument configuration."""
+from typing import Union, Optional
+from typing_extensions import Protocol
+
+from .types import MountArgType
+
+from opentrons.hardware_control.dev_types import (
+ PipetteStateDict,
+)
+from opentrons.hardware_control.types import (
+ TipStateType,
+ InstrumentProbeType,
+)
+from opentrons.hardware_control.instruments.ot3.instrument_calibration import (
+ PipetteOffsetSummary,
+ GripperCalibrationOffset,
+)
+
+
+class FlexInstrumentConfigurer(Protocol[MountArgType]):
+ """A protocol specifying Flex-specific extensions to instrument configuration."""
+
+ async def get_instrument_state(
+ self,
+ mount: MountArgType,
+ ) -> PipetteStateDict:
+ ...
+
+ def get_instrument_offset(
+ self, mount: MountArgType
+ ) -> Union[GripperCalibrationOffset, PipetteOffsetSummary, None]:
+ ...
+
+ async def get_tip_presence_status(
+ self,
+ mount: MountArgType,
+ ) -> TipStateType:
+ """Check tip presence status.
+
+ If a high throughput pipette is present,
+ move the tip motors down before checking the sensor status.
+ """
+ ...
+
+ async def verify_tip_presence(
+ self,
+ mount: MountArgType,
+ expected: TipStateType,
+ follow_singular_sensor: Optional[InstrumentProbeType] = None,
+ ) -> None:
+ """Check tip presence status and raise if it does not match `expected`."""
+ ...
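
A short sketch of the widened tip-presence API, assuming the `TipStateType.PRESENT` member, and showing that plain `Mount` values are now accepted and converted internally:

```python
from opentrons.types import Mount
from opentrons.hardware_control.types import InstrumentProbeType, TipStateType


async def confirm_tip_attached(api) -> None:
    # Raises FailedTipStateCheck if the sensor disagrees with the expectation.
    await api.verify_tip_presence(Mount.LEFT, TipStateType.PRESENT)

    # Optionally follow a single sensor on a high-throughput pipette.
    await api.verify_tip_presence(
        Mount.LEFT,
        TipStateType.PRESENT,
        follow_singular_sensor=InstrumentProbeType.PRIMARY,
    )
```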
diff --git a/api/src/opentrons/hardware_control/protocols/gripper_controller.py b/api/src/opentrons/hardware_control/protocols/gripper_controller.py
new file mode 100644
index 00000000000..fc81325193c
--- /dev/null
+++ b/api/src/opentrons/hardware_control/protocols/gripper_controller.py
@@ -0,0 +1,52 @@
+"""Protocol specifying API gripper control."""
+from typing import Optional
+from typing_extensions import Protocol
+
+from opentrons.hardware_control.dev_types import GripperDict
+from opentrons.hardware_control.instruments.ot3.gripper import Gripper
+
+
+class GripperController(Protocol):
+ """A protocol specifying gripper API functions."""
+
+ async def grip(
+ self, force_newtons: Optional[float] = None, stay_engaged: bool = True
+ ) -> None:
+ ...
+
+ async def ungrip(self, force_newtons: Optional[float] = None) -> None:
+ """Release gripped object.
+
+ To simply open the jaw, use `home_gripper_jaw` instead.
+ """
+ ...
+
+ async def idle_gripper(self) -> None:
+ """Move gripper to its idle, gripped position."""
+ ...
+
+ def gripper_jaw_can_home(self) -> bool:
+ """Check if it is valid to home the gripper jaw.
+
+ This should return False if the API believes that the gripper is
+ currently holding something.
+ """
+ ...
+
+ def raise_error_if_gripper_pickup_failed(
+ self,
+ expected_grip_width: float,
+ grip_width_uncertainty_wider: float,
+ grip_width_uncertainty_narrower: float,
+ ) -> None:
+ """Ensure that a gripper pickup succeeded."""
+
+ @property
+ def attached_gripper(self) -> Optional[GripperDict]:
+ """Get a dict of all attached grippers."""
+ ...
+
+ @property
+ def hardware_gripper(self) -> Optional[Gripper]:
+ """Get attached gripper, if present."""
+ ...
diff --git a/api/src/opentrons/hardware_control/protocols/identifiable.py b/api/src/opentrons/hardware_control/protocols/identifiable.py
new file mode 100644
index 00000000000..4e964f5633f
--- /dev/null
+++ b/api/src/opentrons/hardware_control/protocols/identifiable.py
@@ -0,0 +1,16 @@
+from typing_extensions import Protocol
+
+from .types import ProtocolRobotType
+
+
+class Identifiable(Protocol[ProtocolRobotType]):
+ """Protocol specifying support for hardware identification."""
+
+ def get_robot_type(self) -> ProtocolRobotType:
+ """Return the enumerated robot type that this API controls.
+
+ When a caller needs to determine whether an API function is expected
+ to be present on a hardware_control instance, it is preferable to check
+ with this function rather than check the exact type via `isinstance`.
+ """
+ ...
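
The docstring above recommends branching on the enumerated robot type rather than `isinstance`; a minimal sketch of that pattern:

```python
from opentrons.hardware_control.protocols.types import FlexRobotType


def is_flex(api) -> bool:
    # FlexHardwareControlInterface.get_robot_type() returns FlexRobotType,
    # while the OT-2 interface returns OT2RobotType.
    return api.get_robot_type() is FlexRobotType
```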
diff --git a/api/src/opentrons/hardware_control/protocols/instrument_configurer.py b/api/src/opentrons/hardware_control/protocols/instrument_configurer.py
index 820757e5e6b..ab5b37acc99 100644
--- a/api/src/opentrons/hardware_control/protocols/instrument_configurer.py
+++ b/api/src/opentrons/hardware_control/protocols/instrument_configurer.py
@@ -3,6 +3,7 @@
from opentrons_shared_data.pipette.dev_types import PipetteName
from opentrons.types import Mount
+from .types import MountArgType
# TODO (lc 12-05-2022) This protocol has deviated from the OT3 api. We
# need to figure out how to combine them again in follow-up refactors.
@@ -11,10 +12,10 @@
from ..types import CriticalPoint
-class InstrumentConfigurer(Protocol):
+class InstrumentConfigurer(Protocol[MountArgType]):
"""A protocol specifying how to interact with instrument presence and detection."""
- def reset_instrument(self, mount: Optional[Mount] = None) -> None:
+ def reset_instrument(self, mount: Optional[MountArgType] = None) -> None:
"""
Reset the internal state of a pipette by its mount, without doing
any lower level reconfiguration. This is useful to make sure that no
@@ -27,6 +28,7 @@ def reset_instrument(self, mount: Optional[Mount] = None) -> None:
async def cache_instruments(
self,
require: Optional[Dict[Mount, PipetteName]] = None,
+ skip_if_would_block: bool = False,
) -> None:
"""
Scan the attached instruments, take necessary configuration actions,
@@ -66,7 +68,7 @@ def get_attached_instruments(self) -> Dict[Mount, PipetteDict]:
"""
...
- def get_attached_instrument(self, mount: Mount) -> PipetteDict:
+ def get_attached_instrument(self, mount: MountArgType) -> PipetteDict:
"""Get the status dict of a single cached instrument.
Return values and caveats are as get_attached_instruments.
@@ -91,7 +93,7 @@ def attached_pipettes(self) -> Dict[Mount, PipetteDict]:
def calibrate_plunger(
self,
- mount: Mount,
+ mount: MountArgType,
top: Optional[float] = None,
bottom: Optional[float] = None,
blow_out: Optional[float] = None,
@@ -112,7 +114,7 @@ def calibrate_plunger(
def set_flow_rate(
self,
- mount: Mount,
+ mount: MountArgType,
aspirate: Optional[float] = None,
dispense: Optional[float] = None,
blow_out: Optional[float] = None,
@@ -122,7 +124,7 @@ def set_flow_rate(
def set_pipette_speed(
self,
- mount: Mount,
+ mount: MountArgType,
aspirate: Optional[float] = None,
dispense: Optional[float] = None,
blow_out: Optional[float] = None,
@@ -132,7 +134,7 @@ def set_pipette_speed(
def get_instrument_max_height(
self,
- mount: Mount,
+ mount: MountArgType,
critical_point: Optional[CriticalPoint] = None,
) -> float:
"""Return max achievable height of the attached instrument
@@ -140,7 +142,7 @@ def get_instrument_max_height(
"""
...
- async def add_tip(self, mount: Mount, tip_length: float) -> None:
+ async def add_tip(self, mount: MountArgType, tip_length: float) -> None:
"""Inform the hardware that a tip is now attached to a pipette.
This changes the critical point of the pipette to make sure that
@@ -148,7 +150,7 @@ async def add_tip(self, mount: Mount, tip_length: float) -> None:
"""
...
- async def remove_tip(self, mount: Mount) -> None:
+ async def remove_tip(self, mount: MountArgType) -> None:
"""Inform the hardware that a tip is no longer attached to a pipette.
This changes the critical point of the system to the end of the
@@ -157,7 +159,7 @@ async def remove_tip(self, mount: Mount) -> None:
...
def set_current_tiprack_diameter(
- self, mount: Mount, tiprack_diameter: float
+ self, mount: MountArgType, tiprack_diameter: float
) -> None:
"""Inform the hardware of the diameter of the tiprack.
@@ -166,7 +168,7 @@ def set_current_tiprack_diameter(
"""
...
- def set_working_volume(self, mount: Mount, tip_volume: float) -> None:
+ def set_working_volume(self, mount: MountArgType, tip_volume: float) -> None:
"""Inform the hardware how much volume a pipette can aspirate.
This will set the limit of aspiration for the pipette, and is
@@ -181,3 +183,12 @@ def hardware_instruments(self) -> Dict[Mount, Optional[Pipette]]:
This should rarely be used. Do not write new code that uses it.
"""
...
+
+ def has_gripper(self) -> bool:
+ """Return whether there is a gripper attached to this instance.
+
+ - On robots that do not support a gripper, this will always return False.
+ - On robots that support a gripper, this will return whether a gripper is
+ currently attached.
+ """
+ ...
diff --git a/api/src/opentrons/hardware_control/protocols/liquid_handler.py b/api/src/opentrons/hardware_control/protocols/liquid_handler.py
index e46cea2fdc2..e55dbb88440 100644
--- a/api/src/opentrons/hardware_control/protocols/liquid_handler.py
+++ b/api/src/opentrons/hardware_control/protocols/liquid_handler.py
@@ -1,24 +1,24 @@
from typing import Optional
from typing_extensions import Protocol
-from opentrons.types import Mount
+from .types import MountArgType, CalibrationType, ConfigType
from .instrument_configurer import InstrumentConfigurer
from .motion_controller import MotionController
from .configurable import Configurable
-from .calibratable import Calibratable, CalibrationType
+from .calibratable import Calibratable
class LiquidHandler(
- InstrumentConfigurer,
- MotionController,
- Configurable,
+ InstrumentConfigurer[MountArgType],
+ MotionController[MountArgType],
+ Configurable[ConfigType],
Calibratable[CalibrationType],
- Protocol[CalibrationType],
+ Protocol[CalibrationType, MountArgType, ConfigType],
):
async def update_nozzle_configuration_for_mount(
self,
- mount: Mount,
+ mount: MountArgType,
back_left_nozzle: Optional[str],
front_right_nozzle: Optional[str],
starting_nozzle: Optional[str] = None,
@@ -40,7 +40,7 @@ async def update_nozzle_configuration_for_mount(
"""
...
- async def configure_for_volume(self, mount: Mount, volume: float) -> None:
+ async def configure_for_volume(self, mount: MountArgType, volume: float) -> None:
"""
Configure a pipette to handle the specified volume.
@@ -53,7 +53,9 @@ async def configure_for_volume(self, mount: Mount, volume: float) -> None:
"""
...
- async def prepare_for_aspirate(self, mount: Mount, rate: float = 1.0) -> None:
+ async def prepare_for_aspirate(
+ self, mount: MountArgType, rate: float = 1.0
+ ) -> None:
"""
Prepare the pipette for aspiration.
@@ -75,7 +77,7 @@ async def prepare_for_aspirate(self, mount: Mount, rate: float = 1.0) -> None:
async def aspirate(
self,
- mount: Mount,
+ mount: MountArgType,
volume: Optional[float] = None,
rate: float = 1.0,
) -> None:
@@ -102,7 +104,7 @@ async def aspirate(
async def dispense(
self,
- mount: Mount,
+ mount: MountArgType,
volume: Optional[float] = None,
rate: float = 1.0,
push_out: Optional[float] = None,
@@ -119,7 +121,9 @@ async def dispense(
"""
...
- async def blow_out(self, mount: Mount, volume: Optional[float] = None) -> None:
+ async def blow_out(
+ self, mount: MountArgType, volume: Optional[float] = None
+ ) -> None:
"""
Force any remaining liquid to dispense. The liquid will be dispensed at
the current location of pipette
@@ -128,7 +132,7 @@ async def blow_out(self, mount: Mount, volume: Optional[float] = None) -> None:
async def pick_up_tip(
self,
- mount: Mount,
+ mount: MountArgType,
tip_length: float,
presses: Optional[int] = None,
increment: Optional[float] = None,
@@ -154,7 +158,7 @@ async def pick_up_tip(
async def drop_tip(
self,
- mount: Mount,
+ mount: MountArgType,
home_after: bool = True,
) -> None:
"""
diff --git a/api/src/opentrons/hardware_control/protocols/motion_controller.py b/api/src/opentrons/hardware_control/protocols/motion_controller.py
index 8d89bb7abc1..8387e4a907c 100644
--- a/api/src/opentrons/hardware_control/protocols/motion_controller.py
+++ b/api/src/opentrons/hardware_control/protocols/motion_controller.py
@@ -1,11 +1,12 @@
from typing import Dict, List, Optional, Mapping
from typing_extensions import Protocol
-from opentrons.types import Mount, Point
+from opentrons.types import Point
from ..types import Axis, CriticalPoint, MotionChecks
+from .types import MountArgType
-class MotionController(Protocol):
+class MotionController(Protocol[MountArgType]):
"""Protocol specifying fundamental motion controls."""
async def halt(self, disengage_before_stopping: bool = False) -> None:
@@ -44,13 +45,13 @@ async def reset(self) -> None:
# Gantry/frame (i.e. not pipette) action API
async def home_z(
self,
- mount: Optional[Mount] = None,
+ mount: Optional[MountArgType] = None,
allow_home_other: bool = True,
) -> None:
"""Home a selected z-axis, or both if not specified."""
...
- async def home_plunger(self, mount: Mount) -> None:
+ async def home_plunger(self, mount: MountArgType) -> None:
"""
Home the plunger motor for a mount, and then return it to the 'bottom'
position.
@@ -69,7 +70,7 @@ async def home(self, axes: Optional[List[Axis]] = None) -> None:
async def current_position(
self,
- mount: Mount,
+ mount: MountArgType,
critical_point: Optional[CriticalPoint] = None,
refresh: bool = False,
# TODO(mc, 2021-11-15): combine with `refresh` for more reliable
@@ -97,7 +98,7 @@ async def current_position(
async def gantry_position(
self,
- mount: Mount,
+ mount: MountArgType,
critical_point: Optional[CriticalPoint] = None,
refresh: bool = False,
# TODO(mc, 2021-11-15): combine with `refresh` for more reliable
@@ -114,7 +115,7 @@ async def gantry_position(
async def move_to(
self,
- mount: Mount,
+ mount: MountArgType,
abs_position: Point,
speed: Optional[float] = None,
critical_point: Optional[CriticalPoint] = None,
@@ -173,7 +174,7 @@ async def move_axes(
async def move_rel(
self,
- mount: Mount,
+ mount: MountArgType,
delta: Point,
speed: Optional[float] = None,
max_speeds: Optional[Dict[Axis, float]] = None,
@@ -203,7 +204,7 @@ async def disengage_axes(self, which: List[Axis]) -> None:
"""Disengage some axes."""
...
- async def retract(self, mount: Mount, margin: float = 10) -> None:
+ async def retract(self, mount: MountArgType, margin: float = 10) -> None:
"""Pull the specified mount up to its home position.
Works regardless of critical point or home status.
@@ -225,3 +226,7 @@ def should_taskify_movement_execution(self, taskify: bool) -> None:
async def cancel_execution_and_running_tasks(self) -> None:
"""Cancel all tasks and set execution manager state to Cancelled."""
...
+
+ async def prepare_for_mount_movement(self, mount: MountArgType) -> None:
+ """Retract the other mount if necessary."""
+ ...
diff --git a/api/src/opentrons/hardware_control/protocols/types.py b/api/src/opentrons/hardware_control/protocols/types.py
new file mode 100644
index 00000000000..bdd4a3799f4
--- /dev/null
+++ b/api/src/opentrons/hardware_control/protocols/types.py
@@ -0,0 +1,27 @@
+"""Types that are common across protocols."""
+
+from typing import TypeVar, Union, Type
+from opentrons.hardware_control.types import OT3Mount
+from opentrons.types import Mount
+from opentrons.config.types import RobotConfig, OT3Config
+
+
+class OT2RobotType:
+ pass
+
+
+class FlexRobotType:
+ pass
+
+
+CalibrationType = TypeVar("CalibrationType")
+
+MountArgType = TypeVar(
+ "MountArgType", Mount, Union[OT3Mount, Mount], contravariant=True
+)
+
+ConfigType = TypeVar("ConfigType", RobotConfig, OT3Config)
+
+ProtocolRobotType = TypeVar(
+ "ProtocolRobotType", Type[FlexRobotType], Type[OT2RobotType], covariant=True
+)
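
An illustration (assumed, not part of this diff) of how these TypeVars are meant to be bound, one alias per robot; the `_Calibration` class is a stand-in for whichever robot-specific calibration type each API actually uses:

```python
from typing import Union

from opentrons.config.types import OT3Config, RobotConfig
from opentrons.hardware_control.protocols import (
    FlexHardwareControlInterface,
    HardwareControlInterface,
)
from opentrons.hardware_control.types import OT3Mount
from opentrons.types import Mount


class _Calibration:
    """Placeholder for a robot-specific calibration payload."""


# OT-2 controllers take plain Mount arguments and a RobotConfig.
OT2ControlAPI = HardwareControlInterface[_Calibration, Mount, RobotConfig]

# Flex controllers take either mount type and an OT3Config, plus the gripper
# and calibration extensions mixed into FlexHardwareControlInterface.
FlexControlAPI = FlexHardwareControlInterface[
    _Calibration, Union[Mount, OT3Mount], OT3Config
]
```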
diff --git a/api/src/opentrons/hardware_control/robot_calibration.py b/api/src/opentrons/hardware_control/robot_calibration.py
index 1c776bffbdc..270344fff2f 100644
--- a/api/src/opentrons/hardware_control/robot_calibration.py
+++ b/api/src/opentrons/hardware_control/robot_calibration.py
@@ -4,6 +4,7 @@
from datetime import datetime
from dataclasses import dataclass
from typing import Optional, List, Any, cast
+from numpy.typing import NDArray
from opentrons import config
@@ -49,9 +50,9 @@ def validate_attitude_deck_calibration(
TODO(lc, 8/10/2020): As with the OT2, expand on this method, or create
another method to diagnose bad instrument offset data
"""
- curr_cal = np.array(deck_cal.attitude)
+ curr_cal: linal.DoubleMatrix = np.array(deck_cal.attitude)
row, _ = curr_cal.shape
- rank: int = np.linalg.matrix_rank(curr_cal) # type: ignore
+ rank: int = np.linalg.matrix_rank(curr_cal)
if row != rank:
# Check that the matrix is non-singular
return DeckTransformState.SINGULARITY
@@ -68,10 +69,10 @@ def validate_gantry_calibration(gantry_cal: List[List[float]]) -> DeckTransformS
This function determines whether the gantry calibration is valid
or not based on the following use-cases:
"""
- curr_cal = np.array(gantry_cal)
+ curr_cal: linal.DoubleMatrix = np.array(gantry_cal)
row, _ = curr_cal.shape
- rank: int = np.linalg.matrix_rank(curr_cal) # type: ignore
+ rank: int = np.linalg.matrix_rank(curr_cal)
id_matrix = linal.identity_deck_transform()
@@ -95,7 +96,7 @@ def validate_gantry_calibration(gantry_cal: List[List[float]]) -> DeckTransformS
def migrate_affine_xy_to_attitude(
gantry_cal: List[List[float]],
) -> types.AttitudeMatrix:
- masked_transform = np.array(
+ masked_transform: NDArray[np.bool_] = np.array(
[
[True, True, True, False],
[True, True, True, False],
@@ -108,7 +109,7 @@ def migrate_affine_xy_to_attitude(
] = np.ma.masked_array( # type: ignore
gantry_cal, ~masked_transform
)
- attitude_array = np.zeros((3, 3))
+ attitude_array: linal.DoubleMatrix = np.zeros((3, 3))
np.put(attitude_array, [0, 1, 2], masked_array[0].compressed())
np.put(attitude_array, [3, 4, 5], masked_array[1].compressed())
np.put(attitude_array, 8, 1)
diff --git a/api/src/opentrons/hardware_control/scripts/repl.py b/api/src/opentrons/hardware_control/scripts/repl.py
index f2d1bf0d47c..1efbe0c2233 100644
--- a/api/src/opentrons/hardware_control/scripts/repl.py
+++ b/api/src/opentrons/hardware_control/scripts/repl.py
@@ -11,6 +11,7 @@
from logging.config import dictConfig
from opentrons.hardware_control.api import API
from opentrons.hardware_control.ot3api import OT3API
+from opentrons.hardware_control.types import HardwareFeatureFlags
update_firmware = True
has_robot_server = True
@@ -94,6 +95,7 @@ def build_thread_manager() -> ThreadManager[Union[API, OT3API]]:
OT3API.build_hardware_controller,
use_usb_bus=ff.rear_panel_integration(),
update_firmware=update_firmware,
+ feature_flags=HardwareFeatureFlags.build_from_ff(),
)
def wrap_async_util_fn(fn: Any, *bind_args: Any, **bind_kwargs: Any) -> Any:
@@ -114,6 +116,7 @@ def build_thread_manager() -> ThreadManager[Union[API, OT3API]]:
API.build_hardware_controller,
use_usb_bus=ff.rear_panel_integration(),
update_firmware=update_firmware,
+ feature_flags=HardwareFeatureFlags.build_from_ff(),
)
diff --git a/api/src/opentrons/hardware_control/simulator_setup.py b/api/src/opentrons/hardware_control/simulator_setup.py
index 4a210655af2..25fa17d36a1 100644
--- a/api/src/opentrons/hardware_control/simulator_setup.py
+++ b/api/src/opentrons/hardware_control/simulator_setup.py
@@ -10,7 +10,7 @@
from opentrons.config.types import RobotConfig, OT3Config
from opentrons.types import Mount
from opentrons.hardware_control import API, HardwareControlAPI, ThreadManager
-from opentrons.hardware_control.types import OT3Mount
+from opentrons.hardware_control.types import OT3Mount, HardwareFeatureFlags
# Name and kwargs for a module function
@@ -21,13 +21,19 @@ class ModuleCall:
kwargs: Dict[str, Any] = field(default_factory=dict)
+@dataclass(frozen=True)
+class ModuleItem:
+ serial_number: str
+ calls: List[ModuleCall] = field(default_factory=list)
+
+
@dataclass(frozen=True)
class OT2SimulatorSetup:
machine: Literal["OT-2 Standard"] = "OT-2 Standard"
attached_instruments: Dict[Mount, Dict[str, Optional[str]]] = field(
default_factory=dict
)
- attached_modules: Dict[str, List[ModuleCall]] = field(default_factory=dict)
+ attached_modules: Dict[str, List[ModuleItem]] = field(default_factory=dict)
config: Optional[RobotConfig] = None
strict_attached_instruments: bool = True
@@ -38,7 +44,7 @@ class OT3SimulatorSetup:
attached_instruments: Dict[OT3Mount, Dict[str, Optional[str]]] = field(
default_factory=dict
)
- attached_modules: Dict[str, List[ModuleCall]] = field(default_factory=dict)
+ attached_modules: Dict[str, List[ModuleItem]] = field(default_factory=dict)
config: Optional[OT3Config] = None
strict_attached_instruments: bool = True
@@ -52,20 +58,28 @@ async def _simulator_for_setup(
if setup.machine == "OT-2 Standard":
return await API.build_hardware_simulator(
attached_instruments=setup.attached_instruments,
- attached_modules=list(setup.attached_modules.keys()),
+ attached_modules={
+ k: [m.serial_number for m in v]
+ for k, v in setup.attached_modules.items()
+ },
config=setup.config,
strict_attached_instruments=setup.strict_attached_instruments,
loop=loop,
+ feature_flags=HardwareFeatureFlags.build_from_ff(),
)
else:
from opentrons.hardware_control.ot3api import OT3API
return await OT3API.build_hardware_simulator(
attached_instruments=setup.attached_instruments,
- attached_modules=list(setup.attached_modules.keys()),
+ attached_modules={
+ k: [m.serial_number for m in v]
+ for k, v in setup.attached_modules.items()
+ },
config=setup.config,
strict_attached_instruments=setup.strict_attached_instruments,
loop=loop,
+ feature_flags=HardwareFeatureFlags.build_from_ff(),
)
@@ -75,10 +89,12 @@ async def create_simulator(
"""Create a simulator"""
simulator = await _simulator_for_setup(setup, loop)
for attached_module in simulator.attached_modules:
- calls = setup.attached_modules[attached_module.name()]
- for call in calls:
- f = getattr(attached_module, call.function_name)
- await f(*call.args, **call.kwargs)
+ modules = setup.attached_modules[attached_module.name()]
+ for module in modules:
+ if module.serial_number == attached_module.device_info.get("serial"):
+ for call in module.calls:
+ f = getattr(attached_module, call.function_name)
+ await f(*call.args, **call.kwargs)
return simulator
@@ -97,9 +113,13 @@ def _thread_manager_for_setup(
return ThreadManager(
API.build_hardware_simulator,
attached_instruments=setup.attached_instruments,
- attached_modules=list(setup.attached_modules.keys()),
+ attached_modules={
+ k: [m.serial_number for m in v]
+ for k, v in setup.attached_modules.items()
+ },
config=setup.config,
strict_attached_instruments=setup.strict_attached_instruments,
+ feature_flags=HardwareFeatureFlags.build_from_ff(),
)
else:
from opentrons.hardware_control.ot3api import OT3API
@@ -107,9 +127,13 @@ def _thread_manager_for_setup(
return ThreadManager(
OT3API.build_hardware_simulator,
attached_instruments=setup.attached_instruments,
- attached_modules=list(setup.attached_modules.keys()),
+ attached_modules={
+ k: [m.serial_number for m in v]
+ for k, v in setup.attached_modules.items()
+ },
config=setup.config,
strict_attached_instruments=setup.strict_attached_instruments,
+ feature_flags=HardwareFeatureFlags.build_from_ff(),
)
@@ -121,10 +145,11 @@ async def create_simulator_thread_manager(
await thread_manager.managed_thread_ready_async()
for attached_module in thread_manager.wrapped().attached_modules:
- calls = setup.attached_modules[attached_module.name()]
- for call in calls:
- f = getattr(attached_module, call.function_name)
- await f(*call.args, **call.kwargs)
+ modules = setup.attached_modules[attached_module.name()]
+ for module in modules:
+ for call in module.calls:
+ f = getattr(attached_module, call.function_name)
+ await f(*call.args, **call.kwargs)
return thread_manager
@@ -184,7 +209,18 @@ def _prepare_for_simulator_setup(key: str, value: Dict[str, Any]) -> Any:
if key == "config" and value:
return robot_configs.build_config_ot2(value)
if key == "attached_modules" and value:
- return {k: [ModuleCall(**data) for data in v] for (k, v) in value.items()}
+ attached_modules: Dict[str, List[ModuleItem]] = {}
+ for key, item in value.items():
+ for obj in item:
+ attached_modules.setdefault(key, []).append(
+ ModuleItem(
+ serial_number=obj["serial_number"],
+ calls=[ModuleCall(**data) for data in obj["calls"]],
+ )
+ )
+
+ return attached_modules
+
return value
@@ -194,5 +230,15 @@ def _prepare_for_ot3_simulator_setup(key: str, value: Dict[str, Any]) -> Any:
if key == "config" and value:
return robot_configs.build_config_ot3(value)
if key == "attached_modules" and value:
- return {k: [ModuleCall(**data) for data in v] for (k, v) in value.items()}
+ attached_modules: Dict[str, List[ModuleItem]] = {}
+ for key, item in value.items():
+ for obj in item:
+ attached_modules.setdefault(key, []).append(
+ ModuleItem(
+ serial_number=obj["serial_number"],
+ calls=[ModuleCall(**data) for data in obj["calls"]],
+ )
+ )
+
+ return attached_modules
return value
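
For reference, the shape that `_prepare_for_simulator_setup` and `_prepare_for_ot3_simulator_setup` now expect for `attached_modules`: each module name maps to a list of entries carrying a serial number plus the calls to replay against the simulated module with that serial. The module name, serial, and method name below are illustrative only:

```python
# One "tempdeck" entry whose calls are replayed only on the simulated module
# whose device_info serial matches "temp-001".
attached_modules = {
    "tempdeck": [
        {
            "serial_number": "temp-001",
            "calls": [
                {
                    "function_name": "start_set_temperature",
                    "args": [40.0],
                    "kwargs": {},
                }
            ],
        }
    ]
}
```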
diff --git a/api/src/opentrons/hardware_control/thread_manager.py b/api/src/opentrons/hardware_control/thread_manager.py
index 4a8a7ae5936..c72ec3857b9 100644
--- a/api/src/opentrons/hardware_control/thread_manager.py
+++ b/api/src/opentrons/hardware_control/thread_manager.py
@@ -18,6 +18,7 @@
AsyncGenerator,
Union,
Type,
+ ParamSpec,
)
from .adapters import SynchronousAdapter
from .modules.mod_abc import AbstractModule
@@ -34,17 +35,14 @@ class ThreadManagerException(Exception):
WrappedReturn = TypeVar("WrappedReturn", contravariant=True)
WrappedYield = TypeVar("WrappedYield", contravariant=True)
-WrappedCoro = TypeVar("WrappedCoro", bound=Callable[..., Awaitable[WrappedReturn]])
-WrappedAGenFunc = TypeVar(
- "WrappedAGenFunc", bound=Callable[..., AsyncGenerator[WrappedYield, None]]
-)
+P = ParamSpec("P")
async def call_coroutine_threadsafe(
loop: asyncio.AbstractEventLoop,
- coro: WrappedCoro,
- *args: Sequence[Any],
- **kwargs: Mapping[str, Any],
+ coro: Callable[P, Awaitable[WrappedReturn]],
+ *args: P.args,
+ **kwargs: P.kwargs,
) -> WrappedReturn:
fut = cast(
"asyncio.Future[WrappedReturn]",
@@ -56,9 +54,9 @@ async def call_coroutine_threadsafe(
async def execute_asyncgen_threadsafe(
loop: asyncio.AbstractEventLoop,
- agenfunc: WrappedAGenFunc,
- *args: Sequence[Any],
- **kwargs: Mapping[str, Any],
+ agenfunc: Callable[P, AsyncGenerator[WrappedYield, None]],
+ *args: P.args,
+ **kwargs: P.kwargs,
) -> AsyncGenerator[WrappedYield, None]:
# This function should bridge an async generator function between two asyncio
@@ -295,7 +293,7 @@ def sync(self) -> SynchronousAdapter[WrappedObj]:
def __repr__(self) -> str:
return ""
- def clean_up(self) -> None:
+ def clean_up_tm(self) -> None:
try:
loop = object.__getattribute__(self, "_loop")
loop.call_soon_threadsafe(loop.stop)
@@ -348,7 +346,7 @@ def __getattribute__(self, attr_name: str) -> Any:
wrapped_cleanup = getattr(
object.__getattribute__(self, "bridged_obj"), "clean_up"
)
- our_cleanup = object.__getattribute__(self, "clean_up")
+ our_cleanup = object.__getattribute__(self, "clean_up_tm")
def call_both() -> None:
# the wrapped cleanup wants to happen in the managed thread,
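
What the `ParamSpec` change buys callers, sketched below: the wrapped coroutine's own parameter types flow through `call_coroutine_threadsafe`, so type checkers can flag bad arguments at the call site (previously they were typed as `Sequence[Any]` / `Mapping[str, Any]`). The helper coroutine here is hypothetical, and `loop` is assumed to be the managed thread's running loop:

```python
import asyncio

from opentrons.hardware_control.thread_manager import call_coroutine_threadsafe


async def set_speed_multiplier(multiplier: float) -> bool:
    await asyncio.sleep(0)  # stand-in for real hardware work
    return multiplier > 0


async def bridge_call(loop: asyncio.AbstractEventLoop) -> None:
    ok = await call_coroutine_threadsafe(loop, set_speed_multiplier, 1.5)
    assert ok
    # call_coroutine_threadsafe(loop, set_speed_multiplier, "fast")
    # ^ now rejected by mypy, since "fast" does not match `multiplier: float`.
```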
diff --git a/api/src/opentrons/hardware_control/types.py b/api/src/opentrons/hardware_control/types.py
index 4dbd64559b5..1ea79652f34 100644
--- a/api/src/opentrons/hardware_control/types.py
+++ b/api/src/opentrons/hardware_control/types.py
@@ -5,6 +5,7 @@
from typing_extensions import Literal
from opentrons import types as top_types
from opentrons_shared_data.pipette.types import PipetteChannelType
+from opentrons.config import feature_flags
MODULE_LOG = logging.getLogger(__name__)
@@ -231,6 +232,13 @@ def of_plunger(cls, mount: top_types.Mount) -> "Axis":
"""
return cls.of_main_tool_actuator(mount)
+ @classmethod
+ def node_axes(cls) -> List["Axis"]:
+ """
+ Get a list of axes that are backed by Flex CAN bus nodes.
+ """
+ return [cls.X, cls.Y, cls.Z_L, cls.Z_R, cls.P_L, cls.P_R, cls.Z_G, cls.G]
+
class SubSystem(enum.Enum):
"""An enumeration of ot3 components.
@@ -246,6 +254,7 @@ class SubSystem(enum.Enum):
gripper = 5
rear_panel = 6
motor_controller_board = 7
+ hepa_uv = 8
def __str__(self) -> str:
return self.name
@@ -383,6 +392,19 @@ class EstopOverallStatus:
right_physical_state: EstopPhysicalStatus
+@dataclass
+class HepaFanState:
+ fan_on: bool
+ duty_cycle: int
+
+
+@dataclass
+class HepaUVState:
+ light_on: bool
+ uv_duration_s: int
+ remaining_time_s: int
+
+
@dataclass(frozen=True)
class DoorStateNotification:
event: Literal[
@@ -411,6 +433,7 @@ class ErrorMessageNotification:
]
HardwareEventHandler = Callable[[HardwareEvent], None]
+HardwareEventUnsubscriber = Callable[[], None]
RevisionLiteral = Literal["2.1", "A", "B", "C", "UNKNOWN"]
@@ -479,6 +502,13 @@ class CriticalPoint(enum.Enum):
point. This is the same as the GRIPPER_JAW_CENTER for grippers.
"""
+ INSTRUMENT_XY_CENTER = enum.auto()
+ """
+ The INSTRUMENT_XY_CENTER means the critical point under consideration is
+ the XY center of the entire pipette, regardless of configuration.
+ No pipette, single- or multi-channel, changes its instrument center point.
+ """
+
FRONT_NOZZLE = enum.auto()
"""
The end of the front-most nozzle of a multipipette with a tip attached.
@@ -504,6 +534,16 @@ class CriticalPoint(enum.Enum):
back calibration pin slot.
"""
+ Y_CENTER = enum.auto()
+ """
+ Y_CENTER means the critical point under consideration is at the same X
+ coordinate as the default nozzle point (i.e. TIP | NOZZLE | FRONT_NOZZLE)
+ but halfway along the Y-axis bounding box of the pipette - it is the
+ XY center of the first column of the pipette. It is really only relevant
+ for the 96-channel pipette; it produces the same position as XY_CENTER on
+ an eight-channel or single-channel pipette.
+ """
+
class ExecutionState(enum.Enum):
RUNNING = enum.auto()
@@ -584,6 +624,7 @@ class GripperJawState(enum.Enum):
class InstrumentProbeType(enum.Enum):
PRIMARY = enum.auto()
SECONDARY = enum.auto()
+ BOTH = enum.auto()
class GripperProbe(enum.Enum):
@@ -606,6 +647,40 @@ def __str__(self) -> str:
return self.name
+@dataclass
+class HardwareFeatureFlags:
+ """
+ Hardware configuration options that can be passed to API instances.
+ Some options may not be relevant to every robot.
+
+ These generally map to the feature flag options in the opentrons.config
+ module.
+ """
+
+ use_old_aspiration_functions: bool = (
+ False # To support pipette backwards compatibility
+ )
+ require_estop: bool = True
+ stall_detection_enabled: bool = True
+ overpressure_detection_enabled: bool = True
+
+ @classmethod
+ def build_from_ff(cls) -> "HardwareFeatureFlags":
+ """Build from the feature flags configuration file on disc.
+
+ Note that, if this class is built from the default constructor, the values
+ of all of the flags are just the default values instead of the values in the
+ feature_flags file or environment variables. Use this constructor to ensure
+ the right values are pulled in.
+ """
+ return HardwareFeatureFlags(
+ use_old_aspiration_functions=feature_flags.use_old_aspiration_functions(),
+ require_estop=feature_flags.require_estop(),
+ stall_detection_enabled=feature_flags.stall_detection_enabled(),
+ overpressure_detection_enabled=feature_flags.overpressure_detection_enabled(),
+ )
+
+
class EarlyLiquidSenseTrigger(RuntimeError):
"""Error raised if sensor threshold reached before minimum probing distance."""
diff --git a/api/src/opentrons/legacy_broker.py b/api/src/opentrons/legacy_broker.py
index 838a75b7759..b58a779134e 100644
--- a/api/src/opentrons/legacy_broker.py
+++ b/api/src/opentrons/legacy_broker.py
@@ -5,7 +5,7 @@
from typing import Callable, Dict, List
from typing_extensions import Literal
-from opentrons.commands import types
+from opentrons.legacy_commands import types
MODULE_LOG = logging.getLogger(__name__)
@@ -16,7 +16,7 @@ class LegacyBroker:
Deprecated:
Use the newer, more generic `opentrons.utils.Broker` class instead.
- This class is coupled to old types from `opentrons.commands`.
+ This class is coupled to old types from `opentrons.legacy_commands`.
https://opentrons.atlassian.net/browse/RSS-270
"""
diff --git a/api/src/opentrons/legacy_commands/__init__.py b/api/src/opentrons/legacy_commands/__init__.py
new file mode 100644
index 00000000000..558ad9b87c0
--- /dev/null
+++ b/api/src/opentrons/legacy_commands/__init__.py
@@ -0,0 +1 @@
+"""Command models from before v5.0, before Protocol Engine."""
diff --git a/api/src/opentrons/commands/commands.py b/api/src/opentrons/legacy_commands/commands.py
similarity index 76%
rename from api/src/opentrons/commands/commands.py
rename to api/src/opentrons/legacy_commands/commands.py
index ffbb9cba82b..68b6f1a0595 100755
--- a/api/src/opentrons/commands/commands.py
+++ b/api/src/opentrons/legacy_commands/commands.py
@@ -2,10 +2,11 @@
from typing import TYPE_CHECKING, List, Union, overload
-from .helpers import stringify_location, listify
+from .helpers import stringify_location, stringify_disposal_location, listify
from . import types as command_types
from opentrons.types import Location
+from opentrons.protocol_api.disposal_locations import TrashBin, WasteChute
if TYPE_CHECKING:
from opentrons.protocol_api import InstrumentContext
@@ -63,6 +64,28 @@ def dispense(
}
+def dispense_in_disposal_location(
+ instrument: InstrumentContext,
+ volume: float,
+ location: Union[TrashBin, WasteChute],
+ flow_rate: float,
+ rate: float,
+) -> command_types.DispenseInDisposalLocationCommand:
+ location_text = stringify_disposal_location(location)
+ text = f"Dispensing {float(volume)} uL into {location_text} at {flow_rate} uL/sec"
+
+ return {
+ "name": command_types.DISPENSE_IN_DISPOSAL_LOCATION,
+ "payload": {
+ "instrument": instrument,
+ "volume": volume,
+ "location": location,
+ "rate": rate,
+ "text": text,
+ },
+ }
+
+
def consolidate(
instrument: InstrumentContext,
volume: Union[float, List[float]],
@@ -190,6 +213,18 @@ def blow_out(
}
+def blow_out_in_disposal_location(
+ instrument: InstrumentContext, location: Union[TrashBin, WasteChute]
+) -> command_types.BlowOutInDisposalLocationCommand:
+ location_text = stringify_disposal_location(location)
+ text = f"Blowing out into {location_text}"
+
+ return {
+ "name": command_types.BLOW_OUT_IN_DISPOSAL_LOCATION,
+ "payload": {"instrument": instrument, "location": location, "text": text},
+ }
+
+
def touch_tip(instrument: InstrumentContext) -> command_types.TouchTipCommand:
text = "Touching tip"
@@ -231,6 +266,17 @@ def drop_tip(
}
+def drop_tip_in_disposal_location(
+ instrument: InstrumentContext, location: Union[TrashBin, WasteChute]
+) -> command_types.DropTipInDisposalLocationCommand:
+ location_text = stringify_disposal_location(location)
+ text = f"Dropping tip into {location_text}"
+ return {
+ "name": command_types.DROP_TIP_IN_DISPOSAL_LOCATION,
+ "payload": {"instrument": instrument, "location": location, "text": text},
+ }
+
+
def move_to(
instrument: InstrumentContext,
location: Location,
@@ -241,3 +287,15 @@ def move_to(
"name": command_types.MOVE_TO,
"payload": {"instrument": instrument, "location": location, "text": text},
}
+
+
+def move_to_disposal_location(
+ instrument: InstrumentContext,
+ location: Union[TrashBin, WasteChute],
+) -> command_types.MoveToDisposalLocationCommand:
+ location_text = stringify_disposal_location(location)
+ text = f"Moving to {location_text}"
+ return {
+ "name": command_types.MOVE_TO_DISPOSAL_LOCATION,
+ "payload": {"instrument": instrument, "location": location, "text": text},
+ }
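
A worked example of what one of the new disposal-location builders emits, assuming an `InstrumentContext` and a `TrashBin` addressed to slot A3:

```python
from opentrons.legacy_commands import commands as cmds
from opentrons.legacy_commands.types import DropTipInDisposalLocationCommand
from opentrons.protocol_api import InstrumentContext
from opentrons.protocol_api.disposal_locations import TrashBin


def build_drop_tip_command(
    pipette: InstrumentContext, trash: TrashBin
) -> DropTipInDisposalLocationCommand:
    command = cmds.drop_tip_in_disposal_location(pipette, trash)
    # For a trash bin in slot A3 the payload text reads
    # "Dropping tip into Trash Bin on slot A3".
    return command
```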
diff --git a/api/src/opentrons/legacy_commands/helpers.py b/api/src/opentrons/legacy_commands/helpers.py
new file mode 100644
index 00000000000..b3de03de4bc
--- /dev/null
+++ b/api/src/opentrons/legacy_commands/helpers.py
@@ -0,0 +1,74 @@
+from typing import List, Union
+
+from opentrons.protocol_api.labware import Well, Labware
+from opentrons.protocol_api.module_contexts import ModuleContext
+from opentrons.protocol_api.disposal_locations import TrashBin, WasteChute
+from opentrons.protocol_api._types import OffDeckType
+from opentrons.types import Location, DeckLocation
+
+
+CommandLocation = Union[Location, Well]
+
+
+def listify(
+ location: Union[CommandLocation, List[CommandLocation]]
+) -> List[CommandLocation]:
+ if isinstance(location, list):
+ try:
+ return listify(location[0])
+ except IndexError:
+ # TODO(mc, 2021-10-20): this looks like a bug; should this
+ # return an empty list, instead?
+ return [location] # type: ignore[list-item]
+ else:
+ return [location]
+
+
+def _stringify_new_loc(loc: CommandLocation) -> str:
+ if isinstance(loc, Location):
+ if loc.labware.is_empty:
+ return str(loc.point)
+ else:
+ return repr(loc.labware)
+ elif isinstance(loc, Well):
+ return str(loc)
+ else:
+ raise TypeError(loc)
+
+
+def stringify_location(location: Union[CommandLocation, List[CommandLocation]]) -> str:
+ loc_str_list = [_stringify_new_loc(loc) for loc in listify(location)]
+ return ", ".join(loc_str_list)
+
+
+def stringify_disposal_location(location: Union[TrashBin, WasteChute]) -> str:
+ if isinstance(location, TrashBin):
+ return f"Trash Bin on slot {location.location.id}"
+ elif isinstance(location, WasteChute):
+ return "Waste Chute"
+
+
+def _stringify_labware_movement_location(
+ location: Union[DeckLocation, OffDeckType, Labware, ModuleContext, WasteChute]
+) -> str:
+ if isinstance(location, (int, str)):
+ return f"slot {location}"
+ elif isinstance(location, OffDeckType):
+ return "off-deck"
+ elif isinstance(location, Labware):
+ return location.name
+ elif isinstance(location, ModuleContext):
+ return str(location)
+ elif isinstance(location, WasteChute):
+ return "Waste Chute"
+
+
+def stringify_labware_movement_command(
+ source_labware: Labware,
+ destination: Union[DeckLocation, OffDeckType, Labware, ModuleContext, WasteChute],
+ use_gripper: bool,
+) -> str:
+ source_labware_text = _stringify_labware_movement_location(source_labware)
+ destination_text = _stringify_labware_movement_location(destination)
+ gripper_text = " with gripper" if use_gripper else ""
+ return f"Moving {source_labware_text} to {destination_text}{gripper_text}"
diff --git a/api/src/opentrons/commands/module_commands.py b/api/src/opentrons/legacy_commands/module_commands.py
similarity index 100%
rename from api/src/opentrons/commands/module_commands.py
rename to api/src/opentrons/legacy_commands/module_commands.py
diff --git a/api/src/opentrons/commands/protocol_commands.py b/api/src/opentrons/legacy_commands/protocol_commands.py
similarity index 88%
rename from api/src/opentrons/commands/protocol_commands.py
rename to api/src/opentrons/legacy_commands/protocol_commands.py
index e48dadc87b9..2b1b70bb0d9 100644
--- a/api/src/opentrons/commands/protocol_commands.py
+++ b/api/src/opentrons/legacy_commands/protocol_commands.py
@@ -45,3 +45,10 @@ def resume() -> command_types.ResumeCommand:
"name": command_types.RESUME,
"payload": {"text": "Resuming robot operation"},
}
+
+
+def move_labware(text: str) -> command_types.MoveLabwareCommand:
+ return {
+ "name": command_types.MOVE_LABWARE,
+ "payload": {"text": text},
+ }
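
The new `move_labware` builder only wraps pre-rendered text; callers are expected to build that text with `stringify_labware_movement_command` from the helpers above. Illustrative sketch (`plate` is a hypothetical Labware and "D2" a destination slot):

    from opentrons.legacy_commands import protocol_commands
    from opentrons.legacy_commands.helpers import stringify_labware_movement_command

    # `plate` is a hypothetical Labware; "D2" is the destination deck slot.
    text = stringify_labware_movement_command(plate, "D2", use_gripper=True)
    command = protocol_commands.move_labware(text=text)
    assert command["name"] == "command.MOVE_LABWARE"
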
diff --git a/api/src/opentrons/commands/publisher.py b/api/src/opentrons/legacy_commands/publisher.py
similarity index 100%
rename from api/src/opentrons/commands/publisher.py
rename to api/src/opentrons/legacy_commands/publisher.py
diff --git a/api/src/opentrons/legacy_commands/types.py b/api/src/opentrons/legacy_commands/types.py
new file mode 100755
index 00000000000..5aaa72b8e09
--- /dev/null
+++ b/api/src/opentrons/legacy_commands/types.py
@@ -0,0 +1,927 @@
+from __future__ import annotations
+
+from typing_extensions import Literal, Final, TypedDict
+from typing import Optional, List, Sequence, TYPE_CHECKING, Union
+from opentrons.hardware_control.modules import ThermocyclerStep
+
+if TYPE_CHECKING:
+ from opentrons.protocol_api import InstrumentContext
+ from opentrons.protocol_api.labware import Well
+ from opentrons.protocol_api.disposal_locations import TrashBin, WasteChute
+
+from opentrons.types import Location
+
+
+# type for subscriptions
+COMMAND: Final = "command"
+
+# Robot #
+
+DELAY: Final = "command.DELAY"
+HOME: Final = "command.HOME"
+PAUSE: Final = "command.PAUSE"
+RESUME: Final = "command.RESUME"
+COMMENT: Final = "command.COMMENT"
+MOVE_LABWARE: Final = "command.MOVE_LABWARE"
+
+# Pipette #
+
+ASPIRATE: Final = "command.ASPIRATE"
+DISPENSE: Final = "command.DISPENSE"
+DISPENSE_IN_DISPOSAL_LOCATION: Final = "command.DISPENSE_IN_DISPOSAL_LOCATION"
+MIX: Final = "command.MIX"
+CONSOLIDATE: Final = "command.CONSOLIDATE"
+DISTRIBUTE: Final = "command.DISTRIBUTE"
+TRANSFER: Final = "command.TRANSFER"
+PICK_UP_TIP: Final = "command.PICK_UP_TIP"
+DROP_TIP: Final = "command.DROP_TIP"
+DROP_TIP_IN_DISPOSAL_LOCATION: Final = "command.DROP_TIP_IN_DISPOSAL_LOCATION"
+BLOW_OUT: Final = "command.BLOW_OUT"
+BLOW_OUT_IN_DISPOSAL_LOCATION: Final = "command.BLOW_OUT_IN_DISPOSAL_LOCATION"
+AIR_GAP: Final = "command.AIR_GAP"
+TOUCH_TIP: Final = "command.TOUCH_TIP"
+RETURN_TIP: Final = "command.RETURN_TIP"
+MOVE_TO: Final = "command.MOVE_TO"
+MOVE_TO_DISPOSAL_LOCATION: Final = "command.MOVE_TO_DISPOSAL_LOCATION"
+
+# Modules #
+
+HEATER_SHAKER_SET_TARGET_TEMPERATURE: Final = (
+ "command.HEATER_SHAKER_SET_TARGET_TEMPERATURE"
+)
+HEATER_SHAKER_WAIT_FOR_TEMPERATURE: Final = "command.HEATER_SHAKER_WAIT_FOR_TEMPERATURE"
+HEATER_SHAKER_SET_AND_WAIT_FOR_SHAKE_SPEED: Final = (
+ "command.HEATER_SHAKER_SET_AND_WAIT_FOR_SHAKE_SPEED"
+)
+HEATER_SHAKER_OPEN_LABWARE_LATCH: Final = "command.HEATER_SHAKER_OPEN_LABWARE_LATCH"
+HEATER_SHAKER_CLOSE_LABWARE_LATCH: Final = "command.HEATER_SHAKER_CLOSE_LABWARE_LATCH"
+HEATER_SHAKER_DEACTIVATE_SHAKER: Final = "command.HEATER_SHAKER_DEACTIVATE_SHAKER"
+HEATER_SHAKER_DEACTIVATE_HEATER: Final = "command.HEATER_SHAKER_DEACTIVATE_HEATER"
+
+MAGDECK_CALIBRATE: Final = "command.MAGDECK_CALIBRATE"
+MAGDECK_DISENGAGE: Final = "command.MAGDECK_DISENGAGE"
+MAGDECK_ENGAGE: Final = "command.MAGDECK_ENGAGE"
+
+TEMPDECK_DEACTIVATE: Final = "command.TEMPDECK_DEACTIVATE"
+TEMPDECK_SET_TEMP: Final = "command.TEMPDECK_SET_TEMP"
+TEMPDECK_AWAIT_TEMP: Final = "command.TEMPDECK_AWAIT_TEMP"
+
+THERMOCYCLER_OPEN: Final = "command.THERMOCYCLER_OPEN"
+THERMOCYCLER_CLOSE: Final = "command.THERMOCYCLER_CLOSE"
+THERMOCYCLER_SET_BLOCK_TEMP: Final = "command.THERMOCYCLER_SET_BLOCK_TEMP"
+THERMOCYCLER_EXECUTE_PROFILE: Final = "command.THERMOCYCLER_EXECUTE_PROFILE"
+THERMOCYCLER_DEACTIVATE: Final = "command.THERMOCYCLER_DEACTIVATE"
+THERMOCYCLER_WAIT_FOR_HOLD: Final = "command.THERMOCYCLER_WAIT_FOR_HOLD"
+THERMOCYCLER_WAIT_FOR_TEMP: Final = "command.THERMOCYCLER_WAIT_FOR_TEMP"
+THERMOCYCLER_WAIT_FOR_LID_TEMP: Final = "command.THERMOCYCLER_WAIT_FOR_LID_TEMP"
+THERMOCYCLER_SET_LID_TEMP: Final = "command.THERMOCYCLER_SET_LID_TEMP"
+THERMOCYCLER_DEACTIVATE_LID: Final = "command.THERMOCYCLER_DEACTIVATE_LID"
+THERMOCYCLER_DEACTIVATE_BLOCK: Final = "command.THERMOCYCLER_DEACTIVATE_BLOCK"
+
+
+class TextOnlyPayload(TypedDict):
+ text: str
+
+
+class MultiLocationPayload(TypedDict):
+ locations: Sequence[Union[Location, Well]]
+
+
+class OptionalMultiLocationPayload(TypedDict):
+ locations: Optional[Sequence[Union[Location, Well]]]
+
+
+class SingleInstrumentPayload(TypedDict):
+ instrument: InstrumentContext
+
+
+class MultiInstrumentPayload(TypedDict):
+ instruments: Sequence[InstrumentContext]
+
+
+class CommentCommandPayload(TextOnlyPayload):
+ pass
+
+
+class CommentCommand(TypedDict):
+ name: Literal["command.COMMENT"]
+ payload: CommentCommandPayload
+
+
+class DelayCommandPayload(TextOnlyPayload):
+ minutes: float
+ seconds: float
+
+
+class DelayCommand(TypedDict):
+ name: Literal["command.DELAY"]
+ payload: DelayCommandPayload
+
+
+class PauseCommandPayload(TextOnlyPayload):
+ userMessage: Optional[str]
+
+
+class PauseCommand(TypedDict):
+ name: Literal["command.PAUSE"]
+ payload: PauseCommandPayload
+
+
+class ResumeCommandPayload(TextOnlyPayload):
+ pass
+
+
+class ResumeCommand(TypedDict):
+ name: Literal["command.RESUME"]
+ payload: ResumeCommandPayload
+
+
+class HeaterShakerSetTargetTemperaturePayload(TextOnlyPayload):
+ pass
+
+
+class HeaterShakerSetTargetTemperatureCommand(TypedDict):
+ name: Literal["command.HEATER_SHAKER_SET_TARGET_TEMPERATURE"]
+ payload: HeaterShakerSetTargetTemperaturePayload
+
+
+class HeaterShakerWaitForTemperaturePayload(TextOnlyPayload):
+ pass
+
+
+class HeaterShakerWaitForTemperatureCommand(TypedDict):
+ name: Literal["command.HEATER_SHAKER_WAIT_FOR_TEMPERATURE"]
+ payload: HeaterShakerWaitForTemperaturePayload
+
+
+class HeaterShakerSetAndWaitForShakeSpeedPayload(TextOnlyPayload):
+ pass
+
+
+class HeaterShakerSetAndWaitForShakeSpeedCommand(TypedDict):
+ name: Literal["command.HEATER_SHAKER_SET_AND_WAIT_FOR_SHAKE_SPEED"]
+ payload: HeaterShakerSetAndWaitForShakeSpeedPayload
+
+
+class HeaterShakerOpenLabwareLatchPayload(TextOnlyPayload):
+ pass
+
+
+class HeaterShakerOpenLabwareLatchCommand(TypedDict):
+ name: Literal["command.HEATER_SHAKER_OPEN_LABWARE_LATCH"]
+ payload: HeaterShakerOpenLabwareLatchPayload
+
+
+class HeaterShakerCloseLabwareLatchPayload(TextOnlyPayload):
+ pass
+
+
+class HeaterShakerCloseLabwareLatchCommand(TypedDict):
+ name: Literal["command.HEATER_SHAKER_CLOSE_LABWARE_LATCH"]
+ payload: HeaterShakerCloseLabwareLatchPayload
+
+
+class HeaterShakerDeactivateShakerPayload(TextOnlyPayload):
+ pass
+
+
+class HeaterShakerDeactivateShakerCommand(TypedDict):
+ name: Literal["command.HEATER_SHAKER_DEACTIVATE_SHAKER"]
+ payload: HeaterShakerDeactivateShakerPayload
+
+
+class HeaterShakerDeactivateHeaterPayload(TextOnlyPayload):
+ pass
+
+
+class HeaterShakerDeactivateHeaterCommand(TypedDict):
+ name: Literal["command.HEATER_SHAKER_DEACTIVATE_HEATER"]
+ payload: HeaterShakerDeactivateHeaterPayload
+
+
+class MagdeckEngageCommandPayload(TextOnlyPayload):
+ pass
+
+
+class MagdeckEngageCommand(TypedDict):
+ name: Literal["command.MAGDECK_ENGAGE"]
+ payload: MagdeckEngageCommandPayload
+
+
+class MagdeckDisengageCommandPayload(TextOnlyPayload):
+ pass
+
+
+class MagdeckDisengageCommand(TypedDict):
+ name: Literal["command.MAGDECK_DISENGAGE"]
+ payload: MagdeckDisengageCommandPayload
+
+
+class MagdeckCalibrateCommandPayload(TextOnlyPayload):
+ pass
+
+
+class MagdeckCalibrateCommand(TypedDict):
+ name: Literal["command.MAGDECK_CALIBRATE"]
+ payload: MagdeckCalibrateCommandPayload
+
+
+class TempdeckSetTempCommandPayload(TextOnlyPayload):
+ celsius: float
+
+
+class TempdeckSetTempCommand(TypedDict):
+ name: Literal["command.TEMPDECK_SET_TEMP"]
+ payload: TempdeckSetTempCommandPayload
+
+
+class TempdeckAwaitTempCommandPayload(TextOnlyPayload):
+ celsius: float
+
+
+class TempdeckAwaitTempCommand(TypedDict):
+ name: Literal["command.TEMPDECK_AWAIT_TEMP"]
+ payload: TempdeckAwaitTempCommandPayload
+
+
+class TempdeckDeactivateCommandPayload(TextOnlyPayload):
+ pass
+
+
+class TempdeckDeactivateCommand(TypedDict):
+ name: Literal["command.TEMPDECK_DEACTIVATE"]
+ payload: TempdeckDeactivateCommandPayload
+
+
+class ThermocyclerOpenCommandPayload(TextOnlyPayload):
+ pass
+
+
+class ThermocyclerOpenCommand(TypedDict):
+ name: Literal["command.THERMOCYCLER_OPEN"]
+ payload: ThermocyclerOpenCommandPayload
+
+
+class ThermocyclerSetBlockTempCommandPayload(TextOnlyPayload):
+ temperature: float
+ hold_time: Optional[float]
+
+
+class ThermocyclerSetBlockTempCommand(TypedDict):
+ name: Literal["command.THERMOCYCLER_SET_BLOCK_TEMP"]
+ payload: ThermocyclerSetBlockTempCommandPayload
+
+
+class ThermocyclerExecuteProfileCommandPayload(TextOnlyPayload):
+ steps: List[ThermocyclerStep]
+
+
+class ThermocyclerExecuteProfileCommand(TypedDict):
+ name: Literal["command.THERMOCYCLER_EXECUTE_PROFILE"]
+ payload: ThermocyclerExecuteProfileCommandPayload
+
+
+class ThermocyclerWaitForHoldCommandPayload(TextOnlyPayload):
+ pass
+
+
+class ThermocyclerWaitForHoldCommand(TypedDict):
+ name: Literal["command.THERMOCYCLER_WAIT_FOR_HOLD"]
+ payload: ThermocyclerWaitForHoldCommandPayload
+
+
+class ThermocyclerWaitForTempCommandPayload(TextOnlyPayload):
+ pass
+
+
+class ThermocyclerWaitForTempCommand(TypedDict):
+ name: Literal["command.THERMOCYCLER_WAIT_FOR_TEMP"]
+ payload: ThermocyclerWaitForTempCommandPayload
+
+
+class ThermocyclerSetLidTempCommandPayload(TextOnlyPayload):
+ pass
+
+
+class ThermocyclerSetLidTempCommand(TypedDict):
+ name: Literal["command.THERMOCYCLER_SET_LID_TEMP"]
+ payload: ThermocyclerSetLidTempCommandPayload
+
+
+class ThermocyclerDeactivateLidCommandPayload(TextOnlyPayload):
+ pass
+
+
+class ThermocyclerDeactivateLidCommand(TypedDict):
+ name: Literal["command.THERMOCYCLER_DEACTIVATE_LID"]
+ payload: ThermocyclerDeactivateLidCommandPayload
+
+
+class ThermocyclerDeactivateBlockCommandPayload(TextOnlyPayload):
+ pass
+
+
+class ThermocyclerDeactivateBlockCommand(TypedDict):
+ name: Literal["command.THERMOCYCLER_DEACTIVATE_BLOCK"]
+ payload: ThermocyclerDeactivateBlockCommandPayload
+
+
+class ThermocyclerDeactivateCommandPayload(TextOnlyPayload):
+ pass
+
+
+class ThermocyclerDeactivateCommand(TypedDict):
+ name: Literal["command.THERMOCYCLER_DEACTIVATE"]
+ payload: ThermocyclerDeactivateCommandPayload
+
+
+class ThermocyclerWaitForLidTempCommandPayload(TextOnlyPayload):
+ pass
+
+
+class ThermocyclerWaitForLidTempCommand(TypedDict):
+ name: Literal["command.THERMOCYCLER_WAIT_FOR_LID_TEMP"]
+ payload: ThermocyclerWaitForLidTempCommandPayload
+
+
+class ThermocyclerCloseCommandPayload(TextOnlyPayload):
+ pass
+
+
+class ThermocyclerCloseCommand(TypedDict):
+ name: Literal["command.THERMOCYCLER_CLOSE"]
+ payload: ThermocyclerCloseCommandPayload
+
+
+class HomeCommandPayload(TextOnlyPayload):
+ axis: str
+
+
+class HomeCommand(TypedDict):
+ name: Literal["command.HOME"]
+ payload: HomeCommandPayload
+
+
+class AspirateDispenseCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
+ location: Location
+ volume: float
+ rate: float
+
+
+class AspirateCommand(TypedDict):
+ name: Literal["command.ASPIRATE"]
+ payload: AspirateDispenseCommandPayload
+
+
+class DispenseCommand(TypedDict):
+ name: Literal["command.DISPENSE"]
+ payload: AspirateDispenseCommandPayload
+
+
+class DispenseInDisposalLocationCommandPayload(
+ TextOnlyPayload, SingleInstrumentPayload
+):
+ location: Union[TrashBin, WasteChute]
+ volume: float
+ rate: float
+
+
+class DispenseInDisposalLocationCommand(TypedDict):
+ name: Literal["command.DISPENSE_IN_DISPOSAL_LOCATION"]
+ payload: DispenseInDisposalLocationCommandPayload
+
+
+class ConsolidateCommandPayload(
+ TextOnlyPayload, MultiLocationPayload, SingleInstrumentPayload
+):
+ volume: Union[float, List[float]]
+ source: List[Union[Location, Well]]
+ dest: Union[Location, Well]
+
+
+class ConsolidateCommand(TypedDict):
+ name: Literal["command.CONSOLIDATE"]
+ payload: ConsolidateCommandPayload
+
+
+class DistributeCommandPayload(
+ TextOnlyPayload, MultiLocationPayload, SingleInstrumentPayload
+):
+ volume: Union[float, List[float]]
+ source: Union[Location, Well]
+ dest: List[Union[Location, Well]]
+
+
+class DistributeCommand(TypedDict):
+ name: Literal["command.DISTRIBUTE"]
+ payload: DistributeCommandPayload
+
+
+class TransferCommandPayload(
+ TextOnlyPayload, MultiLocationPayload, SingleInstrumentPayload
+):
+ volume: Union[float, List[float]]
+ source: List[Union[Location, Well]]
+ dest: List[Union[Location, Well]]
+
+
+class TransferCommand(TypedDict):
+ name: Literal["command.TRANSFER"]
+ payload: TransferCommandPayload
+
+
+class MixCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
+ location: Union[None, Location, Well]
+ volume: float
+ repetitions: int
+
+
+class MixCommand(TypedDict):
+ name: Literal["command.MIX"]
+ payload: MixCommandPayload
+
+
+class BlowOutCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
+ location: Optional[Location]
+
+
+class BlowOutCommand(TypedDict):
+ name: Literal["command.BLOW_OUT"]
+ payload: BlowOutCommandPayload
+
+
+class BlowOutInDisposalLocationCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
+ location: Union[TrashBin, WasteChute]
+
+
+class BlowOutInDisposalLocationCommand(TypedDict):
+ name: Literal["command.BLOW_OUT_IN_DISPOSAL_LOCATION"]
+ payload: BlowOutInDisposalLocationCommandPayload
+
+
+class TouchTipCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
+ pass
+
+
+class TouchTipCommand(TypedDict):
+ name: Literal["command.TOUCH_TIP"]
+ payload: TouchTipCommandPayload
+
+
+class AirGapCommandPayload(TextOnlyPayload):
+ pass
+
+
+class AirGapCommand(TypedDict):
+ name: Literal["command.AIR_GAP"]
+ payload: AirGapCommandPayload
+
+
+class ReturnTipCommandPayload(TextOnlyPayload):
+ pass
+
+
+class ReturnTipCommand(TypedDict):
+ name: Literal["command.RETURN_TIP"]
+ payload: ReturnTipCommandPayload
+
+
+class PickUpTipCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
+ location: Well
+
+
+class PickUpTipCommand(TypedDict):
+ name: Literal["command.PICK_UP_TIP"]
+ payload: PickUpTipCommandPayload
+
+
+class DropTipCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
+ location: Well
+
+
+class DropTipCommand(TypedDict):
+ name: Literal["command.DROP_TIP"]
+ payload: DropTipCommandPayload
+
+
+class DropTipInDisposalLocationCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
+ location: Union[TrashBin, WasteChute]
+
+
+class DropTipInDisposalLocationCommand(TypedDict):
+ name: Literal["command.DROP_TIP_IN_DISPOSAL_LOCATION"]
+ payload: DropTipInDisposalLocationCommandPayload
+
+
+class MoveToCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
+ location: Location
+
+
+class MoveToCommand(TypedDict):
+ name: Literal["command.MOVE_TO"]
+ payload: MoveToCommandPayload
+
+
+class MoveToDisposalLocationCommandPayload(TextOnlyPayload, SingleInstrumentPayload):
+ location: Union[TrashBin, WasteChute]
+
+
+class MoveToDisposalLocationCommand(TypedDict):
+ name: Literal["command.MOVE_TO_DISPOSAL_LOCATION"]
+ payload: MoveToDisposalLocationCommandPayload
+
+
+class MoveLabwareCommandPayload(TextOnlyPayload):
+ pass
+
+
+class MoveLabwareCommand(TypedDict):
+ name: Literal["command.MOVE_LABWARE"]
+ payload: MoveLabwareCommandPayload
+
+
+Command = Union[
+ DropTipCommand,
+ DropTipInDisposalLocationCommand,
+ PickUpTipCommand,
+ ReturnTipCommand,
+ AirGapCommand,
+ TouchTipCommand,
+ BlowOutCommand,
+ BlowOutInDisposalLocationCommand,
+ MixCommand,
+ TransferCommand,
+ DistributeCommand,
+ ConsolidateCommand,
+ DispenseCommand,
+ DispenseInDisposalLocationCommand,
+ AspirateCommand,
+ HomeCommand,
+ HeaterShakerSetTargetTemperatureCommand,
+ HeaterShakerWaitForTemperatureCommand,
+ HeaterShakerSetAndWaitForShakeSpeedCommand,
+ HeaterShakerOpenLabwareLatchCommand,
+ HeaterShakerCloseLabwareLatchCommand,
+ HeaterShakerDeactivateShakerCommand,
+ HeaterShakerDeactivateHeaterCommand,
+ ThermocyclerCloseCommand,
+ ThermocyclerWaitForLidTempCommand,
+ ThermocyclerDeactivateCommand,
+ ThermocyclerDeactivateBlockCommand,
+ ThermocyclerDeactivateLidCommand,
+ ThermocyclerSetLidTempCommand,
+ ThermocyclerWaitForTempCommand,
+ ThermocyclerWaitForHoldCommand,
+ ThermocyclerExecuteProfileCommand,
+ ThermocyclerSetBlockTempCommand,
+ ThermocyclerOpenCommand,
+ TempdeckDeactivateCommand,
+ TempdeckAwaitTempCommand,
+ TempdeckSetTempCommand,
+ MagdeckCalibrateCommand,
+ MagdeckDisengageCommand,
+ MagdeckEngageCommand,
+ ResumeCommand,
+ PauseCommand,
+ DelayCommand,
+ CommentCommand,
+ MoveToCommand,
+ MoveToDisposalLocationCommand,
+ MoveLabwareCommand,
+]
+
+
+CommandPayload = Union[
+ CommentCommandPayload,
+ ResumeCommandPayload,
+ HeaterShakerSetTargetTemperaturePayload,
+ HeaterShakerWaitForTemperaturePayload,
+ HeaterShakerSetAndWaitForShakeSpeedPayload,
+ HeaterShakerOpenLabwareLatchPayload,
+ HeaterShakerCloseLabwareLatchPayload,
+ HeaterShakerDeactivateShakerPayload,
+ HeaterShakerDeactivateHeaterPayload,
+ MagdeckEngageCommandPayload,
+ MagdeckDisengageCommandPayload,
+ MagdeckCalibrateCommandPayload,
+ ThermocyclerOpenCommandPayload,
+ ThermocyclerWaitForHoldCommandPayload,
+ ThermocyclerWaitForTempCommandPayload,
+ ThermocyclerSetLidTempCommandPayload,
+ ThermocyclerDeactivateLidCommandPayload,
+ ThermocyclerDeactivateBlockCommandPayload,
+ ThermocyclerDeactivateCommandPayload,
+    ThermocyclerWaitForLidTempCommandPayload,
+ ThermocyclerCloseCommandPayload,
+ AirGapCommandPayload,
+ ReturnTipCommandPayload,
+ DropTipCommandPayload,
+ DropTipInDisposalLocationCommandPayload,
+ PickUpTipCommandPayload,
+ TouchTipCommandPayload,
+ BlowOutCommandPayload,
+ BlowOutInDisposalLocationCommandPayload,
+ MixCommandPayload,
+ TransferCommandPayload,
+ DistributeCommandPayload,
+ ConsolidateCommandPayload,
+ AspirateDispenseCommandPayload,
+ DispenseInDisposalLocationCommandPayload,
+ HomeCommandPayload,
+ ThermocyclerExecuteProfileCommandPayload,
+ ThermocyclerSetBlockTempCommandPayload,
+ TempdeckAwaitTempCommandPayload,
+ TempdeckSetTempCommandPayload,
+ PauseCommandPayload,
+ DelayCommandPayload,
+ MoveToCommandPayload,
+ MoveToDisposalLocationCommandPayload,
+ MoveLabwareCommandPayload,
+]
+
+
+MessageSequenceId = Union[Literal["before"], Literal["after"]]
+
+
+CommandMessageFields = TypedDict(
+ "CommandMessageFields",
+ {"$": MessageSequenceId, "id": str, "error": Optional[Exception]},
+)
+
+
+class MoveToMessage(CommandMessageFields, MoveToCommand):
+ pass
+
+
+class MoveToDisposalLocationMessage(
+ CommandMessageFields, MoveToDisposalLocationCommand
+):
+ pass
+
+
+class DropTipMessage(CommandMessageFields, DropTipCommand):
+ pass
+
+
+class DropTipInDisposalLocationMessage(
+ CommandMessageFields, DropTipInDisposalLocationCommand
+):
+ pass
+
+
+class PickUpTipMessage(CommandMessageFields, PickUpTipCommand):
+ pass
+
+
+class ReturnTipMessage(CommandMessageFields, ReturnTipCommand):
+ pass
+
+
+class AirGapMessage(CommandMessageFields, AirGapCommand):
+ pass
+
+
+class TouchTipMessage(CommandMessageFields, TouchTipCommand):
+ pass
+
+
+class BlowOutMessage(CommandMessageFields, BlowOutCommand):
+ pass
+
+
+class BlowOutInDisposalLocationMessage(
+ CommandMessageFields, BlowOutInDisposalLocationCommand
+):
+ pass
+
+
+class MixMessage(CommandMessageFields, MixCommand):
+ pass
+
+
+class TransferMessage(CommandMessageFields, TransferCommand):
+ pass
+
+
+class DistributeMessage(CommandMessageFields, DistributeCommand):
+ pass
+
+
+class ConsolidateMessage(CommandMessageFields, ConsolidateCommand):
+ pass
+
+
+class DispenseMessage(CommandMessageFields, DispenseCommand):
+ pass
+
+
+class DispenseInDisposalLocationMessage(
+ CommandMessageFields, DispenseInDisposalLocationCommand
+):
+ pass
+
+
+class AspirateMessage(CommandMessageFields, AspirateCommand):
+ pass
+
+
+class HomeMessage(CommandMessageFields, HomeCommand):
+ pass
+
+
+class HeaterShakerSetTargetTemperatureMessage(
+ CommandMessageFields, HeaterShakerSetTargetTemperatureCommand
+):
+ pass
+
+
+class HeaterShakerWaitForTemperatureMessage(
+ CommandMessageFields, HeaterShakerWaitForTemperatureCommand
+):
+ pass
+
+
+class HeaterShakerSetAndWaitForShakeSpeedMessage(
+ CommandMessageFields, HeaterShakerSetAndWaitForShakeSpeedCommand
+):
+ pass
+
+
+class HeaterShakerOpenLabwareLatchMessage(
+ CommandMessageFields, HeaterShakerOpenLabwareLatchCommand
+):
+ pass
+
+
+class HeaterShakerCloseLabwareLatchMessage(
+ CommandMessageFields, HeaterShakerCloseLabwareLatchCommand
+):
+ pass
+
+
+class HeaterShakerDeactivateShakerMessage(
+ CommandMessageFields, HeaterShakerDeactivateShakerCommand
+):
+ pass
+
+
+class HeaterShakerDeactivateHeaterMessage(
+ CommandMessageFields, HeaterShakerDeactivateHeaterCommand
+):
+ pass
+
+
+class ThermocyclerCloseMessage(CommandMessageFields, ThermocyclerCloseCommand):
+ pass
+
+
+class ThermocyclerWaitForLidTempMessage(
+ CommandMessageFields, ThermocyclerWaitForLidTempCommand
+):
+ pass
+
+
+class ThermocyclerDeactivateMessage(
+ CommandMessageFields, ThermocyclerDeactivateCommand
+):
+ pass
+
+
+class ThermocyclerDeactivateBlockMessage(
+ CommandMessageFields, ThermocyclerDeactivateBlockCommand
+):
+ pass
+
+
+class ThermocyclerDeactivateLidMessage(
+ CommandMessageFields, ThermocyclerDeactivateLidCommand
+):
+ pass
+
+
+class ThermocyclerSetLidTempMessage(
+ CommandMessageFields, ThermocyclerSetLidTempCommand
+):
+ pass
+
+
+class ThermocyclerWaitForTempMessage(
+ CommandMessageFields, ThermocyclerWaitForTempCommand
+):
+ pass
+
+
+class ThermocyclerWaitForHoldMessage(
+ CommandMessageFields, ThermocyclerWaitForHoldCommand
+):
+ pass
+
+
+class ThermocyclerExecuteProfileMessage(
+ CommandMessageFields, ThermocyclerExecuteProfileCommand
+):
+ pass
+
+
+class ThermocyclerSetBlockTempMessage(
+ CommandMessageFields, ThermocyclerSetBlockTempCommand
+):
+ pass
+
+
+class ThermocyclerOpenMessage(CommandMessageFields, ThermocyclerOpenCommand):
+ pass
+
+
+class TempdeckDeactivateMessage(CommandMessageFields, TempdeckDeactivateCommand):
+ pass
+
+
+class TempdeckAwaitTempMessage(CommandMessageFields, TempdeckAwaitTempCommand):
+ pass
+
+
+class TempdeckSetTempMessage(CommandMessageFields, TempdeckSetTempCommand):
+ pass
+
+
+class MagdeckCalibrateMessage(CommandMessageFields, MagdeckCalibrateCommand):
+ pass
+
+
+class MagdeckDisengageMessage(CommandMessageFields, MagdeckDisengageCommand):
+ pass
+
+
+class MagdeckEngageMessage(CommandMessageFields, MagdeckEngageCommand):
+ pass
+
+
+class ResumeMessage(CommandMessageFields, ResumeCommand):
+ pass
+
+
+class PauseMessage(CommandMessageFields, PauseCommand):
+ pass
+
+
+class DelayMessage(CommandMessageFields, DelayCommand):
+ pass
+
+
+class CommentMessage(CommandMessageFields, CommentCommand):
+ pass
+
+
+class MoveLabwareMessage(CommandMessageFields, MoveLabwareCommand):
+ pass
+
+
+CommandMessage = Union[
+ DropTipMessage,
+ DropTipInDisposalLocationMessage,
+ PickUpTipMessage,
+ ReturnTipMessage,
+ AirGapMessage,
+ TouchTipMessage,
+ BlowOutMessage,
+ BlowOutInDisposalLocationMessage,
+ MixMessage,
+ TransferMessage,
+ DistributeMessage,
+ ConsolidateMessage,
+ DispenseMessage,
+ DispenseInDisposalLocationMessage,
+ AspirateMessage,
+ HomeMessage,
+ HeaterShakerSetTargetTemperatureMessage,
+ HeaterShakerWaitForTemperatureMessage,
+ HeaterShakerSetAndWaitForShakeSpeedMessage,
+ HeaterShakerOpenLabwareLatchMessage,
+ HeaterShakerCloseLabwareLatchMessage,
+ HeaterShakerDeactivateShakerMessage,
+ HeaterShakerDeactivateHeaterMessage,
+ ThermocyclerCloseMessage,
+ ThermocyclerWaitForLidTempMessage,
+ ThermocyclerDeactivateMessage,
+ ThermocyclerDeactivateBlockMessage,
+ ThermocyclerDeactivateLidMessage,
+ ThermocyclerSetLidTempMessage,
+ ThermocyclerWaitForTempMessage,
+ ThermocyclerWaitForHoldMessage,
+ ThermocyclerExecuteProfileMessage,
+ ThermocyclerSetBlockTempMessage,
+ ThermocyclerOpenMessage,
+ TempdeckSetTempMessage,
+ TempdeckDeactivateMessage,
+ MagdeckEngageMessage,
+ MagdeckDisengageMessage,
+ MagdeckCalibrateMessage,
+ CommentMessage,
+ DelayMessage,
+ PauseMessage,
+ ResumeMessage,
+ MoveToMessage,
+ MoveToDisposalLocationMessage,
+ MoveLabwareMessage,
+]
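
Because every command TypedDict carries a Literal `name`, the `CommandMessage` union can be narrowed on that one string. An illustrative subscriber sketch (a handler like this would typically be registered on the run's broker under the `COMMAND` topic):

    from opentrons.legacy_commands import types as command_types

    def on_command(message: command_types.CommandMessage) -> None:
        # "$" is "before" or "after"; "error" is populated on the "after" message if one occurred.
        if (
            message["$"] == "after"
            and message["name"] == command_types.DROP_TIP_IN_DISPOSAL_LOCATION
        ):
            print(message["payload"]["text"])
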
diff --git a/api/src/opentrons/motion_planning/adjacent_slots_getters.py b/api/src/opentrons/motion_planning/adjacent_slots_getters.py
index 5c8d5b07402..9644f40f157 100644
--- a/api/src/opentrons/motion_planning/adjacent_slots_getters.py
+++ b/api/src/opentrons/motion_planning/adjacent_slots_getters.py
@@ -1,6 +1,10 @@
"""Getters for specific adjacent slots."""
+from dataclasses import dataclass
+from typing import Optional, List, Dict, Union
-from typing import Optional, List
+from opentrons_shared_data.robot.dev_types import RobotType
+
+from opentrons.types import DeckSlotName, StagingSlotName
def get_north_slot(slot: int) -> Optional[int]:
@@ -35,6 +39,116 @@ def get_west_slot(slot: int) -> Optional[int]:
return slot - 1
+def get_north_west_slot(slot: int) -> Optional[int]:
+ """Get the slot that's north-west of the given slot."""
+ if slot in [1, 4, 7, 10, 11, 12]:
+ return None
+ else:
+ north_slot = get_north_slot(slot)
+ return north_slot - 1 if north_slot else None
+
+
+def get_north_east_slot(slot: int) -> Optional[int]:
+ """Get the slot that's north-east of the given slot."""
+ if slot in [3, 6, 9, 10, 11, 12]:
+ return None
+ else:
+ north_slot = get_north_slot(slot)
+ return north_slot + 1 if north_slot else None
+
+
+def get_south_west_slot(slot: int) -> Optional[int]:
+ """Get the slot that's south-west of the given slot."""
+ if slot in [1, 2, 3, 4, 7, 10]:
+ return None
+ else:
+ south_slot = get_south_slot(slot)
+ return south_slot - 1 if south_slot else None
+
+
+def get_south_east_slot(slot: int) -> Optional[int]:
+ """Get the slot that's south-east of the given slot."""
+ if slot in [1, 2, 3, 6, 9, 12]:
+ return None
+ else:
+ south_slot = get_south_slot(slot)
+ return south_slot + 1 if south_slot else None
+
+
+@dataclass
+class _MixedTypeSlots:
+ regular_slots: List[DeckSlotName]
+ staging_slots: List[StagingSlotName]
+
+
+def get_surrounding_slots(slot: int, robot_type: RobotType) -> _MixedTypeSlots:
+ """Get all the surrounding slots, i.e., adjacent slots as well as corner slots."""
+ corner_slots: List[Union[int, None]] = [
+ get_north_east_slot(slot),
+ get_north_west_slot(slot),
+ get_south_east_slot(slot),
+ get_south_west_slot(slot),
+ ]
+
+ surrounding_regular_slots_int = get_adjacent_slots(slot) + [
+ maybe_slot for maybe_slot in corner_slots if maybe_slot is not None
+ ]
+ surrounding_regular_slots = [
+ DeckSlotName.from_primitive(slot_int).to_equivalent_for_robot_type(robot_type)
+ for slot_int in surrounding_regular_slots_int
+ ]
+ surrounding_staging_slots = _SURROUNDING_STAGING_SLOTS_MAP.get(
+ DeckSlotName.from_primitive(slot).to_equivalent_for_robot_type(robot_type), []
+ )
+ return _MixedTypeSlots(
+ regular_slots=surrounding_regular_slots, staging_slots=surrounding_staging_slots
+ )
+
+
+_WEST_OF_STAGING_SLOT_MAP: Dict[StagingSlotName, DeckSlotName] = {
+ StagingSlotName.SLOT_A4: DeckSlotName.SLOT_A3,
+ StagingSlotName.SLOT_B4: DeckSlotName.SLOT_B3,
+ StagingSlotName.SLOT_C4: DeckSlotName.SLOT_C3,
+ StagingSlotName.SLOT_D4: DeckSlotName.SLOT_D3,
+}
+
+_EAST_OF_FLEX_COLUMN_3_MAP: Dict[DeckSlotName, StagingSlotName] = {
+ deck_slot: staging_slot
+ for staging_slot, deck_slot in _WEST_OF_STAGING_SLOT_MAP.items()
+}
+
+
+_SURROUNDING_STAGING_SLOTS_MAP: Dict[DeckSlotName, List[StagingSlotName]] = {
+ DeckSlotName.SLOT_D3: [StagingSlotName.SLOT_C4, StagingSlotName.SLOT_D4],
+ DeckSlotName.SLOT_C3: [
+ StagingSlotName.SLOT_B4,
+ StagingSlotName.SLOT_C4,
+ StagingSlotName.SLOT_D4,
+ ],
+ DeckSlotName.SLOT_B3: [
+ StagingSlotName.SLOT_A4,
+ StagingSlotName.SLOT_B4,
+ StagingSlotName.SLOT_C4,
+ ],
+ DeckSlotName.SLOT_A3: [StagingSlotName.SLOT_A4, StagingSlotName.SLOT_B4],
+}
+
+
+def get_west_of_staging_slot(staging_slot: StagingSlotName) -> DeckSlotName:
+ """Get slot west of a staging slot."""
+ return _WEST_OF_STAGING_SLOT_MAP[staging_slot]
+
+
+def get_adjacent_staging_slot(deck_slot: DeckSlotName) -> Optional[StagingSlotName]:
+ """Get the adjacent staging slot if the deck slot is in the third column."""
+ return _EAST_OF_FLEX_COLUMN_3_MAP.get(deck_slot)
+
+
+def get_surrounding_staging_slots(deck_slot: DeckSlotName) -> List[StagingSlotName]:
+ """Get the staging slots surrounding the given deck slot."""
+ return _SURROUNDING_STAGING_SLOTS_MAP.get(deck_slot, [])
+
+
def get_east_west_slots(slot: int) -> List[int]:
"""Get slots east & west of the given slot."""
east = get_east_slot(slot)
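
For orientation: the OT-2 grid these getters assume is 3 columns by 4 rows, numbered 1 through 12 with the fixed trash in 12, so "north" is +3 and "west" is -1. Illustrative use of the new surrounding-slot lookup:

    from opentrons.motion_planning.adjacent_slots_getters import get_surrounding_slots

    surrounding = get_surrounding_slots(5, robot_type="OT-2 Standard")
    # Slot 5 sits in the middle of the deck, so this should contain the four
    # adjacent slots (2, 4, 6, 8) plus the four corners (1, 3, 7, 9) as
    # DeckSlotName members; an OT-2 deck has no staging slots.
    print(surrounding.regular_slots, surrounding.staging_slots)
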
diff --git a/api/src/opentrons/motion_planning/deck_conflict.py b/api/src/opentrons/motion_planning/deck_conflict.py
index c7cf00ba192..8b26897dc1b 100644
--- a/api/src/opentrons/motion_planning/deck_conflict.py
+++ b/api/src/opentrons/motion_planning/deck_conflict.py
@@ -11,9 +11,10 @@
get_east_west_slots,
get_south_slot,
get_adjacent_slots,
+ get_adjacent_staging_slot,
)
-from opentrons.types import DeckSlotName
+from opentrons.types import DeckSlotName, StagingSlotName
_FIXED_TRASH_SLOT: Final[Set[DeckSlotName]] = {
DeckSlotName.FIXED_TRASH,
@@ -59,6 +60,14 @@ class Labware:
is_fixed_trash: bool
+@dataclass
+class TrashBin:
+ """A non-labware trash bin (loaded via api level 2.16 and above)."""
+
+ name_for_errors: str
+ highest_z: float
+
+
@dataclass
class _Module:
name_for_errors: str
@@ -70,6 +79,11 @@ class HeaterShakerModule(_Module):
"""A Heater-Shaker module."""
+@dataclass
+class MagneticBlockModule(_Module):
+ """A Magnetic Block module."""
+
+
@dataclass
class ThermocyclerModule(_Module):
"""A Thermocycler module."""
@@ -89,17 +103,19 @@ class OtherModule(_Module):
DeckItem = Union[
Labware,
HeaterShakerModule,
+ MagneticBlockModule,
ThermocyclerModule,
OtherModule,
+ TrashBin,
]
class _NothingAllowed(NamedTuple):
"""Nothing is allowed in this slot."""
- location: DeckSlotName
+ location: Union[DeckSlotName, StagingSlotName]
source_item: DeckItem
- source_location: DeckSlotName
+ source_location: Union[DeckSlotName, StagingSlotName]
def is_allowed(self, item: DeckItem) -> bool:
return False
@@ -122,6 +138,8 @@ def is_allowed(self, item: DeckItem) -> bool:
return item.highest_z < self.max_height
elif isinstance(item, _Module):
return item.highest_z_including_labware < self.max_height
+ elif isinstance(item, TrashBin):
+ return item.highest_z < self.max_height
class _NoModule(NamedTuple):
@@ -146,21 +164,11 @@ def is_allowed(self, item: DeckItem) -> bool:
return not isinstance(item, HeaterShakerModule)
-class _FixedTrashOnly(NamedTuple):
- """Only fixed-trash labware is allowed in this slot."""
-
- location: DeckSlotName
-
- def is_allowed(self, item: DeckItem) -> bool:
- return _is_fixed_trash(item)
-
-
_DeckRestriction = Union[
_NothingAllowed,
_MaxHeight,
_NoModule,
_NoHeaterShakerModule,
- _FixedTrashOnly,
]
"""A restriction on what is allowed in a given slot."""
@@ -173,9 +181,9 @@ class DeckConflictError(ValueError):
# things that don't fit into a single deck slot, like the Thermocycler.
# Refactor this interface to take a more symbolic location.
def check(
- existing_items: Mapping[DeckSlotName, DeckItem],
+ existing_items: Mapping[Union[DeckSlotName, StagingSlotName], DeckItem],
new_item: DeckItem,
- new_location: DeckSlotName,
+ new_location: Union[DeckSlotName, StagingSlotName],
robot_type: RobotType,
) -> None:
"""Check a deck layout for conflicts.
@@ -189,11 +197,7 @@ def check(
Raises:
DeckConflictError: Adding this item should not be allowed.
"""
- restrictions: List[_DeckRestriction] = [
- _FixedTrashOnly(
- location=DeckSlotName.FIXED_TRASH.to_equivalent_for_robot_type(robot_type)
- )
- ]
+ restrictions: List[_DeckRestriction] = []
# build restrictions driven by existing items
for location, item in existing_items.items():
restrictions += _create_restrictions(
@@ -224,10 +228,12 @@ def check(
)
-def _create_ot2_restrictions(
- item: DeckItem, location: DeckSlotName
+def _create_ot2_restrictions( # noqa: C901
+ item: DeckItem, location: Union[DeckSlotName, StagingSlotName]
) -> List[_DeckRestriction]:
restrictions: List[_DeckRestriction] = []
+ if isinstance(location, StagingSlotName):
+ raise DeckConflictError(f"OT-2 does not support staging slots ({location.id}).")
if location not in _FIXED_TRASH_SLOT:
# Disallow a different item from overlapping this item in this deck slot.
@@ -239,7 +245,7 @@ def _create_ot2_restrictions(
)
)
- if _is_fixed_trash(item):
+ if _is_ot2_fixed_trash(item):
# A Heater-Shaker can't safely be placed just south of the fixed trash,
# because the fixed trash blocks access to the screw that locks the
# Heater-Shaker onto the deck.
@@ -288,20 +294,35 @@ def _create_ot2_restrictions(
def _create_flex_restrictions(
- item: DeckItem, location: DeckSlotName
+ item: DeckItem, location: Union[DeckSlotName, StagingSlotName]
) -> List[_DeckRestriction]:
- restrictions: List[_DeckRestriction] = []
+ restrictions: List[_DeckRestriction] = [
+ _NothingAllowed(
+ location=location,
+ source_item=item,
+ source_location=location,
+ )
+ ]
- if location not in _FIXED_TRASH_SLOT:
- restrictions.append(
- _NothingAllowed(
- location=location,
- source_item=item,
- source_location=location,
+ if isinstance(item, (HeaterShakerModule, OtherModule)):
+ if isinstance(location, StagingSlotName):
+ raise DeckConflictError(
+ "Cannot have a module loaded on a staging area slot."
+ )
+ adjacent_staging_slot = get_adjacent_staging_slot(location)
+ if adjacent_staging_slot is not None:
+ # You can't have anything on a staging area slot next to a heater-shaker or
+ # temperature module because the module caddy physically blocks you from having
+ # that staging area slot installed in the first place.
+ restrictions.append(
+ _NothingAllowed(
+ location=adjacent_staging_slot,
+ source_item=item,
+ source_location=location,
+ )
)
- )
- if isinstance(item, ThermocyclerModule):
+ elif isinstance(item, ThermocyclerModule):
for covered_location in _flex_slots_covered_by_thermocycler():
restrictions.append(
_NothingAllowed(
@@ -315,7 +336,7 @@ def _create_flex_restrictions(
def _create_restrictions(
- item: DeckItem, location: DeckSlotName, robot_type: str
+ item: DeckItem, location: Union[DeckSlotName, StagingSlotName], robot_type: str
) -> List[_DeckRestriction]:
if robot_type == "OT-2 Standard":
@@ -333,10 +354,7 @@ def _create_deck_conflict_error_message(
new_item is not None or existing_item is not None
), "Conflict error expects either new_item or existing_item"
- if isinstance(restriction, _FixedTrashOnly):
- message = f"Only fixed-trash is allowed in slot {restriction.location}"
-
- elif new_item is not None:
+ if new_item is not None:
message = (
f"{restriction.source_item.name_for_errors}"
f" in slot {restriction.source_location}"
@@ -372,5 +390,7 @@ def _flex_slots_covered_by_thermocycler() -> Set[DeckSlotName]:
return {DeckSlotName.SLOT_B1, DeckSlotName.SLOT_A1}
-def _is_fixed_trash(item: DeckItem) -> bool:
- return isinstance(item, Labware) and item.is_fixed_trash
+def _is_ot2_fixed_trash(item: DeckItem) -> bool:
+ return (isinstance(item, Labware) and item.is_fixed_trash) or isinstance(
+ item, TrashBin
+ )
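
With the fixed-trash-only restriction removed, a movable trash bin is just another DeckItem passed to `check()`. An illustrative call (the height and slot are made-up values):

    from opentrons.motion_planning import deck_conflict
    from opentrons.types import DeckSlotName

    trash = deck_conflict.TrashBin(name_for_errors="trash bin", highest_z=40.0)
    deck_conflict.check(
        existing_items={},  # whatever is already on the deck, keyed by slot
        new_item=trash,
        new_location=DeckSlotName.SLOT_A3,
        robot_type="OT-3 Standard",  # i.e. a Flex
    )  # raises DeckConflictError if the bin can't go there
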
diff --git a/api/src/opentrons/motion_planning/types.py b/api/src/opentrons/motion_planning/types.py
index 1251d00e18c..2c8ca3211ca 100644
--- a/api/src/opentrons/motion_planning/types.py
+++ b/api/src/opentrons/motion_planning/types.py
@@ -38,3 +38,5 @@ class GripperMovementWaypointsWithJawStatus:
position: Point
jaw_open: bool
+ dropping: bool
+ """This flag should only be set to True if this waypoint involves dropping a piece of labware."""
diff --git a/api/src/opentrons/motion_planning/waypoints.py b/api/src/opentrons/motion_planning/waypoints.py
index 0f3634e449d..b9c62114215 100644
--- a/api/src/opentrons/motion_planning/waypoints.py
+++ b/api/src/opentrons/motion_planning/waypoints.py
@@ -126,6 +126,7 @@ def get_gripper_labware_movement_waypoints(
to_labware_center: Point,
gripper_home_z: float,
offset_data: LabwareMovementOffsetData,
+ post_drop_slide_offset: Optional[Point],
) -> List[GripperMovementWaypointsWithJawStatus]:
"""Get waypoints for moving labware using a gripper."""
pick_up_offset = offset_data.pickUpOffset
@@ -138,26 +139,45 @@ def get_gripper_labware_movement_waypoints(
drop_offset.x, drop_offset.y, drop_offset.z
)
+ post_drop_home_pos = Point(drop_location.x, drop_location.y, gripper_home_z)
+
waypoints_with_jaw_status = [
GripperMovementWaypointsWithJawStatus(
position=Point(pick_up_location.x, pick_up_location.y, gripper_home_z),
jaw_open=False,
+ dropping=False,
+ ),
+ GripperMovementWaypointsWithJawStatus(
+ position=pick_up_location, jaw_open=True, dropping=False
),
- GripperMovementWaypointsWithJawStatus(position=pick_up_location, jaw_open=True),
# Gripper grips the labware here
GripperMovementWaypointsWithJawStatus(
position=Point(pick_up_location.x, pick_up_location.y, gripper_home_z),
jaw_open=False,
+ dropping=False,
),
GripperMovementWaypointsWithJawStatus(
position=Point(drop_location.x, drop_location.y, gripper_home_z),
jaw_open=False,
+ dropping=False,
+ ),
+ GripperMovementWaypointsWithJawStatus(
+ position=drop_location, jaw_open=False, dropping=False
),
- GripperMovementWaypointsWithJawStatus(position=drop_location, jaw_open=False),
# Gripper ungrips here
GripperMovementWaypointsWithJawStatus(
- position=Point(drop_location.x, drop_location.y, gripper_home_z),
+ position=post_drop_home_pos,
jaw_open=True,
+ dropping=True,
),
]
+ if post_drop_slide_offset is not None:
+        # If specified, add one more waypoint after the gripper has returned to its home height
+ waypoints_with_jaw_status.append(
+ GripperMovementWaypointsWithJawStatus(
+ position=post_drop_home_pos + post_drop_slide_offset,
+ jaw_open=True,
+ dropping=False,
+ )
+ )
return waypoints_with_jaw_status
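
The optional slide is just one extra waypoint offset from the post-drop home position. A small arithmetic illustration with made-up values:

    from opentrons.types import Point

    gripper_home_z = 165.0                      # hypothetical gripper home height
    drop_location = Point(300.0, 200.0, 50.0)   # hypothetical labware drop point
    post_drop_slide_offset = Point(x=30.0, y=0.0, z=0.0)

    post_drop_home = Point(drop_location.x, drop_location.y, gripper_home_z)
    slide_target = post_drop_home + post_drop_slide_offset
    # -> Point(330.0, 200.0, 165.0): the jaw opens at home height, then the
    #    gripper slides 30 mm in x before the next movement.
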
diff --git a/api/src/opentrons/ordered_set.py b/api/src/opentrons/ordered_set.py
index 99b8f77b3d7..0b0481ddcd8 100644
--- a/api/src/opentrons/ordered_set.py
+++ b/api/src/opentrons/ordered_set.py
@@ -81,7 +81,7 @@ def head(
def head(
self, default_value: Union[_DefaultValueT, _NOT_SPECIFIED] = _NOT_SPECIFIED()
) -> Union[_SetElementT, _DefaultValueT]:
- """Get the head of the set.
+ """Get the head (oldest-added element) of the set.
Args:
default_value: A value to return if set is empty.
@@ -93,12 +93,12 @@ def head(
IndexError: set is empty and default was not specified.
"""
try:
- return next(iter(self))
- except StopIteration as e:
+ return next(iter(self._elements))
+ except StopIteration:
if isinstance(default_value, _NOT_SPECIFIED):
- raise IndexError("Set is empty") from e
-
- return default_value
+ raise IndexError("Set is empty") from None
+ else:
+ return default_value
def __iter__(self) -> Iterator[_SetElementT]:
"""Enable iteration over all elements in the set.
@@ -130,3 +130,9 @@ def __sub__(
The elements that aren't removed retain their original relative order.
"""
return OrderedSet(e for e in self if e not in other)
+
+ def __repr__(self) -> str: # noqa: D105
+ # Use repr() on the keys view in case it's super long and Python is smart
+ # enough to abbreviate it.
+ elements_str = repr(self._elements.keys())
+ return f"OrderedSet({elements_str})"
diff --git a/api/src/opentrons/protocol_api/__init__.py b/api/src/opentrons/protocol_api/__init__.py
index 0d518bbf5c0..1e817c7a882 100644
--- a/api/src/opentrons/protocol_api/__init__.py
+++ b/api/src/opentrons/protocol_api/__init__.py
@@ -22,13 +22,15 @@
HeaterShakerContext,
MagneticBlockContext,
)
+from .disposal_locations import TrashBin, WasteChute
from ._liquid import Liquid
from ._types import OFF_DECK
-from ._waste_chute import WasteChute
from ._nozzle_layout import (
COLUMN,
- EMPTY,
+ ALL,
)
+from ._parameters import Parameters
+from ._parameter_context import ParameterContext
from .create_protocol_context import (
create_protocol_context,
@@ -48,12 +50,15 @@
"ThermocyclerContext",
"HeaterShakerContext",
"MagneticBlockContext",
+ "ParameterContext",
"Labware",
+ "TrashBin",
"WasteChute",
"Well",
"Liquid",
+ "Parameters",
"COLUMN",
- "EMPTY",
+ "ALL",
"OFF_DECK",
# For internal Opentrons use only:
"create_protocol_context",
diff --git a/api/src/opentrons/protocol_api/_nozzle_layout.py b/api/src/opentrons/protocol_api/_nozzle_layout.py
index 45cabb24af6..8e8cdf99521 100644
--- a/api/src/opentrons/protocol_api/_nozzle_layout.py
+++ b/api/src/opentrons/protocol_api/_nozzle_layout.py
@@ -7,11 +7,11 @@ class NozzleLayout(enum.Enum):
SINGLE = "SINGLE"
ROW = "ROW"
QUADRANT = "QUADRANT"
- EMPTY = "EMPTY"
+ ALL = "ALL"
COLUMN: Final = NozzleLayout.COLUMN
-EMPTY: Final = NozzleLayout.EMPTY
+ALL: Final = NozzleLayout.ALL
# Set __doc__ manually as a workaround. When this docstring is written the normal way, right after
# the constant definition, Sphinx has trouble picking it up.
@@ -20,8 +20,8 @@ class NozzleLayout(enum.Enum):
See for details on using ``COLUMN`` with :py:obj:`InstrumentContext.configure_nozzle_layout()`.
"""
-EMPTY.__doc__ = """\
+ALL.__doc__ = """\
A special nozzle configuration type indicating a reset back to default where the pipette will pick up its max capacity of tips.
-See for details on using ``RESET`` with :py:obj:`InstrumentContext.configure_nozzle_layout()`.
+See for details on using ``ALL`` with :py:obj:`InstrumentContext.configure_nozzle_layout()`.
"""
diff --git a/api/src/opentrons/protocol_api/_parameter_context.py b/api/src/opentrons/protocol_api/_parameter_context.py
new file mode 100644
index 00000000000..8c9debd882c
--- /dev/null
+++ b/api/src/opentrons/protocol_api/_parameter_context.py
@@ -0,0 +1,204 @@
+"""Parameter context for python protocols."""
+
+from typing import List, Optional, Union, Dict
+
+from opentrons.protocols.api_support.types import APIVersion
+from opentrons.protocols.parameters import parameter_definition, validation
+from opentrons.protocols.parameters.types import (
+ ParameterChoice,
+ ParameterDefinitionError,
+)
+from opentrons.protocol_engine.types import RunTimeParameter, RunTimeParamValuesType
+
+from ._parameters import Parameters
+
+_ParameterDefinitionTypes = Union[
+ parameter_definition.ParameterDefinition[int],
+ parameter_definition.ParameterDefinition[bool],
+ parameter_definition.ParameterDefinition[float],
+ parameter_definition.ParameterDefinition[str],
+]
+
+
+class ParameterContext:
+ """Public context for adding parameters to a protocol."""
+
+ def __init__(self, api_version: APIVersion) -> None:
+ """Initializes a parameter context for user-set parameters."""
+ self._api_version = api_version
+ self._parameters: Dict[str, _ParameterDefinitionTypes] = {}
+
+ def add_int(
+ self,
+ display_name: str,
+ variable_name: str,
+ default: int,
+ minimum: Optional[int] = None,
+ maximum: Optional[int] = None,
+ choices: Optional[List[ParameterChoice]] = None,
+ description: Optional[str] = None,
+ unit: Optional[str] = None,
+ ) -> None:
+ """Creates an integer parameter, settable within a given range or list of choices.
+
+ Arguments:
+ display_name: The display name of the int parameter as it would show up on the frontend.
+ variable_name: The variable name the int parameter will be referred to in the run context.
+ default: The default value the int parameter will be set to. This will be used in initial analysis.
+ minimum: The minimum value the int parameter can be set to (inclusive). Mutually exclusive with choices.
+ maximum: The maximum value the int parameter can be set to (inclusive). Mutually exclusive with choices.
+ choices: A list of possible choices that this parameter can be set to.
+ Mutually exclusive with minimum and maximum.
+ description: A description of the parameter as it will show up on the frontend.
+            unit: An optional unit to be appended to the end of the integer as it is shown on the frontend.
+ """
+ validation.validate_variable_name_unique(variable_name, set(self._parameters))
+ parameter = parameter_definition.create_int_parameter(
+ display_name=display_name,
+ variable_name=variable_name,
+ default=default,
+ minimum=minimum,
+ maximum=maximum,
+ choices=choices,
+ description=description,
+ unit=unit,
+ )
+ self._parameters[parameter.variable_name] = parameter
+
+ def add_float(
+ self,
+ display_name: str,
+ variable_name: str,
+ default: float,
+ minimum: Optional[float] = None,
+ maximum: Optional[float] = None,
+ choices: Optional[List[ParameterChoice]] = None,
+ description: Optional[str] = None,
+ unit: Optional[str] = None,
+ ) -> None:
+ """Creates a float parameter, settable within a given range or list of choices.
+
+ Arguments:
+ display_name: The display name of the float parameter as it would show up on the frontend.
+ variable_name: The variable name the float parameter will be referred to in the run context.
+ default: The default value the float parameter will be set to. This will be used in initial analysis.
+ minimum: The minimum value the float parameter can be set to (inclusive). Mutually exclusive with choices.
+ maximum: The maximum value the float parameter can be set to (inclusive). Mutually exclusive with choices.
+ choices: A list of possible choices that this parameter can be set to.
+ Mutually exclusive with minimum and maximum.
+ description: A description of the parameter as it will show up on the frontend.
+            unit: An optional unit to be appended to the end of the float as it is shown on the frontend.
+ """
+ validation.validate_variable_name_unique(variable_name, set(self._parameters))
+ parameter = parameter_definition.create_float_parameter(
+ display_name=display_name,
+ variable_name=variable_name,
+ default=validation.ensure_float_value(default),
+ minimum=validation.ensure_optional_float_value(minimum),
+ maximum=validation.ensure_optional_float_value(maximum),
+ choices=validation.ensure_float_choices(choices),
+ description=description,
+ unit=unit,
+ )
+ self._parameters[parameter.variable_name] = parameter
+
+ def add_bool(
+ self,
+ display_name: str,
+ variable_name: str,
+ default: bool,
+ description: Optional[str] = None,
+ ) -> None:
+ """Creates a boolean parameter with allowable values of "On" (True) or "Off" (False).
+
+ Arguments:
+ display_name: The display name of the boolean parameter as it would show up on the frontend.
+ variable_name: The variable name the boolean parameter will be referred to in the run context.
+ default: The default value the boolean parameter will be set to. This will be used in initial analysis.
+ description: A description of the parameter as it will show up on the frontend.
+ """
+ validation.validate_variable_name_unique(variable_name, set(self._parameters))
+ parameter = parameter_definition.create_bool_parameter(
+ display_name=display_name,
+ variable_name=variable_name,
+ default=default,
+ choices=[
+ {"display_name": "On", "value": True},
+ {"display_name": "Off", "value": False},
+ ],
+ description=description,
+ )
+ self._parameters[parameter.variable_name] = parameter
+
+ def add_str(
+ self,
+ display_name: str,
+ variable_name: str,
+ default: str,
+ choices: Optional[List[ParameterChoice]] = None,
+ description: Optional[str] = None,
+ ) -> None:
+ """Creates a string parameter, settable among given choices.
+
+ Arguments:
+ display_name: The display name of the string parameter as it would show up on the frontend.
+ variable_name: The variable name the string parameter will be referred to in the run context.
+ default: The default value the string parameter will be set to. This will be used in initial analysis.
+            choices: A list of possible choices that this parameter can be set to.
+ description: A description of the parameter as it will show up on the frontend.
+ """
+ validation.validate_variable_name_unique(variable_name, set(self._parameters))
+ parameter = parameter_definition.create_str_parameter(
+ display_name=display_name,
+ variable_name=variable_name,
+ default=default,
+ choices=choices,
+ description=description,
+ )
+ self._parameters[parameter.variable_name] = parameter
+
+ def set_parameters(self, parameter_overrides: RunTimeParamValuesType) -> None:
+ """Sets parameters to values given by client, validating them as well.
+
+ :meta private:
+
+ This is intended for Opentrons internal use only and is not a guaranteed API.
+ """
+ for variable_name, override_value in parameter_overrides.items():
+ try:
+ parameter = self._parameters[variable_name]
+ except KeyError:
+ raise ParameterDefinitionError(
+ f"Parameter {variable_name} is not defined as a parameter for this protocol."
+ )
+ validated_value = validation.ensure_value_type(
+ override_value, parameter.parameter_type
+ )
+ parameter.value = validated_value
+
+ def export_parameters_for_analysis(self) -> List[RunTimeParameter]:
+ """Exports all parameters into a protocol engine models for reporting in analysis.
+
+ :meta private:
+
+ This is intended for Opentrons internal use only and is not a guaranteed API.
+ """
+ return [
+ parameter.as_protocol_engine_type()
+ for parameter in self._parameters.values()
+ ]
+
+ def export_parameters_for_protocol(self) -> Parameters:
+ """Exports all parameters into a protocol run usable parameters object.
+
+ :meta private:
+
+ This is intended for Opentrons internal use only and is not a guaranteed API.
+ """
+ return Parameters(
+ parameters={
+ parameter.variable_name: parameter.value
+ for parameter in self._parameters.values()
+ }
+ )
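
An illustrative end-to-end sketch of the context (in a real run the runner constructs and drives it; the API version and values here are arbitrary, and the exact default behavior of non-overridden parameters is assumed):

    from opentrons.protocols.api_support.types import APIVersion
    from opentrons.protocol_api._parameter_context import ParameterContext

    ctx = ParameterContext(api_version=APIVersion(2, 18))
    ctx.add_int(
        display_name="Sample count",
        variable_name="sample_count",
        default=6,
        minimum=1,
        maximum=12,
        description="How many samples to process.",
    )
    ctx.add_bool(display_name="Dry run", variable_name="dry_run", default=False)

    ctx.set_parameters({"sample_count": 8})        # e.g. client-provided overrides
    run_params = ctx.export_parameters_for_protocol()
    print(run_params.sample_count, run_params.dry_run)  # -> 8 False
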
diff --git a/api/src/opentrons/protocol_api/_parameters.py b/api/src/opentrons/protocol_api/_parameters.py
new file mode 100644
index 00000000000..8176052111b
--- /dev/null
+++ b/api/src/opentrons/protocol_api/_parameters.py
@@ -0,0 +1,30 @@
+from typing import Dict, Optional, Any
+
+from opentrons.protocols.parameters.types import AllowedTypes, ParameterNameError
+
+
+class Parameters:
+ def __init__(self, parameters: Optional[Dict[str, AllowedTypes]] = None) -> None:
+ super().__setattr__("_values", {})
+ self._values: Dict[str, AllowedTypes] = {}
+ if parameters is not None:
+ for name, value in parameters.items():
+ self._initialize_parameter(name, value)
+
+ def __setattr__(self, key: str, value: Any) -> None:
+ if key in self._values:
+ raise AttributeError(f"Cannot overwrite protocol defined parameter {key}")
+ super().__setattr__(key, value)
+
+ def _initialize_parameter(self, variable_name: str, value: AllowedTypes) -> None:
+ if not hasattr(self, variable_name):
+ setattr(self, variable_name, value)
+ self._values[variable_name] = value
+ else:
+ raise ParameterNameError(
+ f"Cannot use {variable_name} as a variable name, either duplicates another"
+ f" parameter name, Opentrons reserved function, or Python built-in"
+ )
+
+ def get_all(self) -> Dict[str, AllowedTypes]:
+ return self._values
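
Each run-time value becomes a read-only attribute on the Parameters object. Illustrative usage:

    from opentrons.protocol_api._parameters import Parameters

    params = Parameters({"sample_count": 6, "dry_run": False})
    print(params.sample_count)   # -> 6
    print(params.get_all())      # -> {'sample_count': 6, 'dry_run': False}
    try:
        params.sample_count = 3  # protocol-defined parameters can't be overwritten
    except AttributeError as error:
        print(error)
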
diff --git a/api/src/opentrons/protocol_api/_types.py b/api/src/opentrons/protocol_api/_types.py
index dea183c2eab..9890e29c2bc 100644
--- a/api/src/opentrons/protocol_api/_types.py
+++ b/api/src/opentrons/protocol_api/_types.py
@@ -1,3 +1,4 @@
+from __future__ import annotations
from typing_extensions import Final
import enum
diff --git a/api/src/opentrons/protocol_api/_waste_chute.py b/api/src/opentrons/protocol_api/_waste_chute.py
deleted file mode 100644
index 7472327f941..00000000000
--- a/api/src/opentrons/protocol_api/_waste_chute.py
+++ /dev/null
@@ -1,11 +0,0 @@
-class WasteChute:
- """Represents a Flex waste chute.
-
- See :py:obj:`ProtocolContext.load_waste_chute`.
- """
-
- def __init__(
- self,
- with_staging_area_slot_d4: bool,
- ) -> None:
- self._with_staging_area_slot_d4 = with_staging_area_slot_d4
diff --git a/api/src/opentrons/protocol_api/_waste_chute_dimensions.py b/api/src/opentrons/protocol_api/_waste_chute_dimensions.py
deleted file mode 100644
index b0c94f85f0c..00000000000
--- a/api/src/opentrons/protocol_api/_waste_chute_dimensions.py
+++ /dev/null
@@ -1,18 +0,0 @@
-"""Constants for the dimensions of the Flex waste chute.
-
-TODO: These should be moved into shared-data and interpreted by Protocol Engine.
-"""
-
-
-from opentrons.types import Point
-
-
-SLOT_ORIGIN_TO_1_OR_8_TIP_A1 = Point(64, 21.91, 144)
-SLOT_ORIGIN_TO_96_TIP_A1 = Point(14.445, 42.085, 115)
-
-# TODO: This z-coord is misleading. We need to account for the labware height and the paddle height;
-# we can't define this as a single coordinate.
-SLOT_ORIGIN_TO_GRIPPER_JAW_CENTER = Point(64, 29, 136.5)
-
-# This includes the height of the optional lid.
-ENVELOPE_HEIGHT = 154
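
With these modules removed, the waste chute and the movable trash bin both live in opentrons.protocol_api.disposal_locations and are created through ProtocolContext load methods. Illustrative replacement usage (method names follow the 2.16 Python API docs and are assumptions outside this diff):

    # Inside a protocol's run(protocol) function, requiring apiLevel 2.16 or higher:
    trash = protocol.load_trash_bin("A3")  # returns a TrashBin
    chute = protocol.load_waste_chute()    # returns a WasteChute

    pipette.drop_tip(trash)  # disposal locations are now valid drop targets
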
diff --git a/api/src/opentrons/protocol_api/core/engine/__init__.py b/api/src/opentrons/protocol_api/core/engine/__init__.py
index ded1ff960e0..69287a4edfa 100644
--- a/api/src/opentrons/protocol_api/core/engine/__init__.py
+++ b/api/src/opentrons/protocol_api/core/engine/__init__.py
@@ -10,6 +10,7 @@
from .well import WellCore
ENGINE_CORE_API_VERSION: Final = APIVersion(2, 14)
+SET_OFFSET_RESTORED_API_VERSION: Final = APIVersion(2, 18)
__all__ = [
"ENGINE_CORE_API_VERSION",
diff --git a/api/src/opentrons/protocol_api/core/engine/deck_conflict.py b/api/src/opentrons/protocol_api/core/engine/deck_conflict.py
index 7314d8074cd..2a50964e757 100644
--- a/api/src/opentrons/protocol_api/core/engine/deck_conflict.py
+++ b/api/src/opentrons/protocol_api/core/engine/deck_conflict.py
@@ -1,19 +1,94 @@
"""A Protocol-Engine-friendly wrapper for opentrons.motion_planning.deck_conflict."""
-
+from __future__ import annotations
import itertools
-from typing import Collection, Dict, Optional, Tuple, overload
+import logging
+from typing import (
+ Collection,
+ Dict,
+ Optional,
+ Tuple,
+ overload,
+ Union,
+ TYPE_CHECKING,
+ List,
+)
+from opentrons_shared_data.errors.exceptions import MotionPlanningFailureError
+from opentrons_shared_data.module import FLEX_TC_LID_COLLISION_ZONE
+
+from opentrons.hardware_control.nozzle_manager import NozzleConfigurationType
from opentrons.hardware_control.modules.types import ModuleType
from opentrons.motion_planning import deck_conflict as wrapped_deck_conflict
+from opentrons.motion_planning import adjacent_slots_getters
+
from opentrons.protocol_engine import (
StateView,
DeckSlotLocation,
ModuleLocation,
OnLabwareLocation,
+ AddressableAreaLocation,
OFF_DECK_LOCATION,
+ WellLocation,
+ DropTipWellLocation,
)
from opentrons.protocol_engine.errors.exceptions import LabwareNotLoadedOnModuleError
-from opentrons.types import DeckSlotName
+from opentrons.protocol_engine.types import (
+ StagingSlotLocation,
+)
+from opentrons.types import DeckSlotName, StagingSlotName, Point
+from ...disposal_locations import TrashBin, WasteChute
+from . import point_calculations
+
+if TYPE_CHECKING:
+ from ...labware import Labware
+
+
+class PartialTipMovementNotAllowedError(MotionPlanningFailureError):
+ """Error raised when trying to perform a partial tip movement to an illegal location."""
+
+ def __init__(self, message: str) -> None:
+ super().__init__(
+ message=message,
+ )
+
+
+class UnsuitableTiprackForPipetteMotion(MotionPlanningFailureError):
+ """Error raised when trying to perform a pipette movement to a tip rack, based on adapter status."""
+
+ def __init__(self, message: str) -> None:
+ super().__init__(
+ message=message,
+ )
+
+
+_log = logging.getLogger(__name__)
+
+# TODO (spp, 2023-12-06): move this to a location like motion planning where we can
+# derive these values from geometry definitions
+# Also, verify y-axis extents values for the nozzle columns.
+# Bounding box measurements
+A12_column_front_left_bound = Point(x=-11.03, y=2)
+A12_column_back_right_bound = Point(x=526.77, y=506.2)
+
+_NOZZLE_PITCH = 9
+A1_column_front_left_bound = Point(
+ x=A12_column_front_left_bound.x - _NOZZLE_PITCH * 11, y=2
+)
+A1_column_back_right_bound = Point(
+ x=A12_column_back_right_bound.x - _NOZZLE_PITCH * 11, y=506.2
+)
+
+_FLEX_TC_LID_BACK_LEFT_PT = Point(
+ x=FLEX_TC_LID_COLLISION_ZONE["back_left"]["x"],
+ y=FLEX_TC_LID_COLLISION_ZONE["back_left"]["y"],
+ z=FLEX_TC_LID_COLLISION_ZONE["back_left"]["z"],
+)
+
+_FLEX_TC_LID_FRONT_RIGHT_PT = Point(
+ x=FLEX_TC_LID_COLLISION_ZONE["front_right"]["x"],
+ y=FLEX_TC_LID_COLLISION_ZONE["front_right"]["y"],
+ z=FLEX_TC_LID_COLLISION_ZONE["front_right"]["z"],
+)
@overload
@@ -22,6 +97,7 @@ def check(
engine_state: StateView,
existing_labware_ids: Collection[str],
existing_module_ids: Collection[str],
+ existing_disposal_locations: Collection[Union[Labware, WasteChute, TrashBin]],
new_labware_id: str,
) -> None:
pass
@@ -33,22 +109,37 @@ def check(
engine_state: StateView,
existing_labware_ids: Collection[str],
existing_module_ids: Collection[str],
+ existing_disposal_locations: Collection[Union[Labware, WasteChute, TrashBin]],
new_module_id: str,
) -> None:
pass
+@overload
+def check(
+ *,
+ engine_state: StateView,
+ existing_labware_ids: Collection[str],
+ existing_module_ids: Collection[str],
+ existing_disposal_locations: Collection[Union[Labware, WasteChute, TrashBin]],
+ new_trash_bin: TrashBin,
+) -> None:
+ pass
+
+
def check(
*,
engine_state: StateView,
existing_labware_ids: Collection[str],
existing_module_ids: Collection[str],
+ existing_disposal_locations: Collection[Union[Labware, WasteChute, TrashBin]],
# TODO(mm, 2023-02-23): This interface is impossible to use correctly. In order
# to have new_labware_id or new_module_id, the caller needs to have already loaded
# the new item into Protocol Engine--but then, it's too late to do deck conflict.
# checking. Find a way to do deck conflict checking before the new item is loaded.
new_labware_id: Optional[str] = None,
new_module_id: Optional[str] = None,
+ new_trash_bin: Optional[TrashBin] = None,
) -> None:
"""Check for conflicts between items on the deck.
@@ -73,6 +164,8 @@ def check(
new_location_and_item = _map_labware(engine_state, new_labware_id)
if new_module_id is not None:
new_location_and_item = _map_module(engine_state, new_module_id)
+ if new_trash_bin is not None:
+ new_location_and_item = _map_disposal_location(new_trash_bin)
if new_location_and_item is None:
# The new item should be excluded from deck conflict checking. Nothing to do.
@@ -90,9 +183,19 @@ def check(
)
mapped_existing_modules = (m for m in all_existing_modules if m is not None)
- existing_items: Dict[DeckSlotName, wrapped_deck_conflict.DeckItem] = {}
+ all_existing_disposal_locations = (
+ _map_disposal_location(disposal_location)
+ for disposal_location in existing_disposal_locations
+ )
+ mapped_disposal_locations = (
+ m for m in all_existing_disposal_locations if m is not None
+ )
+
+ existing_items: Dict[
+ Union[DeckSlotName, StagingSlotName], wrapped_deck_conflict.DeckItem
+ ] = {}
for existing_location, existing_item in itertools.chain(
- mapped_existing_labware, mapped_existing_modules
+ mapped_existing_labware, mapped_existing_modules, mapped_disposal_locations
):
assert existing_location not in existing_items
existing_items[existing_location] = existing_item
@@ -105,13 +208,293 @@ def check(
)
+# TODO (spp, 2023-02-16): move pipette movement safety checks to its own separate file.
+def check_safe_for_pipette_movement(
+ engine_state: StateView,
+ pipette_id: str,
+ labware_id: str,
+ well_name: str,
+ well_location: Union[WellLocation, DropTipWellLocation],
+) -> None:
+ """Check if the labware is safe to move to with a pipette in partial tip configuration.
+
+ Args:
+ engine_state: engine state view
+ pipette_id: ID of the pipette to be moved
+ labware_id: ID of the labware we are moving to
+ well_name: Name of the well to move to
+ well_location: exact location within the well to move to
+ """
+ # TODO (spp, 2023-02-06): remove this check after thorough testing.
+ # This function is capable of checking for movement conflict regardless of
+ # nozzle configuration.
+ if not engine_state.pipettes.get_is_partially_configured(pipette_id):
+ return
+
+ if isinstance(well_location, DropTipWellLocation):
+ # convert to WellLocation
+ well_location = engine_state.geometry.get_checked_tip_drop_location(
+ pipette_id=pipette_id,
+ labware_id=labware_id,
+ well_location=well_location,
+ partially_configured=True,
+ )
+ well_location_point = engine_state.geometry.get_well_position(
+ labware_id=labware_id, well_name=well_name, well_location=well_location
+ )
+ primary_nozzle = engine_state.pipettes.get_primary_nozzle(pipette_id)
+
+ if not _is_within_pipette_extents(
+ engine_state=engine_state, pipette_id=pipette_id, location=well_location_point
+ ):
+ raise PartialTipMovementNotAllowedError(
+ f"Requested motion with the {primary_nozzle} nozzle partial configuration"
+ f" is outside of robot bounds for the pipette."
+ )
+
+ labware_slot = engine_state.geometry.get_ancestor_slot_name(labware_id)
+ pipette_bounds_at_well_location = (
+ engine_state.pipettes.get_pipette_bounds_at_specified_move_to_position(
+ pipette_id=pipette_id, destination_position=well_location_point
+ )
+ )
+ surrounding_slots = adjacent_slots_getters.get_surrounding_slots(
+ slot=labware_slot.as_int(), robot_type=engine_state.config.robot_type
+ )
+
+ if _will_collide_with_thermocycler_lid(
+ engine_state=engine_state,
+ pipette_bounds=pipette_bounds_at_well_location,
+ surrounding_regular_slots=surrounding_slots.regular_slots,
+ ):
+ raise PartialTipMovementNotAllowedError(
+ f"Moving to {engine_state.labware.get_display_name(labware_id)} in slot"
+ f" {labware_slot} with {primary_nozzle} nozzle partial configuration"
+ f" will result in collision with thermocycler lid in deck slot A1."
+ )
+
+ for regular_slot in surrounding_slots.regular_slots:
+ if _slot_has_potential_colliding_object(
+ engine_state=engine_state,
+ pipette_bounds=pipette_bounds_at_well_location,
+ surrounding_slot=regular_slot,
+ ):
+ raise PartialTipMovementNotAllowedError(
+ f"Moving to {engine_state.labware.get_display_name(labware_id)} in slot"
+ f" {labware_slot} with {primary_nozzle} nozzle partial configuration"
+ f" will result in collision with items in deck slot {regular_slot}."
+ )
+ for staging_slot in surrounding_slots.staging_slots:
+ if _slot_has_potential_colliding_object(
+ engine_state=engine_state,
+ pipette_bounds=pipette_bounds_at_well_location,
+ surrounding_slot=staging_slot,
+ ):
+ raise PartialTipMovementNotAllowedError(
+ f"Moving to {engine_state.labware.get_display_name(labware_id)} in slot"
+ f" {labware_slot} with {primary_nozzle} nozzle partial configuration"
+ f" will result in collision with items in staging slot {staging_slot}."
+ )
+
+
+def _slot_has_potential_colliding_object(
+ engine_state: StateView,
+ pipette_bounds: Tuple[Point, Point, Point, Point],
+ surrounding_slot: Union[DeckSlotName, StagingSlotName],
+) -> bool:
+ """Return the slot, if any, that has an item that the pipette might collide into."""
+ # Check if slot overlaps with pipette position
+ slot_pos = engine_state.addressable_areas.get_addressable_area_position(
+ addressable_area_name=surrounding_slot.id,
+ do_compatibility_check=False,
+ )
+ slot_bounds = engine_state.addressable_areas.get_addressable_area_bounding_box(
+ addressable_area_name=surrounding_slot.id,
+ do_compatibility_check=False,
+ )
+ slot_back_left_coords = Point(slot_pos.x, slot_pos.y + slot_bounds.y, slot_pos.z)
+ slot_front_right_coords = Point(slot_pos.x + slot_bounds.x, slot_pos.y, slot_pos.z)
+
+ # If slot overlaps with pipette bounds
+ if point_calculations.are_overlapping_rectangles(
+ rectangle1=(pipette_bounds[0], pipette_bounds[1]),
+ rectangle2=(slot_back_left_coords, slot_front_right_coords),
+ ):
+ # Check z-height of items in overlapping slot
+ if isinstance(surrounding_slot, DeckSlotName):
+ slot_highest_z = engine_state.geometry.get_highest_z_in_slot(
+ DeckSlotLocation(slotName=surrounding_slot)
+ )
+ else:
+ slot_highest_z = engine_state.geometry.get_highest_z_in_slot(
+ StagingSlotLocation(slotName=surrounding_slot)
+ )
+ return slot_highest_z >= pipette_bounds[0].z
+ return False
+
+
+def _will_collide_with_thermocycler_lid(
+ engine_state: StateView,
+ pipette_bounds: Tuple[Point, Point, Point, Point],
+ surrounding_regular_slots: List[DeckSlotName],
+) -> bool:
+ """Return whether the pipette might collide with thermocycler's lid/clips on a Flex.
+
+ If any of the pipette's bounding vertices lie inside the thermocycler's no-go zone
+ (the area to the left of, behind, and below the protruding clips on the
+ thermocycler's lid), then we mark the movement as a possible collision.
+
+ This can produce false positives when an 8-channel pipette accesses the thermocycler
+ labware in a position where it sits between the clips without touching either clip.
+ But that is the tradeoff between a complicated check involving accurate positions of
+ all entities involved and a crude check that disallows all partial tip movements
+ around the thermocycler.
+ """
+ # TODO (spp, 2024-02-27): Improvements:
+ # - make the check dynamic according to lid state:
+ # - if lid is open, check if pipette is in no-go zone
+ # - if lid is closed, use the closed lid height to check for conflict
+ if (
+ DeckSlotName.SLOT_A1 in surrounding_regular_slots
+ and engine_state.modules.is_flex_deck_with_thermocycler()
+ ):
+ return (
+ point_calculations.are_overlapping_rectangles(
+ rectangle1=(_FLEX_TC_LID_BACK_LEFT_PT, _FLEX_TC_LID_FRONT_RIGHT_PT),
+ rectangle2=(pipette_bounds[0], pipette_bounds[1]),
+ )
+ and pipette_bounds[0].z <= _FLEX_TC_LID_BACK_LEFT_PT.z
+ )
+
+ return False
+
+
+def check_safe_for_tip_pickup_and_return(
+ engine_state: StateView,
+ pipette_id: str,
+ labware_id: str,
+) -> None:
+ """Check if the presence or absence of a tiprack adapter might cause any pipette movement issues.
+
+ A 96-channel pipette picks up tips using cam action when it is configured to use ALL
+ nozzles. For this, the tip rack needs to be on the Flex 96 channel tiprack adapter
+ (or similar), or the tips will not be picked up.
+
+ On the other hand, if the pipette is in a partial nozzle configuration, it uses the
+ usual pipette presses to pick up tips, in which case having the tip rack on the
+ Flex 96 channel tiprack adapter (or similar) will cause the pipette to crash against
+ the adapter posts.
+
+ In order to check whether the 96-channel can move and pick up or drop tips safely,
+ this method checks the tip rack's height relative to its adapter rather than checking
+ for the specific official adapter, since users might create custom labware and/or
+ definitions compatible with the official adapter.
+ """
+ if engine_state.pipettes.get_channels(pipette_id) != 96:
+ # Adapters only matter for the 96-channel pipette.
+ return
+
+ is_partial_config = engine_state.pipettes.get_is_partially_configured(pipette_id)
+ tiprack_name = engine_state.labware.get_display_name(labware_id)
+ tiprack_parent = engine_state.labware.get_location(labware_id)
+ if isinstance(tiprack_parent, OnLabwareLocation): # tiprack is on an adapter
+ is_96_ch_tiprack_adapter = engine_state.labware.get_has_quirk(
+ labware_id=tiprack_parent.labwareId, quirk="tiprackAdapterFor96Channel"
+ )
+ tiprack_height = engine_state.labware.get_dimensions(labware_id).z
+ adapter_height = engine_state.labware.get_dimensions(tiprack_parent.labwareId).z
+ if is_partial_config and tiprack_height < adapter_height:
+ raise PartialTipMovementNotAllowedError(
+ f"{tiprack_name} cannot be on an adapter taller than the tip rack"
+ f" when picking up fewer than 96 tips."
+ )
+ elif not is_partial_config and not is_96_ch_tiprack_adapter:
+ raise UnsuitableTiprackForPipetteMotion(
+ f"{tiprack_name} must be on an Opentrons Flex 96 Tip Rack Adapter"
+ f" in order to pick up or return all 96 tips simultaneously."
+ )
+
+ elif (
+ not is_partial_config
+ ): # tiprack is not on adapter and pipette is in full config
+ raise UnsuitableTiprackForPipetteMotion(
+ f"{tiprack_name} must be on an Opentrons Flex 96 Tip Rack Adapter"
+ f" in order to pick up or return all 96 tips simultaneously."
+ )
+
+
+# TODO (spp, 2023-02-06): update the extents check to use all nozzle bounds instead of
+# just position of primary nozzle when checking if the pipette is out-of-bounds
+def _is_within_pipette_extents(
+ engine_state: StateView,
+ pipette_id: str,
+ location: Point,
+) -> bool:
+ """Whether a given point is within the extents of a configured pipette on the specified robot."""
+ robot_type = engine_state.config.robot_type
+ pipette_channels = engine_state.pipettes.get_channels(pipette_id)
+ nozzle_config = engine_state.pipettes.get_nozzle_layout_type(pipette_id)
+ primary_nozzle = engine_state.pipettes.get_primary_nozzle(pipette_id)
+ if robot_type == "OT-3 Standard":
+ if pipette_channels == 96 and nozzle_config == NozzleConfigurationType.COLUMN:
+ # TODO (spp, 2023-12-18): change this eventually to use column mappings in
+ # the pipette geometry definitions.
+ if primary_nozzle == "A12":
+ return (
+ A12_column_front_left_bound.x
+ <= location.x
+ <= A12_column_back_right_bound.x
+ and A12_column_front_left_bound.y
+ <= location.y
+ <= A12_column_back_right_bound.y
+ )
+ elif primary_nozzle == "A1":
+ return (
+ A1_column_front_left_bound.x
+ <= location.x
+ <= A1_column_back_right_bound.x
+ and A1_column_front_left_bound.y
+ <= location.y
+ <= A1_column_back_right_bound.y
+ )
+ # TODO (spp, 2023-11-07): check for 8-channel nozzle A1 & H1 extents on Flex & OT2
+ return True
+
+
def _map_labware(
engine_state: StateView,
labware_id: str,
-) -> Optional[Tuple[DeckSlotName, wrapped_deck_conflict.DeckItem]]:
+) -> Optional[
+ Tuple[Union[DeckSlotName, StagingSlotName], wrapped_deck_conflict.DeckItem]
+]:
location_from_engine = engine_state.labware.get_location(labware_id=labware_id)
- if isinstance(location_from_engine, DeckSlotLocation):
+ if isinstance(location_from_engine, AddressableAreaLocation):
+ # This is guaranteed to be either a deck slot name or a staging slot name
+ slot: Union[DeckSlotName, StagingSlotName]
+ try:
+ slot = DeckSlotName.from_primitive(location_from_engine.addressableAreaName)
+ except ValueError:
+ slot = StagingSlotName.from_primitive(
+ location_from_engine.addressableAreaName
+ )
+ return (
+ slot,
+ wrapped_deck_conflict.Labware(
+ name_for_errors=engine_state.labware.get_load_name(
+ labware_id=labware_id
+ ),
+ highest_z=engine_state.geometry.get_labware_highest_z(
+ labware_id=labware_id
+ ),
+ uri=engine_state.labware.get_definition_uri(labware_id=labware_id),
+ is_fixed_trash=engine_state.labware.is_fixed_trash(
+ labware_id=labware_id
+ ),
+ ),
+ )
+
+ elif isinstance(location_from_engine, DeckSlotLocation):
# This labware is loaded directly into a deck slot.
# Map it to a wrapped_deck_conflict.Labware.
return (
@@ -171,6 +554,14 @@ def _map_module(
highest_z_including_labware=highest_z_including_labware,
),
)
+ elif module_type == ModuleType.MAGNETIC_BLOCK:
+ return (
+ mapped_location,
+ wrapped_deck_conflict.MagneticBlockModule(
+ name_for_errors=name_for_errors,
+ highest_z_including_labware=highest_z_including_labware,
+ ),
+ )
elif module_type == ModuleType.THERMOCYCLER:
return (
mapped_location,
@@ -192,6 +583,20 @@ def _map_module(
)
+def _map_disposal_location(
+ disposal_location: Union[Labware, WasteChute, TrashBin],
+) -> Optional[Tuple[DeckSlotName, wrapped_deck_conflict.DeckItem]]:
+ if isinstance(disposal_location, TrashBin):
+ return (
+ disposal_location.location,
+ wrapped_deck_conflict.TrashBin(
+ name_for_errors="trash bin", highest_z=disposal_location.height
+ ),
+ )
+ else:
+ return None
+
+
def _deck_slot_to_int(deck_slot_location: DeckSlotLocation) -> int:
return deck_slot_location.slotName.as_int()
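The column-bound constants above encode a simple shift: the A1-column envelope is the A12-column envelope moved 11 nozzle pitches (9 mm each) to the left. Below is a minimal standalone sketch of that arithmetic and the resulting bounds check, using a local Point stand-in; the real `_is_within_pipette_extents` additionally consults engine state for robot type, channel count, and nozzle layout.

from typing import NamedTuple


class Point(NamedTuple):
    x: float
    y: float


_NOZZLE_PITCH = 9  # mm between adjacent nozzle columns on the 96-channel

# Bounds for a COLUMN configuration whose primary nozzle is A12 (values from the diff above).
a12_front_left = Point(x=-11.03, y=2)
a12_back_right = Point(x=526.77, y=506.2)

# Shifting 11 columns to the left yields the A1-column bounds.
a1_front_left = Point(x=a12_front_left.x - _NOZZLE_PITCH * 11, y=a12_front_left.y)
a1_back_right = Point(x=a12_back_right.x - _NOZZLE_PITCH * 11, y=a12_back_right.y)


def is_within_bounds(location: Point, front_left: Point, back_right: Point) -> bool:
    """Return True if the x/y location falls inside the rectangular bound."""
    return (
        front_left.x <= location.x <= back_right.x
        and front_left.y <= location.y <= back_right.y
    )


print(is_within_bounds(Point(x=100.0, y=300.0), a1_front_left, a1_back_right))  # True
print(is_within_bounds(Point(x=450.0, y=300.0), a1_front_left, a1_back_right))  # False: beyond A1-column reach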
diff --git a/api/src/opentrons/protocol_api/core/engine/instrument.py b/api/src/opentrons/protocol_api/core/engine/instrument.py
index 0c762358c14..d2057a7605d 100644
--- a/api/src/opentrons/protocol_api/core/engine/instrument.py
+++ b/api/src/opentrons/protocol_api/core/engine/instrument.py
@@ -1,7 +1,8 @@
"""ProtocolEngine-based InstrumentContext core implementation."""
from __future__ import annotations
-from typing import Optional, TYPE_CHECKING, cast
+from typing import Optional, TYPE_CHECKING, cast, Union
+from opentrons.protocols.api_support.types import APIVersion
from opentrons.types import Location, Mount
from opentrons.hardware_control import SyncHardwareAPI
@@ -14,7 +15,7 @@
WellLocation,
WellOrigin,
WellOffset,
- EmptyNozzleLayoutConfiguration,
+ AllNozzleLayoutConfiguration,
SingleNozzleLayoutConfiguration,
RowNozzleLayoutConfiguration,
ColumnNozzleLayoutConfiguration,
@@ -23,25 +24,30 @@
from opentrons.protocol_engine.types import (
PRIMARY_NOZZLE_LITERAL,
NozzleLayoutConfigurationType,
+ AddressableOffsetVector,
)
from opentrons.protocol_engine.errors.exceptions import TipNotAttachedError
from opentrons.protocol_engine.clients import SyncClient as EngineClient
from opentrons.protocols.api_support.definitions import MAX_SUPPORTED_VERSION
-from opentrons.types import Point, DeckSlotName
from opentrons_shared_data.pipette.dev_types import PipetteNameType
from opentrons.protocol_api._nozzle_layout import NozzleLayout
+from opentrons.hardware_control.nozzle_manager import NozzleConfigurationType
+from opentrons.hardware_control.nozzle_manager import NozzleMap
+from . import deck_conflict, overlap_versions
from ..instrument import AbstractInstrument
from .well import WellCore
-from ..._waste_chute import WasteChute
-from ... import _waste_chute_dimensions
+from ...disposal_locations import TrashBin, WasteChute
if TYPE_CHECKING:
from .protocol import ProtocolCore
+_DISPENSE_VOLUME_VALIDATION_ADDED_IN = APIVersion(2, 17)
+
+
class InstrumentCore(AbstractInstrument[WellCore]):
"""Instrument API core using a ProtocolEngine.
@@ -140,7 +146,13 @@ def aspirate(
absolute_point=location.point,
)
)
-
+ deck_conflict.check_safe_for_pipette_movement(
+ engine_state=self._engine_client.state,
+ pipette_id=self._pipette_id,
+ labware_id=labware_id,
+ well_name=well_name,
+ well_location=well_location,
+ )
self._engine_client.aspirate(
pipette_id=self._pipette_id,
labware_id=labware_id,
@@ -154,7 +166,7 @@ def aspirate(
def dispense(
self,
- location: Location,
+ location: Union[Location, TrashBin, WasteChute],
well_core: Optional[WellCore],
volume: float,
rate: float,
@@ -172,17 +184,31 @@ def dispense(
in_place: whether this is an in-place command.
push_out: The amount to push the plunger below bottom position.
"""
+ if self._protocol_core.api_version < _DISPENSE_VOLUME_VALIDATION_ADDED_IN:
+ # In older API versions, when you try to dispense more than you can,
+ # it gets clamped.
+ volume = min(volume, self.get_current_volume())
+ else:
+ # Newer API versions raise an error if you try to dispense more than
+ # you can. Let the error come from Protocol Engine's validation.
+ pass
+
if well_core is None:
if not in_place:
- self._engine_client.move_to_coordinates(
- pipette_id=self._pipette_id,
- coordinates=DeckPoint(
- x=location.point.x, y=location.point.y, z=location.point.z
- ),
- minimum_z_height=None,
- force_direct=False,
- speed=None,
- )
+ if isinstance(location, (TrashBin, WasteChute)):
+ self._move_to_disposal_location(
+ disposal_location=location, force_direct=False, speed=None
+ )
+ else:
+ self._engine_client.move_to_coordinates(
+ pipette_id=self._pipette_id,
+ coordinates=DeckPoint(
+ x=location.point.x, y=location.point.y, z=location.point.z
+ ),
+ minimum_z_height=None,
+ force_direct=False,
+ speed=None,
+ )
self._engine_client.dispense_in_place(
pipette_id=self._pipette_id,
@@ -191,6 +217,8 @@ def dispense(
push_out=push_out,
)
else:
+ if isinstance(location, (TrashBin, WasteChute)):
+ raise ValueError("Trash Bin and Waste Chute have no Wells.")
well_name = well_core.get_name()
labware_id = well_core.labware_id
@@ -201,7 +229,13 @@ def dispense(
absolute_point=location.point,
)
)
-
+ deck_conflict.check_safe_for_pipette_movement(
+ engine_state=self._engine_client.state,
+ pipette_id=self._pipette_id,
+ labware_id=labware_id,
+ well_name=well_name,
+ well_location=well_location,
+ )
self._engine_client.dispense(
pipette_id=self._pipette_id,
labware_id=labware_id,
@@ -212,10 +246,18 @@ def dispense(
push_out=push_out,
)
- self._protocol_core.set_last_location(location=location, mount=self.get_mount())
+ if isinstance(location, (TrashBin, WasteChute)):
+ self._protocol_core.set_last_location(location=None, mount=self.get_mount())
+ else:
+ self._protocol_core.set_last_location(
+ location=location, mount=self.get_mount()
+ )
def blow_out(
- self, location: Location, well_core: Optional[WellCore], in_place: bool
+ self,
+ location: Union[Location, TrashBin, WasteChute],
+ well_core: Optional[WellCore],
+ in_place: bool,
) -> None:
"""Blow liquid out of the tip.
@@ -227,20 +269,27 @@ def blow_out(
flow_rate = self.get_blow_out_flow_rate(1.0)
if well_core is None:
if not in_place:
- self._engine_client.move_to_coordinates(
- pipette_id=self._pipette_id,
- coordinates=DeckPoint(
- x=location.point.x, y=location.point.y, z=location.point.z
- ),
- force_direct=False,
- minimum_z_height=None,
- speed=None,
- )
+ if isinstance(location, (TrashBin, WasteChute)):
+ self._move_to_disposal_location(
+ disposal_location=location, force_direct=False, speed=None
+ )
+ else:
+ self._engine_client.move_to_coordinates(
+ pipette_id=self._pipette_id,
+ coordinates=DeckPoint(
+ x=location.point.x, y=location.point.y, z=location.point.z
+ ),
+ force_direct=False,
+ minimum_z_height=None,
+ speed=None,
+ )
self._engine_client.blow_out_in_place(
pipette_id=self._pipette_id, flow_rate=flow_rate
)
else:
+ if isinstance(location, (TrashBin, WasteChute)):
+ raise ValueError("Trash Bin and Waste Chute have no Wells.")
well_name = well_core.get_name()
labware_id = well_core.labware_id
@@ -251,7 +300,13 @@ def blow_out(
absolute_point=location.point,
)
)
-
+ deck_conflict.check_safe_for_pipette_movement(
+ engine_state=self._engine_client.state,
+ pipette_id=self._pipette_id,
+ labware_id=labware_id,
+ well_name=well_name,
+ well_location=well_location,
+ )
self._engine_client.blow_out(
pipette_id=self._pipette_id,
labware_id=labware_id,
@@ -262,7 +317,12 @@ def blow_out(
flow_rate=flow_rate,
)
- self._protocol_core.set_last_location(location=location, mount=self.get_mount())
+ if isinstance(location, (TrashBin, WasteChute)):
+ self._protocol_core.set_last_location(location=None, mount=self.get_mount())
+ else:
+ self._protocol_core.set_last_location(
+ location=location, mount=self.get_mount()
+ )
def touch_tip(
self,
@@ -288,7 +348,13 @@ def touch_tip(
well_location = WellLocation(
origin=WellOrigin.TOP, offset=WellOffset(x=0, y=0, z=z_offset)
)
-
+ deck_conflict.check_safe_for_pipette_movement(
+ engine_state=self._engine_client.state,
+ pipette_id=self._pipette_id,
+ labware_id=labware_id,
+ well_name=well_name,
+ well_location=well_location,
+ )
self._engine_client.touch_tip(
pipette_id=self._pipette_id,
labware_id=labware_id,
@@ -330,14 +396,30 @@ def pick_up_tip(
well_name=well_name,
absolute_point=location.point,
)
+ deck_conflict.check_safe_for_tip_pickup_and_return(
+ engine_state=self._engine_client.state,
+ pipette_id=self._pipette_id,
+ labware_id=labware_id,
+ )
+ deck_conflict.check_safe_for_pipette_movement(
+ engine_state=self._engine_client.state,
+ pipette_id=self._pipette_id,
+ labware_id=labware_id,
+ well_name=well_name,
+ well_location=well_location,
+ )
- self._engine_client.pick_up_tip(
+ self._engine_client.pick_up_tip_wait_for_recovery(
pipette_id=self._pipette_id,
labware_id=labware_id,
well_name=well_name,
well_location=well_location,
)
+ # Set the "last location" unconditionally, even if the command failed
+ # and was recovered from, and we don't know whether the pipette is physically here.
+ # This isn't used for path planning, but rather for implicit destination
+ # selection like in `pipette.aspirate(location=None)`.
self._protocol_core.set_last_location(location=location, mount=self.get_mount())
def drop_tip(
@@ -350,7 +432,7 @@ def drop_tip(
"""Move to and drop a tip into a given well.
Args:
- location: The location of the well we're picking up from.
+ location: The location of the well we're dropping the tip into.
Used to calculate the relative well offset for the drop command.
well_core: The well we're dropping into
home_after: Whether to home the pipette after the tip is dropped.
@@ -376,6 +458,19 @@ def drop_tip(
else:
well_location = DropTipWellLocation()
+ if self._engine_client.state.labware.is_tiprack(labware_id):
+ deck_conflict.check_safe_for_tip_pickup_and_return(
+ engine_state=self._engine_client.state,
+ pipette_id=self._pipette_id,
+ labware_id=labware_id,
+ )
+ deck_conflict.check_safe_for_pipette_movement(
+ engine_state=self._engine_client.state,
+ pipette_id=self._pipette_id,
+ labware_id=labware_id,
+ well_name=well_name,
+ well_location=well_location,
+ )
self._engine_client.drop_tip(
pipette_id=self._pipette_id,
labware_id=labware_id,
@@ -387,54 +482,70 @@ def drop_tip(
self._protocol_core.set_last_location(location=location, mount=self.get_mount())
- def _drop_tip_in_place(self, home_after: Optional[bool]) -> None:
- self._engine_client.drop_tip_in_place(
- pipette_id=self._pipette_id,
- home_after=home_after,
- )
-
- def drop_tip_in_waste_chute(
- self, waste_chute: WasteChute, home_after: Optional[bool]
+ def drop_tip_in_disposal_location(
+ self,
+ disposal_location: Union[TrashBin, WasteChute],
+ home_after: Optional[bool],
+ alternate_tip_drop: bool = False,
) -> None:
- # TODO: Can we get away with implementing this in two steps like this,
- # or does drop_tip() need to take the waste chute location because the z-height
- # depends on the intent of dropping tip? How would Protocol Designer want to implement
- # this?
- self._move_to_waste_chute(
- waste_chute,
+ self._move_to_disposal_location(
+ disposal_location,
force_direct=False,
speed=None,
+ alternate_tip_drop=alternate_tip_drop,
)
self._drop_tip_in_place(home_after=home_after)
+ self._protocol_core.set_last_location(location=None, mount=self.get_mount())
- def _move_to_waste_chute(
+ def _move_to_disposal_location(
self,
- waste_chute: WasteChute,
+ disposal_location: Union[TrashBin, WasteChute],
force_direct: bool,
speed: Optional[float],
+ alternate_tip_drop: bool = False,
) -> None:
- if self.get_channels() == 96:
- slot_origin_to_tip_a1 = _waste_chute_dimensions.SLOT_ORIGIN_TO_96_TIP_A1
- else:
- slot_origin_to_tip_a1 = _waste_chute_dimensions.SLOT_ORIGIN_TO_1_OR_8_TIP_A1
-
- # TODO: All of this logic to compute the destination coordinate belongs in Protocol Engine.
- slot_d3 = self._protocol_core.get_slot_definition(DeckSlotName.SLOT_D3)
- slot_d3_origin = Point(*slot_d3["position"])
- destination_point = slot_d3_origin + slot_origin_to_tip_a1
-
- # Normally, we use a 10 mm margin. (DEFAULT_GENERAL_ARC_Z_MARGIN.) Unfortunately, with
- # 1000µL tips, we have slightly not enough room to meet that margin. We can make the margin
- # as big as 7.5 mm before the motion planner raises an error. So, use that reduced margin,
- # with a little more subtracted in order to leave wiggle room for pipette calibration.
- minimum_z = _waste_chute_dimensions.ENVELOPE_HEIGHT + 5.0
-
- self.move_to(
- Location(destination_point, labware=None),
- well_core=None,
- force_direct=force_direct,
- minimum_z_height=minimum_z,
- speed=speed,
+ # TODO (nd, 2023-11-30): give appropriate offset when finalized
+ # https://opentrons.atlassian.net/browse/RSS-391
+
+ disposal_offset = disposal_location.offset
+ offset = AddressableOffsetVector(
+ x=disposal_offset.x, y=disposal_offset.y, z=disposal_offset.z
+ )
+
+ if isinstance(disposal_location, TrashBin):
+ addressable_area_name = disposal_location.area_name
+ self._engine_client.move_to_addressable_area_for_drop_tip(
+ pipette_id=self._pipette_id,
+ addressable_area_name=addressable_area_name,
+ offset=offset,
+ force_direct=force_direct,
+ speed=speed,
+ minimum_z_height=None,
+ alternate_drop_location=alternate_tip_drop,
+ ignore_tip_configuration=True,
+ )
+
+ if isinstance(disposal_location, WasteChute):
+ num_channels = self.get_channels()
+ addressable_area_name = {
+ 1: "1ChannelWasteChute",
+ 8: "8ChannelWasteChute",
+ 96: "96ChannelWasteChute",
+ }[num_channels]
+
+ self._engine_client.move_to_addressable_area(
+ pipette_id=self._pipette_id,
+ addressable_area_name=addressable_area_name,
+ offset=offset,
+ force_direct=force_direct,
+ speed=speed,
+ minimum_z_height=None,
+ )
+
+ def _drop_tip_in_place(self, home_after: Optional[bool]) -> None:
+ self._engine_client.drop_tip_in_place(
+ pipette_id=self._pipette_id,
+ home_after=home_after,
)
def home(self) -> None:
@@ -452,13 +563,15 @@ def home_plunger(self) -> None:
def move_to(
self,
- location: Location,
+ location: Union[Location, TrashBin, WasteChute],
well_core: Optional[WellCore],
force_direct: bool,
minimum_z_height: Optional[float],
speed: Optional[float],
) -> None:
if well_core is not None:
+ if isinstance(location, (TrashBin, WasteChute)):
+ raise ValueError("Trash Bin and Waste Chute have no Wells.")
labware_id = well_core.labware_id
well_name = well_core.get_name()
well_location = (
@@ -479,16 +592,26 @@ def move_to(
speed=speed,
)
else:
- self._engine_client.move_to_coordinates(
- pipette_id=self._pipette_id,
- coordinates=DeckPoint(
- x=location.point.x, y=location.point.y, z=location.point.z
- ),
- minimum_z_height=minimum_z_height,
- force_direct=force_direct,
- speed=speed,
+ if isinstance(location, (TrashBin, WasteChute)):
+ self._move_to_disposal_location(
+ disposal_location=location, force_direct=force_direct, speed=speed
+ )
+ else:
+ self._engine_client.move_to_coordinates(
+ pipette_id=self._pipette_id,
+ coordinates=DeckPoint(
+ x=location.point.x, y=location.point.y, z=location.point.z
+ ),
+ minimum_z_height=minimum_z_height,
+ force_direct=force_direct,
+ speed=speed,
+ )
+ if isinstance(location, (TrashBin, WasteChute)):
+ self._protocol_core.set_last_location(location=None, mount=self.get_mount())
+ else:
+ self._protocol_core.set_last_location(
+ location=location, mount=self.get_mount()
)
- self._protocol_core.set_last_location(location=location, mount=self.get_mount())
def get_mount(self) -> Mount:
"""Get the mount the pipette is attached to."""
@@ -553,6 +676,14 @@ def get_hardware_state(self) -> PipetteDict:
def get_channels(self) -> int:
return self._engine_client.state.tips.get_pipette_channels(self._pipette_id)
+ def get_active_channels(self) -> int:
+ return self._engine_client.state.tips.get_pipette_active_channels(
+ self._pipette_id
+ )
+
+ def get_nozzle_map(self) -> NozzleMap:
+ return self._engine_client.state.tips.get_pipette_nozzle_map(self._pipette_id)
+
def has_tip(self) -> bool:
return (
self._engine_client.state.pipettes.get_attached_tip(self._pipette_id)
@@ -574,6 +705,28 @@ def get_dispense_flow_rate(self, rate: float = 1.0) -> float:
def get_blow_out_flow_rate(self, rate: float = 1.0) -> float:
return self._blow_out_flow_rate * rate
+ def get_nozzle_configuration(self) -> NozzleConfigurationType:
+ return self._engine_client.state.pipettes.get_nozzle_layout_type(
+ self._pipette_id
+ )
+
+ def is_tip_tracking_available(self) -> bool:
+ primary_nozzle = self._engine_client.state.pipettes.get_primary_nozzle(
+ self._pipette_id
+ )
+ if self.get_nozzle_configuration() == NozzleConfigurationType.FULL:
+ return True
+ else:
+ if self.get_channels() == 96:
+ return True
+ if self.get_channels() == 8:
+ # TODO: (cb, 03/06/24): Enable automatic tip tracking on the 8 channel pipettes once PAPI support exists
+ return (
+ self.get_nozzle_configuration() == NozzleConfigurationType.SINGLE
+ and primary_nozzle == "H1"
+ )
+ return False
+
def set_flow_rate(
self,
aspirate: Optional[float] = None,
@@ -592,7 +745,11 @@ def set_flow_rate(
def configure_for_volume(self, volume: float) -> None:
self._engine_client.configure_for_volume(
- pipette_id=self._pipette_id, volume=volume
+ pipette_id=self._pipette_id,
+ volume=volume,
+ tip_overlap_version=overlap_versions.overlap_for_api_version(
+ self._protocol_core.api_version
+ ),
)
def prepare_to_aspirate(self) -> None:
@@ -604,29 +761,33 @@ def configure_nozzle_layout(
primary_nozzle: Optional[str],
front_right_nozzle: Optional[str],
) -> None:
-
if style == NozzleLayout.COLUMN:
configuration_model: NozzleLayoutConfigurationType = (
ColumnNozzleLayoutConfiguration(
- primary_nozzle=cast(PRIMARY_NOZZLE_LITERAL, primary_nozzle)
+ primaryNozzle=cast(PRIMARY_NOZZLE_LITERAL, primary_nozzle)
)
)
elif style == NozzleLayout.ROW:
configuration_model = RowNozzleLayoutConfiguration(
- primary_nozzle=cast(PRIMARY_NOZZLE_LITERAL, primary_nozzle)
+ primaryNozzle=cast(PRIMARY_NOZZLE_LITERAL, primary_nozzle)
)
elif style == NozzleLayout.QUADRANT:
assert front_right_nozzle is not None
configuration_model = QuadrantNozzleLayoutConfiguration(
- primary_nozzle=cast(PRIMARY_NOZZLE_LITERAL, primary_nozzle),
- front_right_nozzle=front_right_nozzle,
+ primaryNozzle=cast(PRIMARY_NOZZLE_LITERAL, primary_nozzle),
+ frontRightNozzle=front_right_nozzle,
)
elif style == NozzleLayout.SINGLE:
configuration_model = SingleNozzleLayoutConfiguration(
- primary_nozzle=cast(PRIMARY_NOZZLE_LITERAL, primary_nozzle)
+ primaryNozzle=cast(PRIMARY_NOZZLE_LITERAL, primary_nozzle)
)
else:
- configuration_model = EmptyNozzleLayoutConfiguration()
+ configuration_model = AllNozzleLayoutConfiguration()
self._engine_client.configure_nozzle_layout(
pipette_id=self._pipette_id, configuration_params=configuration_model
)
+
+ def retract(self) -> None:
+ """Retract this instrument to the top of the gantry."""
+ z_axis = self._engine_client.state.pipettes.get_z_axis(self._pipette_id)
+ self._engine_client.home([z_axis])
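The `_DISPENSE_VOLUME_VALIDATION_ADDED_IN` gate in `dispense()` above changes behavior at API level 2.17: older protocols silently clamp an over-large dispense to the liquid currently held, while newer ones pass the full volume through so Protocol Engine's validation can raise. A minimal sketch of just that gate, with `APIVersion` reduced to a plain tuple for illustration:

from typing import Tuple

_DISPENSE_VOLUME_VALIDATION_ADDED_IN = (2, 17)  # stands in for APIVersion(2, 17)


def resolve_dispense_volume(
    api_version: Tuple[int, int], requested: float, current: float
) -> float:
    """Clamp on older API versions; pass through on 2.17+ so engine validation can raise."""
    if api_version < _DISPENSE_VOLUME_VALIDATION_ADDED_IN:
        return min(requested, current)
    return requested


print(resolve_dispense_volume((2, 16), requested=100.0, current=50.0))  # 50.0 (clamped)
print(resolve_dispense_volume((2, 17), requested=100.0, current=50.0))  # 100.0 (engine validates)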
diff --git a/api/src/opentrons/protocol_api/core/engine/labware.py b/api/src/opentrons/protocol_api/core/engine/labware.py
index eb823d2ba6a..301d7dcdece 100644
--- a/api/src/opentrons/protocol_api/core/engine/labware.py
+++ b/api/src/opentrons/protocol_api/core/engine/labware.py
@@ -10,7 +10,13 @@
from opentrons.protocol_engine.errors import LabwareNotOnDeckError, ModuleNotOnDeckError
from opentrons.protocol_engine.clients import SyncClient as ProtocolEngineClient
+from opentrons.protocol_engine.types import (
+ LabwareOffsetCreate,
+ LabwareOffsetVector,
+)
from opentrons.types import DeckSlotName, Point
+from opentrons.hardware_control.nozzle_manager import NozzleMap
+
from ..labware import AbstractLabware, LabwareLoadParams
from .well import WellCore
@@ -30,7 +36,9 @@ def __init__(self, labware_id: str, engine_client: ProtocolEngineClient) -> None
labware_state = engine_client.state.labware
self._definition = labware_state.get_definition(labware_id)
- self._user_display_name = labware_state.get_display_name(labware_id)
+ self._user_display_name = labware_state.get_user_specified_display_name(
+ labware_id
+ )
@property
def labware_id(self) -> str:
@@ -89,8 +97,28 @@ def get_quirks(self) -> List[str]:
return self._definition.parameters.quirks or []
def set_calibration(self, delta: Point) -> None:
- raise NotImplementedError(
- "Setting a labware's calibration after it's been loaded is not supported."
+ """Add a labware offset for this labware at its current location.
+
+ This will override any previous labware offsets for this definition URI and location,
+ even if the previous offset was set for a different labware instance.
+ """
+ offset_location = self._engine_client.state.geometry.get_offset_location(
+ self._labware_id
+ )
+ if not offset_location:
+ raise LabwareNotOnDeckError(
+ message=f"Cannot set offset for {self.get_name()} as it is not currently in a deck slot.",
+ details={"kind": "labware-not-in-slot"},
+ )
+
+ request = LabwareOffsetCreate.construct(
+ definitionUri=self.get_uri(),
+ location=offset_location,
+ vector=LabwareOffsetVector(x=delta.x, y=delta.y, z=delta.z),
+ )
+ self._engine_client.add_labware_offset(request)
+ self._engine_client.reload_labware(
+ labware_id=self._labware_id,
)
def get_calibrated_offset(self) -> Point:
@@ -120,7 +148,10 @@ def reset_tips(self) -> None:
raise TypeError(f"{self.get_display_name()} is not a tip rack.")
def get_next_tip(
- self, num_tips: int, starting_tip: Optional[WellCore]
+ self,
+ num_tips: int,
+ starting_tip: Optional[WellCore],
+ nozzle_map: Optional[NozzleMap],
) -> Optional[str]:
return self._engine_client.state.tips.get_next_tip(
labware_id=self._labware_id,
@@ -130,6 +161,7 @@ def get_next_tip(
if starting_tip and starting_tip.labware_id == self._labware_id
else None
),
+ nozzle_map=nozzle_map,
)
def get_well_columns(self) -> List[List[str]]:
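The restored `set_calibration()` above records a labware offset for the labware's current location and reloads it so the offset takes effect. A hedged protocol-level sketch of how user code might reach it, assuming `Labware.set_offset()` (not shown in this diff) is the public wrapper and that the `SET_OFFSET_RESTORED_API_VERSION` of 2.18 added earlier gates it:

from opentrons import protocol_api

requirements = {"robotType": "Flex", "apiLevel": "2.18"}


def run(protocol: protocol_api.ProtocolContext) -> None:
    plate = protocol.load_labware("corning_96_wellplate_360ul_flat", "D1")
    # Assumption: set_offset() reaches LabwareCore.set_calibration(), which records a
    # labware offset for this definition URI at slot D1 and reloads the labware
    # (add_labware_offset + reload_labware above).
    plate.set_offset(x=0.1, y=-0.2, z=0.3)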
diff --git a/api/src/opentrons/protocol_api/core/engine/overlap_versions.py b/api/src/opentrons/protocol_api/core/engine/overlap_versions.py
new file mode 100644
index 00000000000..ed14859ecd3
--- /dev/null
+++ b/api/src/opentrons/protocol_api/core/engine/overlap_versions.py
@@ -0,0 +1,16 @@
+"""Mappings between API versions and overlap versions."""
+from functools import lru_cache
+from typing_extensions import Final
+from opentrons.protocols.api_support.types import APIVersion
+
+_OVERLAP_VERSION_MAP: Final = {APIVersion(2, 0): "v0", APIVersion(2, 19): "v1"}
+
+
+@lru_cache(1)
+def overlap_for_api_version(api_version: APIVersion) -> str:
+ """Get the overlap version for a specific API version."""
+ defined = list(reversed(sorted(_OVERLAP_VERSION_MAP.keys())))
+ for version in defined:
+ if version <= api_version:
+ return _OVERLAP_VERSION_MAP[version]
+ return _OVERLAP_VERSION_MAP[APIVersion(2, 0)]
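A usage sketch of the new mapping: API levels 2.0 through 2.18 resolve to the "v0" tip-overlap data, while 2.19 and later resolve to "v1".

from opentrons.protocols.api_support.types import APIVersion
from opentrons.protocol_api.core.engine.overlap_versions import overlap_for_api_version

assert overlap_for_api_version(APIVersion(2, 14)) == "v0"
assert overlap_for_api_version(APIVersion(2, 18)) == "v0"
assert overlap_for_api_version(APIVersion(2, 19)) == "v1"
assert overlap_for_api_version(APIVersion(2, 20)) == "v1"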
diff --git a/api/src/opentrons/protocol_api/core/engine/point_calculations.py b/api/src/opentrons/protocol_api/core/engine/point_calculations.py
index 3aad2fc9a5d..4f3ec68fd26 100644
--- a/api/src/opentrons/protocol_api/core/engine/point_calculations.py
+++ b/api/src/opentrons/protocol_api/core/engine/point_calculations.py
@@ -22,3 +22,43 @@ def get_relative_offset(
y=point.y + y_offset,
z=point.z + z_offset,
)
+
+
+def are_overlapping_rectangles(
+ rectangle1: Tuple[Point, Point],
+ rectangle2: Tuple[Point, Point],
+) -> bool:
+ """Return whether the two provided rectangles are overlapping in 2D space.
+
+ The rectangles are assumed to be coplanar and represented by tuples of
+ the back-left and front-right vertices (in that order) of the respective rectangles.
+ The z-coordinate of each point will be ignored.
+
+ We determine if the rectangles overlap by comparing projections of the sides of
+ the rectangles on each of the 2 axes (x & y). If the projections on each axis overlap,
+ then we can conclude that the rectangles overlap.
+
+ The projections on an axis overlap if the distance between the first projected point
+ and the last projected point is less than the sum of the lengths of the projected sides
+ of the two rectangles. For example, if we have two rectangles with vertices:
+ Rect1 -> BL: (x1, y1), FR: (x2, y2)
+ Rect2 -> BL: (x3, y3), FR: (x4, y4)
+
+ Then for the two rectangles to be overlapping, they should satisfy:
+ max(x1, x2, x3, x4) - min(x1, x2, x3, x4) < (x2 - x1) + (x4 - x3)
+ AND
+ max(y1, y2, y3, y4) - min(y1, y2, y3, y4) < (y2 - y1) + (y4 - y3)
+ """
+ x_coordinates = [rectangle1[0].x, rectangle1[1].x, rectangle2[0].x, rectangle2[1].x]
+ x_length_rect1 = abs(rectangle1[1].x - rectangle1[0].x)
+ x_length_rect2 = abs(rectangle2[1].x - rectangle2[0].x)
+ overlapping_in_x = (
+ abs(max(x_coordinates) - min(x_coordinates)) < x_length_rect1 + x_length_rect2
+ )
+ y_coordinates = [rectangle1[0].y, rectangle1[1].y, rectangle2[0].y, rectangle2[1].y]
+ y_length_rect1 = abs(rectangle1[1].y - rectangle1[0].y)
+ y_length_rect2 = abs(rectangle2[1].y - rectangle2[0].y)
+ overlapping_in_y = (
+ abs(max(y_coordinates) - min(y_coordinates)) < y_length_rect1 + y_length_rect2
+ )
+ return overlapping_in_x and overlapping_in_y
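A standalone recap of the projection test above, with a local Point stand-in so it runs without the Opentrons package; rectangles are passed as (back-left, front-right) pairs, matching the docstring:

from typing import NamedTuple, Tuple


class Point(NamedTuple):
    x: float
    y: float


def are_overlapping_rectangles(
    rectangle1: Tuple[Point, Point], rectangle2: Tuple[Point, Point]
) -> bool:
    """Projection test: overlap on both axes means the rectangles overlap."""
    xs = [rectangle1[0].x, rectangle1[1].x, rectangle2[0].x, rectangle2[1].x]
    ys = [rectangle1[0].y, rectangle1[1].y, rectangle2[0].y, rectangle2[1].y]
    x_lengths = abs(rectangle1[1].x - rectangle1[0].x) + abs(rectangle2[1].x - rectangle2[0].x)
    y_lengths = abs(rectangle1[1].y - rectangle1[0].y) + abs(rectangle2[1].y - rectangle2[0].y)
    return (max(xs) - min(xs) < x_lengths) and (max(ys) - min(ys) < y_lengths)


# Two 2x2 squares offset by 1 mm in x and y overlap on both axes.
print(are_overlapping_rectangles((Point(0, 2), Point(2, 0)), (Point(1, 3), Point(3, 1))))  # True
# Squares that only touch at a corner are not counted as overlapping (strict <).
print(are_overlapping_rectangles((Point(0, 2), Point(2, 0)), (Point(2, 4), Point(4, 2))))  # False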
diff --git a/api/src/opentrons/protocol_api/core/engine/protocol.py b/api/src/opentrons/protocol_api/core/engine/protocol.py
index d82edf8cee8..28fb0376c40 100644
--- a/api/src/opentrons/protocol_api/core/engine/protocol.py
+++ b/api/src/opentrons/protocol_api/core/engine/protocol.py
@@ -1,16 +1,22 @@
"""ProtocolEngine-based Protocol API core implementation."""
-from typing import Dict, Optional, Type, Union, List, Tuple
-
-from opentrons.protocol_api import _waste_chute_dimensions
+from __future__ import annotations
+from typing import Dict, Optional, Type, Union, List, Tuple, TYPE_CHECKING
from opentrons.protocol_engine.commands import LoadModuleResult
-from opentrons_shared_data.deck.dev_types import DeckDefinitionV4, SlotDefV3
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5, SlotDefV3
from opentrons_shared_data.labware.labware_definition import LabwareDefinition
from opentrons_shared_data.labware.dev_types import LabwareDefinition as LabwareDefDict
from opentrons_shared_data.pipette.dev_types import PipetteNameType
from opentrons_shared_data.robot.dev_types import RobotType
-from opentrons.types import DeckSlotName, Location, Mount, MountType, Point
+from opentrons.types import (
+ DeckSlotName,
+ Location,
+ Mount,
+ MountType,
+ Point,
+ StagingSlotName,
+)
from opentrons.hardware_control import SyncHardwareAPI, SynchronousAdapter
from opentrons.hardware_control.modules import AbstractModule
from opentrons.hardware_control.modules.types import ModuleModel, ModuleType
@@ -18,8 +24,10 @@
from opentrons.protocols.api_support.util import AxisMaxSpeeds
from opentrons.protocols.api_support.types import APIVersion
+
from opentrons.protocol_engine import (
DeckSlotLocation,
+ AddressableAreaLocation,
ModuleLocation,
OnLabwareLocation,
ModuleModel as EngineModuleModel,
@@ -41,9 +49,9 @@
)
from ... import validation
-from ..._types import OffDeckType, OFF_DECK
+from ..._types import OffDeckType
from ..._liquid import Liquid
-from ..._waste_chute import WasteChute
+from ...disposal_locations import TrashBin, WasteChute
from ..protocol import AbstractProtocol
from ..labware import LabwareLoadParams
from .labware import LabwareCore
@@ -58,8 +66,10 @@
MagneticBlockCore,
)
from .exceptions import InvalidModuleLocationError
-from . import load_labware_params
-from . import deck_conflict
+from . import load_labware_params, deck_conflict, overlap_versions
+
+if TYPE_CHECKING:
+ from ...labware import Labware
class ProtocolCore(
@@ -90,6 +100,7 @@ def __init__(
self._module_cores_by_id: Dict[
str, Union[ModuleCore, NonConnectedModuleCore]
] = {}
+ self._disposal_locations: List[Union[Labware, TrashBin, WasteChute]] = []
self._load_fixed_trash()
@property
@@ -105,17 +116,51 @@ def robot_type(self) -> RobotType:
def fixed_trash(self) -> Optional[LabwareCore]:
"""Get the fixed trash labware."""
trash_id = self._engine_client.state.labware.get_fixed_trash_id()
- if trash_id is not None:
+ if trash_id is not None and self._api_version < APIVersion(2, 16):
return self._labware_cores_by_id[trash_id]
return None
def _load_fixed_trash(self) -> None:
- trash_id = self._engine_client.state.labware.get_fixed_trash_id()
- if trash_id is not None:
- self._labware_cores_by_id[trash_id] = LabwareCore(
- labware_id=trash_id,
- engine_client=self._engine_client,
+ if self.robot_type == "OT-2 Standard" or self._api_version < APIVersion(2, 16):
+ trash_id = self._engine_client.state.labware.get_fixed_trash_id()
+ if trash_id is not None:
+ self._labware_cores_by_id[trash_id] = LabwareCore(
+ labware_id=trash_id,
+ engine_client=self._engine_client,
+ )
+
+ def append_disposal_location(
+ self,
+ disposal_location: Union[Labware, TrashBin, WasteChute],
+ ) -> None:
+ """Append a disposal location object to the core."""
+ self._disposal_locations.append(disposal_location)
+
+ def _add_disposal_location_to_engine(
+ self, disposal_location: Union[TrashBin, WasteChute]
+ ) -> None:
+ """Verify and add disposal location to engine store and append it to the core."""
+ self._engine_client.state.addressable_areas.raise_if_area_not_in_deck_configuration(
+ disposal_location.area_name
+ )
+ if isinstance(disposal_location, TrashBin):
+ deck_conflict.check(
+ engine_state=self._engine_client.state,
+ new_trash_bin=disposal_location,
+ existing_disposal_locations=self._disposal_locations,
+ # TODO: We can now fetch these IDs from engine too.
+ # See comment in self.load_labware().
+ #
+ # Wrapping .keys() in list() is just to make Decoy verification easier.
+ existing_labware_ids=list(self._labware_cores_by_id.keys()),
+ existing_module_ids=list(self._module_cores_by_id.keys()),
)
+ self._engine_client.add_addressable_area(disposal_location.area_name)
+ self.append_disposal_location(disposal_location)
+
+ def get_disposal_locations(self) -> List[Union[Labware, TrashBin, WasteChute]]:
+ """Get disposal locations."""
+ return self._disposal_locations
def get_max_speeds(self) -> AxisMaxSpeeds:
"""Get a control interface for maximum move speeds."""
@@ -143,7 +188,12 @@ def load_labware(
self,
load_name: str,
location: Union[
- DeckSlotName, LabwareCore, ModuleCore, NonConnectedModuleCore, OffDeckType
+ DeckSlotName,
+ StagingSlotName,
+ LabwareCore,
+ ModuleCore,
+ NonConnectedModuleCore,
+ OffDeckType,
],
label: Optional[str],
namespace: Optional[str],
@@ -182,11 +232,12 @@ def load_labware(
deck_conflict.check(
engine_state=self._engine_client.state,
new_labware_id=load_result.labwareId,
- # It's important that we don't fetch these IDs from Protocol Engine, and
- # use our own bookkeeping instead. If we fetched these IDs from Protocol
- # Engine, it would have leaked state from Labware Position Check in the
- # same HTTP run.
- #
+ existing_disposal_locations=self._disposal_locations,
+ # TODO (spp, 2023-11-27): We've been using IDs from _labware_cores_by_id
+ # and _module_cores_by_id instead of getting the lists directly from engine
+ # because of the chance of engine carrying labware IDs from LPC too.
+ # But with https://github.com/Opentrons/opentrons/pull/13943,
+ # & LPC in maintenance runs, we can now rely on engine state for these IDs too.
# Wrapping .keys() in list() is just to make Decoy verification easier.
existing_labware_ids=list(self._labware_cores_by_id.keys()),
existing_module_ids=list(self._module_cores_by_id.keys()),
@@ -204,7 +255,13 @@ def load_labware(
def load_adapter(
self,
load_name: str,
- location: Union[DeckSlotName, ModuleCore, NonConnectedModuleCore, OffDeckType],
+ location: Union[
+ DeckSlotName,
+ StagingSlotName,
+ ModuleCore,
+ NonConnectedModuleCore,
+ OffDeckType,
+ ],
namespace: Optional[str],
version: Optional[int],
) -> LabwareCore:
@@ -230,10 +287,9 @@ def load_adapter(
deck_conflict.check(
engine_state=self._engine_client.state,
new_labware_id=load_result.labwareId,
- # It's important that we don't fetch these IDs from Protocol Engine, and
- # use our own bookkeeping instead. If we fetched these IDs from Protocol
- # Engine, it would have leaked state from Labware Position Check in the
- # same HTTP run.
+ existing_disposal_locations=self._disposal_locations,
+ # TODO: We can now fetch these IDs from engine too.
+ # See comment in self.load_labware().
#
# Wrapping .keys() in list() is just to make Decoy verification easier.
existing_labware_ids=list(self._labware_cores_by_id.keys()),
@@ -255,6 +311,7 @@ def move_labware(
labware_core: LabwareCore,
new_location: Union[
DeckSlotName,
+ StagingSlotName,
LabwareCore,
ModuleCore,
NonConnectedModuleCore,
@@ -287,71 +344,34 @@ def move_labware(
else None
)
- if isinstance(new_location, WasteChute):
- self._move_labware_to_waste_chute(
- labware_core, strategy, _pick_up_offset, _drop_offset
- )
- else:
- to_location = self._convert_labware_location(location=new_location)
-
- # TODO(mm, 2023-02-23): Check for conflicts with other items on the deck,
- # when move_labware() support is no longer experimental.
+ to_location = self._convert_labware_location(location=new_location)
- self._engine_client.move_labware(
- labware_id=labware_core.labware_id,
- new_location=to_location,
- strategy=strategy,
- pick_up_offset=_pick_up_offset,
- drop_offset=_drop_offset,
- )
+ self._engine_client.move_labware(
+ labware_id=labware_core.labware_id,
+ new_location=to_location,
+ strategy=strategy,
+ pick_up_offset=_pick_up_offset,
+ drop_offset=_drop_offset,
+ )
if strategy == LabwareMovementStrategy.USING_GRIPPER:
# Clear out last location since it is not relevant to pipetting
# and we only use last location for in-place pipetting commands
self.set_last_location(location=None, mount=Mount.EXTENSION)
- def _move_labware_to_waste_chute(
- self,
- labware_core: LabwareCore,
- strategy: LabwareMovementStrategy,
- pick_up_offset: Optional[LabwareOffsetVector],
- drop_offset: Optional[LabwareOffsetVector],
- ) -> None:
- slot = DeckSlotLocation(slotName=DeckSlotName.SLOT_D3)
- slot_width = 128
- slot_height = 86
- drop_offset_from_slot = (
- _waste_chute_dimensions.SLOT_ORIGIN_TO_GRIPPER_JAW_CENTER
- - Point(x=slot_width / 2, y=slot_height / 2)
- )
- if drop_offset is not None:
- drop_offset_from_slot += Point(
- x=drop_offset.x, y=drop_offset.y, z=drop_offset.z
- )
-
- # To get the physical movement to happen, move the labware "into the slot" with a giant
- # offset to dunk it in the waste chute.
- self._engine_client.move_labware(
- labware_id=labware_core.labware_id,
- new_location=slot,
- strategy=strategy,
- pick_up_offset=pick_up_offset,
- drop_offset=LabwareOffsetVector(
- x=drop_offset_from_slot.x,
- y=drop_offset_from_slot.y,
- z=drop_offset_from_slot.z,
- ),
- )
-
- # To get the logical movement to be correct, move the labware off-deck.
- # Otherwise, leaving the labware "in the slot" would mean you couldn't call this function
- # again for other labware.
- self._engine_client.move_labware(
- labware_id=labware_core.labware_id,
- new_location=self._convert_labware_location(OFF_DECK),
- strategy=LabwareMovementStrategy.MANUAL_MOVE_WITHOUT_PAUSE,
- pick_up_offset=None,
- drop_offset=None,
+ # FIXME(jbl, 2024-01-04): deck conflict is only checked after the move has executed; read notes in load_labware for more info:
+ deck_conflict.check(
+ engine_state=self._engine_client.state,
+ new_labware_id=labware_core.labware_id,
+ existing_disposal_locations=self._disposal_locations,
+ # TODO: We can now fetch these IDs from engine too.
+ # See comment in self.load_labware().
+ existing_labware_ids=[
+ labware_id
+ for labware_id in self._labware_cores_by_id
+ if labware_id != labware_core.labware_id
+ ],
+ existing_module_ids=list(self._module_cores_by_id.keys()),
)
def _resolve_module_hardware(
@@ -388,7 +408,6 @@ def load_module(
robot_type = self._engine_client.state.config.robot_type
normalized_deck_slot = deck_slot.to_equivalent_for_robot_type(robot_type)
- self._ensure_module_location(normalized_deck_slot, module_type)
result = self._engine_client.load_module(
model=EngineModuleModel(model),
@@ -403,8 +422,9 @@ def load_module(
deck_conflict.check(
engine_state=self._engine_client.state,
new_module_id=result.moduleId,
- # It's important that we don't fetch these IDs from Protocol Engine.
- # See comment in self.load_labware().
+ existing_disposal_locations=self._disposal_locations,
+ # TODO: We can now fetch these IDs from engine too.
+ # See comment in self.load_labware().
#
# Wrapping .keys() in list() is just to make Decoy verification easier.
existing_labware_ids=list(self._labware_cores_by_id.keys()),
@@ -477,7 +497,13 @@ def load_instrument(
An instrument core configured to use the requested instrument.
"""
engine_mount = MountType[mount.name]
- load_result = self._engine_client.load_pipette(instrument_name, engine_mount)
+ load_result = self._engine_client.load_pipette(
+ instrument_name,
+ engine_mount,
+ tip_overlap_version=overlap_versions.overlap_for_api_version(
+ self._api_version
+ ),
+ )
return InstrumentCore(
pipette_id=load_result.pipetteId,
@@ -488,6 +514,51 @@ def load_instrument(
default_movement_speed=400,
)
+ def load_trash_bin(self, slot_name: DeckSlotName, area_name: str) -> TrashBin:
+ """Load a deck configuration based trash bin.
+
+ Args:
+ slot_name: the slot the trash is being loaded into.
+ area_name: the addressable area name of the trash.
+
+ Returns:
+ A trash bin object.
+ """
+ trash_bin = TrashBin(
+ location=slot_name,
+ addressable_area_name=area_name,
+ api_version=self._api_version,
+ engine_client=self._engine_client,
+ )
+ self._add_disposal_location_to_engine(trash_bin)
+ return trash_bin
+
+ def load_ot2_fixed_trash_bin(self) -> None:
+ """Load a deck configured OT-2 fixed trash in Slot 12."""
+ _fixed_trash_trash_bin = TrashBin(
+ location=DeckSlotName.FIXED_TRASH,
+ addressable_area_name="fixedTrash",
+ api_version=self._api_version,
+ engine_client=self._engine_client,
+ )
+ # We only append the fixed trash to the core's internal list here, rather than adding it to the engine
+ # via the core, because that method works through the SyncClient and would deadlock protocols if called
+ # from here. Instead, it is called directly in Protocol Engine in create_protocol_context after the
+ # ProtocolContext is initialized.
+ self.append_disposal_location(_fixed_trash_trash_bin)
+
+ def load_waste_chute(self) -> WasteChute:
+ """Load a deck configured waste chute into Slot D3.
+
+ Returns:
+ A waste chute object.
+ """
+ waste_chute = WasteChute(
+ engine_client=self._engine_client, api_version=self._api_version
+ )
+ self._add_disposal_location_to_engine(waste_chute)
+ return waste_chute
+
def pause(self, msg: Optional[str]) -> None:
"""Pause the protocol."""
self._engine_client.wait_for_resume(message=msg)
@@ -535,30 +606,32 @@ def set_last_location(
self._last_location = location
self._last_mount = mount
- def get_deck_definition(self) -> DeckDefinitionV4:
+ def get_deck_definition(self) -> DeckDefinitionV5:
"""Get the geometry definition of the robot's deck."""
return self._engine_client.state.labware.get_deck_definition()
- def get_slot_definition(self, slot: DeckSlotName) -> SlotDefV3:
+ def get_slot_definition(
+ self, slot: Union[DeckSlotName, StagingSlotName]
+ ) -> SlotDefV3:
"""Get the slot definition from the robot's deck."""
- return self._engine_client.state.labware.get_slot_definition(slot)
+ return self._engine_client.state.addressable_areas.get_slot_definition(slot.id)
- def _ensure_module_location(
- self, slot: DeckSlotName, module_type: ModuleType
- ) -> None:
- slot_def = self.get_slot_definition(slot)
- compatible_modules = slot_def["compatibleModuleTypes"]
- if module_type.value not in compatible_modules:
- raise ValueError(f"A {module_type.value} cannot be loaded into slot {slot}")
+ def get_slot_definitions(self) -> Dict[str, SlotDefV3]:
+ """Get all standard slot definitions available in the deck definition."""
+ return self._engine_client.state.addressable_areas.get_deck_slot_definitions()
+
+ def get_staging_slot_definitions(self) -> Dict[str, SlotDefV3]:
+ """Get all staging slot definitions available in the deck definition."""
+ return (
+ self._engine_client.state.addressable_areas.get_staging_slot_definitions()
+ )
def get_slot_item(
- self, slot_name: DeckSlotName
+ self, slot_name: Union[DeckSlotName, StagingSlotName]
) -> Union[LabwareCore, ModuleCore, NonConnectedModuleCore, None]:
"""Get the contents of a given slot, if any."""
loaded_item = self._engine_client.state.geometry.get_slot_item(
- slot_name=slot_name,
- allowed_labware_ids=set(self._labware_cores_by_id.keys()),
- allowed_module_ids=set(self._module_cores_by_id.keys()),
+ slot_name=slot_name
)
if isinstance(loaded_item, LoadedLabware):
@@ -593,13 +666,15 @@ def get_labware_on_labware(
except LabwareNotLoadedOnLabwareError:
return None
- def get_slot_center(self, slot_name: DeckSlotName) -> Point:
+ def get_slot_center(self, slot_name: Union[DeckSlotName, StagingSlotName]) -> Point:
"""Get the absolute coordinate of a slot's center."""
- return self._engine_client.state.labware.get_slot_center_position(slot_name)
+ return self._engine_client.state.addressable_areas.get_addressable_area_center(
+ slot_name.id
+ )
def get_highest_z(self) -> float:
"""Get the highest Z point of all deck items."""
- return self._engine_client.state.geometry.get_all_labware_highest_z()
+ return self._engine_client.state.geometry.get_all_obstacle_highest_z()
def get_labware_cores(self) -> List[LabwareCore]:
"""Get all loaded labware cores."""
@@ -640,6 +715,9 @@ def get_labware_location(
return validation.internal_slot_to_public_string(
labware_location.slotName, self._engine_client.state.config.robot_type
)
+ elif isinstance(labware_location, AddressableAreaLocation):
+ # This will only ever be a robot-accurate deck slot name or a Flex staging slot name
+ return labware_location.addressableAreaName
elif isinstance(labware_location, ModuleLocation):
return self._module_cores_by_id[labware_location.moduleId]
elif isinstance(labware_location, OnLabwareLocation):
@@ -650,7 +728,13 @@ def get_labware_location(
def _convert_labware_location(
self,
location: Union[
- DeckSlotName, LabwareCore, ModuleCore, NonConnectedModuleCore, OffDeckType
+ DeckSlotName,
+ StagingSlotName,
+ LabwareCore,
+ ModuleCore,
+ NonConnectedModuleCore,
+ OffDeckType,
+ WasteChute,
],
) -> LabwareLocation:
if isinstance(location, LabwareCore):
@@ -660,7 +744,14 @@ def _convert_labware_location(
@staticmethod
def _get_non_stacked_location(
- location: Union[DeckSlotName, ModuleCore, NonConnectedModuleCore, OffDeckType]
+ location: Union[
+ DeckSlotName,
+ StagingSlotName,
+ ModuleCore,
+ NonConnectedModuleCore,
+ OffDeckType,
+ WasteChute,
+ ]
) -> NonStackedLocation:
if isinstance(location, (ModuleCore, NonConnectedModuleCore)):
return ModuleLocation(moduleId=location.module_id)
@@ -668,3 +759,8 @@ def _get_non_stacked_location(
return OFF_DECK_LOCATION
elif isinstance(location, DeckSlotName):
return DeckSlotLocation(slotName=location)
+ elif isinstance(location, StagingSlotName):
+ return AddressableAreaLocation(addressableAreaName=location.id)
+ elif isinstance(location, WasteChute):
+ # TODO(mm, 2023-12-06) This will need to determine the appropriate Waste Chute to return, but only move_labware uses this for now
+ return AddressableAreaLocation(addressableAreaName="gripperWasteChute")
diff --git a/api/src/opentrons/protocol_api/core/engine/stringify.py b/api/src/opentrons/protocol_api/core/engine/stringify.py
index fd4a90817cd..78de37c5c5d 100644
--- a/api/src/opentrons/protocol_api/core/engine/stringify.py
+++ b/api/src/opentrons/protocol_api/core/engine/stringify.py
@@ -4,6 +4,7 @@
LabwareLocation,
ModuleLocation,
OnLabwareLocation,
+ AddressableAreaLocation,
)
@@ -42,13 +43,19 @@ def _labware_location_string(
labware_on_string = _labware_location_string(engine_client, labware_on)
return f"{labware_name} on {labware_on_string}"
+ elif isinstance(location, AddressableAreaLocation):
+ # In practice this will always be a deck slot or staging slot
+ return f"slot {location.addressableAreaName}"
+
elif location == "offDeck":
return "[off-deck]"
def _labware_name(engine_client: SyncClient, labware_id: str) -> str:
"""Return the user-specified labware label, or fall back to the display name from the def."""
- user_name = engine_client.state.labware.get_display_name(labware_id=labware_id)
+ user_name = engine_client.state.labware.get_user_specified_display_name(
+ labware_id=labware_id
+ )
definition_name = engine_client.state.labware.get_definition(
labware_id=labware_id
).metadata.displayName
diff --git a/api/src/opentrons/protocol_api/core/instrument.py b/api/src/opentrons/protocol_api/core/instrument.py
index 6429e253c2e..fec252a009e 100644
--- a/api/src/opentrons/protocol_api/core/instrument.py
+++ b/api/src/opentrons/protocol_api/core/instrument.py
@@ -3,14 +3,15 @@
from __future__ import annotations
from abc import abstractmethod, ABC
-from typing import Any, Generic, Optional, TypeVar
+from typing import Any, Generic, Optional, TypeVar, Union
from opentrons import types
from opentrons.hardware_control.dev_types import PipetteDict
from opentrons.protocols.api_support.util import FlowRates
from opentrons.protocol_api._nozzle_layout import NozzleLayout
+from opentrons.hardware_control.nozzle_manager import NozzleMap
-from .._waste_chute import WasteChute
+from ..disposal_locations import TrashBin, WasteChute
from .well import WellCoreType
@@ -47,7 +48,7 @@ def aspirate(
@abstractmethod
def dispense(
self,
- location: types.Location,
+ location: Union[types.Location, TrashBin, WasteChute],
well_core: Optional[WellCoreType],
volume: float,
rate: float,
@@ -70,7 +71,7 @@ def dispense(
@abstractmethod
def blow_out(
self,
- location: types.Location,
+ location: Union[types.Location, TrashBin, WasteChute],
well_core: Optional[WellCoreType],
in_place: bool,
) -> None:
@@ -135,9 +136,19 @@ def drop_tip(
...
@abstractmethod
- def drop_tip_in_waste_chute(
- self, waste_chute: WasteChute, home_after: Optional[bool]
+ def drop_tip_in_disposal_location(
+ self,
+ disposal_location: Union[TrashBin, WasteChute],
+ home_after: Optional[bool],
+ alternate_tip_drop: bool = False,
) -> None:
+ """Move to and drop tip into a TrashBin or WasteChute.
+
+ Args:
+ disposal_location: The disposal location object we're dropping to.
+ home_after: Whether to home the pipette after the tip is dropped.
+ alternate_tip_drop: Whether to alternate tip drop location in a trash bin.
+ """
...
@abstractmethod
@@ -151,7 +162,7 @@ def home_plunger(self) -> None:
@abstractmethod
def move_to(
self,
- location: types.Location,
+ location: Union[types.Location, TrashBin, WasteChute],
well_core: Optional[WellCoreType],
force_direct: bool,
minimum_z_height: Optional[float],
@@ -204,6 +215,14 @@ def get_hardware_state(self) -> PipetteDict:
def get_channels(self) -> int:
...
+ @abstractmethod
+ def get_active_channels(self) -> int:
+ ...
+
+ @abstractmethod
+ def get_nozzle_map(self) -> NozzleMap:
+ ...
+
@abstractmethod
def has_tip(self) -> bool:
...
@@ -228,6 +247,7 @@ def get_dispense_flow_rate(self, rate: float = 1.0) -> float:
def get_blow_out_flow_rate(self, rate: float = 1.0) -> float:
...
+ @abstractmethod
def set_flow_rate(
self,
aspirate: Optional[float] = None,
@@ -236,18 +256,21 @@ def set_flow_rate(
) -> None:
...
+ @abstractmethod
def configure_for_volume(self, volume: float) -> None:
"""Configure the pipette for a specific volume.
Args:
- volume: The volume to preppare to handle.
+ volume: The volume to prepare to handle.
"""
...
+ @abstractmethod
def prepare_to_aspirate(self) -> None:
"""Prepare the pipette to aspirate."""
...
+ @abstractmethod
def configure_nozzle_layout(
self,
style: NozzleLayout,
@@ -258,10 +281,18 @@ def configure_nozzle_layout(
Args:
style: The type of configuration you wish to build.
- primary_nozzle: The nozzle that will determine a pipettes critical point.
+ primary_nozzle: The nozzle that will determine a pipette's critical point.
front_right_nozzle: The front right most nozzle in the requested layout.
"""
...
+ @abstractmethod
+ def is_tip_tracking_available(self) -> bool:
+ """Return whether auto tip tracking is available for the pipette's current nozzle configuration."""
+
+ def retract(self) -> None:
+ """Retract this instrument to the top of the gantry."""
+ ...
+
InstrumentCoreType = TypeVar("InstrumentCoreType", bound=AbstractInstrument[Any])
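The new abstract hooks (get_active_channels, get_nozzle_map, is_tip_tracking_available) back automatic tip tracking for partial nozzle layouts. A hedged sketch of how that surfaces in a protocol, assuming apiLevel 2.18 on a Flex with a 96-channel pipette; the COLUMN layout and load names are illustrative:

from opentrons import protocol_api
from opentrons.protocol_api import COLUMN

requirements = {"robotType": "Flex", "apiLevel": "2.18"}

def run(protocol: protocol_api.ProtocolContext) -> None:
    tips = protocol.load_labware("opentrons_flex_96_tiprack_1000ul", "C2")
    pip96 = protocol.load_instrument("flex_96channel_1000", mount="left")
    # Restrict the pipette to a single column of nozzles; the core exposes the
    # resulting layout through get_nozzle_map() and get_active_channels().
    pip96.configure_nozzle_layout(style=COLUMN, start="A12", tip_racks=[tips])
    # When is_tip_tracking_available() reports True for this layout,
    # pick_up_tip() can choose the next tips without an explicit location.
    pip96.pick_up_tip()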
diff --git a/api/src/opentrons/protocol_api/core/labware.py b/api/src/opentrons/protocol_api/core/labware.py
index 4411155692f..ada1a7ff0ed 100644
--- a/api/src/opentrons/protocol_api/core/labware.py
+++ b/api/src/opentrons/protocol_api/core/labware.py
@@ -11,6 +11,7 @@
)
from opentrons.types import DeckSlotName, Point
+from opentrons.hardware_control.nozzle_manager import NozzleMap
from .well import WellCoreType
@@ -110,7 +111,10 @@ def reset_tips(self) -> None:
@abstractmethod
def get_next_tip(
- self, num_tips: int, starting_tip: Optional[WellCoreType]
+ self,
+ num_tips: int,
+ starting_tip: Optional[WellCoreType],
+ nozzle_map: Optional[NozzleMap],
) -> Optional[str]:
"""Get the name of the next available tip(s) in the rack, if available."""
diff --git a/api/src/opentrons/protocol_api/core/legacy/deck.py b/api/src/opentrons/protocol_api/core/legacy/deck.py
index ea4068934bd..685f0f5d553 100644
--- a/api/src/opentrons/protocol_api/core/legacy/deck.py
+++ b/api/src/opentrons/protocol_api/core/legacy/deck.py
@@ -3,7 +3,7 @@
import functools
import logging
from collections import UserDict
-from typing import Dict, Optional, List, Union
+from typing import Dict, Optional, List, Union, Mapping
from typing_extensions import Protocol, Final
from opentrons_shared_data.deck import load as load_deck
@@ -14,7 +14,14 @@
from opentrons.hardware_control.modules.types import ModuleType
from opentrons.motion_planning import deck_conflict
from opentrons.protocols.api_support.labware_like import LabwareLike
-from opentrons.types import DeckLocation, Location, Mount, Point, DeckSlotName
+from opentrons.types import (
+ DeckLocation,
+ Location,
+ Mount,
+ Point,
+ DeckSlotName,
+ StagingSlotName,
+)
from opentrons.protocol_api.core.labware import AbstractLabware
from opentrons.protocol_api.deck import CalibrationPosition
@@ -167,7 +174,9 @@ def __delitem__(self, key: DeckLocation) -> None:
def __setitem__(self, key: DeckLocation, val: DeckItem) -> None:
slot_key_int = self._check_name(key)
- existing_items = {
+ existing_items: Mapping[
+ Union[DeckSlotName, StagingSlotName], deck_conflict.DeckItem
+ ] = {
DeckSlotName.from_primitive(slot): self._map_to_conflict_checker_item(item)
for slot, item in self.data.items()
if item is not None
@@ -271,6 +280,11 @@ def resolve_module_location(
compatible_modules = slot_def["compatibleModuleTypes"]
if module_type.value in compatible_modules:
return location
+ elif (
+ self._definition["robot"]["model"] == "OT-3 Standard"
+ and ModuleType.to_module_fixture_id(module_type) == slot_def["id"]
+ ):
+ return location
else:
raise ValueError(
f"A {dn_from_type[module_type]} cannot be loaded"
diff --git a/api/src/opentrons/protocol_api/core/legacy/legacy_instrument_core.py b/api/src/opentrons/protocol_api/core/legacy/legacy_instrument_core.py
index 5ce5fd595c9..3755b093e78 100644
--- a/api/src/opentrons/protocol_api/core/legacy/legacy_instrument_core.py
+++ b/api/src/opentrons/protocol_api/core/legacy/legacy_instrument_core.py
@@ -1,7 +1,7 @@
from __future__ import annotations
import logging
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Optional, Union
from opentrons import types
from opentrons.hardware_control import CriticalPoint
@@ -18,8 +18,9 @@
)
from opentrons.protocols.geometry import planning
from opentrons.protocol_api._nozzle_layout import NozzleLayout
+from opentrons.hardware_control.nozzle_manager import NozzleMap
-from ..._waste_chute import WasteChute
+from ...disposal_locations import TrashBin, WasteChute
from ..instrument import AbstractInstrument
from .legacy_well_core import LegacyWellCore
from .legacy_module_core import LegacyThermocyclerCore, LegacyHeaterShakerCore
@@ -112,7 +113,7 @@ def aspirate(
def dispense(
self,
- location: types.Location,
+ location: Union[types.Location, TrashBin, WasteChute],
well_core: Optional[LegacyWellCore],
volume: float,
rate: float,
@@ -130,6 +131,10 @@ def dispense(
in_place: Whether we should move_to location.
push_out: The amount to push the plunger below bottom position.
"""
+ if isinstance(location, (TrashBin, WasteChute)):
+ raise APIVersionError(
+ "Dispense in Moveable Trash or Waste Chute are not supported in this API Version."
+ )
if push_out:
raise APIVersionError("push_out is not supported in this API version.")
if not in_place:
@@ -139,7 +144,7 @@ def dispense(
def blow_out(
self,
- location: types.Location,
+ location: Union[types.Location, TrashBin, WasteChute],
well_core: Optional[LegacyWellCore],
in_place: bool,
) -> None:
@@ -150,6 +155,11 @@ def blow_out(
well_core: Unused by legacy core.
in_place: Whether we should move_to location.
"""
+ if isinstance(location, (TrashBin, WasteChute)):
+ raise APIVersionError(
+ "Blow Out in Moveable Trash or Waste Chute are not supported in this API Version."
+ )
+
if not in_place:
self.move_to(location=location)
self._protocol_interface.get_hardware().blow_out(self._mount)
@@ -284,11 +294,14 @@ def drop_tip(
f"Could not return tip to {labware_core.get_display_name()}"
)
- def drop_tip_in_waste_chute(
- self, waste_chute: WasteChute, home_after: Optional[bool]
+ def drop_tip_in_disposal_location(
+ self,
+ disposal_location: Union[TrashBin, WasteChute],
+ home_after: Optional[bool],
+ alternate_tip_drop: bool = False,
) -> None:
raise APIVersionError(
- "Dropping tips in a waste chute is not supported in this API Version."
+ "Dropping tips in a trash bin or waste chute is not supported in this API Version."
)
def home(self) -> None:
@@ -307,7 +320,7 @@ def home_plunger(self) -> None:
def move_to(
self,
- location: types.Location,
+ location: Union[types.Location, TrashBin, WasteChute],
well_core: Optional[LegacyWellCore] = None,
force_direct: bool = False,
minimum_z_height: Optional[float] = None,
@@ -326,6 +339,10 @@ def move_to(
LabwareHeightError: An item on the deck is taller than
the computed safe travel height.
"""
+ if isinstance(location, (TrashBin, WasteChute)):
+ raise APIVersionError(
+ "Move To Trash Bin and Waste Chute are not supported in this API Version."
+ )
self.flag_unsafe_move(location)
# prevent direct movement bugs in PAPI version >= 2.10
@@ -527,5 +544,21 @@ def configure_nozzle_layout(
primary_nozzle: Optional[str],
front_right_nozzle: Optional[str],
) -> None:
- """This will never be called because it was added in API 2.15."""
+ """This will never be called because it was added in API 2.16."""
pass
+
+ def get_active_channels(self) -> int:
+ """This will never be called because it was added in API 2.16."""
+ assert False, "get_active_channels only supported in API 2.16 & later"
+
+ def get_nozzle_map(self) -> NozzleMap:
+ """This will never be called because it was added in API 2.18."""
+ assert False, "get_nozzle_map only supported in API 2.18 & later"
+
+ def is_tip_tracking_available(self) -> bool:
+ # Tip tracking is always available in legacy context
+ return True
+
+ def retract(self) -> None:
+ """Retract this instrument to the top of the gantry."""
+ self._protocol_interface.get_hardware.retract(self._mount) # type: ignore [attr-defined]
diff --git a/api/src/opentrons/protocol_api/core/legacy/legacy_labware_core.py b/api/src/opentrons/protocol_api/core/legacy/legacy_labware_core.py
index 2749ef8949a..ece9be66f19 100644
--- a/api/src/opentrons/protocol_api/core/legacy/legacy_labware_core.py
+++ b/api/src/opentrons/protocol_api/core/legacy/legacy_labware_core.py
@@ -5,6 +5,7 @@
from opentrons.protocols.api_support.tip_tracker import TipTracker
from opentrons.types import DeckSlotName, Location, Point
+from opentrons.hardware_control.nozzle_manager import NozzleMap
from opentrons_shared_data.labware.dev_types import LabwareParameters, LabwareDefinition
from ..labware import AbstractLabware, LabwareLoadParams
@@ -153,8 +154,15 @@ def reset_tips(self) -> None:
well.set_has_tip(True)
def get_next_tip(
- self, num_tips: int, starting_tip: Optional[LegacyWellCore]
+ self,
+ num_tips: int,
+ starting_tip: Optional[LegacyWellCore],
+ nozzle_map: Optional[NozzleMap],
) -> Optional[str]:
+ if nozzle_map is not None:
+ raise ValueError(
+ "Nozzle Map cannot be provided to calls for next tip in legacy protocols."
+ )
next_well = self._tip_tracker.next_tip(num_tips, starting_tip)
return next_well.get_name() if next_well else None
diff --git a/api/src/opentrons/protocol_api/core/legacy/legacy_protocol_core.py b/api/src/opentrons/protocol_api/core/legacy/legacy_protocol_core.py
index 01faa63c17b..02fc2003733 100644
--- a/api/src/opentrons/protocol_api/core/legacy/legacy_protocol_core.py
+++ b/api/src/opentrons/protocol_api/core/legacy/legacy_protocol_core.py
@@ -1,12 +1,12 @@
import logging
from typing import Dict, List, Optional, Set, Union, cast, Tuple
-from opentrons_shared_data.deck.dev_types import DeckDefinitionV4, SlotDefV3
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5, SlotDefV3
from opentrons_shared_data.labware.dev_types import LabwareDefinition
from opentrons_shared_data.pipette.dev_types import PipetteNameType
from opentrons_shared_data.robot.dev_types import RobotType
-from opentrons.types import DeckSlotName, Location, Mount, Point
+from opentrons.types import DeckSlotName, StagingSlotName, Location, Mount, Point
from opentrons.util.broker import Broker
from opentrons.hardware_control import SyncHardwareAPI
from opentrons.hardware_control.modules import AbstractModule, ModuleModel, ModuleType
@@ -16,9 +16,9 @@
from opentrons.protocols import labware as labware_definition
from ...labware import Labware
+from ...disposal_locations import TrashBin, WasteChute
from ..._liquid import Liquid
from ..._types import OffDeckType
-from ..._waste_chute import WasteChute
from ..protocol import AbstractProtocol
from ..labware import LabwareLoadParams
@@ -87,6 +87,7 @@ def __init__(
self._loaded_modules: Set["AbstractModule"] = set()
self._module_cores: List[legacy_module_core.LegacyModuleCore] = []
self._labware_cores: List[LegacyLabwareCore] = [self.fixed_trash]
+ self._disposal_locations: List[Union[Labware, TrashBin, WasteChute]] = []
@property
def api_version(self) -> APIVersion:
@@ -131,6 +132,16 @@ def is_simulating(self) -> bool:
"""Returns true if hardware is being simulated."""
return self._sync_hardware.is_simulator # type: ignore[no-any-return]
+ def append_disposal_location(
+ self,
+ disposal_location: Union[Labware, TrashBin, WasteChute],
+ ) -> None:
+ if isinstance(disposal_location, (TrashBin, WasteChute)):
+ raise APIVersionError(
+ "Trash Bin and Waste Chute Disposal locations are not supported in this API Version."
+ )
+ self._disposal_locations.append(disposal_location)
+
def add_labware_definition(
self,
definition: LabwareDefinition,
@@ -152,6 +163,7 @@ def load_labware(
DeckSlotName,
LegacyLabwareCore,
legacy_module_core.LegacyModuleCore,
+ StagingSlotName,
OffDeckType,
],
label: Optional[str],
@@ -167,6 +179,8 @@ def load_labware(
raise APIVersionError(
"Loading a labware onto another labware or adapter is only supported with api version 2.15 and above"
)
+ elif isinstance(location, StagingSlotName):
+ raise APIVersionError("Using a staging deck slot requires apiLevel 2.16.")
deck_slot = (
location if isinstance(location, DeckSlotName) else location.get_deck_slot()
@@ -237,7 +251,12 @@ def load_labware(
def load_adapter(
self,
load_name: str,
- location: Union[DeckSlotName, legacy_module_core.LegacyModuleCore, OffDeckType],
+ location: Union[
+ DeckSlotName,
+ StagingSlotName,
+ legacy_module_core.LegacyModuleCore,
+ OffDeckType,
+ ],
namespace: Optional[str],
version: Optional[int],
) -> LegacyLabwareCore:
@@ -250,6 +269,7 @@ def move_labware(
labware_core: LegacyLabwareCore,
new_location: Union[
DeckSlotName,
+ StagingSlotName,
LegacyLabwareCore,
legacy_module_core.LegacyModuleCore,
OffDeckType,
@@ -358,12 +378,31 @@ def load_instrument(
return new_instr
+ def load_trash_bin(self, slot_name: DeckSlotName, area_name: str) -> TrashBin:
+ raise APIVersionError(
+ "Loading deck configured trash bin is not supported in this API version."
+ )
+
+ def load_ot2_fixed_trash_bin(self) -> None:
+ raise APIVersionError(
+ "Loading deck configured OT-2 fixed trash bin is not supported in this API version."
+ )
+
+ def load_waste_chute(self) -> WasteChute:
+ raise APIVersionError(
+ "Loading waste chute is not supported in this API version."
+ )
+
def get_loaded_instruments(
self,
) -> Dict[Mount, Optional[LegacyInstrumentCore]]:
"""Get a mapping of mount to instrument."""
return self._instruments
+ def get_disposal_locations(self) -> List[Union[Labware, TrashBin, WasteChute]]:
+ """Get valid disposal locations."""
+ return self._disposal_locations
+
def pause(self, msg: Optional[str]) -> None:
"""Pause the protocol."""
self._sync_hardware.pause(PauseType.PAUSE)
@@ -452,21 +491,31 @@ def get_labware_on_labware(
) -> Optional[LegacyLabwareCore]:
assert False, "get_labware_on_labware only supported on engine core"
- def get_deck_definition(self) -> DeckDefinitionV4:
+ def get_deck_definition(self) -> DeckDefinitionV5:
"""Get the geometry definition of the robot's deck."""
assert False, "get_deck_definition only supported on engine core"
- def get_slot_definition(self, slot: DeckSlotName) -> SlotDefV3:
+ def get_slot_definition(
+ self, slot: Union[DeckSlotName, StagingSlotName]
+ ) -> SlotDefV3:
"""Get the slot definition from the robot's deck."""
assert False, "get_slot_definition only supported on engine core"
+ def get_slot_definitions(self) -> Dict[str, SlotDefV3]:
+ """Get all standard slot definitions available in the deck definition."""
+ assert False, "get_slot_definitions only supported on engine core"
+
+ def get_staging_slot_definitions(self) -> Dict[str, SlotDefV3]:
+ """Get all staging slot definitions available in the deck definition."""
+ assert False, "get_staging_slot_definitions only supported on engine core"
+
def get_slot_item(
- self, slot_name: DeckSlotName
+ self, slot_name: Union[DeckSlotName, StagingSlotName]
) -> Union[LegacyLabwareCore, legacy_module_core.LegacyModuleCore, None]:
"""Get the contents of a given slot, if any."""
assert False, "get_slot_item only supported on engine core"
- def get_slot_center(self, slot_name: DeckSlotName) -> Point:
+ def get_slot_center(self, slot_name: Union[DeckSlotName, StagingSlotName]) -> Point:
"""Get the absolute coordinate of a slot's center."""
assert False, "get_slot_center only supported on engine core."
diff --git a/api/src/opentrons/protocol_api/core/legacy/module_geometry.py b/api/src/opentrons/protocol_api/core/legacy/module_geometry.py
index 234a36fdc9a..839154a76d1 100644
--- a/api/src/opentrons/protocol_api/core/legacy/module_geometry.py
+++ b/api/src/opentrons/protocol_api/core/legacy/module_geometry.py
@@ -13,9 +13,11 @@
from typing import TYPE_CHECKING, Optional
import numpy as np
+from numpy.typing import NDArray
from opentrons_shared_data import module
from opentrons_shared_data.module.dev_types import ModuleDefinitionV3
+from opentrons_shared_data.module import OLD_TC_GEN2_LABWARE_OFFSET
from opentrons.types import Location, Point, LocationLabware
from opentrons.motion_planning.adjacent_slots_getters import (
@@ -29,9 +31,9 @@
ModuleModel,
ModuleType,
module_model_from_string,
+ ThermocyclerModuleModel,
)
-
if TYPE_CHECKING:
from opentrons.protocol_api.labware import Labware
@@ -431,14 +433,30 @@ def create_geometry(
The definition should be schema checked before being passed to this
function; all definitions passed here are assumed to be valid.
"""
- pre_transform = np.array(
- (
- definition["labwareOffset"]["x"],
- definition["labwareOffset"]["y"],
- definition["labwareOffset"]["z"],
- 1,
+ module_type = ModuleType(definition["moduleType"])
+ module_model = module_model_from_string(definition["model"])
+ overall_height = definition["dimensions"]["bareOverallHeight"]
+ height_over_labware = definition["dimensions"]["overLabwareHeight"]
+ display_name = definition["displayName"]
+
+ if module_model == ThermocyclerModuleModel.THERMOCYCLER_V2:
+ pre_transform: NDArray[np.double] = np.array(
+ (
+ OLD_TC_GEN2_LABWARE_OFFSET["x"],
+ OLD_TC_GEN2_LABWARE_OFFSET["y"],
+ OLD_TC_GEN2_LABWARE_OFFSET["z"],
+ 1,
+ )
+ )
+ else:
+ pre_transform = np.array(
+ (
+ definition["labwareOffset"]["x"],
+ definition["labwareOffset"]["y"],
+ definition["labwareOffset"]["z"],
+ 1,
+ )
)
- )
if not parent.labware.is_slot:
par = ""
_log.warning(
@@ -462,29 +480,29 @@ def create_geometry(
xform_ser = xforms_ser["labwareOffset"]
# apply the slot transform if any
- xform = np.array(xform_ser)
- xformed = np.dot(xform, pre_transform) # type: ignore[no-untyped-call]
- module_type = ModuleType(definition["moduleType"])
+ xform: NDArray[np.double] = np.array(xform_ser)
+ xformed = np.dot(xform, pre_transform)
+ labware_offset = Point(xformed[0], xformed[1], xformed[2])
if module_type == ModuleType.MAGNETIC or module_type == ModuleType.TEMPERATURE:
return ModuleGeometry(
parent=parent,
- offset=Point(xformed[0], xformed[1], xformed[2]),
- overall_height=definition["dimensions"]["bareOverallHeight"],
- height_over_labware=definition["dimensions"]["overLabwareHeight"],
- model=module_model_from_string(definition["model"]),
- module_type=ModuleType(definition["moduleType"]),
- display_name=definition["displayName"],
+ offset=labware_offset,
+ overall_height=overall_height,
+ height_over_labware=height_over_labware,
+ model=module_model,
+ module_type=module_type,
+ display_name=display_name,
)
elif module_type == ModuleType.THERMOCYCLER:
return ThermocyclerGeometry(
parent=parent,
- offset=Point(xformed[0], xformed[1], xformed[2]),
- overall_height=definition["dimensions"]["bareOverallHeight"],
- height_over_labware=definition["dimensions"]["overLabwareHeight"],
- model=module_model_from_string(definition["model"]),
- module_type=ModuleType(definition["moduleType"]),
- display_name=definition["displayName"],
+ offset=labware_offset,
+ overall_height=overall_height,
+ height_over_labware=height_over_labware,
+ model=module_model,
+ module_type=module_type,
+ display_name=display_name,
lid_height=definition["dimensions"]["lidHeight"],
configuration=(
ThermocyclerConfiguration(configuration)
@@ -495,14 +513,13 @@ def create_geometry(
elif module_type == ModuleType.HEATER_SHAKER:
return HeaterShakerGeometry(
parent=parent,
- offset=Point(xformed[0], xformed[1], xformed[2]),
- overall_height=definition["dimensions"]["bareOverallHeight"],
- height_over_labware=definition["dimensions"]["overLabwareHeight"],
- model=module_model_from_string(definition["model"]),
- module_type=ModuleType(definition["moduleType"]),
- display_name=definition["displayName"],
+ offset=labware_offset,
+ overall_height=overall_height,
+ height_over_labware=height_over_labware,
+ model=module_model,
+ module_type=module_type,
+ display_name=display_name,
)
-
else:
raise AssertionError(f"Module type {module_type} is invalid")
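The refactor above keeps the same homogeneous-coordinate math: the module's labware offset is packed as (x, y, z, 1) and multiplied by the slot's 4x4 transform. A self-contained sketch with made-up numbers (the offset and shim values below are illustrative, not taken from any real definition):

import numpy as np
from numpy.typing import NDArray

labware_offset = {"x": 0.0, "y": 0.0, "z": 10.7}  # hypothetical module labware offset
pre_transform: NDArray[np.double] = np.array(
    (labware_offset["x"], labware_offset["y"], labware_offset["z"], 1)
)
# Identity slot transform with a small x/y shim, standing in for
# xforms_ser["labwareOffset"] from a deck definition.
xform: NDArray[np.double] = np.array(
    [
        [1, 0, 0, 0.3],
        [0, 1, 0, -0.5],
        [0, 0, 1, 0.0],
        [0, 0, 0, 1.0],
    ]
)
xformed = np.dot(xform, pre_transform)
print(tuple(xformed[:3]))  # (0.3, -0.5, 10.7), which becomes the module's Point offset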
diff --git a/api/src/opentrons/protocol_api/core/legacy_simulator/legacy_instrument_core.py b/api/src/opentrons/protocol_api/core/legacy_simulator/legacy_instrument_core.py
index 27ed2a5d438..ffcdda5019c 100644
--- a/api/src/opentrons/protocol_api/core/legacy_simulator/legacy_instrument_core.py
+++ b/api/src/opentrons/protocol_api/core/legacy_simulator/legacy_instrument_core.py
@@ -1,7 +1,7 @@
from __future__ import annotations
import logging
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Optional, Union
from opentrons import types
from opentrons.hardware_control.dev_types import PipetteDict
@@ -21,8 +21,9 @@
UnexpectedTipAttachError,
)
-from ..._waste_chute import WasteChute
+from ...disposal_locations import TrashBin, WasteChute
from opentrons.protocol_api._nozzle_layout import NozzleLayout
+from opentrons.hardware_control.nozzle_manager import NozzleMap
from ..instrument import AbstractInstrument
@@ -122,7 +123,7 @@ def aspirate(
def dispense(
self,
- location: types.Location,
+ location: Union[types.Location, TrashBin, WasteChute],
well_core: Optional[LegacyWellCore],
volume: float,
rate: float,
@@ -130,6 +131,10 @@ def dispense(
in_place: bool,
push_out: Optional[float],
) -> None:
+ if isinstance(location, (TrashBin, WasteChute)):
+ raise APIVersionError(
+ "Dispense in Moveable Trash or Waste Chute are not supported in this API Version."
+ )
if not in_place:
self.move_to(location=location, well_core=well_core)
self._raise_if_no_tip(HardwareAction.DISPENSE.name)
@@ -137,10 +142,14 @@ def dispense(
def blow_out(
self,
- location: types.Location,
+ location: Union[types.Location, TrashBin, WasteChute],
well_core: Optional[LegacyWellCore],
in_place: bool,
) -> None:
+ if isinstance(location, (TrashBin, WasteChute)):
+ raise APIVersionError(
+ "Blow Out in Moveable Trash or Waste Chute are not supported in this API Version."
+ )
if not in_place:
self.move_to(location=location, well_core=well_core)
self._raise_if_no_tip(HardwareAction.BLOWOUT.name)
@@ -253,10 +262,15 @@ def drop_tip(
f"Could not return tip to {labware_core.get_display_name()}"
)
- def drop_tip_in_waste_chute(
- self, waste_chute: WasteChute, home_after: Optional[bool]
+ def drop_tip_in_disposal_location(
+ self,
+ disposal_location: Union[TrashBin, WasteChute],
+ home_after: Optional[bool],
+ alternate_tip_drop: bool = False,
) -> None:
- raise APIVersionError("Waste chutes are not supported in this PAPI version.")
+ raise APIVersionError(
+ "Dropping tips in a trash bin or waste chute is not supported in this API Version."
+ )
def home(self) -> None:
self._protocol_interface.set_last_location(None)
@@ -266,13 +280,18 @@ def home_plunger(self) -> None:
def move_to(
self,
- location: types.Location,
+ location: Union[types.Location, TrashBin, WasteChute],
well_core: Optional[LegacyWellCore] = None,
force_direct: bool = False,
minimum_z_height: Optional[float] = None,
speed: Optional[float] = None,
) -> None:
"""Simulation of only the motion planning portion of move_to."""
+ if isinstance(location, (TrashBin, WasteChute)):
+ raise APIVersionError(
+ "Move To Trash Bin and Waste Chute are not supported in this API Version."
+ )
+
self.flag_unsafe_move(location)
last_location = self._protocol_interface.get_last_location()
@@ -445,3 +464,19 @@ def configure_nozzle_layout(
) -> None:
"""This will never be called because it was added in API 2.15."""
pass
+
+ def get_active_channels(self) -> int:
+ """This will never be called because it was added in API 2.16."""
+ assert False, "get_active_channels only supported in API 2.16 & later"
+
+ def get_nozzle_map(self) -> NozzleMap:
+ """This will never be called because it was added in API 2.18."""
+ assert False, "get_nozzle_map only supported in API 2.18 & later"
+
+ def is_tip_tracking_available(self) -> bool:
+ # Tip tracking is always available in legacy context
+ return True
+
+ def retract(self) -> None:
+ """Retract this instrument to the top of the gantry."""
+ self._protocol_interface.get_hardware.retract(self._mount) # type: ignore [attr-defined]
diff --git a/api/src/opentrons/protocol_api/core/legacy_simulator/legacy_protocol_core.py b/api/src/opentrons/protocol_api/core/legacy_simulator/legacy_protocol_core.py
index 93cebe167aa..9fb9d1a0f51 100644
--- a/api/src/opentrons/protocol_api/core/legacy_simulator/legacy_protocol_core.py
+++ b/api/src/opentrons/protocol_api/core/legacy_simulator/legacy_protocol_core.py
@@ -2,6 +2,11 @@
from typing import Dict, Optional
from opentrons_shared_data.pipette.dev_types import PipetteNameType
+from opentrons_shared_data.pipette.pipette_load_name_conversions import (
+ convert_to_pipette_name_type,
+)
+from opentrons_shared_data.pipette.types import PipetteGenerationType
+
from opentrons.types import Mount
from ..protocol import AbstractProtocol
@@ -27,6 +32,16 @@ def load_instrument( # type: ignore[override]
self, instrument_name: PipetteNameType, mount: Mount
) -> LegacyInstrumentCoreSimulator:
"""Create a simulating instrument context."""
+ pipette_generation = convert_to_pipette_name_type(
+ instrument_name.value
+ ).pipette_generation
+
+ if pipette_generation not in [
+ PipetteGenerationType.GEN1,
+ PipetteGenerationType.GEN2,
+ ]:
+ raise ValueError(f"{instrument_name} is not a valid OT-2 pipette.")
+
existing_instrument = self._instruments[mount]
if (
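The simulator change above rejects non-OT-2 pipettes by inspecting the pipette generation. A minimal sketch of that check in isolation; the helper name is ours, and the example load names and expected outputs are assumptions based on the guard and error message above:

from opentrons_shared_data.pipette.pipette_load_name_conversions import (
    convert_to_pipette_name_type,
)
from opentrons_shared_data.pipette.types import PipetteGenerationType

def is_ot2_pipette(pipette_name: str) -> bool:
    # Mirrors the guard above: only GEN1/GEN2 pipettes are valid on an OT-2.
    generation = convert_to_pipette_name_type(pipette_name).pipette_generation
    return generation in (PipetteGenerationType.GEN1, PipetteGenerationType.GEN2)

print(is_ot2_pipette("p300_single_gen2"))   # expected True: an OT-2 GEN2 pipette
print(is_ot2_pipette("p1000_single_flex"))  # expected False: a Flex pipette is rejected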
diff --git a/api/src/opentrons/protocol_api/core/protocol.py b/api/src/opentrons/protocol_api/core/protocol.py
index 596cb9c6da4..a554c14e306 100644
--- a/api/src/opentrons/protocol_api/core/protocol.py
+++ b/api/src/opentrons/protocol_api/core/protocol.py
@@ -3,14 +3,14 @@
from __future__ import annotations
from abc import abstractmethod, ABC
-from typing import Generic, List, Optional, Union, Tuple
+from typing import Generic, List, Optional, Union, Tuple, Dict, TYPE_CHECKING
-from opentrons_shared_data.deck.dev_types import DeckDefinitionV4, SlotDefV3
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5, SlotDefV3
from opentrons_shared_data.pipette.dev_types import PipetteNameType
from opentrons_shared_data.labware.dev_types import LabwareDefinition
from opentrons_shared_data.robot.dev_types import RobotType
-from opentrons.types import DeckSlotName, Location, Mount, Point
+from opentrons.types import DeckSlotName, StagingSlotName, Location, Mount, Point
from opentrons.hardware_control import SyncHardwareAPI
from opentrons.hardware_control.modules.types import ModuleModel
from opentrons.protocols.api_support.util import AxisMaxSpeeds
@@ -19,8 +19,11 @@
from .labware import LabwareCoreType, LabwareLoadParams
from .module import ModuleCoreType
from .._liquid import Liquid
-from .._waste_chute import WasteChute
from .._types import OffDeckType
+from ..disposal_locations import TrashBin, WasteChute
+
+if TYPE_CHECKING:
+ from ..labware import Labware
class AbstractProtocol(
@@ -57,11 +60,21 @@ def add_labware_definition(
"""Add a labware definition to the set of loadable definitions."""
...
+ @abstractmethod
+ def append_disposal_location(
+ self,
+ disposal_location: Union[Labware, TrashBin, WasteChute],
+ ) -> None:
+ """Append a disposal location object to the core"""
+ ...
+
@abstractmethod
def load_labware(
self,
load_name: str,
- location: Union[DeckSlotName, LabwareCoreType, ModuleCoreType, OffDeckType],
+ location: Union[
+ DeckSlotName, StagingSlotName, LabwareCoreType, ModuleCoreType, OffDeckType
+ ],
label: Optional[str],
namespace: Optional[str],
version: Optional[int],
@@ -73,7 +86,7 @@ def load_labware(
def load_adapter(
self,
load_name: str,
- location: Union[DeckSlotName, ModuleCoreType, OffDeckType],
+ location: Union[DeckSlotName, StagingSlotName, ModuleCoreType, OffDeckType],
namespace: Optional[str],
version: Optional[int],
) -> LabwareCoreType:
@@ -86,7 +99,12 @@ def move_labware(
self,
labware_core: LabwareCoreType,
new_location: Union[
- DeckSlotName, LabwareCoreType, ModuleCoreType, OffDeckType, WasteChute
+ DeckSlotName,
+ StagingSlotName,
+ LabwareCoreType,
+ ModuleCoreType,
+ OffDeckType,
+ WasteChute,
],
use_gripper: bool,
pause_for_manual_move: bool,
@@ -110,6 +128,18 @@ def load_instrument(
) -> InstrumentCoreType:
...
+ @abstractmethod
+ def load_trash_bin(self, slot_name: DeckSlotName, area_name: str) -> TrashBin:
+ ...
+
+ @abstractmethod
+ def load_ot2_fixed_trash_bin(self) -> None:
+ ...
+
+ @abstractmethod
+ def load_waste_chute(self) -> WasteChute:
+ ...
+
@abstractmethod
def pause(self, msg: Optional[str]) -> None:
...
@@ -130,6 +160,10 @@ def home(self) -> None:
def set_rail_lights(self, on: bool) -> None:
...
+ @abstractmethod
+ def get_disposal_locations(self) -> List[Union[Labware, TrashBin, WasteChute]]:
+ ...
+
@abstractmethod
def get_rail_lights_on(self) -> bool:
...
@@ -154,17 +188,26 @@ def set_last_location(
...
@abstractmethod
- def get_deck_definition(self) -> DeckDefinitionV4:
+ def get_deck_definition(self) -> DeckDefinitionV5:
"""Get the geometry definition of the robot's deck."""
- # TODO(jbl 10-30-2023) this method may no longer need to exist post deck config work being completed
@abstractmethod
- def get_slot_definition(self, slot: DeckSlotName) -> SlotDefV3:
+ def get_slot_definition(
+ self, slot: Union[DeckSlotName, StagingSlotName]
+ ) -> SlotDefV3:
"""Get the slot definition from the robot's deck."""
+ @abstractmethod
+ def get_slot_definitions(self) -> Dict[str, SlotDefV3]:
+ """Get all standard slot definitions available in the deck definition."""
+
+ @abstractmethod
+ def get_staging_slot_definitions(self) -> Dict[str, SlotDefV3]:
+ """Get all staging slot definitions available in the deck definition."""
+
@abstractmethod
def get_slot_item(
- self, slot_name: DeckSlotName
+ self, slot_name: Union[DeckSlotName, StagingSlotName]
) -> Union[LabwareCoreType, ModuleCoreType, None]:
"""Get the contents of a given slot, if any."""
@@ -181,7 +224,7 @@ def get_labware_on_labware(
"""Get the labware on a given labware, if any."""
@abstractmethod
- def get_slot_center(self, slot_name: DeckSlotName) -> Point:
+ def get_slot_center(self, slot_name: Union[DeckSlotName, StagingSlotName]) -> Point:
"""Get the absolute coordinate of a slot's center."""
@abstractmethod
diff --git a/api/src/opentrons/protocol_api/create_protocol_context.py b/api/src/opentrons/protocol_api/create_protocol_context.py
index 5a64e70cf99..b01d4bbbbe0 100644
--- a/api/src/opentrons/protocol_api/create_protocol_context.py
+++ b/api/src/opentrons/protocol_api/create_protocol_context.py
@@ -4,7 +4,6 @@
from opentrons_shared_data.labware.dev_types import LabwareDefinition
-from opentrons.config import feature_flags
from opentrons.hardware_control import (
HardwareControlAPI,
ThreadManager,
@@ -15,10 +14,14 @@
from opentrons.protocol_engine import ProtocolEngine
from opentrons.protocol_engine.clients import SyncClient, ChildThreadTransport
from opentrons.protocols.api_support.types import APIVersion
+from opentrons.protocols.api_support.deck_type import (
+ should_load_fixed_trash_area_for_python_protocol,
+)
from opentrons.protocols.api_support.definitions import MAX_SUPPORTED_VERSION
from .protocol_context import ProtocolContext
from .deck import Deck
+from .disposal_locations import TrashBin
from .core.common import ProtocolCore as AbstractProtocolCore
from .core.legacy.deck import Deck as LegacyDeck
@@ -119,8 +122,7 @@ def create_protocol_context(
sync_hardware=sync_hardware,
)
- # TODO(mc, 2022-8-22): remove `disable_fast_protocol_upload`
- elif use_simulating_core and not feature_flags.disable_fast_protocol_upload():
+ elif use_simulating_core:
legacy_deck = LegacyDeck(deck_type=deck_type)
core = LegacyProtocolCoreSimulator(
sync_hardware=sync_hardware,
@@ -148,7 +150,7 @@ def create_protocol_context(
# this swap may happen once `ctx.move_labware` off-deck is implemented
deck = None if isinstance(core, ProtocolCore) else cast(Deck, core.get_deck())
- return ProtocolContext(
+ context = ProtocolContext(
api_version=api_version,
# TODO(mm, 2023-05-11): This cast shouldn't be necessary.
# Fix this by making the appropriate TypeVars covariant?
@@ -158,3 +160,18 @@ def create_protocol_context(
deck=deck,
bundled_data=bundled_data,
)
+ # If we're loading an engine-based core into the context on an OT-2 at API level 2.16 or above, we need to
+ # insert a fixed-trash addressable area into the protocol engine, for correctness in anything that relies on
+ # knowing which addressable areas have been loaded (and in any checks involving trash geometry). Because the
+ # core method that does this relies on the sync client, and this code runs in the main thread (where that
+ # call would deadlock), we call the protocol engine method directly here, where we have access to it.
+ if (
+ protocol_engine is not None
+ and should_load_fixed_trash_area_for_python_protocol(
+ api_version=api_version,
+ robot_type=protocol_engine.state_view.config.robot_type,
+ )
+ ):
+ assert isinstance(context.fixed_trash, TrashBin)
+ protocol_engine.add_addressable_area(context.fixed_trash.area_name)
+ return context
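A small sketch of the gate used above, under the assumption (taken from the comment) that it returns True only for OT-2 robots at apiLevel 2.16 or higher; the concrete return values shown are expectations, not verified outputs:

from opentrons.protocols.api_support.types import APIVersion
from opentrons.protocols.api_support.deck_type import (
    should_load_fixed_trash_area_for_python_protocol,
)

# Expected True: engine-based OT-2 protocol at or above the 2.16 gate.
print(
    should_load_fixed_trash_area_for_python_protocol(
        api_version=APIVersion(2, 16), robot_type="OT-2 Standard"
    )
)
# Expected False: Flex protocols get their trash from the deck configuration instead.
print(
    should_load_fixed_trash_area_for_python_protocol(
        api_version=APIVersion(2, 16), robot_type="OT-3 Standard"
    )
)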
diff --git a/api/src/opentrons/protocol_api/deck.py b/api/src/opentrons/protocol_api/deck.py
index c5c9fcb2368..338b1bbecf6 100644
--- a/api/src/opentrons/protocol_api/deck.py
+++ b/api/src/opentrons/protocol_api/deck.py
@@ -7,7 +7,7 @@
from opentrons.motion_planning import adjacent_slots_getters
from opentrons.protocols.api_support.types import APIVersion
from opentrons.protocols.api_support.util import APIVersionError
-from opentrons.types import DeckLocation, DeckSlotName, Location, Point
+from opentrons.types import DeckLocation, DeckSlotName, StagingSlotName, Location, Point
from opentrons_shared_data.robot.dev_types import RobotType
@@ -22,6 +22,8 @@
DeckItem = Union[Labware, ModuleContext]
+STAGING_SLOT_VERSION_GATE = APIVersion(2, 16)
+
@dataclass(frozen=True)
class CalibrationPosition:
@@ -40,11 +42,12 @@ class CalibrationPosition:
def _get_slot_name(
slot_key: DeckLocation, api_version: APIVersion, robot_type: RobotType
-) -> DeckSlotName:
+) -> Union[DeckSlotName, StagingSlotName]:
try:
- return validation.ensure_and_convert_deck_slot(
+ slot = validation.ensure_and_convert_deck_slot(
slot_key, api_version, robot_type
)
+ return slot
except (TypeError, ValueError) as error:
raise KeyError(slot_key) from error
@@ -65,47 +68,14 @@ def __init__(
self._core_map = core_map
self._api_version = api_version
- # TODO(jbl 10-30-2023) this hardcoding should be removed once slots are refactored to work with deck config
- if self._protocol_core.robot_type == "OT-2 Standard":
- ordered_slot_ids = [
- "1",
- "2",
- "3",
- "4",
- "5",
- "6",
- "7",
- "8",
- "9",
- "10",
- "11",
- "12",
- ]
- else:
- ordered_slot_ids = [
- "D1",
- "D2",
- "D3",
- "C1",
- "C2",
- "C3",
- "B1",
- "B2",
- "B3",
- "A1",
- "A2",
- "A3",
- ]
-
- self._slot_definitions_by_name = {
- slot_id: self._protocol_core.get_slot_definition(
- DeckSlotName.from_primitive(slot_id)
- )
- for slot_id in ordered_slot_ids
- }
-
deck_locations = protocol_core.get_deck_definition()["locations"]
+ self._slot_definitions_by_name = self._protocol_core.get_slot_definitions()
+ if self._api_version >= STAGING_SLOT_VERSION_GATE:
+ self._slot_definitions_by_name.update(
+ self._protocol_core.get_staging_slot_definitions()
+ )
+
self._calibration_positions = [
CalibrationPosition(
id=point["id"],
@@ -181,7 +151,10 @@ def right_of(self, slot: DeckLocation) -> Optional[DeckItem]:
slot_name = _get_slot_name(
slot, self._api_version, self._protocol_core.robot_type
)
- east_slot = adjacent_slots_getters.get_east_slot(slot_name.as_int())
+ if isinstance(slot_name, DeckSlotName):
+ east_slot = adjacent_slots_getters.get_east_slot(slot_name.as_int())
+ else:
+ east_slot = None
return self[east_slot] if east_slot is not None else None
@@ -191,7 +164,11 @@ def left_of(self, slot: DeckLocation) -> Optional[DeckItem]:
slot_name = _get_slot_name(
slot, self._api_version, self._protocol_core.robot_type
)
- west_slot = adjacent_slots_getters.get_west_slot(slot_name.as_int())
+ west_slot: Optional[DeckLocation]
+ if isinstance(slot_name, DeckSlotName):
+ west_slot = adjacent_slots_getters.get_west_slot(slot_name.as_int())
+ else:
+ west_slot = adjacent_slots_getters.get_west_of_staging_slot(slot_name).id
return self[west_slot] if west_slot is not None else None
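With the deck now pulling slot definitions (and, at apiLevel 2.16 and above, staging slot definitions) from the protocol core, Flex staging slots become addressable by name. A hedged example; it assumes a Flex at apiLevel 2.16 and uses "C4" as a representative staging slot with a common Opentrons plate load name:

from opentrons import protocol_api

requirements = {"robotType": "Flex", "apiLevel": "2.16"}

def run(protocol: protocol_api.ProtocolContext) -> None:
    plate = protocol.load_labware("nest_96_wellplate_200ul_flat", "C4")  # staging slot
    # The deck resolves "C4" through get_staging_slot_definitions(), and left_of()
    # maps a staging slot to its west deck slot (C3 here), returning whatever is
    # loaded there, or None.
    print(protocol.deck["C4"])
    print(protocol.deck.left_of("C4"))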
diff --git a/api/src/opentrons/protocol_api/disposal_locations.py b/api/src/opentrons/protocol_api/disposal_locations.py
new file mode 100644
index 00000000000..77c9b4e76d1
--- /dev/null
+++ b/api/src/opentrons/protocol_api/disposal_locations.py
@@ -0,0 +1,244 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing_extensions import Protocol as TypingProtocol
+
+from opentrons.types import DeckSlotName
+from opentrons.protocols.api_support.types import APIVersion
+from opentrons.protocols.api_support.util import requires_version
+from opentrons.protocol_engine.clients import SyncClient
+
+
+# TODO(jbl 2024-02-26) these are hardcoded here since there is a one-to-many relationship from
+# addressable area names to cutout fixture ids. Currently this is not an issue for the trash or waste chute
+# (the trash has only one fixture that provides it, and all waste chute fixtures are the same height).
+# The ultimate fix for this is a multiple-pass analysis, so for now these are hardcoded to avoid
+# writing cumbersome guessing logic for area name -> fixture name while still providing a direct link to
+# the numbers in shared data.
+_TRASH_BIN_CUTOUT_FIXTURE = "trashBinAdapter"
+_TRASH_BIN_OT2_CUTOUT_FIXTURE = "fixedTrashSlot"
+_WASTE_CHUTE_CUTOUT_FIXTURE = "wasteChuteRightAdapterCovered"
+
+
+@dataclass(frozen=True)
+class DisposalOffset:
+ x: float
+ y: float
+ z: float
+
+
+class _DisposalLocation(TypingProtocol):
+ """Abstract class for disposal location."""
+
+ def top(self, x: float = 0, y: float = 0, z: float = 0) -> _DisposalLocation:
+ """Returns a disposal location with a user set offset."""
+ ...
+
+ @property
+ def offset(self) -> DisposalOffset:
+ """Offset of the disposal location.
+
+ :meta private:
+
+ This is intended for Opentrons internal use only and is not a guaranteed API.
+ """
+ ...
+
+ @property
+ def location(self) -> DeckSlotName:
+ """Location of the disposal location.
+
+ :meta private:
+
+ This is intended for Opentrons internal use only and is not a guaranteed API.
+ """
+ ...
+
+ @property
+ def area_name(self) -> str:
+ """Addressable area name of the disposal location.
+
+ :meta private:
+
+ This is intended for Opentrons internal use only and is not a guaranteed API.
+ """
+ ...
+
+ @property
+ def height(self) -> float:
+ """Height of the disposal location.
+
+ :meta private:
+
+ This is intended for Opentrons internal use only and is not a guaranteed API.
+ """
+ ...
+
+
+class TrashBin(_DisposalLocation):
+ """Represents a Flex or OT-2 trash bin.
+
+ See :py:meth:`.ProtocolContext.load_trash_bin`.
+ """
+
+ def __init__(
+ self,
+ location: DeckSlotName,
+ addressable_area_name: str,
+ engine_client: SyncClient,
+ api_version: APIVersion,
+ offset: DisposalOffset = DisposalOffset(x=0, y=0, z=0),
+ ) -> None:
+ self._location = location
+ self._addressable_area_name = addressable_area_name
+ self._offset = offset
+ self._api_version = api_version
+ self._engine_client = engine_client
+ if self._engine_client.state.config.robot_type == "OT-2 Standard":
+ self._cutout_fixture_name = _TRASH_BIN_OT2_CUTOUT_FIXTURE
+ else:
+ self._cutout_fixture_name = _TRASH_BIN_CUTOUT_FIXTURE
+
+ @requires_version(2, 18)
+ def top(self, x: float = 0, y: float = 0, z: float = 0) -> TrashBin:
+ """Add a location offset to a trash bin.
+
+ The default location (``x``, ``y``, and ``z`` all set to ``0``) is the center of
+ the bin on the x- and y-axes, and slightly below its physical top on the z-axis.
+
+ Offsets can be positive or negative and are measured in mm.
+ See :ref:`protocol-api-deck-coords`.
+ """
+ return TrashBin(
+ location=self._location,
+ addressable_area_name=self._addressable_area_name,
+ engine_client=self._engine_client,
+ api_version=self._api_version,
+ offset=DisposalOffset(x=x, y=y, z=z),
+ )
+
+ @property
+ def offset(self) -> DisposalOffset:
+ """Current offset of the trash bin.
+
+ :meta private:
+
+ This is intended for Opentrons internal use only and is not a guaranteed API.
+ """
+ return self._offset
+
+ @property
+ def location(self) -> DeckSlotName:
+ """Location of the trash bin.
+
+ :meta private:
+
+ This is intended for Opentrons internal use only and is not a guaranteed API.
+ """
+ return self._location
+
+ @property
+ def area_name(self) -> str:
+ """Addressable area name of the trash bin.
+
+ :meta private:
+
+ This is intended for Opentrons internal use only and is not a guaranteed API.
+ """
+ return self._addressable_area_name
+
+ @property
+ def height(self) -> float:
+ """Height of the trash bin.
+
+ :meta private:
+
+ This is intended for Opentrons internal use only and is not a guaranteed API.
+ """
+ return self._engine_client.state.addressable_areas.get_fixture_height(
+ self._cutout_fixture_name
+ )
+
+
+class WasteChute(_DisposalLocation):
+ """Represents a Flex waste chute.
+
+ See :py:meth:`.ProtocolContext.load_waste_chute`.
+ """
+
+ def __init__(
+ self,
+ engine_client: SyncClient,
+ api_version: APIVersion,
+ offset: DisposalOffset = DisposalOffset(x=0, y=0, z=0),
+ ) -> None:
+ self._engine_client = engine_client
+ self._api_version = api_version
+ self._offset = offset
+
+ @requires_version(2, 18)
+ def top(self, x: float = 0, y: float = 0, z: float = 0) -> WasteChute:
+ """Add a location offset to a waste chute.
+
+ The default location (``x``, ``y``, and ``z`` all set to ``0``) is the center of
+ the chute's opening on the x- and y-axes, and slightly below its physical top
+ on the z-axis. See :ref:`configure-waste-chute` for more information on possible
+ configurations of the chute.
+
+ Offsets can be positive or negative and are measured in mm.
+ See :ref:`protocol-api-deck-coords`.
+ """
+ return WasteChute(
+ engine_client=self._engine_client,
+ api_version=self._api_version,
+ offset=DisposalOffset(x=x, y=y, z=z),
+ )
+
+ @property
+ def offset(self) -> DisposalOffset:
+ """Current offset of the waste chute.
+
+ :meta private:
+
+ This is intended for Opentrons internal use only and is not a guaranteed API.
+ """
+ return self._offset
+
+ @property
+ def location(self) -> DeckSlotName:
+ """Location of the waste chute.
+
+ :meta private:
+
+ This is intended for Opentrons internal use only and is not a guaranteed API.
+ """
+ return DeckSlotName.SLOT_D3
+
+ @property
+ def area_name(self) -> str:
+ """Addressable area name of the waste chute.
+
+ :meta private:
+
+ This is intended for Opentrons internal use only and is not a guaranteed API.
+ """
+ # TODO(jbl 2024-02-06) this is hardcoded here and should be removed when a multiple pass analysis exists
+ #
+ # We want to tell Protocol Engine that there's a waste chute in the waste chute location when it's loaded,
+ # so analysis can prevent the user from doing anything that would collide with it. At the same time, we
+ # do not want to create a false negative when it comes to addressable area conflict. We therefore use the
+ # addressable area `1ChannelWasteChute` because every waste chute cutout fixture provides it and it will
+ # provide the engine with the information it needs.
+ return "1ChannelWasteChute"
+
+ @property
+ def height(self) -> float:
+ """Height of the waste chute.
+
+ :meta private:
+
+ This is intended for Opentrons internal use only and is not a guaranteed API.
+ """
+ return self._engine_client.state.addressable_areas.get_fixture_height(
+ _WASTE_CHUTE_CUTOUT_FIXTURE
+ )
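Since both disposal classes expose top() from apiLevel 2.18, pipetting commands can target an offset point within them. A usage sketch, assuming apiLevel 2.18 on a Flex; the offsets chosen are arbitrary examples:

from opentrons import protocol_api

requirements = {"robotType": "Flex", "apiLevel": "2.18"}

def run(protocol: protocol_api.ProtocolContext) -> None:
    trash = protocol.load_trash_bin("A3")
    chute = protocol.load_waste_chute()
    tips = protocol.load_labware("opentrons_flex_96_tiprack_200ul", "D1")
    pipette = protocol.load_instrument("flex_1channel_1000", "left", tip_racks=[tips])
    pipette.pick_up_tip()
    # Offsets are in mm from the default top-center reference described in the docstrings.
    pipette.blow_out(trash.top(z=-10))
    pipette.drop_tip(chute.top(x=2, y=2))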
diff --git a/api/src/opentrons/protocol_api/instrument_context.py b/api/src/opentrons/protocol_api/instrument_context.py
index 64cef355d7f..68e39888405 100644
--- a/api/src/opentrons/protocol_api/instrument_context.py
+++ b/api/src/opentrons/protocol_api/instrument_context.py
@@ -1,18 +1,19 @@
from __future__ import annotations
import logging
-from contextlib import nullcontext
-from typing import Any, List, Optional, Sequence, Union, cast
+from contextlib import ExitStack
+from typing import Any, List, Optional, Sequence, Union, cast, Dict
from opentrons_shared_data.errors.exceptions import (
CommandPreconditionViolated,
CommandParameterLimitViolated,
+ UnexpectedTipRemovalError,
)
from opentrons.legacy_broker import LegacyBroker
from opentrons.hardware_control.dev_types import PipetteDict
from opentrons import types
-from opentrons.commands import commands as cmds
+from opentrons.legacy_commands import commands as cmds
-from opentrons.commands import publisher
+from opentrons.legacy_commands import publisher
from opentrons.protocols.advanced_control.mix import mix_from_kwargs
from opentrons.protocols.advanced_control import transfers
@@ -26,13 +27,13 @@
requires_version,
APIVersionError,
)
-from opentrons_shared_data.errors.exceptions import UnexpectedTipRemovalError
+from opentrons.hardware_control.nozzle_manager import NozzleConfigurationType
from .core.common import InstrumentCore, ProtocolCore
from .core.engine import ENGINE_CORE_API_VERSION
from .core.legacy.legacy_instrument_core import LegacyInstrumentCore
from .config import Clearances
-from ._waste_chute import WasteChute
+from .disposal_locations import TrashBin, WasteChute
from ._nozzle_layout import NozzleLayout
from . import labware, validation
@@ -55,6 +56,12 @@
"""The version after which the pick-up tip procedure deprecates presses and increment arguments."""
_DROP_TIP_LOCATION_ALTERNATING_ADDED_IN = APIVersion(2, 15)
"""The version after which a drop-tip-into-trash procedure drops tips in different alternating locations within the trash well."""
+_PARTIAL_NOZZLE_CONFIGURATION_ADDED_IN = APIVersion(2, 16)
+"""The version after which a partial nozzle configuration became available for the 96 Channel Pipette."""
+_PARTIAL_NOZZLE_CONFIGURATION_AUTOMATIC_TIP_TRACKING_IN = APIVersion(2, 18)
+"""The version after which automatic tip tracking supported partially configured nozzle layouts."""
+_DISPOSAL_LOCATION_OFFSET_ADDED_IN = APIVersion(2, 18)
+"""The version after which offsets for deck configured trash containers and changes to alternating tip drop behavior were introduced."""
class InstrumentContext(publisher.CommandPublisher):
@@ -86,7 +93,7 @@ def __init__(
broker: LegacyBroker,
api_version: APIVersion,
tip_racks: List[labware.Labware],
- trash: Optional[labware.Labware],
+ trash: Optional[Union[labware.Labware, TrashBin, WasteChute]],
requested_as: str,
) -> None:
super().__init__(broker)
@@ -100,16 +107,17 @@ def __init__(
default_aspirate=_DEFAULT_ASPIRATE_CLEARANCE,
default_dispense=_DEFAULT_DISPENSE_CLEARANCE,
)
-
- self._trash = trash
+ self._user_specified_trash: Union[
+ labware.Labware, TrashBin, WasteChute, None
+ ] = trash
self.requested_as = requested_as
- @property # type: ignore
+ @property
@requires_version(2, 0)
def api_version(self) -> APIVersion:
return self._api_version
- @property # type: ignore
+ @property
@requires_version(2, 0)
def starting_tip(self) -> Union[labware.Well, None]:
"""
@@ -137,7 +145,7 @@ def reset_tipracks(self) -> None:
tiprack.reset()
self.starting_tip = None
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def default_speed(self) -> float:
"""The speed at which the robot's gantry moves in mm/s.
@@ -165,9 +173,15 @@ def aspirate(
See :ref:`new-aspirate` for more details and examples.
- :param volume: The volume to aspirate, measured in µL. If 0 or unspecified,
+ :param volume: The volume to aspirate, measured in µL. If unspecified,
defaults to the maximum volume for the pipette and its currently
attached tip.
+
+ If ``aspirate`` is called with a volume of precisely 0, its behavior
+ depends on the API level of the protocol. On API levels below 2.16,
+ it will behave the same as a volume of ``None``/unspecified: aspirate
+ until the pipette is full. On API levels at or above 2.16, no liquid
+ will be aspirated.
:type volume: int or float
:param location: Tells the robot where to aspirate from. The location can be
a :py:class:`.Well` or a :py:class:`.Location`.
@@ -228,7 +242,10 @@ def aspirate(
well = target.well
if isinstance(target, validation.PointTarget):
move_to_location = target.location
-
+ if isinstance(target, (TrashBin, WasteChute)):
+ raise ValueError(
+ "Trash Bin and Waste Chute are not acceptable location parameters for Aspirate commands."
+ )
if self.api_version >= APIVersion(2, 11):
instrument.validate_takes_liquid(
location=move_to_location,
@@ -236,7 +253,10 @@ def aspirate(
reject_adapter=self.api_version >= APIVersion(2, 15),
)
- c_vol = self._core.get_available_volume() if not volume else volume
+ if self.api_version >= APIVersion(2, 16):
+ c_vol = self._core.get_available_volume() if volume is None else volume
+ else:
+ c_vol = self._core.get_available_volume() if not volume else volume
flow_rate = self._core.get_aspirate_flow_rate(rate)
with publisher.publish_context(
@@ -261,10 +281,12 @@ def aspirate(
return self
@requires_version(2, 0)
- def dispense(
+ def dispense( # noqa: C901
self,
volume: Optional[float] = None,
- location: Optional[Union[types.Location, labware.Well]] = None,
+ location: Optional[
+ Union[types.Location, labware.Well, TrashBin, WasteChute]
+ ] = None,
rate: float = 1.0,
push_out: Optional[float] = None,
) -> InstrumentContext:
@@ -273,9 +295,20 @@ def dispense(
See :ref:`new-dispense` for more details and examples.
- :param volume: The volume to dispense, measured in µL. If 0 or unspecified,
- defaults to :py:attr:`current_volume`. If only a volume is
- passed, the pipette will dispense from its current position.
+ :param volume: The volume to dispense, measured in µL.
+
+ - If unspecified or ``None``, dispense the :py:attr:`current_volume`.
+
+ - If 0, the behavior of ``dispense()`` depends on the API level
+ of the protocol. In API version 2.16 and earlier, dispense all
+ liquid in the pipette (same as unspecified or ``None``). In API
+ version 2.17 and later, dispense no liquid.
+
+ - If greater than :py:obj:`.current_volume`, the behavior of
+ ``dispense()`` depends on the API level of the protocol. In API
+ version 2.16 and earlier, dispense all liquid in the pipette.
+ In API version 2.17 and later, raise an error.
+
:type volume: int or float
:param location: Tells the robot where to dispense liquid held in the pipette.
@@ -307,6 +340,8 @@ def dispense(
:type rate: float
:param push_out: Continue past the plunger bottom to help ensure all liquid
leaves the tip. Measured in µL. The default value is ``None``.
+
+ See :ref:`push-out-dispense` for details.
:type push_out: float
:returns: This instance.
@@ -318,6 +353,11 @@ def dispense(
``location``, specify it as a keyword argument:
``pipette.dispense(location=plate['A1'])``.
+ .. versionchanged:: 2.15
+ Added the ``push_out`` parameter.
+
+ .. versionchanged:: 2.17
+ Behavior of the ``volume`` parameter.
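+
+        For example, a minimal usage sketch (``pipette`` and ``plate`` are assumed
+        names for a loaded pipette that currently holds 100 µL and a loaded well
+        plate):
+
+        .. code-block:: python
+
+            pipette.dispense(50, plate["B1"])        # dispense 50 µL into B1
+            pipette.dispense(location=plate["B1"])   # dispense the remaining 50 µL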
"""
if self.api_version < APIVersion(2, 15) and push_out:
raise APIVersionError(
@@ -356,17 +396,44 @@ def dispense(
if isinstance(target, validation.PointTarget):
move_to_location = target.location
- if self.api_version >= APIVersion(2, 11):
+ if self.api_version >= APIVersion(2, 11) and not isinstance(
+ target, (TrashBin, WasteChute)
+ ):
instrument.validate_takes_liquid(
location=move_to_location,
reject_module=self.api_version >= APIVersion(2, 13),
reject_adapter=self.api_version >= APIVersion(2, 15),
)
- c_vol = self._core.get_current_volume() if not volume else volume
+ if self.api_version >= APIVersion(2, 16):
+ c_vol = self._core.get_current_volume() if volume is None else volume
+ else:
+ c_vol = self._core.get_current_volume() if not volume else volume
flow_rate = self._core.get_dispense_flow_rate(rate)
+ if isinstance(target, (TrashBin, WasteChute)):
+ with publisher.publish_context(
+ broker=self.broker,
+ command=cmds.dispense_in_disposal_location(
+ instrument=self,
+ volume=c_vol,
+ location=target,
+ rate=rate,
+ flow_rate=flow_rate,
+ ),
+ ):
+ self._core.dispense(
+ volume=c_vol,
+ rate=rate,
+ location=target,
+ well_core=None,
+ flow_rate=flow_rate,
+ in_place=False,
+ push_out=push_out,
+ )
+ return self
+
with publisher.publish_context(
broker=self.broker,
command=cmds.dispense(
@@ -403,8 +470,14 @@ def mix(
See :ref:`mix` for examples.
:param repetitions: Number of times to mix (default is 1).
- :param volume: The volume to mix, measured in µL. If 0 or unspecified, defaults
+ :param volume: The volume to mix, measured in µL. If unspecified, defaults
to the maximum volume for the pipette and its attached tip.
+
+ If ``mix`` is called with a volume of precisely 0, its behavior
+ depends on the API level of the protocol. On API levels below 2.16,
+ it will behave the same as a volume of ``None``/unspecified: mix
+ the full working volume of the pipette. On API levels at or above 2.16,
+ no liquid will be mixed.
:param location: The :py:class:`.Well` or :py:class:`~.types.Location` where the
pipette will mix. If unspecified, the pipette will mix at its
current position.
@@ -433,7 +506,14 @@ def mix(
if not self._core.has_tip():
raise UnexpectedTipRemovalError("mix", self.name, self.mount)
- c_vol = self._core.get_available_volume() if not volume else volume
+ if self.api_version >= APIVersion(2, 16):
+ c_vol = self._core.get_available_volume() if volume is None else volume
+ else:
+ c_vol = self._core.get_available_volume() if not volume else volume
+
+ dispense_kwargs: Dict[str, Any] = {}
+ if self.api_version >= APIVersion(2, 16):
+ dispense_kwargs["push_out"] = 0.0
with publisher.publish_context(
broker=self.broker,
@@ -446,7 +526,7 @@ def mix(
):
self.aspirate(volume, location, rate)
while repetitions - 1 > 0:
- self.dispense(volume, rate=rate)
+ self.dispense(volume, rate=rate, **dispense_kwargs)
self.aspirate(volume, rate=rate)
repetitions -= 1
self.dispense(volume, rate=rate)
@@ -455,7 +535,10 @@ def mix(
@requires_version(2, 0)
def blow_out(
- self, location: Optional[Union[types.Location, labware.Well]] = None
+ self,
+ location: Optional[
+ Union[types.Location, labware.Well, TrashBin, WasteChute]
+ ] = None,
) -> InstrumentContext:
"""
Blow an extra amount of air through a pipette's tip to clear it.
@@ -501,6 +584,19 @@ def blow_out(
well = target.well
elif isinstance(target, validation.PointTarget):
move_to_location = target.location
+ elif isinstance(target, (TrashBin, WasteChute)):
+ with publisher.publish_context(
+ broker=self.broker,
+ command=cmds.blow_out_in_disposal_location(
+ instrument=self, location=target
+ ),
+ ):
+ self._core.blow_out(
+ location=target,
+ well_core=None,
+ in_place=False,
+ )
+ return self
with publisher.publish_context(
broker=self.broker,
@@ -677,8 +773,8 @@ def return_tip(self, home_after: Optional[bool] = None) -> InstrumentContext:
return self
- @requires_version(2, 0) # noqa: C901
- def pick_up_tip(
+ @requires_version(2, 0)
+ def pick_up_tip( # noqa: C901
self,
location: Union[types.Location, labware.Well, labware.Labware, None] = None,
presses: Optional[int] = None,
@@ -782,12 +878,48 @@ def pick_up_tip(
well: labware.Well
tip_rack: labware.Labware
move_to_location: Optional[types.Location] = None
+ active_channels = (
+ self.active_channels
+ if self._api_version >= _PARTIAL_NOZZLE_CONFIGURATION_ADDED_IN
+ else self.channels
+ )
+ nozzle_map = (
+ self._core.get_nozzle_map()
+ if self._api_version
+ >= _PARTIAL_NOZZLE_CONFIGURATION_AUTOMATIC_TIP_TRACKING_IN
+ else None
+ )
if location is None:
+ if (
+ nozzle_map is not None
+ and nozzle_map.configuration != NozzleConfigurationType.FULL
+ and self.starting_tip is not None
+ ):
+ # Disallowing this avoids concerning the system with the direction
+ # in which self.starting_tip consumes tips. It would currently vary
+ # depending on the configuration layout of a pipette at a given
+                # time, which means that some combinations of starting tip and partial
+                # configuration are incompatible under the current understanding of
+ # starting tip behavior. Replacing starting_tip with an undeprecated
+ # Labware.has_tip may solve this.
+ raise CommandPreconditionViolated(
+ "Automatic tip tracking is not available when using a partial pipette"
+ " nozzle configuration and InstrumentContext.starting_tip."
+ " Switch to a full configuration or set starting_tip to None."
+ )
+ if not self._core.is_tip_tracking_available():
+ raise CommandPreconditionViolated(
+ "Automatic tip tracking is not available for the current pipette"
+ " nozzle configuration. We suggest switching to a configuration"
+ " that supports automatic tip tracking or specifying the exact tip"
+ " to pick up."
+ )
tip_rack, well = labware.next_available_tip(
starting_tip=self.starting_tip,
tip_racks=self.tip_racks,
- channels=self.channels,
+ channels=active_channels,
+ nozzle_map=nozzle_map,
)
elif isinstance(location, labware.Well):
@@ -798,7 +930,8 @@ def pick_up_tip(
tip_rack, well = labware.next_available_tip(
starting_tip=None,
tip_racks=[location],
- channels=self.channels,
+ channels=active_channels,
+ nozzle_map=nozzle_map,
)
elif isinstance(location, types.Location):
@@ -813,7 +946,8 @@ def pick_up_tip(
tip_rack, well = labware.next_available_tip(
starting_tip=None,
tip_racks=[maybe_tip_rack],
- channels=self.channels,
+ channels=active_channels,
+ nozzle_map=nozzle_map,
)
else:
raise TypeError(
@@ -862,6 +996,7 @@ def drop_tip(
Union[
types.Location,
labware.Well,
+ TrashBin,
WasteChute,
]
] = None,
@@ -873,7 +1008,7 @@ def drop_tip(
See :ref:`pipette-drop-tip` for examples.
If no location is passed (e.g. ``pipette.drop_tip()``), the pipette will drop
- the attached tip into its default :py:attr:`trash_container`.
+ the attached tip into its :py:attr:`trash_container`.
Starting with API version 2.15, if the trash container is the default fixed
trash, the API will instruct the pipette to drop tips in different locations
@@ -891,6 +1026,12 @@ def drop_tip(
- As a :py:class:`~.types.Location`. For example, to drop a tip from an
unusually large height above the tip rack, you could call
``pipette.drop_tip(tip_rack["A1"].top(z=10))``.
+ - As a :py:class:`.TrashBin`. This uses a default location relative to the
+ ``TrashBin`` object. For example,
+ ``pipette.drop_tip(location=trash_bin)``.
+ - As a :py:class:`.WasteChute`. This uses a default location relative to
+ the ``WasteChute`` object. For example,
+ ``pipette.drop_tip(location=waste_chute)``.
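+
+        For example, a minimal sketch (``protocol`` and ``pipette`` are assumed names
+        for a Flex protocol context and a loaded pipette):
+
+        .. code-block:: python
+
+            trash = protocol.load_trash_bin("A3")
+            pipette.pick_up_tip()
+            pipette.drop_tip()                # drop into pipette.trash_container
+            pipette.pick_up_tip()
+            pipette.drop_tip(location=trash)  # drop into an explicit trash bin
+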
:param location:
The location to drop the tip.
@@ -908,9 +1049,25 @@ def drop_tip(
"""
alternate_drop_location: bool = False
if location is None:
- well = self.trash_container.wells()[0]
+ trash_container = self.trash_container
if self.api_version >= _DROP_TIP_LOCATION_ALTERNATING_ADDED_IN:
alternate_drop_location = True
+ if isinstance(trash_container, labware.Labware):
+ well = trash_container.wells()[0]
+ else: # implicit drop tip in disposal location, not well
+ with publisher.publish_context(
+ broker=self.broker,
+ command=cmds.drop_tip_in_disposal_location(
+ instrument=self, location=trash_container
+ ),
+ ):
+ self._core.drop_tip_in_disposal_location(
+ trash_container,
+ home_after=home_after,
+ alternate_tip_drop=True,
+ )
+ self._last_tip_picked_up_from = None
+ return self
elif isinstance(location, labware.Well):
well = location
@@ -930,9 +1087,24 @@ def drop_tip(
well = maybe_well
- elif isinstance(location, WasteChute):
- # TODO: Publish to run log.
- self._core.drop_tip_in_waste_chute(location, home_after=home_after)
+ elif isinstance(location, (TrashBin, WasteChute)):
+            # In 2.16 and 2.17, we always used automatic alternate tip drop locations, regardless of whether the
+            # disposal location was passed explicitly or no location was provided. From 2.18 onward, explicitly
+            # passing a disposal location bypasses the automatic behavior and instead uses the set offset, or the
+            # XY center if no offset is set.
+ if self.api_version < _DISPOSAL_LOCATION_OFFSET_ADDED_IN:
+ alternate_drop_location = True
+ with publisher.publish_context(
+ broker=self.broker,
+ command=cmds.drop_tip_in_disposal_location(
+ instrument=self, location=location
+ ),
+ ):
+ self._core.drop_tip_in_disposal_location(
+ location,
+ home_after=home_after,
+ alternate_tip_drop=alternate_drop_location,
+ )
self._last_tip_picked_up_from = None
return self
@@ -985,6 +1157,7 @@ def home_plunger(self) -> InstrumentContext:
self._core.home_plunger()
return self
+    # TODO (spp, 2024-03-08): verify whether it is OK to change source & dest types to AdvancedLiquidHandling
@publisher.publish(command=cmds.distribute)
@requires_version(2, 0)
def distribute(
@@ -1024,6 +1197,7 @@ def distribute(
return self.transfer(volume, source, dest, **kwargs)
+    # TODO (spp, 2024-03-08): verify whether it is OK to change source & dest types to AdvancedLiquidHandling
@publisher.publish(command=cmds.consolidate)
@requires_version(2, 0)
def consolidate(
@@ -1056,9 +1230,9 @@ def consolidate(
return self.transfer(volume, source, dest, **kwargs)
- @publisher.publish(command=cmds.transfer) # noqa: C901
+ @publisher.publish(command=cmds.transfer)
@requires_version(2, 0)
- def transfer(
+ def transfer( # noqa: C901
self,
volume: Union[float, Sequence[float]],
source: AdvancedLiquidHandling,
@@ -1183,6 +1357,17 @@ def transfer(
blow_out = kwargs.get("blow_out")
blow_out_strategy = None
+ active_channels = (
+ self.active_channels
+ if self._api_version >= _PARTIAL_NOZZLE_CONFIGURATION_ADDED_IN
+ else self.channels
+ )
+ nozzle_map = (
+ self._core.get_nozzle_map()
+ if self._api_version
+ >= _PARTIAL_NOZZLE_CONFIGURATION_AUTOMATIC_TIP_TRACKING_IN
+ else None
+ )
if blow_out and not blowout_location:
if self.current_volume:
@@ -1199,7 +1384,10 @@ def transfer(
if new_tip != types.TransferTipPolicy.NEVER:
tr, next_tip = labware.next_available_tip(
- self.starting_tip, self.tip_racks, self.channels
+ self.starting_tip,
+ self.tip_racks,
+ active_channels,
+ nozzle_map=nozzle_map,
)
max_volume = min(next_tip.max_volume, self.max_volume)
else:
@@ -1283,7 +1471,7 @@ def delay(self, *args: Any, **kwargs: Any) -> None:
@requires_version(2, 0)
def move_to(
self,
- location: types.Location,
+ location: Union[types.Location, TrashBin, WasteChute],
force_direct: bool = False,
minimum_z_height: Optional[float] = None,
speed: Optional[float] = None,
@@ -1312,27 +1500,53 @@ def move_to(
:param publish: Whether to list this function call in the run preview.
Default is ``True``.
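+
+        For example, a minimal sketch (``protocol``, ``pipette``, and ``plate`` are
+        assumed names for a Flex protocol context, a loaded pipette, and a loaded
+        well plate):
+
+        .. code-block:: python
+
+            chute = protocol.load_waste_chute()
+            pipette.move_to(plate["A1"].top(z=10))  # hover 10 mm above well A1
+            pipette.move_to(chute)                  # move over the waste chute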
"""
- publish_ctx = nullcontext()
-
- if publish:
- publish_ctx = publisher.publish_context(
- broker=self.broker,
- command=cmds.move_to(instrument=self, location=location),
- )
- with publish_ctx:
- _, well = location.labware.get_parent_labware_and_well()
-
- self._core.move_to(
- location=location,
- well_core=well._core if well is not None else None,
- force_direct=force_direct,
- minimum_z_height=minimum_z_height,
- speed=speed,
- )
+ with ExitStack() as contexts:
+ if isinstance(location, (TrashBin, WasteChute)):
+ if publish:
+ contexts.enter_context(
+ publisher.publish_context(
+ broker=self.broker,
+ command=cmds.move_to_disposal_location(
+ instrument=self, location=location
+ ),
+ )
+ )
+
+ self._core.move_to(
+ location=location,
+ well_core=None,
+ force_direct=force_direct,
+ minimum_z_height=minimum_z_height,
+ speed=speed,
+ )
+ else:
+ if publish:
+ contexts.enter_context(
+ publisher.publish_context(
+ broker=self.broker,
+ command=cmds.move_to(instrument=self, location=location),
+ )
+ )
+
+ _, well = location.labware.get_parent_labware_and_well()
+
+ self._core.move_to(
+ location=location,
+ well_core=well._core if well is not None else None,
+ force_direct=force_direct,
+ minimum_z_height=minimum_z_height,
+ speed=speed,
+ )
return self
- @property # type: ignore
+ @requires_version(2, 18)
+ def _retract(
+ self,
+ ) -> None:
+ self._core.retract()
+
+ @property
@requires_version(2, 0)
def mount(self) -> str:
"""
@@ -1342,7 +1556,7 @@ def mount(self) -> str:
"""
return self._core.get_mount().name.lower()
- @property # type: ignore
+ @property
@requires_version(2, 0)
def speed(self) -> "PlungerSpeeds":
"""The speeds (in mm/s) configured for the pipette plunger.
@@ -1370,7 +1584,7 @@ def speed(self) -> "PlungerSpeeds":
assert isinstance(self._core, LegacyInstrumentCore)
return cast(LegacyInstrumentCore, self._core).get_speed()
- @property # type: ignore
+ @property
@requires_version(2, 0)
def flow_rate(self) -> "FlowRates":
"""The speeds, in µL/s, configured for the pipette.
@@ -1387,19 +1601,20 @@ def flow_rate(self) -> "FlowRates":
"""
return self._core.get_flow_rate()
- @property # type: ignore
+ @property
@requires_version(2, 0)
def type(self) -> str:
- """One of ``'single'`` or ``'multi'``."""
- model = self.name
- if "single" in model:
+ """``'single'`` if this is a 1-channel pipette, or ``'multi'`` otherwise.
+
+ See also :py:obj:`.channels`, which can distinguish between 8-channel and 96-channel
+ pipettes.
+ """
+ if self.channels == 1:
return "single"
- elif "multi" in model:
- return "multi"
else:
- raise RuntimeError("Bad pipette name: {}".format(model))
+ return "multi"
- @property # type: ignore
+ @property
@requires_version(2, 0)
def tip_racks(self) -> List[labware.Labware]:
"""
@@ -1414,27 +1629,42 @@ def tip_racks(self) -> List[labware.Labware]:
def tip_racks(self, racks: List[labware.Labware]) -> None:
self._tip_racks = racks
- @property # type: ignore
+ @property
@requires_version(2, 0)
- def trash_container(self) -> labware.Labware:
+ def trash_container(self) -> Union[labware.Labware, TrashBin, WasteChute]:
"""The trash container associated with this pipette.
This is the property used to determine where to drop tips and blow out liquids
when calling :py:meth:`drop_tip` or :py:meth:`blow_out` without arguments.
- By default, the trash container is in slot A3 on Flex and in slot 12 on OT-2.
+ You can set this to a :py:obj:`Labware`, :py:class:`.TrashBin`, or :py:class:`.WasteChute`.
+
+ The default value depends on the robot type and API version:
+
+ - :py:obj:`ProtocolContext.fixed_trash`, if it exists.
+ - Otherwise, the first item previously loaded with
+ :py:obj:`ProtocolContext.load_trash_bin()` or
+ :py:obj:`ProtocolContext.load_waste_chute()`.
+
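+        For example, a minimal sketch (``protocol`` and ``pipette`` are assumed names
+        for a Flex protocol context and a loaded pipette):
+
+        .. code-block:: python
+
+            chute = protocol.load_waste_chute()
+            pipette.trash_container = chute  # drop_tip() and blow_out() without
+                                             # arguments now use the waste chute
+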
+ .. versionchanged:: 2.16
+ Added support for ``TrashBin`` and ``WasteChute`` objects.
"""
- if self._trash is None:
- raise NoTrashDefinedError(
- "No trash container has been defined in this protocol."
- )
- return self._trash
+ if self._user_specified_trash is None:
+ disposal_locations = self._protocol_core.get_disposal_locations()
+ if len(disposal_locations) == 0:
+ raise NoTrashDefinedError(
+ "No trash container has been defined in this protocol."
+ )
+ return disposal_locations[0]
+ return self._user_specified_trash
@trash_container.setter
- def trash_container(self, trash: labware.Labware) -> None:
- self._trash = trash
+ def trash_container(
+ self, trash: Union[labware.Labware, TrashBin, WasteChute]
+ ) -> None:
+ self._user_specified_trash = trash
- @property # type: ignore
+ @property
@requires_version(2, 0)
def name(self) -> str:
"""
@@ -1442,7 +1672,7 @@ def name(self) -> str:
"""
return self._core.get_pipette_name()
- @property # type: ignore
+ @property
@requires_version(2, 0)
def model(self) -> str:
"""
@@ -1450,7 +1680,7 @@ def model(self) -> str:
"""
return self._core.get_model()
- @property # type: ignore
+ @property
@requires_version(2, 0)
def min_volume(self) -> float:
"""
@@ -1460,7 +1690,7 @@ def min_volume(self) -> float:
"""
return self._core.get_min_volume()
- @property # type: ignore
+ @property
@requires_version(2, 0)
def max_volume(self) -> float:
"""
@@ -1473,7 +1703,7 @@ def max_volume(self) -> float:
"""
return self._core.get_max_volume()
- @property # type: ignore
+ @property
@requires_version(2, 0)
def current_volume(self) -> float:
"""
@@ -1481,7 +1711,7 @@ def current_volume(self) -> float:
"""
return self._core.get_current_volume()
- @property # type: ignore
+ @property
@requires_version(2, 7)
def has_tip(self) -> bool:
"""Whether this instrument has a tip attached or not.
@@ -1500,7 +1730,7 @@ def _has_tip(self) -> bool:
"""
return self._core.has_tip()
- @property # type: ignore
+ @property
@requires_version(2, 0)
def hw_pipette(self) -> PipetteDict:
"""View the information returned by the hardware API directly.
@@ -1510,15 +1740,28 @@ def hw_pipette(self) -> PipetteDict:
"""
return self._core.get_hardware_state()
- @property # type: ignore
+ @property
@requires_version(2, 0)
def channels(self) -> int:
"""The number of channels on the pipette.
- Possible values are 1, 8, or 96."""
+ Possible values are 1, 8, or 96.
+
+ See also :py:obj:`.type`.
+ """
return self._core.get_channels()
- @property # type: ignore
+ @property
+ @requires_version(2, 16)
+ def active_channels(self) -> int:
+ """The number of channels the pipette will use to pick up tips.
+
+ By default, all channels on the pipette. Use :py:meth:`.configure_nozzle_layout`
+ to set the pipette to use fewer channels.
+ """
+ return self._core.get_active_channels()
+
+ @property
@requires_version(2, 2)
def return_height(self) -> float:
"""The height to return a tip to its tip rack.
@@ -1529,7 +1772,7 @@ def return_height(self) -> float:
"""
return self._core.get_return_height()
- @property # type: ignore
+ @property
@requires_version(2, 0)
def well_bottom_clearance(self) -> "Clearances":
"""The distance above the bottom of a well to aspirate or dispense.
@@ -1660,50 +1903,73 @@ def prepare_to_aspirate(self) -> None:
)
self._core.prepare_to_aspirate()
+ @requires_version(2, 16)
def configure_nozzle_layout(
self,
style: NozzleLayout,
start: Optional[str] = None,
front_right: Optional[str] = None,
+ tip_racks: Optional[List[labware.Labware]] = None,
) -> None:
- """Configure a pipette to pick up less than the maximum tip capacity. The pipette
- will remain in its partial state until this function is called again without any inputs. All subsequent
- pipetting calls will execute with the new nozzle layout meaning that the pipette will perform
- robot moves in the set nozzle layout.
-
- :param style: The requested nozzle layout should specify the shape that you
- wish to configure your pipette to. Certain pipettes are restricted to a subset of `NozzleLayout`
- types. See the note below on the different `NozzleLayout` types.
- :type requested_nozzle_layout: `NozzleLayout.COLUMN`, `NozzleLayout.EMPTY` or None.
- :param start: Signifies the nozzle that the robot will use to determine how to perform moves to different locations on the deck.
- :type start: string or None.
- :param front_right: Signifies the ending nozzle in your partial configuration. It is not required for NozzleLayout.COLUMN, NozzleLayout.ROW, or NozzleLayout.SINGLE
- configurations.
- :type front_right: string or None.
-
- .. note::
- Your `start` and `front_right` strings should be formatted similarly to a well, so in the format of .
- The pipette nozzles are mapped in the same format as a 96 well standard plate starting from the back left-most nozzle
- to the front right-most nozzle.
+ """Configure how many tips the 96-channel pipette will pick up.
- .. code-block:: python
+ Changing the nozzle layout will affect gantry movement for all subsequent
+ pipetting actions that the pipette performs. It also alters the pipette's
+ behavior for picking up tips. The pipette will continue to use the specified
+ layout until this function is called again.
- from opentrons.protocol_api import COLUMN, EMPTY
-
- # Sets a pipette to a full single column pickup using "A1" as the primary nozzle. Implicitly, "H1" is the ending nozzle.
- instr.configure_nozzle_layout(style=COLUMN, start="A1")
-
- # Resets the pipette configuration to default
- instr.configure_nozzle_layout(style=EMPTY)
+ .. note::
+ When picking up fewer than 96 tips at once, the tip rack *must not* be
+            placed in a tip rack adapter on the deck. If you try to pick up fewer than 96
+ tips from a tip rack that is in an adapter, the API will raise an error.
+
+ :param style: The shape of the nozzle layout.
+
+ - ``COLUMN`` sets the pipette to use 8 nozzles, aligned from front to back
+ with respect to the deck. This corresponds to a column of wells on labware.
+ - ``ALL`` resets the pipette to use all of its nozzles. Calling
+ ``configure_nozzle_layout`` with no arguments also resets the pipette.
+
+ :type style: ``NozzleLayout`` or ``None``
+ :param start: The nozzle at the back left of the layout, which the robot uses
+ to determine how it will move to different locations on the deck. The string
+ should be of the same format used when identifying wells by name.
+ Required unless setting ``style=ALL``.
+
+ :type start: str or ``None``
+ :param tip_racks: Behaves the same as setting the ``tip_racks`` parameter of
+ :py:meth:`.load_instrument`. If not specified, the new configuration resets
+ :py:obj:`.InstrumentContext.tip_racks` and you must specify the location
+ every time you call :py:meth:`~.InstrumentContext.pick_up_tip`.
+ :type tip_racks: List[:py:class:`.Labware`]
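+
+        For example, a minimal sketch (``pipette`` and ``tips`` are assumed names for
+        a loaded 96-channel pipette and a tip rack loaded directly on the deck; this
+        sketch also assumes ``COLUMN`` and ``ALL`` can be imported from
+        ``opentrons.protocol_api``):
+
+        .. code-block:: python
+
+            from opentrons.protocol_api import COLUMN, ALL
+
+            # Pick up one column of tips at a time, using nozzle A1 as the
+            # primary nozzle and tracking tips in the given rack automatically.
+            pipette.configure_nozzle_layout(style=COLUMN, start="A1", tip_racks=[tips])
+            pipette.pick_up_tip()
+            pipette.drop_tip()
+
+            # Return to using all 96 channels.
+            pipette.configure_nozzle_layout(style=ALL, tip_racks=[tips])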
"""
- if style != NozzleLayout.EMPTY:
+ # TODO: add the following back into the docstring when QUADRANT is supported
+ #
+        # :param front_right: The nozzle at the front right of the layout. Only used for
+ # NozzleLayout.QUADRANT configurations.
+ # :type front_right: str or ``None``
+ #
+ # NOTE: Disabled layouts error case can be removed once desired map configurations
+ # have appropriate data regarding tip-type to map current values added to the
+ # pipette definitions.
+ disabled_layouts = [
+ NozzleLayout.ROW,
+ NozzleLayout.SINGLE,
+ NozzleLayout.QUADRANT,
+ ]
+ if style in disabled_layouts:
+ raise ValueError(
+ f"Nozzle layout configuration of style {style.value} is currently unsupported."
+ )
+
+ if style != NozzleLayout.ALL:
if start is None:
raise ValueError(
f"Cannot configure a nozzle layout of style {style.value} without a starting nozzle."
)
if start not in types.ALLOWED_PRIMARY_NOZZLES:
raise ValueError(
- f"Starting nozzle specified is not of {types.ALLOWED_PRIMARY_NOZZLES}"
+ f"Starting nozzle specified is not one of {types.ALLOWED_PRIMARY_NOZZLES}"
)
if style == NozzleLayout.QUADRANT:
if front_right is None:
@@ -1711,5 +1977,9 @@ def configure_nozzle_layout(
"Cannot configure a QUADRANT layout without a front right nozzle."
)
self._core.configure_nozzle_layout(
- style, primary_nozzle=start, front_right_nozzle=front_right
+ style,
+ primary_nozzle=start,
+ front_right_nozzle=front_right,
)
+        # TODO (spp, 2023-12-05): verify that tip racks are on adapters only for the full 96-channel config
+ self._tip_racks = tip_racks or []
diff --git a/api/src/opentrons/protocol_api/labware.py b/api/src/opentrons/protocol_api/labware.py
index d3297e66253..3b7ae943208 100644
--- a/api/src/opentrons/protocol_api/labware.py
+++ b/api/src/opentrons/protocol_api/labware.py
@@ -7,6 +7,7 @@
transform from labware symbolic points (such as "well a1 of an opentrons
tiprack") to points in deck coordinates.
"""
+
from __future__ import annotations
import logging
@@ -19,6 +20,7 @@
from opentrons.types import Location, Point
from opentrons.protocols.api_support.types import APIVersion
from opentrons.protocols.api_support.util import requires_version, APIVersionError
+from opentrons.hardware_control.nozzle_manager import NozzleMap
# TODO(mc, 2022-09-02): re-exports provided for backwards compatibility
# remove when their usage is no longer needed
@@ -33,7 +35,7 @@
from ._liquid import Liquid
from ._types import OffDeckType
from .core import well_grid
-from .core.engine import ENGINE_CORE_API_VERSION
+from .core.engine import ENGINE_CORE_API_VERSION, SET_OFFSET_RESTORED_API_VERSION
from .core.labware import AbstractLabware
from .core.module import AbstractModuleCore
from .core.core_map import LoadedCoreMap
@@ -72,7 +74,7 @@ class Well:
- Calculating positions relative to the well. See :ref:`position-relative-labware` for details.
- - Returning well measurements. see :ref:`new-labware-well-properties` for details.
+ - Returning well measurements. See :ref:`new-labware-well-properties` for details.
- Specifying what liquid should be in the well at the beginning of a protocol. See :ref:`labeling-liquids` for details.
"""
@@ -85,17 +87,18 @@ def __init__(self, parent: Labware, core: WellCore, api_version: APIVersion):
self._core = core
self._api_version = api_version
- @property # type: ignore
+ @property
@requires_version(2, 0)
def api_version(self) -> APIVersion:
return self._api_version
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def parent(self) -> Labware:
+ """The :py:class:`.Labware` object that the well is a part of."""
return self._parent
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def has_tip(self) -> bool:
"""Whether this well contains a tip. Always ``False`` if the parent labware
@@ -113,6 +116,10 @@ def has_tip(self, value: bool) -> None:
@property
def max_volume(self) -> float:
+ """The maximum volume, in µL, that the well can hold.
+
+ This amount is set by the JSON labware definition, specifically the ``totalLiquidVolume`` property of the particular well.
+ """
return self._core.get_max_volume()
@property
@@ -121,7 +128,7 @@ def geometry(self) -> WellGeometry:
return self._core.geometry
raise APIVersionError("Well.geometry has been deprecated.")
- @property # type: ignore
+ @property
@requires_version(2, 0)
def diameter(self) -> Optional[float]:
"""
@@ -130,7 +137,7 @@ def diameter(self) -> Optional[float]:
"""
return self._core.diameter
- @property # type: ignore
+ @property
@requires_version(2, 9)
def length(self) -> Optional[float]:
"""
@@ -139,7 +146,7 @@ def length(self) -> Optional[float]:
"""
return self._core.length
- @property # type: ignore
+ @property
@requires_version(2, 9)
def width(self) -> Optional[float]:
"""
@@ -148,7 +155,7 @@ def width(self) -> Optional[float]:
"""
return self._core.width
- @property # type: ignore
+ @property
@requires_version(2, 9)
def depth(self) -> float:
"""
@@ -159,11 +166,24 @@ def depth(self) -> float:
@property
def display_name(self) -> str:
+ """A human-readable name for the well, including labware and deck location.
+
+ For example, "A1 of Corning 96 Well Plate 360 µL Flat on slot D1". Run log
+ entries use this format for identifying wells. See
+ :py:meth:`.ProtocolContext.commands`.
+ """
return self._core.get_display_name()
- @property # type: ignore
+ @property
@requires_version(2, 7)
def well_name(self) -> str:
+ """A string representing the well's coordinates.
+
+ For example, "A1" or "H12".
+
+ The format of strings that this property returns is the same format as the key
+ for :ref:`accessing wells in a dictionary `.
+ """
return self._core.get_name()
@requires_version(2, 0)
@@ -278,29 +298,22 @@ def __hash__(self) -> int:
class Labware:
"""
- This class represents a labware, such as a PCR plate, a tube rack,
- reservoir, tip rack, etc. It defines the physical geometry of the labware,
- and provides methods for accessing wells within the labware.
-
- It is commonly created by calling ``ProtocolContext.load_labware()``.
-
- To access a labware's wells, you can use its well accessor methods:
- :py:meth:`wells_by_name`, :py:meth:`wells`, :py:meth:`columns`,
- :py:meth:`rows`, :py:meth:`rows_by_name`, and :py:meth:`columns_by_name`.
- You can also use an instance of a labware as a Python dictionary, accessing
- wells by their names. The following example shows how to use all of these
- methods to access well A1:
-
- .. code-block :: python
-
- labware = context.load_labware('corning_96_wellplate_360ul_flat', 1)
- labware['A1']
- labware.wells_by_name()['A1']
- labware.wells()[0]
- labware.rows()[0][0]
- labware.columns()[0][0]
- labware.rows_by_name()['A'][0]
- labware.columns_by_name()[0][0]
+ This class represents a piece of labware.
+
+    Labware available in the API generally falls into two categories.
+
+ - Consumable labware: well plates, tubes in racks, reservoirs, tip racks, etc.
+ - Adapters: durable items that hold other labware, either on modules or directly
+ on the deck.
+
+ The ``Labware`` class defines the physical geometry of the labware
+ and provides methods for :ref:`accessing wells ` within the labware.
+
+ Create ``Labware`` objects by calling the appropriate ``load_labware()`` method,
+ depending on where you are loading the labware. For example, to load labware on a
+ Thermocycler Module, use :py:meth:`.ThermocyclerContext.load_labware`. To load
+ labware directly on the deck, use :py:meth:`.ProtocolContext.load_labware`. See
+ :ref:`loading-labware`.
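+
+    For example, a minimal sketch (``protocol`` is an assumed name for a protocol
+    context; the slot name follows OT-2 conventions) showing equivalent ways to
+    access well A1:
+
+    .. code-block:: python
+
+        plate = protocol.load_labware("corning_96_wellplate_360ul_flat", "1")
+
+        plate["A1"]
+        plate.wells_by_name()["A1"]
+        plate.wells()[0]
+        plate.rows()[0][0]
+        plate.columns()[0][0]
+        plate.rows_by_name()["A"][0]
+        plate.columns_by_name()["1"][0]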
"""
@@ -349,44 +362,48 @@ def separate_calibration(self) -> bool:
)
return False
- @property # type: ignore
+ @property
@requires_version(2, 0)
def api_version(self) -> APIVersion:
+ """See :py:obj:`.ProtocolContext.api_version`."""
return self._api_version
def __getitem__(self, key: str) -> Well:
return self.wells_by_name()[key]
- @property # type: ignore
+ @property
@requires_version(2, 0)
def uri(self) -> str:
"""A string fully identifying the labware.
- :returns: The uri, ``"namespace/loadname/version"``
+ The URI has three parts and follows the pattern ``"namespace/load_name/version"``.
+ For example, ``opentrons/corning_96_wellplate_360ul_flat/2``.
"""
return self._core.get_uri()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def parent(self) -> Union[str, Labware, ModuleTypes, OffDeckType]:
- """The parent of this labware---where this labware is loaded.
+ """Where the labware is loaded.
+
+ This corresponds to the physical object that the labware *directly* rests upon.
Returns:
- If the labware is directly on the robot's deck, the `str` name of the deck slot,
+ If the labware is directly on the robot's deck, the ``str`` name of the deck slot,
like ``"D1"`` (Flex) or ``"1"`` (OT-2). See :ref:`deck-slots`.
- If the labware is on a module, a :py:class:`ModuleContext`.
+ If the labware is on a module, a module context.
If the labware is on a labware or adapter, a :py:class:`Labware`.
If the labware is off-deck, :py:obj:`OFF_DECK`.
.. versionchanged:: 2.14
- Return type for module parent changed to :py:class:`ModuleContext`.
- Prior to this version, an internal geometry interface is returned.
+ Return type for module parent changed.
+ Formerly, the API returned an internal geometry interface.
.. versionchanged:: 2.15
- Will return a :py:class:`Labware` if the labware was loaded onto a labware/adapter.
- Will now return :py:obj:`OFF_DECK` if the labware is off-deck.
+ Returns a :py:class:`Labware` if the labware is loaded onto a labware/adapter.
+ Returns :py:obj:`OFF_DECK` if the labware is off-deck.
Formerly, if the labware was removed by using ``del`` on :py:obj:`.deck`,
this would return where it was before its removal.
"""
@@ -403,11 +420,16 @@ def parent(self) -> Union[str, Labware, ModuleTypes, OffDeckType]:
return labware_location
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def name(self) -> str:
- """Can either be the canonical name of the labware, which is used to
- load it, or the label of the labware specified by a user."""
+ """The display name of the labware.
+
+ If you specified a value for ``label`` when loading the labware, ``name`` is
+ that value.
+
+ Otherwise, it is the :py:obj:`~.Labware.load_name` of the labware.
+ """
return self._core.get_name()
@name.setter
@@ -425,19 +447,19 @@ def name(self, new_name: str) -> None:
assert isinstance(self._core, LegacyLabwareCore)
cast(LegacyLabwareCore, self._core).set_name(new_name)
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def load_name(self) -> str:
- """The API load name of the labware definition"""
+ """The API load name of the labware definition."""
return self._core.load_name
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def parameters(self) -> "LabwareParameters":
- """Internal properties of a labware including type and quirks"""
+ """Internal properties of a labware including type and quirks."""
return self._core.get_parameters()
- @property # type: ignore
+ @property
@requires_version(2, 0)
def quirks(self) -> List[str]:
"""Quirks specific to this labware."""
@@ -446,7 +468,7 @@ def quirks(self) -> List[str]:
# TODO(mm, 2023-02-08):
# Specify units and origin after we resolve RSS-110.
# Remove warning once we resolve RSS-109 more broadly.
- @property # type: ignore
+ @property
@requires_version(2, 0)
def magdeck_engage_height(self) -> Optional[float]:
"""Return the default magnet engage height that
@@ -471,7 +493,7 @@ def magdeck_engage_height(self) -> Optional[float]:
else:
return p["magneticModuleEngageHeight"]
- @property # type: ignore[misc]
+ @property
@requires_version(2, 15)
def child(self) -> Optional[Labware]:
"""The labware (if any) present on this labware."""
@@ -518,13 +540,13 @@ def load_labware(
def load_labware_from_definition(
self, definition: LabwareDefinition, label: Optional[str] = None
) -> Labware:
- """Load a labware onto the module using an inline definition.
+ """Load a compatible labware onto the labware using an inline definition.
:param definition: The labware definition.
- :param str label: An optional special name to give the labware. If
- specified, this is the name the labware will appear
- as in the run log and the calibration view in the
- Opentrons App.
+ :param str label: An optional special name to give the labware. If specified,
+ this is how the labware will appear in the run log, Labware Position
+ Check, and elsewhere in the Opentrons App and on the touchscreen.
+
:returns: The initialized and loaded labware object.
"""
load_params = self._protocol_core.add_labware_definition(definition)
@@ -538,7 +560,7 @@ def load_labware_from_definition(
def set_calibration(self, delta: Point) -> None:
"""
- An internal, deprecated method used for updating the offset on the object.
+ An internal, deprecated method used for updating the labware offset.
.. deprecated:: 2.14
"""
@@ -558,45 +580,45 @@ def set_offset(self, x: float, y: float, z: float) -> None:
(see :ref:`protocol-api-deck-coords`) that the motion system
will add to any movement targeting this labware instance.
- The offset will *not* apply to any other labware instances,
+ The offset *will not apply* to any other labware instances,
even if those labware are of the same type.
- .. caution::
- This method is *only* for use with mechanisms like
- :obj:`opentrons.execute.get_protocol_api`, which lack an interactive way
- to adjust labware offsets. (See :ref:`advanced-control`.)
+ This method is *only* for use with mechanisms like
+ :obj:`opentrons.execute.get_protocol_api`, which lack an interactive way
+ to adjust labware offsets. (See :ref:`advanced-control`.)
+
+ .. warning::
If you're uploading a protocol via the Opentrons App, don't use this method,
because it will produce undefined behavior.
- Instead, use Labware Position Check in the app.
+ Instead, use Labware Position Check in the app or on the touchscreen.
- Because protocols using :ref:`API version ` 2.14 or higher
- can currently *only* be uploaded via the Opentrons App, it doesn't make
- sense to use this method with them. Trying to do so will raise an exception.
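+
+        For example, a minimal sketch (assuming Jupyter or another interactive session
+        where the protocol context comes from :obj:`opentrons.execute.get_protocol_api`;
+        ``plate`` and the offset values are illustrative):
+
+        .. code-block:: python
+
+            import opentrons.execute
+
+            protocol = opentrons.execute.get_protocol_api("2.18")
+            plate = protocol.load_labware("corning_96_wellplate_360ul_flat", "D1")
+            plate.set_offset(x=0.1, y=-0.2, z=0.3)  # applies only to this labware instance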
"""
- if self._api_version >= ENGINE_CORE_API_VERSION:
- # TODO(mm, 2023-02-13): See Jira RCORE-535.
- #
- # Until that issue is resolved, the only way to simulate or run a
- # >=ENGINE_CORE_API_VERSION protocol is through the Opentrons App.
- # Therefore, in >=ENGINE_CORE_API_VERSION protocols,
- # there's no legitimate way to use this method.
+ if (
+ self._api_version >= ENGINE_CORE_API_VERSION
+ and self._api_version < SET_OFFSET_RESTORED_API_VERSION
+ ):
raise APIVersionError(
- "Labware.set_offset() is not supported when apiLevel is 2.14 or higher."
- " Use a lower apiLevel"
+ "Labware.set_offset() is not supported when apiLevel is 2.14, 2.15, 2.16, or 2.17."
+                " Use apiLevel 2.13 or below, or 2.18 or above, to set an offset,"
" or use the Opentrons App's Labware Position Check."
)
else:
self._core.set_calibration(Point(x=x, y=y, z=z))
- @property # type: ignore
+ @property
@requires_version(2, 0)
def calibrated_offset(self) -> Point:
+ """The front-left-bottom corner of the labware, including its labware offset.
+
+ When running a protocol in the Opentrons App or on the touchscreen, Labware
+ Position Check sets the labware offset.
+ """
return self._core.get_calibrated_offset()
@requires_version(2, 0)
def well(self, idx: Union[int, str]) -> Well:
- """Deprecated---use result of `wells` or `wells_by_name`"""
+ """Deprecated. Use result of :py:meth:`wells` or :py:meth:`wells_by_name`."""
if isinstance(idx, int):
return self.wells()[idx]
elif isinstance(idx, str):
@@ -609,20 +631,21 @@ def well(self, idx: Union[int, str]) -> Well:
@requires_version(2, 0)
def wells(self, *args: Union[str, int]) -> List[Well]:
"""
- Accessor function used to generate a list of wells in top -> down,
- left -> right order. This is representative of moving down `rows` and
- across `columns` (e.g. 'A1', 'B1', 'C1'...'A2', 'B2', 'C2')
+ Accessor function to navigate a labware top to bottom, left to right.
+
+ i.e., this method returns a list ordered A1, B1, C1…A2, B2, C2….
+
+ Use indexing to access individual wells contained in the list.
+ For example, access well A1 with ``labware.wells()[0]``.
- With indexing one can treat it as a typical python
- list. To access well A1, for example, write: labware.wells()[0]
+ .. note::
+ Using args with this method is deprecated. Use indexing instead.
- Note that this method takes args for backward-compatibility, but use
- of args is deprecated and will be removed in future versions. Args
- can be either strings or integers, but must all be the same type (e.g.:
- `self.wells(1, 4, 8)` or `self.wells('A1', 'B2')`, but
- `self.wells('A1', 4)` is invalid.
+ If your code uses args, they can be either strings or integers, but not a
+ mix of the two. For example, ``.wells(1, 4)`` or ``.wells("1", "4")`` is
+ valid, but ``.wells("1", 4)`` is not.
- :return: Ordered list of all wells in a labware
+ :return: Ordered list of all wells in a labware.
"""
if not args:
return list(self._wells_by_name.values())
@@ -644,13 +667,12 @@ def wells(self, *args: Union[str, int]) -> List[Well]:
@requires_version(2, 0)
def wells_by_name(self) -> Dict[str, Well]:
"""
- Accessor function used to create a look-up table of Wells by name.
+ Accessor function used to navigate through a labware by well name.
- With indexing one can treat it as a typical python
- dictionary whose keys are well names. To access well A1, for example,
- write: labware.wells_by_name()['A1']
+ Use indexing to access individual wells contained in the dictionary.
+ For example, access well A1 with ``labware.wells_by_name()["A1"]``.
- :return: Dictionary of well objects keyed by well name
+ :return: Dictionary of :py:class:`.Well` objects keyed by well name.
"""
return dict(self._wells_by_name)
@@ -668,19 +690,20 @@ def wells_by_index(self) -> Dict[str, Well]:
@requires_version(2, 0)
def rows(self, *args: Union[int, str]) -> List[List[Well]]:
"""
- Accessor function used to navigate through a labware by row.
+ Accessor function to navigate through a labware by row.
- With indexing one can treat it as a typical python nested list.
- To access row A for example, write: labware.rows()[0]. This
- will output ['A1', 'A2', 'A3', 'A4'...]
+        Use indexing to access individual rows or wells contained in the nested list.
+        For example, access row A with ``labware.rows()[0]``.
+        On a standard 96-well plate, this will output a list of :py:class:`.Well`
+        objects containing A1 through A12.
- Note that this method takes args for backward-compatibility, but use
- of args is deprecated and will be removed in future versions. Args
- can be either strings or integers, but must all be the same type (e.g.:
- `self.rows(1, 4, 8)` or `self.rows('A', 'B')`, but `self.rows('A', 4)`
- is invalid.
+ .. note::
+ Using args with this method is deprecated. Use indexing instead.
- :return: A list of row lists
+ If your code uses args, they can be either strings or integers, but not a
+ mix of the two. For example, ``.rows(1, 4)`` or ``.rows("1", "4")`` is
+ valid, but ``.rows("1", 4)`` is not.
+
+ :return: A list of row lists.
"""
if not args:
return [
@@ -705,13 +728,14 @@ def rows(self, *args: Union[int, str]) -> List[List[Well]]:
@requires_version(2, 0)
def rows_by_name(self) -> Dict[str, List[Well]]:
"""
- Accessor function used to navigate through a labware by row name.
+ Accessor function to navigate through a labware by row name.
- With indexing one can treat it as a typical python dictionary.
- To access row A for example, write: labware.rows_by_name()['A']
- This will output ['A1', 'A2', 'A3', 'A4'...].
+ Use indexing to access individual rows or wells contained in the dictionary.
+ For example, access row A with ``labware.rows_by_name()["A"]``.
+ On a standard 96-well plate, this will output a list of :py:class:`.Well`
+ objects containing A1 through A12.
- :return: Dictionary of Well lists keyed by row name
+ :return: Dictionary of :py:class:`.Well` lists keyed by row name.
"""
return {
row_name: [self._wells_by_name[well_name] for well_name in row]
@@ -730,20 +754,21 @@ def rows_by_index(self) -> Dict[str, List[Well]]:
@requires_version(2, 0)
def columns(self, *args: Union[int, str]) -> List[List[Well]]:
"""
- Accessor function used to navigate through a labware by column.
+ Accessor function to navigate through a labware by column.
+
+ Use indexing to access individual columns or wells contained in the nested list.
+ For example, access column 1 with ``labware.columns()[0]``.
+ On a standard 96-well plate, this will output a list of :py:class:`.Well`
+ objects containing A1 through H1.
- With indexing one can treat it as a typical python nested list.
- To access row A for example,
- write: labware.columns()[0]
- This will output ['A1', 'B1', 'C1', 'D1'...].
+ .. note::
+ Using args with this method is deprecated. Use indexing instead.
- Note that this method takes args for backward-compatibility, but use
- of args is deprecated and will be removed in future versions. Args
- can be either strings or integers, but must all be the same type (e.g.:
- `self.columns(1, 4, 8)` or `self.columns('1', '2')`, but
- `self.columns('1', 4)` is invalid.
+ If your code uses args, they can be either strings or integers, but not a
+ mix of the two. For example, ``.columns(1, 4)`` or ``.columns("1", "4")`` is
+ valid, but ``.columns("1", 4)`` is not.
- :return: A list of column lists
+ :return: A list of column lists.
"""
if not args:
return [
@@ -768,14 +793,14 @@ def columns(self, *args: Union[int, str]) -> List[List[Well]]:
@requires_version(2, 0)
def columns_by_name(self) -> Dict[str, List[Well]]:
"""
- Accessor function used to navigate through a labware by column name.
+ Accessor function to navigate through a labware by column name.
- With indexing one can treat it as a typical python dictionary.
- To access row A for example,
- write: labware.columns_by_name()['1']
- This will output ['A1', 'B1', 'C1', 'D1'...].
+ Use indexing to access individual columns or wells contained in the dictionary.
+ For example, access column 1 with ``labware.columns_by_name()["1"]``.
+ On a standard 96-well plate, this will output a list of :py:class:`.Well`
+ objects containing A1 through H1.
- :return: Dictionary of Well lists keyed by column name
+ :return: Dictionary of :py:class:`.Well` lists keyed by column name.
"""
return {
column_name: [self._wells_by_name[well_name] for well_name in column]
@@ -791,14 +816,14 @@ def columns_by_index(self) -> Dict[str, List[Well]]:
_log.warning("columns_by_index is deprecated. Use columns_by_name instead.")
return self.columns_by_name()
- @property # type: ignore
+ @property
@requires_version(2, 0)
def highest_z(self) -> float:
"""
- The z-coordinate of the tallest single point anywhere on the labware.
+ The z-coordinate of the highest single point anywhere on the labware.
- This is drawn from the 'dimensions'/'zDimension' elements of the
- labware definition and takes into account the calibration offset.
+ This is taken from the ``zDimension`` property of the ``dimensions`` object in the
+ labware definition and takes into account the labware offset.
"""
return self._core.highest_z
@@ -807,19 +832,34 @@ def _is_tiprack(self) -> bool:
"""as is_tiprack but not subject to version checking for speed"""
return self._core.is_tip_rack()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def is_tiprack(self) -> bool:
+ """Whether the labware behaves as a tip rack.
+
+ Returns ``True`` if the labware definition specifies ``isTiprack`` as ``True``.
+ """
return self._is_tiprack
- @property # type: ignore[misc]
+ @property
@requires_version(2, 15)
def is_adapter(self) -> bool:
+ """Whether the labware behaves as an adapter.
+
+ Returns ``True`` if the labware definition specifies ``adapter`` as one of the
+ labware's ``allowedRoles``.
+ """
return self._core.is_adapter()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def tip_length(self) -> float:
+ """For a tip rack labware, the length of the tips it holds, in mm.
+
+ This is taken from the ``tipLength`` property of the ``parameters`` object in the labware definition.
+
+ This method will raise an exception if you call it on a labware that isn’t a tip rack.
+ """
return self._core.get_tip_length()
@tip_length.setter
@@ -835,13 +875,17 @@ def tip_length(self, length: float) -> None:
raise APIVersionError("Labware.tip_length setter has been deprecated")
# TODO(mc, 2023-02-06): this assert should be enough for mypy
- # invvestigate if upgrading mypy allows the `cast` to be removed
+ # investigate if upgrading mypy allows the `cast` to be removed
assert isinstance(self._core, LegacyLabwareCore)
cast(LegacyLabwareCore, self._core).set_tip_length(length)
# TODO(mc, 2022-11-09): implementation detail; deprecate public method
def next_tip(
- self, num_tips: int = 1, starting_tip: Optional[Well] = None
+ self,
+ num_tips: int = 1,
+ starting_tip: Optional[Well] = None,
+ *,
+ nozzle_map: Optional[NozzleMap] = None,
) -> Optional[Well]:
"""
Find the next valid well for pick-up.
@@ -862,6 +906,7 @@ def next_tip(
well_name = self._core.get_next_tip(
num_tips=num_tips,
starting_tip=starting_tip._core if starting_tip else None,
+ nozzle_map=nozzle_map,
)
return self._wells_by_name[well_name] if well_name is not None else None
@@ -993,7 +1038,13 @@ def return_tips(self, start_well: Well, num_channels: int = 1) -> None:
@requires_version(2, 0)
def reset(self) -> None:
- """Reset all tips in a tip rack.
+ """Reset tip tracking for a tip rack.
+
+ After resetting, the API treats all wells on the rack as if they contain unused tips.
+ This is useful if you want to reuse tips after calling :py:meth:`.return_tip()`.
+
+ If you need to physically replace an empty tip rack in the middle of your protocol,
+ use :py:meth:`.move_labware()` instead. See :ref:`off-deck-location` for an example.
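+
+        For example, a minimal sketch (``pipette`` and ``tips`` are assumed names for
+        a loaded pipette and this tip rack):
+
+        .. code-block:: python
+
+            pipette.pick_up_tip(tips["A1"])
+            pipette.return_tip()
+            tips.reset()   # A1 is treated as holding an unused tip again
+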
.. versionchanged:: 2.14
This method will raise an exception if you call it on a labware that isn't
@@ -1013,7 +1064,11 @@ def split_tipracks(tip_racks: List[Labware]) -> Tuple[Labware, List[Labware]]:
# TODO(mc, 2022-11-09): implementation detail, move to core
def select_tiprack_from_list(
- tip_racks: List[Labware], num_channels: int, starting_point: Optional[Well] = None
+ tip_racks: List[Labware],
+ num_channels: int,
+ starting_point: Optional[Well] = None,
+ *,
+ nozzle_map: Optional[NozzleMap] = None,
) -> Tuple[Labware, Well]:
try:
first, rest = split_tipracks(tip_racks)
@@ -1022,18 +1077,20 @@ def select_tiprack_from_list(
if starting_point and starting_point.parent != first:
raise TipSelectionError(
- "The starting tip you selected " f"does not exist in {first}"
+ f"The starting tip you selected does not exist in {first}"
)
elif starting_point:
first_well = starting_point
+ elif nozzle_map:
+ first_well = None
else:
first_well = first.wells()[0]
- next_tip = first.next_tip(num_channels, first_well)
+ next_tip = first.next_tip(num_channels, first_well, nozzle_map=nozzle_map)
if next_tip:
return first, next_tip
else:
- return select_tiprack_from_list(rest, num_channels)
+ return select_tiprack_from_list(rest, num_channels, None, nozzle_map=nozzle_map)
# TODO(mc, 2022-11-09): implementation detail, move to core
@@ -1045,14 +1102,23 @@ def filter_tipracks_to_start(
# TODO(mc, 2022-11-09): implementation detail, move to core
def next_available_tip(
- starting_tip: Optional[Well], tip_racks: List[Labware], channels: int
+ starting_tip: Optional[Well],
+ tip_racks: List[Labware],
+ channels: int,
+ *,
+ nozzle_map: Optional[NozzleMap] = None,
) -> Tuple[Labware, Well]:
start = starting_tip
if start is None:
- return select_tiprack_from_list(tip_racks, channels)
+ return select_tiprack_from_list(
+ tip_racks, channels, None, nozzle_map=nozzle_map
+ )
else:
return select_tiprack_from_list(
- filter_tipracks_to_start(start, tip_racks), channels, start
+ filter_tipracks_to_start(start, tip_racks),
+ channels,
+ start,
+ nozzle_map=nozzle_map,
)
diff --git a/api/src/opentrons/protocol_api/module_contexts.py b/api/src/opentrons/protocol_api/module_contexts.py
index 8605069ccc7..654a6ec46c1 100644
--- a/api/src/opentrons/protocol_api/module_contexts.py
+++ b/api/src/opentrons/protocol_api/module_contexts.py
@@ -8,8 +8,8 @@
from opentrons.legacy_broker import LegacyBroker
from opentrons.hardware_control.modules import ThermocyclerStep
-from opentrons.commands import module_commands as cmds
-from opentrons.commands.publisher import CommandPublisher, publish
+from opentrons.legacy_commands import module_commands as cmds
+from opentrons.legacy_commands.publisher import CommandPublisher, publish
from opentrons.protocols.api_support.types import APIVersion
from opentrons.protocols.api_support.util import APIVersionError, requires_version
@@ -63,18 +63,18 @@ def __init__(
self._core_map = core_map
self._api_version = api_version
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def api_version(self) -> APIVersion:
return self._api_version
- @property # type: ignore[misc]
+ @property
@requires_version(2, 14)
def model(self) -> ModuleModel:
"""Get the module's model identifier."""
return cast(ModuleModel, self._core.get_model().value)
- @property # type: ignore[misc]
+ @property
@requires_version(2, 14)
def type(self) -> ModuleType:
"""Get the module's general type identifier."""
@@ -151,7 +151,7 @@ def load_labware(
load_location = loaded_adapter._core
else:
load_location = self._core
-
+ name = validation.ensure_lowercase_name(name)
labware_core = self._protocol_core.load_labware(
load_name=name,
label=label,
@@ -265,14 +265,14 @@ def load_adapter_from_definition(self, definition: LabwareDefinition) -> Labware
version=load_params.version,
)
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def labware(self) -> Optional[Labware]:
"""The labware (if any) present on this module."""
labware_core = self._protocol_core.get_labware_on_module(self._core)
return self._core_map.get(labware_core)
- @property # type: ignore[misc]
+ @property
@requires_version(2, 14)
def parent(self) -> str:
"""The name of the slot the module is on.
@@ -282,7 +282,7 @@ def parent(self) -> str:
"""
return self._core.get_deck_slot_id()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def geometry(self) -> LegacyModuleGeometry:
"""The object representing the module as an item on the deck.
@@ -319,7 +319,7 @@ class TemperatureModuleContext(ModuleContext):
_core: TemperatureModuleCore
- @property # type: ignore[misc]
+ @property
@requires_version(2, 14)
def serial_number(self) -> str:
"""Get the module's unique hardware serial number."""
@@ -361,7 +361,7 @@ def deactivate(self) -> None:
"""Stop heating or cooling, and turn off the fan."""
self._core.deactivate()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def temperature(self) -> float:
"""The current temperature of the Temperature Module's deck in °C.
@@ -370,7 +370,7 @@ def temperature(self) -> float:
"""
return self._core.get_current_temperature()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def target(self) -> Optional[float]:
"""The target temperature of the Temperature Module's deck in °C.
@@ -379,7 +379,7 @@ def target(self) -> Optional[float]:
"""
return self._core.get_target_temperature()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 3)
def status(self) -> str:
"""One of four possible temperature statuses:
@@ -404,7 +404,7 @@ class MagneticModuleContext(ModuleContext):
_core: MagneticModuleCore
- @property # type: ignore[misc]
+ @property
@requires_version(2, 14)
def serial_number(self) -> str:
"""Get the module's unique hardware serial number."""
@@ -467,9 +467,9 @@ def engage(
if height is not None:
if self._api_version >= _MAGNETIC_MODULE_HEIGHT_PARAM_REMOVED_IN:
raise APIVersionError(
- "The height parameter of MagneticModuleContext.engage() was removed"
- " in {_MAGNETIC_MODULE_HEIGHT_PARAM_REMOVED_IN}."
- " Use offset or height_from_base instead."
+ f"The height parameter of MagneticModuleContext.engage() was removed"
+ f" in {_MAGNETIC_MODULE_HEIGHT_PARAM_REMOVED_IN}."
+ f" Use offset or height_from_base instead."
)
self._core.engage(height_from_home=height)
@@ -493,7 +493,7 @@ def disengage(self) -> None:
"""Lower the magnets back into the Magnetic Module."""
self._core.disengage()
- @property # type: ignore
+ @property
@requires_version(2, 0)
def status(self) -> str:
"""The status of the module, either ``engaged`` or ``disengaged``."""
@@ -511,7 +511,7 @@ class ThermocyclerContext(ModuleContext):
_core: ThermocyclerCore
- @property # type: ignore[misc]
+ @property
@requires_version(2, 14)
def serial_number(self) -> str:
"""Get the module's unique hardware serial number."""
@@ -642,7 +642,7 @@ def deactivate(self) -> None:
"""Turn off both the well block temperature controller and the lid heater."""
self._core.deactivate()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def lid_position(self) -> Optional[str]:
"""One of these possible lid statuses:
@@ -655,7 +655,7 @@ def lid_position(self) -> Optional[str]:
status = self._core.get_lid_position()
return status.value if status is not None else None
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def block_temperature_status(self) -> str:
"""One of five possible temperature statuses:
@@ -669,7 +669,7 @@ def block_temperature_status(self) -> str:
"""
return self._core.get_block_temperature_status().value
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def lid_temperature_status(self) -> Optional[str]:
"""One of five possible temperature statuses:
@@ -685,61 +685,61 @@ def lid_temperature_status(self) -> Optional[str]:
status = self._core.get_lid_temperature_status()
return status.value if status is not None else None
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def block_temperature(self) -> Optional[float]:
"""The current temperature of the well block in °C."""
return self._core.get_block_temperature()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def block_target_temperature(self) -> Optional[float]:
"""The target temperature of the well block in °C."""
return self._core.get_block_target_temperature()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def lid_temperature(self) -> Optional[float]:
"""The current temperature of the lid in °C."""
return self._core.get_lid_temperature()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def lid_target_temperature(self) -> Optional[float]:
"""The target temperature of the lid in °C."""
return self._core.get_lid_target_temperature()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def ramp_rate(self) -> Optional[float]:
"""The current ramp rate in °C/s."""
return self._core.get_ramp_rate()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def hold_time(self) -> Optional[float]:
"""Remaining hold time in seconds."""
return self._core.get_hold_time()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def total_cycle_count(self) -> Optional[int]:
"""Number of repetitions for current set cycle"""
return self._core.get_total_cycle_count()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def current_cycle_index(self) -> Optional[int]:
"""Index of the current set cycle repetition"""
return self._core.get_current_cycle_index()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def total_step_count(self) -> Optional[int]:
"""Number of steps within the current cycle"""
return self._core.get_total_step_count()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 0)
def current_step_index(self) -> Optional[int]:
"""Index of the current step within the current cycle"""
@@ -757,13 +757,13 @@ class HeaterShakerContext(ModuleContext):
_core: HeaterShakerCore
- @property # type: ignore[misc]
+ @property
@requires_version(2, 14)
def serial_number(self) -> str:
"""Get the module's unique hardware serial number."""
return self._core.get_serial_number()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 13)
def target_temperature(self) -> Optional[float]:
"""The target temperature of the Heater-Shaker's plate in °C.
@@ -772,7 +772,7 @@ def target_temperature(self) -> Optional[float]:
"""
return self._core.get_target_temperature()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 13)
def current_temperature(self) -> float:
"""The current temperature of the Heater-Shaker's plate in °C.
@@ -781,19 +781,19 @@ def current_temperature(self) -> float:
"""
return self._core.get_current_temperature()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 13)
def current_speed(self) -> int:
"""The current speed of the Heater-Shaker's plate in rpm."""
return self._core.get_current_speed()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 13)
def target_speed(self) -> Optional[int]:
"""Target speed of the Heater-Shaker's plate in rpm."""
return self._core.get_target_speed()
- @property # type: ignore[misc]
+ @property
@requires_version(2, 13)
def temperature_status(self) -> str:
"""One of five possible temperature statuses:
@@ -808,7 +808,7 @@ def temperature_status(self) -> str:
"""
return self._core.get_temperature_status().value
- @property # type: ignore[misc]
+ @property
@requires_version(2, 13)
def speed_status(self) -> str:
"""One of five possible shaking statuses:
@@ -823,7 +823,7 @@ def speed_status(self) -> str:
"""
return self._core.get_speed_status().value
- @property # type: ignore[misc]
+ @property
@requires_version(2, 13)
def labware_latch_status(self) -> str:
"""One of six possible latch statuses:
diff --git a/api/src/opentrons/protocol_api/protocol_context.py b/api/src/opentrons/protocol_api/protocol_context.py
index ec1ff432384..feb8f56d91c 100644
--- a/api/src/opentrons/protocol_api/protocol_context.py
+++ b/api/src/opentrons/protocol_api/protocol_context.py
@@ -16,14 +16,23 @@
from opentrons_shared_data.labware.dev_types import LabwareDefinition
from opentrons_shared_data.pipette.dev_types import PipetteNameType
-from opentrons.types import Mount, Location, DeckLocation, DeckSlotName
+from opentrons.types import Mount, Location, DeckLocation, DeckSlotName, StagingSlotName
from opentrons.legacy_broker import LegacyBroker
from opentrons.hardware_control import SyncHardwareAPI
from opentrons.hardware_control.modules.types import MagneticBlockModel
-from opentrons.commands import protocol_commands as cmds, types as cmd_types
-from opentrons.commands.publisher import CommandPublisher, publish
+from opentrons.legacy_commands import protocol_commands as cmds, types as cmd_types
+from opentrons.legacy_commands.helpers import stringify_labware_movement_command
+from opentrons.legacy_commands.publisher import (
+ CommandPublisher,
+ publish,
+ publish_context,
+)
from opentrons.protocols.api_support import instrument as instrument_support
-from opentrons.protocols.api_support.deck_type import NoTrashDefinedError
+from opentrons.protocols.api_support.deck_type import (
+ NoTrashDefinedError,
+ should_load_fixed_trash_labware_for_python_protocol,
+ should_load_fixed_trash_area_for_python_protocol,
+)
from opentrons.protocols.api_support.types import APIVersion
from opentrons.protocols.api_support.util import (
AxisMaxSpeeds,
@@ -47,7 +56,7 @@
from . import validation
from ._liquid import Liquid
-from ._waste_chute import WasteChute
+from .disposal_locations import TrashBin, WasteChute
from .deck import Deck
from .instrument_context import InstrumentContext
from .labware import Labware
@@ -59,6 +68,7 @@
MagneticBlockContext,
ModuleContext,
)
+from ._parameters import Parameters
logger = logging.getLogger(__name__)
@@ -83,17 +93,24 @@ class HardwareManager(NamedTuple):
class ProtocolContext(CommandPublisher):
- """The Context class is a container for the state of a protocol.
+ """A context for the state of a protocol.
+
+ The ``ProtocolContext`` class provides the objects, attributes, and methods that
+ allow you to configure and control the protocol.
+
+ Methods generally fall into one of two categories.
+
+ - They can change the state of the ``ProtocolContext`` object, such as adding
+ pipettes, hardware modules, or labware to your protocol.
+ - They can control the flow of a running protocol, such as pausing, displaying
+ messages, or controlling built-in robot hardware like the ambient lighting.
- It encapsulates many of the methods formerly found in the Robot class,
- including labware, instrument, and module loading, as well as core
- functions like pause and resume.
+ Do not instantiate a ``ProtocolContext`` directly.
+ The ``run()`` function of your protocol does that for you.
+ See the :ref:`Tutorial ` for more information.
- Unlike the old robot class, it is designed to be ephemeral. The lifetime
- of a particular instance should be about the same as the lifetime of a
- protocol. The only exception is the one stored in
- ``.legacy_api.api.robot``, which is provided only for back
- compatibility and should be used less and less as time goes by.
+ Use :py:meth:`opentrons.execute.get_protocol_api` to instantiate a ``ProtocolContext`` when
+ using Jupyter Notebook. See :ref:`advanced-control`.
.. versionadded:: 2.0
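For orientation, a minimal protocol skeleton that receives its ``ProtocolContext`` through ``run()`` could look like the following sketch (the robot type and apiLevel are illustrative):

    from opentrons import protocol_api

    requirements = {"robotType": "Flex", "apiLevel": "2.16"}

    def run(protocol: protocol_api.ProtocolContext) -> None:
        # The runtime constructs the ProtocolContext and passes it in; the protocol
        # never instantiates one itself.
        protocol.comment("Protocol is running")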
@@ -137,21 +154,47 @@ def __init__(
mount: None for mount in Mount
}
self._bundled_data: Dict[str, bytes] = bundled_data or {}
+
+ # With the addition of movable trash bins and waste chute support, we now need to
+ # make sure the list of "disposal locations" (essentially the list of trashes) is
+ # initialized correctly for protocols on API versions prior to 2.16, and that OT-2
+ # protocols on 2.16 and above get a fixed trash bin loaded for them automatically,
+ # so a protocol never has to load a trash bin itself.
+ # Protocols prior to 2.16 expect the Fixed Trash to exist as a Labware object, while
+ # protocols after 2.16 expect trash to exist as either a TrashBin or WasteChute object.
+
self._load_fixed_trash()
+ if should_load_fixed_trash_labware_for_python_protocol(self._api_version):
+ self._core.append_disposal_location(self.fixed_trash)
+ elif should_load_fixed_trash_area_for_python_protocol(
+ self._api_version, self._core.robot_type
+ ):
+ self._core.load_ot2_fixed_trash_bin()
self._commands: List[str] = []
+ self._params: Parameters = Parameters()
self._unsubscribe_commands: Optional[Callable[[], None]] = None
self.clear_commands()
- @property # type: ignore
+ @property
@requires_version(2, 0)
def api_version(self) -> APIVersion:
- """Return the API version supported by this protocol context.
-
- The supported API version was specified when the protocol context
- was initialized. It may be lower than the highest version supported
- by the robot software. For the highest version supported by the
- robot software, see ``protocol_api.MAX_SUPPORTED_VERSION``.
+ """Return the API version specified for this protocol context.
+
+ This value is set when the protocol context
+ is initialized.
+
+ - When the context is the argument of ``run()``, the ``"apiLevel"`` key of the
+ :ref:`metadata ` or :ref:`requirements
+ ` dictionary determines ``api_version``.
+ - When the context is instantiated with
+ :py:meth:`opentrons.execute.get_protocol_api` or
+ :py:meth:`opentrons.simulate.get_protocol_api`, the value of its ``version``
+ argument determines ``api_version``.
+
+ It may be lower than the :ref:`maximum version ` supported by the
+ robot software, which is accessible via the
+ ``protocol_api.MAX_SUPPORTED_VERSION`` constant.
"""
return self._api_version
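As a small illustration of the rewritten docstring, assuming a protocol that declares ``apiLevel`` in its ``requirements`` dictionary:

    from opentrons.protocols.api_support.types import APIVersion

    requirements = {"robotType": "OT-2", "apiLevel": "2.16"}

    def run(protocol):
        # api_version reflects the declared apiLevel, which may be lower than
        # protocol_api.MAX_SUPPORTED_VERSION.
        if protocol.api_version >= APIVersion(2, 16):
            protocol.comment("Running at apiLevel 2.16 or newer")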
@@ -165,18 +208,24 @@ def _hw_manager(self) -> HardwareManager:
)
return HardwareManager(hardware=self._core.get_hardware())
- @property # type: ignore
+ @property
@requires_version(2, 0)
def bundled_data(self) -> Dict[str, bytes]:
"""Accessor for data files bundled with this protocol, if any.
- This is a dictionary mapping the filenames of bundled datafiles, with
- extensions but without paths (e.g. if a file is stored in the bundle as
- ``data/mydata/aspirations.csv`` it will be in the dict as
- ``'aspirations.csv'``) to the bytes contents of the files.
+ This is a dictionary mapping the filenames of bundled datafiles to their
+ contents. The filename keys are formatted with extensions but without paths. For
+ example, a file stored in the bundle as ``data/mydata/aspirations.csv`` will
+ have the key ``"aspirations.csv"``. The values are :py:class:`bytes` objects
+ representing the contents of the files.
"""
return self._bundled_data
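A quick sketch of consuming bundled data, assuming the bundle contains a file stored as ``data/mydata/aspirations.csv``:

    import csv

    def run(protocol):
        # Keys are bare filenames; values are the raw bytes of each bundled file.
        raw = protocol.bundled_data["aspirations.csv"]
        rows = list(csv.reader(raw.decode("utf-8").splitlines()))
        protocol.comment(f"Loaded {len(rows)} rows of aspiration data")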
+ @property
+ @requires_version(2, 18)
+ def params(self) -> Parameters:
+ return self._params
+
def cleanup(self) -> None:
"""Finalize and clean up the protocol context."""
if self._unsubscribe_commands:
@@ -187,39 +236,24 @@ def __del__(self) -> None:
if getattr(self, "_unsubscribe_commands", None):
self._unsubscribe_commands() # type: ignore
- @property # type: ignore
+ @property
@requires_version(2, 0)
def max_speeds(self) -> AxisMaxSpeeds:
- """Per-axis speed limits when moving this instrument.
+ """Per-axis speed limits for moving instruments.
- Changing this value changes the speed limit for each non-plunger
- axis of the robot, when moving this pipette. Note that this does
- only sets a limit on how fast movements can be; movements can
- still be slower than this. However, it is useful if you require
- the robot to move much more slowly than normal when using this
- pipette.
+ Changing values within this property sets the speed limit for each non-plunger
+ axis of the robot. Note that this property only sets upper limits and can't
+ exceed the physical speed limits of the movement system.
- This is a dictionary mapping string names of axes to float values
- limiting speeds. To change a speed, set that axis's value. To
+ This property is a dict mapping string names of axes to float values
+ of maximum speeds in mm/s. To change a speed, set that axis's value. To
reset an axis's speed to default, delete the entry for that axis
or assign it to ``None``.
- For instance,
-
- .. code-block:: py
+ See :ref:`axis_speed_limits` for examples.
- def run(protocol):
- protocol.comment(str(right.max_speeds)) # '{}' - all default
- protocol.max_speeds['A'] = 10 # limit max speed of
- # right pipette Z to 10mm/s
- del protocol.max_speeds['A'] # reset to default
- protocol.max_speeds['X'] = 10 # limit max speed of x to
- # 10 mm/s
- protocol.max_speeds['X'] = None # reset to default
-
- .. caution::
- This property is not yet supported on
- :ref:`API version ` 2.14 or higher.
+ .. note::
+ This property is not yet supported in API version 2.14 or higher.
"""
if self._api_version >= ENGINE_CORE_API_VERSION:
# TODO(mc, 2023-02-23): per-axis max speeds not yet supported on the engine
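The usage example dropped from the docstring still shows the dict-like behavior well; roughly (axis names as in the removed example, and only for API versions below 2.14):

    def run(protocol):
        protocol.max_speeds["A"] = 10    # limit right-pipette Z speed to 10 mm/s
        protocol.max_speeds["X"] = 10    # limit gantry X speed to 10 mm/s
        del protocol.max_speeds["A"]     # restore the default for that axis
        protocol.max_speeds["X"] = None  # also restores the default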
@@ -240,7 +274,7 @@ def commands(self) -> List[str]:
far. For example, "Aspirating 123 µL from well A1 of 96 well plate in slot 1."
The exact format of these entries is not guaranteed. The format here may differ from other
- places that show the run log, such as the Opentrons App.
+ places that show the run log, such as the Opentrons App or touchscreen.
"""
return self._commands
@@ -270,6 +304,20 @@ def on_command(message: cmd_types.CommandMessage) -> None:
@requires_version(2, 0)
def is_simulating(self) -> bool:
+ """Returns ``True`` if the protocol is running in simulation.
+
+ Returns ``False`` if the protocol is running on actual hardware.
+
+ You can evaluate the result of this method in an ``if`` statement to make your
+ protocol behave differently in different environments. For example, you could
+ refer to a data file on your computer when simulating and refer to a data file
+ stored on the robot when not simulating.
+
+ You can also use it to skip time-consuming aspects of your protocol. Most Python
+ Protocol API methods, like :py:meth:`.delay`, are designed to evaluate
+ instantaneously in simulation. But external methods, like those from the
+ :py:mod:`time` module, will run at normal speed if not skipped.
+ """
return self._core.is_simulating()
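For example, a protocol can branch on the result to skip a real-time wait during simulation (the 30-minute incubation is illustrative):

    import time

    def run(protocol):
        if protocol.is_simulating():
            protocol.comment("Simulating: skipping the 30-minute incubation")
        else:
            time.sleep(30 * 60)  # external calls like time.sleep() run at full length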
@requires_version(2, 0)
@@ -279,19 +327,18 @@ def load_labware_from_definition(
location: Union[DeckLocation, OffDeckType],
label: Optional[str] = None,
) -> Labware:
- """Specify the presence of a piece of labware on the OT2 deck.
+ """Specify the presence of a labware on the deck.
- This function loads the labware definition specified by `labware_def`
- to the location specified by `location`.
+ This function loads the labware definition specified by ``labware_def``
+ to the location specified by ``location``.
- :param labware_def: The labware definition to load
+ :param labware_def: The labware's definition.
:param location: The slot into which to load the labware,
such as ``1``, ``"1"``, or ``"D1"``. See :ref:`deck-slots`.
:type location: int or str or :py:obj:`OFF_DECK`
- :param str label: An optional special name to give the labware. If
- specified, this is the name the labware will appear
- as in the run log and the calibration view in the
- Opentrons app.
+ :param str label: An optional special name to give the labware. If specified,
+ this is how the labware will appear in the run log, Labware Position
+ Check, and elsewhere in the Opentrons App and on the touchscreen.
"""
load_params = self._core.add_labware_definition(labware_def)
@@ -315,7 +362,7 @@ def load_labware(
) -> Labware:
"""Load a labware onto a location.
- For labware already defined by Opentrons, this is a convenient way
+ For Opentrons-verified labware, this is a convenient way
to collapse the two stages of labware initialization (creating
the labware and adding it to the protocol) into one.
@@ -323,8 +370,8 @@ def load_labware(
later in the protocol.
:param str load_name: A string to use for looking up a labware definition.
- You can find the ``load_name`` for any standard labware on the Opentrons
- `Labware Library `_.
+ You can find the ``load_name`` for any Opentrons-verified labware on the
+ `Labware Library `__.
:param location: Either a :ref:`deck slot `,
like ``1``, ``"1"``, or ``"D1"``, or the special value :py:obj:`OFF_DECK`.
@@ -334,25 +381,30 @@ def load_labware(
:type location: int or str or :py:obj:`OFF_DECK`
- :param str label: An optional special name to give the labware. If specified, this
- is the name the labware will appear as in the run log and the calibration
- view in the Opentrons app.
+ :param str label: An optional special name to give the labware. If specified,
+ this is how the labware will appear in the run log, Labware Position
+ Check, and elsewhere in the Opentrons App and on the touchscreen.
:param str namespace: The namespace that the labware definition belongs to.
- If unspecified, will search both:
+ If unspecified, the API will automatically search two namespaces:
- * ``"opentrons"``, to load standard Opentrons labware definitions.
- * ``"custom_beta"``, to load custom labware definitions created with the
- `Custom Labware Creator `_.
+ - ``"opentrons"``, to load standard Opentrons labware definitions.
+ - ``"custom_beta"``, to load custom labware definitions created with the
+ `Custom Labware Creator `__.
You might need to specify an explicit ``namespace`` if you have a custom
- definition whose ``load_name`` is the same as an Opentrons standard
+ definition whose ``load_name`` is the same as an Opentrons-verified
definition, and you want to explicitly choose one or the other.
:param version: The version of the labware definition. You should normally
- leave this unspecified to let the implementation choose a good default.
- :param adapter: Load name of an adapter to load the labware on top of. The adapter
- will be loaded from the same given namespace, but version will be automatically chosen.
+ leave this unspecified to let ``load_labware()`` choose a version
+ automatically.
+ :param adapter: An adapter to load the labware on top of. Accepts the same
+ values as the ``load_name`` parameter of :py:meth:`.load_adapter`. The
+ adapter will use the same namespace as the labware, and the API will
+ choose the adapter's version automatically.
+
+ .. versionadded:: 2.15
"""
if isinstance(location, OffDeckType) and self._api_version < APIVersion(2, 15):
raise APIVersionError(
@@ -360,7 +412,7 @@ def load_labware(
)
load_name = validation.ensure_lowercase_name(load_name)
- load_location: Union[OffDeckType, DeckSlotName, LabwareCore]
+ load_location: Union[OffDeckType, DeckSlotName, StagingSlotName, LabwareCore]
if adapter is not None:
if self._api_version < APIVersion(2, 15):
raise APIVersionError(
@@ -439,18 +491,47 @@ def load_adapter_from_definition(
)
@requires_version(2, 16)
- # TODO: Confirm official naming of "waste chute".
+ def load_trash_bin(self, location: DeckLocation) -> TrashBin:
+ """Load a trash bin on the deck of a Flex.
+
+ See :ref:`configure-trash-bin` for details.
+
+ If you try to load a trash bin on an OT-2, the API will raise an error.
+
+ :param location: The :ref:`deck slot ` where the trash bin is. The
+ location can be any unoccupied slot in column 1 or 3.
+
+ If you try to load a trash bin in column 2 or 4, the API will raise an error.
+ """
+ slot_name = validation.ensure_and_convert_deck_slot(
+ location,
+ api_version=self._api_version,
+ robot_type=self._core.robot_type,
+ )
+ if not isinstance(slot_name, DeckSlotName):
+ raise ValueError("Staging areas not permitted for trash bin.")
+ addressable_area_name = validation.ensure_and_convert_trash_bin_location(
+ location,
+ api_version=self._api_version,
+ robot_type=self._core.robot_type,
+ )
+ trash_bin = self._core.load_trash_bin(slot_name, addressable_area_name)
+ return trash_bin
+
+ @requires_version(2, 16)
def load_waste_chute(
self,
- *,
- # TODO: Confirm official naming of "staging area slot".
- with_staging_area_slot_d4: bool = False,
) -> WasteChute:
- if with_staging_area_slot_d4:
- raise NotImplementedError(
- "The waste chute staging area slot is not currently implemented."
- )
- return WasteChute(with_staging_area_slot_d4=with_staging_area_slot_d4)
+ """Load the waste chute on the deck of a Flex.
+
+ See :ref:`configure-waste-chute` for details, including the deck configuration
+ variants of the waste chute.
+
+ The deck plate adapter for the waste chute can only go in slot D3. If you try to
+ load another item in slot D3 after loading the waste chute, or vice versa, the
+ API will raise an error.
+ """
+ return self._core.load_waste_chute()
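Taken together, a Flex protocol on apiLevel 2.16 might configure its disposal locations like this (slot A3 is illustrative; any unoccupied slot in column 1 or 3 works for the trash bin):

    def run(protocol):
        trash = protocol.load_trash_bin("A3")  # column 1 or 3 only; Flex only
        chute = protocol.load_waste_chute()    # always occupies the slot D3 cutout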
@requires_version(2, 15)
def load_adapter(
@@ -479,7 +560,7 @@ def load_adapter(
:type location: int or str or :py:obj:`OFF_DECK`
:param str namespace: The namespace that the labware definition belongs to.
- If unspecified, will search both:
+ If unspecified, the API will automatically search two namespaces:
* ``"opentrons"``, to load standard Opentrons labware definitions.
* ``"custom_beta"``, to load custom labware definitions created with the
@@ -490,10 +571,10 @@ def load_adapter(
definition, and you want to explicitly choose one or the other.
:param version: The version of the labware definition. You should normally
- leave this unspecified to let the implementation choose a good default.
+ leave this unspecified to let ``load_adapter()`` choose a version automatically.
"""
load_name = validation.ensure_lowercase_name(load_name)
- load_location: Union[OffDeckType, DeckSlotName]
+ load_location: Union[OffDeckType, DeckSlotName, StagingSlotName]
if isinstance(location, OffDeckType):
load_location = location
else:
@@ -520,7 +601,7 @@ def load_adapter(
# TODO(mm, 2023-06-07): Figure out what to do with this, now that the Flex has non-integer
# slot names and labware can be stacked. https://opentrons.atlassian.net/browse/RLAB-354
- @property # type: ignore
+ @property
@requires_version(2, 0)
def loaded_labwares(self) -> Dict[int, Labware]:
"""Get the labwares that have been loaded into the protocol context.
@@ -534,7 +615,7 @@ def loaded_labwares(self) -> Dict[int, Labware]:
be no entry for that slot in this value. That means you should not
use ``loaded_labwares`` to determine if a slot is available or not,
only to get a list of labwares. If you want a data structure of all
- objects on the deck regardless of type, see :py:attr:`deck`.
+ objects on the deck regardless of type, use :py:attr:`deck`.
:returns: Dict mapping deck slot number to labware, sorted in order of
@@ -562,7 +643,9 @@ def move_labware(
pick_up_offset: Optional[Mapping[str, float]] = None,
drop_offset: Optional[Mapping[str, float]] = None,
) -> None:
- """Move a loaded labware to a new location. See :ref:`moving-labware` for more details.
+ """Move a loaded labware to a new location.
+
+ See :ref:`moving-labware` for more details.
:param labware: The labware to move. It should be a labware already loaded
using :py:meth:`load_labware`.
@@ -578,9 +661,9 @@ def move_labware(
:param use_gripper: Whether to use the Flex Gripper for this movement.
- * If ``True``, will use the gripper to perform an automatic
- movement. This will raise an error on an OT-2 protocol.
- * If ``False``, will pause protocol execution until the user
+ * If ``True``, use the gripper to perform an automatic
+ movement. This will raise an error in an OT-2 protocol.
+ * If ``False``, pause protocol execution until the user
performs the movement. Protocol execution remains paused until
the user presses **Confirm and resume**.
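A minimal sketch of a gripper move on a Flex (labware and slots are illustrative):

    def run(protocol):
        plate = protocol.load_labware("nest_96_wellplate_200ul_flat", "D2")
        # use_gripper=True moves the labware automatically; use_gripper=False pauses
        # the run until the user confirms a manual move.
        protocol.move_labware(plate, new_location="C2", use_gripper=True)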
@@ -599,7 +682,14 @@ def move_labware(
f"Expected labware of type 'Labware' but got {type(labware)}."
)
- location: Union[ModuleCore, LabwareCore, WasteChute, OffDeckType, DeckSlotName]
+ location: Union[
+ ModuleCore,
+ LabwareCore,
+ WasteChute,
+ OffDeckType,
+ DeckSlotName,
+ StagingSlotName,
+ ]
if isinstance(new_location, (Labware, ModuleContext)):
location = new_location._core
elif isinstance(new_location, (OffDeckType, WasteChute)):
@@ -619,14 +709,23 @@ def move_labware(
if drop_offset
else None
)
- self._core.move_labware(
- labware_core=labware._core,
- new_location=location,
- use_gripper=use_gripper,
- pause_for_manual_move=True,
- pick_up_offset=_pick_up_offset,
- drop_offset=_drop_offset,
- )
+ with publish_context(
+ broker=self.broker,
+ command=cmds.move_labware(
+ # This needs to be called from protocol context and not the command for import loop reasons
+ text=stringify_labware_movement_command(
+ labware, new_location, use_gripper
+ )
+ ),
+ ):
+ self._core.move_labware(
+ labware_core=labware._core,
+ new_location=location,
+ use_gripper=use_gripper,
+ pause_for_manual_move=True,
+ pick_up_offset=_pick_up_offset,
+ drop_offset=_drop_offset,
+ )
@requires_version(2, 0)
def load_module(
@@ -643,8 +742,8 @@ def load_module(
context, which will be a different class depending on the kind of
module loaded.
- A map of deck positions to loaded modules can be accessed later
- by using :py:attr:`loaded_modules`.
+ After loading modules, you can access a map of deck positions to loaded modules
+ with :py:attr:`loaded_modules`.
:param str module_name: The name or model of the module.
See :ref:`available_modules` for possible values.
@@ -661,7 +760,7 @@ def load_module(
.. versionchanged:: 2.15
You can now specify a deck slot as a coordinate, like ``"D1"``.
- :param configuration: Configure a thermocycler to be in the ``semi`` position.
+ :param configuration: Configure a Thermocycler to be in the ``semi`` position.
This parameter does not work. Do not use it.
.. versionchanged:: 2.14
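For instance (module name and slot are illustrative):

    def run(protocol):
        temp_mod = protocol.load_module("temperature module gen2", "D1")
        temp_mod.set_temperature(celsius=4)
        # loaded_modules now maps the module's slot to this TemperatureModuleContext.
        protocol.comment(str(protocol.loaded_modules))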
@@ -709,6 +808,8 @@ def load_module(
location, self._api_version, self._core.robot_type
)
)
+ if isinstance(deck_slot, StagingSlotName):
+ raise ValueError("Cannot load a module onto a staging slot.")
module_core = self._core.load_module(
model=requested_model,
@@ -730,21 +831,20 @@ def load_module(
# TODO(mm, 2023-06-07): Figure out what to do with this, now that the Flex has non-integer
# slot names and labware can be stacked. https://opentrons.atlassian.net/browse/RLAB-354
- @property # type: ignore
+ @property
@requires_version(2, 0)
def loaded_modules(self) -> Dict[int, ModuleTypes]:
"""Get the modules loaded into the protocol context.
- This is a map of deck positions to modules loaded by previous calls
- to :py:meth:`load_module`. It is not necessarily the same as the
- modules attached to the robot - for instance, if the robot has a
- Magnetic Module and a Temperature Module attached, but the protocol
- has only loaded the Temperature Module with :py:meth:`load_module`,
- only the Temperature Module will be present.
+ This is a map of deck positions to modules loaded by previous calls to
+ :py:meth:`load_module`. It does not reflect what modules are actually attached
+ to the robot. For example, if the robot has a Magnetic Module and a Temperature
+ Module attached, but the protocol has only loaded the Temperature Module with
+ :py:meth:`load_module`, only the Temperature Module will be included in
+ ``loaded_modules``.
- :returns Dict[int, ModuleContext]: Dict mapping slot name to module
- contexts. The elements may not be
- ordered by slot number.
+ :returns: Dict mapping slot name to module contexts. The elements may not be
+ ordered by slot number.
"""
return {
core.get_deck_slot().as_int(): self._core_map.get(core)
@@ -755,39 +855,44 @@ def loaded_modules(self) -> Dict[int, ModuleTypes]:
def load_instrument(
self,
instrument_name: str,
- mount: Union[Mount, str],
+ mount: Union[Mount, str, None] = None,
tip_racks: Optional[List[Labware]] = None,
replace: bool = False,
) -> InstrumentContext:
- """Load a specific instrument required by the protocol.
-
- This value will actually be checked when the protocol runs, to
- ensure that the correct instrument is attached in the specified
- location.
-
- :param str instrument_name: The name of the instrument model, or a
- prefix. For instance, 'p10_single' may be
- used to request a P10 single regardless of
- the version.
- :param mount: The mount in which this instrument should be attached.
- This can either be an instance of the enum type
- :py:class:`.types.Mount` or one of the strings `'left'`
- and `'right'`.
- :type mount: types.Mount or str
- :param tip_racks: A list of tip racks from which to pick tips if
- :py:meth:`.InstrumentContext.pick_up_tip` is called
- without arguments.
+ """Load a specific instrument for use in the protocol.
+
+ When analyzing the protocol on the robot, instruments loaded with this method
+ are compared against the instruments attached to the robot. You won't be able to
+ start the protocol until the correct instruments are attached and calibrated.
+
+ Currently, this method only loads pipettes. You do not need to load the Flex
+ Gripper to use it in protocols. See :ref:`automatic-manual-moves`.
+
+ :param str instrument_name: The instrument to load. See :ref:`new-pipette-models`
+ for the valid values.
+ :param mount: The mount where the instrument should be attached.
+ This can either be an instance of :py:class:`.types.Mount` or one
+ of the strings ``"left"`` or ``"right"``. When loading a Flex
+ 96-Channel Pipette (``instrument_name="flex_96channel_1000"``),
+ you can leave this unspecified, since it always occupies both
+ mounts; if you do specify a value, it will be ignored.
+ :type mount: types.Mount or str or ``None``
+ :param tip_racks: A list of tip racks from which to pick tips when calling
+ :py:meth:`.InstrumentContext.pick_up_tip` without arguments.
:type tip_racks: List[:py:class:`.Labware`]
- :param bool replace: Indicate that the currently-loaded instrument in
- `mount` (if such an instrument exists) should be
- replaced by `instrument_name`.
+ :param bool replace: If ``True``, replace the currently loaded instrument in
+ ``mount``, if any. This is intended for :ref:`advanced
+ control ` applications. You cannot
+ replace an instrument in the middle of a protocol being run
+ from the Opentrons App or touchscreen.
"""
- # TODO (spp: 2023-08-30): disallow loading Flex pipettes on OT-2 by checking robotType
instrument_name = validation.ensure_lowercase_name(instrument_name)
checked_instrument_name = validation.ensure_pipette_name(instrument_name)
- is_96_channel = checked_instrument_name == PipetteNameType.P1000_96
+ checked_mount = validation.ensure_mount_for_pipette(
+ mount, checked_instrument_name
+ )
- checked_mount = Mount.LEFT if is_96_channel else validation.ensure_mount(mount)
+ is_96_channel = checked_instrument_name == PipetteNameType.P1000_96
tip_racks = tip_racks or []
@@ -796,7 +901,7 @@ def load_instrument(
if is_96_channel and on_right_mount is not None:
raise RuntimeError(
f"Instrument already present on right:"
- f" {on_right_mount.name}. In order to load a 96 channel pipette both mounts need to be available."
+ f" {on_right_mount.name}. In order to load a 96-channel pipette, both mounts need to be available."
)
existing_instrument = self._instruments[checked_mount]
@@ -823,10 +928,10 @@ def load_instrument(
log=logger,
)
- trash: Optional[Labware]
+ trash: Optional[Union[Labware, TrashBin]]
try:
trash = self.fixed_trash
- except NoTrashDefinedError:
+ except (NoTrashDefinedError, APIVersionError):
trash = None
instrument = InstrumentContext(
@@ -843,23 +948,21 @@ def load_instrument(
return instrument
- @property # type: ignore
+ @property
@requires_version(2, 0)
def loaded_instruments(self) -> Dict[str, InstrumentContext]:
"""Get the instruments that have been loaded into the protocol.
This is a map of mount name to instruments previously loaded with
- :py:meth:`load_instrument`. It is not necessarily the same as the
- instruments attached to the robot - for instance, if the robot has
- an instrument in both mounts but your protocol has only loaded one
- of them with :py:meth:`load_instrument`, the unused one will not
- be present.
-
- :returns: A dict mapping mount name
- (``'left'`` or ``'right'``)
- to the instrument in that mount.
- If a mount has no loaded instrument,
- that key will be missing from the dict.
+ :py:meth:`load_instrument`. It does not reflect what instruments are actually
+ installed on the robot. For example, if the robot has instruments installed on
+ both mounts but your protocol has only loaded one of them with
+ :py:meth:`load_instrument`, the unused one will not be included in
+ ``loaded_instruments``.
+
+ :returns: A dict mapping mount name (``"left"`` or ``"right"``) to the
+ instrument in that mount. If a mount has no loaded instrument, that key
+ will be missing from the dict.
"""
return {
mount.name.lower(): instr
@@ -872,14 +975,13 @@ def loaded_instruments(self) -> Dict[str, InstrumentContext]:
def pause(self, msg: Optional[str] = None) -> None:
"""Pause execution of the protocol until it's resumed.
- A human can resume the protocol through the Opentrons App.
+ A human can resume the protocol in the Opentrons App or on the touchscreen.
- This function returns immediately, but the next function call that
- is blocked by a paused robot (anything that involves moving) will
- not return until the protocol is resumed.
+ .. note::
+ In Python Protocol API version 2.13 and earlier, the pause will only
+ take effect on the next function call that involves moving the robot.
- :param str msg: An optional message to show to connected clients. The
- Opentrons App will show this in the run log.
+ :param str msg: An optional message to show in the run log entry for the pause step.
"""
self._core.pause(msg=msg)
@@ -890,7 +992,6 @@ def resume(self) -> None:
.. deprecated:: 2.12
The Python Protocol API supports no safe way for a protocol to resume itself.
- See https://github.com/Opentrons/opentrons/issues/8209.
If you're looking for a way for your protocol to resume automatically
after a period of time, use :py:meth:`delay`.
"""
@@ -909,12 +1010,15 @@ def resume(self) -> None:
@requires_version(2, 0)
def comment(self, msg: str) -> None:
"""
- Add a user-readable comment string that will be echoed to the Opentrons
- app.
+ Add a user-readable message to the run log.
- The value of the message is computed during protocol simulation,
- so cannot be used to communicate real-time information from the robot's
- actual run.
+ The message is visible anywhere you can view the run log, including the Opentrons App and the touchscreen on Flex.
+
+ .. note::
+
+ The value of the message is computed during protocol analysis,
+ so ``comment()`` can't communicate real-time information during the
+ actual protocol run.
"""
self._core.comment(msg=msg)
@@ -928,17 +1032,17 @@ def delay(
) -> None:
"""Delay protocol execution for a specific amount of time.
- :param float seconds: A time to delay in seconds
- :param float minutes: A time to delay in minutes
+ :param float seconds: The time to delay in seconds.
+ :param float minutes: The time to delay in minutes.
- If both `seconds` and `minutes` are specified, they will be added.
+ If both ``seconds`` and ``minutes`` are specified, they will be added together.
"""
delay_time = seconds + minutes * 60
self._core.delay(seconds=delay_time, msg=msg)
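These run-control methods compose straightforwardly; a brief sketch:

    def run(protocol):
        protocol.comment("Starting incubation")                   # shows up in the run log
        protocol.delay(minutes=10, seconds=30)                    # waits 10.5 minutes
        protocol.pause("Swap in a fresh tip rack, then resume")   # waits for the user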
@requires_version(2, 0)
def home(self) -> None:
- """Homes the robot."""
+ """Home the movement system of the robot."""
self._core.home()
@property
@@ -950,19 +1054,27 @@ def location_cache(self) -> Optional[Location]:
def location_cache(self, loc: Optional[Location]) -> None:
self._core.set_last_location(loc)
- @property # type: ignore
+ @property
@requires_version(2, 0)
def deck(self) -> Deck:
"""An interface to provide information about what's currently loaded on the deck.
- This object is useful for determining if a slot in the deck is free.
+ This object is useful for determining if a slot on the deck is free.
+
+ This object behaves like a dictionary whose keys are the :ref:`deck slot ` names.
+ For instance, ``deck[1]``, ``deck["1"]``, and ``deck["D1"]``
+ will all return the object loaded in the front-left slot.
- This object behaves like a dictionary whose keys are the deck slot names.
- For instance, ``protocol.deck[1]``, ``protocol.deck["1"]``, and ``protocol.deck["D1"]``
- will all return the object loaded in the front-left slot. (See :ref:`deck-slots`.)
+ The value for each key depends on what is loaded in the slot:
+ - A :py:obj:`~opentrons.protocol_api.Labware` if the slot contains a labware.
+ - A module context if the slot contains a hardware module.
+ - ``None`` if the slot doesn't contain anything.
- The value will be a :py:obj:`~opentrons.protocol_api.Labware` if the slot contains a
- labware, a :py:obj:`~opentrons.protocol_api.ModuleContext` if the slot contains a hardware
- module, or ``None`` if the slot doesn't contain anything.
+ A module that occupies multiple slots is set as the value for all of the
+ relevant slots. Currently, the only multiple-slot module is the Thermocycler.
+ When loaded, the :py:class:`ThermocyclerContext` object is the value for
+ ``deck`` keys ``"A1"`` and ``"B1"`` on Flex, and ``7``, ``8``, ``10``, and
+ ``11`` on OT-2. In API version 2.13 and earlier, only slot 7 keyed to the
+ Thermocycler object, and slots 8, 10, and 11 keyed to ``None``.
Rather than filtering the objects in the deck map yourself,
you can also use :py:attr:`loaded_labwares` to get a dict of labwares
@@ -970,7 +1082,7 @@ def deck(self) -> Deck:
For :ref:`advanced-control` *only*, you can delete an element of the ``deck`` dict.
This only works for deck slots that contain labware objects. For example, if slot
- 1 contains a labware, ``del protocol.deck['1']`` will free the slot so you can
+ 1 contains a labware, ``del protocol.deck["1"]`` will free the slot so you can
load another labware there.
.. warning::
@@ -979,24 +1091,47 @@ def deck(self) -> Deck:
reflect the new deck state, add a :py:meth:`.pause` or use
:py:meth:`.move_labware` instead.
+ .. versionchanged:: 2.14
+ Includes the Thermocycler in all of the slots it occupies.
+
.. versionchanged:: 2.15
``del`` sets the corresponding labware's location to ``OFF_DECK``.
"""
return self._deck
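For example, checking whether a slot is free before loading into it (slot and labware are illustrative):

    def run(protocol):
        if protocol.deck["C2"] is None:
            reservoir = protocol.load_labware("nest_12_reservoir_15ml", "C2")
        else:
            protocol.comment(f"C2 is occupied by {protocol.deck['C2']}")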
- @property # type: ignore
+ @property
@requires_version(2, 0)
- def fixed_trash(self) -> Labware:
- """The trash fixed to slot 12 of the robot deck.
+ def fixed_trash(self) -> Union[Labware, TrashBin]:
+ """The trash fixed to slot 12 of an OT-2's deck.
+
+ In API version 2.15 and earlier, the fixed trash is a :py:class:`.Labware` object with one well. Access it like labware in your protocol. For example, ``protocol.fixed_trash["A1"]``.
+
+ In API version 2.15 only, Flex protocols have a fixed trash in slot A3.
- It has one well and should be accessed like labware in your protocol.
- e.g. ``protocol.fixed_trash['A1']``
+ In API version 2.16 and later, the fixed trash only exists in OT-2 protocols. It is a :py:class:`.TrashBin` object, which doesn't have any wells. Trying to access ``fixed_trash`` in a Flex protocol will raise an error. See :ref:`configure-trash-bin` for details on using the movable trash in Flex protocols.
+
+ .. versionchanged:: 2.16
+ Returns a ``TrashBin`` object.
"""
+ if self._api_version >= APIVersion(2, 16):
+ if self._core.robot_type == "OT-3 Standard":
+ raise APIVersionError(
+ "Fixed Trash is not supported on Flex protocols in API Version 2.16 and above."
+ )
+ disposal_locations = self._core.get_disposal_locations()
+ if len(disposal_locations) == 0:
+ raise NoTrashDefinedError(
+ "No trash container has been defined in this protocol."
+ )
+ if isinstance(disposal_locations[0], TrashBin):
+ return disposal_locations[0]
+
fixed_trash = self._core_map.get(self._core.fixed_trash)
if fixed_trash is None:
raise NoTrashDefinedError(
"No trash container has been defined in this protocol."
)
+
return fixed_trash
def _load_fixed_trash(self) -> None:
@@ -1013,9 +1148,9 @@ def _load_fixed_trash(self) -> None:
@requires_version(2, 5)
def set_rail_lights(self, on: bool) -> None:
"""
- Controls the robot rail lights
+ Controls the robot's ambient lighting (rail lights).
- :param bool on: If true, turn on rail lights; otherwise, turn off.
+ :param bool on: If ``True``, turn on the lights; otherwise, turn them off.
"""
self._core.set_rail_lights(on=on)
@@ -1038,16 +1173,16 @@ def define_liquid(
display_color=display_color,
)
- @property # type: ignore
+ @property
@requires_version(2, 5)
def rail_lights_on(self) -> bool:
- """Returns True if the rail lights are on"""
+ """Returns ``True`` if the robot's ambient lighting is on."""
return self._core.get_rail_lights_on()
- @property # type: ignore
+ @property
@requires_version(2, 5)
def door_closed(self) -> bool:
- """Returns True if the robot door is closed"""
+ """Returns ``True`` if the front door of the robot is closed."""
return self._core.door_closed()
diff --git a/api/src/opentrons/protocol_api/validation.py b/api/src/opentrons/protocol_api/validation.py
index c6767ebc71f..eb72c6b6dfd 100644
--- a/api/src/opentrons/protocol_api/validation.py
+++ b/api/src/opentrons/protocol_api/validation.py
@@ -21,7 +21,7 @@
from opentrons.protocols.api_support.types import APIVersion
from opentrons.protocols.api_support.util import APIVersionError
from opentrons.protocols.models import LabwareDefinition
-from opentrons.types import Mount, DeckSlotName, Location
+from opentrons.types import Mount, DeckSlotName, StagingSlotName, Location
from opentrons.hardware_control.modules.types import (
ModuleModel,
MagneticModuleModel,
@@ -32,6 +32,8 @@
ThermocyclerStep,
)
+from .disposal_locations import TrashBin, WasteChute
+
if TYPE_CHECKING:
from .labware import Well
@@ -39,6 +41,9 @@
# The first APIVersion where Python protocols can specify deck labels like "D1" instead of "1".
_COORDINATE_DECK_LABEL_VERSION_GATE = APIVersion(2, 15)
+# The first APIVersion where Python protocols can specify staging deck slots (e.g. "D4")
+_STAGING_DECK_SLOT_VERSION_GATE = APIVersion(2, 16)
+
# Mapping of public Python Protocol API pipette load names
# to names used by the internal Opentrons system
_PIPETTE_NAMES_MAP = {
@@ -78,7 +83,35 @@ class LabwareDefinitionIsNotLabwareError(ValueError):
"""An error raised when a labware is not loaded using `load_labware`."""
-def ensure_mount(mount: Union[str, Mount]) -> Mount:
+class InvalidTrashBinLocationError(ValueError):
+ """An error raised when attempting to load trash bins in invalid slots."""
+
+
+class InvalidFixtureLocationError(ValueError):
+ """An error raised when attempting to load a fixture in an invalid cutout."""
+
+
+def ensure_mount_for_pipette(
+ mount: Union[str, Mount, None], pipette: PipetteNameType
+) -> Mount:
+ """Ensure that an input value represents a valid mount, and is valid for the given pipette."""
+ if pipette == PipetteNameType.P1000_96:
+ # Always validate the raw mount input, even if the pipette is a 96-channel and we're not going
+ # to use the mount value.
+ if mount is not None:
+ _ensure_mount(mount)
+ # Internal layers treat the 96-channel as being on the left mount.
+ return Mount.LEFT
+ else:
+ if mount is None:
+ raise InvalidPipetteMountError(
+ f"You must specify a left or right mount to load {pipette.value}."
+ )
+ else:
+ return _ensure_mount(mount)
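A rough sketch of the behavior this helper introduces (import paths mirror this file; the non-96-channel pipette name is just an example):

    from opentrons.protocol_api.validation import ensure_mount_for_pipette
    from opentrons.types import Mount
    from opentrons_shared_data.pipette.dev_types import PipetteNameType

    # The 96-channel validates any provided mount but always resolves to LEFT.
    assert ensure_mount_for_pipette(None, PipetteNameType.P1000_96) == Mount.LEFT
    assert ensure_mount_for_pipette("right", PipetteNameType.P1000_96) == Mount.LEFT

    # Every other pipette requires an explicit mount; passing None raises
    # InvalidPipetteMountError.
    assert ensure_mount_for_pipette("left", PipetteNameType.P300_SINGLE_GEN2) == Mount.LEFT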
+
+
+def _ensure_mount(mount: Union[str, Mount]) -> Mount:
"""Ensure that an input value represents a valid Mount."""
if mount in [Mount.EXTENSION, "extension"]:
# This would cause existing protocols that might be iterating over mount types
@@ -125,9 +158,12 @@ def ensure_pipette_name(pipette_name: str) -> PipetteNameType:
) from None
+# TODO(jbl 11-17-2023) this function's original purpose was ensure a valid deck slot for a given robot type
+# With deck configuration, the shape of this should change to better represent it checking if a deck slot
+# (and maybe any addressable area) being valid for that deck configuration
def ensure_and_convert_deck_slot(
deck_slot: Union[int, str], api_version: APIVersion, robot_type: RobotType
-) -> DeckSlotName:
+) -> Union[DeckSlotName, StagingSlotName]:
"""Ensure that a primitive value matches a named deck slot.
Also, convert the deck slot to match the given `robot_type`.
@@ -149,25 +185,33 @@ def ensure_and_convert_deck_slot(
if not isinstance(deck_slot, (int, str)):
raise TypeError(f"Deck slot must be a string or integer, but got {deck_slot}")
- try:
- parsed_slot = DeckSlotName.from_primitive(deck_slot)
- except ValueError as e:
- raise ValueError(f"'{deck_slot}' is not a valid deck slot") from e
-
- is_ot2_style = parsed_slot.to_ot2_equivalent() == parsed_slot
- if not is_ot2_style and api_version < _COORDINATE_DECK_LABEL_VERSION_GATE:
- alternative = parsed_slot.to_ot2_equivalent().id
- raise APIVersionError(
- f'Specifying a deck slot like "{deck_slot}" requires apiLevel'
- f" {_COORDINATE_DECK_LABEL_VERSION_GATE}."
- f' Increase your protocol\'s apiLevel, or use slot "{alternative}" instead.'
- )
+ if str(deck_slot).upper() in {"A4", "B4", "C4", "D4"}:
+ if api_version < APIVersion(2, 16):
+ raise APIVersionError(
+ f"Using a staging deck slot requires apiLevel {_STAGING_DECK_SLOT_VERSION_GATE}."
+ )
+ # Don't need a try/except since we're already pre-validating this
+ parsed_staging_slot = StagingSlotName.from_primitive(str(deck_slot))
+ return parsed_staging_slot
+ else:
+ try:
+ parsed_slot = DeckSlotName.from_primitive(deck_slot)
+ except ValueError as e:
+ raise ValueError(f"'{deck_slot}' is not a valid deck slot") from e
+ is_ot2_style = parsed_slot.to_ot2_equivalent() == parsed_slot
+ if not is_ot2_style and api_version < _COORDINATE_DECK_LABEL_VERSION_GATE:
+ alternative = parsed_slot.to_ot2_equivalent().id
+ raise APIVersionError(
+ f'Specifying a deck slot like "{deck_slot}" requires apiLevel'
+ f" {_COORDINATE_DECK_LABEL_VERSION_GATE}."
+ f' Increase your protocol\'s apiLevel, or use slot "{alternative}" instead.'
+ )
- return parsed_slot.to_equivalent_for_robot_type(robot_type)
+ return parsed_slot.to_equivalent_for_robot_type(robot_type)
def internal_slot_to_public_string(
- slot_name: DeckSlotName, robot_type: RobotType
+ slot_name: Union[DeckSlotName, StagingSlotName], robot_type: RobotType
) -> str:
"""Convert an internal `DeckSlotName` to a user-facing Python Protocol API string.
@@ -175,7 +219,11 @@ def internal_slot_to_public_string(
Flexes. This probably won't change anything because the internal `DeckSlotName` should already
match the robot's native format, but it's nice to have an explicit interface barrier.
"""
- return slot_name.to_equivalent_for_robot_type(robot_type).id
+ if isinstance(slot_name, DeckSlotName):
+ return slot_name.to_equivalent_for_robot_type(robot_type).id
+ else:
+ # No need to convert staging slot names per robot type, since they only exist on Flex.
+ return slot_name.id
def ensure_lowercase_name(name: str) -> str:
@@ -246,6 +294,48 @@ def ensure_module_model(load_name: str) -> ModuleModel:
return model
+def ensure_and_convert_trash_bin_location(
+ deck_slot: Union[int, str], api_version: APIVersion, robot_type: RobotType
+) -> str:
+ """Ensure trash bin load location is valid.
+
+ Also, convert the deck slot to a valid trash bin addressable area.
+ """
+
+ if robot_type == "OT-2 Standard":
+ raise InvalidTrashBinLocationError("Cannot load trash on OT-2.")
+
+ # map trash bin location to addressable area
+ trash_bin_slots = [
+ DeckSlotName(slot) for slot in ["A1", "B1", "C1", "D1", "A3", "B3", "C3", "D3"]
+ ]
+ trash_bin_addressable_areas = [
+ "movableTrashA1",
+ "movableTrashB1",
+ "movableTrashC1",
+ "movableTrashD1",
+ "movableTrashA3",
+ "movableTrashB3",
+ "movableTrashC3",
+ "movableTrashD3",
+ ]
+ map_trash_bin_addressable_area = {
+ slot: addressable_area
+ for slot, addressable_area in zip(trash_bin_slots, trash_bin_addressable_areas)
+ }
+
+ slot_name_ot3 = ensure_and_convert_deck_slot(deck_slot, api_version, robot_type)
+ if not isinstance(slot_name_ot3, DeckSlotName):
+ raise ValueError("Staging areas not permitted for trash bin.")
+ if slot_name_ot3 not in trash_bin_slots:
+ raise InvalidTrashBinLocationError(
+ f"Invalid location for trash bin: {slot_name_ot3}.\n"
+ f"Valid slots: Any slot in column 1 or 3."
+ )
+
+ return map_trash_bin_addressable_area[slot_name_ot3]
+
+
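A quick sketch of the slot-to-addressable-area mapping above (robot type and version strings mirror the code):

    from opentrons.protocol_api.validation import ensure_and_convert_trash_bin_location
    from opentrons.protocols.api_support.types import APIVersion

    # Flex slots in columns 1 and 3 map to their movable-trash addressable areas.
    area = ensure_and_convert_trash_bin_location(
        "A3", api_version=APIVersion(2, 16), robot_type="OT-3 Standard"
    )
    assert area == "movableTrashA3"
    # Column 2 slots, staging slots, and any OT-2 call raise an error instead.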
def ensure_hold_time_seconds(
seconds: Optional[float], minutes: Optional[float]
) -> float:
@@ -342,8 +432,9 @@ class LocationTypeError(TypeError):
def validate_location(
- location: Union[Location, Well, None], last_location: Optional[Location]
-) -> Union[WellTarget, PointTarget]:
+ location: Union[Location, Well, TrashBin, WasteChute, None],
+ last_location: Optional[Location],
+) -> Union[WellTarget, PointTarget, TrashBin, WasteChute]:
"""Validate a given location for a liquid handling command.
Args:
@@ -365,11 +456,14 @@ def validate_location(
if target_location is None:
raise NoLocationError()
- if not isinstance(target_location, (Location, Well)):
+ if not isinstance(target_location, (Location, Well, TrashBin, WasteChute)):
raise LocationTypeError(
- f"location should be a Well or Location, but it is {location}"
+ f"location should be a Well, Location, TrashBin or WasteChute, but it is {location}"
)
+ if isinstance(target_location, (TrashBin, WasteChute)):
+ return target_location
+
in_place = target_location == last_location
if isinstance(target_location, Well):
diff --git a/api/src/opentrons/protocol_engine/__init__.py b/api/src/opentrons/protocol_engine/__init__.py
index 253e88dc33f..eb62ee7f33a 100644
--- a/api/src/opentrons/protocol_engine/__init__.py
+++ b/api/src/opentrons/protocol_engine/__init__.py
@@ -13,6 +13,7 @@
)
from .protocol_engine import ProtocolEngine
from .errors import ProtocolEngineError, ErrorOccurrence
+from .notes import CommandNote
from .commands import (
Command,
CommandParams,
@@ -30,11 +31,13 @@
LabwareOffsetVector,
LabwareOffsetLocation,
LabwareMovementStrategy,
+ AddressableOffsetVector,
DeckPoint,
DeckType,
DeckSlotLocation,
ModuleLocation,
OnLabwareLocation,
+ AddressableAreaLocation,
OFF_DECK_LOCATION,
Dimensions,
EngineStatus,
@@ -52,7 +55,7 @@
ModuleModel,
ModuleDefinition,
Liquid,
- EmptyNozzleLayoutConfiguration,
+ AllNozzleLayoutConfiguration,
SingleNozzleLayoutConfiguration,
RowNozzleLayoutConfiguration,
ColumnNozzleLayoutConfiguration,
@@ -77,6 +80,7 @@
"CommandStatus",
"CommandType",
"CommandIntent",
+ "CommandNote",
# state interfaces and models
"State",
"StateView",
@@ -88,11 +92,13 @@
"LabwareOffsetVector",
"LabwareOffsetLocation",
"LabwareMovementStrategy",
+ "AddressableOffsetVector",
"DeckSlotLocation",
"DeckPoint",
"DeckType",
"ModuleLocation",
"OnLabwareLocation",
+ "AddressableAreaLocation",
"OFF_DECK_LOCATION",
"Dimensions",
"EngineStatus",
@@ -110,7 +116,7 @@
"ModuleModel",
"ModuleDefinition",
"Liquid",
- "EmptyNozzleLayoutConfiguration",
+ "AllNozzleLayoutConfiguration",
"SingleNozzleLayoutConfiguration",
"RowNozzleLayoutConfiguration",
"ColumnNozzleLayoutConfiguration",
diff --git a/api/src/opentrons/protocol_engine/actions/__init__.py b/api/src/opentrons/protocol_engine/actions/__init__.py
index e72c8b7db25..ac3fc653976 100644
--- a/api/src/opentrons/protocol_engine/actions/__init__.py
+++ b/api/src/opentrons/protocol_engine/actions/__init__.py
@@ -11,14 +11,17 @@
PauseAction,
PauseSource,
StopAction,
+ ResumeFromRecoveryAction,
FinishAction,
HardwareStoppedAction,
QueueCommandAction,
- UpdateCommandAction,
+ RunCommandAction,
+ SucceedCommandAction,
FailCommandAction,
AddLabwareOffsetAction,
AddLabwareDefinitionAction,
AddLiquidAction,
+ AddAddressableAreaAction,
AddModuleAction,
FinishErrorDetails,
DoorChangeAction,
@@ -36,14 +39,17 @@
"PlayAction",
"PauseAction",
"StopAction",
+ "ResumeFromRecoveryAction",
"FinishAction",
"HardwareStoppedAction",
"QueueCommandAction",
- "UpdateCommandAction",
+ "RunCommandAction",
+ "SucceedCommandAction",
"FailCommandAction",
"AddLabwareOffsetAction",
"AddLabwareDefinitionAction",
"AddLiquidAction",
+ "AddAddressableAreaAction",
"AddModuleAction",
"DoorChangeAction",
"ResetTipsAction",
diff --git a/api/src/opentrons/protocol_engine/actions/actions.py b/api/src/opentrons/protocol_engine/actions/actions.py
index 64e4a5a1fad..adcf4f9e40b 100644
--- a/api/src/opentrons/protocol_engine/actions/actions.py
+++ b/api/src/opentrons/protocol_engine/actions/actions.py
@@ -6,7 +6,7 @@
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
-from typing import Optional, Union
+from typing import List, Optional, Union
from opentrons.protocols.models import LabwareDefinition
from opentrons.hardware_control.types import DoorState
@@ -15,7 +15,15 @@
from opentrons_shared_data.errors import EnumeratedError
from ..commands import Command, CommandCreate, CommandPrivateResult
-from ..types import LabwareOffsetCreate, ModuleDefinition, Liquid
+from ..error_recovery_policy import ErrorRecoveryType
+from ..notes.notes import CommandNote
+from ..types import (
+ LabwareOffsetCreate,
+ ModuleDefinition,
+ Liquid,
+ DeckConfigurationType,
+ AddressableAreaLocation,
+)
@dataclass(frozen=True)
@@ -23,6 +31,7 @@ class PlayAction:
"""Start or resume processing commands in the engine."""
requested_at: datetime
+ deck_configuration: Optional[DeckConfigurationType]
class PauseSource(str, Enum):
@@ -46,14 +55,18 @@ class PauseAction:
@dataclass(frozen=True)
class StopAction:
- """Stop the current engine execution.
-
- After a StopAction, the engine status will be marked as stopped.
- """
+ """Request engine execution to stop soon."""
from_estop: bool = False
+@dataclass(frozen=True)
+class ResumeFromRecoveryAction:
+ """See `ProtocolEngine.resume_from_recovery()`."""
+
+ pass
+
+
@dataclass(frozen=True)
class FinishErrorDetails:
"""Error details for the payload of a FinishAction or HardwareStoppedAction."""
@@ -103,13 +116,31 @@ class QueueCommandAction:
created_at: datetime
request: CommandCreate
request_hash: Optional[str]
+ failed_command_id: Optional[str] = None
@dataclass(frozen=True)
-class UpdateCommandAction:
- """Update a given command."""
+class RunCommandAction:
+ """Mark a given command as running.
+
+ At the time of dispatching this action, the command must be queued,
+ and no other command may be running.
+ """
+
+ command_id: str
+ started_at: datetime
+
+
+@dataclass(frozen=True)
+class SucceedCommandAction:
+ """Mark a given command as succeeded.
+
+ At the time of dispatching this action, the command must be running.
+ """
command: Command
+ """The command in its new succeeded state."""
+
private_result: CommandPrivateResult
@@ -117,16 +148,36 @@ class UpdateCommandAction:
class FailCommandAction:
"""Mark a given command as failed.
- The given command and all currently queued commands will be marked
- as failed due to the given error.
+ At the time of dispatching this action, the command must be running.
"""
- # TODO(mc, 2021-11-12): we'll likely need to add the command params
- # to this payload for state reaction purposes
command_id: str
+ """The command to fail."""
+
error_id: str
+ """An ID to assign to the command's error.
+
+ Must be unique to this occurrence of the error.
+ """
+
failed_at: datetime
+ """When the command failed."""
+
error: EnumeratedError
+ """The underlying exception that caused this command to fail."""
+
+ notes: List[CommandNote]
+ """Overwrite the command's `.notes` with these."""
+
+ type: ErrorRecoveryType
+ """How this error should be handled in the context of the overall run."""
+
+ # This is a quick hack so FailCommandAction handlers can get the params of the
+ # command that failed. We probably want this to be a new "failure details"
+ # object instead, similar to how succeeded commands can send a "private result"
+ # to Protocol Engine internals.
+ running_command: Command
+ """The command to fail, in its prior `running` state."""
@dataclass(frozen=True)
@@ -152,6 +203,18 @@ class AddLiquidAction:
liquid: Liquid
+@dataclass(frozen=True)
+class AddAddressableAreaAction:
+ """Add a single addressable area to state.
+
+ This differs from the deck configuration in PlayAction, which sends over a mapping of cutout fixtures.
+ This action loads only a single addressable area, which should be pre-validated before the action is
+ dispatched.
+ """
+
+ addressable_area: AddressableAreaLocation
+
+
@dataclass(frozen=True)
class AddModuleAction:
"""Add an attached module directly to state without a location."""
@@ -184,15 +247,18 @@ class SetPipetteMovementSpeedAction:
PlayAction,
PauseAction,
StopAction,
+ ResumeFromRecoveryAction,
FinishAction,
HardwareStoppedAction,
DoorChangeAction,
QueueCommandAction,
- UpdateCommandAction,
+ RunCommandAction,
+ SucceedCommandAction,
FailCommandAction,
AddLabwareOffsetAction,
AddLabwareDefinitionAction,
AddModuleAction,
+ AddAddressableAreaAction,
AddLiquidAction,
ResetTipsAction,
SetPipetteMovementSpeedAction,
diff --git a/api/src/opentrons/protocol_engine/clients/sync_client.py b/api/src/opentrons/protocol_engine/clients/sync_client.py
index 1982ad66fa1..35ea6b9c14d 100644
--- a/api/src/opentrons/protocol_engine/clients/sync_client.py
+++ b/api/src/opentrons/protocol_engine/clients/sync_client.py
@@ -6,7 +6,9 @@
from opentrons_shared_data.labware.dev_types import LabwareUri
from opentrons_shared_data.labware.labware_definition import LabwareDefinition
-from opentrons.commands.protocol_commands import comment as make_legacy_comment_command
+from opentrons.legacy_commands.protocol_commands import (
+ comment as make_legacy_comment_command,
+)
from opentrons.types import MountType
from opentrons.hardware_control.modules.types import ThermocyclerStep
@@ -24,6 +26,8 @@
MotorAxis,
Liquid,
NozzleLayoutConfigurationType,
+ AddressableOffsetVector,
+ LabwareOffsetCreate,
)
from .transports import ChildThreadTransport
@@ -70,6 +74,12 @@ def add_labware_definition(self, definition: LabwareDefinition) -> LabwareUri:
definition=definition,
)
+ def add_addressable_area(self, addressable_area_name: str) -> None:
+ """Add an addressable area to the engine's state."""
+ self._transport.call_method(
+ "add_addressable_area", addressable_area_name=addressable_area_name
+ )
+
def add_liquid(
self, name: str, color: Optional[str], description: Optional[str]
) -> Liquid:
@@ -83,6 +93,10 @@ def reset_tips(self, labware_id: str) -> None:
labware_id=labware_id,
)
+ def add_labware_offset(self, request: LabwareOffsetCreate) -> None:
+ """Add a labware offset."""
+ self._transport.call_method("add_labware_offset", request=request)
+
def set_pipette_movement_speed(
self, pipette_id: str, speed: Optional[float]
) -> None:
@@ -118,6 +132,19 @@ def load_labware(
return cast(commands.LoadLabwareResult, result)
+ def reload_labware(
+ self,
+ labware_id: str,
+ ) -> commands.ReloadLabwareResult:
+ """Execute a ReloadLabware command and return the result."""
+ request = commands.ReloadLabwareCreate(
+ params=commands.ReloadLabwareParams(
+ labwareId=labware_id,
+ )
+ )
+ result = self._transport.execute_command(request=request)
+ return cast(commands.ReloadLabwareResult, result)
+
# TODO (spp, 2022-12-14): https://opentrons.atlassian.net/browse/RLAB-237
def move_labware(
self,
@@ -145,10 +172,15 @@ def load_pipette(
self,
pipette_name: PipetteNameType,
mount: MountType,
+ tip_overlap_version: Optional[str] = None,
) -> commands.LoadPipetteResult:
"""Execute a LoadPipette command and return the result."""
request = commands.LoadPipetteCreate(
- params=commands.LoadPipetteParams(mount=mount, pipetteName=pipette_name)
+ params=commands.LoadPipetteParams(
+ mount=mount,
+ pipetteName=pipette_name,
+ tipOverlapNotAfterVersion=tip_overlap_version,
+ )
)
result = self._transport.execute_command(request=request)
@@ -180,6 +212,58 @@ def move_to_well(
return cast(commands.MoveToWellResult, result)
+ def move_to_addressable_area(
+ self,
+ pipette_id: str,
+ addressable_area_name: str,
+ offset: AddressableOffsetVector,
+ minimum_z_height: Optional[float],
+ force_direct: bool,
+ speed: Optional[float],
+ ) -> commands.MoveToAddressableAreaResult:
+ """Execute a MoveToAddressableArea command and return the result."""
+ request = commands.MoveToAddressableAreaCreate(
+ params=commands.MoveToAddressableAreaParams(
+ pipetteId=pipette_id,
+ addressableAreaName=addressable_area_name,
+ offset=offset,
+ forceDirect=force_direct,
+ minimumZHeight=minimum_z_height,
+ speed=speed,
+ )
+ )
+ result = self._transport.execute_command(request=request)
+
+ return cast(commands.MoveToAddressableAreaResult, result)
+
+ def move_to_addressable_area_for_drop_tip(
+ self,
+ pipette_id: str,
+ addressable_area_name: str,
+ offset: AddressableOffsetVector,
+ minimum_z_height: Optional[float],
+ force_direct: bool,
+ speed: Optional[float],
+ alternate_drop_location: Optional[bool],
+ ignore_tip_configuration: Optional[bool] = True,
+ ) -> commands.MoveToAddressableAreaForDropTipResult:
+ """Execute a MoveToAddressableArea command and return the result."""
+ request = commands.MoveToAddressableAreaForDropTipCreate(
+ params=commands.MoveToAddressableAreaForDropTipParams(
+ pipetteId=pipette_id,
+ addressableAreaName=addressable_area_name,
+ offset=offset,
+ forceDirect=force_direct,
+ minimumZHeight=minimum_z_height,
+ speed=speed,
+ alternateDropLocation=alternate_drop_location,
+ ignoreTipConfiguration=ignore_tip_configuration,
+ )
+ )
+ result = self._transport.execute_command(request=request)
+
+ return cast(commands.MoveToAddressableAreaForDropTipResult, result)
+
def move_to_coordinates(
self,
pipette_id: str,
@@ -235,6 +319,29 @@ def pick_up_tip(
return cast(commands.PickUpTipResult, result)
+ def pick_up_tip_wait_for_recovery(
+ self,
+ pipette_id: str,
+ labware_id: str,
+ well_name: str,
+ well_location: WellLocation,
+ ) -> commands.PickUpTip:
+ """Execute a PickUpTip, wait for any error recovery, and return it.
+
+ Note that the returned command will not necessarily have a `result`.
+ """
+ request = commands.PickUpTipCreate(
+ params=commands.PickUpTipParams(
+ pipetteId=pipette_id,
+ labwareId=labware_id,
+ wellName=well_name,
+ wellLocation=well_location,
+ )
+ )
+ command = self._transport.execute_command_wait_for_recovery(request=request)
+
+ return cast(commands.PickUpTip, command)
+
def drop_tip(
self,
pipette_id: str,
@@ -274,12 +381,14 @@ def drop_tip_in_place(
return cast(commands.DropTipInPlaceResult, result)
def configure_for_volume(
- self, pipette_id: str, volume: float
+ self, pipette_id: str, volume: float, tip_overlap_version: Optional[str] = None
) -> commands.ConfigureForVolumeResult:
"""Execute a ConfigureForVolume command."""
request = commands.ConfigureForVolumeCreate(
params=commands.ConfigureForVolumeParams(
- pipetteId=pipette_id, volume=volume
+ pipetteId=pipette_id,
+ volume=volume,
+ tipOverlapNotAfterVersion=tip_overlap_version,
)
)
result = self._transport.execute_command(request=request)
@@ -301,7 +410,7 @@ def configure_nozzle_layout(
"""Execute a ConfigureForVolume command."""
request = commands.ConfigureNozzleLayoutCreate(
params=commands.ConfigureNozzleLayoutParams(
- pipetteId=pipette_id, configuration_params=configuration_params
+ pipetteId=pipette_id, configurationParams=configuration_params
)
)
result = self._transport.execute_command(request=request)
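A brief usage sketch of a few of the methods added above. It assumes `client` is an already constructed SyncClient bound to a running engine; the IDs, well name, and addressable area name are placeholders, and `WellLocation()` is assumed to rely on its default origin and offset.

# Hypothetical usage only; IDs and names below are placeholders.
from opentrons.protocol_engine.types import WellLocation


def demo_new_sync_calls(client) -> None:
    # Register an addressable area so later movements to it are valid.
    client.add_addressable_area(addressable_area_name="D4")
    # Re-resolve a loaded labware's definition and offset in place.
    client.reload_labware(labware_id="labware-1")
    # Pick up a tip and wait for any error recovery; the returned command may
    # be in a failed-but-recovered state, so its `result` may be None.
    command = client.pick_up_tip_wait_for_recovery(
        pipette_id="pipette-1",
        labware_id="tiprack-1",
        well_name="A1",
        well_location=WellLocation(),
    )
    if command.result is None:
        print("Pick-up did not produce a result; its error was recovered from.")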
diff --git a/api/src/opentrons/protocol_engine/clients/transports.py b/api/src/opentrons/protocol_engine/clients/transports.py
index 270599ff469..6de08db97ed 100644
--- a/api/src/opentrons/protocol_engine/clients/transports.py
+++ b/api/src/opentrons/protocol_engine/clients/transports.py
@@ -1,15 +1,28 @@
"""A helper for controlling a `ProtocolEngine` without async/await."""
from asyncio import AbstractEventLoop, run_coroutine_threadsafe
-from typing import Any, overload
+from typing import Any, Final, overload
from typing_extensions import Literal
from opentrons_shared_data.labware.dev_types import LabwareUri
from opentrons_shared_data.labware.labware_definition import LabwareDefinition
+
from ..protocol_engine import ProtocolEngine
from ..errors import ProtocolCommandFailedError
+from ..error_recovery_policy import ErrorRecoveryType
from ..state import StateView
-from ..commands import CommandCreate, CommandResult
+from ..commands import Command, CommandCreate, CommandResult, CommandStatus
+
+
+class RunStoppedBeforeCommandError(RuntimeError):
+ """Raised if the ProtocolEngine was stopped before a command could start."""
+
+ def __init__(self, command: Command) -> None:
+ self._command = command
+ super().__init__(
+ f"The run was stopped"
+ f" before {command.commandType} command {command.id} could execute."
+ )
class ChildThreadTransport:
@@ -30,8 +43,10 @@ def __init__(self, engine: ProtocolEngine, loop: AbstractEventLoop) -> None:
want to synchronously access it.
loop: The event loop that `engine` is running in (in the other thread).
"""
- self._engine = engine
- self._loop = loop
+ # We might access these from different threads,
+ # so let's make them Final for (shallow) immutability.
+ self._engine: Final = engine
+ self._loop: Final = loop
@property
def state(self) -> StateView:
@@ -39,7 +54,11 @@ def state(self) -> StateView:
return self._engine.state_view
def execute_command(self, request: CommandCreate) -> CommandResult:
- """Execute a ProtocolEngine command, blocking until the command completes.
+ """Execute a ProtocolEngine command.
+
+ This blocks until the command completes. If the command fails, this will always
+ raise the failure as an exception--even if ProtocolEngine deemed the failure
+ recoverable.
Args:
request: The ProtocolEngine command request
@@ -48,8 +67,11 @@ def execute_command(self, request: CommandCreate) -> CommandResult:
The command's result data.
Raises:
- ProtocolEngineError: if the command execution is not successful,
- the specific error that cause the command to fail is raised.
+ ProtocolEngineError: If the command execution was not successful,
+ the specific error that caused the command to fail is raised.
+
+ If the run was stopped before the command could complete, that's
+ also signaled as this exception.
"""
command = run_coroutine_threadsafe(
self._engine.add_and_execute_command(request=request),
@@ -64,21 +86,76 @@ def execute_command(self, request: CommandCreate) -> CommandResult:
message=f"{error.errorType}: {error.detail}",
)
- # FIXME(mm, 2023-04-10): This assert can easily trigger from this sequence:
- #
- # 1. The engine is paused.
- # 2. The user's Python script calls this method to start a new command,
- # which remains `queued` because of the pause.
- # 3. The engine is stopped.
- #
- # The returned command will be `queued`, so it won't have a result.
- #
- # We need to figure out a proper way to report this condition to callers
- # so they correctly interpret it as an intentional stop, not an internal error.
- assert command.result is not None, f"Expected Command {command} to have result"
+ if command.result is None:
+ # This can happen with a certain pause timing:
+ #
+ # 1. The engine is paused.
+ # 2. The user's Python script calls this method to start a new command,
+ # which remains `queued` because of the pause.
+ # 3. The engine is stopped. The returned command will be `queued`
+ # and won't have a result.
+ raise RunStoppedBeforeCommandError(command)
return command.result
+ def execute_command_wait_for_recovery(self, request: CommandCreate) -> Command:
+ """Execute a ProtocolEngine command, including error recovery.
+
+ This blocks until the command completes. Additionally, if the command fails,
+ this will continue to block until its error recovery has been completed.
+
+ Args:
+ request: The ProtocolEngine command request.
+
+ Returns:
+ The command. If error recovery happened for it, the command will be
+ reported here as failed.
+
+ Raises:
+ ProtocolEngineError: If the command failed, *and* the failure was not
+ recovered from.
+
+ If the run was stopped before the command could complete, that's
+ also signaled as this exception.
+ """
+
+ async def run_in_pe_thread() -> Command:
+ command = await self._engine.add_and_execute_command_wait_for_recovery(
+ request=request
+ )
+
+ if command.error is not None:
+ error_was_recovered_from = (
+ self._engine.state_view.commands.get_error_recovery_type(command.id)
+ == ErrorRecoveryType.WAIT_FOR_RECOVERY
+ )
+ if not error_was_recovered_from:
+ error = command.error
+ # TODO: this needs to have an actual code
+ raise ProtocolCommandFailedError(
+ original_error=error,
+ message=f"{error.errorType}: {error.detail}",
+ )
+
+ elif command.status == CommandStatus.QUEUED:
+ # This can happen with a certain pause timing:
+ #
+ # 1. The engine is paused.
+ # 2. The user's Python script calls this method to start a new command,
+ # which remains `queued` because of the pause.
+ # 3. The engine is stopped. The returned command will be `queued`,
+ # and won't have a result.
+ raise RunStoppedBeforeCommandError(command)
+
+ return command
+
+ command = run_coroutine_threadsafe(
+ run_in_pe_thread(),
+ loop=self._loop,
+ ).result()
+
+ return command
+
@overload
def call_method(
self,
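As a caller-side illustration of the new transport behavior, the sketch below shows how code using ChildThreadTransport might distinguish a recovered failure from a stopped run. `transport` and `request` are assumed to already exist; this is not code from the repository.

# Hypothetical caller-side sketch; `transport` is an already constructed
# ChildThreadTransport bound to an engine running in another thread, and
# `request` is any CommandCreate object.
from opentrons.protocol_engine.clients.transports import RunStoppedBeforeCommandError


def run_with_recovery(transport, request):
    try:
        command = transport.execute_command_wait_for_recovery(request=request)
    except RunStoppedBeforeCommandError:
        # The run was stopped while the command was still queued; treat this
        # as an intentional stop rather than an internal error.
        return None
    # If the command failed but its error was recovered from, it comes back in
    # its failed state and `command.result` may be None.
    return command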
diff --git a/api/src/opentrons/protocol_engine/commands/__init__.py b/api/src/opentrons/protocol_engine/commands/__init__.py
index 60c5e8350ea..123425e464f 100644
--- a/api/src/opentrons/protocol_engine/commands/__init__.py
+++ b/api/src/opentrons/protocol_engine/commands/__init__.py
@@ -19,7 +19,7 @@
from . import thermocycler
from . import calibration
-from .hash_command_params import hash_command_params
+from .hash_command_params import hash_protocol_command_params
from .generate_command_schema import generate_command_schema
from .command import (
@@ -37,6 +37,7 @@
CommandResult,
CommandType,
CommandPrivateResult,
+ CommandT,
)
from .aspirate import (
@@ -119,6 +120,14 @@
LoadLabwareCommandType,
)
+from .reload_labware import (
+ ReloadLabware,
+ ReloadLabwareParams,
+ ReloadLabwareCreate,
+ ReloadLabwareResult,
+ ReloadLabwareCommandType,
+)
+
from .load_liquid import (
LoadLiquid,
LoadLiquidParams,
@@ -177,6 +186,22 @@
MoveToWellCommandType,
)
+from .move_to_addressable_area import (
+ MoveToAddressableArea,
+ MoveToAddressableAreaParams,
+ MoveToAddressableAreaCreate,
+ MoveToAddressableAreaResult,
+ MoveToAddressableAreaCommandType,
+)
+
+from .move_to_addressable_area_for_drop_tip import (
+ MoveToAddressableAreaForDropTip,
+ MoveToAddressableAreaForDropTipParams,
+ MoveToAddressableAreaForDropTipCreate,
+ MoveToAddressableAreaForDropTipResult,
+ MoveToAddressableAreaForDropTipCommandType,
+)
+
from .wait_for_resume import (
WaitForResume,
WaitForResumeParams,
@@ -279,9 +304,26 @@
ConfigureNozzleLayoutCreate,
ConfigureNozzleLayoutParams,
ConfigureNozzleLayoutResult,
+ ConfigureNozzleLayoutPrivateResult,
ConfigureNozzleLayoutCommandType,
)
+from .get_tip_presence import (
+ GetTipPresence,
+ GetTipPresenceCreate,
+ GetTipPresenceParams,
+ GetTipPresenceResult,
+ GetTipPresenceCommandType,
+)
+
+from .verify_tip_presence import (
+ VerifyTipPresence,
+ VerifyTipPresenceCreate,
+ VerifyTipPresenceParams,
+ VerifyTipPresenceResult,
+ VerifyTipPresenceCommandType,
+)
+
__all__ = [
# command type unions
"Command",
@@ -290,6 +332,7 @@
"CommandResult",
"CommandType",
"CommandPrivateResult",
+ "CommandT",
# base interfaces
"AbstractCommandImpl",
"AbstractCommandWithPrivateResultImpl",
@@ -298,7 +341,7 @@
"CommandStatus",
"CommandIntent",
# command parameter hashing
- "hash_command_params",
+ "hash_protocol_command_params",
# command schema generation
"generate_command_schema",
# aspirate command models
@@ -367,6 +410,12 @@
"LoadLabwareParams",
"LoadLabwareResult",
"LoadLabwareCommandType",
+ # reload labware command models
+ "ReloadLabware",
+ "ReloadLabwareCreate",
+ "ReloadLabwareParams",
+ "ReloadLabwareResult",
+ "ReloadLabwareCommandType",
# load module command models
"LoadModule",
"LoadModuleCreate",
@@ -404,6 +453,18 @@
"MoveToWellParams",
"MoveToWellResult",
"MoveToWellCommandType",
+ # move to addressable area command models
+ "MoveToAddressableArea",
+ "MoveToAddressableAreaParams",
+ "MoveToAddressableAreaCreate",
+ "MoveToAddressableAreaResult",
+ "MoveToAddressableAreaCommandType",
+ # move to addressable area for drop tip command models
+ "MoveToAddressableAreaForDropTip",
+ "MoveToAddressableAreaForDropTipParams",
+ "MoveToAddressableAreaForDropTipCreate",
+ "MoveToAddressableAreaForDropTipResult",
+ "MoveToAddressableAreaForDropTipCommandType",
# wait for resume command models
"WaitForResume",
"WaitForResumeParams",
@@ -491,4 +552,17 @@
"ConfigureNozzleLayoutParams",
"ConfigureNozzleLayoutResult",
"ConfigureNozzleLayoutCommandType",
+ "ConfigureNozzleLayoutPrivateResult",
+ # get pipette tip presence bundle
+ "GetTipPresence",
+ "GetTipPresenceCreate",
+ "GetTipPresenceParams",
+ "GetTipPresenceResult",
+ "GetTipPresenceCommandType",
+ # verify pipette tip presence bundle
+ "VerifyTipPresence",
+ "VerifyTipPresenceCreate",
+ "VerifyTipPresenceParams",
+ "VerifyTipPresenceResult",
+ "VerifyTipPresenceCommandType",
]
diff --git a/api/src/opentrons/protocol_engine/commands/aspirate.py b/api/src/opentrons/protocol_engine/commands/aspirate.py
index d80fc12b4c8..4dcb81dcc33 100644
--- a/api/src/opentrons/protocol_engine/commands/aspirate.py
+++ b/api/src/opentrons/protocol_engine/commands/aspirate.py
@@ -5,7 +5,7 @@
from .pipetting_common import (
PipetteIdMixin,
- VolumeMixin,
+ AspirateVolumeMixin,
FlowRateMixin,
WellLocationMixin,
BaseLiquidHandlingResult,
@@ -20,12 +20,15 @@
if TYPE_CHECKING:
from ..execution import MovementHandler, PipettingHandler
from ..state import StateView
+ from ..notes import CommandNoteAdder
AspirateCommandType = Literal["aspirate"]
-class AspirateParams(PipetteIdMixin, VolumeMixin, FlowRateMixin, WellLocationMixin):
+class AspirateParams(
+ PipetteIdMixin, AspirateVolumeMixin, FlowRateMixin, WellLocationMixin
+):
"""Parameters required to aspirate from a specific well."""
pass
@@ -46,12 +49,14 @@ def __init__(
state_view: StateView,
hardware_api: HardwareControlAPI,
movement: MovementHandler,
+ command_note_adder: CommandNoteAdder,
**kwargs: object,
) -> None:
self._pipetting = pipetting
self._state_view = state_view
self._hardware_api = hardware_api
self._movement = movement
+ self._command_note_adder = command_note_adder
async def execute(self, params: AspirateParams) -> AspirateResult:
"""Move to and aspirate from the requested well.
@@ -96,7 +101,10 @@ async def execute(self, params: AspirateParams) -> AspirateResult:
)
volume = await self._pipetting.aspirate_in_place(
- pipette_id=pipette_id, volume=params.volume, flow_rate=params.flowRate
+ pipette_id=pipette_id,
+ volume=params.volume,
+ flow_rate=params.flowRate,
+ command_note_adder=self._command_note_adder,
)
return AspirateResult(
diff --git a/api/src/opentrons/protocol_engine/commands/aspirate_in_place.py b/api/src/opentrons/protocol_engine/commands/aspirate_in_place.py
index 3c8632c1d87..f59bccdd9f7 100644
--- a/api/src/opentrons/protocol_engine/commands/aspirate_in_place.py
+++ b/api/src/opentrons/protocol_engine/commands/aspirate_in_place.py
@@ -8,7 +8,7 @@
from .pipetting_common import (
PipetteIdMixin,
- VolumeMixin,
+ AspirateVolumeMixin,
FlowRateMixin,
BaseLiquidHandlingResult,
)
@@ -18,12 +18,12 @@
if TYPE_CHECKING:
from ..execution import PipettingHandler
from ..state import StateView
-
+ from ..notes import CommandNoteAdder
AspirateInPlaceCommandType = Literal["aspirateInPlace"]
-class AspirateInPlaceParams(PipetteIdMixin, VolumeMixin, FlowRateMixin):
+class AspirateInPlaceParams(PipetteIdMixin, AspirateVolumeMixin, FlowRateMixin):
"""Payload required to aspirate in place."""
pass
@@ -45,11 +45,13 @@ def __init__(
pipetting: PipettingHandler,
hardware_api: HardwareControlAPI,
state_view: StateView,
+ command_note_adder: CommandNoteAdder,
**kwargs: object,
) -> None:
self._pipetting = pipetting
self._state_view = state_view
self._hardware_api = hardware_api
+ self._command_note_adder = command_note_adder
async def execute(self, params: AspirateInPlaceParams) -> AspirateInPlaceResult:
"""Aspirate without moving the pipette.
@@ -69,7 +71,10 @@ async def execute(self, params: AspirateInPlaceParams) -> AspirateInPlaceResult:
" so the plunger can be reset in a known safe position."
)
volume = await self._pipetting.aspirate_in_place(
- pipette_id=params.pipetteId, volume=params.volume, flow_rate=params.flowRate
+ pipette_id=params.pipetteId,
+ volume=params.volume,
+ flow_rate=params.flowRate,
+ command_note_adder=self._command_note_adder,
)
return AspirateInPlaceResult(volume=volume)
diff --git a/api/src/opentrons/protocol_engine/commands/calibration/move_to_maintenance_position.py b/api/src/opentrons/protocol_engine/commands/calibration/move_to_maintenance_position.py
index 464b177e980..8ce067963ab 100644
--- a/api/src/opentrons/protocol_engine/commands/calibration/move_to_maintenance_position.py
+++ b/api/src/opentrons/protocol_engine/commands/calibration/move_to_maintenance_position.py
@@ -80,42 +80,25 @@ async def execute(
ot3_api = ensure_ot3_hardware(
self._hardware_api,
)
- current_position_mount = await ot3_api.gantry_position(
+ max_height_z_tip = ot3_api.get_instrument_max_height(Mount.LEFT)
+ # disengage the gripper z mount if present and enabled
+ await ot3_api.prepare_for_mount_movement(Mount.LEFT)
+
+ await ot3_api.retract(Mount.LEFT)
+ current_pos = await ot3_api.gantry_position(
Mount.LEFT, critical_point=CriticalPoint.MOUNT
)
- max_height_z_mount = ot3_api.get_instrument_max_height(
- Mount.LEFT, critical_point=CriticalPoint.MOUNT
+ await ot3_api.move_to(
+ mount=Mount.LEFT,
+ abs_position=Point(x=_ATTACH_POINT.x, y=_ATTACH_POINT.y, z=current_pos.z),
+ critical_point=CriticalPoint.MOUNT,
)
- max_height_z_tip = ot3_api.get_instrument_max_height(Mount.LEFT)
- # avoid using motion planning waypoints because we do not need to move the z at this moment
- movement_points = [
- # move the z to the highest position
- Point(
- x=current_position_mount.x,
- y=current_position_mount.y,
- z=max_height_z_mount,
- ),
- # move in x,y without going down the z
- Point(x=_ATTACH_POINT.x, y=_ATTACH_POINT.y, z=max_height_z_mount),
- ]
-
- for movement in movement_points:
- await ot3_api.move_to(
- mount=Mount.LEFT,
- abs_position=movement,
- critical_point=CriticalPoint.MOUNT,
- )
if params.mount != MountType.EXTENSION:
-
- # disengage the gripper z to enable the e-brake, this prevents the gripper
- # z from dropping when the right mount carriage gets released from the
- # mount during 96-channel detach flow
- if ot3_api.has_gripper():
- await ot3_api.disengage_axes([Axis.Z_G])
-
if params.maintenancePosition == MaintenancePosition.ATTACH_INSTRUMENT:
- mount_to_axis = Axis.by_mount(params.mount.to_hw_mount())
+ mount = params.mount.to_hw_mount()
+ mount_to_axis = Axis.by_mount(mount)
+ await ot3_api.prepare_for_mount_movement(mount)
await ot3_api.move_axes(
{
mount_to_axis: _INSTRUMENT_ATTACH_Z_POINT,
@@ -130,6 +113,7 @@ async def execute(
Axis.Z_R: max_motion_range + _RIGHT_MOUNT_Z_MARGIN,
}
)
+ await ot3_api.disengage_axes([Axis.Z_L, Axis.Z_R])
return MoveToMaintenancePositionResult()
diff --git a/api/src/opentrons/protocol_engine/commands/command.py b/api/src/opentrons/protocol_engine/commands/command.py
index f8f48bba67c..fcdd7387355 100644
--- a/api/src/opentrons/protocol_engine/commands/command.py
+++ b/api/src/opentrons/protocol_engine/commands/command.py
@@ -6,7 +6,16 @@
from abc import ABC, abstractmethod
from datetime import datetime
from enum import Enum
-from typing import TYPE_CHECKING, Generic, Optional, TypeVar, Tuple
+from typing import (
+ TYPE_CHECKING,
+ Generic,
+ Optional,
+ TypeVar,
+ Tuple,
+ List,
+ Type,
+ Union,
+)
from pydantic import BaseModel, Field
from pydantic.generics import GenericModel
@@ -14,6 +23,7 @@
from opentrons.hardware_control import HardwareControlAPI
from ..errors import ErrorOccurrence
+from ..notes import CommandNote, CommandNoteAdder
# Work around type-only circular dependencies.
if TYPE_CHECKING:
@@ -21,11 +31,11 @@
from ..state import StateView
-CommandParamsT = TypeVar("CommandParamsT", bound=BaseModel)
-
-CommandResultT = TypeVar("CommandResultT", bound=BaseModel)
-
-CommandPrivateResultT = TypeVar("CommandPrivateResultT")
+_ParamsT = TypeVar("_ParamsT", bound=BaseModel)
+_ParamsT_contra = TypeVar("_ParamsT_contra", bound=BaseModel, contravariant=True)
+_ResultT = TypeVar("_ResultT", bound=BaseModel)
+_ResultT_co = TypeVar("_ResultT_co", bound=BaseModel, covariant=True)
+_PrivateResultT_co = TypeVar("_PrivateResultT_co", covariant=True)
class CommandStatus(str, Enum):
@@ -47,9 +57,10 @@ class CommandIntent(str, Enum):
PROTOCOL = "protocol"
SETUP = "setup"
+ FIXIT = "fixit"
-class BaseCommandCreate(GenericModel, Generic[CommandParamsT]):
+class BaseCommandCreate(GenericModel, Generic[_ParamsT]):
"""Base class for command creation requests.
You shouldn't use this class directly; instead, use or define
@@ -63,7 +74,7 @@ class BaseCommandCreate(GenericModel, Generic[CommandParamsT]):
"execution behavior"
),
)
- params: CommandParamsT = Field(..., description="Command execution data payload")
+ params: _ParamsT = Field(..., description="Command execution data payload")
intent: Optional[CommandIntent] = Field(
None,
description=(
@@ -88,7 +99,7 @@ class BaseCommandCreate(GenericModel, Generic[CommandParamsT]):
)
-class BaseCommand(GenericModel, Generic[CommandParamsT, CommandResultT]):
+class BaseCommand(GenericModel, Generic[_ParamsT, _ResultT]):
"""Base command model.
You shouldn't use this class directly; instead, use or define
@@ -118,8 +129,8 @@ class BaseCommand(GenericModel, Generic[CommandParamsT, CommandResultT]):
),
)
status: CommandStatus = Field(..., description="Command execution status")
- params: CommandParamsT = Field(..., description="Command execution data payload")
- result: Optional[CommandResultT] = Field(
+ params: _ParamsT = Field(..., description="Command execution data payload")
+ result: Optional[_ResultT] = Field(
None,
description="Command execution result data, if succeeded",
)
@@ -144,11 +155,29 @@ class BaseCommand(GenericModel, Generic[CommandParamsT, CommandResultT]):
" a command that is part of a calibration procedure."
),
)
+ notes: Optional[List[CommandNote]] = Field(
+ None,
+ description=(
+ "Information not critical to the execution of the command derived from either"
+ " the command's execution or the command's generation."
+ ),
+ )
+ failedCommandId: Optional[str] = Field(
+ None,
+ description=(
+ "FIXIT command use only. Reference of the failed command id we are trying to fix."
+ ),
+ )
+
+ _ImplementationCls: Union[
+ Type[AbstractCommandImpl[_ParamsT, _ResultT]],
+ Type[AbstractCommandWithPrivateResultImpl[_ParamsT, _ResultT, object]],
+ ]
class AbstractCommandImpl(
ABC,
- Generic[CommandParamsT, CommandResultT],
+ Generic[_ParamsT_contra, _ResultT_co],
):
"""Abstract command creation and execution implementation.
@@ -176,19 +205,20 @@ def __init__(
run_control: execution.RunControlHandler,
rail_lights: execution.RailLightsHandler,
status_bar: execution.StatusBarHandler,
+ command_note_adder: CommandNoteAdder,
) -> None:
"""Initialize the command implementation with execution handlers."""
pass
@abstractmethod
- async def execute(self, params: CommandParamsT) -> CommandResultT:
+ async def execute(self, params: _ParamsT_contra) -> _ResultT_co:
"""Execute the command, mapping data from execution into a response model."""
...
class AbstractCommandWithPrivateResultImpl(
ABC,
- Generic[CommandParamsT, CommandResultT, CommandPrivateResultT],
+ Generic[_ParamsT_contra, _ResultT_co, _PrivateResultT_co],
):
"""Abstract command creation and execution implementation if the command has private results.
@@ -217,13 +247,14 @@ def __init__(
run_control: execution.RunControlHandler,
rail_lights: execution.RailLightsHandler,
status_bar: execution.StatusBarHandler,
+ command_note_adder: CommandNoteAdder,
) -> None:
"""Initialize the command implementation with execution handlers."""
pass
@abstractmethod
async def execute(
- self, params: CommandParamsT
- ) -> Tuple[CommandResultT, CommandPrivateResultT]:
+ self, params: _ParamsT_contra
+ ) -> Tuple[_ResultT_co, _PrivateResultT_co]:
"""Execute the command, mapping data from execution into a response model."""
...
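The renamed type variables above (`_ParamsT_contra`, `_ResultT_co`) make variance explicit. The following is a generic illustration of what those directions let a type checker accept, using throwaway classes rather than the engine's own models.

# Throwaway classes for illustration; not part of protocol_engine.
from typing import Generic, TypeVar


class Animal:
    ...


class Dog(Animal):
    ...


_P_contra = TypeVar("_P_contra", contravariant=True)
_R_co = TypeVar("_R_co", covariant=True)


class Impl(Generic[_P_contra, _R_co]):
    def execute(self, params: _P_contra) -> _R_co:
        raise NotImplementedError()


class MakeDogFromAnyAnimal(Impl[Animal, Dog]):
    def execute(self, params: Animal) -> Dog:
        return Dog()


def run(impl: Impl[Dog, Animal], params: Dog) -> Animal:
    # Impl[Animal, Dog] is assignable to Impl[Dog, Animal]: params are
    # contravariant (a broader input type is accepted) and results are
    # covariant (a narrower output type is returned).
    return impl.execute(params)


assert isinstance(run(MakeDogFromAnyAnimal(), Dog()), Dog)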
diff --git a/api/src/opentrons/protocol_engine/commands/command_unions.py b/api/src/opentrons/protocol_engine/commands/command_unions.py
index 4387a9178ec..7674508cc96 100644
--- a/api/src/opentrons/protocol_engine/commands/command_unions.py
+++ b/api/src/opentrons/protocol_engine/commands/command_unions.py
@@ -1,6 +1,9 @@
"""Union types of concrete command definitions."""
-from typing import Union
+from typing import Union, TypeVar
+from typing_extensions import Annotated
+
+from pydantic import Field
from . import heater_shaker
from . import magnetic_module
@@ -97,6 +100,14 @@
LoadLabwareCommandType,
)
+from .reload_labware import (
+ ReloadLabware,
+ ReloadLabwareParams,
+ ReloadLabwareCreate,
+ ReloadLabwareResult,
+ ReloadLabwareCommandType,
+)
+
from .load_liquid import (
LoadLiquid,
LoadLiquidParams,
@@ -154,6 +165,22 @@
MoveToWellCommandType,
)
+from .move_to_addressable_area import (
+ MoveToAddressableArea,
+ MoveToAddressableAreaParams,
+ MoveToAddressableAreaCreate,
+ MoveToAddressableAreaResult,
+ MoveToAddressableAreaCommandType,
+)
+
+from .move_to_addressable_area_for_drop_tip import (
+ MoveToAddressableAreaForDropTip,
+ MoveToAddressableAreaForDropTipParams,
+ MoveToAddressableAreaForDropTipCreate,
+ MoveToAddressableAreaForDropTipResult,
+ MoveToAddressableAreaForDropTipCommandType,
+)
+
from .wait_for_resume import (
WaitForResume,
WaitForResumeParams,
@@ -252,62 +279,86 @@
ConfigureNozzleLayoutPrivateResult,
)
-Command = Union[
- Aspirate,
- AspirateInPlace,
- Comment,
- Custom,
- Dispense,
- DispenseInPlace,
- BlowOut,
- BlowOutInPlace,
- ConfigureForVolume,
- ConfigureNozzleLayout,
- DropTip,
- DropTipInPlace,
- Home,
- RetractAxis,
- LoadLabware,
- LoadLiquid,
- LoadModule,
- LoadPipette,
- MoveLabware,
- MoveRelative,
- MoveToCoordinates,
- MoveToWell,
- PrepareToAspirate,
- WaitForResume,
- WaitForDuration,
- PickUpTip,
- SavePosition,
- SetRailLights,
- TouchTip,
- SetStatusBar,
- heater_shaker.WaitForTemperature,
- heater_shaker.SetTargetTemperature,
- heater_shaker.DeactivateHeater,
- heater_shaker.SetAndWaitForShakeSpeed,
- heater_shaker.DeactivateShaker,
- heater_shaker.OpenLabwareLatch,
- heater_shaker.CloseLabwareLatch,
- magnetic_module.Disengage,
- magnetic_module.Engage,
- temperature_module.SetTargetTemperature,
- temperature_module.WaitForTemperature,
- temperature_module.DeactivateTemperature,
- thermocycler.SetTargetBlockTemperature,
- thermocycler.WaitForBlockTemperature,
- thermocycler.SetTargetLidTemperature,
- thermocycler.WaitForLidTemperature,
- thermocycler.DeactivateBlock,
- thermocycler.DeactivateLid,
- thermocycler.OpenLid,
- thermocycler.CloseLid,
- thermocycler.RunProfile,
- calibration.CalibrateGripper,
- calibration.CalibratePipette,
- calibration.CalibrateModule,
- calibration.MoveToMaintenancePosition,
+from .verify_tip_presence import (
+ VerifyTipPresence,
+ VerifyTipPresenceCreate,
+ VerifyTipPresenceParams,
+ VerifyTipPresenceResult,
+ VerifyTipPresenceCommandType,
+)
+
+from .get_tip_presence import (
+ GetTipPresence,
+ GetTipPresenceCreate,
+ GetTipPresenceParams,
+ GetTipPresenceResult,
+ GetTipPresenceCommandType,
+)
+
+Command = Annotated[
+ Union[
+ Aspirate,
+ AspirateInPlace,
+ Comment,
+ Custom,
+ Dispense,
+ DispenseInPlace,
+ BlowOut,
+ BlowOutInPlace,
+ ConfigureForVolume,
+ ConfigureNozzleLayout,
+ DropTip,
+ DropTipInPlace,
+ Home,
+ RetractAxis,
+ LoadLabware,
+ ReloadLabware,
+ LoadLiquid,
+ LoadModule,
+ LoadPipette,
+ MoveLabware,
+ MoveRelative,
+ MoveToCoordinates,
+ MoveToWell,
+ MoveToAddressableArea,
+ MoveToAddressableAreaForDropTip,
+ PrepareToAspirate,
+ WaitForResume,
+ WaitForDuration,
+ PickUpTip,
+ SavePosition,
+ SetRailLights,
+ TouchTip,
+ SetStatusBar,
+ VerifyTipPresence,
+ GetTipPresence,
+ heater_shaker.WaitForTemperature,
+ heater_shaker.SetTargetTemperature,
+ heater_shaker.DeactivateHeater,
+ heater_shaker.SetAndWaitForShakeSpeed,
+ heater_shaker.DeactivateShaker,
+ heater_shaker.OpenLabwareLatch,
+ heater_shaker.CloseLabwareLatch,
+ magnetic_module.Disengage,
+ magnetic_module.Engage,
+ temperature_module.SetTargetTemperature,
+ temperature_module.WaitForTemperature,
+ temperature_module.DeactivateTemperature,
+ thermocycler.SetTargetBlockTemperature,
+ thermocycler.WaitForBlockTemperature,
+ thermocycler.SetTargetLidTemperature,
+ thermocycler.WaitForLidTemperature,
+ thermocycler.DeactivateBlock,
+ thermocycler.DeactivateLid,
+ thermocycler.OpenLid,
+ thermocycler.CloseLid,
+ thermocycler.RunProfile,
+ calibration.CalibrateGripper,
+ calibration.CalibratePipette,
+ calibration.CalibrateModule,
+ calibration.MoveToMaintenancePosition,
+ ],
+ Field(discriminator="commandType"),
]
CommandParams = Union[
@@ -326,6 +377,7 @@
HomeParams,
RetractAxisParams,
LoadLabwareParams,
+ ReloadLabwareParams,
LoadLiquidParams,
LoadModuleParams,
LoadPipetteParams,
@@ -333,6 +385,8 @@
MoveRelativeParams,
MoveToCoordinatesParams,
MoveToWellParams,
+ MoveToAddressableAreaParams,
+ MoveToAddressableAreaForDropTipParams,
PrepareToAspirateParams,
WaitForResumeParams,
WaitForDurationParams,
@@ -341,6 +395,8 @@
SetRailLightsParams,
TouchTipParams,
SetStatusBarParams,
+ VerifyTipPresenceParams,
+ GetTipPresenceParams,
heater_shaker.WaitForTemperatureParams,
heater_shaker.SetTargetTemperatureParams,
heater_shaker.DeactivateHeaterParams,
@@ -385,6 +441,7 @@
HomeCommandType,
RetractAxisCommandType,
LoadLabwareCommandType,
+ ReloadLabwareCommandType,
LoadLiquidCommandType,
LoadModuleCommandType,
LoadPipetteCommandType,
@@ -392,6 +449,8 @@
MoveRelativeCommandType,
MoveToCoordinatesCommandType,
MoveToWellCommandType,
+ MoveToAddressableAreaCommandType,
+ MoveToAddressableAreaForDropTipCommandType,
PrepareToAspirateCommandType,
WaitForResumeCommandType,
WaitForDurationCommandType,
@@ -400,6 +459,8 @@
SetRailLightsCommandType,
TouchTipCommandType,
SetStatusBarCommandType,
+ VerifyTipPresenceCommandType,
+ GetTipPresenceCommandType,
heater_shaker.WaitForTemperatureCommandType,
heater_shaker.SetTargetTemperatureCommandType,
heater_shaker.DeactivateHeaterCommandType,
@@ -427,62 +488,70 @@
calibration.MoveToMaintenancePositionCommandType,
]
-CommandCreate = Union[
- AspirateCreate,
- AspirateInPlaceCreate,
- CommentCreate,
- ConfigureForVolumeCreate,
- ConfigureNozzleLayoutCreate,
- CustomCreate,
- DispenseCreate,
- DispenseInPlaceCreate,
- BlowOutCreate,
- BlowOutInPlaceCreate,
- DropTipCreate,
- DropTipInPlaceCreate,
- HomeCreate,
- RetractAxisCreate,
- LoadLabwareCreate,
- LoadLiquidCreate,
- LoadModuleCreate,
- LoadPipetteCreate,
- MoveLabwareCreate,
- MoveRelativeCreate,
- MoveToCoordinatesCreate,
- MoveToWellCreate,
- PrepareToAspirateCreate,
- WaitForResumeCreate,
- WaitForDurationCreate,
- PickUpTipCreate,
- SavePositionCreate,
- SetRailLightsCreate,
- TouchTipCreate,
- SetStatusBarCreate,
- heater_shaker.WaitForTemperatureCreate,
- heater_shaker.SetTargetTemperatureCreate,
- heater_shaker.DeactivateHeaterCreate,
- heater_shaker.SetAndWaitForShakeSpeedCreate,
- heater_shaker.DeactivateShakerCreate,
- heater_shaker.OpenLabwareLatchCreate,
- heater_shaker.CloseLabwareLatchCreate,
- magnetic_module.DisengageCreate,
- magnetic_module.EngageCreate,
- temperature_module.SetTargetTemperatureCreate,
- temperature_module.WaitForTemperatureCreate,
- temperature_module.DeactivateTemperatureCreate,
- thermocycler.SetTargetBlockTemperatureCreate,
- thermocycler.WaitForBlockTemperatureCreate,
- thermocycler.SetTargetLidTemperatureCreate,
- thermocycler.WaitForLidTemperatureCreate,
- thermocycler.DeactivateBlockCreate,
- thermocycler.DeactivateLidCreate,
- thermocycler.OpenLidCreate,
- thermocycler.CloseLidCreate,
- thermocycler.RunProfileCreate,
- calibration.CalibrateGripperCreate,
- calibration.CalibratePipetteCreate,
- calibration.CalibrateModuleCreate,
- calibration.MoveToMaintenancePositionCreate,
+CommandCreate = Annotated[
+ Union[
+ AspirateCreate,
+ AspirateInPlaceCreate,
+ CommentCreate,
+ ConfigureForVolumeCreate,
+ ConfigureNozzleLayoutCreate,
+ CustomCreate,
+ DispenseCreate,
+ DispenseInPlaceCreate,
+ BlowOutCreate,
+ BlowOutInPlaceCreate,
+ DropTipCreate,
+ DropTipInPlaceCreate,
+ HomeCreate,
+ RetractAxisCreate,
+ LoadLabwareCreate,
+ ReloadLabwareCreate,
+ LoadLiquidCreate,
+ LoadModuleCreate,
+ LoadPipetteCreate,
+ MoveLabwareCreate,
+ MoveRelativeCreate,
+ MoveToCoordinatesCreate,
+ MoveToWellCreate,
+ MoveToAddressableAreaCreate,
+ MoveToAddressableAreaForDropTipCreate,
+ PrepareToAspirateCreate,
+ WaitForResumeCreate,
+ WaitForDurationCreate,
+ PickUpTipCreate,
+ SavePositionCreate,
+ SetRailLightsCreate,
+ TouchTipCreate,
+ SetStatusBarCreate,
+ VerifyTipPresenceCreate,
+ GetTipPresenceCreate,
+ heater_shaker.WaitForTemperatureCreate,
+ heater_shaker.SetTargetTemperatureCreate,
+ heater_shaker.DeactivateHeaterCreate,
+ heater_shaker.SetAndWaitForShakeSpeedCreate,
+ heater_shaker.DeactivateShakerCreate,
+ heater_shaker.OpenLabwareLatchCreate,
+ heater_shaker.CloseLabwareLatchCreate,
+ magnetic_module.DisengageCreate,
+ magnetic_module.EngageCreate,
+ temperature_module.SetTargetTemperatureCreate,
+ temperature_module.WaitForTemperatureCreate,
+ temperature_module.DeactivateTemperatureCreate,
+ thermocycler.SetTargetBlockTemperatureCreate,
+ thermocycler.WaitForBlockTemperatureCreate,
+ thermocycler.SetTargetLidTemperatureCreate,
+ thermocycler.WaitForLidTemperatureCreate,
+ thermocycler.DeactivateBlockCreate,
+ thermocycler.DeactivateLidCreate,
+ thermocycler.OpenLidCreate,
+ thermocycler.CloseLidCreate,
+ thermocycler.RunProfileCreate,
+ calibration.CalibrateGripperCreate,
+ calibration.CalibratePipetteCreate,
+ calibration.CalibrateModuleCreate,
+ calibration.MoveToMaintenancePositionCreate,
+ ],
+ Field(discriminator="commandType"),
]
CommandResult = Union[
@@ -501,6 +570,7 @@
HomeResult,
RetractAxisResult,
LoadLabwareResult,
+ ReloadLabwareResult,
LoadLiquidResult,
LoadModuleResult,
LoadPipetteResult,
@@ -508,6 +578,8 @@
MoveRelativeResult,
MoveToCoordinatesResult,
MoveToWellResult,
+ MoveToAddressableAreaResult,
+ MoveToAddressableAreaForDropTipResult,
PrepareToAspirateResult,
WaitForResumeResult,
WaitForDurationResult,
@@ -516,6 +588,8 @@
SetRailLightsResult,
TouchTipResult,
SetStatusBarResult,
+ VerifyTipPresenceResult,
+ GetTipPresenceResult,
heater_shaker.WaitForTemperatureResult,
heater_shaker.SetTargetTemperatureResult,
heater_shaker.DeactivateHeaterResult,
@@ -549,3 +623,5 @@
ConfigureForVolumePrivateResult,
ConfigureNozzleLayoutPrivateResult,
]
+
+CommandT = TypeVar("CommandT", bound=Command)
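A self-contained sketch of what the Annotated[..., Field(discriminator="commandType")] wrapper buys: pydantic selects the concrete model from the commandType literal rather than trying each union member in turn. The models below are simplified stand-ins, not the engine's.

# Simplified stand-in models; the real Command/CommandCreate unions above are
# parsed the same way via the "commandType" discriminator.
from typing import Union
from typing_extensions import Annotated, Literal
from pydantic import BaseModel, Field, parse_obj_as


class _Aspirate(BaseModel):
    commandType: Literal["aspirate"]
    volume: float


class _Dispense(BaseModel):
    commandType: Literal["dispense"]
    volume: float


_DemoCommand = Annotated[
    Union[_Aspirate, _Dispense], Field(discriminator="commandType")
]

parsed = parse_obj_as(_DemoCommand, {"commandType": "dispense", "volume": 5.0})
assert isinstance(parsed, _Dispense)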
diff --git a/api/src/opentrons/protocol_engine/commands/configure_for_volume.py b/api/src/opentrons/protocol_engine/commands/configure_for_volume.py
index 9aa2f6ee0d3..ed0dfec0657 100644
--- a/api/src/opentrons/protocol_engine/commands/configure_for_volume.py
+++ b/api/src/opentrons/protocol_engine/commands/configure_for_volume.py
@@ -1,13 +1,10 @@
"""Configure for volume command request, result, and implementation models."""
from __future__ import annotations
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
from typing import TYPE_CHECKING, Optional, Type, Tuple
from typing_extensions import Literal
-from .pipetting_common import (
- PipetteIdMixin,
- VolumeMixin,
-)
+from .pipetting_common import PipetteIdMixin
from .command import (
AbstractCommandWithPrivateResultImpl,
BaseCommand,
@@ -22,10 +19,22 @@
ConfigureForVolumeCommandType = Literal["configureForVolume"]
-class ConfigureForVolumeParams(PipetteIdMixin, VolumeMixin):
+class ConfigureForVolumeParams(PipetteIdMixin):
"""Parameters required to configure volume for a specific pipette."""
- pass
+ volume: float = Field(
+ ...,
+ description="Amount of liquid in uL. Must be at least 0 and no greater "
+ "than a pipette-specific maximum volume.",
+ ge=0,
+ )
+ tipOverlapNotAfterVersion: Optional[str] = Field(
+ None,
+ description="A version of tip overlap data to not exceed. The highest-versioned "
+ "tip overlap data that does not exceed this version will be used. Versions are "
+ "expressed as vN where N is an integer, counting up from v0. If None, the current "
+ "highest version will be used.",
+ )
class ConfigureForVolumePrivateResult(PipetteConfigUpdateResultMixin):
@@ -59,6 +68,7 @@ async def execute(
pipette_result = await self._equipment.configure_for_volume(
pipette_id=params.pipetteId,
volume=params.volume,
+ tip_overlap_version=params.tipOverlapNotAfterVersion,
)
return ConfigureForVolumeResult(), ConfigureForVolumePrivateResult(
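To make the `tipOverlapNotAfterVersion` wording concrete, here is an illustrative-only sketch of the selection rule it describes: choose the highest `vN` entry that does not exceed the requested cap. The real lookup lives in the shared-data providers; the dictionary and function here are invented for the example.

# Illustrative only; `available` maps assumed "vN" keys to overlap values.
from typing import Dict, Optional


def select_tip_overlap_version(
    available: Dict[str, float], not_after: Optional[str]
) -> str:
    def version_number(key: str) -> int:
        return int(key.lstrip("v"))

    cap = None if not_after is None else version_number(not_after)
    candidates = [
        key for key in available if cap is None or version_number(key) <= cap
    ]
    return max(candidates, key=version_number)


overlaps = {"v0": 10.5, "v1": 9.8, "v3": 9.2}
assert select_tip_overlap_version(overlaps, "v2") == "v1"
assert select_tip_overlap_version(overlaps, None) == "v3"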
diff --git a/api/src/opentrons/protocol_engine/commands/configure_nozzle_layout.py b/api/src/opentrons/protocol_engine/commands/configure_nozzle_layout.py
index 2ad5f38a9a5..49b90ec7432 100644
--- a/api/src/opentrons/protocol_engine/commands/configure_nozzle_layout.py
+++ b/api/src/opentrons/protocol_engine/commands/configure_nozzle_layout.py
@@ -16,7 +16,7 @@
PipetteNozzleLayoutResultMixin,
)
from ..types import (
- EmptyNozzleLayoutConfiguration,
+ AllNozzleLayoutConfiguration,
SingleNozzleLayoutConfiguration,
RowNozzleLayoutConfiguration,
ColumnNozzleLayoutConfiguration,
@@ -33,8 +33,8 @@
class ConfigureNozzleLayoutParams(PipetteIdMixin):
"""Parameters required to configure the nozzle layout for a specific pipette."""
- configuration_params: Union[
- EmptyNozzleLayoutConfiguration,
+ configurationParams: Union[
+ AllNozzleLayoutConfiguration,
SingleNozzleLayoutConfiguration,
RowNozzleLayoutConfiguration,
ColumnNozzleLayoutConfiguration,
@@ -73,8 +73,13 @@ async def execute(
self, params: ConfigureNozzleLayoutParams
) -> Tuple[ConfigureNozzleLayoutResult, ConfigureNozzleLayoutPrivateResult]:
"""Check that requested pipette can support the requested nozzle layout."""
+ primary_nozzle = params.configurationParams.dict().get("primaryNozzle")
+ front_right_nozzle = params.configurationParams.dict().get("frontRightNozzle")
nozzle_params = await self._tip_handler.available_for_nozzle_layout(
- pipette_id=params.pipetteId, **params.configuration_params.dict()
+ pipette_id=params.pipetteId,
+ style=params.configurationParams.style,
+ primary_nozzle=primary_nozzle,
+ front_right_nozzle=front_right_nozzle,
)
nozzle_map = await self._equipment.configure_nozzle_layout(
diff --git a/api/src/opentrons/protocol_engine/commands/configuring_common.py b/api/src/opentrons/protocol_engine/commands/configuring_common.py
index ec5917d9931..6998bcbac7b 100644
--- a/api/src/opentrons/protocol_engine/commands/configuring_common.py
+++ b/api/src/opentrons/protocol_engine/commands/configuring_common.py
@@ -1,7 +1,5 @@
"""Common configuration command base models."""
-from pydantic import BaseModel, Field
-from typing import Optional
from dataclasses import dataclass
from opentrons.hardware_control.nozzle_manager import (
NozzleMap,
@@ -11,18 +9,18 @@
@dataclass
class PipetteConfigUpdateResultMixin:
- """A mixin-suitable model for adding pipette config to results."""
+ """A mixin-suitable model for adding pipette config to private results."""
pipette_id: str
serial_number: str
config: pipette_data_provider.LoadedStaticPipetteData
-class PipetteNozzleLayoutResultMixin(BaseModel):
+@dataclass
+class PipetteNozzleLayoutResultMixin:
"""A nozzle layout result for updating the pipette state."""
pipette_id: str
- nozzle_map: Optional[NozzleMap] = Field(
- default=None,
- description="A dataclass object holding information about the current nozzle configuration.",
- )
+
+ nozzle_map: NozzleMap
+ """A dataclass object holding information about the current nozzle configuration."""
diff --git a/api/src/opentrons/protocol_engine/commands/dispense.py b/api/src/opentrons/protocol_engine/commands/dispense.py
index 361b6d2cdda..aa5017ed670 100644
--- a/api/src/opentrons/protocol_engine/commands/dispense.py
+++ b/api/src/opentrons/protocol_engine/commands/dispense.py
@@ -8,7 +8,7 @@
from ..types import DeckPoint
from .pipetting_common import (
PipetteIdMixin,
- VolumeMixin,
+ DispenseVolumeMixin,
FlowRateMixin,
WellLocationMixin,
BaseLiquidHandlingResult,
@@ -23,7 +23,9 @@
DispenseCommandType = Literal["dispense"]
-class DispenseParams(PipetteIdMixin, VolumeMixin, FlowRateMixin, WellLocationMixin):
+class DispenseParams(
+ PipetteIdMixin, DispenseVolumeMixin, FlowRateMixin, WellLocationMixin
+):
"""Payload required to dispense to a specific well."""
pushOut: Optional[float] = Field(
diff --git a/api/src/opentrons/protocol_engine/commands/dispense_in_place.py b/api/src/opentrons/protocol_engine/commands/dispense_in_place.py
index bda6a953f45..9f0aee8df03 100644
--- a/api/src/opentrons/protocol_engine/commands/dispense_in_place.py
+++ b/api/src/opentrons/protocol_engine/commands/dispense_in_place.py
@@ -7,7 +7,7 @@
from .pipetting_common import (
PipetteIdMixin,
- VolumeMixin,
+ DispenseVolumeMixin,
FlowRateMixin,
BaseLiquidHandlingResult,
)
@@ -20,7 +20,7 @@
DispenseInPlaceCommandType = Literal["dispenseInPlace"]
-class DispenseInPlaceParams(PipetteIdMixin, VolumeMixin, FlowRateMixin):
+class DispenseInPlaceParams(PipetteIdMixin, DispenseVolumeMixin, FlowRateMixin):
"""Payload required to dispense in place."""
pushOut: Optional[float] = Field(
diff --git a/api/src/opentrons/protocol_engine/commands/drop_tip.py b/api/src/opentrons/protocol_engine/commands/drop_tip.py
index 90b0c04484b..923c384e630 100644
--- a/api/src/opentrons/protocol_engine/commands/drop_tip.py
+++ b/api/src/opentrons/protocol_engine/commands/drop_tip.py
@@ -82,8 +82,14 @@ async def execute(self, params: DropTipParams) -> DropTipResult:
else:
well_location = params.wellLocation
+ is_partially_configured = self._state_view.pipettes.get_is_partially_configured(
+ pipette_id=pipette_id
+ )
tip_drop_location = self._state_view.geometry.get_checked_tip_drop_location(
- pipette_id=pipette_id, labware_id=labware_id, well_location=well_location
+ pipette_id=pipette_id,
+ labware_id=labware_id,
+ well_location=well_location,
+ partially_configured=is_partially_configured,
)
position = await self._movement_handler.move_to_well(
diff --git a/api/src/opentrons/protocol_engine/commands/get_tip_presence.py b/api/src/opentrons/protocol_engine/commands/get_tip_presence.py
new file mode 100644
index 00000000000..0a878418a6b
--- /dev/null
+++ b/api/src/opentrons/protocol_engine/commands/get_tip_presence.py
@@ -0,0 +1,80 @@
+"""Get tip presence command request, result and implementation models."""
+from __future__ import annotations
+
+from pydantic import Field, BaseModel
+from typing import TYPE_CHECKING, Optional, Type
+from typing_extensions import Literal
+
+from .pipetting_common import PipetteIdMixin
+from .command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
+
+from ..types import TipPresenceStatus
+
+if TYPE_CHECKING:
+ from ..execution import TipHandler
+
+
+GetTipPresenceCommandType = Literal["getTipPresence"]
+
+
+class GetTipPresenceParams(PipetteIdMixin):
+ """Payload required for a GetTipPresence command."""
+
+ pass
+
+
+class GetTipPresenceResult(BaseModel):
+ """Result data from the execution of a GetTipPresence command."""
+
+ status: TipPresenceStatus = Field(
+ ...,
+ description=(
+ "Whether or not a tip is attached on the pipette. This only works on"
+ " on FLEX because OT-2 pipettes do not possess tip presence sensors,"
+ " hence, will always return TipPresenceStatus.UNKNOWN."
+ ),
+ )
+
+
+class GetTipPresenceImplementation(
+ AbstractCommandImpl[GetTipPresenceParams, GetTipPresenceResult]
+):
+ """GetTipPresence command implementation."""
+
+ def __init__(
+ self,
+ tip_handler: TipHandler,
+ **kwargs: object,
+ ) -> None:
+ self._tip_handler = tip_handler
+
+ async def execute(self, params: GetTipPresenceParams) -> GetTipPresenceResult:
+ """Verify if tip presence is as expected for the requested pipette."""
+ pipette_id = params.pipetteId
+
+ result = await self._tip_handler.get_tip_presence(
+ pipette_id=pipette_id,
+ )
+
+ return GetTipPresenceResult(status=result)
+
+
+class GetTipPresence(BaseCommand[GetTipPresenceParams, GetTipPresenceResult]):
+ """GetTipPresence command model."""
+
+ commandType: GetTipPresenceCommandType = "getTipPresence"
+ params: GetTipPresenceParams
+ result: Optional[GetTipPresenceResult]
+
+ _ImplementationCls: Type[
+ GetTipPresenceImplementation
+ ] = GetTipPresenceImplementation
+
+
+class GetTipPresenceCreate(BaseCommandCreate[GetTipPresenceParams]):
+ """GetTipPresence command creation request model."""
+
+ commandType: GetTipPresenceCommandType = "getTipPresence"
+ params: GetTipPresenceParams
+
+ _CommandCls: Type[GetTipPresence] = GetTipPresence
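A minimal construction example for the new command, using only the models defined in this file; "pipette-1" is a placeholder ID.

# Placeholder pipette ID; the request shape mirrors the models above.
from opentrons.protocol_engine.commands.get_tip_presence import (
    GetTipPresenceCreate,
    GetTipPresenceParams,
)

request = GetTipPresenceCreate(
    params=GetTipPresenceParams(pipetteId="pipette-1")
)
# Once executed by the engine, the result's `status` is a TipPresenceStatus;
# on an OT-2 it is expected to be TipPresenceStatus.UNKNOWN.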
diff --git a/api/src/opentrons/protocol_engine/commands/hash_command_params.py b/api/src/opentrons/protocol_engine/commands/hash_command_params.py
index 39a042e55dd..1d124101d4c 100644
--- a/api/src/opentrons/protocol_engine/commands/hash_command_params.py
+++ b/api/src/opentrons/protocol_engine/commands/hash_command_params.py
@@ -9,7 +9,7 @@
# TODO(mm, 2023-04-28):
# This implementation will not notice that commands are different if they have different params
# but share the same commandType. We should also hash command params. (Jira RCORE-326.)
-def hash_command_params(
+def hash_protocol_command_params(
create: CommandCreate, last_hash: Optional[str]
) -> Optional[str]:
"""Given a command create object, return a hash.
@@ -28,12 +28,11 @@ def hash_command_params(
The command hash, if the command is a protocol command.
`None` if the command is a setup command.
"""
- if create.intent == CommandIntent.SETUP:
+ if create.intent in [CommandIntent.SETUP, CommandIntent.FIXIT]:
return None
- else:
- # We avoid Python's built-in hash() function because it's not stable across
- # runs of the Python interpreter. (Jira RSS-215.)
- last_contribution = b"" if last_hash is None else last_hash.encode("ascii")
- this_contribution = md5(create.commandType.encode("ascii")).digest()
- to_hash = last_contribution + this_contribution
- return md5(to_hash).hexdigest()
+ # We avoid Python's built-in hash() function because it's not stable across
+ # runs of the Python interpreter. (Jira RSS-215.)
+ last_contribution = b"" if last_hash is None else last_hash.encode("ascii")
+ this_contribution = md5(create.commandType.encode("ascii")).digest()
+ to_hash = last_contribution + this_contribution
+ return md5(to_hash).hexdigest()
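A standalone illustration of the chaining behavior preserved above: each protocol command folds the previous hash into an md5 of its command type, so the result depends on the whole protocol command history, while setup and fixit commands contribute nothing.

# Mirrors the arithmetic of hash_protocol_command_params for illustration.
from hashlib import md5
from typing import Optional


def chain_hash(command_type: str, last_hash: Optional[str]) -> str:
    last_contribution = b"" if last_hash is None else last_hash.encode("ascii")
    this_contribution = md5(command_type.encode("ascii")).digest()
    return md5(last_contribution + this_contribution).hexdigest()


h1 = chain_hash("aspirate", None)
h2 = chain_hash("dispense", h1)  # depends on the full history, not just "dispense"
assert h2 != chain_hash("dispense", None)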
diff --git a/api/src/opentrons/protocol_engine/commands/load_labware.py b/api/src/opentrons/protocol_engine/commands/load_labware.py
index 614c702df51..64ed68b47ba 100644
--- a/api/src/opentrons/protocol_engine/commands/load_labware.py
+++ b/api/src/opentrons/protocol_engine/commands/load_labware.py
@@ -7,8 +7,13 @@
from opentrons_shared_data.labware.labware_definition import LabwareDefinition
from ..errors import LabwareIsNotAllowedInLocationError
-from ..resources import labware_validation
-from ..types import LabwareLocation, OnLabwareLocation, DeckSlotLocation
+from ..resources import labware_validation, fixture_validation
+from ..types import (
+ LabwareLocation,
+ OnLabwareLocation,
+ DeckSlotLocation,
+ AddressableAreaLocation,
+)
from .command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
@@ -105,21 +110,38 @@ async def execute(self, params: LoadLabwareParams) -> LoadLabwareResult:
f"{params.loadName} is not allowed in slot {params.location.slotName}"
)
+ if isinstance(params.location, AddressableAreaLocation):
+ area_name = params.location.addressableAreaName
+ if not fixture_validation.is_deck_slot(params.location.addressableAreaName):
+ raise LabwareIsNotAllowedInLocationError(
+ f"Cannot load {params.loadName} onto addressable area {area_name}"
+ )
+ self._state_view.addressable_areas.raise_if_area_not_in_deck_configuration(
+ area_name
+ )
+ elif isinstance(params.location, DeckSlotLocation):
+ self._state_view.addressable_areas.raise_if_area_not_in_deck_configuration(
+ params.location.slotName.id
+ )
+
+ verified_location = self._state_view.geometry.ensure_location_not_occupied(
+ params.location
+ )
loaded_labware = await self._equipment.load_labware(
load_name=params.loadName,
namespace=params.namespace,
version=params.version,
- location=params.location,
+ location=verified_location,
labware_id=params.labwareId,
)
# TODO(jbl 2023-06-23) these validation checks happen after the labware is loaded, because they rely on
# on the definition. In practice this will not cause any issues since they will raise protocol ending
# exception, but for correctness should be refactored to do this check beforehand.
- if isinstance(params.location, OnLabwareLocation):
+ if isinstance(verified_location, OnLabwareLocation):
self._state_view.labware.raise_if_labware_cannot_be_stacked(
top_labware_definition=loaded_labware.definition,
- bottom_labware_id=params.location.labwareId,
+ bottom_labware_id=verified_location.labwareId,
)
return LoadLabwareResult(
diff --git a/api/src/opentrons/protocol_engine/commands/load_module.py b/api/src/opentrons/protocol_engine/commands/load_module.py
index 407db1dc93a..5c1d474be4d 100644
--- a/api/src/opentrons/protocol_engine/commands/load_module.py
+++ b/api/src/opentrons/protocol_engine/commands/load_module.py
@@ -5,9 +5,18 @@
from pydantic import BaseModel, Field
from .command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
-from ..types import DeckSlotLocation, ModuleModel, ModuleDefinition
+from ..types import (
+ DeckSlotLocation,
+ ModuleType,
+ ModuleModel,
+ ModuleDefinition,
+)
+from opentrons.types import DeckSlotName
+
+from opentrons.protocol_engine.resources import deck_configuration_provider
if TYPE_CHECKING:
+ from ..state import StateView
from ..execution import EquipmentHandler
@@ -86,7 +95,7 @@ class LoadModuleResult(BaseModel):
)
serialNumber: Optional[str] = Field(
- ...,
+ None,
description="Hardware serial number of the connected module. "
"Will be `None` if a module is not electrically connected to the robot (like the Magnetic Block).",
)
@@ -95,21 +104,45 @@ class LoadModuleResult(BaseModel):
class LoadModuleImplementation(AbstractCommandImpl[LoadModuleParams, LoadModuleResult]):
"""The implementation of the load module command."""
- def __init__(self, equipment: EquipmentHandler, **kwargs: object) -> None:
+ def __init__(
+ self, equipment: EquipmentHandler, state_view: StateView, **kwargs: object
+ ) -> None:
self._equipment = equipment
+ self._state_view = state_view
async def execute(self, params: LoadModuleParams) -> LoadModuleResult:
"""Check that the requested module is attached and assign its identifier."""
+ module_type = params.model.as_type()
+ self._ensure_module_location(params.location.slotName, module_type)
+
+ if self._state_view.config.robot_type == "OT-2 Standard":
+ self._state_view.addressable_areas.raise_if_area_not_in_deck_configuration(
+ params.location.slotName.id
+ )
+ else:
+ addressable_area = self._state_view.geometry._modules.ensure_and_convert_module_fixture_location(
+ deck_slot=params.location.slotName,
+ deck_type=self._state_view.config.deck_type,
+ model=params.model,
+ )
+ self._state_view.addressable_areas.raise_if_area_not_in_deck_configuration(
+ addressable_area
+ )
+
+ verified_location = self._state_view.geometry.ensure_location_not_occupied(
+ params.location
+ )
+
if params.model == ModuleModel.MAGNETIC_BLOCK_V1:
loaded_module = await self._equipment.load_magnetic_block(
model=params.model,
- location=params.location,
+ location=verified_location,
module_id=params.moduleId,
)
else:
loaded_module = await self._equipment.load_module(
model=params.model,
- location=params.location,
+ location=verified_location,
module_id=params.moduleId,
)
@@ -120,6 +153,30 @@ async def execute(self, params: LoadModuleParams) -> LoadModuleResult:
definition=loaded_module.definition,
)
+ def _ensure_module_location(
+ self, slot: DeckSlotName, module_type: ModuleType
+ ) -> None:
+ if self._state_view.config.robot_type == "OT-2 Standard":
+ slot_def = self._state_view.addressable_areas.get_slot_definition(slot.id)
+ compatible_modules = slot_def["compatibleModuleTypes"]
+ if module_type.value not in compatible_modules:
+ raise ValueError(
+ f"A {module_type.value} cannot be loaded into slot {slot}"
+ )
+ else:
+ cutout_fixture_id = ModuleType.to_module_fixture_id(module_type)
+ module_fixture = deck_configuration_provider.get_cutout_fixture(
+ cutout_fixture_id,
+ self._state_view.addressable_areas.state.deck_definition,
+ )
+ cutout_id = (
+ self._state_view.addressable_areas.get_cutout_id_by_deck_slot_name(slot)
+ )
+ if cutout_id not in module_fixture["mayMountTo"]:
+ raise ValueError(
+ f"A {module_type.value} cannot be loaded into slot {slot}"
+ )
+
class LoadModule(BaseCommand[LoadModuleParams, LoadModuleResult]):
"""The model for a load module command."""
diff --git a/api/src/opentrons/protocol_engine/commands/load_pipette.py b/api/src/opentrons/protocol_engine/commands/load_pipette.py
index 66f32e99edc..e623ecb6d7f 100644
--- a/api/src/opentrons/protocol_engine/commands/load_pipette.py
+++ b/api/src/opentrons/protocol_engine/commands/load_pipette.py
@@ -1,5 +1,12 @@
"""Load pipette command request, result, and implementation models."""
from __future__ import annotations
+
+from opentrons_shared_data.pipette.pipette_load_name_conversions import (
+ convert_to_pipette_name_type,
+)
+from opentrons_shared_data.pipette.types import PipetteGenerationType
+from opentrons_shared_data.robot import user_facing_robot_type
+from opentrons_shared_data.robot.dev_types import RobotTypeEnum
from pydantic import BaseModel, Field
from typing import TYPE_CHECKING, Optional, Type, Tuple
from typing_extensions import Literal
@@ -13,9 +20,11 @@
BaseCommandCreate,
)
from .configuring_common import PipetteConfigUpdateResultMixin
+from ..errors import InvalidSpecificationForRobotTypeError, InvalidLoadPipetteSpecsError
if TYPE_CHECKING:
from ..execution import EquipmentHandler
+ from ..state import StateView
LoadPipetteCommandType = Literal["loadPipette"]
@@ -43,6 +52,13 @@ class LoadPipetteParams(BaseModel):
description="An optional ID to assign to this pipette. If None, an ID "
"will be generated.",
)
+ tipOverlapNotAfterVersion: Optional[str] = Field(
+ None,
+ description="A version of tip overlap data to not exceed. The highest-versioned "
+ "tip overlap data that does not exceed this version will be used. Versions are "
+ "expressed as vN where N is an integer, counting up from v0. If None, the current "
+ "highest version will be used.",
+ )
class LoadPipetteResult(BaseModel):
@@ -61,17 +77,49 @@ class LoadPipetteImplementation(
):
"""Load pipette command implementation."""
- def __init__(self, equipment: EquipmentHandler, **kwargs: object) -> None:
+ def __init__(
+ self, equipment: EquipmentHandler, state_view: StateView, **kwargs: object
+ ) -> None:
self._equipment = equipment
+ self._state_view = state_view
async def execute(
self, params: LoadPipetteParams
) -> Tuple[LoadPipetteResult, LoadPipettePrivateResult]:
"""Check that requested pipette is attached and assign its identifier."""
+ pipette_generation = convert_to_pipette_name_type(
+ params.pipetteName.value
+ ).pipette_generation
+ robot_type = RobotTypeEnum.robot_literal_to_enum(
+ self._state_view.config.robot_type
+ )
+ if (
+ (
+ robot_type == RobotTypeEnum.FLEX
+ and pipette_generation != PipetteGenerationType.FLEX
+ )
+ ) or (
+ (
+ robot_type == RobotTypeEnum.OT2
+ and pipette_generation
+ not in [PipetteGenerationType.GEN1, PipetteGenerationType.GEN2]
+ )
+ ):
+ raise InvalidSpecificationForRobotTypeError(
+ f"Cannot load a {pipette_generation.value.capitalize()} pipette on "
+ f"{user_facing_robot_type(robot_type=self._state_view.config.robot_type, include_article=True)}."
+ )
+
+ if params.mount == MountType.EXTENSION:
+ raise InvalidLoadPipetteSpecsError(
+ "Cannot load a pipette on the EXTENSION mount. Use mount LEFT or RIGHT."
+ )
+
loaded_pipette = await self._equipment.load_pipette(
pipette_name=params.pipetteName,
mount=params.mount,
pipette_id=params.pipetteId,
+ tip_overlap_version=params.tipOverlapNotAfterVersion,
)
return LoadPipetteResult(
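The new compatibility gate in `LoadPipetteImplementation.execute` rejects Flex pipettes on an OT-2 and GEN1/GEN2 pipettes on a Flex before any hardware is touched. A standalone sketch of that check, with simplified stand-ins for the shared-data enums:

```python
from enum import Enum, auto


class RobotTypeEnum(Enum):
    OT2 = auto()
    FLEX = auto()


class PipetteGenerationType(Enum):
    GEN1 = auto()
    GEN2 = auto()
    FLEX = auto()


def check_generation_for_robot(robot: RobotTypeEnum, generation: PipetteGenerationType) -> None:
    """Raise if this pipette generation cannot be mounted on this robot."""
    flex_mismatch = robot is RobotTypeEnum.FLEX and generation is not PipetteGenerationType.FLEX
    ot2_mismatch = robot is RobotTypeEnum.OT2 and generation not in (
        PipetteGenerationType.GEN1,
        PipetteGenerationType.GEN2,
    )
    if flex_mismatch or ot2_mismatch:
        raise ValueError(f"Cannot load a {generation.name} pipette on {robot.name}.")


check_generation_for_robot(RobotTypeEnum.OT2, PipetteGenerationType.GEN2)  # OK
```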
diff --git a/api/src/opentrons/protocol_engine/commands/move_labware.py b/api/src/opentrons/protocol_engine/commands/move_labware.py
index 682f2a58a22..653c390c64b 100644
--- a/api/src/opentrons/protocol_engine/commands/move_labware.py
+++ b/api/src/opentrons/protocol_engine/commands/move_labware.py
@@ -5,16 +5,20 @@
from typing import TYPE_CHECKING, Optional, Type
from typing_extensions import Literal
+from opentrons.types import Point
from ..types import (
LabwareLocation,
+ DeckSlotLocation,
OnLabwareLocation,
+ AddressableAreaLocation,
LabwareMovementStrategy,
LabwareOffsetVector,
LabwareMovementOffsetData,
)
from ..errors import LabwareMovementNotAllowedError, NotSupportedOnRobotType
-from ..resources import labware_validation
+from ..resources import labware_validation, fixture_validation
from .command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
+from opentrons_shared_data.gripper.constants import GRIPPER_PADDLE_WIDTH
if TYPE_CHECKING:
from ..execution import EquipmentHandler, RunControlHandler, LabwareMovementHandler
@@ -24,6 +28,10 @@
MoveLabwareCommandType = Literal["moveLabware"]
+# Extra buffer on top of minimum distance to move to the right
+_TRASH_CHUTE_DROP_BUFFER_MM = 8
+
+
# TODO (spp, 2022-12-14): https://opentrons.atlassian.net/browse/RLAB-237
class MoveLabwareParams(BaseModel):
"""Input parameters for a ``moveLabware`` command."""
@@ -83,7 +91,9 @@ def __init__(
self._labware_movement = labware_movement
self._run_control = run_control
- async def execute(self, params: MoveLabwareParams) -> MoveLabwareResult:
+ async def execute( # noqa: C901
+ self, params: MoveLabwareParams
+ ) -> MoveLabwareResult:
"""Move a loaded labware to a new location."""
# Allow propagation of LabwareNotLoadedError.
current_labware = self._state_view.labware.get(labware_id=params.labwareId)
@@ -91,12 +101,44 @@ async def execute(self, params: MoveLabwareParams) -> MoveLabwareResult:
labware_id=params.labwareId
)
definition_uri = current_labware.definitionUri
+ post_drop_slide_offset: Optional[Point] = None
if self._state_view.labware.is_fixed_trash(params.labwareId):
raise LabwareMovementNotAllowedError(
f"Cannot move fixed trash labware '{current_labware_definition.parameters.loadName}'."
)
+ if isinstance(params.newLocation, AddressableAreaLocation):
+ area_name = params.newLocation.addressableAreaName
+ if not fixture_validation.is_gripper_waste_chute(
+ area_name
+ ) and not fixture_validation.is_deck_slot(area_name):
+ raise LabwareMovementNotAllowedError(
+ f"Cannot move {current_labware.loadName} to addressable area {area_name}"
+ )
+ self._state_view.addressable_areas.raise_if_area_not_in_deck_configuration(
+ area_name
+ )
+
+ if fixture_validation.is_gripper_waste_chute(area_name):
+ # When dropping off labware in the waste chute, some bigger pieces
+ # of labware (namely tipracks) can get stuck between a gripper
+ # paddle and the bottom of the waste chute, even after the gripper
+ # has homed all the way to the top of its travel. We add a "post-drop
+ # slide" to dropoffs in the waste chute in order to guarantee that the
+ # labware can drop fully through the chute before the gripper jaws close.
+ post_drop_slide_offset = Point(
+ x=(current_labware_definition.dimensions.xDimension / 2.0)
+ + (GRIPPER_PADDLE_WIDTH / 2.0)
+ + _TRASH_CHUTE_DROP_BUFFER_MM,
+ y=0,
+ z=0,
+ )
+ elif isinstance(params.newLocation, DeckSlotLocation):
+ self._state_view.addressable_areas.raise_if_area_not_in_deck_configuration(
+ params.newLocation.slotName.id
+ )
+
available_new_location = self._state_view.geometry.ensure_location_not_occupied(
location=params.newLocation
)
@@ -157,12 +199,14 @@ async def execute(self, params: MoveLabwareParams) -> MoveLabwareResult:
pickUpOffset=params.pickUpOffset or LabwareOffsetVector(x=0, y=0, z=0),
dropOffset=params.dropOffset or LabwareOffsetVector(x=0, y=0, z=0),
)
+
# Skips gripper moves when using virtual gripper
await self._labware_movement.move_labware_with_gripper(
labware_id=params.labwareId,
current_location=validated_current_loc,
new_location=validated_new_loc,
user_offset_data=user_offset_data,
+ post_drop_slide_offset=post_drop_slide_offset,
)
elif params.strategy == LabwareMovementStrategy.MANUAL_MOVE_WITH_PAUSE:
# Pause to allow for manual labware movement
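The waste-chute branch above adds a post-drop slide so the gripper clears wide labware after releasing it: half the labware's x dimension, plus half the paddle width, plus the 8 mm buffer. A quick sketch of that arithmetic (the paddle width used here is a placeholder, not the real shared-data constant):

```python
from typing import NamedTuple

GRIPPER_PADDLE_WIDTH_MM = 50.0  # placeholder; the real value lives in shared-data
TRASH_CHUTE_DROP_BUFFER_MM = 8.0


class Point(NamedTuple):
    x: float
    y: float
    z: float


def post_drop_slide_offset(labware_x_dimension_mm: float) -> Point:
    """Half the labware width + half the paddle width + a safety buffer, in +x."""
    return Point(
        x=labware_x_dimension_mm / 2.0
        + GRIPPER_PADDLE_WIDTH_MM / 2.0
        + TRASH_CHUTE_DROP_BUFFER_MM,
        y=0.0,
        z=0.0,
    )


# e.g. with this placeholder paddle width, a 127.8 mm-wide tip rack slides ~97 mm right.
print(post_drop_slide_offset(127.8))
```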
diff --git a/api/src/opentrons/protocol_engine/commands/move_to_addressable_area.py b/api/src/opentrons/protocol_engine/commands/move_to_addressable_area.py
new file mode 100644
index 00000000000..7dfc0b53895
--- /dev/null
+++ b/api/src/opentrons/protocol_engine/commands/move_to_addressable_area.py
@@ -0,0 +1,130 @@
+"""Move to addressable area command request, result, and implementation models."""
+from __future__ import annotations
+from pydantic import Field
+from typing import TYPE_CHECKING, Optional, Type
+from typing_extensions import Literal
+
+from ..errors import LocationNotAccessibleByPipetteError
+from ..types import DeckPoint, AddressableOffsetVector
+from ..resources import fixture_validation
+from .pipetting_common import (
+ PipetteIdMixin,
+ MovementMixin,
+ DestinationPositionResult,
+)
+from .command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
+
+if TYPE_CHECKING:
+ from ..execution import MovementHandler
+ from ..state import StateView
+
+MoveToAddressableAreaCommandType = Literal["moveToAddressableArea"]
+
+
+class MoveToAddressableAreaParams(PipetteIdMixin, MovementMixin):
+ """Payload required to move a pipette to a specific addressable area.
+
+ An *addressable area* is a space in the robot that may or may not be usable depending on how
+ the robot's deck is configured. For example, if a Flex is configured with a waste chute, it will
+ have additional addressable areas representing the opening of the waste chute, where tips and
+ labware can be dropped.
+
+ This moves the pipette so all of its nozzles are centered over the addressable area.
+ If the pipette is currently configured with a partial tip layout, this centering is over all
+ the pipette's physical nozzles, not just the nozzles that are active.
+
+ The z-position will be chosen to put the bottom of the tips---or the bottom of the nozzles,
+ if there are no tips---level with the top of the addressable area.
+
+ When this command is executed, Protocol Engine will make sure the robot's deck is configured
+ such that the requested addressable area actually exists. For example, if you request
+ the addressable area B4, it will make sure the robot is set up with a B3/B4 staging area slot.
+ If that's not the case, the command will fail.
+ """
+
+ addressableAreaName: str = Field(
+ ...,
+ description=(
+ "The name of the addressable area that you want to use."
+ " Valid values are the `id`s of `addressableArea`s in the"
+ " [deck definition](https://github.com/Opentrons/opentrons/tree/edge/shared-data/deck)."
+ ),
+ )
+ offset: AddressableOffsetVector = Field(
+ AddressableOffsetVector(x=0, y=0, z=0),
+ description="Relative offset of addressable area to move pipette's critical point.",
+ )
+ stayAtHighestPossibleZ: bool = Field(
+ False,
+ description=(
+ "If `true`, the pipette will retract to its highest possible height"
+ " and stay there instead of descending to the destination."
+ " `minimumZHeight` will be ignored."
+ ),
+ )
+
+
+class MoveToAddressableAreaResult(DestinationPositionResult):
+ """Result data from the execution of a MoveToAddressableArea command."""
+
+ pass
+
+
+class MoveToAddressableAreaImplementation(
+ AbstractCommandImpl[MoveToAddressableAreaParams, MoveToAddressableAreaResult]
+):
+ """Move to addressable area command implementation."""
+
+ def __init__(
+ self, movement: MovementHandler, state_view: StateView, **kwargs: object
+ ) -> None:
+ self._movement = movement
+ self._state_view = state_view
+
+ async def execute(
+ self, params: MoveToAddressableAreaParams
+ ) -> MoveToAddressableAreaResult:
+ """Move the requested pipette to the requested addressable area."""
+ self._state_view.addressable_areas.raise_if_area_not_in_deck_configuration(
+ params.addressableAreaName
+ )
+
+ if fixture_validation.is_staging_slot(params.addressableAreaName):
+ raise LocationNotAccessibleByPipetteError(
+ f"Cannot move pipette to staging slot {params.addressableAreaName}"
+ )
+
+ x, y, z = await self._movement.move_to_addressable_area(
+ pipette_id=params.pipetteId,
+ addressable_area_name=params.addressableAreaName,
+ offset=params.offset,
+ force_direct=params.forceDirect,
+ minimum_z_height=params.minimumZHeight,
+ speed=params.speed,
+ stay_at_highest_possible_z=params.stayAtHighestPossibleZ,
+ )
+
+ return MoveToAddressableAreaResult(position=DeckPoint(x=x, y=y, z=z))
+
+
+class MoveToAddressableArea(
+ BaseCommand[MoveToAddressableAreaParams, MoveToAddressableAreaResult]
+):
+ """Move to addressable area command model."""
+
+ commandType: MoveToAddressableAreaCommandType = "moveToAddressableArea"
+ params: MoveToAddressableAreaParams
+ result: Optional[MoveToAddressableAreaResult]
+
+ _ImplementationCls: Type[
+ MoveToAddressableAreaImplementation
+ ] = MoveToAddressableAreaImplementation
+
+
+class MoveToAddressableAreaCreate(BaseCommandCreate[MoveToAddressableAreaParams]):
+ """Move to addressable area command creation request model."""
+
+ commandType: MoveToAddressableAreaCommandType = "moveToAddressableArea"
+ params: MoveToAddressableAreaParams
+
+ _CommandCls: Type[MoveToAddressableArea] = MoveToAddressableArea
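For reference, a `moveToAddressableArea` command creation request could look like the following when posted to the engine; the request wrapper shape and the area name are illustrative assumptions, not taken from this diff:

```python
move_to_area_request = {
    "data": {
        "commandType": "moveToAddressableArea",
        "params": {
            "pipetteId": "pipette-1",
            "addressableAreaName": "movableTrashA3",  # hypothetical area id
            "offset": {"x": 0, "y": 0, "z": 10},
            "stayAtHighestPossibleZ": False,
        },
    }
}
```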
diff --git a/api/src/opentrons/protocol_engine/commands/move_to_addressable_area_for_drop_tip.py b/api/src/opentrons/protocol_engine/commands/move_to_addressable_area_for_drop_tip.py
new file mode 100644
index 00000000000..dc79714c829
--- /dev/null
+++ b/api/src/opentrons/protocol_engine/commands/move_to_addressable_area_for_drop_tip.py
@@ -0,0 +1,159 @@
+"""Move to addressable area for drop tip command request, result, and implementation models."""
+from __future__ import annotations
+from pydantic import Field
+from typing import TYPE_CHECKING, Optional, Type
+from typing_extensions import Literal
+
+from ..errors import LocationNotAccessibleByPipetteError
+from ..types import DeckPoint, AddressableOffsetVector
+from ..resources import fixture_validation
+from .pipetting_common import (
+ PipetteIdMixin,
+ MovementMixin,
+ DestinationPositionResult,
+)
+from .command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
+
+if TYPE_CHECKING:
+ from ..execution import MovementHandler
+ from ..state import StateView
+
+MoveToAddressableAreaForDropTipCommandType = Literal["moveToAddressableAreaForDropTip"]
+
+
+class MoveToAddressableAreaForDropTipParams(PipetteIdMixin, MovementMixin):
+ """Payload required to move a pipette to a specific addressable area.
+
+ An *addressable area* is a space in the robot that may or may not be usable depending on how
+ the robot's deck is configured. For example, if a Flex is configured with a waste chute, it will
+ have additional addressable areas representing the opening of the waste chute, where tips and
+ labware can be dropped.
+
+ This moves the pipette so all of its nozzles are centered over the addressable area.
+ If the pipette is currently configured with a partial tip layout, this centering is over all
+ the pipette's physical nozzles, not just the nozzles that are active.
+
+ The z-position will be chosen to put the bottom of the tips---or the bottom of the nozzles,
+ if there are no tips---level with the top of the addressable area.
+
+ When this command is executed, Protocol Engine will make sure the robot's deck is configured
+ such that the requested addressable area actually exists. For example, if you request
+ the addressable area B4, it will make sure the robot is set up with a B3/B4 staging area slot.
+ If that's not the case, the command will fail.
+ """
+
+ addressableAreaName: str = Field(
+ ...,
+ description=(
+ "The name of the addressable area that you want to use."
+ " Valid values are the `id`s of `addressableArea`s in the"
+ " [deck definition](https://github.com/Opentrons/opentrons/tree/edge/shared-data/deck)."
+ ),
+ )
+ offset: AddressableOffsetVector = Field(
+ AddressableOffsetVector(x=0, y=0, z=0),
+ description="Relative offset of addressable area to move pipette's critical point.",
+ )
+ alternateDropLocation: Optional[bool] = Field(
+ False,
+ description=(
+ "Whether to alternate location where tip is dropped within the addressable area."
+ " If True, this command will ignore the offset provided and alternate"
+ " between dropping tips at two predetermined locations inside the specified"
+ " labware well."
+ " If False, the tip will be dropped at the top center of the area."
+ ),
+ )
+ ignoreTipConfiguration: Optional[bool] = Field(
+ True,
+ description=(
+ "Whether to utilize the critical point of the tip configuraiton when moving to an addressable area."
+ " If True, this command will ignore the tip configuration and use the center of the entire instrument"
+ " as the critical point for movement."
+ " If False, this command will use the critical point provided by the current tip configuration."
+ ),
+ )
+
+
+class MoveToAddressableAreaForDropTipResult(DestinationPositionResult):
+ """Result data from the execution of a MoveToAddressableAreaForDropTip command."""
+
+ pass
+
+
+class MoveToAddressableAreaForDropTipImplementation(
+ AbstractCommandImpl[
+ MoveToAddressableAreaForDropTipParams, MoveToAddressableAreaForDropTipResult
+ ]
+):
+ """Move to addressable area for drop tip command implementation."""
+
+ def __init__(
+ self, movement: MovementHandler, state_view: StateView, **kwargs: object
+ ) -> None:
+ self._movement = movement
+ self._state_view = state_view
+
+ async def execute(
+ self, params: MoveToAddressableAreaForDropTipParams
+ ) -> MoveToAddressableAreaForDropTipResult:
+ """Move the requested pipette to the requested addressable area in preperation of a drop tip."""
+ self._state_view.addressable_areas.raise_if_area_not_in_deck_configuration(
+ params.addressableAreaName
+ )
+
+ if fixture_validation.is_staging_slot(params.addressableAreaName):
+ raise LocationNotAccessibleByPipetteError(
+ f"Cannot move pipette to staging slot {params.addressableAreaName}"
+ )
+
+ if params.alternateDropLocation:
+ offset = self._state_view.geometry.get_next_tip_drop_location_for_addressable_area(
+ addressable_area_name=params.addressableAreaName,
+ pipette_id=params.pipetteId,
+ )
+ else:
+ offset = params.offset
+
+ x, y, z = await self._movement.move_to_addressable_area(
+ pipette_id=params.pipetteId,
+ addressable_area_name=params.addressableAreaName,
+ offset=offset,
+ force_direct=params.forceDirect,
+ minimum_z_height=params.minimumZHeight,
+ speed=params.speed,
+ ignore_tip_configuration=params.ignoreTipConfiguration,
+ )
+
+ return MoveToAddressableAreaForDropTipResult(position=DeckPoint(x=x, y=y, z=z))
+
+
+class MoveToAddressableAreaForDropTip(
+ BaseCommand[
+ MoveToAddressableAreaForDropTipParams, MoveToAddressableAreaForDropTipResult
+ ]
+):
+ """Move to addressable area for drop tip command model."""
+
+ commandType: MoveToAddressableAreaForDropTipCommandType = (
+ "moveToAddressableAreaForDropTip"
+ )
+ params: MoveToAddressableAreaForDropTipParams
+ result: Optional[MoveToAddressableAreaForDropTipResult]
+
+ _ImplementationCls: Type[
+ MoveToAddressableAreaForDropTipImplementation
+ ] = MoveToAddressableAreaForDropTipImplementation
+
+
+class MoveToAddressableAreaForDropTipCreate(
+ BaseCommandCreate[MoveToAddressableAreaForDropTipParams]
+):
+ """Move to addressable area for drop tip command creation request model."""
+
+ commandType: MoveToAddressableAreaForDropTipCommandType = (
+ "moveToAddressableAreaForDropTip"
+ )
+ params: MoveToAddressableAreaForDropTipParams
+
+ _CommandCls: Type[MoveToAddressableAreaForDropTip] = MoveToAddressableAreaForDropTip
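When `alternateDropLocation` is set, the engine asks state for the next of two predetermined drop offsets so tips don't mound up in one spot. A toy illustration of that alternation (the offsets and the cycling here are illustrative, not the engine's real geometry code):

```python
from itertools import cycle

_drop_offsets = cycle([{"x": -10, "y": 0, "z": 0}, {"x": 10, "y": 0, "z": 0}])


def next_tip_drop_offset() -> dict:
    """Return the next alternating drop offset for the addressable area."""
    return next(_drop_offsets)


print(next_tip_drop_offset())  # {'x': -10, 'y': 0, 'z': 0}
print(next_tip_drop_offset())  # {'x': 10, 'y': 0, 'z': 0}
```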
diff --git a/api/src/opentrons/protocol_engine/commands/pipetting_common.py b/api/src/opentrons/protocol_engine/commands/pipetting_common.py
index a2dc1c8e5cd..2d165e4894a 100644
--- a/api/src/opentrons/protocol_engine/commands/pipetting_common.py
+++ b/api/src/opentrons/protocol_engine/commands/pipetting_common.py
@@ -14,14 +14,29 @@ class PipetteIdMixin(BaseModel):
)
-class VolumeMixin(BaseModel):
- """Mixin for command requests that take a volume of liquid."""
+class AspirateVolumeMixin(BaseModel):
+ """Mixin for the `volume` field of aspirate commands."""
volume: float = Field(
...,
- description="Amount of liquid in uL. Must be greater than 0 and less "
- "than a pipette-specific maximum volume.",
- gt=0,
+ description="The amount of liquid to aspirate, in µL."
+ " Must not be greater than the remaining available amount, which depends on"
+ " the pipette (see `loadPipette`), its configuration (see `configureForVolume`),"
+ " the tip (see `pickUpTip`), and the amount you've aspirated so far."
+ " There is some tolerance for floating point rounding errors.",
+ ge=0,
+ )
+
+
+class DispenseVolumeMixin(BaseModel):
+ """Mixin for the `volume` field of dispense commands."""
+
+ volume: float = Field(
+ ...,
+ description="The amount of liquid to dispense, in µL."
+ " Must not be greater than the currently aspirated volume."
+ " There is some tolerance for floating point rounding errors.",
+ ge=0,
)
@@ -88,7 +103,7 @@ class BaseLiquidHandlingResult(BaseModel):
volume: float = Field(
...,
description="Amount of liquid in uL handled in the operation.",
- gt=0,
+ ge=0,
)
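Splitting `VolumeMixin` into aspirate/dispense variants also relaxes the constraint from `gt=0` to `ge=0`, so zero-volume operations validate while negative volumes are still rejected. A minimal pydantic sketch of the new bound:

```python
from pydantic import BaseModel, Field, ValidationError


class AspirateVolumeMixin(BaseModel):
    volume: float = Field(..., ge=0)


AspirateVolumeMixin(volume=0)  # valid under ge=0 (it was rejected under gt=0)

try:
    AspirateVolumeMixin(volume=-1)
except ValidationError:
    print("negative volumes are still rejected")
```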
diff --git a/api/src/opentrons/protocol_engine/commands/reload_labware.py b/api/src/opentrons/protocol_engine/commands/reload_labware.py
new file mode 100644
index 00000000000..247f717feb9
--- /dev/null
+++ b/api/src/opentrons/protocol_engine/commands/reload_labware.py
@@ -0,0 +1,86 @@
+"""Reload labware command request, result, and implementation models."""
+from __future__ import annotations
+from pydantic import BaseModel, Field
+from typing import TYPE_CHECKING, Optional, Type
+from typing_extensions import Literal
+
+from .command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
+
+if TYPE_CHECKING:
+ from ..state import StateView
+ from ..execution import EquipmentHandler
+
+
+ReloadLabwareCommandType = Literal["reloadLabware"]
+
+
+class ReloadLabwareParams(BaseModel):
+ """Payload required to load a labware into a slot."""
+
+ labwareId: str = Field(
+ ..., description="The already-loaded labware instance to update."
+ )
+
+
+class ReloadLabwareResult(BaseModel):
+ """Result data from the execution of a LoadLabware command."""
+
+ labwareId: str = Field(
+ ...,
+ description="An ID to reference this labware in subsequent commands. Same as the one in the parameters.",
+ )
+ offsetId: Optional[str] = Field(
+ # Default `None` instead of `...` so this field shows up as non-required in
+ # OpenAPI. The server is allowed to omit it or make it null.
+ None,
+ description=(
+ "An ID referencing the labware offset that will apply"
+ " to the reloaded labware."
+ " This offset will be in effect until the labware is moved"
+ " with a `moveLabware` command."
+ " Null or undefined means no offset applies,"
+ " so the default of (0, 0, 0) will be used."
+ ),
+ )
+
+
+class ReloadLabwareImplementation(
+ AbstractCommandImpl[ReloadLabwareParams, ReloadLabwareResult]
+):
+ """Reload labware command implementation."""
+
+ def __init__(
+ self, equipment: EquipmentHandler, state_view: StateView, **kwargs: object
+ ) -> None:
+ self._equipment = equipment
+ self._state_view = state_view
+
+ async def execute(self, params: ReloadLabwareParams) -> ReloadLabwareResult:
+ """Reload the definition and calibration data for a specific labware."""
+ reloaded_labware = await self._equipment.reload_labware(
+ labware_id=params.labwareId,
+ )
+
+ return ReloadLabwareResult(
+ labwareId=params.labwareId,
+ offsetId=reloaded_labware.offsetId,
+ )
+
+
+class ReloadLabware(BaseCommand[ReloadLabwareParams, ReloadLabwareResult]):
+ """Reload labware command resource model."""
+
+ commandType: ReloadLabwareCommandType = "reloadLabware"
+ params: ReloadLabwareParams
+ result: Optional[ReloadLabwareResult]
+
+ _ImplementationCls: Type[ReloadLabwareImplementation] = ReloadLabwareImplementation
+
+
+class ReloadLabwareCreate(BaseCommandCreate[ReloadLabwareParams]):
+ """Reload labware command creation request."""
+
+ commandType: ReloadLabwareCommandType = "reloadLabware"
+ params: ReloadLabwareParams
+
+ _CommandCls: Type[ReloadLabware] = ReloadLabware
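A `reloadLabware` request only needs the existing labware ID; the command re-resolves the definition and any applicable offset in place. A hedged example payload (the request wrapper is assumed):

```python
reload_request = {
    "data": {
        "commandType": "reloadLabware",
        "params": {"labwareId": "labware-1"},
    }
}
# The result echoes labwareId and reports the offsetId now in effect, if any.
```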
diff --git a/api/src/opentrons/protocol_engine/commands/save_position.py b/api/src/opentrons/protocol_engine/commands/save_position.py
index 5e38b902058..a45937a73e8 100644
--- a/api/src/opentrons/protocol_engine/commands/save_position.py
+++ b/api/src/opentrons/protocol_engine/commands/save_position.py
@@ -26,6 +26,9 @@ class SavePositionParams(BaseModel):
description="An optional ID to assign to this command instance. "
"Auto-assigned if not defined.",
)
+ failOnNotHomed: Optional[bool] = Field(
+ True, descrption="Require all axes to be homed before saving position."
+ )
class SavePositionResult(BaseModel):
@@ -58,8 +61,11 @@ def __init__(
async def execute(self, params: SavePositionParams) -> SavePositionResult:
"""Check the requested pipette's current position."""
position_id = self._model_utils.ensure_id(params.positionId)
+ fail_on_not_homed = (
+ params.failOnNotHomed if params.failOnNotHomed is not None else True
+ )
x, y, z = await self._gantry_mover.get_position(
- pipette_id=params.pipetteId, fail_on_not_homed=True
+ pipette_id=params.pipetteId, fail_on_not_homed=fail_on_not_homed
)
return SavePositionResult(
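Because `failOnNotHomed` is optional, the implementation treats a missing or null value as the previous hard-coded `True`. A tiny sketch of that defaulting:

```python
from typing import Optional


def resolve_fail_on_not_homed(param: Optional[bool]) -> bool:
    """None means 'keep the old strict behavior'."""
    return param if param is not None else True


assert resolve_fail_on_not_homed(None) is True
assert resolve_fail_on_not_homed(False) is False
```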
diff --git a/api/src/opentrons/protocol_engine/commands/verify_tip_presence.py b/api/src/opentrons/protocol_engine/commands/verify_tip_presence.py
new file mode 100644
index 00000000000..67aa5d1dc34
--- /dev/null
+++ b/api/src/opentrons/protocol_engine/commands/verify_tip_presence.py
@@ -0,0 +1,86 @@
+"""Verify tip presence command request, result and implementation models."""
+from __future__ import annotations
+
+from pydantic import Field, BaseModel
+from typing import TYPE_CHECKING, Optional, Type
+from typing_extensions import Literal
+
+from .pipetting_common import PipetteIdMixin
+from .command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
+
+from ..types import TipPresenceStatus, InstrumentSensorId
+
+if TYPE_CHECKING:
+ from ..execution import TipHandler
+
+
+VerifyTipPresenceCommandType = Literal["verifyTipPresence"]
+
+
+class VerifyTipPresenceParams(PipetteIdMixin):
+ """Payload required for a VerifyTipPresence command."""
+
+ expectedState: TipPresenceStatus = Field(
+ ..., description="The expected tip presence status on the pipette."
+ )
+ followSingularSensor: Optional[InstrumentSensorId] = Field(
+ default=None, description="The sensor id to follow if the other can be ignored."
+ )
+
+
+class VerifyTipPresenceResult(BaseModel):
+ """Result data from the execution of a VerifyTipPresence command."""
+
+ pass
+
+
+class VerifyTipPresenceImplementation(
+ AbstractCommandImpl[VerifyTipPresenceParams, VerifyTipPresenceResult]
+):
+ """VerifyTipPresence command implementation."""
+
+ def __init__(
+ self,
+ tip_handler: TipHandler,
+ **kwargs: object,
+ ) -> None:
+ self._tip_handler = tip_handler
+
+ async def execute(self, params: VerifyTipPresenceParams) -> VerifyTipPresenceResult:
+ """Verify if tip presence is as expected for the requested pipette."""
+ pipette_id = params.pipetteId
+ expected_state = params.expectedState
+ follow_singular_sensor = (
+ InstrumentSensorId.to_instrument_probe_type(params.followSingularSensor)
+ if params.followSingularSensor
+ else None
+ )
+
+ await self._tip_handler.verify_tip_presence(
+ pipette_id=pipette_id,
+ expected=expected_state,
+ follow_singular_sensor=follow_singular_sensor,
+ )
+
+ return VerifyTipPresenceResult()
+
+
+class VerifyTipPresence(BaseCommand[VerifyTipPresenceParams, VerifyTipPresenceResult]):
+ """VerifyTipPresence command model."""
+
+ commandType: VerifyTipPresenceCommandType = "verifyTipPresence"
+ params: VerifyTipPresenceParams
+ result: Optional[VerifyTipPresenceResult]
+
+ _ImplementationCls: Type[
+ VerifyTipPresenceImplementation
+ ] = VerifyTipPresenceImplementation
+
+
+class VerifyTipPresenceCreate(BaseCommandCreate[VerifyTipPresenceParams]):
+ """VerifyTipPresence command creation request model."""
+
+ commandType: VerifyTipPresenceCommandType = "verifyTipPresence"
+ params: VerifyTipPresenceParams
+
+ _CommandCls: Type[VerifyTipPresence] = VerifyTipPresence
diff --git a/api/src/opentrons/protocol_engine/create_protocol_engine.py b/api/src/opentrons/protocol_engine/create_protocol_engine.py
index adb4657d2af..ab91b5fabaa 100644
--- a/api/src/opentrons/protocol_engine/create_protocol_engine.py
+++ b/api/src/opentrons/protocol_engine/create_protocol_engine.py
@@ -10,20 +10,26 @@
from .protocol_engine import ProtocolEngine
from .resources import DeckDataProvider, ModuleDataProvider
from .state import Config, StateStore
-from .types import PostRunHardwareState
+from .types import PostRunHardwareState, DeckConfigurationType
# TODO(mm, 2023-06-16): Arguably, this not being a context manager makes us prone to forgetting to
# clean it up properly, especially in tests. See e.g. https://opentrons.atlassian.net/browse/RSS-222
async def create_protocol_engine(
- hardware_api: HardwareControlAPI, config: Config, load_fixed_trash: bool = False
+ hardware_api: HardwareControlAPI,
+ config: Config,
+ load_fixed_trash: bool = False,
+ deck_configuration: typing.Optional[DeckConfigurationType] = None,
+ notify_publishers: typing.Optional[typing.Callable[[], None]] = None,
) -> ProtocolEngine:
"""Create a ProtocolEngine instance.
Arguments:
hardware_api: Hardware control API to pass down to dependencies.
config: ProtocolEngine configuration.
- load_fixed_trash: Automatically load fixed trash labware in engine
+ load_fixed_trash: Automatically load fixed trash labware in engine.
+ deck_configuration: The initial deck configuration the engine will be instantiated with.
+ notify_publishers: Notifies robot server publishers of internal state change.
"""
deck_data = DeckDataProvider(config.deck_type)
deck_definition = await deck_data.get_deck_definition()
@@ -40,6 +46,8 @@ async def create_protocol_engine(
deck_fixed_labware=deck_fixed_labware,
is_door_open=hardware_api.door_state is DoorState.OPEN,
module_calibration_offsets=module_calibration_offsets,
+ deck_configuration=deck_configuration,
+ notify_publishers=notify_publishers,
)
return ProtocolEngine(state_store=state_store, hardware_api=hardware_api)
@@ -101,6 +109,9 @@ async def _protocol_engine(
load_fixed_trash=load_fixed_trash,
)
try:
+ # TODO(mm, 2023-11-21): Callers like opentrons.execute need to be able to pass in
+ # the deck_configuration argument to ProtocolEngine.play().
+ # https://opentrons.atlassian.net/browse/RSS-400
protocol_engine.play()
yield protocol_engine
finally:
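Callers that build an engine directly can now seed the initial deck configuration and register a publisher callback. A hedged sketch of threading the new arguments through (the hardware and config objects are assumed to come from elsewhere, and the import path is assumed):

```python
from opentrons.protocol_engine import create_protocol_engine  # import path assumed


async def build_engine(hardware_api, config, deck_configuration):
    def notify_publishers() -> None:
        # robot-server would fan this out to its notification clients
        print("engine state changed")

    return await create_protocol_engine(
        hardware_api=hardware_api,
        config=config,
        load_fixed_trash=False,
        deck_configuration=deck_configuration,
        notify_publishers=notify_publishers,
    )
```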
diff --git a/api/src/opentrons/protocol_engine/error_recovery_policy.py b/api/src/opentrons/protocol_engine/error_recovery_policy.py
new file mode 100644
index 00000000000..6285e7ae37a
--- /dev/null
+++ b/api/src/opentrons/protocol_engine/error_recovery_policy.py
@@ -0,0 +1,76 @@
+# noqa: D100
+
+import enum
+from typing import Protocol
+
+from opentrons_shared_data.errors import EnumeratedError, ErrorCodes
+
+from opentrons.config import feature_flags as ff
+from opentrons.protocol_engine.commands import Command
+
+
+class ErrorRecoveryType(enum.Enum):
+ """Ways to handle a command failure."""
+
+ FAIL_RUN = enum.auto()
+ """Permanently fail the entire run.
+
+ TODO(mm, 2024-03-18): This might be a misnomer because failing the run is not
+ a decision that's up to Protocol Engine. It's decided by what the caller supplies
+ to `ProtocolEngine.finish()`. For example, a Python protocol can
+ theoretically swallow the exception and continue on.
+ """
+
+ WAIT_FOR_RECOVERY = enum.auto()
+ """Stop and wait for the error to be recovered from manually."""
+
+ # TODO(mm, 2023-03-18): Add something like this for
+ # https://opentrons.atlassian.net/browse/EXEC-302.
+ # CONTINUE = enum.auto()
+ # """Continue with the run, as if the command never failed."""
+
+
+class ErrorRecoveryPolicy(Protocol):
+ """An interface to decide how to handle a command failure.
+
+ This describes a function that Protocol Engine calls after each command failure,
+ with the details of that failure. The implementation should inspect those details
+ and return an appropriate `ErrorRecoveryType`.
+ """
+
+ @staticmethod
+ def __call__( # noqa: D102
+ failed_command: Command, exception: Exception
+ ) -> ErrorRecoveryType:
+ ...
+
+
+def error_recovery_by_ff(
+ failed_command: Command, exception: Exception
+) -> ErrorRecoveryType:
+ """Use API feature flags to decide how to handle an error.
+
+ This is just for development. This should be replaced by a proper config
+ system exposed through robot-server's HTTP API.
+ """
+ # todo(mm, 2024-03-18): Do we need to do anything explicit here to disable
+ # error recovery on the OT-2?
+ if ff.enable_error_recovery_experiments() and _is_recoverable(
+ failed_command, exception
+ ):
+ return ErrorRecoveryType.WAIT_FOR_RECOVERY
+ else:
+ return ErrorRecoveryType.FAIL_RUN
+
+
+def _is_recoverable(failed_command: Command, exception: Exception) -> bool:
+ if (
+ failed_command.commandType == "pickUpTip"
+ and isinstance(exception, EnumeratedError)
+ # Hack(?): It seems like this should be ErrorCodes.TIP_PICKUP_FAILED, but that's
+ # not what gets raised in practice.
+ and exception.code == ErrorCodes.UNEXPECTED_TIP_REMOVAL
+ ):
+ return True
+ else:
+ return False
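Any callable matching the `ErrorRecoveryPolicy` protocol can be supplied in place of `error_recovery_by_ff`. For example, a policy that waits for recovery on every tip-pickup failure and fails the run otherwise might look like this sketch:

```python
from opentrons.protocol_engine.commands import Command
from opentrons.protocol_engine.error_recovery_policy import ErrorRecoveryType


def recover_tip_pickups_only(failed_command: Command, exception: Exception) -> ErrorRecoveryType:
    """Treat only pickUpTip failures as recoverable; fail the run for anything else."""
    if failed_command.commandType == "pickUpTip":
        return ErrorRecoveryType.WAIT_FOR_RECOVERY
    return ErrorRecoveryType.FAIL_RUN
```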
diff --git a/api/src/opentrons/protocol_engine/errors/__init__.py b/api/src/opentrons/protocol_engine/errors/__init__.py
index 642d4ff6cd8..994e4cc9ed3 100644
--- a/api/src/opentrons/protocol_engine/errors/__init__.py
+++ b/api/src/opentrons/protocol_engine/errors/__init__.py
@@ -5,6 +5,8 @@
UnexpectedProtocolError,
FailedToLoadPipetteError,
PipetteNotAttachedError,
+ InvalidSpecificationForRobotTypeError,
+ InvalidLoadPipetteSpecsError,
TipNotAttachedError,
TipAttachedError,
CommandDoesNotExistError,
@@ -27,11 +29,17 @@
ModuleNotOnDeckError,
ModuleNotConnectedError,
SlotDoesNotExistError,
+ CutoutDoesNotExistError,
FixtureDoesNotExistError,
+ AddressableAreaDoesNotExistError,
+ FixtureDoesNotProvideAreasError,
+ AreaNotInDeckConfigurationError,
+ IncompatibleAddressableAreaError,
FailedToPlanMoveError,
MustHomeError,
RunStoppedError,
SetupCommandNotAllowedError,
+ FixitCommandNotAllowedError,
ModuleNotAttachedError,
ModuleAlreadyPresentError,
WrongModuleTypeError,
@@ -48,14 +56,18 @@
InvalidHoldTimeError,
CannotPerformModuleAction,
PauseNotAllowedError,
+ ResumeFromRecoveryNotAllowedError,
GripperNotAttachedError,
CannotPerformGripperAction,
HardwareNotSupportedError,
LabwareMovementNotAllowedError,
LabwareIsNotAllowedInLocationError,
LocationIsOccupiedError,
+ LocationNotAccessibleByPipetteError,
+ LocationIsStagingSlotError,
InvalidAxisForRobotType,
NotSupportedOnRobotType,
+ CommandNotAllowedError,
)
from .error_occurrence import ErrorOccurrence, ProtocolCommandFailedError
@@ -66,6 +78,8 @@
"UnexpectedProtocolError",
"FailedToLoadPipetteError",
"PipetteNotAttachedError",
+ "InvalidSpecificationForRobotTypeError",
+ "InvalidLoadPipetteSpecsError",
"TipNotAttachedError",
"TipAttachedError",
"CommandDoesNotExistError",
@@ -88,11 +102,17 @@
"ModuleNotOnDeckError",
"ModuleNotConnectedError",
"SlotDoesNotExistError",
+ "CutoutDoesNotExistError",
"FixtureDoesNotExistError",
+ "AddressableAreaDoesNotExistError",
+ "FixtureDoesNotProvideAreasError",
+ "AreaNotInDeckConfigurationError",
+ "IncompatibleAddressableAreaError",
"FailedToPlanMoveError",
"MustHomeError",
"RunStoppedError",
"SetupCommandNotAllowedError",
+ "FixitCommandNotAllowedError",
"ModuleNotAttachedError",
"ModuleAlreadyPresentError",
"WrongModuleTypeError",
@@ -108,6 +128,7 @@
"InvalidBlockVolumeError",
"InvalidHoldTimeError",
"CannotPerformModuleAction",
+ "ResumeFromRecoveryNotAllowedError",
"PauseNotAllowedError",
"ProtocolCommandFailedError",
"GripperNotAttachedError",
@@ -116,8 +137,11 @@
"LabwareMovementNotAllowedError",
"LabwareIsNotAllowedInLocationError",
"LocationIsOccupiedError",
+ "LocationNotAccessibleByPipetteError",
+ "LocationIsStagingSlotError",
"InvalidAxisForRobotType",
"NotSupportedOnRobotType",
# error occurrence models
"ErrorOccurrence",
+ "CommandNotAllowedError",
]
diff --git a/api/src/opentrons/protocol_engine/errors/error_occurrence.py b/api/src/opentrons/protocol_engine/errors/error_occurrence.py
index 12f1289f4f0..570948943ee 100644
--- a/api/src/opentrons/protocol_engine/errors/error_occurrence.py
+++ b/api/src/opentrons/protocol_engine/errors/error_occurrence.py
@@ -24,6 +24,12 @@ def from_failed(
error: Union[ProtocolEngineError, EnumeratedError],
) -> "ErrorOccurrence":
"""Build an ErrorOccurrence from the details available from a FailedAction or FinishAction."""
+ if isinstance(error, ProtocolCommandFailedError) and error.original_error:
+ wrappedErrors = [error.original_error]
+ else:
+ wrappedErrors = [
+ cls.from_failed(id, createdAt, err) for err in error.wrapping
+ ]
return cls.construct(
id=id,
createdAt=createdAt,
@@ -31,9 +37,7 @@ def from_failed(
detail=error.message or str(error),
errorInfo=error.detail,
errorCode=error.code.value.code,
- wrappedErrors=[
- cls.from_failed(id, createdAt, err) for err in error.wrapping
- ],
+ wrappedErrors=wrappedErrors,
)
id: str = Field(..., description="Unique identifier of this error occurrence.")
diff --git a/api/src/opentrons/protocol_engine/errors/exceptions.py b/api/src/opentrons/protocol_engine/errors/exceptions.py
index 7f4304f8097..7f022652d71 100644
--- a/api/src/opentrons/protocol_engine/errors/exceptions.py
+++ b/api/src/opentrons/protocol_engine/errors/exceptions.py
@@ -93,6 +93,32 @@ def __init__(
super().__init__(ErrorCodes.PIPETTE_NOT_PRESENT, message, details, wrapping)
+class InvalidLoadPipetteSpecsError(ProtocolEngineError):
+ """Raised when a loadPipette uses invalid specifications."""
+
+ def __init__(
+ self,
+ message: Optional[str] = None,
+ details: Optional[Dict[str, Any]] = None,
+ wrapping: Optional[Sequence[EnumeratedError]] = None,
+ ) -> None:
+ """Build an InvalidLoadPipetteSpecsError."""
+ super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
+
+
+class InvalidSpecificationForRobotTypeError(ProtocolEngineError):
+ """Raised when a command provides invalid specs for the given robot type."""
+
+ def __init__(
+ self,
+ message: Optional[str] = None,
+ details: Optional[Dict[str, Any]] = None,
+ wrapping: Optional[Sequence[EnumeratedError]] = None,
+ ) -> None:
+ """Build an InvalidSpecificationForRobotTypeError."""
+ super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
+
+
class TipNotAttachedError(ProtocolEngineError):
"""Raised when an operation's required pipette tip is not attached."""
@@ -373,8 +399,21 @@ def __init__(
super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
+class CutoutDoesNotExistError(ProtocolEngineError):
+ """Raised when referencing a cutout that does not exist."""
+
+ def __init__(
+ self,
+ message: Optional[str] = None,
+ details: Optional[Dict[str, Any]] = None,
+ wrapping: Optional[Sequence[EnumeratedError]] = None,
+ ) -> None:
+ """Build a CutoutDoesNotExistError."""
+ super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
+
+
class FixtureDoesNotExistError(ProtocolEngineError):
- """Raised when referencing an addressable area (aka fixture) that does not exist."""
+ """Raised when referencing a cutout fixture that does not exist."""
def __init__(
self,
@@ -386,6 +425,58 @@ def __init__(
super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
+class AddressableAreaDoesNotExistError(ProtocolEngineError):
+ """Raised when referencing an addressable area that does not exist."""
+
+ def __init__(
+ self,
+ message: Optional[str] = None,
+ details: Optional[Dict[str, Any]] = None,
+ wrapping: Optional[Sequence[EnumeratedError]] = None,
+ ) -> None:
+ """Build a AddressableAreaDoesNotExistError."""
+ super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
+
+
+class FixtureDoesNotProvideAreasError(ProtocolEngineError):
+ """Raised when a cutout fixture does not provide any addressable areas for a requested cutout."""
+
+ def __init__(
+ self,
+ message: Optional[str] = None,
+ details: Optional[Dict[str, Any]] = None,
+ wrapping: Optional[Sequence[EnumeratedError]] = None,
+ ) -> None:
+ """Build a FixtureDoesNotProvideAreasError."""
+ super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
+
+
+class AreaNotInDeckConfigurationError(ProtocolEngineError):
+ """Raised when an addressable area is referenced that is not provided by a deck configuration."""
+
+ def __init__(
+ self,
+ message: Optional[str] = None,
+ details: Optional[Dict[str, Any]] = None,
+ wrapping: Optional[Sequence[EnumeratedError]] = None,
+ ) -> None:
+ """Build a AreaNotInDeckConfigurationError."""
+ super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
+
+
+class IncompatibleAddressableAreaError(ProtocolEngineError):
+ """Raised when two non-compatible addressable areas are referenced during analysis."""
+
+ def __init__(
+ self,
+ message: Optional[str] = None,
+ details: Optional[Dict[str, Any]] = None,
+ wrapping: Optional[Sequence[EnumeratedError]] = None,
+ ) -> None:
+ """Build a IncompatibleAddressableAreaError."""
+ super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
+
+
# TODO(mc, 2020-11-06): flesh out with structured data to replicate
# existing LabwareHeightError
class FailedToPlanMoveError(ProtocolEngineError):
@@ -414,6 +505,32 @@ def __init__(
super().__init__(ErrorCodes.POSITION_UNKNOWN, message, details, wrapping)
+class CommandNotAllowedError(ProtocolEngineError):
+ """Raised when adding a command with bad data."""
+
+ def __init__(
+ self,
+ message: Optional[str] = None,
+ details: Optional[Dict[str, Any]] = None,
+ wrapping: Optional[Sequence[EnumeratedError]] = None,
+ ) -> None:
+ """Build a CommandNotAllowedError."""
+ super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
+
+
+class FixitCommandNotAllowedError(ProtocolEngineError):
+ """Raised when adding a fixit command to a non-recoverable engine."""
+
+ def __init__(
+ self,
+ message: Optional[str] = None,
+ details: Optional[Dict[str, Any]] = None,
+ wrapping: Optional[Sequence[EnumeratedError]] = None,
+ ) -> None:
+ """Build a SetupCommandNotAllowedError."""
+ super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
+
+
class SetupCommandNotAllowedError(ProtocolEngineError):
"""Raised when adding a setup command to a non-idle/non-paused engine."""
@@ -427,6 +544,19 @@ def __init__(
super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
+class ResumeFromRecoveryNotAllowedError(ProtocolEngineError):
+ """Raised when attempting to resume a run from recovery that has a fixit command in the queue."""
+
+ def __init__(
+ self,
+ message: Optional[str] = None,
+ details: Optional[Dict[str, Any]] = None,
+ wrapping: Optional[Sequence[EnumeratedError]] = None,
+ ) -> None:
+ """Build a ResumeFromRecoveryNotAllowedError."""
+ super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
+
+
class PauseNotAllowedError(ProtocolEngineError):
"""Raised when attempting to pause a run that is not running."""
@@ -741,6 +871,32 @@ def __init__(
super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
+class LocationNotAccessibleByPipetteError(ProtocolEngineError):
+ """Raised when attempting to move pipette to an inaccessible location."""
+
+ def __init__(
+ self,
+ message: Optional[str] = None,
+ details: Optional[Dict[str, Any]] = None,
+ wrapping: Optional[Sequence[EnumeratedError]] = None,
+ ) -> None:
+ """Build a LocationNotAccessibleByPipetteError."""
+ super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
+
+
+class LocationIsStagingSlotError(ProtocolEngineError):
+ """Raised when referencing a labware on a staging slot when trying to get standard deck slot."""
+
+ def __init__(
+ self,
+ message: Optional[str] = None,
+ details: Optional[Dict[str, Any]] = None,
+ wrapping: Optional[Sequence[EnumeratedError]] = None,
+ ) -> None:
+ """Build a LocationIsStagingSlotError."""
+ super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
+
+
class FirmwareUpdateRequired(ProtocolEngineError):
"""Raised when the firmware needs to be updated."""
@@ -769,21 +925,33 @@ def __init__(
super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
-class InvalidPipettingVolumeError(ProtocolEngineError):
+class InvalidAspirateVolumeError(ProtocolEngineError):
"""Raised when pipetting a volume larger than the pipette volume."""
def __init__(
self,
- message: Optional[str] = None,
- details: Optional[Dict[str, Any]] = None,
+ attempted_aspirate_volume: float,
+ available_volume: float,
+ max_pipette_volume: float,
+ max_tip_volume: Optional[float], # None if there's no tip.
wrapping: Optional[Sequence[EnumeratedError]] = None,
) -> None:
"""Build a InvalidPipettingVolumeError."""
+ message = (
+ f"Cannot aspirate {attempted_aspirate_volume} µL when only"
+ f" {available_volume} is available."
+ )
+ details = {
+ "attempted_aspirate_volume": attempted_aspirate_volume,
+ "available_volume": available_volume,
+ "max_pipette_volume": max_pipette_volume,
+ "max_tip_volume": max_tip_volume,
+ }
super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
-class InvalidPushOutVolumeError(ProtocolEngineError):
- """Raised when attempting to use an invalid volume for dispense push_out."""
+class InvalidDispenseVolumeError(ProtocolEngineError):
+ """Raised when attempting to dispense a volume that was not aspirated."""
def __init__(
self,
@@ -791,12 +959,12 @@ def __init__(
details: Optional[Dict[str, Any]] = None,
wrapping: Optional[Sequence[EnumeratedError]] = None,
) -> None:
- """Build a InvalidPushOutVolumeError."""
+ """Build a InvalidDispenseVolumeError."""
super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
-class InvalidDispenseVolumeError(ProtocolEngineError):
- """Raised when attempting to dispense a volume that was not aspirated."""
+class InvalidPushOutVolumeError(ProtocolEngineError):
+ """Raised when attempting to use an invalid volume for dispense push_out."""
def __init__(
self,
@@ -804,7 +972,7 @@ def __init__(
details: Optional[Dict[str, Any]] = None,
wrapping: Optional[Sequence[EnumeratedError]] = None,
) -> None:
- """Build a InvalidDispenseVolumeError."""
+ """Build a InvalidPushOutVolumeError."""
super().__init__(ErrorCodes.GENERAL_ERROR, message, details, wrapping)
@@ -822,16 +990,18 @@ def __init__(
class EStopActivatedError(ProtocolEngineError):
- """Raised when an operation's required pipette tip is not attached."""
+ """Represents an E-stop event."""
def __init__(
self,
- message: Optional[str] = None,
- details: Optional[Dict[str, Any]] = None,
wrapping: Optional[Sequence[EnumeratedError]] = None,
) -> None:
"""Build an EStopActivatedError."""
- super().__init__(ErrorCodes.E_STOP_ACTIVATED, message, details, wrapping)
+ super().__init__(
+ code=ErrorCodes.E_STOP_ACTIVATED,
+ message="E-stop activated.",
+ wrapping=wrapping,
+ )
class NotSupportedOnRobotType(ProtocolEngineError):
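`InvalidAspirateVolumeError` now derives its message and details from structured arguments instead of free-form text. A short usage sketch (import path assumed):

```python
from opentrons.protocol_engine.errors.exceptions import InvalidAspirateVolumeError

try:
    raise InvalidAspirateVolumeError(
        attempted_aspirate_volume=250.0,
        available_volume=180.0,
        max_pipette_volume=1000.0,
        max_tip_volume=200.0,
    )
except InvalidAspirateVolumeError as error:
    # The message and details dict were built from the structured arguments above.
    print(error)
```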
diff --git a/api/src/opentrons/protocol_engine/execution/__init__.py b/api/src/opentrons/protocol_engine/execution/__init__.py
index 5d2da5e6840..80f2dfd0d99 100644
--- a/api/src/opentrons/protocol_engine/execution/__init__.py
+++ b/api/src/opentrons/protocol_engine/execution/__init__.py
@@ -8,6 +8,7 @@
LoadedPipetteData,
LoadedModuleData,
LoadedConfigureForVolumeData,
+ ReloadedLabwareData,
)
from .movement import MovementHandler
from .gantry_mover import GantryMover
@@ -29,6 +30,7 @@
"create_queue_worker",
"EquipmentHandler",
"LoadedLabwareData",
+ "ReloadedLabwareData",
"LoadedPipetteData",
"LoadedModuleData",
"LoadedConfigureForVolumeData",
diff --git a/api/src/opentrons/protocol_engine/execution/command_executor.py b/api/src/opentrons/protocol_engine/execution/command_executor.py
index 828d060b9d3..d00b5c0a96d 100644
--- a/api/src/opentrons/protocol_engine/execution/command_executor.py
+++ b/api/src/opentrons/protocol_engine/execution/command_executor.py
@@ -1,7 +1,7 @@
"""Command side-effect execution logic container."""
import asyncio
from logging import getLogger
-from typing import Optional
+from typing import Optional, List, Protocol
from opentrons.hardware_control import HardwareControlAPI
@@ -11,12 +11,25 @@
PythonException,
)
+from opentrons.protocol_engine.error_recovery_policy import ErrorRecoveryPolicy
+
from ..state import StateStore
from ..resources import ModelUtils
-from ..commands import CommandStatus, AbstractCommandImpl
-from ..actions import ActionDispatcher, UpdateCommandAction, FailCommandAction
+from ..commands import (
+ CommandStatus,
+ AbstractCommandImpl,
+ CommandResult,
+ CommandPrivateResult,
+)
+from ..actions import (
+ ActionDispatcher,
+ RunCommandAction,
+ SucceedCommandAction,
+ FailCommandAction,
+)
from ..errors import RunStoppedError
from ..errors.exceptions import EStopActivatedError as PE_EStopActivatedError
+from ..notes import CommandNote, CommandNoteTracker
from .equipment import EquipmentHandler
from .movement import MovementHandler
from .gantry_mover import GantryMover
@@ -31,6 +44,29 @@
log = getLogger(__name__)
+class CommandNoteTrackerProvider(Protocol):
+ """The correct shape for a function that provides a CommandNoteTracker.
+
+ This function will be called by the executor once for each call to execute().
+ It is mostly useful for testing harnesses.
+ """
+
+ def __call__(self) -> CommandNoteTracker:
+ """Provide a new CommandNoteTracker."""
+ ...
+
+
+class _NoteTracker(CommandNoteTracker):
+ def __init__(self) -> None:
+ self._notes: List[CommandNote] = []
+
+ def __call__(self, note: CommandNote) -> None:
+ self._notes.append(note)
+
+ def get_notes(self) -> List[CommandNote]:
+ return self._notes
+
+
class CommandExecutor:
"""CommandExecutor container class.
@@ -52,7 +88,9 @@ def __init__(
run_control: RunControlHandler,
rail_lights: RailLightsHandler,
status_bar: StatusBarHandler,
+ error_recovery_policy: ErrorRecoveryPolicy,
model_utils: Optional[ModelUtils] = None,
+ command_note_tracker_provider: Optional[CommandNoteTrackerProvider] = None,
) -> None:
"""Initialize the CommandExecutor with access to its dependencies."""
self._hardware_api = hardware_api
@@ -68,6 +106,10 @@ def __init__(
self._rail_lights = rail_lights
self._model_utils = model_utils or ModelUtils()
self._status_bar = status_bar
+ self._command_note_tracker_provider = (
+ command_note_tracker_provider or _NoteTracker
+ )
+ self._error_recovery_policy = error_recovery_policy
async def execute(self, command_id: str) -> None:
"""Run a given command's execution procedure.
@@ -76,8 +118,9 @@ async def execute(self, command_id: str) -> None:
command_id: The identifier of the command to execute. The
command itself will be looked up from state.
"""
- command = self._state_store.commands.get(command_id=command_id)
- command_impl = command._ImplementationCls(
+ queued_command = self._state_store.commands.get(command_id=command_id)
+ note_tracker = self._command_note_tracker_provider()
+ command_impl = queued_command._ImplementationCls(
state_view=self._state_store,
hardware_api=self._hardware_api,
equipment=self._equipment,
@@ -89,59 +132,68 @@ async def execute(self, command_id: str) -> None:
run_control=self._run_control,
rail_lights=self._rail_lights,
status_bar=self._status_bar,
+ command_note_adder=note_tracker,
)
started_at = self._model_utils.get_timestamp()
- running_command = command.copy(
- update={
- "status": CommandStatus.RUNNING,
- "startedAt": started_at,
- }
- )
self._action_dispatcher.dispatch(
- UpdateCommandAction(command=running_command, private_result=None)
+ RunCommandAction(command_id=queued_command.id, started_at=started_at)
)
+ running_command = self._state_store.commands.get(queued_command.id)
try:
log.debug(
- f"Executing {command.id}, {command.commandType}, {command.params}"
+ f"Executing {running_command.id}, {running_command.commandType}, {running_command.params}"
)
if isinstance(command_impl, AbstractCommandImpl):
- result = await command_impl.execute(command.params) # type: ignore[arg-type]
- private_result = None
+ result: CommandResult = await command_impl.execute(running_command.params) # type: ignore[arg-type]
+ private_result: Optional[CommandPrivateResult] = None
else:
- result, private_result = await command_impl.execute(command.params) # type: ignore[arg-type]
+ result, private_result = await command_impl.execute(running_command.params) # type: ignore[arg-type]
except (Exception, asyncio.CancelledError) as error:
- log.warning(f"Execution of {command.id} failed", exc_info=error)
+ log.warning(f"Execution of {running_command.id} failed", exc_info=error)
# TODO(mc, 2022-11-14): mark command as stopped rather than failed
# https://opentrons.atlassian.net/browse/RCORE-390
if isinstance(error, asyncio.CancelledError):
error = RunStoppedError("Run was cancelled")
elif isinstance(error, EStopActivatedError):
- error = PE_EStopActivatedError(message=str(error), wrapping=[error])
+ error = PE_EStopActivatedError(wrapping=[error])
elif not isinstance(error, EnumeratedError):
error = PythonException(error)
self._action_dispatcher.dispatch(
FailCommandAction(
error=error,
- command_id=command_id,
+ command_id=running_command.id,
+ running_command=running_command,
error_id=self._model_utils.generate_id(),
failed_at=self._model_utils.get_timestamp(),
+ notes=note_tracker.get_notes(),
+ # todo(mm, 2024-03-13):
+ # When a command fails recoverably, and we handle it with
+ # WAIT_FOR_RECOVERY or CONTINUE, we want to update our logical
+ # protocol state as if the command succeeded. (e.g. if a tip
+ # pickup failed, pretend that it succeeded and that the tip is now
+ # on the pipette.) However, this currently does the opposite,
+ # acting as if the command never executed.
+ type=self._error_recovery_policy(
+ running_command,
+ error,
+ ),
)
)
else:
- completed_command = running_command.copy(
- update={
- "result": result,
- "status": CommandStatus.SUCCEEDED,
- "completedAt": self._model_utils.get_timestamp(),
- }
- )
+ update = {
+ "result": result,
+ "status": CommandStatus.SUCCEEDED,
+ "completedAt": self._model_utils.get_timestamp(),
+ "notes": note_tracker.get_notes(),
+ }
+ succeeded_command = running_command.copy(update=update)
self._action_dispatcher.dispatch(
- UpdateCommandAction(
- command=completed_command, private_result=private_result
+ SucceedCommandAction(
+ command=succeeded_command, private_result=private_result
),
)
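The executor now asks a `CommandNoteTrackerProvider` for a fresh tracker on every `execute()` call, so tests can swap in their own. A sketch of an alternative tracker that also logs each note as it arrives (`CommandNote` is typed loosely here rather than imported):

```python
from typing import Any, List


class LoggingNoteTracker:
    """Collects command notes like _NoteTracker, but also prints them as they arrive."""

    def __init__(self) -> None:
        self._notes: List[Any] = []

    def __call__(self, note: Any) -> None:
        print(f"command note: {note}")
        self._notes.append(note)

    def get_notes(self) -> List[Any]:
        return self._notes


# e.g. CommandExecutor(..., command_note_tracker_provider=LoggingNoteTracker)
```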
diff --git a/api/src/opentrons/protocol_engine/execution/create_queue_worker.py b/api/src/opentrons/protocol_engine/execution/create_queue_worker.py
index c204107f5a1..8b59eda5ef2 100644
--- a/api/src/opentrons/protocol_engine/execution/create_queue_worker.py
+++ b/api/src/opentrons/protocol_engine/execution/create_queue_worker.py
@@ -1,5 +1,6 @@
"""QueueWorker and dependency factory."""
from opentrons.hardware_control import HardwareControlAPI
+from opentrons.protocol_engine.error_recovery_policy import ErrorRecoveryPolicy
from opentrons.protocol_engine.execution.rail_lights import RailLightsHandler
from ..state import StateStore
@@ -20,6 +21,7 @@ def create_queue_worker(
hardware_api: HardwareControlAPI,
state_store: StateStore,
action_dispatcher: ActionDispatcher,
+ error_recovery_policy: ErrorRecoveryPolicy,
) -> QueueWorker:
"""Create a ready-to-use QueueWorker instance.
@@ -27,6 +29,7 @@ def create_queue_worker(
hardware_api: Hardware control API to pass down to dependencies.
state_store: StateStore to pass down to dependencies.
action_dispatcher: ActionDispatcher to pass down to dependencies.
+ error_recovery_policy: ErrorRecoveryPolicy to pass down to dependencies.
"""
gantry_mover = create_gantry_mover(
hardware_api=hardware_api,
@@ -36,7 +39,6 @@ def create_queue_worker(
equipment_handler = EquipmentHandler(
hardware_api=hardware_api,
state_store=state_store,
- action_dispatcher=action_dispatcher,
)
movement_handler = MovementHandler(
@@ -85,6 +87,7 @@ def create_queue_worker(
run_control=run_control_handler,
rail_lights=rail_lights_handler,
status_bar=status_bar_handler,
+ error_recovery_policy=error_recovery_policy,
)
return QueueWorker(
diff --git a/api/src/opentrons/protocol_engine/execution/equipment.py b/api/src/opentrons/protocol_engine/execution/equipment.py
index b3361510ec2..d6898ef0863 100644
--- a/api/src/opentrons/protocol_engine/execution/equipment.py
+++ b/api/src/opentrons/protocol_engine/execution/equipment.py
@@ -1,6 +1,6 @@
"""Equipment command side-effect logic."""
from dataclasses import dataclass
-from typing import Optional, overload
+from typing import Optional, overload, Union
from opentrons_shared_data.pipette.dev_types import PipetteNameType
@@ -22,7 +22,6 @@
TemperatureModuleId,
ThermocyclerModuleId,
)
-from ..actions import ActionDispatcher
from ..errors import (
FailedToLoadPipetteError,
LabwareDefinitionDoesNotExistError,
@@ -44,6 +43,7 @@
LabwareOffsetLocation,
ModuleModel,
ModuleDefinition,
+ AddressableAreaLocation,
)
@@ -56,6 +56,14 @@ class LoadedLabwareData:
offsetId: Optional[str]
+@dataclass(frozen=True)
+class ReloadedLabwareData:
+ """The result of a reload labware procedure."""
+
+ location: LabwareLocation
+ offsetId: Optional[str]
+
+
@dataclass(frozen=True)
class LoadedPipetteData:
"""The result of a load pipette procedure."""
@@ -98,7 +106,6 @@ def __init__(
self,
hardware_api: HardwareControlAPI,
state_store: StateStore,
- action_dispatcher: ActionDispatcher,
labware_data_provider: Optional[LabwareDataProvider] = None,
module_data_provider: Optional[ModuleDataProvider] = None,
model_utils: Optional[ModelUtils] = None,
@@ -109,7 +116,6 @@ def __init__(
"""Initialize an EquipmentHandler instance."""
self._hardware_api = hardware_api
self._state_store = state_store
- self._action_dispatcher = action_dispatcher
self._labware_data_provider = labware_data_provider or LabwareDataProvider()
self._module_data_provider = module_data_provider or ModuleDataProvider()
self._model_utils = model_utils or ModelUtils()
@@ -173,11 +179,31 @@ async def load_labware(
labware_id=labware_id, definition=definition, offsetId=offset_id
)
+ async def reload_labware(self, labware_id: str) -> ReloadedLabwareData:
+ """Reload an already-loaded labware. This cannot change the labware location.
+
+ Args:
+ labware_id: The ID of the already-loaded labware.
+
+ Raises:
+ LabwareNotLoadedError: If `labware_id` does not reference a loaded labware.
+
+ """
+ location = self._state_store.labware.get_location(labware_id)
+ definition_uri = self._state_store.labware.get_definition_uri(labware_id)
+ offset_id = self.find_applicable_labware_offset_id(
+ labware_definition_uri=definition_uri,
+ labware_location=location,
+ )
+
+ return ReloadedLabwareData(location=location, offsetId=offset_id)
+
async def load_pipette(
self,
pipette_name: PipetteNameType,
mount: MountType,
pipette_id: Optional[str],
+ tip_overlap_version: Optional[str],
) -> LoadedPipetteData:
"""Ensure the requested pipette is attached.
@@ -186,6 +212,8 @@ async def load_pipette(
mount: The mount on which pipette must be attached.
pipette_id: An optional identifier to assign the pipette. If None, an
identifier will be generated.
+ tip_overlap_version: An optional specifier for the version of tip overlap data to use.
+ If None, defaults to v0. The caller does not need to format-check the value; this function handles that.
Returns:
A LoadedPipetteData object.
@@ -200,9 +228,13 @@ async def load_pipette(
if isinstance(pipette_name, PipetteNameType)
else pipette_name
)
+ sanitized_overlap_version = (
+ pipette_data_provider.validate_and_default_tip_overlap_version(
+ tip_overlap_version
+ )
+ )
pipette_id = pipette_id or self._model_utils.generate_id()
-
if not use_virtual_pipettes:
cache_request = {mount.to_hw_mount(): pipette_name_value}
@@ -233,18 +265,19 @@ async def load_pipette(
serial_number = pipette_dict["pipette_id"]
static_pipette_config = pipette_data_provider.get_pipette_static_config(
- pipette_dict
+ pipette_dict=pipette_dict, tip_overlap_version=sanitized_overlap_version
)
else:
serial_number = self._model_utils.generate_id(prefix="fake-serial-number-")
static_pipette_config = (
self._virtual_pipette_data_provider.get_virtual_pipette_static_config(
- pipette_name_value, pipette_id
+ pipette_name=pipette_name_value,
+ pipette_id=pipette_id,
+ tip_overlap_version=sanitized_overlap_version,
)
)
serial = serial_number or ""
-
return LoadedPipetteData(
pipette_id=pipette_id,
serial_number=serial,
@@ -254,7 +287,7 @@ async def load_pipette(
async def load_magnetic_block(
self,
model: ModuleModel,
- location: DeckSlotLocation,
+ location: Union[DeckSlotLocation, AddressableAreaLocation],
module_id: Optional[str],
) -> LoadedModuleData:
"""Ensure the required magnetic block is attached.
@@ -276,10 +309,6 @@ async def load_magnetic_block(
model
), f"Expected Magnetic block and got {model.name}"
definition = self._module_data_provider.get_definition(model)
- # when loading a hardware module select_hardware_module_to_load
- # will ensure a module of a different type is not loaded at the same slot.
- # this is for non-connected modules.
- self._state_store.modules.raise_if_module_in_location(location=location)
return LoadedModuleData(
module_id=self._model_utils.ensure_id(module_id),
serial_number=None,
@@ -323,10 +352,14 @@ async def load_module(
for hw_mod in self._hardware_api.attached_modules
]
+ serial_number_at_location = self._state_store.geometry._addressable_areas.get_fixture_serial_from_deck_configuration_by_deck_slot(
+ location.slotName
+ )
attached_module = self._state_store.modules.select_hardware_module_to_load(
model=model,
location=location,
attached_modules=attached_modules,
+ expected_serial_number=serial_number_at_location,
)
else:
@@ -344,9 +377,7 @@ async def load_module(
)
async def configure_for_volume(
- self,
- pipette_id: str,
- volume: float,
+ self, pipette_id: str, volume: float, tip_overlap_version: Optional[str]
) -> LoadedConfigureForVolumeData:
"""Ensure the requested volume can be configured for the given pipette.
@@ -358,6 +389,11 @@ async def configure_for_volume(
A LoadedConfiguredVolumeData object.
"""
use_virtual_pipettes = self._state_store.config.use_virtual_pipettes
+ sanitized_overlap_version = (
+ pipette_data_provider.validate_and_default_tip_overlap_version(
+ tip_overlap_version
+ )
+ )
if not use_virtual_pipettes:
mount = self._state_store.pipettes.get_mount(pipette_id).to_hw_mount()
@@ -367,7 +403,7 @@ async def configure_for_volume(
serial_number = pipette_dict["pipette_id"]
static_pipette_config = pipette_data_provider.get_pipette_static_config(
- pipette_dict
+ pipette_dict=pipette_dict, tip_overlap_version=sanitized_overlap_version
)
else:
@@ -378,7 +414,9 @@ async def configure_for_volume(
serial_number = self._model_utils.generate_id(prefix="fake-serial-number-")
static_pipette_config = self._virtual_pipette_data_provider.get_virtual_pipette_static_config_by_model_string(
- model, pipette_id
+ pipette_model_string=model,
+ pipette_id=pipette_id,
+ tip_overlap_version=sanitized_overlap_version,
)
return LoadedConfigureForVolumeData(
@@ -394,7 +432,7 @@ async def configure_nozzle_layout(
primary_nozzle: Optional[str] = None,
front_right_nozzle: Optional[str] = None,
back_left_nozzle: Optional[str] = None,
- ) -> Optional[NozzleMap]:
+ ) -> NozzleMap:
"""Ensure the requested nozzle layout is compatible with the current pipette.
Args:
diff --git a/api/src/opentrons/protocol_engine/execution/gantry_mover.py b/api/src/opentrons/protocol_engine/execution/gantry_mover.py
index d8e2ea8afc5..7e05c8db247 100644
--- a/api/src/opentrons/protocol_engine/execution/gantry_mover.py
+++ b/api/src/opentrons/protocol_engine/execution/gantry_mover.py
@@ -75,6 +75,11 @@ async def home(self, axes: Optional[List[MotorAxis]]) -> None:
async def retract_axis(self, axis: MotorAxis) -> None:
"""Retract the specified axis to its home position."""
+ ...
+
+ async def prepare_for_mount_movement(self, mount: Mount) -> None:
+ """Retract the 'idle' mount if necessary."""
+ ...
class HardwareGantryMover(GantryMover):
@@ -99,7 +104,7 @@ async def get_position(
"""
pipette_location = self._state_view.motion.get_pipette_location(
pipette_id=pipette_id,
- current_well=current_well,
+ current_location=current_well,
)
try:
return await self._hardware_api.gantry_position(
@@ -211,6 +216,10 @@ async def retract_axis(self, axis: MotorAxis) -> None:
)
await self._hardware_api.retract_axis(axis=hardware_axis)
+ async def prepare_for_mount_movement(self, mount: Mount) -> None:
+ """Retract the 'idle' mount if necessary."""
+ await self._hardware_api.prepare_for_mount_movement(mount)
+
class VirtualGantryMover(GantryMover):
"""State store based gantry movement handler for simulation/analysis."""
@@ -286,6 +295,10 @@ async def retract_axis(self, axis: MotorAxis) -> None:
"""Retract the specified axis. No-op in virtual implementation."""
pass
+ async def prepare_for_mount_movement(self, mount: Mount) -> None:
+ """Retract the 'idle' mount if necessary."""
+ pass
+
def create_gantry_mover(
state_view: StateView, hardware_api: HardwareControlAPI
diff --git a/api/src/opentrons/protocol_engine/execution/hardware_stopper.py b/api/src/opentrons/protocol_engine/execution/hardware_stopper.py
index 11f753b0ee4..28eacd7525b 100644
--- a/api/src/opentrons/protocol_engine/execution/hardware_stopper.py
+++ b/api/src/opentrons/protocol_engine/execution/hardware_stopper.py
@@ -15,6 +15,8 @@
from .tip_handler import TipHandler, HardwareTipHandler
from ...hardware_control.types import OT3Mount
+from opentrons.protocol_engine.types import AddressableOffsetVector
+
log = logging.getLogger(__name__)
# TODO(mc, 2022-03-07): this constant dup'd from opentrons.protocols.geometry.deck
@@ -48,44 +50,68 @@ def __init__(
state_view=state_store,
)
+ async def _home_everything_except_plungers(self) -> None:
+ # TODO: Update this once gripper MotorAxis is available in engine.
+ try:
+ ot3api = ensure_ot3_hardware(hardware_api=self._hardware_api)
+ if (
+ not self._state_store.config.use_virtual_gripper
+ and ot3api.has_gripper()
+ ):
+ await ot3api.home_z(mount=OT3Mount.GRIPPER)
+ except HardwareNotSupportedError:
+ pass
+ await self._movement_handler.home(
+ axes=[MotorAxis.X, MotorAxis.Y, MotorAxis.LEFT_Z, MotorAxis.RIGHT_Z]
+ )
+
async def _drop_tip(self) -> None:
"""Drop currently attached tip, if any, into trash after a run cancel."""
attached_tips = self._state_store.pipettes.get_all_attached_tips()
if attached_tips:
await self._hardware_api.stop(home_after=False)
- # TODO: Update this once gripper MotorAxis is available in engine.
- try:
- ot3api = ensure_ot3_hardware(hardware_api=self._hardware_api)
- if (
- not self._state_store.config.use_virtual_gripper
- and ot3api.has_gripper()
- ):
- await ot3api.home_z(mount=OT3Mount.GRIPPER)
- except HardwareNotSupportedError:
- pass
- await self._movement_handler.home(
- axes=[MotorAxis.X, MotorAxis.Y, MotorAxis.LEFT_Z, MotorAxis.RIGHT_Z]
- )
-
- for pipette_id, tip in attached_tips:
- try:
- await self._tip_handler.add_tip(pipette_id=pipette_id, tip=tip)
- # TODO: Add ability to drop tip onto custom trash as well.
- await self._movement_handler.move_to_well(
- pipette_id=pipette_id,
- labware_id=FIXED_TRASH_ID,
- well_name="A1",
- )
- await self._tip_handler.drop_tip(
- pipette_id=pipette_id,
- home_after=False,
- )
-
- except HwPipetteNotAttachedError:
- # this will happen normally during protocol analysis, but
- # should not happen during an actual run
- log.debug(f"Pipette ID {pipette_id} no longer attached.")
+
+ await self._home_everything_except_plungers()
+
+ for pipette_id, tip in attached_tips:
+ try:
+ if self._state_store.labware.get_fixed_trash_id() == FIXED_TRASH_ID:
+ # OT-2 and Flex 2.15 protocols will default to the Fixed Trash Labware
+ await self._tip_handler.add_tip(pipette_id=pipette_id, tip=tip)
+ await self._movement_handler.move_to_well(
+ pipette_id=pipette_id,
+ labware_id=FIXED_TRASH_ID,
+ well_name="A1",
+ )
+ await self._tip_handler.drop_tip(
+ pipette_id=pipette_id,
+ home_after=False,
+ )
+ elif self._state_store.config.robot_type == "OT-2 Standard":
+ # API 2.16 and above OT2 protocols use addressable areas
+ await self._tip_handler.add_tip(pipette_id=pipette_id, tip=tip)
+ await self._movement_handler.move_to_addressable_area(
+ pipette_id=pipette_id,
+ addressable_area_name="fixedTrash",
+ offset=AddressableOffsetVector(x=0, y=0, z=0),
+ force_direct=False,
+ speed=None,
+ minimum_z_height=None,
+ )
+ await self._tip_handler.drop_tip(
+ pipette_id=pipette_id,
+ home_after=False,
+ )
+ else:
+ log.debug(
+ "Flex Protocols API Version 2.16 and beyond do not support automatic tip dropping at this time."
+ )
+
+ except HwPipetteNotAttachedError:
+ # this will happen normally during protocol analysis, but
+ # should not happen during an actual run
+ log.debug(f"Pipette ID {pipette_id} no longer attached.")
async def do_halt(self, disengage_before_stopping: bool = False) -> None:
"""Issue a halt signal to the hardware API.
@@ -102,12 +128,15 @@ async def do_stop_and_recover(
post_run_hardware_state: PostRunHardwareState,
drop_tips_after_run: bool = False,
) -> None:
- """Stop and reset the HardwareAPI, optionally dropping tips and homing."""
- if drop_tips_after_run:
- await self._drop_tip()
-
+ """Stop and reset the HardwareAPI, homing and dropping tips independently if specified."""
home_after_stop = post_run_hardware_state in (
PostRunHardwareState.HOME_AND_STAY_ENGAGED,
PostRunHardwareState.HOME_THEN_DISENGAGE,
)
- await self._hardware_api.stop(home_after=home_after_stop)
+ if drop_tips_after_run:
+ await self._drop_tip()
+ await self._hardware_api.stop(home_after=home_after_stop)
+ else:
+ await self._hardware_api.stop(home_after=False)
+ if home_after_stop:
+ await self._home_everything_except_plungers()
diff --git a/api/src/opentrons/protocol_engine/execution/labware_movement.py b/api/src/opentrons/protocol_engine/execution/labware_movement.py
index d3c4fb3619c..3cdd78b8808 100644
--- a/api/src/opentrons/protocol_engine/execution/labware_movement.py
+++ b/api/src/opentrons/protocol_engine/execution/labware_movement.py
@@ -3,6 +3,8 @@
from typing import Optional, TYPE_CHECKING
+from opentrons.types import Point
+
from opentrons.hardware_control import HardwareControlAPI
from opentrons.hardware_control.types import OT3Mount, Axis
from opentrons.motion_planning import get_gripper_labware_movement_waypoints
@@ -32,6 +34,8 @@
if TYPE_CHECKING:
from opentrons.protocol_engine.execution import EquipmentHandler, MovementHandler
+_GRIPPER_HOMED_POSITION_Z = 166.125 # Height of the center of the gripper critical point from the deck when homed
+
# TODO (spp, 2022-10-20): name this GripperMovementHandler if it doesn't handle
# any non-gripper implementations
@@ -83,11 +87,20 @@ async def move_labware_with_gripper(
current_location: OnDeckLabwareLocation,
new_location: OnDeckLabwareLocation,
user_offset_data: LabwareMovementOffsetData,
+ post_drop_slide_offset: Optional[Point],
) -> None:
"""Move a loaded labware from one location to another using gripper."""
use_virtual_gripper = self._state_store.config.use_virtual_gripper
+
if use_virtual_gripper:
+ # During analysis, we pass in hard-coded estimates for positions that are only accessible during execution
+ self._state_store.geometry.check_gripper_labware_tip_collision(
+ gripper_homed_position_z=_GRIPPER_HOMED_POSITION_Z,
+ labware_id=labware_id,
+ current_location=current_location,
+ )
return
+
ot3api = ensure_ot3_hardware(
hardware_api=self._hardware_api,
error_msg="Gripper is only available on Opentrons Flex",
@@ -97,7 +110,7 @@ async def move_labware_with_gripper(
raise GripperNotAttachedError(
"No gripper found for performing labware movements."
)
- if not ot3api._gripper_handler.is_ready_for_jaw_home():
+ if not ot3api.gripper_jaw_can_home():
raise CannotPerformGripperAction(
"Cannot pick up labware when gripper is already gripping."
)
@@ -108,6 +121,13 @@ async def move_labware_with_gripper(
await ot3api.home(axes=[Axis.Z_L, Axis.Z_R, Axis.Z_G])
gripper_homed_position = await ot3api.gantry_position(mount=gripper_mount)
+ # Verify that no tip collisions will occur during the move
+ self._state_store.geometry.check_gripper_labware_tip_collision(
+ gripper_homed_position_z=gripper_homed_position.z,
+ labware_id=labware_id,
+ current_location=current_location,
+ )
+
async with self._thermocycler_plate_lifter.lift_plate_for_labware_movement(
labware_location=current_location
):
@@ -129,14 +149,43 @@ async def move_labware_with_gripper(
to_labware_center=to_labware_center,
gripper_home_z=gripper_homed_position.z,
offset_data=final_offsets,
+ post_drop_slide_offset=post_drop_slide_offset,
)
labware_grip_force = self._state_store.labware.get_grip_force(labware_id)
-
+ holding_labware = False
for waypoint_data in movement_waypoints:
if waypoint_data.jaw_open:
+ if waypoint_data.dropping:
+ # This `disengage_axes` step is important in order to engage
+ # the electronic brake on the Z axis of the gripper. The brake
+ # has a stronger holding force on the axis than the hold current,
+ # and prevents the axis from spuriously dropping when e.g. the notch
+ # on the side of a falling tiprack catches the jaw.
+ await ot3api.disengage_axes([Axis.Z_G])
await ot3api.ungrip()
+ holding_labware = True
+ if waypoint_data.dropping:
+ # We lost the position estimation after disengaging the axis, so
+ # it is necessary to home it next
+ await ot3api.home_z(OT3Mount.GRIPPER)
else:
await ot3api.grip(force_newtons=labware_grip_force)
+ # we only want to check position after the gripper has opened and
+ # should be holding labware
+ if holding_labware:
+ labware_bbox = self._state_store.labware.get_dimensions(
+ labware_id
+ )
+ well_bbox = self._state_store.labware.get_well_bbox(labware_id)
+ ot3api.raise_error_if_gripper_pickup_failed(
+ expected_grip_width=labware_bbox.y,
+ grip_width_uncertainty_wider=abs(
+ max(well_bbox.y - labware_bbox.y, 0)
+ ),
+ grip_width_uncertainty_narrower=abs(
+ min(well_bbox.y - labware_bbox.y, 0)
+ ),
+ )
await ot3api.move_to(
mount=gripper_mount, abs_position=waypoint_data.position
)
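The grip-width check above derives its uncertainty bounds from the difference between the labware's overall bounding box and its well bounding box along Y. A standalone worked sketch with illustrative dimensions:

```python
# Worked example of the uncertainty arithmetic passed to
# raise_error_if_gripper_pickup_failed() above; the millimeter values are made up.
labware_bbox_y = 85.5  # overall labware footprint along Y (mm)
well_bbox_y = 83.0     # extent of the well area along Y (mm)

expected_grip_width = labware_bbox_y
# Wells wider than the footprint would let the jaws settle wider than expected.
grip_width_uncertainty_wider = abs(max(well_bbox_y - labware_bbox_y, 0))     # 0.0
# Wells narrower than the footprint may let the jaws close slightly narrower.
grip_width_uncertainty_narrower = abs(min(well_bbox_y - labware_bbox_y, 0))  # 2.5
```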
diff --git a/api/src/opentrons/protocol_engine/execution/movement.py b/api/src/opentrons/protocol_engine/execution/movement.py
index d0caac1f55a..451f482ad0d 100644
--- a/api/src/opentrons/protocol_engine/execution/movement.py
+++ b/api/src/opentrons/protocol_engine/execution/movement.py
@@ -14,6 +14,7 @@
MovementAxis,
MotorAxis,
CurrentWell,
+ AddressableOffsetVector,
)
from ..state import StateStore
from ..resources import ModelUtils
@@ -72,6 +73,10 @@ async def move_to_well(
speed: Optional[float] = None,
) -> Point:
"""Move to a specific well."""
+ self._state_store.labware.raise_if_labware_inaccessible_by_pipette(
+ labware_id=labware_id
+ )
+
self._state_store.labware.raise_if_labware_has_labware_on_top(
labware_id=labware_id
)
@@ -102,10 +107,13 @@ async def move_to_well(
# get the pipette's mount and current critical point, if applicable
pipette_location = self._state_store.motion.get_pipette_location(
pipette_id=pipette_id,
- current_well=current_well,
+ current_location=current_well,
)
origin_cp = pipette_location.critical_point
+ await self._gantry_mover.prepare_for_mount_movement(
+ pipette_location.mount.to_hw_mount()
+ )
origin = await self._gantry_mover.get_position(pipette_id=pipette_id)
max_travel_z = self._gantry_mover.get_max_travel_z(pipette_id=pipette_id)
@@ -133,6 +141,78 @@ async def move_to_well(
return final_point
+ async def move_to_addressable_area(
+ self,
+ pipette_id: str,
+ addressable_area_name: str,
+ offset: AddressableOffsetVector,
+ force_direct: bool = False,
+ minimum_z_height: Optional[float] = None,
+ speed: Optional[float] = None,
+ stay_at_highest_possible_z: bool = False,
+ ignore_tip_configuration: Optional[bool] = True,
+ ) -> Point:
+ """Move to a specific addressable area."""
+ # Check for presence of heater shakers on deck, and if planned
+ # pipette movement is allowed
+ hs_movement_restrictors = (
+ self._state_store.modules.get_heater_shaker_movement_restrictors()
+ )
+
+ dest_slot_int = (
+ self._state_store.addressable_areas.get_addressable_area_base_slot(
+ addressable_area_name
+ ).as_int()
+ )
+
+ self._hs_movement_flagger.raise_if_movement_restricted(
+ hs_movement_restrictors=hs_movement_restrictors,
+ destination_slot=dest_slot_int,
+ is_multi_channel=(
+ self._state_store.tips.get_pipette_channels(pipette_id) > 1
+ ),
+ destination_is_tip_rack=False,
+ )
+
+ # TODO(jbl 11-28-2023) check if addressable area is a deck slot, and if it is check if there are no labware
+ # or modules on top.
+
+ # get the pipette's mount and current critical point, if applicable
+ pipette_location = self._state_store.motion.get_pipette_location(
+ pipette_id=pipette_id,
+ current_location=None,
+ )
+ origin_cp = pipette_location.critical_point
+
+ await self._gantry_mover.prepare_for_mount_movement(
+ pipette_location.mount.to_hw_mount()
+ )
+ origin = await self._gantry_mover.get_position(pipette_id=pipette_id)
+ max_travel_z = self._gantry_mover.get_max_travel_z(pipette_id=pipette_id)
+
+ # calculate the movement's waypoints
+ waypoints = self._state_store.motion.get_movement_waypoints_to_addressable_area(
+ addressable_area_name=addressable_area_name,
+ offset=offset,
+ origin=origin,
+ origin_cp=origin_cp,
+ max_travel_z=max_travel_z,
+ force_direct=force_direct,
+ minimum_z_height=minimum_z_height,
+ stay_at_max_travel_z=stay_at_highest_possible_z,
+ ignore_tip_configuration=ignore_tip_configuration,
+ )
+
+ speed = self._state_store.pipettes.get_movement_speed(
+ pipette_id=pipette_id, requested_speed=speed
+ )
+
+ final_point = await self._gantry_mover.move_to(
+ pipette_id=pipette_id, waypoints=waypoints, speed=speed
+ )
+
+ return final_point
+
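For context, this is the same call path the updated `HardwareStopper._drop_tip()` uses on OT-2 runs; a minimal usage sketch (the handler and pipette ID are assumed to exist already):

```python
# Usage sketch: move a pipette over the OT-2 "fixedTrash" addressable area with no
# extra offset, mirroring the new HardwareStopper._drop_tip() call.
from opentrons.protocol_engine.execution import MovementHandler
from opentrons.protocol_engine.types import AddressableOffsetVector


async def move_over_fixed_trash(handler: MovementHandler, pipette_id: str) -> None:
    await handler.move_to_addressable_area(
        pipette_id=pipette_id,
        addressable_area_name="fixedTrash",
        offset=AddressableOffsetVector(x=0, y=0, z=0),
        force_direct=False,
        speed=None,
        minimum_z_height=None,
    )
```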
async def move_relative(
self,
pipette_id: str,
@@ -165,6 +245,13 @@ async def move_to_coordinates(
speed: Optional[float] = None,
) -> Point:
"""Move pipette to a given deck coordinate."""
+ # get the pipette's mount, if applicable
+ pipette_location = self._state_store.motion.get_pipette_location(
+ pipette_id=pipette_id
+ )
+ await self._gantry_mover.prepare_for_mount_movement(
+ pipette_location.mount.to_hw_mount()
+ )
origin = await self._gantry_mover.get_position(pipette_id=pipette_id)
max_travel_z = self._gantry_mover.get_max_travel_z(pipette_id=pipette_id)
diff --git a/api/src/opentrons/protocol_engine/execution/pipetting.py b/api/src/opentrons/protocol_engine/execution/pipetting.py
index 36037b6d11c..7abfb158539 100644
--- a/api/src/opentrons/protocol_engine/execution/pipetting.py
+++ b/api/src/opentrons/protocol_engine/execution/pipetting.py
@@ -6,14 +6,26 @@
from opentrons.hardware_control import HardwareControlAPI
from ..state import StateView, HardwarePipette
+from ..notes import CommandNoteAdder, CommandNote
from ..errors.exceptions import (
TipNotAttachedError,
- InvalidPipettingVolumeError,
+ InvalidAspirateVolumeError,
InvalidPushOutVolumeError,
InvalidDispenseVolumeError,
)
+# 1e-9 µL (1 femtoliter!) is a good value because:
+# * It's large relative to rounding errors that occur in practice in protocols. For
+# example, https://opentrons.atlassian.net/browse/RESC-182 shows a rounding error
+# on the order of 1e-15 µL.
+# * It's small relative to volumes that our users might actually care about and
+# expect the robot to execute faithfully.
+ # * It's the default relative tolerance for math.isclose(), where it apparently works
+# well in general.
+_VOLUME_ROUNDING_ERROR_TOLERANCE = 1e-9
+
+
class PipettingHandler(TypingProtocol):
"""Liquid handling commands."""
@@ -28,6 +40,7 @@ async def aspirate_in_place(
pipette_id: str,
volume: float,
flow_rate: float,
+ command_note_adder: CommandNoteAdder,
) -> float:
"""Set flow-rate and aspirate."""
@@ -77,17 +90,26 @@ async def aspirate_in_place(
pipette_id: str,
volume: float,
flow_rate: float,
+ command_note_adder: CommandNoteAdder,
) -> float:
"""Set flow-rate and aspirate."""
# get mount and config data from state and hardware controller
+ adjusted_volume = _validate_aspirate_volume(
+ state_view=self._state_view,
+ pipette_id=pipette_id,
+ aspirate_volume=volume,
+ command_note_adder=command_note_adder,
+ )
hw_pipette = self._state_view.pipettes.get_hardware_pipette(
pipette_id=pipette_id,
attached_pipettes=self._hardware_api.attached_instruments,
)
with self._set_flow_rate(pipette=hw_pipette, aspirate_flow_rate=flow_rate):
- await self._hardware_api.aspirate(mount=hw_pipette.mount, volume=volume)
+ await self._hardware_api.aspirate(
+ mount=hw_pipette.mount, volume=adjusted_volume
+ )
- return volume
+ return adjusted_volume
async def dispense_in_place(
self,
@@ -97,6 +119,9 @@ async def dispense_in_place(
push_out: Optional[float],
) -> float:
"""Dispense liquid without moving the pipette."""
+ adjusted_volume = _validate_dispense_volume(
+ state_view=self._state_view, pipette_id=pipette_id, dispense_volume=volume
+ )
hw_pipette = self._state_view.pipettes.get_hardware_pipette(
pipette_id=pipette_id,
attached_pipettes=self._hardware_api.attached_instruments,
@@ -108,10 +133,10 @@ async def dispense_in_place(
)
with self._set_flow_rate(pipette=hw_pipette, dispense_flow_rate=flow_rate):
await self._hardware_api.dispense(
- mount=hw_pipette.mount, volume=volume, push_out=push_out
+ mount=hw_pipette.mount, volume=adjusted_volume, push_out=push_out
)
- return volume
+ return adjusted_volume
async def blow_out_in_place(
self,
@@ -172,23 +197,6 @@ def get_is_ready_to_aspirate(self, pipette_id: str) -> bool:
"""Get whether a pipette is ready to aspirate."""
return self._state_view.pipettes.get_aspirated_volume(pipette_id) is not None
- def _validate_aspirated_volume(self, pipette_id: str, volume: float) -> None:
- """Get whether the aspirated volume is valid to aspirate."""
- working_volume = self._state_view.pipettes.get_working_volume(
- pipette_id=pipette_id
- )
-
- current_volume = (
- self._state_view.pipettes.get_aspirated_volume(pipette_id=pipette_id) or 0
- )
-
- new_volume = current_volume + volume
-
- if new_volume > working_volume:
- raise InvalidPipettingVolumeError(
- "Cannot aspirate more than pipette max volume"
- )
-
async def prepare_for_aspirate(self, pipette_id: str) -> None:
"""Virtually prepare to aspirate (no-op)."""
@@ -197,11 +205,16 @@ async def aspirate_in_place(
pipette_id: str,
volume: float,
flow_rate: float,
+ command_note_adder: CommandNoteAdder,
) -> float:
"""Virtually aspirate (no-op)."""
self._validate_tip_attached(pipette_id=pipette_id, command_name="aspirate")
- self._validate_aspirated_volume(pipette_id=pipette_id, volume=volume)
- return volume
+ return _validate_aspirate_volume(
+ state_view=self._state_view,
+ pipette_id=pipette_id,
+ aspirate_volume=volume,
+ command_note_adder=command_note_adder,
+ )
async def dispense_in_place(
self,
@@ -217,8 +230,9 @@ async def dispense_in_place(
"push out value cannot have a negative value."
)
self._validate_tip_attached(pipette_id=pipette_id, command_name="dispense")
- self._validate_dispense_volume(pipette_id=pipette_id, dispense_volume=volume)
- return volume
+ return _validate_dispense_volume(
+ state_view=self._state_view, pipette_id=pipette_id, dispense_volume=volume
+ )
async def blow_out_in_place(
self,
@@ -235,20 +249,6 @@ def _validate_tip_attached(self, pipette_id: str, command_name: str) -> None:
f"Cannot perform {command_name} without a tip attached"
)
- def _validate_dispense_volume(
- self, pipette_id: str, dispense_volume: float
- ) -> None:
- """Validate dispense volume."""
- aspirate_volume = self._state_view.pipettes.get_aspirated_volume(pipette_id)
- if aspirate_volume is None:
- raise InvalidDispenseVolumeError(
- "Cannot perform a dispense if there is no volume in attached tip."
- )
- elif dispense_volume > aspirate_volume:
- raise InvalidDispenseVolumeError(
- f"Cannot dispense {dispense_volume} µL when only {aspirate_volume} µL has been aspirated."
- )
-
def create_pipetting_handler(
state_view: StateView, hardware_api: HardwareControlAPI
@@ -259,3 +259,85 @@ def create_pipetting_handler(
if state_view.config.use_virtual_pipettes is False
else VirtualPipettingHandler(state_view=state_view)
)
+
+
+def _validate_aspirate_volume(
+ state_view: StateView,
+ pipette_id: str,
+ aspirate_volume: float,
+ command_note_adder: CommandNoteAdder,
+) -> float:
+ """Get whether the given volume is valid to aspirate right now.
+
+ Return the volume to aspirate, possibly clamped, or raise an
+ InvalidAspirateVolumeError.
+ """
+ working_volume = state_view.pipettes.get_working_volume(pipette_id=pipette_id)
+
+ current_volume = (
+ state_view.pipettes.get_aspirated_volume(pipette_id=pipette_id) or 0
+ )
+
+ # TODO(mm, 2024-01-11): We should probably just use
+ # state_view.pipettes.get_available_volume()? Its whole `None` return vs. exception
+ # raising thing is confusing me.
+ available_volume = working_volume - current_volume
+ available_volume_with_tolerance = (
+ available_volume + _VOLUME_ROUNDING_ERROR_TOLERANCE
+ )
+
+ if aspirate_volume > available_volume_with_tolerance:
+ raise InvalidAspirateVolumeError(
+ attempted_aspirate_volume=aspirate_volume,
+ available_volume=available_volume,
+ max_pipette_volume=state_view.pipettes.get_maximum_volume(
+ pipette_id=pipette_id
+ ),
+ max_tip_volume=_get_max_tip_volume(
+ state_view=state_view, pipette_id=pipette_id
+ ),
+ )
+ else:
+ volume_to_aspirate = min(aspirate_volume, available_volume)
+ if volume_to_aspirate < aspirate_volume:
+ command_note_adder(
+ CommandNote(
+ noteKind="warning",
+ shortMessage=f"Aspirate clamped to {available_volume} µL",
+ longMessage=(
+ f"Command requested to aspirate {aspirate_volume} µL but only"
+ f" {available_volume} µL were available in the pipette. This is"
+ " probably a floating point artifact."
+ ),
+ source="execution",
+ )
+ )
+ return volume_to_aspirate
+
+
+def _validate_dispense_volume(
+ state_view: StateView, pipette_id: str, dispense_volume: float
+) -> float:
+ """Get whether the given volume is valid to dispense right now.
+
+ Return the volume to dispense, possibly clamped, or raise an
+ InvalidDispenseVolumeError.
+ """
+ aspirated_volume = state_view.pipettes.get_aspirated_volume(pipette_id)
+ if aspirated_volume is None:
+ raise InvalidDispenseVolumeError(
+ "Cannot perform a dispense if there is no volume in attached tip."
+ )
+ else:
+ remaining = aspirated_volume - dispense_volume
+ if remaining < -_VOLUME_ROUNDING_ERROR_TOLERANCE:
+ raise InvalidDispenseVolumeError(
+ f"Cannot dispense {dispense_volume} µL when only {aspirated_volume} µL has been aspirated."
+ )
+ else:
+ return min(dispense_volume, aspirated_volume)
+
+
+def _get_max_tip_volume(state_view: StateView, pipette_id: str) -> Optional[float]:
+ attached_tip = state_view.pipettes.get_attached_tip(pipette_id=pipette_id)
+ return None if attached_tip is None else attached_tip.volume
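To make the tolerance concrete, here is a standalone worked sketch of the clamping path in `_validate_aspirate_volume` (the volumes are illustrative):

```python
# Worked example of the clamping behavior above, using illustrative volumes.
_VOLUME_ROUNDING_ERROR_TOLERANCE = 1e-9

working_volume = 50.0           # µL the pipette/tip combination can hold
current_volume = 30.0           # µL already aspirated
requested = 20.000000000000004  # µL requested, carrying a float artifact

available = working_volume - current_volume  # 20.0
# The artifact (~4e-15 µL) is far below the 1e-9 µL tolerance, so no error is raised.
assert requested <= available + _VOLUME_ROUNDING_ERROR_TOLERANCE

# The request is clamped to what is actually available, and a warning CommandNote
# is added because the clamped value differs from the requested one.
volume_to_aspirate = min(requested, available)  # 20.0
```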
diff --git a/api/src/opentrons/protocol_engine/execution/queue_worker.py b/api/src/opentrons/protocol_engine/execution/queue_worker.py
index c1ba60eb143..179880c03e9 100644
--- a/api/src/opentrons/protocol_engine/execution/queue_worker.py
+++ b/api/src/opentrons/protocol_engine/execution/queue_worker.py
@@ -72,6 +72,9 @@ async def _run_commands(self) -> None:
command_id = await self._state_store.wait_for(
condition=self._state_store.commands.get_next_to_execute
)
+ # Assert for type hinting. This is valid because the wait_for() above
+ # only returns when the value is truthy.
+ assert command_id is not None
except RunStoppedError:
# There are no more commands that we should execute, either because the run has
# completed on its own, or because a client requested it to stop.
diff --git a/api/src/opentrons/protocol_engine/execution/tip_handler.py b/api/src/opentrons/protocol_engine/execution/tip_handler.py
index 4ea54df86fa..e43685d2ebb 100644
--- a/api/src/opentrons/protocol_engine/execution/tip_handler.py
+++ b/api/src/opentrons/protocol_engine/execution/tip_handler.py
@@ -3,14 +3,22 @@
from typing_extensions import Protocol as TypingProtocol
from opentrons.hardware_control import HardwareControlAPI
+from opentrons.hardware_control.types import FailedTipStateCheck, InstrumentProbeType
from opentrons_shared_data.errors.exceptions import (
CommandPreconditionViolated,
CommandParameterLimitViolated,
+ PythonException,
)
-from ..resources import LabwareDataProvider
+from ..resources import LabwareDataProvider, ensure_ot3_hardware
from ..state import StateView
-from ..types import TipGeometry
+from ..types import TipGeometry, TipPresenceStatus
+from ..errors import (
+ HardwareNotSupportedError,
+ TipNotAttachedError,
+ TipAttachedError,
+ ProtocolEngineError,
+)
PRIMARY_NOZZLE_TO_ENDING_NOZZLE_MAP = {
@@ -62,6 +70,58 @@ async def drop_tip(self, pipette_id: str, home_after: Optional[bool]) -> None:
async def add_tip(self, pipette_id: str, tip: TipGeometry) -> None:
"""Tell the Hardware API that a tip is attached."""
+ async def get_tip_presence(self, pipette_id: str) -> TipPresenceStatus:
+ """Get tip presence status on the pipette."""
+
+ async def verify_tip_presence(
+ self,
+ pipette_id: str,
+ expected: TipPresenceStatus,
+ follow_singular_sensor: Optional[InstrumentProbeType] = None,
+ ) -> None:
+ """Verify the expected tip presence status."""
+
+
+async def _available_for_nozzle_layout(
+ channels: int,
+ style: str,
+ primary_nozzle: Optional[str],
+ front_right_nozzle: Optional[str],
+) -> Dict[str, str]:
+ """Check nozzle layout is compatible with the pipette.
+
+ Returns:
+ A dict of nozzles used to configure the pipette.
+ """
+ if channels == 1:
+ raise CommandPreconditionViolated(
+ message=f"Cannot configure nozzle layout with a {channels} channel pipette."
+ )
+ if style == "ALL":
+ return {}
+ if style == "ROW" and channels == 8:
+ raise CommandParameterLimitViolated(
+ command_name="configure_nozzle_layout",
+ parameter_name="RowNozzleLayout",
+ limit_statement="RowNozzleLayout is incompatible with {channels} channel pipettes.",
+ actual_value=str(primary_nozzle),
+ )
+ if not primary_nozzle:
+ return {"primary_nozzle": "A1"}
+ if style == "SINGLE":
+ return {"primary_nozzle": primary_nozzle}
+ if not front_right_nozzle:
+ return {
+ "primary_nozzle": primary_nozzle,
+ "front_right_nozzle": PRIMARY_NOZZLE_TO_ENDING_NOZZLE_MAP[primary_nozzle][
+ style
+ ],
+ }
+ return {
+ "primary_nozzle": primary_nozzle,
+ "front_right_nozzle": front_right_nozzle,
+ }
+
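For reference, the helper's return values for a few inputs (a sketch assuming a 96-channel pipette; explicit nozzles are passed so the ending-nozzle map is not consulted):

```python
# Usage sketch for _available_for_nozzle_layout() above; asyncio.run only drives the
# coroutine, and the expected values follow directly from the branches.
import asyncio

assert asyncio.run(_available_for_nozzle_layout(96, "ALL", None, None)) == {}
assert asyncio.run(_available_for_nozzle_layout(96, "SINGLE", "H12", None)) == {
    "primary_nozzle": "H12"
}
assert asyncio.run(_available_for_nozzle_layout(96, "COLUMN", "A12", "H12")) == {
    "primary_nozzle": "A12",
    "front_right_nozzle": "H12",
}
```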
class HardwareTipHandler(TipHandler):
"""Pick up and drop tips, using the Hardware API."""
@@ -72,9 +132,9 @@ def __init__(
hardware_api: HardwareControlAPI,
labware_data_provider: Optional[LabwareDataProvider] = None,
) -> None:
- self._state_view = state_view
self._hardware_api = hardware_api
self._labware_data_provider = labware_data_provider or LabwareDataProvider()
+ self._state_view = state_view
async def available_for_nozzle_layout(
self,
@@ -83,40 +143,15 @@ async def available_for_nozzle_layout(
primary_nozzle: Optional[str] = None,
front_right_nozzle: Optional[str] = None,
) -> Dict[str, str]:
- """Check nozzle layout is compatible with the pipette."""
+ """Returns configuration for nozzle layout to pass to configure_nozzle_layout."""
if self._state_view.pipettes.get_attached_tip(pipette_id):
raise CommandPreconditionViolated(
message=f"Cannot configure nozzle layout of {str(self)} while it has tips attached."
)
channels = self._state_view.pipettes.get_channels(pipette_id)
- if channels == 1:
- raise CommandPreconditionViolated(
- message=f"Cannot configure nozzle layout with a {channels} channel pipette."
- )
- if style == "EMPTY":
- return {}
- if style == "ROW" and channels == 8:
- raise CommandParameterLimitViolated(
- command_name="configure_nozzle_layout",
- parameter_name="RowNozzleLayout",
- limit_statement="RowNozzleLayout is incompatible with {channels} channel pipettes.",
- actual_value=str(primary_nozzle),
- )
- if not primary_nozzle:
- return {"primary_nozzle": "A1"}
- if style == "SINGLE":
- return {"primary_nozzle": primary_nozzle}
- if not front_right_nozzle:
- return {
- "primary_nozzle": primary_nozzle,
- "front_right_nozzle": PRIMARY_NOZZLE_TO_ENDING_NOZZLE_MAP[
- primary_nozzle
- ][style],
- }
- return {
- "primary_nozzle": primary_nozzle,
- "front_right_nozzle": front_right_nozzle,
- }
+ return await _available_for_nozzle_layout(
+ channels, style, primary_nozzle, front_right_nozzle
+ )
async def pick_up_tip(
self,
@@ -143,6 +178,7 @@ async def pick_up_tip(
presses=None,
increment=None,
)
+ await self.verify_tip_presence(pipette_id, TipPresenceStatus.PRESENT)
self._hardware_api.set_current_tiprack_diameter(
mount=hw_mount,
@@ -172,6 +208,7 @@ async def drop_tip(self, pipette_id: str, home_after: Optional[bool]) -> None:
kwargs = {}
await self._hardware_api.drop_tip(mount=hw_mount, **kwargs)
+ await self.verify_tip_presence(pipette_id, TipPresenceStatus.ABSENT)
async def add_tip(self, pipette_id: str, tip: TipGeometry) -> None:
"""Tell the Hardware API that a tip is attached."""
@@ -189,54 +226,56 @@ async def add_tip(self, pipette_id: str, tip: TipGeometry) -> None:
tip_volume=tip.volume,
)
+ async def get_tip_presence(self, pipette_id: str) -> TipPresenceStatus:
+ """Get the tip presence status of the pipette."""
+ try:
+ ot3api = ensure_ot3_hardware(hardware_api=self._hardware_api)
-class VirtualTipHandler(TipHandler):
- """Pick up and drop tips, using a virtual pipette."""
+ hw_mount = self._state_view.pipettes.get_mount(pipette_id).to_hw_mount()
- def __init__(self, state_view: StateView) -> None:
- self._state_view = state_view
+ status = await ot3api.get_tip_presence_status(hw_mount)
+ return TipPresenceStatus.from_hw_state(status)
+ except HardwareNotSupportedError:
+ # Tip presence sensing is not supported on the OT2
+ return TipPresenceStatus.UNKNOWN
- async def available_for_nozzle_layout(
+ async def verify_tip_presence(
self,
pipette_id: str,
- style: str,
- primary_nozzle: Optional[str] = None,
- front_right_nozzle: Optional[str] = None,
- ) -> Dict[str, str]:
- """Check nozzle layout is compatible with the pipette."""
- if self._state_view.pipettes.get_attached_tip(pipette_id):
- raise CommandPreconditionViolated(
- message=f"Cannot configure nozzle layout of {str(self)} while it has tips attached."
- )
- channels = self._state_view.pipettes.get_channels(pipette_id)
- if channels == 1:
- raise CommandPreconditionViolated(
- message=f"Cannot configure nozzle layout with a {channels} channel pipette."
- )
- if style == "EMPTY":
- return {}
- if style == "ROW" and channels == 8:
- raise CommandParameterLimitViolated(
- command_name="configure_nozzle_layout",
- parameter_name="RowNozzleLayout",
- limit_statement="RowNozzleLayout is incompatible with {channels} channel pipettes.",
- actual_value=str(primary_nozzle),
+ expected: TipPresenceStatus,
+ follow_singular_sensor: Optional[InstrumentProbeType] = None,
+ ) -> None:
+ """Verify the expecterd tip presence status of the pipette.
+
+ This function will raise an exception if the specified tip presence status
+ isn't matched.
+ """
+ try:
+ ot3api = ensure_ot3_hardware(hardware_api=self._hardware_api)
+ hw_mount = self._state_view.pipettes.get_mount(pipette_id).to_hw_mount()
+ await ot3api.verify_tip_presence(
+ hw_mount, expected.to_hw_state(), follow_singular_sensor
)
- if not primary_nozzle:
- return {"primary_nozzle": "A1"}
- if style == "SINGLE":
- return {"primary_nozzle": primary_nozzle}
- if not front_right_nozzle:
- return {
- "primary_nozzle": primary_nozzle,
- "front_right_nozzle": PRIMARY_NOZZLE_TO_ENDING_NOZZLE_MAP[
- primary_nozzle
- ][style],
- }
- return {
- "primary_nozzle": primary_nozzle,
- "front_right_nozzle": front_right_nozzle,
- }
+ except HardwareNotSupportedError:
+ # Tip presence sensing is not supported on the OT2
+ pass
+ except FailedTipStateCheck as e:
+ if expected == TipPresenceStatus.ABSENT:
+ raise TipAttachedError(wrapping=[PythonException(e)])
+ elif expected == TipPresenceStatus.PRESENT:
+ raise TipNotAttachedError(wrapping=[PythonException(e)])
+ else:
+ raise ProtocolEngineError(
+ message="Unknown tip status in tip status check",
+ wrapping=[PythonException(e)],
+ )
+
+
+class VirtualTipHandler(TipHandler):
+ """Pick up and drop tips, using a virtual pipette."""
+
+ def __init__(self, state_view: StateView) -> None:
+ self._state_view = state_view
async def pick_up_tip(
self,
@@ -262,6 +301,23 @@ async def pick_up_tip(
return nominal_tip_geometry
+ async def available_for_nozzle_layout(
+ self,
+ pipette_id: str,
+ style: str,
+ primary_nozzle: Optional[str] = None,
+ front_right_nozzle: Optional[str] = None,
+ ) -> Dict[str, str]:
+ """Returns configuration for nozzle layout to pass to configure_nozzle_layout."""
+ if self._state_view.pipettes.get_attached_tip(pipette_id):
+ raise CommandPreconditionViolated(
+ message=f"Cannot configure nozzle layout of {str(self)} while it has tips attached."
+ )
+ channels = self._state_view.pipettes.get_channels(pipette_id)
+ return await _available_for_nozzle_layout(
+ channels, style, primary_nozzle, front_right_nozzle
+ )
+
async def drop_tip(
self,
pipette_id: str,
@@ -283,6 +339,25 @@ async def add_tip(self, pipette_id: str, tip: TipGeometry) -> None:
"""
assert False, "TipHandler.add_tip should not be used with virtual pipettes"
+ async def verify_tip_presence(
+ self,
+ pipette_id: str,
+ expected: TipPresenceStatus,
+ follow_singular_sensor: Optional[InstrumentProbeType] = None,
+ ) -> None:
+ """Verify tip presence.
+
+ This should not be called when using virtual pipettes.
+ """
+
+ async def get_tip_presence(self, pipette_id: str) -> TipPresenceStatus:
+ """Get tip presence.
+
+ This queries the physical machine's sensors and should not be
+ called on a virtual pipette.
+ """
+ raise RuntimeError("Do not call VirtualTipHandler.get_tip_presence")
+
def create_tip_handler(
state_view: StateView, hardware_api: HardwareControlAPI
diff --git a/api/src/opentrons/protocol_engine/notes/__init__.py b/api/src/opentrons/protocol_engine/notes/__init__.py
new file mode 100644
index 00000000000..f5b1d8c1a2a
--- /dev/null
+++ b/api/src/opentrons/protocol_engine/notes/__init__.py
@@ -0,0 +1,5 @@
+"""Protocol engine notes module."""
+
+from .notes import NoteKind, CommandNote, CommandNoteAdder, CommandNoteTracker
+
+__all__ = ["NoteKind", "CommandNote", "CommandNoteAdder", "CommandNoteTracker"]
diff --git a/api/src/opentrons/protocol_engine/notes/notes.py b/api/src/opentrons/protocol_engine/notes/notes.py
new file mode 100644
index 00000000000..cf381aa4a68
--- /dev/null
+++ b/api/src/opentrons/protocol_engine/notes/notes.py
@@ -0,0 +1,42 @@
+"""Definitions of data and interface shapes for notes."""
+from typing import Union, Literal, Protocol, List
+from pydantic import BaseModel, Field
+
+NoteKind = Union[Literal["warning", "information"], str]
+
+
+class CommandNote(BaseModel):
+ """A note about a command's execution or dispatch."""
+
+ noteKind: NoteKind = Field(
+ ...,
+ description="The kind of note this is. Only the literal possibilities should be"
+ " relied upon programmatically.",
+ )
+ shortMessage: str = Field(
+ ...,
+ description="The accompanying human-readable short message (suitable for display in a single line)",
+ )
+ longMessage: str = Field(
+ ...,
+ description="A longer message that may contain newlines and formatting characters describing the note.",
+ )
+ source: str = Field(
+ ..., description="An identifier for the party that created the note"
+ )
+
+
+class CommandNoteAdder(Protocol):
+ """The shape of a function that something can use to add a command note."""
+
+ def __call__(self, note: CommandNote) -> None:
+ """When called, this function should add the passed Note to some list."""
+ ...
+
+
+class CommandNoteTracker(CommandNoteAdder, Protocol):
+ """The shape of a class that can track notes."""
+
+ def get_notes(self) -> List[CommandNote]:
+ """When called, should return all notes previously added with __call__."""
+ ...
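A minimal in-memory implementation satisfying both protocols might look like this (illustrative sketch, not part of this change):

```python
# Illustrative in-memory CommandNoteTracker: __call__ collects notes (making it a
# valid CommandNoteAdder), and get_notes returns everything collected so far.
from typing import List

from opentrons.protocol_engine.notes import CommandNote


class InMemoryCommandNoteTracker:
    def __init__(self) -> None:
        self._notes: List[CommandNote] = []

    def __call__(self, note: CommandNote) -> None:
        self._notes.append(note)

    def get_notes(self) -> List[CommandNote]:
        return list(self._notes)


tracker = InMemoryCommandNoteTracker()
tracker(
    CommandNote(
        noteKind="warning",
        shortMessage="Aspirate clamped to 20.0 µL",
        longMessage="Requested volume exceeded available volume by a rounding error.",
        source="execution",
    )
)
assert len(tracker.get_notes()) == 1
```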
diff --git a/api/src/opentrons/protocol_engine/protocol_engine.py b/api/src/opentrons/protocol_engine/protocol_engine.py
index 857d787dcd4..0c4f2c4b670 100644
--- a/api/src/opentrons/protocol_engine/protocol_engine.py
+++ b/api/src/opentrons/protocol_engine/protocol_engine.py
@@ -2,6 +2,11 @@
from contextlib import AsyncExitStack
from logging import getLogger
from typing import Dict, Optional, Union
+from opentrons.protocol_engine.actions.actions import ResumeFromRecoveryAction
+from opentrons.protocol_engine.error_recovery_policy import (
+ ErrorRecoveryPolicy,
+ error_recovery_by_ff,
+)
from opentrons.protocols.models import LabwareDefinition
from opentrons.hardware_control import HardwareControlAPI
@@ -12,7 +17,7 @@
EnumeratedError,
)
-from .errors import ProtocolCommandFailedError, ErrorOccurrence
+from .errors import ProtocolCommandFailedError, ErrorOccurrence, CommandNotAllowedError
from .errors.exceptions import EStopActivatedError
from . import commands, slot_standardization
from .resources import ModelUtils, ModuleDataProvider
@@ -24,6 +29,8 @@
Liquid,
HexColor,
PostRunHardwareState,
+ DeckConfigurationType,
+ AddressableAreaLocation,
)
from .execution import (
QueueWorker,
@@ -45,11 +52,11 @@
AddLabwareOffsetAction,
AddLabwareDefinitionAction,
AddLiquidAction,
+ AddAddressableAreaAction,
AddModuleAction,
HardwareStoppedAction,
ResetTipsAction,
SetPipetteMovementSpeedAction,
- FailCommandAction,
)
@@ -86,6 +93,7 @@ def __init__(
hardware_stopper: Optional[HardwareStopper] = None,
door_watcher: Optional[DoorWatcher] = None,
module_data_provider: Optional[ModuleDataProvider] = None,
+ error_recovery_policy: ErrorRecoveryPolicy = error_recovery_by_ff,
) -> None:
"""Initialize a ProtocolEngine instance.
@@ -109,6 +117,7 @@ def __init__(
hardware_api=hardware_api,
state_store=self._state_store,
action_dispatcher=self._action_dispatcher,
+ error_recovery_policy=error_recovery_policy,
)
self._hardware_stopper = hardware_stopper or HardwareStopper(
hardware_api=hardware_api,
@@ -133,13 +142,13 @@ def add_plugin(self, plugin: AbstractPlugin) -> None:
"""Add a plugin to the engine to customize behavior."""
self._plugin_starter.start(plugin)
- def play(self) -> None:
+ def play(self, deck_configuration: Optional[DeckConfigurationType] = None) -> None:
"""Start or resume executing commands in the queue."""
requested_at = self._model_utils.get_timestamp()
# TODO(mc, 2021-08-05): if starting, ensure plungers motors are
# homed if necessary
action = self._state_store.commands.validate_action_allowed(
- PlayAction(requested_at=requested_at)
+ PlayAction(requested_at=requested_at, deck_configuration=deck_configuration)
)
self._action_dispatcher.dispatch(action)
@@ -148,15 +157,28 @@ def play(self) -> None:
else:
self._hardware_api.resume(HardwarePauseType.PAUSE)
- def pause(self) -> None:
- """Pause executing commands in the queue."""
+ def request_pause(self) -> None:
+ """Make command execution pause soon.
+
+ This will try to pause in the middle of the ongoing command, if there is one.
+ Otherwise, whenever the next command begins, the pause will happen then.
+ """
action = self._state_store.commands.validate_action_allowed(
PauseAction(source=PauseSource.CLIENT)
)
self._action_dispatcher.dispatch(action)
self._hardware_api.pause(HardwarePauseType.PAUSE)
- def add_command(self, request: commands.CommandCreate) -> commands.Command:
+ def resume_from_recovery(self) -> None:
+ """Resume normal protocol execution after the engine was `AWAITING_RECOVERY`."""
+ action = self._state_store.commands.validate_action_allowed(
+ ResumeFromRecoveryAction()
+ )
+ self._action_dispatcher.dispatch(action)
+
+ def add_command(
+ self, request: commands.CommandCreate, failed_command_id: Optional[str] = None
+ ) -> commands.Command:
"""Add a command to the `ProtocolEngine`'s queue.
Arguments:
@@ -171,16 +193,29 @@ def add_command(self, request: commands.CommandCreate) -> commands.Command:
but the engine was not idle or paused.
RunStoppedError: the run has been stopped, so no new commands
may be added.
+ CommandNotAllowedError: the request specified a failed command id
+ with a non fixit command.
"""
request = slot_standardization.standardize_command(
request, self.state_view.config.robot_type
)
+ if failed_command_id and request.intent != commands.CommandIntent.FIXIT:
+ raise CommandNotAllowedError(
+ "failed command id should be supplied with a FIXIT command."
+ )
+
command_id = self._model_utils.generate_id()
- request_hash = commands.hash_command_params(
- create=request,
- last_hash=self._state_store.commands.get_latest_command_hash(),
- )
+ if request.intent in (
+ commands.CommandIntent.SETUP,
+ commands.CommandIntent.FIXIT,
+ ):
+ request_hash = None
+ else:
+ request_hash = commands.hash_protocol_command_params(
+ create=request,
+ last_hash=self._state_store.commands.get_latest_protocol_command_hash(),
+ )
action = self.state_view.commands.validate_action_allowed(
QueueCommandAction(
@@ -188,6 +223,7 @@ def add_command(self, request: commands.CommandCreate) -> commands.Command:
request_hash=request_hash,
command_id=command_id,
created_at=self._model_utils.get_timestamp(),
+ failed_command_id=failed_command_id,
)
)
self._action_dispatcher.dispatch(action)
@@ -216,7 +252,10 @@ async def add_and_execute_command(
the command in state.
Returns:
- The command. If the command completed, it will be succeeded or failed.
+ The command.
+
+ If the command completed, it will be succeeded or failed.
+
If the engine was stopped before it reached the command,
the command will be queued.
"""
@@ -224,69 +263,85 @@ async def add_and_execute_command(
await self.wait_for_command(command.id)
return self._state_store.commands.get(command.id)
- def estop(self, maintenance_run: bool) -> None:
- """Signal to the engine that an estop event occurred.
+ async def add_and_execute_command_wait_for_recovery(
+ self, request: commands.CommandCreate
+ ) -> commands.Command:
+ """Like `add_and_execute_command()`, except wait for error recovery.
- If there are any queued commands for the engine, they will be marked
- as failed due to the estop event. If there aren't any queued commands
- *and* this is a maintenance run (which has commands queued one-by-one),
- a series of actions will mark the engine as Stopped. In either case the
- queue worker will be deactivated; the primary difference is that the former
- case will expect the protocol runner to `finish()` the engine, whereas the
- maintenance run will be put into a state wherein the engine can be discarded.
+ Unlike `add_and_execute_command()`, if the command fails, this will not
+ immediately return the failed command. Instead, if the error is recoverable,
+ it will wait until error recovery has completed (e.g. when some other task
+ calls `self.resume_from_recovery()`).
+
+ Returns:
+ The command.
+
+ If the command completed, it will be succeeded or failed. If it failed
+ and then its failure was recovered from, it will still be failed.
+
+ If the engine was stopped before it reached the command,
+ the command will be queued.
"""
- if self._state_store.commands.get_is_stopped():
- return
- current_id = (
- self._state_store.commands.state.running_command_id
- or self._state_store.commands.state.queued_command_ids.head(None)
+ queued_command = self.add_command(request)
+ await self.wait_for_command(command_id=queued_command.id)
+ completed_command = self._state_store.commands.get(queued_command.id)
+ await self._state_store.wait_for_not(
+ self.state_view.commands.get_recovery_in_progress_for_command,
+ queued_command.id,
)
+ return completed_command
- if current_id is not None:
- fail_action = FailCommandAction(
- command_id=current_id,
- error_id=self._model_utils.generate_id(),
- failed_at=self._model_utils.get_timestamp(),
- error=EStopActivatedError(message="Estop Activated"),
- )
- self._action_dispatcher.dispatch(fail_action)
-
- # In the case where the running command was a setup command - check if there
- # are any pending *run* commands and, if so, clear them all
- current_id = self._state_store.commands.state.queued_command_ids.head(None)
- if current_id is not None:
- fail_action = FailCommandAction(
- command_id=current_id,
- error_id=self._model_utils.generate_id(),
- failed_at=self._model_utils.get_timestamp(),
- error=EStopActivatedError(message="Estop Activated"),
- )
- self._action_dispatcher.dispatch(fail_action)
- self._queue_worker.cancel()
- elif maintenance_run:
- stop_action = self._state_store.commands.validate_action_allowed(
+ def estop(self) -> None:
+ """Signal to the engine that an E-stop event occurred.
+
+ If an estop happens while the robot is moving, lower layers physically stop
+ motion and raise the event as an exception, which fails the Protocol Engine
+ command. No action from the `ProtocolEngine` caller is needed to handle that.
+
+ However, if an estop happens in between commands, or in the middle of
+ a command like `comment` or `waitForDuration` that doesn't access the hardware,
+ `ProtocolEngine` needs to be told about it so it can interrupt the command
+ and stop executing any more. This method is how to do that.
+
+ This acts roughly like `request_stop()`. After calling this, you should call
+ `finish()` with an EStopActivatedError.
+ """
+ try:
+ action = self._state_store.commands.validate_action_allowed(
StopAction(from_estop=True)
)
- self._action_dispatcher.dispatch(stop_action)
- hardware_stop_action = HardwareStoppedAction(
- completed_at=self._model_utils.get_timestamp(),
- finish_error_details=FinishErrorDetails(
- error=EStopActivatedError(message="Estop Activated"),
- error_id=self._model_utils.generate_id(),
- created_at=self._model_utils.get_timestamp(),
- ),
+ except Exception: # todo(mm, 2024-04-16): Catch a more specific type.
+ # This is likely called from some hardware API callback that doesn't care
+ # about ProtocolEngine lifecycle or what methods are valid to call at what
+ # times. So it makes more sense for us to no-op here than to propagate this
+ # as an error.
+ _log.info(
+ "ProtocolEngine cannot handle E-stop event right now. Ignoring it.",
+ exc_info=True,
)
- self._action_dispatcher.dispatch(hardware_stop_action)
- self._queue_worker.cancel()
- else:
- _log.info("estop pressed before protocol was started, taking no action.")
+ return
+ self._action_dispatcher.dispatch(action)
+ # self._queue_worker.cancel() will try to interrupt any ongoing command.
+ # Unfortunately, if it's a hardware command, this interruption will race
+ # against the E-stop exception propagating up from lower layers. But we need to
+ # do this because we want to make sure non-hardware commands, like
+ # `waitForDuration`, are also interrupted.
+ self._queue_worker.cancel()
+ # Unlike self.request_stop(), we don't need to do
+ # self._hardware_api.cancel_execution_and_running_tasks(). Since this was an
+ # E-stop event, the hardware API already knows.
+
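The docstring above prescribes a specific follow-up; a caller-side sketch of that sequence (assumed wiring, outside this diff):

```python
# Sketch of the E-stop handling sequence described in estop()'s docstring: notify the
# engine, then finish() with an EStopActivatedError so the error lands in the run
# summary. The finish(error=...) keyword is assumed from the surrounding code.
from opentrons.protocol_engine.errors.exceptions import EStopActivatedError


async def handle_estop(engine: "ProtocolEngine") -> None:
    engine.estop()  # interrupts the running command and cancels queued ones
    await engine.finish(error=EStopActivatedError())
```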
+ async def request_stop(self) -> None:
+ """Make command execution stop soon.
- async def stop(self) -> None:
- """Stop execution immediately, halting all motion and cancelling future commands.
+ This will try to interrupt the ongoing command, if there is one. Future commands
+ are canceled. However, things may not have settled by the time this method
+ returns; the last command may still be running.
- After an engine has been `stop`'ed, it cannot be restarted.
+ After a stop has been requested, the engine cannot be restarted.
- After a `stop`, you must still call `finish` to give the engine a chance
+ After a stop request, you must still call `finish` to give the engine a chance
to clean up resources and propagate errors.
"""
action = self._state_store.commands.validate_action_allowed(StopAction())
@@ -305,12 +360,12 @@ async def stop(self) -> None:
async def wait_until_complete(self) -> None:
"""Wait until there are no more commands to execute.
- Raises:
- CommandExecutionFailedError: if any protocol command failed.
+ If a command encountered a fatal error, it's raised as an exception.
"""
await self._state_store.wait_for(
condition=self._state_store.commands.get_all_commands_final
)
+ self._state_store.commands.raise_fatal_command_error()
async def finish(
self,
@@ -319,14 +374,20 @@ async def finish(
set_run_status: bool = True,
post_run_hardware_state: PostRunHardwareState = PostRunHardwareState.HOME_AND_STAY_ENGAGED,
) -> None:
- """Gracefully finish using the ProtocolEngine, waiting for it to become idle.
+ """Finish using the `ProtocolEngine`.
- The engine will finish executing its current command (if any),
- and then shut down. After an engine has been `finished`'ed, it cannot
- be restarted.
+ This does a few things:
+
+ 1. It may do post-run actions like homing and dropping tips. This depends on the
+ arguments passed as well as heuristics based on the history of the engine.
+ 2. It waits for the engine to be done controlling the robot's hardware.
+ 3. It releases internal resources, like background tasks.
+
+ It's safe to call `finish()` multiple times. After you call `finish()`,
+ the engine can't be restarted.
This method should not raise. If any exceptions happened during execution that were not
- properly caught by the CommandExecutor, or if any exceptions happen during this
+ properly caught by `ProtocolEngine` internals, or if any exceptions happen during this
`finish()` call, they should be saved as `.state_view.get_summary().errors`.
Arguments:
@@ -340,12 +401,11 @@ async def finish(
if self._state_store.commands.state.stopped_by_estop:
# This handles the case where the E-stop was pressed while we were *not* in the middle
# of some hardware interaction that would raise it as an exception. For example, imagine
- # we were paused between two commands, or imagine we were executing a very long run of
- # comment commands.
+ # we were paused between two commands, or imagine we were executing a waitForDuration.
drop_tips_after_run = False
post_run_hardware_state = PostRunHardwareState.DISENGAGE_IN_PLACE
if error is None:
- error = EStopActivatedError(message="Estop was activated during a run")
+ error = EStopActivatedError()
if error:
# If the run had an error, check if that error indicates an E-stop.
@@ -480,6 +540,13 @@ def add_liquid(
self._action_dispatcher.dispatch(AddLiquidAction(liquid=liquid))
return liquid
+ def add_addressable_area(self, addressable_area_name: str) -> None:
+ """Add an addressable area to state."""
+ area = AddressableAreaLocation(addressableAreaName=addressable_area_name)
+ self._action_dispatcher.dispatch(
+ AddAddressableAreaAction(addressable_area=area)
+ )
+
def reset_tips(self, labware_id: str) -> None:
"""Reset the tip state of a given labware."""
# TODO(mm, 2023-03-10): Safely raise an error if the given labware isn't a
diff --git a/api/src/opentrons/protocol_engine/resources/deck_configuration_provider.py b/api/src/opentrons/protocol_engine/resources/deck_configuration_provider.py
index cc24a572a70..648bd4f4484 100644
--- a/api/src/opentrons/protocol_engine/resources/deck_configuration_provider.py
+++ b/api/src/opentrons/protocol_engine/resources/deck_configuration_provider.py
@@ -1,72 +1,132 @@
"""Deck configuration resource provider."""
-from dataclasses import dataclass
-from typing import List, Set, Dict
+from typing import List, Set, Tuple
-from opentrons_shared_data.deck.dev_types import DeckDefinitionV4, AddressableArea
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5, CutoutFixture
+from opentrons.types import DeckSlotName
-from ..errors import FixtureDoesNotExistError
+from ..types import (
+ AddressableArea,
+ AreaType,
+ PotentialCutoutFixture,
+ DeckPoint,
+ Dimensions,
+ AddressableOffsetVector,
+)
+from ..errors import (
+ CutoutDoesNotExistError,
+ FixtureDoesNotExistError,
+ AddressableAreaDoesNotExistError,
+)
-@dataclass(frozen=True)
-class DeckCutoutFixture:
- """Basic cutout fixture data class."""
+def get_cutout_position(cutout_id: str, deck_definition: DeckDefinitionV5) -> DeckPoint:
+ """Get the base position of a cutout on the deck."""
+ for cutout in deck_definition["locations"]["cutouts"]:
+ if cutout_id == cutout["id"]:
+ position = cutout["position"]
+ return DeckPoint(x=position[0], y=position[1], z=position[2])
+ else:
+ raise CutoutDoesNotExistError(f"Could not find cutout with name {cutout_id}")
- name: str
- # TODO(jbl 10-30-2023) this is in reference to the cutout ID that is supplied in mayMountTo in the definition.
- # We might want to make this not a string.
- cutout_slot_location: str
+def get_cutout_fixture(
+ cutout_fixture_id: str, deck_definition: DeckDefinitionV5
+) -> CutoutFixture:
+ """Gets cutout fixture from deck that matches the cutout fixture ID provided."""
+ for cutout_fixture in deck_definition["cutoutFixtures"]:
+ if cutout_fixture["id"] == cutout_fixture_id:
+ return cutout_fixture
+ raise FixtureDoesNotExistError(
+ f"Could not find cutout fixture with name {cutout_fixture_id}"
+ )
-class DeckConfigurationProvider:
- """Provider class to ingest deck configuration data and retrieve relevant deck definition data."""
- _configuration: Dict[str, DeckCutoutFixture]
+def get_provided_addressable_area_names(
+ cutout_fixture_id: str, cutout_id: str, deck_definition: DeckDefinitionV5
+) -> List[str]:
+ """Gets a list of the addressable areas provided by the cutout fixture on the cutout."""
+ cutout_fixture = get_cutout_fixture(cutout_fixture_id, deck_definition)
+ try:
+ return cutout_fixture["providesAddressableAreas"][cutout_id]
+ except KeyError:
+ return []
- def __init__(
- self,
- deck_definition: DeckDefinitionV4,
- deck_configuration: List[DeckCutoutFixture],
- ) -> None:
- """Initialize a DeckDataProvider."""
- self._deck_definition = deck_definition
- self._configuration = {
- cutout_fixture.cutout_slot_location: cutout_fixture
- for cutout_fixture in deck_configuration
- }
- def get_addressable_areas_for_cutout_fixture(
- self, cutout_fixture_id: str, cutout_id: str
- ) -> Set[str]:
- """Get the allowable addressable areas for a cutout fixture loaded on a specific cutout slot."""
- for cutout_fixture in self._deck_definition["cutoutFixtures"]:
- if cutout_fixture_id == cutout_fixture["id"]:
- return set(
- cutout_fixture["providesAddressableAreas"].get(cutout_id, [])
- )
+def get_addressable_area_display_name(
+ addressable_area_name: str, deck_definition: DeckDefinitionV5
+) -> str:
+ """Get the display name for an addressable area name."""
+ for addressable_area in deck_definition["locations"]["addressableAreas"]:
+ if addressable_area["id"] == addressable_area_name:
+ return addressable_area["displayName"]
+ raise AddressableAreaDoesNotExistError(
+ f"Could not find addressable area with name {addressable_area_name}"
+ )
+
- raise FixtureDoesNotExistError(
- f'Could not resolve "{cutout_fixture_id}" to a fixture.'
+def get_potential_cutout_fixtures(
+ addressable_area_name: str, deck_definition: DeckDefinitionV5
+) -> Tuple[str, Set[PotentialCutoutFixture]]:
+ """Given an addressable area name, gets the cutout ID associated with it and a set of potential fixtures."""
+ potential_fixtures = []
+ for cutout_fixture in deck_definition["cutoutFixtures"]:
+ for cutout_id, provided_areas in cutout_fixture[
+ "providesAddressableAreas"
+ ].items():
+ if addressable_area_name in provided_areas:
+ potential_fixtures.append(
+ PotentialCutoutFixture(
+ cutout_id=cutout_id,
+ cutout_fixture_id=cutout_fixture["id"],
+ provided_addressable_areas=frozenset(provided_areas),
+ )
+ )
+    # The following logic assumes that every addressable area can only go on one cutout,
+    # though it may have multiple cutout fixtures that supply it on that cutout. If this
+    # assumption changes, some of the following logic will have to be adjusted.
+ if not potential_fixtures:
+ raise AddressableAreaDoesNotExistError(
+ f"{addressable_area_name} is not provided by any cutout fixtures"
+ f" in deck definition {deck_definition['otId']}"
)
+ cutout_id = potential_fixtures[0].cutout_id
+ assert all(cutout_id == fixture.cutout_id for fixture in potential_fixtures)
+ return cutout_id, set(potential_fixtures)
+
- def get_configured_addressable_areas(self) -> Set[str]:
- """Get a list of all addressable areas the robot is configured for."""
- configured_addressable_areas = set()
- for cutout_id, cutout_fixture in self._configuration.items():
- addressable_areas = self.get_addressable_areas_for_cutout_fixture(
- cutout_fixture.name, cutout_id
+def get_addressable_area_from_name(
+ addressable_area_name: str,
+ cutout_position: DeckPoint,
+ base_slot: DeckSlotName,
+ deck_definition: DeckDefinitionV5,
+) -> AddressableArea:
+ """Given a name and a cutout position, get an addressable area on the deck."""
+ for addressable_area in deck_definition["locations"]["addressableAreas"]:
+ if addressable_area["id"] == addressable_area_name:
+ area_offset = addressable_area["offsetFromCutoutFixture"]
+ position = AddressableOffsetVector(
+ x=area_offset[0] + cutout_position.x,
+ y=area_offset[1] + cutout_position.y,
+ z=area_offset[2] + cutout_position.z,
)
- configured_addressable_areas.update(addressable_areas)
- return configured_addressable_areas
-
- def get_addressable_area_definition(
- self, addressable_area_name: str
- ) -> AddressableArea:
- """Get the addressable area definition from the relevant deck definition."""
- for addressable_area in self._deck_definition["locations"]["addressableAreas"]:
- if addressable_area_name == addressable_area["id"]:
- return addressable_area
-
- raise FixtureDoesNotExistError(
- f'Could not resolve "{addressable_area_name}" to a fixture.'
- )
+ bounding_box = Dimensions(
+ x=addressable_area["boundingBox"]["xDimension"],
+ y=addressable_area["boundingBox"]["yDimension"],
+ z=addressable_area["boundingBox"]["zDimension"],
+ )
+
+ return AddressableArea(
+ area_name=addressable_area["id"],
+ area_type=AreaType(addressable_area["areaType"]),
+ base_slot=base_slot,
+ display_name=addressable_area["displayName"],
+ bounding_box=bounding_box,
+ position=position,
+ compatible_module_types=addressable_area.get(
+ "compatibleModuleTypes", []
+ ),
+ )
+ raise AddressableAreaDoesNotExistError(
+ f"Could not find addressable area with name {addressable_area_name}"
+ )
diff --git a/api/src/opentrons/protocol_engine/resources/deck_data_provider.py b/api/src/opentrons/protocol_engine/resources/deck_data_provider.py
index 6098c2f4301..017fc58f552 100644
--- a/api/src/opentrons/protocol_engine/resources/deck_data_provider.py
+++ b/api/src/opentrons/protocol_engine/resources/deck_data_provider.py
@@ -9,7 +9,7 @@
load as load_deck,
DEFAULT_DECK_DEFINITION_VERSION,
)
-from opentrons_shared_data.deck.dev_types import DeckDefinitionV4
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5
from opentrons.protocols.models import LabwareDefinition
from opentrons.types import DeckSlotName
@@ -39,10 +39,10 @@ def __init__(
self._deck_type = deck_type
self._labware_data = labware_data or LabwareDataProvider()
- async def get_deck_definition(self) -> DeckDefinitionV4:
+ async def get_deck_definition(self) -> DeckDefinitionV5:
"""Get a labware definition given the labware's identification."""
- def sync() -> DeckDefinitionV4:
+ def sync() -> DeckDefinitionV5:
return load_deck(
name=self._deck_type.value, version=DEFAULT_DECK_DEFINITION_VERSION
)
@@ -51,7 +51,7 @@ def sync() -> DeckDefinitionV4:
async def get_deck_fixed_labware(
self,
- deck_definition: DeckDefinitionV4,
+ deck_definition: DeckDefinitionV5,
) -> List[DeckFixedLabware]:
"""Get a list of all labware fixtures from a given deck definition."""
labware: List[DeckFixedLabware] = []
diff --git a/api/src/opentrons/protocol_engine/resources/fixture_validation.py b/api/src/opentrons/protocol_engine/resources/fixture_validation.py
index 3eed2f90b22..9807cc6beaa 100644
--- a/api/src/opentrons/protocol_engine/resources/fixture_validation.py
+++ b/api/src/opentrons/protocol_engine/resources/fixture_validation.py
@@ -1,69 +1,48 @@
-"""Validation file for fixtures and addressable area reference checking functions."""
+"""Validation file for addressable area reference checking functions."""
-from typing import List
+from opentrons.types import DeckSlotName
-from opentrons_shared_data.deck.deck_definitions import Locations, CutoutFixture
-from opentrons.hardware_control.modules.types import ModuleModel, ModuleType
+def is_waste_chute(addressable_area_name: str) -> bool:
+ """Check if an addressable area is a Waste Chute."""
+ return addressable_area_name in {
+ "1ChannelWasteChute",
+ "8ChannelWasteChute",
+ "96ChannelWasteChute",
+ "gripperWasteChute",
+ }
-def validate_fixture_id(fixtureList: List[CutoutFixture], load_name: str) -> bool:
- """Check that the loaded fixture has an existing definition."""
- for fixture in fixtureList:
- if fixture.id == load_name:
- return True
- return False
+def is_gripper_waste_chute(addressable_area_name: str) -> bool:
+ """Check if an addressable area is a gripper-movement-compatible Waste Chute."""
+ return addressable_area_name == "gripperWasteChute"
-def validate_fixture_location_is_allowed(fixture: CutoutFixture, location: str) -> bool:
- """Validate that the fixture is allowed to load into the provided location according to the deck definitions."""
- return location in fixture.mayMountTo
+def is_drop_tip_waste_chute(addressable_area_name: str) -> bool:
+ """Check if an addressable area is a Waste Chute compatible for dropping tips."""
+ return addressable_area_name in {
+ "1ChannelWasteChute",
+ "8ChannelWasteChute",
+ "96ChannelWasteChute",
+ }
-def validate_is_wastechute(load_name: str) -> bool:
- """Check if a fixture is a Waste Chute."""
- return (
- load_name == "wasteChuteRightAdapterCovered"
- or load_name == "wasteChuteRightAdapterNoCover"
- or load_name == "stagingAreaSlotWithWasteChuteRightAdapterCovered"
- or load_name == "stagingAreaSlotWithWasteChuteRightAdapterNoCover"
- )
+def is_trash(addressable_area_name: str) -> bool:
+ """Check if an addressable area is a trash bin."""
+ return addressable_area_name in {"movableTrash", "fixedTrash", "shortFixedTrash"}
-def validate_module_is_compatible_with_fixture(
- locations: Locations, fixture: CutoutFixture, module: ModuleModel
-) -> bool:
- """Validate that the fixture allows the loading of a specified module."""
- module_name = ModuleType.from_model(module).name
- for key in fixture.providesAddressableAreas.keys():
- for area in fixture.providesAddressableAreas[key]:
- for l_area in locations.addressableAreas:
- if l_area.id == area:
- if l_area.compatibleModuleTypes is None:
- return False
- elif module_name in l_area.compatibleModuleTypes:
- return True
- return False
+def is_staging_slot(addressable_area_name: str) -> bool:
+ """Check if an addressable area is a staging area slot."""
+ return addressable_area_name in {"A4", "B4", "C4", "D4"}
-def validate_fixture_allows_drop_tip(
- locations: Locations, fixture: CutoutFixture
-) -> bool:
- """Validate that the fixture allows tips to be dropped in it's addressable areas."""
- for key in fixture.providesAddressableAreas.keys():
- for area in fixture.providesAddressableAreas[key]:
- for l_area in locations.addressableAreas:
- if l_area.id == area and l_area.ableToDropTips:
- return True
- return False
-
-def validate_fixture_allows_drop_labware(
- locations: Locations, fixture: CutoutFixture
-) -> bool:
- """Validate that the fixture allows labware to be dropped in it's addressable areas."""
- for key in fixture.providesAddressableAreas.keys():
- for area in fixture.providesAddressableAreas[key]:
- for l_area in locations.addressableAreas:
- if l_area.id == area and l_area.ableToDropLabware:
- return True
- return False
+def is_deck_slot(addressable_area_name: str) -> bool:
+ """Check if an addressable area is a deck slot (including staging area slots)."""
+ if is_staging_slot(addressable_area_name):
+ return True
+ try:
+ DeckSlotName.from_primitive(addressable_area_name)
+ except ValueError:
+ return False
+ return True
diff --git a/api/src/opentrons/protocol_engine/resources/ot3_validation.py b/api/src/opentrons/protocol_engine/resources/ot3_validation.py
index 8a555dd5f47..7b25bc35430 100644
--- a/api/src/opentrons/protocol_engine/resources/ot3_validation.py
+++ b/api/src/opentrons/protocol_engine/resources/ot3_validation.py
@@ -1,28 +1,21 @@
"""Validation file for protocol engine commandsot."""
from __future__ import annotations
-from typing import TYPE_CHECKING, Optional
+from typing import Optional
from opentrons.protocol_engine.errors import HardwareNotSupportedError
+from opentrons.hardware_control.protocols.types import FlexRobotType
-if TYPE_CHECKING:
- from opentrons.hardware_control.ot3api import OT3API
- from opentrons.hardware_control import HardwareControlAPI
+from opentrons.hardware_control import HardwareControlAPI, OT3HardwareControlAPI
def ensure_ot3_hardware(
- hardware_api: HardwareControlAPI, error_msg: Optional[str] = None
-) -> OT3API:
+ hardware_api: HardwareControlAPI,
+ error_msg: Optional[str] = None,
+) -> OT3HardwareControlAPI:
"""Validate that the HardwareControlAPI is of OT-3 instance."""
- try:
- from opentrons.hardware_control.ot3api import OT3API
- except ImportError as exception:
- raise HardwareNotSupportedError(
- error_msg or "This command is supported by OT-3 only."
- ) from exception
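+    # The runtime check is by reported robot type rather than isinstance(), so callers
+    # get the OT3HardwareControlAPI protocol without importing OT3API directly.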
+ if hardware_api.get_robot_type() == FlexRobotType:
+ return hardware_api # type: ignore
- if not isinstance(hardware_api, OT3API):
- raise HardwareNotSupportedError(
- error_msg or "This command is supported by OT-3 only."
- )
-
- return hardware_api
+ raise HardwareNotSupportedError(
+ error_msg or "This command is supported by OT-3 only."
+ )
diff --git a/api/src/opentrons/protocol_engine/resources/pipette_data_provider.py b/api/src/opentrons/protocol_engine/resources/pipette_data_provider.py
index 818566e3691..3357b7d591d 100644
--- a/api/src/opentrons/protocol_engine/resources/pipette_data_provider.py
+++ b/api/src/opentrons/protocol_engine/resources/pipette_data_provider.py
@@ -1,6 +1,7 @@
"""Pipette config data providers."""
from dataclasses import dataclass
-from typing import Dict, Optional
+from typing import Dict, Optional, Sequence
+import re
from opentrons_shared_data.pipette.dev_types import PipetteName, PipetteModel
from opentrons_shared_data.pipette import (
@@ -10,14 +11,39 @@
pipette_definition,
)
-
from opentrons.hardware_control.dev_types import PipetteDict
from opentrons.hardware_control.nozzle_manager import (
NozzleConfigurationManager,
NozzleMap,
)
+from ..errors.exceptions import InvalidLoadPipetteSpecsError
from ..types import FlowRates
+from ...types import Point
+
+_TIP_OVERLAP_VERSION_RE = re.compile(r"^v\d+$")
+
+
+def validate_and_default_tip_overlap_version(version_spec: Optional[str]) -> str:
+ """Validate and sanitize tip overlap versions for later consumption.
+
+ Something that comes out of this function will be of the correct format, but a given kind of
+ pipette may not have this version of data.
+ """
+ if version_spec is None:
+ return f"v{pipette_definition.TIP_OVERLAP_VERSION_MAXIMUM}"
+ valid = _TIP_OVERLAP_VERSION_RE.match(version_spec)
+ if not valid:
+ raise InvalidLoadPipetteSpecsError(
+ f"Tip overlap version specification {version_spec} is invalid."
+ )
+ try:
+ _ = int(version_spec[1:])
+ except ValueError:
+ raise InvalidLoadPipetteSpecsError(
+ f"Tip overlap version specification {version_spec} is invalid."
+ )
+ return version_spec
@dataclass(frozen=True)
@@ -36,6 +62,9 @@ class LoadedStaticPipetteData:
float, pipette_definition.SupportedTipsDefinition
]
nominal_tip_overlap: Dict[str, float]
+ nozzle_map: NozzleMap
+ back_left_corner_offset: Point
+ front_right_corner_offset: Point
class VirtualPipetteDataProvider:
@@ -59,16 +88,13 @@ def configure_virtual_pipette_nozzle_layout(
config = self._get_virtual_pipette_full_config_by_model_string(
pipette_model_string
)
- new_nozzle_manager = NozzleConfigurationManager.build_from_nozzlemap(
- config.nozzle_map,
- config.partial_tip_configurations.per_tip_pickup_current,
- )
- if back_left_nozzle and front_right_nozzle and starting_nozzle:
+ new_nozzle_manager = NozzleConfigurationManager.build_from_config(config)
+ if back_left_nozzle and front_right_nozzle:
new_nozzle_manager.update_nozzle_configuration(
back_left_nozzle, front_right_nozzle, starting_nozzle
)
self._nozzle_manager_layout_by_id[pipette_id] = new_nozzle_manager
- elif back_left_nozzle and front_right_nozzle and starting_nozzle:
+ elif back_left_nozzle and front_right_nozzle:
# Need to make sure that we pass all the right nozzles here.
self._nozzle_manager_layout_by_id[pipette_id].update_nozzle_configuration(
back_left_nozzle, front_right_nozzle, starting_nozzle
@@ -103,14 +129,14 @@ def get_nozzle_layout_for_pipette(self, pipette_id: str) -> NozzleMap:
return self._nozzle_manager_layout_by_id[pipette_id].current_configuration
def get_virtual_pipette_static_config_by_model_string(
- self, pipette_model_string: str, pipette_id: str
+ self, pipette_model_string: str, pipette_id: str, tip_overlap_version: str
) -> LoadedStaticPipetteData:
"""Get the config of a pipette when you know its model string (e.g. from state)."""
pipette_model = pipette_load_name.convert_pipette_model(
PipetteModel(pipette_model_string)
)
return self._get_virtual_pipette_static_config_by_model(
- pipette_model, pipette_id
+ pipette_model, pipette_id, tip_overlap_version
)
def _get_virtual_pipette_full_config_by_model_string(
@@ -127,7 +153,10 @@ def _get_virtual_pipette_full_config_by_model_string(
)
def _get_virtual_pipette_static_config_by_model(
- self, pipette_model: pipette_definition.PipetteModelVersionType, pipette_id: str
+ self,
+ pipette_model: pipette_definition.PipetteModelVersionType,
+ pipette_id: str,
+ tip_overlap_version: str,
) -> LoadedStaticPipetteData:
if pipette_id not in self._liquid_class_by_id:
self._liquid_class_by_id[pipette_id] = pip_types.LiquidClasses.default
@@ -150,6 +179,9 @@ def _get_virtual_pipette_static_config_by_model(
tip_type
]
+ nozzle_manager = NozzleConfigurationManager.build_from_config(config)
+ pip_back_left = config.pipette_bounding_box_offsets.back_left_corner
+ pip_front_right = config.pipette_bounding_box_offsets.front_right_corner
return LoadedStaticPipetteData(
model=str(pipette_model),
display_name=config.display_name,
@@ -169,23 +201,35 @@ def _get_virtual_pipette_static_config_by_model(
default_aspirate=tip_configuration.default_aspirate_flowrate.values_by_api_level,
default_dispense=tip_configuration.default_dispense_flowrate.values_by_api_level,
),
- nominal_tip_overlap=config.liquid_properties[
- liquid_class
- ].tip_overlap_dictionary,
+ nominal_tip_overlap=get_latest_tip_overlap_before_version(
+ config.liquid_properties[liquid_class].versioned_tip_overlap_dictionary,
+ tip_overlap_version,
+ ),
+ nozzle_map=nozzle_manager.current_configuration,
+ back_left_corner_offset=Point(
+ pip_back_left[0], pip_back_left[1], pip_back_left[2]
+ ),
+ front_right_corner_offset=Point(
+ pip_front_right[0], pip_front_right[1], pip_front_right[2]
+ ),
)
def get_virtual_pipette_static_config(
- self, pipette_name: PipetteName, pipette_id: str
+ self, pipette_name: PipetteName, pipette_id: str, tip_overlap_version: str
) -> LoadedStaticPipetteData:
"""Get the config for a virtual pipette, given only the pipette name."""
pipette_model = pipette_load_name.convert_pipette_name(pipette_name)
return self._get_virtual_pipette_static_config_by_model(
- pipette_model, pipette_id
+ pipette_model, pipette_id, tip_overlap_version
)
-def get_pipette_static_config(pipette_dict: PipetteDict) -> LoadedStaticPipetteData:
+def get_pipette_static_config(
+ pipette_dict: PipetteDict, tip_overlap_version: str
+) -> LoadedStaticPipetteData:
"""Get the config for a pipette, given the state/config object from the HW API."""
+ back_left_offset = pipette_dict["pipette_bounding_box_offsets"].back_left_corner
+ front_right_offset = pipette_dict["pipette_bounding_box_offsets"].front_right_corner
return LoadedStaticPipetteData(
model=pipette_dict["model"],
display_name=pipette_dict["display_name"],
@@ -200,9 +244,41 @@ def get_pipette_static_config(pipette_dict: PipetteDict) -> LoadedStaticPipetteD
tip_configuration_lookup_table={
k.value: v for k, v in pipette_dict["supported_tips"].items()
},
- nominal_tip_overlap=pipette_dict["tip_overlap"],
+ nominal_tip_overlap=get_latest_tip_overlap_before_version(
+ pipette_dict["versioned_tip_overlap"], tip_overlap_version
+ ),
# TODO(mc, 2023-02-28): these two values are not present in PipetteDict
# https://opentrons.atlassian.net/browse/RCORE-655
home_position=0,
nozzle_offset_z=0,
+ nozzle_map=pipette_dict["current_nozzle_map"],
+ back_left_corner_offset=Point(
+ back_left_offset[0], back_left_offset[1], back_left_offset[2]
+ ),
+ front_right_corner_offset=Point(
+ front_right_offset[0], front_right_offset[1], front_right_offset[2]
+ ),
)
+
+
+def get_latest_tip_overlap_before_version(
+ overlap: Dict[str, Dict[str, float]], version: str
+) -> Dict[str, float]:
+ """Get the latest tip overlap definitions that are equal or older than the version."""
+ # TODO: make this less awful
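+    # Illustrative example: with overlap data for {"v0", "v1", "v3"} and a requested
+    # version of "v2", the "v1" entry is returned; a request newer than every entry
+    # (e.g. "v5") returns the "v3" entry.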
+ def _numeric(versionstr: str) -> int:
+ return int(versionstr[1:])
+
+ def _latest(versions: Sequence[int], target: int) -> int:
+ last = 0
+ for version in versions:
+ if version > target:
+ return last
+ last = version
+ return last
+
+ numeric_target = _numeric(version)
+ numeric_versions = sorted([_numeric(k) for k in overlap.keys()])
+ found_numeric_version = _latest(numeric_versions, numeric_target)
+ found_version = f"v{found_numeric_version}"
+ return overlap[found_version]
diff --git a/api/src/opentrons/protocol_engine/slot_standardization.py b/api/src/opentrons/protocol_engine/slot_standardization.py
index 9b2e352393a..c4e733b3ca6 100644
--- a/api/src/opentrons/protocol_engine/slot_standardization.py
+++ b/api/src/opentrons/protocol_engine/slot_standardization.py
@@ -24,7 +24,7 @@
OFF_DECK_LOCATION,
DeckSlotLocation,
LabwareLocation,
- NonStackedLocation,
+ AddressableAreaLocation,
LabwareOffsetCreate,
ModuleLocation,
OnLabwareLocation,
@@ -124,21 +124,14 @@ def _standardize_labware_location(
if isinstance(original, DeckSlotLocation):
return _standardize_deck_slot_location(original, robot_type)
elif (
- isinstance(original, (ModuleLocation, OnLabwareLocation))
+ isinstance(
+ original, (ModuleLocation, OnLabwareLocation, AddressableAreaLocation)
+ )
or original == OFF_DECK_LOCATION
):
return original
-def _standardize_adapter_location(
- original: NonStackedLocation, robot_type: RobotType
-) -> NonStackedLocation:
- if isinstance(original, DeckSlotLocation):
- return _standardize_deck_slot_location(original, robot_type)
- elif isinstance(original, ModuleLocation) or original == OFF_DECK_LOCATION:
- return original
-
-
def _standardize_deck_slot_location(
original: DeckSlotLocation, robot_type: RobotType
) -> DeckSlotLocation:
diff --git a/api/src/opentrons/protocol_engine/state/__init__.py b/api/src/opentrons/protocol_engine/state/__init__.py
index 17afdc3ad28..cd6f1bb2b68 100644
--- a/api/src/opentrons/protocol_engine/state/__init__.py
+++ b/api/src/opentrons/protocol_engine/state/__init__.py
@@ -3,7 +3,13 @@
from .state import State, StateStore, StateView
from .state_summary import StateSummary
from .config import Config
-from .commands import CommandState, CommandView, CommandSlice, CurrentCommand
+from .commands import (
+ CommandState,
+ CommandView,
+ CommandSlice,
+ CurrentCommand,
+)
+from .command_history import CommandEntry
from .labware import LabwareState, LabwareView
from .pipettes import PipetteState, PipetteView, HardwarePipette
from .modules import ModuleState, ModuleView, HardwareModule
@@ -34,6 +40,7 @@
"CommandView",
"CommandSlice",
"CurrentCommand",
+ "CommandEntry",
# labware state and values
"LabwareState",
"LabwareView",
diff --git a/api/src/opentrons/protocol_engine/state/addressable_areas.py b/api/src/opentrons/protocol_engine/state/addressable_areas.py
new file mode 100644
index 00000000000..909beffbe86
--- /dev/null
+++ b/api/src/opentrons/protocol_engine/state/addressable_areas.py
@@ -0,0 +1,655 @@
+"""Basic addressable area data state and store."""
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Set, Union
+
+from opentrons_shared_data.robot.dev_types import RobotType
+from opentrons_shared_data.deck.dev_types import (
+ DeckDefinitionV5,
+ SlotDefV3,
+ CutoutFixture,
+)
+
+from opentrons.types import Point, DeckSlotName
+
+from ..commands import (
+ Command,
+ LoadLabwareResult,
+ LoadModuleResult,
+ MoveLabwareResult,
+ MoveToAddressableAreaResult,
+ MoveToAddressableAreaForDropTipResult,
+)
+from ..errors import (
+ IncompatibleAddressableAreaError,
+ AreaNotInDeckConfigurationError,
+ SlotDoesNotExistError,
+ AddressableAreaDoesNotExistError,
+ CutoutDoesNotExistError,
+)
+from ..resources import deck_configuration_provider
+from ..types import (
+ DeckSlotLocation,
+ AddressableAreaLocation,
+ AddressableArea,
+ PotentialCutoutFixture,
+ DeckConfigurationType,
+ Dimensions,
+)
+from ..actions import Action, SucceedCommandAction, PlayAction, AddAddressableAreaAction
+from .config import Config
+from .abstract_store import HasState, HandlesActions
+
+
+@dataclass
+class AddressableAreaState:
+ """State of all loaded addressable area resources."""
+
+ loaded_addressable_areas_by_name: Dict[str, AddressableArea]
+ """The addressable areas that have been loaded so far.
+
+ When `use_simulated_deck_config` is `False`, these are the addressable areas that the
+ deck configuration provided.
+
+ When `use_simulated_deck_config` is `True`, these are the addressable areas that have been
+ referenced by the protocol so far.
+ """
+
+ potential_cutout_fixtures_by_cutout_id: Dict[str, Set[PotentialCutoutFixture]]
+
+ deck_definition: DeckDefinitionV5
+
+ deck_configuration: Optional[DeckConfigurationType]
+ """The host robot's full deck configuration.
+
+ If `use_simulated_deck_config` is `True`, this is meaningless and this value is undefined.
+ In practice it will probably be `None` or `[]`.
+
+ If `use_simulated_deck_config` is `False`, this will be non-`None`.
+ """
+
+ robot_type: RobotType
+
+ use_simulated_deck_config: bool
+ """See `Config.use_simulated_deck_config`."""
+
+
+_OT2_ORDERED_SLOTS = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12"]
+_FLEX_ORDERED_SLOTS = [
+ "D1",
+ "D2",
+ "D3",
+ "C1",
+ "C2",
+ "C3",
+ "B1",
+ "B2",
+ "B3",
+ "A1",
+ "A2",
+ "A3",
+]
+_FLEX_ORDERED_STAGING_SLOTS = ["D4", "C4", "B4", "A4"]
+
+
+def _get_conflicting_addressable_areas_error_string(
+ potential_cutout_fixtures: Set[PotentialCutoutFixture],
+ loaded_addressable_areas: Dict[str, AddressableArea],
+ deck_definition: DeckDefinitionV5,
+) -> str:
+ loaded_areas_on_cutout = set()
+ for fixture in potential_cutout_fixtures:
+ loaded_areas_on_cutout.update(
+ deck_configuration_provider.get_provided_addressable_area_names(
+ fixture.cutout_fixture_id,
+ fixture.cutout_id,
+ deck_definition,
+ )
+ )
+ loaded_areas_on_cutout.intersection_update(loaded_addressable_areas)
+ display_names = {
+ loaded_addressable_areas[area].display_name for area in loaded_areas_on_cutout
+ }
+ return ", ".join(display_names)
+
+
+# This is a temporary shim while Protocol Engine's conflict-checking code
+# can only take deck slots as input.
+# Long-term solution: Check for conflicts based on bounding boxes, not slot adjacencies.
+# Shorter-term: Change the conflict-checking code to take cutouts instead of deck slots.
+CUTOUT_TO_DECK_SLOT_MAP: Dict[str, DeckSlotName] = {
+ # OT-2
+ "cutout1": DeckSlotName.SLOT_1,
+ "cutout2": DeckSlotName.SLOT_2,
+ "cutout3": DeckSlotName.SLOT_3,
+ "cutout4": DeckSlotName.SLOT_4,
+ "cutout5": DeckSlotName.SLOT_5,
+ "cutout6": DeckSlotName.SLOT_6,
+ "cutout7": DeckSlotName.SLOT_7,
+ "cutout8": DeckSlotName.SLOT_8,
+ "cutout9": DeckSlotName.SLOT_9,
+ "cutout10": DeckSlotName.SLOT_10,
+ "cutout11": DeckSlotName.SLOT_11,
+ "cutout12": DeckSlotName.FIXED_TRASH,
+ # Flex
+ "cutoutA1": DeckSlotName.SLOT_A1,
+ "cutoutA2": DeckSlotName.SLOT_A2,
+ "cutoutA3": DeckSlotName.SLOT_A3,
+ "cutoutB1": DeckSlotName.SLOT_B1,
+ "cutoutB2": DeckSlotName.SLOT_B2,
+ "cutoutB3": DeckSlotName.SLOT_B3,
+ "cutoutC1": DeckSlotName.SLOT_C1,
+ "cutoutC2": DeckSlotName.SLOT_C2,
+ "cutoutC3": DeckSlotName.SLOT_C3,
+ "cutoutD1": DeckSlotName.SLOT_D1,
+ "cutoutD2": DeckSlotName.SLOT_D2,
+ "cutoutD3": DeckSlotName.SLOT_D3,
+}
+DECK_SLOT_TO_CUTOUT_MAP = {
+ deck_slot: cutout for cutout, deck_slot in CUTOUT_TO_DECK_SLOT_MAP.items()
+}
+
+
+class AddressableAreaStore(HasState[AddressableAreaState], HandlesActions):
+ """Addressable area state container."""
+
+ _state: AddressableAreaState
+
+ def __init__(
+ self,
+ deck_configuration: DeckConfigurationType,
+ config: Config,
+ deck_definition: DeckDefinitionV5,
+ ) -> None:
+ """Initialize an addressable area store and its state."""
+ if config.use_simulated_deck_config:
+ loaded_addressable_areas_by_name = {}
+ else:
+ loaded_addressable_areas_by_name = (
+ self._get_addressable_areas_from_deck_configuration(
+ deck_configuration,
+ deck_definition,
+ )
+ )
+
+ self._state = AddressableAreaState(
+ deck_configuration=deck_configuration,
+ loaded_addressable_areas_by_name=loaded_addressable_areas_by_name,
+ potential_cutout_fixtures_by_cutout_id={},
+ deck_definition=deck_definition,
+ robot_type=config.robot_type,
+ use_simulated_deck_config=config.use_simulated_deck_config,
+ )
+
+ def handle_action(self, action: Action) -> None:
+ """Modify state in reaction to an action."""
+ if isinstance(action, SucceedCommandAction):
+ self._handle_command(action.command)
+ elif isinstance(action, AddAddressableAreaAction):
+ self._check_location_is_addressable_area(action.addressable_area)
+ elif isinstance(action, PlayAction):
+ current_state = self._state
+ if (
+ action.deck_configuration is not None
+ and not self._state.use_simulated_deck_config
+ ):
+ self._state.deck_configuration = action.deck_configuration
+ self._state.loaded_addressable_areas_by_name = (
+ self._get_addressable_areas_from_deck_configuration(
+ deck_config=action.deck_configuration,
+ deck_definition=current_state.deck_definition,
+ )
+ )
+
+ def _handle_command(self, command: Command) -> None:
+ """Modify state in reaction to a command."""
+ if isinstance(command.result, LoadLabwareResult):
+ location = command.params.location
+ if isinstance(location, (DeckSlotLocation, AddressableAreaLocation)):
+ self._check_location_is_addressable_area(location)
+
+ elif isinstance(command.result, MoveLabwareResult):
+ location = command.params.newLocation
+ if isinstance(location, (DeckSlotLocation, AddressableAreaLocation)):
+ self._check_location_is_addressable_area(location)
+
+ elif isinstance(command.result, LoadModuleResult):
+ self._check_location_is_addressable_area(command.params.location)
+
+ elif isinstance(
+ command.result,
+ (MoveToAddressableAreaResult, MoveToAddressableAreaForDropTipResult),
+ ):
+ addressable_area_name = command.params.addressableAreaName
+ self._check_location_is_addressable_area(addressable_area_name)
+
+ @staticmethod
+ def _get_addressable_areas_from_deck_configuration(
+ deck_config: DeckConfigurationType, deck_definition: DeckDefinitionV5
+ ) -> Dict[str, AddressableArea]:
+ """Return all addressable areas provided by the given deck configuration."""
+ addressable_areas = []
+ for cutout_id, cutout_fixture_id, opentrons_module_serial_number in deck_config:
+ provided_addressable_areas = (
+ deck_configuration_provider.get_provided_addressable_area_names(
+ cutout_fixture_id, cutout_id, deck_definition
+ )
+ )
+ cutout_position = deck_configuration_provider.get_cutout_position(
+ cutout_id, deck_definition
+ )
+ base_slot = CUTOUT_TO_DECK_SLOT_MAP[cutout_id]
+ for addressable_area_name in provided_addressable_areas:
+ addressable_areas.append(
+ deck_configuration_provider.get_addressable_area_from_name(
+ addressable_area_name=addressable_area_name,
+ cutout_position=cutout_position,
+ base_slot=base_slot,
+ deck_definition=deck_definition,
+ )
+ )
+ return {area.area_name: area for area in addressable_areas}
+
+ def _check_location_is_addressable_area(
+ self, location: Union[DeckSlotLocation, AddressableAreaLocation, str]
+ ) -> None:
+ if isinstance(location, DeckSlotLocation):
+ addressable_area_name = location.slotName.id
+ elif isinstance(location, AddressableAreaLocation):
+ addressable_area_name = location.addressableAreaName
+ else:
+ addressable_area_name = location
+
+ if addressable_area_name not in self._state.loaded_addressable_areas_by_name:
+ cutout_id = self._validate_addressable_area_for_simulation(
+ addressable_area_name
+ )
+
+ cutout_position = deck_configuration_provider.get_cutout_position(
+ cutout_id, self._state.deck_definition
+ )
+ base_slot = CUTOUT_TO_DECK_SLOT_MAP[cutout_id]
+ addressable_area = (
+ deck_configuration_provider.get_addressable_area_from_name(
+ addressable_area_name=addressable_area_name,
+ cutout_position=cutout_position,
+ base_slot=base_slot,
+ deck_definition=self._state.deck_definition,
+ )
+ )
+ self._state.loaded_addressable_areas_by_name[
+ addressable_area.area_name
+ ] = addressable_area
+
+ def _validate_addressable_area_for_simulation(
+ self, addressable_area_name: str
+ ) -> str:
+ """Given an addressable area name, validate it can exist on the deck and return cutout id associated with it."""
+ (
+ cutout_id,
+ potential_fixtures,
+ ) = deck_configuration_provider.get_potential_cutout_fixtures(
+ addressable_area_name, self._state.deck_definition
+ )
+
+ if cutout_id in self._state.potential_cutout_fixtures_by_cutout_id:
+ # Get the existing potential cutout fixtures for the addressable area already loaded on this cutout
+ existing_potential_fixtures = (
+ self._state.potential_cutout_fixtures_by_cutout_id[cutout_id]
+ )
+ # Get common cutout fixture that supplies existing addressable areas and the one being loaded
+ remaining_fixtures = existing_potential_fixtures.intersection(
+ set(potential_fixtures)
+ )
+
+ self._state.potential_cutout_fixtures_by_cutout_id[
+ cutout_id
+ ] = remaining_fixtures
+ else:
+ self._state.potential_cutout_fixtures_by_cutout_id[cutout_id] = set(
+ potential_fixtures
+ )
+
+ return cutout_id
+
+
+class AddressableAreaView(HasState[AddressableAreaState]):
+ """Read-only addressable area state view."""
+
+ _state: AddressableAreaState
+
+ def __init__(self, state: AddressableAreaState) -> None:
+ """Initialize the computed view of addressable area state.
+
+ Arguments:
+ state: Addressable area state dataclass used for all calculations.
+ """
+ self._state = state
+
+ def get_addressable_area(self, addressable_area_name: str) -> AddressableArea:
+ """Get addressable area."""
+ if not self._state.use_simulated_deck_config:
+ return self._get_loaded_addressable_area(addressable_area_name)
+ else:
+ return self._get_addressable_area_from_deck_data(
+ addressable_area_name=addressable_area_name,
+ do_compatibility_check=True,
+ )
+
+ def get_all(self) -> List[str]:
+ """Get a list of all loaded addressable area names."""
+ return list(self._state.loaded_addressable_areas_by_name)
+
+ def get_all_cutout_fixtures(self) -> Optional[List[str]]:
+ """Get the names of all fixtures present in the host robot's deck configuration.
+
+ If `use_simulated_deck_config` is `True` (see `Config`), we don't have a
+ meaningful concrete layout of fixtures, so this will return `None`.
+ """
+ if self._state.use_simulated_deck_config:
+ return None
+ else:
+ assert self._state.deck_configuration is not None
+ return [
+ cutout_fixture_id
+ for _, cutout_fixture_id, _serial in self._state.deck_configuration
+ ]
+
+ def _get_loaded_addressable_area(
+ self, addressable_area_name: str
+ ) -> AddressableArea:
+ """Get an addressable area that has been loaded into state. Will raise error if it does not exist."""
+ try:
+ return self._state.loaded_addressable_areas_by_name[addressable_area_name]
+ except KeyError:
+ raise AreaNotInDeckConfigurationError(
+ f"{addressable_area_name} not provided by deck configuration."
+ )
+
+ def _check_if_area_is_compatible_with_potential_fixtures(
+ self,
+ area_name: str,
+ cutout_id: str,
+ potential_fixtures: Set[PotentialCutoutFixture],
+ ) -> None:
+ if cutout_id in self._state.potential_cutout_fixtures_by_cutout_id:
+ if not self._state.potential_cutout_fixtures_by_cutout_id[
+ cutout_id
+ ].intersection(potential_fixtures):
+ loaded_areas_on_cutout = (
+ _get_conflicting_addressable_areas_error_string(
+ self._state.potential_cutout_fixtures_by_cutout_id[cutout_id],
+ self._state.loaded_addressable_areas_by_name,
+ self.state.deck_definition,
+ )
+ )
+ area_display_name = (
+ deck_configuration_provider.get_addressable_area_display_name(
+ area_name, self.state.deck_definition
+ )
+ )
+ raise IncompatibleAddressableAreaError(
+ f"Cannot use {area_display_name}, not compatible with one or more of"
+ f" the following fixtures: {loaded_areas_on_cutout}"
+ )
+
+ def _get_addressable_area_from_deck_data(
+ self,
+ addressable_area_name: str,
+ do_compatibility_check: bool,
+ ) -> AddressableArea:
+ """Get an addressable area that may not have been already loaded for a simulated run.
+
+ Since this may be the first time this addressable area has been called, and it might not exist in the store
+ yet (and if not won't until the result completes), we have to check if it is theoretically possible and then
+ get the area data from the deck configuration provider.
+ """
+ if addressable_area_name in self._state.loaded_addressable_areas_by_name:
+ return self._state.loaded_addressable_areas_by_name[addressable_area_name]
+
+ (
+ cutout_id,
+ potential_fixtures,
+ ) = deck_configuration_provider.get_potential_cutout_fixtures(
+ addressable_area_name, self._state.deck_definition
+ )
+
+ if do_compatibility_check:
+ self._check_if_area_is_compatible_with_potential_fixtures(
+ addressable_area_name, cutout_id, potential_fixtures
+ )
+
+ cutout_position = deck_configuration_provider.get_cutout_position(
+ cutout_id, self._state.deck_definition
+ )
+ base_slot = CUTOUT_TO_DECK_SLOT_MAP[cutout_id]
+ return deck_configuration_provider.get_addressable_area_from_name(
+ addressable_area_name=addressable_area_name,
+ cutout_position=cutout_position,
+ base_slot=base_slot,
+ deck_definition=self._state.deck_definition,
+ )
+
+ def get_addressable_area_base_slot(
+ self, addressable_area_name: str
+ ) -> DeckSlotName:
+ """Get the base slot the addressable area is associated with."""
+ addressable_area = self.get_addressable_area(addressable_area_name)
+ return addressable_area.base_slot
+
+ def get_addressable_area_position(
+ self,
+ addressable_area_name: str,
+ do_compatibility_check: bool = True,
+ ) -> Point:
+ """Get the position of an addressable area.
+
+ This does not require the addressable area to be in the deck configuration.
+ This is primarily used to support legacy fixed trash labware without
+ modifying the deck layout to remove the similar, but functionally different,
+ trashBinAdapter cutout fixture.
+
+        Besides that case, for movement purposes, this should only be called for areas
+        that have been pre-validated; otherwise there is a risk of collision.
+ """
+ addressable_area = self._get_addressable_area_from_deck_data(
+ addressable_area_name=addressable_area_name,
+ do_compatibility_check=False, # This should probably not default to false
+ )
+ position = addressable_area.position
+ return Point(x=position.x, y=position.y, z=position.z)
+
+ def get_addressable_area_offsets_from_cutout(
+ self,
+ addressable_area_name: str,
+ ) -> Point:
+ """Get the offset form cutout fixture of an addressable area."""
+ for addressable_area in self.state.deck_definition["locations"][
+ "addressableAreas"
+ ]:
+ if addressable_area["id"] == addressable_area_name:
+ area_offset = addressable_area["offsetFromCutoutFixture"]
+ position = Point(
+ x=area_offset[0],
+ y=area_offset[1],
+ z=area_offset[2],
+ )
+ return Point(x=position.x, y=position.y, z=position.z)
+ raise ValueError(
+ f"No matching addressable area named {addressable_area_name} identified."
+ )
+
+ def get_addressable_area_bounding_box(
+ self,
+ addressable_area_name: str,
+ do_compatibility_check: bool = True,
+ ) -> Dimensions:
+ """Get the bounding box of an addressable area.
+
+ This does not require the addressable area to be in the deck configuration.
+ For movement purposes, this should only be called for
+ areas that have been pre-validated, otherwise there could be the risk of collision.
+ """
+ addressable_area = self._get_addressable_area_from_deck_data(
+ addressable_area_name=addressable_area_name,
+ do_compatibility_check=do_compatibility_check,
+ )
+ return addressable_area.bounding_box
+
+ def get_addressable_area_move_to_location(
+ self, addressable_area_name: str
+ ) -> Point:
+ """Get the move-to position (top center) for an addressable area."""
+ addressable_area = self.get_addressable_area(addressable_area_name)
+ position = addressable_area.position
+ bounding_box = addressable_area.bounding_box
+ return Point(
+ x=position.x + bounding_box.x / 2,
+ y=position.y + bounding_box.y / 2,
+ z=position.z + bounding_box.z,
+ )
+
+ def get_addressable_area_center(self, addressable_area_name: str) -> Point:
+ """Get the (x, y, z) position of the center of the area."""
+ addressable_area = self.get_addressable_area(addressable_area_name)
+ position = addressable_area.position
+ bounding_box = addressable_area.bounding_box
+ return Point(
+ x=position.x + bounding_box.x / 2,
+ y=position.y + bounding_box.y / 2,
+ z=position.z,
+ )
+
+ def get_cutout_id_by_deck_slot_name(self, slot_name: DeckSlotName) -> str:
+ """Get the Cutout ID of a given Deck Slot by Deck Slot Name."""
+ return DECK_SLOT_TO_CUTOUT_MAP[slot_name]
+
+ def get_fixture_by_deck_slot_name(
+ self, slot_name: DeckSlotName
+ ) -> Optional[CutoutFixture]:
+ """Get the Cutout Fixture currently loaded where a specific Deck Slot would be."""
+ deck_config = self.state.deck_configuration
+ if deck_config:
+ slot_cutout_id = DECK_SLOT_TO_CUTOUT_MAP[slot_name]
+ slot_cutout_fixture = None
+ # This will only ever be one under current assumptions
+ for (
+ cutout_id,
+ cutout_fixture_id,
+ opentrons_module_serial_number,
+ ) in deck_config:
+ if cutout_id == slot_cutout_id:
+ slot_cutout_fixture = (
+ deck_configuration_provider.get_cutout_fixture(
+ cutout_fixture_id, self.state.deck_definition
+ )
+ )
+ return slot_cutout_fixture
+ if slot_cutout_fixture is None:
+ # If this happens, it's a bug. Either DECK_SLOT_TO_CUTOUT_MAP
+ # is missing an entry for the slot, or the deck configuration is missing
+ # an entry for the cutout.
+ raise CutoutDoesNotExistError(
+ f"No Cutout was found in the Deck that matched provided slot {slot_name}."
+ )
+ return None
+
+ def get_fixture_height(self, cutout_fixture_name: str) -> float:
+ """Get the z height of a cutout fixture."""
+ cutout_fixture = deck_configuration_provider.get_cutout_fixture(
+ cutout_fixture_name, self._state.deck_definition
+ )
+ return cutout_fixture["height"]
+
+ def get_fixture_serial_from_deck_configuration_by_deck_slot(
+ self, slot_name: DeckSlotName
+ ) -> Optional[str]:
+ """Get the serial number provided by the deck configuration for a Fixture at a given location."""
+ deck_config = self.state.deck_configuration
+ if deck_config:
+ slot_cutout_id = DECK_SLOT_TO_CUTOUT_MAP[slot_name]
+ # This will only ever be one under current assumptions
+ for (
+ cutout_id,
+ cutout_fixture_id,
+ opentrons_module_serial_number,
+ ) in deck_config:
+ if cutout_id == slot_cutout_id:
+ return opentrons_module_serial_number
+ return None
+
+ def get_slot_definition(self, slot_id: str) -> SlotDefV3:
+ """Get the definition of a slot in the deck.
+
+ This does not require that the slot exist in deck configuration.
+ """
+ try:
+ addressable_area = self._get_addressable_area_from_deck_data(
+ addressable_area_name=slot_id,
+ do_compatibility_check=True, # From the description of get_slot_definition, this might have to be False.
+ )
+ except AddressableAreaDoesNotExistError:
+ raise SlotDoesNotExistError(
+ f"Slot ID {slot_id} does not exist in deck {self._state.deck_definition['otId']}"
+ )
+ position = addressable_area.position
+ bounding_box = addressable_area.bounding_box
+ return {
+ "id": addressable_area.area_name,
+ "position": [position.x, position.y, position.z],
+ "boundingBox": {
+ "xDimension": bounding_box.x,
+ "yDimension": bounding_box.y,
+ "zDimension": bounding_box.z,
+ },
+ "displayName": addressable_area.display_name,
+ "compatibleModuleTypes": addressable_area.compatible_module_types,
+ }
+
+ def get_deck_slot_definitions(self) -> Dict[str, SlotDefV3]:
+ """Get all standard slot definitions available in the deck definition."""
+ if self._state.robot_type == "OT-2 Standard":
+ slots = _OT2_ORDERED_SLOTS
+ else:
+ slots = _FLEX_ORDERED_SLOTS
+ return {slot_name: self.get_slot_definition(slot_name) for slot_name in slots}
+
+ def get_staging_slot_definitions(self) -> Dict[str, SlotDefV3]:
+ """Get all staging slot definitions available in the deck definition."""
+ if self._state.robot_type == "OT-3 Standard":
+ return {
+ slot_name: self.get_slot_definition(slot_name)
+ for slot_name in _FLEX_ORDERED_STAGING_SLOTS
+ }
+ else:
+ return {}
+
+ def raise_if_area_not_in_deck_configuration(
+ self, addressable_area_name: str
+ ) -> None:
+ """Raise error if an addressable area is not compatible with or in the deck configuration.
+
+ For simulated runs/analysis, this will raise if the given addressable area is not compatible with other
+ previously referenced addressable areas, for example if a movable trash in A1 is in state, referencing the
+ deck slot A1 will raise since those two can't exist in any deck configuration combination.
+
+ For an on robot run, it will check if it is in the robot's deck configuration, if not it will raise an error.
+ """
+ if self._state.use_simulated_deck_config:
+ (
+ cutout_id,
+ potential_fixtures,
+ ) = deck_configuration_provider.get_potential_cutout_fixtures(
+ addressable_area_name, self._state.deck_definition
+ )
+
+ self._check_if_area_is_compatible_with_potential_fixtures(
+ addressable_area_name, cutout_id, potential_fixtures
+ )
+ else:
+ if (
+ addressable_area_name
+ not in self._state.loaded_addressable_areas_by_name
+ ):
+ raise AreaNotInDeckConfigurationError(
+ f"{addressable_area_name} not provided by deck configuration."
+ )
diff --git a/api/src/opentrons/protocol_engine/state/command_history.py b/api/src/opentrons/protocol_engine/state/command_history.py
new file mode 100644
index 00000000000..b21fca030ae
--- /dev/null
+++ b/api/src/opentrons/protocol_engine/state/command_history.py
@@ -0,0 +1,279 @@
+"""Protocol Engine CommandStore sub-state."""
+from collections import OrderedDict
+from dataclasses import dataclass
+from typing import Dict, List, Optional
+
+from opentrons.ordered_set import OrderedSet
+from opentrons.protocol_engine.errors.exceptions import CommandDoesNotExistError
+
+from ..commands import Command, CommandStatus, CommandIntent
+
+
+@dataclass(frozen=True)
+class CommandEntry:
+ """A command entry in state, including its index in the list."""
+
+ command: Command
+ index: int
+
+
+@dataclass # dataclass for __eq__() autogeneration.
+class CommandHistory:
+ """Command state container for command data."""
+
+ _all_command_ids: List[str]
+ """All command IDs, in insertion order."""
+
+ _commands_by_id: Dict[str, CommandEntry]
+ """All command resources, in insertion order, mapped by their unique IDs."""
+
+ _queued_command_ids: OrderedSet[str]
+ """The IDs of queued commands, in FIFO order"""
+
+ _queued_setup_command_ids: OrderedSet[str]
+ """The IDs of queued setup commands, in FIFO order"""
+
+ _queued_fixit_command_ids: OrderedSet[str]
+ """The IDs of queued fixit commands, in FIFO order"""
+
+ _running_command_id: Optional[str]
+ """The ID of the currently running command, if any"""
+
+ _terminal_command_id: Optional[str]
+ """ID of the most recent command that SUCCEEDED or FAILED, if any"""
+
+ def __init__(self) -> None:
+ self._all_command_ids = []
+ self._queued_command_ids = OrderedSet()
+ self._queued_setup_command_ids = OrderedSet()
+ self._queued_fixit_command_ids = OrderedSet()
+ self._commands_by_id = OrderedDict()
+ self._running_command_id = None
+ self._terminal_command_id = None
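+
+    # Typical lifecycle (illustrative): set_command_queued() places a command on one of
+    # the queues; set_command_running() removes it from the queues and marks it running;
+    # set_command_succeeded() / set_command_failed() record it as the terminal command.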
+
+ def length(self) -> int:
+ """Get the length of all elements added to the history."""
+ return len(self._commands_by_id)
+
+ def has(self, command_id: str) -> bool:
+ """Returns whether a command is in the history."""
+ return command_id in self._commands_by_id
+
+ def get(self, command_id: str) -> CommandEntry:
+ """Get a command entry if present, otherwise raise an exception."""
+ try:
+ return self._commands_by_id[command_id]
+ except KeyError:
+ raise CommandDoesNotExistError(f"Command {command_id} does not exist")
+
+ def get_next(self, command_id: str) -> Optional[CommandEntry]:
+ """Get the command which follows the command associated with the given ID, if any."""
+ index = self.get(command_id).index
+ try:
+ return self._commands_by_id[self._all_command_ids[index + 1]]
+ except KeyError:
+ raise CommandDoesNotExistError(f"Command {command_id} does not exist")
+ except IndexError:
+ return None
+
+ def get_prev(self, command_id: str) -> Optional[CommandEntry]:
+ """Get the command which precedes the command associated with the given ID, if any.
+
+ Returns None if the command_id corresponds to the first element in the history.
+ """
+ index = self.get(command_id).index
+ try:
+ prev_command = self._commands_by_id[self._all_command_ids[index - 1]]
+ return prev_command if index != 0 else None
+ except KeyError:
+ raise CommandDoesNotExistError(f"Command {command_id} does not exist")
+ except IndexError:
+ return None
+
+ def get_if_present(self, command_id: str) -> Optional[CommandEntry]:
+ """Get a command entry, if present."""
+ return self._commands_by_id.get(command_id)
+
+ def get_all_commands(self) -> List[Command]:
+ """Get all commands."""
+ return [
+ self._commands_by_id[command_id].command
+ for command_id in self._all_command_ids
+ ]
+
+ def get_all_ids(self) -> List[str]:
+ """Get all command IDs."""
+ return self._all_command_ids
+
+ def get_slice(self, start: int, stop: int) -> List[Command]:
+ """Get a list of commands between start and stop."""
+ commands = self._all_command_ids[start:stop]
+ return [self._commands_by_id[command].command for command in commands]
+
+ def get_tail_command(self) -> Optional[CommandEntry]:
+ """Get the command most recently added."""
+ if self._commands_by_id:
+ return next(reversed(self._commands_by_id.values()))
+ else:
+ return None
+
+ def get_terminal_command(self) -> Optional[CommandEntry]:
+ """Get the command most recently marked as SUCCEEDED or FAILED."""
+ if self._terminal_command_id is not None:
+ return self._commands_by_id[self._terminal_command_id]
+ else:
+ return None
+
+ def get_running_command(self) -> Optional[CommandEntry]:
+ """Get the command currently running, if any."""
+ if self._running_command_id is None:
+ return None
+ else:
+ return self._commands_by_id[self._running_command_id]
+
+ def get_queue_ids(self) -> OrderedSet[str]:
+ """Get the IDs of all queued protocol commands, in FIFO order."""
+ return self._queued_command_ids
+
+ def get_setup_queue_ids(self) -> OrderedSet[str]:
+ """Get the IDs of all queued setup commands, in FIFO order."""
+ return self._queued_setup_command_ids
+
+ def get_fixit_queue_ids(self) -> OrderedSet[str]:
+ """Get the IDs of all queued fixit commands, in FIFO order."""
+ return self._queued_fixit_command_ids
+
+ def clear_queue(self) -> None:
+ """Clears all commands within the queued command ids structure."""
+ self._queued_command_ids.clear()
+
+ def clear_setup_queue(self) -> None:
+ """Clears all commands within the queued setup command ids structure."""
+ self._queued_setup_command_ids.clear()
+
+ def clear_fixit_queue(self) -> None:
+ """Clears all commands within the queued setup command ids structure."""
+ self._queued_fixit_command_ids.clear()
+
+ def set_command_queued(self, command: Command) -> None:
+ """Validate and mark a command as queued in the command history."""
+ assert command.status == CommandStatus.QUEUED
+ assert not self.has(command.id)
+
+ next_index = self.length()
+ updated_command = CommandEntry(
+ index=next_index,
+ command=command,
+ )
+ self._add(command.id, updated_command)
+
+ if command.intent == CommandIntent.SETUP:
+ self._add_to_setup_queue(command.id)
+ elif command.intent == CommandIntent.FIXIT:
+ self._add_to_fixit_queue(command.id)
+ else:
+ self._add_to_queue(command.id)
+
+ def set_command_running(self, command: Command) -> None:
+ """Validate and mark a command as running in the command history."""
+ prev_entry = self.get(command.id)
+
+ assert prev_entry.command.status == CommandStatus.QUEUED
+ assert command.status == CommandStatus.RUNNING
+
+ self._add(
+ command.id,
+ CommandEntry(index=prev_entry.index, command=command),
+ )
+
+ assert self.get_running_command() is None
+ self._set_running_command_id(command.id)
+
+ self._remove_queue_id(command.id)
+ self._remove_setup_queue_id(command.id)
+ self._remove_fixit_queue_id(command.id)
+
+ def set_command_succeeded(self, command: Command) -> None:
+ """Validate and mark a command as succeeded in the command history."""
+ prev_entry = self.get(command.id)
+ assert prev_entry.command.status == CommandStatus.RUNNING
+ assert command.status == CommandStatus.SUCCEEDED
+
+ self._add(
+ command.id,
+ CommandEntry(
+ index=prev_entry.index,
+ command=command,
+ ),
+ )
+
+ running_command_entry = self.get_running_command()
+ assert running_command_entry is not None
+ assert running_command_entry.command.id == command.id
+ self._set_running_command_id(None)
+
+ self._remove_queue_id(command.id)
+ self._remove_setup_queue_id(command.id)
+ self._set_terminal_command_id(command.id)
+
+ def set_command_failed(self, command: Command) -> None:
+ """Validate and mark a command as failed in the command history."""
+ prev_entry = self.get(command.id)
+ assert (
+ prev_entry.command.status == CommandStatus.RUNNING
+ or prev_entry.command.status == CommandStatus.QUEUED
+ )
+ assert command.status == CommandStatus.FAILED
+
+ index = self.get(command.id).index
+ self._add(
+ command_id=command.id,
+ command_entry=CommandEntry(index=index, command=command),
+ )
+
+ self._set_terminal_command_id(command.id)
+
+ running_command_entry = self.get_running_command()
+ if (
+ running_command_entry is not None
+ and running_command_entry.command.id == command.id
+ ):
+ self._set_running_command_id(None)
+
+ def _add(self, command_id: str, command_entry: CommandEntry) -> None:
+ """Create or update a command entry."""
+ if command_id not in self._commands_by_id:
+ self._all_command_ids.append(command_id)
+ self._commands_by_id[command_id] = command_entry
+
+ def _add_to_queue(self, command_id: str) -> None:
+ """Add new ID to the queued."""
+ self._queued_command_ids.add(command_id)
+
+ def _add_to_setup_queue(self, command_id: str) -> None:
+ """Add a new ID to the queued setup."""
+ self._queued_setup_command_ids.add(command_id)
+
+ def _add_to_fixit_queue(self, command_id: str) -> None:
+ """Add a new ID to the queued fixit."""
+ self._queued_fixit_command_ids.add(command_id)
+
+ def _remove_queue_id(self, command_id: str) -> None:
+ """Remove a specific command from the queued command ids structure."""
+ self._queued_command_ids.discard(command_id)
+
+ def _remove_setup_queue_id(self, command_id: str) -> None:
+ """Remove a specific command from the queued setup command ids structure."""
+ self._queued_setup_command_ids.discard(command_id)
+
+ def _remove_fixit_queue_id(self, command_id: str) -> None:
+ """Remove a specific command from the queued fixit command ids structure."""
+ self._queued_fixit_command_ids.discard(command_id)
+
+ def _set_terminal_command_id(self, command_id: str) -> None:
+ """Set the ID of the most recently dequeued command."""
+ self._terminal_command_id = command_id
+
+ def _set_running_command_id(self, command_id: Optional[str]) -> None:
+ """Set the ID of the currently running command."""
+ self._running_command_id = command_id
diff --git a/api/src/opentrons/protocol_engine/state/commands.py b/api/src/opentrons/protocol_engine/state/commands.py
index 9ebef474c84..0c055fdee39 100644
--- a/api/src/opentrons/protocol_engine/state/commands.py
+++ b/api/src/opentrons/protocol_engine/state/commands.py
@@ -1,21 +1,28 @@
"""Protocol engine commands sub-state."""
from __future__ import annotations
-from collections import OrderedDict
-from enum import Enum
+
+import enum
from dataclasses import dataclass
from datetime import datetime
-from typing import Dict, List, Mapping, Optional, Union
+from typing import Dict, List, Optional, Union
+from typing_extensions import assert_never
from opentrons_shared_data.errors import EnumeratedError, ErrorCodes, PythonException
from opentrons.ordered_set import OrderedSet
from opentrons.hardware_control.types import DoorState
+from opentrons.protocol_engine.actions.actions import (
+ ResumeFromRecoveryAction,
+ RunCommandAction,
+)
+from opentrons.protocol_engine.error_recovery_policy import ErrorRecoveryType
+from opentrons.protocol_engine.notes.notes import CommandNote
from ..actions import (
Action,
QueueCommandAction,
- UpdateCommandAction,
+ SucceedCommandAction,
FailCommandAction,
PlayAction,
PauseAction,
@@ -27,41 +34,62 @@
from ..commands import Command, CommandStatus, CommandIntent
from ..errors import (
- CommandDoesNotExistError,
RunStoppedError,
ErrorOccurrence,
RobotDoorOpenError,
SetupCommandNotAllowedError,
+ FixitCommandNotAllowedError,
+ ResumeFromRecoveryNotAllowedError,
PauseNotAllowedError,
UnexpectedProtocolError,
ProtocolCommandFailedError,
)
from ..types import EngineStatus
from .abstract_store import HasState, HandlesActions
+from .command_history import (
+ CommandEntry,
+ CommandHistory,
+)
from .config import Config
-class QueueStatus(str, Enum):
- """Execution status of the command queue.
+class QueueStatus(enum.Enum):
+ """Execution status of the command queue."""
+
+ SETUP = enum.auto()
+ """The engine has been created, but the run has not yet started.
+
+ New protocol commands may be enqueued, but will wait to execute.
+ New setup commands may be enqueued and will execute immediately.
+    New fixit commands may not be enqueued.
+ """
+
+ RUNNING = enum.auto()
+ """The queue is running through protocol commands.
+
+ New protocol commands may be enqueued and will execute immediately.
+ New setup commands may not be enqueued.
+    New fixit commands may not be enqueued.
+ """
+
+ PAUSED = enum.auto()
+ """Execution of protocol commands has been paused.
- Properties:
- SETUP: The engine has been created, but the run has not yet started.
- New protocol commands may be enqueued but will wait to execute.
- New setup commands may be enqueued and will execute immediately.
- RUNNING: The queue is running though protocol commands.
- New protocol commands may be enqueued and will execute immediately.
- New setup commands may not be enqueued.
- PAUSED: Execution of protocol commands has been paused.
- New protocol commands may be enqueued but wait to execute.
- New setup commands may not be enqueued.
+ New protocol commands may be enqueued, but will wait to execute.
+ New setup commands may not be enqueued.
+    New fixit commands may not be enqueued.
"""
- SETUP = "setup"
- RUNNING = "running"
- PAUSED = "paused"
+ AWAITING_RECOVERY = enum.auto()
+ """A protocol command has encountered a recoverable error.
+ New protocol commands may be enqueued, but will wait to execute.
+ New setup commands may not be enqueued.
+    New fixit commands may be enqueued and will execute immediately.
+ """
-class RunResult(str, Enum):
+
+class RunResult(str, enum.Enum):
"""Result of the run."""
SUCCEEDED = "succeeded"
@@ -88,38 +116,17 @@ class CurrentCommand:
index: int
-@dataclass(frozen=True)
-class CommandEntry:
- """An command entry in state, including its index in the list."""
-
- command: Command
- index: int
-
-
@dataclass
class CommandState:
"""State of all protocol engine command resources."""
- all_command_ids: List[str]
- """All command IDs, in insertion order."""
-
- queued_command_ids: OrderedSet[str]
- """The IDs of queued commands, in FIFO order"""
-
- queued_setup_command_ids: OrderedSet[str]
- """The IDs of queued setup commands, in FIFO order"""
-
- running_command_id: Optional[str]
- """The ID of the currently running command, if any"""
-
- commands_by_id: Dict[str, CommandEntry]
- """All command resources, in insertion order, mapped by their unique IDs."""
+ command_history: CommandHistory
queue_status: QueueStatus
"""Whether the engine is currently pulling new commands off the queue to execute.
A command may still be executing, and the robot may still be in motion,
- even if INACTIVE.
+ even if PAUSED.
"""
run_started_at: Optional[datetime]
@@ -152,11 +159,35 @@ class CommandState:
are stored on the individual commands themselves.
"""
+ failed_command: Optional[CommandEntry]
+ """The most recent command failure, if any."""
+ # TODO(mm, 2024-03-19): This attribute is currently only used to help robot-server
+ # with pagination, but "the failed command" is an increasingly nuanced idea, now
+ # that we're doing error recovery. See if we can implement robot-server pagination
+ # atop simpler concepts, like "the last command that ran" or "the next command that
+ # would run."
+ #
+ # TODO(mm, 2024-04-03): Can this be replaced by
+ # CommandHistory.get_terminal_command() now?
+
+ command_error_recovery_types: Dict[str, ErrorRecoveryType]
+ """For each command that failed (indexed by ID), what its recovery type was.
+
+ This only includes commands that actually failed, not the ones that we mark as
+ failed but that are effectively "cancelled" because a command before them failed.
+
+ This separate attribute is a stopgap until error recovery concepts are a bit more
+ stable. Eventually, we might want this info to be stored directly on each command.
+ """
+
+ recovery_target_command_id: Optional[str]
+ """If we're currently recovering from a command failure, which command it was."""
+
finish_error: Optional[ErrorOccurrence]
"""The error that happened during the post-run finish steps (homing & dropping tips), if any."""
- latest_command_hash: Optional[str]
- """The latest hash value received in a QueueCommandAction.
+ latest_protocol_command_hash: Optional[str]
+ """The latest PROTOCOL command hash value received in a QueueCommandAction.
This value can be used to generate future hashes.
"""
@@ -166,7 +197,7 @@ class CommandState:
class CommandStore(HasState[CommandState], HandlesActions):
- """Command state container."""
+ """Command state container for run-level command concerns."""
_state: CommandState
@@ -179,29 +210,24 @@ def __init__(
"""Initialize a CommandStore and its state."""
self._config = config
self._state = CommandState(
+ command_history=CommandHistory(),
queue_status=QueueStatus.SETUP,
is_door_blocking=is_door_open and config.block_on_door_open,
run_result=None,
- running_command_id=None,
- all_command_ids=[],
- queued_command_ids=OrderedSet(),
- queued_setup_command_ids=OrderedSet(),
- commands_by_id=OrderedDict(),
run_error=None,
finish_error=None,
+ failed_command=None,
+ command_error_recovery_types={},
+ recovery_target_command_id=None,
run_completed_at=None,
run_started_at=None,
- latest_command_hash=None,
+ latest_protocol_command_hash=None,
stopped_by_estop=False,
)
def handle_action(self, action: Action) -> None: # noqa: C901
"""Modify state in reaction to an action."""
- errors_by_id: Mapping[str, ErrorOccurrence]
-
if isinstance(action, QueueCommandAction):
- assert action.command_id not in self._state.commands_by_id
-
# TODO(mc, 2021-06-22): mypy has trouble with this automatic
# request > command mapping, figure out how to type precisely
# (or wait for a future mypy version that can figure it out).
@@ -217,50 +243,29 @@ def handle_action(self, action: Action) -> None: # noqa: C901
params=action.request.params, # type: ignore[arg-type]
intent=action.request.intent,
status=CommandStatus.QUEUED,
+ failedCommandId=action.failed_command_id,
)
- next_index = len(self._state.all_command_ids)
- self._state.all_command_ids.append(action.command_id)
- self._state.commands_by_id[queued_command.id] = CommandEntry(
- index=next_index,
- command=queued_command,
- )
-
- if action.request.intent == CommandIntent.SETUP:
- self._state.queued_setup_command_ids.add(queued_command.id)
- else:
- self._state.queued_command_ids.add(queued_command.id)
+ self._state.command_history.set_command_queued(queued_command)
if action.request_hash is not None:
- self._state.latest_command_hash = action.request_hash
-
- # TODO(mc, 2021-12-28): replace "UpdateCommandAction" with explicit
- # state change actions (e.g. RunCommandAction, SucceedCommandAction)
- # to make a command's queue transition logic easier to follow
- elif isinstance(action, UpdateCommandAction):
- command = action.command
- prev_entry = self._state.commands_by_id.get(command.id)
-
- if prev_entry is None:
- index = len(self._state.all_command_ids)
- self._state.all_command_ids.append(command.id)
- self._state.commands_by_id[command.id] = CommandEntry(
- index=index,
- command=command,
- )
- else:
- self._state.commands_by_id[command.id] = CommandEntry(
- index=prev_entry.index,
- command=command,
- )
+ self._state.latest_protocol_command_hash = action.request_hash
+
+ elif isinstance(action, RunCommandAction):
+ prev_entry = self._state.command_history.get(action.command_id)
+
+ running_command = prev_entry.command.copy(
+ update={
+ "status": CommandStatus.RUNNING,
+ "startedAt": action.started_at,
+ }
+ )
- self._state.queued_command_ids.discard(command.id)
- self._state.queued_setup_command_ids.discard(command.id)
+ self._state.command_history.set_command_running(running_command)
- if command.status == CommandStatus.RUNNING:
- self._state.running_command_id = command.id
- elif self._state.running_command_id == command.id:
- self._state.running_command_id = None
+ elif isinstance(action, SucceedCommandAction):
+ succeeded_command = action.command
+ self._state.command_history.set_command_succeeded(succeeded_command)
elif isinstance(action, FailCommandAction):
error_occurrence = ErrorOccurrence.from_failed(
@@ -268,47 +273,73 @@ def handle_action(self, action: Action) -> None: # noqa: C901
createdAt=action.failed_at,
error=action.error,
)
- prev_entry = self._state.commands_by_id[action.command_id]
- self._state.commands_by_id[action.command_id] = CommandEntry(
- index=prev_entry.index,
- # TODO(mc, 2022-06-06): add new "cancelled" status or similar
- # and don't set `completedAt` in commands other than the
- # specific one that failed
- command=prev_entry.command.copy(
- update={
- "error": error_occurrence,
- "completedAt": action.failed_at,
- "status": CommandStatus.FAILED,
- }
- ),
+
+ self._update_to_failed(
+ command_id=action.command_id,
+ failed_at=action.failed_at,
+ error_occurrence=error_occurrence,
+ error_recovery_type=action.type,
+ notes=action.notes,
+ )
+
+ self._state.failed_command = self._state.command_history.get(
+ action.command_id
)
+ prev_entry = self.state.command_history.get(action.command_id)
if prev_entry.command.intent == CommandIntent.SETUP:
- other_command_ids_to_fail = [
- *[i for i in self._state.queued_setup_command_ids],
- ]
- self._state.queued_setup_command_ids.clear()
- else:
- other_command_ids_to_fail = [
- *[i for i in self._state.queued_command_ids],
- ]
- self._state.queued_command_ids.clear()
-
- for command_id in other_command_ids_to_fail:
- prev_entry = self._state.commands_by_id[command_id]
-
- self._state.commands_by_id[command_id] = CommandEntry(
- index=prev_entry.index,
- command=prev_entry.command.copy(
- update={
- "completedAt": action.failed_at,
- "status": CommandStatus.FAILED,
- }
- ),
+ other_command_ids_to_fail = (
+ self._state.command_history.get_setup_queue_ids()
)
-
- if self._state.running_command_id == action.command_id:
- self._state.running_command_id = None
+ for command_id in other_command_ids_to_fail:
+ # TODO(mc, 2022-06-06): add new "cancelled" status or similar
+ self._update_to_failed(
+ command_id=command_id,
+ failed_at=action.failed_at,
+ error_occurrence=None,
+ error_recovery_type=None,
+ notes=None,
+ )
+ self._state.command_history.clear_setup_queue()
+ elif (
+ prev_entry.command.intent == CommandIntent.PROTOCOL
+ or prev_entry.command.intent is None
+ ):
+ if action.type == ErrorRecoveryType.WAIT_FOR_RECOVERY:
+ self._state.queue_status = QueueStatus.AWAITING_RECOVERY
+ self._state.recovery_target_command_id = action.command_id
+ elif action.type == ErrorRecoveryType.FAIL_RUN:
+ other_command_ids_to_fail = (
+ self._state.command_history.get_queue_ids()
+ )
+ for command_id in other_command_ids_to_fail:
+ # TODO(mc, 2022-06-06): add new "cancelled" status or similar
+ self._update_to_failed(
+ command_id=command_id,
+ failed_at=action.failed_at,
+ error_occurrence=None,
+ error_recovery_type=None,
+ notes=None,
+ )
+ self._state.command_history.clear_queue()
+ else:
+ assert_never(action.type)
+ elif prev_entry.command.intent == CommandIntent.FIXIT:
+ other_command_ids_to_fail = (
+ self._state.command_history.get_fixit_queue_ids()
+ )
+ for command_id in other_command_ids_to_fail:
+ # TODO(mc, 2022-06-06): add new "cancelled" status or similar
+ self._update_to_failed(
+ command_id=command_id,
+ failed_at=action.failed_at,
+ error_occurrence=None,
+ error_recovery_type=None,
+ notes=None,
+ )
+ self._state.command_history.clear_fixit_queue()
+ else:
+ assert_never(prev_entry.command.intent)
elif isinstance(action, PlayAction):
if not self._state.run_result:
@@ -324,12 +355,22 @@ def handle_action(self, action: Action) -> None: # noqa: C901
elif isinstance(action, PauseAction):
self._state.queue_status = QueueStatus.PAUSED
+ elif isinstance(action, ResumeFromRecoveryAction):
+ self._state.command_history.clear_fixit_queue()
+ self._state.queue_status = QueueStatus.RUNNING
+ self._state.recovery_target_command_id = None
+
elif isinstance(action, StopAction):
if not self._state.run_result:
+ if self._state.queue_status == QueueStatus.AWAITING_RECOVERY:
+ self._state.recovery_target_command_id = None
+
self._state.queue_status = QueueStatus.PAUSED
- self._state.run_result = RunResult.STOPPED
if action.from_estop:
self._state.stopped_by_estop = True
+ self._state.run_result = RunResult.FAILED
+ else:
+ self._state.run_result = RunResult.STOPPED
elif isinstance(action, FinishAction):
if not self._state.run_result:
@@ -343,7 +384,16 @@ def handle_action(self, action: Action) -> None: # noqa: C901
else:
self._state.run_result = RunResult.STOPPED
- if action.error_details:
+ if not self._state.run_error and action.error_details:
+ self._state.run_error = self._map_run_exception_to_error_occurrence(
+ action.error_details.error_id,
+ action.error_details.created_at,
+ action.error_details.error,
+ )
+ else:
+ # HACK(sf): There needs to be a better way to set
+ # an estop error than this else clause
+ if self._state.stopped_by_estop and action.error_details:
self._state.run_error = self._map_run_exception_to_error_occurrence(
action.error_details.error_id,
action.error_details.created_at,
@@ -370,11 +420,37 @@ def handle_action(self, action: Action) -> None: # noqa: C901
if self._config.block_on_door_open:
if action.door_state == DoorState.OPEN:
self._state.is_door_blocking = True
+ # todo(mm, 2024-03-19): It's unclear how the door should interact
+ # with error recovery (QueueStatus.AWAITING_RECOVERY).
if self._state.queue_status != QueueStatus.SETUP:
self._state.queue_status = QueueStatus.PAUSED
elif action.door_state == DoorState.CLOSED:
self._state.is_door_blocking = False
+ def _update_to_failed(
+ self,
+ command_id: str,
+ failed_at: datetime,
+ error_occurrence: Optional[ErrorOccurrence],
+ error_recovery_type: Optional[ErrorRecoveryType],
+ notes: Optional[List[CommandNote]],
+ ) -> None:
+ prev_entry = self._state.command_history.get(command_id)
+ failed_command = prev_entry.command.copy(
+ update={
+ "completedAt": failed_at,
+ "status": CommandStatus.FAILED,
+ **({"error": error_occurrence} if error_occurrence is not None else {}),
+ # Assume we're not overwriting any existing notes because they can
+ # only be added when a command completes, and if we're failing this
+ # command, it wouldn't have completed before now.
+ **({"notes": notes} if notes is not None else {}),
+ }
+ )
+ self._state.command_history.set_command_failed(failed_command)
+ if error_recovery_type is not None:
+ self._state.command_error_recovery_types[command_id] = error_recovery_type
+
@staticmethod
def _map_run_exception_to_error_occurrence(
error_id: str, created_at: datetime, exception: Exception
@@ -425,10 +501,7 @@ def __init__(self, state: CommandState) -> None:
def get(self, command_id: str) -> Command:
"""Get a command by its unique identifier."""
- try:
- return self._state.commands_by_id[command_id].command
- except KeyError:
- raise CommandDoesNotExistError(f"Command {command_id} does not exist")
+ return self._state.command_history.get(command_id).command
def get_all(self) -> List[Command]:
"""Get a list of all commands in state.
@@ -437,10 +510,7 @@ def get_all(self) -> List[Command]:
Replacing a command (to change its status, for example) keeps its place in the
ordering.
"""
- return [
- self._state.commands_by_id[cid].command
- for cid in self._state.all_command_ids
- ]
+ return self._state.command_history.get_all_commands()
def get_slice(
self,
@@ -450,30 +520,38 @@ def get_slice(
"""Get a subset of commands around a given cursor.
If the cursor is omitted, a cursor will be selected automatically
- based on the currently running or most recently executed command."
+ based on the currently running or most recently executed command.
"""
- # TODO(mc, 2022-01-31): this is not the most performant way to implement
- # this; if this becomes a problem, change or the underlying data structure
- # to something that isn't just an OrderedDict
- all_command_ids = self._state.all_command_ids
- commands_by_id = self._state.commands_by_id
- running_command_id = self._state.running_command_id
- queued_command_ids = self._state.queued_command_ids
- total_length = len(all_command_ids)
+ running_command = self._state.command_history.get_running_command()
+ queued_command_ids = self._state.command_history.get_queue_ids()
+ total_length = self._state.command_history.length()
if cursor is None:
- if running_command_id is not None:
- cursor = commands_by_id[running_command_id].index
+ if running_command is not None:
+ cursor = running_command.index
elif len(queued_command_ids) > 0:
- cursor = commands_by_id[queued_command_ids.head()].index - 1
+ # Get the most recently executed command,
+ # which we can find just before the first queued command.
+ cursor = (
+ self._state.command_history.get(queued_command_ids.head()).index - 1
+ )
+ elif (
+ self._state.run_result
+ and self._state.run_result == RunResult.FAILED
+ and self._state.failed_command
+ ):
+ # Currently, if the run fails, we mark all the commands we didn't
+ # reach as failed. This makes command status alone insufficient to
+ # find the most recent command that actually executed, so we need to
+ # store that separately.
+ cursor = self._state.failed_command.index
else:
cursor = total_length - length
# start is inclusive, stop is exclusive
actual_cursor = max(0, min(cursor, total_length - 1))
stop = min(total_length, actual_cursor + length)
- command_ids = all_command_ids[actual_cursor:stop]
- commands = [commands_by_id[cid].command for cid in command_ids]
+ commands = self._state.command_history.get_slice(start=actual_cursor, stop=stop)
return CommandSlice(
commands=commands,
@@ -507,32 +585,41 @@ def get_error(self) -> Optional[ErrorOccurrence]:
else:
return run_error or finish_error
+ def get_running_command_id(self) -> Optional[str]:
+ """Return the ID of the command that's currently running, if there is one."""
+ running_command = self._state.command_history.get_running_command()
+ if running_command is not None:
+ return running_command.command.id
+ else:
+ return None
+
+ def get_queue_ids(self) -> OrderedSet[str]:
+ """Get the IDs of all queued protocol commands, in FIFO order."""
+ return self._state.command_history.get_queue_ids()
+
def get_current(self) -> Optional[CurrentCommand]:
"""Return the "current" command, if any.
The "current" command is the command that is currently executing,
or the most recent command to have completed.
"""
- if self._state.running_command_id:
- entry = self._state.commands_by_id[self._state.running_command_id]
+ running_command = self._state.command_history.get_running_command()
+ if running_command:
return CurrentCommand(
- command_id=entry.command.id,
- command_key=entry.command.key,
- created_at=entry.command.createdAt,
- index=entry.index,
+ command_id=running_command.command.id,
+ command_key=running_command.command.key,
+ created_at=running_command.command.createdAt,
+ index=running_command.index,
)
- # TODO(mc, 2022-02-07): this is O(n) in the worst case for no good reason.
- # Resolve prior to JSONv6 support, where this will matter.
- for reverse_index, cid in enumerate(reversed(self._state.all_command_ids)):
- if self.get_command_is_final(cid):
- entry = self._state.commands_by_id[cid]
- return CurrentCommand(
- command_id=entry.command.id,
- command_key=entry.command.key,
- created_at=entry.command.createdAt,
- index=len(self._state.all_command_ids) - reverse_index - 1,
- )
+ final_command = self.get_final_command()
+ if final_command:
+ return CurrentCommand(
+ command_id=final_command.command.id,
+ command_key=final_command.command.key,
+ created_at=final_command.command.createdAt,
+ index=final_command.index,
+ )
return None
@@ -549,14 +636,23 @@ def get_next_to_execute(self) -> Optional[str]:
if self._state.run_result:
raise RunStoppedError("Engine was stopped")
+        # if the queue is in recovery mode, return the next fixit command
+ next_fixit_cmd = self._state.command_history.get_fixit_queue_ids().head(None)
+ if next_fixit_cmd and self._state.queue_status == QueueStatus.AWAITING_RECOVERY:
+ return next_fixit_cmd
+
# if there is a setup command queued, prioritize it
- next_setup_cmd = self._state.queued_setup_command_ids.head(None)
- if self._state.queue_status != QueueStatus.PAUSED and next_setup_cmd:
+ next_setup_cmd = self._state.command_history.get_setup_queue_ids().head(None)
+ if (
+ self._state.queue_status
+ not in [QueueStatus.PAUSED, QueueStatus.AWAITING_RECOVERY]
+ and next_setup_cmd
+ ):
return next_setup_cmd
# if the queue is running, return the next protocol command
if self._state.queue_status == QueueStatus.RUNNING:
- return self._state.queued_command_ids.head(None)
+ return self._state.command_history.get_queue_ids().head(None)
# otherwise we've got nothing to do
return None
@@ -567,8 +663,8 @@ def get_is_okay_to_clear(self) -> bool:
return True
elif (
self.get_status() == EngineStatus.IDLE
- and self._state.running_command_id is None
- and len(self._state.queued_setup_command_ids) == 0
+ and self._state.command_history.get_running_command() is None
+ and len(self._state.command_history.get_setup_queue_ids()) == 0
):
return True
else:
@@ -586,6 +682,36 @@ def get_is_running(self) -> bool:
"""Get whether the protocol is running & queued commands should be executed."""
return self._state.queue_status == QueueStatus.RUNNING
+ def get_final_command(self) -> Optional[CommandEntry]:
+ """Get the most recent command that has reached its final `status`. See get_command_is_final."""
+ run_requested_to_stop = self._state.run_result is not None
+
+ if run_requested_to_stop:
+ tail_command = self._state.command_history.get_tail_command()
+ if not tail_command:
+ return None
+ if tail_command.command.status != CommandStatus.RUNNING:
+ return tail_command
+ else:
+ return self._state.command_history.get_prev(tail_command.command.id)
+ else:
+ final_command = self._state.command_history.get_terminal_command()
+ # This iteration is effectively O(1) as we'll only ever have to iterate one or two times at most.
+ while final_command is not None:
+ next_command = self._state.command_history.get_next(
+ final_command.command.id
+ )
+ if (
+ next_command is not None
+ and next_command.command.status != CommandStatus.QUEUED
+ and next_command.command.status != CommandStatus.RUNNING
+ ):
+ final_command = next_command
+ else:
+ break
+
+ return final_command
+
def get_command_is_final(self, command_id: str) -> bool:
"""Get whether a given command has reached its final `status`.
@@ -601,10 +727,12 @@ def get_command_is_final(self, command_id: str) -> bool:
"""
status = self.get(command_id).status
+ run_requested_to_stop = self._state.run_result is not None
+
return (
status == CommandStatus.SUCCEEDED
or status == CommandStatus.FAILED
- or (status == CommandStatus.QUEUED and self._state.run_result is not None)
+ or (status == CommandStatus.QUEUED and run_requested_to_stop)
)
def get_all_commands_final(self) -> bool:
@@ -616,23 +744,49 @@ def get_all_commands_final(self) -> bool:
CommandExecutionFailedError: if any added command failed, and its `intent` wasn't
`setup`.
"""
- no_command_running = self._state.running_command_id is None
+ no_command_running = self._state.command_history.get_running_command() is None
+ run_requested_to_stop = self._state.run_result is not None
no_command_to_execute = (
- self._state.run_result is not None
- or len(self._state.queued_command_ids) == 0
+ run_requested_to_stop
+ # TODO(mm, 2024-03-15): This ignores queued setup commands,
+ # which seems questionable?
+ or len(self._state.command_history.get_queue_ids()) == 0
)
- if no_command_running and no_command_to_execute:
- for command_id in self._state.all_command_ids:
- command = self._state.commands_by_id[command_id].command
- if command.error and command.intent != CommandIntent.SETUP:
- # TODO(tz, 7-11-23): avoid raising an error and return the status instead
- raise ProtocolCommandFailedError(
- original_error=command.error, message=command.error.detail
- )
- return True
- else:
- return False
+ return no_command_running and no_command_to_execute
+
+ def get_recovery_in_progress_for_command(self, command_id: str) -> bool:
+ """Return whether the given command failed and its error recovery is in progress."""
+ return self._state.recovery_target_command_id == command_id
+
+ def raise_fatal_command_error(self) -> None:
+ """Raise the run's fatal command error, if there was one, as an exception.
+
+ The "fatal command error" is the error from any non-setup command.
+ It's intended to be used as the fatal error of the overall run
+ (see `ProtocolEngine.finish()`) for JSON and live HTTP protocols.
+
+ This isn't useful for Python protocols, which have to account for the
+        fatal error of the overall run coming from anywhere in the Python script,
+ including in between commands.
+ """
+ failed_command = self.state.failed_command
+ if (
+ failed_command
+ and failed_command.command.error
+ and failed_command.command.intent != CommandIntent.SETUP
+ ):
+ raise ProtocolCommandFailedError(
+ original_error=failed_command.command.error,
+ message=failed_command.command.error.detail,
+ )
+
+ def get_error_recovery_type(self, command_id: str) -> ErrorRecoveryType:
+ """Return the error recovery type with which the given command failed.
+
+ The command ID is assumed to point to a failed command.
+ """
+ return self.state.command_error_recovery_types[command_id]
def get_is_stopped(self) -> bool:
"""Get whether an engine stop has completed."""
@@ -646,10 +800,22 @@ def get_is_terminal(self) -> bool:
"""Get whether engine is in a terminal state."""
return self._state.run_result is not None
- def validate_action_allowed(
+ def validate_action_allowed( # noqa: C901
self,
- action: Union[PlayAction, PauseAction, StopAction, QueueCommandAction],
- ) -> Union[PlayAction, PauseAction, StopAction, QueueCommandAction]:
+ action: Union[
+ PlayAction,
+ PauseAction,
+ StopAction,
+ ResumeFromRecoveryAction,
+ QueueCommandAction,
+ ],
+ ) -> Union[
+ PlayAction,
+ PauseAction,
+ StopAction,
+ ResumeFromRecoveryAction,
+ QueueCommandAction,
+ ]:
"""Validate whether a given control action is allowed.
Returns:
@@ -668,21 +834,57 @@ def validate_action_allowed(
elif isinstance(action, PlayAction):
if self.get_status() == EngineStatus.BLOCKED_BY_OPEN_DOOR:
raise RobotDoorOpenError("Front door or top window is currently open.")
+ elif self.get_status() == EngineStatus.AWAITING_RECOVERY:
+ raise NotImplementedError()
+ else:
+ return action
elif isinstance(action, PauseAction):
if not self.get_is_running():
raise PauseNotAllowedError("Cannot pause a run that is not running.")
+ elif self.get_status() == EngineStatus.AWAITING_RECOVERY:
+ raise NotImplementedError()
+ else:
+ return action
- elif (
- isinstance(action, QueueCommandAction)
- and action.request.intent == CommandIntent.SETUP
- ):
- if self._state.queue_status != QueueStatus.SETUP:
+ elif isinstance(action, QueueCommandAction):
+ if (
+ action.request.intent == CommandIntent.SETUP
+ and self._state.queue_status != QueueStatus.SETUP
+ ):
raise SetupCommandNotAllowedError(
"Setup commands are not allowed after run has started."
)
+ elif action.request.intent == CommandIntent.FIXIT:
+ if self._state.queue_status != QueueStatus.AWAITING_RECOVERY:
+ raise FixitCommandNotAllowedError(
+ "Fixit commands are not allowed when the run is not in a recoverable state."
+ )
+ else:
+ return action
+ else:
+ return action
- return action
+ elif isinstance(action, ResumeFromRecoveryAction):
+ if self.get_status() != EngineStatus.AWAITING_RECOVERY:
+ raise ResumeFromRecoveryNotAllowedError(
+ "Cannot resume from recovery if the run is not in recovery mode."
+ )
+ elif (
+ self.get_status() == EngineStatus.AWAITING_RECOVERY
+ and len(self._state.command_history.get_fixit_queue_ids()) > 0
+ ):
+ raise ResumeFromRecoveryNotAllowedError(
+ "Cannot resume from recovery while there are fixit commands in the queue."
+ )
+ else:
+ return action
+
+ elif isinstance(action, StopAction):
+ return action
+
+ else:
+ assert_never(action)
def get_status(self) -> EngineStatus:
"""Get the current execution status of the engine."""
@@ -719,8 +921,13 @@ def get_status(self) -> EngineStatus:
else:
return EngineStatus.PAUSED
+ elif self._state.queue_status == QueueStatus.AWAITING_RECOVERY:
+ return EngineStatus.AWAITING_RECOVERY
+
+        # todo(mm, 2024-03-19): Does this intentionally return IDLE if QueueStatus is
+        # SETUP and we're currently running a setup command?
return EngineStatus.IDLE
- def get_latest_command_hash(self) -> Optional[str]:
+ def get_latest_protocol_command_hash(self) -> Optional[str]:
"""Get the command hash of the last queued command, if any."""
- return self._state.latest_command_hash
+ return self._state.latest_protocol_command_hash
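The FailCommandAction handling above branches on ErrorRecoveryType: WAIT_FOR_RECOVERY parks the engine in AWAITING_RECOVERY and records the failed command as the recovery target, while FAIL_RUN cancels whatever is still queued. A reduced sketch of that decision, using toy enums rather than the engine's real action and reducer types:

# Reduced sketch of the FailCommandAction branch above; the enums and the
# function are illustrative, not the engine's real action or reducer types.
import enum
from typing import Optional, Set, Tuple


class RecoveryType(enum.Enum):
    WAIT_FOR_RECOVERY = enum.auto()
    FAIL_RUN = enum.auto()


class QueueState(enum.Enum):
    RUNNING = enum.auto()
    AWAITING_RECOVERY = enum.auto()


def handle_protocol_command_failure(
    queue_status: QueueState,
    queued_ids: Set[str],
    recovery_type: RecoveryType,
    failed_command_id: str,
) -> Tuple[QueueState, Set[str], Optional[str]]:
    """Return (new queue status, surviving queue, recovery target ID)."""
    if recovery_type is RecoveryType.WAIT_FOR_RECOVERY:
        # Park the engine in recovery; leave the protocol queue intact so it
        # can resume once fixit commands have run.
        return QueueState.AWAITING_RECOVERY, queued_ids, failed_command_id
    # FAIL_RUN: everything still queued is effectively cancelled.
    return queue_status, set(), None


status, remaining, target = handle_protocol_command_failure(
    QueueState.RUNNING, {"cmd-2", "cmd-3"}, RecoveryType.WAIT_FOR_RECOVERY, "cmd-1"
)
assert status is QueueState.AWAITING_RECOVERY and target == "cmd-1"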
diff --git a/api/src/opentrons/protocol_engine/state/config.py b/api/src/opentrons/protocol_engine/state/config.py
index f1ba812bb8f..c5ba5fb07db 100644
--- a/api/src/opentrons/protocol_engine/state/config.py
+++ b/api/src/opentrons/protocol_engine/state/config.py
@@ -17,10 +17,14 @@ class Config:
or pretending to control.
ignore_pause: The engine should no-op instead of waiting
for pauses and delays to complete.
+ use_virtual_pipettes: The engine should no-op instead of calling
+            instruments' hardware control API.
use_virtual_modules: The engine should no-op instead of calling
modules' hardware control API.
use_virtual_gripper: The engine should no-op instead of calling
gripper hardware control API.
+ use_simulated_deck_config: The engine should lazily populate the deck
+            configuration instead of loading a provided configuration.
block_on_door_open: Protocol execution should pause if the
front door is opened.
"""
@@ -31,4 +35,5 @@ class Config:
use_virtual_pipettes: bool = False
use_virtual_modules: bool = False
use_virtual_gripper: bool = False
+ use_simulated_deck_config: bool = False
block_on_door_open: bool = False
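The new use_simulated_deck_config flag is consumed later in this diff: with a simulated deck configuration the concrete cutout fixtures are unknown, so fixture lookups return None and the highest-fixture height falls back to 0.0 (see get_all_obstacle_highest_z in the geometry.py diff below). A hedged toy illustration of that gating; ToyEngineConfig and the fixture name are made up for the example and are not the engine's Config class:

# Hedged toy illustration; ToyEngineConfig and the fixture name are made up
# and are not the engine's Config class or a real cutout fixture ID.
from dataclasses import dataclass
from typing import List, Optional


@dataclass(frozen=True)
class ToyEngineConfig:
    use_simulated_deck_config: bool = False


def cutout_fixtures_for_highest_z(
    config: ToyEngineConfig, loaded_fixtures: List[str]
) -> Optional[List[str]]:
    # With a simulated deck config the concrete fixtures are unknown, so the
    # caller falls back to a 0.0 fixture height.
    if config.use_simulated_deck_config:
        return None
    return loaded_fixtures


assert cutout_fixtures_for_highest_z(ToyEngineConfig(True), ["exampleTallFixture"]) is None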
diff --git a/api/src/opentrons/protocol_engine/state/geometry.py b/api/src/opentrons/protocol_engine/state/geometry.py
index 7c26be23098..112d7d60ef4 100644
--- a/api/src/opentrons/protocol_engine/state/geometry.py
+++ b/api/src/opentrons/protocol_engine/state/geometry.py
@@ -1,12 +1,23 @@
"""Geometry state getters."""
import enum
-from numpy import array, dot
-from typing import Optional, List, Set, Tuple, Union, cast
+from numpy import array, dot, double as npdouble
+from numpy.typing import NDArray
+from typing import Optional, List, Tuple, Union, cast, TypeVar, Dict
+
+from opentrons.types import Point, DeckSlotName, StagingSlotName, MountType
-from opentrons.types import Point, DeckSlotName, MountType
from opentrons_shared_data.labware.constants import WELL_NAME_PATTERN
+from opentrons_shared_data.deck.dev_types import CutoutFixture
+from opentrons_shared_data.pipette import PIPETTE_X_SPAN
+from opentrons_shared_data.pipette.dev_types import ChannelCount
from .. import errors
+from ..errors import (
+ LabwareNotLoadedOnLabwareError,
+ LabwareNotLoadedOnModuleError,
+ LabwareMovementNotAllowedError,
+)
+from ..resources import fixture_validation
from ..types import (
OFF_DECK_LOCATION,
LoadedLabware,
@@ -23,22 +34,27 @@
LabwareOffsetVector,
ModuleOffsetVector,
ModuleOffsetData,
- DeckType,
CurrentWell,
+ CurrentPipetteLocation,
TipGeometry,
LabwareMovementOffsetData,
OnDeckLabwareLocation,
+ AddressableAreaLocation,
+ AddressableOffsetVector,
+ StagingSlotLocation,
+ LabwareOffsetLocation,
)
from .config import Config
from .labware import LabwareView
from .modules import ModuleView
from .pipettes import PipetteView
-
-from opentrons_shared_data.pipette import PIPETTE_X_SPAN
-from opentrons_shared_data.pipette.dev_types import ChannelCount
+from .addressable_areas import AddressableAreaView
SLOT_WIDTH = 128
+_PIPETTE_HOMED_POSITION_Z = (
+ 248.0 # Height of the bottom of the nozzle without the tip attached when homed
+)
class _TipDropSection(enum.Enum):
@@ -55,6 +71,9 @@ class _GripperMoveType(enum.Enum):
DROP_LABWARE = enum.auto()
+_LabwareLocation = TypeVar("_LabwareLocation", bound=LabwareLocation)
+
+
# TODO(mc, 2021-06-03): continue evaluation of which selectors should go here
# vs which selectors should be in LabwareView
class GeometryView:
@@ -66,13 +85,15 @@ def __init__(
labware_view: LabwareView,
module_view: ModuleView,
pipette_view: PipetteView,
+ addressable_area_view: AddressableAreaView,
) -> None:
"""Initialize a GeometryView instance."""
self._config = config
self._labware = labware_view
self._modules = module_view
self._pipettes = pipette_view
- self._last_drop_tip_location_spot: Optional[_TipDropSection] = None
+ self._addressable_areas = addressable_area_view
+ self._last_drop_tip_location_spot: Dict[str, _TipDropSection] = {}
def get_labware_highest_z(self, labware_id: str) -> float:
"""Get the highest Z-point of a labware."""
@@ -80,9 +101,8 @@ def get_labware_highest_z(self, labware_id: str) -> float:
return self._get_highest_z_from_labware_data(labware_data)
- # TODO(mc, 2022-06-24): rename this method
- def get_all_labware_highest_z(self) -> float:
- """Get the highest Z-point across all labware."""
+ def get_all_obstacle_highest_z(self) -> float:
+ """Get the highest Z-point across all obstacles that the instruments need to fly over."""
highest_labware_z = max(
(
self._get_highest_z_from_labware_data(lw_data)
@@ -92,6 +112,8 @@ def get_all_labware_highest_z(self) -> float:
default=0.0,
)
+ # Fixme (spp, 2023-12-04): the overall height is not the true highest z of modules
+ # on a Flex.
highest_module_z = max(
(
self._modules.get_overall_height(module.id)
@@ -100,32 +122,103 @@ def get_all_labware_highest_z(self) -> float:
default=0.0,
)
- return max(highest_labware_z, highest_module_z)
+ cutout_fixture_names = self._addressable_areas.get_all_cutout_fixtures()
+ if cutout_fixture_names is None:
+ # We're using a simulated deck config (see `Config.use_simulated_deck_config`).
+ # We only know the addressable areas referenced by the protocol, not the fixtures
+ # providing them. And there is more than one possible configuration of fixtures
+ # to provide them. So, we can't know what the highest fixture is. Default to 0.
+ #
+ # Defaulting to 0 may not be the right thing to do here.
+ # For example, suppose a protocol references an addressable area that implies a tall
+ # fixture must be on the deck, and then it uses long tips that wouldn't be able to
+ # clear the top of that fixture. We should perhaps raise an analysis error for that,
+ # but defaulting to 0 here means we won't.
+ highest_fixture_z = 0.0
+ else:
+ highest_fixture_z = max(
+ (
+ self._addressable_areas.get_fixture_height(cutout_fixture_name)
+ for cutout_fixture_name in cutout_fixture_names
+ ),
+ default=0.0,
+ )
+
+ return max(
+ highest_labware_z,
+ highest_module_z,
+ highest_fixture_z,
+ )
+
+ def get_highest_z_in_slot(
+ self, slot: Union[DeckSlotLocation, StagingSlotLocation]
+ ) -> float:
+ """Get the highest Z-point of all items stacked in the given deck slot.
+
+ This height includes the height of any module that occupies the given slot
+ even if it wasn't loaded in that slot (e.g., thermocycler).
+ """
+ slot_item = self.get_slot_item(slot.slotName)
+ if isinstance(slot_item, LoadedModule):
+ # get height of module + all labware on it
+ module_id = slot_item.id
+ try:
+ labware_id = self._labware.get_id_by_module(module_id=module_id)
+ except LabwareNotLoadedOnModuleError:
+ return self._modules.get_module_highest_z(
+ module_id=module_id,
+ addressable_areas=self._addressable_areas,
+ )
+ else:
+ return self.get_highest_z_of_labware_stack(labware_id)
+ elif isinstance(slot_item, LoadedLabware):
+ # get stacked heights of all labware in the slot
+ return self.get_highest_z_of_labware_stack(slot_item.id)
+ elif type(slot_item) is dict:
+ # TODO (cb, 2024-02-05): Eventually this logic should become the responsibility of bounding box
+ # conflict checking, as fixtures may not always be considered as items from slots.
+ return self._addressable_areas.get_fixture_height(slot_item["id"])
+ else:
+ return 0
+
+ def get_highest_z_of_labware_stack(self, labware_id: str) -> float:
+ """Get the highest Z-point of the topmost labware in the stack of labware on the given labware.
+
+        If there is no labware on the given labware, returns the highest Z of the given labware.
+ """
+ try:
+ stacked_labware_id = self._labware.get_id_by_labware(labware_id)
+ except LabwareNotLoadedOnLabwareError:
+ return self.get_labware_highest_z(labware_id)
+ return self.get_highest_z_of_labware_stack(stacked_labware_id)
def get_min_travel_z(
self,
pipette_id: str,
labware_id: str,
- location: Optional[CurrentWell],
+ location: Optional[CurrentPipetteLocation],
minimum_z_height: Optional[float],
) -> float:
"""Get the minimum allowed travel height of an arc move."""
if (
- location is not None
+ isinstance(location, CurrentWell)
and pipette_id == location.pipette_id
and labware_id == location.labware_id
):
min_travel_z = self.get_labware_highest_z(labware_id)
else:
- min_travel_z = self.get_all_labware_highest_z()
+ min_travel_z = self.get_all_obstacle_highest_z()
if minimum_z_height:
min_travel_z = max(min_travel_z, minimum_z_height)
return min_travel_z
def get_labware_parent_nominal_position(self, labware_id: str) -> Point:
"""Get the position of the labware's uncalibrated parent slot (deck, module, or another labware)."""
- slot_name = self.get_ancestor_slot_name(labware_id)
- slot_pos = self._labware.get_slot_position(slot_name)
+ try:
+ slot_name = self.get_ancestor_slot_name(labware_id).id
+ except errors.LocationIsStagingSlotError:
+ slot_name = self._get_staging_slot_name(labware_id)
+ slot_pos = self._addressable_areas.get_addressable_area_position(slot_name)
labware_data = self._labware.get(labware_id)
offset = self._get_labware_position_offset(labware_id, labware_data.location)
@@ -151,13 +244,12 @@ def _get_labware_position_offset(
on modules as well as stacking overlaps.
Does not include module calibration offset or LPC offset.
"""
- if isinstance(labware_location, DeckSlotLocation):
+ if isinstance(labware_location, (AddressableAreaLocation, DeckSlotLocation)):
return LabwareOffsetVector(x=0, y=0, z=0)
elif isinstance(labware_location, ModuleLocation):
module_id = labware_location.moduleId
- deck_type = DeckType(self._labware.get_deck_definition()["otId"])
module_offset = self._modules.get_nominal_module_offset(
- module_id=module_id, deck_type=deck_type
+ module_id=module_id, addressable_areas=self._addressable_areas
)
module_model = self._modules.get_connected_model(module_id)
stacking_overlap = self._labware.get_module_overlap_offsets(
@@ -208,9 +300,11 @@ def _normalize_module_calibration_offset(
# Check if the module has moved from one side of the deck to the other
if calibrated_slot_column != current_slot_column:
# Since the module was rotated, the calibration offset vector needs to be rotated by 180 degrees along the z axis
- saved_offset = array([offset.x, offset.y, offset.z])
- rotation_matrix = array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
- new_offset = dot(saved_offset, rotation_matrix) # type: ignore[no-untyped-call]
+ saved_offset: NDArray[npdouble] = array([offset.x, offset.y, offset.z])
+ rotation_matrix: NDArray[npdouble] = array(
+ [[-1, 0, 0], [0, -1, 0], [0, 0, 1]]
+ )
+ new_offset = dot(saved_offset, rotation_matrix)
offset = ModuleOffsetVector(
x=new_offset[0], y=new_offset[1], z=new_offset[2]
)
@@ -227,7 +321,9 @@ def _get_calibrated_module_offset(
return self._normalize_module_calibration_offset(
module_location, offset_data
)
- elif isinstance(location, DeckSlotLocation):
+ elif isinstance(location, (DeckSlotLocation, AddressableAreaLocation)):
+ # TODO we might want to do a check here to make sure addressable area location is a standard deck slot
+        # and raise if it's not (or maybe we don't actually care since modules will never be loaded elsewhere)
return ModuleOffsetVector(x=0, y=0, z=0)
elif isinstance(location, OnLabwareLocation):
labware_data = self._labware.get(location.labwareId)
@@ -339,6 +435,13 @@ def _get_highest_z_from_labware_data(self, lw_data: LoadedLabware) -> float:
z_dim = definition.dimensions.zDimension
height_over_labware: float = 0
if isinstance(lw_data.location, ModuleLocation):
+ # Note: when calculating highest z of stacked labware, height-over-labware
+ # gets accounted for only if the top labware is directly on the module.
+ # So if there's a labware on an adapter on a module, then this
+ # over-module-height gets ignored. We currently do not have any modules
+ # that use an adapter and has height over labware so this doesn't cause
+ # any issues yet. But if we add one in the future then this calculation
+ # should be updated.
module_id = lw_data.location.moduleId
height_over_labware = self._modules.get_height_over_labware(module_id)
return labware_pos.z + z_dim + height_over_labware
@@ -401,12 +504,20 @@ def get_checked_tip_drop_location(
pipette_id: str,
labware_id: str,
well_location: DropTipWellLocation,
+ partially_configured: bool = False,
) -> WellLocation:
"""Get tip drop location given labware and hardware pipette.
This makes sure that the well location has an appropriate origin & offset
if one is not already set previously.
"""
+ if (
+ self._labware.get_definition(labware_id).parameters.isTiprack
+ and partially_configured
+ ):
+ raise errors.UnexpectedProtocolError(
+ "Cannot return tip to a tiprack while the pipette is configured for partial tip."
+ )
if well_location.origin != DropTipWellOrigin.DEFAULT:
return WellLocation(
origin=WellOrigin(well_location.origin.value),
@@ -432,6 +543,22 @@ def get_checked_tip_drop_location(
),
)
+ # TODO(jbl 11-30-2023) fold this function into get_ancestor_slot_name see RSS-411
+ def _get_staging_slot_name(self, labware_id: str) -> str:
+ """Get the staging slot name that the labware is on."""
+ labware_location = self._labware.get(labware_id).location
+ if isinstance(labware_location, OnLabwareLocation):
+ below_labware_id = labware_location.labwareId
+ return self._get_staging_slot_name(below_labware_id)
+ elif isinstance(
+ labware_location, AddressableAreaLocation
+ ) and fixture_validation.is_staging_slot(labware_location.addressableAreaName):
+ return labware_location.addressableAreaName
+ else:
+ raise ValueError(
+ "Cannot get staging slot name for labware not on staging slot."
+ )
+
def get_ancestor_slot_name(self, labware_id: str) -> DeckSlotName:
"""Get the slot name of the labware or the module that the labware is on."""
labware = self._labware.get(labware_id)
@@ -445,6 +572,15 @@ def get_ancestor_slot_name(self, labware_id: str) -> DeckSlotName:
elif isinstance(labware.location, OnLabwareLocation):
below_labware_id = labware.location.labwareId
slot_name = self.get_ancestor_slot_name(below_labware_id)
+ elif isinstance(labware.location, AddressableAreaLocation):
+ area_name = labware.location.addressableAreaName
+ # TODO we might want to eventually return some sort of staging slot name when we're ready to work through
+ # the linting nightmare it will create
+ if fixture_validation.is_staging_slot(area_name):
+ raise errors.LocationIsStagingSlotError(
+ "Cannot get ancestor slot name for labware on staging slot."
+ )
+ slot_name = DeckSlotName.from_primitive(area_name)
elif labware.location == OFF_DECK_LOCATION:
raise errors.LabwareNotOnDeckError(
f"Labware {labware_id} does not have a slot associated with it"
@@ -454,18 +590,33 @@ def get_ancestor_slot_name(self, labware_id: str) -> DeckSlotName:
return slot_name
def ensure_location_not_occupied(
- self, location: LabwareLocation
- ) -> LabwareLocation:
- """Ensure that the location does not already have equipment in it."""
- if isinstance(location, (DeckSlotLocation, ModuleLocation)):
+ self, location: _LabwareLocation
+ ) -> _LabwareLocation:
+ """Ensure that the location does not already have either Labware or a Module in it."""
+ # TODO (spp, 2023-11-27): Slot locations can also be addressable areas
+ # so we will need to cross-check against items loaded in both location types.
+        # Something like 'check if an item is in both lists: labware on addressable areas
+ # as well as labware on slots'. Same for modules.
+ if isinstance(
+ location,
+ (
+ DeckSlotLocation,
+ ModuleLocation,
+ OnLabwareLocation,
+ AddressableAreaLocation,
+ ),
+ ):
self._labware.raise_if_labware_in_location(location)
+ if isinstance(location, DeckSlotLocation):
self._modules.raise_if_module_in_location(location)
return location
def get_labware_grip_point(
self,
labware_id: str,
- location: Union[DeckSlotLocation, ModuleLocation, OnLabwareLocation],
+ location: Union[
+ DeckSlotLocation, ModuleLocation, OnLabwareLocation, AddressableAreaLocation
+ ],
) -> Point:
"""Get the grip point of the labware as placed on the given location.
@@ -480,16 +631,30 @@ def get_labware_grip_point(
grip_height_from_labware_bottom = (
self._labware.get_grip_height_from_labware_bottom(labware_id)
)
- location_slot: DeckSlotName
+ location_name: str
if isinstance(location, DeckSlotLocation):
- location_slot = location.slotName
+ location_name = location.slotName.id
offset = LabwareOffsetVector(x=0, y=0, z=0)
+ elif isinstance(location, AddressableAreaLocation):
+ location_name = location.addressableAreaName
+ if fixture_validation.is_gripper_waste_chute(location_name):
+ drop_labware_location = (
+ self._addressable_areas.get_addressable_area_move_to_location(
+ location_name
+ )
+ )
+ return drop_labware_location + Point(z=grip_height_from_labware_bottom)
+ # Location should have been pre-validated so this will be a deck/staging area slot
+ else:
+ offset = LabwareOffsetVector(x=0, y=0, z=0)
else:
if isinstance(location, ModuleLocation):
- location_slot = self._modules.get_location(location.moduleId).slotName
+ location_name = self._modules.get_location(
+ location.moduleId
+ ).slotName.id
else: # OnLabwareLocation
- location_slot = self.get_ancestor_slot_name(location.labwareId)
+ location_name = self.get_ancestor_slot_name(location.labwareId).id
labware_offset = self._get_labware_position_offset(labware_id, location)
# Get the calibrated offset if the on labware location is on top of a module, otherwise return empty one
cal_offset = self._get_calibrated_module_offset(location)
@@ -499,50 +664,73 @@ def get_labware_grip_point(
z=labware_offset.z + cal_offset.z,
)
- slot_center = self._labware.get_slot_center_position(location_slot)
+ location_center = self._addressable_areas.get_addressable_area_center(
+ location_name
+ )
return Point(
- slot_center.x + offset.x,
- slot_center.y + offset.y,
- slot_center.z + offset.z + grip_height_from_labware_bottom,
+ location_center.x + offset.x,
+ location_center.y + offset.y,
+ location_center.z + offset.z + grip_height_from_labware_bottom,
)
def get_extra_waypoints(
- self, labware_id: str, location: Optional[CurrentWell]
+ self, location: Optional[CurrentPipetteLocation], to_slot: DeckSlotName
) -> List[Tuple[float, float]]:
"""Get extra waypoints for movement if thermocycler needs to be dodged."""
- if location is not None and self._modules.should_dodge_thermocycler(
- from_slot=self.get_ancestor_slot_name(location.labware_id),
- to_slot=self.get_ancestor_slot_name(labware_id),
- ):
- middle_slot = DeckSlotName.SLOT_5.to_equivalent_for_robot_type(
- self._config.robot_type
- )
- middle_slot_center = self._labware.get_slot_center_position(
- slot=middle_slot,
- )
- return [(middle_slot_center.x, middle_slot_center.y)]
+ if location is not None:
+ if isinstance(location, CurrentWell):
+ from_slot = self.get_ancestor_slot_name(location.labware_id)
+ else:
+ from_slot = self._addressable_areas.get_addressable_area_base_slot(
+ location.addressable_area_name
+ )
+ if self._modules.should_dodge_thermocycler(
+ from_slot=from_slot, to_slot=to_slot
+ ):
+ middle_slot = DeckSlotName.SLOT_5.to_equivalent_for_robot_type(
+ self._config.robot_type
+ )
+ middle_slot_center = (
+ self._addressable_areas.get_addressable_area_center(
+ addressable_area_name=middle_slot.id,
+ )
+ )
+ return [(middle_slot_center.x, middle_slot_center.y)]
return []
- # TODO(mc, 2022-12-09): enforce data integrity (e.g. one module per slot)
- # rather than shunting this work to callers via `allowed_ids`.
- # This has larger implications and is tied up in splitting LPC out of the protocol run
def get_slot_item(
- self,
- slot_name: DeckSlotName,
- allowed_labware_ids: Set[str],
- allowed_module_ids: Set[str],
- ) -> Union[LoadedLabware, LoadedModule, None]:
- """Get the item present in a deck slot, if any."""
+ self, slot_name: Union[DeckSlotName, StagingSlotName]
+ ) -> Union[LoadedLabware, LoadedModule, CutoutFixture, None]:
+ """Get the top-most item present in a deck slot, if any.
+
+ This includes any module that occupies the given slot even if it wasn't loaded
+ in that slot (e.g., thermocycler).
+ """
maybe_labware = self._labware.get_by_slot(
slot_name=slot_name,
- allowed_ids=allowed_labware_ids,
- )
- maybe_module = self._modules.get_by_slot(
- slot_name=slot_name,
- allowed_ids=allowed_module_ids,
)
- return maybe_labware or maybe_module or None
+ if isinstance(slot_name, DeckSlotName):
+ maybe_fixture = self._addressable_areas.get_fixture_by_deck_slot_name(
+ slot_name
+ )
+ # Ignore generic single slot fixtures
+ if maybe_fixture and maybe_fixture["id"] in {
+ "singleLeftSlot",
+ "singleCenterSlot",
+ "singleRightSlot",
+ }:
+ maybe_fixture = None
+
+ maybe_module = self._modules.get_by_slot(
+ slot_name=slot_name,
+ ) or self._modules.get_overflowed_module_in_slot(slot_name=slot_name)
+ else:
+ # Modules and fixtures can't be loaded on staging slots
+ maybe_fixture = None
+ maybe_module = None
+
+ return maybe_labware or maybe_module or maybe_fixture or None
@staticmethod
def get_slot_column(slot_name: DeckSlotName) -> int:
@@ -603,7 +791,7 @@ def get_next_tip_drop_location(
slot_name=self.get_ancestor_slot_name(labware_id)
)
- if self._last_drop_tip_location_spot == _TipDropSection.RIGHT:
+ if self._last_drop_tip_location_spot.get(labware_id) == _TipDropSection.RIGHT:
# Drop tip in LEFT section
x_offset = self._get_drop_tip_well_x_offset(
tip_drop_section=_TipDropSection.LEFT,
@@ -612,7 +800,7 @@ def get_next_tip_drop_location(
pipette_mount=pipette_mount,
labware_slot_column=labware_slot_column,
)
- self._last_drop_tip_location_spot = _TipDropSection.LEFT
+ self._last_drop_tip_location_spot[labware_id] = _TipDropSection.LEFT
else:
# Drop tip in RIGHT section
x_offset = self._get_drop_tip_well_x_offset(
@@ -622,7 +810,7 @@ def get_next_tip_drop_location(
pipette_mount=pipette_mount,
labware_slot_column=labware_slot_column,
)
- self._last_drop_tip_location_spot = _TipDropSection.RIGHT
+ self._last_drop_tip_location_spot[labware_id] = _TipDropSection.RIGHT
return DropTipWellLocation(
origin=DropTipWellOrigin.TOP,
@@ -633,6 +821,59 @@ def get_next_tip_drop_location(
),
)
+    # TODO: find a way to combine this with get_next_tip_drop_location above
+ def get_next_tip_drop_location_for_addressable_area(
+ self,
+ addressable_area_name: str,
+ pipette_id: str,
+ ) -> AddressableOffsetVector:
+ """Get the next location within the specified well to drop the tip into.
+
+ See the doc-string for `get_next_tip_drop_location` for more info on execution.
+ """
+ area_x_dim = self._addressable_areas.get_addressable_area(
+ addressable_area_name
+ ).bounding_box.x
+
+ pipette_channels = self._pipettes.get_config(pipette_id).channels
+ pipette_mount = self._pipettes.get_mount(pipette_id)
+
+ labware_slot_column = self.get_slot_column(
+ slot_name=self._addressable_areas.get_addressable_area_base_slot(
+ addressable_area_name
+ )
+ )
+
+ if (
+ self._last_drop_tip_location_spot.get(addressable_area_name)
+ == _TipDropSection.RIGHT
+ ):
+ # Drop tip in LEFT section
+ x_offset = self._get_drop_tip_well_x_offset(
+ tip_drop_section=_TipDropSection.LEFT,
+ well_x_dim=area_x_dim,
+ pipette_channels=pipette_channels,
+ pipette_mount=pipette_mount,
+ labware_slot_column=labware_slot_column,
+ )
+ self._last_drop_tip_location_spot[
+ addressable_area_name
+ ] = _TipDropSection.LEFT
+ else:
+ # Drop tip in RIGHT section
+ x_offset = self._get_drop_tip_well_x_offset(
+ tip_drop_section=_TipDropSection.RIGHT,
+ well_x_dim=area_x_dim,
+ pipette_channels=pipette_channels,
+ pipette_mount=pipette_mount,
+ labware_slot_column=labware_slot_column,
+ )
+ self._last_drop_tip_location_spot[
+ addressable_area_name
+ ] = _TipDropSection.RIGHT
+
+ return AddressableOffsetVector(x=x_offset, y=0, z=0)
+
@staticmethod
def _get_drop_tip_well_x_offset(
tip_drop_section: _TipDropSection,
@@ -654,7 +895,7 @@ def _get_drop_tip_well_x_offset(
):
# Pipette might not reach the default left spot so use a different left spot
x_well_offset = (
- well_x_dim / 2 - SLOT_WIDTH + drop_location_margin_from_labware_edge
+ -well_x_dim / 2 + drop_location_margin_from_labware_edge * 2
)
else:
x_well_offset = -well_x_dim / 2 + drop_location_margin_from_labware_edge
@@ -706,10 +947,18 @@ def get_final_labware_movement_offset_vectors(
@staticmethod
def ensure_valid_gripper_location(
location: LabwareLocation,
- ) -> Union[DeckSlotLocation, ModuleLocation, OnLabwareLocation]:
+ ) -> Union[
+ DeckSlotLocation, ModuleLocation, OnLabwareLocation, AddressableAreaLocation
+ ]:
"""Ensure valid on-deck location for gripper, otherwise raise error."""
if not isinstance(
- location, (DeckSlotLocation, ModuleLocation, OnLabwareLocation)
+ location,
+ (
+ DeckSlotLocation,
+ ModuleLocation,
+ OnLabwareLocation,
+ AddressableAreaLocation,
+ ),
):
raise errors.LabwareMovementNotAllowedError(
"Off-deck labware movements are not supported using the gripper."
@@ -721,7 +970,9 @@ def get_total_nominal_gripper_offset_for_move_type(
) -> LabwareOffsetVector:
"""Get the total of the offsets to be used to pick up labware in its current location."""
if move_type == _GripperMoveType.PICK_UP_LABWARE:
- if isinstance(location, (ModuleLocation, DeckSlotLocation)):
+ if isinstance(
+ location, (ModuleLocation, DeckSlotLocation, AddressableAreaLocation)
+ ):
return self._nominal_gripper_offsets_for_location(location).pickUpOffset
else:
# If it's a labware on a labware (most likely an adapter),
@@ -741,7 +992,9 @@ def get_total_nominal_gripper_offset_for_move_type(
).pickUpOffset
)
else:
- if isinstance(location, (ModuleLocation, DeckSlotLocation)):
+ if isinstance(
+ location, (ModuleLocation, DeckSlotLocation, AddressableAreaLocation)
+ ):
return self._nominal_gripper_offsets_for_location(location).dropOffset
else:
# If it's a labware on a labware (most likely an adapter),
@@ -761,11 +1014,43 @@ def get_total_nominal_gripper_offset_for_move_type(
).dropOffset
)
+ def check_gripper_labware_tip_collision(
+ self,
+ gripper_homed_position_z: float,
+ labware_id: str,
+ current_location: OnDeckLabwareLocation,
+ ) -> None:
+ """Check for potential collision of tips against labware to be lifted."""
+ # TODO(cb, 2024-01-22): Remove the 1 and 8 channel special case once we are doing X axis validation
+ pipettes = self._pipettes.get_all()
+ for pipette in pipettes:
+ if self._pipettes.get_channels(pipette.id) in [1, 8]:
+ return
+
+ tip = self._pipettes.get_attached_tip(pipette.id)
+ if tip:
+ labware_top_z_when_gripped = gripper_homed_position_z + (
+ self.get_labware_highest_z(labware_id=labware_id)
+ - self.get_labware_grip_point(
+ labware_id=labware_id, location=current_location
+ ).z
+ )
+ # TODO(cb, 2024-01-18): Utilizing the nozzle map and labware X coordinates verify if collisions will occur on the X axis (analysis will use hard coded data to measure from the gripper critical point to the pipette mount)
+ if (
+ _PIPETTE_HOMED_POSITION_Z - tip.length
+ ) < labware_top_z_when_gripped:
+ raise LabwareMovementNotAllowedError(
+ f"Cannot move labware '{self._labware.get(labware_id).loadName}' when {int(tip.volume)} µL tips are attached."
+ )
+ return
+
def _nominal_gripper_offsets_for_location(
self, location: OnDeckLabwareLocation
) -> LabwareMovementOffsetData:
"""Provide the default gripper offset data for the given location type."""
- if isinstance(location, DeckSlotLocation):
+ if isinstance(location, (DeckSlotLocation, AddressableAreaLocation)):
+            # TODO: we might need a separate type of gripper offset for addressable areas,
+            # but that might also be covered by the drop labware offset/location.
offsets = self._labware.get_deck_default_gripper_offsets()
elif isinstance(location, ModuleLocation):
offsets = self._modules.get_default_gripper_offsets(location.moduleId)
@@ -806,3 +1091,48 @@ def _labware_gripper_offsets(
return slot_based_offset or self._labware.get_labware_gripper_offsets(
labware_id=labware_id, slot_name=None
)
+
+ def get_offset_location(self, labware_id: str) -> Optional[LabwareOffsetLocation]:
+ """Provide the LabwareOffsetLocation specifying the current position of the labware.
+
+ If the labware is in a location that cannot be specified by a LabwareOffsetLocation
+ (for instance, OFF_DECK) then return None.
+ """
+ parent_location = self._labware.get_location(labware_id)
+
+ if isinstance(parent_location, DeckSlotLocation):
+ return LabwareOffsetLocation(
+ slotName=parent_location.slotName, moduleModel=None, definitionUri=None
+ )
+ elif isinstance(parent_location, ModuleLocation):
+ module_model = self._modules.get_requested_model(parent_location.moduleId)
+ module_location = self._modules.get_location(parent_location.moduleId)
+ return LabwareOffsetLocation(
+ slotName=module_location.slotName,
+ moduleModel=module_model,
+ definitionUri=None,
+ )
+ elif isinstance(parent_location, OnLabwareLocation):
+ non_labware_parent_location = self._labware.get_parent_location(labware_id)
+
+ parent_uri = self._labware.get_definition_uri(parent_location.labwareId)
+ if isinstance(non_labware_parent_location, DeckSlotLocation):
+ return LabwareOffsetLocation(
+ slotName=non_labware_parent_location.slotName,
+ moduleModel=None,
+ definitionUri=parent_uri,
+ )
+ elif isinstance(non_labware_parent_location, ModuleLocation):
+ module_model = self._modules.get_requested_model(
+ non_labware_parent_location.moduleId
+ )
+ module_location = self._modules.get_location(
+ non_labware_parent_location.moduleId
+ )
+ return LabwareOffsetLocation(
+ slotName=module_location.slotName,
+ moduleModel=module_model,
+ definitionUri=parent_uri,
+ )
+
+ return None
diff --git a/api/src/opentrons/protocol_engine/state/labware.py b/api/src/opentrons/protocol_engine/state/labware.py
index b40a00c7b65..e9750a652b4 100644
--- a/api/src/opentrons/protocol_engine/state/labware.py
+++ b/api/src/opentrons/protocol_engine/state/labware.py
@@ -9,33 +9,34 @@
Mapping,
Optional,
Sequence,
- Set,
- Union,
Tuple,
NamedTuple,
cast,
+ Union,
)
-from opentrons_shared_data.deck.dev_types import DeckDefinitionV4, SlotDefV3
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5
from opentrons_shared_data.gripper.constants import LABWARE_GRIP_FORCE
from opentrons_shared_data.labware.labware_definition import LabwareRole
from opentrons_shared_data.pipette.dev_types import LabwareUri
-from opentrons.types import DeckSlotName, Point, MountType
+from opentrons.types import DeckSlotName, StagingSlotName, MountType
from opentrons.protocols.api_support.constants import OPENTRONS_NAMESPACE
from opentrons.protocols.models import LabwareDefinition, WellDefinition
from opentrons.calibration_storage.helpers import uri_from_details
from .. import errors
-from ..resources import DeckFixedLabware, labware_validation
+from ..resources import DeckFixedLabware, labware_validation, fixture_validation
from ..commands import (
Command,
LoadLabwareResult,
MoveLabwareResult,
+ ReloadLabwareResult,
)
from ..types import (
DeckSlotLocation,
OnLabwareLocation,
+ AddressableAreaLocation,
NonStackedLocation,
Dimensions,
LabwareOffset,
@@ -47,10 +48,12 @@
ModuleModel,
OverlapOffset,
LabwareMovementOffsetData,
+ OnDeckLabwareLocation,
+ OFF_DECK_LOCATION,
)
from ..actions import (
Action,
- UpdateCommandAction,
+ SucceedCommandAction,
AddLabwareOffsetAction,
AddLabwareDefinitionAction,
)
@@ -104,7 +107,7 @@ class LabwareState:
labware_offsets_by_id: Dict[str, LabwareOffset]
definitions_by_uri: Dict[str, LabwareDefinition]
- deck_definition: DeckDefinitionV4
+ deck_definition: DeckDefinitionV5
class LabwareStore(HasState[LabwareState], HandlesActions):
@@ -114,7 +117,7 @@ class LabwareStore(HasState[LabwareState], HandlesActions):
def __init__(
self,
- deck_definition: DeckDefinitionV4,
+ deck_definition: DeckDefinitionV5,
deck_fixed_labware: Sequence[DeckFixedLabware],
) -> None:
"""Initialize a labware store and its state."""
@@ -150,7 +153,7 @@ def __init__(
def handle_action(self, action: Action) -> None:
"""Modify state in reaction to an action."""
- if isinstance(action, UpdateCommandAction):
+ if isinstance(action, SucceedCommandAction):
self._handle_command(action.command)
elif isinstance(action, AddLabwareOffsetAction):
@@ -185,24 +188,40 @@ def _handle_command(self, command: Command) -> None:
)
self._state.definitions_by_uri[definition_uri] = command.result.definition
+ if isinstance(command.result, LoadLabwareResult):
+ location = command.params.location
+ else:
+ location = self._state.labware_by_id[command.result.labwareId].location
self._state.labware_by_id[
command.result.labwareId
] = LoadedLabware.construct(
id=command.result.labwareId,
- location=command.params.location,
+ location=location,
loadName=command.result.definition.parameters.loadName,
definitionUri=definition_uri,
offsetId=command.result.offsetId,
displayName=command.params.displayName,
)
+ elif isinstance(command.result, ReloadLabwareResult):
+ labware_id = command.params.labwareId
+ new_offset_id = command.result.offsetId
+ self._state.labware_by_id[labware_id].offsetId = new_offset_id
+
elif isinstance(command.result, MoveLabwareResult):
labware_id = command.params.labwareId
new_location = command.params.newLocation
new_offset_id = command.result.offsetId
self._state.labware_by_id[labware_id].offsetId = new_offset_id
+ if isinstance(
+ new_location, AddressableAreaLocation
+ ) and fixture_validation.is_gripper_waste_chute(
+ new_location.addressableAreaName
+ ):
+                # If a labware has been moved into a waste chute, it has been disposed of and is now effectively off-deck.
+ new_location = OFF_DECK_LOCATION
self._state.labware_by_id[labware_id].location = new_location
def _add_labware_offset(self, labware_offset: LabwareOffset) -> None:
@@ -275,20 +294,20 @@ def raise_if_labware_has_labware_on_top(self, labware_id: str) -> None:
f"Cannot move to labware {labware_id}, labware has other labware stacked on top."
)
- # TODO(mc, 2022-12-09): enforce data integrity (e.g. one labware per slot)
- # rather than shunting this work to callers via `allowed_ids`.
- # This has larger implications and is tied up in splitting LPC out of the protocol run
def get_by_slot(
- self, slot_name: DeckSlotName, allowed_ids: Set[str]
+ self,
+ slot_name: Union[DeckSlotName, StagingSlotName],
) -> Optional[LoadedLabware]:
"""Get the labware located in a given slot, if any."""
- loaded_labware = reversed(list(self._state.labware_by_id.values()))
+ loaded_labware = list(self._state.labware_by_id.values())
for labware in loaded_labware:
if (
isinstance(labware.location, DeckSlotLocation)
- and labware.location.slotName == slot_name
- and labware.id in allowed_ids
+ and labware.location.slotName.id == slot_name.id
+ ) or (
+ isinstance(labware.location, AddressableAreaLocation)
+ and labware.location.addressableAreaName == slot_name.id
):
return labware
@@ -300,89 +319,24 @@ def get_definition(self, labware_id: str) -> LabwareDefinition:
LabwareUri(self.get(labware_id).definitionUri)
)
- def get_display_name(self, labware_id: str) -> Optional[str]:
+ def get_user_specified_display_name(self, labware_id: str) -> Optional[str]:
"""Get the labware's user-specified display name, if set."""
return self.get(labware_id).displayName
- def get_deck_definition(self) -> DeckDefinitionV4:
- """Get the current deck definition."""
- return self._state.deck_definition
-
- def get_slot_definition(self, slot: DeckSlotName) -> SlotDefV3:
- """Get the definition of a slot in the deck."""
- deck_def = self.get_deck_definition()
+ def get_display_name(self, labware_id: str) -> str:
+ """Get the labware's display name.
- # TODO(jbl 2023-10-19 this is all incredibly hacky and ultimately we should get rid of SlotDefV3, and maybe
- # move all this to another store/provider. However for now, this can be more or less equivalent and not break
- # things TM TM TM
-
- for cutout in deck_def["locations"]["cutouts"]:
- if cutout["id"].endswith(slot.id):
- base_position = cutout["position"]
- break
- else:
- raise errors.SlotDoesNotExistError(
- f"Slot ID {slot.id} does not exist in deck {deck_def['otId']}"
- )
-
- slot_def: SlotDefV3
- # Slot 12/fixed trash for ot2 is a little weird so if its that just return some hardcoded stuff
- if slot.id == "12":
- slot_def = {
- "id": "12",
- "position": base_position,
- "boundingBox": {
- "xDimension": 128.0,
- "yDimension": 86.0,
- "zDimension": 0,
- },
- "displayName": "Slot 12",
- "compatibleModuleTypes": [],
- }
- return slot_def
-
- for area in deck_def["locations"]["addressableAreas"]:
- if area["id"] == slot.id:
- offset = area["offsetFromCutoutFixture"]
- position = [
- offset[0] + base_position[0],
- offset[1] + base_position[1],
- offset[2] + base_position[2],
- ]
- slot_def = {
- "id": area["id"],
- "position": position,
- "boundingBox": area["boundingBox"],
- "displayName": area["displayName"],
- "compatibleModuleTypes": area["compatibleModuleTypes"],
- }
- if area.get("matingSurfaceUnitVector"):
- slot_def["matingSurfaceUnitVector"] = area[
- "matingSurfaceUnitVector"
- ]
- return slot_def
-
- raise errors.SlotDoesNotExistError(
- f"Slot ID {slot.id} does not exist in deck {deck_def['otId']}"
+ If a user-specified display name exists, will return that, else will return
+ display name from the definition.
+ """
+ return (
+ self.get_user_specified_display_name(labware_id)
+ or self.get_definition(labware_id).metadata.displayName
)
- def get_slot_position(self, slot: DeckSlotName) -> Point:
- """Get the position of a deck slot."""
- slot_def = self.get_slot_definition(slot)
- position = slot_def["position"]
-
- return Point(x=position[0], y=position[1], z=position[2])
-
- def get_slot_center_position(self, slot: DeckSlotName) -> Point:
- """Get the (x, y, z) position of the center of the slot."""
- slot_def = self.get_slot_definition(slot)
- position = slot_def["position"]
-
- return Point(
- x=position[0] + slot_def["boundingBox"]["xDimension"] / 2,
- y=position[1] + slot_def["boundingBox"]["yDimension"] / 2,
- z=position[2] + slot_def["boundingBox"]["zDimension"] / 2,
- )
+ def get_deck_definition(self) -> DeckDefinitionV5:
+ """Get the current deck definition."""
+ return self._state.deck_definition
def get_definition_by_uri(self, uri: LabwareUri) -> LabwareDefinition:
"""Get the labware definition matching loadName namespace and version."""
@@ -437,6 +391,28 @@ def get_quirks(self, labware_id: str) -> List[str]:
definition = self.get_definition(labware_id)
return definition.parameters.quirks or []
+ def get_should_center_column_on_target_well(self, labware_id: str) -> bool:
+ """True if a pipette moving to this labware should center its active column on the target.
+
+ This is true for labware that have wells spanning entire columns.
+ """
+ has_quirk = self.get_has_quirk(labware_id, "centerMultichannelOnWells")
+ return has_quirk and (
+ len(self.get_definition(labware_id).wells) > 1
+ and len(self.get_definition(labware_id).wells) < 96
+ )
+
+ def get_should_center_pipette_on_target_well(self, labware_id: str) -> bool:
+ """True if a pipette moving to a well of this labware should center its body on the target.
+
+ This is true for 1-well reservoirs no matter the pipette, and for large plates.
+ """
+ has_quirk = self.get_has_quirk(labware_id, "centerMultichannelOnWells")
+ return has_quirk and (
+ len(self.get_definition(labware_id).wells) == 1
+ or len(self.get_definition(labware_id).wells) >= 96
+ )
+
def get_well_definition(
self,
labware_id: str,
@@ -722,15 +698,34 @@ def get_fixed_trash_id(self) -> Optional[str]:
DeckSlotName.SLOT_A3,
}:
return labware.id
-
return None
def is_fixed_trash(self, labware_id: str) -> bool:
"""Check if labware is fixed trash."""
- return self.get_fixed_trash_id() == labware_id
+ return self.get_has_quirk(labware_id, "fixedTrash")
+
+ def raise_if_labware_inaccessible_by_pipette(self, labware_id: str) -> None:
+ """Raise an error if the specified location cannot be reached via a pipette."""
+ labware = self.get(labware_id)
+ labware_location = labware.location
+ if isinstance(labware_location, OnLabwareLocation):
+ return self.raise_if_labware_inaccessible_by_pipette(
+ labware_location.labwareId
+ )
+ elif isinstance(labware_location, AddressableAreaLocation):
+ if fixture_validation.is_staging_slot(labware_location.addressableAreaName):
+ raise errors.LocationNotAccessibleByPipetteError(
+ f"Cannot move pipette to {labware.loadName},"
+ f" labware is on staging slot {labware_location.addressableAreaName}"
+ )
+ elif labware_location == OFF_DECK_LOCATION:
+ raise errors.LocationNotAccessibleByPipetteError(
+ f"Cannot move pipette to {labware.loadName}, labware is off-deck."
+ )
def raise_if_labware_in_location(
- self, location: Union[DeckSlotLocation, ModuleLocation]
+ self,
+ location: OnDeckLabwareLocation,
) -> None:
"""Raise an error if the specified location has labware in it."""
for labware in self.get_all():
@@ -847,3 +842,87 @@ def get_grip_height_from_labware_bottom(self, labware_id: str) -> float:
if recommended_height is not None
else self.get_dimensions(labware_id).z / 2
)
+
+ @staticmethod
+ def _max_x_of_well(well_defn: WellDefinition) -> float:
+ if well_defn.shape == "rectangular":
+ return well_defn.x + (well_defn.xDimension or 0) / 2
+ elif well_defn.shape == "circular":
+ return well_defn.x + (well_defn.diameter or 0) / 2
+ else:
+ return well_defn.x
+
+ @staticmethod
+ def _min_x_of_well(well_defn: WellDefinition) -> float:
+ if well_defn.shape == "rectangular":
+ return well_defn.x - (well_defn.xDimension or 0) / 2
+ elif well_defn.shape == "circular":
+ return well_defn.x - (well_defn.diameter or 0) / 2
+ else:
+ return 0
+
+ @staticmethod
+ def _max_y_of_well(well_defn: WellDefinition) -> float:
+ if well_defn.shape == "rectangular":
+ return well_defn.y + (well_defn.yDimension or 0) / 2
+ elif well_defn.shape == "circular":
+ return well_defn.y + (well_defn.diameter or 0) / 2
+ else:
+ return 0
+
+ @staticmethod
+ def _min_y_of_well(well_defn: WellDefinition) -> float:
+ if well_defn.shape == "rectangular":
+ return well_defn.y - (well_defn.yDimension or 0) / 2
+ elif well_defn.shape == "circular":
+ return well_defn.y - (well_defn.diameter or 0) / 2
+ else:
+ return 0
+
+ @staticmethod
+ def _max_z_of_well(well_defn: WellDefinition) -> float:
+ return well_defn.z + well_defn.depth
+
+ def get_well_bbox(self, labware_id: str) -> Dimensions:
+ """Get the bounding box implied by the wells.
+
+        The bounding box implied by the wells is the smallest box that contains all of
+        the wells: the y-span runs from the minimum y bound of the lowest-y well to the
+        maximum y bound of the highest-y well, the x-span is defined likewise, and the
+        z-span runs from labware zero to the top of the tallest well.
+
+ This is used for the specific purpose of finding the reasonable uncertainty bounds of
+ where and how a gripper will interact with a labware.
+ """
+ defn = self.get_definition(labware_id)
+ max_x: Optional[float] = None
+ min_x: Optional[float] = None
+ max_y: Optional[float] = None
+ min_y: Optional[float] = None
+ max_z: Optional[float] = None
+
+ for well in defn.wells.values():
+ well_max_x = self._max_x_of_well(well)
+ well_min_x = self._min_x_of_well(well)
+ well_max_y = self._max_y_of_well(well)
+ well_min_y = self._min_y_of_well(well)
+ well_max_z = self._max_z_of_well(well)
+ if (max_x is None) or (well_max_x > max_x):
+ max_x = well_max_x
+ if (max_y is None) or (well_max_y > max_y):
+ max_y = well_max_y
+ if (min_x is None) or (well_min_x < min_x):
+ min_x = well_min_x
+ if (min_y is None) or (well_min_y < min_y):
+ min_y = well_min_y
+ if (max_z is None) or (well_max_z > max_z):
+ max_z = well_max_z
+ if (
+ max_x is None
+ or max_y is None
+ or min_x is None
+ or min_y is None
+ or max_z is None
+ ):
+ return Dimensions(0, 0, 0)
+ return Dimensions(max_x - min_x, max_y - min_y, max_z)
diff --git a/api/src/opentrons/protocol_engine/state/modules.py b/api/src/opentrons/protocol_engine/state/modules.py
index 6ac289a6b79..0e79dd53cf2 100644
--- a/api/src/opentrons/protocol_engine/state/modules.py
+++ b/api/src/opentrons/protocol_engine/state/modules.py
@@ -9,13 +9,13 @@
NamedTuple,
Optional,
Sequence,
- Set,
Type,
TypeVar,
Union,
overload,
)
-from numpy import array, dot
+from numpy import array, dot, double as npdouble
+from numpy.typing import NDArray
from opentrons.hardware_control.modules.magdeck import (
OFFSET_TO_LABWARE_BOTTOM as MAGNETIC_MODULE_OFFSET_TO_LABWARE_BOTTOM,
@@ -43,10 +43,10 @@
LabwareOffsetVector,
HeaterShakerLatchStatus,
HeaterShakerMovementRestrictors,
- ModuleLocation,
DeckType,
LabwareMovementOffsetData,
)
+from .addressable_areas import AddressableAreaView
from .. import errors
from ..commands import (
Command,
@@ -55,7 +55,7 @@
temperature_module,
thermocycler,
)
-from ..actions import Action, UpdateCommandAction, AddModuleAction
+from ..actions import Action, SucceedCommandAction, AddModuleAction
from .abstract_store import HasState, HandlesActions
from .module_substates import (
MagneticModuleSubState,
@@ -70,6 +70,7 @@
MagneticBlockId,
ModuleSubStateType,
)
+from .config import Config
ModuleSubStateT = TypeVar("ModuleSubStateT", bound=ModuleSubStateType)
@@ -108,6 +109,14 @@ class SlotTransit(NamedTuple):
_OT2_THERMOCYCLER_SLOT_TRANSITS_TO_DODGE | _OT3_THERMOCYCLER_SLOT_TRANSITS_TO_DODGE
)
+_THERMOCYCLER_SLOT = DeckSlotName.SLOT_B1
+_OT2_THERMOCYCLER_ADDITIONAL_SLOTS = [
+ DeckSlotName.SLOT_8,
+ DeckSlotName.SLOT_10,
+ DeckSlotName.SLOT_11,
+]
+_OT3_THERMOCYCLER_ADDITIONAL_SLOTS = [DeckSlotName.SLOT_A1]
+
@dataclass(frozen=True)
class HardwareModule:
@@ -128,6 +137,17 @@ class ModuleState:
ProtocolEngine.use_attached_modules() instead of an explicit loadModule command.
"""
+ additional_slots_occupied_by_module_id: Dict[str, List[DeckSlotName]]
+ """List of additional slots occupied by each module.
+
+ The thermocycler (both GENs), occupies multiple slots on both OT-2 and the Flex
+ but only one slot is associated with the location of the thermocycler.
+ In order to check for deck conflicts with other items, we will keep track of any
+ additional slots occupied by a module here.
+
+ This will be None when a module occupies only one slot.
+ """
+
requested_model_by_id: Dict[str, Optional[ModuleModel]]
"""The model by which each loaded module was requested.
@@ -148,6 +168,9 @@ class ModuleState:
module_offset_by_serial: Dict[str, ModuleOffsetData]
"""Information about each modules offsets."""
+ deck_type: DeckType
+ """Type of deck that the modules are on."""
+
class ModuleStore(HasState[ModuleState], HandlesActions):
"""Module state container."""
@@ -155,20 +178,25 @@ class ModuleStore(HasState[ModuleState], HandlesActions):
_state: ModuleState
def __init__(
- self, module_calibration_offsets: Optional[Dict[str, ModuleOffsetData]] = None
+ self,
+ config: Config,
+ module_calibration_offsets: Optional[Dict[str, ModuleOffsetData]] = None,
) -> None:
"""Initialize a ModuleStore and its state."""
self._state = ModuleState(
slot_by_module_id={},
+ additional_slots_occupied_by_module_id={},
requested_model_by_id={},
hardware_by_module_id={},
substate_by_module_id={},
module_offset_by_serial=module_calibration_offsets or {},
+ deck_type=config.deck_type,
)
+ self._robot_type = config.robot_type
def handle_action(self, action: Action) -> None:
"""Modify state in reaction to an action."""
- if isinstance(action, UpdateCommandAction):
+ if isinstance(action, SucceedCommandAction):
self._handle_command(action.command)
elif isinstance(action, AddModuleAction):
@@ -183,11 +211,12 @@ def handle_action(self, action: Action) -> None:
def _handle_command(self, command: Command) -> None:
if isinstance(command.result, LoadModuleResult):
+ slot_name = command.params.location.slotName
self._add_module_substate(
module_id=command.result.moduleId,
serial_number=command.result.serialNumber,
definition=command.result.definition,
- slot_name=command.params.location.slotName,
+ slot_name=slot_name,
requested_model=command.params.model,
module_live_data=None,
)
@@ -285,11 +314,32 @@ def _add_module_substate(
target_block_temperature=live_data["targetTemp"] if live_data else None, # type: ignore[arg-type]
target_lid_temperature=live_data["lidTarget"] if live_data else None, # type: ignore[arg-type]
)
+ self._update_additional_slots_occupied_by_thermocycler(
+ module_id=module_id, slot_name=slot_name
+ )
elif ModuleModel.is_magnetic_block(actual_model):
self._state.substate_by_module_id[module_id] = MagneticBlockSubState(
module_id=MagneticBlockId(module_id)
)
+ def _update_additional_slots_occupied_by_thermocycler(
+ self,
+ module_id: str,
+ slot_name: Optional[
+ DeckSlotName
+ ], # addModuleAction will not have a slot location
+ ) -> None:
+ if slot_name != _THERMOCYCLER_SLOT.to_equivalent_for_robot_type(
+ self._robot_type
+ ):
+ return
+
+ self._state.additional_slots_occupied_by_module_id[module_id] = (
+ _OT3_THERMOCYCLER_ADDITIONAL_SLOTS
+ if self._state.deck_type == DeckType.OT3_STANDARD
+ else _OT2_THERMOCYCLER_ADDITIONAL_SLOTS
+ )
+
def _update_module_calibration(
self,
module_id: str,
@@ -493,17 +543,15 @@ def get_all(self) -> List[LoadedModule]:
"""Get a list of all module entries in state."""
return [self.get(mod_id) for mod_id in self._state.slot_by_module_id.keys()]
- # TODO(mc, 2022-12-09): enforce data integrity (e.g. one module per slot)
- # rather than shunting this work to callers via `allowed_ids`.
- # This has larger implications and is tied up in splitting LPC out of the protocol run
def get_by_slot(
- self, slot_name: DeckSlotName, allowed_ids: Set[str]
+ self,
+ slot_name: DeckSlotName,
) -> Optional[LoadedModule]:
"""Get the module located in a given slot, if any."""
slots_by_id = reversed(list(self._state.slot_by_module_id.items()))
for module_id, module_slot in slots_by_id:
- if module_slot == slot_name and module_id in allowed_ids:
+ if module_slot == slot_name:
return self.get(module_id)
return None
@@ -659,34 +707,72 @@ def get_dimensions(self, module_id: str) -> ModuleDimensions:
return self.get_definition(module_id).dimensions
def get_nominal_module_offset(
- self, module_id: str, deck_type: DeckType
+ self,
+ module_id: str,
+ addressable_areas: AddressableAreaView,
) -> LabwareOffsetVector:
"""Get the module's nominal offset vector computed with slot transform."""
- definition = self.get_definition(module_id)
- slot = self.get_location(module_id).slotName.id
-
- pre_transform = array(
- (
- definition.labwareOffset.x,
- definition.labwareOffset.y,
- definition.labwareOffset.z,
- 1,
+ if (
+ self.state.deck_type == DeckType.OT2_STANDARD
+ or self.state.deck_type == DeckType.OT2_SHORT_TRASH
+ ):
+ definition = self.get_definition(module_id)
+ slot = self.get_location(module_id).slotName.id
+
+ pre_transform: NDArray[npdouble] = array(
+ (
+ definition.labwareOffset.x,
+ definition.labwareOffset.y,
+ definition.labwareOffset.z,
+ 1,
+ )
+ )
+ xforms_ser = definition.slotTransforms.get(
+ str(self._state.deck_type.value), {}
+ ).get(
+ slot,
+ {
+ "labwareOffset": [
+ [1, 0, 0, 0],
+ [0, 1, 0, 0],
+ [0, 0, 1, 0],
+ [0, 0, 0, 1],
+ ]
+ },
+ )
+ xforms_ser_offset = xforms_ser["labwareOffset"]
+
+ # Apply the slot transform, if any
+ xform: NDArray[npdouble] = array(xforms_ser_offset)
+ xformed = dot(xform, pre_transform)
+ return LabwareOffsetVector(
+ x=xformed[0],
+ y=xformed[1],
+ z=xformed[2],
+ )
+ else:
+ module = self.get(module_id)
+ if isinstance(module.location, DeckSlotLocation):
+ location = module.location.slotName
+ elif module.model == ModuleModel.THERMOCYCLER_MODULE_V2:
+ location = DeckSlotName.SLOT_B1
+ else:
+ raise ValueError(
+ "Module location invalid for nominal module offset calculation."
+ )
+ module_addressable_area = self.ensure_and_convert_module_fixture_location(
+ location, self.state.deck_type, module.model
+ )
+ module_addressable_area_position = (
+ addressable_areas.get_addressable_area_offsets_from_cutout(
+ module_addressable_area
+ )
+ )
+ return LabwareOffsetVector(
+ x=module_addressable_area_position.x,
+ y=module_addressable_area_position.y,
+ z=module_addressable_area_position.z,
)
- )
- xforms_ser = definition.slotTransforms.get(str(deck_type.value), {}).get(
- slot,
- {"labwareOffset": [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]},
- )
- xforms_ser_offset = xforms_ser["labwareOffset"]
-
- # Apply the slot transform, if any
- xform = array(xforms_ser_offset)
- xformed = dot(xform, pre_transform) # type: ignore[no-untyped-call]
- return LabwareOffsetVector(
- x=xformed[0],
- y=xformed[1],
- z=xformed[2],
- )
def get_module_calibration_offset(
self, module_id: str
@@ -706,6 +792,43 @@ def get_height_over_labware(self, module_id: str) -> float:
"""Get the height of module parts above module labware base."""
return self.get_dimensions(module_id).overLabwareHeight
+ def get_module_highest_z(
+ self, module_id: str, addressable_areas: AddressableAreaView
+ ) -> float:
+ """Get the highest z point of the module, as placed on the robot.
+
+ The highest Z of a module, unlike the bare overall height, depends on
+ the robot it is on. We will calculate this value using the info we already have
+ about the transformation of the module's placement, based on the deck it is on.
+
+ This value is calculated as:
+ highest_z = ( nominal_robot_transformed_labware_offset_z
+ + z_difference_between_default_labware_offset_point_and_overall_height
+ + module_calibration_offset_z
+ )
+
+        For the OT-2, the default labware offset point is the same as
+        nominal_robot_transformed_labware_offset_z, and hence the highest z is equal
+        to the overall height of the module.
+
+        For the Flex, since those two offsets are not the same, the final highest z is
+        shifted by the same amount as the labware offset point.
+
+ Note: For thermocycler, the lid height is not taken into account.
+ """
+ module_height = self.get_overall_height(module_id)
+ default_lw_offset_point = self.get_definition(module_id).labwareOffset.z
+ z_difference = module_height - default_lw_offset_point
+
+ nominal_transformed_lw_offset_z = self.get_nominal_module_offset(
+ module_id=module_id, addressable_areas=addressable_areas
+ ).z
+ calibration_offset = self.get_module_calibration_offset(module_id)
+ return (
+ nominal_transformed_lw_offset_z
+ + z_difference
+ + (calibration_offset.moduleOffsetVector.z if calibration_offset else 0)
+ )
+
# TODO(mc, 2022-01-19): this method is missing unit test coverage and
# is also unused. Remove or add tests.
def get_lid_height(self, module_id: str) -> float:
@@ -859,11 +982,12 @@ def is_edge_move_unsafe(self, mount: MountType, target_slot: DeckSlotName) -> bo
return neighbor_slot in self._state.slot_by_module_id.values()
- def select_hardware_module_to_load(
+ def select_hardware_module_to_load( # noqa: C901
self,
model: ModuleModel,
location: DeckSlotLocation,
attached_modules: Sequence[HardwareModule],
+ expected_serial_number: Optional[str] = None,
) -> HardwareModule:
"""Get the next matching hardware module for the given model and location.
@@ -879,6 +1003,8 @@ def select_hardware_module_to_load(
location: The location the module will be assigned to.
attached_modules: All attached modules as reported by the HardwareAPI,
in the order in which they should be used.
+            expected_serial_number: An optional serial number; if provided, only an
+                attached module with this serial number will be selected.
Raises:
ModuleNotAttachedError: A not-yet-assigned module matching the requested
@@ -892,7 +1018,6 @@ def select_hardware_module_to_load(
if slot == location.slotName:
existing_mod_in_slot = self._state.hardware_by_module_id.get(mod_id)
break
-
if existing_mod_in_slot:
existing_def = existing_mod_in_slot.definition
@@ -908,7 +1033,11 @@ def select_hardware_module_to_load(
for m in attached_modules:
if m not in self._state.hardware_by_module_id.values():
if model == m.definition.model or model in m.definition.compatibleWith:
- return m
+ if expected_serial_number is not None:
+ if m.serial_number == expected_serial_number:
+ return m
+ else:
+ return m
raise errors.ModuleNotAttachedError(f"No available {model.value} found.")
@@ -932,7 +1061,8 @@ def get_heater_shaker_movement_restrictors(
return hs_restrictors
def raise_if_module_in_location(
- self, location: Union[DeckSlotLocation, ModuleLocation]
+ self,
+ location: DeckSlotLocation,
) -> None:
"""Raise if the given location has a module in it."""
for module in self.get_all():
@@ -947,3 +1077,123 @@ def get_default_gripper_offsets(
"""Get the deck's default gripper offsets."""
offsets = self.get_definition(module_id).gripperOffsets
return offsets.get("default") if offsets else None
+
+ def get_overflowed_module_in_slot(
+ self, slot_name: DeckSlotName
+ ) -> Optional[LoadedModule]:
+ """Get the module that's not loaded in the given slot, but still occupies the slot.
+
+ For example, if there's a thermocycler loaded in B1,
+        `get_overflowed_module_in_slot(DeckSlotName.SLOT_A1)` will return the loaded
+ thermocycler module.
+ """
+ slots_by_id = self._state.additional_slots_occupied_by_module_id
+
+ for module_id, module_slots in slots_by_id.items():
+ if module_slots and slot_name in module_slots:
+ return self.get(module_id)
+
+ return None
+
+ def is_flex_deck_with_thermocycler(self) -> bool:
+ """Return if this is a Flex deck with a thermocycler loaded in B1-A1 slots."""
+ maybe_module = self.get_by_slot(
+ DeckSlotName.SLOT_A1
+ ) or self.get_overflowed_module_in_slot(DeckSlotName.SLOT_A1)
+ if (
+ self._state.deck_type == DeckType.OT3_STANDARD
+ and maybe_module
+ and maybe_module.model == ModuleModel.THERMOCYCLER_MODULE_V2
+ ):
+ return True
+ else:
+ return False
+
+ def ensure_and_convert_module_fixture_location(
+ self,
+ deck_slot: DeckSlotName,
+ deck_type: DeckType,
+ model: ModuleModel,
+ ) -> str:
+ """Ensure module fixture load location is valid.
+
+ Also, convert the deck slot to a valid module fixture addressable area.
+ """
+ if deck_type == DeckType.OT2_STANDARD or deck_type == DeckType.OT2_SHORT_TRASH:
+ raise ValueError(
+ f"Invalid Deck Type: {deck_type.name} - Does not support modules as fixtures."
+ )
+
+ if model == ModuleModel.MAGNETIC_BLOCK_V1:
+            valid_slots = [
+                "A1",
+                "B1",
+                "C1",
+                "D1",
+                "A2",
+                "B2",
+                "C2",
+                "D2",
+                "A3",
+                "B3",
+                "C3",
+                "D3",
+            ]
+ addressable_areas = [
+ "magneticBlockV1A1",
+ "magneticBlockV1B1",
+ "magneticBlockV1C1",
+ "magneticBlockV1D1",
+ "magneticBlockV1A2",
+ "magneticBlockV1B2",
+ "magneticBlockV1C2",
+ "magneticBlockV1D2",
+ "magneticBlockV1A3",
+ "magneticBlockV1B3",
+ "magneticBlockV1C3",
+ "magneticBlockV1D3",
+ ]
+
+ elif model == ModuleModel.HEATER_SHAKER_MODULE_V1:
+            valid_slots = ["A1", "B1", "C1", "D1", "A3", "B3", "C3", "D3"]
+ addressable_areas = [
+ "heaterShakerV1A1",
+ "heaterShakerV1B1",
+ "heaterShakerV1C1",
+ "heaterShakerV1D1",
+ "heaterShakerV1A3",
+ "heaterShakerV1B3",
+ "heaterShakerV1C3",
+ "heaterShakerV1D3",
+ ]
+ elif model == ModuleModel.TEMPERATURE_MODULE_V2:
+            valid_slots = ["A1", "B1", "C1", "D1", "A3", "B3", "C3", "D3"]
+ addressable_areas = [
+ "temperatureModuleV2A1",
+ "temperatureModuleV2B1",
+ "temperatureModuleV2C1",
+ "temperatureModuleV2D1",
+ "temperatureModuleV2A3",
+ "temperatureModuleV2B3",
+ "temperatureModuleV2C3",
+ "temperatureModuleV2D3",
+ ]
+ elif model == ModuleModel.THERMOCYCLER_MODULE_V2:
+ return "thermocyclerModuleV2"
+ else:
+ raise ValueError(
+ f"Unknown module {model.name} has no addressable areas to provide."
+ )
+
+ map_addressable_area = {
+ slot: addressable_area
+ for slot, addressable_area in zip(valid_slots, addressable_areas)
+ }
+ return map_addressable_area[deck_slot.value]
diff --git a/api/src/opentrons/protocol_engine/state/motion.py b/api/src/opentrons/protocol_engine/state/motion.py
index 08195901af6..e8eff73447b 100644
--- a/api/src/opentrons/protocol_engine/state/motion.py
+++ b/api/src/opentrons/protocol_engine/state/motion.py
@@ -12,10 +12,17 @@
from . import move_types
from .. import errors
-from ..types import WellLocation, CurrentWell, MotorAxis
+from ..types import (
+ MotorAxis,
+ WellLocation,
+ CurrentWell,
+ CurrentPipetteLocation,
+ AddressableOffsetVector,
+)
from .config import Config
from .labware import LabwareView
from .pipettes import PipetteView
+from .addressable_areas import AddressableAreaView
from .geometry import GeometryView
from .modules import ModuleView
from .module_substates import HeaterShakerModuleId
@@ -37,6 +44,7 @@ def __init__(
config: Config,
labware_view: LabwareView,
pipette_view: PipetteView,
+ addressable_area_view: AddressableAreaView,
geometry_view: GeometryView,
module_view: ModuleView,
) -> None:
@@ -44,32 +52,36 @@ def __init__(
self._config = config
self._labware = labware_view
self._pipettes = pipette_view
+ self._addressable_areas = addressable_area_view
self._geometry = geometry_view
self._modules = module_view
def get_pipette_location(
self,
pipette_id: str,
- current_well: Optional[CurrentWell] = None,
+ current_location: Optional[CurrentPipetteLocation] = None,
) -> PipetteLocationData:
"""Get the critical point of a pipette given the current location."""
- current_well = current_well or self._pipettes.get_current_well()
+ current_location = current_location or self._pipettes.get_current_location()
pipette_data = self._pipettes.get(pipette_id)
mount = pipette_data.mount
critical_point = None
# if the pipette was last used to move to a labware that requires
- # centering, set the critical point to XY_CENTER
+ # centering, set the critical point to the appropriate center
if (
- current_well is not None
- and current_well.pipette_id == pipette_id
- and self._labware.get_has_quirk(
- current_well.labware_id,
- "centerMultichannelOnWells",
- )
+ isinstance(current_location, CurrentWell)
+ and current_location.pipette_id == pipette_id
):
- critical_point = CriticalPoint.XY_CENTER
+ if self._labware.get_should_center_column_on_target_well(
+ current_location.labware_id
+ ):
+ critical_point = CriticalPoint.Y_CENTER
+ elif self._labware.get_should_center_pipette_on_target_well(
+ current_location.labware_id
+ ):
+ critical_point = CriticalPoint.XY_CENTER
return PipetteLocationData(mount=mount, critical_point=critical_point)
def get_movement_waypoints_to_well(
@@ -86,18 +98,19 @@ def get_movement_waypoints_to_well(
minimum_z_height: Optional[float] = None,
) -> List[motion_planning.Waypoint]:
"""Calculate waypoints to a destination that's specified as a well."""
- location = current_well or self._pipettes.get_current_well()
- center_destination = self._labware.get_has_quirk(
- labware_id,
- "centerMultichannelOnWells",
- )
+ location = current_well or self._pipettes.get_current_location()
+
+ destination_cp: Optional[CriticalPoint] = None
+ if self._labware.get_should_center_column_on_target_well(labware_id):
+ destination_cp = CriticalPoint.Y_CENTER
+ elif self._labware.get_should_center_pipette_on_target_well(labware_id):
+ destination_cp = CriticalPoint.XY_CENTER
destination = self._geometry.get_well_position(
labware_id,
well_name,
well_location,
)
- destination_cp = CriticalPoint.XY_CENTER if center_destination else None
move_type = move_types.get_move_type_to_well(
pipette_id, labware_id, well_name, location, force_direct
@@ -105,9 +118,90 @@ def get_movement_waypoints_to_well(
min_travel_z = self._geometry.get_min_travel_z(
pipette_id, labware_id, location, minimum_z_height
)
+
+ destination_slot = self._geometry.get_ancestor_slot_name(labware_id)
# TODO (spp, 11-29-2021): Should log some kind of warning that pipettes
- # could crash onto the thermocycler if current well is not known.
- extra_waypoints = self._geometry.get_extra_waypoints(labware_id, location)
+ # could crash onto the thermocycler if current well or addressable area is not known.
+ extra_waypoints = self._geometry.get_extra_waypoints(
+ location=location, to_slot=destination_slot
+ )
+
+ try:
+ return motion_planning.get_waypoints(
+ move_type=move_type,
+ origin=origin,
+ origin_cp=origin_cp,
+ dest=destination,
+ dest_cp=destination_cp,
+ min_travel_z=min_travel_z,
+ max_travel_z=max_travel_z,
+ xy_waypoints=extra_waypoints,
+ )
+ except motion_planning.MotionPlanningError as error:
+ raise errors.FailedToPlanMoveError(str(error))
+
+ def get_movement_waypoints_to_addressable_area(
+ self,
+ addressable_area_name: str,
+ offset: AddressableOffsetVector,
+ origin: Point,
+ origin_cp: Optional[CriticalPoint],
+ max_travel_z: float,
+ force_direct: bool = False,
+ minimum_z_height: Optional[float] = None,
+ stay_at_max_travel_z: bool = False,
+ ignore_tip_configuration: Optional[bool] = True,
+ ) -> List[motion_planning.Waypoint]:
+ """Calculate waypoints to a destination that's specified as an addressable area."""
+ location = self._pipettes.get_current_location()
+
+ base_destination = (
+ self._addressable_areas.get_addressable_area_move_to_location(
+ addressable_area_name
+ )
+ )
+ if stay_at_max_travel_z:
+ base_destination_at_max_z = Point(
+ base_destination.x,
+ base_destination.y,
+ # HACK(mm, 2023-12-18): We want to travel exactly at max_travel_z, but
+ # motion_planning.get_waypoints() won't let us--the highest we can go is this margin
+ # beneath max_travel_z. Investigate why motion_planning.get_waypoints() does not
+ # let us travel at max_travel_z, and whether it's safe to make it do that.
+ # Possibly related: https://github.com/Opentrons/opentrons/pull/6882#discussion_r514248062
+ max_travel_z - motion_planning.waypoints.MINIMUM_Z_MARGIN,
+ )
+ destination = base_destination_at_max_z + Point(
+ offset.x, offset.y, offset.z
+ )
+ else:
+ destination = base_destination + Point(offset.x, offset.y, offset.z)
+
+        # TODO(jbl 11-28-2023): This may need to change for partial tip configurations on a 96-channel pipette.
+ if ignore_tip_configuration:
+ destination_cp = CriticalPoint.INSTRUMENT_XY_CENTER
+ else:
+ destination_cp = CriticalPoint.XY_CENTER
+
+ all_labware_highest_z = self._geometry.get_all_obstacle_highest_z()
+ if minimum_z_height is None:
+ minimum_z_height = float("-inf")
+ min_travel_z = max(all_labware_highest_z, minimum_z_height)
+
+ move_type = (
+ motion_planning.MoveType.DIRECT
+ if force_direct
+ else motion_planning.MoveType.GENERAL_ARC
+ )
+
+ destination_slot = self._addressable_areas.get_addressable_area_base_slot(
+ addressable_area_name
+ )
+ # TODO (spp, 11-29-2021): Should log some kind of warning that pipettes
+ # could crash onto the thermocycler if current well or addressable area is not known.
+ extra_waypoints = self._geometry.get_extra_waypoints(
+ location=location, to_slot=destination_slot
+ )
try:
return motion_planning.get_waypoints(
@@ -144,7 +238,7 @@ def get_movement_waypoints_to_coords(
Ignored if `direct` is True. If lower than the default height,
the default is used; this can only increase the height, not decrease it.
"""
- all_labware_highest_z = self._geometry.get_all_labware_highest_z()
+ all_labware_highest_z = self._geometry.get_all_obstacle_highest_z()
if additional_min_travel_z is None:
additional_min_travel_z = float("-inf")
min_travel_z = max(all_labware_highest_z, additional_min_travel_z)
@@ -173,11 +267,18 @@ def check_pipette_blocking_hs_latch(
) -> bool:
"""Check if pipette would block h/s latch from opening if it is east, west or on module."""
pipette_blocking = True
- current_well = self._pipettes.get_current_well()
- if current_well is not None:
- pipette_deck_slot = self._geometry.get_ancestor_slot_name(
- current_well.labware_id
- ).as_int()
+ current_location = self._pipettes.get_current_location()
+ if current_location is not None:
+ if isinstance(current_location, CurrentWell):
+ pipette_deck_slot = self._geometry.get_ancestor_slot_name(
+ current_location.labware_id
+ ).as_int()
+ else:
+ pipette_deck_slot = (
+ self._addressable_areas.get_addressable_area_base_slot(
+ current_location.addressable_area_name
+ ).as_int()
+ )
hs_deck_slot = self._modules.get_location(hs_module_id).slotName.as_int()
conflicting_slots = get_east_west_slots(hs_deck_slot) + [hs_deck_slot]
pipette_blocking = pipette_deck_slot in conflicting_slots
@@ -188,11 +289,18 @@ def check_pipette_blocking_hs_shaker(
) -> bool:
"""Check if pipette would block h/s latch from starting shake if it is adjacent or on module."""
pipette_blocking = True
- current_well = self._pipettes.get_current_well()
- if current_well is not None:
- pipette_deck_slot = self._geometry.get_ancestor_slot_name(
- current_well.labware_id
- ).as_int()
+ current_location = self._pipettes.get_current_location()
+ if current_location is not None:
+ if isinstance(current_location, CurrentWell):
+ pipette_deck_slot = self._geometry.get_ancestor_slot_name(
+ current_location.labware_id
+ ).as_int()
+ else:
+ pipette_deck_slot = (
+ self._addressable_areas.get_addressable_area_base_slot(
+ current_location.addressable_area_name
+ ).as_int()
+ )
hs_deck_slot = self._modules.get_location(hs_module_id).slotName.as_int()
conflicting_slots = get_adjacent_slots(hs_deck_slot) + [hs_deck_slot]
pipette_blocking = pipette_deck_slot in conflicting_slots
@@ -221,12 +329,12 @@ def get_touch_tip_waypoints(
positions = move_types.get_edge_point_list(
center_point, x_offset, y_offset, edge_path_type
)
+ critical_point: Optional[CriticalPoint] = None
- critical_point = (
- CriticalPoint.XY_CENTER
- if self._labware.get_has_quirk(labware_id, "centerMultichannelOnWells")
- else None
- )
+ if self._labware.get_should_center_column_on_target_well(labware_id):
+ critical_point = CriticalPoint.Y_CENTER
+ elif self._labware.get_should_center_pipette_on_target_well(labware_id):
+ critical_point = CriticalPoint.XY_CENTER
return [
motion_planning.Waypoint(position=p, critical_point=critical_point)
diff --git a/api/src/opentrons/protocol_engine/state/move_types.py b/api/src/opentrons/protocol_engine/state/move_types.py
index b28c0d0be94..b8dcb28bd8d 100644
--- a/api/src/opentrons/protocol_engine/state/move_types.py
+++ b/api/src/opentrons/protocol_engine/state/move_types.py
@@ -6,7 +6,7 @@
from opentrons.types import Point
from opentrons.motion_planning.types import MoveType
-from ..types import CurrentWell
+from ..types import CurrentWell, CurrentPipetteLocation
@dataclass
@@ -32,14 +32,14 @@ def get_move_type_to_well(
pipette_id: str,
labware_id: str,
well_name: str,
- location: Optional[CurrentWell],
+ location: Optional[CurrentPipetteLocation],
force_direct: bool,
) -> MoveType:
"""Get the move type for a move to well command."""
if force_direct:
return MoveType.DIRECT
if (
- location is not None
+ isinstance(location, CurrentWell)
and pipette_id == location.pipette_id
and labware_id == location.labware_id
):
diff --git a/api/src/opentrons/protocol_engine/state/pipettes.py b/api/src/opentrons/protocol_engine/state/pipettes.py
index 4d1f7278971..6803d19272b 100644
--- a/api/src/opentrons/protocol_engine/state/pipettes.py
+++ b/api/src/opentrons/protocol_engine/state/pipettes.py
@@ -10,7 +10,7 @@
NozzleConfigurationType,
NozzleMap,
)
-from opentrons.types import MountType, Mount as HwMount
+from opentrons.types import MountType, Mount as HwMount, Point
from .. import errors
from ..types import (
@@ -19,24 +19,30 @@
FlowRates,
DeckPoint,
CurrentWell,
+ CurrentAddressableArea,
+ CurrentPipetteLocation,
TipGeometry,
)
from ..commands import (
Command,
LoadPipetteResult,
AspirateResult,
+ AspirateInPlaceResult,
DispenseResult,
DispenseInPlaceResult,
MoveLabwareResult,
MoveToCoordinatesResult,
MoveToWellResult,
MoveRelativeResult,
+ MoveToAddressableAreaResult,
+ MoveToAddressableAreaForDropTipResult,
PickUpTipResult,
DropTipResult,
DropTipInPlaceResult,
HomeResult,
RetractAxisResult,
BlowOutResult,
+ BlowOutInPlaceResult,
TouchTipResult,
thermocycler,
heater_shaker,
@@ -50,7 +56,7 @@
from ..actions import (
Action,
SetPipetteMovementSpeedAction,
- UpdateCommandAction,
+ SucceedCommandAction,
)
from .abstract_store import HasState, HandlesActions
@@ -71,6 +77,22 @@ class CurrentDeckPoint:
deck_point: Optional[DeckPoint]
+@dataclass(frozen=True)
+class BoundingNozzlesOffsets:
+ """Offsets of the bounding nozzles of the pipette."""
+
+ back_left_offset: Point
+ front_right_offset: Point
+
+
+@dataclass(frozen=True)
+class PipetteBoundingBoxOffsets:
+ """Offsets of the corners of the pipette's bounding box."""
+
+ back_left_corner: Point
+ front_right_corner: Point
+
+
@dataclass(frozen=True)
class StaticPipetteConfig:
"""Static config for a pipette."""
@@ -87,6 +109,9 @@ class StaticPipetteConfig:
nominal_tip_overlap: Dict[str, float]
home_position: float
nozzle_offset_z: float
+ pipette_bounding_box_offsets: PipetteBoundingBoxOffsets
+ bounding_nozzle_offsets: BoundingNozzlesOffsets
+ default_nozzle_map: NozzleMap
@dataclass
@@ -95,7 +120,7 @@ class PipetteState:
pipettes_by_id: Dict[str, LoadedPipette]
aspirated_volume_by_id: Dict[str, Optional[float]]
- current_well: Optional[CurrentWell]
+ current_location: Optional[CurrentPipetteLocation]
current_deck_point: CurrentDeckPoint
attached_tip_by_id: Dict[str, Optional[TipGeometry]]
movement_speed_by_id: Dict[str, Optional[float]]
@@ -115,7 +140,7 @@ def __init__(self) -> None:
pipettes_by_id={},
aspirated_volume_by_id={},
attached_tip_by_id={},
- current_well=None,
+ current_location=None,
current_deck_point=CurrentDeckPoint(mount=None, deck_point=None),
movement_speed_by_id={},
static_config_by_id={},
@@ -125,7 +150,7 @@ def __init__(self) -> None:
def handle_action(self, action: Action) -> None:
"""Modify state in reaction to an action."""
- if isinstance(action, UpdateCommandAction):
+ if isinstance(action, SucceedCommandAction):
self._handle_command(action.command, action.private_result)
elif isinstance(action, SetPipetteMovementSpeedAction):
self._state.movement_speed_by_id[action.pipette_id] = action.speed
@@ -133,7 +158,7 @@ def handle_action(self, action: Action) -> None:
def _handle_command( # noqa: C901
self, command: Command, private_result: CommandPrivateResult
) -> None:
- self._update_current_well(command)
+ self._update_current_location(command)
self._update_deck_point(command)
if isinstance(private_result, PipetteConfigUpdateResultMixin):
@@ -151,8 +176,20 @@ def _handle_command( # noqa: C901
nominal_tip_overlap=config.nominal_tip_overlap,
home_position=config.home_position,
nozzle_offset_z=config.nozzle_offset_z,
+ pipette_bounding_box_offsets=PipetteBoundingBoxOffsets(
+ back_left_corner=config.back_left_corner_offset,
+ front_right_corner=config.front_right_corner_offset,
+ ),
+ bounding_nozzle_offsets=BoundingNozzlesOffsets(
+ back_left_offset=config.nozzle_map.back_left_nozzle_offset,
+ front_right_offset=config.nozzle_map.front_right_nozzle_offset,
+ ),
+ default_nozzle_map=config.nozzle_map,
)
self._state.flow_rates_by_id[private_result.pipette_id] = config.flow_rates
+ self._state.nozzle_configuration_by_id[
+ private_result.pipette_id
+ ] = config.nozzle_map
elif isinstance(private_result, PipetteNozzleLayoutResultMixin):
self._state.nozzle_configuration_by_id[
private_result.pipette_id
@@ -169,11 +206,17 @@ def _handle_command( # noqa: C901
self._state.aspirated_volume_by_id[pipette_id] = None
self._state.movement_speed_by_id[pipette_id] = None
self._state.attached_tip_by_id[pipette_id] = None
- self._state.nozzle_configuration_by_id[pipette_id] = None
+ static_config = self._state.static_config_by_id.get(pipette_id)
+ if static_config:
+ self._state.nozzle_configuration_by_id[
+ pipette_id
+ ] = static_config.default_nozzle_map
- elif isinstance(command.result, AspirateResult):
+ elif isinstance(command.result, (AspirateResult, AspirateInPlaceResult)):
pipette_id = command.params.pipetteId
previous_volume = self._state.aspirated_volume_by_id[pipette_id] or 0
+ # PipetteHandler will have clamped command.result.volume for us, so
+ # next_volume should always be in bounds.
next_volume = previous_volume + command.result.volume
self._state.aspirated_volume_by_id[pipette_id] = next_volume
@@ -181,7 +224,9 @@ def _handle_command( # noqa: C901
elif isinstance(command.result, (DispenseResult, DispenseInPlaceResult)):
pipette_id = command.params.pipetteId
previous_volume = self._state.aspirated_volume_by_id[pipette_id] or 0
- next_volume = max(0.0, previous_volume - command.result.volume)
+ # PipetteHandler will have clamped command.result.volume for us, so
+ # next_volume should always be in bounds.
+ next_volume = previous_volume - command.result.volume
self._state.aspirated_volume_by_id[pipette_id] = next_volume
elif isinstance(command.result, PickUpTipResult):
@@ -232,7 +277,7 @@ def _handle_command( # noqa: C901
default_aspirate=tip_configuration.default_aspirate_flowrate.values_by_api_level,
default_dispense=tip_configuration.default_dispense_flowrate.values_by_api_level,
)
- elif isinstance(command.result, BlowOutResult):
+ elif isinstance(command.result, (BlowOutResult, BlowOutInPlaceResult)):
pipette_id = command.params.pipetteId
self._state.aspirated_volume_by_id[pipette_id] = None
@@ -240,9 +285,9 @@ def _handle_command( # noqa: C901
pipette_id = command.params.pipetteId
self._state.aspirated_volume_by_id[pipette_id] = 0
- def _update_current_well(self, command: Command) -> None:
- # These commands leave the pipette in a new well.
- # Update current_well to reflect that.
+ def _update_current_location(self, command: Command) -> None:
+ # These commands leave the pipette in a new location.
+ # Update current_location to reflect that.
if isinstance(
command.result,
(
@@ -255,17 +300,26 @@ def _update_current_well(self, command: Command) -> None:
TouchTipResult,
),
):
- self._state.current_well = CurrentWell(
+ self._state.current_location = CurrentWell(
pipette_id=command.params.pipetteId,
labware_id=command.params.labwareId,
well_name=command.params.wellName,
)
+ elif isinstance(
+ command.result,
+ (MoveToAddressableAreaResult, MoveToAddressableAreaForDropTipResult),
+ ):
+ self._state.current_location = CurrentAddressableArea(
+ pipette_id=command.params.pipetteId,
+ addressable_area_name=command.params.addressableAreaName,
+ )
+
# These commands leave the pipette in a place that we can't logically associate
- # with a well. Clear current_well to reflect the fact that it's now unknown.
+ # with a well. Clear current_location to reflect the fact that it's now unknown.
#
- # TODO(mc, 2021-11-12): Wipe out current_well on movement failures, too.
- # TODO(jbl 2023-02-14): Need to investigate whether move relative should clear current well
+ # TODO(mc, 2021-11-12): Wipe out current_location on movement failures, too.
+ # TODO(jbl 2023-02-14): Need to investigate whether move relative should clear current location
elif isinstance(
command.result,
(
@@ -276,7 +330,7 @@ def _update_current_well(self, command: Command) -> None:
thermocycler.CloseLidResult,
),
):
- self._state.current_well = None
+ self._state.current_location = None
# Heater-Shaker commands may have left the pipette in a place that we can't
# associate with a logical location, depending on their result.
@@ -288,10 +342,10 @@ def _update_current_well(self, command: Command) -> None:
),
):
if command.result.pipetteRetracted:
- self._state.current_well = None
+ self._state.current_location = None
# A moveLabware command may have moved the labware that contains the current
- # well out from under the pipette. Clear the current well to reflect the
+ # well out from under the pipette. Clear the current location to reflect the
# fact that the pipette is no longer over any labware.
#
# This is necessary for safe motion planning in case the next movement
@@ -300,12 +354,12 @@ def _update_current_well(self, command: Command) -> None:
moved_labware_id = command.params.labwareId
if command.params.strategy == "usingGripper":
# All mounts will have been retracted.
- self._state.current_well = None
+ self._state.current_location = None
elif (
- self._state.current_well is not None
- and self._state.current_well.labware_id == moved_labware_id
+ isinstance(self._state.current_location, CurrentWell)
+ and self._state.current_location.labware_id == moved_labware_id
):
- self._state.current_well = None
+ self._state.current_location = None
def _update_deck_point(self, command: Command) -> None:
if isinstance(
@@ -314,6 +368,8 @@ def _update_deck_point(self, command: Command) -> None:
MoveToWellResult,
MoveToCoordinatesResult,
MoveRelativeResult,
+ MoveToAddressableAreaResult,
+ MoveToAddressableAreaForDropTipResult,
PickUpTipResult,
DropTipResult,
AspirateResult,
@@ -427,9 +483,9 @@ def get_hardware_pipette(
return HardwarePipette(mount=hw_mount, config=hw_config)
- def get_current_well(self) -> Optional[CurrentWell]:
- """Get the last accessed well and which pipette accessed it."""
- return self._state.current_well
+ def get_current_location(self) -> Optional[CurrentPipetteLocation]:
+ """Get the last accessed location and which pipette accessed it."""
+ return self._state.current_location
def get_deck_point(self, pipette_id: str) -> Optional[DeckPoint]:
"""Get the deck point of a pipette by ID, or None if it was not associated with the last move operation."""
@@ -623,3 +679,72 @@ def get_nozzle_layout_type(self, pipette_id: str) -> NozzleConfigurationType:
return nozzle_map_for_pipette.configuration
else:
return NozzleConfigurationType.FULL
+
+ def get_is_partially_configured(self, pipette_id: str) -> bool:
+ """Determine if the provided pipette is partially configured."""
+ return self.get_nozzle_layout_type(pipette_id) != NozzleConfigurationType.FULL
+
+ def get_primary_nozzle(self, pipette_id: str) -> Optional[str]:
+ """Get the primary nozzle, if any, related to the given pipette's nozzle configuration."""
+ nozzle_map = self._state.nozzle_configuration_by_id.get(pipette_id)
+ return nozzle_map.starting_nozzle if nozzle_map else None
+
+ def get_primary_nozzle_offset(self, pipette_id: str) -> Point:
+ """Get the pipette's current primary nozzle's offset."""
+ nozzle_map = self._state.nozzle_configuration_by_id.get(pipette_id)
+ if nozzle_map:
+ primary_nozzle_offset = nozzle_map.starting_nozzle_offset
+ else:
+ # When not in partial configuration, back-left nozzle is the primary
+ primary_nozzle_offset = self.get_config(
+ pipette_id
+ ).bounding_nozzle_offsets.back_left_offset
+ return primary_nozzle_offset
+
+ def get_pipette_bounding_nozzle_offsets(
+ self, pipette_id: str
+ ) -> BoundingNozzlesOffsets:
+ """Get the nozzle offsets of the pipette's bounding nozzles."""
+ return self.get_config(pipette_id).bounding_nozzle_offsets
+
+ def get_pipette_bounds_at_specified_move_to_position(
+ self,
+ pipette_id: str,
+ destination_position: Point,
+ ) -> Tuple[Point, Point, Point, Point]:
+ """Get the pipette's bounding offsets when primary nozzle is at the given position."""
+ primary_nozzle_offset = self.get_primary_nozzle_offset(pipette_id)
+ tip = self.get_attached_tip(pipette_id)
+ # Primary nozzle position at destination, in deck coordinates
+ primary_nozzle_position = destination_position + Point(
+ x=0, y=0, z=tip.length if tip else 0
+ )
+
+ # Get the pipette bounding box based on total nozzles
+ pipette_bounds_offsets = self.get_config(
+ pipette_id
+ ).pipette_bounding_box_offsets
+ pip_back_left_bound = (
+ primary_nozzle_position
+ - primary_nozzle_offset
+ + pipette_bounds_offsets.back_left_corner
+ )
+ pip_front_right_bound = (
+ primary_nozzle_position
+ - primary_nozzle_offset
+ + pipette_bounds_offsets.front_right_corner
+ )
+ # TODO (spp, 2024-02-27): remove back right & front left;
+ # return only back left and front right points.
+ pip_back_right_bound = Point(
+ pip_front_right_bound.x, pip_back_left_bound.y, pip_front_right_bound.z
+ )
+ pip_front_left_bound = Point(
+ pip_back_left_bound.x, pip_front_right_bound.y, pip_back_left_bound.z
+ )
+ return (
+ pip_back_left_bound,
+ pip_front_right_bound,
+ pip_back_right_bound,
+ pip_front_left_bound,
+ )
diff --git a/api/src/opentrons/protocol_engine/state/state.py b/api/src/opentrons/protocol_engine/state/state.py
index 3c402701810..dcde17a7894 100644
--- a/api/src/opentrons/protocol_engine/state/state.py
+++ b/api/src/opentrons/protocol_engine/state/state.py
@@ -2,10 +2,10 @@
from __future__ import annotations
from dataclasses import dataclass
-from functools import partial
-from typing import Any, Callable, Dict, List, Optional, Sequence, TypeVar
+from typing import Callable, Dict, List, Optional, Sequence, TypeVar
+from typing_extensions import ParamSpec
-from opentrons_shared_data.deck.dev_types import DeckDefinitionV4
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5
from opentrons.protocol_engine.types import ModuleOffsetData
@@ -14,6 +14,11 @@
from .abstract_store import HasState, HandlesActions
from .change_notifier import ChangeNotifier
from .commands import CommandState, CommandStore, CommandView
+from .addressable_areas import (
+ AddressableAreaState,
+ AddressableAreaStore,
+ AddressableAreaView,
+)
from .labware import LabwareState, LabwareStore, LabwareView
from .pipettes import PipetteState, PipetteStore, PipetteView
from .modules import ModuleState, ModuleStore, ModuleView
@@ -23,8 +28,11 @@
from .motion import MotionView
from .config import Config
from .state_summary import StateSummary
+from ..types import DeckConfigurationType
-ReturnT = TypeVar("ReturnT")
+
+_ParamsT = ParamSpec("_ParamsT")
+_ReturnT = TypeVar("_ReturnT")
@dataclass(frozen=True)
@@ -32,6 +40,7 @@ class State:
"""Underlying engine state."""
commands: CommandState
+ addressable_areas: AddressableAreaState
labware: LabwareState
pipettes: PipetteState
modules: ModuleState
@@ -44,6 +53,7 @@ class StateView(HasState[State]):
_state: State
_commands: CommandView
+ _addressable_areas: AddressableAreaView
_labware: LabwareView
_pipettes: PipetteView
_modules: ModuleView
@@ -58,6 +68,11 @@ def commands(self) -> CommandView:
"""Get state view selectors for commands state."""
return self._commands
+ @property
+ def addressable_areas(self) -> AddressableAreaView:
+ """Get state view selectors for addressable area state."""
+ return self._addressable_areas
+
@property
def labware(self) -> LabwareView:
"""Get state view selectors for labware state."""
@@ -101,6 +116,7 @@ def config(self) -> Config:
def get_summary(self) -> StateSummary:
"""Get protocol run data."""
error = self._commands.get_error()
+ # TODO maybe add summary here for AA
return StateSummary.construct(
status=self._commands.get_status(),
errors=[] if error is None else [error],
@@ -126,11 +142,13 @@ def __init__(
self,
*,
config: Config,
- deck_definition: DeckDefinitionV4,
+ deck_definition: DeckDefinitionV5,
deck_fixed_labware: Sequence[DeckFixedLabware],
is_door_open: bool,
change_notifier: Optional[ChangeNotifier] = None,
module_calibration_offsets: Optional[Dict[str, ModuleOffsetData]] = None,
+ deck_configuration: Optional[DeckConfigurationType] = None,
+ notify_publishers: Optional[Callable[[], None]] = None,
) -> None:
"""Initialize a StateStore and its substores.
@@ -143,15 +161,25 @@ def __init__(
is_door_open: Whether the robot's door is currently open.
change_notifier: Internal state change notifier.
module_calibration_offsets: Module offsets to preload.
+ deck_configuration: The initial deck configuration the addressable area store will be instantiated with.
+ notify_publishers: Notifies robot server publishers of internal state change.
"""
self._command_store = CommandStore(config=config, is_door_open=is_door_open)
self._pipette_store = PipetteStore()
+ if deck_configuration is None:
+ deck_configuration = []
+ self._addressable_area_store = AddressableAreaStore(
+ deck_configuration=deck_configuration,
+ config=config,
+ deck_definition=deck_definition,
+ )
self._labware_store = LabwareStore(
deck_fixed_labware=deck_fixed_labware,
deck_definition=deck_definition,
)
self._module_store = ModuleStore(
- module_calibration_offsets=module_calibration_offsets
+ config=config,
+ module_calibration_offsets=module_calibration_offsets,
)
self._liquid_store = LiquidStore()
self._tip_store = TipStore()
@@ -159,6 +187,7 @@ def __init__(
self._substores: List[HandlesActions] = [
self._command_store,
self._pipette_store,
+ self._addressable_area_store,
self._labware_store,
self._module_store,
self._liquid_store,
@@ -166,6 +195,7 @@ def __init__(
]
self._config = config
self._change_notifier = change_notifier or ChangeNotifier()
+ self._notify_robot_server = notify_publishers
self._initialize_state()
def handle_action(self, action: Action) -> None:
@@ -182,10 +212,10 @@ def handle_action(self, action: Action) -> None:
async def wait_for(
self,
- condition: Callable[..., Optional[ReturnT]],
- *args: Any,
- **kwargs: Any,
- ) -> ReturnT:
+ condition: Callable[_ParamsT, _ReturnT],
+ *args: _ParamsT.args,
+ **kwargs: _ParamsT.kwargs,
+ ) -> _ReturnT:
"""Wait for a condition to become true, checking whenever state changes.
If the condition is already true, return immediately.
@@ -230,19 +260,49 @@ async def wait_for(
Raises:
The exception raised by the `condition` function, if any.
"""
- predicate = partial(condition, *args, **kwargs)
- is_done = predicate()
- while not is_done:
+ def predicate() -> _ReturnT:
+ return condition(*args, **kwargs)
+
+ return await self._wait_for(condition=predicate, truthiness_to_wait_for=True)
+
+ async def wait_for_not(
+ self,
+ condition: Callable[_ParamsT, _ReturnT],
+ *args: _ParamsT.args,
+ **kwargs: _ParamsT.kwargs,
+ ) -> _ReturnT:
+ """Like `wait_for()`, except wait for the condition to become false.
+
+ See the documentation in `wait_for()`, especially the warning about condition
+ design.
+
+ The advantage of having this separate method over just passing a wrapper lambda
+ as the condition to `wait_for()` yourself is that wrapper lambdas are hard to
+ test in the mock-heavy Decoy + Protocol Engine style.
+ """
+
+ def predicate() -> _ReturnT:
+ return condition(*args, **kwargs)
+
+ return await self._wait_for(condition=predicate, truthiness_to_wait_for=False)
+
+ async def _wait_for(
+ self, condition: Callable[[], _ReturnT], truthiness_to_wait_for: bool
+ ) -> _ReturnT:
+ current_value = condition()
+
+ while bool(current_value) != truthiness_to_wait_for:
await self._change_notifier.wait()
- is_done = predicate()
+ current_value = condition()
- return is_done
+ return current_value
def _get_next_state(self) -> State:
"""Get a new instance of the state value object."""
return State(
commands=self._command_store.state,
+ addressable_areas=self._addressable_area_store.state,
labware=self._labware_store.state,
pipettes=self._pipette_store.state,
modules=self._module_store.state,
@@ -257,6 +317,7 @@ def _initialize_state(self) -> None:
# Base states
self._state = state
self._commands = CommandView(state.commands)
+ self._addressable_areas = AddressableAreaView(state.addressable_areas)
self._labware = LabwareView(state.labware)
self._pipettes = PipetteView(state.pipettes)
self._modules = ModuleView(state.modules)
@@ -269,11 +330,13 @@ def _initialize_state(self) -> None:
labware_view=self._labware,
module_view=self._modules,
pipette_view=self._pipettes,
+ addressable_area_view=self._addressable_areas,
)
self._motion = MotionView(
config=self._config,
labware_view=self._labware,
pipette_view=self._pipettes,
+ addressable_area_view=self._addressable_areas,
geometry_view=self._geometry,
module_view=self._modules,
)
@@ -283,9 +346,12 @@ def _update_state_views(self) -> None:
next_state = self._get_next_state()
self._state = next_state
self._commands._state = next_state.commands
+ self._addressable_areas._state = next_state.addressable_areas
self._labware._state = next_state.labware
self._pipettes._state = next_state.pipettes
self._modules._state = next_state.modules
self._liquid._state = next_state.liquids
self._tips._state = next_state.tips
self._change_notifier.notify()
+ if self._notify_robot_server is not None:
+ self._notify_robot_server()
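A small self-contained sketch of the ParamSpec pattern the new wait_for signature relies on, showing how the condition's own argument types flow through to the caller; the condition function and the sleep-based "notifier" below are hypothetical stand-ins, not the real StateStore or ChangeNotifier:

```python
import asyncio
from typing import Callable, TypeVar
from typing_extensions import ParamSpec

_ParamsT = ParamSpec("_ParamsT")
_ReturnT = TypeVar("_ReturnT")


async def wait_for(
    condition: Callable[_ParamsT, _ReturnT],
    *args: _ParamsT.args,
    **kwargs: _ParamsT.kwargs,
) -> _ReturnT:
    """Re-check condition(*args, **kwargs) until it is truthy, then return it."""
    while not (value := condition(*args, **kwargs)):
        await asyncio.sleep(0)  # stand-in for waiting on a ChangeNotifier
    return value


def has_command(commands: dict, command_id: str) -> bool:
    return command_id in commands


async def main() -> None:
    commands = {"abc123": "pickUpTip"}
    # A type checker verifies these arguments against has_command's signature,
    # which Callable[..., Optional[ReturnT]] could not do.
    result = await wait_for(has_command, commands, command_id="abc123")
    print(result)  # True


asyncio.run(main())
```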
diff --git a/api/src/opentrons/protocol_engine/state/tips.py b/api/src/opentrons/protocol_engine/state/tips.py
index 6b70e3a19e2..5af1e19a31f 100644
--- a/api/src/opentrons/protocol_engine/state/tips.py
+++ b/api/src/opentrons/protocol_engine/state/tips.py
@@ -1,22 +1,30 @@
"""Tip state tracking."""
from dataclasses import dataclass
from enum import Enum
-from typing import Dict, Optional, List
+from typing import Dict, Optional, List, Union
from .abstract_store import HasState, HandlesActions
from ..actions import (
Action,
- UpdateCommandAction,
+ SucceedCommandAction,
+ FailCommandAction,
ResetTipsAction,
)
from ..commands import (
Command,
LoadLabwareResult,
+ PickUpTip,
PickUpTipResult,
DropTipResult,
DropTipInPlaceResult,
)
-from ..commands.configuring_common import PipetteConfigUpdateResultMixin
+from ..commands.configuring_common import (
+ PipetteConfigUpdateResultMixin,
+ PipetteNozzleLayoutResultMixin,
+)
+from ..error_recovery_policy import ErrorRecoveryType
+
+from opentrons.hardware_control.nozzle_manager import NozzleMap
class TipRackWellState(Enum):
@@ -37,6 +45,8 @@ class TipState:
column_by_labware_id: Dict[str, List[List[str]]]
channels_by_pipette_id: Dict[str, int]
length_by_pipette_id: Dict[str, float]
+ active_channels_by_pipette_id: Dict[str, int]
+ nozzle_map_by_pipette_id: Dict[str, NozzleMap]
class TipStore(HasState[TipState], HandlesActions):
@@ -51,17 +61,36 @@ def __init__(self) -> None:
column_by_labware_id={},
channels_by_pipette_id={},
length_by_pipette_id={},
+ active_channels_by_pipette_id={},
+ nozzle_map_by_pipette_id={},
)
def handle_action(self, action: Action) -> None:
"""Modify state in reaction to an action."""
- if isinstance(action, UpdateCommandAction):
+ if isinstance(action, SucceedCommandAction):
if isinstance(action.private_result, PipetteConfigUpdateResultMixin):
+ pipette_id = action.private_result.pipette_id
config = action.private_result.config
- self._state.channels_by_pipette_id[
- action.private_result.pipette_id
- ] = config.channels
- self._handle_command(action.command)
+ self._state.channels_by_pipette_id[pipette_id] = config.channels
+ self._state.active_channels_by_pipette_id[pipette_id] = config.channels
+ self._state.nozzle_map_by_pipette_id[pipette_id] = config.nozzle_map
+ self._handle_succeeded_command(action.command)
+
+ if isinstance(action.private_result, PipetteNozzleLayoutResultMixin):
+ pipette_id = action.private_result.pipette_id
+ nozzle_map = action.private_result.nozzle_map
+ if nozzle_map:
+ self._state.active_channels_by_pipette_id[
+ pipette_id
+ ] = nozzle_map.tip_count
+ self._state.nozzle_map_by_pipette_id[pipette_id] = nozzle_map
+ else:
+ self._state.active_channels_by_pipette_id[
+ pipette_id
+ ] = self._state.channels_by_pipette_id[pipette_id]
+
+ elif isinstance(action, FailCommandAction):
+ self._handle_failed_command(action)
elif isinstance(action, ResetTipsAction):
labware_id = action.labware_id
@@ -71,7 +100,7 @@ def handle_action(self, action: Action) -> None:
well_name
] = TipRackWellState.CLEAN
- def _handle_command(self, command: Command) -> None:
+ def _handle_succeeded_command(self, command: Command) -> None:
if (
isinstance(command.result, LoadLabwareResult)
and command.result.definition.parameters.isTiprack
@@ -92,7 +121,6 @@ def _handle_command(self, command: Command) -> None:
well_name = command.params.wellName
pipette_id = command.params.pipetteId
length = command.result.tipLength
-
self._set_used_tips(
pipette_id=pipette_id, well_name=well_name, labware_id=labware_id
)
@@ -102,24 +130,68 @@ def _handle_command(self, command: Command) -> None:
pipette_id = command.params.pipetteId
self._state.length_by_pipette_id.pop(pipette_id, None)
- def _set_used_tips(self, pipette_id: str, well_name: str, labware_id: str) -> None:
- pipette_channels = self._state.channels_by_pipette_id.get(pipette_id)
+ def _handle_failed_command(
+ self,
+ action: FailCommandAction,
+ ) -> None:
+ # If a pickUpTip command fails recoverably, mark the tips as used. This way,
+ # when the protocol is resumed and the Python Protocol API calls
+ # `get_next_tip()`, we'll move on to other tips as expected.
+ #
+ # We don't attempt this for nonrecoverable errors because maybe the failure
+ # was due to a bad labware ID or well name.
+ if (
+ isinstance(action.running_command, PickUpTip)
+ and action.type != ErrorRecoveryType.FAIL_RUN
+ ):
+ self._set_used_tips(
+ pipette_id=action.running_command.params.pipetteId,
+ labware_id=action.running_command.params.labwareId,
+ well_name=action.running_command.params.wellName,
+ )
+ # Note: We're logically removing the tip from the tip rack,
+ # but we're not logically updating the pipette to have that tip on it.
+
+ def _set_used_tips( # noqa: C901
+ self, pipette_id: str, well_name: str, labware_id: str
+ ) -> None:
columns = self._state.column_by_labware_id.get(labware_id, [])
wells = self._state.tips_by_labware_id.get(labware_id, {})
-
- if pipette_channels == len(wells):
- for well_name in wells.keys():
- wells[well_name] = TipRackWellState.USED
-
- elif columns and pipette_channels == len(columns[0]):
- for column in columns:
- if well_name in column:
- for well in column:
+ nozzle_map = self._state.nozzle_map_by_pipette_id[pipette_id]
+
+ # TODO (cb, 02-28-2024): Transition from using partial nozzle map to full instrument map for the set used logic
+ num_nozzle_cols = len(nozzle_map.columns)
+ num_nozzle_rows = len(nozzle_map.rows)
+
+ critical_column = 0
+ critical_row = 0
+ for column in columns:
+ if well_name in column:
+ critical_row = column.index(well_name)
+ critical_column = columns.index(column)
+
+ for i in range(num_nozzle_cols):
+ for j in range(num_nozzle_rows):
+ if nozzle_map.starting_nozzle == "A1":
+ if (critical_column + i < len(columns)) and (
+ critical_row + j < len(columns[critical_column])
+ ):
+ well = columns[critical_column + i][critical_row + j]
+ wells[well] = TipRackWellState.USED
+ elif nozzle_map.starting_nozzle == "A12":
+ if (critical_column - i >= 0) and (
+ critical_row + j < len(columns[critical_column])
+ ):
+ well = columns[critical_column - i][critical_row + j]
+ wells[well] = TipRackWellState.USED
+ elif nozzle_map.starting_nozzle == "H1":
+ if (critical_column + i < len(columns)) and (critical_row - j >= 0):
+ well = columns[critical_column + i][critical_row - j]
+ wells[well] = TipRackWellState.USED
+ elif nozzle_map.starting_nozzle == "H12":
+ if (critical_column - i >= 0) and (critical_row - j >= 0):
+ well = columns[critical_column - i][critical_row - j]
wells[well] = TipRackWellState.USED
- break
-
- else:
- wells[well_name] = TipRackWellState.USED
class TipView(HasState[TipState]):
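A condensed, illustrative sketch of the marking geometry in _set_used_tips above, with a plain dict standing in for TipState; the helper is a simplification of the real method, collapsing the four starting-nozzle branches into direction signs:

```python
# Columns of a 96-tip rack: columns[c][r] is the well in column c, row r
# (row 0 = "A", row 7 = "H"); e.g. columns[0] == ["A1", ..., "H1"].
columns = [[f"{row}{col + 1}" for row in "ABCDEFGH"] for col in range(12)]
wells = {well: "CLEAN" for column in columns for well in column}


def mark_used(well_name: str, starting_nozzle: str, n_cols: int, n_rows: int) -> None:
    """Mark the rectangle of wells covered by a partial nozzle layout as used.

    The rectangle grows away from the primary nozzle: right/down for "A1",
    left/down for "A12", right/up for "H1", left/up for "H12".
    """
    critical_col = next(c for c, col in enumerate(columns) if well_name in col)
    critical_row = columns[critical_col].index(well_name)
    col_step = 1 if starting_nozzle in ("A1", "H1") else -1
    row_step = 1 if starting_nozzle in ("A1", "A12") else -1

    for i in range(n_cols):
        for j in range(n_rows):
            c = critical_col + i * col_step
            r = critical_row + j * row_step
            if 0 <= c < len(columns) and 0 <= r < len(columns[c]):
                wells[columns[c][r]] = "USED"


# A full-column layout (1 active column, 8 active rows) picking up at A3 with
# primary nozzle A1 marks A3 through H3 as used.
mark_used("A3", starting_nozzle="A1", n_cols=1, n_rows=8)
print([w for w, state in wells.items() if state == "USED"])
```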
@@ -136,51 +208,280 @@ def __init__(self, state: TipState) -> None:
self._state = state
def get_next_tip( # noqa: C901
- self, labware_id: str, num_tips: int, starting_tip_name: Optional[str]
+ self,
+ labware_id: str,
+ num_tips: int,
+ starting_tip_name: Optional[str],
+ nozzle_map: Optional[NozzleMap],
) -> Optional[str]:
- """Get the next available clean tip."""
+ """Get the next available clean tip. Does not support use of a starting tip if the pipette used is in a partial configuration."""
wells = self._state.tips_by_labware_id.get(labware_id, {})
columns = self._state.column_by_labware_id.get(labware_id, [])
- if columns and num_tips == len(columns[0]):
- column_head = [column[0] for column in columns]
- starting_column_index = 0
-
- if starting_tip_name:
- for idx, column in enumerate(columns):
- if starting_tip_name in column:
- if starting_tip_name not in column_head:
- starting_column_index = idx + 1
+ def _identify_tip_cluster(
+ active_columns: int,
+ active_rows: int,
+ critical_column: int,
+ critical_row: int,
+ entry_well: str,
+ ) -> Optional[List[str]]:
+ tip_cluster = []
+ for i in range(active_columns):
+ if entry_well == "A1" or entry_well == "H1":
+ if critical_column - i >= 0:
+ column = columns[critical_column - i]
+ else:
+ return None
+ elif entry_well == "A12" or entry_well == "H12":
+ if critical_column + i < len(columns):
+ column = columns[critical_column + i]
+ else:
+ return None
+ else:
+ raise ValueError(
+ f"Invalid entry well {entry_well} for tip cluster identification."
+ )
+ for j in range(active_rows):
+ if entry_well == "A1" or entry_well == "A12":
+ if critical_row - j >= 0:
+ well = column[critical_row - j]
else:
- starting_column_index = idx
-
- for column in columns[starting_column_index:]:
- if not any(wells[well] == TipRackWellState.USED for well in column):
- return column[0]
+ return None
+ elif entry_well == "H1" or entry_well == "H12":
+ if critical_row + j < len(column):
+ well = column[critical_row + j]
+ else:
+ return None
+ tip_cluster.append(well)
- elif num_tips == len(wells.keys()):
- if starting_tip_name and starting_tip_name != columns[0][0]:
+ if any(well not in [*wells] for well in tip_cluster):
return None
- if not any(
- tip_state == TipRackWellState.USED for tip_state in wells.values()
- ):
- return next(iter(wells))
+ return tip_cluster
+ def _validate_tip_cluster(
+ active_columns: int, active_rows: int, tip_cluster: List[str]
+ ) -> Union[str, int, None]:
+ if not any(wells[well] == TipRackWellState.USED for well in tip_cluster):
+ return tip_cluster[0]
+ elif all(wells[well] == TipRackWellState.USED for well in tip_cluster):
+ return None
+ else:
+                # For an 8ch pipette, a column with mixed-state tips is skipped; we simply progress to the next column in our search
+ if (
+ nozzle_map is not None
+ and len(nozzle_map.full_instrument_map_store) == 8
+ ):
+ return None
+
+                # For a 96ch pipette we can attempt to index in by single rows and columns, assuming the indexed direction is safe.
+                # The tip cluster list is ordered column by column, with each column's rows listed in order.
+ tip_cluster_final_column = []
+ for i in range(active_rows):
+ tip_cluster_final_column.append(
+ tip_cluster[((active_columns * active_rows) - 1) - i]
+ )
+ tip_cluster_final_row = []
+ for i in range(active_columns):
+ tip_cluster_final_row.append(
+ tip_cluster[(active_rows - 1) + (i * active_rows)]
+ )
+ if all(
+ wells[well] == TipRackWellState.USED
+ for well in tip_cluster_final_column
+ ):
+ return None
+ elif all(
+ wells[well] == TipRackWellState.USED
+ for well in tip_cluster_final_row
+ ):
+ return None
+ else:
+ # Tiprack has no valid tip selection, cannot progress
+ return -1
+
+ # Search through the tiprack beginning at A1
+ def _cluster_search_A1(active_columns: int, active_rows: int) -> Optional[str]:
+ critical_column = active_columns - 1
+ critical_row = active_rows - 1
+
+ while critical_column < len(columns):
+ tip_cluster = _identify_tip_cluster(
+ active_columns, active_rows, critical_column, critical_row, "A1"
+ )
+ if tip_cluster is not None:
+ result = _validate_tip_cluster(
+ active_columns, active_rows, tip_cluster
+ )
+ if isinstance(result, str):
+ return result
+ elif isinstance(result, int) and result == -1:
+ return None
+ if critical_row + active_rows < len(columns[0]):
+ critical_row = critical_row + active_rows
+ else:
+ critical_column += 1
+ critical_row = active_rows - 1
+ return None
+
+ # Search through the tiprack beginning at A12
+ def _cluster_search_A12(active_columns: int, active_rows: int) -> Optional[str]:
+ critical_column = len(columns) - active_columns
+ critical_row = active_rows - 1
+
+ while critical_column >= 0:
+ tip_cluster = _identify_tip_cluster(
+ active_columns, active_rows, critical_column, critical_row, "A12"
+ )
+ if tip_cluster is not None:
+ result = _validate_tip_cluster(
+ active_columns, active_rows, tip_cluster
+ )
+ if isinstance(result, str):
+ return result
+ elif isinstance(result, int) and result == -1:
+ return None
+ if critical_row + active_rows < len(columns[0]):
+ critical_row = critical_row + active_rows
+ else:
+ critical_column -= 1
+ critical_row = active_rows - 1
+ return None
+
+ # Search through the tiprack beginning at H1
+ def _cluster_search_H1(active_columns: int, active_rows: int) -> Optional[str]:
+ critical_column = active_columns - 1
+ critical_row = len(columns[critical_column]) - active_rows
+
+ while critical_column <= len(columns): # change to max size of labware
+ tip_cluster = _identify_tip_cluster(
+ active_columns, active_rows, critical_column, critical_row, "H1"
+ )
+ if tip_cluster is not None:
+ result = _validate_tip_cluster(
+ active_columns, active_rows, tip_cluster
+ )
+ if isinstance(result, str):
+ return result
+ elif isinstance(result, int) and result == -1:
+ return None
+ if critical_row - active_rows >= 0:
+ critical_row = critical_row - active_rows
+ else:
+ critical_column += 1
+ if critical_column >= len(columns):
+ return None
+ critical_row = len(columns[critical_column]) - active_rows
+ return None
+
+ # Search through the tiprack beginning at H12
+ def _cluster_search_H12(active_columns: int, active_rows: int) -> Optional[str]:
+ critical_column = len(columns) - active_columns
+ critical_row = len(columns[critical_column]) - active_rows
+
+ while critical_column >= 0:
+ tip_cluster = _identify_tip_cluster(
+ active_columns, active_rows, critical_column, critical_row, "H12"
+ )
+ if tip_cluster is not None:
+ result = _validate_tip_cluster(
+ active_columns, active_rows, tip_cluster
+ )
+ if isinstance(result, str):
+ return result
+ elif isinstance(result, int) and result == -1:
+ return None
+ if critical_row - active_rows >= 0:
+ critical_row = critical_row - active_rows
+ else:
+ critical_column -= 1
+ if critical_column < 0:
+ return None
+ critical_row = len(columns[critical_column]) - active_rows
+ return None
+
+ if starting_tip_name is None and nozzle_map is not None and columns:
+ num_channels = len(nozzle_map.full_instrument_map_store)
+ num_nozzle_cols = len(nozzle_map.columns)
+ num_nozzle_rows = len(nozzle_map.rows)
+ # Each pipette's cluster search is determined by the point of entry for a given pipette/configuration:
+ # - Single channel pipettes always search a tiprack top to bottom, left to right
+ # - Eight channel pipettes will begin at the top if the primary nozzle is H1 and at the bottom if
+ # it is A1. The eight channel will always progress across the columns left to right.
+ # - 96 Channel pipettes will begin in the corner opposite their primary/starting nozzle (if starting nozzle = A1, enter tiprack at H12)
+ # The 96 channel will then progress towards the opposite corner, either going up or down, left or right depending on configuration.
+
+ if num_channels == 1:
+ return _cluster_search_A1(num_nozzle_cols, num_nozzle_rows)
+ elif num_channels == 8:
+ if nozzle_map.starting_nozzle == "A1":
+ return _cluster_search_H1(num_nozzle_cols, num_nozzle_rows)
+ elif nozzle_map.starting_nozzle == "H1":
+ return _cluster_search_A1(num_nozzle_cols, num_nozzle_rows)
+ elif num_channels == 96:
+ if nozzle_map.starting_nozzle == "A1":
+ return _cluster_search_H12(num_nozzle_cols, num_nozzle_rows)
+ elif nozzle_map.starting_nozzle == "A12":
+ return _cluster_search_H1(num_nozzle_cols, num_nozzle_rows)
+ elif nozzle_map.starting_nozzle == "H1":
+ return _cluster_search_A12(num_nozzle_cols, num_nozzle_rows)
+ elif nozzle_map.starting_nozzle == "H12":
+ return _cluster_search_A1(num_nozzle_cols, num_nozzle_rows)
+ else:
+ raise ValueError(
+ f"Nozzle {nozzle_map.starting_nozzle} is an invalid starting tip for automatic tip pickup."
+ )
+ else:
+ raise RuntimeError(
+ "Invalid number of channels for automatic tip tracking."
+ )
else:
- if starting_tip_name is not None:
- wells = _drop_wells_before_starting_tip(wells, starting_tip_name)
-
- for well_name, tip_state in wells.items():
- if tip_state == TipRackWellState.CLEAN:
- return well_name
-
+ if columns and num_tips == len(columns[0]): # Get next tips for 8-channel
+ column_head = [column[0] for column in columns]
+ starting_column_index = 0
+
+ if starting_tip_name:
+ for idx, column in enumerate(columns):
+ if starting_tip_name in column:
+ if starting_tip_name not in column_head:
+ starting_column_index = idx + 1
+ else:
+ starting_column_index = idx
+
+ for column in columns[starting_column_index:]:
+ if not any(wells[well] == TipRackWellState.USED for well in column):
+ return column[0]
+
+ elif num_tips == len(wells.keys()): # Get next tips for 96 channel
+ if starting_tip_name and starting_tip_name != columns[0][0]:
+ return None
+
+ if not any(
+ tip_state == TipRackWellState.USED for tip_state in wells.values()
+ ):
+ return next(iter(wells))
+
+ else: # Get next tips for single channel
+ if starting_tip_name is not None:
+ wells = _drop_wells_before_starting_tip(wells, starting_tip_name)
+
+ for well_name, tip_state in wells.items():
+ if tip_state == TipRackWellState.CLEAN:
+ return well_name
return None
def get_pipette_channels(self, pipette_id: str) -> int:
"""Return the given pipette's number of channels."""
return self._state.channels_by_pipette_id[pipette_id]
+ def get_pipette_active_channels(self, pipette_id: str) -> int:
+ """Get the number of channels being used in the given pipette's configuration."""
+ return self._state.active_channels_by_pipette_id[pipette_id]
+
+ def get_pipette_nozzle_map(self, pipette_id: str) -> NozzleMap:
+ """Get the current nozzle map the given pipette's configuration."""
+ return self._state.nozzle_map_by_pipette_id[pipette_id]
+
def has_clean_tip(self, labware_id: str, well_name: str) -> bool:
"""Get whether a well in a labware has a clean tip.
diff --git a/api/src/opentrons/protocol_engine/types.py b/api/src/opentrons/protocol_engine/types.py
index b00e8ee1af6..13e9515e447 100644
--- a/api/src/opentrons/protocol_engine/types.py
+++ b/api/src/opentrons/protocol_engine/types.py
@@ -5,11 +5,15 @@
from enum import Enum
from dataclasses import dataclass
from pydantic import BaseModel, Field, validator
-from typing import Optional, Union, List, Dict, Any, NamedTuple
+from typing import Optional, Union, List, Dict, Any, NamedTuple, Tuple, FrozenSet
from typing_extensions import Literal, TypeGuard
from opentrons_shared_data.pipette.dev_types import PipetteNameType
-from opentrons.types import MountType, DeckSlotName
+from opentrons.types import MountType, DeckSlotName, StagingSlotName
+from opentrons.hardware_control.types import (
+ TipStateType as HwTipStateType,
+ InstrumentProbeType,
+)
from opentrons.hardware_control.modules import (
ModuleType as ModuleType,
)
@@ -18,6 +22,7 @@
# convenience re-export of LabwareUri type
LabwareUri as LabwareUri,
)
+from opentrons_shared_data.module.dev_types import ModuleType as SharedDataModuleType
class EngineStatus(str, Enum):
@@ -33,6 +38,14 @@ class EngineStatus(str, Enum):
FAILED = "failed"
SUCCEEDED = "succeeded"
+ AWAITING_RECOVERY = "awaiting-recovery"
+ """The engine is waiting for external input to recover from a nonfatal error.
+
+ New fixup commands may be enqueued, which will run immediately.
+ The run can't be paused in this state, but it can be canceled, or resumed from the
+ next protocol command if recovery is complete.
+ """
+
class DeckSlotLocation(BaseModel):
"""The location of something placed in a single deck slot."""
@@ -54,6 +67,33 @@ class DeckSlotLocation(BaseModel):
)
+class StagingSlotLocation(BaseModel):
+ """The location of something placed in a single staging slot."""
+
+ slotName: StagingSlotName = Field(
+ ...,
+ description=(
+ # This description should be kept in sync with LabwareOffsetLocation.slotName.
+ "A slot on the robot's staging area."
+ "\n\n"
+ "These apply only to the Flex. The OT-2 has no staging slots."
+ ),
+ )
+
+
+class AddressableAreaLocation(BaseModel):
+ """The location of something place in an addressable area. This is a superset of deck slots."""
+
+ addressableAreaName: str = Field(
+ ...,
+ description=(
+ "The name of the addressable area that you want to use."
+ " Valid values are the `id`s of `addressableArea`s in the"
+ " [deck definition](https://github.com/Opentrons/opentrons/tree/edge/shared-data/deck)."
+ ),
+ )
+
+
class ModuleLocation(BaseModel):
"""The location of something placed atop a hardware module."""
@@ -76,13 +116,21 @@ class OnLabwareLocation(BaseModel):
OFF_DECK_LOCATION: _OffDeckLocationType = "offDeck"
LabwareLocation = Union[
- DeckSlotLocation, ModuleLocation, OnLabwareLocation, _OffDeckLocationType
+ DeckSlotLocation,
+ ModuleLocation,
+ OnLabwareLocation,
+ _OffDeckLocationType,
+ AddressableAreaLocation,
]
"""Union of all locations where it's legal to keep a labware."""
-OnDeckLabwareLocation = Union[DeckSlotLocation, ModuleLocation, OnLabwareLocation]
+OnDeckLabwareLocation = Union[
+ DeckSlotLocation, ModuleLocation, OnLabwareLocation, AddressableAreaLocation
+]
-NonStackedLocation = Union[DeckSlotLocation, ModuleLocation, _OffDeckLocationType]
+NonStackedLocation = Union[
+ DeckSlotLocation, AddressableAreaLocation, ModuleLocation, _OffDeckLocationType
+]
"""Union of all locations where it's legal to keep a labware that can't be stacked on another labware"""
@@ -199,6 +247,17 @@ class CurrentWell:
well_name: str
+@dataclass(frozen=True)
+class CurrentAddressableArea:
+ """The latest addressable area the robot has accessed."""
+
+ pipette_id: str
+ addressable_area_name: str
+
+
+CurrentPipetteLocation = Union[CurrentWell, CurrentAddressableArea]
+
+
@dataclass(frozen=True)
class TipGeometry:
"""Tip geometry data.
@@ -390,6 +449,10 @@ class OverlapOffset(Vec3f):
"""Offset representing overlap space of one labware on top of another labware or module."""
+class AddressableOffsetVector(Vec3f):
+ """Offset, in deck coordinates, from nominal to actual position of an addressable area."""
+
+
class LabwareMovementOffsetData(BaseModel):
"""Offsets to be used during labware movement."""
@@ -637,6 +700,42 @@ class LabwareMovementStrategy(str, Enum):
MANUAL_MOVE_WITHOUT_PAUSE = "manualMoveWithoutPause"
+@dataclass(frozen=True)
+class PotentialCutoutFixture:
+ """Cutout and cutout fixture id associated with a potential cutout fixture that can be on the deck."""
+
+ cutout_id: str
+ cutout_fixture_id: str
+ provided_addressable_areas: FrozenSet[str]
+
+
+class AreaType(Enum):
+ """The type of addressable area."""
+
+ SLOT = "slot"
+ STAGING_SLOT = "stagingSlot"
+ MOVABLE_TRASH = "movableTrash"
+ FIXED_TRASH = "fixedTrash"
+ WASTE_CHUTE = "wasteChute"
+ THERMOCYCLER = "thermocycler"
+ HEATER_SHAKER = "heaterShaker"
+ TEMPERATURE = "temperatureModule"
+ MAGNETICBLOCK = "magneticBlock"
+
+
+@dataclass(frozen=True)
+class AddressableArea:
+ """Addressable area that has been loaded."""
+
+ area_name: str
+ area_type: AreaType
+ base_slot: DeckSlotName
+ display_name: str
+ bounding_box: Dimensions
+ position: AddressableOffsetVector
+ compatible_module_types: List[SharedDataModuleType]
+
+
class PostRunHardwareState(Enum):
"""State of robot gantry & motors after a stop is performed and the hardware API is reset.
@@ -664,21 +763,21 @@ class PostRunHardwareState(Enum):
DISENGAGE_IN_PLACE = "disengageInPlace"
-NOZZLE_NAME_REGEX = "[A-Z][0-100]"
+NOZZLE_NAME_REGEX = r"[A-Z]\d{1,2}"
PRIMARY_NOZZLE_LITERAL = Literal["A1", "H1", "A12", "H12"]
-class EmptyNozzleLayoutConfiguration(BaseModel):
- """Empty basemodel to represent a reset to the nozzle configuration. Sending no parameters resets to default."""
+class AllNozzleLayoutConfiguration(BaseModel):
+ """All basemodel to represent a reset to the nozzle configuration. Sending no parameters resets to default."""
- style: Literal["EMPTY"] = "EMPTY"
+ style: Literal["ALL"] = "ALL"
class SingleNozzleLayoutConfiguration(BaseModel):
"""Minimum information required for a new nozzle configuration."""
style: Literal["SINGLE"] = "SINGLE"
- primary_nozzle: PRIMARY_NOZZLE_LITERAL = Field(
+ primaryNozzle: PRIMARY_NOZZLE_LITERAL = Field(
...,
description="The primary nozzle to use in the layout configuration. This nozzle will update the critical point of the current pipette. For now, this is also the back left corner of your rectangle.",
)
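The regex fix above matters because `[0-100]` is a character class containing only the characters 0, 1, and -, not the numeric range 0 to 100; a quick illustrative check of the two patterns:

```python
import re

OLD_NOZZLE_NAME_REGEX = "[A-Z][0-100]"
NEW_NOZZLE_NAME_REGEX = r"[A-Z]\d{1,2}"

# "H2" is a real nozzle name, but '2' is not in the character class [0-100]
# (which only contains '0', '1', and '-'), so the old pattern rejects it.
print(re.match(OLD_NOZZLE_NAME_REGEX, "H2"))   # None
print(re.match(NEW_NOZZLE_NAME_REGEX, "H2"))   # matches "H2"
print(re.match(NEW_NOZZLE_NAME_REGEX, "E12"))  # matches the full "E12"
```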
@@ -688,7 +787,7 @@ class RowNozzleLayoutConfiguration(BaseModel):
"""Minimum information required for a new nozzle configuration."""
style: Literal["ROW"] = "ROW"
- primary_nozzle: PRIMARY_NOZZLE_LITERAL = Field(
+ primaryNozzle: PRIMARY_NOZZLE_LITERAL = Field(
...,
description="The primary nozzle to use in the layout configuration. This nozzle will update the critical point of the current pipette. For now, this is also the back left corner of your rectangle.",
)
@@ -698,7 +797,7 @@ class ColumnNozzleLayoutConfiguration(BaseModel):
"""Information required for nozzle configurations of type ROW and COLUMN."""
style: Literal["COLUMN"] = "COLUMN"
- primary_nozzle: PRIMARY_NOZZLE_LITERAL = Field(
+ primaryNozzle: PRIMARY_NOZZLE_LITERAL = Field(
...,
description="The primary nozzle to use in the layout configuration. This nozzle will update the critical point of the current pipette. For now, this is also the back left corner of your rectangle.",
)
@@ -708,11 +807,11 @@ class QuadrantNozzleLayoutConfiguration(BaseModel):
"""Information required for nozzle configurations of type QUADRANT."""
style: Literal["QUADRANT"] = "QUADRANT"
- primary_nozzle: PRIMARY_NOZZLE_LITERAL = Field(
+ primaryNozzle: PRIMARY_NOZZLE_LITERAL = Field(
...,
description="The primary nozzle to use in the layout configuration. This nozzle will update the critical point of the current pipette. For now, this is also the back left corner of your rectangle.",
)
- front_right_nozzle: str = Field(
+ frontRightNozzle: str = Field(
...,
regex=NOZZLE_NAME_REGEX,
description="The front right nozzle in your configuration.",
@@ -720,9 +819,144 @@ class QuadrantNozzleLayoutConfiguration(BaseModel):
NozzleLayoutConfigurationType = Union[
- EmptyNozzleLayoutConfiguration,
+ AllNozzleLayoutConfiguration,
SingleNozzleLayoutConfiguration,
ColumnNozzleLayoutConfiguration,
RowNozzleLayoutConfiguration,
QuadrantNozzleLayoutConfiguration,
]
+
+# TODO make the below some sort of better type
+# TODO This should instead contain a proper cutout fixture type
+DeckConfigurationType = List[
+ Tuple[str, str, Optional[str]]
+] # cutout_id, cutout_fixture_id, opentrons_module_serial_number
+
+
+class InstrumentSensorId(str, Enum):
+ """Primary and secondary sensor ids."""
+
+ PRIMARY = "primary"
+ SECONDARY = "secondary"
+ BOTH = "both"
+
+ def to_instrument_probe_type(self) -> InstrumentProbeType:
+ """Convert to InstrumentProbeType."""
+ return {
+ InstrumentSensorId.PRIMARY: InstrumentProbeType.PRIMARY,
+ InstrumentSensorId.SECONDARY: InstrumentProbeType.SECONDARY,
+ InstrumentSensorId.BOTH: InstrumentProbeType.BOTH,
+ }[self]
+
+
+class TipPresenceStatus(str, Enum):
+ """Tip presence status reported by a pipette."""
+
+ PRESENT = "present"
+ ABSENT = "absent"
+ UNKNOWN = "unknown"
+
+ def to_hw_state(self) -> HwTipStateType:
+ """Convert to hardware tip state."""
+ assert self != TipPresenceStatus.UNKNOWN
+ return {
+ TipPresenceStatus.PRESENT: HwTipStateType.PRESENT,
+ TipPresenceStatus.ABSENT: HwTipStateType.ABSENT,
+ }[self]
+
+ @classmethod
+ def from_hw_state(cls, state: HwTipStateType) -> "TipPresenceStatus":
+ """Convert from hardware tip state."""
+ return {
+ HwTipStateType.PRESENT: TipPresenceStatus.PRESENT,
+ HwTipStateType.ABSENT: TipPresenceStatus.ABSENT,
+ }[state]
+
+
+# TODO (spp, 2024-04-02): move all RTP types to runner
+class RTPBase(BaseModel):
+ """Parameters defined in a protocol."""
+
+ displayName: str = Field(..., description="Display string for the parameter.")
+ variableName: str = Field(..., description="Python variable name of the parameter.")
+ description: Optional[str] = Field(
+ None, description="Detailed description of the parameter."
+ )
+ suffix: Optional[str] = Field(
+ None,
+ description="Units (like mL, mm/sec, etc) or a custom suffix for the parameter.",
+ )
+
+
+class NumberParameter(RTPBase):
+ """An integer parameter defined in a protocol."""
+
+ type: Literal["int", "float"] = Field(
+ ..., description="String specifying whether the number is an int or float type."
+ )
+ min: float = Field(
+ ..., description="Minimum value that the number param is allowed to have."
+ )
+ max: float = Field(
+ ..., description="Maximum value that the number param is allowed to have."
+ )
+ value: float = Field(
+ ...,
+ description="The value assigned to the parameter; if not supplied by the client, will be assigned the default value.",
+ )
+ default: float = Field(
+ ...,
+ description="Default value of the parameter, to be used when there is no client-specified value.",
+ )
+
+
+class BooleanParameter(RTPBase):
+ """A boolean parameter defined in a protocol."""
+
+ type: Literal["bool"] = Field(
+ default="bool", description="String specifying the type of this parameter"
+ )
+ value: bool = Field(
+ ...,
+ description="The value assigned to the parameter; if not supplied by the client, will be assigned the default value.",
+ )
+ default: bool = Field(
+ ...,
+ description="Default value of the parameter, to be used when there is no client-specified value.",
+ )
+
+
+class EnumChoice(BaseModel):
+ """Components of choices used in RTP Enum Parameters."""
+
+ displayName: str = Field(..., description="Display string for the param's choice.")
+ value: Union[float, str] = Field(
+ ..., description="Enum value of the param's choice."
+ )
+
+
+class EnumParameter(RTPBase):
+ """A string enum defined in a protocol."""
+
+ type: Literal["int", "float", "str"] = Field(
+ ...,
+ description="String specifying whether the parameter is an int or float or string type.",
+ )
+ choices: List[EnumChoice] = Field(
+ ..., description="List of valid choices for this parameter."
+ )
+ value: Union[float, str] = Field(
+ ...,
+ description="The value assigned to the parameter; if not supplied by the client, will be assigned the default value.",
+ )
+ default: Union[float, str] = Field(
+ ...,
+ description="Default value of the parameter, to be used when there is no client-specified value.",
+ )
+
+
+RunTimeParameter = Union[NumberParameter, EnumParameter, BooleanParameter]
+
+RunTimeParamValuesType = Dict[
+ str, Union[float, bool, str]
+] # update value types as more RTP types are added
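An illustrative construction of the run-time parameter models added above; this assumes an opentrons installation with this change applied, and all field values are made up:

```python
from opentrons.protocol_engine.types import (
    EnumChoice,
    EnumParameter,
    NumberParameter,
    RunTimeParamValuesType,
)

# A numeric parameter with a client-supplied value.
sample_count = NumberParameter(
    displayName="Sample count",
    variableName="sample_count",
    type="int",
    min=1,
    max=96,
    value=24,
    default=8,
)

# An enum parameter whose choices carry both a display name and a value.
mount = EnumParameter(
    displayName="Pipette mount",
    variableName="mount",
    type="str",
    choices=[
        EnumChoice(displayName="Left", value="left"),
        EnumChoice(displayName="Right", value="right"),
    ],
    value="left",
    default="right",
)

# Client-supplied overrides are keyed by the parameter's variable name.
overrides: RunTimeParamValuesType = {"sample_count": 24, "mount": "left"}
```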
diff --git a/api/src/opentrons/protocol_reader/input_file.py b/api/src/opentrons/protocol_reader/input_file.py
index 86390accf83..0ab1fe1dad9 100644
--- a/api/src/opentrons/protocol_reader/input_file.py
+++ b/api/src/opentrons/protocol_reader/input_file.py
@@ -1,10 +1,9 @@
"""Input file value objects."""
from __future__ import annotations
-from typing import IO
-from typing_extensions import Protocol as InterfaceShape
+from typing import BinaryIO, Protocol
-class AbstractInputFile(InterfaceShape):
+class AbstractInputFile(Protocol):
"""An individual file to be read as part of a protocol.
Properties:
@@ -14,4 +13,4 @@ class AbstractInputFile(InterfaceShape):
"""
filename: str
- file: IO[bytes]
+ file: BinaryIO
diff --git a/api/src/opentrons/protocol_reader/protocol_reader.py b/api/src/opentrons/protocol_reader/protocol_reader.py
index 309a25cd8b3..0f312ef1802 100644
--- a/api/src/opentrons/protocol_reader/protocol_reader.py
+++ b/api/src/opentrons/protocol_reader/protocol_reader.py
@@ -53,7 +53,10 @@ def __init__(
self._file_hasher = file_hasher or FileHasher()
async def save(
- self, files: Sequence[BufferedFile], directory: Path, content_hash: str
+ self,
+ files: Sequence[BufferedFile],
+ directory: Path,
+ content_hash: str,
) -> ProtocolSource:
"""Compute a `ProtocolSource` from buffered files and save them as files.
diff --git a/api/src/opentrons/protocol_runner/create_simulating_runner.py b/api/src/opentrons/protocol_runner/create_simulating_runner.py
index ff4df1020f7..c6854662c06 100644
--- a/api/src/opentrons/protocol_runner/create_simulating_runner.py
+++ b/api/src/opentrons/protocol_runner/create_simulating_runner.py
@@ -1,6 +1,5 @@
"""Simulating AbstractRunner factory."""
-from opentrons.config import feature_flags
from opentrons.hardware_control import API as OT2API, HardwareControlAPI
from opentrons.protocols.api_support import deck_type
from opentrons.protocols.api_support.deck_type import should_load_fixed_trash
@@ -57,7 +56,8 @@ async def create_simulating_runner(
ignore_pause=True,
use_virtual_modules=True,
use_virtual_gripper=True,
- use_virtual_pipettes=(not feature_flags.disable_fast_protocol_upload()),
+ use_simulated_deck_config=True,
+ use_virtual_pipettes=True,
),
load_fixed_trash=should_load_fixed_trash(protocol_config),
)
diff --git a/api/src/opentrons/protocol_runner/legacy_command_mapper.py b/api/src/opentrons/protocol_runner/legacy_command_mapper.py
index 85d341b30eb..e92cc2407aa 100644
--- a/api/src/opentrons/protocol_runner/legacy_command_mapper.py
+++ b/api/src/opentrons/protocol_runner/legacy_command_mapper.py
@@ -6,13 +6,14 @@
from opentrons_shared_data.pipette.dev_types import PipetteNameType
from opentrons.types import MountType, DeckSlotName, Location
-from opentrons.commands import types as legacy_command_types
+from opentrons.legacy_commands import types as legacy_command_types
from opentrons.protocol_engine import (
ProtocolEngineError,
actions as pe_actions,
commands as pe_commands,
types as pe_types,
)
+from opentrons.protocol_engine.error_recovery_policy import ErrorRecoveryType
from opentrons.protocol_engine.resources import (
ModelUtils,
ModuleDataProvider,
@@ -47,7 +48,6 @@ class LegacyContextCommandError(ProtocolEngineError):
"""An error returned when a PAPIv2 ProtocolContext command fails."""
def __init__(self, wrapping_exc: BaseException) -> None:
-
if isinstance(wrapping_exc, EnumeratedError):
super().__init__(
wrapping_exc.code,
@@ -79,6 +79,7 @@ def __init__(self, wrapping_exc: BaseException) -> None:
legacy_command_types.DISTRIBUTE,
legacy_command_types.TRANSFER,
legacy_command_types.RETURN_TIP,
+ legacy_command_types.AIR_GAP,
}
@@ -144,13 +145,26 @@ def map_command( # noqa: C901
if stage == "before":
count = self._command_count[command_type]
command_id = f"{command_type}-{count}"
- engine_command = self._build_initial_command(command, command_id, now)
+ command_create, running_command = self._build_initial_command(
+ command, command_id, now
+ )
self._command_count[command_type] = count + 1
- self._commands_by_broker_id[broker_id] = engine_command
+ self._commands_by_broker_id[broker_id] = running_command
results.append(
- pe_actions.UpdateCommandAction(engine_command, private_result=None)
+ pe_actions.QueueCommandAction(
+ command_id=command_id,
+ created_at=running_command.createdAt,
+ request=command_create,
+ request_hash=None,
+ )
+ )
+ assert running_command.startedAt is not None
+ results.append(
+ pe_actions.RunCommandAction(
+ running_command.id, started_at=running_command.startedAt
+ )
)
elif stage == "after":
@@ -167,6 +181,7 @@ def map_command( # noqa: C901
),
"status": pe_commands.CommandStatus.SUCCEEDED,
"completedAt": now,
+ "notes": [],
}
)
elif isinstance(running_command, pe_commands.DropTip):
@@ -177,6 +192,7 @@ def map_command( # noqa: C901
),
"status": pe_commands.CommandStatus.SUCCEEDED,
"completedAt": now,
+ "notes": [],
}
)
elif isinstance(running_command, pe_commands.Aspirate):
@@ -190,6 +206,7 @@ def map_command( # noqa: C901
),
"status": pe_commands.CommandStatus.SUCCEEDED,
"completedAt": now,
+ "notes": [],
}
)
elif isinstance(running_command, pe_commands.Dispense):
@@ -203,6 +220,7 @@ def map_command( # noqa: C901
),
"status": pe_commands.CommandStatus.SUCCEEDED,
"completedAt": now,
+ "notes": [],
}
)
elif isinstance(running_command, pe_commands.BlowOut):
@@ -213,6 +231,7 @@ def map_command( # noqa: C901
),
"status": pe_commands.CommandStatus.SUCCEEDED,
"completedAt": now,
+ "notes": [],
}
)
elif isinstance(running_command, pe_commands.Custom):
@@ -221,6 +240,7 @@ def map_command( # noqa: C901
"result": pe_commands.CustomResult.construct(),
"status": pe_commands.CommandStatus.SUCCEEDED,
"completedAt": now,
+ "notes": [],
}
)
else:
@@ -228,10 +248,11 @@ def map_command( # noqa: C901
update={
"status": pe_commands.CommandStatus.SUCCEEDED,
"completedAt": now,
+ "notes": [],
}
)
results.append(
- pe_actions.UpdateCommandAction(
+ pe_actions.SucceedCommandAction(
completed_command, private_result=None
)
)
@@ -245,54 +266,54 @@ def map_command( # noqa: C901
results.append(
pe_actions.FailCommandAction(
command_id=running_command.id,
+ running_command=running_command,
error_id=ModelUtils.generate_id(),
failed_at=now,
error=LegacyContextCommandError(command_error),
+ notes=[],
+ # For legacy protocols, we don't attempt to support any kind
+ # of error recovery at the Protocol Engine level.
+ # These protocols only run on the OT-2, which doesn't have
+ # any recoverable errors, anyway.
+ type=ErrorRecoveryType.FAIL_RUN,
)
)
return results
- def map_equipment_load(
- self, load_info: LegacyLoadInfo
- ) -> Tuple[pe_commands.Command, pe_commands.CommandPrivateResult]:
+ def map_equipment_load(self, load_info: LegacyLoadInfo) -> List[pe_actions.Action]:
"""Map a labware, instrument (pipette), or module load to a PE command."""
if isinstance(load_info, LegacyLabwareLoadInfo):
- return (self._map_labware_load(load_info), None)
+ return self._map_labware_load(load_info)
elif isinstance(load_info, LegacyInstrumentLoadInfo):
return self._map_instrument_load(load_info)
elif isinstance(load_info, LegacyModuleLoadInfo):
- return (self._map_module_load(load_info), None)
+ return self._map_module_load(load_info)
def _build_initial_command(
self,
command: legacy_command_types.CommandMessage,
command_id: str,
now: datetime,
- ) -> pe_commands.Command:
- engine_command: pe_commands.Command
+ ) -> Tuple[pe_commands.CommandCreate, pe_commands.Command]:
if command["name"] == legacy_command_types.PICK_UP_TIP:
- engine_command = self._build_pick_up_tip_command(
+ return self._build_pick_up_tip(
command=command, command_id=command_id, now=now
)
elif command["name"] == legacy_command_types.DROP_TIP:
- engine_command = self._build_drop_tip_command(
- command=command, command_id=command_id, now=now
- )
+ return self._build_drop_tip(command=command, command_id=command_id, now=now)
elif (
command["name"] == legacy_command_types.ASPIRATE
or command["name"] == legacy_command_types.DISPENSE
):
- engine_command = self._build_liquid_handling_command(
+ return self._build_liquid_handling(
command=command, command_id=command_id, now=now
)
elif command["name"] == legacy_command_types.BLOW_OUT:
- engine_command = self._build_blow_out_command(
- command=command, command_id=command_id, now=now
- )
+ return self._build_blow_out(command=command, command_id=command_id, now=now)
elif command["name"] == legacy_command_types.PAUSE:
- engine_command = pe_commands.WaitForResume.construct(
+ wait_for_resume_running = pe_commands.WaitForResume.construct(
id=command_id,
key=command_id,
status=pe_commands.CommandStatus.RUNNING,
@@ -302,8 +323,15 @@ def _build_initial_command(
message=command["payload"]["userMessage"],
),
)
+ wait_for_resume_create: pe_commands.CommandCreate = (
+ pe_commands.WaitForResumeCreate.construct(
+ key=wait_for_resume_running.key,
+ params=wait_for_resume_running.params,
+ )
+ )
+ return wait_for_resume_create, wait_for_resume_running
else:
- engine_command = pe_commands.Custom.construct(
+ custom_running = pe_commands.Custom.construct(
id=command_id,
key=command_id,
status=pe_commands.CommandStatus.RUNNING,
@@ -314,15 +342,18 @@ def _build_initial_command(
legacyCommandText=command["payload"]["text"],
),
)
+ custom_create = pe_commands.CustomCreate.construct(
+ key=custom_running.key,
+ params=custom_running.params,
+ )
+ return custom_create, custom_running
- return engine_command
-
- def _build_drop_tip_command(
+ def _build_drop_tip(
self,
command: legacy_command_types.DropTipMessage,
command_id: str,
now: datetime,
- ) -> pe_commands.Command:
+ ) -> Tuple[pe_commands.CommandCreate, pe_commands.Command]:
pipette: LegacyPipetteContext = command["payload"]["instrument"]
well = command["payload"]["location"]
mount = MountType(pipette.mount)
@@ -331,7 +362,8 @@ def _build_drop_tip_command(
well_name = well.well_name
labware_id = self._labware_id_by_slot[slot]
pipette_id = self._pipette_id_by_mount[mount]
- return pe_commands.DropTip.construct(
+
+ running = pe_commands.DropTip.construct(
id=command_id,
key=command_id,
status=pe_commands.CommandStatus.RUNNING,
@@ -343,13 +375,18 @@ def _build_drop_tip_command(
wellName=well_name,
),
)
+ create = pe_commands.DropTipCreate.construct(
+ key=running.key,
+ params=running.params,
+ )
+ return create, running
- def _build_pick_up_tip_command(
+ def _build_pick_up_tip(
self,
command: legacy_command_types.PickUpTipMessage,
command_id: str,
now: datetime,
- ) -> pe_commands.Command:
+ ) -> Tuple[pe_commands.CommandCreate, pe_commands.Command]:
pipette: LegacyPipetteContext = command["payload"]["instrument"]
location = command["payload"]["location"]
well = location
@@ -360,7 +397,7 @@ def _build_pick_up_tip_command(
labware_id = self._labware_id_by_slot[slot]
pipette_id = self._pipette_id_by_mount[mount]
- return pe_commands.PickUpTip.construct(
+ running = pe_commands.PickUpTip.construct(
id=command_id,
key=command_id,
status=pe_commands.CommandStatus.RUNNING,
@@ -372,15 +409,19 @@ def _build_pick_up_tip_command(
wellName=well_name,
),
)
+ create = pe_commands.PickUpTipCreate.construct(
+ key=running.key, params=running.params
+ )
+ return create, running
- def _build_liquid_handling_command(
+ def _build_liquid_handling(
self,
command: Union[
legacy_command_types.AspirateMessage, legacy_command_types.DispenseMessage
],
command_id: str,
now: datetime,
- ) -> pe_commands.Command:
+ ) -> Tuple[pe_commands.CommandCreate, pe_commands.Command]:
pipette: LegacyPipetteContext = command["payload"]["instrument"]
location = command["payload"]["location"]
volume = command["payload"]["volume"]
@@ -404,7 +445,11 @@ def _build_liquid_handling_command(
# or aspirate() with a volume of 0, which behaves roughly like
# move_to(). Protocol Engine aspirate and dispense commands must have
# volume > 0, so we can't map into those.
- return pe_commands.MoveToWell.construct(
+ #
+ # TODO(mm, 2024-03-22): I don't think this has been true since
+ # https://github.com/Opentrons/opentrons/pull/14211. Can we just use
+ # aspirate and dispense commands now?
+ move_to_well_running = pe_commands.MoveToWell.construct(
id=command_id,
key=command_id,
status=pe_commands.CommandStatus.RUNNING,
@@ -416,9 +461,13 @@ def _build_liquid_handling_command(
wellName=well_name,
),
)
+ move_to_well_create = pe_commands.MoveToWellCreate.construct(
+ key=move_to_well_running.key, params=move_to_well_running.params
+ )
+ return move_to_well_create, move_to_well_running
elif command["name"] == legacy_command_types.ASPIRATE:
flow_rate = command["payload"]["rate"] * pipette.flow_rate.aspirate
- return pe_commands.Aspirate.construct(
+ aspirate_running = pe_commands.Aspirate.construct(
id=command_id,
key=command_id,
status=pe_commands.CommandStatus.RUNNING,
@@ -434,9 +483,13 @@ def _build_liquid_handling_command(
flowRate=flow_rate,
),
)
+ aspirate_create = pe_commands.AspirateCreate.construct(
+ key=aspirate_running.key, params=aspirate_running.params
+ )
+ return aspirate_create, aspirate_running
else:
flow_rate = command["payload"]["rate"] * pipette.flow_rate.dispense
- return pe_commands.Dispense.construct(
+ dispense_running = pe_commands.Dispense.construct(
id=command_id,
key=command_id,
status=pe_commands.CommandStatus.RUNNING,
@@ -452,8 +505,13 @@ def _build_liquid_handling_command(
flowRate=flow_rate,
),
)
+ dispense_create = pe_commands.DispenseCreate.construct(
+ key=dispense_running.key, params=dispense_running.params
+ )
+ return dispense_create, dispense_running
+
else:
- return pe_commands.Custom.construct(
+ running = pe_commands.Custom.construct(
id=command_id,
key=command_id,
status=pe_commands.CommandStatus.RUNNING,
@@ -464,13 +522,17 @@ def _build_liquid_handling_command(
legacyCommandText=command["payload"]["text"],
),
)
+ create = pe_commands.CustomCreate.construct(
+ key=running.key, params=running.params
+ )
+ return create, running
- def _build_blow_out_command(
+ def _build_blow_out(
self,
command: legacy_command_types.BlowOutMessage,
command_id: str,
now: datetime,
- ) -> pe_commands.Command:
+ ) -> Tuple[pe_commands.CommandCreate, pe_commands.Command]:
pipette: LegacyPipetteContext = command["payload"]["instrument"]
location = command["payload"]["location"]
flow_rate = pipette.flow_rate.blow_out
@@ -488,7 +550,8 @@ def _build_blow_out_command(
mount = MountType(pipette.mount)
well_name = well.well_name
pipette_id = self._pipette_id_by_mount[mount]
- return pe_commands.BlowOut.construct(
+
+ blow_out_running = pe_commands.BlowOut.construct(
id=command_id,
key=command_id,
status=pe_commands.CommandStatus.RUNNING,
@@ -502,10 +565,15 @@ def _build_blow_out_command(
flowRate=flow_rate,
),
)
+ blow_out_create = pe_commands.BlowOutCreate.construct(
+ key=blow_out_running.key, params=blow_out_running.params
+ )
+ return blow_out_create, blow_out_running
+
# TODO:(jr, 15.08.2022): blow_out commands with no specified labware get filtered
# into custom. Refactor this in followup legacy command mapping
else:
- return pe_commands.Custom.construct(
+ custom_running = pe_commands.Custom.construct(
id=command_id,
key=command_id,
status=pe_commands.CommandStatus.RUNNING,
@@ -516,10 +584,14 @@ def _build_blow_out_command(
legacyCommandText=command["payload"]["text"],
),
)
+ custom_create = pe_commands.CustomCreate.construct(
+ key=custom_running.key, params=custom_running.params
+ )
+ return custom_create, custom_running
def _map_labware_load(
self, labware_load_info: LegacyLabwareLoadInfo
- ) -> pe_commands.Command:
+ ) -> List[pe_actions.Action]:
"""Map a legacy labware load to a ProtocolEngine command."""
now = ModelUtils.get_timestamp()
count = self._command_count["LOAD_LABWARE"]
@@ -535,7 +607,7 @@ def _map_labware_load(
command_id = f"commands.LOAD_LABWARE-{count}"
labware_id = f"labware-{count}"
- load_labware_command = pe_commands.LoadLabware.construct(
+ succeeded_command = pe_commands.LoadLabware.construct(
id=command_id,
key=command_id,
status=pe_commands.CommandStatus.SUCCEEDED,
@@ -549,6 +621,7 @@ def _map_labware_load(
version=labware_load_info.labware_version,
displayName=labware_load_info.labware_display_name,
),
+ notes=[],
result=pe_commands.LoadLabwareResult.construct(
labwareId=labware_id,
definition=LabwareDefinition.parse_obj(
@@ -557,18 +630,36 @@ def _map_labware_load(
offsetId=labware_load_info.offset_id,
),
)
+ queue_action = pe_actions.QueueCommandAction(
+ command_id=succeeded_command.id,
+ created_at=succeeded_command.createdAt,
+ request=pe_commands.LoadLabwareCreate.construct(
+ key=succeeded_command.key, params=succeeded_command.params
+ ),
+ request_hash=None,
+ )
+ run_action = pe_actions.RunCommandAction(
+ command_id=succeeded_command.id,
+ # We just set this above, so we know it's not None.
+ started_at=succeeded_command.startedAt, # type: ignore[arg-type]
+ )
+ succeed_action = pe_actions.SucceedCommandAction(
+ command=succeeded_command,
+ private_result=None,
+ )
self._command_count["LOAD_LABWARE"] = count + 1
if isinstance(location, pe_types.DeckSlotLocation):
self._labware_id_by_slot[location.slotName] = labware_id
elif isinstance(location, pe_types.ModuleLocation):
self._labware_id_by_module_id[location.moduleId] = labware_id
- return load_labware_command
+
+ return [queue_action, run_action, succeed_action]
def _map_instrument_load(
self,
instrument_load_info: LegacyInstrumentLoadInfo,
- ) -> Tuple[pe_commands.Command, pe_commands.CommandPrivateResult]:
+ ) -> List[pe_actions.Action]:
"""Map a legacy instrument (pipette) load to a ProtocolEngine command.
Also creates a `AddPipetteConfigAction`, which is not necessary for the run,
@@ -580,7 +671,7 @@ def _map_instrument_load(
pipette_id = f"pipette-{count}"
mount = MountType(str(instrument_load_info.mount).lower())
- load_pipette_command = pe_commands.LoadPipette.construct(
+ succeeded_command = pe_commands.LoadPipette.construct(
id=command_id,
key=command_id,
status=pe_commands.CommandStatus.SUCCEEDED,
@@ -591,6 +682,7 @@ def _map_instrument_load(
pipetteName=PipetteNameType(instrument_load_info.instrument_load_name),
mount=mount,
),
+ notes=[],
result=pe_commands.LoadPipetteResult.construct(pipetteId=pipette_id),
)
serial = instrument_load_info.pipette_dict.get("pipette_id", None) or ""
@@ -598,18 +690,38 @@ def _map_instrument_load(
pipette_id=pipette_id,
serial_number=serial,
config=pipette_data_provider.get_pipette_static_config(
- instrument_load_info.pipette_dict
+                # Compatibility note: this is the tip overlap data version; it stays at v0
+                # so protocol behavior does not change when you run a legacy JSON protocol.
+ instrument_load_info.pipette_dict,
+ "v0",
),
)
+ queue_action = pe_actions.QueueCommandAction(
+ command_id=succeeded_command.id,
+ created_at=succeeded_command.createdAt,
+ request=pe_commands.LoadPipetteCreate.construct(
+ key=succeeded_command.key, params=succeeded_command.params
+ ),
+ request_hash=None,
+ )
+ run_action = pe_actions.RunCommandAction(
+ command_id=succeeded_command.id,
+ # We just set this above, so we know it's not None.
+ started_at=succeeded_command.startedAt, # type: ignore[arg-type]
+ )
+ succeed_action = pe_actions.SucceedCommandAction(
+ command=succeeded_command,
+ private_result=pipette_config_result,
+ )
self._command_count["LOAD_PIPETTE"] = count + 1
self._pipette_id_by_mount[mount] = pipette_id
- return (load_pipette_command, pipette_config_result)
+ return [queue_action, run_action, succeed_action]
def _map_module_load(
self, module_load_info: LegacyModuleLoadInfo
- ) -> pe_commands.Command:
+ ) -> List[pe_actions.Action]:
"""Map a legacy module load to a Protocol Engine command."""
now = ModelUtils.get_timestamp()
@@ -628,7 +740,7 @@ def _map_module_load(
loaded_model
) or self._module_data_provider.get_definition(loaded_model)
- load_module_command = pe_commands.LoadModule.construct(
+ succeeded_command = pe_commands.LoadModule.construct(
id=command_id,
key=command_id,
status=pe_commands.CommandStatus.SUCCEEDED,
@@ -642,6 +754,7 @@ def _map_module_load(
),
moduleId=module_id,
),
+ notes=[],
result=pe_commands.LoadModuleResult.construct(
moduleId=module_id,
serialNumber=module_load_info.module_serial,
@@ -649,7 +762,26 @@ def _map_module_load(
model=loaded_model,
),
)
+ queue_action = pe_actions.QueueCommandAction(
+ command_id=succeeded_command.id,
+ created_at=succeeded_command.createdAt,
+ request=pe_commands.LoadModuleCreate.construct(
+ key=succeeded_command.key, params=succeeded_command.params
+ ),
+ request_hash=None,
+ )
+ run_action = pe_actions.RunCommandAction(
+ command_id=succeeded_command.id,
+ # We just set this above, so we know it's not None.
+ started_at=succeeded_command.startedAt, # type: ignore[arg-type]
+ )
+ succeed_action = pe_actions.SucceedCommandAction(
+ command=succeeded_command,
+ private_result=None,
+ )
+
self._command_count["LOAD_MODULE"] = count + 1
self._module_id_by_slot[module_load_info.deck_slot] = module_id
self._module_definition_by_model[loaded_model] = loaded_definition
- return load_module_command
+
+ return [queue_action, run_action, succeed_action]
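The mapper above now emits the same queue/run/succeed action triplet for every legacy load event. A minimal sketch of that shared pattern, factored into a helper for illustration only (the action constructors are the ones used in this diff; the helper name and the loose `private_result`/`CommandCreate` typing are assumptions):

from typing import List, Optional

from opentrons.protocol_engine import actions as pe_actions, commands as pe_commands


def actions_for_completed_command(
    succeeded_command: pe_commands.Command,
    request: pe_commands.CommandCreate,
    private_result: Optional[object] = None,
) -> List[pe_actions.Action]:
    """Queue, run, and immediately succeed a command the legacy protocol already executed."""
    return [
        pe_actions.QueueCommandAction(
            command_id=succeeded_command.id,
            created_at=succeeded_command.createdAt,
            request=request,
            request_hash=None,
        ),
        pe_actions.RunCommandAction(
            command_id=succeeded_command.id,
            started_at=succeeded_command.startedAt,  # set by the mapper, never None here
        ),
        pe_actions.SucceedCommandAction(
            command=succeeded_command,
            private_result=private_result,
        ),
    ]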
diff --git a/api/src/opentrons/protocol_runner/legacy_context_plugin.py b/api/src/opentrons/protocol_runner/legacy_context_plugin.py
index 41ba0c62268..7dd882f0fb7 100644
--- a/api/src/opentrons/protocol_runner/legacy_context_plugin.py
+++ b/api/src/opentrons/protocol_runner/legacy_context_plugin.py
@@ -3,9 +3,9 @@
from asyncio import create_task, Task
from contextlib import ExitStack
-from typing import Optional
+from typing import List, Optional
-from opentrons.commands.types import CommandMessage as LegacyCommand
+from opentrons.legacy_commands.types import CommandMessage as LegacyCommand
from opentrons.legacy_broker import LegacyBroker
from opentrons.protocol_engine import AbstractPlugin, actions as pe_actions
from opentrons.util.broker import ReadOnlyBroker
@@ -55,7 +55,15 @@ def __init__(
# So if the protocol had to wait for the event loop to be free
# every time it reported some activity,
# it could visibly stall for a moment, making its motion jittery.
- self._actions_to_dispatch = ThreadAsyncQueue[pe_actions.Action]()
+ #
+ # TODO(mm, 2024-03-22): See if we can remove this non-blockingness now.
+ # It was one of several band-aids introduced in ~v5.0.0 to mitigate performance
+ # problems. v6.3.0 started running some Python protocols directly through
+ # Protocol Engine, without this plugin, and without any non-blocking queue.
+ # If performance is sufficient for those, that probably means the
+ # performance problems have been resolved in better ways elsewhere
+ # and we don't need this anymore.
+ self._actions_to_dispatch = ThreadAsyncQueue[List[pe_actions.Action]]()
self._action_dispatching_task: Optional[Task[None]] = None
self._subscription_exit_stack: Optional[ExitStack] = None
@@ -119,20 +127,15 @@ def _handle_legacy_command(self, command: LegacyCommand) -> None:
Used as a broker callback, so this will run in the APIv2 protocol's thread.
"""
pe_actions = self._legacy_command_mapper.map_command(command=command)
- for pe_action in pe_actions:
- self._actions_to_dispatch.put(pe_action)
+ self._actions_to_dispatch.put(pe_actions)
def _handle_equipment_loaded(self, load_info: LegacyLoadInfo) -> None:
- (
- pe_command,
- pe_private_result,
- ) = self._legacy_command_mapper.map_equipment_load(load_info=load_info)
-
- self._actions_to_dispatch.put(
- pe_actions.UpdateCommandAction(
- command=pe_command, private_result=pe_private_result
- )
- )
+ """Handle an equipment load reported by the APIv2 protocol.
+
+ Used as a broker callback, so this will run in the APIv2 protocol's thread.
+ """
+ pe_actions = self._legacy_command_mapper.map_equipment_load(load_info=load_info)
+ self._actions_to_dispatch.put(pe_actions)
async def _dispatch_all_actions(self) -> None:
"""Dispatch all actions to the `ProtocolEngine`.
@@ -140,5 +143,18 @@ async def _dispatch_all_actions(self) -> None:
Exits only when `self._actions_to_dispatch` is closed
(or an unexpected exception is raised).
"""
- async for action in self._actions_to_dispatch.get_async_until_closed():
- self.dispatch(action)
+ async for action_batch in self._actions_to_dispatch.get_async_until_closed():
+ # It's critical that we dispatch this batch of actions as one atomic
+ # sequence, without yielding to the event loop.
+ # Although this plugin only means to use the ProtocolEngine as a way of
+ # passively exposing the protocol's progress, the ProtocolEngine is still
+ # theoretically active, which means it's constantly watching in the
+ # background to execute any commands that it finds `queued`.
+ #
+ # For example, one of these action batches will often want to
+ # instantaneously create a running command by having a queue action
+ # immediately followed by a run action. We cannot let the
+ # ProtocolEngine's background task see the command in the `queued` state,
+ # or it will try to execute it, which the legacy protocol is already doing.
+ for action in action_batch:
+ self.dispatch(action)
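The atomic-dispatch requirement described in the comment above comes down to a basic asyncio property: a plain `for` loop with no `await` in its body cannot yield to the event loop. A standalone sketch of that shape (names and the string "actions" are illustrative):

import asyncio
from typing import Callable, List


async def dispatch_batches(
    batches: "asyncio.Queue[List[str]]", dispatch: Callable[[str], None]
) -> None:
    while True:
        batch = await batches.get()  # the event loop may run other tasks here...
        for action in batch:
            dispatch(action)  # ...but never between the actions of one batch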
diff --git a/api/src/opentrons/protocol_runner/legacy_wrappers.py b/api/src/opentrons/protocol_runner/legacy_wrappers.py
index 6a816f5e9a1..9783c877227 100644
--- a/api/src/opentrons/protocol_runner/legacy_wrappers.py
+++ b/api/src/opentrons/protocol_runner/legacy_wrappers.py
@@ -20,6 +20,7 @@
)
from opentrons.legacy_broker import LegacyBroker
from opentrons.protocol_engine import ProtocolEngine
+from opentrons.protocol_engine.types import RunTimeParamValuesType
from opentrons.protocol_reader import ProtocolSource, ProtocolFileRole
from opentrons.util.broker import Broker
@@ -29,6 +30,7 @@
ModuleContext as LegacyModuleContext,
Labware as LegacyLabware,
Well as LegacyWell,
+ ParameterContext,
create_protocol_context,
)
from opentrons.protocol_api.core.engine import ENGINE_CORE_API_VERSION
@@ -168,9 +170,16 @@ class LegacyExecutor:
"""Interface to execute Protocol API v2 protocols in a child thread."""
@staticmethod
- async def execute(protocol: LegacyProtocol, context: LegacyProtocolContext) -> None:
+ async def execute(
+ protocol: LegacyProtocol,
+ context: LegacyProtocolContext,
+ parameter_context: Optional[ParameterContext],
+ run_time_param_values: Optional[RunTimeParamValuesType],
+ ) -> None:
"""Execute a PAPIv2 protocol with a given ProtocolContext in a child thread."""
- await to_thread.run_sync(run_protocol, protocol, context)
+ await to_thread.run_sync(
+ run_protocol, protocol, context, parameter_context, run_time_param_values
+ )
__all__ = [
diff --git a/api/src/opentrons/protocol_runner/protocol_runner.py b/api/src/opentrons/protocol_runner/protocol_runner.py
index 56669077efb..9c097bbba2d 100644
--- a/api/src/opentrons/protocol_runner/protocol_runner.py
+++ b/api/src/opentrons/protocol_runner/protocol_runner.py
@@ -9,6 +9,7 @@
from opentrons.hardware_control import HardwareControlAPI
from opentrons import protocol_reader
from opentrons.legacy_broker import LegacyBroker
+from opentrons.protocol_api import ParameterContext
from opentrons.protocol_reader import (
ProtocolSource,
JsonProtocolConfig,
@@ -35,7 +36,13 @@
LegacyExecutor,
LegacyLoadInfo,
)
-from ..protocol_engine.types import PostRunHardwareState
+from ..protocol_engine.errors import ProtocolCommandFailedError
+from ..protocol_engine.types import (
+ PostRunHardwareState,
+ DeckConfigurationType,
+ RunTimeParameter,
+ RunTimeParamValuesType,
+)
class RunResult(NamedTuple):
@@ -43,6 +50,7 @@ class RunResult(NamedTuple):
commands: List[Command]
state_summary: StateSummary
+ parameters: List[RunTimeParameter]
class AbstractRunner(ABC):
@@ -75,6 +83,11 @@ def broker(self) -> LegacyBroker:
"""
return self._broker
+ @property
+ def run_time_parameters(self) -> List[RunTimeParameter]:
+ """Parameter definitions defined by protocol, if any. Currently only for python protocols."""
+ return []
+
def was_started(self) -> bool:
"""Whether the run has been started.
@@ -82,18 +95,18 @@ def was_started(self) -> bool:
"""
return self._protocol_engine.state_view.commands.has_been_played()
- def play(self) -> None:
+ def play(self, deck_configuration: Optional[DeckConfigurationType] = None) -> None:
"""Start or resume the run."""
- self._protocol_engine.play()
+ self._protocol_engine.play(deck_configuration=deck_configuration)
def pause(self) -> None:
"""Pause the run."""
- self._protocol_engine.pause()
+ self._protocol_engine.request_pause()
async def stop(self) -> None:
"""Stop (cancel) the run."""
if self.was_started():
- await self._protocol_engine.stop()
+ await self._protocol_engine.request_stop()
else:
await self._protocol_engine.finish(
drop_tips_after_run=False,
@@ -101,10 +114,16 @@ async def stop(self) -> None:
post_run_hardware_state=PostRunHardwareState.STAY_ENGAGED_IN_PLACE,
)
+ def resume_from_recovery(self) -> None:
+ """See `ProtocolEngine.resume_from_recovery()`."""
+ self._protocol_engine.resume_from_recovery()
+
@abstractmethod
async def run(
self,
+ deck_configuration: DeckConfigurationType,
protocol_source: Optional[ProtocolSource] = None,
+ run_time_param_values: Optional[RunTimeParamValuesType] = None,
) -> RunResult:
"""Run a given protocol to completion."""
@@ -120,6 +139,8 @@ def __init__(
legacy_file_reader: Optional[LegacyFileReader] = None,
legacy_context_creator: Optional[LegacyContextCreator] = None,
legacy_executor: Optional[LegacyExecutor] = None,
+ post_run_hardware_state: PostRunHardwareState = PostRunHardwareState.HOME_AND_STAY_ENGAGED,
+ drop_tips_after_run: bool = True,
) -> None:
"""Initialize the PythonAndLegacyRunner with its dependencies."""
super().__init__(protocol_engine)
@@ -132,10 +153,26 @@ def __init__(
self._legacy_executor = legacy_executor or LegacyExecutor()
# TODO(mc, 2022-01-11): replace task queue with specific implementations
# of runner interface
- self._task_queue = task_queue or TaskQueue(cleanup_func=protocol_engine.finish)
+ self._task_queue = task_queue or TaskQueue()
+ self._task_queue.set_cleanup_func(
+ func=protocol_engine.finish,
+ drop_tips_after_run=drop_tips_after_run,
+ post_run_hardware_state=post_run_hardware_state,
+ )
+ self._parameter_context: Optional[ParameterContext] = None
+
+ @property
+ def run_time_parameters(self) -> List[RunTimeParameter]:
+ """Parameter definitions defined by protocol, if any. Will always be empty before execution."""
+ if self._parameter_context is not None:
+ return self._parameter_context.export_parameters_for_analysis()
+ return []
async def load(
- self, protocol_source: ProtocolSource, python_parse_mode: PythonParseMode
+ self,
+ protocol_source: ProtocolSource,
+ python_parse_mode: PythonParseMode,
+ run_time_param_values: Optional[RunTimeParamValuesType],
) -> None:
"""Load a Python or JSONv5(& older) ProtocolSource into managed ProtocolEngine."""
labware_definitions = await protocol_reader.extract_labware_definitions(
@@ -151,6 +188,7 @@ async def load(
protocol = self._legacy_file_reader.read(
protocol_source, labware_definitions, python_parse_mode
)
+ self._parameter_context = ParameterContext(api_version=protocol.api_level)
equipment_broker = None
if protocol.api_level < LEGACY_PYTHON_API_VERSION_CUTOFF:
@@ -170,36 +208,49 @@ async def load(
equipment_broker=equipment_broker,
)
initial_home_command = pe_commands.HomeCreate(
+ # this command homes all axes, including pipette plunger and gripper jaw
params=pe_commands.HomeParams(axes=None)
)
- # this command homes all axes, including pipette plugner and gripper jaw
- self._protocol_engine.add_command(request=initial_home_command)
- self._task_queue.set_run_func(
- func=self._legacy_executor.execute,
- protocol=protocol,
- context=context,
- )
+ async def run_func() -> None:
+ await self._protocol_engine.add_and_execute_command(
+ request=initial_home_command
+ )
+ await self._legacy_executor.execute(
+ protocol=protocol,
+ context=context,
+ parameter_context=self._parameter_context,
+ run_time_param_values=run_time_param_values,
+ )
+
+ self._task_queue.set_run_func(run_func)
async def run( # noqa: D102
self,
+ deck_configuration: DeckConfigurationType,
protocol_source: Optional[ProtocolSource] = None,
+ run_time_param_values: Optional[RunTimeParamValuesType] = None,
python_parse_mode: PythonParseMode = PythonParseMode.NORMAL,
) -> RunResult:
# TODO(mc, 2022-01-11): move load to runner creation, remove from `run`
- # currently `protocol_source` arg is only used by tests
+ # currently `protocol_source` arg is only used by tests & protocol analyzer
if protocol_source:
await self.load(
- protocol_source=protocol_source, python_parse_mode=python_parse_mode
+ protocol_source=protocol_source,
+ python_parse_mode=python_parse_mode,
+ run_time_param_values=run_time_param_values,
)
- self.play()
+ self.play(deck_configuration=deck_configuration)
self._task_queue.start()
await self._task_queue.join()
run_data = self._protocol_engine.state_view.get_summary()
commands = self._protocol_engine.state_view.commands.get_all()
- return RunResult(commands=commands, state_summary=run_data)
+ parameters = self.run_time_parameters
+ return RunResult(
+ commands=commands, state_summary=run_data, parameters=parameters
+ )
class JsonRunner(AbstractRunner):
@@ -212,6 +263,8 @@ def __init__(
task_queue: Optional[TaskQueue] = None,
json_file_reader: Optional[JsonFileReader] = None,
json_translator: Optional[JsonTranslator] = None,
+ post_run_hardware_state: PostRunHardwareState = PostRunHardwareState.HOME_AND_STAY_ENGAGED,
+ drop_tips_after_run: bool = True,
) -> None:
"""Initialize the JsonRunner with its dependencies."""
super().__init__(protocol_engine)
@@ -221,8 +274,17 @@ def __init__(
self._json_translator = json_translator or JsonTranslator()
# TODO(mc, 2022-01-11): replace task queue with specific implementations
# of runner interface
- self._task_queue = task_queue or TaskQueue(cleanup_func=protocol_engine.finish)
+        self._task_queue = task_queue or TaskQueue()
+ self._task_queue.set_cleanup_func(
+ func=protocol_engine.finish,
+ drop_tips_after_run=drop_tips_after_run,
+ post_run_hardware_state=post_run_hardware_state,
+ )
+
self._hardware_api.should_taskify_movement_execution(taskify=False)
+ self._queued_commands: List[pe_commands.CommandCreate] = []
async def load(self, protocol_source: ProtocolSource) -> None:
"""Load a JSONv6+ ProtocolSource into managed ProtocolEngine."""
@@ -264,34 +326,44 @@ async def load(self, protocol_source: ProtocolSource) -> None:
color=liquid.displayColor,
)
await _yield()
+
initial_home_command = pe_commands.HomeCreate(
params=pe_commands.HomeParams(axes=None)
)
# this command homes all axes, including pipette plunger and gripper jaw
self._protocol_engine.add_command(request=initial_home_command)
- for command in commands:
- self._protocol_engine.add_command(request=command)
- await _yield()
+ self._queued_commands = commands
- self._task_queue.set_run_func(func=self._protocol_engine.wait_until_complete)
+ self._task_queue.set_run_func(func=self._add_command_and_execute)
async def run( # noqa: D102
self,
+ deck_configuration: DeckConfigurationType,
protocol_source: Optional[ProtocolSource] = None,
+ run_time_param_values: Optional[RunTimeParamValuesType] = None,
) -> RunResult:
# TODO(mc, 2022-01-11): move load to runner creation, remove from `run`
# currently `protocol_source` arg is only used by tests
if protocol_source:
await self.load(protocol_source)
- self.play()
+ self.play(deck_configuration=deck_configuration)
self._task_queue.start()
await self._task_queue.join()
run_data = self._protocol_engine.state_view.get_summary()
commands = self._protocol_engine.state_view.commands.get_all()
- return RunResult(commands=commands, state_summary=run_data)
+ return RunResult(commands=commands, state_summary=run_data, parameters=[])
+
+ async def _add_command_and_execute(self) -> None:
+ for command in self._queued_commands:
+ result = await self._protocol_engine.add_and_execute_command(command)
+ if result and result.error:
+ raise ProtocolCommandFailedError(
+ original_error=result.error,
+ message=f"{result.error.errorType}: {result.error.detail}",
+ )
class LiveRunner(AbstractRunner):
@@ -309,7 +381,9 @@ def __init__(
# TODO(mc, 2022-01-11): replace task queue with specific implementations
# of runner interface
self._hardware_api = hardware_api
- self._task_queue = task_queue or TaskQueue(cleanup_func=protocol_engine.finish)
+ self._task_queue = task_queue or TaskQueue()
+ self._task_queue.set_cleanup_func(func=protocol_engine.finish)
+
self._hardware_api.should_taskify_movement_execution(taskify=False)
def prepare(self) -> None:
@@ -318,17 +392,19 @@ def prepare(self) -> None:
async def run( # noqa: D102
self,
+ deck_configuration: DeckConfigurationType,
protocol_source: Optional[ProtocolSource] = None,
+ run_time_param_values: Optional[RunTimeParamValuesType] = None,
) -> RunResult:
assert protocol_source is None
await self._hardware_api.home()
- self.play()
+ self.play(deck_configuration=deck_configuration)
self._task_queue.start()
await self._task_queue.join()
run_data = self._protocol_engine.state_view.get_summary()
commands = self._protocol_engine.state_view.commands.get_all()
- return RunResult(commands=commands, state_summary=run_data)
+ return RunResult(commands=commands, state_summary=run_data, parameters=[])
AnyRunner = Union[PythonAndLegacyRunner, JsonRunner, LiveRunner]
@@ -344,6 +420,8 @@ def create_protocol_runner(
legacy_file_reader: Optional[LegacyFileReader] = None,
legacy_context_creator: Optional[LegacyContextCreator] = None,
legacy_executor: Optional[LegacyExecutor] = None,
+ post_run_hardware_state: PostRunHardwareState = PostRunHardwareState.HOME_AND_STAY_ENGAGED,
+ drop_tips_after_run: bool = True,
) -> AnyRunner:
"""Create a protocol runner."""
if protocol_config:
@@ -357,6 +435,8 @@ def create_protocol_runner(
json_file_reader=json_file_reader,
json_translator=json_translator,
task_queue=task_queue,
+ post_run_hardware_state=post_run_hardware_state,
+ drop_tips_after_run=drop_tips_after_run,
)
else:
return PythonAndLegacyRunner(
@@ -366,6 +446,8 @@ def create_protocol_runner(
legacy_file_reader=legacy_file_reader,
legacy_context_creator=legacy_context_creator,
legacy_executor=legacy_executor,
+ post_run_hardware_state=post_run_hardware_state,
+ drop_tips_after_run=drop_tips_after_run,
)
return LiveRunner(
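With these changes, every runner's `run()` takes a deck configuration and optional run-time parameter values, and `RunResult` carries the resolved parameters. A hedged usage sketch; `deck_config` and `param_values` are placeholder values supplied by the caller:

from opentrons.protocol_runner.protocol_runner import AnyRunner, RunResult


async def run_and_summarize(runner: AnyRunner, deck_config, param_values) -> RunResult:
    result = await runner.run(
        deck_configuration=deck_config,
        run_time_param_values=param_values,  # only meaningful for Python protocols
    )
    print(f"{len(result.commands)} commands, {len(result.parameters)} run-time parameters")
    return result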
diff --git a/api/src/opentrons/protocol_runner/task_queue.py b/api/src/opentrons/protocol_runner/task_queue.py
index e79dc097aa1..841ba6fb60a 100644
--- a/api/src/opentrons/protocol_runner/task_queue.py
+++ b/api/src/opentrons/protocol_runner/task_queue.py
@@ -1,23 +1,12 @@
"""Asynchronous task queue to accomplish a protocol run."""
import asyncio
import logging
-from functools import partial
-from typing import Any, Awaitable, Callable, Optional
-from typing_extensions import Protocol as Callback
-
+from typing import Any, Awaitable, Callable, Optional, ParamSpec, Concatenate
log = logging.getLogger(__name__)
-
-class CleanupFunc(Callback):
- """Expected cleanup function signature."""
-
- def __call__(self, error: Optional[Exception]) -> Any:
- """Cleanup, optionally taking an error thrown.
-
- Return value will not be used.
- """
- ...
+CleanupFuncInput = ParamSpec("CleanupFuncInput")
+RunFuncInput = ParamSpec("RunFuncInput")
class TaskQueue:
@@ -26,29 +15,52 @@ class TaskQueue:
Once started, a TaskQueue may not be re-used.
"""
- def __init__(self, cleanup_func: CleanupFunc) -> None:
- """Initialize the TaskQueue.
-
- Args:
- cleanup_func: A function to call at run function completion
- with any error raised by the run function.
- """
- self._cleanup_func: CleanupFunc = cleanup_func
+    def __init__(self) -> None:
+ """Initialize the TaskQueue."""
+ self._cleanup_func: Optional[
+ Callable[[Optional[Exception]], Awaitable[Any]]
+ ] = None
self._run_func: Optional[Callable[[], Any]] = None
self._run_task: Optional["asyncio.Task[None]"] = None
self._ok_to_join_event: asyncio.Event = asyncio.Event()
+ def set_cleanup_func(
+ self,
+ func: Callable[
+ Concatenate[Optional[Exception], CleanupFuncInput], Awaitable[Any]
+ ],
+ *args: CleanupFuncInput.args,
+ **kwargs: CleanupFuncInput.kwargs,
+ ) -> None:
+ """Add the protocol cleanup task to the queue.
+
+ The "cleanup" task will be run after the "run" task.
+ """
+
+ async def _do_cleanup(error: Optional[Exception]) -> None:
+ await func(error, *args, **kwargs)
+
+ self._cleanup_func = _do_cleanup
+
def set_run_func(
self,
- func: Callable[..., Awaitable[Any]],
- **kwargs: Any,
+ func: Callable[RunFuncInput, Awaitable[Any]],
+ *args: RunFuncInput.args,
+ **kwargs: RunFuncInput.kwargs,
) -> None:
"""Add the protocol run task to the queue.
The "run" task will be run first, before the "cleanup" task.
"""
- self._run_func = partial(func, **kwargs)
+
+ async def _do_run() -> None:
+ await func(*args, **kwargs)
+
+ self._run_func = _do_run
def start(self) -> None:
"""Start running tasks in the queue."""
@@ -74,4 +86,5 @@ async def _run(self) -> None:
log.exception("Exception raised by protocol")
error = e
- await self._cleanup_func(error=error)
+ if self._cleanup_func is not None:
+ await self._cleanup_func(error)
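The rewritten `TaskQueue` relies on `ParamSpec` and `Concatenate` to keep the wrapped callables fully typed while still accepting arbitrary extra arguments. The same pattern in isolation (a sketch mirroring `set_cleanup_func`, not the module's code):

from typing import Awaitable, Callable, Concatenate, Optional, ParamSpec

P = ParamSpec("P")


def bind_cleanup(
    func: Callable[Concatenate[Optional[Exception], P], Awaitable[None]],
    *args: P.args,
    **kwargs: P.kwargs,
) -> Callable[[Optional[Exception]], Awaitable[None]]:
    # Close over the extra arguments so the caller only supplies the error later.
    async def _do_cleanup(error: Optional[Exception]) -> None:
        await func(error, *args, **kwargs)

    return _do_cleanup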
diff --git a/api/src/opentrons/protocols/advanced_control/transfers.py b/api/src/opentrons/protocols/advanced_control/transfers.py
index 8f513a3afff..df1c6961be6 100644
--- a/api/src/opentrons/protocols/advanced_control/transfers.py
+++ b/api/src/opentrons/protocols/advanced_control/transfers.py
@@ -13,10 +13,11 @@
TYPE_CHECKING,
TypeVar,
)
-from opentrons.protocol_api.labware import Well
+from opentrons.protocol_api.labware import Labware, Well
from opentrons import types
from opentrons.protocols.api_support.types import APIVersion
+
if TYPE_CHECKING:
from opentrons.protocol_api import InstrumentContext
from opentrons.protocols.execution.dev_types import Dictable
@@ -800,9 +801,13 @@ def _after_dispense(self, dest, src, is_disp_next=False): # noqa: C901
self._strategy.blow_out_strategy == BlowOutStrategy.TRASH
or self._strategy.disposal_volume
):
- yield self._format_dict(
- "blow_out", [self._instr.trash_container.wells()[0]]
- )
+ if isinstance(self._instr.trash_container, Labware):
+ yield self._format_dict(
+ "blow_out", [self._instr.trash_container.wells()[0]]
+ )
+ else:
+ yield self._format_dict("blow_out", [self._instr.trash_container])
+
else:
# Used by distribute
if self._strategy.air_gap:
diff --git a/api/src/opentrons/protocols/api_support/deck_type.py b/api/src/opentrons/protocols/api_support/deck_type.py
index f0cadebce43..4bd70c5fc28 100644
--- a/api/src/opentrons/protocols/api_support/deck_type.py
+++ b/api/src/opentrons/protocols/api_support/deck_type.py
@@ -45,15 +45,30 @@ def __init__(
)
-def should_load_fixed_trash_for_python_protocol(api_version: APIVersion) -> bool:
+def should_load_fixed_trash_labware_for_python_protocol(
+ api_version: APIVersion,
+) -> bool:
+ """Whether to automatically load the fixed trash as a labware for a Python protocol at protocol start."""
return api_version <= LOAD_FIXED_TRASH_GATE_VERSION_PYTHON
+def should_load_fixed_trash_area_for_python_protocol(
+ api_version: APIVersion, robot_type: RobotType
+) -> bool:
+ """Whether to automatically load the fixed trash addressable area for OT-2 protocols on 2.16 and above."""
+ return (
+ api_version > LOAD_FIXED_TRASH_GATE_VERSION_PYTHON
+ and robot_type == "OT-2 Standard"
+ )
+
+
def should_load_fixed_trash(protocol_config: ProtocolConfig) -> bool:
- """Decide whether to automatically load fixed trash on the deck based on version."""
+ """Decide whether to automatically load fixed trash labware on the deck based on version."""
load_fixed_trash = False
if isinstance(protocol_config, PythonProtocolConfig):
- return should_load_fixed_trash_for_python_protocol(protocol_config.api_version)
+ return should_load_fixed_trash_labware_for_python_protocol(
+ protocol_config.api_version
+ )
# TODO(jbl 2023-10-27), when schema v8 is out, use a new deck version field to support fixed trash protocols
elif isinstance(protocol_config, JsonProtocolConfig):
load_fixed_trash = (
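A hedged sketch of how the two helpers above split responsibility, assuming the gate version is 2.15 (inferred from the docstrings) and using the standard robot-type strings:

from opentrons.protocols.api_support.deck_type import (
    should_load_fixed_trash_area_for_python_protocol,
    should_load_fixed_trash_labware_for_python_protocol,
)
from opentrons.protocols.api_support.types import APIVersion

# 2.15 and older: the fixed trash is still loaded as labware.
assert should_load_fixed_trash_labware_for_python_protocol(APIVersion(2, 15))
# 2.16 and newer on an OT-2: it becomes an addressable area instead.
assert should_load_fixed_trash_area_for_python_protocol(APIVersion(2, 16), "OT-2 Standard")
assert not should_load_fixed_trash_area_for_python_protocol(APIVersion(2, 16), "OT-3 Standard")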
diff --git a/api/src/opentrons/protocols/api_support/definitions.py b/api/src/opentrons/protocols/api_support/definitions.py
index 9c720d0b0e9..483f95f4801 100644
--- a/api/src/opentrons/protocols/api_support/definitions.py
+++ b/api/src/opentrons/protocols/api_support/definitions.py
@@ -1,6 +1,6 @@
from .types import APIVersion
-MAX_SUPPORTED_VERSION = APIVersion(2, 16)
+MAX_SUPPORTED_VERSION = APIVersion(2, 19)
"""The maximum supported protocol API version in this release."""
MIN_SUPPORTED_VERSION = APIVersion(2, 0)
diff --git a/api/src/opentrons/protocols/api_support/instrument.py b/api/src/opentrons/protocols/api_support/instrument.py
index 297cd3d456b..d6d9613b1cf 100644
--- a/api/src/opentrons/protocols/api_support/instrument.py
+++ b/api/src/opentrons/protocols/api_support/instrument.py
@@ -73,11 +73,14 @@ def tip_length_for(
VALID_PIP_TIPRACK_VOL = {
- "p10": [10, 20],
- "p20": [10, 20],
- "p50": [50, 200, 300],
- "p300": [200, 300],
- "p1000": [1000],
+ "FLEX": {"p50": [50], "p1000": [50, 200, 1000]},
+ "OT2": {
+ "p10": [10, 20],
+ "p20": [10, 20],
+ "p50": [50, 200, 300],
+ "p300": [200, 300],
+ "p1000": [1000],
+ },
}
@@ -92,7 +95,11 @@ def validate_tiprack(
# tipracks to the pipette as a refactor
if tip_rack.uri.startswith("opentrons/"):
tiprack_vol = tip_rack.wells()[0].max_volume
- valid_vols = VALID_PIP_TIPRACK_VOL[instrument_name.split("_")[0]]
+ instr_metadata = instrument_name.split("_")
+ gen_lookup = (
+ "FLEX" if ("flex" in instr_metadata or "96" in instr_metadata) else "OT2"
+ )
+ valid_vols = VALID_PIP_TIPRACK_VOL[gen_lookup][instrument_name.split("_")[0]]
if tiprack_vol not in valid_vols:
log.warning(
f"The pipette {instrument_name} and its tip rack {tip_rack.load_name}"
diff --git a/api/src/opentrons/protocols/duration/estimator.py b/api/src/opentrons/protocols/duration/estimator.py
index 6f481c29772..5e3b6ef2663 100644
--- a/api/src/opentrons/protocols/duration/estimator.py
+++ b/api/src/opentrons/protocols/duration/estimator.py
@@ -7,7 +7,7 @@
from dataclasses import dataclass
-from opentrons.commands import types
+from opentrons.legacy_commands import types
from opentrons.protocols.api_support.deck_type import (
guess_from_global_config as guess_deck_type_from_global_config,
)
diff --git a/api/src/opentrons/protocols/execution/execute.py b/api/src/opentrons/protocols/execution/execute.py
index ea8ef6163e9..4619e1ae08d 100644
--- a/api/src/opentrons/protocols/execution/execute.py
+++ b/api/src/opentrons/protocols/execution/execute.py
@@ -1,6 +1,8 @@
import logging
+from typing import Optional
-from opentrons.protocol_api import ProtocolContext
+from opentrons.protocol_api import ProtocolContext, ParameterContext
+from opentrons.protocol_engine.types import RunTimeParamValuesType
from opentrons.protocols.execution.execute_python import run_python
from opentrons.protocols.execution.json_dispatchers import (
pipette_command_map,
@@ -16,15 +18,31 @@
MODULE_LOG = logging.getLogger(__name__)
-def run_protocol(protocol: Protocol, context: ProtocolContext) -> None:
+def run_protocol(
+ protocol: Protocol,
+ context: ProtocolContext,
+ parameter_context: Optional[ParameterContext] = None,
+ run_time_param_overrides: Optional[RunTimeParamValuesType] = None,
+) -> None:
"""Run a protocol.
:param protocol: The :py:class:`.protocols.types.Protocol` to execute
- :param context: The context to use.
+ :param context: The protocol context to use.
+ :param parameter_context: The parameter context to use.
+    :param run_time_param_overrides: Any parameter values that override the protocol's defaults.
"""
if isinstance(protocol, PythonProtocol):
if protocol.api_level >= APIVersion(2, 0):
- run_python(protocol, context)
+            # If this is None, we are running via simulate or execute, in which case parameters
+            # don't need to be reported in analysis (the only reason to pass a context in here).
+ if parameter_context is None:
+ parameter_context = ParameterContext(protocol.api_level)
+ run_python(
+ proto=protocol,
+ context=context,
+ parameter_context=parameter_context,
+ run_time_param_overrides=run_time_param_overrides,
+ )
else:
raise RuntimeError(f"Unsupported python API version: {protocol.api_level}")
else:
diff --git a/api/src/opentrons/protocols/execution/execute_python.py b/api/src/opentrons/protocols/execution/execute_python.py
index cf5f3303cbe..f33f70d7a4b 100644
--- a/api/src/opentrons/protocols/execution/execute_python.py
+++ b/api/src/opentrons/protocols/execution/execute_python.py
@@ -3,12 +3,16 @@
import logging
import traceback
import sys
-from typing import Any, Dict
+from typing import Any, Dict, Optional
from opentrons.drivers.smoothie_drivers.errors import SmoothieAlarm
-from opentrons.protocol_api import ProtocolContext
+from opentrons.protocol_api import ProtocolContext, ParameterContext
+from opentrons.protocol_api._parameters import Parameters
from opentrons.protocols.execution.errors import ExceptionInProtocolError
from opentrons.protocols.types import PythonProtocol, MalformedPythonProtocolError
+from opentrons.protocol_engine.types import RunTimeParamValuesType
+
+
from opentrons_shared_data.errors.exceptions import ExecutionCancelledError
MODULE_LOG = logging.getLogger(__name__)
@@ -29,6 +33,14 @@ def _runfunc_ok(run_func: Any):
)
+def _add_parameters_func_ok(add_parameters_func: Any) -> None:
+ if not callable(add_parameters_func):
+ raise SyntaxError("'add_parameters' must be a function.")
+ sig = inspect.Signature.from_callable(add_parameters_func)
+ if len(sig.parameters) != 1:
+ raise SyntaxError("Function 'add_parameters' must take exactly one argument.")
+
+
def _find_protocol_error(tb, proto_name):
"""Return the FrameInfo for the lowest frame in the traceback from the
protocol.
@@ -41,7 +53,44 @@ def _find_protocol_error(tb, proto_name):
raise KeyError
-def run_python(proto: PythonProtocol, context: ProtocolContext):
+def _raise_pretty_protocol_error(exception: Exception, filename: str) -> None:
+ exc_type, exc_value, tb = sys.exc_info()
+ try:
+ frame = _find_protocol_error(tb, filename)
+ except KeyError:
+ # No pretty names, just raise it
+ raise exception
+ raise ExceptionInProtocolError(
+ exception, tb, str(exception), frame.lineno
+ ) from exception
+
+
+def _parse_and_set_parameters(
+ parameter_context: ParameterContext,
+ run_time_param_overrides: Optional[RunTimeParamValuesType],
+ new_globs: Dict[Any, Any],
+ filename: str,
+) -> Parameters:
+ try:
+ _add_parameters_func_ok(new_globs.get("add_parameters"))
+ except SyntaxError as se:
+ raise MalformedPythonProtocolError(str(se))
+ new_globs["__param_context"] = parameter_context
+ try:
+ exec("add_parameters(__param_context)", new_globs)
+ if run_time_param_overrides is not None:
+ parameter_context.set_parameters(run_time_param_overrides)
+ except Exception as e:
+ _raise_pretty_protocol_error(exception=e, filename=filename)
+ return parameter_context.export_parameters_for_protocol()
+
+
+def run_python(
+ proto: PythonProtocol,
+ context: ProtocolContext,
+ parameter_context: ParameterContext,
+ run_time_param_overrides: Optional[RunTimeParamValuesType] = None,
+) -> None:
new_globs: Dict[Any, Any] = {}
exec(proto.contents, new_globs)
# If the protocol is written correctly, it will have defined a function
@@ -60,10 +109,16 @@ def run_python(proto: PythonProtocol, context: ProtocolContext):
# AST filename.
filename = proto.filename or ""
+ if new_globs.get("add_parameters"):
+ context._params = _parse_and_set_parameters(
+ parameter_context, run_time_param_overrides, new_globs, filename
+ )
+
try:
_runfunc_ok(new_globs.get("run"))
except SyntaxError as se:
raise MalformedPythonProtocolError(str(se))
+
new_globs["__context"] = context
try:
exec("run(__context)", new_globs)
@@ -75,10 +130,4 @@ def run_python(proto: PythonProtocol, context: ProtocolContext):
# this is a protocol cancel and shouldn't have special logging
raise
except Exception as e:
- exc_type, exc_value, tb = sys.exc_info()
- try:
- frame = _find_protocol_error(tb, filename)
- except KeyError:
- # No pretty names, just raise it
- raise e
- raise ExceptionInProtocolError(e, tb, str(e), frame.lineno) from e
+ _raise_pretty_protocol_error(exception=e, filename=filename)
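For context, the protocol-side shape `_parse_and_set_parameters` expects: a module-level `add_parameters` function taking exactly one argument (the `ParameterContext`), alongside the usual `run`. The `add_int` method name and the `context.params` attribute are assumptions made for this sketch:

metadata = {"apiLevel": "2.18"}


def add_parameters(parameters):
    # Called once before run(), with the ParameterContext as its only argument.
    parameters.add_int(
        display_name="Sample count",
        variable_name="sample_count",
        default=8,
        minimum=1,
        maximum=96,
    )


def run(context):
    # The resolved values are attached to the protocol context before run() executes.
    context.comment(f"Running with {context.params.sample_count} samples")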
diff --git a/api/src/opentrons/protocols/models/__init__.py b/api/src/opentrons/protocols/models/__init__.py
index d5104e6dcea..62eccdf44ff 100644
--- a/api/src/opentrons/protocols/models/__init__.py
+++ b/api/src/opentrons/protocols/models/__init__.py
@@ -12,14 +12,10 @@
LabwareDefinition,
WellDefinition,
)
-from opentrons_shared_data.deck.deck_definitions import (
- DeckDefinitionV4,
-)
from .json_protocol import Model as JsonProtocol
__all__ = [
"LabwareDefinition",
"WellDefinition",
- "DeckDefinitionV4",
"JsonProtocol",
]
diff --git a/api/src/opentrons/protocols/models/json_protocol.py b/api/src/opentrons/protocols/models/json_protocol.py
index c600f03ca8c..6cd7c32aa2d 100644
--- a/api/src/opentrons/protocols/models/json_protocol.py
+++ b/api/src/opentrons/protocols/models/json_protocol.py
@@ -673,7 +673,7 @@ class Model(BaseModel):
None, description="All modules used in this protocol"
)
commands: List[AllCommands] = Field(
- None,
+ ...,
description="An array of command objects representing steps to be executed "
"on the robot",
)
diff --git a/api/src/opentrons/config/containers/__init__.py b/api/src/opentrons/protocols/parameters/__init__.py
similarity index 100%
rename from api/src/opentrons/config/containers/__init__.py
rename to api/src/opentrons/protocols/parameters/__init__.py
diff --git a/api/src/opentrons/protocols/parameters/parameter_definition.py b/api/src/opentrons/protocols/parameters/parameter_definition.py
new file mode 100644
index 00000000000..2ad5eed3138
--- /dev/null
+++ b/api/src/opentrons/protocols/parameters/parameter_definition.py
@@ -0,0 +1,248 @@
+"""Parameter definition and associated validators."""
+
+from typing import Generic, Optional, List, Set, Union, get_args
+
+from opentrons.protocols.parameters.types import (
+ ParamType,
+ ParameterChoice,
+ AllowedTypes,
+ ParameterDefinitionError,
+ ParameterValueError,
+)
+from opentrons.protocols.parameters import validation
+from opentrons.protocol_engine.types import (
+ RunTimeParameter,
+ NumberParameter,
+ BooleanParameter,
+ EnumParameter,
+ EnumChoice,
+)
+
+
+class ParameterDefinition(Generic[ParamType]):
+ """The definition for a user defined parameter."""
+
+ def __init__(
+ self,
+ display_name: str,
+ variable_name: str,
+ parameter_type: type,
+ default: ParamType,
+ minimum: Optional[ParamType] = None,
+ maximum: Optional[ParamType] = None,
+ choices: Optional[List[ParameterChoice]] = None,
+ description: Optional[str] = None,
+ unit: Optional[str] = None,
+ ) -> None:
+ """Initializes a parameter.
+
+ This stores the type, default values, range or list of possible values, and other information
+ that is defined when a parameter is created for a protocol, as well as validators for setting
+ a non-default value for the parameter.
+
+ Arguments:
+ display_name: The display name of the parameter as it would show up on the frontend.
+ variable_name: The variable name the parameter will be referred to in the run context.
+ parameter_type: Can be bool, int, float or str. Must match the type of default and all choices or
+ min and max values
+ default: The default value the parameter is set to. This will be used in initial analysis.
+ minimum: The minimum value the parameter can be set to (inclusive). Mutually exclusive with choices.
+ maximum: The maximum value the parameter can be set to (inclusive). Mutually exclusive with choices.
+ choices: A sequence of possible choices that this parameter can be set to.
+ Mutually exclusive with minimum and maximum.
+ description: An optional description for the parameter.
+ unit: An optional suffix for float and int type parameters.
+ """
+ self._display_name = validation.ensure_display_name(display_name)
+ self._variable_name = validation.ensure_variable_name(variable_name)
+ self._description = validation.ensure_description(description)
+ self._unit = validation.ensure_unit_string_length(unit)
+
+ if parameter_type not in get_args(AllowedTypes):
+ raise ParameterDefinitionError(
+ "Parameters can only be of type int, float, str, or bool."
+ )
+ self._type = parameter_type
+
+ self._choices: Optional[List[ParameterChoice]] = choices
+ self._allowed_values: Optional[Set[AllowedTypes]] = None
+
+ self._minimum: Optional[Union[int, float]] = None
+ self._maximum: Optional[Union[int, float]] = None
+
+ validation.validate_options(default, minimum, maximum, choices, parameter_type)
+ if choices is not None:
+ self._allowed_values = {choice["value"] for choice in choices}
+ else:
+ assert isinstance(minimum, (int, float)) and isinstance(
+ maximum, (int, float)
+ )
+ self._minimum = minimum
+ self._maximum = maximum
+
+ self._default: ParamType = default
+ self.value: ParamType = default
+
+ @property
+ def value(self) -> ParamType:
+ """The current value of the parameter."""
+ return self._value
+
+ @value.setter
+ def value(self, new_value: ParamType) -> None:
+ validation.validate_type(new_value, self._type)
+ if self._allowed_values is not None and new_value not in self._allowed_values:
+ raise ParameterValueError(
+ f"Parameter must be set to one of the allowed values of {self._allowed_values}."
+ )
+ elif (
+ isinstance(self._minimum, (int, float))
+ and isinstance(self._maximum, (int, float))
+ and isinstance(new_value, (int, float))
+ and not (self._minimum <= new_value <= self._maximum)
+ ):
+ raise ParameterValueError(
+ f"Parameter must be between {self._minimum} and {self._maximum} inclusive."
+ )
+ self._value = new_value
+
+ @property
+ def variable_name(self) -> str:
+ """The in-protocol variable name of the parameter."""
+ return self._variable_name
+
+ @property
+ def parameter_type(self) -> type:
+ """The python type of the parameter."""
+ return self._type
+
+ def as_protocol_engine_type(self) -> RunTimeParameter:
+ """Returns parameter as a Protocol Engine type to send to client."""
+ parameter: RunTimeParameter
+ if self._type is bool:
+ parameter = BooleanParameter(
+ displayName=self._display_name,
+ variableName=self._variable_name,
+ description=self._description,
+ value=bool(self._value),
+ default=bool(self._default),
+ )
+ elif self._choices is not None:
+ choices = [
+ EnumChoice(
+ displayName=str(choice["display_name"]),
+ value=choice["value"],
+ )
+ for choice in self._choices
+ ]
+ parameter = EnumParameter(
+ type=validation.convert_type_string_for_enum(self._type),
+ displayName=self._display_name,
+ variableName=self._variable_name,
+ description=self._description,
+ choices=choices,
+ value=self._value,
+ default=self._default,
+ )
+ elif self._minimum is not None and self._maximum is not None:
+ parameter = NumberParameter(
+ type=validation.convert_type_string_for_num_param(self._type),
+ displayName=self._display_name,
+ variableName=self._variable_name,
+ description=self._description,
+ suffix=self._unit,
+ min=float(self._minimum),
+ max=float(self._maximum),
+ value=float(self._value),
+ default=float(self._default),
+ )
+ else:
+ raise ParameterDefinitionError(
+ f"Cannot resolve parameter {self._display_name} to protocol engine type."
+ )
+
+ return parameter
+
+
+def create_int_parameter(
+ display_name: str,
+ variable_name: str,
+ default: int,
+ minimum: Optional[int] = None,
+ maximum: Optional[int] = None,
+ choices: Optional[List[ParameterChoice]] = None,
+ description: Optional[str] = None,
+ unit: Optional[str] = None,
+) -> ParameterDefinition[int]:
+ """Creates an integer parameter."""
+ return ParameterDefinition(
+ parameter_type=int,
+ display_name=display_name,
+ variable_name=variable_name,
+ default=default,
+ minimum=minimum,
+ maximum=maximum,
+ choices=choices,
+ description=description,
+ unit=unit,
+ )
+
+
+def create_float_parameter(
+ display_name: str,
+ variable_name: str,
+ default: float,
+ minimum: Optional[float] = None,
+ maximum: Optional[float] = None,
+ choices: Optional[List[ParameterChoice]] = None,
+ description: Optional[str] = None,
+ unit: Optional[str] = None,
+) -> ParameterDefinition[float]:
+ """Creates a float parameter."""
+ return ParameterDefinition(
+ parameter_type=float,
+ display_name=display_name,
+ variable_name=variable_name,
+ default=default,
+ minimum=minimum,
+ maximum=maximum,
+ choices=choices,
+ description=description,
+ unit=unit,
+ )
+
+
+def create_bool_parameter(
+ display_name: str,
+ variable_name: str,
+ default: bool,
+ choices: List[ParameterChoice],
+ description: Optional[str] = None,
+) -> ParameterDefinition[bool]:
+ """Creates a boolean parameter."""
+ return ParameterDefinition(
+ parameter_type=bool,
+ display_name=display_name,
+ variable_name=variable_name,
+ default=default,
+ choices=choices,
+ description=description,
+ )
+
+
+def create_str_parameter(
+ display_name: str,
+ variable_name: str,
+ default: str,
+ choices: Optional[List[ParameterChoice]] = None,
+ description: Optional[str] = None,
+) -> ParameterDefinition[str]:
+ """Creates a string parameter."""
+ return ParameterDefinition(
+ parameter_type=str,
+ display_name=display_name,
+ variable_name=variable_name,
+ default=default,
+ choices=choices,
+ description=description,
+ )
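A short usage sketch for the factory helpers above (the values are arbitrary examples):

from opentrons.protocols.parameters.parameter_definition import create_int_parameter

volume = create_int_parameter(
    display_name="Mix volume",
    variable_name="mix_volume",
    default=50,
    minimum=10,
    maximum=200,
    unit="uL",
)
volume.value = 100  # validated against the 10-200 range and the int type
engine_param = volume.as_protocol_engine_type()  # a NumberParameter for the client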
diff --git a/api/src/opentrons/protocols/parameters/types.py b/api/src/opentrons/protocols/parameters/types.py
new file mode 100644
index 00000000000..7edf0c941d5
--- /dev/null
+++ b/api/src/opentrons/protocols/parameters/types.py
@@ -0,0 +1,25 @@
+from typing import TypeVar, Union, TypedDict
+
+
+AllowedTypes = Union[str, int, float, bool]
+
+ParamType = TypeVar("ParamType", bound=AllowedTypes)
+
+
+class ParameterChoice(TypedDict):
+ """A parameter choice containing the display name and value."""
+
+ display_name: str
+ value: AllowedTypes
+
+
+class ParameterValueError(ValueError):
+ """An error raised when a parameter value is not valid."""
+
+
+class ParameterDefinitionError(ValueError):
+ """An error raised when a parameter definition value is not valid."""
+
+
+class ParameterNameError(ValueError):
+ """An error raised when a parameter name or description is not valid."""
diff --git a/api/src/opentrons/protocols/parameters/validation.py b/api/src/opentrons/protocols/parameters/validation.py
new file mode 100644
index 00000000000..8e7a0bed8ad
--- /dev/null
+++ b/api/src/opentrons/protocols/parameters/validation.py
@@ -0,0 +1,266 @@
+import keyword
+from typing import List, Set, Optional, Union, Literal
+
+from .types import (
+ AllowedTypes,
+ ParamType,
+ ParameterChoice,
+ ParameterNameError,
+ ParameterValueError,
+ ParameterDefinitionError,
+)
+
+
+UNIT_MAX_LEN = 10
+DISPLAY_NAME_MAX_LEN = 30
+DESCRIPTION_MAX_LEN = 100
+
+
+def validate_variable_name_unique(
+ variable_name: str, other_variable_names: Set[str]
+) -> None:
+ """Validate that the given variable name is unique."""
+ if isinstance(variable_name, str) and variable_name in other_variable_names:
+ raise ParameterNameError(
+ f'"{variable_name}" is already defined as a variable name for another parameter.'
+ f" All variable names must be unique."
+ )
+
+
+def ensure_display_name(display_name: str) -> str:
+ """Validate display name is within the character limit."""
+ if not isinstance(display_name, str):
+ raise ParameterNameError(
+ f"Display name must be a string and at most {DISPLAY_NAME_MAX_LEN} characters."
+ )
+ if len(display_name) > DISPLAY_NAME_MAX_LEN:
+ raise ParameterNameError(
+            f'Display name "{display_name}" is longer than {DISPLAY_NAME_MAX_LEN} characters.'
+ )
+ return display_name
+
+
+def ensure_variable_name(variable_name: str) -> str:
+ """Validate variable name is a valid python variable name."""
+ if not isinstance(variable_name, str):
+ raise ParameterNameError("Variable name must be a string.")
+ if not variable_name.isidentifier():
+ raise ParameterNameError(
+ "Variable name must only contain alphanumeric characters, underscores, and cannot start with a digit."
+ )
+ if keyword.iskeyword(variable_name):
+ raise ParameterNameError("Variable name cannot be a reserved Python keyword.")
+ return variable_name
+
+
+def ensure_description(description: Optional[str]) -> Optional[str]:
+ """Validate description is within the character limit."""
+ if description is not None:
+ if not isinstance(description, str):
+ raise ParameterNameError(
+ f"Description must be a string and at most {DESCRIPTION_MAX_LEN} characters."
+ )
+ if len(description) > DESCRIPTION_MAX_LEN:
+ raise ParameterNameError(
+                f'Description "{description}" is longer than {DESCRIPTION_MAX_LEN} characters.'
+ )
+ return description
+
+
+def ensure_unit_string_length(unit: Optional[str]) -> Optional[str]:
+ """Validate unit is within the character limit."""
+ if unit is not None:
+ if not isinstance(unit, str):
+ raise ParameterNameError(
+ f"Unit must be a string and at most {UNIT_MAX_LEN} characters."
+ )
+ if len(unit) > UNIT_MAX_LEN:
+ raise ParameterNameError(
+                f'Unit "{unit}" is longer than {UNIT_MAX_LEN} characters.'
+ )
+ return unit
+
+
+def ensure_value_type(
+ value: Union[float, bool, str], parameter_type: type
+) -> AllowedTypes:
+ """Ensures that the value type coming in from the client matches the given type.
+
+    This does not guarantee that the value will be the correct type for the given parameter, only that incoming
+    data is in the format we expect. For now, the only transformations are: integers represented as floats become
+    ints, 1.0/0.0 become True/False for boolean parameters, and ints become floats for float parameters.
+
+ If something is labelled as a type but does not get converted here, that will be caught when it is attempted to be
+ set as the parameter value and will raise the appropriate error there.
+ """
+ validated_value: AllowedTypes = value
+ if isinstance(value, float):
+ if parameter_type is bool and (value == 0 or value == 1):
+ validated_value = bool(value)
+ elif parameter_type is int and value.is_integer():
+ validated_value = int(value)
+ elif (
+ isinstance(value, int)
+ and not isinstance(value, bool)
+ and parameter_type is float
+ ):
+ validated_value = float(value)
+ return validated_value
+
+
+def ensure_float_value(value: Union[float, int]) -> float:
+ """Ensures that if we are expecting a float and receive an int, that will be converted to a float."""
+ if not isinstance(value, bool) and isinstance(value, int):
+ return float(value)
+ return value
+
+
+def ensure_optional_float_value(value: Optional[Union[float, int]]) -> Optional[float]:
+ """Ensures that if we are expecting an optional float and receive an int, that will be converted to a float."""
+ if not isinstance(value, bool) and isinstance(value, int):
+ return float(value)
+ return value
+
+
+def ensure_float_choices(
+ choices: Optional[List[ParameterChoice]],
+) -> Optional[List[ParameterChoice]]:
+ """Ensures that if we are expecting float parameter choices and any are int types, those will be converted."""
+ if choices is not None:
+ return [
+ ParameterChoice(
+ display_name=choice["display_name"],
+ # Type ignore because if for some reason this is a str or bool, that will raise in `validate_options`
+ value=ensure_float_value(choice["value"]), # type: ignore[arg-type]
+ )
+ for choice in choices
+ ]
+ return choices
+
+
+def convert_type_string_for_enum(
+ parameter_type: type,
+) -> Literal["int", "float", "str"]:
+ """Converts a type object into a string for an enumerated parameter."""
+ if parameter_type is int:
+ return "int"
+ elif parameter_type is float:
+ return "float"
+ elif parameter_type is str:
+ return "str"
+ else:
+ raise ParameterValueError(
+ f"Cannot resolve parameter type '{parameter_type.__name__}' for an enumerated parameter."
+ )
+
+
+def convert_type_string_for_num_param(parameter_type: type) -> Literal["int", "float"]:
+ """Converts a type object into a string for a number parameter."""
+ if parameter_type is int:
+ return "int"
+ elif parameter_type is float:
+ return "float"
+ else:
+ raise ParameterValueError(
+ f"Cannot resolve parameter type '{parameter_type.__name__}' for a number parameter."
+ )
+
+
+def _validate_choices(
+ minimum: Optional[ParamType],
+ maximum: Optional[ParamType],
+ choices: List[ParameterChoice],
+ parameter_type: type,
+) -> None:
+ """Validate that min and max is not defined and all choices are properly formatted."""
+ if minimum is not None or maximum is not None:
+ raise ParameterDefinitionError(
+ "If choices are provided minimum and maximum values cannot be provided."
+ )
+ for choice in choices:
+ try:
+ display_name = choice["display_name"]
+ value = choice["value"]
+ except KeyError:
+ raise ParameterDefinitionError(
+ "All choices must be a dictionary with keys 'display_name' and 'value'."
+ )
+ ensure_display_name(display_name)
+ if not isinstance(value, parameter_type):
+ raise ParameterDefinitionError(
+ f"All choices provided must be of type '{parameter_type.__name__}'"
+ )
+
+
+def _validate_min_and_max(
+ minimum: Optional[ParamType],
+ maximum: Optional[ParamType],
+ parameter_type: type,
+) -> None:
+ """Validate the minium and maximum are both defined, the same type, and a valid range."""
+ if minimum is not None and maximum is None:
+ raise ParameterDefinitionError(
+ "If a minimum value is provided a maximum must also be provided."
+ )
+ elif maximum is not None and minimum is None:
+ raise ParameterDefinitionError(
+ "If a maximum value is provided a minimum must also be provided."
+ )
+ elif maximum is not None and minimum is not None:
+ if parameter_type is int or parameter_type is float:
+ if not isinstance(minimum, parameter_type):
+ raise ParameterDefinitionError(
+ f"Minimum is type '{type(minimum).__name__}',"
+ f" but must be of parameter type '{parameter_type.__name__}'"
+ )
+ if not isinstance(maximum, parameter_type):
+ raise ParameterDefinitionError(
+ f"Maximum is type '{type(maximum).__name__}',"
+ f" but must be of parameter type '{parameter_type.__name__}'"
+ )
+ # These asserts are for the type checker and should never actually be asserted false
+ assert isinstance(minimum, (int, float))
+ assert isinstance(maximum, (int, float))
+ if maximum < minimum:
+ raise ParameterDefinitionError(
+ "Maximum must be greater than the minimum"
+ )
+ else:
+ raise ParameterDefinitionError(
+ "Only parameters of type float or int can have a minimum and maximum."
+ )
+
+
+def validate_type(value: ParamType, parameter_type: type) -> None:
+ """Validate parameter value is the correct type."""
+ if not isinstance(value, parameter_type):
+ raise ParameterValueError(
+ f"Parameter value {value} has type '{type(value).__name__}',"
+ f" but must be of type '{parameter_type.__name__}'."
+ )
+
+
+def validate_options(
+ default: ParamType,
+ minimum: Optional[ParamType],
+ maximum: Optional[ParamType],
+ choices: Optional[List[ParameterChoice]],
+ parameter_type: type,
+) -> None:
+ """Validate default values and all possible constraints for a valid parameter definition."""
+ if not isinstance(default, parameter_type):
+ raise ParameterValueError(
+ f"Parameter default {default} has type '{type(default).__name__}',"
+ f" but must be of type '{parameter_type.__name__}'."
+ )
+
+ if choices is None and minimum is None and maximum is None:
+ raise ParameterDefinitionError(
+ "Must provide either choices or a minimum and maximum value"
+ )
+
+ if choices is not None:
+ _validate_choices(minimum, maximum, choices, parameter_type)
+ else:
+ _validate_min_and_max(minimum, maximum, parameter_type)
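The coercion and validation rules above, exercised directly (a sketch; the specific values are arbitrary):

from opentrons.protocols.parameters import validation
from opentrons.protocols.parameters.types import ParameterDefinitionError

assert validation.ensure_value_type(3.0, int) == 3      # integral float -> int
assert validation.ensure_value_type(1.0, bool) is True  # 1.0/0.0 -> bool
assert validation.ensure_value_type(2, float) == 2.0    # int -> float

try:
    validation.validate_options(
        default=5, minimum=1, maximum=None, choices=None, parameter_type=int
    )
except ParameterDefinitionError:
    pass  # a minimum without a maximum is rejected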
diff --git a/api/src/opentrons/protocols/parse.py b/api/src/opentrons/protocols/parse.py
index ee868912ed7..712b4fe4416 100644
--- a/api/src/opentrons/protocols/parse.py
+++ b/api/src/opentrons/protocols/parse.py
@@ -192,7 +192,9 @@ def version_from_string(vstr: str) -> APIVersion:
return APIVersion(major=int(matches.group(1)), minor=int(matches.group(2)))
-def _parse_json(protocol_contents: str, filename: Optional[str] = None) -> JsonProtocol:
+def _parse_json(
+ protocol_contents: Union[str, bytes], filename: Optional[str] = None
+) -> JsonProtocol:
"""Parse a protocol known or at least suspected to be json"""
protocol_json = json.loads(protocol_contents)
version, validated = validate_json(protocol_json)
@@ -208,7 +210,7 @@ def _parse_json(protocol_contents: str, filename: Optional[str] = None) -> JsonP
def _parse_python(
- protocol_contents: str,
+ protocol_contents: Union[str, bytes],
python_parse_mode: PythonParseMode,
filename: Optional[str] = None,
bundled_labware: Optional[Dict[str, "LabwareDefinition"]] = None,
@@ -338,28 +340,37 @@ def parse(
)
return result
else:
- if isinstance(protocol_file, bytes):
- protocol_str = protocol_file.decode("utf-8")
- else:
- protocol_str = protocol_file
-
if filename and filename.endswith(".json"):
- return _parse_json(protocol_str, filename)
+ return _parse_json(protocol_file, filename)
elif filename and filename.endswith(".py"):
return _parse_python(
- protocol_contents=protocol_str,
+ protocol_contents=protocol_file,
python_parse_mode=python_parse_mode,
filename=filename,
extra_labware=extra_labware,
bundled_data=extra_data,
)
- # our jsonschema says the top level json kind is object
- if protocol_str and protocol_str[0] in ("{", b"{"):
- return _parse_json(protocol_str, filename)
+ # our jsonschema says the top level json kind is object so we can
+ # rely on it starting with a { if it's valid. that could either be
+ # a string or bytes.
+ #
+ # if it's a string, then if the protocol file starts with a { and
+ # we do protocol_file[0] then we get the string "{".
+ #
+ # if it's a bytes, then if the protocol file starts with the ascii or
+ # utf-8 representation of { and we do protocol_file[0] we get 123,
+ # because while single elements of strings are strings, single elements
+ # of bytes are the byte value as a number.
+ #
+ # to get that number we could either use ord() or do what we do here
+ # which I think is a little nicer, if any of the above can be called
+ # "nice".
+ if protocol_file and protocol_file[0] in ("{", b"{"[0]):
+ return _parse_json(protocol_file, filename)
else:
return _parse_python(
- protocol_contents=protocol_str,
+ protocol_contents=protocol_file,
python_parse_mode=python_parse_mode,
filename=filename,
extra_labware=extra_labware,
@@ -499,6 +510,7 @@ def _version_from_static_python_info(
"""
from_requirements = (static_python_info.requirements or {}).get("apiLevel", None)
from_metadata = (static_python_info.metadata or {}).get("apiLevel", None)
+
requested_level = from_requirements or from_metadata
if requested_level is None:
return None
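The `{` check in `parse()` above hinges on how indexing differs between `str` and `bytes`:

assert "{...}"[0] == "{"            # indexing a str yields a 1-character str
assert b"{...}"[0] == 123           # indexing bytes yields the integer byte value
assert b"{"[0] == ord("{") == 123   # hence the comparison against b"{"[0]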
diff --git a/api/src/opentrons/protocols/types.py b/api/src/opentrons/protocols/types.py
index 792951efbfa..273a3e877d4 100644
--- a/api/src/opentrons/protocols/types.py
+++ b/api/src/opentrons/protocols/types.py
@@ -31,7 +31,13 @@ class StaticPythonInfo:
@dataclass(frozen=True)
class _ProtocolCommon:
- text: str
+ text: Union[str, bytes]
+ """The original text of the protocol file in the format it was specified with.
+
+    This leads to a wide type, but it is important that we never decode('utf-8') this ourselves,
+    because doing so can break the interpreter's understanding of line numbers when we have to
+    format an exception.
+ """
filename: Optional[str]
"""The original name of the main protocol file, if it had a name.
@@ -74,7 +80,7 @@ class PythonProtocol(_ProtocolCommon):
class BundleContents(NamedTuple):
- protocol: str
+ protocol: Union[str, bytes]
bundled_labware: Dict[str, "LabwareDefinition"]
bundled_data: Dict[str, bytes]
bundled_python: Dict[str, str]
diff --git a/api/src/opentrons/simulate.py b/api/src/opentrons/simulate.py
index 2dc744432c0..9626fa86b96 100644
--- a/api/src/opentrons/simulate.py
+++ b/api/src/opentrons/simulate.py
@@ -36,6 +36,7 @@
ThreadManager,
ThreadManagedHardware,
)
+from opentrons.hardware_control.types import HardwareFeatureFlags
from opentrons.hardware_control.simulator_setup import load_simulator
from opentrons.protocol_api.core.engine import ENGINE_CORE_API_VERSION
@@ -53,7 +54,7 @@
from opentrons.legacy_broker import LegacyBroker
from opentrons.config import IS_ROBOT
from opentrons import protocol_api
-from opentrons.commands import types as command_types
+from opentrons.legacy_commands import types as command_types
from opentrons.protocols import parse, bundle
from opentrons.protocols.types import (
@@ -65,7 +66,7 @@
from opentrons.protocols.api_support.deck_type import (
for_simulation as deck_type_for_simulation,
should_load_fixed_trash,
- should_load_fixed_trash_for_python_protocol,
+ should_load_fixed_trash_labware_for_python_protocol,
)
from opentrons.protocols.api_support.types import APIVersion
from opentrons_shared_data.labware.labware_definition import LabwareDefinition
@@ -113,7 +114,7 @@
# TODO(mm, 2023-10-05): Type _SimulateResultRunLog more precisely by using TypedDicts from
-# opentrons.commands.
+# opentrons.legacy_commands.
_SimulateResultRunLog = List[Mapping[str, Any]]
_SimulateResult = Tuple[_SimulateResultRunLog, Optional[BundleContents]]
@@ -189,12 +190,12 @@ def handle_command(message: command_types.CommandMessage) -> None:
#
# TODO(mm, 2023-10-03): This is a bit too intrusive for something whose job is just to
# "scrape." The entry point function should be responsible for setting the underlying
- # logger's level.
+ # logger's level. Also, we should probably restore the original level when we're done.
level = getattr(logging, self._level.upper(), logging.WARNING)
self._logger.setLevel(level)
log_handler: Optional[_AccumulatingHandler] = _AccumulatingHandler(
- level, log_queue
+ self._level.upper(), log_queue
)
else:
log_handler = None
@@ -222,6 +223,7 @@ def get_protocol_api(
# type checking, like Jupyter Notebook.
*,
robot_type: Optional[_UserSpecifiedRobotType] = None,
+ use_virtual_hardware: bool = True,
) -> protocol_api.ProtocolContext:
"""
Build and return a ``protocol_api.ProtocolContext``
@@ -259,6 +261,7 @@ def get_protocol_api(
:param robot_type: The type of robot to simulate: either ``"Flex"`` or ``"OT-2"``.
If you're running this function on a robot, the default is the type of that
robot. Otherwise, the default is ``"OT-2"``, for backwards compatibility.
+ :param use_virtual_hardware: If ``True``, use the Protocol Engine's virtual hardware; if ``False``, use the lower-level hardware simulator.
:return: The protocol context.
"""
if isinstance(version, str):
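
A hedged usage sketch of the new parameter (the API level and robot type are illustrative; ``use_virtual_hardware=True`` remains the default):

from opentrons import simulate

# Default: Protocol Engine's virtual hardware.
ctx = simulate.get_protocol_api("2.16", robot_type="Flex")

# Opt into the lower-level hardware simulator instead.
ctx = simulate.get_protocol_api("2.16", robot_type="Flex", use_virtual_hardware=False)
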
@@ -316,6 +319,7 @@ def get_protocol_api(
hardware_api=checked_hardware,
bundled_data=bundled_data,
extra_labware=extra_labware,
+ use_virtual_hardware=use_virtual_hardware,
)
# Intentional difference from execute.get_protocol_api():
@@ -335,9 +339,15 @@ def _make_hardware_simulator(
# Local import because this isn't available on OT-2s.
from opentrons.hardware_control.ot3api import OT3API
- return ThreadManager(OT3API.build_hardware_simulator)
+ return ThreadManager(
+ OT3API.build_hardware_simulator,
+ feature_flags=HardwareFeatureFlags.build_from_ff(),
+ )
elif robot_type == "OT-2 Standard":
- return ThreadManager(OT2API.build_hardware_simulator)
+ return ThreadManager(
+ OT2API.build_hardware_simulator,
+ feature_flags=HardwareFeatureFlags.build_from_ff(),
+ )
@contextmanager
@@ -446,7 +456,7 @@ def simulate(
- ``payload``: The command. The human-readable run log text is available at
``payload["text"]``. The other keys of ``payload`` are command-dependent;
- see ``opentrons.commands``.
+ see ``opentrons.legacy_commands``.
.. note::
In older software versions, ``payload["text"]`` was a
@@ -783,6 +793,7 @@ def _create_live_context_pe(
deck_type: str,
extra_labware: Dict[str, "LabwareDefinitionDict"],
bundled_data: Optional[Dict[str, bytes]],
+ use_virtual_hardware: bool = True,
) -> ProtocolContext:
"""Return a live ProtocolContext that controls the robot through ProtocolEngine."""
assert api_version >= ENGINE_CORE_API_VERSION
@@ -791,10 +802,14 @@ def _create_live_context_pe(
pe, loop = _LIVE_PROTOCOL_ENGINE_CONTEXTS.enter_context(
create_protocol_engine_in_thread(
hardware_api=hardware_api.wrapped(),
- config=_get_protocol_engine_config(robot_type),
+ config=_get_protocol_engine_config(
+ robot_type, virtual=use_virtual_hardware
+ ),
drop_tips_after_run=False,
post_run_hardware_state=PostRunHardwareState.STAY_ENGAGED_IN_PLACE,
- load_fixed_trash=should_load_fixed_trash_for_python_protocol(api_version),
+ load_fixed_trash=should_load_fixed_trash_labware_for_python_protocol(
+ api_version
+ ),
)
)
@@ -857,7 +872,9 @@ def _run_file_non_pe(
context.home()
with scraper.scrape():
try:
- execute.run_protocol(protocol, context)
+ # TODO (spp, 2024-03-18): use true run-time param overrides once enabled
+ # for CLI protocol simulation/execution.
+ execute.run_protocol(protocol, context, run_time_param_overrides=None)
if (
isinstance(protocol, PythonProtocol)
and protocol.api_level >= APIVersion(2, 0)
@@ -888,7 +905,7 @@ def _run_file_pe(
async def run(protocol_source: ProtocolSource) -> _SimulateResult:
protocol_engine = await create_protocol_engine(
hardware_api=hardware_api.wrapped(),
- config=_get_protocol_engine_config(robot_type),
+ config=_get_protocol_engine_config(robot_type, virtual=True),
load_fixed_trash=should_load_fixed_trash(protocol_source.config),
)
@@ -900,7 +917,12 @@ async def run(protocol_source: ProtocolSource) -> _SimulateResult:
scraper = _CommandScraper(stack_logger, log_level, protocol_runner.broker)
with scraper.scrape():
- result = await protocol_runner.run(protocol_source)
+ result = await protocol_runner.run(
+ # deck_configuration=[] is a placeholder value, ignored because
+ # the Protocol Engine config specifies use_simulated_deck_config=True.
+ deck_configuration=[],
+ protocol_source=protocol_source,
+ )
if result.state_summary.status != EngineStatus.SUCCEEDED:
raise entrypoint_util.ProtocolEngineExecuteError(
@@ -918,15 +940,16 @@ async def run(protocol_source: ProtocolSource) -> _SimulateResult:
return asyncio.run(run(protocol_source))
-def _get_protocol_engine_config(robot_type: RobotType) -> Config:
+def _get_protocol_engine_config(robot_type: RobotType, virtual: bool) -> Config:
"""Return a Protocol Engine config to execute protocols on this device."""
return Config(
robot_type=robot_type,
deck_type=DeckType(deck_type_for_simulation(robot_type)),
ignore_pause=True,
- use_virtual_pipettes=True,
- use_virtual_modules=True,
- use_virtual_gripper=True,
+ use_virtual_pipettes=virtual,
+ use_virtual_modules=virtual,
+ use_virtual_gripper=virtual,
+ use_simulated_deck_config=True,
)
diff --git a/api/src/opentrons/system/camera.py b/api/src/opentrons/system/camera.py
index 1c2d09d8747..761a9ba66a1 100644
--- a/api/src/opentrons/system/camera.py
+++ b/api/src/opentrons/system/camera.py
@@ -1,6 +1,7 @@
import asyncio
import os
from pathlib import Path
+
from opentrons.config import ARCHITECTURE, SystemArchitecture
from opentrons_shared_data.errors.exceptions import CommunicationError
from opentrons_shared_data.errors.codes import ErrorCodes
@@ -29,7 +30,7 @@ async def take_picture(filename: Path) -> None:
pass
if ARCHITECTURE == SystemArchitecture.YOCTO:
- cmd = f"v4l2-ctl --device /dev/video0 --set-fmt-video=width=1280,height=720,pixelformat=MJPG --stream-mmap --stream-to={str(filename)} --stream-count=1"
+ cmd = f"v4l2-ctl --device /dev/video2 --set-fmt-video=width=1280,height=720,pixelformat=MJPG --stream-mmap --stream-to={str(filename)} --stream-count=1"
elif ARCHITECTURE == SystemArchitecture.BUILDROOT:
cmd = f"ffmpeg -f video4linux2 -s 640x480 -i /dev/video0 -ss 0:0:1 -frames 1 {str(filename)}"
else: # HOST
diff --git a/api/src/opentrons/system/log_control.py b/api/src/opentrons/system/log_control.py
index 24f505d6b08..bd44af3c7c2 100644
--- a/api/src/opentrons/system/log_control.py
+++ b/api/src/opentrons/system/log_control.py
@@ -7,7 +7,7 @@
import asyncio
import logging
import subprocess
-from typing import Tuple
+from typing import List
LOG = logging.getLogger(__name__)
@@ -16,6 +16,12 @@
DEFAULT_RECORDS = 50000
UNIT_SELECTORS = ["opentrons-robot-server", "opentrons-robot-app"]
+SERIAL_SPECIAL = "ALL_SERIAL"
+SERIAL_SELECTORS = [
+ "opentrons-api-serial",
+ "opentrons-api-serial-can",
+ "opentrons-api-serial-usbbin",
+]
async def get_records_dumb(selector: str, records: int, mode: str) -> bytes:
@@ -25,13 +31,19 @@ async def get_records_dumb(selector: str, records: int, mode: str) -> bytes:
:param records: The maximum number of records to print
:param mode: A journalctl dump mode. Should be either "short-precise" or "json".
"""
- selector_flag = "-u" if selector in UNIT_SELECTORS else "-t"
+ selector_array: List[str] = []
+ if selector == SERIAL_SPECIAL:
+ for serial_selector in SERIAL_SELECTORS:
+ selector_array.extend(["-t", serial_selector])
+ elif selector in UNIT_SELECTORS:
+ selector_array.extend(["-u", selector])
+ else:
+ selector_array.extend(["-t", selector])
proc = await asyncio.create_subprocess_exec(
"journalctl",
"--no-pager",
- selector_flag,
- selector,
+ *selector_array,
"-n",
str(records),
"-o",
@@ -41,36 +53,3 @@ async def get_records_dumb(selector: str, records: int, mode: str) -> bytes:
)
stdout, _ = await proc.communicate()
return stdout
-
-
-async def set_syslog_level(level: str) -> Tuple[int, str, str]:
- """
- Set the minimum level for which logs will be sent upstream via syslog-ng.
-
- This is the function that actually does the work for
- :py:meth:`set_syslog_level_handler`.
-
- Similar to :py:meth:`opentrons.server.endpoints.settings.set_log_level`,
- the level should be a python log level like "debug", "info", "warning", or
- "error". If it is null, sets the minimum log level to emergency which we
- do not log at since there's not really a matching level in python logging,
- which effectively disables log upstreaming.
-
- :returns tuple(int, str, str): The error code, stdout, and stderr from
- ``syslog-ng-ctl``. ``0`` is success,
- anything else is failure
- """
- with open("/var/lib/syslog-ng/min-level", "w") as ml:
- ml.write(level)
- proc = await asyncio.create_subprocess_exec(
- "syslog-ng-ctl",
- "reload",
- stdout=asyncio.subprocess.PIPE,
- stderr=asyncio.subprocess.PIPE,
- )
- stdout, stderr = await proc.communicate()
- if proc.returncode is None:
- snc_reload_result = -1
- else:
- snc_reload_result: int = proc.returncode # type: ignore
- return snc_reload_result, stdout.decode(), stderr.decode()
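
A sketch of the selector expansion added above, showing the journalctl argument list produced for the ALL_SERIAL special selector (record count and output mode are illustrative):

selector = "ALL_SERIAL"  # SERIAL_SPECIAL
selector_array = []
for serial_selector in [
    "opentrons-api-serial",
    "opentrons-api-serial-can",
    "opentrons-api-serial-usbbin",
]:
    selector_array.extend(["-t", serial_selector])

# Resulting invocation, one -t flag per syslog identifier:
# journalctl --no-pager -t opentrons-api-serial -t opentrons-api-serial-can \
#     -t opentrons-api-serial-usbbin -n 50000 -o short-precise
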
diff --git a/api/src/opentrons/system/wifi.py b/api/src/opentrons/system/wifi.py
index 24880348f20..3b5ac91d5ee 100644
--- a/api/src/opentrons/system/wifi.py
+++ b/api/src/opentrons/system/wifi.py
@@ -2,7 +2,7 @@
import logging
import os
import shutil
-from typing import Generator, Optional, Dict, Any
+from typing import Generator, Optional, Dict, Any, TypedDict, List
from dataclasses import dataclass
from opentrons.config import CONFIG
@@ -15,7 +15,17 @@ class ConfigureArgsError(Exception):
pass
-EAP_CONFIG_SHAPE = {
+class EapConfigItem(TypedDict):
+ name: str
+ displayName: str
+ options: List[Dict[str, Any]]
+
+
+class EapConfigShape(TypedDict):
+ options: List[EapConfigItem]
+
+
+EAP_CONFIG_SHAPE: EapConfigShape = {
"options": [
{
"name": method.qualified_name(),
@@ -186,9 +196,7 @@ def eap_check_config(eap_config: Dict[str, Any]) -> Dict[str, Any]:
_eap_check_no_extra_args(eap_config, options)
- for opt in options: # type: ignore
- # Ignoring most types to do with EAP_CONFIG_SHAPE because of issues
- # wth type inference for dict comprehensions
+ for opt in options:
_eap_check_option_ok(opt, eap_config)
if opt["type"] == "file" and opt["name"] in eap_config:
# Special work for file: rewrite from key id to path
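
A hedged illustration of the shapes the new TypedDicts describe (the EAP method name and option fields are made up for the example):

example_item: EapConfigItem = {
    "name": "peap/mschapv2",
    "displayName": "PEAP/MS-CHAP v2",
    "options": [
        {"name": "identity", "displayName": "Username", "type": "string", "required": True},
    ],
}
example_shape: EapConfigShape = {"options": [example_item]}
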
diff --git a/api/src/opentrons/tools/args_handler.py b/api/src/opentrons/tools/args_handler.py
index dbb89808882..e8bdf5ead05 100644
--- a/api/src/opentrons/tools/args_handler.py
+++ b/api/src/opentrons/tools/args_handler.py
@@ -2,6 +2,7 @@
from typing import Optional, Tuple, cast
from opentrons.hardware_control import API, Controller
+from opentrons.hardware_control.types import HardwareFeatureFlags
from opentrons.drivers.smoothie_drivers import SmoothieDriver
@@ -14,6 +15,8 @@ def root_argparser(description: Optional[str] = None) -> argparse.ArgumentParser
async def build_driver(port: Optional[str] = None) -> Tuple[API, SmoothieDriver]:
- hardware = await API.build_hardware_controller(port=port)
+ hardware = await API.build_hardware_controller(
+ port=port, feature_flags=HardwareFeatureFlags.build_from_ff()
+ )
backend: Controller = cast(Controller, hardware._backend)
return hardware, backend._smoothie_driver
diff --git a/api/src/opentrons/types.py b/api/src/opentrons/types.py
index 6c8eb06f027..44035851b35 100644
--- a/api/src/opentrons/types.py
+++ b/api/src/opentrons/types.py
@@ -1,7 +1,7 @@
from __future__ import annotations
import enum
from math import sqrt, isclose
-from typing import TYPE_CHECKING, Any, NamedTuple, Iterable, Union, List
+from typing import TYPE_CHECKING, Any, NamedTuple, Iterator, Union, List
from opentrons_shared_data.robot.dev_types import RobotType
@@ -41,12 +41,12 @@ def __sub__(self, other: Any) -> Point:
return NotImplemented
return Point(self.x - other.x, self.y - other.y, self.z - other.z)
- def __mul__(self, other: Union[int, float]) -> Point:
+ def __mul__(self, other: Union[int, float]) -> Point: # type: ignore[override]
if not isinstance(other, (float, int)):
return NotImplemented
return Point(self.x * other, self.y * other, self.z * other)
- def __rmul__(self, other: Union[int, float]) -> Point:
+ def __rmul__(self, other: Union[int, float]) -> Point: # type: ignore[override]
if not isinstance(other, (float, int)):
return NotImplemented
return Point(self.x * other, self.y * other, self.z * other)
@@ -132,14 +132,16 @@ def point(self) -> Point:
def labware(self) -> LabwareLike:
return self._labware
- def __iter__(self) -> Iterable[Union[Point, LabwareLike]]:
- """Iterable interface to support unpacking. Like a tuple."""
- return iter(
- (
- self._point,
- self._labware,
- )
- )
+ def __iter__(self) -> Iterator[Union[Point, LabwareLike]]:
+ """Iterable interface to support unpacking. Like a tuple.
+
+ .. note::
+ While the type annotations cannot express this precisely, it works in practice:
+
+ point, labware = location
+ some_function_taking_both(*location)
+ """
+ return iter((self._point, self._labware)) # type: ignore [arg-type]
def __eq__(self, other: object) -> bool:
return (
@@ -357,6 +359,38 @@ def __str__(self) -> str:
_ot3_to_ot2 = {ot3: ot2 for ot2, ot3 in _slot_equivalencies}
+# TODO(jbl 11-17-2023) move this away from being an Enum and make this a NewType or something similar
+class StagingSlotName(enum.Enum):
+ """Staging slot identifiers."""
+
+ SLOT_A4 = "A4"
+ SLOT_B4 = "B4"
+ SLOT_C4 = "C4"
+ SLOT_D4 = "D4"
+
+ @classmethod
+ def from_primitive(cls, value: str) -> StagingSlotName:
+ str_val = value.upper()
+ return cls(str_val)
+
+ @property
+ def id(self) -> str:
+ """This slot's unique ID, as it appears in the deck definition.
+
+ This can be used to look up slot details in the deck definition.
+
+ This is preferred over `.value` or `.__str__()` for explicitness.
+ """
+ return self.value
+
+ def __str__(self) -> str:
+ """Stringify to the unique ID.
+
+ For explicitness, prefer using `.id` instead.
+ """
+ return self.id
+
+
class TransferTipPolicy(enum.Enum):
ONCE = enum.auto()
NEVER = enum.auto()
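
A quick usage sketch for the new enum:

slot = StagingSlotName.from_primitive("a4")
assert slot is StagingSlotName.SLOT_A4
assert slot.id == "A4"
assert str(slot) == "A4"
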
diff --git a/api/src/opentrons/util/entrypoint_util.py b/api/src/opentrons/util/entrypoint_util.py
index 442b0686ebe..63779eda18f 100644
--- a/api/src/opentrons/util/entrypoint_util.py
+++ b/api/src/opentrons/util/entrypoint_util.py
@@ -27,6 +27,7 @@
from opentrons.protocol_engine.errors.error_occurrence import (
ErrorOccurrence as ProtocolEngineErrorOccurrence,
)
+from opentrons.protocol_engine.types import DeckConfigurationType
from opentrons.protocol_reader import ProtocolReader, ProtocolSource
from opentrons.protocols.types import JsonProtocol, Protocol, PythonProtocol
@@ -123,6 +124,15 @@ def datafiles_from_paths(paths: Sequence[Union[str, pathlib.Path]]) -> Dict[str,
return datafiles
+def get_deck_configuration() -> DeckConfigurationType:
+ """Return the host robot's current deck configuration."""
+ # TODO: Search for the file where robot-server stores it.
+ # Flex: /var/lib/opentrons-robot-server/deck_configuration.json
+ # OT-2: /data/opentrons_robot_server/deck_configuration.json
+ # https://opentrons.atlassian.net/browse/RSS-400
+ return []
+
+
@contextlib.contextmanager
def adapt_protocol_source(protocol: Protocol) -> Generator[ProtocolSource, None, None]:
"""Convert a `Protocol` to a `ProtocolSource`.
@@ -156,7 +166,10 @@ def adapt_protocol_source(protocol: Protocol) -> Generator[ProtocolSource, None,
# through the filesystem. https://opentrons.atlassian.net/browse/RSS-281
main_file = pathlib.Path(temporary_directory) / main_file_name
- main_file.write_text(protocol.text, encoding="utf-8")
+ if isinstance(protocol.text, str):
+ main_file.write_text(protocol.text, encoding="utf-8")
+ else:
+ main_file.write_bytes(protocol.text)
labware_files: List[pathlib.Path] = []
if isinstance(protocol, PythonProtocol) and protocol.extra_labware is not None:
diff --git a/api/src/opentrons/util/linal.py b/api/src/opentrons/util/linal.py
index 58b293342b1..9456b2d80e1 100644
--- a/api/src/opentrons/util/linal.py
+++ b/api/src/opentrons/util/linal.py
@@ -3,7 +3,8 @@
import numpy as np
from numpy import insert, dot
from numpy.linalg import inv
-from typing import TYPE_CHECKING, List, Tuple, Union
+from numpy.typing import NDArray
+from typing import List, Tuple, Union
from opentrons.calibration_storage.types import AttitudeMatrix
@@ -17,12 +18,8 @@
Tuple[float, float, float], Tuple[float, float, float], Tuple[float, float, float]
]
-# TODO(mc, 2022-02-23): numpy.typing is not available on the version
-# of numpy we ship on the OT-2. We should update that numpy version.
-if TYPE_CHECKING:
- import numpy.typing as npt
-
- DoubleArray = npt.NDArray[np.double]
+DoubleArray = NDArray[np.double]
+DoubleMatrix = NDArray[np.double]
def identity_deck_transform() -> DoubleArray:
@@ -31,11 +28,11 @@ def identity_deck_transform() -> DoubleArray:
def solve_attitude(expected: SolvePoints, actual: SolvePoints) -> AttitudeMatrix:
- ex = np.array([list(point) for point in expected]).transpose()
- ac = np.array([list(point) for point in actual]).transpose()
- t = np.dot(ac, inv(ex)) # type: ignore[no-untyped-call]
+ ex: DoubleMatrix = np.array([list(point) for point in expected]).transpose()
+ ac: DoubleMatrix = np.array([list(point) for point in actual]).transpose()
+ t = np.dot(ac, inv(ex))
- mask_transform = np.array(
+ mask_transform: NDArray[np.bool_] = np.array(
[[True, True, False], [True, True, False], [False, False, False]]
)
masked_array = np.ma.masked_array(t, ~mask_transform) # type: ignore[var-annotated, no-untyped-call]
@@ -97,15 +94,15 @@ def solve(
# [ (x1, y1),
# (x2, y2),
# (x3, y3) ]
- ex = np.array([list(point) + [1] for point in expected]).transpose()
+ ex: DoubleMatrix = np.array([list(point) + [1] for point in expected]).transpose()
- ac = np.array([list(point) + [1] for point in actual]).transpose()
+ ac: DoubleMatrix = np.array([list(point) + [1] for point in actual]).transpose()
# Shape of `ex` and `ac`:
# [ x1 x2 x3 ]
# [ y1 y2 y3 ]
# [ 1 1 1 ]
- transform = np.dot(ac, inv(ex)) # type: ignore[no-untyped-call]
+ transform = np.dot(ac, inv(ex))
# `dot` in numpy is a misnomer. When both arguments are square, N-
# dimensional arrays, the return type is the result of performing matrix
# multiplication, rather than the dot-product (so the return here will be
@@ -132,21 +129,21 @@ def add_z(xy: DoubleArray, z: float) -> DoubleArray:
[ 0 0 0 1 ]
"""
# First, insert a column of zeros as into the input matrix
- interm = insert(xy, 2, [0, 0, 0], axis=1) # type: ignore[no-untyped-call]
+ interm = insert(xy, 2, [0, 0, 0], axis=1)
# Result:
# [ 1 0 0 x ]
# [ 0 1 0 y ]
# [ 0 0 0 1 ]
# Then, insert the z row to create a properly formed 3-D transform matrix:
- xyz = insert(interm, 2, [0, 0, 1, z], axis=0) # type: ignore[no-untyped-call]
+ xyz: DoubleMatrix = insert(interm, 2, [0, 0, 1, z], axis=0)
# Result:
# [ 1 0 0 x ]
# [ 0 1 0 y ]
# [ 0 0 1 z ]
# [ 0 0 0 1 ]
- return xyz.round(11) # type: ignore[no-any-return]
+ return xyz.round(11)
def add_matrices(
@@ -155,7 +152,7 @@ def add_matrices(
"""
Simple method to convert tuples to numpy arrays and add them.
"""
- return tuple(np.asarray(t1) + np.asarray(t2)) # type: ignore
+ return tuple(np.asarray(t1) + np.asarray(t2))
def apply_transform(
@@ -170,7 +167,7 @@ def apply_transform(
:param pos: XYZ point in space A
:return: corresponding XYZ point in space B
"""
- return tuple(dot(t, list(pos))[:3]) # type: ignore
+ return tuple(dot(t, list(pos))[:3])
def apply_reverse(
@@ -178,4 +175,4 @@ def apply_reverse(
pos: AxisPosition,
) -> Tuple[float, float, float]:
"""Like apply_transform but inverts the transform first"""
- return apply_transform(inv(t), pos) # type: ignore[no-untyped-call]
+ return apply_transform(inv(t), pos)
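
A minimal sketch of the typing pattern adopted above, assuming a numpy version that ships numpy.typing (as the removed TODO notes, the older OT-2 numpy did not):

import numpy as np
from numpy.typing import NDArray

DoubleArray = NDArray[np.double]

def identity_3x3() -> DoubleArray:
    # With the alias in place, calls no longer need "# type: ignore[no-untyped-call]".
    return np.identity(3, dtype=np.double)
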
diff --git a/api/src/opentrons/util/logging_config.py b/api/src/opentrons/util/logging_config.py
index d57e762899a..e9a4d2042a2 100644
--- a/api/src/opentrons/util/logging_config.py
+++ b/api/src/opentrons/util/logging_config.py
@@ -94,6 +94,18 @@ def _buildroot_config(level_value: int) -> Dict[str, Any]:
"formatter": "message_only",
"SYSLOG_IDENTIFIER": "opentrons-api-serial",
},
+ "can_serial": {
+ "class": "systemd.journal.JournalHandler",
+ "level": logging.DEBUG,
+ "formatter": "message_only",
+ "SYSLOG_IDENTIFIER": "opentrons-api-serial-can",
+ },
+ "usbbin_serial": {
+ "class": "systemd.journal.JournalHandler",
+ "level": logging.DEBUG,
+ "formatter": "message_only",
+ "SYSLOG_IDENTIFIER": "opentrons-api-serial-usbbin",
+ },
},
"loggers": {
"opentrons.drivers.asyncio.communication.serial_connection": {
@@ -110,12 +122,12 @@ def _buildroot_config(level_value: int) -> Dict[str, Any]:
"level": level_value,
},
"opentrons_hardware.drivers.can_bus.can_messenger": {
- "handlers": ["serial"],
+ "handlers": ["can_serial"],
"level": logging.DEBUG,
"propagate": False,
},
"opentrons_hardware.drivers.binary_usb.bin_serial": {
- "handlers": ["serial"],
+ "handlers": ["usbbin_serial"],
"level": logging.DEBUG,
"propagate": False,
},
diff --git a/api/src/opentrons/util/performance_helpers.py b/api/src/opentrons/util/performance_helpers.py
new file mode 100644
index 00000000000..a157908303d
--- /dev/null
+++ b/api/src/opentrons/util/performance_helpers.py
@@ -0,0 +1,76 @@
+"""Performance helpers for tracking robot context."""
+
+from pathlib import Path
+from opentrons_shared_data.performance.dev_types import (
+ SupportsTracking,
+ F,
+ RobotContextState,
+)
+from opentrons_shared_data.robot.dev_types import RobotTypeEnum
+from typing import Callable, Type
+from opentrons.config import (
+ feature_flags as ff,
+ get_performance_metrics_data_dir,
+ robot_configs,
+)
+
+
+_should_track = ff.enable_performance_metrics(
+ RobotTypeEnum.robot_literal_to_enum(robot_configs.load().model)
+)
+
+
+class StubbedTracker(SupportsTracking):
+ """A stubbed tracker that does nothing."""
+
+ def __init__(self, storage_location: Path, should_track: bool) -> None:
+ """Initialize the stubbed tracker."""
+ pass
+
+ def track(self, state: RobotContextState) -> Callable[[F], F]:
+ """Return the function unchanged."""
+
+ def inner_decorator(func: F) -> F:
+ """Return the function unchanged."""
+ return func
+
+ return inner_decorator
+
+ def store(self) -> None:
+ """Do nothing."""
+ pass
+
+
+def _handle_package_import() -> Type[SupportsTracking]:
+ """Handle the import of the performance_metrics package.
+
+ If the package is not available, return a stubbed tracker.
+ """
+ try:
+ from performance_metrics import RobotContextTracker
+
+ return RobotContextTracker
+ except ImportError:
+ return StubbedTracker
+
+
+package_to_use = _handle_package_import()
+_robot_context_tracker: SupportsTracking | None = None
+
+
+def _get_robot_context_tracker() -> SupportsTracking:
+ """Singleton for the robot context tracker."""
+ global _robot_context_tracker
+ if _robot_context_tracker is None:
+ # TODO: replace with path lookup and should_store lookup
+ _robot_context_tracker = package_to_use(
+ get_performance_metrics_data_dir(), _should_track
+ )
+ return _robot_context_tracker
+
+
+def track_analysis(func: F) -> F:
+ """Track the analysis of a protocol."""
+ return _get_robot_context_tracker().track(RobotContextState.ANALYZING_PROTOCOL)(
+ func
+ )
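
A hypothetical usage of the decorator defined above (the decorated function is illustrative, not taken from the codebase):

from opentrons.util.performance_helpers import track_analysis

@track_analysis
def analyze_protocol(protocol_path: str) -> None:
    """Placeholder body; timing is recorded only when tracking is enabled."""
    ...
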
diff --git a/api/tests/opentrons/calibration_storage/test_deck_configuration.py b/api/tests/opentrons/calibration_storage/test_deck_configuration.py
new file mode 100644
index 00000000000..afdd4449eb4
--- /dev/null
+++ b/api/tests/opentrons/calibration_storage/test_deck_configuration.py
@@ -0,0 +1,38 @@
+from datetime import datetime, timezone
+
+import pytest
+
+from opentrons.calibration_storage import deck_configuration as subject
+from opentrons.calibration_storage.types import CutoutFixturePlacement
+
+
+def test_deck_configuration_serdes() -> None:
+ """Test that deck configuration serialization/deserialization survives a round trip."""
+
+ dummy_cutout_fixture_placements = [
+ CutoutFixturePlacement(
+ cutout_fixture_id="a", cutout_id="b", opentrons_module_serial_number="1"
+ ),
+ CutoutFixturePlacement(
+ cutout_fixture_id="c", cutout_id="d", opentrons_module_serial_number="2"
+ ),
+ ]
+ dummy_datetime = datetime(year=1961, month=5, day=6, tzinfo=timezone.utc)
+
+ serialized = subject.serialize_deck_configuration(
+ dummy_cutout_fixture_placements, dummy_datetime
+ )
+ deserialized = subject.deserialize_deck_configuration(serialized)
+ assert deserialized == (dummy_cutout_fixture_placements, dummy_datetime)
+
+
+@pytest.mark.parametrize(
+ "input",
+ [
+ b'{"hello": "world"}', # Valid JSON, but not valid for the model.
+ "😾".encode("utf-8"), # Not valid JSON.
+ ],
+)
+def test_deserialize_deck_configuration_error_handling(input: bytes) -> None:
+ """Test that deserialization handles errors gracefully."""
+ assert subject.deserialize_deck_configuration(input) is None
diff --git a/api/tests/opentrons/calibration_storage/test_file_operators.py b/api/tests/opentrons/calibration_storage/test_file_operators.py
index c608b7619f1..5a95f225fe3 100644
--- a/api/tests/opentrons/calibration_storage/test_file_operators.py
+++ b/api/tests/opentrons/calibration_storage/test_file_operators.py
@@ -1,10 +1,18 @@
-import pytest
import json
import typing
from pathlib import Path
+
+import pydantic
+import pytest
+
from opentrons.calibration_storage import file_operators as io
+class DummyModel(pydantic.BaseModel):
+ integer_field: int
+ aliased_field: str = pydantic.Field(alias="! aliased field !")
+
+
@pytest.fixture
def calibration() -> typing.Dict[str, typing.Any]:
return {
@@ -70,3 +78,26 @@ def test_malformed_calibration(
)
with pytest.raises(AssertionError):
io.read_cal_file(malformed_calibration_path)
+
+
+def test_deserialize_pydantic_model_valid() -> None:
+ serialized = b'{"integer_field": 123, "! aliased field !": "abc"}'
+ assert io.deserialize_pydantic_model(
+ serialized, DummyModel
+ ) == DummyModel.construct(integer_field=123, aliased_field="abc")
+
+
+def test_deserialize_pydantic_model_invalid_as_json() -> None:
+ serialized = "😾".encode("utf-8")
+ assert io.deserialize_pydantic_model(serialized, DummyModel) is None
+ # Ideally we would assert that the subject logged a message saying "not valid JSON",
+ # but the opentrons.simulate and opentrons.execute tests interfere with the process's logger
+ # settings and prevent that message from showing up in pytest's caplog fixture.
+
+
+def test_read_pydantic_model_from_file_invalid_model(tmp_path: Path) -> None:
+ serialized = b'{"integer_field": "not an integer"}'
+ assert io.deserialize_pydantic_model(serialized, DummyModel) is None
+ # Ideally we would assert that the subject logged a message saying "does not match model",
+ # but the opentrons.simulate and opentrons.execute tests interfere with the process's logger
+ # settings and prevent that message from showing up in pytest's caplog fixture.
diff --git a/api/tests/opentrons/calibration_storage/test_tip_length_ot2.py b/api/tests/opentrons/calibration_storage/test_tip_length_ot2.py
index 93a208e0071..2d593bda67e 100644
--- a/api/tests/opentrons/calibration_storage/test_tip_length_ot2.py
+++ b/api/tests/opentrons/calibration_storage/test_tip_length_ot2.py
@@ -1,9 +1,11 @@
import pytest
-from typing import cast, Any, TYPE_CHECKING
+from typing import Any, TYPE_CHECKING
+from opentrons import config
from opentrons.calibration_storage import (
types as cs_types,
helpers,
+ file_operators as io,
)
from opentrons.calibration_storage.ot2 import (
@@ -15,10 +17,10 @@
clear_tip_length_calibration,
models,
)
+from opentrons_shared_data.pipette.dev_types import LabwareUri
if TYPE_CHECKING:
from opentrons_shared_data.labware.dev_types import LabwareDefinition
- from opentrons_shared_data.pipette.dev_types import LabwareUri
@pytest.fixture
@@ -38,6 +40,18 @@ def starting_calibration_data(
save_tip_length_calibration("pip1", tip_length1)
save_tip_length_calibration("pip2", tip_length2)
save_tip_length_calibration("pip1", tip_length3)
+ inside_data = tip_length3[LabwareUri("dummy_namespace/minimal_labware_def/1")]
+ data = {
+ inside_data.definitionHash: {
+ "tipLength": 27,
+ "lastModified": inside_data.lastModified.isoformat(),
+ "source": inside_data.source,
+ "status": inside_data.status.dict(),
+ "uri": "dummy_namespace/minimal_labware_def/1",
+ }
+ }
+ tip_length_dir_path = config.get_tip_length_cal_path()
+ io.save_to_file(tip_length_dir_path, "pip2", data)
def test_save_tip_length_calibration(
@@ -48,13 +62,13 @@ def test_save_tip_length_calibration(
"""
assert tip_lengths_for_pipette("pip1") == {}
assert tip_lengths_for_pipette("pip2") == {}
- tip_rack_hash = helpers.hash_labware_def(minimal_labware_def)
+ tip_rack_uri = helpers.uri_from_definition(minimal_labware_def)
tip_length1 = create_tip_length_data(minimal_labware_def, 22.0)
tip_length2 = create_tip_length_data(minimal_labware_def, 31.0)
save_tip_length_calibration("pip1", tip_length1)
save_tip_length_calibration("pip2", tip_length2)
- assert tip_lengths_for_pipette("pip1")[tip_rack_hash].tipLength == 22.0
- assert tip_lengths_for_pipette("pip2")[tip_rack_hash].tipLength == 31.0
+ assert tip_lengths_for_pipette("pip1")[tip_rack_uri].tipLength == 22.0
+ assert tip_lengths_for_pipette("pip2")[tip_rack_uri].tipLength == 31.0
def test_get_tip_length_calibration(
@@ -64,11 +78,12 @@ def test_get_tip_length_calibration(
Test ability to get a tip length calibration model.
"""
tip_length_data = load_tip_length_calibration("pip1", minimal_labware_def)
+ tip_rack_hash = helpers.hash_labware_def(minimal_labware_def)
assert tip_length_data == models.v1.TipLengthModel(
tipLength=22.0,
source=cs_types.SourceType.user,
lastModified=tip_length_data.lastModified,
- uri=cast("LabwareUri", "opentronstest/minimal_labware_def/1"),
+ definitionHash=tip_rack_hash,
)
with pytest.raises(cs_types.TipLengthCalNotFound):
@@ -83,8 +98,8 @@ def test_delete_specific_tip_calibration(
"""
assert len(tip_lengths_for_pipette("pip1").keys()) == 2
assert tip_lengths_for_pipette("pip2") != {}
- tip_rack_hash = helpers.hash_labware_def(minimal_labware_def)
- delete_tip_length_calibration(tip_rack_hash, "pip1")
+ tip_rack_uri = helpers.uri_from_definition(minimal_labware_def)
+ delete_tip_length_calibration("pip1", tiprack_uri=tip_rack_uri)
assert len(tip_lengths_for_pipette("pip1").keys()) == 1
assert tip_lengths_for_pipette("pip2") != {}
@@ -98,3 +113,31 @@ def test_delete_all_tip_calibration(starting_calibration_data: Any) -> None:
clear_tip_length_calibration()
assert tip_lengths_for_pipette("pip1") == {}
assert tip_lengths_for_pipette("pip2") == {}
+
+
+def test_uriless_calibrations_are_dropped(ot_config_tempdir: object) -> None:
+ """Legacy records without a `uri` field should be silently ignored."""
+
+ data = {
+ "ed323db6ca1ddf197aeb20667c1a7a91c89cfb2f931f45079d483928da056812": {
+ "tipLength": 123,
+ "lastModified": "2021-01-11T00:34:29.291073+00:00",
+ "source": "user",
+ "status": {"markedBad": False},
+ },
+ "130e17bb7b2f0c0472dcc01c1ff6f600ca1a6f9f86a90982df56c4bf43776824": {
+ "tipLength": 456,
+ "lastModified": "2021-05-12T22:16:14.249567+00:00",
+ "source": "user",
+ "status": {"markedBad": False},
+ "uri": "opentrons/opentrons_96_filtertiprack_200ul/1",
+ },
+ }
+
+ io.save_to_file(config.get_tip_length_cal_path(), "pipette1234", data)
+ result = tip_lengths_for_pipette("pipette1234")
+ assert len(result) == 1
+ assert (
+ result[LabwareUri("opentrons/opentrons_96_filtertiprack_200ul/1")].tipLength
+ == 456
+ )
diff --git a/api/tests/opentrons/cli/test_cli.py b/api/tests/opentrons/cli/test_cli.py
index eae5aa31ccc..818c4e9a1df 100644
--- a/api/tests/opentrons/cli/test_cli.py
+++ b/api/tests/opentrons/cli/test_cli.py
@@ -1,16 +1,42 @@
"""Test cli execution."""
+
+
import json
import tempfile
import textwrap
-from dataclasses import dataclass
+from dataclasses import dataclass, replace
from typing import Any, Dict, Iterator, List, Optional
from pathlib import Path
import pytest
from click.testing import CliRunner
+from opentrons_shared_data.performance.dev_types import (
+ RobotContextState,
+)
+from opentrons.util.performance_helpers import _get_robot_context_tracker
+
+
+# Enable tracking for the RobotContextTracker
+# This must come before the import of the analyze CLI
+context_tracker = _get_robot_context_tracker()
+
+# Ignore the type error for the next line, as we're setting a private attribute for testing purposes
+context_tracker._should_track = True # type: ignore[attr-defined]
+
+from opentrons.cli.analyze import analyze # noqa: E402
+
-from opentrons.cli.analyze import analyze
+@pytest.fixture
+def override_data_store(tmp_path: Path) -> Iterator[None]:
+ """Override the data store metadata for the RobotContextTracker."""
+ old_store = context_tracker._store # type: ignore[attr-defined]
+ old_metadata = old_store.metadata
+ new_metadata = replace(old_metadata, storage_dir=tmp_path)
+ context_tracker._store = old_store.__class__(metadata=new_metadata) # type: ignore[attr-defined]
+ context_tracker._store.setup() # type: ignore[attr-defined]
+ yield
+ context_tracker._store = old_store # type: ignore[attr-defined]
def _list_fixtures(version: int) -> Iterator[Path]:
@@ -26,7 +52,9 @@ class _AnalysisCLIResult:
stdout_stderr: str
-def _get_analysis_result(protocol_files: List[Path]) -> _AnalysisCLIResult:
+def _get_analysis_result(
+ protocol_files: List[Path], output_type: str, check: bool = False
+) -> _AnalysisCLIResult:
"""Run `protocol_files` as a single protocol through the analysis CLI.
Returns:
@@ -38,14 +66,15 @@ def _get_analysis_result(protocol_files: List[Path]) -> _AnalysisCLIResult:
with tempfile.TemporaryDirectory() as temp_dir:
analysis_output_file = Path(temp_dir) / "analysis_output.json"
runner = CliRunner()
- result = runner.invoke(
- analyze,
- [
- "--json-output",
- str(analysis_output_file),
- *[str(p.resolve()) for p in protocol_files],
- ],
- )
+ args = [
+ output_type,
+ str(analysis_output_file),
+ *[str(p.resolve()) for p in protocol_files],
+ ]
+ if check:
+ args.append("--check")
+
+ result = runner.invoke(analyze, args)
if analysis_output_file.exists():
json_output = json.loads(analysis_output_file.read_bytes())
else:
@@ -57,12 +86,14 @@ def _get_analysis_result(protocol_files: List[Path]) -> _AnalysisCLIResult:
)
+@pytest.mark.parametrize("output", ["--json-output", "--human-json-output"])
@pytest.mark.parametrize("fixture_path", _list_fixtures(6))
def test_analyze(
fixture_path: Path,
+ output: str,
) -> None:
"""Should return with no errors and a non-empty output."""
- result = _get_analysis_result([fixture_path])
+ result = _get_analysis_result([fixture_path], output)
assert result.exit_code == 0
@@ -98,6 +129,7 @@ def run(protocol):
)
+@pytest.mark.parametrize("output", ["--json-output", "--human-json-output"])
@pytest.mark.parametrize(
("api_level", "robot_type", "expected_point"),
[
@@ -119,6 +151,7 @@ def test_analysis_deck_definition(
robot_type: str,
expected_point: str,
tmp_path: Path,
+ output: str,
) -> None:
"""Test that the analysis uses the appropriate deck definition for the protocol's robot type.
@@ -135,7 +168,7 @@ def test_analysis_deck_definition(
encoding="utf-8",
)
- result = _get_analysis_result([protocol_source_file])
+ result = _get_analysis_result([protocol_source_file], output)
assert result.exit_code == 0
@@ -151,7 +184,8 @@ def test_analysis_deck_definition(
# TODO(mm, 2023-08-12): We can remove this test when we remove special handling for these
# protocols. https://opentrons.atlassian.net/browse/RSS-306
-def test_strict_metatada_requirements_validation(tmp_path: Path) -> None:
+@pytest.mark.parametrize("output", ["--json-output", "--human-json-output"])
+def test_strict_metatada_requirements_validation(tmp_path: Path, output: str) -> None:
"""It should apply strict validation to the metadata and requirements dicts.
It should reject protocols with questionable metadata and requirements dicts,
@@ -172,7 +206,7 @@ def run(protocol):
protocol_source_file = tmp_path / "protocol.py"
protocol_source_file.write_text(protocol_source, encoding="utf-8")
- result = _get_analysis_result([protocol_source_file])
+ result = _get_analysis_result([protocol_source_file], output)
assert result.exit_code != 0
@@ -182,6 +216,8 @@ def run(protocol):
assert expected_message in result.stdout_stderr
+@pytest.mark.parametrize("output", ["--json-output", "--human-json-output"])
+@pytest.mark.parametrize("check", [True, False])
@pytest.mark.parametrize(
("python_protocol_source", "expected_detail"),
[
@@ -230,15 +266,60 @@ def run(protocol): # line 3
],
)
def test_python_error_line_numbers(
- tmp_path: Path, python_protocol_source: str, expected_detail: str
+ tmp_path: Path,
+ python_protocol_source: str,
+ expected_detail: str,
+ output: str,
+ check: bool,
) -> None:
"""Test that error messages from Python protocols have line numbers."""
protocol_source_file = tmp_path / "protocol.py"
protocol_source_file.write_text(python_protocol_source, encoding="utf-8")
- result = _get_analysis_result([protocol_source_file])
+ result = _get_analysis_result([protocol_source_file], output, check)
- assert result.exit_code == 0
+ if check:
+ assert result.exit_code != 0
+ else:
+ assert result.exit_code == 0
assert result.json_output is not None
[error] = result.json_output["errors"]
assert error["detail"] == expected_detail
+
+
+@pytest.mark.usefixtures("override_data_store")
+@pytest.mark.parametrize("output", ["--json-output", "--human-json-output"])
+def test_track_analysis(tmp_path: Path, output: str) -> None:
+ """Test that the RobotContextTracker tracks analysis."""
+ protocol_source = textwrap.dedent(
+ """
+ requirements = {"apiLevel": "2.15"}
+
+ def run(protocol):
+ pass
+ """
+ )
+ protocol_source_file = tmp_path / "protocol.py"
+ protocol_source_file.write_text(protocol_source, encoding="utf-8")
+ store = context_tracker._store # type: ignore[attr-defined]
+
+ num_storage_entities_before_analysis = len(store._data)
+
+ _get_analysis_result([protocol_source_file], output)
+
+ assert len(store._data) == num_storage_entities_before_analysis + 1
+
+ with open(store.metadata.data_file_location, "r") as f:
+ stored_data = f.readlines()
+ assert len(stored_data) == 0
+
+ context_tracker.store()
+
+ with open(store.metadata.data_file_location, "r") as f:
+ stored_data = f.readlines()
+ stored_data = [line.strip() for line in stored_data if line.strip()]
+ assert len(stored_data) == 1
+ state_id, start_time, duration = stored_data[0].strip().split(",")
+ assert state_id == str(RobotContextState.ANALYZING_PROTOCOL.state_id)
+ assert start_time.isdigit()
+ assert duration.isdigit()
diff --git a/api/tests/opentrons/config/ot3_settings.py b/api/tests/opentrons/config/ot3_settings.py
index 8c805f3a154..3cfa9b7c34c 100644
--- a/api/tests/opentrons/config/ot3_settings.py
+++ b/api/tests/opentrons/config/ot3_settings.py
@@ -1,3 +1,5 @@
+from opentrons.config.types import OutputOptions
+
ot3_dummy_settings = {
"name": "Marie Curie",
"model": "OT-3 Standard",
@@ -109,7 +111,6 @@
},
},
"log_level": "NADA",
- "z_retract_distance": 10,
"safe_home_distance": 5,
"deck_transform": [[-0.5, 0, 1], [0.1, -2, 4], [0, 0, -1]],
"carriage_offset": (1, 2, 3),
@@ -124,11 +125,11 @@
"plunger_speed": 10,
"sensor_threshold_pascals": 17,
"expected_liquid_height": 90,
- "log_pressure": True,
+ "output_option": OutputOptions.stream_to_csv,
"aspirate_while_sensing": False,
"auto_zero_sensor": True,
"num_baseline_reads": 10,
- "data_file": "/var/pressure_sensor_data.csv",
+ "data_files": {"PRIMARY": "/data/pressure_sensor_data.csv"},
},
"calibration": {
"z_offset": {
diff --git a/api/tests/opentrons/config/test_advanced_settings.py b/api/tests/opentrons/config/test_advanced_settings.py
index 21140b0f3d7..17122fca0dd 100644
--- a/api/tests/opentrons/config/test_advanced_settings.py
+++ b/api/tests/opentrons/config/test_advanced_settings.py
@@ -1,9 +1,9 @@
import pytest
-from pytest_lazyfixture import lazy_fixture # type: ignore[import]
-from typing import Any, Dict, Generator, Optional, Tuple
+from pytest_lazyfixture import lazy_fixture # type: ignore[import-untyped]
+from typing import Dict, Generator, Optional
from unittest.mock import MagicMock, patch
-from opentrons.config import advanced_settings, ARCHITECTURE, CONFIG
+from opentrons.config import advanced_settings, CONFIG
from opentrons_shared_data.robot.dev_types import RobotTypeEnum
@@ -34,6 +34,15 @@ def mock_settings_values_flex() -> Dict[str, Optional[bool]]:
}
+@pytest.fixture
+def mock_settings_values_flex_all() -> Dict[str, Optional[bool]]:
+ return {
+ s.id: False
+ for s in advanced_settings.settings
+ if RobotTypeEnum.FLEX in s.robot_type
+ }
+
+
@pytest.fixture
def mock_settings_values_empty() -> Dict[str, Optional[bool]]:
return {s.id: None for s in advanced_settings.settings}
@@ -57,12 +66,12 @@ def mock_settings(
@pytest.fixture
def mock_read_settings_file_ot2(
- mock_settings_values_ot2: Dict[str, Optional[bool]],
+ mock_settings_values_ot2_all: Dict[str, Optional[bool]],
mock_settings_version: int,
) -> Generator[MagicMock, None, None]:
with patch("opentrons.config.advanced_settings._read_settings_file") as p:
p.return_value = advanced_settings.SettingsData(
- settings_map=mock_settings_values_ot2,
+ settings_map=mock_settings_values_ot2_all,
version=mock_settings_version,
)
yield p
@@ -70,12 +79,12 @@ def mock_read_settings_file_ot2(
@pytest.fixture
def mock_read_settings_file_flex(
- mock_settings_values_flex: Dict[str, Optional[bool]],
+ mock_settings_values_flex_all: Dict[str, Optional[bool]],
mock_settings_version: int,
) -> Generator[MagicMock, None, None]:
with patch("opentrons.config.advanced_settings._read_settings_file") as p:
p.return_value = advanced_settings.SettingsData(
- settings_map=mock_settings_values_flex,
+ settings_map=mock_settings_values_flex_all,
version=mock_settings_version,
)
yield p
@@ -168,19 +177,19 @@ def test_get_all_adv_settings_empty(
async def test_set_adv_setting(
mock_read_settings_file_ot2: MagicMock,
- mock_settings_values_ot2: MagicMock,
+ mock_settings_values_ot2_all: MagicMock,
mock_write_settings_file: MagicMock,
mock_settings_version: int,
restore_restart_required: None,
) -> None:
- for k, v in mock_settings_values_ot2.items():
+ for k, v in mock_settings_values_ot2_all.items():
# Toggle the advanced setting
await advanced_settings.set_adv_setting(k, not v)
mock_write_settings_file.assert_called_with(
# Only the current key is toggled
{
nk: nv if nk != k else not v
- for nk, nv in mock_settings_values_ot2.items()
+ for nk, nv in mock_settings_values_ot2_all.items()
},
mock_settings_version,
CONFIG["feature_flags_file"],
@@ -253,47 +262,6 @@ async def test_restart_required(
assert advanced_settings.is_restart_required() is True
-@pytest.mark.parametrize(
- argnames=["v", "expected_level"],
- argvalues=[
- [True, "emerg"],
- [False, "info"],
- ],
-)
-async def test_disable_log_integration_side_effect(
- v: bool, expected_level: str
-) -> None:
- with patch("opentrons.config.advanced_settings.log_control") as mock_log_control:
-
- async def set_syslog_level(level: Any) -> Tuple[int, str, str]:
- return 0, "", ""
-
- mock_log_control.set_syslog_level.side_effect = set_syslog_level
- with patch(
- "opentrons.config.advanced_settings.ARCHITECTURE",
- new=ARCHITECTURE.BUILDROOT,
- ):
- s = advanced_settings.DisableLogIntegrationSettingDefinition()
- await s.on_change(v)
- mock_log_control.set_syslog_level.assert_called_once_with(expected_level)
-
-
-async def test_disable_log_integration_side_effect_error() -> None:
- with patch("opentrons.config.advanced_settings.log_control") as mock_log_control:
-
- async def set_syslog_level(level: Any) -> Tuple[int, str, str]:
- return 1, "", ""
-
- mock_log_control.set_syslog_level.side_effect = set_syslog_level
- with patch(
- "opentrons.config.advanced_settings.ARCHITECTURE",
- new=ARCHITECTURE.BUILDROOT,
- ):
- s = advanced_settings.DisableLogIntegrationSettingDefinition()
- with pytest.raises(advanced_settings.SettingException):
- await s.on_change(True)
-
-
def test_per_robot_true_defaults(mock_read_settings_file_empty: MagicMock) -> None:
with patch.object(advanced_settings, "settings_by_id", new={}):
assert (
diff --git a/api/tests/opentrons/config/test_advanced_settings_migration.py b/api/tests/opentrons/config/test_advanced_settings_migration.py
index bcdeff2ee03..283d11a3000 100644
--- a/api/tests/opentrons/config/test_advanced_settings_migration.py
+++ b/api/tests/opentrons/config/test_advanced_settings_migration.py
@@ -1,13 +1,14 @@
-from typing import Any, Dict
+from typing import Any, Dict, cast
import pytest
-from pytest_lazyfixture import lazy_fixture # type: ignore[import]
+from _pytest.fixtures import SubRequest
+from pytest_lazyfixture import lazy_fixture # type: ignore[import-untyped]
from opentrons.config.advanced_settings import _migrate, _ensure
@pytest.fixture
def migrated_file_version() -> int:
- return 29
+ return 34
# make sure to set a boolean value in default_file_settings only if
@@ -21,14 +22,15 @@ def default_file_settings() -> Dict[str, Any]:
"useOldAspirationFunctions": None,
"disableLogAggregation": None,
"enableDoorSafetySwitch": None,
- "disableFastProtocolUpload": None,
"enableOT3HardwareController": None,
"rearPanelIntegration": True,
"disableStallDetection": None,
"disableStatusBar": None,
"disableOverpressureDetection": None,
- "disableTipPresenceDetection": None,
"estopNotRequired": None,
+ "enableErrorRecoveryExperiments": None,
+ "enableOEMMode": None,
+ "enablePerformanceMetrics": None,
}
@@ -359,6 +361,57 @@ def v29_config(v28_config: Dict[str, Any]) -> Dict[str, Any]:
return r
+@pytest.fixture
+def v30_config(v29_config: Dict[str, Any]) -> Dict[str, Any]:
+ r = {k: v for k, v in v29_config.items() if k != "disableTipPresenceDetection"}
+ r["_version"] = 30
+ return r
+
+
+@pytest.fixture
+def v31_config(v30_config: Dict[str, Any]) -> Dict[str, Any]:
+ r = v30_config.copy()
+ r.update(
+ {
+ "_version": 31,
+ "enableErrorRecoveryExperiments": None,
+ }
+ )
+ return r
+
+
+@pytest.fixture
+def v32_config(v31_config: Dict[str, Any]) -> Dict[str, Any]:
+ r = v31_config.copy()
+ r.update(
+ {
+ "_version": 32,
+ "enableOEMMode": None,
+ }
+ )
+ return r
+
+
+@pytest.fixture
+def v33_config(v32_config: Dict[str, Any]) -> Dict[str, Any]:
+ r = v32_config.copy()
+ r.update(
+ {
+ "_version": 33,
+ "enablePerformanceMetrics": None,
+ }
+ )
+ return r
+
+
+@pytest.fixture
+def v34_config(v33_config: Dict[str, Any]) -> Dict[str, Any]:
+ r = v33_config.copy()
+ r.pop("disableFastProtocolUpload")
+ r["_version"] = 34
+ return r
+
+
@pytest.fixture(
scope="session",
params=[
@@ -393,10 +446,15 @@ def v29_config(v28_config: Dict[str, Any]) -> Dict[str, Any]:
lazy_fixture("v27_config"),
lazy_fixture("v28_config"),
lazy_fixture("v29_config"),
+ lazy_fixture("v30_config"),
+ lazy_fixture("v31_config"),
+ lazy_fixture("v32_config"),
+ lazy_fixture("v33_config"),
+ lazy_fixture("v34_config"),
],
)
-def old_settings(request: pytest.FixtureRequest) -> Dict[str, Any]:
- return request.param # type: ignore[attr-defined, no-any-return]
+def old_settings(request: SubRequest) -> Dict[str, Any]:
+ return cast(Dict[str, Any], request.param)
def test_migrations(
@@ -477,12 +535,13 @@ def test_ensures_config() -> None:
"useOldAspirationFunctions": None,
"disableLogAggregation": True,
"enableDoorSafetySwitch": None,
- "disableFastProtocolUpload": None,
"enableOT3HardwareController": None,
"rearPanelIntegration": None,
"disableStallDetection": None,
"disableStatusBar": None,
"estopNotRequired": None,
- "disableTipPresenceDetection": None,
"disableOverpressureDetection": None,
+ "enableErrorRecoveryExperiments": None,
+ "enableOEMMode": None,
+ "enablePerformanceMetrics": None,
}
diff --git a/api/tests/opentrons/config/test_defaults_ot2.py b/api/tests/opentrons/config/test_defaults_ot2.py
index df2cd61ce18..77c7c0a3589 100644
--- a/api/tests/opentrons/config/test_defaults_ot2.py
+++ b/api/tests/opentrons/config/test_defaults_ot2.py
@@ -1,5 +1,5 @@
from opentrons.config.types import CurrentDict
-from opentrons.config import robot_configs, defaults_ot2
+from opentrons.config import defaults_ot2
def test_load_currents() -> None:
@@ -19,7 +19,4 @@ def test_load_currents() -> None:
defaults_ot2._build_hw_versioned_current_dict(default_different_vals, default)
== default_different_vals
)
- assert (
- robot_configs.defaults_ot2._build_hw_versioned_current_dict(None, default)
- == default
- )
+ assert defaults_ot2._build_hw_versioned_current_dict(None, default) == default
diff --git a/api/tests/opentrons/config/test_reset.py b/api/tests/opentrons/config/test_reset.py
index 3561412bdb0..aacea130e1f 100644
--- a/api/tests/opentrons/config/test_reset.py
+++ b/api/tests/opentrons/config/test_reset.py
@@ -123,6 +123,7 @@ def test_reset_all_set(
mock_reset_boot_scripts.assert_called_once()
mock_reset_pipette_offset.assert_called_once()
mock_reset_deck_calibration.assert_called_once()
+ mock_reset_deck_calibration.assert_called_once()
mock_reset_tip_length_calibrations.assert_called_once()
diff --git a/api/tests/opentrons/conftest.py b/api/tests/opentrons/conftest.py
index 979fe9b936a..de731268bce 100755
--- a/api/tests/opentrons/conftest.py
+++ b/api/tests/opentrons/conftest.py
@@ -23,12 +23,13 @@
from typing_extensions import TypedDict
import pytest
+from _pytest.fixtures import SubRequest
from decoy import Decoy
from opentrons.protocol_engine.types import PostRunHardwareState
try:
- import aionotify # type: ignore[import]
+ import aionotify # type: ignore[import-untyped]
except (OSError, ModuleNotFoundError):
aionotify = None
@@ -39,7 +40,7 @@
from opentrons_shared_data.deck.dev_types import (
RobotModel,
DeckDefinitionV3,
- DeckDefinitionV4,
+ DeckDefinitionV5,
)
from opentrons_shared_data.deck import (
load as load_deck,
@@ -88,7 +89,7 @@ class Bundle(TypedDict):
filelike: io.BytesIO
binary_zipfile: bytes
metadata: Dict[str, str]
- bundled_data: Dict[str, str]
+ bundled_data: Dict[str, bytes]
bundled_labware: Dict[str, LabwareDefinition]
bundled_python: Dict[str, Any]
@@ -156,11 +157,11 @@ def virtual_smoothie_env(monkeypatch: pytest.MonkeyPatch) -> None:
@pytest.fixture(params=["ot2", "ot3"])
async def machine_variant_ffs(
- request: pytest.FixtureRequest,
+ request: SubRequest,
decoy: Decoy,
mock_feature_flags: None,
) -> None:
- device_param = request.param # type: ignore[attr-defined]
+ device_param = request.param
if request.node.get_closest_marker("ot3_only") and device_param == "ot2":
pytest.skip()
@@ -229,7 +230,7 @@ async def robot_model(
mock_feature_flags: None,
virtual_smoothie_env: None,
) -> AsyncGenerator[RobotModel, None]:
- which_machine = cast(RobotModel, request.param) # type: ignore[attr-defined]
+ which_machine = cast(RobotModel, request.param)
if request.node.get_closest_marker("ot2_only") and which_machine == "OT-3 Standard":
pytest.skip("test requests only ot-2")
if request.node.get_closest_marker("ot3_only") and which_machine == "OT-2 Standard":
@@ -255,7 +256,7 @@ def deck_definition_name(robot_model: RobotModel) -> str:
@pytest.fixture
-def deck_definition(deck_definition_name: str) -> DeckDefinitionV4:
+def deck_definition(deck_definition_name: str) -> DeckDefinitionV5:
return load_deck(deck_definition_name, DEFAULT_DECK_DEFINITION_VERSION)
@@ -266,7 +267,7 @@ def legacy_deck_definition(deck_definition_name: str) -> DeckDefinitionV3:
@pytest.fixture()
async def hardware(
- request: pytest.FixtureRequest,
+ request: SubRequest,
decoy: Decoy,
mock_feature_flags: None,
virtual_smoothie_env: None,
@@ -308,6 +309,8 @@ def _make_ot3_pe_ctx(
use_virtual_pipettes=True,
use_virtual_modules=True,
use_virtual_gripper=True,
+ # TODO figure out if we will want to use a "real" deck config here or if we are fine with simulated
+ use_simulated_deck_config=True,
block_on_door_open=False,
),
drop_tips_after_run=False,
@@ -496,7 +499,7 @@ def _get_bundle_protocol_fixture(fixture_name: str) -> Bundle:
if fixture_name == "simple_bundle":
with open(fixture_dir / "protocol.py", "r") as f:
result["contents"] = f.read()
- with open(fixture_dir / "data.txt", "rb") as f: # type: ignore[assignment]
+ with open(fixture_dir / "data.txt", "rb") as f:
result["bundled_data"] = {"data.txt": f.read()}
with open(fixture_dir / "custom_labware.json", "r") as f:
custom_labware = json.load(f)
diff --git a/api/tests/opentrons/data/ot2_drop_tip.py b/api/tests/opentrons/data/ot2_drop_tip.py
new file mode 100644
index 00000000000..4d98ecda909
--- /dev/null
+++ b/api/tests/opentrons/data/ot2_drop_tip.py
@@ -0,0 +1,11 @@
+from opentrons import protocol_api
+
+requirements = {"robotType": "OT-2", "apiLevel": "2.16"}
+
+
+def run(ctx: protocol_api.ProtocolContext) -> None:
+ tipracks = [ctx.load_labware("opentrons_96_tiprack_300ul", "5")]
+ m300 = ctx.load_instrument("p300_multi_gen2", "right", tipracks)
+
+ m300.pick_up_tip()
+ m300.drop_tip()
diff --git a/api/tests/opentrons/drivers/asyncio/communication/test_async_serial.py b/api/tests/opentrons/drivers/asyncio/communication/test_async_serial.py
index 16399ce5d5d..d75ea01592f 100644
--- a/api/tests/opentrons/drivers/asyncio/communication/test_async_serial.py
+++ b/api/tests/opentrons/drivers/asyncio/communication/test_async_serial.py
@@ -4,7 +4,7 @@
import pytest
from mock import MagicMock, PropertyMock, call
-from serial import Serial # type: ignore[import]
+from serial import Serial # type: ignore[import-untyped]
from opentrons.drivers.asyncio.communication import AsyncSerial
diff --git a/api/tests/opentrons/drivers/asyncio/communication/test_serial_connection.py b/api/tests/opentrons/drivers/asyncio/communication/test_serial_connection.py
index 7fc16241684..0acf47af3d5 100644
--- a/api/tests/opentrons/drivers/asyncio/communication/test_serial_connection.py
+++ b/api/tests/opentrons/drivers/asyncio/communication/test_serial_connection.py
@@ -1,6 +1,7 @@
from typing import Type, Union
import pytest
+from _pytest.fixtures import SubRequest
from mock import AsyncMock, call
import mock
@@ -35,10 +36,10 @@ def ack() -> str:
params=[AsyncResponseSerialConnection, SerialConnection], # type: ignore[return]
)
async def subject(
- request: pytest.FixtureRequest, mock_serial_port: AsyncMock, ack: str
+ request: SubRequest, mock_serial_port: AsyncMock, ack: str
) -> SerialKind:
"""Create the test subject."""
- serial_class = request.param # type: ignore[attr-defined]
+ serial_class = request.param
serial_class.RETRY_WAIT_TIME = 0
if serial_class == AsyncResponseSerialConnection:
return serial_class( # type: ignore[no-any-return]
diff --git a/api/tests/opentrons/drivers/rpi_drivers/test_usb.py b/api/tests/opentrons/drivers/rpi_drivers/test_usb.py
index cad9a40b9b9..1f409181a50 100644
--- a/api/tests/opentrons/drivers/rpi_drivers/test_usb.py
+++ b/api/tests/opentrons/drivers/rpi_drivers/test_usb.py
@@ -84,7 +84,7 @@ def test_modify_module_list(revision: BoardRevision, usb_bus: USBBus):
# TODO(mc, 2022-03-01): partial patching the class under test creates
# a contaminated test subject that reduces the value of these tests
# https://github.com/testdouble/contributing-tests/wiki/Partial-Mock
- usb_bus._read_symlink = MagicMock(return_value="ttyACM1") # type: ignore[assignment]
+ usb_bus._read_symlink = MagicMock(return_value="ttyACM1") # type: ignore[method-assign]
mod_at_port_list = [
ModuleAtPort(
name="temperature module", port="dev/ot_module_temperature_module"
@@ -123,7 +123,7 @@ def test_modify_module_list(revision: BoardRevision, usb_bus: USBBus):
hub_port=None,
)
- usb_bus._read_symlink = MagicMock(return_value="ttyACM2") # type: ignore[assignment]
+ usb_bus._read_symlink = MagicMock(return_value="ttyACM2") # type: ignore[method-assign]
mod_at_port_list = [
ModuleAtPort(name="magnetic module", port="dev/ot_module_magnetic_module"),
]
@@ -161,7 +161,7 @@ def test_modify_module_list(revision: BoardRevision, usb_bus: USBBus):
)
if revision == BoardRevision.FLEX_B2:
- usb_bus._read_symlink = MagicMock(return_value="ttyACM4") # type: ignore[assignment]
+ usb_bus._read_symlink = MagicMock(return_value="ttyACM4") # type: ignore[method-assign]
mod_at_port_list = [
ModuleAtPort(
name="heater-shaker module", port="dev/ot_module_heater_shaker_module"
@@ -178,7 +178,7 @@ def test_modify_module_list(revision: BoardRevision, usb_bus: USBBus):
hub_port=None,
)
- usb_bus._read_symlink = MagicMock(return_value="ttyACM5") # type: ignore[assignment]
+ usb_bus._read_symlink = MagicMock(return_value="ttyACM5") # type: ignore[method-assign]
mod_at_port_list = [
ModuleAtPort(
name="thermocycler module", port="dev/ot_module_thermocycler_module"
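The only change in this file is the type: ignore code: newer mypy releases report assignment to a method under the dedicated method-assign code rather than the generic assignment code. A standalone sketch of the same situation, with an illustrative class rather than the real USBBus:

from unittest.mock import MagicMock


class SymlinkReader:
    def read_symlink(self) -> str:
        return "ttyACM0"


reader = SymlinkReader()
# Recent mypy flags this as [method-assign]; older releases used [assignment]
reader.read_symlink = MagicMock(return_value="ttyACM1")  # type: ignore[method-assign]
assert reader.read_symlink() == "ttyACM1"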
diff --git a/api/tests/opentrons/drivers/smoothie_drivers/test_driver.py b/api/tests/opentrons/drivers/smoothie_drivers/test_driver.py
index c94668204fe..8821c491a6d 100755
--- a/api/tests/opentrons/drivers/smoothie_drivers/test_driver.py
+++ b/api/tests/opentrons/drivers/smoothie_drivers/test_driver.py
@@ -272,7 +272,7 @@ async def test_home_flagged_axes(
expected: str,
) -> None:
"""It should only home un-homed axes."""
- smoothie.home = AsyncMock() # type: ignore[assignment]
+ smoothie.home = AsyncMock() # type: ignore[method-assign]
await smoothie.update_homed_flags(home_flags)
await smoothie.home_flagged_axes(axes_string=axis_string)
@@ -292,7 +292,7 @@ async def test_home_flagged_axes_no_call(
smoothie: driver_3_0.SmoothieDriver, home_flags: Dict[str, bool], axis_string: str
) -> None:
"""It should not home homed axes."""
- smoothie.home = AsyncMock() # type: ignore[assignment]
+ smoothie.home = AsyncMock() # type: ignore[method-assign]
await smoothie.update_homed_flags(home_flags)
await smoothie.home_flagged_axes(axes_string=axis_string)
diff --git a/api/tests/opentrons/hardware_control/backends/test_ot3_controller.py b/api/tests/opentrons/hardware_control/backends/test_ot3_controller.py
index 79baa8d868a..ed639444b3d 100644
--- a/api/tests/opentrons/hardware_control/backends/test_ot3_controller.py
+++ b/api/tests/opentrons/hardware_control/backends/test_ot3_controller.py
@@ -1,9 +1,14 @@
import mock
import pytest
from decoy import Decoy
+import asyncio
-from contextlib import nullcontext as does_not_raise
+from contextlib import (
+ nullcontext as does_not_raise,
+ AbstractContextManager,
+)
from typing import (
+ cast,
Dict,
List,
Optional,
@@ -30,7 +35,12 @@
MessageListenerCallbackFilter,
CanMessenger,
)
-from opentrons.config.types import OT3Config, GantryLoad, LiquidProbeSettings
+from opentrons.config.types import (
+ OT3Config,
+ GantryLoad,
+ LiquidProbeSettings,
+ OutputOptions,
+)
from opentrons.config.robot_configs import build_config_ot3
from opentrons_hardware.firmware_bindings.arbitration_id import ArbitrationId
from opentrons_hardware.firmware_bindings.constants import (
@@ -51,6 +61,7 @@
UpdateState,
EstopState,
CurrentConfig,
+ InstrumentProbeType,
)
from opentrons.hardware_control.errors import (
InvalidPipetteName,
@@ -77,12 +88,13 @@
GripperInformation,
)
-from opentrons.hardware_control.estop_state import EstopStateMachine
+from opentrons.hardware_control.backends.estop_state import EstopStateMachine
from opentrons_shared_data.errors.exceptions import (
EStopActivatedError,
EStopNotPresentError,
FirmwareUpdateRequiredError,
+ FailedGripperPickupError,
)
from opentrons_hardware.hardware_control.move_group_runner import MoveGroupRunner
@@ -153,9 +165,9 @@ def controller(
mock_config: OT3Config,
mock_can_driver: AbstractCanDriver,
mock_eeprom_driver: EEPROMDriver,
-) -> Iterator[OT3Controller]:
+) -> OT3Controller:
with (mock.patch("opentrons.hardware_control.backends.ot3controller.OT3GPIO")):
- yield OT3Controller(
+ return OT3Controller(
mock_config, mock_can_driver, eeprom_driver=mock_eeprom_driver
)
@@ -170,11 +182,11 @@ def fake_liquid_settings() -> LiquidProbeSettings:
plunger_speed=10,
sensor_threshold_pascals=15,
expected_liquid_height=109,
- log_pressure=False,
+ output_option=OutputOptions.can_bus_only,
aspirate_while_sensing=False,
auto_zero_sensor=False,
num_baseline_reads=8,
- data_file="fake_data_file",
+ data_files={InstrumentProbeType.PRIMARY: "fake_file_name"},
)
@@ -197,6 +209,25 @@ def mock_move_group_run() -> Iterator[mock.AsyncMock]:
yield mock_mgr_run
+@pytest.fixture
+def mock_check_overpressure() -> Iterator[mock.AsyncMock]:
+ with mock.patch(
+ "opentrons.hardware_control.backends.ot3controller.check_overpressure",
+ autospec=True,
+ ) as mock_check_overpressure:
+ queue: asyncio.Queue[Any] = asyncio.Queue()
+
+ class FakeOverpressure:
+ async def __aenter__(self) -> asyncio.Queue[Any]:
+ return queue
+
+ async def __aexit__(self, *args: Any, **kwargs: Any) -> None:
+ pass
+
+ mock_check_overpressure.return_value = lambda: FakeOverpressure()
+ yield mock_check_overpressure
+
+
def _device_info_entry(subsystem: SubSystem) -> Tuple[SubSystem, DeviceInfoCache]:
return subsystem, DeviceInfoCache(
target=subsystem_to_target(subsystem),
@@ -337,11 +368,12 @@ async def test_home_execute(
controller: OT3Controller,
axes: List[Axis],
mock_present_devices: None,
+ mock_check_overpressure: None,
) -> None:
config = {"run.side_effect": move_group_run_side_effect(controller, axes)}
with mock.patch( # type: ignore [call-overload]
"opentrons.hardware_control.backends.ot3controller.MoveGroupRunner",
- spec=mock.Mock(MoveGroupRunner),
+ spec=MoveGroupRunner,
**config
) as mock_runner:
present_axes = set(ax for ax in axes if controller.axis_is_present(ax))
@@ -349,7 +381,6 @@ async def test_home_execute(
# nothing has been homed
assert not controller._motor_status
await controller.home(axes, GantryLoad.LOW_THROUGHPUT)
-
all_groups = [
group
for arg in mock_runner.call_args_list
@@ -392,7 +423,7 @@ async def test_home_gantry_order(
) -> None:
with mock.patch(
"opentrons.hardware_control.backends.ot3controller.MoveGroupRunner",
- spec=mock.Mock(MoveGroupRunner),
+ spec=MoveGroupRunner,
) as mock_runner:
controller._build_home_gantry_z_runner(axes, GantryLoad.LOW_THROUGHPUT)
has_mount = len(set(Axis.ot3_mount_axes()) & set(axes)) > 0
@@ -444,6 +475,7 @@ async def test_home_only_present_devices(
mock_move_group_run: mock.AsyncMock,
axes: List[Axis],
mock_present_devices: None,
+ mock_check_overpressure: None,
) -> None:
starting_position = {
NodeId.head_l: 20.0,
@@ -574,7 +606,7 @@ async def test_gripper_home_jaw(
async def test_gripper_grip(
controller: OT3Controller, mock_move_group_run: mock.AsyncMock
) -> None:
- await controller.gripper_grip_jaw(duty_cycle=50)
+ await controller.gripper_grip_jaw(duty_cycle=50, expected_displacement=0)
for call in mock_move_group_run.call_args_list:
move_group_runner = call[0][0]
for move_group in move_group_runner._move_groups:
@@ -689,7 +721,7 @@ async def test_liquid_probe(
mount_speed=fake_liquid_settings.mount_speed,
plunger_speed=fake_liquid_settings.plunger_speed,
threshold_pascals=fake_liquid_settings.sensor_threshold_pascals,
- log_pressure=fake_liquid_settings.log_pressure,
+ output_option=fake_liquid_settings.output_option,
)
move_groups = (mock_move_group_run.call_args_list[0][0][0]._move_groups)[0][0]
head_node = axis_to_node(Axis.by_mount(mount))
@@ -1059,19 +1091,6 @@ async def fake_src(
await controller.set_active_current({Axis.X: 2})
-async def test_monitor_pressure(
- controller: OT3Controller,
- mock_move_group_run: mock.AsyncMock,
- mock_present_devices: None,
-) -> None:
- mount = NodeId.pipette_left
- mock_move_group_run.side_effect = move_group_run_side_effect(controller, [Axis.P_L])
- async with controller._monitor_overpressure([mount]):
- await controller.home([Axis.P_L], GantryLoad.LOW_THROUGHPUT)
-
- mock_move_group_run.assert_called_once()
-
-
@pytest.mark.parametrize(
"estop_state, expectation",
[
@@ -1087,6 +1106,7 @@ async def test_requires_estop(
decoy: Decoy,
estop_state: EstopState,
expectation: ContextManager[None],
+ mock_check_overpressure: None,
) -> None:
"""Test that the estop state machine raises properly."""
decoy.when(mock_estop_state_machine.state).then_return(estop_state)
@@ -1209,3 +1229,44 @@ async def test_engage_motors(
)
else:
set_tip_axes.assert_not_awaited()
+
+
+@pytest.mark.parametrize(
+ "expected_grip_width,actual_grip_width,wider,narrower,allowed_error,hard_max,hard_min,raise_error",
+ [
+ (80, 80, 0, 0, 0, 92, 60, False),
+ (80, 81, 0, 0, 0, 92, 60, True),
+ (80, 79, 0, 0, 0, 92, 60, True),
+ (80, 81, 1, 0, 0, 92, 60, False),
+ (80, 79, 0, 1, 0, 92, 60, False),
+ (80, 81, 0, 0, 1, 92, 60, False),
+ (80, 79, 0, 0, 1, 92, 60, False),
+ (80, 45, 40, 0, 1, 92, 60, True),
+ (80, 100, 0, 40, 0, 92, 60, True),
+ ],
+)
+def test_grip_error_detection(
+ controller: OT3Controller,
+ expected_grip_width: float,
+ actual_grip_width: float,
+ wider: float,
+ narrower: float,
+ allowed_error: float,
+ hard_max: float,
+ hard_min: float,
+ raise_error: bool,
+) -> None:
+ context = cast(
+ AbstractContextManager[None],
+ pytest.raises(FailedGripperPickupError) if raise_error else does_not_raise(),
+ )
+ with context:
+ controller.check_gripper_position_within_bounds(
+ expected_grip_width,
+ wider,
+ narrower,
+ actual_grip_width,
+ allowed_error,
+ hard_max,
+ hard_min,
+ )
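The new mock_check_overpressure fixture above fakes check_overpressure as a factory returning an async context manager that yields an asyncio queue of readings, so code paths that monitor pressure can run without hardware. A self-contained sketch of that pattern with purely illustrative names:

import asyncio
from typing import Any


class FakeOverpressure:
    """Async context manager stand-in that yields a queue instead of touching sensors."""

    def __init__(self) -> None:
        self.queue: "asyncio.Queue[Any]" = asyncio.Queue()

    async def __aenter__(self) -> "asyncio.Queue[Any]":
        return self.queue

    async def __aexit__(self, *args: Any) -> None:
        pass


def fake_checker() -> FakeOverpressure:
    # Mirrors mock_check_overpressure.return_value = lambda: FakeOverpressure()
    return FakeOverpressure()


async def main() -> None:
    async with fake_checker() as readings:
        readings.put_nowait({"pascals": 0.0})
        print(await readings.get())


asyncio.run(main())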
diff --git a/api/tests/opentrons/hardware_control/test_ot3_estop_state.py b/api/tests/opentrons/hardware_control/backends/test_ot3_estop_state.py
similarity index 94%
rename from api/tests/opentrons/hardware_control/test_ot3_estop_state.py
rename to api/tests/opentrons/hardware_control/backends/test_ot3_estop_state.py
index af660606b9a..1d4a86a1343 100644
--- a/api/tests/opentrons/hardware_control/test_ot3_estop_state.py
+++ b/api/tests/opentrons/hardware_control/backends/test_ot3_estop_state.py
@@ -1,8 +1,8 @@
import pytest
from decoy import Decoy
-from typing import List, Tuple, Optional
+from typing import List, Tuple, Optional, cast
-from opentrons.hardware_control.estop_state import EstopStateMachine
+from opentrons.hardware_control.backends.estop_state import EstopStateMachine
from opentrons_hardware.hardware_control.estop.detector import (
EstopSummary,
EstopDetector,
@@ -59,7 +59,7 @@ async def test_estop_state_no_detector(
subject.subscribe_to_detector(detector=mock_estop_detector)
- assert subject.state == EstopState.PHYSICALLY_ENGAGED
+ assert cast(EstopState, subject.state) == EstopState.PHYSICALLY_ENGAGED
assert (
subject.get_physical_status(EstopAttachLocation.LEFT)
== EstopPhysicalStatus.NOT_PRESENT
@@ -73,11 +73,9 @@ async def test_estop_state_no_detector(
subject.subscribe_to_detector(detector=mock_estop_detector)
decoy.verify(
- [
- mock_estop_detector.add_listener(subject.detector_listener),
- mock_estop_detector.remove_listener(subject.detector_listener),
- mock_estop_detector.add_listener(subject.detector_listener),
- ]
+ mock_estop_detector.add_listener(subject.detector_listener),
+ mock_estop_detector.remove_listener(subject.detector_listener),
+ mock_estop_detector.add_listener(subject.detector_listener),
)
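The decoy.verify change above matters because verify takes each expected interaction (a "rehearsal") as its own positional argument, not a single list. A small sketch of the corrected call shape, assuming decoy's pytest plugin supplies the decoy fixture as it does in these tests (the Listener class is illustrative):

from decoy import Decoy


class Listener:
    def add(self, name: str) -> None:
        raise NotImplementedError()

    def remove(self, name: str) -> None:
        raise NotImplementedError()


def test_verify_each_call(decoy: Decoy) -> None:
    listener = decoy.mock(cls=Listener)

    listener.add("estop")
    listener.remove("estop")

    # One rehearsal per argument, matching the fixed test above
    decoy.verify(
        listener.add("estop"),
        listener.remove("estop"),
    )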
diff --git a/api/tests/opentrons/hardware_control/backends/test_ot3_status_bar.py b/api/tests/opentrons/hardware_control/backends/test_ot3_status_bar.py
new file mode 100644
index 00000000000..9e23d545961
--- /dev/null
+++ b/api/tests/opentrons/hardware_control/backends/test_ot3_status_bar.py
@@ -0,0 +1,43 @@
+import pytest
+from decoy import Decoy
+from opentrons.hardware_control.types import StatusBarState
+from opentrons.hardware_control.backends.status_bar_state import (
+ StatusBarStateController,
+)
+from opentrons_hardware.hardware_control.status_bar import StatusBar
+
+
+@pytest.fixture
+def mock_status_bar_controller(decoy: Decoy) -> StatusBar:
+ return decoy.mock(cls=StatusBar)
+
+
+@pytest.fixture
+def subject(mock_status_bar_controller: StatusBar) -> StatusBarStateController:
+ return StatusBarStateController(mock_status_bar_controller)
+
+
+@pytest.mark.parametrize(argnames=["enabled"], argvalues=[[True], [False]])
+async def test_status_bar_interface(
+ subject: StatusBarStateController, enabled: bool
+) -> None:
+ """Test setting status bar statuses and make sure the cached status is correct."""
+ await subject.set_enabled(enabled)
+
+ settings = {
+ StatusBarState.IDLE: StatusBarState.IDLE,
+ StatusBarState.RUNNING: StatusBarState.RUNNING,
+ StatusBarState.PAUSED: StatusBarState.PAUSED,
+ StatusBarState.HARDWARE_ERROR: StatusBarState.HARDWARE_ERROR,
+ StatusBarState.SOFTWARE_ERROR: StatusBarState.SOFTWARE_ERROR,
+ StatusBarState.CONFIRMATION: StatusBarState.IDLE,
+ StatusBarState.RUN_COMPLETED: StatusBarState.RUN_COMPLETED,
+ StatusBarState.UPDATING: StatusBarState.UPDATING,
+ StatusBarState.ACTIVATION: StatusBarState.IDLE,
+ StatusBarState.DISCO: StatusBarState.IDLE,
+ StatusBarState.OFF: StatusBarState.OFF,
+ }
+
+ for setting, response in settings.items():
+ await subject.set_status_bar_state(state=setting)
+ assert subject.get_current_state() == response
diff --git a/api/tests/opentrons/hardware_control/backends/test_ot3_tip_presence_manager.py b/api/tests/opentrons/hardware_control/backends/test_ot3_tip_presence_manager.py
index 543f7b3b400..6ea39738fc2 100644
--- a/api/tests/opentrons/hardware_control/backends/test_ot3_tip_presence_manager.py
+++ b/api/tests/opentrons/hardware_control/backends/test_ot3_tip_presence_manager.py
@@ -2,7 +2,7 @@
from typing import AsyncIterator, Dict
from decoy import Decoy
-from opentrons.hardware_control.types import OT3Mount, TipStateType
+from opentrons.hardware_control.types import OT3Mount, TipStateType, InstrumentProbeType
from opentrons.hardware_control.backends.tip_presence_manager import TipPresenceManager
from opentrons_hardware.hardware_control.tip_presence import (
TipDetector,
@@ -110,6 +110,51 @@ async def test_get_tip_status_for_high_throughput(
result == expected_type
+@pytest.mark.parametrize(
+ "tip_presence,expected_type,sensor_to_look_at",
+ [
+ (
+ {SensorId.S0: False, SensorId.S1: False},
+ TipStateType.ABSENT,
+ InstrumentProbeType.PRIMARY,
+ ),
+ (
+ {SensorId.S0: True, SensorId.S1: True},
+ TipStateType.PRESENT,
+ InstrumentProbeType.SECONDARY,
+ ),
+ (
+ {SensorId.S0: False, SensorId.S1: True},
+ TipStateType.ABSENT,
+ InstrumentProbeType.PRIMARY,
+ ),
+ (
+ {SensorId.S0: False, SensorId.S1: True},
+ TipStateType.PRESENT,
+ InstrumentProbeType.SECONDARY,
+ ),
+ ],
+)
+async def test_allow_different_tip_states_ht(
+ subject: TipPresenceManager,
+ tip_detector_controller: TipDetectorController,
+ tip_presence: Dict[SensorId, bool],
+ expected_type: TipStateType,
+ sensor_to_look_at: InstrumentProbeType,
+) -> None:
+ mount = OT3Mount.LEFT
+ await tip_detector_controller.retrieve_tip_status_highthroughput(tip_presence)
+
+ result = await subject.get_tip_status(mount, sensor_to_look_at)
+ assert result == expected_type
+
+ # if sensor_to_look_at is not used, different tip states
+ # should result in an UnmatchedTipPresenceStates error
+ if len(set(tip_presence[t] for t in tip_presence)) > 1:
+ with pytest.raises(UnmatchedTipPresenceStates):
+ result = await subject.get_tip_status(mount)
+
+
@pytest.mark.parametrize(
"tip_presence",
[
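The new high-throughput cases above hinge on whether the two tip sensors agree: a specific sensor can be read even when they disagree, while reading without one should raise UnmatchedTipPresenceStates. The disagreement check itself reduces to a one-liner; a tiny illustration with hypothetical names:

from typing import Dict


def sensors_disagree(readings: Dict[str, bool]) -> bool:
    # More than one distinct reading means the sensors do not agree
    return len(set(readings.values())) > 1


assert sensors_disagree({"S0": False, "S1": True})
assert not sensors_disagree({"S0": True, "S1": True})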
diff --git a/api/tests/opentrons/hardware_control/backends/test_ot3_utils.py b/api/tests/opentrons/hardware_control/backends/test_ot3_utils.py
index 9cf0de99f44..efdd64392d1 100644
--- a/api/tests/opentrons/hardware_control/backends/test_ot3_utils.py
+++ b/api/tests/opentrons/hardware_control/backends/test_ot3_utils.py
@@ -72,7 +72,7 @@ def test_filter_zero_duration_step() -> None:
Axis.P_R: 0,
}
moves = [Move.build_dummy([Axis.X, Axis.Y, Axis.Z_L, Axis.Z_R, Axis.P_L])]
- for block in moves[0].blocks:
+ for block in (moves[0].blocks[0], moves[0].blocks[1]):
block.distance = f64(25.0)
block.time = f64(1.0)
block.initial_speed = f64(25.0)
@@ -84,7 +84,7 @@ def test_filter_zero_duration_step() -> None:
moves=moves,
present_nodes=present_nodes,
)
- assert len(move_group) == 3
+ assert len(move_group) == 2
for step in move_group:
assert set(present_nodes) == set(step.keys())
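The updated expectation above (a move group of 2 instead of 3) follows from only the first two dummy blocks being given a non-zero time, so the remaining zero-duration step is filtered out. Schematically, with made-up dictionaries rather than the real move-group step type:

steps = [
    {"duration": 1.0, "distance": 25.0},
    {"duration": 1.0, "distance": 25.0},
    {"duration": 0.0, "distance": 0.0},
]
move_group = [step for step in steps if step["duration"] > 0.0]
assert len(move_group) == 2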
diff --git a/api/tests/opentrons/hardware_control/instruments/test_instrument_calibration.py b/api/tests/opentrons/hardware_control/instruments/test_instrument_calibration.py
index 6dfc646547e..d1f705d596f 100644
--- a/api/tests/opentrons/hardware_control/instruments/test_instrument_calibration.py
+++ b/api/tests/opentrons/hardware_control/instruments/test_instrument_calibration.py
@@ -3,7 +3,7 @@
from typing import Union, cast
import pytest
-from pytest_lazyfixture import lazy_fixture # type: ignore[import]
+from pytest_lazyfixture import lazy_fixture # type: ignore[import-untyped]
from decoy import Decoy
from opentrons_shared_data.labware.dev_types import (
@@ -81,11 +81,11 @@ def test_load_tip_length(
tip_length_data = v1_models.TipLengthModel(
tipLength=1.23,
lastModified=datetime(year=2023, month=1, day=1),
- uri=LabwareUri("def456"),
- source=subject.types.SourceType.factory,
+ definitionHash="asdfghjk",
+ source=subject.SourceType.factory,
status=v1_models.CalibrationStatus(
markedBad=True,
- source=subject.types.SourceType.user,
+ source=subject.SourceType.user,
markedAt=datetime(year=2023, month=2, day=2),
),
)
@@ -99,6 +99,9 @@ def test_load_tip_length(
decoy.when(calibration_storage.helpers.hash_labware_def(tip_rack_dict)).then_return(
"asdfghjk"
)
+ decoy.when(
+ calibration_storage.helpers.uri_from_definition(tip_rack_dict)
+ ).then_return(LabwareUri("def456"))
result = subject.load_tip_length_for_pipette(
pipette_id="abc123", tiprack=tip_rack_definition
@@ -106,14 +109,14 @@ def test_load_tip_length(
assert result == subject.TipLengthCalibration(
tip_length=1.23,
- source=subject.types.SourceType.factory,
+ source=subject.SourceType.factory,
pipette="abc123",
tiprack="asdfghjk",
last_modified=datetime(year=2023, month=1, day=1),
uri=LabwareUri("def456"),
- status=subject.types.CalibrationStatus(
+ status=subject.CalibrationStatus(
markedBad=True,
- source=subject.types.SourceType.user,
+ source=subject.SourceType.user,
markedAt=datetime(year=2023, month=2, day=2),
),
)
@@ -131,9 +134,9 @@ def test_load_tip_length(
(top_types.Point(0, 1.0, 1.5), top_types.Point(-1, 0, 0.2), True),
# If both points are non-zero but at least one element is more than
# the range different the test should fail
- (top_types.Point(0.1, -1, 1.5), top_types.Point(1.7, 0, 0.2), False),
- (top_types.Point(0.1, -1, 1.5), top_types.Point(0.6, 0.6, 1.3), False),
- (top_types.Point(0.1, -1, 1.5), top_types.Point(-0.2, -0.1, 5), False),
+ (top_types.Point(0.1, -1, 4.3), top_types.Point(1.7, 0, 0.2), False),
+ (top_types.Point(0.1, -3.2, 1.5), top_types.Point(0.6, 0.9, 1.3), False),
+ (top_types.Point(0.1, -1, 1.5), top_types.Point(-0.2, -0.1, 6), False),
],
)
def test_instrument_consistency_check_ot3(
@@ -148,4 +151,4 @@ def test_instrument_consistency_check_ot3(
top_types.Mount.LEFT: left,
top_types.Mount.RIGHT: right,
}
- assert result[0].limit == 1.5
+ assert result[0].limit == 4.0
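The added decoy stub above reflects that tip-length records now carry a definitionHash, with the labware URI derived from the tip-rack definition at load time, so both helpers have to be stubbed. A minimal when/then_return sketch in the same style (the Helpers class is illustrative, not the real calibration_storage API):

from decoy import Decoy


class Helpers:
    def hash_labware_def(self, definition: dict) -> str:
        raise NotImplementedError()

    def uri_from_definition(self, definition: dict) -> str:
        raise NotImplementedError()


def test_stubbing_sketch(decoy: Decoy) -> None:
    helpers = decoy.mock(cls=Helpers)
    tip_rack = {"metadata": {"displayName": "fake tip rack"}}

    decoy.when(helpers.hash_labware_def(tip_rack)).then_return("asdfghjk")
    decoy.when(helpers.uri_from_definition(tip_rack)).then_return("def456")

    assert helpers.hash_labware_def(tip_rack) == "asdfghjk"
    assert helpers.uri_from_definition(tip_rack) == "def456"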
diff --git a/api/tests/opentrons/hardware_control/instruments/test_nozzle_manager.py b/api/tests/opentrons/hardware_control/instruments/test_nozzle_manager.py
index 1761e59a6ec..bd521a6e8a2 100644
--- a/api/tests/opentrons/hardware_control/instruments/test_nozzle_manager.py
+++ b/api/tests/opentrons/hardware_control/instruments/test_nozzle_manager.py
@@ -1,199 +1,919 @@
import pytest
-from typing import Dict, List, ContextManager, Tuple
+from typing import Dict, List, Tuple, Union, Iterator, cast
-from contextlib import nullcontext as does_not_raise
from opentrons.hardware_control import nozzle_manager
from opentrons.types import Point
-from opentrons.hardware_control.types import CriticalPoint
-
-
-def build_nozzle_manger(
- nozzle_map: Dict[str, List[float]]
-) -> nozzle_manager.NozzleConfigurationManager:
- return nozzle_manager.NozzleConfigurationManager.build_from_nozzlemap(
- nozzle_map, pick_up_current_map={1: 0.1}
- )
-
-
-NINETY_SIX_CHANNEL_MAP = {
- "A1": [-36.0, -25.5, -259.15],
- "A2": [-27.0, -25.5, -259.15],
- "A3": [-18.0, -25.5, -259.15],
- "A4": [-9.0, -25.5, -259.15],
- "A5": [0.0, -25.5, -259.15],
- "A6": [9.0, -25.5, -259.15],
- "A7": [18.0, -25.5, -259.15],
- "A8": [27.0, -25.5, -259.15],
- "A9": [36.0, -25.5, -259.15],
- "A10": [45.0, -25.5, -259.15],
- "A11": [54.0, -25.5, -259.15],
- "A12": [63.0, -25.5, -259.15],
- "B1": [-36.0, -34.5, -259.15],
- "B2": [-27.0, -34.5, -259.15],
- "B3": [-18.0, -34.5, -259.15],
- "B4": [-9.0, -34.5, -259.15],
- "B5": [0.0, -34.5, -259.15],
- "B6": [9.0, -34.5, -259.15],
- "B7": [18.0, -34.5, -259.15],
- "B8": [27.0, -34.5, -259.15],
- "B9": [36.0, -34.5, -259.15],
- "B10": [45.0, -34.5, -259.15],
- "B11": [54.0, -34.5, -259.15],
- "B12": [63.0, -34.5, -259.15],
- "C1": [-36.0, -43.5, -259.15],
- "C2": [-27.0, -43.5, -259.15],
- "C3": [-18.0, -43.5, -259.15],
- "C4": [-9.0, -43.5, -259.15],
- "C5": [0.0, -43.5, -259.15],
- "C6": [9.0, -43.5, -259.15],
- "C7": [18.0, -43.5, -259.15],
- "C8": [27.0, -43.5, -259.15],
- "C9": [36.0, -43.5, -259.15],
- "C10": [45.0, -43.5, -259.15],
- "C11": [54.0, -43.5, -259.15],
- "C12": [63.0, -43.5, -259.15],
- "D1": [-36.0, -52.5, -259.15],
- "D2": [-27.0, -52.5, -259.15],
- "D3": [-18.0, -52.5, -259.15],
- "D4": [-9.0, -52.5, -259.15],
- "D5": [0.0, -52.5, -259.15],
- "D6": [9.0, -52.5, -259.15],
- "D7": [18.0, -52.5, -259.15],
- "D8": [27.0, -52.5, -259.15],
- "D9": [36.0, -52.5, -259.15],
- "D10": [45.0, -52.5, -259.15],
- "D11": [54.0, -52.5, -259.15],
- "D12": [63.0, -52.5, -259.15],
- "E1": [-36.0, -61.5, -259.15],
- "E2": [-27.0, -61.5, -259.15],
- "E3": [-18.0, -61.5, -259.15],
- "E4": [-9.0, -61.5, -259.15],
- "E5": [0.0, -61.5, -259.15],
- "E6": [9.0, -61.5, -259.15],
- "E7": [18.0, -61.5, -259.15],
- "E8": [27.0, -61.5, -259.15],
- "E9": [36.0, -61.5, -259.15],
- "E10": [45.0, -61.5, -259.15],
- "E11": [54.0, -61.5, -259.15],
- "E12": [63.0, -61.5, -259.15],
- "F1": [-36.0, -70.5, -259.15],
- "F2": [-27.0, -70.5, -259.15],
- "F3": [-18.0, -70.5, -259.15],
- "F4": [-9.0, -70.5, -259.15],
- "F5": [0.0, -70.5, -259.15],
- "F6": [9.0, -70.5, -259.15],
- "F7": [18.0, -70.5, -259.15],
- "F8": [27.0, -70.5, -259.15],
- "F9": [36.0, -70.5, -259.15],
- "F10": [45.0, -70.5, -259.15],
- "F11": [54.0, -70.5, -259.15],
- "F12": [63.0, -70.5, -259.15],
- "G1": [-36.0, -79.5, -259.15],
- "G2": [-27.0, -79.5, -259.15],
- "G3": [-18.0, -79.5, -259.15],
- "G4": [-9.0, -79.5, -259.15],
- "G5": [0.0, -79.5, -259.15],
- "G6": [9.0, -79.5, -259.15],
- "G7": [18.0, -79.5, -259.15],
- "G8": [27.0, -79.5, -259.15],
- "G9": [36.0, -79.5, -259.15],
- "G10": [45.0, -79.5, -259.15],
- "G11": [54.0, -79.5, -259.15],
- "G12": [63.0, -79.5, -259.15],
- "H1": [-36.0, -88.5, -259.15],
- "H2": [-27.0, -88.5, -259.15],
- "H3": [-18.0, -88.5, -259.15],
- "H4": [-9.0, -88.5, -259.15],
- "H5": [0.0, -88.5, -259.15],
- "H6": [9.0, -88.5, -259.15],
- "H7": [18.0, -88.5, -259.15],
- "H8": [27.0, -88.5, -259.15],
- "H9": [36.0, -88.5, -259.15],
- "H10": [45.0, -88.5, -259.15],
- "H11": [54.0, -88.5, -259.15],
- "H12": [63.0, -88.5, -259.15],
-}
+
+from opentrons_shared_data.pipette.load_data import load_definition
+from opentrons_shared_data.pipette.types import (
+ PipetteModelType,
+ PipetteChannelType,
+ PipetteVersionType,
+)
+from opentrons_shared_data.pipette.pipette_definition import PipetteConfigurations
+
+
+@pytest.mark.parametrize(
+ "pipette_details",
+ [
+ (PipetteModelType.p10, PipetteVersionType(major=1, minor=3)),
+ (PipetteModelType.p20, PipetteVersionType(major=2, minor=0)),
+ (PipetteModelType.p50, PipetteVersionType(major=3, minor=4)),
+ (PipetteModelType.p300, PipetteVersionType(major=2, minor=1)),
+ (PipetteModelType.p1000, PipetteVersionType(major=3, minor=5)),
+ ],
+)
+def test_single_pipettes_always_full(
+ pipette_details: Tuple[PipetteModelType, PipetteVersionType]
+) -> None:
+ config = load_definition(
+ pipette_details[0], PipetteChannelType.SINGLE_CHANNEL, pipette_details[1]
+ )
+ subject = nozzle_manager.NozzleConfigurationManager.build_from_config(config)
+ assert (
+ subject.current_configuration.configuration
+ == nozzle_manager.NozzleConfigurationType.FULL
+ )
+
+ subject.update_nozzle_configuration("A1", "A1", "A1")
+ assert (
+ subject.current_configuration.configuration
+ == nozzle_manager.NozzleConfigurationType.FULL
+ )
+
+ subject.reset_to_default_configuration()
+ assert (
+ subject.current_configuration.configuration
+ == nozzle_manager.NozzleConfigurationType.FULL
+ )
+
+
+@pytest.mark.parametrize(
+ "pipette_details",
+ [
+ (PipetteModelType.p10, PipetteVersionType(major=1, minor=3)),
+ (PipetteModelType.p20, PipetteVersionType(major=2, minor=0)),
+ (PipetteModelType.p50, PipetteVersionType(major=3, minor=4)),
+ (PipetteModelType.p300, PipetteVersionType(major=2, minor=1)),
+ (PipetteModelType.p1000, PipetteVersionType(major=3, minor=5)),
+ ],
+)
+def test_single_pipette_map_entries(
+ pipette_details: Tuple[PipetteModelType, PipetteVersionType]
+) -> None:
+ config = load_definition(
+ pipette_details[0], PipetteChannelType.SINGLE_CHANNEL, pipette_details[1]
+ )
+ subject = nozzle_manager.NozzleConfigurationManager.build_from_config(config)
+
+ def test_map_entries(nozzlemap: nozzle_manager.NozzleMap) -> None:
+ assert nozzlemap.back_left == "A1"
+ assert nozzlemap.front_right == "A1"
+ assert list(nozzlemap.map_store.keys()) == ["A1"]
+ assert list(nozzlemap.rows.keys()) == ["A"]
+ assert list(nozzlemap.columns.keys()) == ["1"]
+ assert nozzlemap.rows["A"] == ["A1"]
+ assert nozzlemap.columns["1"] == ["A1"]
+ assert nozzlemap.tip_count == 1
+
+ test_map_entries(subject.current_configuration)
+ subject.update_nozzle_configuration("A1", "A1", "A1")
+ test_map_entries(subject.current_configuration)
+ subject.reset_to_default_configuration()
+ test_map_entries(subject.current_configuration)
@pytest.mark.parametrize(
- argnames=["nozzle_map", "critical_point_configuration", "expected"],
- argvalues=[
- [
- {
- "A1": [-8.0, -16.0, -259.15],
- "B1": [-8.0, -25.0, -259.15],
- "C1": [-8.0, -34.0, -259.15],
- "D1": [-8.0, -43.0, -259.15],
- "E1": [-8.0, -52.0, -259.15],
- "F1": [-8.0, -61.0, -259.15],
- "G1": [-8.0, -70.0, -259.15],
- "H1": [-8.0, -79.0, -259.15],
- },
- CriticalPoint.XY_CENTER,
- Point(-8.0, -47.5, -259.15),
- ],
- [
- NINETY_SIX_CHANNEL_MAP,
- CriticalPoint.XY_CENTER,
- Point(13.5, -57.0, -259.15),
- ],
- [
- {"A1": [1, 1, 1]},
- CriticalPoint.FRONT_NOZZLE,
- Point(1, 1, 1),
- ],
+ "pipette_details",
+ [
+ (PipetteModelType.p10, PipetteVersionType(major=1, minor=3)),
+ (PipetteModelType.p20, PipetteVersionType(major=2, minor=0)),
+ (PipetteModelType.p50, PipetteVersionType(major=3, minor=4)),
+ (PipetteModelType.p300, PipetteVersionType(major=2, minor=1)),
+ (PipetteModelType.p1000, PipetteVersionType(major=3, minor=5)),
],
)
-def test_update_nozzles_with_critical_points(
- nozzle_map: Dict[str, List[float]],
- critical_point_configuration: CriticalPoint,
- expected: List[float],
+def test_single_pipette_map_geometry(
+ pipette_details: Tuple[PipetteModelType, PipetteVersionType]
) -> None:
- subject = build_nozzle_manger(nozzle_map)
- new_cp = subject.critical_point_with_tip_length(critical_point_configuration)
- assert new_cp == expected
+ config = load_definition(
+ pipette_details[0], PipetteChannelType.SINGLE_CHANNEL, pipette_details[1]
+ )
+ subject = nozzle_manager.NozzleConfigurationManager.build_from_config(config)
+
+ def test_map_geometry(nozzlemap: nozzle_manager.NozzleMap) -> None:
+ assert nozzlemap.xy_center_offset == Point(*config.nozzle_map["A1"])
+ assert nozzlemap.y_center_offset == Point(*config.nozzle_map["A1"])
+ assert nozzlemap.front_nozzle_offset == Point(*config.nozzle_map["A1"])
+ assert nozzlemap.starting_nozzle_offset == Point(*config.nozzle_map["A1"])
+
+ test_map_geometry(subject.current_configuration)
+ subject.update_nozzle_configuration("A1", "A1", "A1")
+ test_map_geometry(subject.current_configuration)
+ subject.reset_to_default_configuration()
+ test_map_geometry(subject.current_configuration)
@pytest.mark.parametrize(
- argnames=["nozzle_map", "updated_nozzle_configuration", "exception", "expected_cp"],
- argvalues=[
- [
- {
- "A1": [0.0, 31.5, 0.8],
- "B1": [0.0, 22.5, 0.8],
- "C1": [0.0, 13.5, 0.8],
- "D1": [0.0, 4.5, 0.8],
- "E1": [0.0, -4.5, 0.8],
- "F1": [0.0, -13.5, 0.8],
- "G1": [0.0, -22.5, 0.8],
- "H1": [0.0, -31.5, 0.8],
- },
- ("D1", "H1"),
- does_not_raise(),
- Point(0.0, 4.5, 0.8),
- ],
- [
- {"A1": [1, 1, 1]},
- ("A1", "D1"),
- pytest.raises(nozzle_manager.IncompatibleNozzleConfiguration),
- Point(1, 1, 1),
- ],
- [
- NINETY_SIX_CHANNEL_MAP,
- ("A12", "H12"),
- does_not_raise(),
- Point(x=63.0, y=-25.5, z=-259.15),
- ],
+ "pipette_details",
+ [
+ (PipetteModelType.p10, PipetteVersionType(major=1, minor=3)),
+ (PipetteModelType.p20, PipetteVersionType(major=2, minor=0)),
+ (PipetteModelType.p50, PipetteVersionType(major=3, minor=4)),
+ (PipetteModelType.p300, PipetteVersionType(major=2, minor=1)),
+ (PipetteModelType.p1000, PipetteVersionType(major=3, minor=5)),
],
)
-def test_update_nozzle_configuration(
- nozzle_map: Dict[str, List[float]],
- updated_nozzle_configuration: Tuple[str, str],
- exception: ContextManager[None],
- expected_cp: List[float],
+def test_multi_config_identification(
+ pipette_details: Tuple[PipetteModelType, PipetteVersionType]
) -> None:
- subject = build_nozzle_manger(nozzle_map)
- with exception:
- subject.update_nozzle_configuration(*updated_nozzle_configuration)
- assert subject.starting_nozzle_offset == expected_cp
+ config = load_definition(
+ pipette_details[0], PipetteChannelType.EIGHT_CHANNEL, pipette_details[1]
+ )
+ subject = nozzle_manager.NozzleConfigurationManager.build_from_config(config)
+
+ assert (
+ subject.current_configuration.configuration
+ == nozzle_manager.NozzleConfigurationType.FULL
+ )
+
+ subject.update_nozzle_configuration("A1", "H1", "A1")
+ assert (
+ subject.current_configuration.configuration
+ == nozzle_manager.NozzleConfigurationType.FULL
+ )
+
+ subject.reset_to_default_configuration()
+ assert (
+ subject.current_configuration.configuration
+ == nozzle_manager.NozzleConfigurationType.FULL
+ )
+
+ subject.update_nozzle_configuration("A1", "D1", "A1")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.COLUMN
+ )
+
+ subject.update_nozzle_configuration("A1", "A1", "A1")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.SINGLE
+ )
+
+ subject.update_nozzle_configuration("H1", "H1", "H1")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.SINGLE
+ )
+
+ subject.update_nozzle_configuration("C1", "F1", "C1")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.COLUMN
+ )
+
+ subject.reset_to_default_configuration()
+ assert (
+ subject.current_configuration.configuration
+ == nozzle_manager.NozzleConfigurationType.FULL
+ )
+
+
+@pytest.mark.parametrize(
+ "pipette_details",
+ [
+ (PipetteModelType.p10, PipetteVersionType(major=1, minor=3)),
+ (PipetteModelType.p20, PipetteVersionType(major=2, minor=0)),
+ (PipetteModelType.p50, PipetteVersionType(major=3, minor=4)),
+ (PipetteModelType.p300, PipetteVersionType(major=2, minor=1)),
+ (PipetteModelType.p1000, PipetteVersionType(major=3, minor=5)),
+ ],
+)
+def test_multi_config_map_entries(
+ pipette_details: Tuple[PipetteModelType, PipetteVersionType]
+) -> None:
+ config = load_definition(
+ pipette_details[0], PipetteChannelType.EIGHT_CHANNEL, pipette_details[1]
+ )
+ subject = nozzle_manager.NozzleConfigurationManager.build_from_config(config)
+
+ def test_map_entries(
+ nozzlemap: nozzle_manager.NozzleMap, nozzles: List[str]
+ ) -> None:
+ assert nozzlemap.back_left == nozzles[0]
+ assert nozzlemap.front_right == nozzles[-1]
+ assert list(nozzlemap.map_store.keys()) == nozzles
+ assert list(nozzlemap.rows.keys()) == [nozzle[0] for nozzle in nozzles]
+ assert list(nozzlemap.columns.keys()) == ["1"]
+ for rowname, row_elements in nozzlemap.rows.items():
+ assert row_elements == [f"{rowname}1"]
+
+ assert nozzlemap.columns["1"] == nozzles
+ assert nozzlemap.tip_count == len(nozzles)
+
+ test_map_entries(
+ subject.current_configuration, ["A1", "B1", "C1", "D1", "E1", "F1", "G1", "H1"]
+ )
+ subject.update_nozzle_configuration("A1", "H1", "A1")
+ test_map_entries(
+ subject.current_configuration, ["A1", "B1", "C1", "D1", "E1", "F1", "G1", "H1"]
+ )
+ subject.update_nozzle_configuration("A1", "D1", "A1")
+ test_map_entries(subject.current_configuration, ["A1", "B1", "C1", "D1"])
+ subject.update_nozzle_configuration("A1", "A1", "A1")
+ test_map_entries(subject.current_configuration, ["A1"])
+ subject.update_nozzle_configuration("H1", "H1", "H1")
+ test_map_entries(subject.current_configuration, ["H1"])
+ subject.update_nozzle_configuration("C1", "F1", "C1")
+ test_map_entries(subject.current_configuration, ["C1", "D1", "E1", "F1"])
+
+
+def assert_offset_in_center_of(
+ offset: Point, between: Union[Tuple[str, str], str], config: PipetteConfigurations
+) -> None:
+ if isinstance(between, str):
+ assert offset == Point(*config.nozzle_map[between])
+ else:
+ assert (
+ offset
+ == (
+ Point(*config.nozzle_map[between[0]])
+ + Point(*config.nozzle_map[between[1]])
+ )
+ * 0.5
+ )
+
+
+@pytest.mark.parametrize(
+ "pipette_details",
+ [
+ (PipetteModelType.p10, PipetteVersionType(major=1, minor=3)),
+ (PipetteModelType.p20, PipetteVersionType(major=2, minor=0)),
+ (PipetteModelType.p50, PipetteVersionType(major=3, minor=4)),
+ (PipetteModelType.p300, PipetteVersionType(major=2, minor=1)),
+ (PipetteModelType.p1000, PipetteVersionType(major=3, minor=5)),
+ ],
+)
+def test_multi_config_geometry(
+ pipette_details: Tuple[PipetteModelType, PipetteVersionType]
+) -> None:
+ config = load_definition(
+ pipette_details[0], PipetteChannelType.EIGHT_CHANNEL, pipette_details[1]
+ )
+ subject = nozzle_manager.NozzleConfigurationManager.build_from_config(config)
+
+ def test_map_geometry(
+ nozzlemap: nozzle_manager.NozzleMap,
+ front_nozzle: str,
+ starting_nozzle: str,
+ xy_center_in_center_of: Union[Tuple[str, str], str],
+ y_center_in_center_of: Union[Tuple[str, str], str],
+ ) -> None:
+ assert_offset_in_center_of(
+ nozzlemap.xy_center_offset, xy_center_in_center_of, config
+ )
+ assert_offset_in_center_of(
+ nozzlemap.y_center_offset, y_center_in_center_of, config
+ )
+
+ assert nozzlemap.front_nozzle_offset == Point(*config.nozzle_map[front_nozzle])
+ assert nozzlemap.starting_nozzle_offset == Point(
+ *config.nozzle_map[starting_nozzle]
+ )
+
+ test_map_geometry(
+ subject.current_configuration, "H1", "A1", ("A1", "H1"), ("A1", "H1")
+ )
+
+ subject.update_nozzle_configuration("A1", "A1", "A1")
+ test_map_geometry(subject.current_configuration, "A1", "A1", "A1", "A1")
+
+ subject.update_nozzle_configuration("D1", "D1", "D1")
+ test_map_geometry(subject.current_configuration, "D1", "D1", "D1", "D1")
+
+ subject.update_nozzle_configuration("C1", "G1", "C1")
+ test_map_geometry(subject.current_configuration, "G1", "C1", "E1", "E1")
+
+ subject.update_nozzle_configuration("E1", "H1", "E1")
+ test_map_geometry(
+ subject.current_configuration, "H1", "E1", ("E1", "H1"), ("E1", "H1")
+ )
+
+ subject.reset_to_default_configuration()
+ test_map_geometry(
+ subject.current_configuration, "H1", "A1", ("A1", "H1"), ("A1", "H1")
+ )
+
+
+@pytest.mark.parametrize(
+ "pipette_details", [(PipetteModelType.p1000, PipetteVersionType(major=3, minor=5))]
+)
+def test_96_config_identification(
+ pipette_details: Tuple[PipetteModelType, PipetteVersionType]
+) -> None:
+ config = load_definition(
+ pipette_details[0], PipetteChannelType.NINETY_SIX_CHANNEL, pipette_details[1]
+ )
+ subject = nozzle_manager.NozzleConfigurationManager.build_from_config(config)
+
+ assert (
+ subject.current_configuration.configuration
+ == nozzle_manager.NozzleConfigurationType.FULL
+ )
+ subject.update_nozzle_configuration("A1", "H12")
+ assert (
+ subject.current_configuration.configuration
+ == nozzle_manager.NozzleConfigurationType.FULL
+ )
+ subject.update_nozzle_configuration("A1", "H1")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.COLUMN
+ )
+ subject.update_nozzle_configuration("A12", "H12")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.COLUMN
+ )
+ subject.update_nozzle_configuration("A8", "H8")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.COLUMN
+ )
+
+ subject.update_nozzle_configuration("A1", "A12")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.ROW
+ )
+ subject.update_nozzle_configuration("H1", "H12")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.ROW
+ )
+ subject.update_nozzle_configuration("D1", "D12")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.ROW
+ )
+
+ subject.update_nozzle_configuration("E1", "H6")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.SUBRECT
+ )
+ subject.update_nozzle_configuration("E7", "H12")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.SUBRECT
+ )
+
+ subject.update_nozzle_configuration("C4", "F9")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.SUBRECT
+ )
+ subject.update_nozzle_configuration("A1", "B12")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.SUBRECT
+ )
+ subject.update_nozzle_configuration("G1", "H12")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.SUBRECT
+ )
+ subject.update_nozzle_configuration("A1", "H3")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.SUBRECT
+ )
+ subject.update_nozzle_configuration("A10", "H12")
+ assert (
+ cast(
+ nozzle_manager.NozzleConfigurationType,
+ subject.current_configuration.configuration,
+ )
+ == nozzle_manager.NozzleConfigurationType.SUBRECT
+ )
+
+
+@pytest.mark.parametrize(
+ "pipette_details", [(PipetteModelType.p1000, PipetteVersionType(major=3, minor=5))]
+)
+def test_96_config_map_entries(
+ pipette_details: Tuple[PipetteModelType, PipetteVersionType]
+) -> None:
+ config = load_definition(
+ pipette_details[0], PipetteChannelType.NINETY_SIX_CHANNEL, pipette_details[1]
+ )
+ subject = nozzle_manager.NozzleConfigurationManager.build_from_config(config)
+
+ def test_map_entries(
+ nozzlemap: nozzle_manager.NozzleMap,
+ rows: Dict[str, List[str]],
+ cols: Dict[str, List[str]],
+ ) -> None:
+ assert nozzlemap.back_left == next(iter(rows.values()))[0]
+ assert nozzlemap.front_right == next(reversed(list(rows.values())))[-1]
+
+ def _nozzles() -> Iterator[str]:
+ for row in rows.values():
+ for nozzle in row:
+ yield nozzle
+
+ assert list(nozzlemap.map_store.keys()) == list(_nozzles())
+ assert nozzlemap.rows == rows
+ assert nozzlemap.columns == cols
+ assert nozzlemap.tip_count == sum(len(row) for row in rows.values())
+
+ test_map_entries(
+ subject.current_configuration,
+ {
+ "A": [
+ "A1",
+ "A2",
+ "A3",
+ "A4",
+ "A5",
+ "A6",
+ "A7",
+ "A8",
+ "A9",
+ "A10",
+ "A11",
+ "A12",
+ ],
+ "B": [
+ "B1",
+ "B2",
+ "B3",
+ "B4",
+ "B5",
+ "B6",
+ "B7",
+ "B8",
+ "B9",
+ "B10",
+ "B11",
+ "B12",
+ ],
+ "C": [
+ "C1",
+ "C2",
+ "C3",
+ "C4",
+ "C5",
+ "C6",
+ "C7",
+ "C8",
+ "C9",
+ "C10",
+ "C11",
+ "C12",
+ ],
+ "D": [
+ "D1",
+ "D2",
+ "D3",
+ "D4",
+ "D5",
+ "D6",
+ "D7",
+ "D8",
+ "D9",
+ "D10",
+ "D11",
+ "D12",
+ ],
+ "E": [
+ "E1",
+ "E2",
+ "E3",
+ "E4",
+ "E5",
+ "E6",
+ "E7",
+ "E8",
+ "E9",
+ "E10",
+ "E11",
+ "E12",
+ ],
+ "F": [
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ ],
+ "G": [
+ "G1",
+ "G2",
+ "G3",
+ "G4",
+ "G5",
+ "G6",
+ "G7",
+ "G8",
+ "G9",
+ "G10",
+ "G11",
+ "G12",
+ ],
+ "H": [
+ "H1",
+ "H2",
+ "H3",
+ "H4",
+ "H5",
+ "H6",
+ "H7",
+ "H8",
+ "H9",
+ "H10",
+ "H11",
+ "H12",
+ ],
+ },
+ {
+ "1": ["A1", "B1", "C1", "D1", "E1", "F1", "G1", "H1"],
+ "2": ["A2", "B2", "C2", "D2", "E2", "F2", "G2", "H2"],
+ "3": ["A3", "B3", "C3", "D3", "E3", "F3", "G3", "H3"],
+ "4": ["A4", "B4", "C4", "D4", "E4", "F4", "G4", "H4"],
+ "5": ["A5", "B5", "C5", "D5", "E5", "F5", "G5", "H5"],
+ "6": ["A6", "B6", "C6", "D6", "E6", "F6", "G6", "H6"],
+ "7": ["A7", "B7", "C7", "D7", "E7", "F7", "G7", "H7"],
+ "8": ["A8", "B8", "C8", "D8", "E8", "F8", "G8", "H8"],
+ "9": ["A9", "B9", "C9", "D9", "E9", "F9", "G9", "H9"],
+ "10": ["A10", "B10", "C10", "D10", "E10", "F10", "G10", "H10"],
+ "11": ["A11", "B11", "C11", "D11", "E11", "F11", "G11", "H11"],
+ "12": ["A12", "B12", "C12", "D12", "E12", "F12", "G12", "H12"],
+ },
+ )
+
+ subject.update_nozzle_configuration("A1", "H1")
+ test_map_entries(
+ subject.current_configuration,
+ {
+ "A": ["A1"],
+ "B": ["B1"],
+ "C": ["C1"],
+ "D": ["D1"],
+ "E": ["E1"],
+ "F": ["F1"],
+ "G": ["G1"],
+ "H": ["H1"],
+ },
+ {"1": ["A1", "B1", "C1", "D1", "E1", "F1", "G1", "H1"]},
+ )
+
+ subject.update_nozzle_configuration("A12", "H12")
+ test_map_entries(
+ subject.current_configuration,
+ {
+ "A": ["A12"],
+ "B": ["B12"],
+ "C": ["C12"],
+ "D": ["D12"],
+ "E": ["E12"],
+ "F": ["F12"],
+ "G": ["G12"],
+ "H": ["H12"],
+ },
+ {"12": ["A12", "B12", "C12", "D12", "E12", "F12", "G12", "H12"]},
+ )
+
+ subject.update_nozzle_configuration("A8", "H8")
+ test_map_entries(
+ subject.current_configuration,
+ {
+ "A": ["A8"],
+ "B": ["B8"],
+ "C": ["C8"],
+ "D": ["D8"],
+ "E": ["E8"],
+ "F": ["F8"],
+ "G": ["G8"],
+ "H": ["H8"],
+ },
+ {"8": ["A8", "B8", "C8", "D8", "E8", "F8", "G8", "H8"]},
+ )
+
+ subject.update_nozzle_configuration("A1", "A12")
+ test_map_entries(
+ subject.current_configuration,
+ {
+ "A": [
+ "A1",
+ "A2",
+ "A3",
+ "A4",
+ "A5",
+ "A6",
+ "A7",
+ "A8",
+ "A9",
+ "A10",
+ "A11",
+ "A12",
+ ]
+ },
+ {
+ "1": ["A1"],
+ "2": ["A2"],
+ "3": ["A3"],
+ "4": ["A4"],
+ "5": ["A5"],
+ "6": ["A6"],
+ "7": ["A7"],
+ "8": ["A8"],
+ "9": ["A9"],
+ "10": ["A10"],
+ "11": ["A11"],
+ "12": ["A12"],
+ },
+ )
+
+ subject.update_nozzle_configuration("H1", "H12")
+ test_map_entries(
+ subject.current_configuration,
+ {
+ "H": [
+ "H1",
+ "H2",
+ "H3",
+ "H4",
+ "H5",
+ "H6",
+ "H7",
+ "H8",
+ "H9",
+ "H10",
+ "H11",
+ "H12",
+ ]
+ },
+ {
+ "1": ["H1"],
+ "2": ["H2"],
+ "3": ["H3"],
+ "4": ["H4"],
+ "5": ["H5"],
+ "6": ["H6"],
+ "7": ["H7"],
+ "8": ["H8"],
+ "9": ["H9"],
+ "10": ["H10"],
+ "11": ["H11"],
+ "12": ["H12"],
+ },
+ )
+ subject.update_nozzle_configuration("D1", "D12")
+ test_map_entries(
+ subject.current_configuration,
+ {
+ "D": [
+ "D1",
+ "D2",
+ "D3",
+ "D4",
+ "D5",
+ "D6",
+ "D7",
+ "D8",
+ "D9",
+ "D10",
+ "D11",
+ "D12",
+ ]
+ },
+ {
+ "1": ["D1"],
+ "2": ["D2"],
+ "3": ["D3"],
+ "4": ["D4"],
+ "5": ["D5"],
+ "6": ["D6"],
+ "7": ["D7"],
+ "8": ["D8"],
+ "9": ["D9"],
+ "10": ["D10"],
+ "11": ["D11"],
+ "12": ["D12"],
+ },
+ )
+
+ subject.update_nozzle_configuration("A1", "D6")
+ test_map_entries(
+ subject.current_configuration,
+ {
+ "A": ["A1", "A2", "A3", "A4", "A5", "A6"],
+ "B": ["B1", "B2", "B3", "B4", "B5", "B6"],
+ "C": ["C1", "C2", "C3", "C4", "C5", "C6"],
+ "D": ["D1", "D2", "D3", "D4", "D5", "D6"],
+ },
+ {
+ "1": ["A1", "B1", "C1", "D1"],
+ "2": ["A2", "B2", "C2", "D2"],
+ "3": ["A3", "B3", "C3", "D3"],
+ "4": ["A4", "B4", "C4", "D4"],
+ "5": ["A5", "B5", "C5", "D5"],
+ "6": ["A6", "B6", "C6", "D6"],
+ },
+ )
+
+ subject.update_nozzle_configuration("A7", "D12")
+ test_map_entries(
+ subject.current_configuration,
+ {
+ "A": ["A7", "A8", "A9", "A10", "A11", "A12"],
+ "B": ["B7", "B8", "B9", "B10", "B11", "B12"],
+ "C": ["C7", "C8", "C9", "C10", "C11", "C12"],
+ "D": ["D7", "D8", "D9", "D10", "D11", "D12"],
+ },
+ {
+ "7": ["A7", "B7", "C7", "D7"],
+ "8": ["A8", "B8", "C8", "D8"],
+ "9": ["A9", "B9", "C9", "D9"],
+ "10": ["A10", "B10", "C10", "D10"],
+ "11": ["A11", "B11", "C11", "D11"],
+ "12": ["A12", "B12", "C12", "D12"],
+ },
+ )
+
+ subject.update_nozzle_configuration("E1", "H6")
+ test_map_entries(
+ subject.current_configuration,
+ {
+ "E": ["E1", "E2", "E3", "E4", "E5", "E6"],
+ "F": ["F1", "F2", "F3", "F4", "F5", "F6"],
+ "G": ["G1", "G2", "G3", "G4", "G5", "G6"],
+ "H": ["H1", "H2", "H3", "H4", "H5", "H6"],
+ },
+ {
+ "1": ["E1", "F1", "G1", "H1"],
+ "2": ["E2", "F2", "G2", "H2"],
+ "3": ["E3", "F3", "G3", "H3"],
+ "4": ["E4", "F4", "G4", "H4"],
+ "5": ["E5", "F5", "G5", "H5"],
+ "6": ["E6", "F6", "G6", "H6"],
+ },
+ )
+
+ subject.update_nozzle_configuration("E7", "H12")
+ test_map_entries(
+ subject.current_configuration,
+ {
+ "E": ["E7", "E8", "E9", "E10", "E11", "E12"],
+ "F": ["F7", "F8", "F9", "F10", "F11", "F12"],
+ "G": ["G7", "G8", "G9", "G10", "G11", "G12"],
+ "H": ["H7", "H8", "H9", "H10", "H11", "H12"],
+ },
+ {
+ "7": ["E7", "F7", "G7", "H7"],
+ "8": ["E8", "F8", "G8", "H8"],
+ "9": ["E9", "F9", "G9", "H9"],
+ "10": ["E10", "F10", "G10", "H10"],
+ "11": ["E11", "F11", "G11", "H11"],
+ "12": ["E12", "F12", "G12", "H12"],
+ },
+ )
+
+ subject.update_nozzle_configuration("C4", "D5")
+ test_map_entries(
+ subject.current_configuration,
+ {"C": ["C4", "C5"], "D": ["D4", "D5"]},
+ {"4": ["C4", "D4"], "5": ["C5", "D5"]},
+ )
+
+
+@pytest.mark.parametrize(
+ "pipette_details", [(PipetteModelType.p1000, PipetteVersionType(major=3, minor=5))]
+)
+def test_96_config_geometry(
+ pipette_details: Tuple[PipetteModelType, PipetteVersionType]
+) -> None:
+ config = load_definition(
+ pipette_details[0], PipetteChannelType.NINETY_SIX_CHANNEL, pipette_details[1]
+ )
+ subject = nozzle_manager.NozzleConfigurationManager.build_from_config(config)
+
+ def test_map_geometry(
+ config: PipetteConfigurations,
+ nozzlemap: nozzle_manager.NozzleMap,
+ starting_nozzle: str,
+ front_nozzle: str,
+ xy_center_between: Union[str, Tuple[str, str]],
+ y_center_between: Union[str, Tuple[str, str]],
+ ) -> None:
+ assert_offset_in_center_of(
+ nozzlemap.xy_center_offset, xy_center_between, config
+ )
+ assert_offset_in_center_of(nozzlemap.y_center_offset, y_center_between, config)
+
+ assert nozzlemap.front_nozzle_offset == Point(*config.nozzle_map[front_nozzle])
+ assert nozzlemap.starting_nozzle_offset == Point(
+ *config.nozzle_map[starting_nozzle]
+ )
+
+ test_map_geometry(
+ config, subject.current_configuration, "A1", "H1", ("A1", "H12"), ("A1", "H1")
+ )
+
+ subject.update_nozzle_configuration("A1", "H1")
+ test_map_geometry(
+ config, subject.current_configuration, "A1", "H1", ("A1", "H1"), ("A1", "H1")
+ )
+
+ subject.update_nozzle_configuration("A12", "H12")
+ test_map_geometry(
+ config,
+ subject.current_configuration,
+ "A12",
+ "H12",
+ ("A12", "H12"),
+ ("A12", "H12"),
+ )
+
+ subject.update_nozzle_configuration("A1", "A12")
+ test_map_geometry(
+ config, subject.current_configuration, "A1", "A1", ("A1", "A12"), "A1"
+ )
+
+ subject.update_nozzle_configuration("H1", "H12")
+ test_map_geometry(
+ config, subject.current_configuration, "H1", "H1", ("H1", "H12"), "H1"
+ )
+
+ subject.update_nozzle_configuration("A1", "D6")
+ test_map_geometry(
+ config, subject.current_configuration, "A1", "D1", ("A1", "D6"), ("A1", "D1")
+ )
+
+ subject.update_nozzle_configuration("E7", "H12")
+ test_map_geometry(
+ config, subject.current_configuration, "E7", "H7", ("E7", "H12"), ("E7", "H7")
+ )
+
+ subject.update_nozzle_configuration("C4", "D5")
+ test_map_geometry(
+ config, subject.current_configuration, "C4", "D4", ("C4", "D5"), ("C4", "D4")
+ )
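Taken together, the rewritten nozzle-manager tests above build their subject from the shared-data pipette definition and classify layouts as FULL, COLUMN, ROW, SINGLE, or SUBRECT. A condensed usage sketch of the same API surface the tests exercise (expected values follow the assertions above; requires the opentrons packages):

from opentrons.hardware_control import nozzle_manager
from opentrons_shared_data.pipette.load_data import load_definition
from opentrons_shared_data.pipette.types import (
    PipetteChannelType,
    PipetteModelType,
    PipetteVersionType,
)

config = load_definition(
    PipetteModelType.p1000,
    PipetteChannelType.NINETY_SIX_CHANNEL,
    PipetteVersionType(major=3, minor=5),
)
manager = nozzle_manager.NozzleConfigurationManager.build_from_config(config)

# Default layout is the full 96-nozzle rectangle
assert manager.current_configuration.tip_count == 96

# Restricting to the left-most column is classified as a COLUMN configuration
manager.update_nozzle_configuration("A1", "H1")
assert (
    manager.current_configuration.configuration
    == nozzle_manager.NozzleConfigurationType.COLUMN
)
assert manager.current_configuration.tip_count == 8

manager.reset_to_default_configuration()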
diff --git a/api/tests/opentrons/hardware_control/test_gripper.py b/api/tests/opentrons/hardware_control/test_gripper.py
index 02d2285bdb0..6066b8a74a1 100644
--- a/api/tests/opentrons/hardware_control/test_gripper.py
+++ b/api/tests/opentrons/hardware_control/test_gripper.py
@@ -7,6 +7,7 @@
from opentrons.hardware_control.types import CriticalPoint
from opentrons.config import gripper_config
from opentrons_shared_data.gripper import GripperModel
+from opentrons_shared_data.errors.exceptions import MotionFailedError
if TYPE_CHECKING:
from opentrons.hardware_control.instruments.ot3.instrument_calibration import (
@@ -73,6 +74,7 @@ def test_reload_instrument_cal_ot3(fake_offset: "GripperCalibrationOffset") -> N
fake_gripper_conf,
fake_offset,
"fakeid123",
+ jaw_max_offset=15,
)
# if only calibration is changed
new_cal = instrument_calibration.GripperCalibrationOffset(
@@ -86,5 +88,53 @@ def test_reload_instrument_cal_ot3(fake_offset: "GripperCalibrationOffset") -> N
# it's the same gripper
assert new_gripper == old_gripper
+ # jaw offset should persist as well
+ assert new_gripper._jaw_max_offset == old_gripper._jaw_max_offset
# we said upstream could skip
assert skip
+
+
+@pytest.mark.ot3_only
+def test_reload_instrument_cal_ot3_conf_changed(
+ fake_offset: "GripperCalibrationOffset",
+) -> None:
+ old_gripper = gripper.Gripper(
+ fake_gripper_conf,
+ fake_offset,
+ "fakeid123",
+ jaw_max_offset=15,
+ )
+ new_conf = fake_gripper_conf.copy(
+ update={"grip_force_profile": {"default_grip_force": 1}}
+ )
+ assert new_conf != old_gripper.config
+
+ new_gripper, skip = gripper._reload_gripper(new_conf, old_gripper, fake_offset)
+
+ # it's not the same gripper
+ assert new_gripper != old_gripper
+ # do not pass in the old jaw max offset
+ assert not new_gripper._jaw_max_offset
+ # upstream should not skip because the config changed
+ assert not skip
+
+
+@pytest.mark.ot3_only
+def test_jaw_calibration_error_checking() -> None:
+ subject = gripper.Gripper(fake_gripper_conf, fake_offset, "fakeid123")
+ with pytest.raises(MotionFailedError):
+ subject.update_jaw_open_position_from_closed_position(0)
+
+
+@pytest.mark.ot3_only
+def test_jaw_calibration() -> None:
+ subject = gripper.Gripper(fake_gripper_conf, fake_offset, "fakeid123")
+ subject.update_jaw_open_position_from_closed_position(
+ (
+ fake_gripper_conf.geometry.jaw_width["max"]
+ - fake_gripper_conf.geometry.jaw_width["min"]
+ + 2
+ )
+ / 2
+ )
+ assert subject.max_jaw_width == fake_gripper_conf.geometry.jaw_width["max"] + 2
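The expected value in test_jaw_calibration is consistent with the recalibrated maximum being the nominal minimum plus twice the value passed in (that internal formula is an assumption here, not something the diff states). A worked check with stand-in numbers in place of fake_gripper_conf:

jaw_max = 92.0  # stand-in for geometry.jaw_width["max"]
jaw_min = 60.0  # stand-in for geometry.jaw_width["min"]

measured_half_span = (jaw_max - jaw_min + 2) / 2  # the value the test passes in
assert jaw_min + 2 * measured_half_span == jaw_max + 2  # matches max_jaw_width == max + 2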
diff --git a/api/tests/opentrons/hardware_control/test_importability.py b/api/tests/opentrons/hardware_control/test_importability.py
new file mode 100644
index 00000000000..c3c62424309
--- /dev/null
+++ b/api/tests/opentrons/hardware_control/test_importability.py
@@ -0,0 +1,8 @@
+import pytest
+from opentrons.hardware_control.ot3api import OT3API
+
+
+@pytest.mark.ot2_only
+async def test_flex_simulator_always_importable() -> None:
+ api = await OT3API.build_hardware_simulator()
+ assert isinstance(api, OT3API)
diff --git a/api/tests/opentrons/hardware_control/test_instruments.py b/api/tests/opentrons/hardware_control/test_instruments.py
index d3d75483377..d3907451717 100644
--- a/api/tests/opentrons/hardware_control/test_instruments.py
+++ b/api/tests/opentrons/hardware_control/test_instruments.py
@@ -11,9 +11,9 @@
aionotify = None # type: ignore
-from opentrons import types, config
+from opentrons import types
from opentrons.hardware_control import API
-from opentrons.hardware_control.types import Axis, OT3Mount
+from opentrons.hardware_control.types import Axis, OT3Mount, HardwareFeatureFlags
from opentrons_shared_data.errors.exceptions import CommandPreconditionViolated
@@ -191,7 +191,10 @@ async def test_cache_instruments_hc(
is_robot,
cntrlr_mock_connect,
):
- hw_api_cntrlr = await API.build_hardware_controller(loop=asyncio.get_running_loop())
+ hw_api_cntrlr = await API.build_hardware_controller(
+ loop=asyncio.get_running_loop(),
+ feature_flags=HardwareFeatureFlags.build_from_ff(),
+ )
async def mock_driver_model(mount):
attached_pipette = {"left": LEFT_PIPETTE_MODEL, "right": None}
@@ -260,7 +263,7 @@ def fake_func2(mount, value):
{types.Mount.LEFT: "p10_single", types.Mount.RIGHT: "p300_single_gen2"}
)
attached = sim.attached_instruments
- assert attached[types.Mount.LEFT]["model"] == "p10_single_v1"
+ assert attached[types.Mount.LEFT]["model"] == "p10_single_v1.5"
assert attached[types.Mount.LEFT]["name"] == "p10_single"
steps_mm_calls = [mock.call({"B": 768}), mock.call({"C": 3200})]
@@ -288,7 +291,7 @@ def fake_func2(mount, value):
# If we use prefixes, that should work too
await sim.cache_instruments({types.Mount.RIGHT: "p300_single"})
attached = sim.attached_instruments
- assert attached[types.Mount.RIGHT]["model"] == "p300_single_v1"
+ assert attached[types.Mount.RIGHT]["model"] == "p300_single_v1.5"
assert attached[types.Mount.RIGHT]["name"] == "p300_single"
# If we specify instruments at init time, we should get them without
# passing an expectation
@@ -350,7 +353,9 @@ async def test_prep_aspirate(sim_and_instr):
async def test_aspirate_new(dummy_instruments):
hw_api = await API.build_hardware_simulator(
- attached_instruments=dummy_instruments[0], loop=asyncio.get_running_loop()
+ attached_instruments=dummy_instruments[0],
+ loop=asyncio.get_running_loop(),
+ feature_flags=HardwareFeatureFlags(use_old_aspiration_functions=False),
)
await hw_api.home()
await hw_api.cache_instruments()
@@ -367,11 +372,12 @@ async def test_aspirate_new(dummy_instruments):
assert pos[Axis.B] == pytest.approx(new_plunger_pos)
-async def test_aspirate_old(decoy: Decoy, mock_feature_flags: None, dummy_instruments):
- decoy.when(config.feature_flags.use_old_aspiration_functions()).then_return(True)
+async def test_aspirate_old(decoy: Decoy, dummy_instruments):
hw_api = await API.build_hardware_simulator(
- attached_instruments=dummy_instruments[0], loop=asyncio.get_running_loop()
+ attached_instruments=dummy_instruments[0],
+ loop=asyncio.get_running_loop(),
+ feature_flags=HardwareFeatureFlags(use_old_aspiration_functions=True),
)
await hw_api.home()
await hw_api.cache_instruments()
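The edits in this file replace the global use_old_aspiration_functions feature flag with an explicit HardwareFeatureFlags value handed to the builder. A minimal sketch of the new call shape, based only on the arguments the tests above pass:

import asyncio

from opentrons.hardware_control import API
from opentrons.hardware_control.types import HardwareFeatureFlags


async def build_new_aspiration_sim() -> API:
    # HardwareFeatureFlags.build_from_ff() would instead read the robot's persisted flags
    return await API.build_hardware_simulator(
        loop=asyncio.get_running_loop(),
        feature_flags=HardwareFeatureFlags(use_old_aspiration_functions=False),
    )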
diff --git a/api/tests/opentrons/hardware_control/test_module_control.py b/api/tests/opentrons/hardware_control/test_module_control.py
index b683f12d590..36fd6cb1793 100644
--- a/api/tests/opentrons/hardware_control/test_module_control.py
+++ b/api/tests/opentrons/hardware_control/test_module_control.py
@@ -1,13 +1,17 @@
"""Tests for opentrons.hardware_control.module_control."""
import pytest
from decoy import Decoy, matchers
-from typing import Awaitable, Callable, cast
+from typing import Awaitable, Callable, cast, Union, List
from opentrons.drivers.rpi_drivers.types import USBPort
from opentrons.drivers.rpi_drivers.interfaces import USBDriverInterface
from opentrons.hardware_control import API as HardwareAPI
from opentrons.hardware_control.modules import AbstractModule
-from opentrons.hardware_control.modules.types import ModuleAtPort, ModuleType
+from opentrons.hardware_control.modules.types import (
+ ModuleAtPort,
+ ModuleType,
+ SimulatingModuleAtPort,
+)
from opentrons.hardware_control.module_control import AttachedModulesControl
@@ -34,7 +38,10 @@ def build_module(decoy: Decoy) -> Callable[..., Awaitable[AbstractModule]]:
`AttachedModulesControl` is doing too much work _and_ these tests
are too brittle and of questionable value.
"""
- return cast(Callable[..., Awaitable[AbstractModule]], decoy.mock(is_async=True))
+ return cast(
+ Callable[..., Awaitable[AbstractModule]],
+ decoy.mock(name="build_module", is_async=True),
+ )
@pytest.fixture()
@@ -52,15 +59,28 @@ def subject(
return modules_control
+@pytest.mark.parametrize(
+ "module_at_port_input",
+ [
+ ([ModuleAtPort(port="/dev/foo", name="bar")]),
+ (
+ [
+ SimulatingModuleAtPort(
+ port="/dev/foo", name="bar", serial_number="test-123"
+ )
+ ]
+ ),
+ ],
+)
async def test_register_modules(
decoy: Decoy,
usb_bus: USBDriverInterface,
build_module: Callable[..., Awaitable[AbstractModule]],
hardware_api: HardwareAPI,
subject: AttachedModulesControl,
+ module_at_port_input: Union[List[ModuleAtPort], List[SimulatingModuleAtPort]],
) -> None:
"""It should register attached modules."""
- new_mods_at_ports = [ModuleAtPort(port="/dev/foo", name="bar")]
actual_ports = [
ModuleAtPort(
port="/dev/foo",
@@ -72,16 +92,19 @@ async def test_register_modules(
module = decoy.mock(cls=AbstractModule)
decoy.when(module.usb_port).then_return(USBPort(name="baz", port_number=0))
- decoy.when(usb_bus.match_virtual_ports(new_mods_at_ports)).then_return(actual_ports)
+ decoy.when(usb_bus.match_virtual_ports(module_at_port_input)).then_return(
+ actual_ports
+ )
decoy.when(
await build_module(
port="/dev/foo",
usb_port=USBPort(name="baz", port_number=0),
type=ModuleType.TEMPERATURE,
+ sim_serial_number=None,
)
).then_return(module)
- await subject.register_modules(new_mods_at_ports=new_mods_at_ports)
+ await subject.register_modules(new_mods_at_ports=module_at_port_input)
result = subject.available_modules
assert result == [module]
@@ -127,6 +150,7 @@ async def test_register_modules_sort(
usb_port=mod.usb_port,
port=matchers.Anything(),
type=matchers.Anything(),
+ sim_serial_number=None,
)
).then_return(mod)
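The `test_register_modules` changes above parametrize the registration flow over both real and simulating module descriptors, with `build_module` now receiving `sim_serial_number`. A short sketch of the parametrization shape; the values are illustrative and the descriptors are assumed to expose their constructor fields as attributes.

```python
from typing import List, Union

import pytest

from opentrons.hardware_control.modules.types import (
    ModuleAtPort,
    SimulatingModuleAtPort,
)

ModulesAtPorts = Union[List[ModuleAtPort], List[SimulatingModuleAtPort]]


@pytest.mark.parametrize(
    "mods",
    [
        [ModuleAtPort(port="/dev/foo", name="bar")],
        [
            SimulatingModuleAtPort(
                port="/dev/foo", name="bar", serial_number="test-123"
            )
        ],
    ],
)
def test_descriptor_shape(mods: ModulesAtPorts) -> None:
    # Both descriptor flavors carry the same port/name fields.
    assert mods[0].port == "/dev/foo"
```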
diff --git a/api/tests/opentrons/hardware_control/test_modules.py b/api/tests/opentrons/hardware_control/test_modules.py
index 49e6ba4b766..eb3d0e48c6c 100644
--- a/api/tests/opentrons/hardware_control/test_modules.py
+++ b/api/tests/opentrons/hardware_control/test_modules.py
@@ -3,6 +3,7 @@
from pathlib import Path
from unittest import mock
+from packaging.version import Version
from opentrons.hardware_control import ExecutionManager
from opentrons.hardware_control.modules import ModuleAtPort
@@ -22,13 +23,19 @@
HeaterShaker,
AbstractModule,
)
+from opentrons.hardware_control.modules.mod_abc import parse_fw_version
from opentrons.drivers.rpi_drivers.types import USBPort
async def test_get_modules_simulating():
import opentrons.hardware_control as hardware_control
- mods = ["tempdeck", "magdeck", "thermocycler", "heatershaker"]
+ mods = {
+ "tempdeck": ["111"],
+ "magdeck": ["222"],
+ "thermocycler": ["333"],
+ "heatershaker": ["444"],
+ }
api = await hardware_control.API.build_hardware_simulator(attached_modules=mods)
await asyncio.sleep(0.05)
from_api = api.attached_modules
@@ -40,7 +47,7 @@ async def test_get_modules_simulating():
async def test_module_caching():
import opentrons.hardware_control as hardware_control
- mod_names = ["tempdeck"]
+ mod_names = {"tempdeck": ["111"]}
api = await hardware_control.API.build_hardware_simulator(
attached_modules=mod_names
)
@@ -59,10 +66,11 @@ async def test_module_caching():
assert with_magdeck[0] is found_mods[0]
await api._backend.module_controls.register_modules(
removed_mods_at_ports=[
- ModuleAtPort(port="/dev/ot_module_sim_tempdeck0", name="tempdeck")
+ ModuleAtPort(port="/dev/ot_module_sim_tempdeck111", name="tempdeck")
]
)
only_magdeck = api.attached_modules.copy()
+
assert only_magdeck[0] is with_magdeck[1]
# Check that two modules of the same kind on different ports are
@@ -94,7 +102,7 @@ async def test_create_simulating_module(
"""It should create simulating module instance for specified module."""
import opentrons.hardware_control as hardware_control
- api = await hardware_control.API.build_hardware_simulator(attached_modules=[])
+ api = await hardware_control.API.build_hardware_simulator(attached_modules={})
await asyncio.sleep(0.05)
simulating_module = await api.create_simulating_module(module_model)
@@ -230,8 +238,6 @@ async def test_module_update_integration(
):
from opentrons.hardware_control import modules
- loop = asyncio.get_running_loop()
-
def async_return(result):
f = asyncio.Future()
f.set_result(result)
@@ -240,7 +246,6 @@ def async_return(result):
bootloader_kwargs = {
"stdout": asyncio.subprocess.PIPE,
"stderr": asyncio.subprocess.PIPE,
- "loop": loop,
}
upload_via_avrdude_mock = mock.Mock(
@@ -256,14 +261,14 @@ async def mock_find_avrdude_bootloader_port():
)
# test temperature module update with avrdude bootloader
- await modules.update_firmware(mod_tempdeck, "fake_fw_file_path", loop)
+ await modules.update_firmware(mod_tempdeck, "fake_fw_file_path")
upload_via_avrdude_mock.assert_called_once_with(
"ot_module_avrdude_bootloader1", "fake_fw_file_path", bootloader_kwargs
)
upload_via_avrdude_mock.reset_mock()
# test magnetic module update with avrdude bootloader
- await modules.update_firmware(mod_magdeck, "fake_fw_file_path", loop)
+ await modules.update_firmware(mod_magdeck, "fake_fw_file_path")
upload_via_avrdude_mock.assert_called_once_with(
"ot_module_avrdude_bootloader1", "fake_fw_file_path", bootloader_kwargs
)
@@ -281,7 +286,7 @@ async def mock_find_bossa_bootloader_port():
modules.update, "find_bootloader_port", mock_find_bossa_bootloader_port
)
- await modules.update_firmware(mod_thermocycler, "fake_fw_file_path", loop)
+ await modules.update_firmware(mod_thermocycler, "fake_fw_file_path")
upload_via_bossa_mock.assert_called_once_with(
"ot_module_bossa_bootloader1", "fake_fw_file_path", bootloader_kwargs
)
@@ -299,7 +304,7 @@ async def mock_find_dfu_device_hs(pid: str, expected_device_count: int):
monkeypatch.setattr(modules.update, "find_dfu_device", mock_find_dfu_device_hs)
- await modules.update_firmware(mod_heatershaker, "fake_fw_file_path", loop)
+ await modules.update_firmware(mod_heatershaker, "fake_fw_file_path")
upload_via_dfu_mock.assert_called_once_with(
"df11", "fake_fw_file_path", bootloader_kwargs
)
@@ -312,7 +317,7 @@ async def mock_find_dfu_device_tc2(pid: str, expected_device_count: int):
monkeypatch.setattr(modules.update, "find_dfu_device", mock_find_dfu_device_tc2)
- await modules.update_firmware(mod_thermocycler_gen2, "fake_fw_file_path", loop)
+ await modules.update_firmware(mod_thermocycler_gen2, "fake_fw_file_path")
upload_via_dfu_mock.assert_called_once_with(
"df11", "fake_fw_file_path", bootloader_kwargs
)
@@ -343,7 +348,13 @@ async def test_get_bundled_fw(monkeypatch, tmpdir):
from opentrons.hardware_control import API
- mods = ["tempdeck", "magdeck", "thermocycler", "heatershaker"]
+ mods = {
+ "tempdeck": ["111"],
+ "magdeck": ["222"],
+ "thermocycler": ["333"],
+ "heatershaker": ["444"],
+ }
+
api = await API.build_hardware_simulator(attached_modules=mods)
await asyncio.sleep(0.05)
@@ -413,3 +424,20 @@ def test_magnetic_module_revision_parsing(revision, model):
)
def test_temperature_module_revision_parsing(revision, model):
assert TempDeck._model_from_revision(revision) == model
+
+
+@pytest.mark.parametrize(
+ argnames=["device_version", "expected_result"],
+ argvalues=[
+ ["v1.0.4", Version("v1.0.4")],
+ ["v0.5.6", Version("v0.5.6")],
+ ["v1.0.4-dhfs", Version("v0.0.0")],
+ ["v3.0.dshjfd", Version("v0.0.0")],
+ ],
+)
+async def test_catch_invalid_fw_version(
+ device_version: str,
+    expected_result: Version,
+) -> None:
+    """Assert that invalid firmware versions fall back to a Version object of v0.0.0."""
+ assert parse_fw_version(device_version) == expected_result
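The new `test_catch_invalid_fw_version` asserts that unparseable firmware strings collapse to `Version("0.0.0")`. A minimal stand-in for that behavior is sketched below; the real `parse_fw_version` lives in `opentrons.hardware_control.modules.mod_abc` and may differ in detail.

```python
from packaging.version import InvalidVersion, Version


def parse_fw_version_sketch(device_version: str) -> Version:
    """Return the parsed firmware version, falling back to v0.0.0 when invalid."""
    try:
        return Version(device_version)
    except InvalidVersion:
        return Version("0.0.0")


assert parse_fw_version_sketch("v1.0.4") == Version("1.0.4")
assert parse_fw_version_sketch("v3.0.dshjfd") == Version("0.0.0")
```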
diff --git a/api/tests/opentrons/hardware_control/test_ot3_api.py b/api/tests/opentrons/hardware_control/test_ot3_api.py
index 9c92d5b936f..7ab0a2f1c00 100644
--- a/api/tests/opentrons/hardware_control/test_ot3_api.py
+++ b/api/tests/opentrons/hardware_control/test_ot3_api.py
@@ -1,6 +1,18 @@
""" Tests for behaviors specific to the OT3 hardware controller.
"""
-from typing import Iterator, Union, Dict, Tuple, List, Any, OrderedDict, Optional
+from typing import (
+ AsyncIterator,
+ Iterator,
+ Union,
+ Dict,
+ Tuple,
+ List,
+ Any,
+ OrderedDict,
+ Optional,
+ cast,
+ TypedDict,
+)
from typing_extensions import Literal
from math import copysign
import pytest
@@ -13,11 +25,13 @@
GantryLoad,
CapacitivePassSettings,
LiquidProbeSettings,
+ OutputOptions,
)
from opentrons.hardware_control.dev_types import (
AttachedGripper,
AttachedPipette,
GripperDict,
+ GripperSpec,
)
from opentrons.hardware_control.motion_utilities import target_position_from_plunger
from opentrons.hardware_control.instruments.ot3.gripper_handler import GripperHandler
@@ -40,7 +54,6 @@
InstrumentProbeType,
SubSystem,
GripperJawState,
- StatusBarState,
EstopState,
EstopStateNotification,
TipStateType,
@@ -49,13 +62,11 @@
from opentrons.hardware_control.errors import InvalidCriticalPoint
from opentrons.hardware_control.ot3api import OT3API
from opentrons.hardware_control import ThreadManager
-from opentrons.hardware_control.backends.ot3utils import (
- axis_to_node,
-)
+
+from opentrons.hardware_control.backends.ot3simulator import OT3Simulator
from opentrons_hardware.firmware_bindings.constants import NodeId
from opentrons.types import Point, Mount
-from opentrons_hardware.hardware_control.motion import MoveStopCondition
from opentrons_hardware.hardware_control.motion_planning.types import Move
from opentrons.config import gripper_config as gc
@@ -74,6 +85,7 @@
from opentrons_shared_data.pipette import (
load_data as load_pipette_data,
)
+from opentrons_shared_data.pipette.dev_types import PipetteModel
from opentrons.hardware_control.modules import (
Thermocycler,
TempDeck,
@@ -82,6 +94,7 @@
SpeedStatus,
)
from opentrons.hardware_control.module_control import AttachedModulesControl
+from opentrons.hardware_control.backends.types import HWStopCondition
# TODO (spp, 2023-08-22): write tests for ot3api.stop & ot3api.halt
@@ -107,47 +120,54 @@ def fake_liquid_settings() -> LiquidProbeSettings:
plunger_speed=10,
sensor_threshold_pascals=15,
expected_liquid_height=109,
- log_pressure=False,
+ output_option=OutputOptions.can_bus_only,
aspirate_while_sensing=False,
auto_zero_sensor=False,
num_baseline_reads=10,
- data_file="fake_file_name",
+ data_files={InstrumentProbeType.PRIMARY: "fake_file_name"},
)
@pytest.fixture
-def mock_move_to(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+def managed_obj(ot3_hardware: ThreadManager[OT3API]) -> OT3API:
+ managed = ot3_hardware.managed_obj
+ assert managed
+ return managed
+
+
+@pytest.fixture
+def mock_move_to(managed_obj: OT3API) -> Iterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj,
+ managed_obj,
"move_to",
AsyncMock(
- spec=ot3_hardware.managed_obj.move_to,
- wraps=ot3_hardware.managed_obj.move_to,
+ spec=managed_obj.move_to,
+ wraps=managed_obj.move_to,
),
) as mock_move:
yield mock_move
@pytest.fixture
-def mock_home(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+def mock_home(managed_obj: OT3API) -> Iterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj,
+ managed_obj,
"home",
AsyncMock(
- spec=ot3_hardware.managed_obj.home,
- wraps=ot3_hardware.managed_obj.home,
+ spec=managed_obj.home,
+ wraps=managed_obj.home,
),
) as mock_move:
yield mock_move
@pytest.fixture
-def mock_home_plunger(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+def mock_home_plunger(managed_obj: OT3API) -> Iterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj,
+ managed_obj,
"home_plunger",
AsyncMock(
- spec=ot3_hardware.managed_obj.home_plunger,
+ spec=managed_obj.home_plunger,
),
) as mock_move:
yield mock_move
@@ -155,254 +175,308 @@ def mock_home_plunger(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock
@pytest.fixture
def mock_move_to_plunger_bottom(
- ot3_hardware: ThreadManager[OT3API],
+ managed_obj: OT3API,
) -> Iterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj,
+ managed_obj,
"_move_to_plunger_bottom",
AsyncMock(
- spec=ot3_hardware.managed_obj._move_to_plunger_bottom,
+ spec=managed_obj._move_to_plunger_bottom,
),
) as mock_move:
yield mock_move
@pytest.fixture
-def mock_move(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+def mock_move(managed_obj: OT3API) -> Iterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj,
+ managed_obj,
"_move",
AsyncMock(
- spec=ot3_hardware.managed_obj._move,
+ spec=managed_obj._move,
),
) as mock_move:
yield mock_move
@pytest.fixture
-def mock_gantry_position(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+def mock_gantry_position(managed_obj: OT3API) -> Iterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj,
+ managed_obj,
"gantry_position",
AsyncMock(
- spec=ot3_hardware.managed_obj.gantry_position,
- wraps=ot3_hardware.managed_obj.gantry_position,
+ spec=managed_obj.gantry_position,
+ wraps=managed_obj.gantry_position,
),
) as mock_gantry_pos:
yield mock_gantry_pos
@pytest.fixture
-def mock_grip(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+def mock_grip(managed_obj: OT3API) -> Iterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj,
+ managed_obj,
"_grip",
AsyncMock(
- spec=ot3_hardware.managed_obj._grip,
- wraps=ot3_hardware.managed_obj._grip,
+ spec=managed_obj._grip,
+ wraps=managed_obj._grip,
),
) as mock_move:
yield mock_move
@pytest.fixture
-def mock_ungrip(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+def mock_ungrip(managed_obj: OT3API) -> Iterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj,
+ managed_obj,
"_ungrip",
AsyncMock(
- spec=ot3_hardware.managed_obj._ungrip,
- wraps=ot3_hardware.managed_obj._ungrip,
+ spec=managed_obj._ungrip,
+ wraps=managed_obj._ungrip,
),
) as mock_move:
yield mock_move
@pytest.fixture
-def mock_home_gear_motors(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+def mock_home_gear_motors(managed_obj: OT3API) -> Iterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj,
+ managed_obj,
"home_gear_motors",
AsyncMock(
- spec=ot3_hardware.managed_obj.home_gear_motors,
- wraps=ot3_hardware.managed_obj.home_gear_motors,
+ spec=managed_obj.home_gear_motors,
+ wraps=managed_obj.home_gear_motors,
),
) as mock_home_gear:
yield mock_home_gear
@pytest.fixture
-def mock_hold_jaw_width(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+def mock_hold_jaw_width(managed_obj: OT3API) -> Iterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj,
+ managed_obj,
"_hold_jaw_width",
AsyncMock(
- spec=ot3_hardware.managed_obj._hold_jaw_width,
- wraps=ot3_hardware.managed_obj._hold_jaw_width,
+ spec=managed_obj._hold_jaw_width,
+ wraps=managed_obj._hold_jaw_width,
),
) as mock_move:
yield mock_move
@pytest.fixture
-async def mock_backend_move(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+async def mock_backend_move(managed_obj: OT3API) -> AsyncIterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj._backend,
+ managed_obj._backend,
"move",
- AsyncMock(spec=ot3_hardware.managed_obj._backend.move),
+ AsyncMock(spec=managed_obj._backend.move),
) as mock_move:
yield mock_move
@pytest.fixture
-def mock_check_motor(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+def mock_check_motor(managed_obj: OT3API) -> Iterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj._backend,
+ managed_obj._backend,
"check_motor_status",
- Mock(spec=ot3_hardware.managed_obj._backend.check_motor_status),
+ Mock(spec=managed_obj._backend.check_motor_status),
) as mock_check:
yield mock_check
@pytest.fixture
-def mock_check_encoder(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+def mock_check_encoder(managed_obj: OT3API) -> Iterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj._backend,
+ managed_obj._backend,
"check_encoder_status",
- Mock(spec=ot3_hardware.managed_obj._backend.check_encoder_status),
+ Mock(spec=managed_obj._backend.check_encoder_status),
) as mock_check:
yield mock_check
@pytest.fixture
-async def mock_refresh(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+async def mock_refresh(managed_obj: OT3API) -> AsyncIterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj,
+ managed_obj,
"refresh_positions",
AsyncMock(
- spec=ot3_hardware.managed_obj.refresh_positions,
- wraps=ot3_hardware.managed_obj.refresh_positions,
+ spec=managed_obj.refresh_positions,
+ wraps=managed_obj.refresh_positions,
),
) as mock_refresh:
yield mock_refresh
@pytest.fixture
-async def mock_reset(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+async def mock_reset(managed_obj: OT3API) -> AsyncIterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj,
+ managed_obj,
"reset",
AsyncMock(),
) as mock_reset:
yield mock_reset
+@pytest.fixture
+def mock_jaw_width() -> Iterator[MagicMock]:
+ with patch(
+ "opentrons.hardware_control.instruments.ot3.gripper.Gripper.jaw_width",
+ new_callable=PropertyMock,
+ ) as jaw_width:
+ yield jaw_width
+
+
+@pytest.fixture
+def mock_max_grip_error() -> Iterator[MagicMock]:
+ with patch(
+ "opentrons.hardware_control.instruments.ot3.gripper.Gripper.max_allowed_grip_error",
+ new_callable=PropertyMock,
+ ) as max_error:
+ yield max_error
+
+
@pytest.fixture
async def mock_instrument_handlers(
- ot3_hardware: ThreadManager[OT3API],
-) -> Iterator[Tuple[MagicMock]]:
+ managed_obj: OT3API,
+) -> AsyncIterator[Tuple[MagicMock, MagicMock]]:
with patch.object(
- ot3_hardware.managed_obj,
+ managed_obj,
"_gripper_handler",
MagicMock(spec=GripperHandler),
) as mock_gripper_handler, patch.object(
- ot3_hardware.managed_obj, "_pipette_handler", MagicMock(spec=OT3PipetteHandler)
+ managed_obj, "_pipette_handler", MagicMock(spec=OT3PipetteHandler)
) as mock_pipette_handler:
yield mock_gripper_handler, mock_pipette_handler
@pytest.fixture
-async def gripper_present(ot3_hardware: ThreadManager[OT3API]) -> None:
+async def gripper_present(
+ managed_obj: OT3API,
+ ot3_hardware: ThreadManager[OT3API],
+ hardware_backend: OT3Simulator,
+) -> None:
# attach a gripper if we're testing the gripper mount
gripper_config = gc.load(GripperModel.v1)
instr_data = AttachedGripper(config=gripper_config, id="test")
- ot3_hardware._backend._attached_instruments[OT3Mount.GRIPPER] = {
- "model": GripperModel.v1,
- "id": "test",
- }
- ot3_hardware._backend._present_nodes.add(NodeId.gripper)
+ hardware_backend._attached_instruments[OT3Mount.GRIPPER] = cast(
+ GripperSpec,
+ {
+ "model": GripperModel.v1,
+ "id": "test",
+ },
+ )
+ hardware_backend._present_axes.update((Axis.G, Axis.Z_G))
await ot3_hardware.cache_gripper(instr_data)
+@pytest.fixture
+def hardware_backend(managed_obj: OT3API) -> OT3Simulator:
+ assert isinstance(
+ managed_obj._backend, OT3Simulator
+ ), "Tests only work with simulator"
+ return managed_obj._backend
+
+
+class PipetteLoadConfig(TypedDict):
+ channels: Literal[1, 8, 96]
+ version: Tuple[Literal[1, 2, 3], Literal[0, 1, 2, 3, 4, 5, 6]]
+ model: PipetteModel
+
+
+class GripperLoadConfig(TypedDict):
+ model: GripperModel
+ id: str
+
+
+LoadConfigs = List[
+ Union[
+ Tuple[Literal[OT3Mount.RIGHT], PipetteLoadConfig],
+ Tuple[Literal[OT3Mount.LEFT], PipetteLoadConfig],
+ Tuple[Literal[OT3Mount.GRIPPER], GripperLoadConfig],
+ ]
+]
+
+
@pytest.mark.parametrize(
"load_configs,load",
(
(
- {
- OT3Mount.RIGHT: {"channels": 8, "version": (3, 3), "model": "p50"},
- OT3Mount.LEFT: {"channels": 1, "version": (3, 3), "model": "p1000"},
- },
+ [
+ (OT3Mount.RIGHT, {"channels": 8, "version": (3, 3), "model": "p50"}),
+ (OT3Mount.LEFT, {"channels": 1, "version": (3, 3), "model": "p1000"}),
+ ],
GantryLoad.LOW_THROUGHPUT,
),
- ({}, GantryLoad.LOW_THROUGHPUT),
+ ([], GantryLoad.LOW_THROUGHPUT),
(
- {OT3Mount.GRIPPER: {"model": GripperModel.v1, "id": "g12345"}},
+ [(OT3Mount.GRIPPER, {"model": GripperModel.v1, "id": "g12345"})],
GantryLoad.LOW_THROUGHPUT,
),
(
- {OT3Mount.LEFT: {"channels": 8, "version": (3, 3), "model": "p1000"}},
+ [(OT3Mount.LEFT, {"channels": 8, "version": (3, 3), "model": "p1000"})],
GantryLoad.LOW_THROUGHPUT,
),
(
- {OT3Mount.RIGHT: {"channels": 8, "version": (3, 3), "model": "p1000"}},
+ [(OT3Mount.RIGHT, {"channels": 8, "version": (3, 3), "model": "p1000"})],
GantryLoad.LOW_THROUGHPUT,
),
(
- {OT3Mount.LEFT: {"channels": 96, "model": "p1000", "version": (3, 3)}},
+ [(OT3Mount.LEFT, {"channels": 96, "model": "p1000", "version": (3, 3)})],
GantryLoad.HIGH_THROUGHPUT,
),
(
- {
- OT3Mount.LEFT: {"channels": 1, "version": (3, 3), "model": "p1000"},
- OT3Mount.GRIPPER: {"model": GripperModel.v1, "id": "g12345"},
- },
+ [
+ (OT3Mount.LEFT, {"channels": 1, "version": (3, 3), "model": "p1000"}),
+ (OT3Mount.GRIPPER, {"model": GripperModel.v1, "id": "g12345"}),
+ ],
GantryLoad.LOW_THROUGHPUT,
),
(
- {
- OT3Mount.RIGHT: {"channels": 8, "version": (3, 3), "model": "p1000"},
- OT3Mount.GRIPPER: {"model": GripperModel.v1, "id": "g12345"},
- },
+ [
+ (OT3Mount.RIGHT, {"channels": 8, "version": (3, 3), "model": "p1000"}),
+ (OT3Mount.GRIPPER, {"model": GripperModel.v1, "id": "g12345"}),
+ ],
GantryLoad.LOW_THROUGHPUT,
),
(
- {
- OT3Mount.LEFT: {"channels": 96, "model": "p1000", "version": (3, 3)},
- OT3Mount.GRIPPER: {"model": GripperModel.v1, "id": "g12345"},
- },
+ [
+ (OT3Mount.LEFT, {"channels": 96, "model": "p1000", "version": (3, 3)}),
+ (OT3Mount.GRIPPER, {"model": GripperModel.v1, "id": "g12345"}),
+ ],
GantryLoad.HIGH_THROUGHPUT,
),
),
)
async def test_gantry_load_transform(
ot3_hardware: ThreadManager[OT3API],
- load_configs: Dict[str, Union[int, str, Tuple[int, int]]],
+ load_configs: LoadConfigs,
load: GantryLoad,
) -> None:
- for mount, configs in load_configs.items():
- if mount == OT3Mount.GRIPPER:
- gripper_config = gc.load(configs["model"])
- instr_data = AttachedGripper(config=gripper_config, id="2345")
- await ot3_hardware.cache_gripper(instr_data)
+ for pair in load_configs:
+ if pair[0] == OT3Mount.GRIPPER:
+ gripper_config = gc.load(pair[1]["model"])
+ gripper_data = AttachedGripper(config=gripper_config, id="2345")
+ await ot3_hardware.cache_gripper(gripper_data)
else:
pipette_config = load_pipette_data.load_definition(
- PipetteModelType(configs["model"]),
- PipetteChannelType(configs["channels"]),
- PipetteVersionType(*configs["version"]),
+ PipetteModelType(pair[1]["model"]),
+ PipetteChannelType(pair[1]["channels"]),
+ PipetteVersionType(*pair[1]["version"]),
)
instr_data = AttachedPipette(config=pipette_config, id="fakepip")
- await ot3_hardware.cache_pipette(mount, instr_data, None)
+ await ot3_hardware.cache_pipette(pair[0], instr_data, None)
assert ot3_hardware._gantry_load_from_instruments() == load
@pytest.fixture
def mock_backend_capacitive_probe(
- ot3_hardware: ThreadManager[OT3API],
+ hardware_backend: OT3Simulator,
) -> Iterator[AsyncMock]:
- backend = ot3_hardware.managed_obj._backend
with patch.object(
- backend, "capacitive_probe", AsyncMock(spec=backend.capacitive_probe)
+ hardware_backend,
+ "capacitive_probe",
+ AsyncMock(spec=hardware_backend.capacitive_probe),
) as mock_probe:
def _update_position(
@@ -413,7 +487,7 @@ def _update_position(
threshold_pf: float,
probe: InstrumentProbeType,
) -> None:
- ot3_hardware._backend._position[axis_to_node(moving)] += distance_mm / 2
+ hardware_backend._position[moving] += distance_mm / 2
mock_probe.side_effect = _update_position
@@ -422,12 +496,12 @@ def _update_position(
@pytest.fixture
def mock_current_position_ot3(
- ot3_hardware: ThreadManager[OT3API],
+ managed_obj: OT3API,
) -> Iterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj,
+ managed_obj,
"current_position_ot3",
- AsyncMock(spec=ot3_hardware.managed_obj.current_position_ot3),
+ AsyncMock(spec=managed_obj.current_position_ot3),
) as mock_position:
mock_position.return_value = {
Axis.X: 477.2,
@@ -443,12 +517,11 @@ def mock_current_position_ot3(
@pytest.fixture
-def mock_backend_capacitive_pass(
- ot3_hardware: ThreadManager[OT3API],
-) -> Iterator[AsyncMock]:
- backend = ot3_hardware.managed_obj._backend
+def mock_backend_capacitive_pass(hardware_backend: OT3Simulator) -> Iterator[AsyncMock]:
with patch.object(
- backend, "capacitive_pass", AsyncMock(spec=backend.capacitive_pass)
+ hardware_backend,
+ "capacitive_pass",
+ AsyncMock(spec=hardware_backend.capacitive_pass),
) as mock_pass:
async def _update_position(
@@ -457,8 +530,8 @@ async def _update_position(
distance_mm: float,
speed_mm_per_s: float,
probe: InstrumentProbeType,
- ) -> None:
- ot3_hardware._backend._position[axis_to_node(moving)] += distance_mm / 2
+ ) -> List[float]:
+ hardware_backend._position[moving] += distance_mm / 2
return [1, 2, 3, 4, 5, 6, 8]
mock_pass.side_effect = _update_position
@@ -466,20 +539,19 @@ async def _update_position(
@pytest.fixture
-def mock_backend_get_tip_status(
- ot3_hardware: ThreadManager[OT3API],
-) -> Iterator[AsyncMock]:
- backend = ot3_hardware.managed_obj._backend
- with patch.object(backend, "get_tip_status", AsyncMock()) as mock_tip_status:
+def mock_backend_get_tip_status(hardware_backend: OT3Simulator) -> Iterator[AsyncMock]:
+ with patch.object(
+ hardware_backend, "get_tip_status", AsyncMock()
+ ) as mock_tip_status:
yield mock_tip_status
@pytest.fixture
def mock_verify_tip_presence(
- ot3_hardware: ThreadManager[OT3API],
+ managed_obj: OT3API,
) -> Iterator[AsyncMock]:
with patch.object(
- ot3_hardware.managed_obj, "verify_tip_presence", AsyncMock()
+ managed_obj, "verify_tip_presence", AsyncMock()
) as mock_check_tip:
yield mock_check_tip
@@ -520,10 +592,11 @@ async def prepare_for_mock_blowout(
@pytest.mark.parametrize("load_configs", load_pipette_configs)
async def test_pickup_moves(
ot3_hardware: ThreadManager[OT3API],
- mock_instrument_handlers: Tuple[Mock],
+ managed_obj: OT3API,
+ mock_instrument_handlers: Tuple[Mock, Mock],
mock_move_to_plunger_bottom: AsyncMock,
mock_home_gear_motors: AsyncMock,
- load_configs: List[Dict[str, Any]],
+ load_configs: Dict[OT3Mount, PipetteLoadConfig],
) -> None:
_, pipette_handler = mock_instrument_handlers
for mount, configs in load_configs.items():
@@ -558,9 +631,9 @@ async def test_pickup_moves(
pipette_handler.plan_lt_pick_up_tip.return_value = move_plan_return_val
with patch.object(
- ot3_hardware.managed_obj,
+ managed_obj,
"move_rel",
- AsyncMock(spec=ot3_hardware.managed_obj.move_rel),
+ AsyncMock(spec=managed_obj.move_rel),
) as mock_move_rel:
await ot3_hardware.pick_up_tip(Mount.LEFT, 40.0)
move_call_list = [call.args for call in mock_move_rel.call_args_list]
@@ -580,7 +653,7 @@ async def test_pickup_moves(
async def test_blow_out_position(
ot3_hardware: ThreadManager[OT3API],
mock_backend_get_tip_status: AsyncMock,
- load_configs: List[Dict[str, Any]],
+ load_configs: Dict[OT3Mount, PipetteLoadConfig],
blowout_volume: float,
) -> None:
liquid_class = LiquidClasses.default
@@ -630,7 +703,7 @@ async def test_blow_out_position(
async def test_blow_out_error(
ot3_hardware: ThreadManager[OT3API],
mock_backend_get_tip_status: AsyncMock,
- load_configs: List[Dict[str, Any]],
+ load_configs: Dict[OT3Mount, PipetteLoadConfig],
blowout_volume: float,
) -> None:
liquid_class = LiquidClasses.default
@@ -698,6 +771,7 @@ async def test_move_to_without_homing_first(
async def test_liquid_probe(
mock_move_to: AsyncMock,
ot3_hardware: ThreadManager[OT3API],
+ hardware_backend: OT3Simulator,
head_node: NodeId,
pipette_node: Axis,
mount: OT3Mount,
@@ -708,13 +782,12 @@ async def test_liquid_probe(
mock_move_to_plunger_bottom: AsyncMock,
) -> None:
mock_ungrip.return_value = None
- backend = ot3_hardware.managed_obj._backend
await ot3_hardware.home()
mock_move_to.return_value = None
with patch.object(
- backend, "liquid_probe", AsyncMock(spec=backend.liquid_probe)
- ) as mock_position:
+ hardware_backend, "liquid_probe", AsyncMock(spec=hardware_backend.liquid_probe)
+ ) as mock_liquid_probe:
return_dict = {
head_node: 140,
NodeId.gantry_x: 0,
@@ -723,7 +796,7 @@ async def test_liquid_probe(
}
# make sure aspirate while sensing reverses direction
- mock_position.return_value = return_dict
+ mock_liquid_probe.return_value = return_dict
fake_settings_aspirate = LiquidProbeSettings(
starting_mount_height=100,
max_z_distance=15,
@@ -732,28 +805,29 @@ async def test_liquid_probe(
plunger_speed=10,
sensor_threshold_pascals=15,
expected_liquid_height=109,
- log_pressure=False,
+ output_option=OutputOptions.can_bus_only,
aspirate_while_sensing=True,
auto_zero_sensor=False,
num_baseline_reads=10,
- data_file="fake_file_name",
+ data_files={InstrumentProbeType.PRIMARY: "fake_file_name"},
)
await ot3_hardware.liquid_probe(mount, fake_settings_aspirate)
mock_move_to_plunger_bottom.assert_called_once()
- backend.liquid_probe.assert_called_once_with(
+ mock_liquid_probe.assert_called_once_with(
mount,
fake_settings_aspirate.max_z_distance,
fake_settings_aspirate.mount_speed,
(fake_settings_aspirate.plunger_speed * -1),
fake_settings_aspirate.sensor_threshold_pascals,
- fake_settings_aspirate.log_pressure,
+ fake_settings_aspirate.output_option,
+ fake_settings_aspirate.data_files,
fake_settings_aspirate.auto_zero_sensor,
fake_settings_aspirate.num_baseline_reads,
probe=InstrumentProbeType.PRIMARY,
)
return_dict[head_node], return_dict[pipette_node] = 142, 142
- mock_position.return_value = return_dict
+ mock_liquid_probe.return_value = return_dict
await ot3_hardware.liquid_probe(
mount, fake_liquid_settings
) # should raise no exceptions
@@ -800,9 +874,6 @@ async def test_capacitive_probe(
assert this_point == original
-Direction = Union[Literal[0.0], Literal[1.0], Literal[-1.0]]
-
-
@pytest.mark.parametrize(
"target,origin,prep_direction,probe_direction",
[
@@ -841,8 +912,8 @@ async def test_probe_direction(
fake_settings: CapacitivePassSettings,
target: float,
origin: Point,
- prep_direction: Direction,
- probe_direction: Direction,
+ prep_direction: float,
+ probe_direction: float,
) -> None:
mock_gantry_position.return_value = origin
await ot3_hardware.capacitive_probe(OT3Mount.RIGHT, Axis.X, target, fake_settings)
@@ -1014,12 +1085,15 @@ async def test_gripper_action_fails_with_no_gripper(
mock_ungrip.assert_not_called()
+@pytest.mark.parametrize("needs_calibration", [True, False])
async def test_gripper_action_works_with_gripper(
ot3_hardware: ThreadManager[OT3API],
+ managed_obj: OT3API,
mock_grip: AsyncMock,
mock_ungrip: AsyncMock,
mock_hold_jaw_width: AsyncMock,
gripper_present: None,
+ needs_calibration: bool,
) -> None:
gripper_config = gc.load(GripperModel.v1)
@@ -1034,15 +1108,35 @@ async def test_gripper_action_works_with_gripper(
CommandPreconditionViolated, match="Cannot grip gripper jaw before homing"
):
await ot3_hardware.grip(5.0)
+ gripper = managed_obj._gripper_handler._gripper
+ assert gripper
+ calibration_offset = 5
+ gripper._jaw_max_offset = None if needs_calibration else calibration_offset
await ot3_hardware.home_gripper_jaw()
- mock_ungrip.assert_called_once()
+ if needs_calibration:
+ assert mock_ungrip.call_count == 2
+ mock_grip.assert_called_once()
+ else:
+ mock_ungrip.assert_called_once()
mock_ungrip.reset_mock()
+ mock_grip.reset_mock()
+ gripper._jaw_max_offset = None if needs_calibration else 5
await ot3_hardware.home([Axis.G])
- mock_ungrip.assert_called_once()
+ if needs_calibration:
+ assert mock_ungrip.call_count == 2
+ mock_grip.assert_called_once()
+ else:
+ mock_ungrip.assert_called_once()
+
+ mock_grip.reset_mock()
mock_ungrip.reset_mock()
await ot3_hardware.grip(5.0)
+ expected_displacement = 16.0
+ if not needs_calibration:
+ expected_displacement += calibration_offset / 2
mock_grip.assert_called_once_with(
- gc.duty_cycle_by_force(5.0, gripper_config.grip_force_profile),
+ duty_cycle=gc.duty_cycle_by_force(5.0, gripper_config.grip_force_profile),
+ expected_displacement=expected_displacement,
stay_engaged=True,
)
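For context on the arithmetic in the parametrized gripper test above: when the jaw has already been calibrated, half of the stored `_jaw_max_offset` is added to the base displacement passed to `_grip()`. A small sketch of that expectation, using the same numbers as the test rather than production defaults:

```python
from typing import Optional


def expected_jaw_displacement(base: float, jaw_max_offset: Optional[float]) -> float:
    """Half of any stored jaw-calibration offset widens the expected jaw travel."""
    if jaw_max_offset is None:  # gripper still needs jaw calibration
        return base
    return base + jaw_max_offset / 2


assert expected_jaw_displacement(16.0, None) == 16.0
assert expected_jaw_displacement(16.0, 5.0) == 18.5
```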
@@ -1100,7 +1194,7 @@ async def test_gripper_fails_for_pipette_cps(
@pytest.mark.xfail
-async def test_gripper_position(ot3_hardware: ThreadManager[OT3API]):
+async def test_gripper_position(ot3_hardware: ThreadManager[OT3API]) -> None:
gripper_config = gc.load(GripperModel.v1)
instr_data = AttachedGripper(config=gripper_config, id="g12345")
await ot3_hardware.cache_gripper(instr_data)
@@ -1116,27 +1210,29 @@ async def test_gripper_position(ot3_hardware: ThreadManager[OT3API]):
async def test_gripper_move_to(
ot3_hardware: ThreadManager[OT3API], mock_backend_move: AsyncMock
-):
+) -> None:
# Moving the gripper should, well, work
gripper_config = gc.load(GripperModel.v1)
instr_data = AttachedGripper(config=gripper_config, id="g12345")
await ot3_hardware.cache_gripper(instr_data)
await ot3_hardware.move_to(OT3Mount.GRIPPER, Point(0, 0, 0))
- _, moves, _ = mock_backend_move.call_args_list[0][0]
- for move in moves:
- assert list(sorted(move.unit_vector.keys(), key=lambda elem: elem.value)) == [
+ origin, target, _, _ = mock_backend_move.call_args_list[0][0]
+ assert sorted(target.keys(), key=lambda elem: cast(int, elem.value)) == sorted(
+ [
Axis.X,
Axis.Y,
Axis.Z_G,
- ]
+ ],
+ key=lambda elem: cast(int, elem.value),
+ )
async def test_home_plunger(
ot3_hardware: ThreadManager[OT3API],
mock_move_to_plunger_bottom: AsyncMock,
mock_home: AsyncMock,
-):
+) -> None:
mount = OT3Mount.LEFT
instr_data = AttachedPipette(
config=load_pipette_data.load_definition(
@@ -1155,7 +1251,7 @@ async def test_home_plunger(
async def test_prepare_for_aspirate(
ot3_hardware: ThreadManager[OT3API],
mock_move_to_plunger_bottom: AsyncMock,
-):
+) -> None:
mount = OT3Mount.LEFT
instr_data = AttachedPipette(
config=load_pipette_data.load_definition(
@@ -1188,7 +1284,7 @@ async def test_plunger_ready_to_aspirate_after_dispense(
disp_vol: float,
push_out: Optional[float],
is_ready: bool,
-):
+) -> None:
mount = OT3Mount.LEFT
instr_data = AttachedPipette(
@@ -1217,7 +1313,7 @@ async def test_plunger_ready_to_aspirate_after_dispense(
async def test_move_to_plunger_bottom(
ot3_hardware: ThreadManager[OT3API],
mock_move: AsyncMock,
-):
+) -> None:
mount = OT3Mount.LEFT
instr_data = AttachedPipette(
config=load_pipette_data.load_definition(
@@ -1315,7 +1411,7 @@ async def test_move_axes(
mock_check_motor: Mock,
input_position: Dict[Axis, float],
expected_move_pos: OrderedDict[Axis, float],
-):
+) -> None:
await ot3_hardware.move_axes(position=input_position)
mock_check_motor.return_value = True
@@ -1336,11 +1432,11 @@ async def test_move_expect_stall_flag(
expect_stalls: bool,
) -> None:
- expected = MoveStopCondition.stall if expect_stalls else MoveStopCondition.none
+ expected = HWStopCondition.stall if expect_stalls else HWStopCondition.none
await ot3_hardware.move_to(Mount.LEFT, Point(0, 0, 0), _expect_stalls=expect_stalls)
mock_backend_move.assert_called_once()
- _, _, condition = mock_backend_move.call_args_list[0][0]
+ _, _, _, condition = mock_backend_move.call_args_list[0][0]
assert condition == expected
mock_backend_move.reset_mock()
@@ -1348,7 +1444,7 @@ async def test_move_expect_stall_flag(
Mount.LEFT, Point(10, 0, 0), _expect_stalls=expect_stalls
)
mock_backend_move.assert_called_once()
- _, _, condition = mock_backend_move.call_args_list[0][0]
+ _, _, _, condition = mock_backend_move.call_args_list[0][0]
assert condition == expected
@@ -1365,7 +1461,7 @@ async def test_move_expect_stall_flag(
async def test_reset_instrument_offset(
ot3_hardware: ThreadManager[OT3API],
mount: Union[OT3Mount, Mount],
- mock_instrument_handlers: Tuple[Mock],
+ mock_instrument_handlers: Tuple[Mock, Mock],
) -> None:
gripper_handler, pipette_handler = mock_instrument_handlers
await ot3_hardware.reset_instrument_offset(mount)
@@ -1379,60 +1475,66 @@ async def test_reset_instrument_offset(
@pytest.mark.parametrize(
- argnames=["mount", "expected_offset"],
+ argnames=["mount_expected_offset"],
argvalues=[
[
- OT3Mount.GRIPPER,
- GripperCalibrationOffset(
- offset=Point(1, 2, 3),
- source=SourceType.default,
- status=CalibrationStatus(),
- last_modified=None,
+ (
+ OT3Mount.GRIPPER,
+ GripperCalibrationOffset(
+ offset=Point(1, 2, 3),
+ source=SourceType.default,
+ status=CalibrationStatus(),
+ last_modified=None,
+ ),
),
],
[
- OT3Mount.RIGHT,
- PipetteOffsetByPipetteMount(
- offset=Point(10, 20, 30),
- source=SourceType.default,
- status=CalibrationStatus(),
- last_modified=None,
+ (
+ OT3Mount.RIGHT,
+ PipetteOffsetByPipetteMount(
+ offset=Point(10, 20, 30),
+ source=SourceType.default,
+ status=CalibrationStatus(),
+ last_modified=None,
+ ),
),
],
[
- OT3Mount.LEFT,
- PipetteOffsetByPipetteMount(
- offset=Point(100, 200, 300),
- source=SourceType.default,
- status=CalibrationStatus(),
- last_modified=None,
+ (
+ OT3Mount.LEFT,
+ PipetteOffsetByPipetteMount(
+ offset=Point(100, 200, 300),
+ source=SourceType.default,
+ status=CalibrationStatus(),
+ last_modified=None,
+ ),
),
],
],
)
def test_get_instrument_offset(
ot3_hardware: ThreadManager[OT3API],
- mount: OT3Mount,
- expected_offset: Union[GripperCalibrationOffset, PipetteOffsetByPipetteMount],
- mock_instrument_handlers: Tuple[Mock],
+ mount_expected_offset: Union[
+ Tuple[Literal[OT3Mount.GRIPPER], GripperCalibrationOffset],
+ Tuple[Literal[OT3Mount.RIGHT], PipetteOffsetByPipetteMount],
+ Tuple[Literal[OT3Mount.LEFT], PipetteOffsetByPipetteMount],
+ ],
+ mock_instrument_handlers: Tuple[Mock, Mock],
) -> None:
gripper_handler, pipette_handler = mock_instrument_handlers
- if mount == OT3Mount.GRIPPER:
+ if mount_expected_offset[0] == OT3Mount.GRIPPER:
gripper_handler.get_gripper_dict.return_value = GripperDict(
model=GripperModel.v1,
gripper_id="abc",
state=GripperJawState.UNHOMED,
display_name="abc",
- fw_update_required=False,
- fw_current_version=100,
- fw_next_version=None,
- calibration_offset=expected_offset,
+ calibration_offset=mount_expected_offset[1],
)
else:
- pipette_handler.get_instrument_offset.return_value = expected_offset
+ pipette_handler.get_instrument_offset.return_value = mount_expected_offset[1]
- found_offset = ot3_hardware.get_instrument_offset(mount=mount)
- assert found_offset == expected_offset
+ found_offset = ot3_hardware.get_instrument_offset(mount=mount_expected_offset[0])
+ assert found_offset == mount_expected_offset[1]
@pytest.mark.parametrize(
@@ -1448,7 +1550,7 @@ def test_get_instrument_offset(
async def test_save_instrument_offset(
ot3_hardware: ThreadManager[OT3API],
mount: Union[OT3Mount, Mount],
- mock_instrument_handlers: Tuple[Mock],
+ mock_instrument_handlers: Tuple[Mock, Mock],
) -> None:
gripper_handler, pipette_handler = mock_instrument_handlers
await ot3_hardware.save_instrument_offset(mount, Point(1, 1, 1))
@@ -1464,7 +1566,8 @@ async def test_save_instrument_offset(
@pytest.mark.xfail()
async def test_pick_up_tip_full_tiprack(
ot3_hardware: ThreadManager[OT3API],
- mock_instrument_handlers: Tuple[Mock],
+ hardware_backend: OT3Simulator,
+ mock_instrument_handlers: Tuple[Mock, Mock],
mock_ungrip: AsyncMock,
mock_move_to_plunger_bottom: AsyncMock,
mock_home_gear_motors: AsyncMock,
@@ -1473,15 +1576,14 @@ async def test_pick_up_tip_full_tiprack(
mock_ungrip.return_value = None
await ot3_hardware.home()
_, pipette_handler = mock_instrument_handlers
- backend = ot3_hardware.managed_obj._backend
instr_mock = AsyncMock(spec=Pipette)
instr_mock.nozzle_manager.current_configruation.configuration.return_value = (
NozzleConfigurationType.FULL
)
with patch.object(
- backend, "tip_action", AsyncMock(spec=backend.tip_action)
+ hardware_backend, "tip_action", AsyncMock(spec=hardware_backend.tip_action)
) as tip_action:
- backend._gear_motor_position = {NodeId: 0}
+ hardware_backend._gear_motor_position = {Axis.P_L: 0}
pipette_handler.get_pipette.return_value = instr_mock
pipette_handler.plan_ht_pick_up_tip.return_value = TipActionSpec(
@@ -1503,16 +1605,16 @@ def _update_gear_motor_pos(
moves: Optional[List[Move[Axis]]] = None,
distance: Optional[float] = None,
) -> None:
- if NodeId.pipette_left not in backend._gear_motor_position:
- backend._gear_motor_position = {NodeId.pipette_left: 0.0}
+ if Axis.P_L not in hardware_backend._gear_motor_position:
+ hardware_backend._gear_motor_position = {Axis.P_L: 0.0}
if moves:
for move in moves:
for block in move.blocks:
- backend._gear_motor_position[NodeId.pipette_left] += (
+ hardware_backend._gear_motor_position[Axis.P_L] += float(
block.distance * move.unit_vector[Axis.Q]
)
elif distance:
- backend._gear_motor_position[NodeId.pipette_left] += distance
+ hardware_backend._gear_motor_position[Axis.P_L] += distance
tip_action.side_effect = _update_gear_motor_pos
await ot3_hardware.set_gantry_load(GantryLoad.HIGH_THROUGHPUT)
@@ -1529,17 +1631,20 @@ def _update_gear_motor_pos(
async def test_drop_tip_full_tiprack(
ot3_hardware: ThreadManager[OT3API],
- mock_instrument_handlers: Tuple[Mock],
+ hardware_backend: OT3Simulator,
+ mock_instrument_handlers: Tuple[Mock, Mock],
+ mock_backend_get_tip_status: AsyncMock,
mock_home_gear_motors: AsyncMock,
mock_verify_tip_presence: AsyncMock,
) -> None:
_, pipette_handler = mock_instrument_handlers
- backend = ot3_hardware.managed_obj._backend
with patch.object(
- backend, "tip_action", AsyncMock(spec=backend.tip_action)
+ hardware_backend,
+ "tip_action",
+ AsyncMock(spec=hardware_backend.tip_action, wraps=hardware_backend.tip_action),
) as tip_action:
- backend._gear_motor_position = {NodeId.pipette_left: 0}
+ hardware_backend._gear_motor_position = {Axis.Q: 0}
pipette_handler.plan_ht_drop_tip.return_value = TipActionSpec(
tip_action_moves=[
TipActionMoveSpec(
@@ -1557,35 +1662,21 @@ def set_mock_plunger_configs() -> None:
mock_instr.config.plunger_homing_configurations.current = 1.0
mock_instr.plunger_positions.bottom = -18.5
- def _update_gear_motor_pos(
- moves: Optional[List[Move[Axis]]] = None,
- distance: Optional[float] = None,
- velocity: Optional[float] = None,
- tip_action: str = "home",
- ) -> None:
- if NodeId.pipette_left not in backend._gear_motor_position:
- backend._gear_motor_position = {NodeId.pipette_left: 0.0}
- if moves:
- for move in moves:
- for block in move.blocks:
- backend._gear_motor_position[
- NodeId.pipette_left
- ] += block.distance
- elif distance:
- backend._gear_motor_position[NodeId.pipette_left] += distance
-
- tip_action.side_effect = _update_gear_motor_pos
set_mock_plunger_configs()
await ot3_hardware.set_gantry_load(GantryLoad.HIGH_THROUGHPUT)
mock_backend_get_tip_status.return_value = TipStateType.ABSENT
await ot3_hardware.drop_tip(Mount.LEFT, home_after=True)
pipette_handler.plan_ht_drop_tip.assert_called_once_with()
+ assert len(tip_action.call_args_list) == 2
# first call should be "clamp", moving down
- assert tip_action.call_args_list[0][-1]["moves"][0].unit_vector == {Axis.Q: 1}
+ first_target = tip_action.call_args_list[0][-1]["targets"][0][0]
+ assert list(first_target.keys()) == [Axis.Q]
+ assert first_target[Axis.Q] == 10
# next call should be "clamp", moving back up
- assert tip_action.call_args_list[1][-1]["moves"][0].unit_vector == {Axis.Q: -1}
- assert len(tip_action.call_args_list) == 2
+ second_target = tip_action.call_args_list[1][-1]["targets"][0][0]
+ assert list(second_target.keys()) == [Axis.Q]
+ assert second_target[Axis.Q] < 10
# home should be called after tip_action is done
assert len(mock_home_gear_motors.call_args_list) == 1
@@ -1595,14 +1686,14 @@ def _update_gear_motor_pos(
[[Axis.X], [Axis.X, Axis.Y], [Axis.X, Axis.Y, Axis.P_L], None],
)
async def test_update_position_estimation(
- ot3_hardware: ThreadManager[OT3API], axes: List[Axis]
+ ot3_hardware: ThreadManager[OT3API],
+ hardware_backend: OT3Simulator,
+ axes: List[Axis],
) -> None:
-
- backend = ot3_hardware.managed_obj._backend
with patch.object(
- backend,
+ hardware_backend,
"update_motor_estimation",
- AsyncMock(spec=backend.update_motor_estimation),
+ AsyncMock(spec=hardware_backend.update_motor_estimation),
) as mock_update:
await ot3_hardware._update_position_estimation(axes)
if axes is None:
@@ -1610,24 +1701,25 @@ async def test_update_position_estimation(
mock_update.assert_called_once_with(axes)
-async def test_refresh_positions(ot3_hardware: ThreadManager[OT3API]) -> None:
+async def test_refresh_positions(
+ ot3_hardware: ThreadManager[OT3API], hardware_backend: OT3Simulator
+) -> None:
- backend = ot3_hardware.managed_obj._backend
ot3_hardware._current_position.clear()
ot3_hardware._encoder_position.clear()
with patch.object(
- backend,
+ hardware_backend,
"update_motor_status",
- AsyncMock(spec=backend.update_motor_status),
+ AsyncMock(spec=hardware_backend.update_motor_status),
) as mock_update_status, patch.object(
- backend,
+ hardware_backend,
"update_position",
- AsyncMock(spec=backend.update_position),
+ AsyncMock(spec=hardware_backend.update_position),
) as mock_pos, patch.object(
- backend,
+ hardware_backend,
"update_encoder_position",
- AsyncMock(spec=backend.update_encoder_position),
+ AsyncMock(spec=hardware_backend.update_encoder_position),
) as mock_encoder:
mock_pos.return_value = {ax: 100 for ax in Axis}
@@ -1654,6 +1746,7 @@ async def test_refresh_positions(ot3_hardware: ThreadManager[OT3API]) -> None:
)
async def test_home_axis(
ot3_hardware: ThreadManager[OT3API],
+ hardware_backend: OT3Simulator,
mock_check_motor: Mock,
mock_check_encoder: Mock,
axis: Axis,
@@ -1669,37 +1762,34 @@ async def test_home_axis(
instr_data = AttachedPipette(config=pipette_config, id="fakepip")
await ot3_hardware.cache_pipette(Axis.to_ot3_mount(axis), instr_data, None)
- backend = ot3_hardware.managed_obj._backend
origin_pos = {ax: 100 for ax in Axis}
origin_encoder = {ax: 99 for ax in Axis}
- backend._position = {axis_to_node(ax): v for ax, v in origin_pos.items()}
- backend._encoder_position = {
- axis_to_node(ax): v for ax, v in origin_encoder.items()
- }
+ hardware_backend._position = {ax: v for ax, v in origin_pos.items()}
+ hardware_backend._encoder_position = {ax: v for ax, v in origin_encoder.items()}
mock_check_motor.return_value = stepper_ok
mock_check_encoder.return_value = encoder_ok
with patch.object(
- backend,
+ hardware_backend,
"move",
AsyncMock(
- spec=backend.move,
- wraps=backend.move,
+ spec=hardware_backend.move,
+ wraps=hardware_backend.move,
),
- ) as mock_backend_move, patch.object(
- backend,
+ ) as mock_hardware_backend_move, patch.object(
+ hardware_backend,
"home",
AsyncMock(
- spec=backend.home,
- wraps=backend.home,
+ spec=hardware_backend.home,
+ wraps=hardware_backend.home,
),
- ) as mock_backend_home, patch.object(
- backend,
+ ) as mock_hardware_backend_home, patch.object(
+ hardware_backend,
"update_motor_estimation",
AsyncMock(
- spec=backend.update_motor_estimation,
- wraps=backend.update_motor_estimation,
+ spec=hardware_backend.update_motor_estimation,
+ wraps=hardware_backend.update_motor_estimation,
),
) as mock_estimate:
@@ -1713,31 +1803,31 @@ async def test_home_axis(
if stepper_ok and encoder_ok:
"""Copy encoder position to stepper pos"""
- # for accurate axis, we just move to home pos:
+ # for accurate axis, we just move very close to home pos
if axis in [Axis.Z_L, Axis.P_L]:
# move is called
- mock_backend_move.assert_awaited_once()
- move = mock_backend_move.call_args_list[0][0][1][0]
- assert move.distance == 95.0
+ mock_hardware_backend_move.assert_awaited_once()
+ target = mock_hardware_backend_move.call_args_list[0][0][1][axis]
+ assert target == 5
# then home is called
- mock_backend_home.assert_awaited_once()
+ mock_hardware_backend_home.assert_awaited_once()
else:
# we move to 20 mm away from home
- mock_backend_move.assert_awaited_once()
- move = mock_backend_move.call_args_list[0][0][1][0]
- assert move.distance == 80.0
+ mock_hardware_backend_move.assert_awaited_once()
+ target = mock_hardware_backend_move.call_args_list[0][0][1][axis]
+ assert target == 20.0
# then home is called
- mock_backend_home.assert_awaited_once()
+ mock_hardware_backend_home.assert_awaited_once()
else:
# home axis
- mock_backend_home.assert_awaited_once()
+ mock_hardware_backend_home.assert_awaited_once()
# move not called
- mock_backend_move.assert_not_awaited()
+ mock_hardware_backend_move.assert_not_awaited()
# axis is at the home position
- expected_pos = {axis_to_node(ax): v for ax, v in origin_pos.items()}
- expected_pos.update({axis_to_node(axis): 0})
- assert backend._position == expected_pos
+ expected_pos = {ax: v for ax, v in origin_pos.items()}
+ expected_pos.update({axis: 0})
+ assert hardware_backend._position == expected_pos
@pytest.mark.parametrize("setting", [True, False])
@@ -1790,33 +1880,6 @@ def test_fw_version(
assert ot3_hardware.get_fw_version() == version_str
-@pytest.mark.parametrize(argnames=["enabled"], argvalues=[[True], [False]])
-async def test_status_bar_interface(
- ot3_hardware: ThreadManager[OT3API],
- enabled: bool,
-) -> None:
- """Test setting status bar statuses and make sure the cached status is correct."""
- await ot3_hardware.set_status_bar_enabled(enabled)
-
- settings = {
- StatusBarState.IDLE: StatusBarState.IDLE,
- StatusBarState.RUNNING: StatusBarState.RUNNING,
- StatusBarState.PAUSED: StatusBarState.PAUSED,
- StatusBarState.HARDWARE_ERROR: StatusBarState.HARDWARE_ERROR,
- StatusBarState.SOFTWARE_ERROR: StatusBarState.SOFTWARE_ERROR,
- StatusBarState.CONFIRMATION: StatusBarState.IDLE,
- StatusBarState.RUN_COMPLETED: StatusBarState.RUN_COMPLETED,
- StatusBarState.UPDATING: StatusBarState.UPDATING,
- StatusBarState.ACTIVATION: StatusBarState.IDLE,
- StatusBarState.DISCO: StatusBarState.IDLE,
- StatusBarState.OFF: StatusBarState.OFF,
- }
-
- for setting, response in settings.items():
- await ot3_hardware.set_status_bar_state(state=setting)
- assert ot3_hardware.get_status_bar_state() == response
-
-
@pytest.mark.parametrize(
argnames=["old_state", "new_state", "should_trigger"],
argvalues=[
@@ -1884,7 +1947,7 @@ async def test_stop_only_home_necessary_axes(
mock_home: AsyncMock,
mock_reset: AsyncMock,
jaw_state: GripperJawState,
-):
+) -> None:
gripper_config = gc.load(GripperModel.v1)
instr_data = AttachedGripper(config=gripper_config, id="test")
await ot3_hardware.cache_gripper(instr_data)
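A recurring change in this file is the new `managed_obj` fixture: `ThreadManager.managed_obj` is optional, so the fixtures resolve it once, assert it exists, and patch against the concrete `OT3API`. The pattern, condensed from the fixtures above:

```python
from typing import Iterator
from unittest.mock import AsyncMock, patch

import pytest

from opentrons.hardware_control import ThreadManager
from opentrons.hardware_control.ot3api import OT3API


@pytest.fixture
def managed_obj(ot3_hardware: ThreadManager[OT3API]) -> OT3API:
    managed = ot3_hardware.managed_obj
    assert managed, "the simulator should expose a managed OT3API"
    return managed


@pytest.fixture
def mock_move_to(managed_obj: OT3API) -> Iterator[AsyncMock]:
    # Patching the concrete object keeps the spec/wraps arguments fully typed.
    with patch.object(
        managed_obj,
        "move_to",
        AsyncMock(spec=managed_obj.move_to, wraps=managed_obj.move_to),
    ) as mock_move:
        yield mock_move
```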
diff --git a/api/tests/opentrons/hardware_control/test_ot3_calibration.py b/api/tests/opentrons/hardware_control/test_ot3_calibration.py
index 6ecd5f360c1..f2eee2bdcca 100644
--- a/api/tests/opentrons/hardware_control/test_ot3_calibration.py
+++ b/api/tests/opentrons/hardware_control/test_ot3_calibration.py
@@ -4,8 +4,7 @@
import pytest
import json
from math import isclose
-from typing import Iterator, Tuple
-from typing_extensions import Literal
+from typing import AsyncIterator, Iterator, Tuple, Any, Literal
from mock import patch, AsyncMock, Mock, call as mock_call
from opentrons.hardware_control import ThreadManager
from opentrons.hardware_control.ot3api import OT3API
@@ -14,7 +13,6 @@
from opentrons.hardware_control.ot3_calibration import (
find_edge_binary,
find_axis_center,
- EarlyCapacitiveSenseTrigger,
find_calibration_structure_height,
find_slot_center_binary,
find_slot_center_noncontact,
@@ -23,30 +21,35 @@
_edges_from_data,
_probe_deck_at,
_verify_edge_pos,
- InaccurateNonContactSweepError,
- CalibrationStructureNotFoundError,
- EdgeNotFoundError,
PREP_OFFSET_DEPTH,
EDGES,
)
from opentrons.types import Point
from opentrons_shared_data.deck import get_calibration_square_position_in_slot
+from opentrons_shared_data.errors.exceptions import (
+ CalibrationStructureNotFoundError,
+ EdgeNotFoundError,
+ EarlyCapacitiveSenseTrigger,
+ InaccurateNonContactSweepError,
+)
@pytest.fixture(autouse=True)
-def mock_save_json():
+def mock_save_json() -> Iterator[Mock]:
with patch("json.dump", Mock(spec=json.dump)) as jd:
yield jd
@pytest.fixture
def mock_move_to(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+ managed = ot3_hardware.managed_obj
+ assert managed
with patch.object(
- ot3_hardware.managed_obj,
+ managed,
"move_to",
AsyncMock(
- spec=ot3_hardware.managed_obj.move_to,
- wraps=ot3_hardware.managed_obj.move_to,
+ spec=managed.move_to,
+ wraps=managed.move_to,
),
) as mock_move:
yield mock_move
@@ -54,12 +57,14 @@ def mock_move_to(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
@pytest.fixture
def mock_capacitive_probe(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+ managed = ot3_hardware.managed_obj
+ assert managed
with patch.object(
- ot3_hardware.managed_obj,
+ managed,
"capacitive_probe",
AsyncMock(
- spec=ot3_hardware.managed_obj.capacitive_probe,
- wraps=ot3_hardware.managed_obj.capacitive_probe,
+ spec=managed.capacitive_probe,
+ wraps=managed.capacitive_probe,
),
) as mock_probe:
yield mock_probe
@@ -79,12 +84,14 @@ def mock_probe_deck() -> Iterator[AsyncMock]:
@pytest.fixture
def mock_capacitive_sweep(ot3_hardware: ThreadManager[OT3API]) -> Iterator[AsyncMock]:
+ managed = ot3_hardware.managed_obj
+ assert managed
with patch.object(
- ot3_hardware.managed_obj,
+ managed,
"capacitive_sweep",
AsyncMock(
- spec=ot3_hardware.managed_obj.capacitive_sweep,
- wraps=ot3_hardware.managed_obj.capacitive_sweep,
+ spec=managed.capacitive_sweep,
+ wraps=managed.capacitive_sweep,
),
) as mock_sweep:
yield mock_sweep
@@ -111,13 +118,15 @@ def mock_data_analysis() -> Iterator[Mock]:
def _update_edge_sense_config(
- old: OT3CalibrationSettings, **new_edge_sense_settings
+ old: OT3CalibrationSettings, **new_edge_sense_settings: Any
) -> OT3CalibrationSettings:
return replace(old, edge_sense=replace(old.edge_sense, **new_edge_sense_settings))
@pytest.fixture
-async def override_cal_config(ot3_hardware: ThreadManager[OT3API]) -> Iterator[None]:
+async def override_cal_config(
+ ot3_hardware: ThreadManager[OT3API],
+) -> AsyncIterator[None]:
old_calibration = copy.deepcopy(ot3_hardware.config.calibration)
await ot3_hardware.update_config(
calibration=_update_edge_sense_config(
@@ -148,18 +157,18 @@ def _other_axis_val(point: Tuple[float, float, float], main_axis: Axis) -> float
@pytest.mark.parametrize(
- "search_axis,direction_if_hit,probe_results,search_result",
+ "direction_if_hit,probe_results,search_result",
[
# For each axis and direction, test
# 1. hit-miss-miss
# 2. miss-hit-hit
# 3. miss-hit-miss
- (Axis.X, -1, (_HIT, _MISS, _MISS), -1),
- (Axis.X, -1, (_MISS, _HIT, _HIT), 1),
- (Axis.X, -1, (_MISS, _HIT, _MISS), 3),
- (Axis.X, 1, (_HIT, _MISS, _MISS), 1),
- (Axis.X, 1, (_MISS, _HIT, _HIT), -1),
- (Axis.X, 1, (_MISS, _HIT, _MISS), -3),
+ (-1, (_HIT, _MISS, _MISS), -1),
+ (-1, (_MISS, _HIT, _HIT), 1),
+ (-1, (_MISS, _HIT, _MISS), 3),
+ (1, (_HIT, _MISS, _MISS), 1),
+ (1, (_MISS, _HIT, _HIT), -1),
+ (1, (_MISS, _HIT, _MISS), -3),
],
)
async def test_find_edge(
@@ -168,7 +177,6 @@ async def test_find_edge(
override_cal_config: None,
mock_verify_edge: AsyncMock,
mock_move_to: AsyncMock,
- search_axis: Axis,
direction_if_hit: Literal[1, -1],
probe_results: Tuple[float, float, float],
search_result: float,
@@ -179,18 +187,18 @@ async def test_find_edge(
ot3_hardware,
OT3Mount.RIGHT,
Point(0, 0, 0),
- search_axis,
+ Axis.X,
direction_if_hit,
False,
)
- assert search_axis.of_point(result) == search_result
+ assert Axis.X.of_point(result) == search_result
# the first move is in z only to the cal height
checked_calls = mock_move_to.call_args_list[1:]
# all other moves should only move in the search axis
for call in checked_calls:
assert call[0][0] == OT3Mount.RIGHT
- assert _other_axis_val(call[0][1], search_axis) == pytest.approx(
- _other_axis_val(Point(0, 0, 0), search_axis)
+ assert _other_axis_val(call[0][1], Axis.X) == pytest.approx(
+ _other_axis_val(Point(0, 0, 0), Axis.X)
)
@@ -206,7 +214,7 @@ async def test_edge_not_found(
mock_capacitive_probe: AsyncMock,
override_cal_config: None,
mock_move_to: AsyncMock,
- search_axis: Axis,
+ search_axis: Literal[Axis.X, Axis.Y],
direction_if_hit: Literal[1, -1],
probe_results: Tuple[float, float, float],
) -> None:
@@ -295,6 +303,8 @@ async def test_method_enum(
ot3_hardware: ThreadManager[OT3API],
override_cal_config: None,
) -> None:
+ managed = ot3_hardware.managed_obj
+ assert managed
with patch(
"opentrons.hardware_control.ot3_calibration.find_slot_center_binary",
AsyncMock(spec=find_slot_center_binary),
@@ -308,9 +318,9 @@ async def test_method_enum(
"opentrons.hardware_control.ot3_calibration.find_calibration_structure_height",
AsyncMock(spec=find_calibration_structure_height),
) as find_deck, patch.object(
- ot3_hardware.managed_obj, "reset_instrument_offset", AsyncMock()
+ managed, "reset_instrument_offset", AsyncMock()
) as reset_instrument_offset, patch.object(
- ot3_hardware.managed_obj, "save_instrument_offset", AsyncMock()
+ managed, "save_instrument_offset", AsyncMock()
) as save_instrument_offset:
find_deck.return_value = 10
calibration_target.return_value = Point(0.0, 0.0, 0.0)
@@ -348,10 +358,12 @@ async def test_method_enum(
async def test_calibrate_mount_errors(
ot3_hardware: ThreadManager[OT3API], mock_data_analysis: Mock
) -> None:
+ managed = ot3_hardware.managed_obj
+ assert managed
with patch.object(
- ot3_hardware.managed_obj, "reset_instrument_offset", AsyncMock()
+ managed, "reset_instrument_offset", AsyncMock()
) as reset_instrument_offset, patch.object(
- ot3_hardware.managed_obj, "save_instrument_offset", AsyncMock()
+ managed, "save_instrument_offset", AsyncMock()
) as save_instrument_offset, patch(
"opentrons.hardware_control.ot3_calibration.find_calibration_structure_height",
AsyncMock(spec=find_calibration_structure_height),
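Several fixtures in this file are `async def` generators, so their annotations change from `Iterator` to `AsyncIterator`. A generic sketch of the corrected shape; the fixture name and body are placeholders only.

```python
from typing import AsyncIterator

import pytest


@pytest.fixture
async def override_setting() -> AsyncIterator[None]:
    # ...apply a temporary configuration change here...
    yield
    # ...restore the original configuration afterwards...
```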
diff --git a/api/tests/opentrons/hardware_control/test_ot3_transforms.py b/api/tests/opentrons/hardware_control/test_ot3_transforms.py
index a365e1866d7..37328043e84 100644
--- a/api/tests/opentrons/hardware_control/test_ot3_transforms.py
+++ b/api/tests/opentrons/hardware_control/test_ot3_transforms.py
@@ -1,16 +1,18 @@
import pytest
+from typing import Dict, Optional
from unittest import mock
from opentrons import types
from opentrons.hardware_control import ot3api
-from opentrons.hardware_control.types import Axis, OT3Mount
+from opentrons.hardware_control.types import Axis
from opentrons_shared_data.pipette import name_for_model
+from opentrons_shared_data.pipette.dev_types import PipetteModel
@pytest.mark.parametrize(
"pipette_model", ["p1000_single_v3.3", "p1000_single_v3.3", "p50_multi_v3.3"]
)
-async def test_transforms_roundtrip(pipette_model):
- attached = {
+async def test_transforms_roundtrip(pipette_model: PipetteModel) -> None:
+ attached: Dict[types.Mount, Dict[str, Optional[str]]] = {
types.Mount.LEFT: {
"model": pipette_model,
"id": pipette_model + "_idididid_left",
@@ -21,7 +23,6 @@ async def test_transforms_roundtrip(pipette_model):
"id": pipette_model + "_idididid_right",
"name": name_for_model(pipette_model),
},
- OT3Mount.GRIPPER: None,
}
sim = await ot3api.OT3API.build_hardware_simulator(attached_instruments=attached)
target = types.Point(20, 30, 40)
@@ -32,8 +33,10 @@ async def test_transforms_roundtrip(pipette_model):
@pytest.mark.parametrize(
"pipette_model", ["p1000_single_v3.3", "p50_single_v3.3", "p1000_multi_v3.3"]
)
-async def test_transform_values(pipette_model, enable_ot3_hardware_controller):
- attached = {
+async def test_transform_values(
+ pipette_model: PipetteModel, enable_ot3_hardware_controller: None
+) -> None:
+ attached: Dict[types.Mount, Dict[str, Optional[str]]] = {
types.Mount.LEFT: {
"model": pipette_model,
"id": pipette_model + "_idididid_left",
@@ -48,13 +51,15 @@ async def test_transform_values(pipette_model, enable_ot3_hardware_controller):
sim = await ot3api.OT3API.build_hardware_simulator(attached_instruments=attached)
target = types.Point(20, 30, 40)
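+    # spy on the backend move call to check the final transformed target position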
with mock.patch.object(
- sim._move_manager,
- "plan_motion",
- mock.MagicMock(side_effect=sim._move_manager.plan_motion),
- spec=sim._move_manager.plan_motion,
+ sim._backend,
+ "move",
+ mock.MagicMock(side_effect=sim._backend.move),
+ spec=sim._backend.move,
) as mock_move:
await sim.move_to(types.Mount.RIGHT, target)
- right_offset = sim.hardware_instruments[types.Mount.RIGHT].critical_point()
+ right_pipette = sim.hardware_instruments[types.Mount.RIGHT]
+ assert right_pipette
+ right_offset = right_pipette.critical_point()
point = [
(target.x - right_offset[0] - sim.config.right_mount_offset[0]) * -1
+ sim.config.carriage_offset[0],
@@ -63,18 +68,20 @@ async def test_transform_values(pipette_model, enable_ot3_hardware_controller):
(target.z - right_offset[2] - sim.config.right_mount_offset[2]) * -1
+ sim.config.carriage_offset[2],
]
- assert mock_move.call_args[1]["target_list"][0].position[Axis.X] == point[0]
- assert mock_move.call_args[1]["target_list"][0].position[Axis.Y] == point[1]
- assert mock_move.call_args[1]["target_list"][0].position[Axis.Z_R] == point[2]
+ assert mock_move.call_args[0][1][Axis.X] == point[0]
+ assert mock_move.call_args[0][1][Axis.Y] == point[1]
+ assert mock_move.call_args[0][1][Axis.Z_R] == point[2]
with mock.patch.object(
- sim._move_manager,
- "plan_motion",
- mock.MagicMock(side_effect=sim._move_manager.plan_motion),
- spec=sim._move_manager.plan_motion,
+ sim._backend,
+ "move",
+ mock.MagicMock(side_effect=sim._backend.move),
+ spec=sim._backend.move,
) as mock_move:
await sim.move_to(types.Mount.LEFT, target)
- left_offset = sim.hardware_instruments[types.Mount.LEFT].critical_point()
+ left_pipette = sim.hardware_instruments[types.Mount.LEFT]
+ assert left_pipette
+ left_offset = left_pipette.critical_point()
point = [
(target.x - left_offset[0] - sim.config.left_mount_offset[0]) * -1
+ sim.config.carriage_offset[0],
@@ -83,6 +90,6 @@ async def test_transform_values(pipette_model, enable_ot3_hardware_controller):
(target.z - left_offset[2] - sim.config.left_mount_offset[2]) * -1
+ sim.config.carriage_offset[2],
]
- assert mock_move.call_args[1]["target_list"][0].position[Axis.X] == point[0]
- assert mock_move.call_args[1]["target_list"][0].position[Axis.Y] == point[1]
- assert mock_move.call_args[1]["target_list"][0].position[Axis.Z_L] == point[2]
+ assert mock_move.call_args[0][1][Axis.X] == point[0]
+ assert mock_move.call_args[0][1][Axis.Y] == point[1]
+ assert mock_move.call_args[0][1][Axis.Z_L] == point[2]
diff --git a/api/tests/opentrons/hardware_control/test_pipette.py b/api/tests/opentrons/hardware_control/test_pipette.py
index c6b298c51c8..b6224a4e3dd 100644
--- a/api/tests/opentrons/hardware_control/test_pipette.py
+++ b/api/tests/opentrons/hardware_control/test_pipette.py
@@ -85,8 +85,7 @@ def test_tip_tracking(
model: Union[str, pipette_definition.PipetteModelVersionType],
) -> None:
hw_pipette = pipette_builder(model)
- with pytest.raises(AssertionError):
- hw_pipette.remove_tip()
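+    # removing a tip when none is attached is now a no-op rather than an AssertionError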
+ hw_pipette.remove_tip()
assert not hw_pipette.has_tip
tip_length = 25.0
hw_pipette.add_tip(tip_length)
@@ -95,8 +94,7 @@ def test_tip_tracking(
hw_pipette.add_tip(tip_length)
hw_pipette.remove_tip()
assert not hw_pipette.has_tip
- with pytest.raises(AssertionError):
- hw_pipette.remove_tip()
+ hw_pipette.remove_tip()
@pytest.mark.parametrize(
@@ -393,7 +391,7 @@ def test_reload_instrument_cal_ot3(
status=cal_types.CalibrationStatus(),
)
new_pip, skipped = ot3_pipette._reload_and_check_skip(
- old_pip.config, old_pip, new_cal
+ old_pip.config, old_pip, new_cal, use_old_aspiration_functions=False
)
assert skipped
diff --git a/api/tests/opentrons/hardware_control/test_pipette_handler.py b/api/tests/opentrons/hardware_control/test_pipette_handler.py
index c962fc592c5..1134a09b807 100644
--- a/api/tests/opentrons/hardware_control/test_pipette_handler.py
+++ b/api/tests/opentrons/hardware_control/test_pipette_handler.py
@@ -16,6 +16,11 @@
TipActionMoveSpec,
)
+from opentrons_shared_data.pipette.pipette_definition import (
+ PressFitPickUpTipConfiguration,
+ CamActionPickUpTipConfiguration,
+)
+
@pytest.fixture
def mock_pipette(decoy: Decoy) -> Pipette:
@@ -106,15 +111,23 @@ def test_plan_check_pick_up_tip_with_presses_argument(
decoy.when(mock_pipette.has_tip).then_return(False)
decoy.when(mock_pipette.config.quirks).then_return([])
- decoy.when(mock_pipette.pick_up_configurations.distance).then_return(0)
- decoy.when(mock_pipette.pick_up_configurations.increment).then_return(0)
- decoy.when(mock_pipette.connect_tiprack_distance_mm).then_return(8)
- decoy.when(mock_pipette.end_tip_action_retract_distance_mm).then_return(2)
-
- if presses_input is None:
- decoy.when(mock_pipette.pick_up_configurations.presses).then_return(
- expected_array_length
- )
+ decoy.when(mock_pipette.pick_up_configurations.press_fit.presses).then_return(
+ expected_array_length
+ )
+ decoy.when(
+ mock_pipette.pick_up_configurations.press_fit.distance_by_tip_count
+ ).then_return({1: 5})
+ decoy.when(mock_pipette.pick_up_configurations.press_fit.increment).then_return(0)
+ decoy.when(
+ mock_pipette.pick_up_configurations.press_fit.speed_by_tip_count
+ ).then_return({1: 10})
+ decoy.when(mock_pipette.config.end_tip_action_retract_distance_mm).then_return(0)
+ decoy.when(
+ mock_pipette.pick_up_configurations.press_fit.current_by_tip_count
+ ).then_return({1: 1.0})
+ decoy.when(mock_pipette.nozzle_manager.current_configuration.tip_count).then_return(
+ 1
+ )
spec, _add_tip_to_instrs = subject.plan_check_pick_up_tip(
mount, tip_length, presses, increment
@@ -147,32 +160,37 @@ def test_plan_check_pick_up_tip_with_presses_argument_ot3(
increment = 1
decoy.when(mock_pipette_ot3.has_tip).then_return(False)
- decoy.when(mock_pipette_ot3.pick_up_configurations.presses).then_return(2)
- decoy.when(mock_pipette_ot3.pick_up_configurations.increment).then_return(increment)
- decoy.when(mock_pipette_ot3.pick_up_configurations.speed).then_return(5.5)
- decoy.when(mock_pipette_ot3.pick_up_configurations.distance).then_return(10)
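+    # the 96-channel stub returns a cam-action pickup config; lower channel counts use press-fit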
decoy.when(
- mock_pipette_ot3.nozzle_manager.get_tip_configuration_current()
- ).then_return(1)
+ mock_pipette_ot3.get_pick_up_configuration_for_tip_count(channels)
+ ).then_return(
+ CamActionPickUpTipConfiguration(
+ distance=10,
+ speed=5.5,
+ prep_move_distance=19.0,
+ prep_move_speed=10,
+ currentByTipCount={96: 1.0},
+ connectTiprackDistanceMM=8,
+ )
+ if channels == 96
+ else PressFitPickUpTipConfiguration(
+ presses=2,
+ increment=increment,
+ distanceByTipCount={channels: 10},
+ speedByTipCount={channels: 5.5},
+ currentByTipCount={channels: 1.0},
+ )
+ )
decoy.when(mock_pipette_ot3.plunger_motor_current.run).then_return(1)
decoy.when(mock_pipette_ot3.config.quirks).then_return([])
decoy.when(mock_pipette_ot3.channels).then_return(channels)
- decoy.when(mock_pipette_ot3.pick_up_configurations.prep_move_distance).then_return(
- 19.0
+ decoy.when(mock_pipette_ot3.config.end_tip_action_retract_distance_mm).then_return(
+ 2
)
- decoy.when(mock_pipette_ot3.pick_up_configurations.prep_move_speed).then_return(10)
- decoy.when(mock_pipette_ot3.connect_tiprack_distance_mm).then_return(8)
- decoy.when(mock_pipette_ot3.end_tip_action_retract_distance_mm).then_return(2)
-
- if presses_input is None:
- decoy.when(mock_pipette_ot3.config.pick_up_presses).then_return(
- expected_array_length
- )
if channels == 96:
- spec = subject_ot3.plan_ht_pick_up_tip()
+ spec = subject_ot3.plan_ht_pick_up_tip(96)
else:
- spec = subject_ot3.plan_lt_pick_up_tip(mount, presses, increment)
+ spec = subject_ot3.plan_lt_pick_up_tip(mount, channels, presses, increment)
assert len(spec.tip_action_moves) == expected_array_length
assert spec.tip_action_moves == request.getfixturevalue(
expected_pick_up_motor_actions
diff --git a/api/tests/opentrons/hardware_control/test_simulator_setup.py b/api/tests/opentrons/hardware_control/test_simulator_setup.py
index 0c23412a834..2507a9969b3 100644
--- a/api/tests/opentrons/hardware_control/test_simulator_setup.py
+++ b/api/tests/opentrons/hardware_control/test_simulator_setup.py
@@ -56,16 +56,31 @@ async def test_with_magdeck(setup_klass: Type[simulator_setup.SimulatorSetup]) -
"""It should work to build a magdeck."""
setup = setup_klass(
attached_modules={
- "magdeck": [simulator_setup.ModuleCall("engage", kwargs={"height": 3})]
+ "magdeck": [
+ simulator_setup.ModuleItem(
+ serial_number="123",
+ calls=[simulator_setup.ModuleCall("engage", kwargs={"height": 3})],
+ ),
+ simulator_setup.ModuleItem(
+ serial_number="1234",
+ calls=[simulator_setup.ModuleCall("engage", kwargs={"height": 5})],
+ ),
+ ]
}
)
simulator = await simulator_setup.create_simulator(setup)
- assert type(simulator.attached_modules[0]) == MagDeck
+ assert isinstance(simulator.attached_modules[0], MagDeck)
assert simulator.attached_modules[0].live_data == {
"data": {"engaged": True, "height": 3},
"status": "engaged",
}
+ assert simulator.attached_modules[0].device_info["serial"] == "123"
+ assert simulator.attached_modules[1].live_data == {
+ "data": {"engaged": True, "height": 5},
+ "status": "engaged",
+ }
+ assert simulator.attached_modules[1].device_info["serial"] == "1234"
async def test_with_thermocycler(
@@ -75,21 +90,26 @@ async def test_with_thermocycler(
setup = setup_klass(
attached_modules={
"thermocycler": [
- simulator_setup.ModuleCall(
- "set_temperature",
- kwargs={
- "temperature": 3,
- "hold_time_seconds": 1,
- "hold_time_minutes": 2,
- "volume": 5,
- },
+ simulator_setup.ModuleItem(
+ serial_number="123",
+ calls=[
+ simulator_setup.ModuleCall(
+ "set_temperature",
+ kwargs={
+ "temperature": 3,
+ "hold_time_seconds": 1,
+ "hold_time_minutes": 2,
+ "volume": 5,
+ },
+ )
+ ],
)
]
}
)
simulator = await simulator_setup.create_simulator(setup)
- assert type(simulator.attached_modules[0]) == Thermocycler
+ assert isinstance(simulator.attached_modules[0], Thermocycler)
assert simulator.attached_modules[0].live_data == {
"data": {
"currentCycleIndex": None,
@@ -107,6 +127,7 @@ async def test_with_thermocycler(
},
"status": "holding at target",
}
+ assert simulator.attached_modules[0].device_info["serial"] == "123"
async def test_with_tempdeck(setup_klass: Type[simulator_setup.SimulatorSetup]) -> None:
@@ -114,22 +135,28 @@ async def test_with_tempdeck(setup_klass: Type[simulator_setup.SimulatorSetup])
setup = setup_klass(
attached_modules={
"tempdeck": [
- simulator_setup.ModuleCall(
- "start_set_temperature", kwargs={"celsius": 23}
- ),
- simulator_setup.ModuleCall(
- "await_temperature", kwargs={"awaiting_temperature": None}
- ),
+ simulator_setup.ModuleItem(
+ serial_number="123",
+ calls=[
+ simulator_setup.ModuleCall(
+ "start_set_temperature", kwargs={"celsius": 23}
+ ),
+ simulator_setup.ModuleCall(
+ "await_temperature", kwargs={"awaiting_temperature": None}
+ ),
+ ],
+ )
]
}
)
simulator = await simulator_setup.create_simulator(setup)
- assert type(simulator.attached_modules[0]) == TempDeck
+ assert isinstance(simulator.attached_modules[0], TempDeck)
assert simulator.attached_modules[0].live_data == {
"data": {"currentTemp": 23, "targetTemp": 23},
"status": "holding at target",
}
+ assert simulator.attached_modules[0].device_info["serial"] == "123"
def test_persistence_ot2(tmpdir: str) -> None:
@@ -139,10 +166,24 @@ def test_persistence_ot2(tmpdir: str) -> None:
Mount.RIGHT: {"id": "some id"},
},
attached_modules={
- "magdeck": [simulator_setup.ModuleCall("engage", kwargs={"height": 3})],
+ "magdeck": [
+ simulator_setup.ModuleItem(
+ serial_number="111",
+ calls=[simulator_setup.ModuleCall("engage", kwargs={"height": 3})],
+ )
+ ],
"tempdeck": [
- simulator_setup.ModuleCall("set_temperature", kwargs={"celsius": 23}),
- simulator_setup.ModuleCall("set_temperature", kwargs={"celsius": 24}),
+ simulator_setup.ModuleItem(
+ serial_number="111",
+ calls=[
+ simulator_setup.ModuleCall(
+ "set_temperature", kwargs={"celsius": 23}
+ ),
+ simulator_setup.ModuleCall(
+ "set_temperature", kwargs={"celsius": 24}
+ ),
+ ],
+ )
],
},
config=robot_configs.build_config_ot2({}),
@@ -162,10 +203,44 @@ def test_persistence_ot3(tmpdir: str) -> None:
OT3Mount.GRIPPER: {"id": "some-other-id"},
},
attached_modules={
- "magdeck": [simulator_setup.ModuleCall("engage", kwargs={"height": 3})],
+ "magdeck": [
+ simulator_setup.ModuleItem(
+ serial_number="mag-1",
+ calls=[
+ simulator_setup.ModuleCall(
+ function_name="engage",
+ kwargs={"height": 3},
+ )
+ ],
+ )
+ ],
"tempdeck": [
- simulator_setup.ModuleCall("set_temperature", kwargs={"celsius": 23}),
- simulator_setup.ModuleCall("set_temperature", kwargs={"celsius": 24}),
+ simulator_setup.ModuleItem(
+ serial_number="temp-1",
+ calls=[
+ simulator_setup.ModuleCall(
+ function_name="set_temperature",
+ kwargs={"celsius": 23},
+ ),
+ simulator_setup.ModuleCall(
+ function_name="set_temperature",
+ kwargs={"celsius": 24},
+ ),
+ ],
+ ),
+ simulator_setup.ModuleItem(
+ serial_number="temp-2",
+ calls=[
+ simulator_setup.ModuleCall(
+ function_name="set_temperature",
+ kwargs={"celsius": 23},
+ ),
+ simulator_setup.ModuleCall(
+ function_name="set_temperature",
+ kwargs={"celsius": 24},
+ ),
+ ],
+ ),
],
},
config=robot_configs.build_config_ot3({}),
diff --git a/api/tests/opentrons/hardware_control/test_thread_manager.py b/api/tests/opentrons/hardware_control/test_thread_manager.py
index fe3f53309ad..193740b4d75 100644
--- a/api/tests/opentrons/hardware_control/test_thread_manager.py
+++ b/api/tests/opentrons/hardware_control/test_thread_manager.py
@@ -28,7 +28,7 @@ def test_build_fail_raises_exception():
def test_module_cache_add_entry():
"""Test that _cached_modules updates correctly."""
- mod_names = ["tempdeck"]
+ mod_names = {"tempdeck": ["111"]}
thread_manager = ThreadManager(
API.build_hardware_simulator, attached_modules=mod_names
)
@@ -49,7 +49,7 @@ def test_module_cache_add_entry():
async def test_module_cache_remove_entry():
"""Test that module entry gets removed from cache when module detaches."""
- mod_names = ["tempdeck", "magdeck"]
+ mod_names = {"tempdeck": ["111"], "magdeck": ["222"]}
thread_manager = ThreadManager(
API.build_hardware_simulator, attached_modules=mod_names
)
@@ -63,7 +63,7 @@ async def test_module_cache_remove_entry():
future = asyncio.run_coroutine_threadsafe(
thread_manager._backend.module_controls.register_modules(
removed_mods_at_ports=[
- ModuleAtPort(port="/dev/ot_module_sim_tempdeck0", name="tempdeck")
+ ModuleAtPort(port="/dev/ot_module_sim_tempdeck111", name="tempdeck")
]
),
loop,
diff --git a/api/tests/opentrons/commands/__init__.py b/api/tests/opentrons/legacy_commands/__init__.py
similarity index 100%
rename from api/tests/opentrons/commands/__init__.py
rename to api/tests/opentrons/legacy_commands/__init__.py
diff --git a/api/tests/opentrons/commands/test_protocol_commands.py b/api/tests/opentrons/legacy_commands/test_protocol_commands.py
similarity index 96%
rename from api/tests/opentrons/commands/test_protocol_commands.py
rename to api/tests/opentrons/legacy_commands/test_protocol_commands.py
index e7fb31aed1c..1ff5475f95b 100644
--- a/api/tests/opentrons/commands/test_protocol_commands.py
+++ b/api/tests/opentrons/legacy_commands/test_protocol_commands.py
@@ -1,5 +1,5 @@
import pytest
-from opentrons.commands import protocol_commands
+from opentrons.legacy_commands import protocol_commands
@pytest.mark.parametrize(
diff --git a/api/tests/opentrons/commands/test_publisher.py b/api/tests/opentrons/legacy_commands/test_publisher.py
similarity index 95%
rename from api/tests/opentrons/commands/test_publisher.py
rename to api/tests/opentrons/legacy_commands/test_publisher.py
index f38142984bf..359b6b3c5fd 100644
--- a/api/tests/opentrons/commands/test_publisher.py
+++ b/api/tests/opentrons/legacy_commands/test_publisher.py
@@ -1,12 +1,16 @@
-"""Tests for opentrons.commands.publisher."""
+"""Tests for opentrons.legacy_commands.publisher."""
from __future__ import annotations
import pytest
from decoy import Decoy, matchers
from typing import Any, Dict, cast
from opentrons.legacy_broker import LegacyBroker
-from opentrons.commands.types import Command as CommandDict, CommandMessage
-from opentrons.commands.publisher import CommandPublisher, publish, publish_context
+from opentrons.legacy_commands.types import Command as CommandDict, CommandMessage
+from opentrons.legacy_commands.publisher import (
+ CommandPublisher,
+ publish,
+ publish_context,
+)
@pytest.fixture
@@ -17,7 +21,7 @@ def broker(decoy: Decoy) -> LegacyBroker:
def test_publish_decorator(decoy: Decoy, broker: LegacyBroker) -> None:
"""It should publish "before" and "after" messages for decorated methods."""
- _act = decoy.mock()
+ _act = decoy.mock(name="_act")
def _get_command_payload(foo: str, bar: int) -> CommandDict:
return cast(
@@ -73,7 +77,7 @@ def test_publish_decorator_with_arg_defaults(
decoy: Decoy, broker: LegacyBroker
) -> None:
"""It should pass method argument defaults to the command creator."""
- _act = decoy.mock()
+ _act = decoy.mock(name="_act")
def _get_command_payload(foo: str, bar: int) -> CommandDict:
return cast(
@@ -175,7 +179,7 @@ def test_publish_decorator_remaps_instrument(
decoy: Decoy, broker: LegacyBroker
) -> None:
"""It should pass "self" to command creator arguments named "instrument"."""
- _act = decoy.mock()
+ _act = decoy.mock(name="_act")
def _get_command_payload(foo: str, instrument: _Subject) -> Dict[str, Any]:
return {
@@ -226,7 +230,7 @@ def act(self, foo: str) -> None:
def test_publish_context(decoy: Decoy, broker: LegacyBroker) -> None:
- _act = decoy.mock()
+ _act = decoy.mock(name="_act")
command = cast(
CommandDict,
diff --git a/api/tests/opentrons/motion_planning/test_adjacent_slots_getters.py b/api/tests/opentrons/motion_planning/test_adjacent_slots_getters.py
index 6e53002d332..09805e93ca8 100644
--- a/api/tests/opentrons/motion_planning/test_adjacent_slots_getters.py
+++ b/api/tests/opentrons/motion_planning/test_adjacent_slots_getters.py
@@ -2,6 +2,9 @@
import pytest
from typing import List, Optional
+from opentrons_shared_data.robot.dev_types import RobotType
+
+from opentrons.types import DeckSlotName, StagingSlotName
from opentrons.motion_planning.adjacent_slots_getters import (
get_east_slot,
get_south_slot,
@@ -10,6 +13,10 @@
get_east_west_slots,
get_north_south_slots,
get_adjacent_slots,
+ get_west_of_staging_slot,
+ get_adjacent_staging_slot,
+ _MixedTypeSlots,
+ get_surrounding_slots,
)
@@ -91,3 +98,112 @@ def test_get_north_south_slots(
def test_get_adjacent_slots(slot: int, expected_adjacent: List[int]) -> None:
"""It should return a list of adjacent slots."""
assert sorted(get_adjacent_slots(slot)) == sorted(expected_adjacent)
+
+
+@pytest.mark.parametrize(
+ argnames=["slot", "expected_adjacent"],
+ argvalues=[
+ (StagingSlotName.SLOT_A4, DeckSlotName.SLOT_A3),
+ (StagingSlotName.SLOT_B4, DeckSlotName.SLOT_B3),
+ (StagingSlotName.SLOT_C4, DeckSlotName.SLOT_C3),
+ (StagingSlotName.SLOT_D4, DeckSlotName.SLOT_D3),
+ ],
+)
+def test_get_west_of_staging_slot(
+ slot: StagingSlotName, expected_adjacent: DeckSlotName
+) -> None:
+ """It should find the slot directly west of a staging slot."""
+ assert get_west_of_staging_slot(slot) == expected_adjacent
+
+
+@pytest.mark.parametrize(
+ argnames=["slot", "expected_adjacent"],
+ argvalues=[
+ (DeckSlotName.SLOT_A3, StagingSlotName.SLOT_A4),
+ (DeckSlotName.SLOT_B3, StagingSlotName.SLOT_B4),
+ (DeckSlotName.SLOT_C3, StagingSlotName.SLOT_C4),
+ (DeckSlotName.SLOT_D3, StagingSlotName.SLOT_D4),
+ (DeckSlotName.SLOT_D1, None),
+ (DeckSlotName.SLOT_1, None),
+ ],
+)
+def test_get_adjacent_staging_slot(
+ slot: DeckSlotName, expected_adjacent: Optional[StagingSlotName]
+) -> None:
+ """It should find the adjacent slot east of a staging slot if it exists."""
+ assert get_adjacent_staging_slot(slot) == expected_adjacent
+
+
+@pytest.mark.parametrize(
+ argnames=["slot", "robot_type", "expected_surrounding_slots"],
+ argvalues=[
+ (
+ 2,
+ "OT-2 Standard",
+ _MixedTypeSlots(
+ regular_slots=[
+ DeckSlotName.SLOT_3,
+ DeckSlotName.SLOT_1,
+ DeckSlotName.SLOT_5,
+ DeckSlotName.SLOT_6,
+ DeckSlotName.SLOT_4,
+ ],
+ staging_slots=[],
+ ),
+ ),
+ (
+ 6,
+ "OT-2 Standard",
+ _MixedTypeSlots(
+ regular_slots=[
+ DeckSlotName.SLOT_5,
+ DeckSlotName.SLOT_9,
+ DeckSlotName.SLOT_3,
+ DeckSlotName.SLOT_8,
+ DeckSlotName.SLOT_2,
+ ],
+ staging_slots=[],
+ ),
+ ),
+ (
+ 6,
+ "OT-3 Standard",
+ _MixedTypeSlots(
+ regular_slots=[
+ DeckSlotName.SLOT_C2,
+ DeckSlotName.SLOT_B3,
+ DeckSlotName.SLOT_D3,
+ DeckSlotName.SLOT_B2,
+ DeckSlotName.SLOT_D2,
+ ],
+ staging_slots=[
+ StagingSlotName.SLOT_B4,
+ StagingSlotName.SLOT_C4,
+ StagingSlotName.SLOT_D4,
+ ],
+ ),
+ ),
+ (
+ 10,
+ "OT-3 Standard",
+ _MixedTypeSlots(
+ regular_slots=[
+ DeckSlotName.SLOT_A2,
+ DeckSlotName.SLOT_B1,
+ DeckSlotName.SLOT_B2,
+ ],
+ staging_slots=[],
+ ),
+ ),
+ ],
+)
+def test_get_surrounding_slots(
+ slot: int,
+ robot_type: RobotType,
+ expected_surrounding_slots: _MixedTypeSlots,
+) -> None:
+ """It should get the list of surrounding slots appropriate for the robot type."""
+ assert (
+ get_surrounding_slots(slot=slot, robot_type=robot_type)
+ == expected_surrounding_slots
+ )
diff --git a/api/tests/opentrons/motion_planning/test_deck_conflict.py b/api/tests/opentrons/motion_planning/test_deck_conflict.py
index f0dfbe6eea7..553821289fc 100644
--- a/api/tests/opentrons/motion_planning/test_deck_conflict.py
+++ b/api/tests/opentrons/motion_planning/test_deck_conflict.py
@@ -9,12 +9,16 @@
from opentrons.motion_planning import deck_conflict
-from opentrons.types import DeckSlotName
+from opentrons.types import DeckSlotName, StagingSlotName
@pytest.mark.parametrize(
"robot_type, slot_name",
- [("OT-2 Standard", DeckSlotName.SLOT_1), ("OT-3 Standard", DeckSlotName.SLOT_A1)],
+ [
+ ("OT-2 Standard", DeckSlotName.SLOT_1),
+ ("OT-3 Standard", DeckSlotName.SLOT_A1),
+ ("OT-3 Standard", DeckSlotName.SLOT_A3),
+ ],
)
def test_empty_no_conflict(robot_type: RobotType, slot_name: DeckSlotName) -> None:
"""It should not raise on empty input."""
@@ -30,142 +34,34 @@ def test_empty_no_conflict(robot_type: RobotType, slot_name: DeckSlotName) -> No
@pytest.mark.parametrize(
"robot_type, slot_name",
- [("OT-2 Standard", DeckSlotName.SLOT_1), ("OT-3 Standard", DeckSlotName.SLOT_A1)],
-)
-def test_no_multiple_locations(robot_type: RobotType, slot_name: DeckSlotName) -> None:
- """It should not allow two items in the same slot."""
- item_1 = deck_conflict.OtherModule(
- highest_z_including_labware=123, name_for_errors="some_item_1"
- )
- item_2 = deck_conflict.OtherModule(
- highest_z_including_labware=123, name_for_errors="some_item_2"
- )
-
- with pytest.raises(
- deck_conflict.DeckConflictError,
- match=f"some_item_1 in slot {slot_name} prevents some_item_2 from using slot {slot_name}",
- ):
- deck_conflict.check(
- existing_items={slot_name: item_1},
- new_item=item_2,
- new_location=slot_name,
- robot_type=robot_type,
- )
-
-
-@pytest.mark.parametrize(
- "slot_name, robot_type",
[
- (DeckSlotName.FIXED_TRASH, "OT-2 Standard"),
- (DeckSlotName.SLOT_A3, "OT-3 Standard"),
+ ("OT-2 Standard", DeckSlotName.SLOT_1),
+ ("OT-3 Standard", DeckSlotName.SLOT_A1),
+ ("OT-3 Standard", StagingSlotName.SLOT_A4),
],
)
-def test_only_trash_in_fixed_slot(
- slot_name: DeckSlotName, robot_type: RobotType
-) -> None:
- """It should only allow trash labware in slot 12."""
- trash_labware = deck_conflict.Labware(
- uri=LabwareUri("trash_labware_uri"),
- highest_z=123,
- is_fixed_trash=True,
- name_for_errors="trash_labware",
- )
- not_trash_labware = deck_conflict.Labware(
- uri=LabwareUri("not_trash_labware_uri"),
+def test_no_multiple_locations(robot_type: RobotType, slot_name: DeckSlotName) -> None:
+ """It should not allow two items in the same slot."""
+ item_1 = deck_conflict.Labware(
+ uri=LabwareUri("some_labware_uri"),
highest_z=123,
is_fixed_trash=False,
- name_for_errors="not_trash_labware",
- )
- not_trash_module = deck_conflict.OtherModule(
- highest_z_including_labware=123, name_for_errors="not_trash_module"
- )
-
- deck_conflict.check(
- existing_items={},
- new_item=trash_labware,
- new_location=slot_name,
- robot_type=robot_type,
- )
-
- with pytest.raises(
- deck_conflict.DeckConflictError,
- match=f"Only fixed-trash is allowed in slot {slot_name}",
- ):
- deck_conflict.check(
- existing_items={},
- new_item=not_trash_labware,
- new_location=slot_name,
- robot_type=robot_type,
- )
-
- with pytest.raises(
- deck_conflict.DeckConflictError,
- match=f"Only fixed-trash is allowed in slot {slot_name}",
- ):
- deck_conflict.check(
- existing_items={},
- new_item=not_trash_module,
- new_location=slot_name,
- robot_type=robot_type,
- )
-
-
-@pytest.mark.parametrize(
- "slot_name, robot_type",
- [
- (DeckSlotName.FIXED_TRASH, "OT-2 Standard"),
- (DeckSlotName.SLOT_A3, "OT-3 Standard"),
- ],
-)
-def test_trash_override(slot_name: DeckSlotName, robot_type: RobotType) -> None:
- """It should allow the trash labware to be replaced with another trash labware."""
- trash_labware_1 = deck_conflict.Labware(
- uri=LabwareUri("trash_labware_1_uri"),
- highest_z=123,
- is_fixed_trash=True,
- name_for_errors="trash_labware_1",
- )
- trash_labware_2 = deck_conflict.Labware(
- uri=LabwareUri("trash_labware_2_uri"),
- highest_z=123,
- is_fixed_trash=True,
- name_for_errors="trash_labware_2",
+ name_for_errors="some_item_1",
)
- not_trash_labware = deck_conflict.Labware(
- uri=LabwareUri("not_trash_labware_uri"),
+ item_2 = deck_conflict.Labware(
+ uri=LabwareUri("some_labware_uri"),
highest_z=123,
is_fixed_trash=False,
- name_for_errors="not_trash_labware",
- )
- not_trash_module = deck_conflict.OtherModule(
- highest_z_including_labware=123, name_for_errors="not_trash_module"
- )
-
- deck_conflict.check(
- existing_items={slot_name: trash_labware_1},
- new_item=trash_labware_2,
- new_location=slot_name,
- robot_type=robot_type,
+ name_for_errors="some_item_2",
)
with pytest.raises(
deck_conflict.DeckConflictError,
- match=f"Only fixed-trash is allowed in slot {slot_name}",
- ):
- deck_conflict.check(
- existing_items={slot_name: trash_labware_1},
- new_item=not_trash_labware,
- new_location=slot_name,
- robot_type=robot_type,
- )
-
- with pytest.raises(
- deck_conflict.DeckConflictError,
- match=f"Only fixed-trash is allowed in slot {slot_name}",
+ match=f"some_item_1 in slot {slot_name} prevents some_item_2 from using slot {slot_name}",
):
deck_conflict.check(
- existing_items={slot_name: trash_labware_1},
- new_item=not_trash_module,
+ existing_items={slot_name: item_1},
+ new_item=item_2,
new_location=slot_name,
robot_type=robot_type,
)
@@ -311,6 +207,41 @@ def test_flex_labware_when_thermocycler(
)
+def test_flex_trash_bin_blocks_thermocycler() -> None:
+ """It should prevent loading a thermocycler when there is a trash in A1 and vice-versa."""
+ thermocycler = deck_conflict.ThermocyclerModule(
+ name_for_errors="some_thermocycler",
+ highest_z_including_labware=123,
+ is_semi_configuration=False,
+ )
+ trash = deck_conflict.TrashBin(name_for_errors="some_trash_bin", highest_z=1.23)
+
+ with pytest.raises(
+ deck_conflict.DeckConflictError,
+ match=(
+ "some_trash_bin in slot A1 prevents some_thermocycler from using slot B1"
+ ),
+ ):
+ deck_conflict.check(
+ existing_items={DeckSlotName.SLOT_A1: trash},
+ new_item=thermocycler,
+ new_location=DeckSlotName.SLOT_B1,
+ robot_type="OT-3 Standard",
+ )
+ with pytest.raises(
+ deck_conflict.DeckConflictError,
+ match=(
+ "some_thermocycler in slot B1 prevents some_trash_bin from using slot A1"
+ ),
+ ):
+ deck_conflict.check(
+ existing_items={DeckSlotName.SLOT_B1: thermocycler},
+ new_item=trash,
+ new_location=DeckSlotName.SLOT_A1,
+ robot_type="OT-3 Standard",
+ )
+
+
@pytest.mark.parametrize(
("heater_shaker_location", "labware_location"),
[
@@ -598,3 +529,118 @@ def test_no_heater_shaker_south_of_trash() -> None:
new_location=DeckSlotName.SLOT_9,
robot_type="OT-2 Standard",
)
+
+
+def test_heater_shaker_restrictions_trash_bin_addressable_area() -> None:
+ """It should prevent loading a Heater-Shaker adjacent of a non-labware trash bin.
+
+ This is for the OT-2 only and for slot 11 and slot 9
+ """
+ heater_shaker = deck_conflict.HeaterShakerModule(
+ highest_z_including_labware=123, name_for_errors="some_heater_shaker"
+ )
+ trash = deck_conflict.TrashBin(name_for_errors="some_trash_bin", highest_z=456)
+
+ with pytest.raises(
+ deck_conflict.DeckConflictError,
+ match=(
+ "some_trash_bin in slot 12" " prevents some_heater_shaker from using slot 9"
+ ),
+ ):
+ deck_conflict.check(
+ existing_items={DeckSlotName.FIXED_TRASH: trash},
+ new_item=heater_shaker,
+ new_location=DeckSlotName.SLOT_9,
+ robot_type="OT-2 Standard",
+ )
+ with pytest.raises(
+ deck_conflict.DeckConflictError,
+ match=(
+ "some_trash_bin in slot 12"
+ " prevents some_heater_shaker from using slot 11"
+ ),
+ ):
+ deck_conflict.check(
+ existing_items={DeckSlotName.FIXED_TRASH: trash},
+ new_item=heater_shaker,
+ new_location=DeckSlotName.SLOT_11,
+ robot_type="OT-2 Standard",
+ )
+
+
+@pytest.mark.parametrize(
+ ("deck_slot_name", "adjacent_staging_slot", "non_adjacent_staging_slot"),
+ [
+ (DeckSlotName.SLOT_A3, StagingSlotName.SLOT_A4, StagingSlotName.SLOT_B4),
+ (DeckSlotName.SLOT_B3, StagingSlotName.SLOT_B4, StagingSlotName.SLOT_C4),
+ (DeckSlotName.SLOT_C3, StagingSlotName.SLOT_C4, StagingSlotName.SLOT_D4),
+ (DeckSlotName.SLOT_D3, StagingSlotName.SLOT_D4, StagingSlotName.SLOT_A4),
+ ],
+)
+def test_no_staging_slot_adjacent_to_module(
+ deck_slot_name: DeckSlotName,
+ adjacent_staging_slot: StagingSlotName,
+ non_adjacent_staging_slot: StagingSlotName,
+) -> None:
+ """It should raise if certain modules are placed adjacent to labware on a staging slot."""
+ staging_slot_labware = deck_conflict.Labware(
+ uri=LabwareUri("some_labware_uri"),
+ highest_z=123,
+ is_fixed_trash=False,
+ name_for_errors="some_labware",
+ )
+ heater_shaker = deck_conflict.HeaterShakerModule(
+ name_for_errors="some_heater_shaker",
+ highest_z_including_labware=123,
+ )
+ with pytest.raises(
+ deck_conflict.DeckConflictError,
+ match=(
+ f"some_labware in slot {adjacent_staging_slot}"
+ f" prevents some_heater_shaker from using slot {deck_slot_name}"
+ ),
+ ):
+ deck_conflict.check(
+ existing_items={adjacent_staging_slot: staging_slot_labware},
+ new_item=heater_shaker,
+ new_location=deck_slot_name,
+ robot_type="OT-3 Standard",
+ )
+
+ # Non-adjacent staging slot passes
+ deck_conflict.check(
+ existing_items={non_adjacent_staging_slot: staging_slot_labware},
+ new_item=heater_shaker,
+ new_location=deck_slot_name,
+ robot_type="OT-3 Standard",
+ )
+
+ other_module = deck_conflict.OtherModule(
+ name_for_errors="some_other_module",
+ highest_z_including_labware=123,
+ )
+ with pytest.raises(
+ deck_conflict.DeckConflictError,
+ match=(
+ f"some_other_module in slot {deck_slot_name}"
+ f" prevents some_labware from using slot {adjacent_staging_slot}"
+ ),
+ ):
+ deck_conflict.check(
+ existing_items={deck_slot_name: other_module},
+ new_item=staging_slot_labware,
+ new_location=adjacent_staging_slot,
+ robot_type="OT-3 Standard",
+ )
+
+ # Magnetic block is allowed
+ magnetic_block = deck_conflict.MagneticBlockModule(
+ name_for_errors="some_mag_block",
+ highest_z_including_labware=123,
+ )
+ deck_conflict.check(
+ existing_items={adjacent_staging_slot: staging_slot_labware},
+ new_item=magnetic_block,
+ new_location=deck_slot_name,
+ robot_type="OT-3 Standard",
+ )
diff --git a/api/tests/opentrons/motion_planning/test_waypoints.py b/api/tests/opentrons/motion_planning/test_waypoints.py
index 4930d9a1e70..7f3fa2c91e6 100644
--- a/api/tests/opentrons/motion_planning/test_waypoints.py
+++ b/api/tests/opentrons/motion_planning/test_waypoints.py
@@ -275,18 +275,49 @@ def test_get_gripper_labware_movement_waypoints() -> None:
pickUpOffset=LabwareOffsetVector(x=-1, y=-2, z=-3),
dropOffset=LabwareOffsetVector(x=1, y=2, z=3),
),
+ post_drop_slide_offset=None,
)
assert result == [
# move to above "from" slot
- GripperMovementWaypointsWithJawStatus(Point(100, 100, 999), False),
+ GripperMovementWaypointsWithJawStatus(Point(100, 100, 999), False, False),
# with jaw open, move to labware on "from" slot
- GripperMovementWaypointsWithJawStatus(Point(100, 100, 116.5), True),
+ GripperMovementWaypointsWithJawStatus(Point(100, 100, 116.5), True, False),
# grip labware and retract in place
- GripperMovementWaypointsWithJawStatus(Point(100, 100, 999), False),
+ GripperMovementWaypointsWithJawStatus(Point(100, 100, 999), False, False),
# with labware gripped, move to above "to" slot
- GripperMovementWaypointsWithJawStatus(Point(202.0, 204.0, 999), False),
+ GripperMovementWaypointsWithJawStatus(Point(202.0, 204.0, 999), False, False),
# with labware gripped, move down to labware drop height on "to" slot
- GripperMovementWaypointsWithJawStatus(Point(202.0, 204.0, 222.5), False),
+ GripperMovementWaypointsWithJawStatus(Point(202.0, 204.0, 222.5), False, False),
# ungrip labware and retract in place
- GripperMovementWaypointsWithJawStatus(Point(202.0, 204.0, 999), True),
+ GripperMovementWaypointsWithJawStatus(Point(202.0, 204.0, 999), True, True),
+ ]
+
+
+def test_get_gripper_labware_movement_waypoint_with_slide() -> None:
+ """It should get the correct waypoints for gripper movement."""
+ result = get_gripper_labware_movement_waypoints(
+ from_labware_center=Point(101, 102, 119.5),
+ to_labware_center=Point(201, 202, 219.5),
+ gripper_home_z=999,
+ offset_data=LabwareMovementOffsetData(
+ pickUpOffset=LabwareOffsetVector(x=-1, y=-2, z=-3),
+ dropOffset=LabwareOffsetVector(x=1, y=2, z=3),
+ ),
+ post_drop_slide_offset=Point(x=10, y=10, z=1),
+ )
+ assert result == [
+ # move to above "from" slot
+ GripperMovementWaypointsWithJawStatus(Point(100, 100, 999), False, False),
+ # with jaw open, move to labware on "from" slot
+ GripperMovementWaypointsWithJawStatus(Point(100, 100, 116.5), True, False),
+ # grip labware and retract in place
+ GripperMovementWaypointsWithJawStatus(Point(100, 100, 999), False, False),
+ # with labware gripped, move to above "to" slot
+ GripperMovementWaypointsWithJawStatus(Point(202.0, 204.0, 999), False, False),
+ # with labware gripped, move down to labware drop height on "to" slot
+ GripperMovementWaypointsWithJawStatus(Point(202.0, 204.0, 222.5), False, False),
+ # ungrip labware and retract in place
+ GripperMovementWaypointsWithJawStatus(Point(202.0, 204.0, 999), True, True),
+ # slide after ungripping
+ GripperMovementWaypointsWithJawStatus(Point(212.0, 214.0, 1000), True, False),
]
diff --git a/api/tests/opentrons/protocol_api/__init__.py b/api/tests/opentrons/protocol_api/__init__.py
index 70938f49e66..8cf95c55e6d 100644
--- a/api/tests/opentrons/protocol_api/__init__.py
+++ b/api/tests/opentrons/protocol_api/__init__.py
@@ -1 +1,131 @@
"""Tests for opentrons.protocol_api."""
+from typing import List, overload, Optional
+
+from opentrons.protocols.api_support.types import APIVersion
+from opentrons.protocol_api import (
+ MAX_SUPPORTED_VERSION,
+ MIN_SUPPORTED_VERSION,
+ MIN_SUPPORTED_VERSION_FOR_FLEX,
+)
+
+
+def versions_at_or_above(from_version: APIVersion) -> List[APIVersion]:
+ """Get a list of versions >= the specified one."""
+ return versions_between(
+ low_inclusive_bound=from_version, high_inclusive_bound=MAX_SUPPORTED_VERSION
+ )
+
+
+def versions_at_or_below(
+ from_version: APIVersion, flex_only: bool = False
+) -> List[APIVersion]:
+ """Get a list of versions <= the specified one.
+
+ Since there are different minimum supported versions for Flex and OT-2, specify which you care about
+ with the second argument.
+ """
+ if flex_only:
+ return versions_between(
+ low_inclusive_bound=MIN_SUPPORTED_VERSION_FOR_FLEX,
+ high_inclusive_bound=from_version,
+ )
+ else:
+ return versions_between(
+ low_inclusive_bound=MIN_SUPPORTED_VERSION, high_inclusive_bound=from_version
+ )
+
+
+def versions_above(from_version: APIVersion) -> List[APIVersion]:
+ """Get a list of versions > the specified one."""
+ return versions_between(
+ low_exclusive_bound=from_version, high_inclusive_bound=MAX_SUPPORTED_VERSION
+ )
+
+
+def versions_below(from_version: APIVersion, flex_only: bool) -> List[APIVersion]:
+ """Get a list of versions < the specified one.
+
+ Since there are different minimum supported versions for Flex and OT-2, specify which you care about
+ with the second argument.
+ """
+ if flex_only:
+ return versions_between(
+ low_inclusive_bound=MIN_SUPPORTED_VERSION_FOR_FLEX,
+ high_exclusive_bound=from_version,
+ )
+ else:
+ return versions_between(
+ low_inclusive_bound=MIN_SUPPORTED_VERSION, high_exclusive_bound=from_version
+ )
+
+
+@overload
+def versions_between(
+ *,
+ low_inclusive_bound: APIVersion,
+ high_inclusive_bound: APIVersion,
+) -> List[APIVersion]:
+ ...
+
+
+@overload
+def versions_between(
+ *, low_inclusive_bound: APIVersion, high_exclusive_bound: APIVersion
+) -> List[APIVersion]:
+ ...
+
+
+@overload
+def versions_between(
+ *,
+ high_inclusive_bound: APIVersion,
+ low_exclusive_bound: APIVersion,
+) -> List[APIVersion]:
+ ...
+
+
+@overload
+def versions_between(
+ *, low_exclusive_bound: APIVersion, high_exclusive_bound: APIVersion
+) -> List[APIVersion]:
+ ...
+
+
+def versions_between(
+ low_inclusive_bound: Optional[APIVersion] = None,
+ high_inclusive_bound: Optional[APIVersion] = None,
+ low_exclusive_bound: Optional[APIVersion] = None,
+ high_exclusive_bound: Optional[APIVersion] = None,
+) -> List[APIVersion]:
+ """Build a list of versions based on exclusive and inclusive constraints."""
+ if low_inclusive_bound and high_inclusive_bound:
+ assert (
+ low_inclusive_bound.major == high_inclusive_bound.major
+ ), "You need to change this test when you add a new major version"
+ major = low_inclusive_bound.major
+ start = low_inclusive_bound.minor
+ stop = high_inclusive_bound.minor + 1
+ elif low_inclusive_bound and high_exclusive_bound:
+ assert (
+ low_inclusive_bound.major == high_exclusive_bound.major
+ ), "You need to change this test when you add a new major version"
+ major = low_inclusive_bound.major
+ start = low_inclusive_bound.minor
+ stop = high_exclusive_bound.minor
+ elif low_exclusive_bound and high_inclusive_bound:
+ assert (
+ low_exclusive_bound.major == high_inclusive_bound.major
+ ), "You need to change this test when you add a new major version"
+ major = low_exclusive_bound.major
+ start = low_exclusive_bound.minor + 1
+ stop = high_inclusive_bound.minor + 1
+ elif low_exclusive_bound and high_exclusive_bound:
+ assert (
+ low_exclusive_bound.major == high_exclusive_bound.major
+ ), "You need to change this test when you add a new major version"
+ major = low_exclusive_bound.major
+ start = low_exclusive_bound.minor + 1
+ stop = high_exclusive_bound.minor
+ else:
+ raise ValueError("You must specify one low bound and one high bound")
+ return [APIVersion(major, minor) for minor in range(start, stop)]
diff --git a/api/tests/opentrons/protocol_api/core/engine/test_deck_conflict.py b/api/tests/opentrons/protocol_api/core/engine/test_deck_conflict.py
index 6d682b3e9a5..82ce80695d3 100644
--- a/api/tests/opentrons/protocol_api/core/engine/test_deck_conflict.py
+++ b/api/tests/opentrons/protocol_api/core/engine/test_deck_conflict.py
@@ -1,18 +1,65 @@
"""Unit tests for the deck_conflict module."""
-
-from decoy import Decoy
import pytest
-
+from typing import ContextManager, Any, NamedTuple, List, Tuple
+from decoy import Decoy
+from contextlib import nullcontext as does_not_raise
from opentrons_shared_data.labware.dev_types import LabwareUri
from opentrons_shared_data.robot.dev_types import RobotType
+from opentrons.hardware_control.nozzle_manager import NozzleConfigurationType
from opentrons.motion_planning import deck_conflict as wrapped_deck_conflict
+from opentrons.motion_planning import adjacent_slots_getters
+from opentrons.motion_planning.adjacent_slots_getters import _MixedTypeSlots
+from opentrons.protocols.api_support.types import APIVersion
+from opentrons.protocol_api import MAX_SUPPORTED_VERSION
+from opentrons.protocol_api.disposal_locations import (
+ TrashBin,
+ WasteChute,
+ _TRASH_BIN_CUTOUT_FIXTURE,
+)
+from opentrons.protocol_api.labware import Labware
from opentrons.protocol_api.core.engine import deck_conflict
-from opentrons.protocol_engine import Config, DeckSlotLocation, ModuleModel, StateView
+from opentrons.protocol_engine import (
+ Config,
+ DeckSlotLocation,
+ ModuleModel,
+ StateView,
+)
+from opentrons.protocol_engine.clients import SyncClient
from opentrons.protocol_engine.errors import LabwareNotLoadedOnModuleError
-from opentrons.types import DeckSlotName
+from opentrons.types import DeckSlotName, Point, StagingSlotName
-from opentrons.protocol_engine.types import DeckType
+from opentrons.protocol_engine.types import (
+ DeckType,
+ LoadedLabware,
+ LoadedModule,
+ WellLocation,
+ WellOrigin,
+ WellOffset,
+ OnDeckLabwareLocation,
+ OnLabwareLocation,
+ Dimensions,
+ StagingSlotLocation,
+)
+
+
+@pytest.fixture(autouse=True)
+def patch_slot_getters(decoy: Decoy, monkeypatch: pytest.MonkeyPatch) -> None:
+ """Mock out adjacent_slots_getters functions."""
+ mock_get_surrounding_slots = decoy.mock(
+ func=adjacent_slots_getters.get_surrounding_slots
+ )
+ mock_get_surrounding_staging_slots = decoy.mock(
+ func=adjacent_slots_getters.get_surrounding_staging_slots
+ )
+ monkeypatch.setattr(
+ adjacent_slots_getters, "get_surrounding_slots", mock_get_surrounding_slots
+ )
+ monkeypatch.setattr(
+ adjacent_slots_getters,
+ "get_surrounding_staging_slots",
+ mock_get_surrounding_staging_slots,
+ )
@pytest.fixture(autouse=True)
@@ -24,6 +71,18 @@ def use_mock_wrapped_deck_conflict(
monkeypatch.setattr(wrapped_deck_conflict, "check", mock_check)
+@pytest.fixture
+def api_version() -> APIVersion:
+ """Get mocked api_version."""
+ return MAX_SUPPORTED_VERSION
+
+
+@pytest.fixture
+def mock_sync_client(decoy: Decoy) -> SyncClient:
+ """Return a mock in the shape of a SyncClient."""
+ return decoy.mock(cls=SyncClient)
+
+
@pytest.fixture
def mock_state_view(
decoy: Decoy,
@@ -70,6 +129,7 @@ def test_maps_labware_on_deck(decoy: Decoy, mock_state_view: StateView) -> None:
engine_state=mock_state_view,
existing_labware_ids=["labware-id"],
existing_module_ids=[],
+ existing_disposal_locations=[],
new_labware_id="labware-id",
)
decoy.verify(
@@ -124,6 +184,7 @@ def test_maps_module_without_labware(decoy: Decoy, mock_state_view: StateView) -
engine_state=mock_state_view,
existing_labware_ids=[],
existing_module_ids=["module-id"],
+ existing_disposal_locations=[],
new_module_id="module-id",
)
decoy.verify(
@@ -177,6 +238,7 @@ def test_maps_module_with_labware(decoy: Decoy, mock_state_view: StateView) -> N
engine_state=mock_state_view,
existing_labware_ids=[],
existing_module_ids=["module-id"],
+ existing_disposal_locations=[],
new_module_id="module-id",
)
decoy.verify(
@@ -217,6 +279,11 @@ def get_expected_mapping_result() -> wrapped_deck_conflict.DeckItem:
name_for_errors=expected_name_for_errors,
highest_z_including_labware=3.14159,
)
+ elif module_model is ModuleModel.MAGNETIC_BLOCK_V1:
+ return wrapped_deck_conflict.MagneticBlockModule(
+ name_for_errors=expected_name_for_errors,
+ highest_z_including_labware=3.14159,
+ )
elif (
module_model is ModuleModel.THERMOCYCLER_MODULE_V1
or module_model is ModuleModel.THERMOCYCLER_MODULE_V2
@@ -255,6 +322,7 @@ def get_expected_mapping_result() -> wrapped_deck_conflict.DeckItem:
engine_state=mock_state_view,
existing_labware_ids=[],
existing_module_ids=[],
+ existing_disposal_locations=[],
new_module_id="module-id",
)
decoy.verify(
@@ -265,3 +333,499 @@ def get_expected_mapping_result() -> wrapped_deck_conflict.DeckItem:
robot_type=mock_state_view.config.robot_type,
)
)
+
+
+@pytest.mark.parametrize(
+ ("robot_type", "deck_type"),
+ [
+ ("OT-2 Standard", DeckType.OT2_STANDARD),
+ ("OT-3 Standard", DeckType.OT3_STANDARD),
+ ],
+)
+def test_maps_trash_bins(
+ decoy: Decoy,
+ mock_state_view: StateView,
+ api_version: APIVersion,
+ mock_sync_client: SyncClient,
+) -> None:
+ """It should correctly map disposal locations."""
+ mock_trash_lw = decoy.mock(cls=Labware)
+
+ decoy.when(
+ mock_sync_client.state.addressable_areas.get_fixture_height(
+ _TRASH_BIN_CUTOUT_FIXTURE
+ )
+ ).then_return(1.23)
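+    # the stubbed fixture height (1.23) should flow through as highest_z on the mapped TrashBin items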
+
+ deck_conflict.check(
+ engine_state=mock_state_view,
+ existing_labware_ids=[],
+ existing_module_ids=[],
+ existing_disposal_locations=[
+ TrashBin(
+ location=DeckSlotName.SLOT_B1,
+ addressable_area_name="blah",
+ engine_client=mock_sync_client,
+ api_version=api_version,
+ ),
+ WasteChute(engine_client=mock_sync_client, api_version=api_version),
+ mock_trash_lw,
+ ],
+ new_trash_bin=TrashBin(
+ location=DeckSlotName.SLOT_A1,
+ addressable_area_name="blah",
+ engine_client=mock_sync_client,
+ api_version=api_version,
+ ),
+ )
+ decoy.verify(
+ wrapped_deck_conflict.check(
+ existing_items={
+ DeckSlotName.SLOT_B1: wrapped_deck_conflict.TrashBin(
+ name_for_errors="trash bin", highest_z=1.23
+ )
+ },
+ new_item=wrapped_deck_conflict.TrashBin(
+ name_for_errors="trash bin", highest_z=1.23
+ ),
+ new_location=DeckSlotName.SLOT_A1,
+ robot_type=mock_state_view.config.robot_type,
+ )
+ )
+
+
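+# Sample LoadedLabware and LoadedModule records, both located in deck slot C1.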
+plate = LoadedLabware(
+ id="plate-id",
+ loadName="plate-load-name",
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_C1),
+ definitionUri="some-plate-uri",
+ offsetId=None,
+ displayName="Fancy Plate Name",
+)
+
+module = LoadedModule(
+ id="module-id",
+ model=ModuleModel.TEMPERATURE_MODULE_V1,
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_C1),
+ serialNumber="serial-number",
+)
+
+
+@pytest.mark.parametrize(
+ ("robot_type", "deck_type"),
+ [("OT-3 Standard", DeckType.OT3_STANDARD)],
+)
+@pytest.mark.parametrize(
+ ["pipette_bounds", "expected_raise"],
+ [
+ ( # nozzles above highest Z
+ (
+ Point(x=50, y=150, z=60),
+ Point(x=150, y=50, z=60),
+ Point(x=150, y=150, z=60),
+ Point(x=50, y=50, z=60),
+ ),
+ does_not_raise(),
+ ),
+ # X, Y, Z collisions
+ (
+ (
+ Point(x=50, y=150, z=40),
+ Point(x=150, y=50, z=40),
+ Point(x=150, y=150, z=40),
+ Point(x=50, y=50, z=40),
+ ),
+ pytest.raises(
+ deck_conflict.PartialTipMovementNotAllowedError,
+ match="collision with items in deck slot D1",
+ ),
+ ),
+ (
+ (
+ Point(x=101, y=150, z=40),
+ Point(x=150, y=50, z=40),
+ Point(x=150, y=150, z=40),
+ Point(x=101, y=50, z=40),
+ ),
+ pytest.raises(
+ deck_conflict.PartialTipMovementNotAllowedError,
+ match="collision with items in deck slot D2",
+ ),
+ ),
+ ( # Collision with staging slot
+ (
+ Point(x=150, y=150, z=40),
+ Point(x=250, y=101, z=40),
+ Point(x=150, y=101, z=40),
+ Point(x=250, y=150, z=40),
+ ),
+ pytest.raises(
+ deck_conflict.PartialTipMovementNotAllowedError,
+ match="collision with items in staging slot C4",
+ ),
+ ),
+ ],
+)
+def test_deck_conflict_raises_for_bad_pipette_move(
+ decoy: Decoy,
+ mock_state_view: StateView,
+ pipette_bounds: Tuple[Point, Point, Point, Point],
+ expected_raise: ContextManager[Any],
+) -> None:
+ """It should raise errors when moving to locations with restrictions for partial pipette movement.
+
+ Test premise:
+ - we are using a pipette configured for COLUMN nozzle layout with primary nozzle A12
+ - there are labware of height 50mm in C1, D1 & D2
+ - we are checking for conflicts when moving to a labware in C2.
+ For each test case, we are moving to a different point in the destination labware,
+ with the same pipette and tip
+
+ Note: this test does not stub out the slot overlap checker function
+ in order to preserve readability of the test. That means the test does
+ actual slot overlap checks.
+ """
+ destination_well_point = Point(x=123, y=123, z=123)
+ decoy.when(
+ mock_state_view.pipettes.get_is_partially_configured("pipette-id")
+ ).then_return(True)
+ decoy.when(mock_state_view.pipettes.get_primary_nozzle("pipette-id")).then_return(
+ "A12"
+ )
+ decoy.when(
+ mock_state_view.geometry.get_ancestor_slot_name("destination-labware-id")
+ ).then_return(DeckSlotName.SLOT_C2)
+
+ decoy.when(
+ mock_state_view.geometry.get_well_position(
+ labware_id="destination-labware-id",
+ well_name="A2",
+ well_location=WellLocation(origin=WellOrigin.TOP, offset=WellOffset(z=10)),
+ )
+ ).then_return(destination_well_point)
+ decoy.when(
+ mock_state_view.pipettes.get_pipette_bounds_at_specified_move_to_position(
+ pipette_id="pipette-id", destination_position=destination_well_point
+ )
+ ).then_return(pipette_bounds)
+
+ decoy.when(
+ adjacent_slots_getters.get_surrounding_slots(5, robot_type="OT-3 Standard")
+ ).then_return(
+ _MixedTypeSlots(
+ regular_slots=[
+ DeckSlotName.SLOT_D1,
+ DeckSlotName.SLOT_D2,
+ DeckSlotName.SLOT_C1,
+ ],
+ staging_slots=[StagingSlotName.SLOT_C4],
+ )
+ )
+ decoy.when(
+ adjacent_slots_getters.get_surrounding_staging_slots(DeckSlotName.SLOT_C2)
+ ).then_return([StagingSlotName.SLOT_C4])
+
+ decoy.when(
+ mock_state_view.addressable_areas.get_addressable_area_position(
+ addressable_area_name="C1", do_compatibility_check=False
+ )
+ ).then_return(Point(0, 100, 0))
+ decoy.when(
+ mock_state_view.addressable_areas.get_addressable_area_position(
+ addressable_area_name="D1", do_compatibility_check=False
+ )
+ ).then_return(Point(0, 0, 0))
+ decoy.when(
+ mock_state_view.addressable_areas.get_addressable_area_position(
+ addressable_area_name="D2", do_compatibility_check=False
+ )
+ ).then_return(Point(100, 0, 0))
+ decoy.when(
+ mock_state_view.addressable_areas.get_addressable_area_position(
+ addressable_area_name="C4", do_compatibility_check=False
+ )
+ ).then_return(Point(200, 100, 0))
+ decoy.when(
+ mock_state_view.addressable_areas.get_addressable_area_bounding_box(
+ addressable_area_name="C4", do_compatibility_check=False
+ )
+ ).then_return(Dimensions(90, 90, 0))
+ decoy.when(
+ mock_state_view.geometry.get_highest_z_in_slot(
+ StagingSlotLocation(slotName=StagingSlotName.SLOT_C4)
+ )
+ ).then_return(50)
+ for slot_name in [DeckSlotName.SLOT_C1, DeckSlotName.SLOT_D1, DeckSlotName.SLOT_D2]:
+ decoy.when(
+ mock_state_view.geometry.get_highest_z_in_slot(
+ DeckSlotLocation(slotName=slot_name)
+ )
+ ).then_return(50)
+ decoy.when(
+ mock_state_view.addressable_areas.get_addressable_area_bounding_box(
+ addressable_area_name=slot_name.id, do_compatibility_check=False
+ )
+ ).then_return(Dimensions(90, 90, 0))
+
+ with expected_raise:
+ deck_conflict.check_safe_for_pipette_movement(
+ engine_state=mock_state_view,
+ pipette_id="pipette-id",
+ labware_id="destination-labware-id",
+ well_name="A2",
+ well_location=WellLocation(origin=WellOrigin.TOP, offset=WellOffset(z=10)),
+ )
+
+
+@pytest.mark.parametrize(
+ ("robot_type", "deck_type"),
+ [("OT-3 Standard", DeckType.OT3_STANDARD)],
+)
+def test_deck_conflict_raises_for_collision_with_tc_lid(
+ decoy: Decoy,
+ mock_state_view: StateView,
+) -> None:
+ """It should raise an error if pipette might collide with thermocycler lid on the Flex."""
+ destination_well_point = Point(x=123, y=123, z=123)
+ pipette_bounds_at_destination = (
+ Point(x=50, y=350, z=204.5),
+ Point(x=150, y=450, z=204.5),
+ Point(x=150, y=400, z=204.5),
+ Point(x=50, y=300, z=204.5),
+ )
+
+ decoy.when(
+ mock_state_view.pipettes.get_is_partially_configured("pipette-id")
+ ).then_return(True)
+ decoy.when(mock_state_view.pipettes.get_primary_nozzle("pipette-id")).then_return(
+ "A12"
+ )
+ decoy.when(
+ mock_state_view.geometry.get_ancestor_slot_name("destination-labware-id")
+ ).then_return(DeckSlotName.SLOT_C2)
+
+ decoy.when(
+ mock_state_view.geometry.get_well_position(
+ labware_id="destination-labware-id",
+ well_name="A2",
+ well_location=WellLocation(origin=WellOrigin.TOP, offset=WellOffset(z=10)),
+ )
+ ).then_return(destination_well_point)
+ decoy.when(
+ mock_state_view.pipettes.get_pipette_bounds_at_specified_move_to_position(
+ pipette_id="pipette-id", destination_position=destination_well_point
+ )
+ ).then_return(pipette_bounds_at_destination)
+
+ decoy.when(
+ adjacent_slots_getters.get_surrounding_slots(5, robot_type="OT-3 Standard")
+ ).then_return(
+ _MixedTypeSlots(
+ regular_slots=[
+ DeckSlotName.SLOT_A1,
+ DeckSlotName.SLOT_B1,
+ ],
+ staging_slots=[StagingSlotName.SLOT_C4],
+ )
+ )
+ decoy.when(mock_state_view.modules.is_flex_deck_with_thermocycler()).then_return(
+ True
+ )
+ with pytest.raises(
+ deck_conflict.PartialTipMovementNotAllowedError,
+ match="collision with thermocycler lid in deck slot A1.",
+ ):
+ deck_conflict.check_safe_for_pipette_movement(
+ engine_state=mock_state_view,
+ pipette_id="pipette-id",
+ labware_id="destination-labware-id",
+ well_name="A2",
+ well_location=WellLocation(origin=WellOrigin.TOP, offset=WellOffset(z=10)),
+ )
+
+
+@pytest.mark.parametrize(
+ ("robot_type", "deck_type"),
+ [("OT-3 Standard", DeckType.OT3_STANDARD)],
+)
+@pytest.mark.parametrize(
+ ["destination_well_point", "expected_raise"],
+ [
+ (
+ Point(x=-12, y=100, z=60),
+ pytest.raises(
+ deck_conflict.PartialTipMovementNotAllowedError,
+ match="outside of robot bounds",
+ ),
+ ),
+ (
+ Point(x=593, y=100, z=60),
+ pytest.raises(
+ deck_conflict.PartialTipMovementNotAllowedError,
+ match="outside of robot bounds",
+ ),
+ ),
+ (
+ Point(x=100, y=1, z=60),
+ pytest.raises(
+ deck_conflict.PartialTipMovementNotAllowedError,
+ match="outside of robot bounds",
+ ),
+ ),
+ (
+ Point(x=100, y=507, z=60),
+ pytest.raises(
+ deck_conflict.PartialTipMovementNotAllowedError,
+ match="outside of robot bounds",
+ ),
+ ),
+ ],
+)
+def test_deck_conflict_raises_for_out_of_bounds_96_channel_move(
+ decoy: Decoy,
+ mock_state_view: StateView,
+ destination_well_point: Point,
+ expected_raise: ContextManager[Any],
+) -> None:
+ """It should raise errors when moving to locations out of robot's bounds for partial tip 96-channel movement.
+
+ Test premise:
+ - we are using a pipette configured for COLUMN nozzle layout with primary nozzle A12
+ """
+ decoy.when(mock_state_view.pipettes.get_channels("pipette-id")).then_return(96)
+ decoy.when(
+ mock_state_view.labware.get_display_name("destination-labware-id")
+ ).then_return("destination-labware")
+ decoy.when(
+ mock_state_view.pipettes.get_nozzle_layout_type("pipette-id")
+ ).then_return(NozzleConfigurationType.COLUMN)
+ decoy.when(
+ mock_state_view.pipettes.get_is_partially_configured("pipette-id")
+ ).then_return(True)
+ decoy.when(mock_state_view.pipettes.get_primary_nozzle("pipette-id")).then_return(
+ "A12"
+ )
+ decoy.when(
+ mock_state_view.geometry.get_ancestor_slot_name("destination-labware-id")
+ ).then_return(DeckSlotName.SLOT_C2)
+
+ decoy.when(
+ mock_state_view.geometry.get_well_position(
+ labware_id="destination-labware-id",
+ well_name="A2",
+ well_location=WellLocation(origin=WellOrigin.TOP, offset=WellOffset(z=10)),
+ )
+ ).then_return(destination_well_point)
+
+
+class PipetteMovementSpec(NamedTuple):
+ """Spec data to test deck_conflict.check_safe_for_tip_pickup_and_return ."""
+
+ tiprack_parent: OnDeckLabwareLocation
+ tiprack_dim: Dimensions
+ is_on_flex_adapter: bool
+ is_partial_config: bool
+ expected_raise: ContextManager[Any]
+
+
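+# Scenarios covered below: a full-configuration pickup requires the Flex 96 tip rack adapter,
+# while a partial configuration allows a deck slot or an adapter no taller than the tip rack itself.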
+pipette_movement_specs: List[PipetteMovementSpec] = [
+ PipetteMovementSpec(
+ tiprack_parent=DeckSlotLocation(slotName=DeckSlotName.SLOT_5),
+ tiprack_dim=Dimensions(x=0, y=0, z=50),
+ is_on_flex_adapter=False,
+ is_partial_config=False,
+ expected_raise=pytest.raises(
+ deck_conflict.UnsuitableTiprackForPipetteMotion,
+ match="A cool tiprack must be on an Opentrons Flex 96 Tip Rack Adapter",
+ ),
+ ),
+ PipetteMovementSpec(
+ tiprack_parent=OnLabwareLocation(labwareId="adapter-id"),
+ tiprack_dim=Dimensions(x=0, y=0, z=50),
+ is_on_flex_adapter=True,
+ is_partial_config=False,
+ expected_raise=does_not_raise(),
+ ),
+ PipetteMovementSpec(
+ tiprack_parent=OnLabwareLocation(labwareId="adapter-id"),
+ tiprack_dim=Dimensions(x=0, y=0, z=50),
+ is_on_flex_adapter=False,
+ is_partial_config=False,
+ expected_raise=pytest.raises(
+ deck_conflict.UnsuitableTiprackForPipetteMotion,
+ match="A cool tiprack must be on an Opentrons Flex 96 Tip Rack Adapter",
+ ),
+ ),
+ PipetteMovementSpec(
+ tiprack_parent=OnLabwareLocation(labwareId="adapter-id"),
+ tiprack_dim=Dimensions(x=0, y=0, z=50),
+ is_on_flex_adapter=True,
+ is_partial_config=True,
+ expected_raise=pytest.raises(
+ deck_conflict.PartialTipMovementNotAllowedError,
+ match="A cool tiprack cannot be on an adapter taller than the tip rack",
+ ),
+ ),
+ PipetteMovementSpec(
+ tiprack_parent=OnLabwareLocation(labwareId="adapter-id"),
+ tiprack_dim=Dimensions(x=0, y=0, z=101),
+ is_on_flex_adapter=True,
+ is_partial_config=True,
+ expected_raise=does_not_raise(),
+ ),
+ PipetteMovementSpec(
+ tiprack_parent=DeckSlotLocation(slotName=DeckSlotName.SLOT_5),
+ tiprack_dim=Dimensions(x=0, y=0, z=50),
+ is_on_flex_adapter=True, # will be ignored
+ is_partial_config=True,
+ expected_raise=does_not_raise(),
+ ),
+]
+
+
+@pytest.mark.parametrize(
+ ("robot_type", "deck_type"),
+ [("OT-3 Standard", DeckType.OT3_STANDARD)],
+)
+@pytest.mark.parametrize(
+ argnames=PipetteMovementSpec._fields,
+ argvalues=pipette_movement_specs,
+)
+def test_valid_96_pipette_movement_for_tiprack_and_adapter(
+ decoy: Decoy,
+ mock_state_view: StateView,
+ tiprack_parent: OnDeckLabwareLocation,
+ tiprack_dim: Dimensions,
+ is_on_flex_adapter: bool,
+ is_partial_config: bool,
+ expected_raise: ContextManager[Any],
+) -> None:
+ """It should raise appropriate error for unsuitable tiprack parent when moving 96 channel to it."""
+ decoy.when(mock_state_view.pipettes.get_channels("pipette-id")).then_return(96)
+ decoy.when(mock_state_view.labware.get_dimensions("adapter-id")).then_return(
+ Dimensions(x=0, y=0, z=100)
+ )
+ decoy.when(mock_state_view.labware.get_display_name("labware-id")).then_return(
+ "A cool tiprack"
+ )
+ decoy.when(
+ mock_state_view.pipettes.get_is_partially_configured("pipette-id")
+ ).then_return(is_partial_config)
+ decoy.when(mock_state_view.labware.get_location("labware-id")).then_return(
+ tiprack_parent
+ )
+ decoy.when(mock_state_view.labware.get_dimensions("labware-id")).then_return(
+ tiprack_dim
+ )
+ decoy.when(
+ mock_state_view.labware.get_has_quirk(
+ labware_id="adapter-id", quirk="tiprackAdapterFor96Channel"
+ )
+ ).then_return(is_on_flex_adapter)
+
+ with expected_raise:
+ deck_conflict.check_safe_for_tip_pickup_and_return(
+ engine_state=mock_state_view,
+ pipette_id="pipette-id",
+ labware_id="labware-id",
+ )
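The parametrized cases above pin down a simple rule set for 96-channel tip pickup. The sketch below only illustrates that rule set as inferred from the specs; it is not the real deck_conflict.check_safe_for_tip_pickup_and_return (which works from engine state), and the function name and flat parameters here are hypothetical.

def sketch_tip_pickup_rule(
    is_partial_config: bool,
    tiprack_parent_is_adapter: bool,
    is_on_flex_adapter: bool,
    tiprack_height: float,
    adapter_height: float,
) -> str:
    """Name the outcome the specs above expect (illustration only)."""
    if not is_partial_config and not is_on_flex_adapter:
        # Full-layout pickup requires the Opentrons Flex 96 Tip Rack Adapter.
        return "UnsuitableTiprackForPipetteMotion"
    if (
        is_partial_config
        and tiprack_parent_is_adapter
        and adapter_height > tiprack_height
    ):
        # Partial-layout pickup is blocked when the adapter is taller than the tip rack.
        return "PartialTipMovementNotAllowedError"
    return "ok"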
diff --git a/api/tests/opentrons/protocol_api/core/engine/test_instrument_core.py b/api/tests/opentrons/protocol_api/core/engine/test_instrument_core.py
index 333c39e0bfd..b7e77a44d63 100644
--- a/api/tests/opentrons/protocol_api/core/engine/test_instrument_core.py
+++ b/api/tests/opentrons/protocol_api/core/engine/test_instrument_core.py
@@ -1,5 +1,5 @@
"""Test for the ProtocolEngine-based instrument API core."""
-from typing import cast, Optional
+from typing import cast, Optional, Union
import pytest
from decoy import Decoy
@@ -8,6 +8,7 @@
from opentrons.hardware_control import SyncHardwareAPI
from opentrons.hardware_control.dev_types import PipetteDict
+from opentrons.hardware_control.nozzle_manager import NozzleConfigurationType
from opentrons.protocol_engine import (
DeckPoint,
LoadedPipette,
@@ -18,6 +19,7 @@
DropTipWellLocation,
DropTipWellOrigin,
)
+from opentrons.protocol_engine.clients.sync_client import SyncClient
from opentrons.protocol_engine.errors.exceptions import TipNotAttachedError
from opentrons.protocol_engine.clients import SyncClient as EngineClient
from opentrons.protocol_engine.types import (
@@ -27,11 +29,26 @@
RowNozzleLayoutConfiguration,
SingleNozzleLayoutConfiguration,
ColumnNozzleLayoutConfiguration,
+ AddressableOffsetVector,
+)
+from opentrons.protocol_api.disposal_locations import (
+ TrashBin,
+ WasteChute,
+ DisposalOffset,
)
from opentrons.protocol_api._nozzle_layout import NozzleLayout
-from opentrons.protocol_api.core.engine import InstrumentCore, WellCore, ProtocolCore
+from opentrons.protocol_api.core.engine import (
+ InstrumentCore,
+ WellCore,
+ ProtocolCore,
+ deck_conflict,
+)
+from opentrons.protocols.api_support.definitions import MAX_SUPPORTED_VERSION
+from opentrons.protocols.api_support.types import APIVersion
from opentrons.types import Location, Mount, MountType, Point
+from ... import versions_below, versions_at_or_above
+
@pytest.fixture
def mock_engine_client(decoy: Decoy) -> EngineClient:
@@ -51,6 +68,15 @@ def mock_protocol_core(decoy: Decoy) -> ProtocolCore:
return decoy.mock(cls=ProtocolCore)
+@pytest.fixture(autouse=True)
+def patch_mock_pipette_movement_safety_check(
+ decoy: Decoy, monkeypatch: pytest.MonkeyPatch
+) -> None:
+ """Replace deck_conflict.check() with a mock."""
+ mock = decoy.mock(func=deck_conflict.check_safe_for_pipette_movement)
+ monkeypatch.setattr(deck_conflict, "check_safe_for_pipette_movement", mock)
+
+
@pytest.fixture
def subject(
decoy: Decoy,
@@ -238,7 +264,21 @@ def test_pick_up_tip(
)
decoy.verify(
- mock_engine_client.pick_up_tip(
+ deck_conflict.check_safe_for_tip_pickup_and_return(
+ engine_state=mock_engine_client.state,
+ pipette_id="abc123",
+ labware_id="labware-id",
+ ),
+ deck_conflict.check_safe_for_pipette_movement(
+ engine_state=mock_engine_client.state,
+ pipette_id="abc123",
+ labware_id="labware-id",
+ well_name="well-name",
+ well_location=WellLocation(
+ origin=WellOrigin.TOP, offset=WellOffset(x=3, y=2, z=1)
+ ),
+ ),
+ mock_engine_client.pick_up_tip_wait_for_recovery(
pipette_id="abc123",
labware_id="labware-id",
well_name="well-name",
@@ -276,6 +316,16 @@ def test_drop_tip_no_location(
subject.drop_tip(location=None, well_core=well_core, home_after=True)
decoy.verify(
+ deck_conflict.check_safe_for_pipette_movement(
+ engine_state=mock_engine_client.state,
+ pipette_id="abc123",
+ labware_id="labware-id",
+ well_name="well-name",
+ well_location=DropTipWellLocation(
+ origin=DropTipWellOrigin.DEFAULT,
+ offset=WellOffset(x=0, y=0, z=0),
+ ),
+ ),
mock_engine_client.drop_tip(
pipette_id="abc123",
labware_id="labware-id",
@@ -287,7 +337,6 @@ def test_drop_tip_no_location(
home_after=True,
alternateDropLocation=False,
),
- times=1,
)
@@ -309,10 +358,27 @@ def test_drop_tip_with_location(
absolute_point=Point(1, 2, 3),
)
).then_return(WellLocation(origin=WellOrigin.TOP, offset=WellOffset(x=3, y=2, z=1)))
+ decoy.when(mock_engine_client.state.labware.is_tiprack("labware-id")).then_return(
+ True
+ )
subject.drop_tip(location=location, well_core=well_core, home_after=True)
decoy.verify(
+ deck_conflict.check_safe_for_tip_pickup_and_return(
+ engine_state=mock_engine_client.state,
+ pipette_id="abc123",
+ labware_id="labware-id",
+ ),
+ deck_conflict.check_safe_for_pipette_movement(
+ engine_state=mock_engine_client.state,
+ pipette_id="abc123",
+ labware_id="labware-id",
+ well_name="well-name",
+ well_location=DropTipWellLocation(
+ origin=DropTipWellOrigin.TOP, offset=WellOffset(x=3, y=2, z=1)
+ ),
+ ),
mock_engine_client.drop_tip(
pipette_id="abc123",
labware_id="labware-id",
@@ -323,7 +389,68 @@ def test_drop_tip_with_location(
home_after=True,
alternateDropLocation=False,
),
- times=1,
+ )
+
+
+def test_drop_tip_in_trash_bin(
+ decoy: Decoy, mock_engine_client: EngineClient, subject: InstrumentCore
+) -> None:
+ """It should move to the trash bin and drop the tip in place."""
+ trash_bin = decoy.mock(cls=TrashBin)
+
+ decoy.when(trash_bin.offset).then_return(DisposalOffset(x=1, y=2, z=3))
+ decoy.when(trash_bin.area_name).then_return("my tubular area")
+
+ subject.drop_tip_in_disposal_location(
+ trash_bin, home_after=True, alternate_tip_drop=True
+ )
+
+ decoy.verify(
+ mock_engine_client.move_to_addressable_area_for_drop_tip(
+ pipette_id="abc123",
+ addressable_area_name="my tubular area",
+ offset=AddressableOffsetVector(x=1, y=2, z=3),
+ force_direct=False,
+ speed=None,
+ minimum_z_height=None,
+ alternate_drop_location=True,
+ ignore_tip_configuration=True,
+ ),
+ mock_engine_client.drop_tip_in_place(
+ pipette_id="abc123",
+ home_after=True,
+ ),
+ )
+
+
+def test_drop_tip_in_waste_chute(
+ decoy: Decoy, mock_engine_client: EngineClient, subject: InstrumentCore
+) -> None:
+ """It should move to the trash bin and drop the tip in place."""
+ waste_chute = decoy.mock(cls=WasteChute)
+
+ decoy.when(waste_chute.offset).then_return(DisposalOffset(x=4, y=5, z=6))
+ decoy.when(
+ mock_engine_client.state.tips.get_pipette_channels("abc123")
+ ).then_return(96)
+
+ subject.drop_tip_in_disposal_location(
+ waste_chute, home_after=True, alternate_tip_drop=True
+ )
+
+ decoy.verify(
+ mock_engine_client.move_to_addressable_area(
+ pipette_id="abc123",
+ addressable_area_name="96ChannelWasteChute",
+ offset=AddressableOffsetVector(x=4, y=5, z=6),
+ force_direct=False,
+ speed=None,
+ minimum_z_height=None,
+ ),
+ mock_engine_client.drop_tip_in_place(
+ pipette_id="abc123",
+ home_after=True,
+ ),
)
@@ -356,6 +483,15 @@ def test_aspirate_from_well(
)
decoy.verify(
+ deck_conflict.check_safe_for_pipette_movement(
+ engine_state=mock_engine_client.state,
+ pipette_id="abc123",
+ labware_id="123abc",
+ well_name="my cool well",
+ well_location=WellLocation(
+ origin=WellOrigin.TOP, offset=WellOffset(x=3, y=2, z=1)
+ ),
+ ),
mock_engine_client.aspirate(
pipette_id="abc123",
labware_id="123abc",
@@ -453,6 +589,15 @@ def test_blow_out_to_well(
subject.blow_out(location=location, well_core=well_core, in_place=False)
decoy.verify(
+ deck_conflict.check_safe_for_pipette_movement(
+ engine_state=mock_engine_client.state,
+ pipette_id="abc123",
+ labware_id="123abc",
+ well_name="my cool well",
+ well_location=WellLocation(
+ origin=WellOrigin.TOP, offset=WellOffset(x=3, y=2, z=1)
+ ),
+ ),
mock_engine_client.blow_out(
pipette_id="abc123",
labware_id="123abc",
@@ -528,6 +673,8 @@ def test_dispense_to_well(
name="my cool well", labware_id="123abc", engine_client=mock_engine_client
)
+ decoy.when(mock_protocol_core.api_version).then_return(MAX_SUPPORTED_VERSION)
+
decoy.when(
mock_engine_client.state.geometry.get_relative_well_location(
labware_id="123abc", well_name="my cool well", absolute_point=Point(1, 2, 3)
@@ -545,6 +692,15 @@ def test_dispense_to_well(
)
decoy.verify(
+ deck_conflict.check_safe_for_pipette_movement(
+ engine_state=mock_engine_client.state,
+ pipette_id="abc123",
+ labware_id="123abc",
+ well_name="my cool well",
+ well_location=WellLocation(
+ origin=WellOrigin.TOP, offset=WellOffset(x=3, y=2, z=1)
+ ),
+ ),
mock_engine_client.dispense(
pipette_id="abc123",
labware_id="123abc",
@@ -567,6 +723,7 @@ def test_dispense_in_place(
subject: InstrumentCore,
) -> None:
"""It should dispense in place."""
+ decoy.when(mock_protocol_core.api_version).then_return(MAX_SUPPORTED_VERSION)
location = Location(point=Point(1, 2, 3), labware=None)
subject.dispense(
volume=12.34,
@@ -592,6 +749,7 @@ def test_dispense_to_coordinates(
subject: InstrumentCore,
) -> None:
"""It should dispense in place."""
+ decoy.when(mock_protocol_core.api_version).then_return(MAX_SUPPORTED_VERSION)
location = Location(point=Point(1, 2, 3), labware=None)
subject.dispense(
volume=12.34,
@@ -617,6 +775,51 @@ def test_dispense_to_coordinates(
)
+@pytest.mark.parametrize(
+ ("api_version", "expect_clampage"),
+ [(APIVersion(2, 16), True), (APIVersion(2, 17), False)],
+)
+def test_dispense_conditionally_clamps_volume(
+ api_version: APIVersion,
+ expect_clampage: bool,
+ decoy: Decoy,
+ subject: InstrumentCore,
+ mock_protocol_core: ProtocolCore,
+ mock_engine_client: SyncClient,
+) -> None:
+ """It should clamp the dispensed volume to the available volume on older API versions."""
+ decoy.when(mock_protocol_core.api_version).then_return(api_version)
+ decoy.when(
+ mock_engine_client.state.pipettes.get_aspirated_volume(subject.pipette_id)
+ ).then_return(111.111)
+
+ subject.dispense(
+ volume=99999999.99999999,
+ rate=5.6,
+ flow_rate=7.8,
+ well_core=None,
+ location=Location(point=Point(1, 2, 3), labware=None),
+ in_place=True,
+ push_out=None,
+ )
+
+ if expect_clampage:
+ decoy.verify(
+ mock_engine_client.dispense_in_place(
+ pipette_id="abc123", volume=111.111, flow_rate=7.8, push_out=None
+ ),
+ )
+ else:
+ decoy.verify(
+ mock_engine_client.dispense_in_place(
+ pipette_id="abc123",
+ volume=99999999.99999999,
+ flow_rate=7.8,
+ push_out=None,
+ ),
+ )
+
+
def test_initialization_sets_default_movement_speed(
decoy: Decoy,
subject: InstrumentCore,
@@ -857,6 +1060,15 @@ def test_touch_tip(
)
decoy.verify(
+ deck_conflict.check_safe_for_pipette_movement(
+ engine_state=mock_engine_client.state,
+ pipette_id="abc123",
+ labware_id="123abc",
+ well_name="my cool well",
+ well_location=WellLocation(
+ origin=WellOrigin.TOP, offset=WellOffset(x=0, y=0, z=4.56)
+ ),
+ ),
mock_engine_client.touch_tip(
pipette_id="abc123",
labware_id="123abc",
@@ -891,19 +1103,19 @@ def test_has_tip(
NozzleLayout.COLUMN,
"A1",
"H1",
- ColumnNozzleLayoutConfiguration(primary_nozzle="A1"),
+ ColumnNozzleLayoutConfiguration(primaryNozzle="A1"),
],
[
NozzleLayout.SINGLE,
"H12",
None,
- SingleNozzleLayoutConfiguration(primary_nozzle="H12"),
+ SingleNozzleLayoutConfiguration(primaryNozzle="H12"),
],
[
NozzleLayout.ROW,
"A12",
None,
- RowNozzleLayoutConfiguration(primary_nozzle="A12"),
+ RowNozzleLayoutConfiguration(primaryNozzle="A12"),
],
],
)
@@ -922,3 +1134,78 @@ def test_configure_nozzle_layout(
decoy.verify(
mock_engine_client.configure_nozzle_layout(subject._pipette_id, expected_model)
)
+
+
+@pytest.mark.parametrize(
+ argnames=["pipette_channels", "nozzle_layout", "primary_nozzle", "expected_result"],
+ argvalues=[
+ (96, NozzleConfigurationType.FULL, "A1", True),
+ (96, NozzleConfigurationType.FULL, None, True),
+ (96, NozzleConfigurationType.ROW, "A1", True),
+ (96, NozzleConfigurationType.COLUMN, "A1", True),
+ (96, NozzleConfigurationType.COLUMN, "A12", True),
+ (96, NozzleConfigurationType.SINGLE, "H12", True),
+ (96, NozzleConfigurationType.SINGLE, "A1", True),
+ (8, NozzleConfigurationType.FULL, "A1", True),
+ (8, NozzleConfigurationType.FULL, None, True),
+ (8, NozzleConfigurationType.SINGLE, "H1", True),
+ (8, NozzleConfigurationType.SINGLE, "A1", False),
+ (1, NozzleConfigurationType.FULL, None, True),
+ ],
+)
+def test_is_tip_tracking_available(
+ decoy: Decoy,
+ mock_engine_client: EngineClient,
+ subject: InstrumentCore,
+ pipette_channels: int,
+ nozzle_layout: NozzleConfigurationType,
+ primary_nozzle: Union[str, None],
+ expected_result: bool,
+) -> None:
+ """It should return whether tip tracking is available based on nozzle configuration."""
+ decoy.when(
+ mock_engine_client.state.tips.get_pipette_channels(subject.pipette_id)
+ ).then_return(pipette_channels)
+ decoy.when(
+ mock_engine_client.state.pipettes.get_nozzle_layout_type(subject.pipette_id)
+ ).then_return(nozzle_layout)
+ decoy.when(
+ mock_engine_client.state.pipettes.get_primary_nozzle(subject.pipette_id)
+ ).then_return(primary_nozzle)
+ assert subject.is_tip_tracking_available() == expected_result
+
+
+@pytest.mark.parametrize("version", versions_below(APIVersion(2, 19), flex_only=False))
+def test_configure_for_volume_pre_219(
+ decoy: Decoy,
+ mock_engine_client: EngineClient,
+ mock_protocol_core: ProtocolCore,
+ subject: InstrumentCore,
+ version: APIVersion,
+) -> None:
+ """Configure_for_volume should specify overlap version."""
+ decoy.when(mock_protocol_core.api_version).then_return(version)
+ subject.configure_for_volume(123.0)
+ decoy.verify(
+ mock_engine_client.configure_for_volume(
+ pipette_id=subject.pipette_id, volume=123.0, tip_overlap_version="v0"
+ )
+ )
+
+
+@pytest.mark.parametrize("version", versions_at_or_above(APIVersion(2, 19)))
+def test_configure_for_volume_post_219(
+ decoy: Decoy,
+ mock_engine_client: EngineClient,
+ mock_protocol_core: ProtocolCore,
+ subject: InstrumentCore,
+ version: APIVersion,
+) -> None:
+ """Configure_for_volume should specify overlap version."""
+ decoy.when(mock_protocol_core.api_version).then_return(version)
+ subject.configure_for_volume(123.0)
+ decoy.verify(
+ mock_engine_client.configure_for_volume(
+ pipette_id=subject.pipette_id, volume=123.0, tip_overlap_version="v1"
+ )
+ )
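The volume clamping exercised by test_dispense_conditionally_clamps_volume above reduces to a small version gate. A minimal sketch inferred only from the 2.16 and 2.17 cases; the helper name and flat arguments are hypothetical and are not the InstrumentCore API.

from opentrons.protocols.api_support.types import APIVersion


def sketch_clamped_dispense_volume(
    api_version: APIVersion, requested_volume: float, aspirated_volume: float
) -> float:
    # Before API 2.17 the dispense is silently clamped to what is actually
    # held in the tip; from 2.17 on the requested volume passes through.
    if api_version < APIVersion(2, 17):
        return min(requested_volume, aspirated_volume)
    return requested_volume

Matching the parametrized cases: with APIVersion(2, 16) a request of 99999999.99999999 against 111.111 aspirated returns 111.111, while APIVersion(2, 17) returns the requested volume unchanged.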
diff --git a/api/tests/opentrons/protocol_api/core/engine/test_labware_core.py b/api/tests/opentrons/protocol_api/core/engine/test_labware_core.py
index cfd97644a22..e02b5e7b8f7 100644
--- a/api/tests/opentrons/protocol_api/core/engine/test_labware_core.py
+++ b/api/tests/opentrons/protocol_api/core/engine/test_labware_core.py
@@ -19,9 +19,15 @@
from opentrons.types import DeckSlotName, Point
from opentrons.protocol_engine.clients import SyncClient as EngineClient
from opentrons.protocol_engine.errors import LabwareNotOnDeckError
+from opentrons.protocol_engine.types import (
+ LabwareOffsetCreate,
+ LabwareOffsetLocation,
+ LabwareOffsetVector,
+)
from opentrons.protocol_api.core.labware import LabwareLoadParams
from opentrons.protocol_api.core.engine import LabwareCore, WellCore
+from opentrons.calibration_storage.helpers import uri_from_details
@pytest.fixture
@@ -36,11 +42,9 @@ def mock_engine_client(
) -> EngineClient:
"""Get a mock ProtocolEngine synchronous client."""
engine_client = decoy.mock(cls=EngineClient)
-
decoy.when(engine_client.state.labware.get_definition("cool-labware")).then_return(
labware_definition
)
-
return engine_client
@@ -67,9 +71,87 @@ def test_get_load_params(subject: LabwareCore) -> None:
assert subject.load_name == "world"
-def test_set_calibration(subject: LabwareCore) -> None:
- """It should raise if you attempt to set calibration."""
- with pytest.raises(NotImplementedError):
+@pytest.mark.parametrize(
+ "labware_definition",
+ [
+ LabwareDefinition.construct( # type: ignore[call-arg]
+ namespace="hello",
+ version=42,
+ parameters=LabwareDefinitionParameters.construct(loadName="world"), # type: ignore[call-arg]
+ ordering=[],
+ metadata=LabwareDefinitionMetadata.construct(displayName="what a cool labware"), # type: ignore[call-arg]
+ )
+ ],
+)
+def test_set_calibration_succeeds_in_ok_location(
+ decoy: Decoy,
+ subject: LabwareCore,
+ mock_engine_client: EngineClient,
+ labware_definition: LabwareDefinition,
+) -> None:
+ """It should pass along an AddLabwareOffset if possible."""
+ decoy.when(
+ mock_engine_client.state.labware.get_definition_uri("cool-labware")
+ ).then_return(
+ uri_from_details(
+ load_name=labware_definition.parameters.loadName,
+ namespace=labware_definition.namespace,
+ version=labware_definition.version,
+ )
+ )
+ decoy.when(
+ mock_engine_client.state.labware.get_display_name("cool-labware")
+ ).then_return("what a cool labware")
+ location = LabwareOffsetLocation(slotName=DeckSlotName.SLOT_C2)
+ decoy.when(
+ mock_engine_client.state.geometry.get_offset_location("cool-labware")
+ ).then_return(location)
+ subject.set_calibration(Point(1, 2, 3))
+ decoy.verify(
+ mock_engine_client.add_labware_offset(
+ LabwareOffsetCreate(
+ definitionUri="hello/world/42",
+ location=location,
+ vector=LabwareOffsetVector(x=1, y=2, z=3),
+ )
+ ),
+ mock_engine_client.reload_labware(
+ labware_id="cool-labware",
+ ),
+ )
+
+
+@pytest.mark.parametrize(
+ "labware_definition",
+ [
+ LabwareDefinition.construct( # type: ignore[call-arg]
+ namespace="hello",
+ version=42,
+ parameters=LabwareDefinitionParameters.construct(loadName="world"), # type: ignore[call-arg]
+ ordering=[],
+ )
+ ],
+)
+def test_set_calibration_fails_in_bad_location(
+ decoy: Decoy,
+ subject: LabwareCore,
+ mock_engine_client: EngineClient,
+ labware_definition: LabwareDefinition,
+) -> None:
+ """It should raise if you attempt to set calibration when the labware is not on deck."""
+ decoy.when(
+ mock_engine_client.state.labware.get_definition_uri("cool-labware")
+ ).then_return(
+ uri_from_details(
+ load_name=labware_definition.parameters.loadName,
+ namespace=labware_definition.namespace,
+ version=labware_definition.version,
+ )
+ )
+ decoy.when(
+ mock_engine_client.state.geometry.get_offset_location("cool-labware")
+ ).then_return(None)
+ with pytest.raises(LabwareNotOnDeckError):
subject.set_calibration(Point(1, 2, 3))
@@ -80,6 +162,10 @@ def test_set_calibration(subject: LabwareCore) -> None:
namespace="hello",
parameters=LabwareDefinitionParameters.construct(loadName="world"), # type: ignore[call-arg]
ordering=[],
+ allowedRoles=[],
+ stackingOffsetWithLabware={},
+ stackingOffsetWithModule={},
+ gripperOffsets={},
)
],
)
@@ -103,7 +189,7 @@ def test_get_definition(subject: LabwareCore) -> None:
def test_get_user_display_name(decoy: Decoy, mock_engine_client: EngineClient) -> None:
"""It should get the labware's user-provided label, if any."""
decoy.when(
- mock_engine_client.state.labware.get_display_name("cool-labware")
+ mock_engine_client.state.labware.get_user_specified_display_name("cool-labware")
).then_return("Cool Label")
subject = LabwareCore(labware_id="cool-labware", engine_client=mock_engine_client)
@@ -149,7 +235,7 @@ def test_get_name_load_name(subject: LabwareCore) -> None:
def test_get_name_display_name(decoy: Decoy, mock_engine_client: EngineClient) -> None:
"""It should get the user display name when one is defined."""
decoy.when(
- mock_engine_client.state.labware.get_display_name("cool-labware")
+ mock_engine_client.state.labware.get_user_specified_display_name("cool-labware")
).then_return("my cool display name")
subject = LabwareCore(labware_id="cool-labware", engine_client=mock_engine_client)
@@ -245,13 +331,16 @@ def test_get_next_tip(
labware_id="cool-labware",
num_tips=8,
starting_tip_name="B1",
+ nozzle_map=None,
)
).then_return("A2")
starting_tip = WellCore(
name="B1", labware_id="cool-labware", engine_client=mock_engine_client
)
- result = subject.get_next_tip(num_tips=8, starting_tip=starting_tip)
+ result = subject.get_next_tip(
+ num_tips=8, starting_tip=starting_tip, nozzle_map=None
+ )
assert result == "A2"
diff --git a/api/tests/opentrons/protocol_api/core/engine/test_overlap_versions.py b/api/tests/opentrons/protocol_api/core/engine/test_overlap_versions.py
new file mode 100644
index 00000000000..9d41a431026
--- /dev/null
+++ b/api/tests/opentrons/protocol_api/core/engine/test_overlap_versions.py
@@ -0,0 +1,26 @@
+"""Test the tip overlap selection logic in the API core."""
+import pytest
+
+from opentrons.protocol_api.core.engine.overlap_versions import overlap_for_api_version
+from opentrons.protocols.api_support.types import APIVersion
+
+from ... import versions_below, versions_at_or_above
+
+
+@pytest.mark.parametrize(
+ "api_version", versions_below(APIVersion(2, 19), flex_only=False)
+)
+def test_all_below_219_use_v0(api_version: APIVersion) -> None:
+ """Versions below 2.19 should use v0."""
+ assert overlap_for_api_version(api_version) == "v0"
+
+
+@pytest.mark.parametrize("api_version", versions_at_or_above(APIVersion(2, 19)))
+def test_all_above_219_use_v1(api_version: APIVersion) -> None:
+ """Versions above 2.19 should use v1."""
+ assert overlap_for_api_version(api_version) == "v1"
+
+
+def test_future_api_version_uses_v1() -> None:
+ """Future versions should use v1."""
+ assert overlap_for_api_version(APIVersion(2, 99)) == "v1"
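The mapping these three tests pin down is a single comparison. A minimal sketch, assuming overlap_for_api_version needs nothing beyond the version check shown here (the real implementation may differ):

from opentrons.protocols.api_support.types import APIVersion


def sketch_overlap_for_api_version(api_version: APIVersion) -> str:
    # Versions below 2.19 keep the legacy "v0" tip overlap data; 2.19 and
    # anything newer (including future versions) selects "v1".
    return "v0" if api_version < APIVersion(2, 19) else "v1"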
diff --git a/api/tests/opentrons/protocol_api/core/engine/test_point_calculations.py b/api/tests/opentrons/protocol_api/core/engine/test_point_calculations.py
index f8c77ad5ef6..2c28fa84cb2 100644
--- a/api/tests/opentrons/protocol_api/core/engine/test_point_calculations.py
+++ b/api/tests/opentrons/protocol_api/core/engine/test_point_calculations.py
@@ -1,4 +1,8 @@
"""Tests for Protocol API point calculation."""
+from typing import Tuple
+
+import pytest
+
from opentrons.types import Point
from opentrons.protocol_api.core.engine import point_calculations as subject
@@ -15,3 +19,51 @@ def test_get_relative_offset() -> None:
)
assert result == Point(2.0, 3.0, 4.0)
+
+
+@pytest.mark.parametrize(
+ argnames=["rect1", "rect2", "expected_result"],
+ argvalues=[
+ ( # One rectangle inside the other
+ (Point(236, 170, 0), Point(391, 375, 0)),
+ (Point(237, 250, 0), Point(388, 294, 0)),
+ True,
+ ),
+ ( # One rectangle inside the other
+ (Point(237, 250, 0), Point(388, 294, 0)),
+ (Point(236, 170, 0), Point(391, 375, 0)),
+ True,
+ ),
+ ( # Two non-overlapping rectangles
+ (Point(236, 170, 0), Point(391, 375, 0)),
+ (Point(438, 216, 100), Point(937, 306, 200)),
+ False,
+ ),
+ ( # Two non-overlapping rectangles in 2nd quadrant
+ (Point(-438, 216, 100), Point(-937, 306, 200)),
+ (Point(-236, 170, 0), Point(-391, 375, 0)),
+ False,
+ ),
+ ( # Overlapping rectangles with one corner of each rectangle overlapping
+ (Point(719, 304, 20), Point(970, 370, 20)),
+ (Point(438, 216, 100), Point(937, 306, 200)),
+ True,
+ ),
+ ( # Overlapping rectangles with no overlapping corners
+ # (think two rectangles making a '+' sign)
+ (Point(630, 94, 20), Point(800, 500, 20)),
+ (Point(438, 216, 100), Point(937, 306, 200)),
+ True,
+ ),
+ ],
+)
+def test_are_overlapping_rectangles(
+ rect1: Tuple[Point, Point],
+ rect2: Tuple[Point, Point],
+ expected_result: bool,
+) -> None:
+ """It should calculate correctly whether the rectangles are overlapping."""
+ assert (
+ subject.are_overlapping_rectangles(rectangle1=rect1, rectangle2=rect2)
+ == expected_result
+ )
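Axis-aligned overlap of the kind parametrized above reduces to interval overlap on x and y. A hedged sketch consistent with those cases; the real point_calculations.are_overlapping_rectangles takes the same corner-point tuples but may be implemented differently, and z is ignored here.

from typing import Tuple

from opentrons.types import Point


def sketch_are_overlapping_rectangles(
    rectangle1: Tuple[Point, Point], rectangle2: Tuple[Point, Point]
) -> bool:
    # Each rectangle is given by two opposite corners; only x and y matter.
    (a1, a2), (b1, b2) = rectangle1, rectangle2
    x_overlap = (
        min(a1.x, a2.x) <= max(b1.x, b2.x) and min(b1.x, b2.x) <= max(a1.x, a2.x)
    )
    y_overlap = (
        min(a1.y, a2.y) <= max(b1.y, b2.y) and min(b1.y, b2.y) <= max(a1.y, a2.y)
    )
    return x_overlap and y_overlap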
diff --git a/api/tests/opentrons/protocol_api/core/engine/test_protocol_core.py b/api/tests/opentrons/protocol_api/core/engine/test_protocol_core.py
index 749f6cc4f60..dfe29bc10d0 100644
--- a/api/tests/opentrons/protocol_api/core/engine/test_protocol_core.py
+++ b/api/tests/opentrons/protocol_api/core/engine/test_protocol_core.py
@@ -3,11 +3,14 @@
from typing import Optional, Type, cast, Tuple
import pytest
-from pytest_lazyfixture import lazy_fixture # type: ignore[import]
+from pytest_lazyfixture import lazy_fixture # type: ignore[import-untyped]
from decoy import Decoy
from opentrons_shared_data.deck import load as load_deck
-from opentrons_shared_data.deck.dev_types import DeckDefinitionV4, SlotDefV3
+from opentrons_shared_data.deck.dev_types import (
+ DeckDefinitionV5,
+ SlotDefV3,
+)
from opentrons_shared_data.pipette.dev_types import PipetteNameType
from opentrons_shared_data.labware.dev_types import (
LabwareDefinition as LabwareDefDict,
@@ -16,7 +19,7 @@
from opentrons_shared_data.labware.labware_definition import LabwareDefinition
from opentrons_shared_data.robot.dev_types import RobotType
-from opentrons.types import DeckSlotName, Mount, MountType, Point
+from opentrons.types import DeckSlotName, StagingSlotName, Mount, MountType, Point
from opentrons.protocol_api import OFF_DECK
from opentrons.hardware_control import SyncHardwareAPI, SynchronousAdapter
from opentrons.hardware_control.modules import AbstractModule, ModuleType
@@ -33,6 +36,7 @@
DeckSlotLocation,
ModuleLocation,
OnLabwareLocation,
+ AddressableAreaLocation,
ModuleDefinition,
LabwareMovementStrategy,
LoadedLabware,
@@ -65,6 +69,7 @@
load_labware_params,
)
from opentrons.protocol_api._liquid import Liquid
+from opentrons.protocol_api.disposal_locations import TrashBin, WasteChute
from opentrons.protocol_api.core.engine.exceptions import InvalidModuleLocationError
from opentrons.protocol_api.core.engine.module_core import (
TemperatureModuleCore,
@@ -81,17 +86,19 @@
STANDARD_OT3_DECK,
)
+from ... import versions_below, versions_at_or_above
+
@pytest.fixture(scope="session")
-def ot2_standard_deck_def() -> DeckDefinitionV4:
+def ot2_standard_deck_def() -> DeckDefinitionV5:
"""Get the OT-2 standard deck definition."""
- return load_deck(STANDARD_OT2_DECK, 4)
+ return load_deck(STANDARD_OT2_DECK, 5)
@pytest.fixture(scope="session")
-def ot3_standard_deck_def() -> DeckDefinitionV4:
+def ot3_standard_deck_def() -> DeckDefinitionV5:
"""Get the OT-2 standard deck definition."""
- return load_deck(STANDARD_OT3_DECK, 4)
+ return load_deck(STANDARD_OT3_DECK, 5)
@pytest.fixture(autouse=True)
@@ -144,6 +151,8 @@ def mock_sync_hardware_api(decoy: Decoy) -> SyncHardwareAPI:
@pytest.fixture
+# APIv2.15 because we're expecting a fixed trash.
+@pytest.mark.parametrize("api_version", [APIVersion(2, 15)])
def subject(
decoy: Decoy,
mock_engine_client: EngineClient,
@@ -175,15 +184,34 @@ def test_api_version(
assert subject.api_version == api_version
-# def test_get_slot_definition(ot2_standard_deck_def: DeckDefinitionV3, subject: ProtocolCore, decoy: Decoy) -> None:
-# """It should return a deck slot's definition."""
-# decoy.when(subject._engine_client.state.labware.get_slot_definition(5))
-# result = subject.get_slot_definition(DeckSlotName.SLOT_6)
-#
-# assert result["id"] == "6"
-# assert result == ot2_standard_deck_def["locations"]["orderedSlots"][5]
+def test_get_slot_definition(
+ ot2_standard_deck_def: DeckDefinitionV5, subject: ProtocolCore, decoy: Decoy
+) -> None:
+ """It should return a deck slot's definition."""
+ expected_slot_def = cast(
+ SlotDefV3,
+ {
+ "id": "abc",
+ "position": [1, 2, 3],
+ "boundingBox": {
+ "xDimension": 4,
+ "yDimension": 5,
+ "zDimension": 6,
+ },
+ "displayName": "xyz",
+ "compatibleModuleTypes": [],
+ },
+ )
+ decoy.when(
+ subject._engine_client.state.addressable_areas.get_slot_definition(
+ DeckSlotName.SLOT_6.id
+ )
+ ).then_return(expected_slot_def)
+ assert subject.get_slot_definition(DeckSlotName.SLOT_6) == expected_slot_def
+# APIv2.15 because we're expecting a fixed trash.
+@pytest.mark.parametrize("api_version", [APIVersion(2, 15)])
def test_fixed_trash(subject: ProtocolCore) -> None:
"""It should have a single labware core for the fixed trash."""
result = subject.fixed_trash
@@ -203,15 +231,16 @@ def test_get_slot_item_empty(
decoy.when(
mock_engine_client.state.geometry.get_slot_item(
slot_name=DeckSlotName.SLOT_1,
- allowed_labware_ids={"fixed-trash-123"},
- allowed_module_ids=set(),
)
).then_return(None)
assert subject.get_slot_item(DeckSlotName.SLOT_1) is None
-def test_load_instrument(
+@pytest.mark.parametrize(
+ "api_version", versions_below(APIVersion(2, 19), flex_only=False)
+)
+def test_load_instrument_pre_219(
decoy: Decoy,
mock_sync_hardware_api: SyncHardwareAPI,
mock_engine_client: EngineClient,
@@ -220,7 +249,43 @@ def test_load_instrument(
"""It should issue a LoadPipette command."""
decoy.when(
mock_engine_client.load_pipette(
- pipette_name=PipetteNameType.P300_SINGLE, mount=MountType.LEFT
+ pipette_name=PipetteNameType.P300_SINGLE,
+ mount=MountType.LEFT,
+ tip_overlap_version="v0",
+ )
+ ).then_return(commands.LoadPipetteResult(pipetteId="cool-pipette"))
+
+ decoy.when(
+ mock_engine_client.state.pipettes.get_flow_rates("cool-pipette")
+ ).then_return(
+ FlowRates(
+ default_aspirate={"1.1": 22},
+ default_dispense={"3.3": 44},
+ default_blow_out={"5.5": 66},
+ ),
+ )
+
+ result = subject.load_instrument(
+ instrument_name=PipetteNameType.P300_SINGLE, mount=Mount.LEFT
+ )
+
+ assert isinstance(result, InstrumentCore)
+ assert result.pipette_id == "cool-pipette"
+
+
+@pytest.mark.parametrize("api_version", versions_at_or_above(APIVersion(2, 19)))
+def test_load_instrument_post_219(
+ decoy: Decoy,
+ mock_sync_hardware_api: SyncHardwareAPI,
+ mock_engine_client: EngineClient,
+ subject: ProtocolCore,
+) -> None:
+ """It should issue a LoadPipette command."""
+ decoy.when(
+ mock_engine_client.load_pipette(
+ pipette_name=PipetteNameType.P300_SINGLE,
+ mount=MountType.LEFT,
+ tip_overlap_version="v1",
)
).then_return(commands.LoadPipetteResult(pipetteId="cool-pipette"))
@@ -291,13 +356,14 @@ def test_load_labware(
assert isinstance(result, LabwareCore)
assert result.labware_id == "abc123"
- assert subject.get_labware_cores() == [subject.fixed_trash, result]
+ assert subject.get_labware_cores() == [result]
decoy.verify(
deck_conflict.check(
engine_state=mock_engine_client.state,
- existing_labware_ids=["fixed-trash-123"],
+ existing_labware_ids=[],
existing_module_ids=[],
+ existing_disposal_locations=[],
new_labware_id="abc123",
)
)
@@ -305,8 +371,6 @@ def test_load_labware(
decoy.when(
mock_engine_client.state.geometry.get_slot_item(
slot_name=DeckSlotName.SLOT_5,
- allowed_labware_ids={"fixed-trash-123", "abc123"},
- allowed_module_ids=set(),
)
).then_return(
LoadedLabware.construct(id="abc123") # type: ignore[call-arg]
@@ -315,6 +379,78 @@ def test_load_labware(
assert subject.get_slot_item(DeckSlotName.SLOT_5) is result
+def test_load_labware_on_staging_slot(
+ decoy: Decoy,
+ mock_engine_client: EngineClient,
+ subject: ProtocolCore,
+) -> None:
+ """It should issue a LoadLabware command for a labware on a staging slot."""
+ decoy.when(
+ mock_engine_client.state.labware.find_custom_labware_load_params()
+ ).then_return([EngineLabwareLoadParams("hello", "world", 654)])
+
+ decoy.when(
+ load_labware_params.resolve(
+ "some_labware",
+ "a_namespace",
+ 456,
+ [EngineLabwareLoadParams("hello", "world", 654)],
+ )
+ ).then_return(("some_namespace", 9001))
+
+ decoy.when(
+ mock_engine_client.load_labware(
+ location=AddressableAreaLocation(addressableAreaName="B4"),
+ load_name="some_labware",
+ display_name="some_display_name",
+ namespace="some_namespace",
+ version=9001,
+ )
+ ).then_return(
+ commands.LoadLabwareResult(
+ labwareId="abc123",
+ definition=LabwareDefinition.construct(), # type: ignore[call-arg]
+ offsetId=None,
+ )
+ )
+
+ decoy.when(mock_engine_client.state.labware.get_definition("abc123")).then_return(
+ LabwareDefinition.construct(ordering=[]) # type: ignore[call-arg]
+ )
+
+ result = subject.load_labware(
+ load_name="some_labware",
+ location=StagingSlotName.SLOT_B4,
+ label="some_display_name", # maps to optional display name
+ namespace="a_namespace",
+ version=456,
+ )
+
+ assert isinstance(result, LabwareCore)
+ assert result.labware_id == "abc123"
+ assert subject.get_labware_cores() == [result]
+
+ decoy.verify(
+ deck_conflict.check(
+ engine_state=mock_engine_client.state,
+ existing_labware_ids=[],
+ existing_module_ids=[],
+ existing_disposal_locations=[],
+ new_labware_id="abc123",
+ )
+ )
+
+ decoy.when(
+ mock_engine_client.state.geometry.get_slot_item(
+ slot_name=StagingSlotName.SLOT_B4,
+ )
+ ).then_return(
+ LoadedLabware.construct(id="abc123") # type: ignore[call-arg]
+ )
+
+ assert subject.get_slot_item(StagingSlotName.SLOT_B4) is result
+
+
def test_load_labware_on_labware(
decoy: Decoy,
mock_engine_client: EngineClient,
@@ -371,13 +507,14 @@ def test_load_labware_on_labware(
assert isinstance(result, LabwareCore)
assert result.labware_id == "abc123"
- assert subject.get_labware_cores() == [subject.fixed_trash, result]
+ assert subject.get_labware_cores() == [result]
decoy.verify(
deck_conflict.check(
engine_state=mock_engine_client.state,
- existing_labware_ids=["fixed-trash-123"],
+ existing_labware_ids=[],
existing_module_ids=[],
+ existing_disposal_locations=[],
new_labware_id="abc123",
)
)
@@ -434,13 +571,14 @@ def test_load_labware_off_deck(
assert isinstance(result, LabwareCore)
assert result.labware_id == "abc123"
- assert subject.get_labware_cores() == [subject.fixed_trash, result]
+ assert subject.get_labware_cores() == [result]
decoy.verify(
deck_conflict.check(
engine_state=mock_engine_client.state,
- existing_labware_ids=["fixed-trash-123"],
+ existing_labware_ids=[],
existing_module_ids=[],
+ existing_disposal_locations=[],
new_labware_id="abc123",
)
)
@@ -493,13 +631,14 @@ def test_load_adapter(
assert isinstance(result, LabwareCore)
assert result.labware_id == "abc123"
- assert subject.get_labware_cores() == [subject.fixed_trash, result]
+ assert subject.get_labware_cores() == [result]
decoy.verify(
deck_conflict.check(
engine_state=mock_engine_client.state,
- existing_labware_ids=["fixed-trash-123"],
+ existing_labware_ids=[],
existing_module_ids=[],
+ existing_disposal_locations=[],
new_labware_id="abc123",
)
)
@@ -507,8 +646,6 @@ def test_load_adapter(
decoy.when(
mock_engine_client.state.geometry.get_slot_item(
slot_name=DeckSlotName.SLOT_5,
- allowed_labware_ids={"fixed-trash-123", "abc123"},
- allowed_module_ids=set(),
)
).then_return(
LoadedLabware.construct(id="abc123") # type: ignore[call-arg]
@@ -517,6 +654,150 @@ def test_load_adapter(
assert subject.get_slot_item(DeckSlotName.SLOT_5) is result
+def test_load_adapter_on_staging_slot(
+ decoy: Decoy,
+ mock_engine_client: EngineClient,
+ subject: ProtocolCore,
+) -> None:
+ """It should issue a LoadLabware command for an adapter."""
+ decoy.when(
+ mock_engine_client.state.labware.find_custom_labware_load_params()
+ ).then_return([EngineLabwareLoadParams("hello", "world", 654)])
+
+ decoy.when(
+ load_labware_params.resolve(
+ "some_adapter",
+ "a_namespace",
+ 456,
+ [EngineLabwareLoadParams("hello", "world", 654)],
+ )
+ ).then_return(("some_namespace", 9001))
+
+ decoy.when(
+ mock_engine_client.load_labware(
+ location=AddressableAreaLocation(addressableAreaName="B4"),
+ load_name="some_adapter",
+ namespace="some_namespace",
+ version=9001,
+ )
+ ).then_return(
+ commands.LoadLabwareResult(
+ labwareId="abc123",
+ definition=LabwareDefinition.construct(), # type: ignore[call-arg]
+ offsetId=None,
+ )
+ )
+
+ decoy.when(mock_engine_client.state.labware.get_definition("abc123")).then_return(
+ LabwareDefinition.construct(ordering=[]) # type: ignore[call-arg]
+ )
+
+ result = subject.load_adapter(
+ load_name="some_adapter",
+ location=StagingSlotName.SLOT_B4,
+ namespace="a_namespace",
+ version=456,
+ )
+
+ assert isinstance(result, LabwareCore)
+ assert result.labware_id == "abc123"
+ assert subject.get_labware_cores() == [result]
+
+ decoy.verify(
+ deck_conflict.check(
+ engine_state=mock_engine_client.state,
+ existing_labware_ids=[],
+ existing_module_ids=[],
+ existing_disposal_locations=[],
+ new_labware_id="abc123",
+ )
+ )
+
+ decoy.when(
+ mock_engine_client.state.geometry.get_slot_item(
+ slot_name=StagingSlotName.SLOT_B4,
+ )
+ ).then_return(
+ LoadedLabware.construct(id="abc123") # type: ignore[call-arg]
+ )
+
+ assert subject.get_slot_item(StagingSlotName.SLOT_B4) is result
+
+
+def test_load_trash_bin(
+ decoy: Decoy,
+ mock_engine_client: EngineClient,
+ subject: ProtocolCore,
+) -> None:
+ """It should load a trash bin."""
+ prior_disposal_locations = subject.get_disposal_locations()
+ trash = subject.load_trash_bin(
+ slot_name=DeckSlotName.SLOT_D2, area_name="my trendy area"
+ )
+ assert isinstance(trash, TrashBin)
+ decoy.verify(
+ mock_engine_client.state.addressable_areas.raise_if_area_not_in_deck_configuration(
+ "my trendy area"
+ ),
+ deck_conflict.check(
+ engine_state=mock_engine_client.state,
+ new_trash_bin=trash,
+ existing_disposal_locations=prior_disposal_locations,
+ existing_labware_ids=[],
+ existing_module_ids=[],
+ ),
+ mock_engine_client.add_addressable_area("my trendy area"),
+ )
+
+ assert trash in subject.get_disposal_locations()
+
+
+def test_load_ot2_fixed_trash_bin(
+ decoy: Decoy, mock_engine_client: EngineClient, subject: ProtocolCore
+) -> None:
+ """It should load a fixed trash bin for the OT-2."""
+ prior_disposal_locations = subject.get_disposal_locations()
+ subject.load_ot2_fixed_trash_bin()
+ fixed_trash = subject.get_disposal_locations()[-1]
+ assert isinstance(fixed_trash, TrashBin)
+ assert fixed_trash.area_name == "fixedTrash"
+ decoy.verify(
+ mock_engine_client.state.addressable_areas.raise_if_area_not_in_deck_configuration(
+ "fixedTrash"
+ ),
+ times=0,
+ )
+ decoy.verify(
+ deck_conflict.check(
+ engine_state=mock_engine_client.state,
+ new_trash_bin=fixed_trash,
+ existing_disposal_locations=prior_disposal_locations,
+ existing_labware_ids=[],
+ existing_module_ids=[],
+ ),
+ times=0,
+ )
+ decoy.verify(mock_engine_client.add_addressable_area("fixedTrash"), times=0)
+
+
+def test_load_waste_chute(
+ decoy: Decoy,
+ mock_engine_client: EngineClient,
+ subject: ProtocolCore,
+) -> None:
+ """It should load a waste chute."""
+ waste_chute = subject.load_waste_chute()
+ assert isinstance(waste_chute, WasteChute)
+ decoy.verify(
+ mock_engine_client.state.addressable_areas.raise_if_area_not_in_deck_configuration(
+ "1ChannelWasteChute"
+ ),
+ mock_engine_client.add_addressable_area("1ChannelWasteChute"),
+ )
+
+ assert waste_chute in subject.get_disposal_locations()
+
+
@pytest.mark.parametrize(
argnames=["use_gripper", "pause_for_manual_move", "expected_strategy"],
argvalues=[
@@ -568,7 +849,53 @@ def test_move_labware(
if pick_up_offset
else None,
drop_offset=LabwareOffsetVector(x=4, y=5, z=6) if drop_offset else None,
- )
+ ),
+ deck_conflict.check(
+ engine_state=mock_engine_client.state,
+ existing_labware_ids=[],
+ existing_module_ids=[],
+ existing_disposal_locations=[],
+ new_labware_id="labware-id",
+ ),
+ )
+
+
+def test_move_labware_on_staging_slot(
+ decoy: Decoy,
+ subject: ProtocolCore,
+ mock_engine_client: EngineClient,
+ api_version: APIVersion,
+) -> None:
+ """It should issue a move labware command to the engine."""
+ decoy.when(
+ mock_engine_client.state.labware.get_definition("labware-id")
+ ).then_return(
+ LabwareDefinition.construct(ordering=[]) # type: ignore[call-arg]
+ )
+ labware = LabwareCore(labware_id="labware-id", engine_client=mock_engine_client)
+ subject.move_labware(
+ labware_core=labware,
+ new_location=StagingSlotName.SLOT_B4,
+ use_gripper=False,
+ pause_for_manual_move=True,
+ pick_up_offset=None,
+ drop_offset=None,
+ )
+ decoy.verify(
+ mock_engine_client.move_labware(
+ labware_id="labware-id",
+ new_location=AddressableAreaLocation(addressableAreaName="B4"),
+ strategy=LabwareMovementStrategy.MANUAL_MOVE_WITH_PAUSE,
+ pick_up_offset=None,
+ drop_offset=None,
+ ),
+ deck_conflict.check(
+ engine_state=mock_engine_client.state,
+ existing_labware_ids=[],
+ existing_module_ids=[],
+ existing_disposal_locations=[],
+ new_labware_id="labware-id",
+ ),
)
@@ -605,7 +932,14 @@ def test_move_labware_on_non_connected_module(
strategy=LabwareMovementStrategy.MANUAL_MOVE_WITH_PAUSE,
pick_up_offset=None,
drop_offset=None,
- )
+ ),
+ deck_conflict.check(
+ engine_state=mock_engine_client.state,
+ existing_labware_ids=[],
+ existing_module_ids=[],
+ existing_disposal_locations=[],
+ new_labware_id="labware-id",
+ ),
)
@@ -638,11 +972,17 @@ def test_move_labware_off_deck(
strategy=LabwareMovementStrategy.MANUAL_MOVE_WITH_PAUSE,
pick_up_offset=None,
drop_offset=None,
- )
+ ),
+ deck_conflict.check(
+ engine_state=mock_engine_client.state,
+ existing_labware_ids=[],
+ existing_module_ids=[],
+ existing_disposal_locations=[],
+ new_labware_id="labware-id",
+ ),
)
-@pytest.mark.parametrize("api_version", [APIVersion(2, 3)])
def test_load_labware_on_module(
decoy: Decoy,
mock_engine_client: EngineClient,
@@ -705,8 +1045,9 @@ def test_load_labware_on_module(
decoy.verify(
deck_conflict.check(
engine_state=mock_engine_client.state,
- existing_labware_ids=["fixed-trash-123"],
+ existing_labware_ids=[],
existing_module_ids=[],
+ existing_disposal_locations=[],
new_labware_id="abc123",
)
)
@@ -778,8 +1119,9 @@ def test_load_labware_on_non_connected_module(
decoy.verify(
deck_conflict.check(
engine_state=mock_engine_client.state,
- existing_labware_ids=["fixed-trash-123"],
+ existing_labware_ids=[],
existing_module_ids=[],
+ existing_disposal_locations=[],
new_labware_id="abc123",
)
)
@@ -856,7 +1198,7 @@ def test_add_labware_definition(
EngineModuleModel.THERMOCYCLER_MODULE_V2,
ThermocyclerModuleCore,
lazy_fixture("ot3_standard_deck_def"),
- DeckSlotName.SLOT_A1,
+ DeckSlotName.SLOT_B1,
"OT-3 Standard",
),
(
@@ -869,6 +1211,8 @@ def test_add_labware_definition(
),
],
)
+# APIv2.15 because we're expecting a fixed trash.
+@pytest.mark.parametrize("api_version", [APIVersion(2, 15)])
def test_load_module(
decoy: Decoy,
mock_engine_client: EngineClient,
@@ -877,7 +1221,7 @@ def test_load_module(
engine_model: EngineModuleModel,
expected_core_cls: Type[ModuleCore],
subject: ProtocolCore,
- deck_def: DeckDefinitionV4,
+ deck_def: DeckDefinitionV5,
slot_name: DeckSlotName,
robot_type: RobotType,
) -> None:
@@ -893,12 +1237,22 @@ def test_load_module(
[mock_hw_mod_1, mock_hw_mod_2]
)
- decoy.when(subject.get_slot_definition(slot_name)).then_return(
- cast(
- SlotDefV3,
- {"compatibleModuleTypes": [ModuleType.from_model(requested_model)]},
+ if robot_type == "OT-2 Standard":
+ decoy.when(subject.get_slot_definition(slot_name)).then_return(
+ cast(
+ SlotDefV3,
+ {"compatibleModuleTypes": [ModuleType.from_model(requested_model)]},
+ )
)
- )
+ else:
+ decoy.when(
+ mock_engine_client.state.addressable_areas.state.deck_definition
+ ).then_return(deck_def)
+ decoy.when(
+ mock_engine_client.state.addressable_areas.get_cutout_id_by_deck_slot_name(
+ slot_name
+ )
+ ).then_return("cutout" + slot_name.value)
decoy.when(mock_engine_client.state.config.robot_type).then_return(robot_type)
@@ -931,6 +1285,7 @@ def test_load_module(
engine_state=mock_engine_client.state,
existing_labware_ids=["fixed-trash-123"],
existing_module_ids=[],
+ existing_disposal_locations=[],
new_module_id="abc123",
)
)
@@ -938,8 +1293,6 @@ def test_load_module(
decoy.when(
mock_engine_client.state.geometry.get_slot_item(
slot_name=slot_name,
- allowed_labware_ids={"fixed-trash-123"},
- allowed_module_ids={"abc123"},
)
).then_return(
LoadedModule.construct(id="abc123") # type: ignore[call-arg]
@@ -952,103 +1305,14 @@ def test_load_module(
assert subject.get_labware_on_module(result) is None
-@pytest.mark.parametrize(
- (
- "requested_model",
- "engine_model",
- "expected_core_cls",
- "deck_def",
- "slot_name",
- "robot_type",
- ),
- [
- (
- TemperatureModuleModel.TEMPERATURE_V2,
- EngineModuleModel.TEMPERATURE_MODULE_V2,
- TemperatureModuleCore,
- lazy_fixture("ot3_standard_deck_def"),
- DeckSlotName.SLOT_D2,
- "OT-3 Standard",
- ),
- (
- MagneticModuleModel.MAGNETIC_V2,
- EngineModuleModel.MAGNETIC_MODULE_V2,
- MagneticModuleCore,
- lazy_fixture("ot3_standard_deck_def"),
- DeckSlotName.SLOT_A2,
- "OT-3 Standard",
- ),
- (
- ThermocyclerModuleModel.THERMOCYCLER_V1,
- EngineModuleModel.THERMOCYCLER_MODULE_V1,
- ThermocyclerModuleCore,
- lazy_fixture("ot2_standard_deck_def"),
- DeckSlotName.SLOT_1,
- "OT-2 Standard",
- ),
- (
- ThermocyclerModuleModel.THERMOCYCLER_V2,
- EngineModuleModel.THERMOCYCLER_MODULE_V2,
- ThermocyclerModuleCore,
- lazy_fixture("ot3_standard_deck_def"),
- DeckSlotName.SLOT_A2,
- "OT-3 Standard",
- ),
- (
- HeaterShakerModuleModel.HEATER_SHAKER_V1,
- EngineModuleModel.HEATER_SHAKER_MODULE_V1,
- HeaterShakerModuleCore,
- lazy_fixture("ot3_standard_deck_def"),
- DeckSlotName.SLOT_A2,
- "OT-3 Standard",
- ),
- ],
-)
-def test_load_module_raises_wrong_location(
- decoy: Decoy,
- mock_engine_client: EngineClient,
- mock_sync_hardware_api: SyncHardwareAPI,
- requested_model: ModuleModel,
- engine_model: EngineModuleModel,
- expected_core_cls: Type[ModuleCore],
- subject: ProtocolCore,
- deck_def: DeckDefinitionV4,
- slot_name: DeckSlotName,
- robot_type: RobotType,
-) -> None:
- """It should issue a load module engine command."""
- mock_hw_mod_1 = decoy.mock(cls=AbstractModule)
- mock_hw_mod_2 = decoy.mock(cls=AbstractModule)
-
- decoy.when(mock_hw_mod_1.device_info).then_return({"serial": "abc123"})
- decoy.when(mock_hw_mod_2.device_info).then_return({"serial": "xyz789"})
- decoy.when(mock_sync_hardware_api.attached_modules).then_return(
- [mock_hw_mod_1, mock_hw_mod_2]
- )
-
- decoy.when(mock_engine_client.state.config.robot_type).then_return(robot_type)
-
- decoy.when(subject.get_slot_definition(slot_name)).then_return(
- cast(SlotDefV3, {"compatibleModuleTypes": []})
- )
-
- with pytest.raises(
- ValueError,
- match=f"A {ModuleType.from_model(requested_model).value} cannot be loaded into slot {slot_name}",
- ):
- subject.load_module(
- model=requested_model,
- deck_slot=slot_name,
- configuration=None,
- )
-
-
+# APIv2.15 because we're expecting a fixed trash.
+@pytest.mark.parametrize("api_version", [APIVersion(2, 15)])
def test_load_mag_block(
decoy: Decoy,
mock_engine_client: EngineClient,
mock_sync_hardware_api: SyncHardwareAPI,
subject: ProtocolCore,
- ot3_standard_deck_def: DeckDefinitionV4,
+ ot3_standard_deck_def: DeckDefinitionV5,
) -> None:
"""It should issue a load module engine command."""
definition = ModuleDefinition.construct() # type: ignore[call-arg]
@@ -1065,6 +1329,14 @@ def test_load_mag_block(
},
)
)
+ decoy.when(
+ mock_engine_client.state.addressable_areas.state.deck_definition
+ ).then_return(ot3_standard_deck_def)
+ decoy.when(
+ mock_engine_client.state.addressable_areas.get_cutout_id_by_deck_slot_name(
+ DeckSlotName.SLOT_A2
+ )
+ ).then_return("cutout" + DeckSlotName.SLOT_A2.value)
decoy.when(
mock_engine_client.load_module(
@@ -1095,6 +1367,7 @@ def test_load_mag_block(
engine_state=mock_engine_client.state,
existing_labware_ids=["fixed-trash-123"],
existing_module_ids=[],
+ existing_disposal_locations=[],
new_module_id="abc123",
)
)
@@ -1102,8 +1375,6 @@ def test_load_mag_block(
decoy.when(
mock_engine_client.state.geometry.get_slot_item(
slot_name=DeckSlotName.SLOT_1,
- allowed_labware_ids={"fixed-trash-123"},
- allowed_module_ids={"abc123"},
)
).then_return(
LoadedModule.construct(id="abc123") # type: ignore[call-arg]
@@ -1140,7 +1411,7 @@ def test_load_module_thermocycler_with_no_location(
requested_model: ModuleModel,
engine_model: EngineModuleModel,
subject: ProtocolCore,
- deck_def: DeckDefinitionV4,
+ deck_def: DeckDefinitionV5,
expected_slot: DeckSlotName,
) -> None:
"""It should issue a load module engine command with location at 7."""
@@ -1150,12 +1421,14 @@ def test_load_module_thermocycler_with_no_location(
decoy.when(mock_hw_mod.device_info).then_return({"serial": "xyz789"})
decoy.when(mock_sync_hardware_api.attached_modules).then_return([mock_hw_mod])
decoy.when(mock_engine_client.state.config.robot_type).then_return("OT-3 Standard")
- decoy.when(subject.get_slot_definition(expected_slot)).then_return(
- cast(
- SlotDefV3,
- {"compatibleModuleTypes": [ModuleType.from_model(requested_model)]},
+ decoy.when(
+ mock_engine_client.state.addressable_areas.state.deck_definition
+ ).then_return(deck_def)
+ decoy.when(
+ mock_engine_client.state.addressable_areas.get_cutout_id_by_deck_slot_name(
+ expected_slot
)
- )
+ ).then_return("cutout" + expected_slot.value)
decoy.when(
mock_engine_client.load_module(
@@ -1180,8 +1453,9 @@ def test_load_module_thermocycler_with_no_location(
decoy.verify(
deck_conflict.check(
engine_state=mock_engine_client.state,
- existing_labware_ids=["fixed-trash-123"],
+ existing_labware_ids=[],
existing_module_ids=[],
+ existing_disposal_locations=[],
new_module_id="abc123",
)
)
@@ -1289,7 +1563,7 @@ def test_get_deck_definition(
decoy: Decoy, mock_engine_client: EngineClient, subject: ProtocolCore
) -> None:
"""It should return the loaded deck definition from engine state."""
- deck_definition = cast(DeckDefinitionV4, {"schemaVersion": "4"})
+ deck_definition = cast(DeckDefinitionV5, {"schemaVersion": "5"})
decoy.when(mock_engine_client.state.labware.get_deck_definition()).then_return(
deck_definition
@@ -1367,7 +1641,9 @@ def test_get_slot_center(
) -> None:
"""It should return a slot center from engine state."""
decoy.when(
- mock_engine_client.state.labware.get_slot_center_position(DeckSlotName.SLOT_2)
+ mock_engine_client.state.addressable_areas.get_addressable_area_center(
+ DeckSlotName.SLOT_2.id
+ )
).then_return(Point(1, 2, 3))
result = subject.get_slot_center(DeckSlotName.SLOT_2)
@@ -1380,7 +1656,7 @@ def test_get_highest_z(
) -> None:
"""It should return a slot center from engine state."""
decoy.when(
- mock_engine_client.state.geometry.get_all_labware_highest_z()
+ mock_engine_client.state.geometry.get_all_obstacle_highest_z()
).then_return(9001)
result = subject.get_highest_z()
diff --git a/api/tests/opentrons/protocol_api/core/engine/test_stringify.py b/api/tests/opentrons/protocol_api/core/engine/test_stringify.py
index 2ba44a36b97..4ccc8e5f9ba 100644
--- a/api/tests/opentrons/protocol_api/core/engine/test_stringify.py
+++ b/api/tests/opentrons/protocol_api/core/engine/test_stringify.py
@@ -33,9 +33,9 @@ def _make_dummy_module_definition(decoy: Decoy, display_name: str) -> ModuleDefi
def test_well_on_labware_without_user_display_name(decoy: Decoy) -> None:
"""Test stringifying a well on a labware that doesn't have a user-defined label."""
mock_client = decoy.mock(cls=SyncClient)
- decoy.when(mock_client.state.labware.get_display_name("labware-id")).then_return(
- None
- )
+ decoy.when(
+ mock_client.state.labware.get_user_specified_display_name("labware-id")
+ ).then_return(None)
decoy.when(mock_client.state.labware.get_definition("labware-id")).then_return(
_make_dummy_labware_definition(decoy, "definition-display-name")
)
@@ -52,9 +52,9 @@ def test_well_on_labware_without_user_display_name(decoy: Decoy) -> None:
def test_well_on_labware_with_user_display_name(decoy: Decoy) -> None:
"""Test stringifying a well on a labware that does have a user-defined label."""
mock_client = decoy.mock(cls=SyncClient)
- decoy.when(mock_client.state.labware.get_display_name("labware-id")).then_return(
- "user-display-name"
- )
+ decoy.when(
+ mock_client.state.labware.get_user_specified_display_name("labware-id")
+ ).then_return("user-display-name")
decoy.when(mock_client.state.labware.get_definition("labware-id")).then_return(
_make_dummy_labware_definition(decoy, "definition-display-name")
)
@@ -72,9 +72,9 @@ def test_well_on_labware_with_complicated_location(decoy: Decoy) -> None:
"""Test stringifying a well on a labware with a deeply-nested location."""
mock_client = decoy.mock(cls=SyncClient)
- decoy.when(mock_client.state.labware.get_display_name("labware-id-1")).then_return(
- None
- )
+ decoy.when(
+ mock_client.state.labware.get_user_specified_display_name("labware-id-1")
+ ).then_return(None)
decoy.when(mock_client.state.labware.get_definition("labware-id-1")).then_return(
_make_dummy_labware_definition(decoy, "lw-1-display-name")
)
@@ -82,9 +82,9 @@ def test_well_on_labware_with_complicated_location(decoy: Decoy) -> None:
OnLabwareLocation(labwareId="labware-id-2")
)
- decoy.when(mock_client.state.labware.get_display_name("labware-id-2")).then_return(
- None
- )
+ decoy.when(
+ mock_client.state.labware.get_user_specified_display_name("labware-id-2")
+ ).then_return(None)
decoy.when(mock_client.state.labware.get_definition("labware-id-2")).then_return(
_make_dummy_labware_definition(decoy, "lw-2-display-name")
)
diff --git a/api/tests/opentrons/protocol_api/core/legacy/test_module_geometry.py b/api/tests/opentrons/protocol_api/core/legacy/test_module_geometry.py
index 22e787baab7..744235ea03a 100644
--- a/api/tests/opentrons/protocol_api/core/legacy/test_module_geometry.py
+++ b/api/tests/opentrons/protocol_api/core/legacy/test_module_geometry.py
@@ -3,7 +3,7 @@
import mock
from typing import ContextManager, Any, Optional
-from pytest_lazyfixture import lazy_fixture # type: ignore[import]
+from pytest_lazyfixture import lazy_fixture # type: ignore[import-untyped]
from contextlib import nullcontext as does_not_raise
from opentrons.types import Location, Point
diff --git a/api/tests/opentrons/protocol_api/core/legacy/test_protocol_context_implementation.py b/api/tests/opentrons/protocol_api/core/legacy/test_protocol_context_implementation.py
index 6961658b712..a2993444d6b 100644
--- a/api/tests/opentrons/protocol_api/core/legacy/test_protocol_context_implementation.py
+++ b/api/tests/opentrons/protocol_api/core/legacy/test_protocol_context_implementation.py
@@ -9,7 +9,7 @@
from opentrons_shared_data.pipette.dev_types import PipetteNameType
from opentrons_shared_data.module.dev_types import ModuleDefinitionV3
-from opentrons.types import DeckSlotName, Location, Mount, Point
+from opentrons.types import DeckSlotName, StagingSlotName, Location, Mount, Point
from opentrons.util.broker import Broker
from opentrons.hardware_control import SyncHardwareAPI
@@ -179,6 +179,20 @@ def test_load_labware_off_deck_raises(
)
+def test_load_labware_on_staging_slot_raises(
+ subject: LegacyProtocolCore,
+) -> None:
+ """It should raise an api error when loading onto a staging slot."""
+ with pytest.raises(APIVersionError):
+ subject.load_labware(
+ load_name="cool load name",
+ location=StagingSlotName.SLOT_B4,
+ label="cool label",
+ namespace="cool namespace",
+ version=1337,
+ )
+
+
def test_load_labware(
decoy: Decoy,
mock_deck: Deck,
diff --git a/api/tests/opentrons/protocol_api/test_deck.py b/api/tests/opentrons/protocol_api/test_deck.py
index b5464603036..f471cb936e1 100644
--- a/api/tests/opentrons/protocol_api/test_deck.py
+++ b/api/tests/opentrons/protocol_api/test_deck.py
@@ -1,11 +1,11 @@
"""Tests for opentrons.legacy.Deck."""
import inspect
-from typing import cast
+from typing import cast, Dict
import pytest
from decoy import Decoy
-from opentrons_shared_data.deck.dev_types import DeckDefinitionV4
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5, SlotDefV3
from opentrons.motion_planning import adjacent_slots_getters as mock_adjacent_slots
from opentrons.protocols.api_support.types import APIVersion
@@ -23,10 +23,10 @@
@pytest.fixture
-def deck_definition() -> DeckDefinitionV4:
+def deck_definition() -> DeckDefinitionV5:
"""Get a deck definition value object."""
return cast(
- DeckDefinitionV4,
+ DeckDefinitionV5,
{
"locations": {"addressableAreas": [], "calibrationPoints": []},
"cutoutFixtures": {},
@@ -37,7 +37,7 @@ def deck_definition() -> DeckDefinitionV4:
@pytest.fixture
def api_version() -> APIVersion:
"""Get a dummy `APIVersion` with which to configure the subject."""
- return APIVersion(123, 456)
+ return APIVersion(1, 234)
@pytest.fixture(autouse=True)
@@ -66,16 +66,36 @@ def mock_core_map(decoy: Decoy) -> LoadedCoreMap:
return decoy.mock(cls=LoadedCoreMap)
+@pytest.fixture
+def slot_definitions_by_name() -> Dict[str, SlotDefV3]:
+ """Get a dictionary of slot names to slot definitions."""
+ return {"1": {}}
+
+
+@pytest.fixture
+def staging_slot_definitions_by_name() -> Dict[str, SlotDefV3]:
+ """Get a dictionary of staging slot names to slot definitions."""
+ return {"2": {}}
+
+
@pytest.fixture
def subject(
decoy: Decoy,
- deck_definition: DeckDefinitionV4,
+ deck_definition: DeckDefinitionV5,
mock_protocol_core: ProtocolCore,
mock_core_map: LoadedCoreMap,
api_version: APIVersion,
+ slot_definitions_by_name: Dict[str, SlotDefV3],
+ staging_slot_definitions_by_name: Dict[str, SlotDefV3],
) -> Deck:
"""Get a Deck test subject with its dependencies mocked out."""
decoy.when(mock_protocol_core.get_deck_definition()).then_return(deck_definition)
+ decoy.when(mock_protocol_core.get_slot_definitions()).then_return(
+ slot_definitions_by_name
+ )
+ decoy.when(mock_protocol_core.get_staging_slot_definitions()).then_return(
+ staging_slot_definitions_by_name
+ )
return Deck(
protocol_core=mock_protocol_core,
@@ -228,120 +248,126 @@ def test_delitem_raises_if_slot_has_module(
del subject[2]
-# TODO(jbl 10-30-2023) the following commented out tests are too tightly coupled to DeckDefinitionV3 to easily port over
-# Either refactor them when the deck class is updated/made anew or delete them later
-# @pytest.mark.parametrize(
-# "deck_definition",
-# [
-# {
-# "locations": {
-# "orderedSlots": [
-# {"id": "1"},
-# {"id": "2"},
-# {"id": "3"},
-# ],
-# "calibrationPoints": [],
-# }
-# },
-# ],
-# )
-# def test_slot_keys_iter(subject: Deck) -> None:
-# """It should provide an iterable interface to deck slots."""
-# result = list(subject)
-#
-# assert len(subject) == 3
-# assert result == ["1", "2", "3"]
-
-
-# @pytest.mark.parametrize(
-# "deck_definition",
-# [
-# {
-# "locations": {
-# "orderedSlots": [
-# {"id": "fee"},
-# {"id": "foe"},
-# {"id": "fum"},
-# ],
-# "calibrationPoints": [],
-# }
-# },
-# ],
-# )
-# def test_slots_property(subject: Deck) -> None:
-# """It should provide slot definitions."""
-# assert subject.slots == [
-# {"id": "fee"},
-# {"id": "foe"},
-# {"id": "fum"},
-# ]
-
-
-# @pytest.mark.parametrize(
-# "deck_definition",
-# [
-# {
-# "locations": {
-# "orderedSlots": [
-# {"id": DeckSlotName.SLOT_2.id, "displayName": "foobar"},
-# ],
-# "calibrationPoints": [],
-# }
-# },
-# ],
-# )
-# def test_get_slot_definition(
-# decoy: Decoy,
-# mock_protocol_core: ProtocolCore,
-# api_version: APIVersion,
-# subject: Deck,
-# ) -> None:
-# """It should provide slot definitions."""
-# decoy.when(mock_protocol_core.robot_type).then_return("OT-3 Standard")
-# decoy.when(
-# mock_validation.ensure_and_convert_deck_slot(222, api_version, "OT-3 Standard")
-# ).then_return(DeckSlotName.SLOT_2)
-#
-# assert subject.get_slot_definition(222) == {
-# "id": DeckSlotName.SLOT_2.id,
-# "displayName": "foobar",
-# }
-
-
-# @pytest.mark.parametrize(
-# "deck_definition",
-# [
-# {
-# "locations": {
-# "orderedSlots": [
-# {"id": DeckSlotName.SLOT_3.id, "position": [1.0, 2.0, 3.0]},
-# ],
-# "calibrationPoints": [],
-# }
-# },
-# ],
-# )
-# def test_get_position_for(
-# decoy: Decoy,
-# mock_protocol_core: ProtocolCore,
-# api_version: APIVersion,
-# subject: Deck,
-# ) -> None:
-# """It should return a `Location` for a deck slot."""
-# decoy.when(mock_protocol_core.robot_type).then_return("OT-3 Standard")
-# decoy.when(
-# mock_validation.ensure_and_convert_deck_slot(333, api_version, "OT-3 Standard")
-# ).then_return(DeckSlotName.SLOT_3)
-# decoy.when(
-# mock_validation.internal_slot_to_public_string(
-# DeckSlotName.SLOT_3, "OT-3 Standard"
-# )
-# ).then_return("foo")
-#
-# result = subject.position_for(333)
-# assert result.point == Point(x=1.0, y=2.0, z=3.0)
-# assert result.labware.is_slot is True
-# assert str(result.labware) == "foo"
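+# The parametrizations below name existing fixtures ("slot_definitions_by_name", etc.); pytest
+# overrides those fixtures with the parametrized values, which the `subject` fixture then consumes.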
+@pytest.mark.parametrize(
+ argnames=["slot_definitions_by_name", "staging_slot_definitions_by_name"],
+ argvalues=[
+ (
+ {
+ "1": {},
+ "2": {},
+ "3": {},
+ },
+ {"4": {}},
+ )
+ ],
+)
+def test_slot_keys_iter(subject: Deck) -> None:
+ """It should provide an iterable interface to deck slots."""
+ result = list(subject)
+
+ assert len(subject) == 3
+ assert result == ["1", "2", "3"]
+
+
+@pytest.mark.parametrize(
+ argnames=[
+ "slot_definitions_by_name",
+ "staging_slot_definitions_by_name",
+ "api_version",
+ ],
+ argvalues=[
+ (
+ {
+ "1": {},
+ "2": {},
+ "3": {},
+ },
+ {"4": {}},
+ APIVersion(2, 16),
+ )
+ ],
+)
+def test_slot_keys_iter_with_staging_slots(subject: Deck) -> None:
+ """It should provide an iterable interface to deck slots."""
+ result = list(subject)
+
+ assert len(subject) == 4
+ assert result == ["1", "2", "3", "4"]
+
+
+@pytest.mark.parametrize(
+ "slot_definitions_by_name",
+ [
+ {
+ "1": {"id": "fee"},
+ "2": {"id": "foe"},
+ "3": {"id": "fum"},
+ }
+ ],
+)
+def test_slots_property(subject: Deck) -> None:
+ """It should provide slot definitions."""
+ assert subject.slots == [
+ {"id": "fee"},
+ {"id": "foe"},
+ {"id": "fum"},
+ ]
+
+
+@pytest.mark.parametrize(
+ "slot_definitions_by_name",
+ [
+ {
+ "2": {
+ "id": DeckSlotName.SLOT_2.id,
+ "displayName": "foobar",
+ }
+ }
+ ],
+)
+def test_get_slot_definition(
+ decoy: Decoy,
+ mock_protocol_core: ProtocolCore,
+ api_version: APIVersion,
+ subject: Deck,
+) -> None:
+ """It should provide slot definitions."""
+ decoy.when(mock_protocol_core.robot_type).then_return("OT-3 Standard")
+ decoy.when(
+ mock_validation.ensure_and_convert_deck_slot(222, api_version, "OT-3 Standard")
+ ).then_return(DeckSlotName.SLOT_2)
+
+ assert subject.get_slot_definition(222) == {
+ "id": DeckSlotName.SLOT_2.id,
+ "displayName": "foobar",
+ }
+
+
+@pytest.mark.parametrize(
+ "slot_definitions_by_name",
+ [{"3": {"position": [1.0, 2.0, 3.0]}}],
+)
+def test_get_position_for(
+ decoy: Decoy,
+ mock_protocol_core: ProtocolCore,
+ api_version: APIVersion,
+ subject: Deck,
+) -> None:
+ """It should return a `Location` for a deck slot."""
+ decoy.when(mock_protocol_core.robot_type).then_return("OT-3 Standard")
+ decoy.when(
+ mock_validation.ensure_and_convert_deck_slot(333, api_version, "OT-3 Standard")
+ ).then_return(DeckSlotName.SLOT_3)
+ decoy.when(
+ mock_validation.internal_slot_to_public_string(
+ DeckSlotName.SLOT_3, "OT-3 Standard"
+ )
+ ).then_return("foo")
+
+ result = subject.position_for(333)
+ assert result.point == Point(x=1.0, y=2.0, z=3.0)
+ assert result.labware.is_slot is True
+ assert str(result.labware) == "foo"
def test_highest_z(
diff --git a/api/tests/opentrons/protocol_api/test_instrument_context.py b/api/tests/opentrons/protocol_api/test_instrument_context.py
index c181add69f5..d0e18f6fda9 100644
--- a/api/tests/opentrons/protocol_api/test_instrument_context.py
+++ b/api/tests/opentrons/protocol_api/test_instrument_context.py
@@ -1,8 +1,9 @@
"""Tests for the InstrumentContext public interface."""
+from collections import OrderedDict
import inspect
import pytest
-from pytest_lazyfixture import lazy_fixture # type: ignore[import]
+from pytest_lazyfixture import lazy_fixture # type: ignore[import-untyped]
from decoy import Decoy
from opentrons.legacy_broker import LegacyBroker
@@ -29,6 +30,9 @@
from opentrons.protocol_api.core.legacy.legacy_instrument_core import (
LegacyInstrumentCore,
)
+
+from opentrons.hardware_control.nozzle_manager import NozzleMap
+from opentrons.protocol_api.disposal_locations import TrashBin, WasteChute
from opentrons.protocol_api._nozzle_layout import NozzleLayout
from opentrons.types import Location, Mount, Point
@@ -123,6 +127,38 @@ def test_api_version(api_version: APIVersion, subject: InstrumentContext) -> Non
assert subject.api_version == api_version
+@pytest.mark.parametrize("channels_from_core", [1, 8, 96])
+def test_channels(
+ decoy: Decoy,
+ subject: InstrumentContext,
+ mock_instrument_core: InstrumentCore,
+ channels_from_core: int,
+) -> None:
+ """It should return the number of channels, as returned by the core."""
+ decoy.when(mock_instrument_core.get_channels()).then_return(channels_from_core)
+ assert subject.channels == channels_from_core
+
+
+@pytest.mark.parametrize(
+ ("channels_from_core", "expected_type"),
+ [
+ (1, "single"),
+ (8, "multi"),
+ (96, "multi"),
+ ],
+)
+def test_type(
+ decoy: Decoy,
+ subject: InstrumentContext,
+ mock_instrument_core: InstrumentCore,
+ channels_from_core: int,
+ expected_type: str,
+) -> None:
+ """It should map the number of channels from the core into the string "single" or "multi"."""
+ decoy.when(mock_instrument_core.get_channels()).then_return(channels_from_core)
+ assert subject.type == expected_type
+
+
def test_trash_container(
decoy: Decoy,
mock_trash: Labware,
@@ -472,20 +508,39 @@ def test_blow_out_raises_no_location(
subject.blow_out(location=None)
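+# A minimal single-nozzle (A1-only) NozzleMap used to stub InstrumentCore.get_nozzle_map() in the
+# tests below.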
+MOCK_MAP = NozzleMap.build(
+ physical_nozzles=OrderedDict({"A1": Point(0, 0, 0)}),
+ physical_rows=OrderedDict({"A": ["A1"]}),
+ physical_columns=OrderedDict({"1": ["A1"]}),
+ starting_nozzle="A1",
+ back_left_nozzle="A1",
+ front_right_nozzle="A1",
+)
+
+
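+# The parametrization encodes the version gate for tip selection: apiLevel 2.18 forwards the
+# active nozzle map, while 2.17 passes None.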
+@pytest.mark.parametrize(
+ argnames=["api_version", "mock_map"],
+ argvalues=[(APIVersion(2, 18), MOCK_MAP), (APIVersion(2, 17), None)],
+)
def test_pick_up_tip_from_labware(
- decoy: Decoy, mock_instrument_core: InstrumentCore, subject: InstrumentContext
+ decoy: Decoy,
+ mock_instrument_core: InstrumentCore,
+ subject: InstrumentContext,
+ mock_map: Optional[NozzleMap],
) -> None:
"""It should pick up the next tip from a given labware."""
mock_tip_rack = decoy.mock(cls=Labware)
mock_well = decoy.mock(cls=Well)
top_location = Location(point=Point(1, 2, 3), labware=mock_well)
- decoy.when(mock_instrument_core.get_channels()).then_return(123)
+ decoy.when(mock_instrument_core.get_active_channels()).then_return(123)
+ decoy.when(mock_instrument_core.get_nozzle_map()).then_return(MOCK_MAP)
decoy.when(
labware.next_available_tip(
starting_tip=None,
tip_racks=[mock_tip_rack],
channels=123,
+ nozzle_map=mock_map,
)
).then_return((mock_tip_rack, mock_well))
decoy.when(mock_well.top()).then_return(top_location)
@@ -525,8 +580,15 @@ def test_pick_up_tip_from_well_location(
)
+@pytest.mark.parametrize(
+ argnames=["api_version", "mock_map"],
+ argvalues=[(APIVersion(2, 18), MOCK_MAP), (APIVersion(2, 17), None)],
+)
def test_pick_up_tip_from_labware_location(
- decoy: Decoy, mock_instrument_core: InstrumentCore, subject: InstrumentContext
+ decoy: Decoy,
+ mock_instrument_core: InstrumentCore,
+ subject: InstrumentContext,
+ mock_map: Optional[NozzleMap],
) -> None:
"""It should pick up the next tip from a given labware-based Location."""
mock_tip_rack = decoy.mock(cls=Labware)
@@ -534,12 +596,14 @@ def test_pick_up_tip_from_labware_location(
location = Location(point=Point(1, 2, 3), labware=mock_tip_rack)
top_location = Location(point=Point(1, 2, 3), labware=mock_well)
- decoy.when(mock_instrument_core.get_channels()).then_return(123)
+ decoy.when(mock_instrument_core.get_active_channels()).then_return(123)
+ decoy.when(mock_instrument_core.get_nozzle_map()).then_return(MOCK_MAP)
decoy.when(
labware.next_available_tip(
starting_tip=None,
tip_racks=[mock_tip_rack],
channels=123,
+ nozzle_map=mock_map,
)
).then_return((mock_tip_rack, mock_well))
decoy.when(mock_well.top()).then_return(top_location)
@@ -558,22 +622,32 @@ def test_pick_up_tip_from_labware_location(
)
+@pytest.mark.parametrize(
+ argnames=["api_version", "mock_map"],
+ argvalues=[(APIVersion(2, 18), MOCK_MAP), (APIVersion(2, 17), None)],
+)
def test_pick_up_from_associated_tip_racks(
- decoy: Decoy, mock_instrument_core: InstrumentCore, subject: InstrumentContext
+ decoy: Decoy,
+ mock_instrument_core: InstrumentCore,
+ subject: InstrumentContext,
+ mock_map: Optional[NozzleMap],
) -> None:
- """It should pick up from it associated tip racks."""
+ """It should pick up from its associated tip racks."""
mock_tip_rack_1 = decoy.mock(cls=Labware)
mock_tip_rack_2 = decoy.mock(cls=Labware)
mock_starting_tip = decoy.mock(cls=Well)
mock_well = decoy.mock(cls=Well)
top_location = Location(point=Point(1, 2, 3), labware=mock_well)
- decoy.when(mock_instrument_core.get_channels()).then_return(123)
+ decoy.when(mock_instrument_core.is_tip_tracking_available()).then_return(True)
+ decoy.when(mock_instrument_core.get_active_channels()).then_return(123)
+ decoy.when(mock_instrument_core.get_nozzle_map()).then_return(MOCK_MAP)
decoy.when(
labware.next_available_tip(
starting_tip=mock_starting_tip,
tip_racks=[mock_tip_rack_1, mock_tip_rack_2],
channels=123,
+ nozzle_map=mock_map,
)
).then_return((mock_tip_rack_2, mock_well))
decoy.when(mock_well.top()).then_return(top_location)
@@ -594,6 +668,22 @@ def test_pick_up_from_associated_tip_racks(
)
+def test_pick_up_fails_when_tip_tracking_unavailable(
+ decoy: Decoy, mock_instrument_core: InstrumentCore, subject: InstrumentContext
+) -> None:
+ """It should raise an error if automatic tip tracking is not available.."""
+ mock_tip_rack_1 = decoy.mock(cls=Labware)
+
+ decoy.when(mock_instrument_core.is_tip_tracking_available()).then_return(False)
+ decoy.when(mock_instrument_core.get_active_channels()).then_return(123)
+
+ subject.tip_racks = [mock_tip_rack_1]
+ with pytest.raises(
+ CommandPreconditionViolated, match="Automatic tip tracking is not available"
+ ):
+ subject.pick_up_tip()
+
+
def test_drop_tip_to_well(
decoy: Decoy, mock_instrument_core: InstrumentCore, subject: InstrumentContext
) -> None:
@@ -663,6 +753,77 @@ def test_drop_tip_to_randomized_trash_location(
)
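+# The parametrization encodes that alternate tip-drop locations are expected on apiLevel 2.17 but
+# not on 2.18 when dropping into a deck-configured disposal location.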
+@pytest.mark.parametrize(
+ ["api_version", "alternate_drop"],
+ [(APIVersion(2, 17), True), (APIVersion(2, 18), False)],
+)
+def test_drop_tip_in_trash_bin(
+ decoy: Decoy,
+ mock_instrument_core: InstrumentCore,
+ alternate_drop: bool,
+ subject: InstrumentContext,
+) -> None:
+ """It should drop a tip in a deck configured trash bin."""
+ trash_bin = decoy.mock(cls=TrashBin)
+
+ subject.drop_tip(trash_bin)
+
+ decoy.verify(
+ mock_instrument_core.drop_tip_in_disposal_location(
+ trash_bin,
+ home_after=None,
+ alternate_tip_drop=alternate_drop,
+ ),
+ times=1,
+ )
+
+
+@pytest.mark.parametrize(
+ ["api_version", "alternate_drop"],
+ [(APIVersion(2, 17), True), (APIVersion(2, 18), False)],
+)
+def test_drop_tip_in_waste_chute(
+ decoy: Decoy,
+ mock_instrument_core: InstrumentCore,
+ alternate_drop: bool,
+ subject: InstrumentContext,
+) -> None:
+ """It should drop a tip in a deck configured trash bin or waste chute."""
+ waste_chute = decoy.mock(cls=WasteChute)
+
+ subject.drop_tip(waste_chute)
+
+ decoy.verify(
+ mock_instrument_core.drop_tip_in_disposal_location(
+ waste_chute,
+ home_after=None,
+ alternate_tip_drop=alternate_drop,
+ ),
+ times=1,
+ )
+
+
+def test_drop_tip_in_disposal_location_implicitly(
+ decoy: Decoy,
+ mock_instrument_core: InstrumentCore,
+ subject: InstrumentContext,
+) -> None:
+ """It should drop a tip in a deck configured trash bin when no arguments have been provided."""
+ trash_bin = decoy.mock(cls=TrashBin)
+ subject.trash_container = trash_bin
+
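+    # With no arguments, the tip should go to the assigned trash_container with alternate drop locations.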
+ subject.drop_tip()
+
+ decoy.verify(
+ mock_instrument_core.drop_tip_in_disposal_location(
+ trash_bin,
+ home_after=None,
+ alternate_tip_drop=True,
+ ),
+ times=1,
+ )
+
+
def test_return_tip(
decoy: Decoy, mock_instrument_core: InstrumentCore, subject: InstrumentContext
) -> None:
@@ -958,3 +1119,140 @@ def test_configure_nozzle_layout(
"""The correct model is passed to the engine client."""
with exception:
subject.configure_nozzle_layout(style, primary_nozzle, front_right_nozzle)
+
+
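+# The next four tests pin down a behavior change around volume=0: on apiLevel 2.15 it means
+# "everything available", while on apiLevel 2.16 it means literally zero.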
+@pytest.mark.parametrize("api_version", [APIVersion(2, 15)])
+def test_dispense_0_volume_means_dispense_everything(
+ decoy: Decoy,
+ mock_instrument_core: InstrumentCore,
+ subject: InstrumentContext,
+ mock_protocol_core: ProtocolCore,
+) -> None:
+ """It should dispense all liquid to a well."""
+ input_location = Location(point=Point(2, 2, 2), labware=None)
+ decoy.when(
+ mock_validation.validate_location(location=input_location, last_location=None)
+ ).then_return(mock_validation.PointTarget(location=input_location, in_place=False))
+ decoy.when(mock_instrument_core.get_current_volume()).then_return(100)
+ decoy.when(mock_instrument_core.get_dispense_flow_rate(1.23)).then_return(5.67)
+ subject.dispense(volume=0, location=input_location, rate=1.23, push_out=None)
+
+ decoy.verify(
+ mock_instrument_core.dispense(
+ location=input_location,
+ well_core=None,
+ in_place=False,
+ volume=100,
+ rate=1.23,
+ flow_rate=5.67,
+ push_out=None,
+ ),
+ times=1,
+ )
+
+
+@pytest.mark.parametrize("api_version", [APIVersion(2, 16)])
+def test_dispense_0_volume_means_dispense_nothing(
+ decoy: Decoy,
+ mock_instrument_core: InstrumentCore,
+ subject: InstrumentContext,
+ mock_protocol_core: ProtocolCore,
+) -> None:
+ """It should dispense no liquid to a well."""
+ input_location = Location(point=Point(2, 2, 2), labware=None)
+ decoy.when(
+ mock_validation.validate_location(location=input_location, last_location=None)
+ ).then_return(mock_validation.PointTarget(location=input_location, in_place=False))
+ decoy.when(mock_instrument_core.get_dispense_flow_rate(1.23)).then_return(5.67)
+ subject.dispense(volume=0, location=input_location, rate=1.23, push_out=None)
+
+ decoy.verify(
+ mock_instrument_core.dispense(
+ location=input_location,
+ well_core=None,
+ in_place=False,
+ volume=0,
+ rate=1.23,
+ flow_rate=5.67,
+ push_out=None,
+ ),
+ times=1,
+ )
+
+
+@pytest.mark.parametrize("api_version", [APIVersion(2, 15)])
+def test_aspirate_0_volume_means_aspirate_everything(
+ decoy: Decoy,
+ mock_instrument_core: InstrumentCore,
+ subject: InstrumentContext,
+ mock_protocol_core: ProtocolCore,
+) -> None:
+ """It should aspirate to a well."""
+ mock_well = decoy.mock(cls=Well)
+ input_location = Location(point=Point(2, 2, 2), labware=mock_well)
+ last_location = Location(point=Point(9, 9, 9), labware=None)
+ decoy.when(mock_instrument_core.get_mount()).then_return(Mount.RIGHT)
+
+ decoy.when(mock_protocol_core.get_last_location(Mount.RIGHT)).then_return(
+ last_location
+ )
+
+ decoy.when(
+ mock_validation.validate_location(
+ location=input_location, last_location=last_location
+ )
+ ).then_return(WellTarget(well=mock_well, location=input_location, in_place=False))
+ decoy.when(mock_instrument_core.get_aspirate_flow_rate(1.23)).then_return(5.67)
+ decoy.when(mock_instrument_core.get_available_volume()).then_return(200)
+ subject.aspirate(volume=0, location=input_location, rate=1.23)
+
+ decoy.verify(
+ mock_instrument_core.aspirate(
+ location=input_location,
+ well_core=mock_well._core,
+ in_place=False,
+ volume=200,
+ rate=1.23,
+ flow_rate=5.67,
+ ),
+ times=1,
+ )
+
+
+@pytest.mark.parametrize("api_version", [APIVersion(2, 16)])
+def test_aspirate_0_volume_means_aspirate_nothing(
+ decoy: Decoy,
+ mock_instrument_core: InstrumentCore,
+ subject: InstrumentContext,
+ mock_protocol_core: ProtocolCore,
+) -> None:
+ """It should aspirate to a well."""
+ mock_well = decoy.mock(cls=Well)
+ input_location = Location(point=Point(2, 2, 2), labware=mock_well)
+ last_location = Location(point=Point(9, 9, 9), labware=None)
+ decoy.when(mock_instrument_core.get_mount()).then_return(Mount.RIGHT)
+
+ decoy.when(mock_protocol_core.get_last_location(Mount.RIGHT)).then_return(
+ last_location
+ )
+
+ decoy.when(
+ mock_validation.validate_location(
+ location=input_location, last_location=last_location
+ )
+ ).then_return(WellTarget(well=mock_well, location=input_location, in_place=False))
+ decoy.when(mock_instrument_core.get_aspirate_flow_rate(1.23)).then_return(5.67)
+
+ subject.aspirate(volume=0, location=input_location, rate=1.23)
+
+ decoy.verify(
+ mock_instrument_core.aspirate(
+ location=input_location,
+ well_core=mock_well._core,
+ in_place=False,
+ volume=0,
+ rate=1.23,
+ flow_rate=5.67,
+ ),
+ times=1,
+ )
diff --git a/api/tests/opentrons/protocol_api/test_labware.py b/api/tests/opentrons/protocol_api/test_labware.py
index 58bb3ca0b0d..b9b008e77a1 100644
--- a/api/tests/opentrons/protocol_api/test_labware.py
+++ b/api/tests/opentrons/protocol_api/test_labware.py
@@ -24,6 +24,8 @@
from opentrons.types import Point
+from . import versions_at_or_below, versions_at_or_above, versions_between
+
@pytest.fixture(autouse=True)
def _mock_well_grid_module(decoy: Decoy, monkeypatch: pytest.MonkeyPatch) -> None:
@@ -317,7 +319,7 @@ def test_child(
assert subject.child == mock_labware
-@pytest.mark.parametrize("api_version", [APIVersion(2, 13)])
+@pytest.mark.parametrize("api_version", versions_at_or_below(APIVersion(2, 13)))
def test_set_offset_succeeds_on_low_api_version(
decoy: Decoy,
subject: Labware,
@@ -328,8 +330,13 @@ def test_set_offset_succeeds_on_low_api_version(
decoy.verify(mock_labware_core.set_calibration(Point(1, 2, 3)))
-@pytest.mark.parametrize("api_version", [APIVersion(2, 14)])
-def test_set_offset_raises_on_high_api_version(
+@pytest.mark.parametrize(
+ "api_version",
+ versions_between(
+ low_inclusive_bound=APIVersion(2, 14), high_inclusive_bound=APIVersion(2, 17)
+ ),
+)
+def test_set_offset_raises_on_intermediate_api_version(
decoy: Decoy,
subject: Labware,
mock_labware_core: LabwareCore,
@@ -339,7 +346,16 @@ def test_set_offset_raises_on_high_api_version(
subject.set_offset(1, 2, 3)
-@pytest.mark.parametrize("api_version", [APIVersion(2, 14)])
+@pytest.mark.parametrize("api_version", versions_at_or_above(APIVersion(2, 18)))
+def test_set_offset_succeeds_on_high_api_version(
+ decoy: Decoy, subject: Labware, mock_labware_core: LabwareCore
+) -> None:
+ """It should not raise an API version error on the most recent versions."""
+ subject.set_offset(1, 2, 3)
+ decoy.verify(mock_labware_core.set_calibration(Point(1, 2, 3)))
+
+
+@pytest.mark.parametrize("api_version", versions_at_or_above(APIVersion(2, 14)))
def test_separate_calibration_raises_on_high_api_version(
decoy: Decoy,
subject: Labware,
diff --git a/api/tests/opentrons/protocol_api/test_module_context.py b/api/tests/opentrons/protocol_api/test_module_context.py
index 6ce8928abc4..c57f1ff52dc 100644
--- a/api/tests/opentrons/protocol_api/test_module_context.py
+++ b/api/tests/opentrons/protocol_api/test_module_context.py
@@ -108,7 +108,7 @@ def test_load_labware(
decoy.when(mock_labware_core.get_well_columns()).then_return([])
result = subject.load_labware(
- name="infinite tip rack",
+ name="Infinite Tip Rack",
label="it doesn't run out",
namespace="ideal",
version=101,
diff --git a/api/tests/opentrons/protocol_api/test_parameter_context.py b/api/tests/opentrons/protocol_api/test_parameter_context.py
new file mode 100644
index 00000000000..7dcc246f216
--- /dev/null
+++ b/api/tests/opentrons/protocol_api/test_parameter_context.py
@@ -0,0 +1,229 @@
+"""Tests for the ParameterContext public interface."""
+import inspect
+
+import pytest
+from decoy import Decoy
+
+from opentrons.protocols.api_support.types import APIVersion
+from opentrons.protocol_api import (
+ MAX_SUPPORTED_VERSION,
+)
+from opentrons.protocols.parameters import (
+ parameter_definition as mock_parameter_definition,
+ validation as mock_validation,
+)
+from opentrons.protocols.parameters.types import ParameterDefinitionError
+from opentrons.protocol_engine.types import BooleanParameter
+
+from opentrons.protocol_api._parameter_context import ParameterContext
+
+
+@pytest.fixture(autouse=True)
+def _mock_parameter_definition_creates(
+ decoy: Decoy, monkeypatch: pytest.MonkeyPatch
+) -> None:
+ for name, func in inspect.getmembers(mock_parameter_definition, inspect.isfunction):
+ monkeypatch.setattr(mock_parameter_definition, name, decoy.mock(func=func))
+
+
+@pytest.fixture(autouse=True)
+def _patch_parameter_validation(decoy: Decoy, monkeypatch: pytest.MonkeyPatch) -> None:
+ for name, func in inspect.getmembers(mock_validation, inspect.isfunction):
+ monkeypatch.setattr(mock_validation, name, decoy.mock(func=func))
+
+
+@pytest.fixture
+def api_version() -> APIVersion:
+ """The API version under test."""
+ return MAX_SUPPORTED_VERSION
+
+
+@pytest.fixture
+def subject(api_version: APIVersion) -> ParameterContext:
+ """Get a ParameterContext test subject."""
+ return ParameterContext(api_version=api_version)
+
+
+def test_add_int(decoy: Decoy, subject: ParameterContext) -> None:
+ """It should create and add an int parameter definition."""
+ subject._parameters["other_param"] = decoy.mock(
+ cls=mock_parameter_definition.ParameterDefinition
+ )
+ param_def = decoy.mock(cls=mock_parameter_definition.ParameterDefinition)
+ decoy.when(param_def.variable_name).then_return("my cool variable")
+ decoy.when(
+ mock_parameter_definition.create_int_parameter(
+ display_name="abc",
+ variable_name="xyz",
+ default=123,
+ minimum=45,
+ maximum=678,
+ choices=[{"display_name": "foo", "value": 42}],
+ description="blah blah blah",
+ unit="foot candles",
+ )
+ ).then_return(param_def)
+
+ subject.add_int(
+ display_name="abc",
+ variable_name="xyz",
+ default=123,
+ minimum=45,
+ maximum=678,
+ choices=[{"display_name": "foo", "value": 42}],
+ description="blah blah blah",
+ unit="foot candles",
+ )
+
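+    # The context keys the stored definition by the variable_name reported by the created definition.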
+ assert param_def is subject._parameters["my cool variable"]
+ decoy.verify(mock_validation.validate_variable_name_unique("xyz", {"other_param"}))
+
+
+def test_add_float(decoy: Decoy, subject: ParameterContext) -> None:
+ """It should create and add a float parameter definition."""
+ subject._parameters["other_param"] = decoy.mock(
+ cls=mock_parameter_definition.ParameterDefinition
+ )
+ param_def = decoy.mock(cls=mock_parameter_definition.ParameterDefinition)
+ decoy.when(param_def.variable_name).then_return("my cooler variable")
+ decoy.when(mock_validation.ensure_float_value(12.3)).then_return(3.21)
+ decoy.when(mock_validation.ensure_optional_float_value(4.5)).then_return(5.4)
+ decoy.when(mock_validation.ensure_optional_float_value(67.8)).then_return(87.6)
+ decoy.when(
+ mock_validation.ensure_float_choices([{"display_name": "foo", "value": 4.2}])
+ ).then_return([{"display_name": "bar", "value": 2.4}])
+ decoy.when(
+ mock_parameter_definition.create_float_parameter(
+ display_name="abc",
+ variable_name="xyz",
+ default=3.21,
+ minimum=5.4,
+ maximum=87.6,
+ choices=[{"display_name": "bar", "value": 2.4}],
+ description="blah blah blah",
+ unit="lux",
+ )
+ ).then_return(param_def)
+
+ subject.add_float(
+ display_name="abc",
+ variable_name="xyz",
+ default=12.3,
+ minimum=4.5,
+ maximum=67.8,
+ choices=[{"display_name": "foo", "value": 4.2}],
+ description="blah blah blah",
+ unit="lux",
+ )
+
+ assert param_def is subject._parameters["my cooler variable"]
+ decoy.verify(mock_validation.validate_variable_name_unique("xyz", {"other_param"}))
+
+
+def test_add_bool(decoy: Decoy, subject: ParameterContext) -> None:
+ """It should create and add a boolean parameter definition."""
+ subject._parameters["other_param"] = decoy.mock(
+ cls=mock_parameter_definition.ParameterDefinition
+ )
+ param_def = decoy.mock(cls=mock_parameter_definition.ParameterDefinition)
+ decoy.when(param_def.variable_name).then_return("my coolest variable")
+ decoy.when(
+ mock_parameter_definition.create_bool_parameter(
+ display_name="cba",
+ variable_name="zxy",
+ default=False,
+ choices=[
+ {"display_name": "On", "value": True},
+ {"display_name": "Off", "value": False},
+ ],
+ description="lorem ipsum",
+ )
+ ).then_return(param_def)
+
+ subject.add_bool(
+ display_name="cba",
+ variable_name="zxy",
+ default=False,
+ description="lorem ipsum",
+ )
+
+ assert param_def is subject._parameters["my coolest variable"]
+ decoy.verify(mock_validation.validate_variable_name_unique("zxy", {"other_param"}))
+
+
+def test_add_string(decoy: Decoy, subject: ParameterContext) -> None:
+ """It should create and add a string parameter definition."""
+ subject._parameters["other_param"] = decoy.mock(
+ cls=mock_parameter_definition.ParameterDefinition
+ )
+ param_def = decoy.mock(cls=mock_parameter_definition.ParameterDefinition)
+ decoy.when(param_def.variable_name).then_return("my slightly less cool variable")
+ decoy.when(
+ mock_parameter_definition.create_str_parameter(
+ display_name="jkl",
+ variable_name="qwerty",
+ default="asdf",
+ choices=[{"display_name": "bar", "value": "aaa"}],
+ description="fee foo fum",
+ )
+ ).then_return(param_def)
+
+ subject.add_str(
+ display_name="jkl",
+ variable_name="qwerty",
+ default="asdf",
+ choices=[{"display_name": "bar", "value": "aaa"}],
+ description="fee foo fum",
+ )
+
+ assert param_def is subject._parameters["my slightly less cool variable"]
+ decoy.verify(
+ mock_validation.validate_variable_name_unique("qwerty", {"other_param"})
+ )
+
+
+def test_set_parameters(decoy: Decoy, subject: ParameterContext) -> None:
+ """It should set the parameter values."""
+ param_def = decoy.mock(cls=mock_parameter_definition.ParameterDefinition)
+ decoy.when(param_def.parameter_type).then_return(bool)
+ decoy.when(mock_validation.ensure_value_type("bar", bool)).then_return("rhubarb")
+ subject._parameters["foo"] = param_def
+
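+    # set_parameters should coerce the raw value through validation and assign it to the matching definition.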
+ subject.set_parameters({"foo": "bar"})
+
+ assert param_def.value == "rhubarb"
+
+
+def test_set_parameters_raises(decoy: Decoy, subject: ParameterContext) -> None:
+ """It should raise if the given parameter is not defined."""
+ with pytest.raises(ParameterDefinitionError):
+ subject.set_parameters({"foo": "bar"})
+
+
+def test_export_parameters_for_analysis(
+ decoy: Decoy, subject: ParameterContext
+) -> None:
+ """It should export the parameters as protocol engine types."""
+ param_def = decoy.mock(cls=mock_parameter_definition.ParameterDefinition)
+ boolean_param = decoy.mock(cls=BooleanParameter)
+ decoy.when(param_def.as_protocol_engine_type()).then_return(boolean_param)
+ subject._parameters["foo"] = param_def
+
+ assert subject.export_parameters_for_analysis() == [boolean_param]
+
+
+def test_export_parameters_for_protocol(
+ decoy: Decoy, subject: ParameterContext
+) -> None:
+ """It should export the parameters as a Parameters object with the parameters as dynamic attributes."""
+ param_def_1 = decoy.mock(cls=mock_parameter_definition.ParameterDefinition)
+ param_def_2 = decoy.mock(cls=mock_parameter_definition.ParameterDefinition)
+ decoy.when(param_def_1.variable_name).then_return("x")
+ decoy.when(param_def_1.value).then_return("a")
+ decoy.when(param_def_2.variable_name).then_return("y")
+ decoy.when(param_def_2.value).then_return(1.23)
+ subject._parameters = {"foo": param_def_1, "bar": param_def_2}
+
+ result = subject.export_parameters_for_protocol()
+ assert result.x == "a" # type: ignore[attr-defined]
+ assert result.y == 1.23 # type: ignore[attr-defined]
diff --git a/api/tests/opentrons/protocol_api/test_protocol_context.py b/api/tests/opentrons/protocol_api/test_protocol_context.py
index d31d0c43ed8..c792fc4574c 100644
--- a/api/tests/opentrons/protocol_api/test_protocol_context.py
+++ b/api/tests/opentrons/protocol_api/test_protocol_context.py
@@ -8,7 +8,7 @@
from opentrons_shared_data.pipette.dev_types import PipetteNameType
from opentrons_shared_data.labware.dev_types import LabwareDefinition as LabwareDefDict
-from opentrons.types import Mount, DeckSlotName
+from opentrons.types import Mount, DeckSlotName, StagingSlotName
from opentrons.protocol_api import OFF_DECK
from opentrons.legacy_broker import LegacyBroker
from opentrons.hardware_control.modules.types import ModuleType, TemperatureModuleModel
@@ -38,6 +38,10 @@
MagneticModuleCore,
MagneticBlockCore,
)
+from opentrons.protocol_api.disposal_locations import TrashBin, WasteChute
+from opentrons.protocols.api_support.deck_type import (
+ NoTrashDefinedError,
+)
@pytest.fixture(autouse=True)
@@ -78,6 +82,12 @@ def mock_deck(decoy: Decoy) -> Deck:
return decoy.mock(cls=Deck)
+@pytest.fixture
+def mock_fixed_trash(decoy: Decoy) -> Labware:
+ """Get a mock Fixed Trash."""
+ return decoy.mock(cls=Labware)
+
+
@pytest.fixture
def api_version() -> APIVersion:
"""The API version under test."""
@@ -90,8 +100,11 @@ def subject(
mock_core_map: LoadedCoreMap,
mock_deck: Deck,
api_version: APIVersion,
+ mock_fixed_trash: Labware,
+ decoy: Decoy,
) -> ProtocolContext:
"""Get a ProtocolContext test subject with its dependencies mocked out."""
+ decoy.when(mock_core_map.get(mock_core.fixed_trash)).then_return(mock_fixed_trash)
return ProtocolContext(
api_version=api_version,
core=mock_core,
@@ -100,6 +113,42 @@ def subject(
)
+def test_legacy_trash_loading(
+ decoy: Decoy,
+ mock_core: ProtocolCore,
+ mock_core_map: LoadedCoreMap,
+ mock_fixed_trash: Labware,
+ mock_deck: Deck,
+) -> None:
+ """It should load a trash labware on init on API level 2.15 and below."""
+ decoy.when(mock_core_map.get(mock_core.fixed_trash)).then_return(mock_fixed_trash)
+ context = ProtocolContext(
+ api_version=APIVersion(2, 15),
+ core=mock_core,
+ core_map=mock_core_map,
+ deck=mock_deck,
+ )
+ assert mock_fixed_trash == context.fixed_trash
+ decoy.verify(mock_core.append_disposal_location(mock_fixed_trash))
+
+
+def test_automatic_ot2_trash_loading(
+ decoy: Decoy,
+ mock_core: ProtocolCore,
+ mock_core_map: LoadedCoreMap,
+ mock_deck: Deck,
+) -> None:
+ """It should load a trash labware on init on API level 2.15 and below."""
+ decoy.when(mock_core.robot_type).then_return("OT-2 Standard")
+ ProtocolContext(
+ api_version=APIVersion(2, 16),
+ core=mock_core,
+ core_map=mock_core_map,
+ deck=mock_deck,
+ )
+ decoy.verify(mock_core.load_ot2_fixed_trash_bin())
+
+
def test_fixed_trash(
decoy: Decoy,
mock_core: ProtocolCore,
@@ -115,7 +164,7 @@ def test_fixed_trash(
trash = trash_captor.value
decoy.when(mock_core_map.get(mock_core.fixed_trash)).then_return(trash)
-
+ decoy.when(mock_core.get_disposal_locations()).then_return([trash])
result = subject.fixed_trash
assert result is trash
@@ -138,11 +187,15 @@ def test_load_instrument(
mock_instrument_core = decoy.mock(cls=InstrumentCore)
mock_tip_racks = [decoy.mock(cls=Labware), decoy.mock(cls=Labware)]
- decoy.when(mock_validation.ensure_mount("shadowfax")).then_return(Mount.LEFT)
decoy.when(mock_validation.ensure_lowercase_name("Gandalf")).then_return("gandalf")
decoy.when(mock_validation.ensure_pipette_name("gandalf")).then_return(
PipetteNameType.P300_SINGLE
)
+ decoy.when(
+ mock_validation.ensure_mount_for_pipette(
+ "shadowfax", PipetteNameType.P300_SINGLE
+ )
+ ).then_return(Mount.LEFT)
decoy.when(
mock_core.load_instrument(
@@ -152,6 +205,9 @@ def test_load_instrument(
).then_return(mock_instrument_core)
decoy.when(mock_instrument_core.get_pipette_name()).then_return("Gandalf the Grey")
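+    # load_instrument looks up the deck's disposal locations to assign a default trash container;
+    # raising here simulates a protocol with no trash defined.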
+ decoy.when(mock_core.get_disposal_locations()).then_raise(
+ NoTrashDefinedError("No trash!")
+ )
result = subject.load_instrument(
instrument_name="Gandalf", mount="shadowfax", tip_racks=mock_tip_racks
@@ -182,13 +238,17 @@ def test_load_instrument_replace(
"""It should allow/disallow pipette replacement."""
mock_instrument_core = decoy.mock(cls=InstrumentCore)
- decoy.when(mock_validation.ensure_lowercase_name("ada")).then_return("ada")
- decoy.when(mock_validation.ensure_mount(matchers.IsA(Mount))).then_return(
- Mount.RIGHT
+ decoy.when(mock_validation.ensure_lowercase_name(matchers.IsA(str))).then_return(
+ "ada"
)
decoy.when(mock_validation.ensure_pipette_name(matchers.IsA(str))).then_return(
PipetteNameType.P300_SINGLE
)
+ decoy.when(
+ mock_validation.ensure_mount_for_pipette(
+ matchers.IsA(Mount), matchers.IsA(PipetteNameType)
+ )
+ ).then_return(Mount.RIGHT)
decoy.when(
mock_core.load_instrument(
instrument_name=matchers.IsA(PipetteNameType),
@@ -196,6 +256,9 @@ def test_load_instrument_replace(
)
).then_return(mock_instrument_core)
decoy.when(mock_instrument_core.get_pipette_name()).then_return("Ada Lovelace")
+ decoy.when(mock_core.get_disposal_locations()).then_raise(
+ NoTrashDefinedError("No trash!")
+ )
pipette_1 = subject.load_instrument(instrument_name="ada", mount=Mount.RIGHT)
assert subject.loaded_instruments["right"] is pipette_1
@@ -209,33 +272,6 @@ def test_load_instrument_replace(
subject.load_instrument(instrument_name="ada", mount=Mount.RIGHT)
-def test_96_channel_pipette_always_loads_on_the_left_mount(
- decoy: Decoy,
- mock_core: ProtocolCore,
- subject: ProtocolContext,
-) -> None:
- """It should always load a 96-channel pipette on left mount, regardless of the mount arg specified."""
- mock_instrument_core = decoy.mock(cls=InstrumentCore)
-
- decoy.when(mock_validation.ensure_lowercase_name("A 96 Channel Name")).then_return(
- "a 96 channel name"
- )
- decoy.when(mock_validation.ensure_pipette_name("a 96 channel name")).then_return(
- PipetteNameType.P1000_96
- )
- decoy.when(
- mock_core.load_instrument(
- instrument_name=PipetteNameType.P1000_96,
- mount=Mount.LEFT,
- )
- ).then_return(mock_instrument_core)
-
- result = subject.load_instrument(
- instrument_name="A 96 Channel Name", mount="shadowfax"
- )
- assert result == subject.loaded_instruments["left"]
-
-
def test_96_channel_pipette_raises_if_another_pipette_attached(
decoy: Decoy,
mock_core: ProtocolCore,
@@ -244,13 +280,17 @@ def test_96_channel_pipette_raises_if_another_pipette_attached(
"""It should always raise when loading a 96-channel pipette when another pipette is attached."""
mock_instrument_core = decoy.mock(cls=InstrumentCore)
- decoy.when(mock_validation.ensure_lowercase_name("ada")).then_return("ada")
- decoy.when(mock_validation.ensure_pipette_name("ada")).then_return(
- PipetteNameType.P300_SINGLE
- )
- decoy.when(mock_validation.ensure_mount(matchers.IsA(Mount))).then_return(
- Mount.RIGHT
- )
+ decoy.when(
+ mock_validation.ensure_lowercase_name("A Single Channel Name")
+ ).then_return("a single channel name")
+ decoy.when(
+ mock_validation.ensure_pipette_name("a single channel name")
+ ).then_return(PipetteNameType.P300_SINGLE)
+ decoy.when(
+ mock_validation.ensure_mount_for_pipette(
+ Mount.RIGHT, PipetteNameType.P300_SINGLE
+ )
+ ).then_return(Mount.RIGHT)
decoy.when(
mock_core.load_instrument(
@@ -261,7 +301,13 @@ def test_96_channel_pipette_raises_if_another_pipette_attached(
decoy.when(mock_instrument_core.get_pipette_name()).then_return("ada")
- pipette_1 = subject.load_instrument(instrument_name="ada", mount=Mount.RIGHT)
+ decoy.when(mock_core.get_disposal_locations()).then_raise(
+ NoTrashDefinedError("No trash!")
+ )
+
+ pipette_1 = subject.load_instrument(
+ instrument_name="A Single Channel Name", mount=Mount.RIGHT
+ )
assert subject.loaded_instruments["right"] is pipette_1
decoy.when(mock_validation.ensure_lowercase_name("A 96 Channel Name")).then_return(
@@ -270,6 +316,9 @@ def test_96_channel_pipette_raises_if_another_pipette_attached(
decoy.when(mock_validation.ensure_pipette_name("a 96 channel name")).then_return(
PipetteNameType.P1000_96
)
+ decoy.when(
+ mock_validation.ensure_mount_for_pipette("shadowfax", PipetteNameType.P1000_96)
+ ).then_return(Mount.LEFT)
decoy.when(
mock_core.load_instrument(
instrument_name=PipetteNameType.P1000_96,
@@ -383,6 +432,52 @@ def test_load_labware_off_deck_raises(
)
+def test_load_labware_on_staging_slot(
+ decoy: Decoy,
+ mock_core: ProtocolCore,
+ mock_core_map: LoadedCoreMap,
+ api_version: APIVersion,
+ subject: ProtocolContext,
+) -> None:
+ """It should create a labware on a staging slot using its execution core."""
+ mock_labware_core = decoy.mock(cls=LabwareCore)
+
+ decoy.when(mock_validation.ensure_lowercase_name("UPPERCASE_LABWARE")).then_return(
+ "lowercase_labware"
+ )
+ decoy.when(mock_core.robot_type).then_return("OT-3 Standard")
+ decoy.when(
+ mock_validation.ensure_and_convert_deck_slot(42, api_version, "OT-3 Standard")
+ ).then_return(StagingSlotName.SLOT_B4)
+
+ decoy.when(
+ mock_core.load_labware(
+ load_name="lowercase_labware",
+ location=StagingSlotName.SLOT_B4,
+ label="some_display_name",
+ namespace="some_namespace",
+ version=1337,
+ )
+ ).then_return(mock_labware_core)
+
+ decoy.when(mock_labware_core.get_name()).then_return("Full Name")
+ decoy.when(mock_labware_core.get_display_name()).then_return("Display Name")
+ decoy.when(mock_labware_core.get_well_columns()).then_return([])
+
+ result = subject.load_labware(
+ load_name="UPPERCASE_LABWARE",
+ location=42,
+ label="some_display_name",
+ namespace="some_namespace",
+ version=1337,
+ )
+
+ assert isinstance(result, Labware)
+ assert result.name == "Full Name"
+
+ decoy.verify(mock_core_map.add(mock_labware_core, result), times=1)
+
+
def test_load_labware_from_definition(
decoy: Decoy,
mock_core: ProtocolCore,
@@ -468,6 +563,47 @@ def test_load_adapter(
decoy.verify(mock_core_map.add(mock_labware_core, result), times=1)
+def test_load_adapter_on_staging_slot(
+ decoy: Decoy,
+ mock_core: ProtocolCore,
+ mock_core_map: LoadedCoreMap,
+ api_version: APIVersion,
+ subject: ProtocolContext,
+) -> None:
+ """It should create an adapter on a staging slot using its execution core."""
+ mock_labware_core = decoy.mock(cls=LabwareCore)
+
+ decoy.when(mock_validation.ensure_lowercase_name("UPPERCASE_ADAPTER")).then_return(
+ "lowercase_adapter"
+ )
+ decoy.when(mock_core.robot_type).then_return("OT-3 Standard")
+ decoy.when(
+ mock_validation.ensure_and_convert_deck_slot(42, api_version, "OT-3 Standard")
+ ).then_return(StagingSlotName.SLOT_B4)
+
+ decoy.when(
+ mock_core.load_adapter(
+ load_name="lowercase_adapter",
+ location=StagingSlotName.SLOT_B4,
+ namespace="some_namespace",
+ version=1337,
+ )
+ ).then_return(mock_labware_core)
+
+ decoy.when(mock_labware_core.get_well_columns()).then_return([])
+
+ result = subject.load_adapter(
+ load_name="UPPERCASE_ADAPTER",
+ location=42,
+ namespace="some_namespace",
+ version=1337,
+ )
+
+ assert isinstance(result, Labware)
+
+ decoy.verify(mock_core_map.add(mock_labware_core, result), times=1)
+
+
def test_load_labware_on_adapter(
decoy: Decoy,
mock_core: ProtocolCore,
@@ -599,6 +735,50 @@ def test_move_labware_to_slot(
)
+def test_move_labware_to_staging_slot(
+ decoy: Decoy,
+ mock_core: ProtocolCore,
+ mock_core_map: LoadedCoreMap,
+ api_version: APIVersion,
+ subject: ProtocolContext,
+) -> None:
+ """It should move labware to new slot location."""
+ drop_offset = {"x": 4, "y": 5, "z": 6}
+ mock_labware_core = decoy.mock(cls=LabwareCore)
+
+ decoy.when(mock_core.robot_type).then_return("OT-3 Standard")
+ decoy.when(
+ mock_validation.ensure_and_convert_deck_slot(42, api_version, "OT-3 Standard")
+ ).then_return(StagingSlotName.SLOT_B4)
+ decoy.when(mock_labware_core.get_well_columns()).then_return([])
+
+ movable_labware = Labware(
+ core=mock_labware_core,
+ api_version=MAX_SUPPORTED_VERSION,
+ protocol_core=mock_core,
+ core_map=mock_core_map,
+ )
+ decoy.when(
+ mock_validation.ensure_valid_labware_offset_vector(drop_offset)
+ ).then_return((1, 2, 3))
+ subject.move_labware(
+ labware=movable_labware,
+ new_location=42,
+ drop_offset=drop_offset,
+ )
+
+ decoy.verify(
+ mock_core.move_labware(
+ labware_core=mock_labware_core,
+ new_location=StagingSlotName.SLOT_B4,
+ use_gripper=False,
+ pause_for_manual_move=True,
+ pick_up_offset=None,
+ drop_offset=(1, 2, 3),
+ )
+ )
+
+
def test_move_labware_to_module(
decoy: Decoy,
mock_core: ProtocolCore,
@@ -611,6 +791,9 @@ def test_move_labware_to_module(
mock_broker = decoy.mock(cls=LegacyBroker)
decoy.when(mock_labware_core.get_well_columns()).then_return([])
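+    # Moving to a module now resolves the module's deck slot and any labware already loaded on it.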
+ decoy.when(mock_module_core.get_deck_slot()).then_return(DeckSlotName.SLOT_A1)
+ decoy.when(mock_core.get_labware_on_module(mock_module_core)).then_return(None)
+ decoy.when(mock_core_map.get(None)).then_return(None)
movable_labware = Labware(
core=mock_labware_core,
@@ -693,6 +876,66 @@ def test_move_labware_off_deck_raises(
subject.move_labware(labware=movable_labware, new_location=OFF_DECK)
+def test_load_trash_bin(
+ decoy: Decoy,
+ mock_core: ProtocolCore,
+ api_version: APIVersion,
+ subject: ProtocolContext,
+) -> None:
+ """It should load a trash bin."""
+ mock_trash = decoy.mock(cls=TrashBin)
+
+ decoy.when(mock_core.robot_type).then_return("OT-3 Standard")
+ decoy.when(
+ mock_validation.ensure_and_convert_deck_slot(
+ "blah", api_version, "OT-3 Standard"
+ )
+ ).then_return(DeckSlotName.SLOT_A1)
+ decoy.when(
+ mock_validation.ensure_and_convert_trash_bin_location(
+ "blah", api_version, "OT-3 Standard"
+ )
+ ).then_return("my swanky trash bin")
+ decoy.when(
+ mock_core.load_trash_bin(DeckSlotName.SLOT_A1, "my swanky trash bin")
+ ).then_return(mock_trash)
+
+ result = subject.load_trash_bin("blah")
+
+ assert result == mock_trash
+
+
+def test_load_trash_bin_raises_for_staging_slot(
+ decoy: Decoy,
+ mock_core: ProtocolCore,
+ api_version: APIVersion,
+ subject: ProtocolContext,
+) -> None:
+ """It should raise when a trash bin load is attempted in a staging slot."""
+ decoy.when(mock_core.robot_type).then_return("OT-3 Standard")
+ decoy.when(
+ mock_validation.ensure_and_convert_deck_slot(
+ "bleh", api_version, "OT-3 Standard"
+ )
+ ).then_return(StagingSlotName.SLOT_A4)
+
+ with pytest.raises(ValueError, match="Staging areas not permitted"):
+ subject.load_trash_bin("bleh")
+
+
+def test_load_waste_chute(
+ decoy: Decoy,
+ mock_core: ProtocolCore,
+ api_version: APIVersion,
+ subject: ProtocolContext,
+) -> None:
+ """It should load a waste chute."""
+ mock_chute = decoy.mock(cls=WasteChute)
+ decoy.when(mock_core.load_waste_chute()).then_return(mock_chute)
+ result = subject.load_waste_chute()
+ assert result == mock_chute
+
+
def test_load_module(
decoy: Decoy,
mock_core: ProtocolCore,
@@ -785,6 +1028,26 @@ def test_load_module_with_mag_block_raises(subject: ProtocolContext) -> None:
)
+def test_load_module_on_staging_slot_raises(
+ decoy: Decoy,
+ mock_core: ProtocolCore,
+ mock_core_map: LoadedCoreMap,
+ api_version: APIVersion,
+ subject: ProtocolContext,
+) -> None:
+ """It should raise when attempting to load a module onto a staging slot."""
+ decoy.when(mock_validation.ensure_module_model("spline reticulator")).then_return(
+ TemperatureModuleModel.TEMPERATURE_V1
+ )
+ decoy.when(mock_core.robot_type).then_return("OT-3 Standard")
+ decoy.when(
+ mock_validation.ensure_and_convert_deck_slot(42, api_version, "OT-3 Standard")
+ ).then_return(StagingSlotName.SLOT_B4)
+
+ with pytest.raises(ValueError, match="Cannot load a module onto a staging slot."):
+ subject.load_module(module_name="spline reticulator", location=42)
+
+
def test_loaded_modules(
decoy: Decoy,
mock_core_map: LoadedCoreMap,
diff --git a/api/tests/opentrons/protocol_api/test_validation.py b/api/tests/opentrons/protocol_api/test_validation.py
index 13ec1d77db6..667349f0f8d 100644
--- a/api/tests/opentrons/protocol_api/test_validation.py
+++ b/api/tests/opentrons/protocol_api/test_validation.py
@@ -12,7 +12,7 @@
from opentrons_shared_data.pipette.dev_types import PipetteNameType
from opentrons_shared_data.robot.dev_types import RobotType
-from opentrons.types import Mount, DeckSlotName, Location, Point
+from opentrons.types import Mount, DeckSlotName, StagingSlotName, Location, Point
from opentrons.hardware_control.modules.types import (
ModuleModel,
MagneticModuleModel,
@@ -28,18 +28,28 @@
@pytest.mark.parametrize(
- ["input_value", "expected"],
+ ["input_mount", "input_pipette", "expected"],
[
- ("left", Mount.LEFT),
- ("right", Mount.RIGHT),
- ("LeFt", Mount.LEFT),
- (Mount.LEFT, Mount.LEFT),
- (Mount.RIGHT, Mount.RIGHT),
+ # Different string capitalizations:
+ ("left", PipetteNameType.P300_MULTI_GEN2, Mount.LEFT),
+ ("right", PipetteNameType.P300_MULTI_GEN2, Mount.RIGHT),
+ ("LeFt", PipetteNameType.P300_MULTI_GEN2, Mount.LEFT),
+ # Passing in a Mount:
+ (Mount.LEFT, PipetteNameType.P300_MULTI_GEN2, Mount.LEFT),
+ (Mount.RIGHT, PipetteNameType.P300_MULTI_GEN2, Mount.RIGHT),
+ # Special handling for the 96-channel:
+ ("left", PipetteNameType.P1000_96, Mount.LEFT),
+ ("right", PipetteNameType.P1000_96, Mount.LEFT),
+ (None, PipetteNameType.P1000_96, Mount.LEFT),
],
)
-def test_ensure_mount(input_value: Union[str, Mount], expected: Mount) -> None:
+def test_ensure_mount(
+ input_mount: Union[str, Mount, None],
+ input_pipette: PipetteNameType,
+ expected: Mount,
+) -> None:
"""It should properly map strings and mounts."""
- result = subject.ensure_mount(input_value)
+ result = subject.ensure_mount_for_pipette(input_mount, input_pipette)
assert result == expected
@@ -48,18 +58,31 @@ def test_ensure_mount_input_invalid() -> None:
with pytest.raises(
subject.InvalidPipetteMountError, match="must be 'left' or 'right'"
):
- subject.ensure_mount("oh no")
+ subject.ensure_mount_for_pipette("oh no", PipetteNameType.P300_MULTI_GEN2)
+
+    # The 96-channel accepts any mount argument, but it must still be a recognized mount value.
+ with pytest.raises(
+ subject.InvalidPipetteMountError, match="must be 'left' or 'right'"
+ ):
+ subject.ensure_mount_for_pipette("oh no", PipetteNameType.P1000_96)
with pytest.raises(
subject.PipetteMountTypeError,
match="'left', 'right', or an opentrons.types.Mount",
):
- subject.ensure_mount(42) # type: ignore[arg-type]
+ subject.ensure_mount_for_pipette(42, PipetteNameType.P300_MULTI_GEN2) # type: ignore[arg-type]
with pytest.raises(
subject.InvalidPipetteMountError, match="Use the left or right mounts instead"
):
- subject.ensure_mount(Mount.EXTENSION)
+ subject.ensure_mount_for_pipette(
+ Mount.EXTENSION, PipetteNameType.P300_MULTI_GEN2
+ )
+
+ with pytest.raises(
+ subject.InvalidPipetteMountError, match="You must specify a left or right mount"
+ ):
+ subject.ensure_mount_for_pipette(None, PipetteNameType.P300_MULTI_GEN2)
@pytest.mark.parametrize(
@@ -131,6 +154,11 @@ def test_ensure_pipette_input_invalid(input_value: str) -> None:
("a3", APIVersion(2, 15), "OT-3 Standard", DeckSlotName.SLOT_A3),
("A3", APIVersion(2, 15), "OT-2 Standard", DeckSlotName.FIXED_TRASH),
("A3", APIVersion(2, 15), "OT-3 Standard", DeckSlotName.SLOT_A3),
+ # Staging slots:
+ ("A4", APIVersion(2, 16), "OT-3 Standard", StagingSlotName.SLOT_A4),
+ ("b4", APIVersion(2, 16), "OT-3 Standard", StagingSlotName.SLOT_B4),
+ ("C4", APIVersion(2, 16), "OT-3 Standard", StagingSlotName.SLOT_C4),
+ ("d4", APIVersion(2, 16), "OT-3 Standard", StagingSlotName.SLOT_D4),
],
)
def test_ensure_and_convert_deck_slot(
@@ -162,6 +190,7 @@ def test_ensure_and_convert_deck_slot(
APIVersionError,
'"A1" requires apiLevel 2.15. Increase your protocol\'s apiLevel, or use slot "10" instead.',
),
+ ("A4", APIVersion(2, 15), APIVersionError, "Using a staging deck slot"),
],
)
@pytest.mark.parametrize("input_robot_type", ["OT-2 Standard", "OT-3 Standard"])
diff --git a/api/tests/opentrons/protocol_api_integration/__init__.py b/api/tests/opentrons/protocol_api_integration/__init__.py
new file mode 100644
index 00000000000..58628c808e6
--- /dev/null
+++ b/api/tests/opentrons/protocol_api_integration/__init__.py
@@ -0,0 +1,10 @@
+"""Integration tests for the Python Protocol API.
+
+These test the Python Protocol API from the point of view of the user's Python protocol.
+
+They do not make sure the robot would actually move to the right place--that is the job of
+other tests, such as g-code-testing.
+
+These are supplementary tests for Python Protocol API features that depend on nontrivial
+interaction between the Protocol API and the lower-level layers it drives.
+"""
diff --git a/api/tests/opentrons/protocol_api_integration/test_pipette_movement_deck_conflicts.py b/api/tests/opentrons/protocol_api_integration/test_pipette_movement_deck_conflicts.py
new file mode 100644
index 00000000000..33e92086edb
--- /dev/null
+++ b/api/tests/opentrons/protocol_api_integration/test_pipette_movement_deck_conflicts.py
@@ -0,0 +1,216 @@
+"""Tests for the APIs around deck conflicts during pipette movement."""
+
+import pytest
+
+from opentrons import simulate
+from opentrons.protocol_api import COLUMN, ALL
+from opentrons.protocol_api.core.engine.deck_conflict import (
+ PartialTipMovementNotAllowedError,
+)
+
+
+@pytest.mark.ot3_only
+def test_deck_conflicts_for_96_ch_a12_column_configuration() -> None:
+ """It should raise errors for the expected deck conflicts."""
+ protocol_context = simulate.get_protocol_api(version="2.16", robot_type="Flex")
+ trash_labware = protocol_context.load_labware(
+ "opentrons_1_trash_3200ml_fixed", "A3"
+ )
+ badly_placed_tiprack = protocol_context.load_labware(
+ "opentrons_flex_96_tiprack_50ul", "C2"
+ )
+ well_placed_tiprack = protocol_context.load_labware(
+ "opentrons_flex_96_tiprack_50ul", "C1"
+ )
+ tiprack_on_adapter = protocol_context.load_labware(
+ "opentrons_flex_96_tiprack_50ul",
+ "C3",
+ adapter="opentrons_flex_96_tiprack_adapter",
+ )
+
+ thermocycler = protocol_context.load_module("thermocyclerModuleV2")
+ tc_adjacent_plate = protocol_context.load_labware(
+ "opentrons_96_wellplate_200ul_pcr_full_skirt", "A2"
+ )
+ accessible_plate = thermocycler.load_labware(
+ "opentrons_96_wellplate_200ul_pcr_full_skirt"
+ )
+
+ instrument = protocol_context.load_instrument("flex_96channel_1000", mount="left")
+ instrument.trash_container = trash_labware
+
+ # ############ SHORT LABWARE ################
+ # These labware should be to the west of tall labware to avoid any partial tip deck conflicts
+ badly_placed_labware = protocol_context.load_labware(
+ "nest_96_wellplate_200ul_flat", "D2"
+ )
+ well_placed_labware = protocol_context.load_labware(
+ "nest_96_wellplate_200ul_flat", "D3"
+ )
+
+ # ############ TALL LABWARE ##############
+ protocol_context.load_labware(
+ "opentrons_10_tuberack_falcon_4x50ml_6x15ml_conical", "D1"
+ )
+
+ # ########### Use Partial Nozzles #############
+ instrument.configure_nozzle_layout(style=COLUMN, start="A12")
+
+ with pytest.raises(
+ PartialTipMovementNotAllowedError, match="collision with items in deck slot"
+ ):
+ instrument.pick_up_tip(badly_placed_tiprack.wells_by_name()["A1"])
+
+ # No error since no tall item in west slot of destination slot
+ instrument.pick_up_tip(well_placed_tiprack.wells_by_name()["A1"])
+ instrument.aspirate(50, well_placed_labware.wells_by_name()["A4"])
+
+ with pytest.raises(
+ PartialTipMovementNotAllowedError, match="collision with items in deck slot D1"
+ ):
+ instrument.dispense(50, badly_placed_labware.wells()[0])
+
+ with pytest.raises(
+ PartialTipMovementNotAllowedError, match="collision with thermocycler lid"
+ ):
+ instrument.dispense(10, tc_adjacent_plate.wells_by_name()["A1"])
+
+    # No error because we dispense from high above the plate, so the tip clears the tuberack in the west slot
+ instrument.dispense(15, badly_placed_labware.wells_by_name()["A1"].top(150))
+
+ thermocycler.open_lid() # type: ignore[union-attr]
+
+ # Will NOT raise error since first column of TC labware is accessible
+ # (it is just a few mm away from the left bound)
+ instrument.dispense(25, accessible_plate.wells_by_name()["A1"])
+
+ instrument.drop_tip()
+
+ # ######## CHANGE CONFIG TO ALL #########
+ instrument.configure_nozzle_layout(style=ALL, tip_racks=[tiprack_on_adapter])
+
+ # No error because of full config
+ instrument.pick_up_tip()
+
+ # No error NOW because of full config
+ instrument.aspirate(50, badly_placed_labware.wells_by_name()["A1"])
+
+ # No error
+ instrument.dispense(50, accessible_plate.wells_by_name()["A1"])
+
+
+@pytest.mark.ot3_only
+def test_close_shave_deck_conflicts_for_96_ch_a12_column_configuration() -> None:
+ """Shouldn't raise errors for "almost collision"s."""
+ protocol_context = simulate.get_protocol_api(version="2.16", robot_type="Flex")
+ res12 = protocol_context.load_labware("nest_12_reservoir_15ml", "C3")
+
+ # Mag block and tiprack adapter are very close to the destination reservoir labware
+ protocol_context.load_module("magneticBlockV1", "D2")
+ protocol_context.load_labware(
+ "opentrons_flex_96_tiprack_200ul",
+ "B3",
+ adapter="opentrons_flex_96_tiprack_adapter",
+ )
+ tiprack_8 = protocol_context.load_labware("opentrons_flex_96_tiprack_200ul", "B2")
+ hs = protocol_context.load_module("heaterShakerModuleV1", "D1")
+ hs_adapter = hs.load_adapter("opentrons_96_deep_well_adapter")
+ deepwell = hs_adapter.load_labware("nest_96_wellplate_2ml_deep")
+ protocol_context.load_trash_bin("A3")
+ p1000_96 = protocol_context.load_instrument("flex_96channel_1000")
+ p1000_96.configure_nozzle_layout(style=COLUMN, start="A12", tip_racks=[tiprack_8])
+
+ hs.close_labware_latch() # type: ignore[union-attr]
+ p1000_96.distribute(
+ 15,
+ res12.wells()[0],
+ deepwell.rows()[0],
+ disposal_vol=0,
+ )
+
+
+@pytest.mark.ot3_only
+def test_deck_conflicts_for_96_ch_a1_column_configuration() -> None:
+ """It should raise errors for expected deck conflicts."""
+ protocol = simulate.get_protocol_api(version="2.16", robot_type="Flex")
+ instrument = protocol.load_instrument("flex_96channel_1000", mount="left")
+ trash_labware = protocol.load_labware("opentrons_1_trash_3200ml_fixed", "A3")
+ instrument.trash_container = trash_labware
+
+ badly_placed_tiprack = protocol.load_labware("opentrons_flex_96_tiprack_50ul", "C2")
+ well_placed_tiprack = protocol.load_labware("opentrons_flex_96_tiprack_50ul", "A1")
+ tiprack_on_adapter = protocol.load_labware(
+ "opentrons_flex_96_tiprack_50ul",
+ "C3",
+ adapter="opentrons_flex_96_tiprack_adapter",
+ )
+
+ # ############ SHORT LABWARE ################
+ # These labware should be to the east of tall labware to avoid any partial tip deck conflicts
+ badly_placed_plate = protocol.load_labware("nest_96_wellplate_200ul_flat", "B1")
+ well_placed_plate = protocol.load_labware("nest_96_wellplate_200ul_flat", "B3")
+
+ # ############ TALL LABWARE ###############
+ my_tuberack = protocol.load_labware(
+ "opentrons_10_tuberack_falcon_4x50ml_6x15ml_conical", "B2"
+ )
+
+ # ########### Use Partial Nozzles #############
+ instrument.configure_nozzle_layout(style=COLUMN, start="A1")
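+    # With the A1 column layout the overhang is mirrored: the pipette body extends to the
+    # east of the target, so the checks below look at labware to the right of each move.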
+
+ with pytest.raises(
+ PartialTipMovementNotAllowedError, match="collision with items in deck slot"
+ ):
+ instrument.pick_up_tip(badly_placed_tiprack.wells_by_name()["H12"])
+
+    # No error because we are within the pipette extent bounds and there is no taller labware to the right of the tiprack
+ instrument.pick_up_tip(well_placed_tiprack.wells_by_name()["A12"])
+
+    # No error because there is no labware to the right of the plate, and well A10 is just inside the right bound
+ instrument.aspirate(25, well_placed_plate.wells_by_name()["A10"])
+
+    # No error because we dispense from high above the plate, so it clears the tuberack on the right
+ instrument.dispense(25, badly_placed_plate.wells_by_name()["A1"].top(150))
+
+ with pytest.raises(
+ PartialTipMovementNotAllowedError, match="collision with items in deck slot"
+ ):
+ instrument.aspirate(25, badly_placed_plate.wells_by_name()["A10"])
+
+ with pytest.raises(
+ PartialTipMovementNotAllowedError, match="outside of robot bounds"
+ ):
+ instrument.aspirate(25, well_placed_plate.wells_by_name()["A11"])
+
+    # No error because there is no taller labware on the right
+ instrument.aspirate(10, my_tuberack.wells_by_name()["A1"])
+
+ with pytest.raises(
+ PartialTipMovementNotAllowedError, match="outside of robot bounds"
+ ):
+ # Raises error because drop tip alternation makes the pipette drop the tips
+    # near the trash bin labware's right edge, which is out of bounds for column 1 nozzles
+ # We should probably move this tip drop location within the nozzles' accessible area,
+ # but since we do not recommend loading the trash as labware (there are other things
+ # wrong with that approach), it is not a critical issue.
+ instrument.drop_tip()
+
+ instrument.trash_container = None # type: ignore
+ protocol.load_trash_bin("C1")
+
+ # This doesn't raise an error because it now treats the trash bin as an addressable area
+ # and the bounds check doesn't yet check moves to addressable areas.
+ # The aim is to do checks for ALL moves, but also, fix the offset used for tip drop alternation.
+ instrument.drop_tip()
+
+ # ######## CHANGE CONFIG TO ALL #########
+ instrument.configure_nozzle_layout(style=ALL, tip_racks=[tiprack_on_adapter])
+
+ # No error because of full config
+ instrument.pick_up_tip()
+
+ # No error NOW because of full config
+ instrument.aspirate(50, badly_placed_plate.wells_by_name()["A1"])
+
+ # No error NOW because of full config
+ instrument.dispense(50, badly_placed_plate.wells_by_name()["A1"].bottom())
diff --git a/api/tests/opentrons/protocol_api_integration/test_trashes.py b/api/tests/opentrons/protocol_api_integration/test_trashes.py
new file mode 100644
index 00000000000..1c8250fe44e
--- /dev/null
+++ b/api/tests/opentrons/protocol_api_integration/test_trashes.py
@@ -0,0 +1,164 @@
+"""Tests for the APIs around waste chutes and trash bins."""
+
+
+from opentrons import protocol_api, simulate
+from opentrons.protocols.api_support.types import APIVersion
+
+import contextlib
+from typing import ContextManager, Optional, Type
+from typing_extensions import Literal
+
+import pytest
+
+
+@pytest.mark.parametrize(
+ ("version", "robot_type", "expected_trash_class"),
+ [
+ ("2.13", "OT-2", protocol_api.Labware),
+ ("2.14", "OT-2", protocol_api.Labware),
+ ("2.15", "OT-2", protocol_api.Labware),
+ pytest.param(
+ "2.15",
+ "Flex",
+ protocol_api.Labware,
+ marks=pytest.mark.ot3_only, # Simulating a Flex protocol requires a Flex hardware API.
+ ),
+ pytest.param(
+ "2.16",
+ "OT-2",
+ protocol_api.TrashBin,
+ ),
+ pytest.param(
+ "2.16",
+ "Flex",
+ None,
+ marks=pytest.mark.ot3_only, # Simulating a Flex protocol requires a Flex hardware API.
+ ),
+ ],
+)
+def test_fixed_trash_presence(
+ robot_type: Literal["OT-2", "Flex"],
+ version: str,
+ expected_trash_class: Optional[Type[object]],
+) -> None:
+ """Test the presence of the fixed trash.
+
+ Certain combinations of API version and robot type have a fixed trash.
+ For those that do, ProtocolContext.fixed_trash and InstrumentContext.trash_container
+ should point to it. The type of the object depends on the API version.
+ """
+ protocol = simulate.get_protocol_api(version=version, robot_type=robot_type)
+ instrument = protocol.load_instrument(
+ "p300_single_gen2" if robot_type == "OT-2" else "flex_1channel_50",
+ mount="left",
+ )
+
+ if expected_trash_class is None:
+ with pytest.raises(
+ Exception,
+ match="Fixed Trash is not supported on Flex protocols in API Version 2.16 and above.",
+ ):
+ protocol.fixed_trash
+ with pytest.raises(Exception, match="No trash container has been defined"):
+ instrument.trash_container
+
+ else:
+ assert isinstance(protocol.fixed_trash, expected_trash_class)
+ assert instrument.trash_container is protocol.fixed_trash
+
+
+@pytest.mark.ot3_only # Simulating a Flex protocol requires a Flex hardware API.
+def test_trash_search() -> None:
+ """Test the automatic trash search for protocols without a fixed trash."""
+ protocol = simulate.get_protocol_api(version="2.16", robot_type="Flex")
+ instrument = protocol.load_instrument("flex_1channel_50", mount="left")
+
+ # By default, there should be no trash.
+ with pytest.raises(
+ Exception,
+ match="Fixed Trash is not supported on Flex protocols in API Version 2.16 and above.",
+ ):
+ protocol.fixed_trash
+ with pytest.raises(Exception, match="No trash container has been defined"):
+ instrument.trash_container
+
+ loaded_first = protocol.load_trash_bin("A1")
+ loaded_second = protocol.load_trash_bin("B1")
+
+ # After loading some trashes, there should still be no protocol.fixed_trash...
+ with pytest.raises(
+ Exception,
+ match="Fixed Trash is not supported on Flex protocols in API Version 2.16 and above.",
+ ):
+ protocol.fixed_trash
+ # ...but instrument.trash_container should automatically update to point to
+ # the first trash that we loaded.
+ assert instrument.trash_container is loaded_first
+
+ # You should be able to override instrument.trash_container explicitly.
+ instrument.trash_container = loaded_second
+ assert instrument.trash_container is loaded_second
+
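+# A minimal usage sketch (assuming the same simulated Flex setup as above, with
+# hypothetical slot choices): the first trash bin loaded becomes the default drop-tip
+# destination unless it is overridden explicitly.
+#
+#     protocol = simulate.get_protocol_api(version="2.16", robot_type="Flex")
+#     pipette = protocol.load_instrument("flex_1channel_50", mount="left")
+#     default_trash = protocol.load_trash_bin("A3")
+#     backup_trash = protocol.load_trash_bin("B3")
+#     pipette.trash_container = backup_trash  # explicit override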
+
+@pytest.mark.parametrize(
+ ("version", "robot_type", "expect_load_to_succeed"),
+ [
+ pytest.param(
+ "2.13",
+ "OT-2",
+ False,
+ # This xfail (the system does let you load a labware onto slot 12, and does not raise)
+            # is surprising to me. It may be a bug in old PAPI versions.
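+            # (The strict xfail hinges on pytest.raises failing with pytest.fail.Exception
+            # when nothing is raised, which is what raises= matches here; if a future version
+            # starts raising for real, the xfail will surface as an unexpected pass.)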
+ marks=pytest.mark.xfail(strict=True, raises=pytest.fail.Exception),
+ ),
+ ("2.14", "OT-2", False),
+ ("2.15", "OT-2", False),
+ pytest.param(
+ "2.15",
+ "Flex",
+ False,
+ marks=pytest.mark.ot3_only, # Simulating a Flex protocol requires a Flex hardware API.
+ ),
+ pytest.param(
+ "2.16",
+ "OT-2",
+ False,
+ ),
+ pytest.param(
+ "2.16",
+ "Flex",
+ True,
+ marks=pytest.mark.ot3_only, # Simulating a Flex protocol requires a Flex hardware API.
+ ),
+ ],
+)
+def test_fixed_trash_load_conflicts(
+ robot_type: Literal["Flex", "OT-2"],
+ version: str,
+ expect_load_to_succeed: bool,
+) -> None:
+ """Test loading something onto the location historically used for the fixed trash.
+
+ In configurations where there is a fixed trash, this should be disallowed.
+ In configurations without a fixed trash, this should be allowed.
+ """
+ protocol = simulate.get_protocol_api(version=version, robot_type=robot_type)
+
+ if expect_load_to_succeed:
+ expected_error: ContextManager[object] = contextlib.nullcontext()
+ else:
+        # If we're expecting an error, it'll be a LocationIsOccupiedError for 2.15 and below;
+        # otherwise it will fail with an IncompatibleAddressableAreaError, since slot 12 will not be in the deck config.
+ if APIVersion.from_string(version) < APIVersion(2, 16):
+ error_name = "LocationIsOccupiedError"
+ else:
+ error_name = "IncompatibleAddressableAreaError"
+
+ expected_error = pytest.raises(
+ Exception,
+ # Exact message doesn't matter, as long as it's definitely a labware load or addressable area conflict.
+ match=error_name,
+ )
+
+ with expected_error:
+ protocol.load_labware("opentrons_96_wellplate_200ul_pcr_full_skirt", 12)
diff --git a/api/tests/opentrons/protocol_api_old/core/protocol_api/test_instrument_context.py b/api/tests/opentrons/protocol_api_old/core/protocol_api/test_instrument_context.py
index 77438c1f4ed..7cea4113e28 100644
--- a/api/tests/opentrons/protocol_api_old/core/protocol_api/test_instrument_context.py
+++ b/api/tests/opentrons/protocol_api_old/core/protocol_api/test_instrument_context.py
@@ -2,6 +2,7 @@
from typing import cast
import pytest
+from _pytest.fixtures import SubRequest
from decoy import Decoy
from opentrons.types import Mount
@@ -18,9 +19,9 @@
@pytest.fixture(params=[Mount.LEFT, Mount.RIGHT])
-def mount(request: pytest.FixtureRequest) -> Mount:
+def mount(request: SubRequest) -> Mount:
"""Set the subject's mount."""
- return cast(Mount, request.param) # type: ignore[attr-defined]
+ return cast(Mount, request.param)
@pytest.fixture
diff --git a/api/tests/opentrons/protocol_api_old/core/simulator/conftest.py b/api/tests/opentrons/protocol_api_old/core/simulator/conftest.py
index 015040a39f9..723dc568add 100644
--- a/api/tests/opentrons/protocol_api_old/core/simulator/conftest.py
+++ b/api/tests/opentrons/protocol_api_old/core/simulator/conftest.py
@@ -117,8 +117,8 @@ def tip_rack(minimal_labware_def: LabwareDefinition) -> LegacyLabwareCore:
tip_rack_parameters["isTiprack"] = True
tip_rack_parameters["tipLength"] = 123
tip_rack_definition["parameters"] = tip_rack_parameters
- tip_rack_definition["wells"]["A1"]["totalLiquidVolume"] = 200 # type: ignore
- tip_rack_definition["wells"]["A2"]["totalLiquidVolume"] = 200 # type: ignore
+ tip_rack_definition["wells"]["A1"]["totalLiquidVolume"] = 200
+ tip_rack_definition["wells"]["A2"]["totalLiquidVolume"] = 200
"""Labware fixture."""
return LegacyLabwareCore(
diff --git a/api/tests/opentrons/protocol_api_old/core/simulator/test_instrument_context.py b/api/tests/opentrons/protocol_api_old/core/simulator/test_instrument_context.py
index d0916dd4108..c9bd57c0997 100644
--- a/api/tests/opentrons/protocol_api_old/core/simulator/test_instrument_context.py
+++ b/api/tests/opentrons/protocol_api_old/core/simulator/test_instrument_context.py
@@ -1,8 +1,9 @@
"""Test instrument context simulation."""
-from typing import Callable
+from typing import Callable, cast
import pytest
-from pytest_lazyfixture import lazy_fixture # type: ignore[import]
+from _pytest.fixtures import SubRequest
+from pytest_lazyfixture import lazy_fixture # type: ignore[import-untyped]
from opentrons.protocol_api.core.common import InstrumentCore, LabwareCore
from opentrons.types import Location, Point
@@ -23,8 +24,8 @@
lazy_fixture("simulating_instrument_context"),
]
)
-def subject(request: pytest.FixtureRequest) -> InstrumentCore:
- return request.param # type: ignore[attr-defined, no-any-return]
+def subject(request: SubRequest) -> InstrumentCore:
+ return cast(InstrumentCore, request.param)
def test_same_pipette(
@@ -269,7 +270,7 @@ def _aspirate_blowout(i: InstrumentCore, labware: LabwareCore) -> None:
@pytest.mark.parametrize(
argnames=["side_effector"],
argvalues=[
- [lambda i, l: None],
+ [lambda i, l: None], # noqa: E741
[_aspirate],
[_aspirate_dispense],
[_aspirate_blowout],
diff --git a/api/tests/opentrons/protocol_api_old/core/simulator/test_protocol_context.py b/api/tests/opentrons/protocol_api_old/core/simulator/test_protocol_context.py
index f0c634b02cb..022ce3e5853 100644
--- a/api/tests/opentrons/protocol_api_old/core/simulator/test_protocol_context.py
+++ b/api/tests/opentrons/protocol_api_old/core/simulator/test_protocol_context.py
@@ -1,6 +1,8 @@
"""Test instrument context simulation."""
+from typing import cast
import pytest
-from pytest_lazyfixture import lazy_fixture # type: ignore[import]
+from _pytest.fixtures import SubRequest
+from pytest_lazyfixture import lazy_fixture # type: ignore[import-untyped]
from opentrons_shared_data.pipette.dev_types import PipetteNameType
@@ -14,8 +16,8 @@
lazy_fixture("simulating_protocol_context"),
]
)
-def subject(request: pytest.FixtureRequest) -> ProtocolCore:
- return request.param # type: ignore[attr-defined, no-any-return]
+def subject(request: SubRequest) -> ProtocolCore:
+ return cast(ProtocolCore, request.param)
@pytest.mark.ot2_only
@@ -52,3 +54,11 @@ def test_replacing_instrument_tip_state(
assert pip1.has_tip() is False
assert pip2.has_tip() is False
+
+
+@pytest.mark.ot2_only
+def test_load_instrument_raises(simulating_protocol_context: ProtocolCore) -> None:
+ with pytest.raises(ValueError):
+ simulating_protocol_context.load_instrument(
+ instrument_name=PipetteNameType.P1000_SINGLE_FLEX, mount=Mount.RIGHT
+ )
diff --git a/api/tests/opentrons/protocol_api_old/test_context.py b/api/tests/opentrons/protocol_api_old/test_context.py
index abcdd06a6d8..c356c477f7f 100644
--- a/api/tests/opentrons/protocol_api_old/test_context.py
+++ b/api/tests/opentrons/protocol_api_old/test_context.py
@@ -85,7 +85,7 @@ async def test_motion(ctx, hardware):
old_pos[Axis.X] = 0.0
old_pos[Axis.Y] = 0.0
old_pos[Axis.A] = 0.0
- old_pos[Axis.C] = 2.0
+ old_pos[Axis.C] = 2.5
assert await hardware.current_position(instr._core.get_mount()) == pytest.approx(
old_pos
)
@@ -876,46 +876,47 @@ def fake_execute_transfer(xfer_plan):
def test_flow_rate(ctx, monkeypatch):
- old_sfm = ctx._core.get_hardware()
+ instr = ctx.load_instrument("p300_single", Mount.RIGHT)
+ old_sfm = instr._core.set_flow_rate
- def pass_on(mount, aspirate=None, dispense=None, blow_out=None):
- old_sfm(mount, aspirate=None, dispense=None, blow_out=None)
+ def pass_on(aspirate=None, dispense=None, blow_out=None):
+ old_sfm(aspirate=aspirate, dispense=dispense, blow_out=blow_out)
set_flow_rate = mock.Mock(side_effect=pass_on)
- monkeypatch.setattr(ctx._core.get_hardware(), "set_flow_rate", set_flow_rate)
- instr = ctx.load_instrument("p300_single", Mount.RIGHT)
+ monkeypatch.setattr(instr._core, "set_flow_rate", set_flow_rate)
ctx.home()
instr.flow_rate.aspirate = 1
- assert set_flow_rate.called_once_with(Mount.RIGHT, aspirate=1)
+ set_flow_rate.assert_called_once_with(aspirate=1)
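+    # (assert_called_once_with checks the recorded call; the removed form, plain
+    # "assert mock.called_once_with(...)", only tested the truthiness of an auto-created
+    # Mock attribute and so could never fail.)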
set_flow_rate.reset_mock()
instr.flow_rate.dispense = 10
- assert set_flow_rate.called_once_with(Mount.RIGHT, dispense=10)
+ set_flow_rate.assert_called_once_with(dispense=10)
set_flow_rate.reset_mock()
instr.flow_rate.blow_out = 2
- assert set_flow_rate.called_once_with(Mount.RIGHT, blow_out=2)
+ set_flow_rate.assert_called_once_with(blow_out=2)
assert instr.flow_rate.aspirate == 1
assert instr.flow_rate.dispense == 10
assert instr.flow_rate.blow_out == 2
def test_pipette_speed(ctx, monkeypatch):
- old_sfm = ctx._core.get_hardware()
+ instr = ctx.load_instrument("p300_single", Mount.RIGHT)
+ old_sfm = instr._core.set_pipette_speed
- def pass_on(mount, aspirate=None, dispense=None, blow_out=None):
- old_sfm(aspirate=None, dispense=None, blow_out=None)
+ def pass_on(aspirate=None, dispense=None, blow_out=None):
+ old_sfm(aspirate=aspirate, dispense=dispense, blow_out=blow_out)
set_speed = mock.Mock(side_effect=pass_on)
- monkeypatch.setattr(ctx._core.get_hardware(), "set_pipette_speed", set_speed)
- instr = ctx.load_instrument("p300_single", Mount.RIGHT)
-
+ monkeypatch.setattr(instr._core, "set_pipette_speed", set_speed)
ctx.home()
instr.speed.aspirate = 1
- assert set_speed.called_once_with(Mount.RIGHT, dispense=1)
+ set_speed.assert_called_once_with(aspirate=1)
+ set_speed.reset_mock()
instr.speed.dispense = 10
+ set_speed.assert_called_once_with(dispense=10)
+ set_speed.reset_mock()
instr.speed.blow_out = 2
- assert set_speed.called_with(Mount.RIGHT, dispense=10)
- assert set_speed.called_with(Mount.RIGHT, blow_out=2)
+ set_speed.assert_called_once_with(blow_out=2)
assert instr.speed.aspirate == 1
assert instr.speed.dispense == 10
assert instr.speed.blow_out == 2
@@ -958,7 +959,7 @@ def test_order_of_module_load():
import opentrons.hardware_control as hardware_control
import opentrons.protocol_api as protocol_api
- mods = ["tempdeck", "thermocycler", "tempdeck"]
+ mods = {"tempdeck": ["111", "333"], "thermocycler": ["222"]}
thread_manager = hardware_control.ThreadManager(
hardware_control.API.build_hardware_simulator, attached_modules=mods
)
@@ -966,7 +967,7 @@ def test_order_of_module_load():
attached_modules = fake_hardware.attached_modules
hw_temp1 = attached_modules[0]
- hw_temp2 = attached_modules[2]
+ hw_temp2 = attached_modules[1]
ctx1 = protocol_api.create_protocol_context(
api_version=APIVersion(2, 13),
@@ -1187,11 +1188,11 @@ def raiser(*args, **kwargs):
mod = ctx.load_module("thermocycler")
assert isinstance(mod, ThermocyclerContext)
- mod._core.flag_unsafe_move = mock.MagicMock(side_effect=raiser) # type: ignore[attr-defined, assignment]
+ mod._core.flag_unsafe_move = mock.MagicMock(side_effect=raiser) # type: ignore[attr-defined]
instr = ctx.load_instrument("p1000_single", "left")
with pytest.raises(RuntimeError, match="Cannot"):
instr.move_to(Location(Point(0, 0, 0), None))
- mod._core.flag_unsafe_move.assert_called_once_with( # type: ignore[attr-defined]
+    mod._core.flag_unsafe_move.assert_called_once_with(  # type: ignore[attr-defined]
to_loc=Location(Point(0, 0, 0), None), from_loc=Location(Point(0, 0, 0), None)
)
diff --git a/api/tests/opentrons/protocol_api_old/test_labware.py b/api/tests/opentrons/protocol_api_old/test_labware.py
index a245fb5be5f..8f6f1da267b 100644
--- a/api/tests/opentrons/protocol_api_old/test_labware.py
+++ b/api/tests/opentrons/protocol_api_old/test_labware.py
@@ -15,6 +15,7 @@
from opentrons.protocols.api_support.types import APIVersion
from opentrons.protocol_api import labware, validation
from opentrons.protocol_api.core.labware import AbstractLabware
+from opentrons.protocol_api.core.well import AbstractWellCore
from opentrons.protocol_api.core.legacy import module_geometry
from opentrons.protocol_api.core.legacy.legacy_labware_core import LegacyLabwareCore
from opentrons.protocol_api.core.legacy.legacy_well_core import LegacyWellCore
@@ -543,7 +544,10 @@ def test_tiprack_list():
core_map=None, # type: ignore[arg-type]
)
- assert labware.select_tiprack_from_list([tiprack], 1) == (tiprack, tiprack["A1"])
+ assert labware.select_tiprack_from_list([tiprack], 1) == (
+ tiprack,
+ tiprack["A1"],
+ )
assert labware.select_tiprack_from_list([tiprack], 1, tiprack.wells()[1]) == (
tiprack,
@@ -652,7 +656,7 @@ def test_labware_hash_func_diff_implementation_same_version(
def test_set_offset(decoy: Decoy) -> None:
"""It should set the labware's offset using the implementation."""
- labware_impl = decoy.mock(cls=AbstractLabware)
+ labware_impl: AbstractLabware[AbstractWellCore] = decoy.mock(cls=AbstractLabware)
decoy.when(labware_impl.get_well_columns()).then_return([])
subject = labware.Labware(
core=labware_impl,
diff --git a/api/tests/opentrons/protocol_engine/clients/test_sync_client.py b/api/tests/opentrons/protocol_engine/clients/test_sync_client.py
index f172b93cab1..e4f5d7602ca 100644
--- a/api/tests/opentrons/protocol_engine/clients/test_sync_client.py
+++ b/api/tests/opentrons/protocol_engine/clients/test_sync_client.py
@@ -31,6 +31,7 @@
Liquid,
LabwareMovementStrategy,
LabwareOffsetVector,
+ AddressableOffsetVector,
)
@@ -67,6 +68,23 @@ def test_add_labware_definition(
assert result == expected_labware_uri
+def test_add_addressable_area(
+ decoy: Decoy,
+ transport: ChildThreadTransport,
+ subject: SyncClient,
+) -> None:
+ """It should add an addressable area."""
+ subject.add_addressable_area(addressable_area_name="cool-area")
+
+ decoy.verify(
+ transport.call_method(
+ "add_addressable_area",
+ addressable_area_name="cool-area",
+ ),
+ times=1,
+ )
+
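+# (These SyncClient tests share one decoy pattern: transport calls are stubbed with
+# decoy.when(...).then_return(...) and interactions are asserted with decoy.verify,
+# here with times=1 to require exactly one matching call.)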
+
def test_add_liquid(
decoy: Decoy,
transport: ChildThreadTransport,
@@ -143,6 +161,30 @@ def test_load_labware(
assert result == expected_result
+def test_reload_labware(
+ decoy: Decoy,
+ transport: ChildThreadTransport,
+ subject: SyncClient,
+) -> None:
+ """It should execute a reload labware command."""
+ expected_request = commands.ReloadLabwareCreate(
+ params=commands.ReloadLabwareParams(
+ labwareId="some-labware-id",
+ )
+ )
+
+ expected_result = commands.ReloadLabwareResult(
+ labwareId="some-labware-id", offsetId=None
+ )
+ decoy.when(transport.execute_command(request=expected_request)).then_return(
+ expected_result
+ )
+ result = subject.reload_labware(
+ labware_id="some-labware-id",
+ )
+ assert result == expected_result
+
+
def test_load_module(
decoy: Decoy,
transport: ChildThreadTransport,
@@ -267,6 +309,74 @@ def test_move_to_well(
assert result == response
+def test_move_to_addressable_area(
+ decoy: Decoy,
+ transport: ChildThreadTransport,
+ subject: SyncClient,
+) -> None:
+ """It should execute a move to addressable area command."""
+ request = commands.MoveToAddressableAreaCreate(
+ params=commands.MoveToAddressableAreaParams(
+ pipetteId="123",
+ addressableAreaName="abc",
+ offset=AddressableOffsetVector(x=3, y=2, z=1),
+ forceDirect=True,
+ minimumZHeight=4.56,
+ speed=7.89,
+ )
+ )
+ response = commands.MoveToAddressableAreaResult(position=DeckPoint(x=4, y=5, z=6))
+
+ decoy.when(transport.execute_command(request=request)).then_return(response)
+
+ result = subject.move_to_addressable_area(
+ pipette_id="123",
+ addressable_area_name="abc",
+ offset=AddressableOffsetVector(x=3, y=2, z=1),
+ force_direct=True,
+ minimum_z_height=4.56,
+ speed=7.89,
+ )
+
+ assert result == response
+
+
+def test_move_to_addressable_area_for_drop_tip(
+ decoy: Decoy,
+ transport: ChildThreadTransport,
+ subject: SyncClient,
+) -> None:
+ """It should execute a move to addressable area for drop tip command."""
+ request = commands.MoveToAddressableAreaForDropTipCreate(
+ params=commands.MoveToAddressableAreaForDropTipParams(
+ pipetteId="123",
+ addressableAreaName="abc",
+ offset=AddressableOffsetVector(x=3, y=2, z=1),
+ forceDirect=True,
+ minimumZHeight=4.56,
+ speed=7.89,
+ alternateDropLocation=True,
+ )
+ )
+ response = commands.MoveToAddressableAreaForDropTipResult(
+ position=DeckPoint(x=4, y=5, z=6)
+ )
+
+ decoy.when(transport.execute_command(request=request)).then_return(response)
+
+ result = subject.move_to_addressable_area_for_drop_tip(
+ pipette_id="123",
+ addressable_area_name="abc",
+ offset=AddressableOffsetVector(x=3, y=2, z=1),
+ force_direct=True,
+ minimum_z_height=4.56,
+ speed=7.89,
+ alternate_drop_location=True,
+ )
+
+ assert result == response
+
+
def test_move_to_coordinates(
decoy: Decoy,
transport: ChildThreadTransport,
diff --git a/api/tests/opentrons/protocol_engine/commands/calibration/test_move_to_maintenance_position.py b/api/tests/opentrons/protocol_engine/commands/calibration/test_move_to_maintenance_position.py
index 8cc9017a021..df58ab7dbc0 100644
--- a/api/tests/opentrons/protocol_engine/commands/calibration/test_move_to_maintenance_position.py
+++ b/api/tests/opentrons/protocol_engine/commands/calibration/test_move_to_maintenance_position.py
@@ -1,6 +1,6 @@
"""Test for Calibration Set Up Position Implementation."""
from __future__ import annotations
-from typing import TYPE_CHECKING, Mapping
+from typing import TYPE_CHECKING
import pytest
from decoy import Decoy
@@ -32,106 +32,68 @@ def subject(
@pytest.mark.ot3_only
-@pytest.mark.parametrize(
- "maintenance_position, verify_axes",
- [
- (
- MaintenancePosition.ATTACH_INSTRUMENT,
- {Axis.Z_L: 400},
- ),
- (
- MaintenancePosition.ATTACH_PLATE,
- {Axis.Z_L: 90, Axis.Z_R: 105},
- ),
- ],
-)
-async def test_calibration_move_to_location_implementation(
+@pytest.mark.parametrize("mount_type", [MountType.LEFT, MountType.RIGHT])
+async def test_calibration_move_to_location_implementation_for_attach_instrument(
decoy: Decoy,
subject: MoveToMaintenancePositionImplementation,
state_view: StateView,
ot3_hardware_api: OT3API,
- maintenance_position: MaintenancePosition,
- verify_axes: Mapping[Axis, float],
+ mount_type: MountType,
) -> None:
"""Command should get a move to target location and critical point and should verify move_to call."""
params = MoveToMaintenancePositionParams(
- mount=MountType.LEFT, maintenancePosition=maintenance_position
+ mount=mount_type, maintenancePosition=MaintenancePosition.ATTACH_INSTRUMENT
)
decoy.when(
await ot3_hardware_api.gantry_position(
Mount.LEFT, critical_point=CriticalPoint.MOUNT
)
- ).then_return(Point(x=1, y=2, z=3))
-
- decoy.when(
- ot3_hardware_api.get_instrument_max_height(
- Mount.LEFT, critical_point=CriticalPoint.MOUNT
- )
- ).then_return(250)
+ ).then_return(Point(x=1, y=2, z=250))
decoy.when(ot3_hardware_api.get_instrument_max_height(Mount.LEFT)).then_return(300)
result = await subject.execute(params=params)
assert result == MoveToMaintenancePositionResult()
+ hw_mount = mount_type.to_hw_mount()
decoy.verify(
- await ot3_hardware_api.move_to(
- mount=Mount.LEFT,
- abs_position=Point(x=1, y=2, z=250),
- critical_point=CriticalPoint.MOUNT,
- ),
- times=1,
- )
-
- decoy.verify(
+ await ot3_hardware_api.prepare_for_mount_movement(Mount.LEFT),
+ await ot3_hardware_api.retract(Mount.LEFT),
await ot3_hardware_api.move_to(
mount=Mount.LEFT,
abs_position=Point(x=0, y=110, z=250),
critical_point=CriticalPoint.MOUNT,
),
- times=1,
- )
-
- decoy.verify(
+ await ot3_hardware_api.prepare_for_mount_movement(hw_mount),
await ot3_hardware_api.move_axes(
- position=verify_axes,
+ position={Axis.by_mount(hw_mount): 400},
+ ),
+ await ot3_hardware_api.disengage_axes(
+ [Axis.by_mount(hw_mount)],
),
- times=1,
)
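+    # (Passing several rehearsals to one decoy.verify call should check them as a single
+    # in-order sequence, so this asserts the prepare/retract/move/disengage ordering rather
+    # than just that each call happened.)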
- if params.maintenancePosition == MaintenancePosition.ATTACH_INSTRUMENT:
- decoy.verify(
- await ot3_hardware_api.disengage_axes(
- list(verify_axes.keys()),
- ),
- times=1,
- )
-
@pytest.mark.ot3_only
-async def test_calibration_move_to_location_implementation_for_gripper(
+@pytest.mark.parametrize("mount_type", [MountType.LEFT, MountType.RIGHT])
+async def test_calibration_move_to_location_implementation_for_attach_plate(
decoy: Decoy,
subject: MoveToMaintenancePositionImplementation,
state_view: StateView,
ot3_hardware_api: OT3API,
+ mount_type: MountType,
) -> None:
"""Command should get a move to target location and critical point and should verify move_to call."""
params = MoveToMaintenancePositionParams(
- mount=MountType.LEFT, maintenancePosition=MaintenancePosition.ATTACH_INSTRUMENT
+ mount=mount_type, maintenancePosition=MaintenancePosition.ATTACH_PLATE
)
decoy.when(
await ot3_hardware_api.gantry_position(
Mount.LEFT, critical_point=CriticalPoint.MOUNT
)
- ).then_return(Point(x=1, y=2, z=3))
-
- decoy.when(
- ot3_hardware_api.get_instrument_max_height(
- Mount.LEFT, critical_point=CriticalPoint.MOUNT
- )
- ).then_return(250)
+ ).then_return(Point(x=1, y=2, z=250))
decoy.when(ot3_hardware_api.get_instrument_max_height(Mount.LEFT)).then_return(300)
@@ -139,21 +101,56 @@ async def test_calibration_move_to_location_implementation_for_gripper(
assert result == MoveToMaintenancePositionResult()
decoy.verify(
+ await ot3_hardware_api.prepare_for_mount_movement(Mount.LEFT),
+ await ot3_hardware_api.retract(Mount.LEFT),
await ot3_hardware_api.move_to(
mount=Mount.LEFT,
- abs_position=Point(x=1, y=2, z=250),
+ abs_position=Point(x=0, y=110, z=250),
critical_point=CriticalPoint.MOUNT,
),
- times=1,
+ await ot3_hardware_api.move_axes(
+ position={
+ Axis.Z_L: 90,
+ Axis.Z_R: 105,
+ }
+ ),
+ await ot3_hardware_api.disengage_axes(
+ [Axis.Z_L, Axis.Z_R],
+ ),
+ )
+
+
+@pytest.mark.ot3_only
+async def test_calibration_move_to_location_implementation_for_gripper(
+ decoy: Decoy,
+ subject: MoveToMaintenancePositionImplementation,
+ state_view: StateView,
+ ot3_hardware_api: OT3API,
+) -> None:
+ """Command should get a move to target location and critical point and should verify move_to call."""
+ params = MoveToMaintenancePositionParams(
+ mount=MountType.EXTENSION,
+ maintenancePosition=MaintenancePosition.ATTACH_INSTRUMENT,
)
+ decoy.when(
+ await ot3_hardware_api.gantry_position(
+ Mount.LEFT, critical_point=CriticalPoint.MOUNT
+ )
+ ).then_return(Point(x=1, y=2, z=250))
+ decoy.when(ot3_hardware_api.get_instrument_max_height(Mount.LEFT)).then_return(300)
+
+ result = await subject.execute(params=params)
+ assert result == MoveToMaintenancePositionResult()
+
decoy.verify(
+ await ot3_hardware_api.prepare_for_mount_movement(Mount.LEFT),
+ await ot3_hardware_api.retract(Mount.LEFT),
await ot3_hardware_api.move_to(
mount=Mount.LEFT,
abs_position=Point(x=0, y=110, z=250),
critical_point=CriticalPoint.MOUNT,
),
- times=1,
)
decoy.verify(
@@ -162,7 +159,6 @@ async def test_calibration_move_to_location_implementation_for_gripper(
),
times=0,
)
-
decoy.verify(
await ot3_hardware_api.disengage_axes(
[Axis.Z_G],
diff --git a/api/tests/opentrons/protocol_engine/commands/test_aspirate.py b/api/tests/opentrons/protocol_engine/commands/test_aspirate.py
index 178f118cc50..f625c19f93f 100644
--- a/api/tests/opentrons/protocol_engine/commands/test_aspirate.py
+++ b/api/tests/opentrons/protocol_engine/commands/test_aspirate.py
@@ -19,6 +19,7 @@
)
from opentrons.protocol_engine.types import CurrentWell, LoadedPipette
from opentrons.hardware_control import HardwareControlAPI
+from opentrons.protocol_engine.notes import CommandNoteAdder
@pytest.fixture
@@ -27,6 +28,7 @@ def subject(
hardware_api: HardwareControlAPI,
movement: MovementHandler,
pipetting: PipettingHandler,
+ mock_command_note_adder: CommandNoteAdder,
) -> AspirateImplementation:
"""Get the implementation subject."""
return AspirateImplementation(
@@ -34,6 +36,7 @@ def subject(
state_view=state_view,
movement=movement,
hardware_api=hardware_api,
+ command_note_adder=mock_command_note_adder,
)
@@ -44,6 +47,7 @@ async def test_aspirate_implementation_no_prep(
movement: MovementHandler,
pipetting: PipettingHandler,
subject: AspirateImplementation,
+ mock_command_note_adder: CommandNoteAdder,
) -> None:
"""An Aspirate should have an execution implementation without preparing to aspirate."""
location = WellLocation(origin=WellOrigin.BOTTOM, offset=WellOffset(x=0, y=0, z=1))
@@ -70,7 +74,12 @@ async def test_aspirate_implementation_no_prep(
).then_return(Point(x=1, y=2, z=3))
decoy.when(
- await pipetting.aspirate_in_place(pipette_id="abc", volume=50, flow_rate=1.23),
+ await pipetting.aspirate_in_place(
+ pipette_id="abc",
+ volume=50,
+ flow_rate=1.23,
+ command_note_adder=mock_command_note_adder,
+ ),
).then_return(50)
result = await subject.execute(data)
@@ -84,6 +93,7 @@ async def test_aspirate_implementation_with_prep(
hardware_api: HardwareControlAPI,
movement: MovementHandler,
pipetting: PipettingHandler,
+ mock_command_note_adder: CommandNoteAdder,
subject: AspirateImplementation,
) -> None:
"""An Aspirate should have an execution implementation with preparing to aspirate."""
@@ -120,7 +130,12 @@ async def test_aspirate_implementation_with_prep(
).then_return(Point(x=1, y=2, z=3))
decoy.when(
- await pipetting.aspirate_in_place(pipette_id="abc", volume=50, flow_rate=1.23),
+ await pipetting.aspirate_in_place(
+ pipette_id="abc",
+ volume=50,
+ flow_rate=1.23,
+ command_note_adder=mock_command_note_adder,
+ ),
).then_return(50)
result = await subject.execute(data)
@@ -139,7 +154,10 @@ async def test_aspirate_implementation_with_prep(
async def test_aspirate_raises_volume_error(
- decoy: Decoy, pipetting: PipettingHandler, subject: AspirateImplementation
+ decoy: Decoy,
+ pipetting: PipettingHandler,
+ mock_command_note_adder: CommandNoteAdder,
+ subject: AspirateImplementation,
) -> None:
"""Should raise an assertion error for volume larger than working volume."""
location = WellLocation(origin=WellOrigin.BOTTOM, offset=WellOffset(x=0, y=0, z=1))
@@ -156,7 +174,12 @@ async def test_aspirate_raises_volume_error(
decoy.when(pipetting.get_is_ready_to_aspirate(pipette_id="abc")).then_return(True)
decoy.when(
- await pipetting.aspirate_in_place(pipette_id="abc", volume=50, flow_rate=1.23)
+ await pipetting.aspirate_in_place(
+ pipette_id="abc",
+ volume=50,
+ flow_rate=1.23,
+ command_note_adder=mock_command_note_adder,
+ )
).then_raise(AssertionError("blah blah"))
with pytest.raises(AssertionError):
diff --git a/api/tests/opentrons/protocol_engine/commands/test_aspirate_in_place.py b/api/tests/opentrons/protocol_engine/commands/test_aspirate_in_place.py
index 26a39b9001f..3d09c029bcd 100644
--- a/api/tests/opentrons/protocol_engine/commands/test_aspirate_in_place.py
+++ b/api/tests/opentrons/protocol_engine/commands/test_aspirate_in_place.py
@@ -11,6 +11,7 @@
AspirateInPlaceImplementation,
)
from opentrons.protocol_engine.errors.exceptions import PipetteNotReadyToAspirateError
+from opentrons.protocol_engine.notes import CommandNoteAdder
from opentrons.protocol_engine.state import (
StateStore,
@@ -40,12 +41,14 @@ def subject(
pipetting: PipettingHandler,
state_store: StateStore,
hardware_api: HardwareAPI,
+ mock_command_note_adder: CommandNoteAdder,
) -> AspirateInPlaceImplementation:
"""Get the impelementation subject."""
return AspirateInPlaceImplementation(
pipetting=pipetting,
hardware_api=hardware_api,
state_view=state_store,
+ command_note_adder=mock_command_note_adder,
)
@@ -54,6 +57,7 @@ async def test_aspirate_in_place_implementation(
pipetting: PipettingHandler,
state_store: StateStore,
hardware_api: HardwareAPI,
+ mock_command_note_adder: CommandNoteAdder,
subject: AspirateInPlaceImplementation,
) -> None:
"""It should aspirate in place."""
@@ -71,7 +75,10 @@ async def test_aspirate_in_place_implementation(
decoy.when(
await pipetting.aspirate_in_place(
- pipette_id="pipette-id-abc", volume=123, flow_rate=1.234
+ pipette_id="pipette-id-abc",
+ volume=123,
+ flow_rate=1.234,
+ command_note_adder=mock_command_note_adder,
)
).then_return(123)
@@ -110,7 +117,10 @@ async def test_handle_aspirate_in_place_request_not_ready_to_aspirate(
async def test_aspirate_raises_volume_error(
- decoy: Decoy, pipetting: PipettingHandler, subject: AspirateInPlaceImplementation
+ decoy: Decoy,
+ pipetting: PipettingHandler,
+ subject: AspirateInPlaceImplementation,
+ mock_command_note_adder: CommandNoteAdder,
) -> None:
"""Should raise an assertion error for volume larger than working volume."""
data = AspirateInPlaceParams(
@@ -122,7 +132,12 @@ async def test_aspirate_raises_volume_error(
decoy.when(pipetting.get_is_ready_to_aspirate(pipette_id="abc")).then_return(True)
decoy.when(
- await pipetting.aspirate_in_place(pipette_id="abc", volume=50, flow_rate=1.23)
+ await pipetting.aspirate_in_place(
+ pipette_id="abc",
+ volume=50,
+ flow_rate=1.23,
+ command_note_adder=mock_command_note_adder,
+ )
).then_raise(AssertionError("blah blah"))
with pytest.raises(AssertionError):
diff --git a/api/tests/opentrons/protocol_engine/commands/test_configure_for_volume.py b/api/tests/opentrons/protocol_engine/commands/test_configure_for_volume.py
index 857386f83a0..625e8322e31 100644
--- a/api/tests/opentrons/protocol_engine/commands/test_configure_for_volume.py
+++ b/api/tests/opentrons/protocol_engine/commands/test_configure_for_volume.py
@@ -1,4 +1,5 @@
"""Test load pipette commands."""
+import pytest
from decoy import Decoy
from opentrons.protocol_engine.execution import (
@@ -16,20 +17,31 @@
ConfigureForVolumePrivateResult,
ConfigureForVolumeImplementation,
)
+from opentrons_shared_data.pipette.dev_types import PipetteNameType
+from ..pipette_fixtures import get_default_nozzle_map
+from opentrons.types import Point
+@pytest.mark.parametrize(
+ "data",
+ [
+ ConfigureForVolumeParams(
+ pipetteId="some id",
+ volume=1,
+ ),
+ ConfigureForVolumeParams(
+ pipetteId="some id",
+ volume=1,
+ tipOverlapNotAfterVersion="v3",
+ ),
+ ],
+)
async def test_configure_for_volume_implementation(
- decoy: Decoy,
- equipment: EquipmentHandler,
+ decoy: Decoy, equipment: EquipmentHandler, data: ConfigureForVolumeParams
) -> None:
"""A ConfigureForVolume command should have an execution implementation."""
subject = ConfigureForVolumeImplementation(equipment=equipment)
- data = ConfigureForVolumeParams(
- pipetteId="some id",
- volume=1,
- )
-
config = LoadedStaticPipetteData(
model="some-model",
display_name="Hello",
@@ -43,12 +55,16 @@ async def test_configure_for_volume_implementation(
),
tip_configuration_lookup_table={},
nominal_tip_overlap={},
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_MULTI),
+ back_left_corner_offset=Point(10, 20, 30),
+ front_right_corner_offset=Point(40, 50, 60),
)
decoy.when(
await equipment.configure_for_volume(
pipette_id="some id",
volume=1,
+ tip_overlap_version=data.tipOverlapNotAfterVersion,
)
).then_return(
LoadedConfigureForVolumeData(
diff --git a/api/tests/opentrons/protocol_engine/commands/test_configure_nozzle_layout.py b/api/tests/opentrons/protocol_engine/commands/test_configure_nozzle_layout.py
index 44fc10530e5..23cdddd98be 100644
--- a/api/tests/opentrons/protocol_engine/commands/test_configure_nozzle_layout.py
+++ b/api/tests/opentrons/protocol_engine/commands/test_configure_nozzle_layout.py
@@ -1,7 +1,8 @@
"""Test configure nozzle layout commands."""
import pytest
from decoy import Decoy
-from typing import Union, Optional, Dict
+from typing import Union, Dict
+from collections import OrderedDict
from opentrons.protocol_engine.execution import (
EquipmentHandler,
@@ -19,120 +20,27 @@
)
from opentrons.protocol_engine.types import (
- EmptyNozzleLayoutConfiguration,
+ AllNozzleLayoutConfiguration,
ColumnNozzleLayoutConfiguration,
QuadrantNozzleLayoutConfiguration,
SingleNozzleLayoutConfiguration,
)
-
-
-NINETY_SIX_MAP = {
- "A1": Point(-36.0, -25.5, -259.15),
- "A2": Point(-27.0, -25.5, -259.15),
- "A3": Point(-18.0, -25.5, -259.15),
- "A4": Point(-9.0, -25.5, -259.15),
- "A5": Point(0.0, -25.5, -259.15),
- "A6": Point(9.0, -25.5, -259.15),
- "A7": Point(18.0, -25.5, -259.15),
- "A8": Point(27.0, -25.5, -259.15),
- "A9": Point(36.0, -25.5, -259.15),
- "A10": Point(45.0, -25.5, -259.15),
- "A11": Point(54.0, -25.5, -259.15),
- "A12": Point(63.0, -25.5, -259.15),
- "B1": Point(-36.0, -34.5, -259.15),
- "B2": Point(-27.0, -34.5, -259.15),
- "B3": Point(-18.0, -34.5, -259.15),
- "B4": Point(-9.0, -34.5, -259.15),
- "B5": Point(0.0, -34.5, -259.15),
- "B6": Point(9.0, -34.5, -259.15),
- "B7": Point(18.0, -34.5, -259.15),
- "B8": Point(27.0, -34.5, -259.15),
- "B9": Point(36.0, -34.5, -259.15),
- "B10": Point(45.0, -34.5, -259.15),
- "B11": Point(54.0, -34.5, -259.15),
- "B12": Point(63.0, -34.5, -259.15),
- "C1": Point(-36.0, -43.5, -259.15),
- "C2": Point(-27.0, -43.5, -259.15),
- "C3": Point(-18.0, -43.5, -259.15),
- "C4": Point(-9.0, -43.5, -259.15),
- "C5": Point(0.0, -43.5, -259.15),
- "C6": Point(9.0, -43.5, -259.15),
- "C7": Point(18.0, -43.5, -259.15),
- "C8": Point(27.0, -43.5, -259.15),
- "C9": Point(36.0, -43.5, -259.15),
- "C10": Point(45.0, -43.5, -259.15),
- "C11": Point(54.0, -43.5, -259.15),
- "C12": Point(63.0, -43.5, -259.15),
- "D1": Point(-36.0, -52.5, -259.15),
- "D2": Point(-27.0, -52.5, -259.15),
- "D3": Point(-18.0, -52.5, -259.15),
- "D4": Point(-9.0, -52.5, -259.15),
- "D5": Point(0.0, -52.5, -259.15),
- "D6": Point(9.0, -52.5, -259.15),
- "D7": Point(18.0, -52.5, -259.15),
- "D8": Point(27.0, -52.5, -259.15),
- "D9": Point(36.0, -52.5, -259.15),
- "D10": Point(45.0, -52.5, -259.15),
- "D11": Point(54.0, -52.5, -259.15),
- "D12": Point(63.0, -52.5, -259.15),
- "E1": Point(-36.0, -61.5, -259.15),
- "E2": Point(-27.0, -61.5, -259.15),
- "E3": Point(-18.0, -61.5, -259.15),
- "E4": Point(-9.0, -61.5, -259.15),
- "E5": Point(0.0, -61.5, -259.15),
- "E6": Point(9.0, -61.5, -259.15),
- "E7": Point(18.0, -61.5, -259.15),
- "E8": Point(27.0, -61.5, -259.15),
- "E9": Point(36.0, -61.5, -259.15),
- "E10": Point(45.0, -61.5, -259.15),
- "E11": Point(54.0, -61.5, -259.15),
- "E12": Point(63.0, -61.5, -259.15),
- "F1": Point(-36.0, -70.5, -259.15),
- "F2": Point(-27.0, -70.5, -259.15),
- "F3": Point(-18.0, -70.5, -259.15),
- "F4": Point(-9.0, -70.5, -259.15),
- "F5": Point(0.0, -70.5, -259.15),
- "F6": Point(9.0, -70.5, -259.15),
- "F7": Point(18.0, -70.5, -259.15),
- "F8": Point(27.0, -70.5, -259.15),
- "F9": Point(36.0, -70.5, -259.15),
- "F10": Point(45.0, -70.5, -259.15),
- "F11": Point(54.0, -70.5, -259.15),
- "F12": Point(63.0, -70.5, -259.15),
- "G1": Point(-36.0, -79.5, -259.15),
- "G2": Point(-27.0, -79.5, -259.15),
- "G3": Point(-18.0, -79.5, -259.15),
- "G4": Point(-9.0, -79.5, -259.15),
- "G5": Point(0.0, -79.5, -259.15),
- "G6": Point(9.0, -79.5, -259.15),
- "G7": Point(18.0, -79.5, -259.15),
- "G8": Point(27.0, -79.5, -259.15),
- "G9": Point(36.0, -79.5, -259.15),
- "G10": Point(45.0, -79.5, -259.15),
- "G11": Point(54.0, -79.5, -259.15),
- "G12": Point(63.0, -79.5, -259.15),
- "H1": Point(-36.0, -88.5, -259.15),
- "H2": Point(-27.0, -88.5, -259.15),
- "H3": Point(-18.0, -88.5, -259.15),
- "H4": Point(-9.0, -88.5, -259.15),
- "H5": Point(0.0, -88.5, -259.15),
- "H6": Point(9.0, -88.5, -259.15),
- "H7": Point(18.0, -88.5, -259.15),
- "H8": Point(27.0, -88.5, -259.15),
- "H9": Point(36.0, -88.5, -259.15),
- "H10": Point(45.0, -88.5, -259.15),
- "H11": Point(54.0, -88.5, -259.15),
- "H12": Point(63.0, -88.5, -259.15),
-}
+from ..pipette_fixtures import (
+ NINETY_SIX_MAP,
+ NINETY_SIX_COLS,
+ NINETY_SIX_ROWS,
+)
@pytest.mark.parametrize(
argnames=["request_model", "expected_nozzlemap", "nozzle_params"],
argvalues=[
[
- SingleNozzleLayoutConfiguration(primary_nozzle="A1"),
+ SingleNozzleLayoutConfiguration(primaryNozzle="A1"),
NozzleMap.build(
- physical_nozzle_map={"A1": Point(0, 0, 0)},
+ physical_nozzles=OrderedDict({"A1": Point(0, 0, 0)}),
+ physical_rows=OrderedDict({"A": ["A1"]}),
+ physical_columns=OrderedDict({"1": ["A1"]}),
starting_nozzle="A1",
back_left_nozzle="A1",
front_right_nozzle="A1",
@@ -140,9 +48,11 @@
{"primary_nozzle": "A1"},
],
[
- ColumnNozzleLayoutConfiguration(primary_nozzle="A1"),
+ ColumnNozzleLayoutConfiguration(primaryNozzle="A1"),
NozzleMap.build(
- physical_nozzle_map=NINETY_SIX_MAP,
+ physical_nozzles=NINETY_SIX_MAP,
+ physical_rows=NINETY_SIX_ROWS,
+ physical_columns=NINETY_SIX_COLS,
starting_nozzle="A1",
back_left_nozzle="A1",
front_right_nozzle="H1",
@@ -151,21 +61,18 @@
],
[
QuadrantNozzleLayoutConfiguration(
- primary_nozzle="A1", front_right_nozzle="E1"
+ primaryNozzle="A1", frontRightNozzle="E1"
),
NozzleMap.build(
- physical_nozzle_map=NINETY_SIX_MAP,
+ physical_nozzles=NINETY_SIX_MAP,
+ physical_rows=NINETY_SIX_ROWS,
+ physical_columns=NINETY_SIX_COLS,
starting_nozzle="A1",
back_left_nozzle="A1",
front_right_nozzle="E1",
),
{"primary_nozzle": "A1", "front_right_nozzle": "E1"},
],
- [
- EmptyNozzleLayoutConfiguration(),
- None,
- {},
- ],
],
)
async def test_configure_nozzle_layout_implementation(
@@ -173,12 +80,12 @@ async def test_configure_nozzle_layout_implementation(
equipment: EquipmentHandler,
tip_handler: TipHandler,
request_model: Union[
- EmptyNozzleLayoutConfiguration,
+ AllNozzleLayoutConfiguration,
ColumnNozzleLayoutConfiguration,
QuadrantNozzleLayoutConfiguration,
SingleNozzleLayoutConfiguration,
],
- expected_nozzlemap: Optional[NozzleMap],
+ expected_nozzlemap: NozzleMap,
nozzle_params: Dict[str, str],
) -> None:
"""A ConfigureForVolume command should have an execution implementation."""
@@ -188,12 +95,25 @@ async def test_configure_nozzle_layout_implementation(
requested_nozzle_layout = ConfigureNozzleLayoutParams(
pipetteId="pipette-id",
- configuration_params=request_model,
+ configurationParams=request_model,
+ )
+ primary_nozzle = (
+ None
+ if isinstance(request_model, AllNozzleLayoutConfiguration)
+ else request_model.primaryNozzle
+ )
+ front_right_nozzle = (
+ request_model.frontRightNozzle
+ if isinstance(request_model, QuadrantNozzleLayoutConfiguration)
+ else None
)
decoy.when(
await tip_handler.available_for_nozzle_layout(
- "pipette-id", **request_model.dict()
+ pipette_id="pipette-id",
+ style=request_model.style,
+ primary_nozzle=primary_nozzle,
+ front_right_nozzle=front_right_nozzle,
)
).then_return(nozzle_params)
@@ -208,5 +128,6 @@ async def test_configure_nozzle_layout_implementation(
assert result == ConfigureNozzleLayoutResult()
assert private_result == ConfigureNozzleLayoutPrivateResult(
- pipette_id="pipette-id", nozzle_map=expected_nozzlemap
+ pipette_id="pipette-id",
+ nozzle_map=expected_nozzlemap,
)
diff --git a/api/tests/opentrons/protocol_engine/commands/test_drop_tip.py b/api/tests/opentrons/protocol_engine/commands/test_drop_tip.py
index 7551c67ea25..4a3c547c07a 100644
--- a/api/tests/opentrons/protocol_engine/commands/test_drop_tip.py
+++ b/api/tests/opentrons/protocol_engine/commands/test_drop_tip.py
@@ -86,11 +86,16 @@ async def test_drop_tip_implementation(
homeAfter=True,
)
+ decoy.when(
+ mock_state_view.pipettes.get_is_partially_configured(pipette_id="abc")
+ ).then_return(False)
+
decoy.when(
mock_state_view.geometry.get_checked_tip_drop_location(
pipette_id="abc",
labware_id="123",
well_location=DropTipWellLocation(offset=WellOffset(x=1, y=2, z=3)),
+ partially_configured=False,
)
).then_return(WellLocation(offset=WellOffset(x=4, y=5, z=6)))
@@ -142,9 +147,16 @@ async def test_drop_tip_with_alternating_locations(
)
).then_return(drop_location)
+ decoy.when(
+ mock_state_view.pipettes.get_is_partially_configured(pipette_id="abc")
+ ).then_return(False)
+
decoy.when(
mock_state_view.geometry.get_checked_tip_drop_location(
- pipette_id="abc", labware_id="123", well_location=drop_location
+ pipette_id="abc",
+ labware_id="123",
+ well_location=drop_location,
+ partially_configured=False,
)
).then_return(WellLocation(offset=WellOffset(x=4, y=5, z=6)))
diff --git a/api/tests/opentrons/protocol_engine/commands/test_get_tip_presence.py b/api/tests/opentrons/protocol_engine/commands/test_get_tip_presence.py
new file mode 100644
index 00000000000..94fe8caadf3
--- /dev/null
+++ b/api/tests/opentrons/protocol_engine/commands/test_get_tip_presence.py
@@ -0,0 +1,42 @@
+"""Test get tip presence commands."""
+from decoy import Decoy
+import pytest
+
+from opentrons.protocol_engine.execution import TipHandler
+from opentrons.protocol_engine.types import TipPresenceStatus
+
+from opentrons.protocol_engine.commands.get_tip_presence import (
+ GetTipPresenceParams,
+ GetTipPresenceResult,
+ GetTipPresenceImplementation,
+)
+
+
+@pytest.mark.parametrize(
+ "status",
+ [
+ TipPresenceStatus.PRESENT,
+ TipPresenceStatus.ABSENT,
+ TipPresenceStatus.UNKNOWN,
+ ],
+)
+async def test_get_tip_presence_implementation(
+ decoy: Decoy,
+ tip_handler: TipHandler,
+ status: TipPresenceStatus,
+) -> None:
+ """A GetTipPresence command should have an execution implementation."""
+ subject = GetTipPresenceImplementation(tip_handler=tip_handler)
+ data = GetTipPresenceParams(
+ pipetteId="pipette-id",
+ )
+
+ decoy.when(
+ await tip_handler.get_tip_presence(
+ pipette_id="pipette-id",
+ )
+ ).then_return(status)
+
+ result = await subject.execute(data)
+
+ assert result == GetTipPresenceResult(status=status)
diff --git a/api/tests/opentrons/protocol_engine/commands/test_hash_command_params.py b/api/tests/opentrons/protocol_engine/commands/test_hash_command_params.py
index 098ce53c321..e881a13a9fb 100644
--- a/api/tests/opentrons/protocol_engine/commands/test_hash_command_params.py
+++ b/api/tests/opentrons/protocol_engine/commands/test_hash_command_params.py
@@ -1,8 +1,11 @@
"""Tests for hash_command_params."""
+import pytest
from opentrons.protocol_engine import CommandIntent
from opentrons.protocol_engine import commands
-from opentrons.protocol_engine.commands.hash_command_params import hash_command_params
+from opentrons.protocol_engine.commands.hash_command_params import (
+ hash_protocol_command_params,
+)
def test_equivalent_commands() -> None:
@@ -20,10 +23,14 @@ def test_equivalent_commands() -> None:
params=commands.WaitForDurationParams(seconds=123)
)
- assert hash_command_params(b, None) == hash_command_params(c, None)
+ assert hash_protocol_command_params(b, None) == hash_protocol_command_params(
+ c, None
+ )
- a_hash = hash_command_params(a, None)
- assert hash_command_params(b, a_hash) == hash_command_params(c, a_hash)
+ a_hash = hash_protocol_command_params(a, None)
+ assert hash_protocol_command_params(b, a_hash) == hash_protocol_command_params(
+ c, a_hash
+ )
def test_nonequivalent_commands() -> None:
@@ -32,33 +39,39 @@ def test_nonequivalent_commands() -> None:
params=commands.BlowOutInPlaceParams(
pipetteId="abc123",
flowRate=123,
- )
+ ),
+ intent=CommandIntent.PROTOCOL,
)
b = commands.WaitForDurationCreate(
params=commands.WaitForDurationParams(seconds=123)
)
- assert hash_command_params(a, None) != hash_command_params(b, None)
+ assert hash_protocol_command_params(a, None) != hash_protocol_command_params(
+ b, None
+ )
def test_repeated_commands() -> None:
"""Repeated commands should hash differently, even though they're equivalent in isolation."""
a = commands.WaitForDurationCreate(
- params=commands.WaitForDurationParams(seconds=123)
+ params=commands.WaitForDurationParams(seconds=123),
+ intent=CommandIntent.PROTOCOL,
)
b = commands.WaitForDurationCreate(
- params=commands.WaitForDurationParams(seconds=123)
+ params=commands.WaitForDurationParams(seconds=123),
+ intent=CommandIntent.PROTOCOL,
)
- a_hash = hash_command_params(a, None)
- b_hash = hash_command_params(b, a_hash)
+ a_hash = hash_protocol_command_params(a, None)
+ b_hash = hash_protocol_command_params(b, a_hash)
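+    # The previous command's hash is folded into the next one, which is what makes two
+    # otherwise-identical commands hash differently at different points in the sequence.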
assert a_hash != b_hash
-def test_setup_command() -> None:
- """Setup commands should always hash to None."""
+@pytest.mark.parametrize("command_intent", [CommandIntent.SETUP, CommandIntent.FIXIT])
+def test_setup_and_fixit_command(command_intent: CommandIntent) -> None:
+ """Setup and fixit commands should always skip hashing."""
setup_command = commands.WaitForDurationCreate(
params=commands.WaitForDurationParams(seconds=123),
- intent=CommandIntent.SETUP,
+ intent=command_intent,
)
- assert hash_command_params(setup_command, None) is None
+ assert hash_protocol_command_params(setup_command, None) is None
diff --git a/api/tests/opentrons/protocol_engine/commands/test_load_labware.py b/api/tests/opentrons/protocol_engine/commands/test_load_labware.py
index 9444a3df5ec..7ca9d112e27 100644
--- a/api/tests/opentrons/protocol_engine/commands/test_load_labware.py
+++ b/api/tests/opentrons/protocol_engine/commands/test_load_labware.py
@@ -9,6 +9,7 @@
from opentrons.protocol_engine.errors import (
LabwareIsNotAllowedInLocationError,
+ LocationIsOccupiedError,
)
from opentrons.protocol_engine.types import (
@@ -52,9 +53,14 @@ async def test_load_labware_implementation(
displayName="My custom display name",
)
+ decoy.when(
+ state_view.geometry.ensure_location_not_occupied(
+ DeckSlotLocation(slotName=DeckSlotName.SLOT_3)
+ )
+ ).then_return(DeckSlotLocation(slotName=DeckSlotName.SLOT_4))
decoy.when(
await equipment.load_labware(
- location=DeckSlotLocation(slotName=DeckSlotName.SLOT_3),
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_4),
load_name="some-load-name",
namespace="opentrons-test",
version=1,
@@ -120,9 +126,14 @@ async def test_load_labware_on_labware(
displayName="My custom display name",
)
+ decoy.when(
+ state_view.geometry.ensure_location_not_occupied(
+ OnLabwareLocation(labwareId="other-labware-id")
+ )
+ ).then_return(OnLabwareLocation(labwareId="another-labware-id"))
decoy.when(
await equipment.load_labware(
- location=OnLabwareLocation(labwareId="other-labware-id"),
+ location=OnLabwareLocation(labwareId="another-labware-id"),
load_name="some-load-name",
namespace="opentrons-test",
version=1,
@@ -150,6 +161,33 @@ async def test_load_labware_on_labware(
decoy.verify(
state_view.labware.raise_if_labware_cannot_be_stacked(
- well_plate_def, "other-labware-id"
+ well_plate_def, "another-labware-id"
)
)
+
+
+async def test_load_labware_raises_if_location_occupied(
+ decoy: Decoy,
+ well_plate_def: LabwareDefinition,
+ equipment: EquipmentHandler,
+ state_view: StateView,
+) -> None:
+ """A LoadLabware command should have an execution implementation."""
+ subject = LoadLabwareImplementation(equipment=equipment, state_view=state_view)
+
+ data = LoadLabwareParams(
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_3),
+ loadName="some-load-name",
+ namespace="opentrons-test",
+ version=1,
+ displayName="My custom display name",
+ )
+
+ decoy.when(
+ state_view.geometry.ensure_location_not_occupied(
+ DeckSlotLocation(slotName=DeckSlotName.SLOT_3)
+ )
+ ).then_raise(LocationIsOccupiedError("Get your own spot!"))
+
+ with pytest.raises(LocationIsOccupiedError):
+ await subject.execute(data)
diff --git a/api/tests/opentrons/protocol_engine/commands/test_load_module.py b/api/tests/opentrons/protocol_engine/commands/test_load_module.py
index e86d402058c..65306f34adc 100644
--- a/api/tests/opentrons/protocol_engine/commands/test_load_module.py
+++ b/api/tests/opentrons/protocol_engine/commands/test_load_module.py
@@ -1,6 +1,11 @@
"""Test load module command."""
+import pytest
+from typing import cast
from decoy import Decoy
+from opentrons.protocol_engine.errors import LocationIsOccupiedError
+from opentrons.protocol_engine.state import StateView
+from opentrons_shared_data.robot.dev_types import RobotType
from opentrons.types import DeckSlotName
from opentrons.protocol_engine.types import (
DeckSlotLocation,
@@ -8,32 +13,66 @@
ModuleDefinition,
)
from opentrons.protocol_engine.execution import EquipmentHandler, LoadedModuleData
+from opentrons.protocol_engine import ModuleModel as EngineModuleModel
+from opentrons.hardware_control.modules import ModuleType
from opentrons.protocol_engine.commands.load_module import (
LoadModuleParams,
LoadModuleResult,
LoadModuleImplementation,
)
+from opentrons.hardware_control.modules.types import (
+ ModuleModel as HardwareModuleModel,
+ TemperatureModuleModel,
+ MagneticModuleModel,
+ ThermocyclerModuleModel,
+ HeaterShakerModuleModel,
+)
+from opentrons_shared_data.deck.dev_types import (
+ DeckDefinitionV5,
+ SlotDefV3,
+)
+from opentrons_shared_data.deck import load as load_deck
+from opentrons.protocols.api_support.deck_type import (
+ STANDARD_OT2_DECK,
+ STANDARD_OT3_DECK,
+)
async def test_load_module_implementation(
decoy: Decoy,
equipment: EquipmentHandler,
+ state_view: StateView,
tempdeck_v2_def: ModuleDefinition,
) -> None:
"""A loadModule command should have an execution implementation."""
- subject = LoadModuleImplementation(equipment=equipment)
+ subject = LoadModuleImplementation(equipment=equipment, state_view=state_view)
data = LoadModuleParams(
- model=ModuleModel.TEMPERATURE_MODULE_V1,
- location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
+ model=ModuleModel.TEMPERATURE_MODULE_V2,
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_D1),
moduleId="some-id",
)
+ deck_def = load_deck(STANDARD_OT3_DECK, 5)
+
+ decoy.when(state_view.addressable_areas.state.deck_definition).then_return(deck_def)
+ decoy.when(
+ state_view.addressable_areas.get_cutout_id_by_deck_slot_name(
+ DeckSlotName.SLOT_D1
+ )
+ ).then_return("cutout" + DeckSlotName.SLOT_D1.value)
+
+ decoy.when(
+ state_view.geometry.ensure_location_not_occupied(
+ DeckSlotLocation(slotName=DeckSlotName.SLOT_D1)
+ )
+ ).then_return(DeckSlotLocation(slotName=DeckSlotName.SLOT_2))
+
decoy.when(
await equipment.load_module(
- model=ModuleModel.TEMPERATURE_MODULE_V1,
- location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
+ model=ModuleModel.TEMPERATURE_MODULE_V2,
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_2),
module_id="some-id",
)
).then_return(
@@ -56,21 +95,37 @@ async def test_load_module_implementation(
async def test_load_module_implementation_mag_block(
decoy: Decoy,
equipment: EquipmentHandler,
+ state_view: StateView,
mag_block_v1_def: ModuleDefinition,
) -> None:
"""A loadModule command for mag block should have an execution implementation."""
- subject = LoadModuleImplementation(equipment=equipment)
+ subject = LoadModuleImplementation(equipment=equipment, state_view=state_view)
data = LoadModuleParams(
model=ModuleModel.MAGNETIC_BLOCK_V1,
- location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_D1),
moduleId="some-id",
)
+ deck_def = load_deck(STANDARD_OT3_DECK, 5)
+
+ decoy.when(state_view.addressable_areas.state.deck_definition).then_return(deck_def)
+ decoy.when(
+ state_view.addressable_areas.get_cutout_id_by_deck_slot_name(
+ DeckSlotName.SLOT_D1
+ )
+ ).then_return("cutout" + DeckSlotName.SLOT_D1.value)
+
+ decoy.when(
+ state_view.geometry.ensure_location_not_occupied(
+ DeckSlotLocation(slotName=DeckSlotName.SLOT_D1)
+ )
+ ).then_return(DeckSlotLocation(slotName=DeckSlotName.SLOT_2))
+
decoy.when(
await equipment.load_magnetic_block(
model=ModuleModel.MAGNETIC_BLOCK_V1,
- location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_2),
module_id="some-id",
)
).then_return(
@@ -88,3 +143,173 @@ async def test_load_module_implementation_mag_block(
model=ModuleModel.MAGNETIC_BLOCK_V1,
definition=mag_block_v1_def,
)
+
+
+async def test_load_module_raises_if_location_occupied(
+ decoy: Decoy,
+ equipment: EquipmentHandler,
+ state_view: StateView,
+) -> None:
+ """A loadModule command should have an execution implementation."""
+ subject = LoadModuleImplementation(equipment=equipment, state_view=state_view)
+
+ data = LoadModuleParams(
+ model=ModuleModel.TEMPERATURE_MODULE_V2,
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_D1),
+ moduleId="some-id",
+ )
+
+ deck_def = load_deck(STANDARD_OT3_DECK, 5)
+
+ decoy.when(state_view.addressable_areas.state.deck_definition).then_return(deck_def)
+ decoy.when(
+ state_view.addressable_areas.get_cutout_id_by_deck_slot_name(
+ DeckSlotName.SLOT_D1
+ )
+ ).then_return("cutout" + DeckSlotName.SLOT_D1.value)
+
+ decoy.when(
+ state_view.geometry.ensure_location_not_occupied(
+ DeckSlotLocation(slotName=DeckSlotName.SLOT_D1)
+ )
+ ).then_raise(LocationIsOccupiedError("Get your own spot!"))
+
+ with pytest.raises(LocationIsOccupiedError):
+ await subject.execute(data)
+
+
+@pytest.mark.parametrize(
+ (
+ "requested_model",
+ "engine_model",
+ "deck_def",
+ "slot_name",
+ "robot_type",
+ ),
+ [
+ (
+ TemperatureModuleModel.TEMPERATURE_V2,
+ EngineModuleModel.TEMPERATURE_MODULE_V2,
+ load_deck(STANDARD_OT3_DECK, 5),
+ DeckSlotName.SLOT_D2,
+ "OT-3 Standard",
+ ),
+ (
+ ThermocyclerModuleModel.THERMOCYCLER_V1,
+ EngineModuleModel.THERMOCYCLER_MODULE_V1,
+ load_deck(STANDARD_OT2_DECK, 5),
+ DeckSlotName.SLOT_1,
+ "OT-2 Standard",
+ ),
+ (
+ ThermocyclerModuleModel.THERMOCYCLER_V2,
+ EngineModuleModel.THERMOCYCLER_MODULE_V2,
+ load_deck(STANDARD_OT3_DECK, 5),
+ DeckSlotName.SLOT_A2,
+ "OT-3 Standard",
+ ),
+ (
+ HeaterShakerModuleModel.HEATER_SHAKER_V1,
+ EngineModuleModel.HEATER_SHAKER_MODULE_V1,
+ load_deck(STANDARD_OT3_DECK, 5),
+ DeckSlotName.SLOT_A2,
+ "OT-3 Standard",
+ ),
+ ],
+)
+async def test_load_module_raises_wrong_location(
+ decoy: Decoy,
+ equipment: EquipmentHandler,
+ state_view: StateView,
+ requested_model: HardwareModuleModel,
+ engine_model: EngineModuleModel,
+ deck_def: DeckDefinitionV5,
+ slot_name: DeckSlotName,
+ robot_type: RobotType,
+) -> None:
+ """It should issue a load module engine command."""
+ subject = LoadModuleImplementation(equipment=equipment, state_view=state_view)
+
+ data = LoadModuleParams(
+ model=engine_model,
+ location=DeckSlotLocation(slotName=slot_name),
+ moduleId="some-id",
+ )
+
+ decoy.when(state_view.config.robot_type).then_return(robot_type)
+
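+    # OT-2 compatibility is checked against the slot definition's compatibleModuleTypes;
+    # Flex compatibility is resolved from the deck definition via the slot's cutout.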
+ if robot_type == "OT-2 Standard":
+ decoy.when(
+ state_view.addressable_areas.get_slot_definition(slot_name.id)
+ ).then_return(cast(SlotDefV3, {"compatibleModuleTypes": []}))
+ else:
+ decoy.when(state_view.addressable_areas.state.deck_definition).then_return(
+ deck_def
+ )
+ decoy.when(
+ state_view.addressable_areas.get_cutout_id_by_deck_slot_name(slot_name)
+ ).then_return("cutout" + slot_name.value)
+
+ with pytest.raises(
+ ValueError,
+ match=f"A {ModuleType.from_model(model=requested_model).value} cannot be loaded into slot {slot_name}",
+ ):
+ await subject.execute(data)
+
+
+@pytest.mark.parametrize(
+ (
+ "requested_model",
+ "engine_model",
+ "deck_def",
+ "slot_name",
+ "robot_type",
+ ),
+ [
+ (
+ MagneticModuleModel.MAGNETIC_V2,
+ EngineModuleModel.MAGNETIC_MODULE_V2,
+ load_deck(STANDARD_OT3_DECK, 5),
+ DeckSlotName.SLOT_A2,
+ "OT-3 Standard",
+ ),
+ ],
+)
+async def test_load_module_raises_module_fixture_id_does_not_exist(
+ decoy: Decoy,
+ equipment: EquipmentHandler,
+ state_view: StateView,
+ requested_model: HardwareModuleModel,
+ engine_model: EngineModuleModel,
+ deck_def: DeckDefinitionV5,
+ slot_name: DeckSlotName,
+ robot_type: RobotType,
+) -> None:
+ """It should issue a load module engine command and raise an error for unmatched fixtures."""
+ subject = LoadModuleImplementation(equipment=equipment, state_view=state_view)
+
+ data = LoadModuleParams(
+ model=engine_model,
+ location=DeckSlotLocation(slotName=slot_name),
+ moduleId="some-id",
+ )
+
+ decoy.when(state_view.config.robot_type).then_return(robot_type)
+
+ if robot_type == "OT-2 Standard":
+ decoy.when(
+ state_view.addressable_areas.get_slot_definition(slot_name.id)
+ ).then_return(cast(SlotDefV3, {"compatibleModuleTypes": []}))
+ else:
+ decoy.when(state_view.addressable_areas.state.deck_definition).then_return(
+ deck_def
+ )
+ decoy.when(
+ state_view.addressable_areas.get_cutout_id_by_deck_slot_name(slot_name)
+ ).then_return("cutout" + slot_name.value)
+
+ with pytest.raises(
+ ValueError,
+ match=f"Module Type {ModuleType.from_model(requested_model).value} does not have a related fixture ID.",
+ ):
+ await subject.execute(data)
diff --git a/api/tests/opentrons/protocol_engine/commands/test_load_pipette.py b/api/tests/opentrons/protocol_engine/commands/test_load_pipette.py
index 45a935c9d08..6896808e574 100644
--- a/api/tests/opentrons/protocol_engine/commands/test_load_pipette.py
+++ b/api/tests/opentrons/protocol_engine/commands/test_load_pipette.py
@@ -1,29 +1,51 @@
"""Test load pipette commands."""
+import pytest
from decoy import Decoy
from opentrons_shared_data.pipette.dev_types import PipetteNameType
+from opentrons_shared_data.robot.dev_types import RobotType
+from opentrons.types import MountType, Point
-from opentrons.types import MountType
+from opentrons.protocol_engine.errors import InvalidSpecificationForRobotTypeError
from opentrons.protocol_engine.types import FlowRates
from opentrons.protocol_engine.execution import LoadedPipetteData, EquipmentHandler
from opentrons.protocol_engine.resources.pipette_data_provider import (
LoadedStaticPipetteData,
)
-
+from opentrons.protocol_engine.state import StateView
from opentrons.protocol_engine.commands.load_pipette import (
LoadPipetteParams,
LoadPipetteResult,
LoadPipettePrivateResult,
LoadPipetteImplementation,
)
+from ..pipette_fixtures import get_default_nozzle_map
+@pytest.mark.parametrize(
+ "data",
+ [
+ LoadPipetteParams(
+ pipetteName=PipetteNameType.P300_SINGLE,
+ mount=MountType.LEFT,
+ pipetteId="some id",
+ ),
+ LoadPipetteParams(
+ pipetteName=PipetteNameType.P300_SINGLE,
+ mount=MountType.LEFT,
+ pipetteId="some id",
+ tipOverlapNotAfterVersion="v2",
+ ),
+ ],
+)
async def test_load_pipette_implementation(
decoy: Decoy,
equipment: EquipmentHandler,
+ state_view: StateView,
+ data: LoadPipetteParams,
) -> None:
"""A LoadPipette command should have an execution implementation."""
- subject = LoadPipetteImplementation(equipment=equipment)
+ subject = LoadPipetteImplementation(equipment=equipment, state_view=state_view)
config_data = LoadedStaticPipetteData(
model="some-model",
display_name="Hello",
@@ -37,11 +59,9 @@ async def test_load_pipette_implementation(
),
tip_configuration_lookup_table={},
nominal_tip_overlap={},
- )
- data = LoadPipetteParams(
- pipetteName=PipetteNameType.P300_SINGLE,
- mount=MountType.LEFT,
- pipetteId="some id",
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_MULTI),
+ back_left_corner_offset=Point(x=1, y=2, z=3),
+ front_right_corner_offset=Point(x=4, y=5, z=6),
)
decoy.when(
@@ -49,6 +69,7 @@ async def test_load_pipette_implementation(
pipette_name=PipetteNameType.P300_SINGLE,
mount=MountType.LEFT,
pipette_id="some id",
+ tip_overlap_version=data.tipOverlapNotAfterVersion,
)
).then_return(
LoadedPipetteData(
@@ -69,9 +90,10 @@ async def test_load_pipette_implementation(
async def test_load_pipette_implementation_96_channel(
decoy: Decoy,
equipment: EquipmentHandler,
+ state_view: StateView,
) -> None:
"""A LoadPipette command should have an execution implementation."""
- subject = LoadPipetteImplementation(equipment=equipment)
+ subject = LoadPipetteImplementation(equipment=equipment, state_view=state_view)
data = LoadPipetteParams(
pipetteName=PipetteNameType.P1000_96,
@@ -91,6 +113,9 @@ async def test_load_pipette_implementation_96_channel(
),
tip_configuration_lookup_table={},
nominal_tip_overlap={},
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P1000_96),
+ back_left_corner_offset=Point(x=1, y=2, z=3),
+ front_right_corner_offset=Point(x=4, y=5, z=6),
)
decoy.when(
@@ -98,6 +123,7 @@ async def test_load_pipette_implementation_96_channel(
pipette_name=PipetteNameType.P1000_96,
mount=MountType.LEFT,
pipette_id="some id",
+ tip_overlap_version=None,
)
).then_return(
LoadedPipetteData(
@@ -111,3 +137,34 @@ async def test_load_pipette_implementation_96_channel(
assert private_result == LoadPipettePrivateResult(
pipette_id="pipette-id", serial_number="some id", config=config_data
)
+
+
+@pytest.mark.parametrize(
+ argnames=["pipette_type", "robot_type"],
+ argvalues=[
+ (PipetteNameType.P300_SINGLE, "OT-3 Standard"),
+ (PipetteNameType.P20_MULTI_GEN2, "OT-3 Standard"),
+ (PipetteNameType.P10_MULTI, "OT-3 Standard"),
+ (PipetteNameType.P1000_SINGLE, "OT-3 Standard"),
+ (PipetteNameType.P1000_MULTI_FLEX, "OT-2 Standard"),
+ (PipetteNameType.P50_SINGLE_FLEX, "OT-2 Standard"),
+ (PipetteNameType.P1000_96, "OT-2 Standard"),
+ ],
+)
+async def test_loading_wrong_pipette_for_robot_raises_error(
+ decoy: Decoy,
+ equipment: EquipmentHandler,
+ state_view: StateView,
+ pipette_type: PipetteNameType,
+ robot_type: RobotType,
+) -> None:
+ """A LoadPipette command should raise error when pipette is not supported on robot."""
+ subject = LoadPipetteImplementation(equipment=equipment, state_view=state_view)
+ p1000_params = LoadPipetteParams(
+ pipetteName=pipette_type,
+ mount=MountType.LEFT,
+ pipetteId="p1000-id",
+ )
+ decoy.when(state_view.config.robot_type).then_return(robot_type)
+ with pytest.raises(InvalidSpecificationForRobotTypeError):
+ await subject.execute(p1000_params)
diff --git a/api/tests/opentrons/protocol_engine/commands/test_move_labware.py b/api/tests/opentrons/protocol_engine/commands/test_move_labware.py
index 0b76e8a2b56..beb9e14c11d 100644
--- a/api/tests/opentrons/protocol_engine/commands/test_move_labware.py
+++ b/api/tests/opentrons/protocol_engine/commands/test_move_labware.py
@@ -3,9 +3,10 @@
import pytest
from decoy import Decoy
-from opentrons_shared_data.labware.labware_definition import Parameters
+from opentrons_shared_data.labware.labware_definition import Parameters, Dimensions
+from opentrons_shared_data.gripper.constants import GRIPPER_PADDLE_WIDTH
-from opentrons.types import DeckSlotName
+from opentrons.types import DeckSlotName, Point
from opentrons.protocols.models import LabwareDefinition
from opentrons.protocol_engine import errors, Config
from opentrons.protocol_engine.resources import labware_validation
@@ -18,6 +19,7 @@
LabwareOffsetVector,
LabwareMovementOffsetData,
DeckType,
+ AddressableAreaLocation,
)
from opentrons.protocol_engine.state import StateView
from opentrons.protocol_engine.commands.move_labware import (
@@ -241,6 +243,94 @@ async def test_gripper_move_labware_implementation(
pickUpOffset=LabwareOffsetVector(x=1, y=2, z=3),
dropOffset=LabwareOffsetVector(x=0, y=0, z=0),
),
+ post_drop_slide_offset=None,
+ ),
+ )
+ assert result == MoveLabwareResult(
+ offsetId="wowzers-a-new-offset-id",
+ )
+
+
+async def test_gripper_move_to_waste_chute_implementation(
+ decoy: Decoy,
+ equipment: EquipmentHandler,
+ labware_movement: LabwareMovementHandler,
+ state_view: StateView,
+ run_control: RunControlHandler,
+) -> None:
+ """It should drop the labware with a delay added."""
+ subject = MoveLabwareImplementation(
+ state_view=state_view,
+ equipment=equipment,
+ labware_movement=labware_movement,
+ run_control=run_control,
+ )
+ from_location = DeckSlotLocation(slotName=DeckSlotName.SLOT_1)
+ new_location = AddressableAreaLocation(addressableAreaName="gripperWasteChute")
+ labware_width = 50
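+    # The expected post-drop slide is half the labware width plus half the gripper
+    # paddle width, plus an 8 mm margin.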
+ expected_slide_offset = Point(
+ x=labware_width / 2 + GRIPPER_PADDLE_WIDTH / 2 + 8, y=0, z=0
+ )
+
+ data = MoveLabwareParams(
+ labwareId="my-cool-labware-id",
+ newLocation=new_location,
+ strategy=LabwareMovementStrategy.USING_GRIPPER,
+ pickUpOffset=LabwareOffsetVector(x=1, y=2, z=3),
+ dropOffset=None,
+ )
+ labware_def = LabwareDefinition.construct( # type: ignore[call-arg]
+ namespace="my-cool-namespace",
+ dimensions=Dimensions(
+ yDimension=labware_width, zDimension=labware_width, xDimension=labware_width
+ ),
+ )
+ decoy.when(
+ state_view.labware.get_definition(labware_id="my-cool-labware-id")
+ ).then_return(labware_def)
+ decoy.when(state_view.labware.get(labware_id="my-cool-labware-id")).then_return(
+ LoadedLabware(
+ id="my-cool-labware-id",
+ loadName="load-name",
+ definitionUri="opentrons-test/load-name/1",
+ location=from_location,
+ offsetId=None,
+ )
+ )
+ decoy.when(
+ state_view.geometry.ensure_location_not_occupied(
+ location=new_location,
+ )
+ ).then_return(new_location)
+ decoy.when(
+ equipment.find_applicable_labware_offset_id(
+ labware_definition_uri="opentrons-test/load-name/1",
+ labware_location=new_location,
+ )
+ ).then_return("wowzers-a-new-offset-id")
+
+ decoy.when(
+ state_view.geometry.ensure_valid_gripper_location(from_location)
+ ).then_return(from_location)
+ decoy.when(
+ state_view.geometry.ensure_valid_gripper_location(new_location)
+ ).then_return(new_location)
+ decoy.when(labware_validation.validate_gripper_compatible(labware_def)).then_return(
+ True
+ )
+
+ result = await subject.execute(data)
+ decoy.verify(
+ state_view.labware.raise_if_labware_has_labware_on_top("my-cool-labware-id"),
+ await labware_movement.move_labware_with_gripper(
+ labware_id="my-cool-labware-id",
+ current_location=from_location,
+ new_location=new_location,
+ user_offset_data=LabwareMovementOffsetData(
+ pickUpOffset=LabwareOffsetVector(x=1, y=2, z=3),
+ dropOffset=LabwareOffsetVector(x=0, y=0, z=0),
+ ),
+ post_drop_slide_offset=expected_slide_offset,
),
)
assert result == MoveLabwareResult(
diff --git a/api/tests/opentrons/protocol_engine/commands/test_move_to_addressable_area.py b/api/tests/opentrons/protocol_engine/commands/test_move_to_addressable_area.py
new file mode 100644
index 00000000000..20515bc12c4
--- /dev/null
+++ b/api/tests/opentrons/protocol_engine/commands/test_move_to_addressable_area.py
@@ -0,0 +1,50 @@
+"""Test move to addressable area commands."""
+from decoy import Decoy
+
+from opentrons.protocol_engine import DeckPoint, AddressableOffsetVector
+from opentrons.protocol_engine.execution import MovementHandler
+from opentrons.protocol_engine.state import StateView
+from opentrons.types import Point
+
+from opentrons.protocol_engine.commands.move_to_addressable_area import (
+ MoveToAddressableAreaParams,
+ MoveToAddressableAreaResult,
+ MoveToAddressableAreaImplementation,
+)
+
+
+async def test_move_to_addressable_area_implementation(
+ decoy: Decoy,
+ state_view: StateView,
+ movement: MovementHandler,
+) -> None:
+ """A MoveToAddressableArea command should have an execution implementation."""
+ subject = MoveToAddressableAreaImplementation(
+ movement=movement, state_view=state_view
+ )
+
+ data = MoveToAddressableAreaParams(
+ pipetteId="abc",
+ addressableAreaName="123",
+ offset=AddressableOffsetVector(x=1, y=2, z=3),
+ forceDirect=True,
+ minimumZHeight=4.56,
+ speed=7.89,
+ stayAtHighestPossibleZ=True,
+ )
+
+ decoy.when(
+ await movement.move_to_addressable_area(
+ pipette_id="abc",
+ addressable_area_name="123",
+ offset=AddressableOffsetVector(x=1, y=2, z=3),
+ force_direct=True,
+ minimum_z_height=4.56,
+ speed=7.89,
+ stay_at_highest_possible_z=True,
+ )
+ ).then_return(Point(x=9, y=8, z=7))
+
+ result = await subject.execute(data)
+
+ assert result == MoveToAddressableAreaResult(position=DeckPoint(x=9, y=8, z=7))
diff --git a/api/tests/opentrons/protocol_engine/commands/test_move_to_addressable_area_for_drop_tip.py b/api/tests/opentrons/protocol_engine/commands/test_move_to_addressable_area_for_drop_tip.py
new file mode 100644
index 00000000000..73478ccafd5
--- /dev/null
+++ b/api/tests/opentrons/protocol_engine/commands/test_move_to_addressable_area_for_drop_tip.py
@@ -0,0 +1,59 @@
+"""Test move to addressable area for drop tip commands."""
+from decoy import Decoy
+
+from opentrons.protocol_engine import DeckPoint, AddressableOffsetVector
+from opentrons.protocol_engine.execution import MovementHandler
+from opentrons.protocol_engine.state import StateView
+from opentrons.types import Point
+
+from opentrons.protocol_engine.commands.move_to_addressable_area_for_drop_tip import (
+ MoveToAddressableAreaForDropTipParams,
+ MoveToAddressableAreaForDropTipResult,
+ MoveToAddressableAreaForDropTipImplementation,
+)
+
+
+async def test_move_to_addressable_area_for_drop_tip_implementation(
+ decoy: Decoy,
+ state_view: StateView,
+ movement: MovementHandler,
+) -> None:
+ """A MoveToAddressableAreaForDropTip command should have an execution implementation."""
+ subject = MoveToAddressableAreaForDropTipImplementation(
+ movement=movement, state_view=state_view
+ )
+
+ data = MoveToAddressableAreaForDropTipParams(
+ pipetteId="abc",
+ addressableAreaName="123",
+ offset=AddressableOffsetVector(x=1, y=2, z=3),
+ forceDirect=True,
+ minimumZHeight=4.56,
+ speed=7.89,
+ alternateDropLocation=True,
+ ignoreTipConfiguration=False,
+ )
+
+ decoy.when(
+ state_view.geometry.get_next_tip_drop_location_for_addressable_area(
+ addressable_area_name="123", pipette_id="abc"
+ )
+ ).then_return(AddressableOffsetVector(x=10, y=11, z=12))
+
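+    # The movement should use the geometry view's computed drop location offset,
+    # not the offset supplied in the command params.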
+ decoy.when(
+ await movement.move_to_addressable_area(
+ pipette_id="abc",
+ addressable_area_name="123",
+ offset=AddressableOffsetVector(x=10, y=11, z=12),
+ force_direct=True,
+ minimum_z_height=4.56,
+ speed=7.89,
+ ignore_tip_configuration=False,
+ )
+ ).then_return(Point(x=9, y=8, z=7))
+
+ result = await subject.execute(data)
+
+ assert result == MoveToAddressableAreaForDropTipResult(
+ position=DeckPoint(x=9, y=8, z=7)
+ )
diff --git a/api/tests/opentrons/protocol_engine/commands/test_reload_labware.py b/api/tests/opentrons/protocol_engine/commands/test_reload_labware.py
new file mode 100644
index 00000000000..556d4975786
--- /dev/null
+++ b/api/tests/opentrons/protocol_engine/commands/test_reload_labware.py
@@ -0,0 +1,85 @@
+"""Test load labware commands."""
+import inspect
+import pytest
+
+from decoy import Decoy
+
+from opentrons.types import DeckSlotName
+from opentrons.protocols.models import LabwareDefinition
+
+from opentrons.protocol_engine.errors import (
+ LabwareNotLoadedError,
+)
+
+from opentrons.protocol_engine.types import (
+ DeckSlotLocation,
+)
+from opentrons.protocol_engine.execution import ReloadedLabwareData, EquipmentHandler
+from opentrons.protocol_engine.resources import labware_validation
+from opentrons.protocol_engine.state import StateView
+
+from opentrons.protocol_engine.commands.reload_labware import (
+ ReloadLabwareParams,
+ ReloadLabwareResult,
+ ReloadLabwareImplementation,
+)
+
+
+@pytest.fixture(autouse=True)
+def patch_mock_labware_validation(
+ decoy: Decoy, monkeypatch: pytest.MonkeyPatch
+) -> None:
+ """Mock out move_types.py functions."""
+ for name, func in inspect.getmembers(labware_validation, inspect.isfunction):
+ monkeypatch.setattr(labware_validation, name, decoy.mock(func=func))
+
+
+async def test_reload_labware_implementation(
+ decoy: Decoy,
+ well_plate_def: LabwareDefinition,
+ equipment: EquipmentHandler,
+ state_view: StateView,
+) -> None:
+ """A ReloadLabware command should have an execution implementation."""
+ subject = ReloadLabwareImplementation(equipment=equipment, state_view=state_view)
+
+ data = ReloadLabwareParams(
+ labwareId="my-labware-id",
+ )
+
+ decoy.when(await equipment.reload_labware(labware_id="my-labware-id",)).then_return(
+ ReloadedLabwareData(
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_4),
+ offsetId="labware-offset-id",
+ )
+ )
+
+ result = await subject.execute(data)
+
+ assert result == ReloadLabwareResult(
+ labwareId="my-labware-id",
+ offsetId="labware-offset-id",
+ )
+
+
+async def test_reload_labware_raises_labware_does_not_exist(
+ decoy: Decoy,
+ well_plate_def: LabwareDefinition,
+ equipment: EquipmentHandler,
+ state_view: StateView,
+) -> None:
+ """A ReloadLabware command should raise if the specified labware is not loaded."""
+ subject = ReloadLabwareImplementation(equipment=equipment, state_view=state_view)
+
+ data = ReloadLabwareParams(
+ labwareId="my-labware-id",
+ )
+
+ decoy.when(
+ await equipment.reload_labware(
+ labware_id="my-labware-id",
+ )
+ ).then_raise(LabwareNotLoadedError("What labware is this!"))
+
+ with pytest.raises(LabwareNotLoadedError):
+ await subject.execute(data)
diff --git a/api/tests/opentrons/protocol_engine/commands/test_save_position.py b/api/tests/opentrons/protocol_engine/commands/test_save_position.py
index e31f44c779c..99b52a4cd42 100644
--- a/api/tests/opentrons/protocol_engine/commands/test_save_position.py
+++ b/api/tests/opentrons/protocol_engine/commands/test_save_position.py
@@ -35,10 +35,7 @@ async def test_save_position_implementation(
subject = SavePositionImplementation(
model_utils=mock_model_utils, gantry_mover=mock_gantry_mover
)
- params = SavePositionParams(
- pipetteId="abc",
- positionId="123",
- )
+ params = SavePositionParams(pipetteId="abc", positionId="123", failOnNotHomed=True)
decoy.when(mock_model_utils.ensure_id("123")).then_return("456")
diff --git a/api/tests/opentrons/protocol_engine/commands/test_verify_tip_presence.py b/api/tests/opentrons/protocol_engine/commands/test_verify_tip_presence.py
new file mode 100644
index 00000000000..160ee056ae8
--- /dev/null
+++ b/api/tests/opentrons/protocol_engine/commands/test_verify_tip_presence.py
@@ -0,0 +1,34 @@
+"""Test verify tip presence commands."""
+from decoy import Decoy
+
+from opentrons.protocol_engine.execution import TipHandler
+from opentrons.protocol_engine.types import TipPresenceStatus
+
+from opentrons.protocol_engine.commands.verify_tip_presence import (
+ VerifyTipPresenceParams,
+ VerifyTipPresenceResult,
+ VerifyTipPresenceImplementation,
+)
+
+
+async def test_verify_tip_presence_implementation(
+ decoy: Decoy,
+ tip_handler: TipHandler,
+) -> None:
+ """A VerifyTipPresence command should have an execution implementation."""
+ subject = VerifyTipPresenceImplementation(tip_handler=tip_handler)
+ data = VerifyTipPresenceParams(
+ pipetteId="pipette-id",
+ expectedState=TipPresenceStatus.PRESENT,
+ )
+
+ decoy.when(
+ await tip_handler.verify_tip_presence(
+ pipette_id="pipette-id",
+ expected=TipPresenceStatus.PRESENT,
+ )
+ ).then_return(None)
+
+ result = await subject.execute(data)
+
+ assert isinstance(result, VerifyTipPresenceResult)
diff --git a/api/tests/opentrons/protocol_engine/conftest.py b/api/tests/opentrons/protocol_engine/conftest.py
index 8574cefe248..ab23f7e9e08 100644
--- a/api/tests/opentrons/protocol_engine/conftest.py
+++ b/api/tests/opentrons/protocol_engine/conftest.py
@@ -7,7 +7,7 @@
from opentrons_shared_data import load_shared_data
from opentrons_shared_data.deck import load as load_deck
-from opentrons_shared_data.deck.dev_types import DeckDefinitionV4
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5
from opentrons_shared_data.labware import load_definition
from opentrons_shared_data.pipette import pipette_definition
from opentrons.protocols.models import LabwareDefinition
@@ -20,6 +20,8 @@
from opentrons.hardware_control import HardwareControlAPI, OT2HardwareControlAPI
from opentrons.hardware_control.api import API
+from opentrons.hardware_control.protocols.types import FlexRobotType, OT2RobotType
+from opentrons.protocol_engine.notes import CommandNoteAdder
if TYPE_CHECKING:
from opentrons.hardware_control.ot3api import OT3API
@@ -34,7 +36,9 @@ def hardware_api(decoy: Decoy) -> HardwareControlAPI:
@pytest.fixture
def ot2_hardware_api(decoy: Decoy) -> API:
"""Get a mocked out OT-2 hardware API."""
- return decoy.mock(cls=API)
+ mock = decoy.mock(cls=API)
+ decoy.when(mock.get_robot_type()).then_return(OT2RobotType)
+ return mock
@pytest.mark.ot3_only
@@ -44,28 +48,30 @@ def ot3_hardware_api(decoy: Decoy) -> OT3API:
try:
from opentrons.hardware_control.ot3api import OT3API
- return decoy.mock(cls=OT3API)
+ mock = decoy.mock(cls=OT3API)
+ decoy.when(mock.get_robot_type()).then_return(FlexRobotType)
+ return mock
except ImportError:
# TODO (tz, 9-23-22) Figure out a better way to use this fixture with OT-3 api only.
return None # type: ignore[return-value]
@pytest.fixture(scope="session")
-def ot2_standard_deck_def() -> DeckDefinitionV4:
+def ot2_standard_deck_def() -> DeckDefinitionV5:
"""Get the OT-2 standard deck definition."""
- return load_deck(STANDARD_OT2_DECK, 4)
+ return load_deck(STANDARD_OT2_DECK, 5)
@pytest.fixture(scope="session")
-def ot2_short_trash_deck_def() -> DeckDefinitionV4:
+def ot2_short_trash_deck_def() -> DeckDefinitionV5:
"""Get the OT-2 short-trash deck definition."""
- return load_deck(SHORT_TRASH_DECK, 4)
+ return load_deck(SHORT_TRASH_DECK, 5)
@pytest.fixture(scope="session")
-def ot3_standard_deck_def() -> DeckDefinitionV4:
+def ot3_standard_deck_def() -> DeckDefinitionV5:
"""Get the OT-2 standard deck definition."""
- return load_deck(STANDARD_OT3_DECK, 4)
+ return load_deck(STANDARD_OT3_DECK, 5)
@pytest.fixture(scope="session")
@@ -225,3 +231,9 @@ def supported_tip_fixture() -> pipette_definition.SupportedTipsDefinition:
dispense=pipette_definition.ulPerMMDefinition(default={"1": [(0, 0, 0)]}),
defaultPushOutVolume=3,
)
+
+
+@pytest.fixture
+def mock_command_note_adder(decoy: Decoy) -> CommandNoteAdder:
+ """Get a command note adder."""
+ return decoy.mock(cls=CommandNoteAdder)
diff --git a/api/tests/opentrons/protocol_engine/execution/test_command_executor.py b/api/tests/opentrons/protocol_engine/execution/test_command_executor.py
index 50c54eceacf..8f4433a9ebe 100644
--- a/api/tests/opentrons/protocol_engine/execution/test_command_executor.py
+++ b/api/tests/opentrons/protocol_engine/execution/test_command_executor.py
@@ -10,6 +10,10 @@
from opentrons.hardware_control import HardwareControlAPI, OT2HardwareControlAPI
from opentrons.protocol_engine import errors
+from opentrons.protocol_engine.error_recovery_policy import (
+ ErrorRecoveryPolicy,
+ ErrorRecoveryType,
+)
from opentrons.protocol_engine.errors.exceptions import (
EStopActivatedError as PE_EStopActivatedError,
)
@@ -17,7 +21,8 @@
from opentrons.protocol_engine.state import StateStore
from opentrons.protocol_engine.actions import (
ActionDispatcher,
- UpdateCommandAction,
+ RunCommandAction,
+ SucceedCommandAction,
FailCommandAction,
)
@@ -40,8 +45,12 @@
RailLightsHandler,
StatusBarHandler,
)
+from opentrons.protocol_engine.execution.command_executor import (
+ CommandNoteTrackerProvider,
+)
from opentrons_shared_data.errors.exceptions import EStopActivatedError, PythonException
+from opentrons.protocol_engine.notes import CommandNoteTracker, CommandNote
@pytest.fixture
@@ -122,6 +131,39 @@ def status_bar(decoy: Decoy) -> StatusBarHandler:
return decoy.mock(cls=StatusBarHandler)
+@pytest.fixture
+def command_note_tracker_provider(decoy: Decoy) -> CommandNoteTrackerProvider:
+ """Get a mock tracker provider."""
+ return decoy.mock(cls=CommandNoteTrackerProvider)
+
+
+@pytest.fixture
+def error_recovery_policy(decoy: Decoy) -> ErrorRecoveryPolicy:
+ """Get a mock error recovery policy."""
+ return decoy.mock(cls=ErrorRecoveryPolicy)
+
+
+def get_next_tracker(
+ decoy: Decoy, provider: CommandNoteTrackerProvider
+) -> CommandNoteTracker:
+ """Get the next tracker provided by a provider, in code without being a fixture.
+
+ This is useful for testing the execution of multiple commands, each of which will get
+ a different tracker instance.
+ """
+ new_tracker = decoy.mock(cls=CommandNoteTracker)
+ decoy.when(provider()).then_return(new_tracker)
+ return new_tracker
+
+
+@pytest.fixture
+def command_note_tracker(
+ decoy: Decoy, command_note_tracker_provider: CommandNoteTrackerProvider
+) -> CommandNoteTracker:
+ """Get the tracker that the provider will provide."""
+ return get_next_tracker(decoy, command_note_tracker_provider)
+
+
@pytest.fixture
def subject(
hardware_api: HardwareControlAPI,
@@ -137,6 +179,8 @@ def subject(
rail_lights: RailLightsHandler,
status_bar: StatusBarHandler,
model_utils: ModelUtils,
+ command_note_tracker_provider: CommandNoteTrackerProvider,
+ error_recovery_policy: ErrorRecoveryPolicy,
) -> CommandExecutor:
"""Get a CommandExecutor test subject with its dependencies mocked out."""
return CommandExecutor(
@@ -153,6 +197,8 @@ def subject(
model_utils=model_utils,
rail_lights=rail_lights,
status_bar=status_bar,
+ command_note_tracker_provider=command_note_tracker_provider,
+ error_recovery_policy=error_recovery_policy,
)
@@ -184,6 +230,7 @@ async def test_execute(
rail_lights: RailLightsHandler,
status_bar: StatusBarHandler,
model_utils: ModelUtils,
+ command_note_tracker: CommandNoteTracker,
subject: CommandExecutor,
) -> None:
"""It should be able to execute a command."""
@@ -195,9 +242,7 @@ class _TestCommand(BaseCommand[_TestCommandParams, _TestCommandResult]):
params: _TestCommandParams
result: Optional[_TestCommandResult]
- @property
- def _ImplementationCls(self) -> Type[_TestCommandImpl]:
- return TestCommandImplCls
+ _ImplementationCls: Type[_TestCommandImpl] = TestCommandImplCls
command_params = _TestCommandParams()
command_result = _TestCommandResult()
@@ -225,7 +270,16 @@ def _ImplementationCls(self) -> Type[_TestCommandImpl]:
),
)
- completed_command = cast(
+ command_notes = [
+ CommandNote(
+ noteKind="warning",
+ shortMessage="hello",
+ longMessage="test command note",
+ source="test",
+ )
+ ]
+
+ expected_completed_command = cast(
Command,
_TestCommand(
id="command-id",
@@ -236,6 +290,7 @@ def _ImplementationCls(self) -> Type[_TestCommandImpl]:
status=CommandStatus.SUCCEEDED,
params=command_params,
result=command_result,
+ notes=command_notes,
),
)
@@ -243,6 +298,18 @@ def _ImplementationCls(self) -> Type[_TestCommandImpl]:
queued_command
)
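+    # Dispatching RunCommandAction re-stubs the state store so that later lookups
+    # return the running command instead of the queued one.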
+ decoy.when(
+ action_dispatcher.dispatch(
+ RunCommandAction(
+ command_id="command-id", started_at=datetime(year=2022, month=2, day=2)
+ )
+ )
+ ).then_do(
+ lambda _: decoy.when(
+ state_store.commands.get(command_id="command-id")
+ ).then_return(running_command)
+ )
+
decoy.when(
queued_command._ImplementationCls(
state_view=state_store,
@@ -256,11 +323,14 @@ def _ImplementationCls(self) -> Type[_TestCommandImpl]:
run_control=run_control,
rail_lights=rail_lights,
status_bar=status_bar,
+ command_note_adder=command_note_tracker,
)
).then_return(
command_impl # type: ignore[arg-type]
)
+ decoy.when(command_note_tracker.get_notes()).then_return(command_notes)
+
decoy.when(await command_impl.execute(command_params)).then_return(command_result)
decoy.when(model_utils.get_timestamp()).then_return(
@@ -272,10 +342,9 @@ def _ImplementationCls(self) -> Type[_TestCommandImpl]:
decoy.verify(
action_dispatcher.dispatch(
- UpdateCommandAction(private_result=None, command=running_command)
- ),
- action_dispatcher.dispatch(
- UpdateCommandAction(private_result=None, command=completed_command)
+ SucceedCommandAction(
+ private_result=None, command=expected_completed_command
+ )
),
)
@@ -289,8 +358,8 @@ def _ImplementationCls(self) -> Type[_TestCommandImpl]:
False,
),
(
- EStopActivatedError("oh no"),
- matchers.ErrorMatching(PE_EStopActivatedError, match="oh no"),
+ EStopActivatedError(),
+ matchers.ErrorMatching(PE_EStopActivatedError),
True,
),
(
@@ -321,6 +390,8 @@ async def test_execute_raises_protocol_engine_error(
status_bar: StatusBarHandler,
model_utils: ModelUtils,
subject: CommandExecutor,
+ command_note_tracker: CommandNoteTracker,
+ error_recovery_policy: ErrorRecoveryPolicy,
command_error: Exception,
expected_error: Any,
unexpected_error: bool,
@@ -334,9 +405,7 @@ class _TestCommand(BaseCommand[_TestCommandParams, _TestCommandResult]):
params: _TestCommandParams
result: Optional[_TestCommandResult]
- @property
- def _ImplementationCls(self) -> Type[_TestCommandImpl]:
- return TestCommandImplCls
+ _ImplementationCls: Type[_TestCommandImpl] = TestCommandImplCls
command_params = _TestCommandParams()
@@ -363,10 +432,31 @@ def _ImplementationCls(self) -> Type[_TestCommandImpl]:
),
)
+ command_notes = [
+ CommandNote(
+ noteKind="warning",
+ shortMessage="hello",
+ longMessage="test command note",
+ source="test",
+ )
+ ]
+
decoy.when(state_store.commands.get(command_id="command-id")).then_return(
queued_command
)
+ decoy.when(
+ action_dispatcher.dispatch(
+ RunCommandAction(
+ command_id="command-id", started_at=datetime(year=2022, month=2, day=2)
+ )
+ )
+ ).then_do(
+ lambda _: decoy.when(
+ state_store.commands.get(command_id="command-id")
+ ).then_return(running_command)
+ )
+
decoy.when(
queued_command._ImplementationCls(
state_view=state_store,
@@ -380,6 +470,7 @@ def _ImplementationCls(self) -> Type[_TestCommandImpl]:
run_control=run_control,
rail_lights=rail_lights,
status_bar=status_bar,
+ command_note_adder=command_note_tracker,
)
).then_return(
command_impl # type: ignore[arg-type]
@@ -393,18 +484,24 @@ def _ImplementationCls(self) -> Type[_TestCommandImpl]:
datetime(year=2023, month=3, day=3),
)
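+    # The mocked recovery policy chooses WAIT_FOR_RECOVERY, which should be carried
+    # through to the FailCommandAction dispatched below.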
+ decoy.when(error_recovery_policy(matchers.Anything(), expected_error)).then_return(
+ ErrorRecoveryType.WAIT_FOR_RECOVERY
+ )
+
+ decoy.when(command_note_tracker.get_notes()).then_return(command_notes)
+
await subject.execute("command-id")
decoy.verify(
- action_dispatcher.dispatch(
- UpdateCommandAction(private_result=None, command=running_command)
- ),
action_dispatcher.dispatch(
FailCommandAction(
command_id="command-id",
+ running_command=running_command,
error_id="error-id",
failed_at=datetime(year=2023, month=3, day=3),
error=expected_error,
+ type=ErrorRecoveryType.WAIT_FOR_RECOVERY,
+ notes=command_notes,
)
),
)
diff --git a/api/tests/opentrons/protocol_engine/execution/test_door_watcher.py b/api/tests/opentrons/protocol_engine/execution/test_door_watcher.py
index fd326b04920..1e252650957 100644
--- a/api/tests/opentrons/protocol_engine/execution/test_door_watcher.py
+++ b/api/tests/opentrons/protocol_engine/execution/test_door_watcher.py
@@ -68,7 +68,7 @@ async def test_event_forwarding(
) -> None:
"""It should forward events that come from a different thread."""
handler_captor = matchers.Captor()
- unsubscribe_callback = decoy.mock()
+ unsubscribe_callback = decoy.mock(name="unsubscribe_callback")
decoy.when(hardware_control_api.register_callback(handler_captor)).then_return(
unsubscribe_callback
)
@@ -104,8 +104,8 @@ async def test_one_subscribe_one_unsubscribe(
subject: DoorWatcher,
) -> None:
"""Multiple start()s and stop()s should be collapsed."""
- unsubscribe = decoy.mock()
- wrong_unsubscribe = decoy.mock()
+ unsubscribe = decoy.mock(name="unsubscribe_callback")
+ wrong_unsubscribe = decoy.mock(name="wrong_unsubscribe")
decoy.when(hardware_control_api.register_callback(matchers.Anything())).then_return(
unsubscribe, wrong_unsubscribe
diff --git a/api/tests/opentrons/protocol_engine/execution/test_equipment_handler.py b/api/tests/opentrons/protocol_engine/execution/test_equipment_handler.py
index 17cf5d53248..c8db4b3191f 100644
--- a/api/tests/opentrons/protocol_engine/execution/test_equipment_handler.py
+++ b/api/tests/opentrons/protocol_engine/execution/test_equipment_handler.py
@@ -1,16 +1,17 @@
"""Test equipment command execution side effects."""
import pytest
+from _pytest.fixtures import SubRequest
import inspect
from datetime import datetime
from decoy import Decoy, matchers
-from typing import Any, Optional, cast
+from typing import Any, Optional, cast, Dict
from opentrons_shared_data.pipette.dev_types import PipetteNameType
from opentrons_shared_data.pipette import pipette_definition
from opentrons_shared_data.labware.dev_types import LabwareUri
from opentrons.calibration_storage.helpers import uri_from_details
-from opentrons.types import Mount as HwMount, MountType, DeckSlotName
+from opentrons.types import Mount as HwMount, MountType, DeckSlotName, Point
from opentrons.hardware_control import HardwareControlAPI
from opentrons.hardware_control.modules import (
TempDeck,
@@ -22,7 +23,6 @@
from opentrons.protocols.models import LabwareDefinition
from opentrons.protocol_engine import errors
-from opentrons.protocol_engine.actions import ActionDispatcher
from opentrons.protocol_engine.types import (
DeckSlotLocation,
DeckType,
@@ -56,6 +56,7 @@
LoadedPipetteData,
LoadedModuleData,
)
+from ..pipette_fixtures import get_default_nozzle_map
def _make_config(use_virtual_modules: bool) -> Config:
@@ -83,12 +84,6 @@ def state_store(decoy: Decoy) -> StateStore:
return decoy.mock(cls=StateStore)
-@pytest.fixture
-def action_dispatcher(decoy: Decoy) -> ActionDispatcher:
- """Get a mocked out ActionDispatcher instance."""
- return decoy.mock(cls=ActionDispatcher)
-
-
@pytest.fixture
def model_utils(decoy: Decoy) -> ModelUtils:
"""Get a mocked out ModelUtils instance."""
@@ -127,9 +122,16 @@ async def temp_module_v2(decoy: Decoy) -> TempDeck:
return temp_mod
+@pytest.fixture(params=["v0", "v1", "v3"])
+def tip_overlap_versions(request: SubRequest) -> str:
+ """Get a series of tip overlap versions."""
+ return cast(str, request.param)
+
+
@pytest.fixture
def loaded_static_pipette_data(
supported_tip_fixture: pipette_definition.SupportedTipsDefinition,
+ target_tip_overlap_data: Dict[str, float],
) -> LoadedStaticPipetteData:
"""Get a pipette config data value object."""
return LoadedStaticPipetteData(
@@ -144,12 +146,21 @@ def loaded_static_pipette_data(
default_dispense={"c": 7.89},
),
tip_configuration_lookup_table={4.56: supported_tip_fixture},
- nominal_tip_overlap={"default": 9.87},
+ nominal_tip_overlap=target_tip_overlap_data,
home_position=10.11,
nozzle_offset_z=12.13,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE),
+ back_left_corner_offset=Point(x=1, y=2, z=3),
+ front_right_corner_offset=Point(x=4, y=5, z=6),
)
+@pytest.fixture
+def target_tip_overlap_data(tip_overlap_versions: str) -> Dict[str, float]:
+ """Get the corresponding overlap data for the version."""
+ return {"default": 2.13 * int(tip_overlap_versions[1:])}
+
+
@pytest.fixture
def virtual_pipette_data_provider(
decoy: Decoy,
@@ -162,7 +173,6 @@ def virtual_pipette_data_provider(
def subject(
hardware_api: HardwareControlAPI,
state_store: StateStore,
- action_dispatcher: ActionDispatcher,
labware_data_provider: LabwareDataProvider,
module_data_provider: ModuleDataProvider,
model_utils: ModelUtils,
@@ -172,7 +182,6 @@ def subject(
return EquipmentHandler(
hardware_api=hardware_api,
state_store=state_store,
- action_dispatcher=action_dispatcher,
labware_data_provider=labware_data_provider,
module_data_provider=module_data_provider,
model_utils=model_utils,
@@ -610,8 +619,8 @@ async def test_load_pipette(
model_utils: ModelUtils,
hardware_api: HardwareControlAPI,
state_store: StateStore,
- action_dispatcher: ActionDispatcher,
loaded_static_pipette_data: LoadedStaticPipetteData,
+ tip_overlap_versions: str,
subject: EquipmentHandler,
) -> None:
"""It should load pipette data, check attachment, and generate an ID."""
@@ -627,7 +636,14 @@ async def test_load_pipette(
)
decoy.when(
- pipette_data_provider.get_pipette_static_config(pipette_dict)
+ pipette_data_provider.validate_and_default_tip_overlap_version(
+ tip_overlap_versions
+ )
+ ).then_return(tip_overlap_versions)
+ decoy.when(
+ pipette_data_provider.get_pipette_static_config(
+ pipette_dict=pipette_dict, tip_overlap_version=tip_overlap_versions
+ ),
).then_return(loaded_static_pipette_data)
decoy.when(hardware_api.get_instrument_max_height(mount=HwMount.LEFT)).then_return(
@@ -638,6 +654,7 @@ async def test_load_pipette(
pipette_name=PipetteNameType.P300_SINGLE,
mount=MountType.LEFT,
pipette_id=None,
+ tip_overlap_version=tip_overlap_versions,
)
assert result == LoadedPipetteData(
@@ -661,8 +678,8 @@ async def test_load_pipette_96_channels(
model_utils: ModelUtils,
hardware_api: HardwareControlAPI,
state_store: StateStore,
- action_dispatcher: ActionDispatcher,
loaded_static_pipette_data: LoadedStaticPipetteData,
+ tip_overlap_versions: str,
subject: EquipmentHandler,
) -> None:
"""It should load pipette data, check attachment, and generate an ID."""
@@ -674,7 +691,14 @@ async def test_load_pipette_96_channels(
pipette_dict
)
decoy.when(
- pipette_data_provider.get_pipette_static_config(pipette_dict)
+ pipette_data_provider.validate_and_default_tip_overlap_version(
+ tip_overlap_versions
+ )
+ ).then_return(tip_overlap_versions)
+ decoy.when(
+ pipette_data_provider.get_pipette_static_config(
+ pipette_dict=pipette_dict, tip_overlap_version=tip_overlap_versions
+ )
).then_return(loaded_static_pipette_data)
decoy.when(hardware_api.get_instrument_max_height(mount=HwMount.LEFT)).then_return(
@@ -685,6 +709,7 @@ async def test_load_pipette_96_channels(
pipette_name=PipetteNameType.P1000_96,
mount=MountType.LEFT,
pipette_id=None,
+ tip_overlap_version=tip_overlap_versions,
)
assert result == LoadedPipetteData(
@@ -698,8 +723,8 @@ async def test_load_pipette_uses_provided_id(
decoy: Decoy,
hardware_api: HardwareControlAPI,
state_store: StateStore,
- action_dispatcher: ActionDispatcher,
loaded_static_pipette_data: LoadedStaticPipetteData,
+ tip_overlap_versions: str,
subject: EquipmentHandler,
) -> None:
"""It should use the provided ID rather than generating an ID for the pipette."""
@@ -710,13 +735,21 @@ async def test_load_pipette_uses_provided_id(
pipette_dict
)
decoy.when(
- pipette_data_provider.get_pipette_static_config(pipette_dict)
+ pipette_data_provider.validate_and_default_tip_overlap_version(
+ tip_overlap_versions
+ )
+ ).then_return(tip_overlap_versions)
+ decoy.when(
+ pipette_data_provider.get_pipette_static_config(
+ pipette_dict=pipette_dict, tip_overlap_version=tip_overlap_versions
+ )
).then_return(loaded_static_pipette_data)
result = await subject.load_pipette(
pipette_name=PipetteNameType.P300_SINGLE,
mount=MountType.LEFT,
pipette_id="my-pipette-id",
+ tip_overlap_version=tip_overlap_versions,
)
assert result == LoadedPipetteData(
@@ -730,9 +763,9 @@ async def test_load_pipette_use_virtual(
decoy: Decoy,
model_utils: ModelUtils,
state_store: StateStore,
- action_dispatcher: ActionDispatcher,
loaded_static_pipette_data: LoadedStaticPipetteData,
subject: EquipmentHandler,
+ tip_overlap_versions: str,
virtual_pipette_data_provider: pipette_data_provider.VirtualPipetteDataProvider,
) -> None:
"""It should use the provided ID rather than generating an ID for the pipette."""
@@ -742,15 +775,22 @@ async def test_load_pipette_use_virtual(
decoy.when(model_utils.generate_id(prefix="fake-serial-number-")).then_return(
"fake-serial"
)
-
+ decoy.when(
+ pipette_data_provider.validate_and_default_tip_overlap_version(
+ tip_overlap_versions
+ )
+ ).then_return(tip_overlap_versions)
decoy.when(
virtual_pipette_data_provider.get_virtual_pipette_static_config(
- PipetteNameType.P300_SINGLE.value, "unique-id"
+ PipetteNameType.P300_SINGLE.value, "unique-id", tip_overlap_versions
)
).then_return(loaded_static_pipette_data)
result = await subject.load_pipette(
- pipette_name=PipetteNameType.P300_SINGLE, mount=MountType.LEFT, pipette_id=None
+ pipette_name=PipetteNameType.P300_SINGLE,
+ mount=MountType.LEFT,
+ pipette_id=None,
+ tip_overlap_version=tip_overlap_versions,
)
assert result == LoadedPipetteData(
@@ -790,6 +830,7 @@ async def test_load_pipette_raises_if_pipette_not_attached(
pipette_name=PipetteNameType.P300_SINGLE,
mount=MountType.LEFT,
pipette_id=None,
+ tip_overlap_version="v9999",
)
@@ -833,6 +874,7 @@ async def test_load_module(
HardwareModule(serial_number="serial-1", definition=tempdeck_v1_def),
HardwareModule(serial_number="serial-2", definition=tempdeck_v2_def),
],
+ expected_serial_number=None,
)
).then_return(HardwareModule(serial_number="serial-1", definition=tempdeck_v1_def))
diff --git a/api/tests/opentrons/protocol_engine/execution/test_hardware_stopper.py b/api/tests/opentrons/protocol_engine/execution/test_hardware_stopper.py
index 96e0cc3ea88..537fd07613c 100644
--- a/api/tests/opentrons/protocol_engine/execution/test_hardware_stopper.py
+++ b/api/tests/opentrons/protocol_engine/execution/test_hardware_stopper.py
@@ -15,7 +15,12 @@
TipHandler,
HardwareStopper,
)
-from opentrons.protocol_engine.types import MotorAxis, TipGeometry, PostRunHardwareState
+from opentrons.protocol_engine.types import (
+ MotorAxis,
+ TipGeometry,
+ PostRunHardwareState,
+ AddressableOffsetVector,
+)
if TYPE_CHECKING:
from opentrons.hardware_control.ot3api import OT3API
@@ -91,7 +96,7 @@ async def test_hardware_stopping_sequence(
post_run_hardware_state: PostRunHardwareState,
expected_home_after: bool,
) -> None:
- """It should stop the hardware, home the robot and perform drop tip if required."""
+ """It should stop the hardware, and home the robot. Flex no longer performs automatic drop tip.."""
decoy.when(state_store.pipettes.get_all_attached_tips()).then_return(
[
("pipette-id", TipGeometry(length=1.0, volume=2.0, diameter=3.0)),
@@ -99,7 +104,8 @@ async def test_hardware_stopping_sequence(
)
await subject.do_stop_and_recover(
- drop_tips_after_run=True, post_run_hardware_state=post_run_hardware_state
+ drop_tips_after_run=True,
+ post_run_hardware_state=post_run_hardware_state,
)
decoy.verify(
@@ -107,16 +113,6 @@ async def test_hardware_stopping_sequence(
await movement.home(
axes=[MotorAxis.X, MotorAxis.Y, MotorAxis.LEFT_Z, MotorAxis.RIGHT_Z]
),
- await mock_tip_handler.add_tip(
- pipette_id="pipette-id",
- tip=TipGeometry(length=1.0, volume=2.0, diameter=3.0),
- ),
- await movement.move_to_well(
- pipette_id="pipette-id",
- labware_id="fixedTrash",
- well_name="A1",
- ),
- await mock_tip_handler.drop_tip(pipette_id="pipette-id", home_after=False),
await hardware_api.stop(home_after=expected_home_after),
)
@@ -210,7 +206,7 @@ async def test_hardware_stopping_sequence_with_gripper(
movement: MovementHandler,
mock_tip_handler: TipHandler,
) -> None:
- """It should stop the hardware, home the robot and perform drop tip if required."""
+ """It should stop the hardware, and home the robot. Flex no longer performs automatic drop tip."""
subject = HardwareStopper(
hardware_api=ot3_hardware_api,
state_store=state_store,
@@ -224,6 +220,46 @@ async def test_hardware_stopping_sequence_with_gripper(
)
decoy.when(state_store.config.use_virtual_gripper).then_return(False)
decoy.when(ot3_hardware_api.has_gripper()).then_return(True)
+
+ await subject.do_stop_and_recover(
+ drop_tips_after_run=True,
+ post_run_hardware_state=PostRunHardwareState.HOME_AND_STAY_ENGAGED,
+ )
+
+ decoy.verify(
+ await ot3_hardware_api.stop(home_after=False),
+ await ot3_hardware_api.home_z(mount=OT3Mount.GRIPPER),
+ await movement.home(
+ axes=[MotorAxis.X, MotorAxis.Y, MotorAxis.LEFT_Z, MotorAxis.RIGHT_Z]
+ ),
+ await ot3_hardware_api.stop(home_after=True),
+ )
+
+
+@pytest.mark.ot3_only
+async def test_hardware_stopping_sequence_with_fixed_trash(
+ decoy: Decoy,
+ state_store: StateStore,
+ ot3_hardware_api: OT3API,
+ movement: MovementHandler,
+ mock_tip_handler: TipHandler,
+) -> None:
+ """It should stop the hardware, and home the robot. Flex no longer performs automatic drop tip."""
+ subject = HardwareStopper(
+ hardware_api=ot3_hardware_api,
+ state_store=state_store,
+ movement=movement,
+ tip_handler=mock_tip_handler,
+ )
+ decoy.when(state_store.pipettes.get_all_attached_tips()).then_return(
+ [
+ ("pipette-id", TipGeometry(length=1.0, volume=2.0, diameter=3.0)),
+ ]
+ )
+ decoy.when(state_store.labware.get_fixed_trash_id()).then_return("fixedTrash")
+ decoy.when(state_store.config.use_virtual_gripper).then_return(False)
+ decoy.when(ot3_hardware_api.has_gripper()).then_return(True)
+
await subject.do_stop_and_recover(
drop_tips_after_run=True,
post_run_hardware_state=PostRunHardwareState.HOME_AND_STAY_ENGAGED,
@@ -250,3 +286,55 @@ async def test_hardware_stopping_sequence_with_gripper(
),
await ot3_hardware_api.stop(home_after=True),
)
+
+
+async def test_hardware_stopping_sequence_with_OT2_addressable_area(
+ decoy: Decoy,
+ state_store: StateStore,
+ hardware_api: HardwareAPI,
+ movement: MovementHandler,
+ mock_tip_handler: TipHandler,
+) -> None:
+ """It should stop the hardware, and home the robot. Flex no longer performs automatic drop tip."""
+ subject = HardwareStopper(
+ hardware_api=hardware_api,
+ state_store=state_store,
+ movement=movement,
+ tip_handler=mock_tip_handler,
+ )
+ decoy.when(state_store.pipettes.get_all_attached_tips()).then_return(
+ [
+ ("pipette-id", TipGeometry(length=1.0, volume=2.0, diameter=3.0)),
+ ]
+ )
+ decoy.when(state_store.config.robot_type).then_return("OT-2 Standard")
+ decoy.when(state_store.config.use_virtual_gripper).then_return(False)
+
+ await subject.do_stop_and_recover(
+ drop_tips_after_run=True,
+ post_run_hardware_state=PostRunHardwareState.HOME_AND_STAY_ENGAGED,
+ )
+
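+    # The OT-2, unlike the Flex, still performs an automatic drop tip into the
+    # fixedTrash addressable area before the final stop.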
+ decoy.verify(
+ await hardware_api.stop(home_after=False),
+ await movement.home(
+ axes=[MotorAxis.X, MotorAxis.Y, MotorAxis.LEFT_Z, MotorAxis.RIGHT_Z]
+ ),
+ await mock_tip_handler.add_tip(
+ pipette_id="pipette-id",
+ tip=TipGeometry(length=1.0, volume=2.0, diameter=3.0),
+ ),
+ await movement.move_to_addressable_area(
+ pipette_id="pipette-id",
+ addressable_area_name="fixedTrash",
+ offset=AddressableOffsetVector(x=0, y=0, z=0),
+ force_direct=False,
+ speed=None,
+ minimum_z_height=None,
+ ),
+ await mock_tip_handler.drop_tip(
+ pipette_id="pipette-id",
+ home_after=False,
+ ),
+ await hardware_api.stop(home_after=True),
+ )
diff --git a/api/tests/opentrons/protocol_engine/execution/test_labware_movement_handler.py b/api/tests/opentrons/protocol_engine/execution/test_labware_movement_handler.py
index 0934b6d1c10..58619647f54 100644
--- a/api/tests/opentrons/protocol_engine/execution/test_labware_movement_handler.py
+++ b/api/tests/opentrons/protocol_engine/execution/test_labware_movement_handler.py
@@ -5,7 +5,7 @@
import pytest
from decoy import Decoy, matchers
-from typing import TYPE_CHECKING, Union
+from typing import TYPE_CHECKING, Union, Optional, Tuple
from opentrons.protocol_engine.execution import EquipmentHandler, MovementHandler
from opentrons.hardware_control import HardwareControlAPI
@@ -22,6 +22,7 @@
LabwareLocation,
NonStackedLocation,
LabwareMovementOffsetData,
+ Dimensions,
)
from opentrons.protocol_engine.execution.thermocycler_plate_lifter import (
ThermocyclerPlateLifter,
@@ -85,6 +86,22 @@ def heater_shaker_movement_flagger(decoy: Decoy) -> HeaterShakerMovementFlagger:
return decoy.mock(cls=HeaterShakerMovementFlagger)
+@pytest.fixture
+def hardware_gripper_offset_data() -> Tuple[
+ LabwareMovementOffsetData, LabwareMovementOffsetData
+]:
+ """Get a set of mocked labware offset data."""
+ user_offset_data = LabwareMovementOffsetData(
+ pickUpOffset=LabwareOffsetVector(x=123, y=234, z=345),
+ dropOffset=LabwareOffsetVector(x=111, y=222, z=333),
+ )
+ final_offset_data = LabwareMovementOffsetData(
+ pickUpOffset=LabwareOffsetVector(x=-1, y=-2, z=-3),
+ dropOffset=LabwareOffsetVector(x=1, y=2, z=3),
+ )
+ return user_offset_data, final_offset_data
+
+
def default_experimental_movement_data() -> LabwareMovementOffsetData:
"""Experimental movement data with default values."""
return LabwareMovementOffsetData(
@@ -93,6 +110,45 @@ def default_experimental_movement_data() -> LabwareMovementOffsetData:
)
+async def set_up_decoy_hardware_gripper(
+ decoy: Decoy, ot3_hardware_api: OT3API, state_store: StateStore
+) -> None:
+ """Shared hardware gripper decoy setup."""
+ decoy.when(state_store.config.use_virtual_gripper).then_return(False)
+ decoy.when(ot3_hardware_api.has_gripper()).then_return(True)
+ decoy.when(ot3_hardware_api.gripper_jaw_can_home()).then_return(True)
+ assert ot3_hardware_api.hardware_gripper
+ decoy.when(
+ await ot3_hardware_api.gantry_position(mount=OT3Mount.GRIPPER)
+ ).then_return(Point(x=777, y=888, z=999))
+
+ decoy.when(
+ await ot3_hardware_api.encoder_current_position_ot3(OT3Mount.GRIPPER)
+ ).then_return({Axis.G: 4.0})
+
+ decoy.when(
+ ot3_hardware_api.hardware_gripper.geometry.max_allowed_grip_error
+ ).then_return(6.0)
+
+ decoy.when(ot3_hardware_api.hardware_gripper.jaw_width).then_return(89)
+
+ decoy.when(
+ state_store.labware.get_grip_force("my-teleporting-labware")
+ ).then_return(100)
+
+ decoy.when(state_store.labware.get_labware_offset("new-offset-id")).then_return(
+ LabwareOffset(
+ id="new-offset-id",
+ createdAt=datetime(year=2022, month=10, day=20),
+ definitionUri="my-labware",
+ location=LabwareOffsetLocation(
+ slotName=DeckSlotName.SLOT_5
+ ), # this location doesn't matter for this test
+ vector=LabwareOffsetVector(x=0.5, y=0.6, z=0.7),
+ )
+ )
+
+
@pytest.mark.ot3_only
@pytest.fixture
def subject(
@@ -116,27 +172,126 @@ def subject(
)
+@pytest.mark.ot3_only
+async def test_raise_error_if_gripper_pickup_failed(
+ decoy: Decoy,
+ state_store: StateStore,
+ thermocycler_plate_lifter: ThermocyclerPlateLifter,
+ ot3_hardware_api: OT3API,
+ subject: LabwareMovementHandler,
+ hardware_gripper_offset_data: Tuple[
+ LabwareMovementOffsetData, LabwareMovementOffsetData
+ ],
+) -> None:
+ """Test that the gripper position check is called at the right time."""
+    # This check should only be called after the gripper opens and then closes
+    # again, which is when we expect the labware to be in the gripper jaws.
+ await set_up_decoy_hardware_gripper(decoy, ot3_hardware_api, state_store)
+ assert ot3_hardware_api.hardware_gripper
+
+ user_offset_data, final_offset_data = hardware_gripper_offset_data
+
+ starting_location = DeckSlotLocation(slotName=DeckSlotName.SLOT_1)
+ to_location = DeckSlotLocation(slotName=DeckSlotName.SLOT_2)
+
+ mock_tc_context_manager = decoy.mock(name="mock_tc_context_manager")
+ decoy.when(
+ thermocycler_plate_lifter.lift_plate_for_labware_movement(
+ labware_location=starting_location
+ )
+ ).then_return(mock_tc_context_manager)
+
+ decoy.when(
+ state_store.geometry.get_final_labware_movement_offset_vectors(
+ from_location=starting_location,
+ to_location=to_location,
+ additional_offset_vector=user_offset_data,
+ )
+ ).then_return(final_offset_data)
+
+ decoy.when(
+ state_store.geometry.get_labware_grip_point(
+ labware_id="my-teleporting-labware", location=starting_location
+ )
+ ).then_return(Point(101, 102, 119.5))
+
+ decoy.when(
+ state_store.geometry.get_labware_grip_point(
+ labware_id="my-teleporting-labware", location=to_location
+ )
+ ).then_return(Point(201, 202, 219.5))
+
+ decoy.when(
+ state_store.labware.get_dimensions(labware_id="my-teleporting-labware")
+ ).then_return(Dimensions(x=100, y=85, z=0))
+
+ decoy.when(
+ state_store.labware.get_well_bbox(labware_id="my-teleporting-labware")
+ ).then_return(Dimensions(x=99, y=80, z=1))
+
+ await subject.move_labware_with_gripper(
+ labware_id="my-teleporting-labware",
+ current_location=starting_location,
+ new_location=DeckSlotLocation(slotName=DeckSlotName.SLOT_2),
+ user_offset_data=user_offset_data,
+ post_drop_slide_offset=Point(x=1, y=1, z=1),
+ )
+
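+    # Each re-grip after the jaws first open is paired with a pickup-failure check
+    # using the labware's 85 mm y-dimension, allowing 5 mm of narrowing toward the
+    # well bounding box and no widening.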
+ decoy.verify(
+ await ot3_hardware_api.home(axes=[Axis.Z_L, Axis.Z_R, Axis.Z_G]),
+ await mock_tc_context_manager.__aenter__(),
+ await ot3_hardware_api.grip(force_newtons=100),
+ await ot3_hardware_api.ungrip(),
+ await ot3_hardware_api.grip(force_newtons=100),
+ ot3_hardware_api.raise_error_if_gripper_pickup_failed(
+ expected_grip_width=85,
+ grip_width_uncertainty_wider=0,
+ grip_width_uncertainty_narrower=5,
+ ),
+ await ot3_hardware_api.grip(force_newtons=100),
+ ot3_hardware_api.raise_error_if_gripper_pickup_failed(
+ expected_grip_width=85,
+ grip_width_uncertainty_wider=0,
+ grip_width_uncertainty_narrower=5,
+ ),
+ await ot3_hardware_api.grip(force_newtons=100),
+ ot3_hardware_api.raise_error_if_gripper_pickup_failed(
+ expected_grip_width=85,
+ grip_width_uncertainty_wider=0,
+ grip_width_uncertainty_narrower=5,
+ ),
+ await ot3_hardware_api.disengage_axes([Axis.Z_G]),
+ await ot3_hardware_api.ungrip(),
+ await ot3_hardware_api.ungrip(),
+ )
+
+
# TODO (spp, 2022-10-18):
# 1. Should write an acceptance test w/ real labware on ot3 deck.
# 2. This test will be split once waypoints generation is moved to motion planning.
@pytest.mark.parametrize(
- argnames=["from_location", "to_location"],
+ argnames=["from_location", "to_location", "slide_offset"],
argvalues=[
(
DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
DeckSlotLocation(slotName=DeckSlotName.SLOT_3),
+ None,
),
(
DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
ModuleLocation(moduleId="module-id"),
+ Point(x=50, y=0, z=0),
),
(
OnLabwareLocation(labwareId="a-labware-id"),
OnLabwareLocation(labwareId="another-labware-id"),
+ None,
),
(
ModuleLocation(moduleId="a-module-id"),
DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
+ Point(x=-10, y=40, z=-40),
),
],
)
@@ -149,30 +304,18 @@ async def test_move_labware_with_gripper(
subject: LabwareMovementHandler,
from_location: Union[DeckSlotLocation, ModuleLocation, OnLabwareLocation],
to_location: Union[DeckSlotLocation, ModuleLocation, OnLabwareLocation],
+ slide_offset: Optional[Point],
+ hardware_gripper_offset_data: Tuple[
+ LabwareMovementOffsetData, LabwareMovementOffsetData
+ ],
) -> None:
"""It should perform a labware movement with gripper by delegating to OT3API."""
# TODO (spp, 2023-07-26): this test does NOT stub out movement waypoints in order to
# keep this as the semi-smoke test that it previously was. We should add a proper
# smoke test for gripper labware movement with actual labware and make this a unit test.
+ await set_up_decoy_hardware_gripper(decoy, ot3_hardware_api, state_store)
- user_offset_data = LabwareMovementOffsetData(
- pickUpOffset=LabwareOffsetVector(x=123, y=234, z=345),
- dropOffset=LabwareOffsetVector(x=111, y=222, z=333),
- )
- final_offset_data = LabwareMovementOffsetData(
- pickUpOffset=LabwareOffsetVector(x=-1, y=-2, z=-3),
- dropOffset=LabwareOffsetVector(x=1, y=2, z=3),
- )
-
- decoy.when(state_store.config.use_virtual_gripper).then_return(False)
- decoy.when(ot3_hardware_api.has_gripper()).then_return(True)
- decoy.when(ot3_hardware_api._gripper_handler.is_ready_for_jaw_home()).then_return(
- True
- )
-
- decoy.when(
- await ot3_hardware_api.gantry_position(mount=OT3Mount.GRIPPER)
- ).then_return(Point(x=777, y=888, z=999))
+ user_offset_data, final_offset_data = hardware_gripper_offset_data
decoy.when(
state_store.geometry.get_final_labware_movement_offset_vectors(
@@ -182,39 +325,31 @@ async def test_move_labware_with_gripper(
)
).then_return(final_offset_data)
+ decoy.when(
+ state_store.labware.get_dimensions(labware_id="my-teleporting-labware")
+ ).then_return(Dimensions(x=100, y=85, z=0))
+
+ decoy.when(
+ state_store.labware.get_well_bbox(labware_id="my-teleporting-labware")
+ ).then_return(Dimensions(x=99, y=80, z=1))
+
decoy.when(
state_store.geometry.get_labware_grip_point(
labware_id="my-teleporting-labware", location=from_location
)
).then_return(Point(101, 102, 119.5))
-
decoy.when(
state_store.geometry.get_labware_grip_point(
labware_id="my-teleporting-labware", location=to_location
)
).then_return(Point(201, 202, 219.5))
- decoy.when(
- state_store.labware.get_grip_force("my-teleporting-labware")
- ).then_return(100)
- mock_tc_context_manager = decoy.mock()
+ mock_tc_context_manager = decoy.mock(name="mock_tc_context_manager")
decoy.when(
thermocycler_plate_lifter.lift_plate_for_labware_movement(
labware_location=from_location
)
).then_return(mock_tc_context_manager)
- decoy.when(state_store.labware.get_labware_offset("new-offset-id")).then_return(
- LabwareOffset(
- id="new-offset-id",
- createdAt=datetime(year=2022, month=10, day=20),
- definitionUri="my-labware",
- location=LabwareOffsetLocation(
- slotName=DeckSlotName.SLOT_5
- ), # this location doesn't matter for this test
- vector=LabwareOffsetVector(x=0.5, y=0.6, z=0.7),
- )
- )
-
expected_waypoints = [
Point(100, 100, 999), # move to above slot 1
Point(100, 100, 116.5), # move to labware on slot 1
@@ -229,38 +364,79 @@ async def test_move_labware_with_gripper(
current_location=from_location,
new_location=to_location,
user_offset_data=user_offset_data,
+ post_drop_slide_offset=slide_offset,
)
gripper = OT3Mount.GRIPPER
- decoy.verify(
- await ot3_hardware_api.home(axes=[Axis.Z_L, Axis.Z_R, Axis.Z_G]),
- await mock_tc_context_manager.__aenter__(),
- await ot3_hardware_api.grip(force_newtons=100),
- await ot3_hardware_api.move_to(
- mount=gripper, abs_position=expected_waypoints[0]
- ),
- await ot3_hardware_api.ungrip(),
- await ot3_hardware_api.move_to(
- mount=gripper, abs_position=expected_waypoints[1]
- ),
- await ot3_hardware_api.grip(force_newtons=100),
- await ot3_hardware_api.move_to(
- mount=gripper, abs_position=expected_waypoints[2]
- ),
- await ot3_hardware_api.grip(force_newtons=100),
- await ot3_hardware_api.move_to(
- mount=gripper, abs_position=expected_waypoints[3]
- ),
- await ot3_hardware_api.grip(force_newtons=100),
- await ot3_hardware_api.move_to(
- mount=gripper, abs_position=expected_waypoints[4]
- ),
- await ot3_hardware_api.ungrip(),
- await ot3_hardware_api.move_to(
- mount=gripper, abs_position=expected_waypoints[5]
- ),
- await ot3_hardware_api.idle_gripper(),
- )
+
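+    # Both branches verify the same pick-up and drop sequence; when a slide offset
+    # is provided, an extra ungrip and a final slide move are expected after the drop.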
+ if slide_offset is None:
+ decoy.verify(
+ await ot3_hardware_api.home(axes=[Axis.Z_L, Axis.Z_R, Axis.Z_G]),
+ await mock_tc_context_manager.__aenter__(),
+ await ot3_hardware_api.grip(force_newtons=100),
+ await ot3_hardware_api.move_to(
+ mount=gripper, abs_position=expected_waypoints[0]
+ ),
+ await ot3_hardware_api.ungrip(),
+ await ot3_hardware_api.move_to(
+ mount=gripper, abs_position=expected_waypoints[1]
+ ),
+ await ot3_hardware_api.grip(force_newtons=100),
+ await ot3_hardware_api.move_to(
+ mount=gripper, abs_position=expected_waypoints[2]
+ ),
+ await ot3_hardware_api.grip(force_newtons=100),
+ await ot3_hardware_api.move_to(
+ mount=gripper, abs_position=expected_waypoints[3]
+ ),
+ await ot3_hardware_api.grip(force_newtons=100),
+ await ot3_hardware_api.move_to(
+ mount=gripper, abs_position=expected_waypoints[4]
+ ),
+ await ot3_hardware_api.disengage_axes([Axis.Z_G]),
+ await ot3_hardware_api.ungrip(),
+ await ot3_hardware_api.home_z(OT3Mount.GRIPPER),
+ await ot3_hardware_api.move_to(
+ mount=gripper, abs_position=expected_waypoints[5]
+ ),
+ await ot3_hardware_api.idle_gripper(),
+ )
+ else:
+ decoy.verify(
+ await ot3_hardware_api.home(axes=[Axis.Z_L, Axis.Z_R, Axis.Z_G]),
+ await mock_tc_context_manager.__aenter__(),
+ await ot3_hardware_api.grip(force_newtons=100),
+ await ot3_hardware_api.move_to(
+ mount=gripper, abs_position=expected_waypoints[0]
+ ),
+ await ot3_hardware_api.ungrip(),
+ await ot3_hardware_api.move_to(
+ mount=gripper, abs_position=expected_waypoints[1]
+ ),
+ await ot3_hardware_api.grip(force_newtons=100),
+ await ot3_hardware_api.move_to(
+ mount=gripper, abs_position=expected_waypoints[2]
+ ),
+ await ot3_hardware_api.grip(force_newtons=100),
+ await ot3_hardware_api.move_to(
+ mount=gripper, abs_position=expected_waypoints[3]
+ ),
+ await ot3_hardware_api.grip(force_newtons=100),
+ await ot3_hardware_api.move_to(
+ mount=gripper, abs_position=expected_waypoints[4]
+ ),
+ await ot3_hardware_api.disengage_axes([Axis.Z_G]),
+ await ot3_hardware_api.ungrip(),
+ await ot3_hardware_api.home_z(OT3Mount.GRIPPER),
+ await ot3_hardware_api.move_to(
+ mount=gripper, abs_position=expected_waypoints[5]
+ ),
+ await ot3_hardware_api.ungrip(),
+ await ot3_hardware_api.move_to(
+ mount=gripper, abs_position=expected_waypoints[5] + slide_offset
+ ),
+ await ot3_hardware_api.idle_gripper(),
+ )
async def test_labware_movement_raises_on_ot2(
@@ -285,6 +461,7 @@ async def test_labware_movement_raises_on_ot2(
current_location=DeckSlotLocation(slotName=DeckSlotName.SLOT_3),
new_location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
user_offset_data=default_experimental_movement_data(),
+ post_drop_slide_offset=None,
)
@@ -302,6 +479,7 @@ async def test_labware_movement_skips_for_virtual_gripper(
current_location=DeckSlotLocation(slotName=DeckSlotName.SLOT_3),
new_location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
user_offset_data=default_experimental_movement_data(),
+ post_drop_slide_offset=None,
)
decoy.verify(
await ot3_hardware_api.move_to(
@@ -328,6 +506,7 @@ async def test_labware_movement_raises_without_gripper(
current_location=DeckSlotLocation(slotName=DeckSlotName.SLOT_3),
new_location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
user_offset_data=default_experimental_movement_data(),
+ post_drop_slide_offset=None,
)
diff --git a/api/tests/opentrons/protocol_engine/execution/test_movement_handler.py b/api/tests/opentrons/protocol_engine/execution/test_movement_handler.py
index e53242c93e7..75205b6e45d 100644
--- a/api/tests/opentrons/protocol_engine/execution/test_movement_handler.py
+++ b/api/tests/opentrons/protocol_engine/execution/test_movement_handler.py
@@ -17,6 +17,7 @@
DeckSlotLocation,
CurrentWell,
MotorAxis,
+ AddressableOffsetVector,
)
from opentrons.protocol_engine.state import (
StateStore,
@@ -111,7 +112,7 @@ async def test_move_to_well(
decoy.when(
state_store.motion.get_pipette_location(
pipette_id="pipette-id",
- current_well=None,
+ current_location=None,
)
).then_return(
PipetteLocationData(
@@ -225,7 +226,7 @@ async def test_move_to_well_from_starting_location(
decoy.when(
state_store.motion.get_pipette_location(
pipette_id="pipette-id",
- current_well=current_well,
+ current_location=current_well,
)
).then_return(
PipetteLocationData(
@@ -296,6 +297,103 @@ async def test_move_to_well_from_starting_location(
)
+async def test_move_to_addressable_area(
+ decoy: Decoy,
+ state_store: StateStore,
+ thermocycler_movement_flagger: ThermocyclerMovementFlagger,
+ heater_shaker_movement_flagger: HeaterShakerMovementFlagger,
+ mock_gantry_mover: GantryMover,
+ subject: MovementHandler,
+) -> None:
+ """Move requests should call hardware controller with movement data."""
+ decoy.when(
+ state_store.modules.get_heater_shaker_movement_restrictors()
+ ).then_return([])
+
+ decoy.when(
+ state_store.addressable_areas.get_addressable_area_base_slot("area-name")
+ ).then_return(DeckSlotName.SLOT_1)
+
+ decoy.when(state_store.tips.get_pipette_channels("pipette-id")).then_return(1)
+
+ decoy.when(
+ state_store.motion.get_pipette_location(
+ pipette_id="pipette-id",
+ current_location=None,
+ )
+ ).then_return(
+ PipetteLocationData(
+ mount=MountType.LEFT,
+ critical_point=CriticalPoint.FRONT_NOZZLE,
+ )
+ )
+
+ decoy.when(
+ await mock_gantry_mover.get_position(
+ pipette_id="pipette-id",
+ )
+ ).then_return(Point(1, 1, 1))
+
+ decoy.when(mock_gantry_mover.get_max_travel_z(pipette_id="pipette-id")).then_return(
+ 42.0
+ )
+
+ decoy.when(
+ state_store.pipettes.get_movement_speed(
+ pipette_id="pipette-id", requested_speed=45.6
+ )
+ ).then_return(39339.5)
+
+ decoy.when(
+ state_store.motion.get_movement_waypoints_to_addressable_area(
+ addressable_area_name="area-name",
+ offset=AddressableOffsetVector(x=9, y=8, z=7),
+ origin=Point(1, 1, 1),
+ origin_cp=CriticalPoint.FRONT_NOZZLE,
+ max_travel_z=42.0,
+ force_direct=True,
+ minimum_z_height=12.3,
+ stay_at_max_travel_z=True,
+ ignore_tip_configuration=False,
+ )
+ ).then_return(
+ [Waypoint(Point(1, 2, 3), CriticalPoint.XY_CENTER), Waypoint(Point(4, 5, 6))]
+ )
+
+ decoy.when(
+ await mock_gantry_mover.move_to(
+ pipette_id="pipette-id",
+ waypoints=[
+ Waypoint(Point(1, 2, 3), CriticalPoint.XY_CENTER),
+ Waypoint(Point(4, 5, 6)),
+ ],
+ speed=39339.5,
+ ),
+ ).then_return(Point(4, 5, 6))
+
+ result = await subject.move_to_addressable_area(
+ pipette_id="pipette-id",
+ addressable_area_name="area-name",
+ offset=AddressableOffsetVector(x=9, y=8, z=7),
+ force_direct=True,
+ minimum_z_height=12.3,
+ speed=45.6,
+ stay_at_highest_possible_z=True,
+ ignore_tip_configuration=False,
+ )
+
+ assert result == Point(x=4, y=5, z=6)
+
+ decoy.verify(
+ heater_shaker_movement_flagger.raise_if_movement_restricted(
+ hs_movement_restrictors=[],
+ destination_slot=1,
+ is_multi_channel=False,
+ destination_is_tip_rack=False,
+ )
+ )
+
+
class MoveRelativeSpec(NamedTuple):
"""Test data for move_relative."""
diff --git a/api/tests/opentrons/protocol_engine/execution/test_pipetting_handler.py b/api/tests/opentrons/protocol_engine/execution/test_pipetting_handler.py
index 402a1487db6..b087084abff 100644
--- a/api/tests/opentrons/protocol_engine/execution/test_pipetting_handler.py
+++ b/api/tests/opentrons/protocol_engine/execution/test_pipetting_handler.py
@@ -1,5 +1,5 @@
"""Pipetting execution handler."""
-from typing import cast, Optional
+from typing import cast
import pytest
from decoy import Decoy
@@ -17,10 +17,12 @@
)
from opentrons.protocol_engine.errors.exceptions import (
TipNotAttachedError,
- InvalidPipettingVolumeError,
+ InvalidAspirateVolumeError,
InvalidPushOutVolumeError,
InvalidDispenseVolumeError,
)
+from opentrons.protocol_engine.notes import CommandNoteAdder, CommandNote
+from ..note_utils import CommandNoteMatcher
@pytest.fixture
@@ -77,7 +79,7 @@ async def test_create_pipette_handler(
(1.0, False, False),
],
)
-def test_get_is_ready_to_aspirate(
+def test_hw_get_is_ready_to_aspirate(
decoy: Decoy,
mock_state_view: StateView,
mock_hardware_api: HardwareAPI,
@@ -103,7 +105,7 @@ def test_get_is_ready_to_aspirate(
assert hardware_subject.get_is_ready_to_aspirate("pipette-id") == expected
-def test_get_is_ready_to_aspirate_raises_no_tip_attached(
+def test_hw_get_is_ready_to_aspirate_raises_no_tip_attached(
decoy: Decoy,
mock_state_view: StateView,
mock_hardware_api: HardwareAPI,
@@ -127,13 +129,17 @@ def test_get_is_ready_to_aspirate_raises_no_tip_attached(
assert hardware_subject.get_is_ready_to_aspirate("pipette-id")
-async def test_dispense_in_place(
+async def test_hw_dispense_in_place(
decoy: Decoy,
mock_state_view: StateView,
mock_hardware_api: HardwareAPI,
hardware_subject: HardwarePipettingHandler,
) -> None:
"""It should find the pipette by ID and use it to dispense."""
+ decoy.when(mock_state_view.pipettes.get_aspirated_volume("pipette-id")).then_return(
+ 25
+ )
+
decoy.when(mock_hardware_api.attached_instruments).then_return({})
decoy.when(
mock_state_view.pipettes.get_hardware_pipette(
@@ -171,13 +177,17 @@ async def test_dispense_in_place(
)
-async def test_dispense_in_place_raises_invalid_push_out(
+async def test_hw_dispense_in_place_raises_invalid_push_out(
decoy: Decoy,
mock_state_view: StateView,
mock_hardware_api: HardwareAPI,
hardware_subject: HardwarePipettingHandler,
) -> None:
"""It should raise an InvalidPushOutVolumeError."""
+ decoy.when(mock_state_view.pipettes.get_aspirated_volume("pipette-id")).then_return(
+ 25
+ )
+
decoy.when(mock_hardware_api.attached_instruments).then_return({})
decoy.when(
mock_state_view.pipettes.get_hardware_pipette(
@@ -204,13 +214,21 @@ async def test_dispense_in_place_raises_invalid_push_out(
)
-async def test_aspirate_in_place(
+async def test_hw_aspirate_in_place(
decoy: Decoy,
mock_state_view: StateView,
mock_hardware_api: HardwareAPI,
hardware_subject: HardwarePipettingHandler,
+ mock_command_note_adder: CommandNoteAdder,
) -> None:
"""Should set flow_rate and call hardware_api aspirate."""
+ decoy.when(mock_state_view.pipettes.get_working_volume("pipette-id")).then_return(
+ 25
+ )
+ decoy.when(mock_state_view.pipettes.get_aspirated_volume("pipette-id")).then_return(
+ 0
+ )
+
decoy.when(mock_hardware_api.attached_instruments).then_return({})
decoy.when(
mock_state_view.pipettes.get_hardware_pipette(
@@ -232,7 +250,10 @@ async def test_aspirate_in_place(
)
result = await hardware_subject.aspirate_in_place(
- pipette_id="pipette-id", volume=25, flow_rate=2.5
+ pipette_id="pipette-id",
+ volume=25,
+ flow_rate=2.5,
+ command_note_adder=mock_command_note_adder,
)
assert result == 25
@@ -248,28 +269,7 @@ async def test_aspirate_in_place(
)
-async def test_virtual_validate_aspirated_volume_raises(
- decoy: Decoy,
- mock_state_view: StateView,
-) -> None:
- """Should validate if trying to aspirate more than the working volume."""
- decoy.when(mock_state_view.pipettes.get_attached_tip("pipette-id")).then_return(
- TipGeometry(length=1, diameter=2, volume=3)
- )
-
- decoy.when(mock_state_view.pipettes.get_working_volume("pipette-id")).then_return(3)
-
- decoy.when(mock_state_view.pipettes.get_aspirated_volume("pipette-id")).then_return(
- 2
- )
-
- subject = VirtualPipettingHandler(state_view=mock_state_view)
-
- with pytest.raises(InvalidPipettingVolumeError):
- await subject.aspirate_in_place(pipette_id="pipette-id", volume=4, flow_rate=1)
-
-
-async def test_blow_out_in_place(
+async def test_virtual_blow_out_in_place(
decoy: Decoy,
mock_state_view: StateView,
mock_hardware_api: HardwareAPI,
@@ -309,7 +309,7 @@ async def test_blow_out_in_place(
)
-def test_get_is_ready_to_aspirate_virtual(
+def test_virtual_get_is_ready_to_aspirate(
decoy: Decoy, mock_state_view: StateView
) -> None:
"""Should check if pipette is ready to aspirate."""
@@ -329,8 +329,8 @@ def test_get_is_ready_to_aspirate_virtual(
assert subject.get_is_ready_to_aspirate(pipette_id="pipette-id-123") is True
-async def test_aspirate_in_place_virtual(
- mock_state_view: StateView, decoy: Decoy
+async def test_virtual_aspirate_in_place(
+ mock_state_view: StateView, decoy: Decoy, mock_command_note_adder: CommandNoteAdder
) -> None:
"""Should return the volume."""
decoy.when(
@@ -348,12 +348,15 @@ async def test_aspirate_in_place_virtual(
)
result = await subject.aspirate_in_place(
- pipette_id="pipette-id", volume=2, flow_rate=5
+ pipette_id="pipette-id",
+ volume=2,
+ flow_rate=5,
+ command_note_adder=mock_command_note_adder,
)
assert result == 2
-async def test_dispense_in_place_virtual(
+async def test_virtual_dispense_in_place(
decoy: Decoy, mock_state_view: StateView
) -> None:
"""Should return the volume."""
@@ -373,7 +376,7 @@ async def test_dispense_in_place_virtual(
assert result == 3
-async def test_dispense_in_place_virtual_raises_invalid_push_out(
+async def test_virtual_dispense_in_place_raises_invalid_push_out(
decoy: Decoy, mock_state_view: StateView
) -> None:
"""Should raise an InvalidPushOutVolumeError."""
@@ -393,9 +396,8 @@ async def test_dispense_in_place_virtual_raises_invalid_push_out(
)
-@pytest.mark.parametrize("aspirated_volume", [(None), (1)])
-async def test_dispense_in_place_virtual_raises_invalid_dispense(
- decoy: Decoy, mock_state_view: StateView, aspirated_volume: Optional[float]
+async def test_virtual_dispense_in_place_raises_no_tip(
+ decoy: Decoy, mock_state_view: StateView
) -> None:
"""Should raise an InvalidDispenseVolumeError."""
subject = VirtualPipettingHandler(state_view=mock_state_view)
@@ -405,7 +407,7 @@ async def test_dispense_in_place_virtual_raises_invalid_dispense(
)
decoy.when(mock_state_view.pipettes.get_aspirated_volume("pipette-id")).then_return(
- aspirated_volume
+ None
)
with pytest.raises(InvalidDispenseVolumeError):
@@ -414,8 +416,8 @@ async def test_dispense_in_place_virtual_raises_invalid_dispense(
)
-async def test_validate_tip_attached_in_aspirate(
- mock_state_view: StateView, decoy: Decoy
+async def test_virtual_aspirate_validate_tip_attached(
+ mock_state_view: StateView, decoy: Decoy, mock_command_note_adder: CommandNoteAdder
) -> None:
"""Should raise an error that a tip is not attached."""
subject = VirtualPipettingHandler(state_view=mock_state_view)
@@ -427,10 +429,15 @@ async def test_validate_tip_attached_in_aspirate(
with pytest.raises(
TipNotAttachedError, match="Cannot perform aspirate without a tip attached"
):
- await subject.aspirate_in_place("pipette-id", volume=20, flow_rate=1)
+ await subject.aspirate_in_place(
+ "pipette-id",
+ volume=20,
+ flow_rate=1,
+ command_note_adder=mock_command_note_adder,
+ )
-async def test_validate_tip_attached_in_dispense(
+async def test_virtual_dispense_validate_tip_attached(
mock_state_view: StateView, decoy: Decoy
) -> None:
"""Should raise an error that a tip is not attached."""
@@ -446,3 +453,138 @@ async def test_validate_tip_attached_in_dispense(
await subject.dispense_in_place(
"pipette-id", volume=20, flow_rate=1, push_out=None
)
+
+
+async def test_aspirate_volume_validation(
+ decoy: Decoy,
+ mock_state_view: StateView,
+ mock_hardware_api: HardwareAPI,
+ hardware_subject: HardwarePipettingHandler,
+ mock_command_note_adder: CommandNoteAdder,
+) -> None:
+ """It should validate the input volume, possibly adjusting it for rounding error.
+
+ This is done on both the VirtualPipettingHandler and HardwarePipettingHandler
+ because they should behave the same way.
+ """
+ virtual_subject = VirtualPipettingHandler(state_view=mock_state_view)
+
+ decoy.when(mock_state_view.pipettes.get_attached_tip("pipette-id")).then_return(
+ TipGeometry(length=1, diameter=2, volume=3)
+ )
+ decoy.when(mock_state_view.pipettes.get_working_volume("pipette-id")).then_return(3)
+ decoy.when(mock_state_view.pipettes.get_aspirated_volume("pipette-id")).then_return(
+ 2
+ )
+
+ # Stuff that only matters for the hardware subject:
+ decoy.when(mock_hardware_api.attached_instruments).then_return({})
+ decoy.when(
+ mock_state_view.pipettes.get_hardware_pipette(
+ pipette_id="pipette-id",
+ attached_pipettes={},
+ )
+ ).then_return(
+ HardwarePipette(
+ mount=Mount.LEFT,
+ config=cast(
+ PipetteDict,
+ {
+ "aspirate_flow_rate": 1.23,
+ "dispense_flow_rate": 4.56,
+ "blow_out_flow_rate": 7.89,
+ },
+ ),
+ )
+ )
+
+ ok_volume = 1.0000000000001
+ not_ok_volume = 1.01
+ expected_adjusted_volume = 1
+
+ for subject in [virtual_subject, hardware_subject]:
+ assert (
+ await subject.aspirate_in_place(
+ pipette_id="pipette-id",
+ volume=ok_volume,
+ flow_rate=1,
+ command_note_adder=mock_command_note_adder,
+ )
+ == expected_adjusted_volume
+ )
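+        # The matcher is cast to CommandNote only to satisfy the type checker;
+        # Decoy compares it against the real note via CommandNoteMatcher.__eq__.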
+ decoy.verify(
+ mock_command_note_adder(
+ cast(
+ CommandNote,
+ CommandNoteMatcher(
+ matching_noteKind_regex="warning",
+ matching_shortMessage_regex="Aspirate clamped to 1 µL",
+ ),
+ )
+ )
+ )
+ with pytest.raises(InvalidAspirateVolumeError):
+ await subject.aspirate_in_place(
+ pipette_id="pipette-id",
+ volume=not_ok_volume,
+ flow_rate=1,
+ command_note_adder=mock_command_note_adder,
+ )
+
+
+async def test_dispense_volume_validation(
+ decoy: Decoy,
+ mock_state_view: StateView,
+ mock_hardware_api: HardwareAPI,
+ hardware_subject: HardwarePipettingHandler,
+) -> None:
+ """It should validate the input volume, possibly adjusting it for rounding error.
+
+ This is done on both the VirtualPipettingHandler and HardwarePipettingHandler
+ because they should behave the same way.
+ """
+ virtual_subject = VirtualPipettingHandler(state_view=mock_state_view)
+
+ decoy.when(mock_state_view.pipettes.get_attached_tip("pipette-id")).then_return(
+ TipGeometry(length=1, diameter=2, volume=3)
+ )
+ decoy.when(mock_state_view.pipettes.get_aspirated_volume("pipette-id")).then_return(
+ 1
+ )
+
+ # Stuff that only matters for the hardware subject:
+ decoy.when(mock_hardware_api.attached_instruments).then_return({})
+ decoy.when(
+ mock_state_view.pipettes.get_hardware_pipette(
+ pipette_id="pipette-id",
+ attached_pipettes={},
+ )
+ ).then_return(
+ HardwarePipette(
+ mount=Mount.LEFT,
+ config=cast(
+ PipetteDict,
+ {
+ "aspirate_flow_rate": 1.23,
+ "dispense_flow_rate": 4.56,
+ "blow_out_flow_rate": 7.89,
+ },
+ ),
+ )
+ )
+
+ ok_volume = 1.0000000000001
+ not_ok_volume = 1.01
+ expected_adjusted_volume = 1
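+    # Only the 1 µL that was previously aspirated can be dispensed: the first value
+    # is within floating-point tolerance and should clamp to 1 µL, while 1.01 µL
+    # exceeds it by more than rounding error and should raise.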
+
+ for subject in [virtual_subject, hardware_subject]:
+ assert (
+ await subject.dispense_in_place(
+ pipette_id="pipette-id", volume=ok_volume, flow_rate=5, push_out=7
+ )
+ == expected_adjusted_volume
+ )
+ with pytest.raises(InvalidDispenseVolumeError):
+ await subject.dispense_in_place(
+ pipette_id="pipette-id", volume=not_ok_volume, flow_rate=5, push_out=7
+ )
diff --git a/api/tests/opentrons/protocol_engine/execution/test_tip_handler.py b/api/tests/opentrons/protocol_engine/execution/test_tip_handler.py
index d9052872cff..e7e0284debe 100644
--- a/api/tests/opentrons/protocol_engine/execution/test_tip_handler.py
+++ b/api/tests/opentrons/protocol_engine/execution/test_tip_handler.py
@@ -7,10 +7,12 @@
from opentrons.types import Mount, MountType
from opentrons.hardware_control import API as HardwareAPI
+from opentrons.hardware_control.types import TipStateType
+from opentrons.hardware_control.protocols.types import OT2RobotType, FlexRobotType
from opentrons.protocols.models import LabwareDefinition
from opentrons.protocol_engine.state import StateView
-from opentrons.protocol_engine.types import TipGeometry
+from opentrons.protocol_engine.types import TipGeometry, TipPresenceStatus
from opentrons.protocol_engine.resources import LabwareDataProvider
from opentrons_shared_data.errors.exceptions import (
CommandPreconditionViolated,
@@ -26,7 +28,9 @@
@pytest.fixture
def mock_hardware_api(decoy: Decoy) -> HardwareAPI:
"""Get a mock in the shape of a HardwareAPI."""
- return decoy.mock(cls=HardwareAPI)
+ mock = decoy.mock(cls=HardwareAPI)
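+    # HardwareTipHandler checks the robot type, so the default mock reports an OT-2.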
+ decoy.when(mock.get_robot_type()).then_return(OT2RobotType)
+ return mock
@pytest.fixture
@@ -333,3 +337,89 @@ async def test_virtual_drop_tip(decoy: Decoy, mock_state_view: StateView) -> Non
mock_state_view.pipettes.validate_tip_state("pipette-id", True),
times=1,
)
+
+
+async def test_get_tip_presence_on_ot2(
+ decoy: Decoy,
+ mock_state_view: StateView,
+ mock_hardware_api: HardwareAPI,
+ mock_labware_data_provider: LabwareDataProvider,
+) -> None:
+ """It should use the hardware API to up a tip."""
+ subject = HardwareTipHandler(
+ state_view=mock_state_view,
+ hardware_api=mock_hardware_api,
+ labware_data_provider=mock_labware_data_provider,
+ )
+
+ result = await subject.get_tip_presence(pipette_id="pipette-id")
+ assert result == TipPresenceStatus.UNKNOWN
+
+
+@pytest.mark.parametrize("hw_tip_state", [TipStateType.ABSENT, TipStateType.PRESENT])
+async def test_get_tip_presence_on_ot3(
+ decoy: Decoy,
+ mock_state_view: StateView,
+ mock_labware_data_provider: LabwareDataProvider,
+ hw_tip_state: TipStateType,
+) -> None:
+ """It should use the hardware API to up a tip."""
+ try:
+ from opentrons.hardware_control.ot3api import OT3API
+
+ ot3_hardware_api = decoy.mock(cls=OT3API)
+ decoy.when(ot3_hardware_api.get_robot_type()).then_return(FlexRobotType)
+
+ subject = HardwareTipHandler(
+ state_view=mock_state_view,
+ hardware_api=ot3_hardware_api,
+ labware_data_provider=mock_labware_data_provider,
+ )
+
+ decoy.when(mock_state_view.pipettes.get_mount("pipette-id")).then_return(
+ MountType.LEFT
+ )
+ decoy.when(
+ await ot3_hardware_api.get_tip_presence_status(Mount.LEFT)
+ ).then_return(hw_tip_state)
+ result = await subject.get_tip_presence(pipette_id="pipette-id")
+ assert result == TipPresenceStatus.from_hw_state(hw_tip_state)
+
+ except ImportError:
+ pass
+
+
+@pytest.mark.parametrize(
+ "expected", [TipPresenceStatus.ABSENT, TipPresenceStatus.PRESENT]
+)
+async def test_verify_tip_presence_on_ot3(
+ decoy: Decoy,
+ mock_state_view: StateView,
+ mock_labware_data_provider: LabwareDataProvider,
+ expected: TipPresenceStatus,
+) -> None:
+ """It should use the hardware API to up a tip."""
+ try:
+ from opentrons.hardware_control.ot3api import OT3API
+
+ ot3_hardware_api = decoy.mock(cls=OT3API)
+ decoy.when(ot3_hardware_api.get_robot_type()).then_return(FlexRobotType)
+
+ subject = HardwareTipHandler(
+ state_view=mock_state_view,
+ hardware_api=ot3_hardware_api,
+ labware_data_provider=mock_labware_data_provider,
+ )
+ decoy.when(mock_state_view.pipettes.get_mount("pipette-id")).then_return(
+ MountType.LEFT
+ )
+ await subject.verify_tip_presence("pipette-id", expected, None)
+
+ decoy.verify(
+ await ot3_hardware_api.verify_tip_presence(
+ Mount.LEFT, expected.to_hw_state(), None
+ )
+ )
+
+ except ImportError:
+ pass
diff --git a/api/tests/opentrons/protocol_engine/note_utils.py b/api/tests/opentrons/protocol_engine/note_utils.py
new file mode 100644
index 00000000000..0ca3af9ccca
--- /dev/null
+++ b/api/tests/opentrons/protocol_engine/note_utils.py
@@ -0,0 +1,63 @@
+"""Test utilities for dealing with notes."""
+import re
+from typing import Optional
+from opentrons.protocol_engine.notes import CommandNote
+
+
+class CommandNoteMatcher:
+ """Decoy matcher for notes instances."""
+
+ def __init__(
+ self,
+ matching_noteKind_regex: Optional[str] = None,
+ matching_shortMessage_regex: Optional[str] = None,
+ matching_longMessage_regex: Optional[str] = None,
+ matching_source_regex: Optional[str] = None,
+ ) -> None:
+ """Build a CommandNoteMatcher. All provided arguments are checked with re.search."""
+ self._matching_noteKind_regex = (
+ re.compile(matching_noteKind_regex)
+ if matching_noteKind_regex is not None
+ else None
+ )
+ self._matching_shortMessage_regex = (
+ re.compile(matching_shortMessage_regex)
+ if matching_shortMessage_regex is not None
+ else None
+ )
+ self._matching_longMessage_regex = (
+ re.compile(matching_longMessage_regex)
+ if matching_longMessage_regex is not None
+ else None
+ )
+ self._matching_source_regex = (
+ re.compile(matching_source_regex)
+ if matching_source_regex is not None
+ else None
+ )
+
+ def __eq__(self, other: object) -> bool:
+ """Called by Decoy. returns True on a match, False otherwise."""
+ if not isinstance(other, CommandNote):
+ return False
+ if (
+ self._matching_noteKind_regex is not None
+ and not self._matching_noteKind_regex.search(other.noteKind)
+ ):
+ return False
+ if (
+ self._matching_shortMessage_regex is not None
+ and not self._matching_shortMessage_regex.search(other.shortMessage)
+ ):
+ return False
+ if (
+ self._matching_longMessage_regex is not None
+ and not self._matching_longMessage_regex.search(other.longMessage)
+ ):
+ return False
+ if (
+ self._matching_source_regex is not None
+ and not self._matching_source_regex.search(other.source)
+ ):
+ return False
+ return True
diff --git a/api/tests/opentrons/protocol_engine/pipette_fixtures.py b/api/tests/opentrons/protocol_engine/pipette_fixtures.py
new file mode 100644
index 00000000000..70937beeb9f
--- /dev/null
+++ b/api/tests/opentrons/protocol_engine/pipette_fixtures.py
@@ -0,0 +1,353 @@
+"""Nozzle Map data to use in tests."""
+
+from collections import OrderedDict
+
+from opentrons.types import Point
+from opentrons.hardware_control.nozzle_manager import NozzleMap
+from opentrons_shared_data.pipette.dev_types import PipetteNameType
+
+
+NINETY_SIX_ROWS = OrderedDict(
+ (
+ (
+ "A",
+ [
+ "A1",
+ "A2",
+ "A3",
+ "A4",
+ "A5",
+ "A6",
+ "A7",
+ "A8",
+ "A9",
+ "A10",
+ "A11",
+ "A12",
+ ],
+ ),
+ (
+ "B",
+ [
+ "B1",
+ "B2",
+ "B3",
+ "B4",
+ "B5",
+ "B6",
+ "B7",
+ "B8",
+ "B9",
+ "B10",
+ "B11",
+ "B12",
+ ],
+ ),
+ (
+ "C",
+ [
+ "C1",
+ "C2",
+ "C3",
+ "C4",
+ "C5",
+ "C6",
+ "C7",
+ "C8",
+ "C9",
+ "C10",
+ "C11",
+ "C12",
+ ],
+ ),
+ (
+ "D",
+ [
+ "D1",
+ "D2",
+ "D3",
+ "D4",
+ "D5",
+ "D6",
+ "D7",
+ "D8",
+ "D9",
+ "D10",
+ "D11",
+ "D12",
+ ],
+ ),
+ (
+ "E",
+ [
+ "E1",
+ "E2",
+ "E3",
+ "E4",
+ "E5",
+ "E6",
+ "E7",
+ "E8",
+ "E9",
+ "E10",
+ "E11",
+ "E12",
+ ],
+ ),
+ (
+ "F",
+ [
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ ],
+ ),
+ (
+ "G",
+ [
+ "G1",
+ "G2",
+ "G3",
+ "G4",
+ "G5",
+ "G6",
+ "G7",
+ "G8",
+ "G9",
+ "G10",
+ "G11",
+ "G12",
+ ],
+ ),
+ (
+ "H",
+ [
+ "H1",
+ "H2",
+ "H3",
+ "H4",
+ "H5",
+ "H6",
+ "H7",
+ "H8",
+ "H9",
+ "H10",
+ "H11",
+ "H12",
+ ],
+ ),
+ )
+)
+
+
+NINETY_SIX_COLS = OrderedDict(
+ (
+ ("1", ["A1", "B1", "C1", "D1", "E1", "F1", "G1", "H1"]),
+ ("2", ["A2", "B2", "C2", "D2", "E2", "F2", "G2", "H2"]),
+ ("3", ["A3", "B3", "C3", "D3", "E3", "F3", "G3", "H3"]),
+ ("4", ["A4", "B4", "C4", "D4", "E4", "F4", "G4", "H4"]),
+ ("5", ["A5", "B5", "C5", "D5", "E5", "F5", "G5", "H5"]),
+ ("6", ["A6", "B6", "C6", "D6", "E6", "F6", "G6", "H6"]),
+ ("7", ["A7", "B7", "C7", "D7", "E7", "F7", "G7", "H7"]),
+ ("8", ["A8", "B8", "C8", "D8", "E8", "F8", "G8", "H8"]),
+ ("9", ["A9", "B9", "C9", "D9", "E9", "F9", "G9", "H9"]),
+ ("10", ["A10", "B10", "C10", "D10", "E10", "F10", "G10", "H10"]),
+ ("11", ["A11", "B11", "C11", "D11", "E11", "F11", "G11", "H11"]),
+ ("12", ["A12", "B12", "C12", "D12", "E12", "F12", "G12", "H12"]),
+ )
+)
+
+NINETY_SIX_MAP = OrderedDict(
+ (
+ ("A1", Point(-36.0, -25.5, -259.15)),
+ ("A2", Point(-27.0, -25.5, -259.15)),
+ ("A3", Point(-18.0, -25.5, -259.15)),
+ ("A4", Point(-9.0, -25.5, -259.15)),
+ ("A5", Point(0.0, -25.5, -259.15)),
+ ("A6", Point(9.0, -25.5, -259.15)),
+ ("A7", Point(18.0, -25.5, -259.15)),
+ ("A8", Point(27.0, -25.5, -259.15)),
+ ("A9", Point(36.0, -25.5, -259.15)),
+ ("A10", Point(45.0, -25.5, -259.15)),
+ ("A11", Point(54.0, -25.5, -259.15)),
+ ("A12", Point(63.0, -25.5, -259.15)),
+ ("B1", Point(-36.0, -34.5, -259.15)),
+ ("B2", Point(-27.0, -34.5, -259.15)),
+ ("B3", Point(-18.0, -34.5, -259.15)),
+ ("B4", Point(-9.0, -34.5, -259.15)),
+ ("B5", Point(0.0, -34.5, -259.15)),
+ ("B6", Point(9.0, -34.5, -259.15)),
+ ("B7", Point(18.0, -34.5, -259.15)),
+ ("B8", Point(27.0, -34.5, -259.15)),
+ ("B9", Point(36.0, -34.5, -259.15)),
+ ("B10", Point(45.0, -34.5, -259.15)),
+ ("B11", Point(54.0, -34.5, -259.15)),
+ ("B12", Point(63.0, -34.5, -259.15)),
+ ("C1", Point(-36.0, -43.5, -259.15)),
+ ("C2", Point(-27.0, -43.5, -259.15)),
+ ("C3", Point(-18.0, -43.5, -259.15)),
+ ("C4", Point(-9.0, -43.5, -259.15)),
+ ("C5", Point(0.0, -43.5, -259.15)),
+ ("C6", Point(9.0, -43.5, -259.15)),
+ ("C7", Point(18.0, -43.5, -259.15)),
+ ("C8", Point(27.0, -43.5, -259.15)),
+ ("C9", Point(36.0, -43.5, -259.15)),
+ ("C10", Point(45.0, -43.5, -259.15)),
+ ("C11", Point(54.0, -43.5, -259.15)),
+ ("C12", Point(63.0, -43.5, -259.15)),
+ ("D1", Point(-36.0, -52.5, -259.15)),
+ ("D2", Point(-27.0, -52.5, -259.15)),
+ ("D3", Point(-18.0, -52.5, -259.15)),
+ ("D4", Point(-9.0, -52.5, -259.15)),
+ ("D5", Point(0.0, -52.5, -259.15)),
+ ("D6", Point(9.0, -52.5, -259.15)),
+ ("D7", Point(18.0, -52.5, -259.15)),
+ ("D8", Point(27.0, -52.5, -259.15)),
+ ("D9", Point(36.0, -52.5, -259.15)),
+ ("D10", Point(45.0, -52.5, -259.15)),
+ ("D11", Point(54.0, -52.5, -259.15)),
+ ("D12", Point(63.0, -52.5, -259.15)),
+ ("E1", Point(-36.0, -61.5, -259.15)),
+ ("E2", Point(-27.0, -61.5, -259.15)),
+ ("E3", Point(-18.0, -61.5, -259.15)),
+ ("E4", Point(-9.0, -61.5, -259.15)),
+ ("E5", Point(0.0, -61.5, -259.15)),
+ ("E6", Point(9.0, -61.5, -259.15)),
+ ("E7", Point(18.0, -61.5, -259.15)),
+ ("E8", Point(27.0, -61.5, -259.15)),
+ ("E9", Point(36.0, -61.5, -259.15)),
+ ("E10", Point(45.0, -61.5, -259.15)),
+ ("E11", Point(54.0, -61.5, -259.15)),
+ ("E12", Point(63.0, -61.5, -259.15)),
+ ("F1", Point(-36.0, -70.5, -259.15)),
+ ("F2", Point(-27.0, -70.5, -259.15)),
+ ("F3", Point(-18.0, -70.5, -259.15)),
+ ("F4", Point(-9.0, -70.5, -259.15)),
+ ("F5", Point(0.0, -70.5, -259.15)),
+ ("F6", Point(9.0, -70.5, -259.15)),
+ ("F7", Point(18.0, -70.5, -259.15)),
+ ("F8", Point(27.0, -70.5, -259.15)),
+ ("F9", Point(36.0, -70.5, -259.15)),
+ ("F10", Point(45.0, -70.5, -259.15)),
+ ("F11", Point(54.0, -70.5, -259.15)),
+ ("F12", Point(63.0, -70.5, -259.15)),
+ ("G1", Point(-36.0, -79.5, -259.15)),
+ ("G2", Point(-27.0, -79.5, -259.15)),
+ ("G3", Point(-18.0, -79.5, -259.15)),
+ ("G4", Point(-9.0, -79.5, -259.15)),
+ ("G5", Point(0.0, -79.5, -259.15)),
+ ("G6", Point(9.0, -79.5, -259.15)),
+ ("G7", Point(18.0, -79.5, -259.15)),
+ ("G8", Point(27.0, -79.5, -259.15)),
+ ("G9", Point(36.0, -79.5, -259.15)),
+ ("G10", Point(45.0, -79.5, -259.15)),
+ ("G11", Point(54.0, -79.5, -259.15)),
+ ("G12", Point(63.0, -79.5, -259.15)),
+ ("H1", Point(-36.0, -88.5, -259.15)),
+ ("H2", Point(-27.0, -88.5, -259.15)),
+ ("H3", Point(-18.0, -88.5, -259.15)),
+ ("H4", Point(-9.0, -88.5, -259.15)),
+ ("H5", Point(0.0, -88.5, -259.15)),
+ ("H6", Point(9.0, -88.5, -259.15)),
+ ("H7", Point(18.0, -88.5, -259.15)),
+ ("H8", Point(27.0, -88.5, -259.15)),
+ ("H9", Point(36.0, -88.5, -259.15)),
+ ("H10", Point(45.0, -88.5, -259.15)),
+ ("H11", Point(54.0, -88.5, -259.15)),
+ ("H12", Point(63.0, -88.5, -259.15)),
+ )
+)
+
+EIGHT_CHANNEL_ROWS = OrderedDict(
+ (
+ (
+ "A",
+ ["A1"],
+ ),
+ (
+ "B",
+ ["B1"],
+ ),
+ (
+ "C",
+ ["C1"],
+ ),
+ (
+ "D",
+ ["D1"],
+ ),
+ (
+ "E",
+ ["E1"],
+ ),
+ (
+ "F",
+ ["F1"],
+ ),
+ (
+ "G",
+ ["G1"],
+ ),
+ (
+ "H",
+ ["H1"],
+ ),
+ )
+)
+
+EIGHT_CHANNEL_COLS = OrderedDict(
+ (("1", ["A1", "B1", "C1", "D1", "E1", "F1", "G1", "H1"]),)
+)
+
+EIGHT_CHANNEL_MAP = OrderedDict(
+ (
+ ("A1", Point(0.0, 31.5, 35.52)),
+ ("B1", Point(0.0, 22.5, 35.52)),
+ ("C1", Point(0.0, 13.5, 35.52)),
+ ("D1", Point(0.0, 4.5, 35.52)),
+ ("E1", Point(0.0, -4.5, 35.52)),
+ ("F1", Point(0.0, -13.5, 35.52)),
+ ("G1", Point(0.0, -22.5, 35.52)),
+ ("H1", Point(0.0, -31.5, 35.52)),
+ )
+)
+
+
+def get_default_nozzle_map(pipette_type: PipetteNameType) -> NozzleMap:
+ """Get default nozzle map for a given pipette type."""
+ if "multi" in pipette_type.value:
+ return NozzleMap.build(
+ physical_nozzles=EIGHT_CHANNEL_MAP,
+ physical_rows=EIGHT_CHANNEL_ROWS,
+ physical_columns=EIGHT_CHANNEL_COLS,
+ starting_nozzle="A1",
+ back_left_nozzle="A1",
+ front_right_nozzle="H1",
+ )
+ elif "96" in pipette_type.value:
+ return NozzleMap.build(
+ physical_nozzles=NINETY_SIX_MAP,
+ physical_rows=NINETY_SIX_ROWS,
+ physical_columns=NINETY_SIX_COLS,
+ starting_nozzle="A1",
+ back_left_nozzle="A1",
+ front_right_nozzle="H12",
+ )
+ else:
+ return NozzleMap.build(
+ physical_nozzles=OrderedDict({"A1": Point(0, 0, 0)}),
+ physical_rows=OrderedDict({"A": ["A1"]}),
+ physical_columns=OrderedDict({"1": ["A1"]}),
+ starting_nozzle="A1",
+ back_left_nozzle="A1",
+ front_right_nozzle="A1",
+ )
diff --git a/api/tests/opentrons/protocol_engine/resources/test_deck_configuration_provider.py b/api/tests/opentrons/protocol_engine/resources/test_deck_configuration_provider.py
new file mode 100644
index 00000000000..12b324955be
--- /dev/null
+++ b/api/tests/opentrons/protocol_engine/resources/test_deck_configuration_provider.py
@@ -0,0 +1,334 @@
+"""Test deck configuration provider."""
+from typing import List, Set
+
+import pytest
+from pytest_lazyfixture import lazy_fixture # type: ignore[import-untyped]
+
+from opentrons_shared_data.deck import load as load_deck
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5
+
+from opentrons.types import DeckSlotName
+
+from opentrons.protocol_engine.errors import (
+ FixtureDoesNotExistError,
+ CutoutDoesNotExistError,
+ AddressableAreaDoesNotExistError,
+)
+from opentrons.protocol_engine.types import (
+ AddressableArea,
+ AreaType,
+ PotentialCutoutFixture,
+ DeckPoint,
+ Dimensions,
+ AddressableOffsetVector,
+)
+from opentrons.protocols.api_support.deck_type import (
+ SHORT_TRASH_DECK,
+ STANDARD_OT2_DECK,
+ STANDARD_OT3_DECK,
+)
+
+from opentrons.protocol_engine.resources import deck_configuration_provider as subject
+
+
+@pytest.fixture(scope="session")
+def ot2_standard_deck_def() -> DeckDefinitionV5:
+ """Get the OT-2 standard deck definition."""
+ return load_deck(STANDARD_OT2_DECK, 5)
+
+
+@pytest.fixture(scope="session")
+def ot2_short_trash_deck_def() -> DeckDefinitionV5:
+ """Get the OT-2 standard deck definition."""
+ return load_deck(SHORT_TRASH_DECK, 5)
+
+
+@pytest.fixture(scope="session")
+def ot3_standard_deck_def() -> DeckDefinitionV5:
+ """Get the OT-2 standard deck definition."""
+ return load_deck(STANDARD_OT3_DECK, 5)
+
+
+@pytest.mark.parametrize(
+ ("cutout_id", "expected_deck_point", "deck_def"),
+ [
+ (
+ "cutout5",
+ DeckPoint(x=132.5, y=90.5, z=0.0),
+ lazy_fixture("ot2_standard_deck_def"),
+ ),
+ (
+ "cutout5",
+ DeckPoint(x=132.5, y=90.5, z=0.0),
+ lazy_fixture("ot2_short_trash_deck_def"),
+ ),
+ (
+ "cutoutC2",
+ DeckPoint(x=164.0, y=107, z=0.0),
+ lazy_fixture("ot3_standard_deck_def"),
+ ),
+ ],
+)
+def test_get_cutout_position(
+ cutout_id: str,
+ expected_deck_point: DeckPoint,
+ deck_def: DeckDefinitionV5,
+) -> None:
+ """It should get the deck position for the requested cutout id."""
+ cutout_position = subject.get_cutout_position(cutout_id, deck_def)
+ assert cutout_position == expected_deck_point
+
+
+def test_get_cutout_position_raises(
+ ot3_standard_deck_def: DeckDefinitionV5,
+) -> None:
+ """It should raise if there is no cutout with that ID in the deck definition."""
+ with pytest.raises(CutoutDoesNotExistError):
+ subject.get_cutout_position("theFunCutout", ot3_standard_deck_def)
+
+
+@pytest.mark.parametrize(
+ ("cutout_fixture_id", "expected_display_name", "deck_def"),
+ [
+ ("singleStandardSlot", "Standard Slot", lazy_fixture("ot2_standard_deck_def")),
+ (
+ "singleStandardSlot",
+ "Standard Slot",
+ lazy_fixture("ot2_short_trash_deck_def"),
+ ),
+ (
+ "singleRightSlot",
+ "Standard Slot Right",
+ lazy_fixture("ot3_standard_deck_def"),
+ ),
+ ],
+)
+def test_get_cutout_fixture(
+ cutout_fixture_id: str,
+ expected_display_name: str,
+ deck_def: DeckDefinitionV5,
+) -> None:
+ """It should get the cutout fixture given the cutout fixture id."""
+ cutout_fixture = subject.get_cutout_fixture(cutout_fixture_id, deck_def)
+ assert cutout_fixture["displayName"] == expected_display_name
+
+
+def test_get_cutout_fixture_raises(
+ ot3_standard_deck_def: DeckDefinitionV5,
+) -> None:
+ """It should raise if the given cutout fixture id does not exist."""
+ with pytest.raises(FixtureDoesNotExistError):
+ subject.get_cutout_fixture("theFunFixture", ot3_standard_deck_def)
+
+
+@pytest.mark.parametrize(
+ ("cutout_fixture_id", "cutout_id", "expected_areas", "deck_def"),
+ [
+ (
+ "singleStandardSlot",
+ "cutout1",
+ ["1"],
+ lazy_fixture("ot2_standard_deck_def"),
+ ),
+ (
+ "singleStandardSlot",
+ "cutout1",
+ ["1"],
+ lazy_fixture("ot2_short_trash_deck_def"),
+ ),
+ (
+ "stagingAreaRightSlot",
+ "cutoutD3",
+ ["D3", "D4"],
+ lazy_fixture("ot3_standard_deck_def"),
+ ),
+ ],
+)
+def test_get_provided_addressable_area_names(
+ cutout_fixture_id: str,
+ cutout_id: str,
+ expected_areas: List[str],
+ deck_def: DeckDefinitionV5,
+) -> None:
+ """It should get the provided addressable area for the cutout fixture and cutout."""
+ provided_addressable_areas = subject.get_provided_addressable_area_names(
+ cutout_fixture_id, cutout_id, deck_def
+ )
+ assert provided_addressable_areas == expected_areas
+
+
+@pytest.mark.parametrize(
+ (
+ "addressable_area_name",
+ "expected_cutout_id",
+ "expected_potential_fixtures",
+ "deck_def",
+ ),
+ [
+ (
+ "3",
+ "cutout3",
+ {
+ PotentialCutoutFixture(
+ cutout_id="cutout3",
+ cutout_fixture_id="singleStandardSlot",
+ provided_addressable_areas=frozenset({"3"}),
+ )
+ },
+ lazy_fixture("ot2_standard_deck_def"),
+ ),
+ (
+ "3",
+ "cutout3",
+ {
+ PotentialCutoutFixture(
+ cutout_id="cutout3",
+ cutout_fixture_id="singleStandardSlot",
+ provided_addressable_areas=frozenset({"3"}),
+ )
+ },
+ lazy_fixture("ot2_short_trash_deck_def"),
+ ),
+ (
+ "D3",
+ "cutoutD3",
+ {
+ PotentialCutoutFixture(
+ cutout_id="cutoutD3",
+ cutout_fixture_id="singleRightSlot",
+ provided_addressable_areas=frozenset({"D3"}),
+ ),
+ PotentialCutoutFixture(
+ cutout_id="cutoutD3",
+ cutout_fixture_id="stagingAreaRightSlot",
+ provided_addressable_areas=frozenset({"D3", "D4"}),
+ ),
+ },
+ lazy_fixture("ot3_standard_deck_def"),
+ ),
+ ],
+)
+def test_get_potential_cutout_fixtures(
+ addressable_area_name: str,
+ expected_cutout_id: str,
+ expected_potential_fixtures: Set[PotentialCutoutFixture],
+ deck_def: DeckDefinitionV5,
+) -> None:
+ """It should get a cutout id and a set of potential cutout fixtures for an addressable area name."""
+ cutout_id, potential_fixtures = subject.get_potential_cutout_fixtures(
+ addressable_area_name, deck_def
+ )
+ assert cutout_id == expected_cutout_id
+ assert potential_fixtures == expected_potential_fixtures
+
+
+def test_get_potential_cutout_fixtures_raises(
+ ot3_standard_deck_def: DeckDefinitionV5,
+) -> None:
+ """It should raise if there is no fixtures that provide the requested area."""
+ with pytest.raises(AddressableAreaDoesNotExistError):
+ subject.get_potential_cutout_fixtures("theFunArea", ot3_standard_deck_def)
+
+
+# TODO put in fixed trash for OT2 decks
+@pytest.mark.parametrize(
+ ("addressable_area_name", "expected_addressable_area", "deck_def"),
+ [
+ (
+ "1",
+ AddressableArea(
+ area_name="1",
+ area_type=AreaType.SLOT,
+ base_slot=DeckSlotName.SLOT_A1,
+ display_name="Slot 1",
+ bounding_box=Dimensions(x=128.0, y=86.0, z=0),
+ position=AddressableOffsetVector(x=1, y=2, z=3),
+ compatible_module_types=[
+ "magneticModuleType",
+ "temperatureModuleType",
+ "heaterShakerModuleType",
+ ],
+ ),
+ lazy_fixture("ot2_standard_deck_def"),
+ ),
+ (
+ "1",
+ AddressableArea(
+ area_name="1",
+ area_type=AreaType.SLOT,
+ base_slot=DeckSlotName.SLOT_A1,
+ display_name="Slot 1",
+ bounding_box=Dimensions(x=128.0, y=86.0, z=0),
+ position=AddressableOffsetVector(x=1, y=2, z=3),
+ compatible_module_types=[
+ "magneticModuleType",
+ "temperatureModuleType",
+ "heaterShakerModuleType",
+ ],
+ ),
+ lazy_fixture("ot2_short_trash_deck_def"),
+ ),
+ (
+ "D1",
+ AddressableArea(
+ area_name="D1",
+ area_type=AreaType.SLOT,
+ base_slot=DeckSlotName.SLOT_A1,
+ display_name="Slot D1",
+ bounding_box=Dimensions(x=128.0, y=86.0, z=0),
+ position=AddressableOffsetVector(x=1, y=2, z=3),
+ compatible_module_types=[],
+ ),
+ lazy_fixture("ot3_standard_deck_def"),
+ ),
+ (
+ "movableTrashB3",
+ AddressableArea(
+ area_name="movableTrashB3",
+ area_type=AreaType.MOVABLE_TRASH,
+ base_slot=DeckSlotName.SLOT_A1,
+ display_name="Trash Bin in B3",
+ bounding_box=Dimensions(x=225, y=78, z=40),
+ position=AddressableOffsetVector(x=-5.25, y=6, z=3),
+ compatible_module_types=[],
+ ),
+ lazy_fixture("ot3_standard_deck_def"),
+ ),
+ (
+ "gripperWasteChute",
+ AddressableArea(
+ area_name="gripperWasteChute",
+ area_type=AreaType.WASTE_CHUTE,
+ base_slot=DeckSlotName.SLOT_A1,
+ display_name="Waste Chute",
+ bounding_box=Dimensions(x=0, y=0, z=0),
+ position=AddressableOffsetVector(x=65, y=31, z=139.5),
+ compatible_module_types=[],
+ ),
+ lazy_fixture("ot3_standard_deck_def"),
+ ),
+ ],
+)
+def test_get_addressable_area_from_name(
+ addressable_area_name: str,
+ expected_addressable_area: AddressableArea,
+ deck_def: DeckDefinitionV5,
+) -> None:
+ """It should get the deck position for the requested cutout id."""
+ addressable_area = subject.get_addressable_area_from_name(
+ addressable_area_name, DeckPoint(x=1, y=2, z=3), DeckSlotName.SLOT_A1, deck_def
+ )
+ assert addressable_area == expected_addressable_area
+
+
+def test_get_addressable_area_from_name_raises(
+ ot3_standard_deck_def: DeckDefinitionV5,
+) -> None:
+ """It should raise if there is no addressable area by that name in the deck."""
+ with pytest.raises(AddressableAreaDoesNotExistError):
+ subject.get_addressable_area_from_name(
+ "theFunArea",
+ DeckPoint(x=1, y=2, z=3),
+ DeckSlotName.SLOT_A1,
+ ot3_standard_deck_def,
+ )
diff --git a/api/tests/opentrons/protocol_engine/resources/test_deck_data_provider.py b/api/tests/opentrons/protocol_engine/resources/test_deck_data_provider.py
index 2e01abb3119..bd720777ed6 100644
--- a/api/tests/opentrons/protocol_engine/resources/test_deck_data_provider.py
+++ b/api/tests/opentrons/protocol_engine/resources/test_deck_data_provider.py
@@ -1,9 +1,9 @@
"""Test deck data provider."""
import pytest
-from pytest_lazyfixture import lazy_fixture # type: ignore[import]
+from pytest_lazyfixture import lazy_fixture # type: ignore[import-untyped]
from decoy import Decoy
-from opentrons_shared_data.deck.dev_types import DeckDefinitionV4
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5
from opentrons.protocols.models import LabwareDefinition
from opentrons.types import DeckSlotName
@@ -31,7 +31,7 @@ def mock_labware_data_provider(decoy: Decoy) -> LabwareDataProvider:
)
async def test_get_deck_definition(
deck_type: DeckType,
- expected_definition: DeckDefinitionV4,
+ expected_definition: DeckDefinitionV5,
mock_labware_data_provider: LabwareDataProvider,
) -> None:
"""It should be able to load the correct deck definition."""
@@ -44,7 +44,7 @@ async def test_get_deck_definition(
async def test_get_deck_labware_fixtures_ot2_standard(
decoy: Decoy,
- ot2_standard_deck_def: DeckDefinitionV4,
+ ot2_standard_deck_def: DeckDefinitionV5,
ot2_fixed_trash_def: LabwareDefinition,
mock_labware_data_provider: LabwareDataProvider,
) -> None:
@@ -74,7 +74,7 @@ async def test_get_deck_labware_fixtures_ot2_standard(
async def test_get_deck_labware_fixtures_ot2_short_trash(
decoy: Decoy,
- ot2_short_trash_deck_def: DeckDefinitionV4,
+ ot2_short_trash_deck_def: DeckDefinitionV5,
ot2_short_fixed_trash_def: LabwareDefinition,
mock_labware_data_provider: LabwareDataProvider,
) -> None:
@@ -104,7 +104,7 @@ async def test_get_deck_labware_fixtures_ot2_short_trash(
async def test_get_deck_labware_fixtures_ot3_standard(
decoy: Decoy,
- ot3_standard_deck_def: DeckDefinitionV4,
+ ot3_standard_deck_def: DeckDefinitionV5,
ot3_fixed_trash_def: LabwareDefinition,
mock_labware_data_provider: LabwareDataProvider,
) -> None:
diff --git a/api/tests/opentrons/protocol_engine/resources/test_ot3_validation.py b/api/tests/opentrons/protocol_engine/resources/test_ot3_validation.py
index d2382bf70a1..327daf82129 100644
--- a/api/tests/opentrons/protocol_engine/resources/test_ot3_validation.py
+++ b/api/tests/opentrons/protocol_engine/resources/test_ot3_validation.py
@@ -6,6 +6,7 @@
from opentrons.protocol_engine.errors.exceptions import HardwareNotSupportedError
from opentrons.hardware_control.api import API
+from opentrons.hardware_control.protocols.types import FlexRobotType, OT2RobotType
@pytest.mark.ot3_only
@@ -16,6 +17,7 @@ def test_ensure_ot3_hardware(decoy: Decoy) -> None:
from opentrons.hardware_control.ot3api import OT3API
ot_3_hardware_api = decoy.mock(cls=OT3API)
+ decoy.when(ot_3_hardware_api.get_robot_type()).then_return(FlexRobotType)
result = ensure_ot3_hardware(
ot_3_hardware_api,
)
@@ -28,6 +30,7 @@ def test_ensure_ot3_hardware(decoy: Decoy) -> None:
def test_ensure_ot3_hardware_raises_error(decoy: Decoy) -> None:
"""Should raise a HardwareNotSupportedError exception."""
ot_2_hardware_api = decoy.mock(cls=API)
+ decoy.when(ot_2_hardware_api.get_robot_type()).then_return(OT2RobotType)
with pytest.raises(HardwareNotSupportedError):
ensure_ot3_hardware(
ot_2_hardware_api,
diff --git a/api/tests/opentrons/protocol_engine/resources/test_pipette_data_provider.py b/api/tests/opentrons/protocol_engine/resources/test_pipette_data_provider.py
index c3cf10449fc..61d177ba42f 100644
--- a/api/tests/opentrons/protocol_engine/resources/test_pipette_data_provider.py
+++ b/api/tests/opentrons/protocol_engine/resources/test_pipette_data_provider.py
@@ -1,16 +1,27 @@
"""Test pipette data provider."""
+from typing import Dict
+from sys import maxsize
import pytest
from opentrons_shared_data.pipette.dev_types import PipetteNameType, PipetteModel
from opentrons_shared_data.pipette import pipette_definition, types as pip_types
+from opentrons_shared_data.pipette.pipette_definition import (
+ PipetteBoundingBoxOffsetDefinition,
+ TIP_OVERLAP_VERSION_MAXIMUM,
+)
from opentrons.hardware_control.dev_types import PipetteDict
from opentrons.protocol_engine.types import FlowRates
from opentrons.protocol_engine.resources.pipette_data_provider import (
LoadedStaticPipetteData,
VirtualPipetteDataProvider,
+ validate_and_default_tip_overlap_version,
+ get_latest_tip_overlap_before_version,
)
from opentrons.protocol_engine.resources import pipette_data_provider as subject
+from opentrons.protocol_engine.errors.exceptions import InvalidLoadPipetteSpecsError
+from ..pipette_fixtures import get_default_nozzle_map
+from opentrons.types import Point
@pytest.fixture
@@ -24,11 +35,11 @@ def test_get_virtual_pipette_static_config(
) -> None:
"""It should return config data given a pipette name."""
result = subject_instance.get_virtual_pipette_static_config(
- PipetteNameType.P20_SINGLE_GEN2.value, "some-id"
+ PipetteNameType.P20_SINGLE_GEN2.value, "some-id", "v0"
)
assert result == LoadedStaticPipetteData(
- model="p20_single_v2.0",
+ model="p20_single_v2.2",
display_name="P20 Single-Channel GEN2",
min_volume=1,
max_volume=20.0,
@@ -50,6 +61,9 @@ def test_get_virtual_pipette_static_config(
"opentrons/opentrons_96_tiprack_10ul/1": 8.25,
"opentrons/opentrons_96_tiprack_20ul/1": 8.25,
},
+ nozzle_map=result.nozzle_map,
+ back_left_corner_offset=Point(0, 0, 10.45),
+ front_right_corner_offset=Point(0, 0, 10.45),
)
@@ -58,10 +72,10 @@ def test_configure_virtual_pipette_for_volume(
) -> None:
"""It should return an updated config if the liquid class changes."""
result1 = subject_instance.get_virtual_pipette_static_config(
- PipetteNameType.P50_SINGLE_FLEX.value, "my-pipette"
+ PipetteNameType.P50_SINGLE_FLEX.value, "my-pipette", "v0"
)
assert result1 == LoadedStaticPipetteData(
- model="p50_single_v3.0",
+ model="p50_single_v3.6",
display_name="Flex 1-Channel 50 μL",
min_volume=5,
max_volume=50.0,
@@ -69,21 +83,24 @@ def test_configure_virtual_pipette_for_volume(
nozzle_offset_z=-259.15,
home_position=230.15,
flow_rates=FlowRates(
- default_blow_out={"2.14": 4.0},
- default_aspirate={"2.14": 8.0},
- default_dispense={"2.14": 8.0},
+ default_blow_out={"2.14": 57},
+ default_aspirate={"2.14": 35},
+ default_dispense={"2.14": 57},
),
tip_configuration_lookup_table=result1.tip_configuration_lookup_table,
nominal_tip_overlap=result1.nominal_tip_overlap,
+ nozzle_map=result1.nozzle_map,
+ back_left_corner_offset=Point(-8.0, -22.0, -259.15),
+ front_right_corner_offset=Point(-8.0, -22.0, -259.15),
)
subject_instance.configure_virtual_pipette_for_volume(
"my-pipette", 1, result1.model
)
result2 = subject_instance.get_virtual_pipette_static_config(
- PipetteNameType.P50_SINGLE_FLEX.value, "my-pipette"
+ PipetteNameType.P50_SINGLE_FLEX.value, "my-pipette", "v0"
)
assert result2 == LoadedStaticPipetteData(
- model="p50_single_v3.0",
+ model="p50_single_v3.6",
display_name="Flex 1-Channel 50 μL",
min_volume=1,
max_volume=30,
@@ -91,12 +108,15 @@ def test_configure_virtual_pipette_for_volume(
nozzle_offset_z=-259.15,
home_position=230.15,
flow_rates=FlowRates(
- default_blow_out={"2.14": 4.0},
- default_aspirate={"2.14": 8.0},
- default_dispense={"2.14": 8.0},
+ default_blow_out={"2.14": 26.7},
+ default_aspirate={"2.14": 26.7},
+ default_dispense={"2.14": 26.7},
),
tip_configuration_lookup_table=result2.tip_configuration_lookup_table,
nominal_tip_overlap=result2.nominal_tip_overlap,
+ nozzle_map=result2.nozzle_map,
+ back_left_corner_offset=Point(-8.0, -22.0, -259.15),
+ front_right_corner_offset=Point(-8.0, -22.0, -259.15),
)
@@ -105,7 +125,7 @@ def test_load_virtual_pipette_by_model_string(
) -> None:
"""It should return config data given a pipette model."""
result = subject_instance.get_virtual_pipette_static_config_by_model_string(
- "p300_multi_v2.1", "my-pipette"
+ "p300_multi_v2.1", "my-pipette", "v0"
)
assert result == LoadedStaticPipetteData(
model="p300_multi_v2.1",
@@ -122,6 +142,9 @@ def test_load_virtual_pipette_by_model_string(
),
tip_configuration_lookup_table=result.tip_configuration_lookup_table,
nominal_tip_overlap=result.nominal_tip_overlap,
+ nozzle_map=result.nozzle_map,
+ back_left_corner_offset=Point(-16.0, 43.15, 35.52),
+ front_right_corner_offset=Point(16.0, -43.15, 35.52),
)
@@ -138,12 +161,37 @@ def test_load_virtual_pipette_nozzle_layout(
assert result.front_right == "E1"
assert result.back_left == "A1"
+ subject_instance.configure_virtual_pipette_nozzle_layout(
+ "my-pipette", "p300_multi_v2.1"
+ )
+ result = subject_instance.get_nozzle_layout_for_pipette("my-pipette")
+ assert result.configuration.value == "FULL"
-def test_get_pipette_static_config(
+ subject_instance.configure_virtual_pipette_nozzle_layout(
+ "my-96-pipette", "p1000_96_v3.6", "A1", "A12", "A1"
+ )
+ result = subject_instance.get_nozzle_layout_for_pipette("my-96-pipette")
+ assert result.configuration.value == "ROW"
+
+ subject_instance.configure_virtual_pipette_nozzle_layout(
+ "my-96-pipette", "p1000_96_v3.6", "A1", "A1"
+ )
+ result = subject_instance.get_nozzle_layout_for_pipette("my-96-pipette")
+ assert result.configuration.value == "SINGLE"
+
+ subject_instance.configure_virtual_pipette_nozzle_layout(
+ "my-96-pipette", "p1000_96_v3.6", "A1", "H1"
+ )
+ result = subject_instance.get_nozzle_layout_for_pipette("my-96-pipette")
+ assert result.configuration.value == "COLUMN"
+
+
+@pytest.fixture
+def pipette_dict(
supported_tip_fixture: pipette_definition.SupportedTipsDefinition,
-) -> None:
- """It should return config data given a PipetteDict."""
- pipette_dict: PipetteDict = {
+) -> PipetteDict:
+ """Get a pipette dict."""
+ return {
"name": "p300_single_gen2",
"min_volume": 20,
"max_volume": 300,
@@ -162,6 +210,12 @@ def test_get_pipette_static_config(
"opentrons/opentrons_96_tiprack_300ul/1": 8.2,
"opentrons/opentrons_96_filtertiprack_200ul/1": 8.2,
},
+ "versioned_tip_overlap": {
+ "v0": {
+ "default": 8.2,
+ },
+ "v2": {"default": 9.3},
+ },
"available_volume": 300.0,
"return_tip_height": 0.5,
"default_aspirate_flow_rates": {"2.0": 46.43, "2.1": 92.86},
@@ -178,10 +232,31 @@ def test_get_pipette_static_config(
"default_aspirate_speeds": {"2.0": 5.021202, "2.6": 10.042404},
"default_push_out_volume": 3,
"supported_tips": {pip_types.PipetteTipType.t300: supported_tip_fixture},
- "current_nozzle_map": None,
+ "current_nozzle_map": get_default_nozzle_map(PipetteNameType.P300_SINGLE_GEN2),
+ "pipette_bounding_box_offsets": PipetteBoundingBoxOffsetDefinition(
+ backLeftCorner=[10, 20, 30],
+ frontRightCorner=[40, 50, 60],
+ ),
}
- result = subject.get_pipette_static_config(pipette_dict)
+
+@pytest.mark.parametrize(
+ "tip_overlap_version,overlap_data",
+ [
+ ("v0", {"default": 8.2}),
+ ("v1", {"default": 8.2}),
+ ("v2", {"default": 9.3}),
+ ("v10000", {"default": 9.3}),
+ ],
+)
+def test_get_pipette_static_config(
+ supported_tip_fixture: pipette_definition.SupportedTipsDefinition,
+ pipette_dict: PipetteDict,
+ tip_overlap_version: str,
+ overlap_data: Dict[str, float],
+) -> None:
+ """It should return config data given a PipetteDict."""
+ result = subject.get_pipette_static_config(pipette_dict, tip_overlap_version)
assert result == LoadedStaticPipetteData(
model="p300_single_v2.0",
@@ -195,13 +270,62 @@ def test_get_pipette_static_config(
default_blow_out={"2.0": 46.43, "2.2": 92.86},
),
tip_configuration_lookup_table={300: supported_tip_fixture},
- nominal_tip_overlap={
- "default": 8.2,
- "opentrons/opentrons_96_tiprack_300ul/1": 8.2,
- "opentrons/opentrons_96_filtertiprack_200ul/1": 8.2,
- },
+ nominal_tip_overlap=overlap_data,
# TODO(mc, 2023-02-28): these two values are not present in PipetteDict
# https://opentrons.atlassian.net/browse/RCORE-655
nozzle_offset_z=0,
home_position=0,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE_GEN2),
+ back_left_corner_offset=Point(10, 20, 30),
+ front_right_corner_offset=Point(40, 50, 60),
+ )
+
+
+@pytest.mark.parametrize(
+ "version",
+ [
+ "",
+ "qwe",
+ "v",
+ "v-1",
+ "vab",
+ ],
+)
+def test_validate_bad_tip_overlap_versions(version: str) -> None:
+ """Raise for bad tip overlap version specs."""
+ with pytest.raises(InvalidLoadPipetteSpecsError):
+ validate_and_default_tip_overlap_version(version)
+
+
+def test_default_tip_overlap_versions() -> None:
+ """Default None tip overlap version specs."""
+ assert (
+ validate_and_default_tip_overlap_version(None)
+ == f"v{TIP_OVERLAP_VERSION_MAXIMUM}"
)
+
+
+@pytest.mark.parametrize("version", ["v0", "v1", f"v{maxsize+1}"])
+def test_pass_valid_tip_overlap_versions(version: str) -> None:
+ """Pass valid tip overlap specs."""
+ assert validate_and_default_tip_overlap_version(version) == version
+
+
+@pytest.mark.parametrize(
+ "version,target_data",
+ [
+ ("v0", {"default": 123.0}),
+ ("v1", {"default": 321.1}),
+ ("v3", {"default": 333.5}),
+ ("v9999", {"default": 4414.99}),
+ ],
+)
+def test_get_latest_tip_overlap(version: str, target_data: Dict[str, float]) -> None:
+ """Test the search function for latest offset."""
+ overlap = {
+ "v0": {"default": 123.0},
+ "v1": {"default": 321.1},
+ "v2": {"default": 333.5},
+ "v1231": {"default": 4414.99},
+ }
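+    # Expect the data of the greatest available version at or below the requested version.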
+ assert get_latest_tip_overlap_before_version(overlap, version) == target_data
diff --git a/api/tests/opentrons/protocol_engine/state/command_fixtures.py b/api/tests/opentrons/protocol_engine/state/command_fixtures.py
index ef548377a3e..98ee48e724d 100644
--- a/api/tests/opentrons/protocol_engine/state/command_fixtures.py
+++ b/api/tests/opentrons/protocol_engine/state/command_fixtures.py
@@ -9,10 +9,14 @@
from opentrons.protocol_engine import ErrorOccurrence, commands as cmd
from opentrons.protocol_engine.types import (
DeckPoint,
+ ModuleModel,
+ ModuleDefinition,
MovementAxis,
WellLocation,
LabwareLocation,
+ DeckSlotLocation,
LabwareMovementStrategy,
+ AddressableOffsetVector,
)
@@ -20,6 +24,7 @@ def create_queued_command(
command_id: str = "command-id",
command_key: str = "command-key",
command_type: str = "command-type",
+ intent: cmd.CommandIntent = cmd.CommandIntent.PROTOCOL,
params: Optional[BaseModel] = None,
) -> cmd.Command:
"""Given command data, build a pending command model."""
@@ -32,6 +37,7 @@ def create_queued_command(
createdAt=datetime(year=2021, month=1, day=1),
status=cmd.CommandStatus.QUEUED,
params=params or BaseModel(),
+ intent=intent,
),
)
@@ -159,6 +165,30 @@ def create_load_pipette_command(
)
+def create_load_module_command(
+ module_id: str,
+ location: DeckSlotLocation,
+ model: ModuleModel,
+) -> cmd.LoadModule:
+ """Get a completed LoadModule command."""
+ params = cmd.LoadModuleParams(moduleId=module_id, location=location, model=model)
+ result = cmd.LoadModuleResult(
+ moduleId=module_id,
+ model=model,
+ serialNumber=None,
+ definition=ModuleDefinition.construct(), # type: ignore[call-arg]
+ )
+
+ return cmd.LoadModule(
+ id="command-id",
+ key="command-key",
+ status=cmd.CommandStatus.SUCCEEDED,
+ createdAt=datetime.now(),
+ params=params,
+ result=result,
+ )
+
+
def create_aspirate_command(
pipette_id: str,
volume: float,
@@ -189,6 +219,29 @@ def create_aspirate_command(
)
+def create_aspirate_in_place_command(
+ pipette_id: str,
+ volume: float,
+ flow_rate: float,
+) -> cmd.AspirateInPlace:
+ """Get a completed Aspirate command."""
+ params = cmd.AspirateInPlaceParams(
+ pipetteId=pipette_id,
+ volume=volume,
+ flowRate=flow_rate,
+ )
+ result = cmd.AspirateInPlaceResult(volume=volume)
+
+ return cmd.AspirateInPlace(
+ id="command-id",
+ key="command-key",
+ status=cmd.CommandStatus.SUCCEEDED,
+ createdAt=datetime.now(),
+ params=params,
+ result=result,
+ )
+
+
def create_dispense_command(
pipette_id: str,
volume: float,
@@ -343,6 +396,31 @@ def create_move_to_well_command(
)
+def create_move_to_addressable_area_command(
+ pipette_id: str,
+ addressable_area_name: str = "area-name",
+ offset: AddressableOffsetVector = AddressableOffsetVector(x=0, y=0, z=0),
+ destination: DeckPoint = DeckPoint(x=0, y=0, z=0),
+) -> cmd.MoveToAddressableArea:
+ """Get a completed MoveToWell command."""
+ params = cmd.MoveToAddressableAreaParams(
+ pipetteId=pipette_id,
+ addressableAreaName=addressable_area_name,
+ offset=offset,
+ )
+
+ result = cmd.MoveToAddressableAreaResult(position=destination)
+
+ return cmd.MoveToAddressableArea(
+ id="command-id",
+ key="command-key",
+ status=cmd.CommandStatus.SUCCEEDED,
+ createdAt=datetime.now(),
+ params=params,
+ result=result,
+ )
+
+
def create_move_to_coordinates_command(
pipette_id: str,
coordinates: DeckPoint = DeckPoint(x=0, y=0, z=0),
@@ -414,6 +492,27 @@ def create_blow_out_command(
)
+def create_blow_out_in_place_command(
+ pipette_id: str,
+ flow_rate: float,
+) -> cmd.BlowOutInPlace:
+ """Get a completed blowOutInPlace command."""
+ params = cmd.BlowOutInPlaceParams(
+ pipetteId=pipette_id,
+ flowRate=flow_rate,
+ )
+ result = cmd.BlowOutInPlaceResult()
+
+ return cmd.BlowOutInPlace(
+ id="command-id",
+ key="command-key",
+ status=cmd.CommandStatus.SUCCEEDED,
+ createdAt=datetime(year=2022, month=1, day=1),
+ params=params,
+ result=result,
+ )
+
+
def create_touch_tip_command(
pipette_id: str,
labware_id: str = "labware-id",
@@ -480,3 +579,27 @@ def create_prepare_to_aspirate_command(pipette_id: str) -> cmd.PrepareToAspirate
params=params,
result=result,
)
+
+
+def create_reload_labware_command(
+ labware_id: str,
+ offset_id: Optional[str],
+) -> cmd.ReloadLabware:
+ """Create a completed ReloadLabware command."""
+ params = cmd.ReloadLabwareParams(
+ labwareId=labware_id,
+ )
+
+ result = cmd.ReloadLabwareResult(
+ labwareId=labware_id,
+ offsetId=offset_id,
+ )
+
+ return cmd.ReloadLabware(
+ id="command-id",
+ key="command-key",
+ status=cmd.CommandStatus.SUCCEEDED,
+ createdAt=datetime.now(),
+ params=params,
+ result=result,
+ )
diff --git a/api/tests/opentrons/protocol_engine/state/conftest.py b/api/tests/opentrons/protocol_engine/state/conftest.py
index 9188101b05b..f657a9c3ed9 100644
--- a/api/tests/opentrons/protocol_engine/state/conftest.py
+++ b/api/tests/opentrons/protocol_engine/state/conftest.py
@@ -4,6 +4,7 @@
from opentrons.protocol_engine.state.labware import LabwareView
from opentrons.protocol_engine.state.pipettes import PipetteView
+from opentrons.protocol_engine.state.addressable_areas import AddressableAreaView
from opentrons.protocol_engine.state.geometry import GeometryView
@@ -19,6 +20,12 @@ def pipette_view(decoy: Decoy) -> PipetteView:
return decoy.mock(cls=PipetteView)
+@pytest.fixture
+def addressable_area_view(decoy: Decoy) -> AddressableAreaView:
+ """Get a mock in the shape of a AddressableAreaView."""
+ return decoy.mock(cls=AddressableAreaView)
+
+
@pytest.fixture
def geometry_view(decoy: Decoy) -> GeometryView:
"""Get a mock in the shape of a GeometryView."""
diff --git a/api/tests/opentrons/protocol_engine/state/test_addressable_area_store.py b/api/tests/opentrons/protocol_engine/state/test_addressable_area_store.py
new file mode 100644
index 00000000000..8a79d31ce92
--- /dev/null
+++ b/api/tests/opentrons/protocol_engine/state/test_addressable_area_store.py
@@ -0,0 +1,262 @@
+"""Addressable area state store tests."""
+import pytest
+
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5
+from opentrons_shared_data.labware.labware_definition import Parameters
+from opentrons.protocols.models import LabwareDefinition
+from opentrons.types import DeckSlotName
+
+from opentrons.protocol_engine.commands import Command
+from opentrons.protocol_engine.actions import (
+ SucceedCommandAction,
+ AddAddressableAreaAction,
+)
+from opentrons.protocol_engine.state import Config
+from opentrons.protocol_engine.state.addressable_areas import (
+ AddressableAreaStore,
+ AddressableAreaState,
+)
+from opentrons.protocol_engine.types import (
+ DeckType,
+ DeckConfigurationType,
+ ModuleModel,
+ LabwareMovementStrategy,
+ DeckSlotLocation,
+ AddressableAreaLocation,
+)
+
+from .command_fixtures import (
+ create_load_labware_command,
+ create_load_module_command,
+ create_move_labware_command,
+ create_move_to_addressable_area_command,
+)
+
+
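+# Deck configuration used by the non-simulated store fixture: standard slot cutouts plus
+# trash bin, staging area, and waste chute fixtures.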
+def _make_deck_config() -> DeckConfigurationType:
+ return [
+ ("cutoutA1", "singleLeftSlot", None),
+ ("cutoutB1", "singleLeftSlot", None),
+ ("cutoutC1", "singleLeftSlot", None),
+ ("cutoutD1", "singleLeftSlot", None),
+ ("cutoutA2", "singleCenterSlot", None),
+ ("cutoutB2", "singleCenterSlot", None),
+ ("cutoutC2", "singleCenterSlot", None),
+ ("cutoutD2", "singleCenterSlot", None),
+ ("cutoutA3", "trashBinAdapter", None),
+ ("cutoutB3", "singleRightSlot", None),
+ ("cutoutC3", "stagingAreaRightSlot", None),
+ ("cutoutD3", "wasteChuteRightAdapterNoCover", None),
+ ]
+
+
+@pytest.fixture
+def simulated_subject(
+ ot3_standard_deck_def: DeckDefinitionV5,
+) -> AddressableAreaStore:
+ """Get an AddressableAreaStore test subject, under simulated deck conditions."""
+ return AddressableAreaStore(
+ deck_configuration=[],
+ config=Config(
+ use_simulated_deck_config=True,
+ robot_type="OT-3 Standard",
+ deck_type=DeckType.OT3_STANDARD,
+ ),
+ deck_definition=ot3_standard_deck_def,
+ )
+
+
+@pytest.fixture
+def subject(
+ ot3_standard_deck_def: DeckDefinitionV5,
+) -> AddressableAreaStore:
+ """Get an AddressableAreaStore test subject."""
+ return AddressableAreaStore(
+ deck_configuration=_make_deck_config(),
+ config=Config(
+ use_simulated_deck_config=False,
+ robot_type="OT-3 Standard",
+ deck_type=DeckType.OT3_STANDARD,
+ ),
+ deck_definition=ot3_standard_deck_def,
+ )
+
+
+def test_initial_state_simulated(
+ ot3_standard_deck_def: DeckDefinitionV5,
+ simulated_subject: AddressableAreaStore,
+) -> None:
+ """It should create the Addressable Area store with no loaded addressable areas."""
+ assert simulated_subject.state == AddressableAreaState(
+ loaded_addressable_areas_by_name={},
+ potential_cutout_fixtures_by_cutout_id={},
+ deck_definition=ot3_standard_deck_def,
+ deck_configuration=[],
+ robot_type="OT-3 Standard",
+ use_simulated_deck_config=True,
+ )
+
+
+def test_initial_state(
+ ot3_standard_deck_def: DeckDefinitionV5,
+ subject: AddressableAreaStore,
+) -> None:
+ """It should create the Addressable Area store with loaded addressable areas."""
+ assert subject.state.potential_cutout_fixtures_by_cutout_id == {}
+ assert not subject.state.use_simulated_deck_config
+ assert subject.state.deck_definition == ot3_standard_deck_def
+ assert subject.state.deck_configuration == _make_deck_config()
+ # Loading 9 regular slots, 1 trash, 2 Staging Area slots and 4 waste chute types
+ assert len(subject.state.loaded_addressable_areas_by_name) == 16
+
+
+@pytest.mark.parametrize(
+ ("command", "expected_area"),
+ (
+ (
+ create_load_labware_command(
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_A1),
+ labware_id="test-labware-id",
+ definition=LabwareDefinition.construct( # type: ignore[call-arg]
+ parameters=Parameters.construct(loadName="blah"), # type: ignore[call-arg]
+ namespace="bleh",
+ version=123,
+ ),
+ offset_id="offset-id",
+ display_name="display-name",
+ ),
+ "A1",
+ ),
+ (
+ create_load_labware_command(
+ location=AddressableAreaLocation(addressableAreaName="A4"),
+ labware_id="test-labware-id",
+ definition=LabwareDefinition.construct( # type: ignore[call-arg]
+ parameters=Parameters.construct(loadName="blah"), # type: ignore[call-arg]
+ namespace="bleh",
+ version=123,
+ ),
+ offset_id="offset-id",
+ display_name="display-name",
+ ),
+ "A4",
+ ),
+ (
+ create_load_module_command(
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_A1),
+ module_id="test-module-id",
+ model=ModuleModel.TEMPERATURE_MODULE_V2,
+ ),
+ "A1",
+ ),
+ (
+ create_move_labware_command(
+ new_location=DeckSlotLocation(slotName=DeckSlotName.SLOT_A1),
+ strategy=LabwareMovementStrategy.USING_GRIPPER,
+ ),
+ "A1",
+ ),
+ (
+ create_move_labware_command(
+ new_location=AddressableAreaLocation(addressableAreaName="A4"),
+ strategy=LabwareMovementStrategy.USING_GRIPPER,
+ ),
+ "A4",
+ ),
+ (
+ create_move_to_addressable_area_command(
+ pipette_id="pipette-id", addressable_area_name="gripperWasteChute"
+ ),
+ "gripperWasteChute",
+ ),
+ ),
+)
+def test_addressable_area_referencing_commands_load_on_simulated_deck(
+ command: Command,
+ expected_area: str,
+ simulated_subject: AddressableAreaStore,
+) -> None:
+ """It should check and store the addressable area when referenced in a command."""
+ simulated_subject.handle_action(
+ SucceedCommandAction(private_result=None, command=command)
+ )
+ assert expected_area in simulated_subject.state.loaded_addressable_areas_by_name
+
+
+@pytest.mark.parametrize(
+ ("command", "expected_area"),
+ (
+ (
+ create_load_labware_command(
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_A1),
+ labware_id="test-labware-id",
+ definition=LabwareDefinition.construct( # type: ignore[call-arg]
+ parameters=Parameters.construct(loadName="blah"), # type: ignore[call-arg]
+ namespace="bleh",
+ version=123,
+ ),
+ offset_id="offset-id",
+ display_name="display-name",
+ ),
+ "A1",
+ ),
+ (
+ create_load_labware_command(
+ location=AddressableAreaLocation(addressableAreaName="C4"),
+ labware_id="test-labware-id",
+ definition=LabwareDefinition.construct( # type: ignore[call-arg]
+ parameters=Parameters.construct(loadName="blah"), # type: ignore[call-arg]
+ namespace="bleh",
+ version=123,
+ ),
+ offset_id="offset-id",
+ display_name="display-name",
+ ),
+ "C4",
+ ),
+ (
+ create_load_module_command(
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_A1),
+ module_id="test-module-id",
+ model=ModuleModel.TEMPERATURE_MODULE_V2,
+ ),
+ "A1",
+ ),
+ (
+ create_move_labware_command(
+ new_location=DeckSlotLocation(slotName=DeckSlotName.SLOT_A1),
+ strategy=LabwareMovementStrategy.USING_GRIPPER,
+ ),
+ "A1",
+ ),
+ (
+ create_move_labware_command(
+ new_location=AddressableAreaLocation(addressableAreaName="C4"),
+ strategy=LabwareMovementStrategy.USING_GRIPPER,
+ ),
+ "C4",
+ ),
+ ),
+)
+def test_addressable_area_referencing_commands_load(
+ command: Command,
+ expected_area: str,
+ subject: AddressableAreaStore,
+) -> None:
+ """It should check that the addressable area is in the deck config."""
+ subject.handle_action(SucceedCommandAction(private_result=None, command=command))
+ assert expected_area in subject.state.loaded_addressable_areas_by_name
+
+
+def test_add_addressable_area_action(
+ simulated_subject: AddressableAreaStore,
+) -> None:
+ """It should add the addressable area to the store."""
+ simulated_subject.handle_action(
+ AddAddressableAreaAction(
+ addressable_area=AddressableAreaLocation(
+ addressableAreaName="movableTrashA1"
+ )
+ )
+ )
+ assert "movableTrashA1" in simulated_subject.state.loaded_addressable_areas_by_name
diff --git a/api/tests/opentrons/protocol_engine/state/test_addressable_area_view.py b/api/tests/opentrons/protocol_engine/state/test_addressable_area_view.py
new file mode 100644
index 00000000000..e903c59a45d
--- /dev/null
+++ b/api/tests/opentrons/protocol_engine/state/test_addressable_area_view.py
@@ -0,0 +1,469 @@
+"""Addressable area state view tests."""
+import inspect
+
+import pytest
+from decoy import Decoy
+from typing import Dict, Set, Optional, cast
+
+from opentrons_shared_data.robot.dev_types import RobotType
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5
+from opentrons.types import Point, DeckSlotName
+
+from opentrons.protocol_engine.errors import (
+ AreaNotInDeckConfigurationError,
+ IncompatibleAddressableAreaError,
+ SlotDoesNotExistError,
+ AddressableAreaDoesNotExistError,
+)
+from opentrons.protocol_engine.resources import deck_configuration_provider
+from opentrons.protocol_engine.state.addressable_areas import (
+ AddressableAreaState,
+ AddressableAreaView,
+)
+from opentrons.protocol_engine.types import (
+ AddressableArea,
+ AreaType,
+ DeckConfigurationType,
+ PotentialCutoutFixture,
+ Dimensions,
+ DeckPoint,
+ AddressableOffsetVector,
+)
+
+
+@pytest.fixture(autouse=True)
+def patch_mock_deck_configuration_provider(
+ decoy: Decoy, monkeypatch: pytest.MonkeyPatch
+) -> None:
+ """Mock out deck_configuration_provider.py functions."""
+ for name, func in inspect.getmembers(
+ deck_configuration_provider, inspect.isfunction
+ ):
+ monkeypatch.setattr(deck_configuration_provider, name, decoy.mock(func=func))
+
+
+def get_addressable_area_view(
+ loaded_addressable_areas_by_name: Optional[Dict[str, AddressableArea]] = None,
+ potential_cutout_fixtures_by_cutout_id: Optional[
+ Dict[str, Set[PotentialCutoutFixture]]
+ ] = None,
+ deck_definition: Optional[DeckDefinitionV5] = None,
+ deck_configuration: Optional[DeckConfigurationType] = None,
+ robot_type: RobotType = "OT-3 Standard",
+ use_simulated_deck_config: bool = False,
+) -> AddressableAreaView:
+ """Get a labware view test subject."""
+ state = AddressableAreaState(
+ loaded_addressable_areas_by_name=loaded_addressable_areas_by_name or {},
+ potential_cutout_fixtures_by_cutout_id=potential_cutout_fixtures_by_cutout_id
+ or {},
+ deck_definition=deck_definition or cast(DeckDefinitionV5, {"otId": "fake"}),
+ deck_configuration=deck_configuration or [],
+ robot_type=robot_type,
+ use_simulated_deck_config=use_simulated_deck_config,
+ )
+
+ return AddressableAreaView(state=state)
+
+
+def test_get_all_cutout_fixtures_simulated_deck_config() -> None:
+ """It should return no cutout fixtures when the deck config is simulated."""
+ subject = get_addressable_area_view(
+ deck_configuration=None,
+ use_simulated_deck_config=True,
+ )
+ assert subject.get_all_cutout_fixtures() is None
+
+
+def test_get_all_cutout_fixtures_non_simulated_deck_config() -> None:
+ """It should return the cutout fixtures from the deck config, if it's not simulated."""
+ subject = get_addressable_area_view(
+ deck_configuration=[
+ ("cutout-id-1", "cutout-fixture-id-1", None),
+ ("cutout-id-2", "cutout-fixture-id-2", None),
+ ],
+ use_simulated_deck_config=False,
+ )
+ assert subject.get_all_cutout_fixtures() == [
+ "cutout-fixture-id-1",
+ "cutout-fixture-id-2",
+ ]
+
+
+def test_get_loaded_addressable_area() -> None:
+ """It should get the loaded addressable area."""
+ addressable_area = AddressableArea(
+ area_name="area",
+ area_type=AreaType.SLOT,
+ base_slot=DeckSlotName.SLOT_D3,
+ display_name="fancy name",
+ bounding_box=Dimensions(x=1, y=2, z=3),
+ position=AddressableOffsetVector(x=7, y=8, z=9),
+ compatible_module_types=["magneticModuleType"],
+ )
+ subject = get_addressable_area_view(
+ loaded_addressable_areas_by_name={"abc": addressable_area}
+ )
+
+ assert subject.get_addressable_area("abc") is addressable_area
+
+
+def test_get_loaded_addressable_area_raises() -> None:
+ """It should raise if the addressable area does not exist."""
+ subject = get_addressable_area_view()
+
+ with pytest.raises(AreaNotInDeckConfigurationError):
+ subject.get_addressable_area("abc")
+
+
+def test_get_addressable_area_for_simulation_already_loaded() -> None:
+ """It should get the addressable area for a simulation that has not been loaded yet."""
+ addressable_area = AddressableArea(
+ area_name="area",
+ area_type=AreaType.SLOT,
+ base_slot=DeckSlotName.SLOT_D3,
+ display_name="fancy name",
+ bounding_box=Dimensions(x=1, y=2, z=3),
+ position=AddressableOffsetVector(x=7, y=8, z=9),
+ compatible_module_types=["magneticModuleType"],
+ )
+ subject = get_addressable_area_view(
+ loaded_addressable_areas_by_name={"abc": addressable_area},
+ use_simulated_deck_config=True,
+ )
+
+ assert subject.get_addressable_area("abc") is addressable_area
+
+
+def test_get_addressable_area_for_simulation_not_loaded(decoy: Decoy) -> None:
+ """It should get the addressable area for a simulation that has not been loaded yet."""
+ subject = get_addressable_area_view(
+ potential_cutout_fixtures_by_cutout_id={
+ "cutoutA1": {
+ PotentialCutoutFixture(
+ cutout_id="cutoutA1",
+ cutout_fixture_id="blah",
+ provided_addressable_areas=frozenset(),
+ )
+ }
+ },
+ use_simulated_deck_config=True,
+ )
+
+ addressable_area = AddressableArea(
+ area_name="area",
+ area_type=AreaType.SLOT,
+ base_slot=DeckSlotName.SLOT_D3,
+ display_name="fancy name",
+ bounding_box=Dimensions(x=1, y=2, z=3),
+ position=AddressableOffsetVector(x=7, y=8, z=9),
+ compatible_module_types=["magneticModuleType"],
+ )
+
+ decoy.when(
+ deck_configuration_provider.get_potential_cutout_fixtures(
+ "abc", subject.state.deck_definition
+ )
+ ).then_return(
+ (
+ "cutoutA1",
+ {
+ PotentialCutoutFixture(
+ cutout_id="cutoutA1",
+ cutout_fixture_id="blah",
+ provided_addressable_areas=frozenset(),
+ )
+ },
+ )
+ )
+
+ decoy.when(
+ deck_configuration_provider.get_cutout_position(
+ "cutoutA1", subject.state.deck_definition
+ )
+ ).then_return(DeckPoint(x=1, y=2, z=3))
+
+ decoy.when(
+ deck_configuration_provider.get_addressable_area_from_name(
+ "abc",
+ DeckPoint(x=1, y=2, z=3),
+ DeckSlotName.SLOT_A1,
+ subject.state.deck_definition,
+ )
+ ).then_return(addressable_area)
+
+ assert subject.get_addressable_area("abc") is addressable_area
+
+
+def test_get_addressable_area_for_simulation_raises(decoy: Decoy) -> None:
+ """It should raise if the requested addressable area is incompatible with loaded ones."""
+ subject = get_addressable_area_view(
+ potential_cutout_fixtures_by_cutout_id={
+ "123": {
+ PotentialCutoutFixture(
+ cutout_id="789",
+ cutout_fixture_id="bleh",
+ provided_addressable_areas=frozenset(),
+ )
+ }
+ },
+ use_simulated_deck_config=True,
+ )
+
+ decoy.when(
+ deck_configuration_provider.get_potential_cutout_fixtures(
+ "abc", subject.state.deck_definition
+ )
+ ).then_return(
+ (
+ "123",
+ {
+ PotentialCutoutFixture(
+ cutout_id="123",
+ cutout_fixture_id="blah",
+ provided_addressable_areas=frozenset(),
+ )
+ },
+ )
+ )
+
+ decoy.when(
+ deck_configuration_provider.get_provided_addressable_area_names(
+ "bleh", "789", subject.state.deck_definition
+ )
+ ).then_return([])
+
+ with pytest.raises(IncompatibleAddressableAreaError):
+ subject.get_addressable_area("abc")
+
+
+def test_get_addressable_area_position() -> None:
+ """It should get the absolute location of the addressable area."""
+ subject = get_addressable_area_view(
+ loaded_addressable_areas_by_name={
+ "abc": AddressableArea(
+ area_name="area",
+ area_type=AreaType.SLOT,
+ base_slot=DeckSlotName.SLOT_D3,
+ display_name="fancy name",
+ bounding_box=Dimensions(x=10, y=20, z=30),
+ position=AddressableOffsetVector(x=1, y=2, z=3),
+ compatible_module_types=[],
+ )
+ }
+ )
+
+ result = subject.get_addressable_area_position("abc")
+ assert result == Point(1, 2, 3)
+
+
+def test_get_addressable_area_move_to_location() -> None:
+ """It should get the absolute location of an addressable area's move to location."""
+ subject = get_addressable_area_view(
+ loaded_addressable_areas_by_name={
+ "abc": AddressableArea(
+ area_name="area",
+ area_type=AreaType.SLOT,
+ base_slot=DeckSlotName.SLOT_D3,
+ display_name="fancy name",
+ bounding_box=Dimensions(x=10, y=20, z=30),
+ position=AddressableOffsetVector(x=1, y=2, z=3),
+ compatible_module_types=[],
+ )
+ }
+ )
+
+ result = subject.get_addressable_area_move_to_location("abc")
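+    # Expected: position (1, 2, 3) offset by (x/2, y/2, z) of the 10 x 20 x 30 bounding box.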
+ assert result == Point(6, 12, 33)
+
+
+def test_get_addressable_area_center() -> None:
+ """It should get the absolute location of an addressable area's center."""
+ subject = get_addressable_area_view(
+ loaded_addressable_areas_by_name={
+ "abc": AddressableArea(
+ area_name="area",
+ area_type=AreaType.SLOT,
+ base_slot=DeckSlotName.SLOT_D3,
+ display_name="fancy name",
+ bounding_box=Dimensions(x=10, y=20, z=30),
+ position=AddressableOffsetVector(x=1, y=2, z=3),
+ compatible_module_types=[],
+ )
+ }
+ )
+
+ result = subject.get_addressable_area_center("abc")
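+    # Expected: position (1, 2, 3) offset by (x/2, y/2, 0) of the 10 x 20 x 30 bounding box.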
+ assert result == Point(6, 12, 3)
+
+
+def test_get_fixture_height(decoy: Decoy) -> None:
+ """It should return the height of the requested fixture."""
+ subject = get_addressable_area_view()
+ decoy.when(
+ deck_configuration_provider.get_cutout_fixture(
+ "someShortCutoutFixture", subject.state.deck_definition
+ )
+ ).then_return(
+ {
+ "height": 10,
+ # These values don't matter:
+ "id": "id",
+ "expectOpentronsModuleSerialNumber": False,
+ "fixtureGroup": {},
+ "mayMountTo": [],
+ "displayName": "",
+ "providesAddressableAreas": {},
+ }
+ )
+
+ decoy.when(
+ deck_configuration_provider.get_cutout_fixture(
+ "someTallCutoutFixture", subject.state.deck_definition
+ )
+ ).then_return(
+ {
+ "height": 9000.1,
+ # These values don't matter:
+ "id": "id",
+ "expectOpentronsModuleSerialNumber": False,
+ "fixtureGroup": {},
+ "mayMountTo": [],
+ "displayName": "",
+ "providesAddressableAreas": {},
+ }
+ )
+
+ assert subject.get_fixture_height("someShortCutoutFixture") == 10
+ assert subject.get_fixture_height("someTallCutoutFixture") == 9000.1
+
+
+def test_get_slot_definition() -> None:
+ """It should return a deck slot's definition."""
+ subject = get_addressable_area_view(
+ loaded_addressable_areas_by_name={
+ "6": AddressableArea(
+ area_name="area",
+ area_type=AreaType.SLOT,
+ base_slot=DeckSlotName.SLOT_D3,
+ display_name="fancy name",
+ bounding_box=Dimensions(x=1, y=2, z=3),
+ position=AddressableOffsetVector(x=7, y=8, z=9),
+ compatible_module_types=["magneticModuleType"],
+ )
+ }
+ )
+
+ result = subject.get_slot_definition(DeckSlotName.SLOT_6.id)
+
+ assert result == {
+ "id": "area",
+ "position": [7, 8, 9],
+ "boundingBox": {
+ "xDimension": 1,
+ "yDimension": 2,
+ "zDimension": 3,
+ },
+ "displayName": "fancy name",
+ "compatibleModuleTypes": ["magneticModuleType"],
+ }
+
+
+def test_get_slot_definition_raises_with_bad_slot_name(decoy: Decoy) -> None:
+ """It should raise a SlotDoesNotExistError if a bad slot name is given."""
+ subject = get_addressable_area_view()
+
+ decoy.when(
+ deck_configuration_provider.get_potential_cutout_fixtures(
+ "foo", subject.state.deck_definition
+ )
+ ).then_raise(AddressableAreaDoesNotExistError())
+
+ with pytest.raises(SlotDoesNotExistError):
+ subject.get_slot_definition("foo")
+
+
+def test_raise_if_area_not_in_deck_configuration_on_robot(decoy: Decoy) -> None:
+ """It should raise if the requested addressable area name is not loaded in state."""
+ subject = get_addressable_area_view(
+ loaded_addressable_areas_by_name={"real": decoy.mock(cls=AddressableArea)}
+ )
+
+ subject.raise_if_area_not_in_deck_configuration("real")
+
+ with pytest.raises(AreaNotInDeckConfigurationError):
+ subject.raise_if_area_not_in_deck_configuration("fake")
+
+
+def test_raise_if_area_not_in_deck_configuration_simulated_config(decoy: Decoy) -> None:
+ """It should raise if the requested addressable area name is not loaded in state."""
+ subject = get_addressable_area_view(
+ use_simulated_deck_config=True,
+ potential_cutout_fixtures_by_cutout_id={
+ "waluigi": {
+ PotentialCutoutFixture(
+ cutout_id="fire flower",
+ cutout_fixture_id="1up",
+ provided_addressable_areas=frozenset(),
+ )
+ },
+ "wario": {
+ PotentialCutoutFixture(
+ cutout_id="mushroom",
+ cutout_fixture_id="star",
+ provided_addressable_areas=frozenset(),
+ )
+ },
+ },
+ )
+
+ decoy.when(
+ deck_configuration_provider.get_potential_cutout_fixtures(
+ "mario", subject.state.deck_definition
+ )
+ ).then_return(
+ (
+ "wario",
+ {
+ PotentialCutoutFixture(
+ cutout_id="mushroom",
+ cutout_fixture_id="star",
+ provided_addressable_areas=frozenset(),
+ )
+ },
+ )
+ )
+
+ subject.raise_if_area_not_in_deck_configuration("mario")
+
+ decoy.when(
+ deck_configuration_provider.get_potential_cutout_fixtures(
+ "luigi", subject.state.deck_definition
+ )
+ ).then_return(
+ (
+ "waluigi",
+ {
+ PotentialCutoutFixture(
+ cutout_id="mushroom",
+ cutout_fixture_id="star",
+ provided_addressable_areas=frozenset(),
+ )
+ },
+ )
+ )
+
+ decoy.when(
+ deck_configuration_provider.get_provided_addressable_area_names(
+ "1up", "fire flower", subject.state.deck_definition
+ )
+ ).then_return([])
+
+ decoy.when(
+ deck_configuration_provider.get_addressable_area_display_name(
+ "luigi", subject.state.deck_definition
+ )
+ ).then_return("super luigi")
+
+ with pytest.raises(IncompatibleAddressableAreaError):
+ subject.raise_if_area_not_in_deck_configuration("luigi")
diff --git a/api/tests/opentrons/protocol_engine/state/test_command_history.py b/api/tests/opentrons/protocol_engine/state/test_command_history.py
new file mode 100644
index 00000000000..3c84b86e07f
--- /dev/null
+++ b/api/tests/opentrons/protocol_engine/state/test_command_history.py
@@ -0,0 +1,301 @@
+"""CommandHistory state store tests."""
+import pytest
+
+from opentrons.ordered_set import OrderedSet
+
+from opentrons.protocol_engine.errors.exceptions import CommandDoesNotExistError
+from opentrons.protocol_engine.state.command_history import CommandHistory, CommandEntry
+from opentrons.protocol_engine.commands import CommandIntent, CommandStatus
+
+from .command_fixtures import (
+ create_queued_command,
+)
+
+
+def create_queued_command_entry(
+ command_id: str = "command-id", index: int = 0
+) -> CommandEntry:
+ """Create a command entry for a queued command."""
+ return CommandEntry(create_queued_command(command_id=command_id), index)
+
+
+def create_fixit_command_entry(
+ command_id: str = "command-id", index: int = 0
+) -> CommandEntry:
+ """Create a command entry for a fixit command."""
+ return CommandEntry(
+ create_queued_command(command_id=command_id, intent=CommandIntent.FIXIT), index
+ )
+
+
+@pytest.fixture
+def command_history() -> CommandHistory:
+ """Instantiates a CommandHistory instance."""
+ return CommandHistory()
+
+
+def test_length(command_history: CommandHistory) -> None:
+ """It should return the length of the command history."""
+ assert command_history.length() == 0
+ command_history._add("0", create_queued_command_entry())
+ assert command_history.length() == 1
+
+
+def test_has(command_history: CommandHistory) -> None:
+ """It should return True if the command exists in the history, False otherwise."""
+ assert not command_history.has("0")
+ command_history._add("0", create_queued_command_entry())
+ assert command_history.has("0")
+
+
+def test_get(command_history: CommandHistory) -> None:
+ """It should return the command entry for the given ID."""
+ with pytest.raises(CommandDoesNotExistError):
+ command_history.get("0")
+ command_entry = create_queued_command_entry()
+ command_history._add("0", command_entry)
+ assert command_history.get("0") == command_entry
+
+
+def test_get_next(command_history: CommandHistory) -> None:
+ """It should return the next command entry after the command associated with the given ID."""
+ with pytest.raises(CommandDoesNotExistError):
+ command_history.get_next("0")
+ command_entry_1 = create_queued_command_entry()
+ command_entry_2 = create_queued_command_entry(index=1)
+ command_history._add("0", command_entry_1)
+ command_history._add("1", command_entry_2)
+ assert command_history.get_next("0") == command_entry_2
+ assert command_history.get_next("1") is None
+
+
+def test_get_prev(command_history: CommandHistory) -> None:
+ """It should return the previous command entry before the command associated with the given ID."""
+ with pytest.raises(CommandDoesNotExistError):
+ command_history.get_prev("0")
+ command_entry_1 = create_queued_command_entry()
+ command_entry_2 = create_queued_command_entry(index=1)
+ command_history._add("0", command_entry_1)
+ command_history._add("1", command_entry_2)
+ assert command_history.get_prev("0") is None
+ assert command_history.get_prev("1") == command_entry_1
+
+
+def test_get_if_present(command_history: CommandHistory) -> None:
+ """It should return the command entry for the given ID if it exists, None otherwise."""
+ assert command_history.get_if_present("0") is None
+ command_entry = create_queued_command_entry()
+ command_history._add("0", command_entry)
+ assert command_history.get_if_present("0") == command_entry
+
+
+def test_get_all_commands(command_history: CommandHistory) -> None:
+ """It should return a list of all commands."""
+ assert command_history.get_all_commands() == []
+ command_entry_1 = create_queued_command_entry()
+ command_entry_2 = create_queued_command_entry(index=1)
+ command_history._add("0", command_entry_1)
+ command_history._add("1", command_entry_2)
+ assert command_history.get_all_commands() == [
+ command_entry_1.command,
+ command_entry_2.command,
+ ]
+
+
+def test_get_all_ids(command_history: CommandHistory) -> None:
+ """It should return a list of all command IDs."""
+ assert command_history.get_all_ids() == []
+ command_entry_1 = create_queued_command_entry()
+ command_entry_2 = create_queued_command_entry(index=1)
+ command_history._add("0", command_entry_1)
+ command_history._add("1", command_entry_2)
+ assert command_history.get_all_ids() == ["0", "1"]
+
+
+def test_get_slice(command_history: CommandHistory) -> None:
+ """It should return a slice of commands."""
+ assert command_history.get_slice(0, 2) == []
+ command_entry_1 = create_queued_command_entry()
+ command_entry_2 = create_queued_command_entry(index=1)
+ command_entry_3 = create_queued_command_entry(index=2)
+ command_history._add("0", command_entry_1)
+ command_history._add("1", command_entry_2)
+ command_history._add("2", command_entry_3)
+ assert command_history.get_slice(1, 3) == [
+ command_entry_2.command,
+ command_entry_3.command,
+ ]
+
+
+def test_get_tail_command(command_history: CommandHistory) -> None:
+ """It should return the tail command."""
+ assert command_history.get_tail_command() is None
+ command_entry_1 = create_queued_command_entry()
+ command_entry_2 = create_queued_command_entry(index=1)
+ command_history._add("0", command_entry_1)
+ command_history._add("1", command_entry_2)
+ assert command_history.get_tail_command() == command_entry_2
+
+
+def test_get_recently_dequeued_command(command_history: CommandHistory) -> None:
+ """It should return the most recently dequeued command."""
+ assert command_history.get_terminal_command() is None
+ command_entry = create_queued_command_entry()
+ command_history._add("0", command_entry)
+ command_history._set_terminal_command_id("0")
+ assert command_history.get_terminal_command() == command_entry
+
+
+def test_get_running_command(command_history: CommandHistory) -> None:
+ """It should return the currently running command."""
+ assert command_history.get_running_command() is None
+ command_entry = create_queued_command_entry()
+ command_history._add("0", command_entry)
+ command_history._set_running_command_id("0")
+ assert command_history.get_running_command() == command_entry
+
+
+def test_get_queue_ids(command_history: CommandHistory) -> None:
+ """It should return the IDs of all commands in the queue."""
+ assert command_history.get_queue_ids() == OrderedSet()
+ command_history._add_to_queue("0")
+ command_history._add_to_queue("1")
+ assert command_history.get_queue_ids() == OrderedSet(["0", "1"])
+
+
+def test_get_setup_queue_ids(command_history: CommandHistory) -> None:
+ """It should return the IDs of all commands in the setup queue."""
+ assert command_history.get_setup_queue_ids() == OrderedSet()
+ command_history._add_to_setup_queue("0")
+ command_history._add_to_setup_queue("1")
+ assert command_history.get_setup_queue_ids() == OrderedSet(["0", "1"])
+
+
+def test_get_fixit_queue_ids(command_history: CommandHistory) -> None:
+ """It should return the IDs of all commands in the setup queue."""
+ assert command_history.get_fixit_queue_ids() == OrderedSet()
+ command_history._add_to_fixit_queue("0")
+ command_history._add_to_fixit_queue("1")
+ assert command_history.get_fixit_queue_ids() == OrderedSet(["0", "1"])
+
+
+def test_set_command_entry(command_history: CommandHistory) -> None:
+ """It should set the command entry for the given ID."""
+ command_entry = create_queued_command_entry()
+ command_history._add("0", command_entry)
+ assert command_history.get("0") == command_entry
+
+
+def test_set_recent_dequeued_command_id(command_history: CommandHistory) -> None:
+ """It should set the ID of the most recently dequeued command."""
+ command_entry = create_queued_command_entry()
+ command_history._add("0", command_entry)
+ command_history._set_terminal_command_id("0")
+ assert command_history.get_terminal_command() == command_entry
+
+
+def test_set_running_command_id(command_history: CommandHistory) -> None:
+ """It should set the ID of the currently running command."""
+ command_entry = create_queued_command_entry()
+ command_history._add("0", command_entry)
+ command_history._set_running_command_id("0")
+ assert command_history.get_running_command() == command_entry
+
+
+def test_set_fixit_running_command_id(command_history: CommandHistory) -> None:
+ """It should set the ID of the currently running fixit command."""
+ command_entry = create_queued_command()
+ command_history.set_command_queued(command_entry)
+ running_command = command_entry.copy(
+ update={
+ "status": CommandStatus.RUNNING,
+ }
+ )
+ command_history.set_command_running(running_command)
+ finished_command = command_entry.copy(
+ update={
+ "status": CommandStatus.SUCCEEDED,
+ }
+ )
+ command_history.set_command_succeeded(finished_command)
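+    # With the protocol command finished, queue and run a FIXIT-intent command.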
+ fixit_command_entry = create_queued_command(
+ command_id="fixit-id", intent=CommandIntent.FIXIT
+ )
+ command_history.set_command_queued(fixit_command_entry)
+ fixit_running_command = fixit_command_entry.copy(
+ update={
+ "status": CommandStatus.RUNNING,
+ }
+ )
+ command_history.set_command_running(fixit_running_command)
+ current_running_command = command_history.get_running_command()
+ assert current_running_command is not None
+ assert current_running_command.command == fixit_running_command
+ assert command_history.get_all_commands() == [
+ finished_command,
+ fixit_running_command,
+ ]
+
+
+def test_add_to_queue(command_history: CommandHistory) -> None:
+ """It should add the given ID to the queue."""
+ command_history._add_to_queue("0")
+ assert command_history.get_queue_ids() == OrderedSet(["0"])
+
+
+def test_add_to_setup_queue(command_history: CommandHistory) -> None:
+ """It should add the given ID to the setup queue."""
+ command_history._add_to_setup_queue("0")
+ assert command_history.get_setup_queue_ids() == OrderedSet(["0"])
+
+
+def test_add_to_fixit_queue(command_history: CommandHistory) -> None:
+ """It should add the given ID to the setup queue."""
+ fixit_command = create_queued_command(intent=CommandIntent.FIXIT)
+ command_history.set_command_queued(fixit_command)
+ assert command_history.get_fixit_queue_ids() == OrderedSet(["command-id"])
+
+
+def test_clear_queue(command_history: CommandHistory) -> None:
+ """It should clear all commands in the queue."""
+ command_history._add_to_queue("0")
+ command_history._add_to_queue("1")
+ command_history.clear_queue()
+ assert command_history.get_queue_ids() == OrderedSet()
+
+
+def test_clear_setup_queue(command_history: CommandHistory) -> None:
+ """It should clear all commands in the setup queue."""
+ command_history._add_to_setup_queue("0")
+ command_history._add_to_setup_queue("1")
+ command_history.clear_setup_queue()
+ assert command_history.get_setup_queue_ids() == OrderedSet()
+
+
+def test_clear_fixit_queue(command_history: CommandHistory) -> None:
+ """It should clear all commands in the setup queue."""
+ command_history.set_command_queued(
+ create_queued_command(command_id="0", intent=CommandIntent.FIXIT)
+ )
+ command_history.set_command_queued(
+ create_queued_command(command_id="1", intent=CommandIntent.FIXIT)
+ )
+ assert command_history.get_fixit_queue_ids() == OrderedSet(["0", "1"])
+ command_history.clear_fixit_queue()
+ assert command_history.get_fixit_queue_ids() == OrderedSet()
+
+
+def test_remove_id_from_queue(command_history: CommandHistory) -> None:
+ """It should remove the given ID from the queue."""
+ command_history._add_to_queue("0")
+ command_history._add_to_queue("1")
+ command_history._remove_queue_id("0")
+ assert command_history.get_queue_ids() == OrderedSet(["1"])
+
+
+def test_remove_id_from_setup_queue(command_history: CommandHistory) -> None:
+ """It should remove the given ID from the setup queue."""
+ command_history._add_to_setup_queue("0")
+ command_history._add_to_setup_queue("1")
+ command_history._remove_setup_queue_id("0")
+ assert command_history.get_setup_queue_ids() == OrderedSet(["1"])
diff --git a/api/tests/opentrons/protocol_engine/state/test_command_state.py b/api/tests/opentrons/protocol_engine/state/test_command_state.py
new file mode 100644
index 00000000000..01b9186ac9b
--- /dev/null
+++ b/api/tests/opentrons/protocol_engine/state/test_command_state.py
@@ -0,0 +1,501 @@
+"""Tests for the CommandStore+CommandState+CommandView trifecta.
+
+The trifecta is tested here as a single unit, treating CommandState as a private
+implementation detail.
+"""
+
+from datetime import datetime
+from unittest.mock import sentinel
+
+import pytest
+
+from opentrons_shared_data.errors import ErrorCodes, PythonException
+
+from opentrons.ordered_set import OrderedSet
+from opentrons.protocol_engine import actions, commands, errors
+from opentrons.protocol_engine.error_recovery_policy import ErrorRecoveryType
+from opentrons.protocol_engine.errors.error_occurrence import ErrorOccurrence
+from opentrons.protocol_engine.errors.exceptions import EStopActivatedError
+from opentrons.protocol_engine.notes.notes import CommandNote
+from opentrons.protocol_engine.state.commands import (
+ CommandStore,
+ CommandView,
+)
+from opentrons.protocol_engine.state.config import Config
+from opentrons.protocol_engine.types import DeckType, EngineStatus
+
+
+def _make_config() -> Config:
+ return Config(
+ # Choice of robot and deck type is arbitrary.
+ robot_type="OT-2 Standard",
+ deck_type=DeckType.OT2_STANDARD,
+ )
+
+
+@pytest.mark.parametrize("error_recovery_type", ErrorRecoveryType)
+def test_command_failure(error_recovery_type: ErrorRecoveryType) -> None:
+ """It should store an error and mark the command if it fails."""
+ subject = CommandStore(is_door_open=False, config=_make_config())
+ subject_view = CommandView(subject.state)
+
+ command_id = "command-id"
+ command_key = "command-key"
+ created_at = datetime(year=2021, month=1, day=1)
+ started_at = datetime(year=2022, month=2, day=2)
+ failed_at = datetime(year=2023, month=3, day=3)
+ error_id = "error-id"
+ notes = [
+ CommandNote(
+ noteKind="noteKind",
+ shortMessage="shortMessage",
+ longMessage="longMessage",
+ source="source",
+ )
+ ]
+
+ params = commands.CommentParams(message="No comment.")
+
+ subject.handle_action(
+ actions.QueueCommandAction(
+ command_id=command_id,
+ created_at=created_at,
+ request=commands.CommentCreate(params=params, key=command_key),
+ request_hash=None,
+ )
+ )
+ subject.handle_action(
+ actions.RunCommandAction(command_id=command_id, started_at=started_at)
+ )
+ subject.handle_action(
+ actions.FailCommandAction(
+ command_id=command_id,
+ running_command=subject_view.get(command_id),
+ error_id=error_id,
+ failed_at=failed_at,
+ error=errors.ProtocolEngineError(message="oh no"),
+ notes=notes,
+ type=error_recovery_type,
+ )
+ )
+
+ expected_error_occurrence = errors.ErrorOccurrence(
+ id=error_id,
+ errorType="ProtocolEngineError",
+ createdAt=failed_at,
+ detail="oh no",
+ errorCode=ErrorCodes.GENERAL_ERROR.value.code,
+ )
+ expected_failed_command = commands.Comment(
+ id=command_id,
+ key=command_key,
+ commandType="comment",
+ createdAt=created_at,
+ startedAt=started_at,
+ completedAt=failed_at,
+ status=commands.CommandStatus.FAILED,
+ params=params,
+ result=None,
+ error=expected_error_occurrence,
+ notes=notes,
+ )
+
+ assert subject_view.get("command-id") == expected_failed_command
+
+
+def test_command_failure_clears_queues() -> None:
+ """It should clear the command queue on command failure."""
+ subject = CommandStore(config=_make_config(), is_door_open=False)
+ subject_view = CommandView(subject.state)
+
+ queue_1 = actions.QueueCommandAction(
+ request=commands.WaitForResumeCreate(
+ params=commands.WaitForResumeParams(), key="command-key-1"
+ ),
+ request_hash=None,
+ created_at=datetime(year=2021, month=1, day=1),
+ command_id="command-id-1",
+ )
+ subject.handle_action(queue_1)
+ queue_2 = actions.QueueCommandAction(
+ request=commands.WaitForResumeCreate(
+ params=commands.WaitForResumeParams(), key="command-key-2"
+ ),
+ request_hash=None,
+ created_at=datetime(year=2021, month=1, day=1),
+ command_id="command-id-2",
+ )
+ subject.handle_action(queue_2)
+
+ run_1 = actions.RunCommandAction(
+ command_id="command-id-1",
+ started_at=datetime(year=2022, month=2, day=2),
+ )
+ subject.handle_action(run_1)
+ fail_1 = actions.FailCommandAction(
+ command_id="command-id-1",
+ running_command=subject_view.get("command-id-1"),
+ error_id="error-id",
+ failed_at=datetime(year=2023, month=3, day=3),
+ error=errors.ProtocolEngineError(message="oh no"),
+ notes=[
+ CommandNote(
+ noteKind="noteKind",
+ shortMessage="shortMessage",
+ longMessage="longMessage",
+ source="source",
+ )
+ ],
+ type=ErrorRecoveryType.FAIL_RUN,
+ )
+ subject.handle_action(fail_1)
+
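+    # Failing command 1 with FAIL_RUN should also mark queued command 2 as failed and
+    # leave nothing to execute.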
+ assert [(c.id, c.status) for c in subject_view.get_all()] == [
+ ("command-id-1", commands.CommandStatus.FAILED),
+ ("command-id-2", commands.CommandStatus.FAILED),
+ ]
+ assert subject_view.get_running_command_id() is None
+ assert subject_view.get_queue_ids() == OrderedSet()
+ assert subject_view.get_next_to_execute() is None
+
+
+def test_setup_command_failure_only_clears_setup_command_queue() -> None:
+ """It should clear only the setup command queue for a failed setup command.
+
+ This test queues up a non-setup command followed by two setup commands,
+ then runs and fails the first setup command.
+ """
+ subject = CommandStore(is_door_open=False, config=_make_config())
+ subject_view = CommandView(subject.state)
+
+ queue_1 = actions.QueueCommandAction(
+ request=commands.WaitForResumeCreate(
+ params=commands.WaitForResumeParams(), key="command-key-1"
+ ),
+ request_hash=None,
+ created_at=datetime(year=2021, month=1, day=1),
+ command_id="command-id-1",
+ )
+ subject.handle_action(queue_1)
+ queue_2_setup = actions.QueueCommandAction(
+ request=commands.WaitForResumeCreate(
+ params=commands.WaitForResumeParams(),
+ intent=commands.CommandIntent.SETUP,
+ key="command-key-2",
+ ),
+ request_hash=None,
+ created_at=datetime(year=2021, month=1, day=1),
+ command_id="command-id-2",
+ )
+ subject.handle_action(queue_2_setup)
+ queue_3_setup = actions.QueueCommandAction(
+ request=commands.WaitForResumeCreate(
+ params=commands.WaitForResumeParams(),
+ intent=commands.CommandIntent.SETUP,
+ key="command-key-3",
+ ),
+ request_hash=None,
+ created_at=datetime(year=2021, month=1, day=1),
+ command_id="command-id-3",
+ )
+ subject.handle_action(queue_3_setup)
+
+ run_2_setup = actions.RunCommandAction(
+ command_id="command-id-2",
+ started_at=datetime(year=2022, month=2, day=2),
+ )
+ subject.handle_action(run_2_setup)
+ fail_2_setup = actions.FailCommandAction(
+ command_id="command-id-2",
+ running_command=subject_view.get("command-id-2"),
+ error_id="error-id",
+ failed_at=datetime(year=2023, month=3, day=3),
+ error=errors.ProtocolEngineError(message="oh no"),
+ notes=[
+ CommandNote(
+ noteKind="noteKind",
+ shortMessage="shortMessage",
+ longMessage="longMessage",
+ source="source",
+ )
+ ],
+ type=ErrorRecoveryType.FAIL_RUN,
+ )
+ subject.handle_action(fail_2_setup)
+
+ assert [(c.id, c.status) for c in subject_view.get_all()] == [
+ ("command-id-1", commands.CommandStatus.QUEUED),
+ ("command-id-2", commands.CommandStatus.FAILED),
+ ("command-id-3", commands.CommandStatus.FAILED),
+ ]
+ assert subject_view.get_running_command_id() is None
+
+ subject.handle_action(
+ actions.PlayAction(requested_at=datetime.now(), deck_configuration=None)
+ )
+ assert subject_view.get_next_to_execute() == "command-id-1"
+
+
+def test_nonfatal_command_failure() -> None:
+ """Test the command queue if a command fails recoverably.
+
+ Commands that were after the failed command in the queue should be left in
+ the queue.
+
+ The queue status should be "awaiting-recovery."
+ """
+ subject = CommandStore(is_door_open=False, config=_make_config())
+ subject_view = CommandView(subject.state)
+
+ queue_1 = actions.QueueCommandAction(
+ request=commands.WaitForResumeCreate(
+ params=commands.WaitForResumeParams(), key="command-key-1"
+ ),
+ request_hash=None,
+ created_at=datetime(year=2021, month=1, day=1),
+ command_id="command-id-1",
+ )
+ subject.handle_action(queue_1)
+ queue_2 = actions.QueueCommandAction(
+ request=commands.WaitForResumeCreate(
+ params=commands.WaitForResumeParams(), key="command-key-2"
+ ),
+ request_hash=None,
+ created_at=datetime(year=2021, month=1, day=1),
+ command_id="command-id-2",
+ )
+ subject.handle_action(queue_2)
+
+ run_1 = actions.RunCommandAction(
+ command_id="command-id-1",
+ started_at=datetime(year=2022, month=2, day=2),
+ )
+ subject.handle_action(run_1)
+ fail_1 = actions.FailCommandAction(
+ command_id="command-id-1",
+ running_command=subject_view.get("command-id-1"),
+ error_id="error-id",
+ failed_at=datetime(year=2023, month=3, day=3),
+ error=errors.ProtocolEngineError(message="oh no"),
+ notes=[
+ CommandNote(
+ noteKind="noteKind",
+ shortMessage="shortMessage",
+ longMessage="longMessage",
+ source="source",
+ )
+ ],
+ type=ErrorRecoveryType.WAIT_FOR_RECOVERY,
+ )
+ subject.handle_action(fail_1)
+
+ assert [(c.id, c.status) for c in subject_view.get_all()] == [
+ ("command-id-1", commands.CommandStatus.FAILED),
+ ("command-id-2", commands.CommandStatus.QUEUED),
+ ]
+ assert subject_view.get_running_command_id() is None
+
+
+def test_error_recovery_type_tracking() -> None:
+ """It should keep track of each failed command's error recovery type."""
+ subject = CommandStore(config=_make_config(), is_door_open=False)
+
+ subject.handle_action(
+ actions.QueueCommandAction(
+ command_id="c1",
+ created_at=datetime.now(),
+ request=commands.CommentCreate(
+ params=commands.CommentParams(message="yeehaw"),
+ ),
+ request_hash=None,
+ )
+ )
+ subject.handle_action(
+ actions.QueueCommandAction(
+ command_id="c2",
+ created_at=datetime.now(),
+ request=commands.CommentCreate(
+ params=commands.CommentParams(message="yeehaw"),
+ ),
+ request_hash=None,
+ )
+ )
+ subject.handle_action(
+ actions.RunCommandAction(command_id="c1", started_at=datetime.now())
+ )
+ running_command_1 = CommandView(subject.state).get("c1")
+ subject.handle_action(
+ actions.FailCommandAction(
+ command_id="c1",
+ running_command=running_command_1,
+ error_id="c1-error",
+ failed_at=datetime.now(),
+ error=PythonException(RuntimeError("new sheriff in town")),
+ notes=[],
+ type=ErrorRecoveryType.WAIT_FOR_RECOVERY,
+ )
+ )
+ subject.handle_action(
+ actions.RunCommandAction(command_id="c2", started_at=datetime.now())
+ )
+ running_command_2 = CommandView(subject.state).get("c2")
+ subject.handle_action(
+ actions.FailCommandAction(
+ command_id="c2",
+ running_command=running_command_2,
+ error_id="c2-error",
+ failed_at=datetime.now(),
+ error=PythonException(RuntimeError("new sheriff in town")),
+ notes=[],
+ type=ErrorRecoveryType.FAIL_RUN,
+ )
+ )
+
+ view = CommandView(subject.state)
+ assert view.get_error_recovery_type("c1") == ErrorRecoveryType.WAIT_FOR_RECOVERY
+ assert view.get_error_recovery_type("c2") == ErrorRecoveryType.FAIL_RUN
+
+
+def test_get_recovery_in_progress_for_command() -> None:
+ """It should return whether error recovery is in progress for the given command."""
+ subject = CommandStore(config=_make_config(), is_door_open=False)
+ subject_view = CommandView(subject.state)
+
+ queue_1 = actions.QueueCommandAction(
+ "c1",
+ created_at=datetime.now(),
+ request=commands.CommentCreate(params=commands.CommentParams(message="")),
+ request_hash=None,
+ )
+ subject.handle_action(queue_1)
+ run_1 = actions.RunCommandAction(command_id="c1", started_at=datetime.now())
+ subject.handle_action(run_1)
+ fail_1 = actions.FailCommandAction(
+ command_id="c1",
+ error_id="c1-error",
+ failed_at=datetime.now(),
+ error=PythonException(RuntimeError()),
+ notes=[],
+ type=ErrorRecoveryType.WAIT_FOR_RECOVERY,
+ running_command=subject_view.get("c1"),
+ )
+ subject.handle_action(fail_1)
+
+ # c1 failed recoverably and we're currently recovering from it.
+ assert subject_view.get_recovery_in_progress_for_command("c1")
+
+ resume_from_1_recovery = actions.ResumeFromRecoveryAction()
+ subject.handle_action(resume_from_1_recovery)
+
+ # c1 failed recoverably, but we've already completed its recovery.
+ assert not subject_view.get_recovery_in_progress_for_command("c1")
+
+ queue_2 = actions.QueueCommandAction(
+ "c2",
+ created_at=datetime.now(),
+ request=commands.CommentCreate(params=commands.CommentParams(message="")),
+ request_hash=None,
+ )
+ subject.handle_action(queue_2)
+ run_2 = actions.RunCommandAction(command_id="c2", started_at=datetime.now())
+ subject.handle_action(run_2)
+ fail_2 = actions.FailCommandAction(
+ command_id="c2",
+ error_id="c2-error",
+ failed_at=datetime.now(),
+ error=PythonException(RuntimeError()),
+ notes=[],
+ type=ErrorRecoveryType.WAIT_FOR_RECOVERY,
+ running_command=subject_view.get("c2"),
+ )
+ subject.handle_action(fail_2)
+
+ # c2 failed recoverably and we're currently recovering from it.
+ assert subject_view.get_recovery_in_progress_for_command("c2")
+ # ...and that means we're *not* currently recovering from c1,
+ # even though it failed recoverably before.
+ assert not subject_view.get_recovery_in_progress_for_command("c1")
+
+ resume_from_2_recovery = actions.ResumeFromRecoveryAction()
+ subject.handle_action(resume_from_2_recovery)
+ queue_3 = actions.QueueCommandAction(
+ "c3",
+ created_at=datetime.now(),
+ request=commands.CommentCreate(params=commands.CommentParams(message="")),
+ request_hash=None,
+ )
+ subject.handle_action(queue_3)
+ run_3 = actions.RunCommandAction(command_id="c3", started_at=datetime.now())
+ subject.handle_action(run_3)
+ fail_3 = actions.FailCommandAction(
+ command_id="c3",
+ error_id="c3-error",
+ failed_at=datetime.now(),
+ error=PythonException(RuntimeError()),
+ notes=[],
+ type=ErrorRecoveryType.FAIL_RUN,
+ running_command=subject_view.get("c3"),
+ )
+ subject.handle_action(fail_3)
+
+ # c3 failed, but not recoverably.
+ assert not subject_view.get_recovery_in_progress_for_command("c2")
+
+
+def test_final_state_after_estop() -> None:
+ """Test the final state of the run after it's E-stopped."""
+ subject = CommandStore(config=_make_config(), is_door_open=False)
+ subject_view = CommandView(subject.state)
+
+ error_details = actions.FinishErrorDetails(
+ error=EStopActivatedError(), error_id="error-id", created_at=datetime.now()
+ )
+ expected_error_occurrence = ErrorOccurrence(
+ id=error_details.error_id,
+ createdAt=error_details.created_at,
+ errorCode=ErrorCodes.E_STOP_ACTIVATED.value.code,
+ errorType="EStopActivatedError",
+ detail="E-stop activated.",
+ )
+
+ subject.handle_action(actions.StopAction(from_estop=True))
+ subject.handle_action(actions.FinishAction(error_details=error_details))
+ subject.handle_action(
+ actions.HardwareStoppedAction(
+ completed_at=sentinel.hardware_stopped_action_completed_at,
+ finish_error_details=None,
+ )
+ )
+
+ assert subject_view.get_status() == EngineStatus.FAILED
+ assert subject_view.get_error() == expected_error_occurrence
+
+
+def test_final_state_after_stop() -> None:
+ """Test the final state of the run after it's stopped."""
+ subject = CommandStore(config=_make_config(), is_door_open=False)
+ subject_view = CommandView(subject.state)
+
+ subject.handle_action(actions.StopAction())
+ subject.handle_action(
+ actions.FinishAction(
+ error_details=actions.FinishErrorDetails(
+ error=RuntimeError(
+ "uh oh I was a command and then I got cancelled because someone"
+ " stopped the run, and now I'm raising this exception because"
+ " of that. Woe is me"
+ ),
+ error_id="error-id",
+ created_at=datetime.now(),
+ )
+ )
+ )
+ subject.handle_action(
+ actions.HardwareStoppedAction(
+ completed_at=sentinel.hardware_stopped_action_completed_at,
+ finish_error_details=None,
+ )
+ )
+
+ assert subject_view.get_status() == EngineStatus.STOPPED
+ assert subject_view.get_error() is None
diff --git a/api/tests/opentrons/protocol_engine/state/test_command_store.py b/api/tests/opentrons/protocol_engine/state/test_command_store.py
deleted file mode 100644
index 6a53ce46a61..00000000000
--- a/api/tests/opentrons/protocol_engine/state/test_command_store.py
+++ /dev/null
@@ -1,1174 +0,0 @@
-"""Tests for the command lifecycle state."""
-import pytest
-from collections import OrderedDict
-from datetime import datetime
-from typing import NamedTuple, Type
-
-from opentrons_shared_data.errors import ErrorCodes
-from opentrons.ordered_set import OrderedSet
-from opentrons_shared_data.pipette.dev_types import PipetteNameType
-from opentrons.types import MountType, DeckSlotName
-from opentrons.hardware_control.types import DoorState
-
-from opentrons.protocol_engine import commands, errors
-from opentrons.protocol_engine.types import DeckSlotLocation, DeckType, WellLocation
-from opentrons.protocol_engine.state import Config
-from opentrons.protocol_engine.state.commands import (
- CommandState,
- CommandStore,
- CommandEntry,
- RunResult,
- QueueStatus,
-)
-
-from opentrons.protocol_engine.actions import (
- QueueCommandAction,
- UpdateCommandAction,
- FailCommandAction,
- PlayAction,
- PauseAction,
- PauseSource,
- FinishAction,
- FinishErrorDetails,
- StopAction,
- HardwareStoppedAction,
- DoorChangeAction,
-)
-
-from .command_fixtures import (
- create_queued_command,
- create_running_command,
- create_succeeded_command,
- create_failed_command,
-)
-
-
-def _make_config(block_on_door_open: bool = False) -> Config:
- return Config(
- block_on_door_open=block_on_door_open,
- # Choice of robot and deck type is arbitrary.
- robot_type="OT-2 Standard",
- deck_type=DeckType.OT2_STANDARD,
- )
-
-
-@pytest.mark.parametrize(
- ("is_door_open", "config", "expected_is_door_blocking"),
- [
- (False, _make_config(), False),
- (True, _make_config(), False),
- (False, _make_config(block_on_door_open=True), False),
- (True, _make_config(block_on_door_open=True), True),
- ],
-)
-def test_initial_state(
- is_door_open: bool,
- config: Config,
- expected_is_door_blocking: bool,
-) -> None:
- """It should set the initial state."""
- subject = CommandStore(is_door_open=is_door_open, config=config)
-
- assert subject.state == CommandState(
- queue_status=QueueStatus.SETUP,
- run_completed_at=None,
- run_started_at=None,
- is_door_blocking=expected_is_door_blocking,
- run_result=None,
- running_command_id=None,
- queued_command_ids=OrderedSet(),
- queued_setup_command_ids=OrderedSet(),
- all_command_ids=[],
- commands_by_id=OrderedDict(),
- run_error=None,
- finish_error=None,
- latest_command_hash=None,
- stopped_by_estop=False,
- )
-
-
-class QueueCommandSpec(NamedTuple):
- """Test data for the QueueCommandAction."""
-
- command_request: commands.CommandCreate
- expected_cls: Type[commands.Command]
- created_at: datetime = datetime(year=2021, month=1, day=1)
- command_id: str = "command-id"
- command_key: str = "command-key"
-
-
-@pytest.mark.parametrize(
- QueueCommandSpec._fields,
- [
- QueueCommandSpec(
- command_request=commands.AspirateCreate(
- params=commands.AspirateParams(
- pipetteId="pipette-id",
- labwareId="labware-id",
- wellName="well-name",
- volume=42,
- flowRate=1.23,
- wellLocation=WellLocation(),
- ),
- key="command-key",
- ),
- expected_cls=commands.Aspirate,
- ),
- QueueCommandSpec(
- command_request=commands.DispenseCreate(
- params=commands.DispenseParams(
- pipetteId="pipette-id",
- labwareId="labware-id",
- wellName="well-name",
- volume=42,
- flowRate=1.23,
- wellLocation=WellLocation(),
- ),
- ),
- expected_cls=commands.Dispense,
- # test when key prop is missing
- command_key="command-id",
- ),
- QueueCommandSpec(
- command_request=commands.DropTipCreate(
- params=commands.DropTipParams(
- pipetteId="pipette-id",
- labwareId="labware-id",
- wellName="well-name",
- ),
- key="command-key",
- ),
- expected_cls=commands.DropTip,
- ),
- QueueCommandSpec(
- command_request=commands.LoadLabwareCreate(
- params=commands.LoadLabwareParams(
- location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
- loadName="load-name",
- namespace="namespace",
- version=42,
- ),
- key="command-key",
- ),
- expected_cls=commands.LoadLabware,
- ),
- QueueCommandSpec(
- command_request=commands.LoadPipetteCreate(
- params=commands.LoadPipetteParams(
- mount=MountType.LEFT,
- pipetteName=PipetteNameType.P300_SINGLE,
- ),
- key="command-key",
- ),
- expected_cls=commands.LoadPipette,
- ),
- QueueCommandSpec(
- command_request=commands.PickUpTipCreate(
- params=commands.PickUpTipParams(
- pipetteId="pipette-id",
- labwareId="labware-id",
- wellName="well-name",
- ),
- key="command-key",
- ),
- expected_cls=commands.PickUpTip,
- ),
- QueueCommandSpec(
- command_request=commands.MoveToWellCreate(
- params=commands.MoveToWellParams(
- pipetteId="pipette-id",
- labwareId="labware-id",
- wellName="well-name",
- ),
- key="command-key",
- ),
- expected_cls=commands.MoveToWell,
- ),
- QueueCommandSpec(
- command_request=commands.WaitForResumeCreate(
- params=commands.WaitForResumeParams(message="hello world"),
- key="command-key",
- ),
- expected_cls=commands.WaitForResume,
- ),
- QueueCommandSpec(
- # a WaitForResumeCreate with `pause` should be mapped to
- # a WaitForResume with `commandType="waitForResume"`
- command_request=commands.WaitForResumeCreate(
- commandType="pause",
- params=commands.WaitForResumeParams(message="hello world"),
- key="command-key",
- ),
- expected_cls=commands.WaitForResume,
- ),
- ],
-)
-def test_command_store_queues_commands(
- command_request: commands.CommandCreate,
- expected_cls: Type[commands.Command],
- created_at: datetime,
- command_id: str,
- command_key: str,
-) -> None:
- """It should add a command to the store."""
- action = QueueCommandAction(
- request=command_request,
- request_hash=None,
- created_at=created_at,
- command_id=command_id,
- )
- expected_command = expected_cls(
- id=command_id,
- key=command_key,
- createdAt=created_at,
- status=commands.CommandStatus.QUEUED,
- params=command_request.params, # type: ignore[arg-type]
- )
-
- subject = CommandStore(is_door_open=False, config=_make_config())
- subject.handle_action(action)
-
- assert subject.state.commands_by_id == {
- "command-id": CommandEntry(index=0, command=expected_command),
- }
-
- assert subject.state.all_command_ids == ["command-id"]
- assert subject.state.queued_command_ids == OrderedSet(["command-id"])
-
-
-def test_command_queue_with_hash() -> None:
- """It should queue a command with a command hash and no explicit key."""
- create = commands.WaitForResumeCreate(
- params=commands.WaitForResumeParams(message="hello world"),
- )
-
- subject = CommandStore(is_door_open=False, config=_make_config())
- subject.handle_action(
- QueueCommandAction(
- request=create,
- request_hash="abc123",
- created_at=datetime(year=2021, month=1, day=1),
- command_id="command-id-1",
- )
- )
-
- assert subject.state.commands_by_id["command-id-1"].command.key == "abc123"
- assert subject.state.latest_command_hash == "abc123"
-
- subject.handle_action(
- QueueCommandAction(
- request=create,
- request_hash="def456",
- created_at=datetime(year=2021, month=1, day=1),
- command_id="command-id-2",
- )
- )
-
- assert subject.state.latest_command_hash == "def456"
-
-
-def test_command_queue_and_unqueue() -> None:
- """It should queue on QueueCommandAction and dequeue on UpdateCommandAction."""
- queue_1 = QueueCommandAction(
- request=commands.WaitForResumeCreate(params=commands.WaitForResumeParams()),
- request_hash=None,
- created_at=datetime(year=2021, month=1, day=1),
- command_id="command-id-1",
- )
- queue_2 = QueueCommandAction(
- request=commands.WaitForResumeCreate(params=commands.WaitForResumeParams()),
- request_hash=None,
- created_at=datetime(year=2022, month=2, day=2),
- command_id="command-id-2",
- )
- update_1 = UpdateCommandAction(
- private_result=None,
- command=create_running_command(command_id="command-id-1"),
- )
- update_2 = UpdateCommandAction(
- private_result=None,
- command=create_running_command(command_id="command-id-2"),
- )
-
- subject = CommandStore(is_door_open=False, config=_make_config())
-
- subject.handle_action(queue_1)
- assert subject.state.queued_command_ids == OrderedSet(["command-id-1"])
-
- subject.handle_action(queue_2)
- assert subject.state.queued_command_ids == OrderedSet(
- ["command-id-1", "command-id-2"]
- )
-
- subject.handle_action(update_2)
- assert subject.state.queued_command_ids == OrderedSet(["command-id-1"])
-
- subject.handle_action(update_1)
- assert subject.state.queued_command_ids == OrderedSet()
-
-
-def test_setup_command_queue_and_unqueue() -> None:
- """It should queue and dequeue on setup commands."""
- queue_1 = QueueCommandAction(
- request=commands.WaitForResumeCreate(
- params=commands.WaitForResumeParams(),
- intent=commands.CommandIntent.SETUP,
- ),
- request_hash=None,
- created_at=datetime(year=2021, month=1, day=1),
- command_id="command-id-1",
- )
- queue_2 = QueueCommandAction(
- request=commands.WaitForResumeCreate(
- params=commands.WaitForResumeParams(),
- intent=commands.CommandIntent.SETUP,
- ),
- request_hash=None,
- created_at=datetime(year=2022, month=2, day=2),
- command_id="command-id-2",
- )
- update_1 = UpdateCommandAction(
- private_result=None,
- command=create_running_command(command_id="command-id-1"),
- )
- update_2 = UpdateCommandAction(
- private_result=None,
- command=create_running_command(command_id="command-id-2"),
- )
-
- subject = CommandStore(is_door_open=False, config=_make_config())
-
- subject.handle_action(queue_1)
- assert subject.state.queued_setup_command_ids == OrderedSet(["command-id-1"])
-
- subject.handle_action(queue_2)
- assert subject.state.queued_setup_command_ids == OrderedSet(
- ["command-id-1", "command-id-2"]
- )
-
- subject.handle_action(update_2)
- assert subject.state.queued_setup_command_ids == OrderedSet(["command-id-1"])
-
- subject.handle_action(update_1)
- assert subject.state.queued_setup_command_ids == OrderedSet()
-
-
-def test_setup_queue_action_updates_command_intent() -> None:
- """It should update command source correctly."""
- queue_cmd = QueueCommandAction(
- request=commands.WaitForResumeCreate(
- params=commands.WaitForResumeParams(),
- intent=commands.CommandIntent.SETUP,
- key="command-key-1",
- ),
- request_hash=None,
- created_at=datetime(year=2021, month=1, day=1),
- command_id="command-id-1",
- )
-
- expected_pause_cmd = commands.WaitForResume(
- id="command-id-1",
- key="command-key-1",
- createdAt=datetime(year=2021, month=1, day=1),
- params=commands.WaitForResumeParams(),
- status=commands.CommandStatus.QUEUED,
- intent=commands.CommandIntent.SETUP,
- )
-
- subject = CommandStore(is_door_open=False, config=_make_config())
-
- subject.handle_action(queue_cmd)
- assert subject.state.commands_by_id["command-id-1"] == CommandEntry(
- index=0, command=expected_pause_cmd
- )
-
-
-def test_running_command_id() -> None:
- """It should update the running command ID through a command's lifecycle."""
- queue = QueueCommandAction(
- request=commands.WaitForResumeCreate(params=commands.WaitForResumeParams()),
- request_hash=None,
- created_at=datetime(year=2021, month=1, day=1),
- command_id="command-id-1",
- )
- running_update = UpdateCommandAction(
- private_result=None,
- command=create_running_command(command_id="command-id-1"),
- )
- completed_update = UpdateCommandAction(
- private_result=None,
- command=create_succeeded_command(command_id="command-id-1"),
- )
-
- subject = CommandStore(is_door_open=False, config=_make_config())
-
- subject.handle_action(queue)
- assert subject.state.running_command_id is None
-
- subject.handle_action(running_update)
- assert subject.state.running_command_id == "command-id-1"
-
- subject.handle_action(completed_update)
- assert subject.state.running_command_id is None
-
-
-def test_running_command_no_queue() -> None:
- """It should add a running command to state, even if there was no queue action."""
- running_update = UpdateCommandAction(
- private_result=None,
- command=create_running_command(command_id="command-id-1"),
- )
- completed_update = UpdateCommandAction(
- private_result=None,
- command=create_succeeded_command(command_id="command-id-1"),
- )
-
- subject = CommandStore(is_door_open=False, config=_make_config())
-
- subject.handle_action(running_update)
- assert subject.state.all_command_ids == ["command-id-1"]
- assert subject.state.running_command_id == "command-id-1"
-
- subject.handle_action(completed_update)
- assert subject.state.all_command_ids == ["command-id-1"]
- assert subject.state.running_command_id is None
-
-
-def test_command_failure_clears_queues() -> None:
- """It should clear the command queue on command failure."""
- queue_1 = QueueCommandAction(
- request=commands.WaitForResumeCreate(
- params=commands.WaitForResumeParams(), key="command-key-1"
- ),
- request_hash=None,
- created_at=datetime(year=2021, month=1, day=1),
- command_id="command-id-1",
- )
- queue_2 = QueueCommandAction(
- request=commands.WaitForResumeCreate(
- params=commands.WaitForResumeParams(), key="command-key-2"
- ),
- request_hash=None,
- created_at=datetime(year=2021, month=1, day=1),
- command_id="command-id-2",
- )
- running_1 = UpdateCommandAction(
- private_result=None,
- command=commands.WaitForResume(
- id="command-id-1",
- key="command-key-1",
- createdAt=datetime(year=2021, month=1, day=1),
- startedAt=datetime(year=2022, month=2, day=2),
- params=commands.WaitForResumeParams(),
- status=commands.CommandStatus.RUNNING,
- ),
- )
- fail_1 = FailCommandAction(
- command_id="command-id-1",
- error_id="error-id",
- failed_at=datetime(year=2023, month=3, day=3),
- error=errors.ProtocolEngineError(message="oh no"),
- )
-
- expected_failed_1 = commands.WaitForResume(
- id="command-id-1",
- key="command-key-1",
- error=errors.ErrorOccurrence(
- id="error-id",
- createdAt=datetime(year=2023, month=3, day=3),
- errorCode=ErrorCodes.GENERAL_ERROR.value.code,
- errorType="ProtocolEngineError",
- detail="oh no",
- ),
- createdAt=datetime(year=2021, month=1, day=1),
- startedAt=datetime(year=2022, month=2, day=2),
- completedAt=datetime(year=2023, month=3, day=3),
- params=commands.WaitForResumeParams(),
- status=commands.CommandStatus.FAILED,
- )
- expected_failed_2 = commands.WaitForResume(
- id="command-id-2",
- key="command-key-2",
- error=None,
- createdAt=datetime(year=2021, month=1, day=1),
- completedAt=datetime(year=2023, month=3, day=3),
- params=commands.WaitForResumeParams(),
- status=commands.CommandStatus.FAILED,
- )
-
- subject = CommandStore(is_door_open=False, config=_make_config())
-
- subject.handle_action(queue_1)
- subject.handle_action(queue_2)
- subject.handle_action(running_1)
- subject.handle_action(fail_1)
-
- assert subject.state.running_command_id is None
- assert subject.state.queued_command_ids == OrderedSet()
- assert subject.state.all_command_ids == ["command-id-1", "command-id-2"]
- assert subject.state.commands_by_id == {
- "command-id-1": CommandEntry(index=0, command=expected_failed_1),
- "command-id-2": CommandEntry(index=1, command=expected_failed_2),
- }
-
-
-def test_setup_command_failure_only_clears_setup_command_queue() -> None:
- """It should clear only the setup command queue for a failed setup command.
-
- This test queues up a non-setup command followed by two setup commands,
- then runs and fails the first setup command, checking only the setup queue is cleared.
- """
- cmd_1_non_setup = commands.WaitForResume(
- id="command-id-1",
- key="command-key-1",
- createdAt=datetime(year=2021, month=1, day=1),
- params=commands.WaitForResumeParams(),
- status=commands.CommandStatus.QUEUED,
- )
- queue_action_1_non_setup = QueueCommandAction(
- request=commands.WaitForResumeCreate(
- params=cmd_1_non_setup.params, key="command-key-1"
- ),
- request_hash=None,
- created_at=datetime(year=2021, month=1, day=1),
- command_id="command-id-1",
- )
- queue_action_2_setup = QueueCommandAction(
- request=commands.WaitForResumeCreate(
- params=commands.WaitForResumeParams(),
- intent=commands.CommandIntent.SETUP,
- key="command-key-2",
- ),
- request_hash=None,
- created_at=datetime(year=2021, month=1, day=1),
- command_id="command-id-2",
- )
- queue_action_3_setup = QueueCommandAction(
- request=commands.WaitForResumeCreate(
- params=commands.WaitForResumeParams(),
- intent=commands.CommandIntent.SETUP,
- key="command-key-3",
- ),
- request_hash=None,
- created_at=datetime(year=2021, month=1, day=1),
- command_id="command-id-3",
- )
-
- running_cmd_2 = UpdateCommandAction(
- private_result=None,
- command=commands.WaitForResume(
- id="command-id-2",
- key="command-key-2",
- createdAt=datetime(year=2021, month=1, day=1),
- startedAt=datetime(year=2022, month=2, day=2),
- params=commands.WaitForResumeParams(),
- status=commands.CommandStatus.RUNNING,
- intent=commands.CommandIntent.SETUP,
- ),
- )
- failed_action_cmd_2 = FailCommandAction(
- command_id="command-id-2",
- error_id="error-id",
- failed_at=datetime(year=2023, month=3, day=3),
- error=errors.ProtocolEngineError(message="oh no"),
- )
- expected_failed_cmd_2 = commands.WaitForResume(
- id="command-id-2",
- key="command-key-2",
- error=errors.ErrorOccurrence(
- id="error-id",
- createdAt=datetime(year=2023, month=3, day=3),
- errorType="ProtocolEngineError",
- detail="oh no",
- errorCode=ErrorCodes.GENERAL_ERROR.value.code,
- ),
- createdAt=datetime(year=2021, month=1, day=1),
- startedAt=datetime(year=2022, month=2, day=2),
- completedAt=datetime(year=2023, month=3, day=3),
- params=commands.WaitForResumeParams(),
- status=commands.CommandStatus.FAILED,
- intent=commands.CommandIntent.SETUP,
- )
- expected_failed_cmd_3 = commands.WaitForResume(
- id="command-id-3",
- key="command-key-3",
- error=None,
- createdAt=datetime(year=2021, month=1, day=1),
- completedAt=datetime(year=2023, month=3, day=3),
- params=commands.WaitForResumeParams(),
- status=commands.CommandStatus.FAILED,
- intent=commands.CommandIntent.SETUP,
- )
-
- subject = CommandStore(is_door_open=False, config=_make_config())
-
- subject.handle_action(queue_action_1_non_setup)
- subject.handle_action(queue_action_2_setup)
- subject.handle_action(queue_action_3_setup)
- subject.handle_action(running_cmd_2)
- subject.handle_action(failed_action_cmd_2)
-
- assert subject.state.running_command_id is None
- assert subject.state.queued_setup_command_ids == OrderedSet()
- assert subject.state.queued_command_ids == OrderedSet(["command-id-1"])
- assert subject.state.all_command_ids == [
- "command-id-1",
- "command-id-2",
- "command-id-3",
- ]
- assert subject.state.commands_by_id == {
- "command-id-1": CommandEntry(index=0, command=cmd_1_non_setup),
- "command-id-2": CommandEntry(index=1, command=expected_failed_cmd_2),
- "command-id-3": CommandEntry(index=2, command=expected_failed_cmd_3),
- }
-
-
-def test_command_store_preserves_handle_order() -> None:
- """It should store commands in the order they are handled."""
- # Any arbitrary 3 commands that compare non-equal (!=) to each other.
- command_a = create_queued_command(command_id="command-id-1")
- command_b = create_running_command(command_id="command-id-2")
- command_c = create_succeeded_command(command_id="command-id-1")
-
- subject = CommandStore(is_door_open=False, config=_make_config())
-
- subject.handle_action(UpdateCommandAction(private_result=None, command=command_a))
- assert subject.state.all_command_ids == ["command-id-1"]
- assert subject.state.commands_by_id == {
- "command-id-1": CommandEntry(index=0, command=command_a),
- }
-
- subject.handle_action(UpdateCommandAction(private_result=None, command=command_b))
- assert subject.state.all_command_ids == ["command-id-1", "command-id-2"]
- assert subject.state.commands_by_id == {
- "command-id-1": CommandEntry(index=0, command=command_a),
- "command-id-2": CommandEntry(index=1, command=command_b),
- }
-
- subject.handle_action(UpdateCommandAction(private_result=None, command=command_c))
- assert subject.state.all_command_ids == ["command-id-1", "command-id-2"]
- assert subject.state.commands_by_id == {
- "command-id-1": CommandEntry(index=0, command=command_c),
- "command-id-2": CommandEntry(index=1, command=command_b),
- }
-
-
-@pytest.mark.parametrize("pause_source", PauseSource)
-def test_command_store_handles_pause_action(pause_source: PauseSource) -> None:
- """It should clear the running flag on pause."""
- subject = CommandStore(is_door_open=False, config=_make_config())
- subject.handle_action(PauseAction(source=pause_source))
-
- assert subject.state == CommandState(
- queue_status=QueueStatus.PAUSED,
- run_result=None,
- run_completed_at=None,
- run_started_at=None,
- is_door_blocking=False,
- running_command_id=None,
- all_command_ids=[],
- queued_command_ids=OrderedSet(),
- queued_setup_command_ids=OrderedSet(),
- commands_by_id=OrderedDict(),
- run_error=None,
- finish_error=None,
- latest_command_hash=None,
- stopped_by_estop=False,
- )
-
-
-@pytest.mark.parametrize("pause_source", PauseSource)
-def test_command_store_handles_play_action(pause_source: PauseSource) -> None:
- """It should set the running flag on play."""
- subject = CommandStore(is_door_open=False, config=_make_config())
- subject.handle_action(PlayAction(requested_at=datetime(year=2021, month=1, day=1)))
-
- assert subject.state == CommandState(
- queue_status=QueueStatus.RUNNING,
- run_result=None,
- run_completed_at=None,
- is_door_blocking=False,
- running_command_id=None,
- all_command_ids=[],
- queued_command_ids=OrderedSet(),
- queued_setup_command_ids=OrderedSet(),
- commands_by_id=OrderedDict(),
- run_error=None,
- finish_error=None,
- run_started_at=datetime(year=2021, month=1, day=1),
- latest_command_hash=None,
- stopped_by_estop=False,
- )
-
-
-def test_command_store_handles_finish_action() -> None:
- """It should change to a succeeded state with FinishAction."""
- subject = CommandStore(is_door_open=False, config=_make_config())
-
- subject.handle_action(PlayAction(requested_at=datetime(year=2021, month=1, day=1)))
- subject.handle_action(FinishAction())
-
- assert subject.state == CommandState(
- queue_status=QueueStatus.PAUSED,
- run_result=RunResult.SUCCEEDED,
- run_completed_at=None,
- is_door_blocking=False,
- running_command_id=None,
- all_command_ids=[],
- queued_command_ids=OrderedSet(),
- queued_setup_command_ids=OrderedSet(),
- commands_by_id=OrderedDict(),
- run_error=None,
- finish_error=None,
- run_started_at=datetime(year=2021, month=1, day=1),
- latest_command_hash=None,
- stopped_by_estop=False,
- )
-
-
-def test_command_store_handles_finish_action_with_stopped() -> None:
- """It should change to a stopped state if FinishAction has set_run_status=False."""
- subject = CommandStore(is_door_open=False, config=_make_config())
-
- subject.handle_action(PlayAction(requested_at=datetime(year=2021, month=1, day=1)))
- subject.handle_action(FinishAction(set_run_status=False))
-
- assert subject.state.run_result == RunResult.STOPPED
-
-
-@pytest.mark.parametrize("from_estop", [True, False])
-def test_command_store_handles_stop_action(from_estop: bool) -> None:
- """It should mark the engine as non-gracefully stopped on StopAction."""
- subject = CommandStore(is_door_open=False, config=_make_config())
-
- subject.handle_action(PlayAction(requested_at=datetime(year=2021, month=1, day=1)))
- subject.handle_action(StopAction(from_estop=from_estop))
-
- assert subject.state == CommandState(
- queue_status=QueueStatus.PAUSED,
- run_result=RunResult.STOPPED,
- run_completed_at=None,
- is_door_blocking=False,
- running_command_id=None,
- all_command_ids=[],
- queued_command_ids=OrderedSet(),
- queued_setup_command_ids=OrderedSet(),
- commands_by_id=OrderedDict(),
- run_error=None,
- finish_error=None,
- run_started_at=datetime(year=2021, month=1, day=1),
- latest_command_hash=None,
- stopped_by_estop=from_estop,
- )
-
-
-def test_command_store_cannot_restart_after_should_stop() -> None:
- """It should reject a play action after finish."""
- subject = CommandStore(is_door_open=False, config=_make_config())
- subject.handle_action(FinishAction())
- subject.handle_action(PlayAction(requested_at=datetime(year=2021, month=1, day=1)))
-
- assert subject.state == CommandState(
- queue_status=QueueStatus.PAUSED,
- run_result=RunResult.SUCCEEDED,
- run_completed_at=None,
- is_door_blocking=False,
- running_command_id=None,
- all_command_ids=[],
- queued_command_ids=OrderedSet(),
- queued_setup_command_ids=OrderedSet(),
- commands_by_id=OrderedDict(),
- run_error=None,
- finish_error=None,
- run_started_at=None,
- latest_command_hash=None,
- stopped_by_estop=False,
- )
-
-
-def test_command_store_save_started_completed_run_timestamp() -> None:
- """It should save started and completed timestamps."""
- subject = CommandStore(config=_make_config(), is_door_open=False)
- start_time = datetime(year=2021, month=1, day=1)
- hardware_stopped_time = datetime(year=2022, month=2, day=2)
-
- subject.handle_action(PlayAction(requested_at=start_time))
- subject.handle_action(
- HardwareStoppedAction(
- completed_at=hardware_stopped_time, finish_error_details=None
- )
- )
-
- assert subject.state.run_started_at == start_time
- assert subject.state.run_completed_at == hardware_stopped_time
-
-
-def test_timestamps_are_latched() -> None:
- """It should not change startedAt or completedAt once set."""
- subject = CommandStore(config=_make_config(), is_door_open=False)
-
- play_time_1 = datetime(year=2021, month=1, day=1)
- play_time_2 = datetime(year=2022, month=2, day=2)
- stop_time_1 = datetime(year=2023, month=3, day=3)
- stop_time_2 = datetime(year=2024, month=4, day=4)
-
- subject.handle_action(PlayAction(requested_at=play_time_1))
- subject.handle_action(PauseAction(source=PauseSource.CLIENT))
- subject.handle_action(PlayAction(requested_at=play_time_2))
- subject.handle_action(
- HardwareStoppedAction(completed_at=stop_time_1, finish_error_details=None)
- )
- subject.handle_action(
- HardwareStoppedAction(completed_at=stop_time_2, finish_error_details=None)
- )
-
- assert subject.state.run_started_at == play_time_1
- assert subject.state.run_completed_at == stop_time_1
-
-
-def test_command_store_wraps_unknown_errors() -> None:
- """Fatal errors that are unknown should be wrapped in EnumeratedErrors.
-
- Fatal errors can come in through FinishActions and HardwareStoppedActions.
- If these are not descendants of EnumeratedError already, they should be
- wrapped in an EnumeratedError before being converted to an ErrorOccurrence.
-
- The wrapping EnumeratedError should be an UnexpectedProtocolError for errors that happened
- in the main part of the protocol run, or a PythonException for errors that happened elsewhere.
- """
- subject = CommandStore(is_door_open=False, config=_make_config())
-
- subject.handle_action(
- FinishAction(
- error_details=FinishErrorDetails(
- error=RuntimeError("oh no"),
- error_id="error-id-1",
- created_at=datetime(year=2021, month=1, day=1),
- )
- )
- )
-
- subject.handle_action(
- HardwareStoppedAction(
- completed_at=datetime(year=2022, month=2, day=2),
- finish_error_details=FinishErrorDetails(
- error=RuntimeError("yikes"),
- error_id="error-id-2",
- created_at=datetime(year=2023, month=3, day=3),
- ),
- )
- )
-
- assert subject.state == CommandState(
- queue_status=QueueStatus.PAUSED,
- run_result=RunResult.FAILED,
- run_completed_at=datetime(year=2022, month=2, day=2),
- is_door_blocking=False,
- running_command_id=None,
- all_command_ids=[],
- queued_command_ids=OrderedSet(),
- queued_setup_command_ids=OrderedSet(),
- commands_by_id=OrderedDict(),
- run_error=errors.ErrorOccurrence(
- id="error-id-1",
- createdAt=datetime(year=2021, month=1, day=1),
- # This is wrapped into an UnexpectedProtocolError because it's not
- # enumerated, and it happened in the main part of the run.
- errorType="UnexpectedProtocolError",
- # Unknown errors use the default error code.
- errorCode=ErrorCodes.GENERAL_ERROR.value.code,
- # And it has information about what created it.
- detail="oh no",
- wrappedErrors=[
- errors.ErrorOccurrence(
- id="error-id-1",
- createdAt=datetime(year=2021, month=1, day=1),
- errorType="PythonException",
- detail="RuntimeError: oh no",
- errorCode="4000",
- errorInfo={
- "class": "RuntimeError",
- "args": "('oh no',)",
- },
- wrappedErrors=[],
- )
- ],
- ),
- finish_error=errors.ErrorOccurrence(
- id="error-id-2",
- createdAt=datetime(year=2023, month=3, day=3),
- # This is wrapped into a PythonException because it's not
- # enumerated, and it happened during the post-run cleanup steps.
- errorType="PythonException",
- # Unknown errors use the default error code.
- errorCode=ErrorCodes.GENERAL_ERROR.value.code,
- # And it has information about what created it.
- detail="RuntimeError: yikes",
- errorInfo={
- "class": "RuntimeError",
- "args": "('yikes',)",
- },
- ),
- run_started_at=None,
- latest_command_hash=None,
- stopped_by_estop=False,
- )
-
-
-def test_command_store_preserves_enumerated_errors() -> None:
- """If an error is derived from EnumeratedError, it should be stored as-is."""
-
- class MyCustomError(errors.ProtocolEngineError):
- def __init__(self, message: str) -> None:
- super().__init__(ErrorCodes.PIPETTE_NOT_PRESENT, message)
-
- subject = CommandStore(is_door_open=False, config=_make_config())
-
- subject.handle_action(
- FinishAction(
- error_details=FinishErrorDetails(
- error=MyCustomError(message="oh no"),
- error_id="error-id-1",
- created_at=datetime(year=2021, month=1, day=1),
- )
- )
- )
-
- subject.handle_action(
- HardwareStoppedAction(
- completed_at=datetime(year=2022, month=2, day=2),
- finish_error_details=FinishErrorDetails(
- error=MyCustomError(message="yikes"),
- error_id="error-id-2",
- created_at=datetime(year=2023, month=3, day=3),
- ),
- )
- )
-
- assert subject.state == CommandState(
- queue_status=QueueStatus.PAUSED,
- run_result=RunResult.FAILED,
- run_completed_at=datetime(year=2022, month=2, day=2),
- is_door_blocking=False,
- running_command_id=None,
- all_command_ids=[],
- queued_command_ids=OrderedSet(),
- queued_setup_command_ids=OrderedSet(),
- commands_by_id=OrderedDict(),
- run_error=errors.ErrorOccurrence(
- id="error-id-1",
- createdAt=datetime(year=2021, month=1, day=1),
- errorType="MyCustomError",
- detail="oh no",
- errorCode=ErrorCodes.PIPETTE_NOT_PRESENT.value.code,
- ),
- finish_error=errors.ErrorOccurrence(
- id="error-id-2",
- createdAt=datetime(year=2023, month=3, day=3),
- errorType="MyCustomError",
- detail="yikes",
- errorCode=ErrorCodes.PIPETTE_NOT_PRESENT.value.code,
- ),
- run_started_at=None,
- latest_command_hash=None,
- stopped_by_estop=False,
- )
-
-
-def test_command_store_ignores_stop_after_graceful_finish() -> None:
- """It should no-op on stop if already gracefully finished."""
- subject = CommandStore(is_door_open=False, config=_make_config())
-
- subject.handle_action(PlayAction(requested_at=datetime(year=2021, month=1, day=1)))
- subject.handle_action(FinishAction())
- subject.handle_action(StopAction())
-
- assert subject.state == CommandState(
- queue_status=QueueStatus.PAUSED,
- run_result=RunResult.SUCCEEDED,
- run_completed_at=None,
- is_door_blocking=False,
- running_command_id=None,
- all_command_ids=[],
- queued_command_ids=OrderedSet(),
- queued_setup_command_ids=OrderedSet(),
- commands_by_id=OrderedDict(),
- run_error=None,
- finish_error=None,
- run_started_at=datetime(year=2021, month=1, day=1),
- latest_command_hash=None,
- stopped_by_estop=False,
- )
-
-
-def test_command_store_ignores_finish_after_non_graceful_stop() -> None:
- """It should no-op on finish if already ungracefully stopped."""
- subject = CommandStore(is_door_open=False, config=_make_config())
-
- subject.handle_action(PlayAction(requested_at=datetime(year=2021, month=1, day=1)))
- subject.handle_action(StopAction())
- subject.handle_action(FinishAction())
-
- assert subject.state == CommandState(
- queue_status=QueueStatus.PAUSED,
- run_result=RunResult.STOPPED,
- run_completed_at=None,
- is_door_blocking=False,
- running_command_id=None,
- all_command_ids=[],
- queued_command_ids=OrderedSet(),
- queued_setup_command_ids=OrderedSet(),
- commands_by_id=OrderedDict(),
- run_error=None,
- finish_error=None,
- run_started_at=datetime(year=2021, month=1, day=1),
- latest_command_hash=None,
- stopped_by_estop=False,
- )
-
-
-def test_command_store_handles_command_failed() -> None:
- """It should store an error and mark the command if it fails."""
- command = create_running_command(command_id="command-id")
-
- expected_error_occurrence = errors.ErrorOccurrence(
- id="error-id",
- errorType="ProtocolEngineError",
- createdAt=datetime(year=2022, month=2, day=2),
- detail="oh no",
- errorCode=ErrorCodes.GENERAL_ERROR.value.code,
- )
-
- expected_failed_command = create_failed_command(
- command_id="command-id",
- error=expected_error_occurrence,
- completed_at=datetime(year=2022, month=2, day=2),
- )
-
- subject = CommandStore(is_door_open=False, config=_make_config())
- subject.handle_action(UpdateCommandAction(private_result=None, command=command))
- subject.handle_action(
- FailCommandAction(
- command_id="command-id",
- error_id="error-id",
- failed_at=datetime(year=2022, month=2, day=2),
- error=errors.ProtocolEngineError(message="oh no"),
- )
- )
-
- assert subject.state == CommandState(
- queue_status=QueueStatus.SETUP,
- run_result=None,
- run_completed_at=None,
- is_door_blocking=False,
- running_command_id=None,
- all_command_ids=["command-id"],
- queued_command_ids=OrderedSet(),
- queued_setup_command_ids=OrderedSet(),
- commands_by_id={
- "command-id": CommandEntry(index=0, command=expected_failed_command),
- },
- run_error=None,
- finish_error=None,
- run_started_at=None,
- latest_command_hash=None,
- stopped_by_estop=False,
- )
-
-
-def test_handles_hardware_stopped() -> None:
- """It should mark the hardware as stopped on HardwareStoppedAction."""
- subject = CommandStore(is_door_open=False, config=_make_config())
- completed_at = datetime(year=2021, day=1, month=1)
- subject.handle_action(
- HardwareStoppedAction(completed_at=completed_at, finish_error_details=None)
- )
-
- assert subject.state == CommandState(
- queue_status=QueueStatus.PAUSED,
- run_result=RunResult.STOPPED,
- run_completed_at=completed_at,
- is_door_blocking=False,
- running_command_id=None,
- all_command_ids=[],
- queued_command_ids=OrderedSet(),
- queued_setup_command_ids=OrderedSet(),
- commands_by_id=OrderedDict(),
- run_error=None,
- finish_error=None,
- run_started_at=None,
- latest_command_hash=None,
- stopped_by_estop=False,
- )
-
-
-@pytest.mark.parametrize(
- ("is_door_open", "config", "expected_queue_status"),
- [
- (False, _make_config(), QueueStatus.RUNNING),
- (True, _make_config(), QueueStatus.RUNNING),
- (False, _make_config(block_on_door_open=True), QueueStatus.RUNNING),
- (True, _make_config(block_on_door_open=True), QueueStatus.PAUSED),
- ],
-)
-def test_command_store_handles_play_according_to_initial_door_state(
- is_door_open: bool,
- config: Config,
- expected_queue_status: QueueStatus,
-) -> None:
- """It should set command queue state on play action according to door state."""
- subject = CommandStore(is_door_open=is_door_open, config=config)
- start_time = datetime(year=2021, month=1, day=1)
- subject.handle_action(PlayAction(requested_at=start_time))
-
- assert subject.state.queue_status == expected_queue_status
- assert subject.state.run_started_at == start_time
-
-
-@pytest.mark.parametrize(
- ("config", "expected_is_door_blocking"),
- [
- (_make_config(block_on_door_open=True), True),
- (_make_config(block_on_door_open=False), False),
- ],
-)
-def test_handles_door_open_and_close_event_before_play(
- config: Config, expected_is_door_blocking: bool
-) -> None:
- """It should update state but not pause on door open when in setup."""
- subject = CommandStore(is_door_open=False, config=config)
-
- subject.handle_action(DoorChangeAction(door_state=DoorState.OPEN))
-
- assert subject.state.queue_status == QueueStatus.SETUP
- assert subject.state.is_door_blocking is expected_is_door_blocking
-
- subject.handle_action(DoorChangeAction(door_state=DoorState.CLOSED))
-
- assert subject.state.queue_status == QueueStatus.SETUP
- assert subject.state.is_door_blocking is False
-
-
-@pytest.mark.parametrize(
- ("config", "expected_queue_status", "expected_is_door_blocking"),
- [
- (_make_config(block_on_door_open=True), QueueStatus.PAUSED, True),
- (_make_config(block_on_door_open=False), QueueStatus.RUNNING, False),
- ],
-)
-def test_handles_door_open_and_close_event_after_play(
- config: Config, expected_queue_status: QueueStatus, expected_is_door_blocking: bool
-) -> None:
- """It should update state when door opened and closed after run is played."""
- subject = CommandStore(is_door_open=False, config=config)
-
- subject.handle_action(PlayAction(requested_at=datetime(year=2021, month=1, day=1)))
- subject.handle_action(DoorChangeAction(door_state=DoorState.OPEN))
-
- assert subject.state.queue_status == expected_queue_status
- assert subject.state.is_door_blocking is expected_is_door_blocking
-
- subject.handle_action(DoorChangeAction(door_state=DoorState.CLOSED))
-
- assert subject.state.queue_status == expected_queue_status
- assert subject.state.is_door_blocking is False
diff --git a/api/tests/opentrons/protocol_engine/state/test_command_store_old.py b/api/tests/opentrons/protocol_engine/state/test_command_store_old.py
new file mode 100644
index 00000000000..7f376a0b019
--- /dev/null
+++ b/api/tests/opentrons/protocol_engine/state/test_command_store_old.py
@@ -0,0 +1,1070 @@
+"""Tests for CommandStore.
+
+DEPRECATED: Testing CommandStore independently of CommandView is no longer helpful.
+Add new tests to test_command_state.py, where they can be tested together.
+"""
+
+
+import pytest
+from datetime import datetime
+from typing import NamedTuple, Type
+
+from opentrons_shared_data.errors import ErrorCodes
+from opentrons_shared_data.pipette.dev_types import PipetteNameType
+
+from opentrons.ordered_set import OrderedSet
+from opentrons.protocol_engine.actions.actions import RunCommandAction
+from opentrons.types import MountType, DeckSlotName
+from opentrons.hardware_control.types import DoorState
+
+from opentrons.protocol_engine import commands, errors
+from opentrons.protocol_engine.types import DeckSlotLocation, DeckType, WellLocation
+from opentrons.protocol_engine.state import Config
+from opentrons.protocol_engine.state.commands import (
+ CommandState,
+ CommandStore,
+ RunResult,
+ QueueStatus,
+)
+from opentrons.protocol_engine.state.command_history import CommandEntry
+
+from opentrons.protocol_engine.actions import (
+ QueueCommandAction,
+ SucceedCommandAction,
+ PlayAction,
+ PauseAction,
+ PauseSource,
+ FinishAction,
+ FinishErrorDetails,
+ StopAction,
+ HardwareStoppedAction,
+ DoorChangeAction,
+)
+
+from opentrons.protocol_engine.state.command_history import CommandHistory
+
+from .command_fixtures import create_succeeded_command
+
+
+def _make_config(block_on_door_open: bool = False) -> Config:
+ return Config(
+ block_on_door_open=block_on_door_open,
+ # Choice of robot and deck type is arbitrary.
+ robot_type="OT-2 Standard",
+ deck_type=DeckType.OT2_STANDARD,
+ )
+
+
+@pytest.mark.parametrize(
+ ("is_door_open", "config", "expected_is_door_blocking"),
+ [
+ (False, _make_config(), False),
+ (True, _make_config(), False),
+ (False, _make_config(block_on_door_open=True), False),
+ (True, _make_config(block_on_door_open=True), True),
+ ],
+)
+def test_initial_state(
+ is_door_open: bool,
+ config: Config,
+ expected_is_door_blocking: bool,
+) -> None:
+ """It should set the initial state."""
+ subject = CommandStore(is_door_open=is_door_open, config=config)
+
+ assert subject.state == CommandState(
+ command_history=CommandHistory(),
+ queue_status=QueueStatus.SETUP,
+ run_completed_at=None,
+ run_started_at=None,
+ is_door_blocking=expected_is_door_blocking,
+ run_result=None,
+ run_error=None,
+ finish_error=None,
+ failed_command=None,
+ command_error_recovery_types={},
+ recovery_target_command_id=None,
+ latest_protocol_command_hash=None,
+ stopped_by_estop=False,
+ )
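+ # Compared with the CommandState asserted in the deleted test_command_store.py
+ # above: the per-ID command dicts and queue sets now live in `command_history`,
+ # `latest_command_hash` is now `latest_protocol_command_hash`, and the
+ # error-recovery fields (`failed_command`, `command_error_recovery_types`,
+ # `recovery_target_command_id`) are new.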
+
+
+class QueueCommandSpec(NamedTuple):
+ """Test data for the QueueCommandAction."""
+
+ command_request: commands.CommandCreate
+ expected_cls: Type[commands.Command]
+ created_at: datetime = datetime(year=2021, month=1, day=1)
+ command_id: str = "command-id"
+ command_key: str = "command-key"
+
+
+@pytest.mark.parametrize(
+ QueueCommandSpec._fields,
+ [
+ QueueCommandSpec(
+ command_request=commands.AspirateCreate(
+ params=commands.AspirateParams(
+ pipetteId="pipette-id",
+ labwareId="labware-id",
+ wellName="well-name",
+ volume=42,
+ flowRate=1.23,
+ wellLocation=WellLocation(),
+ ),
+ key="command-key",
+ ),
+ expected_cls=commands.Aspirate,
+ ),
+ QueueCommandSpec(
+ command_request=commands.DispenseCreate(
+ params=commands.DispenseParams(
+ pipetteId="pipette-id",
+ labwareId="labware-id",
+ wellName="well-name",
+ volume=42,
+ flowRate=1.23,
+ wellLocation=WellLocation(),
+ ),
+ ),
+ expected_cls=commands.Dispense,
+ # test when key prop is missing
+ command_key="command-id",
+ ),
+ QueueCommandSpec(
+ command_request=commands.DropTipCreate(
+ params=commands.DropTipParams(
+ pipetteId="pipette-id",
+ labwareId="labware-id",
+ wellName="well-name",
+ ),
+ key="command-key",
+ ),
+ expected_cls=commands.DropTip,
+ ),
+ QueueCommandSpec(
+ command_request=commands.LoadLabwareCreate(
+ params=commands.LoadLabwareParams(
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
+ loadName="load-name",
+ namespace="namespace",
+ version=42,
+ ),
+ key="command-key",
+ ),
+ expected_cls=commands.LoadLabware,
+ ),
+ QueueCommandSpec(
+ command_request=commands.LoadPipetteCreate(
+ params=commands.LoadPipetteParams(
+ mount=MountType.LEFT,
+ pipetteName=PipetteNameType.P300_SINGLE,
+ ),
+ key="command-key",
+ ),
+ expected_cls=commands.LoadPipette,
+ ),
+ QueueCommandSpec(
+ command_request=commands.PickUpTipCreate(
+ params=commands.PickUpTipParams(
+ pipetteId="pipette-id",
+ labwareId="labware-id",
+ wellName="well-name",
+ ),
+ key="command-key",
+ ),
+ expected_cls=commands.PickUpTip,
+ ),
+ QueueCommandSpec(
+ command_request=commands.MoveToWellCreate(
+ params=commands.MoveToWellParams(
+ pipetteId="pipette-id",
+ labwareId="labware-id",
+ wellName="well-name",
+ ),
+ key="command-key",
+ ),
+ expected_cls=commands.MoveToWell,
+ ),
+ QueueCommandSpec(
+ command_request=commands.WaitForResumeCreate(
+ params=commands.WaitForResumeParams(message="hello world"),
+ key="command-key",
+ ),
+ expected_cls=commands.WaitForResume,
+ ),
+ QueueCommandSpec(
+ # a WaitForResumeCreate with `pause` should be mapped to
+ # a WaitForResume with `commandType="waitForResume"`
+ command_request=commands.WaitForResumeCreate(
+ commandType="pause",
+ params=commands.WaitForResumeParams(message="hello world"),
+ key="command-key",
+ ),
+ expected_cls=commands.WaitForResume,
+ ),
+ ],
+)
+def test_command_store_queues_commands(
+ command_request: commands.CommandCreate,
+ expected_cls: Type[commands.Command],
+ created_at: datetime,
+ command_id: str,
+ command_key: str,
+) -> None:
+ """It should add a command to the store."""
+ action = QueueCommandAction(
+ request=command_request,
+ request_hash=None,
+ created_at=created_at,
+ command_id=command_id,
+ )
+ expected_command = expected_cls(
+ id=command_id,
+ key=command_key,
+ createdAt=created_at,
+ status=commands.CommandStatus.QUEUED,
+ params=command_request.params, # type: ignore[arg-type]
+ )
+
+ subject = CommandStore(is_door_open=False, config=_make_config())
+ subject.handle_action(action)
+
+ assert subject.state.command_history.get("command-id") == CommandEntry(
+ index=0, command=expected_command
+ )
+ assert subject.state.command_history.get_all_ids() == ["command-id"]
+ assert subject.state.command_history.get_queue_ids() == OrderedSet(["command-id"])
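+ # (Lookups go through the CommandHistory accessors; the deleted
+ # test_command_store.py above reached into commands_by_id, all_command_ids,
+ # and queued_command_ids directly.)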
+
+
+def test_command_queue_with_hash() -> None:
+ """It should queue a command with a command hash and no explicit key."""
+ create = commands.WaitForResumeCreate(
+ params=commands.WaitForResumeParams(message="hello world"),
+ )
+
+ subject = CommandStore(is_door_open=False, config=_make_config())
+ subject.handle_action(
+ QueueCommandAction(
+ request=create,
+ request_hash="abc123",
+ created_at=datetime(year=2021, month=1, day=1),
+ command_id="command-id-1",
+ )
+ )
+
+ assert subject.state.command_history.get("command-id-1").command.key == "abc123"
+ assert subject.state.latest_protocol_command_hash == "abc123"
+
+ subject.handle_action(
+ QueueCommandAction(
+ request=create,
+ request_hash="def456",
+ created_at=datetime(year=2021, month=1, day=1),
+ command_id="command-id-2",
+ )
+ )
+
+ assert subject.state.latest_protocol_command_hash == "def456"
+
+
+def test_command_queue_and_unqueue() -> None:
+ """It should queue on QueueCommandAction and dequeue on RunCommandAction."""
+ queue_1 = QueueCommandAction(
+ request=commands.WaitForResumeCreate(params=commands.WaitForResumeParams()),
+ request_hash=None,
+ created_at=datetime(year=2021, month=1, day=1),
+ command_id="command-id-1",
+ )
+ queue_2 = QueueCommandAction(
+ request=commands.WaitForResumeCreate(params=commands.WaitForResumeParams()),
+ request_hash=None,
+ created_at=datetime(year=2022, month=2, day=2),
+ command_id="command-id-2",
+ )
+ run_1 = RunCommandAction(
+ command_id="command-id-1",
+ started_at=datetime(year=2021, month=1, day=1),
+ )
+ run_2 = RunCommandAction(
+ command_id="command-id-2",
+ started_at=datetime(year=2022, month=2, day=2),
+ )
+ succeed_2 = SucceedCommandAction(
+ private_result=None,
+ command=create_succeeded_command(command_id="command-id-2"),
+ )
+
+ subject = CommandStore(is_door_open=False, config=_make_config())
+
+ subject.handle_action(queue_1)
+ assert subject.state.command_history.get_queue_ids() == OrderedSet(["command-id-1"])
+
+ subject.handle_action(queue_2)
+ assert subject.state.command_history.get_queue_ids() == OrderedSet(
+ ["command-id-1", "command-id-2"]
+ )
+
+ subject.handle_action(run_2)
+ assert subject.state.command_history.get_queue_ids() == OrderedSet(["command-id-1"])
+
+ subject.handle_action(succeed_2)
+ subject.handle_action(run_1)
+ assert subject.state.command_history.get_queue_ids() == OrderedSet()
+
+
+def test_setup_command_queue_and_unqueue() -> None:
+ """It should queue and dequeue on setup commands."""
+ queue_1 = QueueCommandAction(
+ request=commands.WaitForResumeCreate(
+ params=commands.WaitForResumeParams(),
+ intent=commands.CommandIntent.SETUP,
+ ),
+ request_hash=None,
+ created_at=datetime(year=2021, month=1, day=1),
+ command_id="command-id-1",
+ )
+ queue_2 = QueueCommandAction(
+ request=commands.WaitForResumeCreate(
+ params=commands.WaitForResumeParams(),
+ intent=commands.CommandIntent.SETUP,
+ ),
+ request_hash=None,
+ created_at=datetime(year=2022, month=2, day=2),
+ command_id="command-id-2",
+ )
+ run_1 = RunCommandAction(
+ command_id="command-id-1", started_at=datetime(year=2021, month=1, day=1)
+ )
+ run_2 = RunCommandAction(
+ command_id="command-id-2", started_at=datetime(year=2022, month=2, day=2)
+ )
+ succeed_2 = SucceedCommandAction(
+ private_result=None,
+ command=create_succeeded_command(command_id="command-id-2"),
+ )
+
+ subject = CommandStore(is_door_open=False, config=_make_config())
+
+ subject.handle_action(queue_1)
+ assert subject.state.command_history.get_setup_queue_ids() == OrderedSet(
+ ["command-id-1"]
+ )
+
+ subject.handle_action(queue_2)
+ assert subject.state.command_history.get_setup_queue_ids() == OrderedSet(
+ ["command-id-1", "command-id-2"]
+ )
+
+ subject.handle_action(run_2)
+ assert subject.state.command_history.get_setup_queue_ids() == OrderedSet(
+ ["command-id-1"]
+ )
+
+ subject.handle_action(succeed_2)
+ subject.handle_action(run_1)
+ assert subject.state.command_history.get_setup_queue_ids() == OrderedSet()
+
+
+def test_setup_queue_action_updates_command_intent() -> None:
+ """It should update command source correctly."""
+ queue_cmd = QueueCommandAction(
+ request=commands.WaitForResumeCreate(
+ params=commands.WaitForResumeParams(),
+ intent=commands.CommandIntent.SETUP,
+ key="command-key-1",
+ ),
+ request_hash=None,
+ created_at=datetime(year=2021, month=1, day=1),
+ command_id="command-id-1",
+ )
+
+ expected_pause_cmd = commands.WaitForResume(
+ id="command-id-1",
+ key="command-key-1",
+ createdAt=datetime(year=2021, month=1, day=1),
+ params=commands.WaitForResumeParams(),
+ status=commands.CommandStatus.QUEUED,
+ intent=commands.CommandIntent.SETUP,
+ )
+
+ subject = CommandStore(is_door_open=False, config=_make_config())
+
+ subject.handle_action(queue_cmd)
+ assert subject.state.command_history.get("command-id-1") == CommandEntry(
+ index=0, command=expected_pause_cmd
+ )
+
+
+def test_running_command_id() -> None:
+ """It should update the running command ID through a command's lifecycle."""
+ queue = QueueCommandAction(
+ request=commands.WaitForResumeCreate(params=commands.WaitForResumeParams()),
+ request_hash=None,
+ created_at=datetime(year=2021, month=1, day=1),
+ command_id="command-id-1",
+ )
+ run = RunCommandAction(
+ command_id="command-id-1",
+ started_at=datetime(year=2021, month=1, day=1),
+ )
+ succeed = SucceedCommandAction(
+ private_result=None,
+ command=create_succeeded_command(command_id="command-id-1"),
+ )
+
+ subject = CommandStore(is_door_open=False, config=_make_config())
+
+ subject.handle_action(queue)
+ assert subject.state.command_history.get_running_command() is None
+
+ subject.handle_action(run)
+ running_command = subject.state.command_history.get_running_command()
+ assert running_command is not None
+ assert running_command.command.id == "command-id-1"
+
+ subject.handle_action(succeed)
+ assert subject.state.command_history.get_running_command() is None
+
+
+def test_command_store_keeps_commands_in_queue_order() -> None:
+ """It should keep commands in the order they were originally enqueued."""
+ command_create_1_non_setup = commands.CommentCreate(
+ params=commands.CommentParams(message="hello world"),
+ )
+ command_create_2_setup = commands.CommentCreate(
+ params=commands.CommentParams(message="hello world"),
+ intent=commands.CommandIntent.SETUP,
+ )
+ command_create_3_non_setup = commands.CommentCreate(
+ params=commands.CommentParams(message="hello world"),
+ )
+
+ subject = CommandStore(is_door_open=False, config=_make_config())
+
+ subject.handle_action(
+ QueueCommandAction(
+ "command-id-1",
+ created_at=datetime(year=2021, month=1, day=1),
+ request=command_create_1_non_setup,
+ request_hash=None,
+ )
+ )
+ assert subject.state.command_history.get_all_ids() == ["command-id-1"]
+
+ subject.handle_action(
+ QueueCommandAction(
+ "command-id-2",
+ created_at=datetime(year=2021, month=1, day=1),
+ request=command_create_2_setup,
+ request_hash=None,
+ )
+ )
+ assert subject.state.command_history.get_all_ids() == [
+ "command-id-1",
+ "command-id-2",
+ ]
+
+ subject.handle_action(
+ QueueCommandAction(
+ "command-id-3",
+ created_at=datetime(year=2021, month=1, day=1),
+ request=command_create_3_non_setup,
+ request_hash=None,
+ )
+ )
+ assert subject.state.command_history.get_all_ids() == [
+ "command-id-1",
+ "command-id-2",
+ "command-id-3",
+ ]
+
+ # Running and completing commands shouldn't affect the command order.
+ subject.handle_action(
+ RunCommandAction(
+ command_id="command-id-2", started_at=datetime(year=2021, month=1, day=1)
+ )
+ )
+ subject.handle_action(
+ SucceedCommandAction(
+ command=create_succeeded_command(
+ command_id="command-id-2",
+ ),
+ private_result=None,
+ )
+ )
+ assert subject.state.command_history.get_all_ids() == [
+ "command-id-1",
+ "command-id-2",
+ "command-id-3",
+ ]
+
+
+@pytest.mark.parametrize("pause_source", PauseSource)
+def test_command_store_handles_pause_action(pause_source: PauseSource) -> None:
+ """It should clear the running flag on pause."""
+ subject = CommandStore(is_door_open=False, config=_make_config())
+ subject.handle_action(PauseAction(source=pause_source))
+
+ assert subject.state == CommandState(
+ command_history=CommandHistory(),
+ queue_status=QueueStatus.PAUSED,
+ run_result=None,
+ run_completed_at=None,
+ run_started_at=None,
+ is_door_blocking=False,
+ run_error=None,
+ finish_error=None,
+ failed_command=None,
+ command_error_recovery_types={},
+ recovery_target_command_id=None,
+ latest_protocol_command_hash=None,
+ stopped_by_estop=False,
+ )
+
+
+@pytest.mark.parametrize("pause_source", PauseSource)
+def test_command_store_handles_play_action(pause_source: PauseSource) -> None:
+ """It should set the running flag on play."""
+ subject = CommandStore(is_door_open=False, config=_make_config())
+ subject.handle_action(
+ PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ )
+ )
+
+ assert subject.state == CommandState(
+ command_history=CommandHistory(),
+ queue_status=QueueStatus.RUNNING,
+ run_result=None,
+ run_completed_at=None,
+ is_door_blocking=False,
+ run_error=None,
+ finish_error=None,
+ failed_command=None,
+ command_error_recovery_types={},
+ recovery_target_command_id=None,
+ run_started_at=datetime(year=2021, month=1, day=1),
+ latest_protocol_command_hash=None,
+ stopped_by_estop=False,
+ )
+ assert subject.state.command_history.get_running_command() is None
+ assert subject.state.command_history.get_all_ids() == []
+ assert subject.state.command_history.get_queue_ids() == OrderedSet()
+ assert subject.state.command_history.get_setup_queue_ids() == OrderedSet()
+
+
+def test_command_store_handles_finish_action() -> None:
+ """It should change to a succeeded state with FinishAction."""
+ subject = CommandStore(is_door_open=False, config=_make_config())
+
+ subject.handle_action(
+ PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ )
+ )
+ subject.handle_action(FinishAction())
+
+ assert subject.state == CommandState(
+ command_history=CommandHistory(),
+ queue_status=QueueStatus.PAUSED,
+ run_result=RunResult.SUCCEEDED,
+ run_completed_at=None,
+ is_door_blocking=False,
+ run_error=None,
+ finish_error=None,
+ failed_command=None,
+ command_error_recovery_types={},
+ recovery_target_command_id=None,
+ run_started_at=datetime(year=2021, month=1, day=1),
+ latest_protocol_command_hash=None,
+ stopped_by_estop=False,
+ )
+ assert subject.state.command_history.get_running_command() is None
+ assert subject.state.command_history.get_all_ids() == []
+ assert subject.state.command_history.get_queue_ids() == OrderedSet()
+ assert subject.state.command_history.get_setup_queue_ids() == OrderedSet()
+
+
+def test_command_store_handles_finish_action_with_stopped() -> None:
+ """It should change to a stopped state if FinishAction has set_run_status=False."""
+ subject = CommandStore(is_door_open=False, config=_make_config())
+
+ subject.handle_action(
+ PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ )
+ )
+ subject.handle_action(FinishAction(set_run_status=False))
+
+ assert subject.state.run_result == RunResult.STOPPED
+
+
+@pytest.mark.parametrize(
+ ["from_estop", "expected_run_result"],
+ [(True, RunResult.FAILED), (False, RunResult.STOPPED)],
+)
+def test_command_store_handles_stop_action(
+ from_estop: bool, expected_run_result: RunResult
+) -> None:
+ """It should mark the engine as non-gracefully stopped on StopAction."""
+ subject = CommandStore(is_door_open=False, config=_make_config())
+
+ subject.handle_action(
+ PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ )
+ )
+ subject.handle_action(StopAction(from_estop=from_estop))
+
+ assert subject.state == CommandState(
+ command_history=CommandHistory(),
+ queue_status=QueueStatus.PAUSED,
+ run_result=expected_run_result,
+ run_completed_at=None,
+ is_door_blocking=False,
+ run_error=None,
+ finish_error=None,
+ failed_command=None,
+ command_error_recovery_types={},
+ recovery_target_command_id=None,
+ run_started_at=datetime(year=2021, month=1, day=1),
+ latest_protocol_command_hash=None,
+ stopped_by_estop=from_estop,
+ )
+ assert subject.state.command_history.get_running_command() is None
+ assert subject.state.command_history.get_all_ids() == []
+ assert subject.state.command_history.get_queue_ids() == OrderedSet()
+ assert subject.state.command_history.get_setup_queue_ids() == OrderedSet()
+
+
+def test_command_store_handles_stop_action_when_awaiting_recovery() -> None:
+ """It should mark the engine as non-gracefully stopped on StopAction, even while awaiting error recovery."""
+ subject = CommandStore(is_door_open=False, config=_make_config())
+
+ subject.handle_action(
+ PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ )
+ )
+
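+ # Put the store directly into error recovery here, rather than driving a full
+ # recoverable command failure, so the assertions stay focused on StopAction.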
+ subject.state.queue_status = QueueStatus.AWAITING_RECOVERY
+
+ subject.handle_action(StopAction())
+
+ assert subject.state == CommandState(
+ command_history=CommandHistory(),
+ queue_status=QueueStatus.PAUSED,
+ run_result=RunResult.STOPPED,
+ run_completed_at=None,
+ is_door_blocking=False,
+ run_error=None,
+ finish_error=None,
+ failed_command=None,
+ command_error_recovery_types={},
+ recovery_target_command_id=None,
+ run_started_at=datetime(year=2021, month=1, day=1),
+ latest_protocol_command_hash=None,
+ stopped_by_estop=False,
+ )
+ assert subject.state.command_history.get_running_command() is None
+ assert subject.state.command_history.get_all_ids() == []
+ assert subject.state.command_history.get_queue_ids() == OrderedSet()
+ assert subject.state.command_history.get_setup_queue_ids() == OrderedSet()
+
+
+def test_command_store_cannot_restart_after_should_stop() -> None:
+ """It should reject a play action after finish."""
+ subject = CommandStore(is_door_open=False, config=_make_config())
+ subject.handle_action(FinishAction())
+ subject.handle_action(
+ PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ )
+ )
+
+ assert subject.state == CommandState(
+ command_history=CommandHistory(),
+ queue_status=QueueStatus.PAUSED,
+ run_result=RunResult.SUCCEEDED,
+ run_completed_at=None,
+ is_door_blocking=False,
+ run_error=None,
+ finish_error=None,
+ failed_command=None,
+ command_error_recovery_types={},
+ recovery_target_command_id=None,
+ run_started_at=None,
+ latest_protocol_command_hash=None,
+ stopped_by_estop=False,
+ )
+ assert subject.state.command_history.get_running_command() is None
+ assert subject.state.command_history.get_all_ids() == []
+ assert subject.state.command_history.get_queue_ids() == OrderedSet()
+ assert subject.state.command_history.get_setup_queue_ids() == OrderedSet()
+
+
+def test_command_store_save_started_completed_run_timestamp() -> None:
+ """It should save started and completed timestamps."""
+ subject = CommandStore(config=_make_config(), is_door_open=False)
+ start_time = datetime(year=2021, month=1, day=1)
+ hardware_stopped_time = datetime(year=2022, month=2, day=2)
+
+ subject.handle_action(PlayAction(requested_at=start_time, deck_configuration=[]))
+ subject.handle_action(
+ HardwareStoppedAction(
+ completed_at=hardware_stopped_time, finish_error_details=None
+ )
+ )
+
+ assert subject.state.run_started_at == start_time
+ assert subject.state.run_completed_at == hardware_stopped_time
+
+
+def test_timestamps_are_latched() -> None:
+ """It should not change startedAt or completedAt once set."""
+ subject = CommandStore(config=_make_config(), is_door_open=False)
+
+ play_time_1 = datetime(year=2021, month=1, day=1)
+ play_time_2 = datetime(year=2022, month=2, day=2)
+ stop_time_1 = datetime(year=2023, month=3, day=3)
+ stop_time_2 = datetime(year=2024, month=4, day=4)
+
+ subject.handle_action(PlayAction(requested_at=play_time_1, deck_configuration=[]))
+ subject.handle_action(PauseAction(source=PauseSource.CLIENT))
+ subject.handle_action(PlayAction(requested_at=play_time_2, deck_configuration=[]))
+ subject.handle_action(
+ HardwareStoppedAction(completed_at=stop_time_1, finish_error_details=None)
+ )
+ subject.handle_action(
+ HardwareStoppedAction(completed_at=stop_time_2, finish_error_details=None)
+ )
+
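+    # Only the first PlayAction and the first HardwareStoppedAction should be
+    # recorded; later ones are ignored.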
+ assert subject.state.run_started_at == play_time_1
+ assert subject.state.run_completed_at == stop_time_1
+
+
+def test_command_store_wraps_unknown_errors() -> None:
+ """Fatal errors that are unknown should be wrapped in EnumeratedErrors.
+
+ Fatal errors can come in through FinishActions and HardwareStoppedActions.
+ If these are not descendants of EnumeratedError already, they should be
+ wrapped in an EnumeratedError before being converted to an ErrorOccurrence.
+
+ The wrapping EnumeratedError should be an UnexpectedProtocolError for errors that happened
+ in the main part of the protocol run, or a PythonException for errors that happened elsewhere.
+ """
+ subject = CommandStore(is_door_open=False, config=_make_config())
+
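+    # A plain RuntimeError coming in through FinishAction simulates an unknown
+    # error raised by the protocol run itself.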
+ subject.handle_action(
+ FinishAction(
+ error_details=FinishErrorDetails(
+ error=RuntimeError("oh no"),
+ error_id="error-id-1",
+ created_at=datetime(year=2021, month=1, day=1),
+ )
+ )
+ )
+
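+    # A second unknown error coming in through HardwareStoppedAction simulates
+    # a failure during post-run cleanup.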
+ subject.handle_action(
+ HardwareStoppedAction(
+ completed_at=datetime(year=2022, month=2, day=2),
+ finish_error_details=FinishErrorDetails(
+ error=RuntimeError("yikes"),
+ error_id="error-id-2",
+ created_at=datetime(year=2023, month=3, day=3),
+ ),
+ )
+ )
+
+ assert subject.state == CommandState(
+ command_history=CommandHistory(),
+ queue_status=QueueStatus.PAUSED,
+ run_result=RunResult.FAILED,
+ run_completed_at=datetime(year=2022, month=2, day=2),
+ is_door_blocking=False,
+ run_error=errors.ErrorOccurrence(
+ id="error-id-1",
+ createdAt=datetime(year=2021, month=1, day=1),
+ # This is wrapped into an UnexpectedProtocolError because it's not
+ # enumerated, and it happened in the main part of the run.
+ errorType="UnexpectedProtocolError",
+ # Unknown errors use the default error code.
+ errorCode=ErrorCodes.GENERAL_ERROR.value.code,
+ # And it has information about what created it.
+ detail="oh no",
+ wrappedErrors=[
+ errors.ErrorOccurrence(
+ id="error-id-1",
+ createdAt=datetime(year=2021, month=1, day=1),
+ errorType="PythonException",
+ detail="RuntimeError: oh no",
+ errorCode="4000",
+ errorInfo={
+ "class": "RuntimeError",
+ "args": "('oh no',)",
+ },
+ wrappedErrors=[],
+ )
+ ],
+ ),
+ finish_error=errors.ErrorOccurrence(
+ id="error-id-2",
+ createdAt=datetime(year=2023, month=3, day=3),
+ # This is wrapped into a PythonException because it's not
+ # enumerated, and it happened during the post-run cleanup steps.
+ errorType="PythonException",
+ # Unknown errors use the default error code.
+ errorCode=ErrorCodes.GENERAL_ERROR.value.code,
+ # And it has information about what created it.
+ detail="RuntimeError: yikes",
+ errorInfo={
+ "class": "RuntimeError",
+ "args": "('yikes',)",
+ },
+ ),
+ run_started_at=None,
+ failed_command=None,
+ command_error_recovery_types={},
+ recovery_target_command_id=None,
+ latest_protocol_command_hash=None,
+ stopped_by_estop=False,
+ )
+ assert subject.state.command_history.get_running_command() is None
+ assert subject.state.command_history.get_all_ids() == []
+ assert subject.state.command_history.get_queue_ids() == OrderedSet()
+ assert subject.state.command_history.get_setup_queue_ids() == OrderedSet()
+
+
+def test_command_store_preserves_enumerated_errors() -> None:
+ """If an error is derived from EnumeratedError, it should be stored as-is."""
+
+ class MyCustomError(errors.ProtocolEngineError):
+ def __init__(self, message: str) -> None:
+ super().__init__(ErrorCodes.PIPETTE_NOT_PRESENT, message)
+
+ subject = CommandStore(is_door_open=False, config=_make_config())
+
+ subject.handle_action(
+ FinishAction(
+ error_details=FinishErrorDetails(
+ error=MyCustomError(message="oh no"),
+ error_id="error-id-1",
+ created_at=datetime(year=2021, month=1, day=1),
+ )
+ )
+ )
+
+ subject.handle_action(
+ HardwareStoppedAction(
+ completed_at=datetime(year=2022, month=2, day=2),
+ finish_error_details=FinishErrorDetails(
+ error=MyCustomError(message="yikes"),
+ error_id="error-id-2",
+ created_at=datetime(year=2023, month=3, day=3),
+ ),
+ )
+ )
+
+ assert subject.state == CommandState(
+ command_history=CommandHistory(),
+ queue_status=QueueStatus.PAUSED,
+ run_result=RunResult.FAILED,
+ run_completed_at=datetime(year=2022, month=2, day=2),
+ is_door_blocking=False,
+ run_error=errors.ErrorOccurrence(
+ id="error-id-1",
+ createdAt=datetime(year=2021, month=1, day=1),
+ errorType="MyCustomError",
+ detail="oh no",
+ errorCode=ErrorCodes.PIPETTE_NOT_PRESENT.value.code,
+ ),
+ finish_error=errors.ErrorOccurrence(
+ id="error-id-2",
+ createdAt=datetime(year=2023, month=3, day=3),
+ errorType="MyCustomError",
+ detail="yikes",
+ errorCode=ErrorCodes.PIPETTE_NOT_PRESENT.value.code,
+ ),
+ failed_command=None,
+ command_error_recovery_types={},
+ recovery_target_command_id=None,
+ run_started_at=None,
+ latest_protocol_command_hash=None,
+ stopped_by_estop=False,
+ )
+ assert subject.state.command_history.get_running_command() is None
+ assert subject.state.command_history.get_all_ids() == []
+ assert subject.state.command_history.get_queue_ids() == OrderedSet()
+ assert subject.state.command_history.get_setup_queue_ids() == OrderedSet()
+
+
+def test_command_store_ignores_stop_after_graceful_finish() -> None:
+ """It should no-op on stop if already gracefully finished."""
+ subject = CommandStore(is_door_open=False, config=_make_config())
+
+ subject.handle_action(
+ PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ )
+ )
+ subject.handle_action(FinishAction())
+ subject.handle_action(StopAction())
+
+ assert subject.state == CommandState(
+ command_history=CommandHistory(),
+ queue_status=QueueStatus.PAUSED,
+ run_result=RunResult.SUCCEEDED,
+ run_completed_at=None,
+ is_door_blocking=False,
+ run_error=None,
+ finish_error=None,
+ failed_command=None,
+ command_error_recovery_types={},
+ recovery_target_command_id=None,
+ run_started_at=datetime(year=2021, month=1, day=1),
+ latest_protocol_command_hash=None,
+ stopped_by_estop=False,
+ )
+ assert subject.state.command_history.get_running_command() is None
+ assert subject.state.command_history.get_all_ids() == []
+ assert subject.state.command_history.get_queue_ids() == OrderedSet()
+ assert subject.state.command_history.get_setup_queue_ids() == OrderedSet()
+
+
+def test_command_store_ignores_finish_after_non_graceful_stop() -> None:
+ """It should no-op on finish if already ungracefully stopped."""
+ subject = CommandStore(is_door_open=False, config=_make_config())
+
+ subject.handle_action(
+ PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ )
+ )
+ subject.handle_action(StopAction())
+ subject.handle_action(FinishAction())
+
+ assert subject.state == CommandState(
+ command_history=CommandHistory(),
+ queue_status=QueueStatus.PAUSED,
+ run_result=RunResult.STOPPED,
+ run_completed_at=None,
+ is_door_blocking=False,
+ run_error=None,
+ finish_error=None,
+ failed_command=None,
+ command_error_recovery_types={},
+ recovery_target_command_id=None,
+ run_started_at=datetime(year=2021, month=1, day=1),
+ latest_protocol_command_hash=None,
+ stopped_by_estop=False,
+ )
+ assert subject.state.command_history.get_running_command() is None
+ assert subject.state.command_history.get_all_ids() == []
+ assert subject.state.command_history.get_queue_ids() == OrderedSet()
+ assert subject.state.command_history.get_setup_queue_ids() == OrderedSet()
+
+
+def test_handles_hardware_stopped() -> None:
+ """It should mark the hardware as stopped on HardwareStoppedAction."""
+ subject = CommandStore(is_door_open=False, config=_make_config())
+ completed_at = datetime(year=2021, day=1, month=1)
+ subject.handle_action(
+ HardwareStoppedAction(completed_at=completed_at, finish_error_details=None)
+ )
+
+ assert subject.state == CommandState(
+ command_history=CommandHistory(),
+ queue_status=QueueStatus.PAUSED,
+ run_result=RunResult.STOPPED,
+ run_completed_at=completed_at,
+ is_door_blocking=False,
+ run_error=None,
+ finish_error=None,
+ failed_command=None,
+ command_error_recovery_types={},
+ recovery_target_command_id=None,
+ run_started_at=None,
+ latest_protocol_command_hash=None,
+ stopped_by_estop=False,
+ )
+ assert subject.state.command_history.get_running_command() is None
+ assert subject.state.command_history.get_all_ids() == []
+ assert subject.state.command_history.get_queue_ids() == OrderedSet()
+ assert subject.state.command_history.get_setup_queue_ids() == OrderedSet()
+
+
+@pytest.mark.parametrize(
+ ("is_door_open", "config", "expected_queue_status"),
+ [
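+        # (door open at store creation, engine config, expected queue status after PlayAction)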
+ (False, _make_config(), QueueStatus.RUNNING),
+ (True, _make_config(), QueueStatus.RUNNING),
+ (False, _make_config(block_on_door_open=True), QueueStatus.RUNNING),
+ (True, _make_config(block_on_door_open=True), QueueStatus.PAUSED),
+ ],
+)
+def test_command_store_handles_play_according_to_initial_door_state(
+ is_door_open: bool,
+ config: Config,
+ expected_queue_status: QueueStatus,
+) -> None:
+ """It should set command queue state on play action according to door state."""
+ subject = CommandStore(is_door_open=is_door_open, config=config)
+ start_time = datetime(year=2021, month=1, day=1)
+ subject.handle_action(PlayAction(requested_at=start_time, deck_configuration=[]))
+
+ assert subject.state.queue_status == expected_queue_status
+ assert subject.state.run_started_at == start_time
+
+
+@pytest.mark.parametrize(
+ ("config", "expected_is_door_blocking"),
+ [
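+        # (engine config, whether an open door is expected to be reported as blocking)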
+ (_make_config(block_on_door_open=True), True),
+ (_make_config(block_on_door_open=False), False),
+ ],
+)
+def test_handles_door_open_and_close_event_before_play(
+ config: Config, expected_is_door_blocking: bool
+) -> None:
+ """It should update state but not pause on door open whenis setup."""
+ subject = CommandStore(is_door_open=False, config=config)
+
+ subject.handle_action(DoorChangeAction(door_state=DoorState.OPEN))
+
+ assert subject.state.queue_status == QueueStatus.SETUP
+ assert subject.state.is_door_blocking is expected_is_door_blocking
+
+ subject.handle_action(DoorChangeAction(door_state=DoorState.CLOSED))
+
+ assert subject.state.queue_status == QueueStatus.SETUP
+ assert subject.state.is_door_blocking is False
+
+
+@pytest.mark.parametrize(
+ ("config", "expected_queue_status", "expected_is_door_blocking"),
+ [
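+        # (engine config, expected queue status while the door is open, expected is_door_blocking)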
+ (_make_config(block_on_door_open=True), QueueStatus.PAUSED, True),
+ (_make_config(block_on_door_open=False), QueueStatus.RUNNING, False),
+ ],
+)
+def test_handles_door_open_and_close_event_after_play(
+ config: Config, expected_queue_status: QueueStatus, expected_is_door_blocking: bool
+) -> None:
+ """It should update state when door opened and closed after run is played."""
+ subject = CommandStore(is_door_open=False, config=config)
+
+ subject.handle_action(
+ PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ )
+ )
+ subject.handle_action(DoorChangeAction(door_state=DoorState.OPEN))
+
+ assert subject.state.queue_status == expected_queue_status
+ assert subject.state.is_door_blocking is expected_is_door_blocking
+
+ subject.handle_action(DoorChangeAction(door_state=DoorState.CLOSED))
+
+ assert subject.state.queue_status == expected_queue_status
+ assert subject.state.is_door_blocking is False
diff --git a/api/tests/opentrons/protocol_engine/state/test_command_view.py b/api/tests/opentrons/protocol_engine/state/test_command_view.py
deleted file mode 100644
index c52180996f1..00000000000
--- a/api/tests/opentrons/protocol_engine/state/test_command_view.py
+++ /dev/null
@@ -1,834 +0,0 @@
-"""Labware state store tests."""
-import pytest
-from contextlib import nullcontext as does_not_raise
-from datetime import datetime
-from typing import List, NamedTuple, Optional, Sequence, Type, Union
-
-from opentrons.ordered_set import OrderedSet
-
-from opentrons.protocol_engine import EngineStatus, commands as cmd, errors
-from opentrons.protocol_engine.actions import (
- PlayAction,
- PauseAction,
- PauseSource,
- StopAction,
- QueueCommandAction,
-)
-
-from opentrons.protocol_engine.state.commands import (
- CommandState,
- CommandView,
- CommandSlice,
- CommandEntry,
- CurrentCommand,
- RunResult,
- QueueStatus,
-)
-from opentrons.protocol_engine.errors import ProtocolCommandFailedError
-
-from .command_fixtures import (
- create_queued_command,
- create_running_command,
- create_failed_command,
- create_succeeded_command,
-)
-
-
-def get_command_view(
- queue_status: QueueStatus = QueueStatus.SETUP,
- run_completed_at: Optional[datetime] = None,
- run_started_at: Optional[datetime] = None,
- is_door_blocking: bool = False,
- run_result: Optional[RunResult] = None,
- running_command_id: Optional[str] = None,
- queued_command_ids: Sequence[str] = (),
- queued_setup_command_ids: Sequence[str] = (),
- run_error: Optional[errors.ErrorOccurrence] = None,
- finish_error: Optional[errors.ErrorOccurrence] = None,
- commands: Sequence[cmd.Command] = (),
- latest_command_hash: Optional[str] = None,
-) -> CommandView:
- """Get a command view test subject."""
- all_command_ids = [command.id for command in commands]
- commands_by_id = {
- command.id: CommandEntry(index=index, command=command)
- for index, command in enumerate(commands)
- }
-
- state = CommandState(
- queue_status=queue_status,
- run_completed_at=run_completed_at,
- is_door_blocking=is_door_blocking,
- run_result=run_result,
- running_command_id=running_command_id,
- queued_command_ids=OrderedSet(queued_command_ids),
- queued_setup_command_ids=OrderedSet(queued_setup_command_ids),
- run_error=run_error,
- finish_error=finish_error,
- all_command_ids=all_command_ids,
- commands_by_id=commands_by_id,
- run_started_at=run_started_at,
- latest_command_hash=latest_command_hash,
- stopped_by_estop=False,
- )
-
- return CommandView(state=state)
-
-
-def test_get_by_id() -> None:
- """It should get a command by ID from state."""
- command = create_succeeded_command(command_id="command-id")
- subject = get_command_view(commands=[command])
-
- assert subject.get("command-id") == command
-
-
-def test_get_command_bad_id() -> None:
- """It should raise if a requested command ID isn't in state."""
- command = create_succeeded_command(command_id="command-id")
- subject = get_command_view(commands=[command])
-
- with pytest.raises(errors.CommandDoesNotExistError):
- subject.get("asdfghjkl")
-
-
-def test_get_all() -> None:
- """It should get all the commands from the state."""
- command_1 = create_succeeded_command(command_id="command-id-1")
- command_2 = create_running_command(command_id="command-id-2")
- command_3 = create_queued_command(command_id="command-id-3")
-
- subject = get_command_view(commands=[command_1, command_2, command_3])
-
- assert subject.get_all() == [command_1, command_2, command_3]
-
-
-def test_get_next_to_execute_returns_first_queued() -> None:
- """It should return the next queued command ID."""
- subject = get_command_view(
- queue_status=QueueStatus.RUNNING,
- queued_command_ids=["command-id-1", "command-id-2"],
- )
-
- assert subject.get_next_to_execute() == "command-id-1"
-
-
-@pytest.mark.parametrize(
- "queue_status",
- [QueueStatus.SETUP, QueueStatus.RUNNING],
-)
-def test_get_next_to_execute_prioritizes_setup_command_queue(
- queue_status: QueueStatus,
-) -> None:
- """It should prioritize setup command queue over protocol command queue."""
- subject = get_command_view(
- queue_status=queue_status,
- queued_command_ids=["command-id-1", "command-id-2"],
- queued_setup_command_ids=["setup-command-id"],
- )
-
- assert subject.get_next_to_execute() == "setup-command-id"
-
-
-def test_get_next_to_execute_returns_none_when_no_queued() -> None:
- """It should return None if there are no queued commands."""
- subject = get_command_view(
- queue_status=QueueStatus.RUNNING,
- queued_command_ids=[],
- )
-
- assert subject.get_next_to_execute() is None
-
-
-@pytest.mark.parametrize("queue_status", [QueueStatus.SETUP, QueueStatus.PAUSED])
-def test_get_next_to_execute_returns_none_if_not_running(
- queue_status: QueueStatus,
-) -> None:
- """It should not return protocol commands if the engine is not running."""
- subject = get_command_view(
- queue_status=queue_status,
- queued_setup_command_ids=[],
- queued_command_ids=["command-id-1", "command-id-2"],
- )
- result = subject.get_next_to_execute()
-
- assert result is None
-
-
-def test_get_next_to_execute_returns_no_commands_if_paused() -> None:
- """It should not return any type of command if the engine is paused."""
- subject = get_command_view(
- queue_status=QueueStatus.PAUSED,
- queued_setup_command_ids=["setup-id-1", "setup-id-2"],
- queued_command_ids=["command-id-1", "command-id-2"],
- )
- result = subject.get_next_to_execute()
-
- assert result is None
-
-
-@pytest.mark.parametrize("run_result", RunResult)
-def test_get_next_to_execute_raises_if_stopped(run_result: RunResult) -> None:
- """It should raise if an engine stop has been requested."""
- subject = get_command_view(run_result=run_result)
-
- with pytest.raises(errors.RunStoppedError):
- subject.get_next_to_execute()
-
-
-def test_get_is_running_queue() -> None:
- """It should be able to get if the engine is running."""
- subject = get_command_view(queue_status=QueueStatus.PAUSED)
- assert subject.get_is_running() is False
-
- subject = get_command_view(queue_status=QueueStatus.RUNNING)
- assert subject.get_is_running() is True
-
- subject = get_command_view(queue_status=QueueStatus.SETUP)
- assert subject.get_is_running() is False
-
-
-def test_get_command_is_final() -> None:
- """It should be able to tell if a command is complete."""
- completed_command = create_succeeded_command(command_id="completed-command-id")
- failed_command = create_failed_command(command_id="failed-command-id")
- running_command = create_running_command(command_id="running-command-id")
- pending_command = create_queued_command(command_id="queued-command-id")
-
- subject = get_command_view(
- commands=[completed_command, failed_command, running_command, pending_command]
- )
-
- assert subject.get_command_is_final("completed-command-id") is True
- assert subject.get_command_is_final("failed-command-id") is True
- assert subject.get_command_is_final("running-command-id") is False
- assert subject.get_command_is_final("queued-command-id") is False
-
-
-@pytest.mark.parametrize("run_result", RunResult)
-def test_get_command_is_final_when_run_has_result(run_result: RunResult) -> None:
- """Queued commands are final when the run will never execute any more commands."""
- completed_command = create_succeeded_command(command_id="completed-command-id")
- failed_command = create_failed_command(command_id="failed-command-id")
- running_command = create_running_command(command_id="running-command-id")
- pending_command = create_queued_command(command_id="queued-command-id")
-
- subject = get_command_view(
- commands=[completed_command, failed_command, running_command, pending_command],
- run_result=run_result,
- )
-
- assert subject.get_command_is_final("completed-command-id") is True
- assert subject.get_command_is_final("failed-command-id") is True
- assert subject.get_command_is_final("running-command-id") is False
- assert subject.get_command_is_final("queued-command-id") is True
-
-
-def test_get_all_commands_final() -> None:
- """It should return True if no commands queued or running."""
- subject = get_command_view(queued_command_ids=[])
- assert subject.get_all_commands_final() is True
-
- subject = get_command_view(queued_command_ids=["queued-command-id"])
- assert subject.get_all_commands_final() is False
-
- subject = get_command_view(
- queued_command_ids=[], running_command_id="running-command-id"
- )
- assert subject.get_all_commands_final() is False
-
-
-def test_get_all_complete_fatal_command_failure() -> None:
- """It should raise an error if any protocol commands failed."""
- completed_command = create_succeeded_command(command_id="command-id-1")
- failed_command = create_failed_command(
- command_id="command-id-2",
- error=errors.ErrorOccurrence(
- id="some-error-id",
- errorType="PrettyBadError",
- createdAt=datetime(year=2021, month=1, day=1),
- detail="Oh no",
- errorCode="4321",
- ),
- )
-
- subject = get_command_view(
- queued_command_ids=[],
- running_command_id=None,
- commands=[completed_command, failed_command],
- )
-
- with pytest.raises(ProtocolCommandFailedError):
- subject.get_all_commands_final()
-
-
-def test_get_all_complete_setup_not_fatal() -> None:
- """It should not call setup command fatal."""
- completed_command = create_succeeded_command(command_id="command-id-1")
- failed_command = create_failed_command(
- command_id="command-id-2",
- intent=cmd.CommandIntent.SETUP,
- error=errors.ErrorOccurrence(
- id="some-error-id",
- errorType="PrettyBadError",
- createdAt=datetime(year=2021, month=1, day=1),
- detail="Oh no",
- errorCode="4321",
- ),
- )
-
- subject = get_command_view(
- queued_command_ids=[],
- running_command_id=None,
- commands=[completed_command, failed_command],
- )
-
- result = subject.get_all_commands_final()
- assert result is True
-
-
-def test_get_is_stopped() -> None:
- """It should return true if stop requested and no command running."""
- subject = get_command_view(run_completed_at=None)
- assert subject.get_is_stopped() is False
-
- subject = get_command_view(run_completed_at=datetime(year=2021, day=1, month=1))
- assert subject.get_is_stopped() is True
-
-
-def test_get_is_started() -> None:
- """It should return true if start requested and no command running."""
- subject = get_command_view(run_started_at=None)
- assert subject.has_been_played() is False
-
- subject = get_command_view(run_started_at=datetime(year=2021, day=1, month=1))
- assert subject.has_been_played() is True
-
-
-def test_get_is_terminal() -> None:
- """It should return true if run is in a terminal state."""
- subject = get_command_view(run_result=None)
- assert subject.get_is_terminal() is False
-
- subject = get_command_view(run_result=RunResult.SUCCEEDED)
- assert subject.get_is_terminal() is True
-
-
-class ActionAllowedSpec(NamedTuple):
- """Spec data to test CommandView.validate_action_allowed."""
-
- subject: CommandView
- action: Union[PlayAction, PauseAction, StopAction, QueueCommandAction]
- expected_error: Optional[Type[errors.ProtocolEngineError]]
-
-
-action_allowed_specs: List[ActionAllowedSpec] = [
- # play is allowed if the engine is idle
- ActionAllowedSpec(
- subject=get_command_view(queue_status=QueueStatus.SETUP),
- action=PlayAction(requested_at=datetime(year=2021, month=1, day=1)),
- expected_error=None,
- ),
- # play is allowed if engine is idle, even if door is blocking
- ActionAllowedSpec(
- subject=get_command_view(is_door_blocking=True, queue_status=QueueStatus.SETUP),
- action=PlayAction(requested_at=datetime(year=2021, month=1, day=1)),
- expected_error=None,
- ),
- # play is allowed if the engine is paused
- ActionAllowedSpec(
- subject=get_command_view(queue_status=QueueStatus.PAUSED),
- action=PlayAction(requested_at=datetime(year=2021, month=1, day=1)),
- expected_error=None,
- ),
- # pause is allowed if the engine is running
- ActionAllowedSpec(
- subject=get_command_view(queue_status=QueueStatus.RUNNING),
- action=PauseAction(source=PauseSource.CLIENT),
- expected_error=None,
- ),
- # stop is usually allowed
- ActionAllowedSpec(
- subject=get_command_view(),
- action=StopAction(),
- expected_error=None,
- ),
- # queue command is allowed during setup
- ActionAllowedSpec(
- subject=get_command_view(queue_status=QueueStatus.SETUP),
- action=QueueCommandAction(
- request=cmd.HomeCreate(params=cmd.HomeParams()),
- request_hash=None,
- command_id="command-id",
- created_at=datetime(year=2021, month=1, day=1),
- ),
- expected_error=None,
- ),
- # play is disallowed if paused and door is blocking
- ActionAllowedSpec(
- subject=get_command_view(
- is_door_blocking=True, queue_status=QueueStatus.PAUSED
- ),
- action=PlayAction(requested_at=datetime(year=2021, month=1, day=1)),
- expected_error=errors.RobotDoorOpenError,
- ),
- # play is disallowed if stop has been requested
- ActionAllowedSpec(
- subject=get_command_view(run_result=RunResult.STOPPED),
- action=PlayAction(requested_at=datetime(year=2021, month=1, day=1)),
- expected_error=errors.RunStoppedError,
- ),
- # pause is disallowed if stop has been requested
- ActionAllowedSpec(
- subject=get_command_view(run_result=RunResult.STOPPED),
- action=PauseAction(source=PauseSource.CLIENT),
- expected_error=errors.RunStoppedError,
- ),
- # pause is disallowed if engine is not running
- ActionAllowedSpec(
- subject=get_command_view(queue_status=QueueStatus.SETUP),
- action=PauseAction(source=PauseSource.CLIENT),
- expected_error=errors.PauseNotAllowedError,
- ),
- # pause is disallowed if engine is already paused
- ActionAllowedSpec(
- subject=get_command_view(queue_status=QueueStatus.PAUSED),
- action=PauseAction(source=PauseSource.CLIENT),
- expected_error=errors.PauseNotAllowedError,
- ),
- # stop is disallowed if stop has already been requested
- ActionAllowedSpec(
- subject=get_command_view(run_result=RunResult.STOPPED),
- action=StopAction(),
- expected_error=errors.RunStoppedError,
- ),
- # queue command action is disallowed if stop has already been requested
- ActionAllowedSpec(
- subject=get_command_view(run_result=RunResult.STOPPED),
- action=QueueCommandAction(
- request=cmd.HomeCreate(params=cmd.HomeParams()),
- request_hash=None,
- command_id="command-id",
- created_at=datetime(year=2021, month=1, day=1),
- ),
- expected_error=errors.RunStoppedError,
- ),
- # queue setup command is disallowed if paused
- ActionAllowedSpec(
- subject=get_command_view(queue_status=QueueStatus.PAUSED),
- action=QueueCommandAction(
- request=cmd.HomeCreate(
- params=cmd.HomeParams(),
- intent=cmd.CommandIntent.SETUP,
- ),
- request_hash=None,
- command_id="command-id",
- created_at=datetime(year=2021, month=1, day=1),
- ),
- expected_error=errors.SetupCommandNotAllowedError,
- ),
- # queue setup command is disallowed if running
- ActionAllowedSpec(
- subject=get_command_view(queue_status=QueueStatus.RUNNING),
- action=QueueCommandAction(
- request=cmd.HomeCreate(
- params=cmd.HomeParams(),
- intent=cmd.CommandIntent.SETUP,
- ),
- request_hash=None,
- command_id="command-id",
- created_at=datetime(year=2021, month=1, day=1),
- ),
- expected_error=errors.SetupCommandNotAllowedError,
- ),
-]
-
-
-@pytest.mark.parametrize(ActionAllowedSpec._fields, action_allowed_specs)
-def test_validate_action_allowed(
- subject: CommandView,
- action: Union[PlayAction, PauseAction, StopAction],
- expected_error: Optional[Type[errors.ProtocolEngineError]],
-) -> None:
- """It should validate allowed play/pause/stop actions."""
- expectation = pytest.raises(expected_error) if expected_error else does_not_raise()
-
- with expectation: # type: ignore[attr-defined]
- result = subject.validate_action_allowed(action)
-
- if expected_error is None:
- assert result == action
-
-
-def test_get_errors() -> None:
- """It should be able to pull all ErrorOccurrences from the store."""
- run_error = errors.ErrorOccurrence(
- id="error-1",
- createdAt=datetime(year=2021, month=1, day=1),
- errorType="ReallyBadError",
- detail="things could not get worse",
- errorCode="4321",
- )
- finish_error = errors.ErrorOccurrence(
- id="error-2",
- createdAt=datetime(year=2022, month=2, day=2),
- errorType="EvenWorseError",
- detail="things got worse",
- errorCode="1234",
- )
-
- no_error_subject = get_command_view()
- assert no_error_subject.get_error() is None
-
- just_run_error_subject = get_command_view(run_error=run_error)
- assert just_run_error_subject.get_error() == run_error
-
- just_finish_error_subject = get_command_view(finish_error=finish_error)
- assert just_finish_error_subject.get_error() == finish_error
-
- both_errors_subject = get_command_view(
- run_error=run_error, finish_error=finish_error
- )
- both_errors_result = both_errors_subject.get_error()
- assert both_errors_result is not None
- assert both_errors_result.wrappedErrors == [run_error, finish_error]
-
-
-class GetStatusSpec(NamedTuple):
- """Spec data for get_status tests."""
-
- subject: CommandView
- expected_status: EngineStatus
-
-
-get_status_specs: List[GetStatusSpec] = [
- GetStatusSpec(
- subject=get_command_view(
- queue_status=QueueStatus.RUNNING,
- running_command_id=None,
- queued_command_ids=[],
- ),
- expected_status=EngineStatus.RUNNING,
- ),
- GetStatusSpec(
- subject=get_command_view(
- queue_status=QueueStatus.PAUSED,
- run_result=RunResult.SUCCEEDED,
- run_completed_at=None,
- ),
- expected_status=EngineStatus.FINISHING,
- ),
- GetStatusSpec(
- subject=get_command_view(
- queue_status=QueueStatus.PAUSED,
- run_result=RunResult.FAILED,
- run_completed_at=None,
- ),
- expected_status=EngineStatus.FINISHING,
- ),
- GetStatusSpec(
- subject=get_command_view(
- queue_status=QueueStatus.PAUSED,
- ),
- expected_status=EngineStatus.PAUSED,
- ),
- GetStatusSpec(
- subject=get_command_view(
- run_result=RunResult.FAILED,
- run_completed_at=datetime(year=2021, day=1, month=1),
- ),
- expected_status=EngineStatus.FAILED,
- ),
- GetStatusSpec(
- subject=get_command_view(
- run_result=RunResult.SUCCEEDED,
- run_completed_at=datetime(year=2021, day=1, month=1),
- finish_error=errors.ErrorOccurrence(
- id="finish-error-id",
- errorType="finish-error-type",
- createdAt=datetime(year=2021, day=1, month=1),
- detail="finish-error-detail",
- ),
- ),
- expected_status=EngineStatus.FAILED,
- ),
- GetStatusSpec(
- subject=get_command_view(
- run_result=RunResult.SUCCEEDED,
- run_completed_at=datetime(year=2021, day=1, month=1),
- ),
- expected_status=EngineStatus.SUCCEEDED,
- ),
- GetStatusSpec(
- subject=get_command_view(
- run_result=RunResult.STOPPED,
- run_completed_at=None,
- ),
- expected_status=EngineStatus.STOP_REQUESTED,
- ),
- GetStatusSpec(
- subject=get_command_view(
- run_result=RunResult.STOPPED,
- run_completed_at=datetime(year=2021, day=1, month=1),
- ),
- expected_status=EngineStatus.STOPPED,
- ),
- GetStatusSpec(
- subject=get_command_view(
- queue_status=QueueStatus.PAUSED,
- is_door_blocking=True,
- ),
- expected_status=EngineStatus.BLOCKED_BY_OPEN_DOOR,
- ),
- GetStatusSpec(
- subject=get_command_view(
- queue_status=QueueStatus.SETUP,
- is_door_blocking=True,
- ),
- expected_status=EngineStatus.IDLE,
- ),
- GetStatusSpec(
- subject=get_command_view(
- queue_status=QueueStatus.PAUSED,
- is_door_blocking=False,
- run_completed_at=datetime(year=2021, day=1, month=1),
- ),
- expected_status=EngineStatus.PAUSED,
- ),
- GetStatusSpec(
- subject=get_command_view(
- queue_status=QueueStatus.SETUP,
- running_command_id="command-id",
- queued_command_ids=["command-id-1"],
- queued_setup_command_ids=["command-id-2"],
- ),
- expected_status=EngineStatus.IDLE,
- ),
-]
-
-
-@pytest.mark.parametrize(GetStatusSpec._fields, get_status_specs)
-def test_get_status(subject: CommandView, expected_status: EngineStatus) -> None:
- """It should set a status according to the command queue and running flag."""
- assert subject.get_status() == expected_status
-
-
-class GetOkayToClearSpec(NamedTuple):
- """Spec data for get_status tests."""
-
- subject: CommandView
- expected_is_okay: bool
-
-
-get_okay_to_clear_specs: List[GetOkayToClearSpec] = [
- GetOkayToClearSpec(
- # Protocol not played yet, no commands queued or ran yet
- subject=get_command_view(
- queue_status=QueueStatus.SETUP,
- running_command_id=None,
- queued_command_ids=[],
- queued_setup_command_ids=[],
- ),
- expected_is_okay=True,
- ),
- GetOkayToClearSpec(
- # Protocol commands are queued but not played yet,
- # no setup commands queued or running
- subject=get_command_view(
- queue_status=QueueStatus.SETUP,
- running_command_id=None,
- queued_setup_command_ids=[],
- queued_command_ids=["command-id"],
- commands=[create_queued_command(command_id="command-id")],
- ),
- expected_is_okay=True,
- ),
- GetOkayToClearSpec(
- # Protocol not played yet, setup commands are queued
- subject=get_command_view(
- queue_status=QueueStatus.SETUP,
- running_command_id=None,
- queued_setup_command_ids=["command-id"],
- commands=[create_queued_command(command_id="command-id")],
- ),
- expected_is_okay=False,
- ),
- GetOkayToClearSpec(
- # Protocol is stopped
- subject=get_command_view(
- run_completed_at=datetime(year=2021, day=1, month=1),
- ),
- expected_is_okay=True,
- ),
-]
-
-
-@pytest.mark.parametrize(GetOkayToClearSpec._fields, get_okay_to_clear_specs)
-def test_get_okay_to_clear(subject: CommandView, expected_is_okay: bool) -> None:
- """It should report whether an engine is ok to clear."""
- assert subject.get_is_okay_to_clear() is expected_is_okay
-
-
-def test_get_current() -> None:
- """It should return the "current" command."""
- subject = get_command_view(
- running_command_id=None,
- queued_command_ids=[],
- )
- assert subject.get_current() is None
-
- command = create_running_command(
- "command-id",
- command_key="command-key",
- created_at=datetime(year=2021, month=1, day=1),
- )
- subject = get_command_view(
- running_command_id="command-id",
- queued_command_ids=[],
- commands=[command],
- )
- assert subject.get_current() == CurrentCommand(
- index=0,
- command_id="command-id",
- command_key="command-key",
- created_at=datetime(year=2021, month=1, day=1),
- )
-
- command_1 = create_succeeded_command(
- "command-id-1",
- command_key="key-1",
- created_at=datetime(year=2021, month=1, day=1),
- )
- command_2 = create_succeeded_command(
- "command-id-2",
- command_key="key-2",
- created_at=datetime(year=2022, month=2, day=2),
- )
- subject = get_command_view(commands=[command_1, command_2])
- assert subject.get_current() == CurrentCommand(
- index=1,
- command_id="command-id-2",
- command_key="key-2",
- created_at=datetime(year=2022, month=2, day=2),
- )
-
- command_1 = create_succeeded_command(
- "command-id-1",
- command_key="key-1",
- created_at=datetime(year=2021, month=1, day=1),
- )
- command_2 = create_failed_command(
- "command-id-2",
- command_key="key-2",
- created_at=datetime(year=2022, month=2, day=2),
- )
- subject = get_command_view(commands=[command_1, command_2])
- assert subject.get_current() == CurrentCommand(
- index=1,
- command_id="command-id-2",
- command_key="key-2",
- created_at=datetime(year=2022, month=2, day=2),
- )
-
-
-def test_get_slice_empty() -> None:
- """It should return a slice from the tail if no current command."""
- subject = get_command_view(commands=[])
- result = subject.get_slice(cursor=None, length=2)
-
- assert result == CommandSlice(commands=[], cursor=0, total_length=0)
-
-
-def test_get_slice() -> None:
- """It should return a slice of all commands."""
- command_1 = create_succeeded_command(command_id="command-id-1")
- command_2 = create_running_command(command_id="command-id-2")
- command_3 = create_queued_command(command_id="command-id-3")
- command_4 = create_queued_command(command_id="command-id-4")
-
- subject = get_command_view(commands=[command_1, command_2, command_3, command_4])
-
- result = subject.get_slice(cursor=1, length=3)
-
- assert result == CommandSlice(
- commands=[command_2, command_3, command_4],
- cursor=1,
- total_length=4,
- )
-
- result = subject.get_slice(cursor=-3, length=10)
-
- assert result == CommandSlice(
- commands=[command_1, command_2, command_3, command_4],
- cursor=0,
- total_length=4,
- )
-
-
-def test_get_slice_default_cursor_no_current() -> None:
- """It should return a slice from the tail if no current command."""
- command_1 = create_succeeded_command(command_id="command-id-1")
- command_2 = create_succeeded_command(command_id="command-id-2")
- command_3 = create_succeeded_command(command_id="command-id-3")
- command_4 = create_succeeded_command(command_id="command-id-4")
-
- subject = get_command_view(commands=[command_1, command_2, command_3, command_4])
-
- result = subject.get_slice(cursor=None, length=3)
-
- assert result == CommandSlice(
- commands=[command_2, command_3, command_4],
- cursor=1,
- total_length=4,
- )
-
-
-def test_get_slice_default_cursor_running() -> None:
- """It should select a cursor based on the running command, if present."""
- command_1 = create_succeeded_command(command_id="command-id-1")
- command_2 = create_succeeded_command(command_id="command-id-2")
- command_3 = create_running_command(command_id="command-id-3")
- command_4 = create_queued_command(command_id="command-id-4")
- command_5 = create_queued_command(command_id="command-id-5")
-
- subject = get_command_view(
- commands=[command_1, command_2, command_3, command_4, command_5],
- running_command_id="command-id-3",
- )
-
- result = subject.get_slice(cursor=None, length=2)
-
- assert result == CommandSlice(
- commands=[command_3, command_4],
- cursor=2,
- total_length=5,
- )
-
-
-def test_get_slice_default_cursor_queued() -> None:
- """It should select a cursor based on the next queued command, if present."""
- command_1 = create_succeeded_command(command_id="command-id-1")
- command_2 = create_succeeded_command(command_id="command-id-2")
- command_3 = create_succeeded_command(command_id="command-id-3")
- command_4 = create_queued_command(command_id="command-id-4")
- command_5 = create_queued_command(command_id="command-id-5")
-
- subject = get_command_view(
- commands=[command_1, command_2, command_3, command_4, command_5],
- running_command_id=None,
- queued_command_ids=["command-id-4", "command-id-4", "command-id-5"],
- )
-
- result = subject.get_slice(cursor=None, length=2)
-
- assert result == CommandSlice(
- commands=[command_3, command_4],
- cursor=2,
- total_length=5,
- )
-
-
-def test_get_latest_command_hash() -> None:
- """It should get the latest command hash from state, if set."""
- subject = get_command_view(latest_command_hash="abc123")
- assert subject.get_latest_command_hash() == "abc123"
diff --git a/api/tests/opentrons/protocol_engine/state/test_command_view_old.py b/api/tests/opentrons/protocol_engine/state/test_command_view_old.py
new file mode 100644
index 00000000000..19a2515a3e6
--- /dev/null
+++ b/api/tests/opentrons/protocol_engine/state/test_command_view_old.py
@@ -0,0 +1,1028 @@
+"""Tests for CommandView.
+
+DEPRECATED: Testing CommandView independently of CommandStore is no longer helpful.
+Add new tests to test_command_state.py, where they can be tested together.
+"""
+
+
+import pytest
+from contextlib import nullcontext as does_not_raise
+from datetime import datetime
+from typing import Dict, List, NamedTuple, Optional, Sequence, Type, Union
+
+from opentrons.protocol_engine import EngineStatus, commands as cmd, errors
+from opentrons.protocol_engine.actions import (
+ PlayAction,
+ PauseAction,
+ PauseSource,
+ StopAction,
+ QueueCommandAction,
+)
+from opentrons.protocol_engine.actions.actions import ResumeFromRecoveryAction
+
+from opentrons.protocol_engine.error_recovery_policy import ErrorRecoveryType
+from opentrons.protocol_engine.state.commands import (
+ CommandState,
+ CommandView,
+ CommandSlice,
+ CurrentCommand,
+ RunResult,
+ QueueStatus,
+)
+
+from opentrons.protocol_engine.state.command_history import CommandEntry
+
+from opentrons.protocol_engine.errors import ProtocolCommandFailedError, ErrorOccurrence
+
+from opentrons_shared_data.errors.codes import ErrorCodes
+
+from opentrons.protocol_engine.state.command_history import CommandHistory
+
+from .command_fixtures import (
+ create_queued_command,
+ create_running_command,
+ create_failed_command,
+ create_succeeded_command,
+)
+
+
+def get_command_view( # noqa: C901
+ queue_status: QueueStatus = QueueStatus.SETUP,
+ run_completed_at: Optional[datetime] = None,
+ run_started_at: Optional[datetime] = None,
+ is_door_blocking: bool = False,
+ run_result: Optional[RunResult] = None,
+ running_command_id: Optional[str] = None,
+ queued_command_ids: Sequence[str] = (),
+ queued_setup_command_ids: Sequence[str] = (),
+ queued_fixit_command_ids: Sequence[str] = (),
+ run_error: Optional[errors.ErrorOccurrence] = None,
+ failed_command: Optional[CommandEntry] = None,
+ command_error_recovery_types: Optional[Dict[str, ErrorRecoveryType]] = None,
+ recovery_target_command_id: Optional[str] = None,
+ finish_error: Optional[errors.ErrorOccurrence] = None,
+ commands: Sequence[cmd.Command] = (),
+ latest_command_hash: Optional[str] = None,
+) -> CommandView:
+ """Get a command view test subject."""
+ command_history = CommandHistory()
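+    # Seed CommandHistory through its private helpers so the view can be built
+    # without replaying actions through CommandStore.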
+
+ if running_command_id:
+ command_history._set_running_command_id(running_command_id)
+ if queued_command_ids:
+ for command_id in queued_command_ids:
+ command_history._add_to_queue(command_id)
+ if queued_setup_command_ids:
+ for command_id in queued_setup_command_ids:
+ command_history._add_to_setup_queue(command_id)
+ if queued_fixit_command_ids:
+ for command_id in queued_fixit_command_ids:
+ command_history._add_to_fixit_queue(command_id)
+ if commands:
+ for index, command in enumerate(commands):
+ command_history._add(
+ command_id=command.id,
+ command_entry=CommandEntry(index=index, command=command),
+ )
+
+ state = CommandState(
+ command_history=command_history,
+ queue_status=queue_status,
+ run_completed_at=run_completed_at,
+ is_door_blocking=is_door_blocking,
+ run_result=run_result,
+ run_error=run_error,
+ finish_error=finish_error,
+ failed_command=failed_command,
+ command_error_recovery_types=command_error_recovery_types or {},
+ recovery_target_command_id=recovery_target_command_id,
+ run_started_at=run_started_at,
+ latest_protocol_command_hash=latest_command_hash,
+ stopped_by_estop=False,
+ )
+
+ return CommandView(state=state)
+
+
+def test_get_by_id() -> None:
+ """It should get a command by ID from state."""
+ command = create_succeeded_command(command_id="command-id")
+ subject = get_command_view(commands=[command])
+
+ assert subject.get("command-id") == command
+
+
+def test_get_command_bad_id() -> None:
+ """It should raise if a requested command ID isn't in state."""
+ command = create_succeeded_command(command_id="command-id")
+ subject = get_command_view(commands=[command])
+
+ with pytest.raises(errors.CommandDoesNotExistError):
+ subject.get("asdfghjkl")
+
+
+def test_get_all() -> None:
+ """It should get all the commands from the state."""
+ command_1 = create_succeeded_command(command_id="command-id-1")
+ command_2 = create_running_command(command_id="command-id-2")
+ command_3 = create_queued_command(command_id="command-id-3")
+
+ subject = get_command_view(commands=[command_1, command_2, command_3])
+
+ assert subject.get_all() == [command_1, command_2, command_3]
+
+
+def test_get_next_to_execute_returns_first_queued() -> None:
+ """It should return the next queued command ID."""
+ subject = get_command_view(
+ queue_status=QueueStatus.RUNNING,
+ queued_command_ids=["command-id-1", "command-id-2"],
+ queued_fixit_command_ids=["fixit-id-1", "fixit-id-2"],
+ )
+
+ assert subject.get_next_to_execute() == "command-id-1"
+
+
+@pytest.mark.parametrize(
+ "queue_status",
+ [QueueStatus.SETUP, QueueStatus.RUNNING],
+)
+def test_get_next_to_execute_prioritizes_setup_command_queue(
+ queue_status: QueueStatus,
+) -> None:
+ """It should prioritize setup command queue over protocol command queue."""
+ subject = get_command_view(
+ queue_status=queue_status,
+ queued_command_ids=["command-id-1", "command-id-2"],
+ queued_setup_command_ids=["setup-command-id"],
+ )
+
+ assert subject.get_next_to_execute() == "setup-command-id"
+
+
+@pytest.mark.parametrize(
+ "queue_status",
+ [QueueStatus.AWAITING_RECOVERY],
+)
+def test_get_next_to_execute_prioritizes_fixit_command_queue(
+ queue_status: QueueStatus,
+) -> None:
+ """It should prioritize fixit command queue over protocol command queue."""
+ subject = get_command_view(
+ queue_status=queue_status,
+ queued_command_ids=["command-id-1", "command-id-2"],
+ queued_setup_command_ids=["setup-command-id"],
+ queued_fixit_command_ids=["fixit-1", "fixit-2"],
+ )
+
+ assert subject.get_next_to_execute() == "fixit-1"
+
+
+def test_get_next_to_execute_returns_none_when_no_queued() -> None:
+ """It should return None if there are no queued commands."""
+ subject = get_command_view(
+ queue_status=QueueStatus.RUNNING,
+ queued_command_ids=[],
+ )
+
+ assert subject.get_next_to_execute() is None
+
+
+@pytest.mark.parametrize("queue_status", [QueueStatus.SETUP, QueueStatus.PAUSED])
+def test_get_next_to_execute_returns_none_if_not_running(
+ queue_status: QueueStatus,
+) -> None:
+ """It should not return protocol commands if the engine is not running."""
+ subject = get_command_view(
+ queue_status=queue_status,
+ queued_setup_command_ids=[],
+ queued_command_ids=["command-id-1", "command-id-2"],
+ )
+ result = subject.get_next_to_execute()
+
+ assert result is None
+
+
+def test_get_next_to_execute_returns_no_commands_if_paused() -> None:
+ """It should not return any type of command if the engine is paused."""
+ subject = get_command_view(
+ queue_status=QueueStatus.PAUSED,
+ queued_setup_command_ids=["setup-id-1", "setup-id-2"],
+ queued_command_ids=["command-id-1", "command-id-2"],
+ queued_fixit_command_ids=["fixit-id-1", "fixit-id-2"],
+ )
+ result = subject.get_next_to_execute()
+
+ assert result is None
+
+
+def test_get_next_to_execute_returns_no_commands_if_awaiting_recovery_no_fixit() -> None:
+ """It should not return any type of command if the engine is awaiting-recovery."""
+ subject = get_command_view(
+ queue_status=QueueStatus.AWAITING_RECOVERY,
+ queued_setup_command_ids=["setup-id-1", "setup-id-2"],
+ queued_command_ids=["command-id-1", "command-id-2"],
+ queued_fixit_command_ids=[],
+ )
+ result = subject.get_next_to_execute()
+
+ assert result is None
+
+
+@pytest.mark.parametrize("run_result", RunResult)
+def test_get_next_to_execute_raises_if_stopped(run_result: RunResult) -> None:
+ """It should raise if an engine stop has been requested."""
+ subject = get_command_view(run_result=run_result)
+
+ with pytest.raises(errors.RunStoppedError):
+ subject.get_next_to_execute()
+
+
+def test_get_is_running_queue() -> None:
+ """It should be able to get if the engine is running."""
+ subject = get_command_view(queue_status=QueueStatus.PAUSED)
+ assert subject.get_is_running() is False
+
+ subject = get_command_view(queue_status=QueueStatus.RUNNING)
+ assert subject.get_is_running() is True
+
+ subject = get_command_view(queue_status=QueueStatus.SETUP)
+ assert subject.get_is_running() is False
+
+
+def test_get_command_is_final() -> None:
+ """It should be able to tell if a command is complete."""
+ completed_command = create_succeeded_command(command_id="completed-command-id")
+ failed_command = create_failed_command(command_id="failed-command-id")
+ running_command = create_running_command(command_id="running-command-id")
+ pending_command = create_queued_command(command_id="queued-command-id")
+
+ subject = get_command_view(
+ commands=[completed_command, failed_command, running_command, pending_command]
+ )
+
+ assert subject.get_command_is_final("completed-command-id") is True
+ assert subject.get_command_is_final("failed-command-id") is True
+ assert subject.get_command_is_final("running-command-id") is False
+ assert subject.get_command_is_final("queued-command-id") is False
+
+
+@pytest.mark.parametrize("run_result", RunResult)
+def test_get_command_is_final_when_run_has_result(run_result: RunResult) -> None:
+ """Queued commands are final when the run will never execute any more commands."""
+ completed_command = create_succeeded_command(command_id="completed-command-id")
+ failed_command = create_failed_command(command_id="failed-command-id")
+ running_command = create_running_command(command_id="running-command-id")
+ pending_command = create_queued_command(command_id="queued-command-id")
+
+ subject = get_command_view(
+ commands=[completed_command, failed_command, running_command, pending_command],
+ run_result=run_result,
+ )
+
+ assert subject.get_command_is_final("completed-command-id") is True
+ assert subject.get_command_is_final("failed-command-id") is True
+ assert subject.get_command_is_final("running-command-id") is False
+ assert subject.get_command_is_final("queued-command-id") is True
+
+
+def test_get_all_commands_final() -> None:
+ """It should return True if no commands queued or running."""
+ running_command = create_running_command(command_id="running-command-id")
+
+ subject = get_command_view(queued_command_ids=[])
+ assert subject.get_all_commands_final() is True
+
+ subject = get_command_view(queued_command_ids=["queued-command-id"])
+ assert subject.get_all_commands_final() is False
+
+ subject = get_command_view(
+ queued_command_ids=[],
+ running_command_id="running-command-id",
+ commands=[running_command],
+ )
+ assert subject.get_all_commands_final() is False
+
+
+def test_raise_fatal_command_error() -> None:
+ """It should raise the fatal command error."""
+ completed_command = create_succeeded_command(command_id="command-id-1")
+ failed_command = create_failed_command(
+ command_id="command-id-2",
+ error=errors.ErrorOccurrence(
+ id="some-error-id",
+ errorType="PrettyBadError",
+ createdAt=datetime(year=2021, month=1, day=1),
+ detail="Oh no",
+ errorCode="4321",
+ ),
+ )
+
+ subject = get_command_view(
+ queued_command_ids=[],
+ running_command_id=None,
+ failed_command=CommandEntry(index=1, command=failed_command),
+ commands=[completed_command, failed_command],
+ )
+
+ with pytest.raises(ProtocolCommandFailedError):
+ subject.raise_fatal_command_error()
+
+
+def test_raise_fatal_command_error_tolerates_failed_setup_commands() -> None:
+ """It should not call setup command fatal."""
+ completed_command = create_succeeded_command(command_id="command-id-1")
+ failed_command = create_failed_command(
+ command_id="command-id-2",
+ intent=cmd.CommandIntent.SETUP,
+ error=errors.ErrorOccurrence(
+ id="some-error-id",
+ errorType="PrettyBadError",
+ createdAt=datetime(year=2021, month=1, day=1),
+ detail="Oh no",
+ errorCode="4321",
+ ),
+ )
+
+ subject = get_command_view(
+ queued_command_ids=[],
+ running_command_id=None,
+ commands=[completed_command, failed_command],
+ )
+
+ subject.raise_fatal_command_error() # Should not raise.
+
+
+def test_get_is_stopped() -> None:
+ """It should return true if stop requested and no command running."""
+ subject = get_command_view(run_completed_at=None)
+ assert subject.get_is_stopped() is False
+
+ subject = get_command_view(run_completed_at=datetime(year=2021, day=1, month=1))
+ assert subject.get_is_stopped() is True
+
+
+def test_get_is_started() -> None:
+ """It should return true if start requested and no command running."""
+ subject = get_command_view(run_started_at=None)
+ assert subject.has_been_played() is False
+
+ subject = get_command_view(run_started_at=datetime(year=2021, day=1, month=1))
+ assert subject.has_been_played() is True
+
+
+def test_get_is_terminal() -> None:
+ """It should return true if run is in a terminal state."""
+ subject = get_command_view(run_result=None)
+ assert subject.get_is_terminal() is False
+
+ subject = get_command_view(run_result=RunResult.SUCCEEDED)
+ assert subject.get_is_terminal() is True
+
+
+class ActionAllowedSpec(NamedTuple):
+ """Spec data to test CommandView.validate_action_allowed."""
+
+ subject: CommandView
+ action: Union[
+ PlayAction,
+ PauseAction,
+ StopAction,
+ QueueCommandAction,
+ ResumeFromRecoveryAction,
+ ]
+ expected_error: Optional[Type[Exception]]
+
+
+action_allowed_specs: List[ActionAllowedSpec] = [
+ # play is allowed if the engine is idle
+ ActionAllowedSpec(
+ subject=get_command_view(queue_status=QueueStatus.SETUP),
+ action=PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ ),
+ expected_error=None,
+ ),
+ # play is allowed if engine is idle, even if door is blocking
+ ActionAllowedSpec(
+ subject=get_command_view(is_door_blocking=True, queue_status=QueueStatus.SETUP),
+ action=PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ ),
+ expected_error=None,
+ ),
+ # play is allowed if the engine is paused
+ ActionAllowedSpec(
+ subject=get_command_view(queue_status=QueueStatus.PAUSED),
+ action=PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ ),
+ expected_error=None,
+ ),
+ # pause is allowed if the engine is running
+ ActionAllowedSpec(
+ subject=get_command_view(queue_status=QueueStatus.RUNNING),
+ action=PauseAction(source=PauseSource.CLIENT),
+ expected_error=None,
+ ),
+ # stop is usually allowed
+ ActionAllowedSpec(
+ subject=get_command_view(),
+ action=StopAction(),
+ expected_error=None,
+ ),
+ # queue command is allowed during setup
+ ActionAllowedSpec(
+ subject=get_command_view(queue_status=QueueStatus.SETUP),
+ action=QueueCommandAction(
+ request=cmd.HomeCreate(params=cmd.HomeParams()),
+ request_hash=None,
+ command_id="command-id",
+ created_at=datetime(year=2021, month=1, day=1),
+ ),
+ expected_error=None,
+ ),
+ # play is disallowed if paused and door is blocking
+ ActionAllowedSpec(
+ subject=get_command_view(
+ is_door_blocking=True, queue_status=QueueStatus.PAUSED
+ ),
+ action=PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ ),
+ expected_error=errors.RobotDoorOpenError,
+ ),
+ # play is disallowed if stop has been requested
+ ActionAllowedSpec(
+ subject=get_command_view(run_result=RunResult.STOPPED),
+ action=PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ ),
+ expected_error=errors.RunStoppedError,
+ ),
+ # pause is disallowed if stop has been requested
+ ActionAllowedSpec(
+ subject=get_command_view(run_result=RunResult.STOPPED),
+ action=PauseAction(source=PauseSource.CLIENT),
+ expected_error=errors.RunStoppedError,
+ ),
+ # pause is disallowed if engine is not running
+ ActionAllowedSpec(
+ subject=get_command_view(queue_status=QueueStatus.SETUP),
+ action=PauseAction(source=PauseSource.CLIENT),
+ expected_error=errors.PauseNotAllowedError,
+ ),
+ # pause is disallowed if engine is already paused
+ ActionAllowedSpec(
+ subject=get_command_view(queue_status=QueueStatus.PAUSED),
+ action=PauseAction(source=PauseSource.CLIENT),
+ expected_error=errors.PauseNotAllowedError,
+ ),
+ # stop is disallowed if stop has already been requested
+ ActionAllowedSpec(
+ subject=get_command_view(run_result=RunResult.STOPPED),
+ action=StopAction(),
+ expected_error=errors.RunStoppedError,
+ ),
+ # queue command action is disallowed if stop has already been requested
+ ActionAllowedSpec(
+ subject=get_command_view(run_result=RunResult.STOPPED),
+ action=QueueCommandAction(
+ request=cmd.HomeCreate(params=cmd.HomeParams()),
+ request_hash=None,
+ command_id="command-id",
+ created_at=datetime(year=2021, month=1, day=1),
+ ),
+ expected_error=errors.RunStoppedError,
+ ),
+ # queue setup command is disallowed if paused
+ ActionAllowedSpec(
+ subject=get_command_view(queue_status=QueueStatus.PAUSED),
+ action=QueueCommandAction(
+ request=cmd.HomeCreate(
+ params=cmd.HomeParams(),
+ intent=cmd.CommandIntent.SETUP,
+ ),
+ request_hash=None,
+ command_id="command-id",
+ created_at=datetime(year=2021, month=1, day=1),
+ ),
+ expected_error=errors.SetupCommandNotAllowedError,
+ ),
+ # queue setup command is disallowed if running
+ ActionAllowedSpec(
+ subject=get_command_view(queue_status=QueueStatus.RUNNING),
+ action=QueueCommandAction(
+ request=cmd.HomeCreate(
+ params=cmd.HomeParams(),
+ intent=cmd.CommandIntent.SETUP,
+ ),
+ request_hash=None,
+ command_id="command-id",
+ created_at=datetime(year=2021, month=1, day=1),
+ ),
+ expected_error=errors.SetupCommandNotAllowedError,
+ ),
+ # fixit command is disallowed if not in recovery mode
+ ActionAllowedSpec(
+ subject=get_command_view(queue_status=QueueStatus.RUNNING),
+ action=QueueCommandAction(
+ request=cmd.HomeCreate(
+ params=cmd.HomeParams(),
+ intent=cmd.CommandIntent.FIXIT,
+ ),
+ request_hash=None,
+ command_id="command-id",
+ created_at=datetime(year=2021, month=1, day=1),
+ ),
+ expected_error=errors.FixitCommandNotAllowedError,
+ ),
+ ActionAllowedSpec(
+ subject=get_command_view(
+ queue_status=QueueStatus.AWAITING_RECOVERY,
+ failed_command=CommandEntry(
+ index=2,
+ command=create_failed_command(
+ command_id="command-id-3",
+ error=ErrorOccurrence(
+ id="error-id",
+ errorType="ProtocolEngineError",
+ createdAt=datetime(year=2022, month=2, day=2),
+ detail="oh no",
+ errorCode=ErrorCodes.GENERAL_ERROR.value.code,
+ ),
+ ),
+ ),
+ ),
+ action=QueueCommandAction(
+ request=cmd.HomeCreate(
+ params=cmd.HomeParams(),
+ intent=cmd.CommandIntent.FIXIT,
+ ),
+ request_hash=None,
+ command_id="command-id",
+ created_at=datetime(year=2021, month=1, day=1),
+ ),
+ expected_error=None,
+ ),
+ # resume from recovery not allowed if fixit commands in queue
+ ActionAllowedSpec(
+ subject=get_command_view(
+ queue_status=QueueStatus.AWAITING_RECOVERY,
+ queued_fixit_command_ids=["fixit-id-1", "fixit-id-2"],
+ failed_command=CommandEntry(
+ index=2,
+ command=create_failed_command(
+ command_id="command-id-3",
+ error=ErrorOccurrence(
+ id="error-id",
+ errorType="ProtocolEngineError",
+ createdAt=datetime(year=2022, month=2, day=2),
+ detail="oh no",
+ errorCode=ErrorCodes.GENERAL_ERROR.value.code,
+ ),
+ ),
+ ),
+ ),
+ action=ResumeFromRecoveryAction(),
+ expected_error=errors.ResumeFromRecoveryNotAllowedError,
+ ),
+]
+
+
+@pytest.mark.parametrize(ActionAllowedSpec._fields, action_allowed_specs)
+def test_validate_action_allowed(
+ subject: CommandView,
+ action: Union[
+ PlayAction,
+ PauseAction,
+ StopAction,
+ QueueCommandAction,
+ ResumeFromRecoveryAction,
+ ],
+ expected_error: Optional[Type[Exception]],
+) -> None:
+ """It should validate allowed play/pause/stop actions."""
+ expectation = pytest.raises(expected_error) if expected_error else does_not_raise()
+
+ with expectation:
+ result = subject.validate_action_allowed(action)
+
+ if expected_error is None:
+ assert result == action
+
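The Spec/NamedTuple table pattern used throughout this module (ActionAllowedSpec here, GetStatusSpec and GetOkayToClearSpec below) relies on pytest.mark.parametrize accepting a sequence of argument names, plus the fact that a NamedTuple is itself a tuple, so each spec row unpacks positionally into the test's arguments. A minimal, self-contained sketch of the pattern; DivisionSpec and its values are illustrative only, not part of this change:

from contextlib import nullcontext as does_not_raise
from typing import List, NamedTuple, Optional, Type

import pytest


class DivisionSpec(NamedTuple):
    """Spec data for a hypothetical division test."""

    numerator: int
    denominator: int
    expected: Optional[float]
    expected_error: Optional[Type[Exception]]


division_specs: List[DivisionSpec] = [
    DivisionSpec(numerator=6, denominator=3, expected=2.0, expected_error=None),
    DivisionSpec(
        numerator=1, denominator=0, expected=None, expected_error=ZeroDivisionError
    ),
]


# Spec._fields supplies the argument names; each NamedTuple row unpacks positionally
# into those arguments, producing one test case per spec.
@pytest.mark.parametrize(DivisionSpec._fields, division_specs)
def test_division(
    numerator: int,
    denominator: int,
    expected: Optional[float],
    expected_error: Optional[Type[Exception]],
) -> None:
    expectation = pytest.raises(expected_error) if expected_error else does_not_raise()
    with expectation:
        assert numerator / denominator == expected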
+
+def test_get_errors() -> None:
+ """It should be able to pull all ErrorOccurrences from the store."""
+ run_error = errors.ErrorOccurrence(
+ id="error-1",
+ createdAt=datetime(year=2021, month=1, day=1),
+ errorType="ReallyBadError",
+ detail="things could not get worse",
+ errorCode="4321",
+ )
+ finish_error = errors.ErrorOccurrence(
+ id="error-2",
+ createdAt=datetime(year=2022, month=2, day=2),
+ errorType="EvenWorseError",
+ detail="things got worse",
+ errorCode="1234",
+ )
+
+ no_error_subject = get_command_view()
+ assert no_error_subject.get_error() is None
+
+ just_run_error_subject = get_command_view(run_error=run_error)
+ assert just_run_error_subject.get_error() == run_error
+
+ just_finish_error_subject = get_command_view(finish_error=finish_error)
+ assert just_finish_error_subject.get_error() == finish_error
+
+ both_errors_subject = get_command_view(
+ run_error=run_error, finish_error=finish_error
+ )
+ both_errors_result = both_errors_subject.get_error()
+ assert both_errors_result is not None
+ assert both_errors_result.wrappedErrors == [run_error, finish_error]
+
+
+class GetStatusSpec(NamedTuple):
+ """Spec data for get_status tests."""
+
+ subject: CommandView
+ expected_status: EngineStatus
+
+
+get_status_specs: List[GetStatusSpec] = [
+ GetStatusSpec(
+ subject=get_command_view(
+ queue_status=QueueStatus.RUNNING,
+ running_command_id=None,
+ queued_command_ids=[],
+ ),
+ expected_status=EngineStatus.RUNNING,
+ ),
+ GetStatusSpec(
+ subject=get_command_view(
+ queue_status=QueueStatus.PAUSED,
+ run_result=RunResult.SUCCEEDED,
+ run_completed_at=None,
+ ),
+ expected_status=EngineStatus.FINISHING,
+ ),
+ GetStatusSpec(
+ subject=get_command_view(
+ queue_status=QueueStatus.PAUSED,
+ run_result=RunResult.FAILED,
+ run_completed_at=None,
+ ),
+ expected_status=EngineStatus.FINISHING,
+ ),
+ GetStatusSpec(
+ subject=get_command_view(
+ queue_status=QueueStatus.PAUSED,
+ ),
+ expected_status=EngineStatus.PAUSED,
+ ),
+ GetStatusSpec(
+ subject=get_command_view(
+ run_result=RunResult.FAILED,
+ run_completed_at=datetime(year=2021, day=1, month=1),
+ ),
+ expected_status=EngineStatus.FAILED,
+ ),
+ GetStatusSpec(
+ subject=get_command_view(
+ run_result=RunResult.SUCCEEDED,
+ run_completed_at=datetime(year=2021, day=1, month=1),
+ finish_error=errors.ErrorOccurrence(
+ id="finish-error-id",
+ errorType="finish-error-type",
+ createdAt=datetime(year=2021, day=1, month=1),
+ detail="finish-error-detail",
+ ),
+ ),
+ expected_status=EngineStatus.FAILED,
+ ),
+ GetStatusSpec(
+ subject=get_command_view(
+ run_result=RunResult.SUCCEEDED,
+ run_completed_at=datetime(year=2021, day=1, month=1),
+ ),
+ expected_status=EngineStatus.SUCCEEDED,
+ ),
+ GetStatusSpec(
+ subject=get_command_view(
+ run_result=RunResult.STOPPED,
+ run_completed_at=None,
+ ),
+ expected_status=EngineStatus.STOP_REQUESTED,
+ ),
+ GetStatusSpec(
+ subject=get_command_view(
+ run_result=RunResult.STOPPED,
+ run_completed_at=datetime(year=2021, day=1, month=1),
+ ),
+ expected_status=EngineStatus.STOPPED,
+ ),
+ GetStatusSpec(
+ subject=get_command_view(
+ queue_status=QueueStatus.PAUSED,
+ is_door_blocking=True,
+ ),
+ expected_status=EngineStatus.BLOCKED_BY_OPEN_DOOR,
+ ),
+ GetStatusSpec(
+ subject=get_command_view(
+ queue_status=QueueStatus.SETUP,
+ is_door_blocking=True,
+ ),
+ expected_status=EngineStatus.IDLE,
+ ),
+ GetStatusSpec(
+ subject=get_command_view(
+ queue_status=QueueStatus.PAUSED,
+ is_door_blocking=False,
+ run_completed_at=datetime(year=2021, day=1, month=1),
+ ),
+ expected_status=EngineStatus.PAUSED,
+ ),
+ GetStatusSpec(
+ subject=get_command_view(
+ queue_status=QueueStatus.SETUP,
+ running_command_id="command-id",
+ queued_command_ids=["command-id-1"],
+ queued_setup_command_ids=["command-id-2"],
+ ),
+ expected_status=EngineStatus.IDLE,
+ ),
+]
+
+
+@pytest.mark.parametrize(GetStatusSpec._fields, get_status_specs)
+def test_get_status(subject: CommandView, expected_status: EngineStatus) -> None:
+ """It should set a status according to the command queue and running flag."""
+ assert subject.get_status() == expected_status
+
+
+class GetOkayToClearSpec(NamedTuple):
+ """Spec data for get_status tests."""
+
+ subject: CommandView
+ expected_is_okay: bool
+
+
+get_okay_to_clear_specs: List[GetOkayToClearSpec] = [
+ GetOkayToClearSpec(
+ # Protocol not played yet, no commands queued or ran yet
+ subject=get_command_view(
+ queue_status=QueueStatus.SETUP,
+ running_command_id=None,
+ queued_command_ids=[],
+ queued_setup_command_ids=[],
+ ),
+ expected_is_okay=True,
+ ),
+ GetOkayToClearSpec(
+ # Protocol commands are queued but not played yet,
+ # no setup commands queued or running
+ subject=get_command_view(
+ queue_status=QueueStatus.SETUP,
+ running_command_id=None,
+ queued_setup_command_ids=[],
+ queued_command_ids=["command-id"],
+ commands=[create_queued_command(command_id="command-id")],
+ ),
+ expected_is_okay=True,
+ ),
+ GetOkayToClearSpec(
+ # Protocol not played yet, setup commands are queued
+ subject=get_command_view(
+ queue_status=QueueStatus.SETUP,
+ running_command_id=None,
+ queued_setup_command_ids=["command-id"],
+ commands=[create_queued_command(command_id="command-id")],
+ ),
+ expected_is_okay=False,
+ ),
+ GetOkayToClearSpec(
+ # Protocol is stopped
+ subject=get_command_view(
+ run_completed_at=datetime(year=2021, day=1, month=1),
+ ),
+ expected_is_okay=True,
+ ),
+]
+
+
+@pytest.mark.parametrize(GetOkayToClearSpec._fields, get_okay_to_clear_specs)
+def test_get_okay_to_clear(subject: CommandView, expected_is_okay: bool) -> None:
+ """It should report whether an engine is ok to clear."""
+ assert subject.get_is_okay_to_clear() is expected_is_okay
+
+
+def test_get_running_command_id() -> None:
+ """It should return the running command ID."""
+ running_command = create_running_command(command_id="command-id")
+
+ subject_with_running = get_command_view(
+ running_command_id="command-id", commands=[running_command]
+ )
+ assert subject_with_running.get_running_command_id() == "command-id"
+
+ subject_without_running = get_command_view(running_command_id=None)
+ assert subject_without_running.get_running_command_id() is None
+
+
+def test_get_current() -> None:
+ """It should return the "current" command."""
+ subject = get_command_view(
+ running_command_id=None,
+ queued_command_ids=[],
+ )
+ assert subject.get_current() is None
+
+ command = create_running_command(
+ "command-id",
+ command_key="command-key",
+ created_at=datetime(year=2021, month=1, day=1),
+ )
+ subject = get_command_view(
+ running_command_id="command-id",
+ queued_command_ids=[],
+ commands=[command],
+ )
+ assert subject.get_current() == CurrentCommand(
+ index=0,
+ command_id="command-id",
+ command_key="command-key",
+ created_at=datetime(year=2021, month=1, day=1),
+ )
+
+ command_1 = create_succeeded_command(
+ "command-id-1",
+ command_key="key-1",
+ created_at=datetime(year=2021, month=1, day=1),
+ )
+ command_2 = create_succeeded_command(
+ "command-id-2",
+ command_key="key-2",
+ created_at=datetime(year=2022, month=2, day=2),
+ )
+ subject = get_command_view(commands=[command_1, command_2])
+ subject.state.command_history._set_terminal_command_id(command_1.id)
+
+ assert subject.get_current() == CurrentCommand(
+ index=1,
+ command_id="command-id-2",
+ command_key="key-2",
+ created_at=datetime(year=2022, month=2, day=2),
+ )
+
+ command_1 = create_succeeded_command(
+ "command-id-1",
+ command_key="key-1",
+ created_at=datetime(year=2021, month=1, day=1),
+ )
+ command_2 = create_failed_command(
+ "command-id-2",
+ command_key="key-2",
+ created_at=datetime(year=2022, month=2, day=2),
+ )
+ subject = get_command_view(commands=[command_1, command_2])
+ subject.state.command_history._set_terminal_command_id(command_1.id)
+
+ assert subject.get_current() == CurrentCommand(
+ index=1,
+ command_id="command-id-2",
+ command_key="key-2",
+ created_at=datetime(year=2022, month=2, day=2),
+ )
+
+
+def test_get_slice_empty() -> None:
+ """It should return a slice from the tail if no current command."""
+ subject = get_command_view(commands=[])
+ result = subject.get_slice(cursor=None, length=2)
+
+ assert result == CommandSlice(commands=[], cursor=0, total_length=0)
+
+
+def test_get_slice() -> None:
+ """It should return a slice of all commands."""
+ command_1 = create_succeeded_command(command_id="command-id-1")
+ command_2 = create_running_command(command_id="command-id-2")
+ command_3 = create_queued_command(command_id="command-id-3")
+ command_4 = create_queued_command(command_id="command-id-4")
+
+ subject = get_command_view(commands=[command_1, command_2, command_3, command_4])
+
+ result = subject.get_slice(cursor=1, length=3)
+
+ assert result == CommandSlice(
+ commands=[command_2, command_3, command_4],
+ cursor=1,
+ total_length=4,
+ )
+
+ result = subject.get_slice(cursor=-3, length=10)
+
+ assert result == CommandSlice(
+ commands=[command_1, command_2, command_3, command_4],
+ cursor=0,
+ total_length=4,
+ )
+
+
+def test_get_slice_default_cursor_no_current() -> None:
+ """It should return a slice from the tail if no current command."""
+ command_1 = create_succeeded_command(command_id="command-id-1")
+ command_2 = create_succeeded_command(command_id="command-id-2")
+ command_3 = create_succeeded_command(command_id="command-id-3")
+ command_4 = create_succeeded_command(command_id="command-id-4")
+
+ subject = get_command_view(commands=[command_1, command_2, command_3, command_4])
+
+ result = subject.get_slice(cursor=None, length=3)
+
+ assert result == CommandSlice(
+ commands=[command_2, command_3, command_4],
+ cursor=1,
+ total_length=4,
+ )
+
+
+def test_get_slice_default_cursor_failed_command() -> None:
+ """It should return a slice from the last executed command."""
+ command_1 = create_failed_command(command_id="command-id-1")
+ command_2 = create_failed_command(command_id="command-id-2")
+ command_3 = create_failed_command(
+ command_id="command-id-3",
+ error=ErrorOccurrence(
+ id="error-id",
+ errorType="ProtocolEngineError",
+ createdAt=datetime(year=2022, month=2, day=2),
+ detail="oh no",
+ errorCode=ErrorCodes.GENERAL_ERROR.value.code,
+ ),
+ )
+ command_4 = create_failed_command(command_id="command-id-4")
+
+ subject = get_command_view(
+ commands=[command_1, command_2, command_3, command_4],
+ run_result=RunResult.FAILED,
+ failed_command=CommandEntry(index=2, command=command_3),
+ )
+
+ result = subject.get_slice(cursor=None, length=3)
+
+ assert result == CommandSlice(
+ commands=[command_3, command_4],
+ cursor=2,
+ total_length=4,
+ )
+
+
+def test_get_slice_default_cursor_running() -> None:
+ """It should select a cursor based on the running command, if present."""
+ command_1 = create_succeeded_command(command_id="command-id-1")
+ command_2 = create_succeeded_command(command_id="command-id-2")
+ command_3 = create_running_command(command_id="command-id-3")
+ command_4 = create_queued_command(command_id="command-id-4")
+ command_5 = create_queued_command(command_id="command-id-5")
+
+ subject = get_command_view(
+ commands=[command_1, command_2, command_3, command_4, command_5],
+ running_command_id="command-id-3",
+ )
+
+ result = subject.get_slice(cursor=None, length=2)
+
+ assert result == CommandSlice(
+ commands=[command_3, command_4],
+ cursor=2,
+ total_length=5,
+ )
+
+
+def test_get_slice_default_cursor_queued() -> None:
+ """It should select a cursor automatically."""
+ command_1 = create_succeeded_command(command_id="command-id-1")
+ command_2 = create_succeeded_command(command_id="command-id-2")
+ command_3 = create_succeeded_command(command_id="command-id-3")
+ command_4 = create_queued_command(command_id="command-id-4")
+ command_5 = create_queued_command(command_id="command-id-5")
+
+ subject = get_command_view(
+ commands=[command_1, command_2, command_3, command_4, command_5],
+ running_command_id=None,
+ queued_command_ids=[command_4.id, command_5.id],
+ )
+
+ result = subject.get_slice(cursor=None, length=2)
+
+ assert result == CommandSlice(
+ commands=[command_3, command_4],
+ cursor=2,
+ total_length=5,
+ )
+
+
+def test_get_latest_command_hash() -> None:
+ """It should get the latest command hash from state, if set."""
+ subject = get_command_view(latest_command_hash="abc123")
+ assert subject.get_latest_protocol_command_hash() == "abc123"
diff --git a/api/tests/opentrons/protocol_engine/state/test_geometry_view.py b/api/tests/opentrons/protocol_engine/state/test_geometry_view.py
index e46dd87d58a..82cf971595e 100644
--- a/api/tests/opentrons/protocol_engine/state/test_geometry_view.py
+++ b/api/tests/opentrons/protocol_engine/state/test_geometry_view.py
@@ -1,16 +1,26 @@
"""Test state getters for retrieving geometry views of state."""
import inspect
+import json
import pytest
from decoy import Decoy
from typing import cast, List, Tuple, Optional, NamedTuple
+from datetime import datetime
-from opentrons_shared_data.deck.dev_types import DeckDefinitionV4
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5
+from opentrons_shared_data.deck import load as load_deck
from opentrons_shared_data.labware.dev_types import LabwareUri
from opentrons_shared_data.pipette import pipette_definition
from opentrons.calibration_storage.helpers import uri_from_details
from opentrons.protocols.models import LabwareDefinition
from opentrons.types import Point, DeckSlotName, MountType
+from opentrons_shared_data.pipette.dev_types import PipetteNameType
+from opentrons_shared_data.labware.labware_definition import (
+ Dimensions as LabwareDimensions,
+ Parameters as LabwareDefinitionParameters,
+ CornerOffsetFromSlot,
+)
+from opentrons_shared_data import load_shared_data
from opentrons.protocol_engine import errors
from opentrons.protocol_engine.types import (
@@ -19,6 +29,7 @@
DeckSlotLocation,
ModuleLocation,
OnLabwareLocation,
+ AddressableAreaLocation,
ModuleOffsetVector,
ModuleOffsetData,
LoadedLabware,
@@ -33,24 +44,50 @@
OverlapOffset,
DeckType,
CurrentWell,
+ CurrentAddressableArea,
+ CurrentPipetteLocation,
LabwareMovementOffsetData,
+ LoadedPipette,
+ TipGeometry,
+ ModuleDefinition,
+)
+from opentrons.protocol_engine.commands import (
+ CommandStatus,
+ LoadLabwareResult,
+ LoadLabware,
+ LoadLabwareParams,
+ LoadModuleResult,
+ LoadModule,
+ LoadModuleParams,
)
+from opentrons.protocol_engine.actions import SucceedCommandAction
from opentrons.protocol_engine.state import move_types
from opentrons.protocol_engine.state.config import Config
-from opentrons.protocol_engine.state.labware import LabwareView
-from opentrons.protocol_engine.state.modules import ModuleView
-from opentrons.protocol_engine.state.pipettes import PipetteView, StaticPipetteConfig
+from opentrons.protocol_engine.state.labware import LabwareView, LabwareStore
+from opentrons.protocol_engine.state.modules import ModuleView, ModuleStore
+from opentrons.protocol_engine.state.pipettes import (
+ PipetteView,
+ PipetteStore,
+ StaticPipetteConfig,
+ BoundingNozzlesOffsets,
+ PipetteBoundingBoxOffsets,
+)
+from opentrons.protocol_engine.state.addressable_areas import (
+ AddressableAreaView,
+ AddressableAreaStore,
+)
from opentrons.protocol_engine.state.geometry import GeometryView, _GripperMoveType
+from ..pipette_fixtures import get_default_nozzle_map
@pytest.fixture
-def labware_view(decoy: Decoy) -> LabwareView:
+def mock_labware_view(decoy: Decoy) -> LabwareView:
"""Get a mock in the shape of a LabwareView."""
return decoy.mock(cls=LabwareView)
@pytest.fixture
-def module_view(decoy: Decoy) -> ModuleView:
+def mock_module_view(decoy: Decoy) -> ModuleView:
"""Get a mock in the shape of a ModuleView."""
return decoy.mock(cls=ModuleView)
@@ -61,6 +98,12 @@ def mock_pipette_view(decoy: Decoy) -> PipetteView:
return decoy.mock(cls=PipetteView)
+@pytest.fixture
+def mock_addressable_area_view(decoy: Decoy) -> AddressableAreaView:
+ """Get a mock in the shape of a AddressableAreaView."""
+ return decoy.mock(cls=AddressableAreaView)
+
+
@pytest.fixture(autouse=True)
def patch_mock_move_types(decoy: Decoy, monkeypatch: pytest.MonkeyPatch) -> None:
"""Mock out move_types.py functions."""
@@ -68,25 +111,145 @@ def patch_mock_move_types(decoy: Decoy, monkeypatch: pytest.MonkeyPatch) -> None
monkeypatch.setattr(move_types, name, decoy.mock(func=func))
+@pytest.fixture
+def use_mocks() -> bool:
+ """True to use mocks; add a use_mocks parameter of False to your test to use real states."""
+ return True
+
+
+@pytest.fixture
+def deck_definition(state_config: Config) -> DeckDefinitionV5:
+ """Override as parameter to use a non-flex deck def."""
+ return load_deck(name=state_config.deck_type.value, version=5)
+
+
+@pytest.fixture
+def state_config() -> Config:
+ """Get a state config. This is set up for a Flex."""
+ return Config(
+ robot_type="OT-3 Standard",
+ deck_type=DeckType.OT3_STANDARD,
+ )
+
+
+@pytest.fixture
+def labware_store(deck_definition: DeckDefinitionV5) -> LabwareStore:
+ """Get a labware store that can accept actions."""
+ return LabwareStore(deck_definition=deck_definition, deck_fixed_labware=[])
+
+
+@pytest.fixture
+def labware_view(labware_store: LabwareStore) -> LabwareView:
+ """Get a labware view of a real labware store."""
+ return LabwareView(labware_store._state)
+
+
+@pytest.fixture
+def module_store(state_config: Config) -> ModuleStore:
+ """Get a module store that can accept actions."""
+ return ModuleStore(config=state_config, module_calibration_offsets={})
+
+
+@pytest.fixture
+def module_view(module_store: ModuleStore) -> ModuleView:
+ """Get a module view of a real labware store."""
+ return ModuleView(module_store._state)
+
+
+@pytest.fixture
+def pipette_store() -> PipetteStore:
+ """Get a pipette store that can accept actions."""
+ return PipetteStore()
+
+
+@pytest.fixture
+def pipette_view(pipette_store: PipetteStore) -> PipetteView:
+ """Get a pipette view of a real pipette store."""
+ return PipetteView(pipette_store._state)
+
+
+@pytest.fixture
+def addressable_area_store(
+ state_config: Config, deck_definition: DeckDefinitionV5
+) -> AddressableAreaStore:
+ """Get an addressable area store that can accept actions."""
+ return AddressableAreaStore(
+ deck_configuration=[], config=state_config, deck_definition=deck_definition
+ )
+
+
+@pytest.fixture
+def addressable_area_view(
+ addressable_area_store: AddressableAreaStore,
+) -> AddressableAreaView:
+ """Get an addressable area view of a real addressable are store."""
+ return AddressableAreaView(addressable_area_store._state)
+
+
+@pytest.fixture
+def nice_labware_definition() -> LabwareDefinition:
+ """Load a nice labware def that won't blow up your terminal."""
+ return LabwareDefinition.parse_obj(
+ json.loads(
+ load_shared_data("labware/fixtures/2/fixture_12_trough_v2.json").decode(
+ "utf-8"
+ )
+ )
+ )
+
+
+@pytest.fixture
+def nice_adapter_definition() -> LabwareDefinition:
+ """Load a friendly adapter definition."""
+ return LabwareDefinition.parse_obj(
+ json.loads(
+ load_shared_data(
+ "labware/definitions/2/opentrons_aluminum_flat_bottom_plate/1.json"
+ ).decode("utf-8")
+ )
+ )
+
+
@pytest.fixture
def subject(
- labware_view: LabwareView, module_view: ModuleView, mock_pipette_view: PipetteView
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_pipette_view: PipetteView,
+ mock_addressable_area_view: AddressableAreaView,
+ state_config: Config,
+ labware_view: LabwareView,
+ module_view: ModuleView,
+ pipette_view: PipetteView,
+ addressable_area_view: AddressableAreaView,
+ use_mocks: bool,
) -> GeometryView:
- """Get a GeometryView with its store dependencies mocked out."""
+ """Get a GeometryView with its store dependencies provided.
+
+ By default, this will return a view with those dependencies mocked out. If you
+ parametrize use_mocks to False in your test, e.g.
+
+ @pytest.mark.parametrize('use_mocks', [False])
+ def my_cool_test(subject: GeometryView) -> None:
+ pass
+
+ then the provided subject will use actual state stores. Over time, we should get more
+ and more tests using use_mocks=False, and then flip the default.
+ """
return GeometryView(
- config=Config(
- robot_type="OT-3 Standard",
- deck_type=DeckType.OT3_STANDARD,
- ),
- labware_view=labware_view,
- module_view=module_view,
- pipette_view=mock_pipette_view,
+ config=state_config,
+ labware_view=mock_labware_view if use_mocks else labware_view,
+ module_view=mock_module_view if use_mocks else module_view,
+ pipette_view=mock_pipette_view if use_mocks else pipette_view,
+ addressable_area_view=mock_addressable_area_view
+ if use_mocks
+ else addressable_area_view,
)
def test_get_labware_parent_position(
decoy: Decoy,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
+ mock_addressable_area_view: AddressableAreaView,
subject: GeometryView,
) -> None:
"""It should return a deck slot position for labware in a deck slot."""
@@ -97,10 +260,10 @@ def test_get_labware_parent_position(
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_3),
offsetId=None,
)
- decoy.when(labware_view.get("labware-id")).then_return(labware_data)
- decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_3)).then_return(
- Point(1, 2, 3)
- )
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_3.id)
+ ).then_return(Point(1, 2, 3))
result = subject.get_labware_parent_position("labware-id")
@@ -109,7 +272,7 @@ def test_get_labware_parent_position(
def test_raise_error_for_off_deck_labware_parent(
decoy: Decoy,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""Test raise error when fetching parent for labware that's off-deck."""
@@ -120,16 +283,17 @@ def test_raise_error_for_off_deck_labware_parent(
location=OFF_DECK_LOCATION,
offsetId=None,
)
- decoy.when(labware_view.get("labware-id")).then_return(labware_data)
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
with pytest.raises(errors.LabwareNotOnDeckError):
subject.get_labware_parent_position("labware-id")
def test_get_labware_parent_position_on_module(
decoy: Decoy,
- labware_view: LabwareView,
- module_view: ModuleView,
- ot2_standard_deck_def: DeckDefinitionV4,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_addressable_area_view: AddressableAreaView,
+ ot2_standard_deck_def: DeckDefinitionV5,
subject: GeometryView,
) -> None:
"""It should return a module position for labware on a module."""
@@ -141,28 +305,34 @@ def test_get_labware_parent_position_on_module(
offsetId=None,
)
- decoy.when(labware_view.get("labware-id")).then_return(labware_data)
- decoy.when(module_view.get_location("module-id")).then_return(
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
+ decoy.when(mock_module_view.get_location("module-id")).then_return(
DeckSlotLocation(slotName=DeckSlotName.SLOT_3)
)
- decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_3)).then_return(
- Point(1, 2, 3)
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_3.id)
+ ).then_return(Point(1, 2, 3))
+
+ decoy.when(mock_labware_view.get_deck_definition()).then_return(
+ ot2_standard_deck_def
)
- decoy.when(labware_view.get_deck_definition()).then_return(ot2_standard_deck_def)
+
decoy.when(
- module_view.get_nominal_module_offset(
- module_id="module-id", deck_type=DeckType.OT2_STANDARD
+ mock_module_view.get_nominal_module_offset(
+ module_id="module-id",
+ addressable_areas=mock_addressable_area_view,
)
).then_return(LabwareOffsetVector(x=4, y=5, z=6))
- decoy.when(module_view.get_connected_model("module-id")).then_return(
+
+ decoy.when(mock_module_view.get_connected_model("module-id")).then_return(
ModuleModel.THERMOCYCLER_MODULE_V2
)
decoy.when(
- labware_view.get_module_overlap_offsets(
+ mock_labware_view.get_module_overlap_offsets(
"labware-id", ModuleModel.THERMOCYCLER_MODULE_V2
)
).then_return(OverlapOffset(x=1, y=2, z=3))
- decoy.when(module_view.get_module_calibration_offset("module-id")).then_return(
+ decoy.when(mock_module_view.get_module_calibration_offset("module-id")).then_return(
ModuleOffsetData(
moduleOffsetVector=ModuleOffsetVector(x=2, y=3, z=4),
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_3),
@@ -176,9 +346,10 @@ def test_get_labware_parent_position_on_module(
def test_get_labware_parent_position_on_labware(
decoy: Decoy,
- labware_view: LabwareView,
- module_view: ModuleView,
- ot2_standard_deck_def: DeckDefinitionV4,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_addressable_area_view: AddressableAreaView,
+ ot2_standard_deck_def: DeckDefinitionV5,
subject: GeometryView,
) -> None:
"""It should return a labware position for labware on a labware on a module."""
@@ -196,38 +367,41 @@ def test_get_labware_parent_position_on_labware(
location=ModuleLocation(moduleId="module-id"),
offsetId=None,
)
- decoy.when(labware_view.get("labware-id")).then_return(labware_data)
- decoy.when(module_view.get_location("module-id")).then_return(
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
+ decoy.when(mock_module_view.get_location("module-id")).then_return(
DeckSlotLocation(slotName=DeckSlotName.SLOT_3)
)
- decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_3)).then_return(
- Point(1, 2, 3)
- )
- decoy.when(labware_view.get("adapter-id")).then_return(adapter_data)
- decoy.when(labware_view.get_dimensions("adapter-id")).then_return(
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_3.id)
+ ).then_return(Point(1, 2, 3))
+ decoy.when(mock_labware_view.get("adapter-id")).then_return(adapter_data)
+ decoy.when(mock_labware_view.get_dimensions("adapter-id")).then_return(
Dimensions(x=123, y=456, z=5)
)
decoy.when(
- labware_view.get_labware_overlap_offsets("labware-id", "xyz")
+ mock_labware_view.get_labware_overlap_offsets("labware-id", "xyz")
).then_return(OverlapOffset(x=1, y=2, z=2))
- decoy.when(labware_view.get_deck_definition()).then_return(ot2_standard_deck_def)
+ decoy.when(mock_labware_view.get_deck_definition()).then_return(
+ ot2_standard_deck_def
+ )
decoy.when(
- module_view.get_nominal_module_offset(
- module_id="module-id", deck_type=DeckType.OT2_STANDARD
+ mock_module_view.get_nominal_module_offset(
+ module_id="module-id",
+ addressable_areas=mock_addressable_area_view,
)
).then_return(LabwareOffsetVector(x=1, y=2, z=3))
- decoy.when(module_view.get_connected_model("module-id")).then_return(
+ decoy.when(mock_module_view.get_connected_model("module-id")).then_return(
ModuleModel.MAGNETIC_MODULE_V2
)
decoy.when(
- labware_view.get_module_overlap_offsets(
+ mock_labware_view.get_module_overlap_offsets(
"adapter-id", ModuleModel.MAGNETIC_MODULE_V2
)
).then_return(OverlapOffset(x=-3, y=-2, z=-1))
- decoy.when(module_view.get_module_calibration_offset("module-id")).then_return(
+ decoy.when(mock_module_view.get_module_calibration_offset("module-id")).then_return(
ModuleOffsetData(
moduleOffsetVector=ModuleOffsetVector(x=3, y=4, z=5),
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_3),
@@ -241,9 +415,9 @@ def test_get_labware_parent_position_on_labware(
def test_module_calibration_offset_rotation(
decoy: Decoy,
- labware_view: LabwareView,
- module_view: ModuleView,
- ot2_standard_deck_def: DeckDefinitionV4,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ ot2_standard_deck_def: DeckDefinitionV5,
subject: GeometryView,
) -> None:
"""Return the rotated module calibration offset if the module was moved from one side of the deck to the other."""
@@ -255,14 +429,14 @@ def test_module_calibration_offset_rotation(
offsetId=None,
)
- decoy.when(labware_view.get("labware-id")).then_return(labware_data)
- decoy.when(module_view.get_location("module-id")).then_return(
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
+ decoy.when(mock_module_view.get_location("module-id")).then_return(
DeckSlotLocation(slotName=DeckSlotName.SLOT_D1)
)
- decoy.when(module_view.get_connected_model("module-id")).then_return(
+ decoy.when(mock_module_view.get_connected_model("module-id")).then_return(
ModuleModel.TEMPERATURE_MODULE_V2
)
- decoy.when(module_view.get_module_calibration_offset("module-id")).then_return(
+ decoy.when(mock_module_view.get_module_calibration_offset("module-id")).then_return(
ModuleOffsetData(
moduleOffsetVector=ModuleOffsetVector(x=2, y=3, z=4),
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_D1),
@@ -274,7 +448,7 @@ def test_module_calibration_offset_rotation(
assert result == ModuleOffsetVector(x=2, y=3, z=4)
# the module has changed from slot D1 to D3, so we should rotate the calibration offset 180 degrees along the z axis
- decoy.when(module_view.get_location("module-id")).then_return(
+ decoy.when(mock_module_view.get_location("module-id")).then_return(
DeckSlotLocation(slotName=DeckSlotName.SLOT_D3)
)
result = subject._get_calibrated_module_offset(ModuleLocation(moduleId="module-id"))
@@ -283,7 +457,7 @@ def test_module_calibration_offset_rotation(
# attempting to load the module calibration offset from an invalid slot in the middle of the deck (A2, B2, C2, D2)
# is not allowed since you can't even load a module in the middle to perform a module calibration in the
# first place. So if someone manually edits the stored module calibration offset we will throw an assert error.
- decoy.when(module_view.get_module_calibration_offset("module-id")).then_return(
+ decoy.when(mock_module_view.get_module_calibration_offset("module-id")).then_return(
ModuleOffsetData(
moduleOffsetVector=ModuleOffsetVector(x=2, y=3, z=4),
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_D2),
@@ -298,7 +472,8 @@ def test_module_calibration_offset_rotation(
def test_get_labware_origin_position(
decoy: Decoy,
well_plate_def: LabwareDefinition,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
+ mock_addressable_area_view: AddressableAreaView,
subject: GeometryView,
) -> None:
"""It should return a deck slot position with the labware's offset as its origin."""
@@ -310,11 +485,13 @@ def test_get_labware_origin_position(
offsetId=None,
)
- decoy.when(labware_view.get("labware-id")).then_return(labware_data)
- decoy.when(labware_view.get_definition("labware-id")).then_return(well_plate_def)
- decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_3)).then_return(
- Point(1, 2, 3)
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
+ decoy.when(mock_labware_view.get_definition("labware-id")).then_return(
+ well_plate_def
)
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_3.id)
+ ).then_return(Point(1, 2, 3))
expected_parent = Point(1, 2, 3)
expected_offset = Point(
@@ -332,7 +509,8 @@ def test_get_labware_origin_position(
def test_get_labware_highest_z(
decoy: Decoy,
well_plate_def: LabwareDefinition,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
+ mock_addressable_area_view: AddressableAreaView,
subject: GeometryView,
) -> None:
"""It should get the absolute location of a labware's highest Z point."""
@@ -346,14 +524,16 @@ def test_get_labware_highest_z(
slot_pos = Point(1, 2, 3)
calibration_offset = LabwareOffsetVector(x=1, y=-2, z=3)
- decoy.when(labware_view.get("labware-id")).then_return(labware_data)
- decoy.when(labware_view.get_definition("labware-id")).then_return(well_plate_def)
- decoy.when(labware_view.get_labware_offset_vector("labware-id")).then_return(
- calibration_offset
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
+ decoy.when(mock_labware_view.get_definition("labware-id")).then_return(
+ well_plate_def
)
- decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_3)).then_return(
- slot_pos
+ decoy.when(mock_labware_view.get_labware_offset_vector("labware-id")).then_return(
+ calibration_offset
)
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_3.id)
+ ).then_return(slot_pos)
highest_z = subject.get_labware_highest_z("labware-id")
@@ -363,9 +543,10 @@ def test_get_labware_highest_z(
def test_get_module_labware_highest_z(
decoy: Decoy,
well_plate_def: LabwareDefinition,
- labware_view: LabwareView,
- module_view: ModuleView,
- ot2_standard_deck_def: DeckDefinitionV4,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_addressable_area_view: AddressableAreaView,
+ ot2_standard_deck_def: DeckDefinitionV5,
subject: GeometryView,
) -> None:
"""It should get the absolute location of a labware's highest Z point."""
@@ -379,35 +560,40 @@ def test_get_module_labware_highest_z(
slot_pos = Point(1, 2, 3)
calibration_offset = LabwareOffsetVector(x=1, y=-2, z=3)
- decoy.when(labware_view.get("labware-id")).then_return(labware_data)
- decoy.when(labware_view.get_definition("labware-id")).then_return(well_plate_def)
- decoy.when(labware_view.get_labware_offset_vector("labware-id")).then_return(
- calibration_offset
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
+ decoy.when(mock_labware_view.get_definition("labware-id")).then_return(
+ well_plate_def
)
- decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_3)).then_return(
- slot_pos
+ decoy.when(mock_labware_view.get_labware_offset_vector("labware-id")).then_return(
+ calibration_offset
)
- decoy.when(module_view.get_location("module-id")).then_return(
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_3.id)
+ ).then_return(slot_pos)
+ decoy.when(mock_module_view.get_location("module-id")).then_return(
DeckSlotLocation(slotName=DeckSlotName.SLOT_3)
)
- decoy.when(labware_view.get_deck_definition()).then_return(ot2_standard_deck_def)
+ decoy.when(mock_labware_view.get_deck_definition()).then_return(
+ ot2_standard_deck_def
+ )
decoy.when(
- module_view.get_nominal_module_offset(
- module_id="module-id", deck_type=DeckType.OT2_STANDARD
+ mock_module_view.get_nominal_module_offset(
+ module_id="module-id",
+ addressable_areas=mock_addressable_area_view,
)
).then_return(LabwareOffsetVector(x=4, y=5, z=6))
- decoy.when(module_view.get_height_over_labware("module-id")).then_return(0.5)
- decoy.when(module_view.get_module_calibration_offset("module-id")).then_return(
+ decoy.when(mock_module_view.get_height_over_labware("module-id")).then_return(0.5)
+ decoy.when(mock_module_view.get_module_calibration_offset("module-id")).then_return(
ModuleOffsetData(
moduleOffsetVector=ModuleOffsetVector(x=0, y=0, z=0),
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_3),
)
)
- decoy.when(module_view.get_connected_model("module-id")).then_return(
+ decoy.when(mock_module_view.get_connected_model("module-id")).then_return(
ModuleModel.MAGNETIC_MODULE_V2
)
decoy.when(
- labware_view.get_module_overlap_offsets(
+ mock_labware_view.get_module_overlap_offsets(
"labware-id", ModuleModel.MAGNETIC_MODULE_V2
)
).then_return(OverlapOffset(x=0, y=0, z=0))
@@ -417,28 +603,31 @@ def test_get_module_labware_highest_z(
assert highest_z == (well_plate_def.dimensions.zDimension + 3 + 3 + 6 + 0.5)
-def test_get_all_labware_highest_z_no_equipment(
+def test_get_all_obstacle_highest_z_no_equipment(
decoy: Decoy,
- labware_view: LabwareView,
- module_view: ModuleView,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_addressable_area_view: AddressableAreaView,
subject: GeometryView,
) -> None:
"""It should return 0 if no loaded equipment."""
- decoy.when(module_view.get_all()).then_return([])
- decoy.when(labware_view.get_all()).then_return([])
+ decoy.when(mock_module_view.get_all()).then_return([])
+ decoy.when(mock_labware_view.get_all()).then_return([])
+ decoy.when(mock_addressable_area_view.get_all()).then_return([])
- result = subject.get_all_labware_highest_z()
+ result = subject.get_all_obstacle_highest_z()
assert result == 0
-def test_get_all_labware_highest_z(
+def test_get_all_obstacle_highest_z(
decoy: Decoy,
well_plate_def: LabwareDefinition,
reservoir_def: LabwareDefinition,
falcon_tuberack_def: LabwareDefinition,
- labware_view: LabwareView,
- module_view: ModuleView,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_addressable_area_view: AddressableAreaView,
subject: GeometryView,
) -> None:
"""It should get the highest Z amongst all labware."""
@@ -468,64 +657,433 @@ def test_get_all_labware_highest_z(
off_deck_lw_offset = LabwareOffsetVector(x=1, y=-2, z=3)
reservoir_offset = LabwareOffsetVector(x=1, y=-2, z=3)
- decoy.when(module_view.get_all()).then_return([])
+ decoy.when(mock_module_view.get_all()).then_return([])
+ decoy.when(mock_addressable_area_view.get_all()).then_return([])
- decoy.when(labware_view.get_all()).then_return([plate, off_deck_lw, reservoir])
- decoy.when(labware_view.get("plate-id")).then_return(plate)
- decoy.when(labware_view.get("off-deck-plate-id")).then_return(off_deck_lw)
- decoy.when(labware_view.get("reservoir-id")).then_return(reservoir)
+ decoy.when(mock_labware_view.get_all()).then_return([plate, off_deck_lw, reservoir])
+ decoy.when(mock_labware_view.get("plate-id")).then_return(plate)
+ decoy.when(mock_labware_view.get("off-deck-plate-id")).then_return(off_deck_lw)
+ decoy.when(mock_labware_view.get("reservoir-id")).then_return(reservoir)
- decoy.when(labware_view.get_definition("plate-id")).then_return(well_plate_def)
- decoy.when(labware_view.get_definition("off-deck-plate-id")).then_return(
+ decoy.when(mock_labware_view.get_definition("plate-id")).then_return(well_plate_def)
+ decoy.when(mock_labware_view.get_definition("off-deck-plate-id")).then_return(
falcon_tuberack_def # Something tall.
)
- decoy.when(labware_view.get_definition("reservoir-id")).then_return(reservoir_def)
+ decoy.when(mock_labware_view.get_definition("reservoir-id")).then_return(
+ reservoir_def
+ )
- decoy.when(labware_view.get_labware_offset_vector("plate-id")).then_return(
+ decoy.when(mock_labware_view.get_labware_offset_vector("plate-id")).then_return(
plate_offset
)
- decoy.when(labware_view.get_labware_offset_vector("off-deck-plate-id")).then_return(
- off_deck_lw_offset
- )
- decoy.when(labware_view.get_labware_offset_vector("reservoir-id")).then_return(
+ decoy.when(
+ mock_labware_view.get_labware_offset_vector("off-deck-plate-id")
+ ).then_return(off_deck_lw_offset)
+ decoy.when(mock_labware_view.get_labware_offset_vector("reservoir-id")).then_return(
reservoir_offset
)
- decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_3)).then_return(
- Point(1, 2, 3)
- )
- decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_4)).then_return(
- Point(4, 5, 6)
- )
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_3.id)
+ ).then_return(Point(1, 2, 3))
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_4.id)
+ ).then_return(Point(4, 5, 6))
plate_z = subject.get_labware_highest_z("plate-id")
reservoir_z = subject.get_labware_highest_z("reservoir-id")
- all_z = subject.get_all_labware_highest_z()
+ all_z = subject.get_all_obstacle_highest_z()
# Should exclude the off-deck plate.
assert all_z == max(plate_z, reservoir_z)
-def test_get_all_labware_highest_z_with_modules(
+def test_get_all_obstacle_highest_z_with_staging_area(
decoy: Decoy,
- labware_view: LabwareView,
- module_view: ModuleView,
+ well_plate_def: LabwareDefinition,
+ falcon_tuberack_def: LabwareDefinition,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_addressable_area_view: AddressableAreaView,
+ subject: GeometryView,
+) -> None:
+ """It should get the highest Z amongst all labware including staging area."""
+ plate = LoadedLabware(
+ id="plate-id",
+ loadName="plate-load-name",
+ definitionUri="plate-definition-uri",
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_3),
+ offsetId="plate-offset-id",
+ )
+ staging_lw = LoadedLabware(
+ id="staging-id",
+ loadName="staging-load-name",
+ definitionUri="staging-definition-uri",
+ location=AddressableAreaLocation(addressableAreaName="D4"),
+ offsetId="plate-offset-id",
+ )
+
+ plate_offset = LabwareOffsetVector(x=1, y=-2, z=3)
+ staging_lw_offset = LabwareOffsetVector(x=1, y=-2, z=3)
+
+ decoy.when(mock_module_view.get_all()).then_return([])
+ decoy.when(mock_addressable_area_view.get_all()).then_return([])
+
+ decoy.when(mock_labware_view.get_all()).then_return([plate, staging_lw])
+ decoy.when(mock_labware_view.get("plate-id")).then_return(plate)
+ decoy.when(mock_labware_view.get("staging-id")).then_return(staging_lw)
+
+ decoy.when(mock_labware_view.get_definition("plate-id")).then_return(well_plate_def)
+ decoy.when(mock_labware_view.get_definition("staging-id")).then_return(
+ falcon_tuberack_def # Something tall.
+ )
+
+ decoy.when(mock_labware_view.get_labware_offset_vector("plate-id")).then_return(
+ plate_offset
+ )
+ decoy.when(mock_labware_view.get_labware_offset_vector("staging-id")).then_return(
+ staging_lw_offset
+ )
+
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_3.id)
+ ).then_return(Point(1, 2, 3))
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position("D4")
+ ).then_return(Point(4, 5, 6))
+
+ staging_z = subject.get_labware_highest_z("staging-id")
+ all_z = subject.get_all_obstacle_highest_z()
+
+ assert all_z == staging_z
+
+
+def test_get_all_obstacle_highest_z_with_modules(
+ decoy: Decoy,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_addressable_area_view: AddressableAreaView,
subject: GeometryView,
) -> None:
"""It should get the highest Z including modules."""
module_1 = LoadedModule.construct(id="module-id-1") # type: ignore[call-arg]
module_2 = LoadedModule.construct(id="module-id-2") # type: ignore[call-arg]
- decoy.when(labware_view.get_all()).then_return([])
- decoy.when(module_view.get_all()).then_return([module_1, module_2])
- decoy.when(module_view.get_overall_height("module-id-1")).then_return(42.0)
- decoy.when(module_view.get_overall_height("module-id-2")).then_return(1337.0)
+ decoy.when(mock_labware_view.get_all()).then_return([])
+ decoy.when(mock_addressable_area_view.get_all()).then_return([])
+
+ decoy.when(mock_module_view.get_all()).then_return([module_1, module_2])
+ decoy.when(mock_module_view.get_overall_height("module-id-1")).then_return(42.0)
+ decoy.when(mock_module_view.get_overall_height("module-id-2")).then_return(1337.0)
+
+ result = subject.get_all_obstacle_highest_z()
+
+ assert result == 1337.0
+
+
+def test_get_all_obstacle_highest_z_with_fixtures(
+ decoy: Decoy,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_addressable_area_view: AddressableAreaView,
+ subject: GeometryView,
+) -> None:
+ """It should get the highest Z including fixtures."""
+ decoy.when(mock_labware_view.get_all()).then_return([])
+ decoy.when(mock_module_view.get_all()).then_return([])
+
+ decoy.when(mock_addressable_area_view.get_all_cutout_fixtures()).then_return(
+ ["abc", "xyz"]
+ )
+ decoy.when(mock_addressable_area_view.get_fixture_height("abc")).then_return(42.0)
+ decoy.when(mock_addressable_area_view.get_fixture_height("xyz")).then_return(1337.0)
- result = subject.get_all_labware_highest_z()
+ result = subject.get_all_obstacle_highest_z()
assert result == 1337.0
+def test_get_highest_z_in_slot_with_single_labware(
+ decoy: Decoy,
+ mock_labware_view: LabwareView,
+ mock_addressable_area_view: AddressableAreaView,
+ subject: GeometryView,
+ well_plate_def: LabwareDefinition,
+) -> None:
+ """It should get the highest Z in slot with just a single labware."""
+ # Case: Slot has a labware that doesn't have any other labware on it. Highest z is equal to labware height.
+ labware_in_slot = LoadedLabware(
+ id="just-labware-id",
+ loadName="just-labware-name",
+ definitionUri="definition-uri",
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_3),
+ offsetId="offset-id",
+ )
+ slot_pos = Point(1, 2, 3)
+ calibration_offset = LabwareOffsetVector(x=1, y=-2, z=3)
+
+ decoy.when(mock_labware_view.get_by_slot(DeckSlotName.SLOT_3)).then_return(
+ labware_in_slot
+ )
+ decoy.when(mock_labware_view.get_id_by_labware("just-labware-id")).then_raise(
+ errors.LabwareNotLoadedOnLabwareError("no more labware")
+ )
+ decoy.when(mock_labware_view.get("just-labware-id")).then_return(labware_in_slot)
+ decoy.when(mock_labware_view.get_definition("just-labware-id")).then_return(
+ well_plate_def
+ )
+ decoy.when(
+ mock_labware_view.get_labware_offset_vector("just-labware-id")
+ ).then_return(calibration_offset)
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_3.id)
+ ).then_return(slot_pos)
+
+ expected_highest_z = well_plate_def.dimensions.zDimension + 3 + 3
+ assert (
+ subject.get_highest_z_in_slot(DeckSlotLocation(slotName=DeckSlotName.SLOT_3))
+ == expected_highest_z
+ )
+
+
+def test_get_highest_z_in_slot_with_single_module(
+ decoy: Decoy,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_addressable_area_view: AddressableAreaView,
+ subject: GeometryView,
+ ot2_standard_deck_def: DeckDefinitionV5,
+) -> None:
+ """It should get the highest Z in slot with just a single module."""
+ # Case: Slot has a module that doesn't have any labware on it. Highest z is equal to module height.
+ module_in_slot = LoadedModule.construct(
+ id="only-module",
+ model=ModuleModel.THERMOCYCLER_MODULE_V2,
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_4),
+ )
+
+ decoy.when(mock_module_view.get_by_slot(DeckSlotName.SLOT_3)).then_return(
+ module_in_slot
+ )
+ decoy.when(mock_labware_view.get_id_by_module("only-module")).then_raise(
+ errors.LabwareNotLoadedOnModuleError("only module")
+ )
+ decoy.when(mock_labware_view.get_deck_definition()).then_return(
+ ot2_standard_deck_def
+ )
+ decoy.when(
+ mock_module_view.get_module_highest_z(
+ module_id="only-module",
+ addressable_areas=mock_addressable_area_view,
+ )
+ ).then_return(12345)
+
+ assert (
+ subject.get_highest_z_in_slot(DeckSlotLocation(slotName=DeckSlotName.SLOT_3))
+ == 12345
+ )
+
+
+# TODO (spp, 2023-12-05): this is mocking out too many things and is hard to follow.
+# Create an integration test that loads labware and modules and tests the geometry
+# in an easier-to-understand manner.
+def test_get_highest_z_in_slot_with_stacked_labware_on_slot(
+ decoy: Decoy,
+ mock_labware_view: LabwareView,
+ mock_addressable_area_view: AddressableAreaView,
+ subject: GeometryView,
+ well_plate_def: LabwareDefinition,
+) -> None:
+ """It should get the highest z in slot of the topmost labware in stack.
+
+ Tests both `get_highest_z_in_slot` and `get_highest_z_of_labware_stack`.
+ """
+ labware_in_slot = LoadedLabware(
+ id="bottom-labware-id",
+ loadName="bottom-labware-name",
+ definitionUri="bottom-definition-uri",
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_3),
+ offsetId="offset-id",
+ )
+ middle_labware = LoadedLabware(
+ id="middle-labware-id",
+ loadName="middle-labware-name",
+ definitionUri="middle-definition-uri",
+ location=OnLabwareLocation(labwareId="bottom-labware-id"),
+ offsetId="offset-id",
+ )
+ top_labware = LoadedLabware(
+ id="top-labware-id",
+ loadName="top-labware-name",
+ definitionUri="top-definition-uri",
+ location=OnLabwareLocation(labwareId="middle-labware-id"),
+ offsetId="offset-id",
+ )
+ slot_pos = Point(11, 22, 33)
+ top_lw_lpc_offset = LabwareOffsetVector(x=1, y=-2, z=3)
+
+ decoy.when(mock_labware_view.get_by_slot(DeckSlotName.SLOT_3)).then_return(
+ labware_in_slot
+ )
+
+ decoy.when(mock_labware_view.get_id_by_labware("bottom-labware-id")).then_return(
+ "middle-labware-id"
+ )
+ decoy.when(mock_labware_view.get_id_by_labware("middle-labware-id")).then_return(
+ "top-labware-id"
+ )
+ decoy.when(mock_labware_view.get_id_by_labware("top-labware-id")).then_raise(
+ errors.LabwareNotLoadedOnLabwareError("top labware")
+ )
+
+ decoy.when(mock_labware_view.get("bottom-labware-id")).then_return(labware_in_slot)
+ decoy.when(mock_labware_view.get("middle-labware-id")).then_return(middle_labware)
+ decoy.when(mock_labware_view.get("top-labware-id")).then_return(top_labware)
+
+ decoy.when(mock_labware_view.get_definition("top-labware-id")).then_return(
+ well_plate_def
+ )
+ decoy.when(
+ mock_labware_view.get_labware_offset_vector("top-labware-id")
+ ).then_return(top_lw_lpc_offset)
+ decoy.when(mock_labware_view.get_dimensions("middle-labware-id")).then_return(
+ Dimensions(x=10, y=20, z=30)
+ )
+ decoy.when(mock_labware_view.get_dimensions("bottom-labware-id")).then_return(
+ Dimensions(x=11, y=12, z=13)
+ )
+
+ decoy.when(
+ mock_labware_view.get_labware_overlap_offsets(
+ "top-labware-id", below_labware_name="middle-labware-name"
+ )
+ ).then_return(OverlapOffset(x=4, y=5, z=6))
+ decoy.when(
+ mock_labware_view.get_labware_overlap_offsets(
+ "middle-labware-id", below_labware_name="bottom-labware-name"
+ )
+ ).then_return(OverlapOffset(x=7, y=8, z=9))
+
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_3.id)
+ ).then_return(slot_pos)
+
+ expected_highest_z = (
+ slot_pos.z + well_plate_def.dimensions.zDimension - 6 + 30 - 9 + 13 + 3
+ )
+ assert (
+ subject.get_highest_z_in_slot(DeckSlotLocation(slotName=DeckSlotName.SLOT_3))
+ == expected_highest_z
+ )
+
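To make the arithmetic in expected_highest_z above easier to audit: the stack height composes, from the slot origin up, each lower item's height minus the overlap of the item nested into it, then the top labware's own height plus its LPC offset. Spelled out with the values configured in the mocks of the preceding test:

# Values configured in test_get_highest_z_in_slot_with_stacked_labware_on_slot:
slot_z = 33.0          # slot_pos.z
bottom_height = 13.0   # bottom labware z dimension
middle_overlap = 9.0   # how far the middle labware nests into the bottom labware
middle_height = 30.0   # middle labware z dimension
top_overlap = 6.0      # how far the top labware nests into the middle labware
top_lpc_z = 3.0        # top labware LPC offset z

# Everything except the top labware's own zDimension, which the assertion adds separately:
partial_stack_z = (
    slot_z + bottom_height - middle_overlap + middle_height - top_overlap + top_lpc_z
)
assert partial_stack_z == 64.0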
+
+# TODO (spp, 2023-12-05): this is mocking out too many things and is hard to follow.
+# Create an integration test that loads labware and modules and tests the geometry
+# in an easier-to-understand manner.
+def test_get_highest_z_in_slot_with_labware_stack_on_module(
+ decoy: Decoy,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_addressable_area_view: AddressableAreaView,
+ subject: GeometryView,
+ well_plate_def: LabwareDefinition,
+ ot2_standard_deck_def: DeckDefinitionV5,
+) -> None:
+ """It should get the highest z in slot of labware on module.
+
+ Tests both `get_highest_z_in_slot` and `get_highest_z_of_labware_stack`.
+ """
+ top_labware = LoadedLabware(
+ id="top-labware-id",
+ loadName="top-labware-name",
+ definitionUri="top-labware-uri",
+ location=OnLabwareLocation(labwareId="adapter-id"),
+ offsetId="offset-id1",
+ )
+ adapter = LoadedLabware(
+ id="adapter-id",
+ loadName="adapter-name",
+ definitionUri="adapter-uri",
+ location=ModuleLocation(moduleId="module-id"),
+ offsetId="offset-id2",
+ )
+ module_on_slot = LoadedModule.construct(
+ id="module-id",
+ model=ModuleModel.THERMOCYCLER_MODULE_V2,
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_4),
+ )
+
+ slot_pos = Point(11, 22, 33)
+ top_lw_lpc_offset = LabwareOffsetVector(x=1, y=-2, z=3)
+
+ decoy.when(mock_module_view.get("module-id")).then_return(module_on_slot)
+ decoy.when(mock_module_view.get_by_slot(DeckSlotName.SLOT_3)).then_return(
+ module_on_slot
+ )
+
+ decoy.when(mock_labware_view.get_id_by_module("module-id")).then_return(
+ "adapter-id"
+ )
+ decoy.when(mock_labware_view.get_id_by_labware("adapter-id")).then_return(
+ "top-labware-id"
+ )
+ decoy.when(mock_labware_view.get_id_by_labware("top-labware-id")).then_raise(
+ errors.LabwareNotLoadedOnLabwareError("top labware")
+ )
+
+ decoy.when(mock_labware_view.get_deck_definition()).then_return(
+ ot2_standard_deck_def
+ )
+ decoy.when(mock_labware_view.get_definition("top-labware-id")).then_return(
+ well_plate_def
+ )
+
+ decoy.when(mock_labware_view.get("adapter-id")).then_return(adapter)
+ decoy.when(mock_labware_view.get("top-labware-id")).then_return(top_labware)
+ decoy.when(
+ mock_labware_view.get_labware_offset_vector("top-labware-id")
+ ).then_return(top_lw_lpc_offset)
+ decoy.when(mock_labware_view.get_dimensions("adapter-id")).then_return(
+ Dimensions(x=10, y=20, z=30)
+ )
+ decoy.when(
+ mock_labware_view.get_labware_overlap_offsets(
+ labware_id="top-labware-id", below_labware_name="adapter-name"
+ )
+ ).then_return(OverlapOffset(x=4, y=5, z=6))
+
+ decoy.when(mock_module_view.get_location("module-id")).then_return(
+ DeckSlotLocation(slotName=DeckSlotName.SLOT_3)
+ )
+ decoy.when(
+ mock_module_view.get_nominal_module_offset(
+ module_id="module-id",
+ addressable_areas=mock_addressable_area_view,
+ )
+ ).then_return(LabwareOffsetVector(x=40, y=50, z=60))
+ decoy.when(mock_module_view.get_connected_model("module-id")).then_return(
+ ModuleModel.TEMPERATURE_MODULE_V2
+ )
+
+ decoy.when(
+ mock_labware_view.get_module_overlap_offsets(
+ "adapter-id", ModuleModel.TEMPERATURE_MODULE_V2
+ )
+ ).then_return(OverlapOffset(x=1.1, y=2.2, z=3.3))
+
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_3.id)
+ ).then_return(slot_pos)
+
+ expected_highest_z = (
+ slot_pos.z + 60 + 30 - 3.3 + well_plate_def.dimensions.zDimension - 6 + 3
+ )
+ assert (
+ subject.get_highest_z_in_slot(DeckSlotLocation(slotName=DeckSlotName.SLOT_3))
+ == expected_highest_z
+ )
+
+
@pytest.mark.parametrize(
["location", "min_z_height", "expected_min_z"],
[
@@ -541,8 +1099,9 @@ def test_get_all_labware_highest_z_with_modules(
def test_get_min_travel_z(
decoy: Decoy,
well_plate_def: LabwareDefinition,
- labware_view: LabwareView,
- module_view: ModuleView,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_addressable_area_view: AddressableAreaView,
location: Optional[CurrentWell],
min_z_height: Optional[float],
expected_min_z: float,
@@ -557,17 +1116,20 @@ def test_get_min_travel_z(
offsetId="offset-id",
)
- decoy.when(labware_view.get("labware-id")).then_return(labware_data)
- decoy.when(labware_view.get_definition("labware-id")).then_return(well_plate_def)
- decoy.when(labware_view.get_labware_offset_vector("labware-id")).then_return(
- LabwareOffsetVector(x=0, y=0, z=3)
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
+ decoy.when(mock_labware_view.get_definition("labware-id")).then_return(
+ well_plate_def
)
- decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_3)).then_return(
- Point(0, 0, 3)
+ decoy.when(mock_labware_view.get_labware_offset_vector("labware-id")).then_return(
+ LabwareOffsetVector(x=0, y=0, z=3)
)
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_3.id)
+ ).then_return(Point(0, 0, 3))
- decoy.when(module_view.get_all()).then_return([])
- decoy.when(labware_view.get_all()).then_return([])
+ decoy.when(mock_module_view.get_all()).then_return([])
+ decoy.when(mock_labware_view.get_all()).then_return([])
+ decoy.when(mock_addressable_area_view.get_all()).then_return([])
min_travel_z = subject.get_min_travel_z(
"pipette-id", "labware-id", location, min_z_height
@@ -579,7 +1141,8 @@ def test_get_min_travel_z(
def test_get_labware_position(
decoy: Decoy,
well_plate_def: LabwareDefinition,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
+ mock_addressable_area_view: AddressableAreaView,
subject: GeometryView,
) -> None:
"""It should return the slot position plus calibrated offset."""
@@ -593,14 +1156,16 @@ def test_get_labware_position(
calibration_offset = LabwareOffsetVector(x=1, y=-2, z=3)
slot_pos = Point(4, 5, 6)
- decoy.when(labware_view.get("labware-id")).then_return(labware_data)
- decoy.when(labware_view.get_definition("labware-id")).then_return(well_plate_def)
- decoy.when(labware_view.get_labware_offset_vector("labware-id")).then_return(
- calibration_offset
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
+ decoy.when(mock_labware_view.get_definition("labware-id")).then_return(
+ well_plate_def
)
- decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_4)).then_return(
- slot_pos
+ decoy.when(mock_labware_view.get_labware_offset_vector("labware-id")).then_return(
+ calibration_offset
)
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_4.id)
+ ).then_return(slot_pos)
position = subject.get_labware_position(labware_id="labware-id")
@@ -614,7 +1179,8 @@ def test_get_labware_position(
def test_get_well_position(
decoy: Decoy,
well_plate_def: LabwareDefinition,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
+ mock_addressable_area_view: AddressableAreaView,
subject: GeometryView,
) -> None:
"""It should be able to get the position of a well top in a labware."""
@@ -629,15 +1195,17 @@ def test_get_well_position(
slot_pos = Point(4, 5, 6)
well_def = well_plate_def.wells["B2"]
- decoy.when(labware_view.get("labware-id")).then_return(labware_data)
- decoy.when(labware_view.get_definition("labware-id")).then_return(well_plate_def)
- decoy.when(labware_view.get_labware_offset_vector("labware-id")).then_return(
- calibration_offset
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
+ decoy.when(mock_labware_view.get_definition("labware-id")).then_return(
+ well_plate_def
)
- decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_4)).then_return(
- slot_pos
+ decoy.when(mock_labware_view.get_labware_offset_vector("labware-id")).then_return(
+ calibration_offset
)
- decoy.when(labware_view.get_well_definition("labware-id", "B2")).then_return(
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_4.id)
+ ).then_return(slot_pos)
+ decoy.when(mock_labware_view.get_well_definition("labware-id", "B2")).then_return(
well_def
)
@@ -653,12 +1221,12 @@ def test_get_well_position(
def test_get_well_height(
decoy: Decoy,
well_plate_def: LabwareDefinition,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should be able to get the well height."""
well_def = well_plate_def.wells["B2"]
- decoy.when(labware_view.get_well_definition("labware-id", "B2")).then_return(
+ decoy.when(mock_labware_view.get_well_definition("labware-id", "B2")).then_return(
well_def
)
assert subject.get_well_height("labware-id", "B2") == 10.67
@@ -667,9 +1235,10 @@ def test_get_well_height(
def test_get_module_labware_well_position(
decoy: Decoy,
well_plate_def: LabwareDefinition,
- labware_view: LabwareView,
- module_view: ModuleView,
- ot2_standard_deck_def: DeckDefinitionV4,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_addressable_area_view: AddressableAreaView,
+ ot2_standard_deck_def: DeckDefinitionV5,
subject: GeometryView,
) -> None:
"""It should be able to get the position of a well top in a labware on module."""
@@ -684,37 +1253,42 @@ def test_get_module_labware_well_position(
slot_pos = Point(4, 5, 6)
well_def = well_plate_def.wells["B2"]
- decoy.when(labware_view.get("labware-id")).then_return(labware_data)
- decoy.when(labware_view.get_definition("labware-id")).then_return(well_plate_def)
- decoy.when(labware_view.get_labware_offset_vector("labware-id")).then_return(
- calibration_offset
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
+ decoy.when(mock_labware_view.get_definition("labware-id")).then_return(
+ well_plate_def
)
- decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_4)).then_return(
- slot_pos
+ decoy.when(mock_labware_view.get_labware_offset_vector("labware-id")).then_return(
+ calibration_offset
)
- decoy.when(labware_view.get_well_definition("labware-id", "B2")).then_return(
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_4.id)
+ ).then_return(slot_pos)
+ decoy.when(mock_labware_view.get_well_definition("labware-id", "B2")).then_return(
well_def
)
- decoy.when(module_view.get_location("module-id")).then_return(
+ decoy.when(mock_module_view.get_location("module-id")).then_return(
DeckSlotLocation(slotName=DeckSlotName.SLOT_4)
)
- decoy.when(labware_view.get_deck_definition()).then_return(ot2_standard_deck_def)
+ decoy.when(mock_labware_view.get_deck_definition()).then_return(
+ ot2_standard_deck_def
+ )
decoy.when(
- module_view.get_nominal_module_offset(
- module_id="module-id", deck_type=DeckType.OT2_STANDARD
+ mock_module_view.get_nominal_module_offset(
+ module_id="module-id",
+ addressable_areas=mock_addressable_area_view,
)
).then_return(LabwareOffsetVector(x=4, y=5, z=6))
- decoy.when(module_view.get_module_calibration_offset("module-id")).then_return(
+ decoy.when(mock_module_view.get_module_calibration_offset("module-id")).then_return(
ModuleOffsetData(
moduleOffsetVector=ModuleOffsetVector(x=0, y=0, z=0),
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_3),
)
)
- decoy.when(module_view.get_connected_model("module-id")).then_return(
+ decoy.when(mock_module_view.get_connected_model("module-id")).then_return(
ModuleModel.MAGNETIC_MODULE_V2
)
decoy.when(
- labware_view.get_module_overlap_offsets(
+ mock_labware_view.get_module_overlap_offsets(
"labware-id", ModuleModel.MAGNETIC_MODULE_V2
)
).then_return(OverlapOffset(x=0, y=0, z=0))
@@ -730,7 +1304,8 @@ def test_get_module_labware_well_position(
def test_get_well_position_with_top_offset(
decoy: Decoy,
well_plate_def: LabwareDefinition,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
+ mock_addressable_area_view: AddressableAreaView,
subject: GeometryView,
) -> None:
"""It should be able to get the position of a well top in a labware."""
@@ -745,15 +1320,17 @@ def test_get_well_position_with_top_offset(
slot_pos = Point(4, 5, 6)
well_def = well_plate_def.wells["B2"]
- decoy.when(labware_view.get("labware-id")).then_return(labware_data)
- decoy.when(labware_view.get_definition("labware-id")).then_return(well_plate_def)
- decoy.when(labware_view.get_labware_offset_vector("labware-id")).then_return(
- calibration_offset
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
+ decoy.when(mock_labware_view.get_definition("labware-id")).then_return(
+ well_plate_def
)
- decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_4)).then_return(
- slot_pos
+ decoy.when(mock_labware_view.get_labware_offset_vector("labware-id")).then_return(
+ calibration_offset
)
- decoy.when(labware_view.get_well_definition("labware-id", "B2")).then_return(
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_4.id)
+ ).then_return(slot_pos)
+ decoy.when(mock_labware_view.get_well_definition("labware-id", "B2")).then_return(
well_def
)
@@ -776,7 +1353,8 @@ def test_get_well_position_with_top_offset(
def test_get_well_position_with_bottom_offset(
decoy: Decoy,
well_plate_def: LabwareDefinition,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
+ mock_addressable_area_view: AddressableAreaView,
subject: GeometryView,
) -> None:
"""It should be able to get the position of a well bottom in a labware."""
@@ -791,15 +1369,17 @@ def test_get_well_position_with_bottom_offset(
slot_pos = Point(4, 5, 6)
well_def = well_plate_def.wells["B2"]
- decoy.when(labware_view.get("labware-id")).then_return(labware_data)
- decoy.when(labware_view.get_definition("labware-id")).then_return(well_plate_def)
- decoy.when(labware_view.get_labware_offset_vector("labware-id")).then_return(
- calibration_offset
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
+ decoy.when(mock_labware_view.get_definition("labware-id")).then_return(
+ well_plate_def
)
- decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_4)).then_return(
- slot_pos
+ decoy.when(mock_labware_view.get_labware_offset_vector("labware-id")).then_return(
+ calibration_offset
)
- decoy.when(labware_view.get_well_definition("labware-id", "B2")).then_return(
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_4.id)
+ ).then_return(slot_pos)
+ decoy.when(mock_labware_view.get_well_definition("labware-id", "B2")).then_return(
well_def
)
@@ -822,7 +1402,8 @@ def test_get_well_position_with_bottom_offset(
def test_get_well_position_with_center_offset(
decoy: Decoy,
well_plate_def: LabwareDefinition,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
+ mock_addressable_area_view: AddressableAreaView,
subject: GeometryView,
) -> None:
"""It should be able to get the position of a well center in a labware."""
@@ -837,15 +1418,17 @@ def test_get_well_position_with_center_offset(
slot_pos = Point(4, 5, 6)
well_def = well_plate_def.wells["B2"]
- decoy.when(labware_view.get("labware-id")).then_return(labware_data)
- decoy.when(labware_view.get_definition("labware-id")).then_return(well_plate_def)
- decoy.when(labware_view.get_labware_offset_vector("labware-id")).then_return(
- calibration_offset
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
+ decoy.when(mock_labware_view.get_definition("labware-id")).then_return(
+ well_plate_def
)
- decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_4)).then_return(
- slot_pos
+ decoy.when(mock_labware_view.get_labware_offset_vector("labware-id")).then_return(
+ calibration_offset
)
- decoy.when(labware_view.get_well_definition("labware-id", "B2")).then_return(
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_4.id)
+ ).then_return(slot_pos)
+ decoy.when(mock_labware_view.get_well_definition("labware-id", "B2")).then_return(
well_def
)
@@ -868,7 +1451,8 @@ def test_get_well_position_with_center_offset(
def test_get_relative_well_location(
decoy: Decoy,
well_plate_def: LabwareDefinition,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
+ mock_addressable_area_view: AddressableAreaView,
subject: GeometryView,
) -> None:
"""It should get the relative location of a well given an absolute position."""
@@ -883,15 +1467,17 @@ def test_get_relative_well_location(
slot_pos = Point(4, 5, 6)
well_def = well_plate_def.wells["B2"]
- decoy.when(labware_view.get("labware-id")).then_return(labware_data)
- decoy.when(labware_view.get_definition("labware-id")).then_return(well_plate_def)
- decoy.when(labware_view.get_labware_offset_vector("labware-id")).then_return(
- calibration_offset
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
+ decoy.when(mock_labware_view.get_definition("labware-id")).then_return(
+ well_plate_def
)
- decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_4)).then_return(
- slot_pos
+ decoy.when(mock_labware_view.get_labware_offset_vector("labware-id")).then_return(
+ calibration_offset
)
- decoy.when(labware_view.get_well_definition("labware-id", "B2")).then_return(
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_4.id)
+ ).then_return(slot_pos)
+ decoy.when(mock_labware_view.get_well_definition("labware-id", "B2")).then_return(
well_def
)
@@ -917,12 +1503,12 @@ def test_get_relative_well_location(
def test_get_nominal_effective_tip_length(
decoy: Decoy,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
mock_pipette_view: PipetteView,
subject: GeometryView,
) -> None:
"""It should get the effective tip length from a labware ID and pipette config."""
- decoy.when(labware_view.get_definition_uri("tip-rack-id")).then_return(
+ decoy.when(mock_labware_view.get_definition_uri("tip-rack-id")).then_return(
LabwareUri("opentrons/opentrons_96_tiprack_300ul/1")
)
@@ -934,7 +1520,7 @@ def test_get_nominal_effective_tip_length(
).then_return(10)
decoy.when(
- labware_view.get_tip_length(labware_id="tip-rack-id", overlap=10)
+ mock_labware_view.get_tip_length(labware_id="tip-rack-id", overlap=10)
).then_return(100)
result = subject.get_nominal_effective_tip_length(
@@ -948,18 +1534,18 @@ def test_get_nominal_effective_tip_length(
def test_get_nominal_tip_geometry(
decoy: Decoy,
tip_rack_def: LabwareDefinition,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
mock_pipette_view: PipetteView,
subject: GeometryView,
) -> None:
"""It should get a "well's" tip geometry."""
well_def = tip_rack_def.wells["B2"]
- decoy.when(labware_view.get_definition_uri("tip-rack-id")).then_return(
+ decoy.when(mock_labware_view.get_definition_uri("tip-rack-id")).then_return(
LabwareUri("opentrons/opentrons_96_tiprack_300ul/1")
)
- decoy.when(labware_view.get_well_definition("tip-rack-id", "B2")).then_return(
+ decoy.when(mock_labware_view.get_well_definition("tip-rack-id", "B2")).then_return(
well_def
)
@@ -971,7 +1557,7 @@ def test_get_nominal_tip_geometry(
).then_return(10)
decoy.when(
- labware_view.get_tip_length(labware_id="tip-rack-id", overlap=10)
+ mock_labware_view.get_tip_length(labware_id="tip-rack-id", overlap=10)
).then_return(100)
result = subject.get_nominal_tip_geometry(
@@ -988,14 +1574,14 @@ def test_get_nominal_tip_geometry(
def test_get_nominal_tip_geometry_raises(
decoy: Decoy,
tip_rack_def: LabwareDefinition,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should raise LabwareIsNotTipRackError if well is not circular."""
well_def = tip_rack_def.wells["B2"]
well_def.shape = "rectangular"
- decoy.when(labware_view.get_well_definition("tip-rack-id", "B2")).then_return(
+ decoy.when(mock_labware_view.get_well_definition("tip-rack-id", "B2")).then_return(
well_def
)
@@ -1007,18 +1593,20 @@ def test_get_nominal_tip_geometry_raises(
def test_get_tip_drop_location(
decoy: Decoy,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
mock_pipette_view: PipetteView,
subject: GeometryView,
tip_rack_def: LabwareDefinition,
) -> None:
"""It should get relative drop tip location for a pipette/labware combo."""
- decoy.when(labware_view.get_definition("tip-rack-id")).then_return(tip_rack_def)
+ decoy.when(mock_labware_view.get_definition("tip-rack-id")).then_return(
+ tip_rack_def
+ )
decoy.when(mock_pipette_view.get_return_tip_scale("pipette-id")).then_return(0.5)
decoy.when(
- labware_view.get_tip_drop_z_offset(
+ mock_labware_view.get_tip_drop_z_offset(
labware_id="tip-rack-id", length_scale=0.5, additional_offset=3
)
).then_return(1337)
@@ -1037,12 +1625,14 @@ def test_get_tip_drop_location(
def test_get_tip_drop_location_with_non_tiprack(
decoy: Decoy,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
subject: GeometryView,
reservoir_def: LabwareDefinition,
) -> None:
"""It should get relative drop tip location for a labware that is not a tiprack."""
- decoy.when(labware_view.get_definition("labware-id")).then_return(reservoir_def)
+ decoy.when(mock_labware_view.get_definition("labware-id")).then_return(
+ reservoir_def
+ )
location = subject.get_checked_tip_drop_location(
pipette_id="pipette-id",
@@ -1059,15 +1649,24 @@ def test_get_tip_drop_location_with_non_tiprack(
)
-def test_get_tip_drop_explicit_location(subject: GeometryView) -> None:
+def test_get_tip_drop_explicit_location(
+ decoy: Decoy,
+ mock_labware_view: LabwareView,
+ subject: GeometryView,
+ tip_rack_def: LabwareDefinition,
+) -> None:
"""It should pass the location through if origin is not WellOrigin.DROP_TIP."""
+ decoy.when(mock_labware_view.get_definition("tip-rack-id")).then_return(
+ tip_rack_def
+ )
+
input_location = DropTipWellLocation(
origin=DropTipWellOrigin.TOP,
offset=WellOffset(x=1, y=2, z=3),
)
result = subject.get_checked_tip_drop_location(
- pipette_id="pipette-id", labware_id="labware-id", well_location=input_location
+ pipette_id="pipette-id", labware_id="tip-rack-id", well_location=input_location
)
assert result == WellLocation(
@@ -1078,12 +1677,12 @@ def test_get_tip_drop_explicit_location(subject: GeometryView) -> None:
def test_get_ancestor_slot_name(
decoy: Decoy,
- labware_view: LabwareView,
- module_view: ModuleView,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
subject: GeometryView,
) -> None:
"""It should get name of ancestor slot of labware."""
- decoy.when(labware_view.get("labware-1")).then_return(
+ decoy.when(mock_labware_view.get("labware-1")).then_return(
LoadedLabware(
id="labware-1",
loadName="load-name",
@@ -1093,7 +1692,7 @@ def test_get_ancestor_slot_name(
)
assert subject.get_ancestor_slot_name("labware-1") == DeckSlotName.SLOT_4
- decoy.when(labware_view.get("labware-2")).then_return(
+ decoy.when(mock_labware_view.get("labware-2")).then_return(
LoadedLabware(
id="labware-2",
loadName="load-name",
@@ -1101,7 +1700,7 @@ def test_get_ancestor_slot_name(
location=ModuleLocation(moduleId="4321"),
)
)
- decoy.when(module_view.get_location("4321")).then_return(
+ decoy.when(mock_module_view.get_location("4321")).then_return(
DeckSlotLocation(slotName=DeckSlotName.SLOT_1)
)
assert subject.get_ancestor_slot_name("labware-2") == DeckSlotName.SLOT_1
@@ -1109,8 +1708,8 @@ def test_get_ancestor_slot_name(
def test_ensure_location_not_occupied_raises(
decoy: Decoy,
- labware_view: LabwareView,
- module_view: ModuleView,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
subject: GeometryView,
) -> None:
"""It should raise error when labware is present in given location."""
@@ -1119,20 +1718,20 @@ def test_ensure_location_not_occupied_raises(
assert subject.ensure_location_not_occupied(location=slot_location) == slot_location
# Raise if labware in location
- decoy.when(labware_view.raise_if_labware_in_location(slot_location)).then_raise(
- errors.LocationIsOccupiedError("Woops!")
- )
+ decoy.when(
+ mock_labware_view.raise_if_labware_in_location(slot_location)
+ ).then_raise(errors.LocationIsOccupiedError("Woops!"))
with pytest.raises(errors.LocationIsOccupiedError):
subject.ensure_location_not_occupied(location=slot_location)
# Raise if module in location
- module_location = ModuleLocation(moduleId="module-id")
- decoy.when(labware_view.raise_if_labware_in_location(module_location)).then_return(
- None
- )
- decoy.when(module_view.raise_if_module_in_location(module_location)).then_raise(
- errors.LocationIsOccupiedError("Woops again!")
- )
+ module_location = DeckSlotLocation(slotName=DeckSlotName.SLOT_1)
+ decoy.when(
+ mock_labware_view.raise_if_labware_in_location(module_location)
+ ).then_return(None)
+ decoy.when(
+ mock_module_view.raise_if_module_in_location(module_location)
+ ).then_raise(errors.LocationIsOccupiedError("Woops again!"))
with pytest.raises(errors.LocationIsOccupiedError):
subject.ensure_location_not_occupied(location=module_location)
@@ -1145,19 +1744,20 @@ def test_ensure_location_not_occupied_raises(
def test_get_labware_grip_point(
decoy: Decoy,
- labware_view: LabwareView,
- module_view: ModuleView,
- ot2_standard_deck_def: DeckDefinitionV4,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_addressable_area_view: AddressableAreaView,
+ ot2_standard_deck_def: DeckDefinitionV5,
subject: GeometryView,
) -> None:
"""It should get the grip point of the labware at the specified location."""
decoy.when(
- labware_view.get_grip_height_from_labware_bottom("labware-id")
+ mock_labware_view.get_grip_height_from_labware_bottom("labware-id")
).then_return(100)
- decoy.when(labware_view.get_slot_center_position(DeckSlotName.SLOT_1)).then_return(
- Point(x=101, y=102, z=103)
- )
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_center(DeckSlotName.SLOT_1.id)
+ ).then_return(Point(x=101, y=102, z=103))
labware_center = subject.get_labware_grip_point(
labware_id="labware-id", location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1)
)
@@ -1167,13 +1767,14 @@ def test_get_labware_grip_point(
def test_get_labware_grip_point_on_labware(
decoy: Decoy,
- labware_view: LabwareView,
- module_view: ModuleView,
- ot2_standard_deck_def: DeckDefinitionV4,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_addressable_area_view: AddressableAreaView,
+ ot2_standard_deck_def: DeckDefinitionV5,
subject: GeometryView,
) -> None:
"""It should get the grip point of a labware on another labware."""
- decoy.when(labware_view.get(labware_id="labware-id")).then_return(
+ decoy.when(mock_labware_view.get(labware_id="labware-id")).then_return(
LoadedLabware(
id="labware-id",
loadName="above-name",
@@ -1181,7 +1782,7 @@ def test_get_labware_grip_point_on_labware(
location=OnLabwareLocation(labwareId="below-id"),
)
)
- decoy.when(labware_view.get(labware_id="below-id")).then_return(
+ decoy.when(mock_labware_view.get(labware_id="below-id")).then_return(
LoadedLabware(
id="below-id",
loadName="below-name",
@@ -1190,19 +1791,19 @@ def test_get_labware_grip_point_on_labware(
)
)
- decoy.when(labware_view.get_dimensions("below-id")).then_return(
+ decoy.when(mock_labware_view.get_dimensions("below-id")).then_return(
Dimensions(x=1000, y=1001, z=11)
)
decoy.when(
- labware_view.get_grip_height_from_labware_bottom("labware-id")
+ mock_labware_view.get_grip_height_from_labware_bottom("labware-id")
).then_return(100)
decoy.when(
- labware_view.get_labware_overlap_offsets("labware-id", "below-name")
+ mock_labware_view.get_labware_overlap_offsets("labware-id", "below-name")
).then_return(OverlapOffset(x=0, y=1, z=6))
- decoy.when(labware_view.get_slot_center_position(DeckSlotName.SLOT_4)).then_return(
- Point(x=5, y=9, z=10)
- )
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_center(DeckSlotName.SLOT_4.id)
+ ).then_return(Point(x=5, y=9, z=10))
grip_point = subject.get_labware_grip_point(
labware_id="labware-id", location=OnLabwareLocation(labwareId="below-id")
@@ -1213,41 +1814,45 @@ def test_get_labware_grip_point_on_labware(
def test_get_labware_grip_point_for_labware_on_module(
decoy: Decoy,
- labware_view: LabwareView,
- module_view: ModuleView,
- ot2_standard_deck_def: DeckDefinitionV4,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_addressable_area_view: AddressableAreaView,
+ ot2_standard_deck_def: DeckDefinitionV5,
subject: GeometryView,
) -> None:
"""It should return the grip point for labware directly on a module."""
decoy.when(
- labware_view.get_grip_height_from_labware_bottom("labware-id")
+ mock_labware_view.get_grip_height_from_labware_bottom("labware-id")
).then_return(500)
- decoy.when(module_view.get_location("module-id")).then_return(
+ decoy.when(mock_module_view.get_location("module-id")).then_return(
DeckSlotLocation(slotName=DeckSlotName.SLOT_4)
)
- decoy.when(labware_view.get_deck_definition()).then_return(ot2_standard_deck_def)
+ decoy.when(mock_labware_view.get_deck_definition()).then_return(
+ ot2_standard_deck_def
+ )
decoy.when(
- module_view.get_nominal_module_offset(
- module_id="module-id", deck_type=DeckType.OT2_STANDARD
+ mock_module_view.get_nominal_module_offset(
+ module_id="module-id",
+ addressable_areas=mock_addressable_area_view,
)
).then_return(LabwareOffsetVector(x=1, y=2, z=3))
- decoy.when(module_view.get_connected_model("module-id")).then_return(
+ decoy.when(mock_module_view.get_connected_model("module-id")).then_return(
ModuleModel.MAGNETIC_MODULE_V2
)
decoy.when(
- labware_view.get_module_overlap_offsets(
+ mock_labware_view.get_module_overlap_offsets(
"labware-id", ModuleModel.MAGNETIC_MODULE_V2
)
).then_return(OverlapOffset(x=10, y=20, z=30))
- decoy.when(module_view.get_module_calibration_offset("module-id")).then_return(
+ decoy.when(mock_module_view.get_module_calibration_offset("module-id")).then_return(
ModuleOffsetData(
moduleOffsetVector=ModuleOffsetVector(x=100, y=200, z=300),
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_4),
)
)
- decoy.when(labware_view.get_slot_center_position(DeckSlotName.SLOT_4)).then_return(
- Point(100, 200, 300)
- )
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_center(DeckSlotName.SLOT_4.id)
+ ).then_return(Point(100, 200, 300))
result_grip_point = subject.get_labware_grip_point(
labware_id="labware-id", location=ModuleLocation(moduleId="module-id")
)
@@ -1259,21 +1864,25 @@ def test_get_labware_grip_point_for_labware_on_module(
argnames=["location", "should_dodge", "expected_waypoints"],
argvalues=[
(None, True, []),
+ (None, False, []),
(CurrentWell("pipette-id", "from-labware-id", "well-name"), False, []),
(CurrentWell("pipette-id", "from-labware-id", "well-name"), True, [(11, 22)]),
+ (CurrentAddressableArea("pipette-id", "area-name"), False, []),
+ (CurrentAddressableArea("pipette-id", "area-name"), True, [(11, 22)]),
],
)
def test_get_extra_waypoints(
decoy: Decoy,
- labware_view: LabwareView,
- module_view: ModuleView,
- location: Optional[CurrentWell],
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_addressable_area_view: AddressableAreaView,
+ location: Optional[CurrentPipetteLocation],
should_dodge: bool,
expected_waypoints: List[Tuple[float, float]],
subject: GeometryView,
) -> None:
"""It should return extra waypoints if thermocycler should be dodged."""
- decoy.when(labware_view.get("from-labware-id")).then_return(
+ decoy.when(mock_labware_view.get("from-labware-id")).then_return(
LoadedLabware(
id="labware1",
loadName="load-name1",
@@ -1281,80 +1890,70 @@ def test_get_extra_waypoints(
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
)
)
- decoy.when(labware_view.get("to-labware-id")).then_return(
- LoadedLabware(
- id="labware2",
- loadName="load-name2",
- definitionUri="4567",
- location=DeckSlotLocation(slotName=DeckSlotName.SLOT_2),
- )
- )
decoy.when(
- module_view.should_dodge_thermocycler(
+ mock_addressable_area_view.get_addressable_area_base_slot("area-name")
+ ).then_return(DeckSlotName.SLOT_1)
+
+ decoy.when(
+ mock_module_view.should_dodge_thermocycler(
from_slot=DeckSlotName.SLOT_1, to_slot=DeckSlotName.SLOT_2
)
).then_return(should_dodge)
decoy.when(
# Assume the subject's Config is for an OT-3, so use an OT-3 slot name.
- labware_view.get_slot_center_position(slot=DeckSlotName.SLOT_C2)
+ mock_addressable_area_view.get_addressable_area_center(
+ addressable_area_name=DeckSlotName.SLOT_C2.id
+ )
).then_return(Point(x=11, y=22, z=33))
- extra_waypoints = subject.get_extra_waypoints("to-labware-id", location)
+ extra_waypoints = subject.get_extra_waypoints(location, DeckSlotName.SLOT_2)
assert extra_waypoints == expected_waypoints
def test_get_slot_item(
decoy: Decoy,
- labware_view: LabwareView,
- module_view: ModuleView,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
subject: GeometryView,
) -> None:
"""It should get items in certain slots."""
- allowed_labware_ids = {"foo", "bar"}
- allowed_module_ids = {"fizz", "buzz"}
labware = LoadedLabware.construct(id="cool-labware") # type: ignore[call-arg]
module = LoadedModule.construct(id="cool-module") # type: ignore[call-arg]
- decoy.when(
- labware_view.get_by_slot(DeckSlotName.SLOT_1, allowed_labware_ids)
- ).then_return(None)
- decoy.when(
- labware_view.get_by_slot(DeckSlotName.SLOT_2, allowed_labware_ids)
- ).then_return(labware)
- decoy.when(
- labware_view.get_by_slot(DeckSlotName.SLOT_3, allowed_labware_ids)
- ).then_return(None)
+ decoy.when(mock_labware_view.get_by_slot(DeckSlotName.SLOT_1)).then_return(None)
+ decoy.when(mock_labware_view.get_by_slot(DeckSlotName.SLOT_2)).then_return(labware)
+ decoy.when(mock_labware_view.get_by_slot(DeckSlotName.SLOT_3)).then_return(None)
- decoy.when(
- module_view.get_by_slot(DeckSlotName.SLOT_1, allowed_module_ids)
- ).then_return(None)
- decoy.when(
- module_view.get_by_slot(DeckSlotName.SLOT_2, allowed_module_ids)
- ).then_return(None)
- decoy.when(
- module_view.get_by_slot(DeckSlotName.SLOT_3, allowed_module_ids)
- ).then_return(module)
+ decoy.when(mock_module_view.get_by_slot(DeckSlotName.SLOT_1)).then_return(None)
+ decoy.when(mock_module_view.get_by_slot(DeckSlotName.SLOT_2)).then_return(None)
+ decoy.when(mock_module_view.get_by_slot(DeckSlotName.SLOT_3)).then_return(module)
assert (
subject.get_slot_item(
- DeckSlotName.SLOT_1, allowed_labware_ids, allowed_module_ids
+ DeckSlotName.SLOT_1,
)
is None
)
- assert (
- subject.get_slot_item(
- DeckSlotName.SLOT_2, allowed_labware_ids, allowed_module_ids
- )
- == labware
- )
- assert (
- subject.get_slot_item(
- DeckSlotName.SLOT_3, allowed_labware_ids, allowed_module_ids
- )
- == module
- )
+ assert subject.get_slot_item(DeckSlotName.SLOT_2) == labware
+ assert subject.get_slot_item(DeckSlotName.SLOT_3) == module
+
+
+def test_get_slot_item_that_is_overflowed_module(
+ decoy: Decoy,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ subject: GeometryView,
+) -> None:
+ """It should return the module that occupies the slot, even if not loaded on it."""
+ module = LoadedModule.construct(id="cool-module") # type: ignore[call-arg]
+ decoy.when(mock_labware_view.get_by_slot(DeckSlotName.SLOT_3)).then_return(None)
+ decoy.when(mock_module_view.get_by_slot(DeckSlotName.SLOT_3)).then_return(None)
+ decoy.when(
+ mock_module_view.get_overflowed_module_in_slot(DeckSlotName.SLOT_3)
+ ).then_return(module)
+ assert subject.get_slot_item(DeckSlotName.SLOT_3) == module
@pytest.mark.parametrize(
@@ -1437,7 +2036,7 @@ class DropTipLocationFinderSpec(NamedTuple):
)
def test_get_next_drop_tip_location(
decoy: Decoy,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
mock_pipette_view: PipetteView,
subject: GeometryView,
labware_slot: DeckSlotName,
@@ -1448,10 +2047,16 @@ def test_get_next_drop_tip_location(
supported_tip_fixture: pipette_definition.SupportedTipsDefinition,
) -> None:
"""It should provide the next location to drop tips into within a labware."""
- decoy.when(labware_view.is_fixed_trash(labware_id="abc")).then_return(True)
+ decoy.when(mock_labware_view.is_fixed_trash(labware_id="abc")).then_return(True)
decoy.when(
- labware_view.get_well_size(labware_id="abc", well_name="A1")
+ mock_labware_view.get_well_size(labware_id="abc", well_name="A1")
).then_return((well_size, 0, 0))
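+ # Pick a representative pipette type for the parametrized channel count; it is only
+ # used to build the default nozzle map for the static pipette config below.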
+ if pipette_channels == 96:
+ pip_type = PipetteNameType.P1000_96
+ elif pipette_channels == 8:
+ pip_type = PipetteNameType.P300_MULTI
+ else:
+ pip_type = PipetteNameType.P300_SINGLE
decoy.when(mock_pipette_view.get_config("pip-123")).then_return(
StaticPipetteConfig(
min_volume=1,
@@ -1464,10 +2069,19 @@ def test_get_next_drop_tip_location(
nominal_tip_overlap={},
home_position=0,
nozzle_offset_z=0,
+ bounding_nozzle_offsets=BoundingNozzlesOffsets(
+ back_left_offset=Point(x=10, y=20, z=30),
+ front_right_offset=Point(x=40, y=50, z=60),
+ ),
+ default_nozzle_map=get_default_nozzle_map(pip_type),
+ pipette_bounding_box_offsets=PipetteBoundingBoxOffsets(
+ back_left_corner=Point(x=10, y=20, z=30),
+ front_right_corner=Point(x=40, y=50, z=60),
+ ),
)
)
decoy.when(mock_pipette_view.get_mount("pip-123")).then_return(pipette_mount)
- decoy.when(labware_view.get("abc")).then_return(
+ decoy.when(mock_labware_view.get("abc")).then_return(
LoadedLabware(
id="abc",
loadName="load-name2",
@@ -1489,12 +2103,12 @@ def test_get_next_drop_tip_location(
def test_get_next_drop_tip_location_in_non_trash_labware(
decoy: Decoy,
- labware_view: LabwareView,
+ mock_labware_view: LabwareView,
mock_pipette_view: PipetteView,
subject: GeometryView,
) -> None:
"""It should provide the default drop tip location when dropping into a non-fixed-trash labware."""
- decoy.when(labware_view.is_fixed_trash(labware_id="abc")).then_return(False)
+ decoy.when(mock_labware_view.is_fixed_trash(labware_id="abc")).then_return(False)
assert subject.get_next_tip_drop_location(
labware_id="abc", well_name="A1", pipette_id="pip-123"
) == DropTipWellLocation(
@@ -1505,18 +2119,18 @@ def test_get_next_drop_tip_location_in_non_trash_labware(
def test_get_final_labware_movement_offset_vectors(
decoy: Decoy,
- module_view: ModuleView,
- labware_view: LabwareView,
+ mock_module_view: ModuleView,
+ mock_labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should provide the final labware movement offset data based on locations."""
- decoy.when(labware_view.get_deck_default_gripper_offsets()).then_return(
+ decoy.when(mock_labware_view.get_deck_default_gripper_offsets()).then_return(
LabwareMovementOffsetData(
pickUpOffset=LabwareOffsetVector(x=1, y=2, z=3),
dropOffset=LabwareOffsetVector(x=3, y=2, z=1),
)
)
- decoy.when(module_view.get_default_gripper_offsets("module-id")).then_return(
+ decoy.when(mock_module_view.get_default_gripper_offsets("module-id")).then_return(
LabwareMovementOffsetData(
pickUpOffset=LabwareOffsetVector(x=11, y=22, z=33),
dropOffset=LabwareOffsetVector(x=33, y=22, z=11),
@@ -1557,19 +2171,19 @@ def test_ensure_valid_gripper_location(subject: GeometryView) -> None:
def test_get_total_nominal_gripper_offset(
decoy: Decoy,
- labware_view: LabwareView,
- module_view: ModuleView,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
subject: GeometryView,
) -> None:
"""It should calculate the correct gripper offsets given the location and move type.."""
- decoy.when(labware_view.get_deck_default_gripper_offsets()).then_return(
+ decoy.when(mock_labware_view.get_deck_default_gripper_offsets()).then_return(
LabwareMovementOffsetData(
pickUpOffset=LabwareOffsetVector(x=1, y=2, z=3),
dropOffset=LabwareOffsetVector(x=3, y=2, z=1),
)
)
- decoy.when(module_view.get_default_gripper_offsets("module-id")).then_return(
+ decoy.when(mock_module_view.get_default_gripper_offsets("module-id")).then_return(
LabwareMovementOffsetData(
pickUpOffset=LabwareOffsetVector(x=11, y=22, z=33),
dropOffset=LabwareOffsetVector(x=33, y=22, z=11),
@@ -1593,23 +2207,23 @@ def test_get_total_nominal_gripper_offset(
def test_get_stacked_labware_total_nominal_offset_slot_specific(
decoy: Decoy,
- labware_view: LabwareView,
- module_view: ModuleView,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
subject: GeometryView,
) -> None:
"""Get nominal offset for stacked labware."""
# Case: labware on adapter on module, adapter has slot-specific offsets
- decoy.when(module_view.get_default_gripper_offsets("module-id")).then_return(
+ decoy.when(mock_module_view.get_default_gripper_offsets("module-id")).then_return(
LabwareMovementOffsetData(
pickUpOffset=LabwareOffsetVector(x=11, y=22, z=33),
dropOffset=LabwareOffsetVector(x=33, y=22, z=11),
)
)
- decoy.when(module_view.get_location("module-id")).then_return(
+ decoy.when(mock_module_view.get_location("module-id")).then_return(
DeckSlotLocation(slotName=DeckSlotName.SLOT_C1)
)
decoy.when(
- labware_view.get_labware_gripper_offsets(
+ mock_labware_view.get_labware_gripper_offsets(
labware_id="adapter-id", slot_name=DeckSlotName.SLOT_C1
)
).then_return(
@@ -1618,7 +2232,7 @@ def test_get_stacked_labware_total_nominal_offset_slot_specific(
dropOffset=LabwareOffsetVector(x=300, y=200, z=100),
)
)
- decoy.when(labware_view.get_parent_location("adapter-id")).then_return(
+ decoy.when(mock_labware_view.get_parent_location("adapter-id")).then_return(
ModuleLocation(moduleId="module-id")
)
result1 = subject.get_total_nominal_gripper_offset_for_move_type(
@@ -1636,28 +2250,28 @@ def test_get_stacked_labware_total_nominal_offset_slot_specific(
def test_get_stacked_labware_total_nominal_offset_default(
decoy: Decoy,
- labware_view: LabwareView,
- module_view: ModuleView,
+ mock_labware_view: LabwareView,
+ mock_module_view: ModuleView,
subject: GeometryView,
) -> None:
"""Get nominal offset for stacked labware."""
# Case: labware on adapter on module, adapter has only default offsets
- decoy.when(module_view.get_default_gripper_offsets("module-id")).then_return(
+ decoy.when(mock_module_view.get_default_gripper_offsets("module-id")).then_return(
LabwareMovementOffsetData(
pickUpOffset=LabwareOffsetVector(x=11, y=22, z=33),
dropOffset=LabwareOffsetVector(x=33, y=22, z=11),
)
)
- decoy.when(module_view.get_location("module-id")).then_return(
+ decoy.when(mock_module_view.get_location("module-id")).then_return(
DeckSlotLocation(slotName=DeckSlotName.SLOT_4)
)
decoy.when(
- labware_view.get_labware_gripper_offsets(
+ mock_labware_view.get_labware_gripper_offsets(
labware_id="adapter-id", slot_name=DeckSlotName.SLOT_C1
)
).then_return(None)
decoy.when(
- labware_view.get_labware_gripper_offsets(
+ mock_labware_view.get_labware_gripper_offsets(
labware_id="adapter-id", slot_name=None
)
).then_return(
@@ -1666,7 +2280,7 @@ def test_get_stacked_labware_total_nominal_offset_default(
dropOffset=LabwareOffsetVector(x=300, y=200, z=100),
)
)
- decoy.when(labware_view.get_parent_location("adapter-id")).then_return(
+ decoy.when(mock_labware_view.get_parent_location("adapter-id")).then_return(
ModuleLocation(moduleId="module-id")
)
result1 = subject.get_total_nominal_gripper_offset_for_move_type(
@@ -1680,3 +2294,309 @@ def test_get_stacked_labware_total_nominal_offset_default(
move_type=_GripperMoveType.DROP_LABWARE,
)
assert result2 == LabwareOffsetVector(x=333, y=222, z=111)
+
+
+def test_check_gripper_labware_tip_collision(
+ decoy: Decoy,
+ mock_pipette_view: PipetteView,
+ mock_labware_view: LabwareView,
+ mock_addressable_area_view: AddressableAreaView,
+ subject: GeometryView,
+) -> None:
+ """It should raise a labware movement error if attached tips will collide with the labware during a gripper lift."""
+ pipettes = [
+ LoadedPipette(
+ id="pipette-id",
+ mount=MountType.LEFT,
+ pipetteName=PipetteNameType.P1000_96,
+ )
+ ]
+ decoy.when(mock_pipette_view.get_all()).then_return(pipettes)
+ decoy.when(mock_pipette_view.get_attached_tip("pipette-id")).then_return(
+ TipGeometry(
+ length=1000,
+ diameter=1000,
+ volume=1000,
+ )
+ )
+
+ definition = LabwareDefinition.construct( # type: ignore[call-arg]
+ namespace="hello",
+ dimensions=LabwareDimensions.construct(
+ yDimension=1, zDimension=2, xDimension=3
+ ),
+ version=1,
+ parameters=LabwareDefinitionParameters.construct(
+ format="96Standard",
+ loadName="labware-id",
+ isTiprack=True,
+ isMagneticModuleCompatible=False,
+ ),
+ cornerOffsetFromSlot=CornerOffsetFromSlot.construct(x=1, y=2, z=3),
+ ordering=[],
+ )
+
+ labware_data = LoadedLabware(
+ id="labware-id",
+ loadName="b",
+ definitionUri=uri_from_details(
+ namespace="hello", load_name="labware-id", version=1
+ ),
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
+ offsetId=None,
+ )
+
+ decoy.when(mock_labware_view.get_definition("labware-id")).then_return(definition)
+ decoy.when(mock_labware_view.get("labware-id")).then_return(labware_data)
+
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_position(DeckSlotName.SLOT_1.id)
+ ).then_return(Point(1, 2, 3))
+
+ calibration_offset = LabwareOffsetVector(x=1, y=-2, z=3)
+ decoy.when(mock_labware_view.get_labware_offset_vector("labware-id")).then_return(
+ calibration_offset
+ )
+ decoy.when(subject.get_labware_origin_position("labware-id")).then_return(
+ Point(1, 2, 3)
+ )
+ decoy.when(mock_labware_view.get_definition("labware-id")).then_return(definition)
+ decoy.when(subject._get_highest_z_from_labware_data(labware_data)).then_return(1000)
+
+ decoy.when(mock_labware_view.get_definition("labware-id")).then_return(definition)
+ decoy.when(subject.get_labware_highest_z("labware-id")).then_return(100.0)
+ decoy.when(
+ mock_addressable_area_view.get_addressable_area_center(
+ addressable_area_name=DeckSlotName.SLOT_1.id
+ )
+ ).then_return(Point(x=11, y=22, z=33))
+ decoy.when(
+ mock_labware_view.get_grip_height_from_labware_bottom("labware-id")
+ ).then_return(1.0)
+ decoy.when(mock_labware_view.get_definition("labware-id")).then_return(definition)
+ decoy.when(
+ subject.get_labware_grip_point(
+ labware_id="labware-id",
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
+ )
+ ).then_return(Point(x=100.0, y=100.0, z=0.0))
+
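+ # The exaggerated attached tip length makes a collision unavoidable for the given
+ # homed gripper height, so the check below is expected to raise.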
+ with pytest.raises(errors.LabwareMovementNotAllowedError):
+ subject.check_gripper_labware_tip_collision(
+ gripper_homed_position_z=166.125,
+ labware_id="labware-id",
+ current_location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
+ )
+
+
+# Note: Below here, all tests should be done using actual state objects rather than mocks of dependent views.
+# I (sf) think this is a better way to do things, but let's try and do it as we add more stuff and see if I'm
+# right!
+
+
+@pytest.mark.parametrize("use_mocks", [False])
+def test_get_offset_location_deck_slot(
+ decoy: Decoy,
+ labware_store: LabwareStore,
+ nice_labware_definition: LabwareDefinition,
+ subject: GeometryView,
+) -> None:
+ """Test if you can get the offset location of a labware in a deck slot."""
+ action = SucceedCommandAction(
+ command=LoadLabware(
+ id="load-labware-1",
+ createdAt=datetime.now(),
+ key="load-labware-1",
+ status=CommandStatus.SUCCEEDED,
+ result=LoadLabwareResult(
+ labwareId="labware-id-1",
+ definition=nice_labware_definition,
+ offsetId=None,
+ ),
+ params=LoadLabwareParams(
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_C2),
+ loadName=nice_labware_definition.parameters.loadName,
+ namespace=nice_labware_definition.namespace,
+ version=nice_labware_definition.version,
+ ),
+ ),
+ private_result=None,
+ )
+ labware_store.handle_action(action)
+ offset_location = subject.get_offset_location("labware-id-1")
+ assert offset_location is not None
+ assert offset_location.slotName == DeckSlotName.SLOT_C2
+ assert offset_location.definitionUri is None
+ assert offset_location.moduleModel is None
+
+
+@pytest.mark.parametrize("use_mocks", [False])
+def test_get_offset_location_module(
+ decoy: Decoy,
+ labware_store: LabwareStore,
+ module_store: ModuleStore,
+ nice_labware_definition: LabwareDefinition,
+ tempdeck_v2_def: ModuleDefinition,
+ subject: GeometryView,
+) -> None:
+ """Test if you can get the offset of a labware directly on a module."""
+ load_module = SucceedCommandAction(
+ command=LoadModule(
+ params=LoadModuleParams(
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_A3),
+ model=ModuleModel.TEMPERATURE_MODULE_V1,
+ ),
+ id="load-module-1",
+ createdAt=datetime.now(),
+ key="load-module-1",
+ status=CommandStatus.SUCCEEDED,
+ result=LoadModuleResult(
+ moduleId="module-id-1",
+ definition=tempdeck_v2_def,
+ model=tempdeck_v2_def.model,
+ ),
+ ),
+ private_result=None,
+ )
+ load_labware = SucceedCommandAction(
+ command=LoadLabware(
+ id="load-labware-1",
+ createdAt=datetime.now(),
+ key="load-labware-1",
+ status=CommandStatus.SUCCEEDED,
+ result=LoadLabwareResult(
+ labwareId="labware-id-1",
+ definition=nice_labware_definition,
+ offsetId=None,
+ ),
+ params=LoadLabwareParams(
+ location=ModuleLocation(moduleId="module-id-1"),
+ loadName=nice_labware_definition.parameters.loadName,
+ namespace=nice_labware_definition.namespace,
+ version=nice_labware_definition.version,
+ ),
+ ),
+ private_result=None,
+ )
+ module_store.handle_action(load_module)
+ labware_store.handle_action(load_labware)
+ offset_location = subject.get_offset_location("labware-id-1")
+ assert offset_location is not None
+ assert offset_location.slotName == DeckSlotName.SLOT_A3
+ assert offset_location.definitionUri is None
+ assert offset_location.moduleModel == ModuleModel.TEMPERATURE_MODULE_V1
+
+
+@pytest.mark.parametrize("use_mocks", [False])
+def test_get_offset_location_module_with_adapter(
+ decoy: Decoy,
+ labware_store: LabwareStore,
+ module_store: ModuleStore,
+ nice_labware_definition: LabwareDefinition,
+ nice_adapter_definition: LabwareDefinition,
+ tempdeck_v2_def: ModuleDefinition,
+ labware_view: LabwareView,
+ subject: GeometryView,
+) -> None:
+ """Test if you can get the offset of a labware directly on a module."""
+ load_module = SucceedCommandAction(
+ command=LoadModule(
+ params=LoadModuleParams(
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_A2),
+ model=ModuleModel.TEMPERATURE_MODULE_V1,
+ ),
+ id="load-module-1",
+ createdAt=datetime.now(),
+ key="load-module-1",
+ status=CommandStatus.SUCCEEDED,
+ result=LoadModuleResult(
+ moduleId="module-id-1",
+ definition=tempdeck_v2_def,
+ model=tempdeck_v2_def.model,
+ ),
+ ),
+ private_result=None,
+ )
+ load_adapter = SucceedCommandAction(
+ command=LoadLabware(
+ id="load-adapter-1",
+ createdAt=datetime.now(),
+ key="load-adapter-1",
+ status=CommandStatus.SUCCEEDED,
+ result=LoadLabwareResult(
+ labwareId="adapter-id-1",
+ definition=nice_adapter_definition,
+ offsetId=None,
+ ),
+ params=LoadLabwareParams(
+ location=ModuleLocation(moduleId="module-id-1"),
+ loadName=nice_adapter_definition.parameters.loadName,
+ namespace=nice_adapter_definition.namespace,
+ version=nice_adapter_definition.version,
+ ),
+ ),
+ private_result=None,
+ )
+ load_labware = SucceedCommandAction(
+ command=LoadLabware(
+ id="load-labware-1",
+ createdAt=datetime.now(),
+ key="load-labware-1",
+ status=CommandStatus.SUCCEEDED,
+ result=LoadLabwareResult(
+ labwareId="labware-id-1",
+ definition=nice_labware_definition,
+ offsetId=None,
+ ),
+ params=LoadLabwareParams(
+ location=OnLabwareLocation(labwareId="adapter-id-1"),
+ loadName=nice_labware_definition.parameters.loadName,
+ namespace=nice_labware_definition.namespace,
+ version=nice_labware_definition.version,
+ ),
+ ),
+ private_result=None,
+ )
+ module_store.handle_action(load_module)
+ labware_store.handle_action(load_adapter)
+ labware_store.handle_action(load_labware)
+ offset_location = subject.get_offset_location("labware-id-1")
+ assert offset_location is not None
+ assert offset_location.slotName == DeckSlotName.SLOT_A2
+ assert offset_location.definitionUri == labware_view.get_uri_from_definition(
+ nice_adapter_definition
+ )
+ assert offset_location.moduleModel == ModuleModel.TEMPERATURE_MODULE_V1
+
+
+@pytest.mark.parametrize("use_mocks", [False])
+def test_get_offset_fails_with_off_deck_labware(
+ decoy: Decoy,
+ labware_store: LabwareStore,
+ nice_labware_definition: LabwareDefinition,
+ subject: GeometryView,
+) -> None:
+ """You cannot get the offset location for a labware loaded OFF_DECK."""
+ action = SucceedCommandAction(
+ command=LoadLabware(
+ id="load-labware-1",
+ createdAt=datetime.now(),
+ key="load-labware-1",
+ status=CommandStatus.SUCCEEDED,
+ result=LoadLabwareResult(
+ labwareId="labware-id-1",
+ definition=nice_labware_definition,
+ offsetId=None,
+ ),
+ params=LoadLabwareParams(
+ location=OFF_DECK_LOCATION,
+ loadName=nice_labware_definition.parameters.loadName,
+ namespace=nice_labware_definition.namespace,
+ version=nice_labware_definition.version,
+ ),
+ ),
+ private_result=None,
+ )
+ labware_store.handle_action(action)
+ offset_location = subject.get_offset_location("labware-id-1")
+ assert offset_location is None
diff --git a/api/tests/opentrons/protocol_engine/state/test_labware_store.py b/api/tests/opentrons/protocol_engine/state/test_labware_store.py
index efe67422da0..960ce423194 100644
--- a/api/tests/opentrons/protocol_engine/state/test_labware_store.py
+++ b/api/tests/opentrons/protocol_engine/state/test_labware_store.py
@@ -4,7 +4,7 @@
from datetime import datetime
from opentrons.calibration_storage.helpers import uri_from_details
-from opentrons_shared_data.deck.dev_types import DeckDefinitionV4
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5
from opentrons.protocols.models import LabwareDefinition
from opentrons.types import DeckSlotName
@@ -21,19 +21,20 @@
from opentrons.protocol_engine.actions import (
AddLabwareOffsetAction,
AddLabwareDefinitionAction,
- UpdateCommandAction,
+ SucceedCommandAction,
)
from opentrons.protocol_engine.state.labware import LabwareStore, LabwareState
from .command_fixtures import (
create_load_labware_command,
create_move_labware_command,
+ create_reload_labware_command,
)
@pytest.fixture
def subject(
- ot2_standard_deck_def: DeckDefinitionV4,
+ ot2_standard_deck_def: DeckDefinitionV5,
) -> LabwareStore:
"""Get a LabwareStore test subject."""
return LabwareStore(
@@ -43,7 +44,7 @@ def subject(
def test_initial_state(
- ot2_standard_deck_def: DeckDefinitionV4,
+ ot2_standard_deck_def: DeckDefinitionV5,
subject: LabwareStore,
) -> None:
"""It should create the labware store with preloaded fixed labware."""
@@ -125,13 +126,71 @@ def test_handles_load_labware(
created_at=datetime(year=2021, month=1, day=2),
)
)
- subject.handle_action(UpdateCommandAction(private_result=None, command=command))
+ subject.handle_action(SucceedCommandAction(private_result=None, command=command))
assert subject.state.labware_by_id["test-labware-id"] == expected_labware_data
assert subject.state.definitions_by_uri[expected_definition_uri] == well_plate_def
+def test_handles_reload_labware(
+ subject: LabwareStore,
+ well_plate_def: LabwareDefinition,
+) -> None:
+ """It should override labware data in the state."""
+ load_labware = create_load_labware_command(
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_A1),
+ labware_id="test-labware-id",
+ definition=well_plate_def,
+ display_name="display-name",
+ offset_id=None,
+ )
+
+ subject.handle_action(
+ SucceedCommandAction(private_result=None, command=load_labware)
+ )
+ expected_definition_uri = uri_from_details(
+ load_name=well_plate_def.parameters.loadName,
+ namespace=well_plate_def.namespace,
+ version=well_plate_def.version,
+ )
+ assert (
+ subject.state.labware_by_id["test-labware-id"].definitionUri
+ == expected_definition_uri
+ )
+
+ offset_request = LabwareOffsetCreate(
+ definitionUri="offset-definition-uri",
+ location=LabwareOffsetLocation(slotName=DeckSlotName.SLOT_1),
+ vector=LabwareOffsetVector(x=1, y=2, z=3),
+ )
+ subject.handle_action(
+ AddLabwareOffsetAction(
+ request=offset_request,
+ labware_offset_id="offset-id",
+ created_at=datetime(year=2021, month=1, day=2),
+ )
+ )
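+ # Reloading should re-resolve the labware's offset (now "offset-id") while keeping
+ # its original location, definition, and display name, as asserted below.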
+ reload_labware = create_reload_labware_command(
+ labware_id="test-labware-id",
+ offset_id="offset-id",
+ )
+ subject.handle_action(
+ SucceedCommandAction(private_result=None, command=reload_labware)
+ )
+
+ expected_labware_data = LoadedLabware(
+ id="test-labware-id",
+ loadName=well_plate_def.parameters.loadName,
+ definitionUri=expected_definition_uri,
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_A1),
+ offsetId="offset-id",
+ displayName="display-name",
+ )
+ assert subject.state.labware_by_id["test-labware-id"] == expected_labware_data
+ assert subject.state.definitions_by_uri[expected_definition_uri] == well_plate_def
+
+
def test_handles_add_labware_definition(
subject: LabwareStore,
well_plate_def: LabwareDefinition,
@@ -173,7 +232,7 @@ def test_handles_move_labware(
)
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=load_labware_command)
+ SucceedCommandAction(private_result=None, command=load_labware_command)
)
move_command = create_move_labware_command(
@@ -183,7 +242,7 @@ def test_handles_move_labware(
strategy=LabwareMovementStrategy.MANUAL_MOVE_WITH_PAUSE,
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=move_command)
+ SucceedCommandAction(private_result=None, command=move_command)
)
assert subject.state.labware_by_id["my-labware-id"].location == DeckSlotLocation(
@@ -217,7 +276,7 @@ def test_handles_move_labware_off_deck(
)
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=load_labware_command)
+ SucceedCommandAction(private_result=None, command=load_labware_command)
)
move_labware_off_deck_cmd = create_move_labware_command(
@@ -226,7 +285,7 @@ def test_handles_move_labware_off_deck(
strategy=LabwareMovementStrategy.MANUAL_MOVE_WITH_PAUSE,
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=move_labware_off_deck_cmd)
+ SucceedCommandAction(private_result=None, command=move_labware_off_deck_cmd)
)
assert subject.state.labware_by_id["my-labware-id"].location == OFF_DECK_LOCATION
assert subject.state.labware_by_id["my-labware-id"].offsetId is None
diff --git a/api/tests/opentrons/protocol_engine/state/test_labware_view.py b/api/tests/opentrons/protocol_engine/state/test_labware_view.py
index 494b92ed548..0f8086de606 100644
--- a/api/tests/opentrons/protocol_engine/state/test_labware_view.py
+++ b/api/tests/opentrons/protocol_engine/state/test_labware_view.py
@@ -5,8 +5,9 @@
from contextlib import nullcontext as does_not_raise
from opentrons_shared_data.deck import load as load_deck
-from opentrons_shared_data.deck.dev_types import DeckDefinitionV4
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5
from opentrons_shared_data.pipette.dev_types import LabwareUri
+from opentrons_shared_data.labware import load_definition
from opentrons_shared_data.labware.labware_definition import (
Parameters,
LabwareRole,
@@ -20,7 +21,7 @@
STANDARD_OT3_DECK,
)
from opentrons.protocols.models import LabwareDefinition
-from opentrons.types import DeckSlotName, Point, MountType
+from opentrons.types import DeckSlotName, MountType
from opentrons.protocol_engine import errors
from opentrons.protocol_engine.types import (
@@ -34,6 +35,7 @@
ModuleLocation,
OnLabwareLocation,
LabwareLocation,
+ AddressableAreaLocation,
OFF_DECK_LOCATION,
OverlapOffset,
LabwareMovementOffsetData,
@@ -108,14 +110,14 @@ def get_labware_view(
labware_by_id: Optional[Dict[str, LoadedLabware]] = None,
labware_offsets_by_id: Optional[Dict[str, LabwareOffset]] = None,
definitions_by_uri: Optional[Dict[str, LabwareDefinition]] = None,
- deck_definition: Optional[DeckDefinitionV4] = None,
+ deck_definition: Optional[DeckDefinitionV5] = None,
) -> LabwareView:
"""Get a labware view test subject."""
state = LabwareState(
labware_by_id=labware_by_id or {},
labware_offsets_by_id=labware_offsets_by_id or {},
definitions_by_uri=definitions_by_uri or {},
- deck_definition=deck_definition or cast(DeckDefinitionV4, {"fake": True}),
+ deck_definition=deck_definition or cast(DeckDefinitionV5, {"fake": True}),
)
return LabwareView(state=state)
@@ -694,7 +696,7 @@ def test_get_labware_overlap_offsets() -> None:
class ModuleOverlapSpec(NamedTuple):
"""Spec data to test LabwareView.get_module_overlap_offsets."""
- spec_deck_definition: DeckDefinitionV4
+ spec_deck_definition: DeckDefinitionV5
module_model: ModuleModel
stacking_offset_with_module: Dict[str, SharedDataOverlapOffset]
expected_offset: OverlapOffset
@@ -703,7 +705,7 @@ class ModuleOverlapSpec(NamedTuple):
module_overlap_specs: List[ModuleOverlapSpec] = [
ModuleOverlapSpec(
# Labware on temp module on OT2, with stacking overlap for temp module
- spec_deck_definition=load_deck(STANDARD_OT2_DECK, 4),
+ spec_deck_definition=load_deck(STANDARD_OT2_DECK, 5),
module_model=ModuleModel.TEMPERATURE_MODULE_V2,
stacking_offset_with_module={
str(ModuleModel.TEMPERATURE_MODULE_V2.value): SharedDataOverlapOffset(
@@ -714,7 +716,7 @@ class ModuleOverlapSpec(NamedTuple):
),
ModuleOverlapSpec(
# Labware on TC Gen1 on OT2, with stacking overlap for TC Gen1
- spec_deck_definition=load_deck(STANDARD_OT2_DECK, 4),
+ spec_deck_definition=load_deck(STANDARD_OT2_DECK, 5),
module_model=ModuleModel.THERMOCYCLER_MODULE_V1,
stacking_offset_with_module={
str(ModuleModel.THERMOCYCLER_MODULE_V1.value): SharedDataOverlapOffset(
@@ -725,21 +727,21 @@ class ModuleOverlapSpec(NamedTuple):
),
ModuleOverlapSpec(
# Labware on TC Gen2 on OT2, with no stacking overlap
- spec_deck_definition=load_deck(STANDARD_OT2_DECK, 4),
+ spec_deck_definition=load_deck(STANDARD_OT2_DECK, 5),
module_model=ModuleModel.THERMOCYCLER_MODULE_V2,
stacking_offset_with_module={},
expected_offset=OverlapOffset(x=0, y=0, z=10.7),
),
ModuleOverlapSpec(
# Labware on TC Gen2 on Flex, with no stacking overlap
- spec_deck_definition=load_deck(STANDARD_OT3_DECK, 4),
+ spec_deck_definition=load_deck(STANDARD_OT3_DECK, 5),
module_model=ModuleModel.THERMOCYCLER_MODULE_V2,
stacking_offset_with_module={},
expected_offset=OverlapOffset(x=0, y=0, z=0),
),
ModuleOverlapSpec(
# Labware on TC Gen2 on Flex, with stacking overlap for TC Gen2
- spec_deck_definition=load_deck(STANDARD_OT3_DECK, 4),
+ spec_deck_definition=load_deck(STANDARD_OT3_DECK, 5),
module_model=ModuleModel.THERMOCYCLER_MODULE_V2,
stacking_offset_with_module={
str(ModuleModel.THERMOCYCLER_MODULE_V2.value): SharedDataOverlapOffset(
@@ -756,7 +758,7 @@ class ModuleOverlapSpec(NamedTuple):
argvalues=module_overlap_specs,
)
def test_get_module_overlap_offsets(
- spec_deck_definition: DeckDefinitionV4,
+ spec_deck_definition: DeckDefinitionV5,
module_model: ModuleModel,
stacking_offset_with_module: Dict[str, SharedDataOverlapOffset],
expected_offset: OverlapOffset,
@@ -798,52 +800,13 @@ def test_get_default_magnet_height(
assert subject.get_default_magnet_height(module_id="module-id", offset=2) == 12.0
-def test_get_deck_definition(ot2_standard_deck_def: DeckDefinitionV4) -> None:
+def test_get_deck_definition(ot2_standard_deck_def: DeckDefinitionV5) -> None:
"""It should get the deck definition from the state."""
subject = get_labware_view(deck_definition=ot2_standard_deck_def)
assert subject.get_deck_definition() == ot2_standard_deck_def
-def test_get_slot_definition(ot2_standard_deck_def: DeckDefinitionV4) -> None:
- """It should return a deck slot's definition."""
- subject = get_labware_view(deck_definition=ot2_standard_deck_def)
-
- result = subject.get_slot_definition(DeckSlotName.SLOT_6)
-
- assert result["id"] == "6"
- assert result["displayName"] == "Slot 6"
-
-
-def test_get_slot_definition_raises_with_bad_slot_name(
- ot2_standard_deck_def: DeckDefinitionV4,
-) -> None:
- """It should raise a SlotDoesNotExistError if a bad slot name is given."""
- subject = get_labware_view(deck_definition=ot2_standard_deck_def)
-
- with pytest.raises(errors.SlotDoesNotExistError):
- subject.get_slot_definition(DeckSlotName.SLOT_A1)
-
-
-def test_get_slot_position(ot2_standard_deck_def: DeckDefinitionV4) -> None:
- """It should get the absolute location of a deck slot's origin."""
- subject = get_labware_view(deck_definition=ot2_standard_deck_def)
-
- expected_position = Point(x=132.5, y=90.5, z=0.0)
- result = subject.get_slot_position(DeckSlotName.SLOT_5)
-
- assert result == expected_position
-
-
-def test_get_slot_center_position(ot2_standard_deck_def: DeckDefinitionV4) -> None:
- """It should get the absolute location of a deck slot's center."""
- subject = get_labware_view(deck_definition=ot2_standard_deck_def)
-
- expected_center = Point(x=196.5, y=43.0, z=0.0)
- result = subject.get_slot_center_position(DeckSlotName.SLOT_2)
- assert result == expected_center
-
-
def test_get_labware_offset_vector() -> None:
"""It should get a labware's offset vector."""
labware_without_offset = LoadedLabware(
@@ -1023,7 +986,7 @@ def test_find_applicable_labware_offset() -> None:
)
-def test_get_display_name() -> None:
+def test_get_user_specified_display_name() -> None:
"""It should get a labware's user-specified display name."""
subject = get_labware_view(
labware_by_id={
@@ -1032,8 +995,38 @@ def test_get_display_name() -> None:
},
)
- assert subject.get_display_name("plate_with_display_name") == "Fancy Plate Name"
- assert subject.get_display_name("reservoir_without_display_name") is None
+ assert (
+ subject.get_user_specified_display_name("plate_with_display_name")
+ == "Fancy Plate Name"
+ )
+ assert (
+ subject.get_user_specified_display_name("reservoir_without_display_name")
+ is None
+ )
+
+
+def test_get_display_name(
+ well_plate_def: LabwareDefinition,
+ reservoir_def: LabwareDefinition,
+) -> None:
+ """It should get the labware's display name."""
+ subject = get_labware_view(
+ labware_by_id={
+ "plate_with_custom_display_name": plate,
+ "reservoir_with_default_display_name": reservoir,
+ },
+ definitions_by_uri={
+ "some-plate-uri": well_plate_def,
+ "some-reservoir-uri": reservoir_def,
+ },
+ )
+ assert (
+ subject.get_display_name("plate_with_custom_display_name") == "Fancy Plate Name"
+ )
+ assert (
+ subject.get_display_name("reservoir_with_default_display_name")
+ == "NEST 12 Well Reservoir 15 mL"
+ )
def test_get_fixed_trash_id() -> None:
@@ -1144,41 +1137,9 @@ def test_get_by_slot() -> None:
labware_by_id={"1": labware_1, "2": labware_2, "3": labware_3}
)
- assert subject.get_by_slot(DeckSlotName.SLOT_1, {"1", "2"}) == labware_1
- assert subject.get_by_slot(DeckSlotName.SLOT_2, {"1", "2"}) == labware_2
- assert subject.get_by_slot(DeckSlotName.SLOT_3, {"1", "2"}) is None
-
-
-def test_get_by_slot_prefers_later() -> None:
- """It should get the labware in a slot, preferring later items if locations match."""
- labware_1 = LoadedLabware.construct( # type: ignore[call-arg]
- id="1", location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1)
- )
- labware_1_again = LoadedLabware.construct( # type: ignore[call-arg]
- id="1-again", location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1)
- )
-
- subject = get_labware_view(
- labware_by_id={"1": labware_1, "1-again": labware_1_again}
- )
-
- assert subject.get_by_slot(DeckSlotName.SLOT_1, {"1", "1-again"}) == labware_1_again
-
-
-def test_get_by_slot_filter_ids() -> None:
- """It should filter labwares in the same slot using IDs."""
- labware_1 = LoadedLabware.construct( # type: ignore[call-arg]
- id="1", location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1)
- )
- labware_1_again = LoadedLabware.construct( # type: ignore[call-arg]
- id="1-again", location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1)
- )
-
- subject = get_labware_view(
- labware_by_id={"1": labware_1, "1-again": labware_1_again}
- )
-
- assert subject.get_by_slot(DeckSlotName.SLOT_1, {"1"}) == labware_1
+ assert subject.get_by_slot(DeckSlotName.SLOT_1) == labware_1
+ assert subject.get_by_slot(DeckSlotName.SLOT_2) == labware_2
+ assert subject.get_by_slot(DeckSlotName.SLOT_3) is None
@pytest.mark.parametrize(
@@ -1267,6 +1228,67 @@ def test_get_all_labware_definition_empty() -> None:
assert result == []
+def test_raise_if_labware_inaccessible_by_pipette_staging_area() -> None:
+ """It should raise if the labware is on a staging slot."""
+ subject = get_labware_view(
+ labware_by_id={
+ "labware-id": LoadedLabware(
+ id="labware-id",
+ loadName="test",
+ definitionUri="def-uri",
+ location=AddressableAreaLocation(addressableAreaName="B4"),
+ )
+ },
+ )
+
+ with pytest.raises(
+ errors.LocationNotAccessibleByPipetteError, match="on staging slot"
+ ):
+ subject.raise_if_labware_inaccessible_by_pipette("labware-id")
+
+
+def test_raise_if_labware_inaccessible_by_pipette_off_deck() -> None:
+ """It should raise if the labware is off-deck."""
+ subject = get_labware_view(
+ labware_by_id={
+ "labware-id": LoadedLabware(
+ id="labware-id",
+ loadName="test",
+ definitionUri="def-uri",
+ location=OFF_DECK_LOCATION,
+ )
+ },
+ )
+
+ with pytest.raises(errors.LocationNotAccessibleByPipetteError, match="off-deck"):
+ subject.raise_if_labware_inaccessible_by_pipette("labware-id")
+
+
+def test_raise_if_labware_inaccessible_by_pipette_stacked_labware_on_staging_area() -> None:
+ """It should raise if the labware is stacked on a staging slot."""
+ subject = get_labware_view(
+ labware_by_id={
+ "labware-id": LoadedLabware(
+ id="labware-id",
+ loadName="test",
+ definitionUri="def-uri",
+ location=OnLabwareLocation(labwareId="lower-labware-id"),
+ ),
+ "lower-labware-id": LoadedLabware(
+ id="lower-labware-id",
+ loadName="test",
+ definitionUri="def-uri",
+ location=AddressableAreaLocation(addressableAreaName="B4"),
+ ),
+ },
+ )
+
+ with pytest.raises(
+ errors.LocationNotAccessibleByPipetteError, match="on staging slot"
+ ):
+ subject.raise_if_labware_inaccessible_by_pipette("labware-id")
+
+
def test_raise_if_labware_cannot_be_stacked_is_adapter() -> None:
"""It should raise if the labware trying to be stacked is an adapter."""
subject = get_labware_view()
@@ -1382,13 +1404,13 @@ def test_raise_if_labware_cannot_be_stacked_on_labware_on_adapter() -> None:
)
-def test_get_deck_gripper_offsets(ot3_standard_deck_def: DeckDefinitionV4) -> None:
+def test_get_deck_gripper_offsets(ot3_standard_deck_def: DeckDefinitionV5) -> None:
"""It should get the deck's gripper offsets."""
subject = get_labware_view(deck_definition=ot3_standard_deck_def)
assert subject.get_deck_default_gripper_offsets() == LabwareMovementOffsetData(
pickUpOffset=LabwareOffsetVector(x=0, y=0, z=0),
- dropOffset=LabwareOffsetVector(x=0, y=0, z=-0.25),
+ dropOffset=LabwareOffsetVector(x=0, y=0, z=-0.75),
)
@@ -1496,3 +1518,36 @@ def test_get_grip_height_from_labware_bottom(
assert (
subject.get_grip_height_from_labware_bottom("reservoir-id") == 15.7
) # default
+
+
+@pytest.mark.parametrize(
+ "labware_to_check,well_bbox",
+ [
+ ("opentrons_universal_flat_adapter", Dimensions(0, 0, 0)),
+ (
+ "corning_96_wellplate_360ul_flat",
+ Dimensions(116.81 - 10.95, 77.67 - 7.81, 14.22),
+ ),
+ ("nest_12_reservoir_15ml", Dimensions(117.48 - 10.28, 78.38 - 7.18, 31.4)),
+ ],
+)
+def test_calculates_well_bounding_box(
+ labware_to_check: str, well_bbox: Dimensions
+) -> None:
+ """It should be able to calculate well bounding boxes."""
+ definition = LabwareDefinition.parse_obj(load_definition(labware_to_check, 1))
+ labware = LoadedLabware(
+ id="test-labware-id",
+ loadName=labware_to_check,
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
+ definitionUri="test-labware-uri",
+ offsetId=None,
+ displayName="Fancy Plate Name",
+ )
+ subject = get_labware_view(
+ labware_by_id={"test-labware-id": labware},
+ definitions_by_uri={"test-labware-uri": definition},
+ )
+ assert subject.get_well_bbox("test-labware-id").x == pytest.approx(well_bbox.x)
+ assert subject.get_well_bbox("test-labware-id").y == pytest.approx(well_bbox.y)
+ assert subject.get_well_bbox("test-labware-id").z == pytest.approx(well_bbox.z)
diff --git a/api/tests/opentrons/protocol_engine/state/test_module_store.py b/api/tests/opentrons/protocol_engine/state/test_module_store.py
index ffeca3dba2c..e6de0a96ac0 100644
--- a/api/tests/opentrons/protocol_engine/state/test_module_store.py
+++ b/api/tests/opentrons/protocol_engine/state/test_module_store.py
@@ -1,6 +1,10 @@
"""Module state store tests."""
+from typing import List, Set, cast, Dict, Optional
+
import pytest
-from pytest_lazyfixture import lazy_fixture # type: ignore[import]
+from opentrons_shared_data.robot.dev_types import RobotType
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5
+from pytest_lazyfixture import lazy_fixture # type: ignore[import-untyped]
from opentrons.types import DeckSlotName
from opentrons.protocol_engine import commands, actions
@@ -14,6 +18,10 @@
ModuleDefinition,
ModuleModel,
HeaterShakerLatchStatus,
+ DeckType,
+ AddressableArea,
+ DeckConfigurationType,
+ PotentialCutoutFixture,
)
from opentrons.protocol_engine.state.modules import (
@@ -34,19 +42,59 @@
ModuleSubStateType,
)
+from opentrons.protocol_engine.state.addressable_areas import (
+ AddressableAreaView,
+ AddressableAreaState,
+)
+from opentrons.protocol_engine.state.config import Config
from opentrons.hardware_control.modules.types import LiveData
+_OT2_STANDARD_CONFIG = Config(
+ use_simulated_deck_config=False,
+ robot_type="OT-2 Standard",
+ deck_type=DeckType.OT2_STANDARD,
+)
+
+
+def get_addressable_area_view(
+ loaded_addressable_areas_by_name: Optional[Dict[str, AddressableArea]] = None,
+ potential_cutout_fixtures_by_cutout_id: Optional[
+ Dict[str, Set[PotentialCutoutFixture]]
+ ] = None,
+ deck_definition: Optional[DeckDefinitionV5] = None,
+ deck_configuration: Optional[DeckConfigurationType] = None,
+ robot_type: RobotType = "OT-3 Standard",
+ use_simulated_deck_config: bool = False,
+) -> AddressableAreaView:
+ """Get a labware view test subject."""
+ state = AddressableAreaState(
+ loaded_addressable_areas_by_name=loaded_addressable_areas_by_name or {},
+ potential_cutout_fixtures_by_cutout_id=potential_cutout_fixtures_by_cutout_id
+ or {},
+ deck_definition=deck_definition or cast(DeckDefinitionV5, {"otId": "fake"}),
+ deck_configuration=deck_configuration or [],
+ robot_type=robot_type,
+ use_simulated_deck_config=use_simulated_deck_config,
+ )
+
+ return AddressableAreaView(state=state)
+
+
def test_initial_state() -> None:
"""It should initialize the module state."""
- subject = ModuleStore()
+ subject = ModuleStore(
+ config=_OT2_STANDARD_CONFIG,
+ )
assert subject.state == ModuleState(
+ deck_type=DeckType.OT2_STANDARD,
requested_model_by_id={},
slot_by_module_id={},
hardware_by_module_id={},
substate_by_module_id={},
module_offset_by_serial={},
+ additional_slots_occupied_by_module_id={},
)
@@ -129,7 +177,7 @@ def test_load_module(
expected_substate: ModuleSubStateType,
) -> None:
"""It should handle a successful LoadModule command."""
- action = actions.UpdateCommandAction(
+ action = actions.SucceedCommandAction(
private_result=None,
command=commands.LoadModule.construct( # type: ignore[call-arg]
params=commands.LoadModuleParams(
@@ -145,10 +193,13 @@ def test_load_module(
),
)
- subject = ModuleStore()
+ subject = ModuleStore(
+ config=_OT2_STANDARD_CONFIG,
+ )
subject.handle_action(action)
assert subject.state == ModuleState(
+ deck_type=DeckType.OT2_STANDARD,
slot_by_module_id={"module-id": DeckSlotName.SLOT_1},
requested_model_by_id={"module-id": params_model},
hardware_by_module_id={
@@ -159,8 +210,65 @@ def test_load_module(
},
substate_by_module_id={"module-id": expected_substate},
module_offset_by_serial={},
+ additional_slots_occupied_by_module_id={},
+ )
+
+
+@pytest.mark.parametrize(
+ argnames=["tc_slot", "deck_type", "robot_type", "expected_additional_slots"],
+ argvalues=[
+ (
+ DeckSlotName.SLOT_7,
+ DeckType.OT2_STANDARD,
+ "OT-2 Standard",
+ [DeckSlotName.SLOT_8, DeckSlotName.SLOT_10, DeckSlotName.SLOT_11],
+ ),
+ (
+ DeckSlotName.SLOT_B1,
+ DeckType.OT3_STANDARD,
+ "OT-3 Standard",
+ [DeckSlotName.SLOT_A1],
+ ),
+ ],
+)
+def test_load_thermocycler_in_thermocycler_slot(
+ tc_slot: DeckSlotName,
+ deck_type: DeckType,
+ robot_type: RobotType,
+ expected_additional_slots: List[DeckSlotName],
+ thermocycler_v2_def: ModuleDefinition,
+) -> None:
+ """It should update additional slots for thermocycler module."""
+ action = actions.SucceedCommandAction(
+ private_result=None,
+ command=commands.LoadModule.construct( # type: ignore[call-arg]
+ params=commands.LoadModuleParams(
+ model=ModuleModel.THERMOCYCLER_MODULE_V2,
+ location=DeckSlotLocation(slotName=tc_slot),
+ ),
+ result=commands.LoadModuleResult(
+ moduleId="module-id",
+ model=ModuleModel.THERMOCYCLER_MODULE_V2,
+ serialNumber="serial-number",
+ definition=thermocycler_v2_def,
+ ),
+ ),
)
+ subject = ModuleStore(
+ Config(
+ use_simulated_deck_config=False,
+ robot_type=robot_type,
+ deck_type=deck_type,
+ ),
+ )
+ subject.handle_action(action)
+
+ assert subject.state.slot_by_module_id == {"module-id": tc_slot}
+ assert subject.state.additional_slots_occupied_by_module_id == {
+ "module-id": expected_additional_slots
+ }
+
@pytest.mark.parametrize(
argnames=["module_definition", "live_data", "expected_substate"],
@@ -231,10 +339,13 @@ def test_add_module_action(
module_live_data=live_data,
)
- subject = ModuleStore()
+ subject = ModuleStore(
+ config=_OT2_STANDARD_CONFIG,
+ )
subject.handle_action(action)
assert subject.state == ModuleState(
+ deck_type=DeckType.OT2_STANDARD,
slot_by_module_id={"module-id": None},
requested_model_by_id={"module-id": None},
hardware_by_module_id={
@@ -245,6 +356,7 @@ def test_add_module_action(
},
substate_by_module_id={"module-id": expected_substate},
module_offset_by_serial={},
+ additional_slots_occupied_by_module_id={},
)
@@ -270,13 +382,15 @@ def test_handle_hs_temperature_commands(heater_shaker_v1_def: ModuleDefinition)
params=hs_commands.DeactivateHeaterParams(moduleId="module-id"),
result=hs_commands.DeactivateHeaterResult(),
)
- subject = ModuleStore()
+ subject = ModuleStore(
+ config=_OT2_STANDARD_CONFIG,
+ )
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_module_cmd)
+ actions.SucceedCommandAction(private_result=None, command=load_module_cmd)
)
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=set_temp_cmd)
+ actions.SucceedCommandAction(private_result=None, command=set_temp_cmd)
)
assert subject.state.substate_by_module_id == {
"module-id": HeaterShakerModuleSubState(
@@ -287,7 +401,7 @@ def test_handle_hs_temperature_commands(heater_shaker_v1_def: ModuleDefinition)
)
}
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=deactivate_cmd)
+ actions.SucceedCommandAction(private_result=None, command=deactivate_cmd)
)
assert subject.state.substate_by_module_id == {
"module-id": HeaterShakerModuleSubState(
@@ -321,13 +435,15 @@ def test_handle_hs_shake_commands(heater_shaker_v1_def: ModuleDefinition) -> Non
params=hs_commands.DeactivateShakerParams(moduleId="module-id"),
result=hs_commands.DeactivateShakerResult(),
)
- subject = ModuleStore()
+ subject = ModuleStore(
+ config=_OT2_STANDARD_CONFIG,
+ )
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_module_cmd)
+ actions.SucceedCommandAction(private_result=None, command=load_module_cmd)
)
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=set_shake_cmd)
+ actions.SucceedCommandAction(private_result=None, command=set_shake_cmd)
)
assert subject.state.substate_by_module_id == {
"module-id": HeaterShakerModuleSubState(
@@ -338,7 +454,7 @@ def test_handle_hs_shake_commands(heater_shaker_v1_def: ModuleDefinition) -> Non
)
}
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=deactivate_cmd)
+ actions.SucceedCommandAction(private_result=None, command=deactivate_cmd)
)
assert subject.state.substate_by_module_id == {
"module-id": HeaterShakerModuleSubState(
@@ -374,10 +490,12 @@ def test_handle_hs_labware_latch_commands(
params=hs_commands.OpenLabwareLatchParams(moduleId="module-id"),
result=hs_commands.OpenLabwareLatchResult(pipetteRetracted=False),
)
- subject = ModuleStore()
+ subject = ModuleStore(
+ config=_OT2_STANDARD_CONFIG,
+ )
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_module_cmd)
+ actions.SucceedCommandAction(private_result=None, command=load_module_cmd)
)
assert subject.state.substate_by_module_id == {
"module-id": HeaterShakerModuleSubState(
@@ -389,7 +507,7 @@ def test_handle_hs_labware_latch_commands(
}
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=close_latch_cmd)
+ actions.SucceedCommandAction(private_result=None, command=close_latch_cmd)
)
assert subject.state.substate_by_module_id == {
"module-id": HeaterShakerModuleSubState(
@@ -400,7 +518,7 @@ def test_handle_hs_labware_latch_commands(
)
}
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=open_latch_cmd)
+ actions.SucceedCommandAction(private_result=None, command=open_latch_cmd)
)
assert subject.state.substate_by_module_id == {
"module-id": HeaterShakerModuleSubState(
@@ -438,13 +556,15 @@ def test_handle_tempdeck_temperature_commands(
params=temp_commands.DeactivateTemperatureParams(moduleId="module-id"),
result=temp_commands.DeactivateTemperatureResult(),
)
- subject = ModuleStore()
+ subject = ModuleStore(
+ config=_OT2_STANDARD_CONFIG,
+ )
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_module_cmd)
+ actions.SucceedCommandAction(private_result=None, command=load_module_cmd)
)
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=set_temp_cmd)
+ actions.SucceedCommandAction(private_result=None, command=set_temp_cmd)
)
assert subject.state.substate_by_module_id == {
"module-id": TemperatureModuleSubState(
@@ -452,7 +572,7 @@ def test_handle_tempdeck_temperature_commands(
)
}
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=deactivate_cmd)
+ actions.SucceedCommandAction(private_result=None, command=deactivate_cmd)
)
assert subject.state.substate_by_module_id == {
"module-id": TemperatureModuleSubState(
@@ -497,13 +617,15 @@ def test_handle_thermocycler_temperature_commands(
params=tc_commands.DeactivateLidParams(moduleId="module-id"),
result=tc_commands.DeactivateLidResult(),
)
- subject = ModuleStore()
+ subject = ModuleStore(
+ config=_OT2_STANDARD_CONFIG,
+ )
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_module_cmd)
+ actions.SucceedCommandAction(private_result=None, command=load_module_cmd)
)
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=set_block_temp_cmd)
+ actions.SucceedCommandAction(private_result=None, command=set_block_temp_cmd)
)
assert subject.state.substate_by_module_id == {
"module-id": ThermocyclerModuleSubState(
@@ -514,7 +636,7 @@ def test_handle_thermocycler_temperature_commands(
)
}
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=set_lid_temp_cmd)
+ actions.SucceedCommandAction(private_result=None, command=set_lid_temp_cmd)
)
assert subject.state.substate_by_module_id == {
"module-id": ThermocyclerModuleSubState(
@@ -525,7 +647,7 @@ def test_handle_thermocycler_temperature_commands(
)
}
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=deactivate_lid_cmd)
+ actions.SucceedCommandAction(private_result=None, command=deactivate_lid_cmd)
)
assert subject.state.substate_by_module_id == {
"module-id": ThermocyclerModuleSubState(
@@ -536,7 +658,7 @@ def test_handle_thermocycler_temperature_commands(
)
}
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=deactivate_block_cmd)
+ actions.SucceedCommandAction(private_result=None, command=deactivate_block_cmd)
)
assert subject.state.substate_by_module_id == {
"module-id": ThermocyclerModuleSubState(
@@ -574,13 +696,19 @@ def test_handle_thermocycler_lid_commands(
result=tc_commands.CloseLidResult(),
)
- subject = ModuleStore()
+ subject = ModuleStore(
+ Config(
+ use_simulated_deck_config=False,
+ robot_type="OT-3 Standard",
+ deck_type=DeckType.OT3_STANDARD,
+ ),
+ )
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_module_cmd)
+ actions.SucceedCommandAction(private_result=None, command=load_module_cmd)
)
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=open_lid_cmd)
+ actions.SucceedCommandAction(private_result=None, command=open_lid_cmd)
)
assert subject.state.substate_by_module_id == {
"module-id": ThermocyclerModuleSubState(
@@ -592,7 +720,7 @@ def test_handle_thermocycler_lid_commands(
}
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=close_lid_cmd)
+ actions.SucceedCommandAction(private_result=None, command=close_lid_cmd)
)
assert subject.state.substate_by_module_id == {
"module-id": ThermocyclerModuleSubState(
diff --git a/api/tests/opentrons/protocol_engine/state/test_module_view.py b/api/tests/opentrons/protocol_engine/state/test_module_view.py
index 5b83cda94f0..b840673f2e8 100644
--- a/api/tests/opentrons/protocol_engine/state/test_module_view.py
+++ b/api/tests/opentrons/protocol_engine/state/test_module_view.py
@@ -1,9 +1,24 @@
"""Tests for module state accessors in the protocol engine state store."""
import pytest
-from pytest_lazyfixture import lazy_fixture # type: ignore[import]
+from math import isclose
+from pytest_lazyfixture import lazy_fixture # type: ignore[import-untyped]
from contextlib import nullcontext as does_not_raise
-from typing import ContextManager, Dict, NamedTuple, Optional, Type, Union, Any
+from typing import (
+ ContextManager,
+ Dict,
+ NamedTuple,
+ Optional,
+ Type,
+ Union,
+ Any,
+ List,
+ Set,
+ cast,
+)
+
+from opentrons_shared_data.robot.dev_types import RobotType
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5
from opentrons_shared_data import load_shared_data
from opentrons.types import DeckSlotName, MountType
@@ -13,18 +28,24 @@
DeckSlotLocation,
ModuleDefinition,
ModuleModel,
- ModuleLocation,
LabwareOffsetVector,
DeckType,
ModuleOffsetData,
HeaterShakerLatchStatus,
LabwareMovementOffsetData,
+ AddressableArea,
+ DeckConfigurationType,
+ PotentialCutoutFixture,
)
from opentrons.protocol_engine.state.modules import (
ModuleView,
ModuleState,
HardwareModule,
)
+from opentrons.protocol_engine.state.addressable_areas import (
+ AddressableAreaView,
+ AddressableAreaState,
+)
from opentrons.protocol_engine.state.module_substates import (
HeaterShakerModuleSubState,
@@ -37,22 +58,63 @@
ThermocyclerModuleId,
ModuleSubStateType,
)
+from opentrons_shared_data.deck import load as load_deck
+from opentrons.protocols.api_support.deck_type import (
+ STANDARD_OT3_DECK,
+)
+
+
+@pytest.fixture(scope="session")
+def ot3_standard_deck_def() -> DeckDefinitionV5:
+ """Get the OT-2 standard deck definition."""
+ return load_deck(STANDARD_OT3_DECK, 5)
+
+
+def get_addressable_area_view(
+ loaded_addressable_areas_by_name: Optional[Dict[str, AddressableArea]] = None,
+ potential_cutout_fixtures_by_cutout_id: Optional[
+ Dict[str, Set[PotentialCutoutFixture]]
+ ] = None,
+ deck_definition: Optional[DeckDefinitionV5] = None,
+ deck_configuration: Optional[DeckConfigurationType] = None,
+ robot_type: RobotType = "OT-3 Standard",
+ use_simulated_deck_config: bool = False,
+) -> AddressableAreaView:
+ """Get a labware view test subject."""
+ state = AddressableAreaState(
+ loaded_addressable_areas_by_name=loaded_addressable_areas_by_name or {},
+ potential_cutout_fixtures_by_cutout_id=potential_cutout_fixtures_by_cutout_id
+ or {},
+ deck_definition=deck_definition or cast(DeckDefinitionV5, {"otId": "fake"}),
+ deck_configuration=deck_configuration or [],
+ robot_type=robot_type,
+ use_simulated_deck_config=use_simulated_deck_config,
+ )
+
+ return AddressableAreaView(state=state)
def make_module_view(
+ deck_type: Optional[DeckType] = None,
slot_by_module_id: Optional[Dict[str, Optional[DeckSlotName]]] = None,
requested_model_by_module_id: Optional[Dict[str, Optional[ModuleModel]]] = None,
hardware_by_module_id: Optional[Dict[str, HardwareModule]] = None,
substate_by_module_id: Optional[Dict[str, ModuleSubStateType]] = None,
module_offset_by_serial: Optional[Dict[str, ModuleOffsetData]] = None,
+ additional_slots_occupied_by_module_id: Optional[
+ Dict[str, List[DeckSlotName]]
+ ] = None,
) -> ModuleView:
"""Get a module view test subject with the specified state."""
state = ModuleState(
+ deck_type=deck_type or DeckType.OT2_STANDARD,
slot_by_module_id=slot_by_module_id or {},
requested_model_by_id=requested_model_by_module_id or {},
hardware_by_module_id=hardware_by_module_id or {},
substate_by_module_id=substate_by_module_id or {},
module_offset_by_serial=module_offset_by_serial or {},
+ additional_slots_occupied_by_module_id=additional_slots_occupied_by_module_id
+ or {},
)
return ModuleView(state=state)
@@ -316,6 +378,7 @@ def test_get_module_offset_for_ot2_standard(
) -> None:
"""It should return the correct labware offset for module in specified slot."""
subject = make_module_view(
+ deck_type=DeckType.OT2_STANDARD,
slot_by_module_id={"module-id": slot},
hardware_by_module_id={
"module-id": HardwareModule(
@@ -325,43 +388,49 @@ def test_get_module_offset_for_ot2_standard(
},
)
assert (
- subject.get_nominal_module_offset("module-id", DeckType.OT2_STANDARD)
+ subject.get_nominal_module_offset("module-id", get_addressable_area_view())
== expected_offset
)
@pytest.mark.parametrize(
- argnames=["module_def", "slot", "expected_offset"],
+ argnames=["module_def", "slot", "expected_offset", "deck_definition"],
argvalues=[
(
lazy_fixture("tempdeck_v2_def"),
DeckSlotName.SLOT_1.to_ot3_equivalent(),
LabwareOffsetVector(x=0, y=0, z=9),
+ lazy_fixture("ot3_standard_deck_def"),
),
(
lazy_fixture("tempdeck_v2_def"),
DeckSlotName.SLOT_3.to_ot3_equivalent(),
LabwareOffsetVector(x=0, y=0, z=9),
+ lazy_fixture("ot3_standard_deck_def"),
),
(
lazy_fixture("thermocycler_v2_def"),
DeckSlotName.SLOT_7.to_ot3_equivalent(),
LabwareOffsetVector(x=-20.005, y=67.96, z=10.96),
+ lazy_fixture("ot3_standard_deck_def"),
),
(
lazy_fixture("heater_shaker_v1_def"),
DeckSlotName.SLOT_1.to_ot3_equivalent(),
LabwareOffsetVector(x=0, y=0, z=18.95),
+ lazy_fixture("ot3_standard_deck_def"),
),
(
lazy_fixture("heater_shaker_v1_def"),
DeckSlotName.SLOT_3.to_ot3_equivalent(),
LabwareOffsetVector(x=0, y=0, z=18.95),
+ lazy_fixture("ot3_standard_deck_def"),
),
(
lazy_fixture("mag_block_v1_def"),
- DeckSlotName.SLOT_2,
+ DeckSlotName.SLOT_2.to_ot3_equivalent(),
LabwareOffsetVector(x=0, y=0, z=38.0),
+ lazy_fixture("ot3_standard_deck_def"),
),
],
)
@@ -369,9 +438,11 @@ def test_get_module_offset_for_ot3_standard(
module_def: ModuleDefinition,
slot: DeckSlotName,
expected_offset: LabwareOffsetVector,
+ deck_definition: DeckDefinitionV5,
) -> None:
"""It should return the correct labware offset for module in specified slot."""
subject = make_module_view(
+ deck_type=DeckType.OT3_STANDARD,
slot_by_module_id={"module-id": slot},
hardware_by_module_id={
"module-id": HardwareModule(
@@ -380,9 +451,16 @@ def test_get_module_offset_for_ot3_standard(
)
},
)
+
result_offset = subject.get_nominal_module_offset(
- "module-id", DeckType.OT3_STANDARD
+ "module-id",
+ get_addressable_area_view(
+ deck_configuration=None,
+ deck_definition=deck_definition,
+ use_simulated_deck_config=True,
+ ),
)
+
assert (result_offset.x, result_offset.y, result_offset.z) == pytest.approx(
(expected_offset.x, expected_offset.y, expected_offset.z)
)
@@ -1595,11 +1673,10 @@ def test_get_overall_height(
),
(DeckSlotLocation(slotName=DeckSlotName.SLOT_2), does_not_raise()),
(DeckSlotLocation(slotName=DeckSlotName.FIXED_TRASH), does_not_raise()),
- (ModuleLocation(moduleId="module-id-1"), does_not_raise()),
],
)
def test_raise_if_labware_in_location(
- location: Union[DeckSlotLocation, ModuleLocation],
+ location: DeckSlotLocation,
expected_raise: ContextManager[Any],
thermocycler_v1_def: ModuleDefinition,
) -> None:
@@ -1648,19 +1725,19 @@ def test_get_by_slot() -> None:
},
)
- assert subject.get_by_slot(DeckSlotName.SLOT_1, {"1", "2"}) == LoadedModule(
+ assert subject.get_by_slot(DeckSlotName.SLOT_1) == LoadedModule(
id="1",
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
model=ModuleModel.TEMPERATURE_MODULE_V1,
serialNumber="serial-number-1",
)
- assert subject.get_by_slot(DeckSlotName.SLOT_2, {"1", "2"}) == LoadedModule(
+ assert subject.get_by_slot(DeckSlotName.SLOT_2) == LoadedModule(
id="2",
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_2),
model=ModuleModel.TEMPERATURE_MODULE_V2,
serialNumber="serial-number-2",
)
- assert subject.get_by_slot(DeckSlotName.SLOT_3, {"1", "2"}) is None
+ assert subject.get_by_slot(DeckSlotName.SLOT_3) is None
def test_get_by_slot_prefers_later() -> None:
@@ -1686,7 +1763,7 @@ def test_get_by_slot_prefers_later() -> None:
},
)
- assert subject.get_by_slot(DeckSlotName.SLOT_1, {"1", "1-again"}) == LoadedModule(
+ assert subject.get_by_slot(DeckSlotName.SLOT_1) == LoadedModule(
id="1-again",
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
model=ModuleModel.TEMPERATURE_MODULE_V1,
@@ -1694,37 +1771,6 @@ def test_get_by_slot_prefers_later() -> None:
)
-def test_get_by_slot_filter_ids() -> None:
- """It should filter modules by ID in addition to checking the slot."""
- subject = make_module_view(
- slot_by_module_id={
- "1": DeckSlotName.SLOT_1,
- "1-again": DeckSlotName.SLOT_1,
- },
- hardware_by_module_id={
- "1": HardwareModule(
- serial_number="serial-number-1",
- definition=ModuleDefinition.construct( # type: ignore[call-arg]
- model=ModuleModel.TEMPERATURE_MODULE_V1
- ),
- ),
- "1-again": HardwareModule(
- serial_number="serial-number-1-again",
- definition=ModuleDefinition.construct( # type: ignore[call-arg]
- model=ModuleModel.TEMPERATURE_MODULE_V1
- ),
- ),
- },
- )
-
- assert subject.get_by_slot(DeckSlotName.SLOT_1, {"1"}) == LoadedModule(
- id="1",
- location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
- model=ModuleModel.TEMPERATURE_MODULE_V1,
- serialNumber="serial-number-1",
- )
-
-
@pytest.mark.parametrize(
argnames=["mount", "target_slot", "expected_result"],
argvalues=[
@@ -1756,14 +1802,14 @@ def test_is_edge_move_unsafe(
lazy_fixture("thermocycler_v2_def"),
LabwareMovementOffsetData(
pickUpOffset=LabwareOffsetVector(x=0, y=0, z=4.6),
- dropOffset=LabwareOffsetVector(x=0, y=0, z=4.6),
+ dropOffset=LabwareOffsetVector(x=0, y=0, z=5.6),
),
),
(
lazy_fixture("heater_shaker_v1_def"),
LabwareMovementOffsetData(
pickUpOffset=LabwareOffsetVector(x=0, y=0, z=0),
- dropOffset=LabwareOffsetVector(x=0, y=0, z=0.5),
+ dropOffset=LabwareOffsetVector(x=0, y=0, z=1.0),
),
),
(
@@ -1792,3 +1838,128 @@ def test_get_default_gripper_offsets(
},
)
assert subject.get_default_gripper_offsets("module-1") == expected_offset_data
+
+
+@pytest.mark.parametrize(
+ argnames=["deck_type", "slot_name", "expected_highest_z", "deck_definition"],
+ argvalues=[
+ (
+ DeckType.OT2_STANDARD,
+ DeckSlotName.SLOT_1,
+ 84,
+ lazy_fixture("ot3_standard_deck_def"),
+ ),
+ (
+ DeckType.OT3_STANDARD,
+ DeckSlotName.SLOT_D1,
+ 12.91,
+ lazy_fixture("ot3_standard_deck_def"),
+ ),
+ ],
+)
+def test_get_module_highest_z(
+ tempdeck_v2_def: ModuleDefinition,
+ deck_type: DeckType,
+ slot_name: DeckSlotName,
+ expected_highest_z: float,
+ deck_definition: DeckDefinitionV5,
+) -> None:
+ """It should get the highest z point of the module."""
+ subject = make_module_view(
+ deck_type=deck_type,
+ slot_by_module_id={"module-id": slot_name},
+ requested_model_by_module_id={
+ "module-id": ModuleModel.TEMPERATURE_MODULE_V2,
+ },
+ hardware_by_module_id={
+ "module-id": HardwareModule(
+ serial_number="module-serial",
+ definition=tempdeck_v2_def,
+ )
+ },
+ )
+ assert isclose(
+ subject.get_module_highest_z(
+ module_id="module-id",
+ addressable_areas=get_addressable_area_view(
+ deck_configuration=None,
+ deck_definition=deck_definition,
+ use_simulated_deck_config=True,
+ ),
+ ),
+ expected_highest_z,
+ )
+
+
+def test_get_overflowed_module_in_slot(tempdeck_v1_def: ModuleDefinition) -> None:
+ """It should return the module occupying but not loaded in the given slot."""
+ subject = make_module_view(
+ slot_by_module_id={"module-id": DeckSlotName.SLOT_1},
+ hardware_by_module_id={
+ "module-id": HardwareModule(
+ serial_number="serial-number",
+ definition=tempdeck_v1_def,
+ )
+ },
+ additional_slots_occupied_by_module_id={
+ "module-id": [DeckSlotName.SLOT_6, DeckSlotName.SLOT_A1],
+ },
+ )
+ assert subject.get_overflowed_module_in_slot(DeckSlotName.SLOT_6) == LoadedModule(
+ id="module-id",
+ model=ModuleModel.TEMPERATURE_MODULE_V1,
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
+ serialNumber="serial-number",
+ )
+
+
+@pytest.mark.parametrize(
+ argnames=["deck_type", "module_def", "module_slot", "expected_result"],
+ argvalues=[
+ (
+ DeckType.OT3_STANDARD,
+ lazy_fixture("thermocycler_v2_def"),
+ DeckSlotName.SLOT_A1,
+ True,
+ ),
+ (
+ DeckType.OT3_STANDARD,
+ lazy_fixture("tempdeck_v1_def"),
+ DeckSlotName.SLOT_A1,
+ False,
+ ),
+ (
+ DeckType.OT3_STANDARD,
+ lazy_fixture("thermocycler_v2_def"),
+ DeckSlotName.SLOT_1,
+ False,
+ ),
+ (
+ DeckType.OT2_STANDARD,
+ lazy_fixture("thermocycler_v2_def"),
+ DeckSlotName.SLOT_A1,
+ False,
+ ),
+ ],
+)
+def test_is_flex_deck_with_thermocycler(
+ deck_type: DeckType,
+ module_def: ModuleDefinition,
+ module_slot: DeckSlotName,
+ expected_result: bool,
+) -> None:
+ """It should return True if there is a thermocycler on Flex."""
+ subject = make_module_view(
+ slot_by_module_id={"module-id": DeckSlotName.SLOT_B1},
+ hardware_by_module_id={
+ "module-id": HardwareModule(
+ serial_number="serial-number",
+ definition=module_def,
+ )
+ },
+ additional_slots_occupied_by_module_id={
+ "module-id": [module_slot, DeckSlotName.SLOT_C1],
+ },
+ deck_type=deck_type,
+ )
+ assert subject.is_flex_deck_with_thermocycler() == expected_result
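
The test_module_store.py and test_module_view.py hunks above now build their stores and views against an explicit engine Config and deliver command results through SucceedCommandAction instead of the old UpdateCommandAction. Below is a minimal sketch of that construction pattern, not part of the patch; names and import paths are taken from the hunks above, and load_module_cmd stands in for a commands.LoadModule fixture like the ones the tests use:

# Sketch only: mirrors the _OT2_STANDARD_CONFIG pattern used by the tests above.
from opentrons.protocol_engine import actions
from opentrons.protocol_engine.state.config import Config
from opentrons.protocol_engine.state.modules import ModuleStore
from opentrons.protocol_engine.types import DeckType

store = ModuleStore(
    config=Config(
        use_simulated_deck_config=False,
        robot_type="OT-2 Standard",
        deck_type=DeckType.OT2_STANDARD,
    ),
)
# store.handle_action(
#     actions.SucceedCommandAction(private_result=None, command=load_module_cmd)
# )
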
diff --git a/api/tests/opentrons/protocol_engine/state/test_motion_view.py b/api/tests/opentrons/protocol_engine/state/test_motion_view.py
index 19680688644..61ec01262f3 100644
--- a/api/tests/opentrons/protocol_engine/state/test_motion_view.py
+++ b/api/tests/opentrons/protocol_engine/state/test_motion_view.py
@@ -16,12 +16,15 @@
LoadedPipette,
DeckSlotLocation,
CurrentWell,
+ CurrentAddressableArea,
MotorAxis,
+ AddressableOffsetVector,
)
from opentrons.protocol_engine.state import PipetteLocationData, move_types
from opentrons.protocol_engine.state.config import Config
from opentrons.protocol_engine.state.labware import LabwareView
from opentrons.protocol_engine.state.pipettes import PipetteView
+from opentrons.protocol_engine.state.addressable_areas import AddressableAreaView
from opentrons.protocol_engine.state.geometry import GeometryView
from opentrons.protocol_engine.state.motion import MotionView
from opentrons.protocol_engine.state.modules import ModuleView
@@ -60,6 +63,7 @@ def subject(
mock_engine_config: Config,
labware_view: LabwareView,
pipette_view: PipetteView,
+ addressable_area_view: AddressableAreaView,
geometry_view: GeometryView,
mock_module_view: ModuleView,
) -> MotionView:
@@ -68,6 +72,7 @@ def subject(
config=mock_engine_config,
labware_view=labware_view,
pipette_view=pipette_view,
+ addressable_area_view=addressable_area_view,
geometry_view=geometry_view,
module_view=mock_module_view,
)
@@ -79,7 +84,7 @@ def test_get_pipette_location_with_no_current_location(
subject: MotionView,
) -> None:
"""It should return mount and critical_point=None if no location."""
- decoy.when(pipette_view.get_current_well()).then_return(None)
+ decoy.when(pipette_view.get_current_location()).then_return(None)
decoy.when(pipette_view.get("pipette-id")).then_return(
LoadedPipette(
@@ -94,14 +99,14 @@ def test_get_pipette_location_with_no_current_location(
assert result == PipetteLocationData(mount=MountType.LEFT, critical_point=None)
-def test_get_pipette_location_with_current_location_with_quirks(
+def test_get_pipette_location_with_current_location_with_y_center(
decoy: Decoy,
labware_view: LabwareView,
pipette_view: PipetteView,
subject: MotionView,
) -> None:
- """It should return cp=XY_CENTER if location labware has center quirk."""
- decoy.when(pipette_view.get_current_well()).then_return(
+ """It should return cp=Y_CENTER if location labware requests."""
+ decoy.when(pipette_view.get_current_location()).then_return(
CurrentWell(pipette_id="pipette-id", labware_id="reservoir-id", well_name="A1")
)
@@ -114,9 +119,41 @@ def test_get_pipette_location_with_current_location_with_quirks(
)
decoy.when(
- labware_view.get_has_quirk(
+ labware_view.get_should_center_column_on_target_well(
+ "reservoir-id",
+ )
+ ).then_return(True)
+
+ result = subject.get_pipette_location("pipette-id")
+
+ assert result == PipetteLocationData(
+ mount=MountType.RIGHT,
+ critical_point=CriticalPoint.Y_CENTER,
+ )
+
+
+def test_get_pipette_location_with_current_location_with_xy_center(
+ decoy: Decoy,
+ labware_view: LabwareView,
+ pipette_view: PipetteView,
+ subject: MotionView,
+) -> None:
+ """It should return cp=XY_CENTER if location labware requests."""
+ decoy.when(pipette_view.get_current_location()).then_return(
+ CurrentWell(pipette_id="pipette-id", labware_id="reservoir-id", well_name="A1")
+ )
+
+ decoy.when(pipette_view.get("pipette-id")).then_return(
+ LoadedPipette(
+ id="pipette-id",
+ mount=MountType.RIGHT,
+ pipetteName=PipetteNameType.P300_SINGLE,
+ )
+ )
+
+ decoy.when(
+ labware_view.get_should_center_pipette_on_target_well(
"reservoir-id",
- "centerMultichannelOnWells",
)
).then_return(True)
@@ -135,7 +172,7 @@ def test_get_pipette_location_with_current_location_different_pipette(
subject: MotionView,
) -> None:
"""It should return mount and cp=None if location used other pipette."""
- decoy.when(pipette_view.get_current_well()).then_return(
+ decoy.when(pipette_view.get_current_location()).then_return(
CurrentWell(
pipette_id="other-pipette-id",
labware_id="reservoir-id",
@@ -152,9 +189,14 @@ def test_get_pipette_location_with_current_location_different_pipette(
)
decoy.when(
- labware_view.get_has_quirk(
+ labware_view.get_should_center_column_on_target_well(
+ "reservoir-id",
+ )
+ ).then_return(False)
+
+ decoy.when(
+ labware_view.get_should_center_pipette_on_target_well(
"reservoir-id",
- "centerMultichannelOnWells",
)
).then_return(False)
@@ -166,13 +208,13 @@ def test_get_pipette_location_with_current_location_different_pipette(
)
-def test_get_pipette_location_override_current_location(
+def test_get_pipette_location_override_current_location_xy_center(
decoy: Decoy,
labware_view: LabwareView,
pipette_view: PipetteView,
subject: MotionView,
) -> None:
- """It should calculate pipette location from a passed in deck location."""
+ """It should calculate pipette location from a passed in deck location with xy override."""
current_well = CurrentWell(
pipette_id="pipette-id",
labware_id="reservoir-id",
@@ -188,15 +230,14 @@ def test_get_pipette_location_override_current_location(
)
decoy.when(
- labware_view.get_has_quirk(
+ labware_view.get_should_center_pipette_on_target_well(
"reservoir-id",
- "centerMultichannelOnWells",
)
).then_return(True)
result = subject.get_pipette_location(
pipette_id="pipette-id",
- current_well=current_well,
+ current_location=current_well,
)
assert result == PipetteLocationData(
@@ -205,7 +246,127 @@ def test_get_pipette_location_override_current_location(
)
-def test_get_movement_waypoints_to_well(
+def test_get_pipette_location_override_current_location_y_center(
+ decoy: Decoy,
+ labware_view: LabwareView,
+ pipette_view: PipetteView,
+ subject: MotionView,
+) -> None:
+ """It should calculate pipette location from a passed in deck location with xy override."""
+ current_well = CurrentWell(
+ pipette_id="pipette-id",
+ labware_id="reservoir-id",
+ well_name="A1",
+ )
+
+ decoy.when(pipette_view.get("pipette-id")).then_return(
+ LoadedPipette(
+ id="pipette-id",
+ mount=MountType.RIGHT,
+ pipetteName=PipetteNameType.P300_SINGLE,
+ )
+ )
+
+ decoy.when(
+ labware_view.get_should_center_column_on_target_well(
+ "reservoir-id",
+ )
+ ).then_return(True)
+
+ result = subject.get_pipette_location(
+ pipette_id="pipette-id",
+ current_location=current_well,
+ )
+
+ assert result == PipetteLocationData(
+ mount=MountType.RIGHT,
+ critical_point=CriticalPoint.Y_CENTER,
+ )
+
+
+def test_get_movement_waypoints_to_well_for_y_center(
+ decoy: Decoy,
+ labware_view: LabwareView,
+ pipette_view: PipetteView,
+ geometry_view: GeometryView,
+ mock_module_view: ModuleView,
+ subject: MotionView,
+) -> None:
+ """It should call get_waypoints() with the correct args to move to a well."""
+ location = CurrentWell(pipette_id="123", labware_id="456", well_name="abc")
+
+ decoy.when(pipette_view.get_current_location()).then_return(location)
+
+ decoy.when(
+ labware_view.get_should_center_column_on_target_well(
+ "labware-id",
+ )
+ ).then_return(True)
+ decoy.when(
+ labware_view.get_should_center_pipette_on_target_well(
+ "labware-id",
+ )
+ ).then_return(False)
+
+ decoy.when(
+ geometry_view.get_well_position("labware-id", "well-name", WellLocation())
+ ).then_return(Point(x=4, y=5, z=6))
+
+ decoy.when(
+ move_types.get_move_type_to_well(
+ "pipette-id", "labware-id", "well-name", location, True
+ )
+ ).then_return(motion_planning.MoveType.GENERAL_ARC)
+ decoy.when(
+ geometry_view.get_min_travel_z("pipette-id", "labware-id", location, 123)
+ ).then_return(42.0)
+
+ decoy.when(geometry_view.get_ancestor_slot_name("labware-id")).then_return(
+ DeckSlotName.SLOT_2
+ )
+
+ decoy.when(
+ geometry_view.get_extra_waypoints(location, DeckSlotName.SLOT_2)
+ ).then_return([(456, 789)])
+
+ waypoints = [
+ motion_planning.Waypoint(
+ position=Point(1, 2, 3), critical_point=CriticalPoint.Y_CENTER
+ ),
+ motion_planning.Waypoint(
+ position=Point(4, 5, 6), critical_point=CriticalPoint.MOUNT
+ ),
+ ]
+
+ decoy.when(
+ motion_planning.get_waypoints(
+ move_type=motion_planning.MoveType.GENERAL_ARC,
+ origin=Point(x=1, y=2, z=3),
+ origin_cp=CriticalPoint.MOUNT,
+ max_travel_z=1337,
+ min_travel_z=42,
+ dest=Point(x=4, y=5, z=6),
+ dest_cp=CriticalPoint.Y_CENTER,
+ xy_waypoints=[(456, 789)],
+ )
+ ).then_return(waypoints)
+
+ result = subject.get_movement_waypoints_to_well(
+ pipette_id="pipette-id",
+ labware_id="labware-id",
+ well_name="well-name",
+ well_location=WellLocation(),
+ origin=Point(x=1, y=2, z=3),
+ origin_cp=CriticalPoint.MOUNT,
+ max_travel_z=1337,
+ force_direct=True,
+ minimum_z_height=123,
+ )
+
+ assert result == waypoints
+
+
+def test_get_movement_waypoints_to_well_for_xy_center(
decoy: Decoy,
labware_view: LabwareView,
pipette_view: PipetteView,
@@ -216,9 +377,17 @@ def test_get_movement_waypoints_to_well(
"""It should call get_waypoints() with the correct args to move to a well."""
location = CurrentWell(pipette_id="123", labware_id="456", well_name="abc")
- decoy.when(pipette_view.get_current_well()).then_return(location)
+ decoy.when(pipette_view.get_current_location()).then_return(location)
+
decoy.when(
- labware_view.get_has_quirk("labware-id", "centerMultichannelOnWells")
+ labware_view.get_should_center_column_on_target_well(
+ "labware-id",
+ )
+ ).then_return(False)
+ decoy.when(
+ labware_view.get_should_center_pipette_on_target_well(
+ "labware-id",
+ )
).then_return(True)
decoy.when(
@@ -233,10 +402,15 @@ def test_get_movement_waypoints_to_well(
decoy.when(
geometry_view.get_min_travel_z("pipette-id", "labware-id", location, 123)
).then_return(42.0)
- decoy.when(geometry_view.get_extra_waypoints("labware-id", location)).then_return(
- [(456, 789)]
+
+ decoy.when(geometry_view.get_ancestor_slot_name("labware-id")).then_return(
+ DeckSlotName.SLOT_2
)
+ decoy.when(
+ geometry_view.get_extra_waypoints(location, DeckSlotName.SLOT_2)
+ ).then_return([(456, 789)])
+
waypoints = [
motion_planning.Waypoint(
position=Point(1, 2, 3), critical_point=CriticalPoint.XY_CENTER
@@ -288,7 +462,7 @@ def test_get_movement_waypoints_to_well_raises(
well_location=None,
)
).then_return(Point(x=4, y=5, z=6))
- decoy.when(pipette_view.get_current_well()).then_return(None)
+ decoy.when(pipette_view.get_current_location()).then_return(None)
decoy.when(
geometry_view.get_min_travel_z("pipette-id", "labware-id", None, None)
).then_return(456)
@@ -326,6 +500,196 @@ def test_get_movement_waypoints_to_well_raises(
)
+def test_get_movement_waypoints_to_addressable_area(
+ decoy: Decoy,
+ pipette_view: PipetteView,
+ addressable_area_view: AddressableAreaView,
+ geometry_view: GeometryView,
+ subject: MotionView,
+) -> None:
+ """It should call get_waypoints() with the correct args to move to an addressable area."""
+ location = CurrentAddressableArea(pipette_id="123", addressable_area_name="abc")
+
+ decoy.when(pipette_view.get_current_location()).then_return(location)
+ decoy.when(
+ addressable_area_view.get_addressable_area_move_to_location("area-name")
+ ).then_return(Point(x=3, y=3, z=3))
+ decoy.when(geometry_view.get_all_obstacle_highest_z()).then_return(42)
+
+ decoy.when(
+ addressable_area_view.get_addressable_area_base_slot("area-name")
+ ).then_return(DeckSlotName.SLOT_2)
+
+ decoy.when(
+ geometry_view.get_extra_waypoints(location, DeckSlotName.SLOT_2)
+ ).then_return([])
+
+ waypoints = [
+ motion_planning.Waypoint(
+ position=Point(1, 2, 3), critical_point=CriticalPoint.XY_CENTER
+ ),
+ motion_planning.Waypoint(
+ position=Point(4, 5, 6), critical_point=CriticalPoint.MOUNT
+ ),
+ ]
+
+ decoy.when(
+ motion_planning.get_waypoints(
+ move_type=motion_planning.MoveType.DIRECT,
+ origin=Point(x=1, y=2, z=3),
+ origin_cp=CriticalPoint.MOUNT,
+ max_travel_z=1337,
+ min_travel_z=123,
+ dest=Point(x=4, y=5, z=6),
+ dest_cp=CriticalPoint.XY_CENTER,
+ xy_waypoints=[],
+ )
+ ).then_return(waypoints)
+
+ result = subject.get_movement_waypoints_to_addressable_area(
+ addressable_area_name="area-name",
+ offset=AddressableOffsetVector(x=1, y=2, z=3),
+ origin=Point(x=1, y=2, z=3),
+ origin_cp=CriticalPoint.MOUNT,
+ max_travel_z=1337,
+ force_direct=True,
+ minimum_z_height=123,
+ ignore_tip_configuration=False,
+ )
+
+ assert result == waypoints
+
+
+def test_move_to_moveable_trash_addressable_area(
+ decoy: Decoy,
+ pipette_view: PipetteView,
+ addressable_area_view: AddressableAreaView,
+ geometry_view: GeometryView,
+ subject: MotionView,
+) -> None:
+ """Ensure that a move request to a moveableTrash addressable utilizes the Instrument Center critical point."""
+ location = CurrentAddressableArea(
+ pipette_id="123", addressable_area_name="moveableTrashA1"
+ )
+
+ decoy.when(pipette_view.get_current_location()).then_return(location)
+ decoy.when(
+ addressable_area_view.get_addressable_area_move_to_location("moveableTrashA1")
+ ).then_return(Point(x=3, y=3, z=3))
+ decoy.when(geometry_view.get_all_obstacle_highest_z()).then_return(42)
+
+ decoy.when(
+ addressable_area_view.get_addressable_area_base_slot("moveableTrashA1")
+ ).then_return(DeckSlotName.SLOT_1)
+
+ decoy.when(
+ geometry_view.get_extra_waypoints(location, DeckSlotName.SLOT_1)
+ ).then_return([])
+
+ waypoints = [
+ motion_planning.Waypoint(
+ position=Point(1, 2, 3), critical_point=CriticalPoint.INSTRUMENT_XY_CENTER
+ )
+ ]
+
+ decoy.when(
+ motion_planning.get_waypoints(
+ move_type=motion_planning.MoveType.DIRECT,
+ origin=Point(x=1, y=2, z=3),
+ origin_cp=CriticalPoint.MOUNT,
+ max_travel_z=1337,
+ min_travel_z=123,
+ dest=Point(x=4, y=5, z=6),
+ dest_cp=CriticalPoint.INSTRUMENT_XY_CENTER,
+ xy_waypoints=[],
+ )
+ ).then_return(waypoints)
+
+ result = subject.get_movement_waypoints_to_addressable_area(
+ addressable_area_name="moveableTrashA1",
+ offset=AddressableOffsetVector(x=1, y=2, z=3),
+ origin=Point(x=1, y=2, z=3),
+ origin_cp=CriticalPoint.MOUNT,
+ max_travel_z=1337,
+ force_direct=True,
+ minimum_z_height=123,
+ ignore_tip_configuration=True,
+ )
+
+ assert result == waypoints
+
+
+def test_get_movement_waypoints_to_addressable_area_stay_at_max_travel_z(
+ decoy: Decoy,
+ pipette_view: PipetteView,
+ addressable_area_view: AddressableAreaView,
+ geometry_view: GeometryView,
+ subject: MotionView,
+) -> None:
+ """It should call get_waypoints() with the correct args to move to an addressable area.
+
+ This is the variant where we pass stay_at_max_travel_z=True to the subject.
+ This should affect the dest argument of get_waypoints().
+ """
+ location = CurrentAddressableArea(pipette_id="123", addressable_area_name="abc")
+
+ decoy.when(pipette_view.get_current_location()).then_return(location)
+ decoy.when(
+ addressable_area_view.get_addressable_area_move_to_location("area-name")
+ ).then_return(Point(x=3, y=3, z=3))
+ decoy.when(geometry_view.get_all_obstacle_highest_z()).then_return(42)
+
+ decoy.when(
+ addressable_area_view.get_addressable_area_base_slot("area-name")
+ ).then_return(DeckSlotName.SLOT_2)
+
+ decoy.when(
+ geometry_view.get_extra_waypoints(location, DeckSlotName.SLOT_2)
+ ).then_return([])
+
+ waypoints = [
+ motion_planning.Waypoint(
+ position=Point(1, 2, 3), critical_point=CriticalPoint.XY_CENTER
+ ),
+ motion_planning.Waypoint(
+ position=Point(4, 5, 6), critical_point=CriticalPoint.MOUNT
+ ),
+ ]
+
+ decoy.when(
+ motion_planning.get_waypoints(
+ move_type=motion_planning.MoveType.DIRECT,
+ origin=Point(x=1, y=2, z=3),
+ origin_cp=CriticalPoint.MOUNT,
+ max_travel_z=1337,
+ min_travel_z=123,
+ dest=Point(
+ x=4,
+ y=5,
+ # The max_travel_z arg passed to the subject, plus the offset passed to the subject,
+ # minus a 1 mm margin as a hack--see comments in the subject.
+ z=1337 + 3 - 1,
+ ),
+ dest_cp=CriticalPoint.XY_CENTER,
+ xy_waypoints=[],
+ )
+ ).then_return(waypoints)
+
+ result = subject.get_movement_waypoints_to_addressable_area(
+ addressable_area_name="area-name",
+ offset=AddressableOffsetVector(x=1, y=2, z=3),
+ origin=Point(x=1, y=2, z=3),
+ origin_cp=CriticalPoint.MOUNT,
+ max_travel_z=1337,
+ force_direct=True,
+ minimum_z_height=123,
+ stay_at_max_travel_z=True,
+ ignore_tip_configuration=False,
+ )
+
+ assert result == waypoints
+
+
@pytest.mark.parametrize(
("direct", "expected_move_type"),
[
@@ -359,7 +723,7 @@ def test_get_movement_waypoints_to_coords(
dest = Point(4, 5, 6)
max_travel_z = 789
- decoy.when(geometry_view.get_all_labware_highest_z()).then_return(
+ decoy.when(geometry_view.get_all_obstacle_highest_z()).then_return(
all_labware_highest_z
)
@@ -401,7 +765,7 @@ def test_get_movement_waypoints_to_coords_raises(
subject: MotionView,
) -> None:
"""It should raise FailedToPlanMoveError if motion_planning.get_waypoints raises."""
- decoy.when(geometry_view.get_all_labware_highest_z()).then_return(123)
+ decoy.when(geometry_view.get_all_obstacle_highest_z()).then_return(123)
decoy.when(
# TODO(mm, 2022-06-22): We should use decoy.matchers.Anything() for all
# arguments. For some reason, Decoy does not match the call unless we
@@ -455,7 +819,7 @@ def test_check_pipette_blocking_hs_latch(
expected_result: bool,
) -> None:
"""It should return True if pipette is blocking opening the latch."""
- decoy.when(pipette_view.get_current_well()).then_return(
+ decoy.when(pipette_view.get_current_location()).then_return(
CurrentWell(pipette_id="pipette-id", labware_id="labware-id", well_name="A1")
)
@@ -495,7 +859,7 @@ def test_check_pipette_blocking_hs_shake(
expected_result: bool,
) -> None:
"""It should return True if pipette is blocking the h/s from shaking."""
- decoy.when(pipette_view.get_current_well()).then_return(
+ decoy.when(pipette_view.get_current_location()).then_return(
CurrentWell(pipette_id="pipette-id", labware_id="labware-id", well_name="A1")
)
@@ -526,8 +890,11 @@ def test_get_touch_tip_waypoints(
center_point = Point(1, 2, 3)
decoy.when(
- labware_view.get_has_quirk("labware-id", "centerMultichannelOnWells")
+ labware_view.get_should_center_pipette_on_target_well("labware-id")
).then_return(True)
+ decoy.when(
+ labware_view.get_should_center_column_on_target_well("labware-id")
+ ).then_return(False)
decoy.when(pipette_view.get_mount("pipette-id")).then_return(MountType.LEFT)
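
The motion-view changes above drop the centerMultichannelOnWells quirk lookup in favour of two explicit LabwareView accessors, and the mocked expectations map them onto critical points. The helper below is purely illustrative: it restates the mapping these tests encode, the priority between the two flags when both are set is not pinned down by the tests, and the CriticalPoint import path is assumed rather than taken from this diff.

# Hypothetical helper summarising the expectations mocked above; not production code.
from typing import Optional
from opentrons.hardware_control.types import CriticalPoint

def expected_critical_point(
    center_column: bool,   # labware_view.get_should_center_column_on_target_well(...)
    center_pipette: bool,  # labware_view.get_should_center_pipette_on_target_well(...)
) -> Optional[CriticalPoint]:
    if center_column:
        return CriticalPoint.Y_CENTER
    if center_pipette:
        return CriticalPoint.XY_CENTER
    return None  # caller falls back to the default (mount) critical point
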
diff --git a/api/tests/opentrons/protocol_engine/state/test_pipette_store.py b/api/tests/opentrons/protocol_engine/state/test_pipette_store.py
index 3f638991c95..d2479a55bc8 100644
--- a/api/tests/opentrons/protocol_engine/state/test_pipette_store.py
+++ b/api/tests/opentrons/protocol_engine/state/test_pipette_store.py
@@ -6,7 +6,7 @@
from opentrons_shared_data.pipette.dev_types import PipetteNameType
from opentrons_shared_data.pipette import pipette_definition
-from opentrons.types import DeckSlotName, MountType
+from opentrons.types import DeckSlotName, MountType, Point
from opentrons.protocol_engine import commands as cmd
from opentrons.protocol_engine.types import (
DeckPoint,
@@ -20,13 +20,15 @@
)
from opentrons.protocol_engine.actions import (
SetPipetteMovementSpeedAction,
- UpdateCommandAction,
+ SucceedCommandAction,
)
from opentrons.protocol_engine.state.pipettes import (
PipetteStore,
PipetteState,
CurrentDeckPoint,
StaticPipetteConfig,
+ BoundingNozzlesOffsets,
+ PipetteBoundingBoxOffsets,
)
from opentrons.protocol_engine.resources.pipette_data_provider import (
LoadedStaticPipetteData,
@@ -35,6 +37,7 @@
from .command_fixtures import (
create_load_pipette_command,
create_aspirate_command,
+ create_aspirate_in_place_command,
create_dispense_command,
create_dispense_in_place_command,
create_pick_up_tip_command,
@@ -43,11 +46,13 @@
create_touch_tip_command,
create_move_to_well_command,
create_blow_out_command,
+ create_blow_out_in_place_command,
create_move_labware_command,
create_move_to_coordinates_command,
create_move_relative_command,
create_prepare_to_aspirate_command,
)
+from ..pipette_fixtures import get_default_nozzle_map
@pytest.fixture
@@ -63,7 +68,7 @@ def test_sets_initial_state(subject: PipetteStore) -> None:
assert result == PipetteState(
pipettes_by_id={},
aspirated_volume_by_id={},
- current_well=None,
+ current_location=None,
current_deck_point=CurrentDeckPoint(mount=None, deck_point=None),
attached_tip_by_id={},
movement_speed_by_id={},
@@ -81,7 +86,7 @@ def test_handles_load_pipette(subject: PipetteStore) -> None:
mount=MountType.LEFT,
)
- subject.handle_action(UpdateCommandAction(private_result=None, command=command))
+ subject.handle_action(SucceedCommandAction(private_result=None, command=command))
result = subject.state
@@ -112,10 +117,10 @@ def test_handles_pick_up_and_drop_tip(subject: PipetteStore) -> None:
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=load_pipette_command)
+ SucceedCommandAction(private_result=None, command=load_pipette_command)
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=pick_up_tip_command)
+ SucceedCommandAction(private_result=None, command=pick_up_tip_command)
)
assert subject.state.attached_tip_by_id["abc"] == TipGeometry(
volume=42, length=101, diameter=8.0
@@ -123,7 +128,7 @@ def test_handles_pick_up_and_drop_tip(subject: PipetteStore) -> None:
assert subject.state.aspirated_volume_by_id["abc"] == 0
subject.handle_action(
- UpdateCommandAction(private_result=None, command=drop_tip_command)
+ SucceedCommandAction(private_result=None, command=drop_tip_command)
)
assert subject.state.attached_tip_by_id["abc"] is None
assert subject.state.aspirated_volume_by_id["abc"] is None
@@ -146,10 +151,10 @@ def test_handles_drop_tip_in_place(subject: PipetteStore) -> None:
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=load_pipette_command)
+ SucceedCommandAction(private_result=None, command=load_pipette_command)
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=pick_up_tip_command)
+ SucceedCommandAction(private_result=None, command=pick_up_tip_command)
)
assert subject.state.attached_tip_by_id["xyz"] == TipGeometry(
volume=42, length=101, diameter=8.0
@@ -157,63 +162,47 @@ def test_handles_drop_tip_in_place(subject: PipetteStore) -> None:
assert subject.state.aspirated_volume_by_id["xyz"] == 0
subject.handle_action(
- UpdateCommandAction(private_result=None, command=drop_tip_in_place_command)
+ SucceedCommandAction(private_result=None, command=drop_tip_in_place_command)
)
assert subject.state.attached_tip_by_id["xyz"] is None
assert subject.state.aspirated_volume_by_id["xyz"] is None
-def test_pipette_volume_adds_aspirate(subject: PipetteStore) -> None:
+@pytest.mark.parametrize(
+ "aspirate_command",
+ [
+ create_aspirate_command(pipette_id="pipette-id", volume=42, flow_rate=1.23),
+ create_aspirate_in_place_command(
+ pipette_id="pipette-id", volume=42, flow_rate=1.23
+ ),
+ ],
+)
+def test_aspirate_adds_volume(
+ subject: PipetteStore, aspirate_command: cmd.Command
+) -> None:
"""It should add volume to pipette after an aspirate."""
load_command = create_load_pipette_command(
pipette_id="pipette-id",
pipette_name=PipetteNameType.P300_SINGLE,
mount=MountType.LEFT,
)
- aspirate_command = create_aspirate_command(
- pipette_id="pipette-id",
- volume=42,
- flow_rate=1.23,
- )
subject.handle_action(
- UpdateCommandAction(private_result=None, command=load_command)
+ SucceedCommandAction(private_result=None, command=load_command)
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=aspirate_command)
+ SucceedCommandAction(private_result=None, command=aspirate_command)
)
assert subject.state.aspirated_volume_by_id["pipette-id"] == 42
subject.handle_action(
- UpdateCommandAction(private_result=None, command=aspirate_command)
+ SucceedCommandAction(private_result=None, command=aspirate_command)
)
assert subject.state.aspirated_volume_by_id["pipette-id"] == 84
-def test_handles_blow_out(subject: PipetteStore) -> None:
- """It should set volume to 0 and set current well."""
- command = create_blow_out_command(
- pipette_id="pipette-id",
- labware_id="labware-id",
- well_name="well-name",
- flow_rate=1.23,
- )
-
- subject.handle_action(UpdateCommandAction(private_result=None, command=command))
-
- result = subject.state
-
- assert result.aspirated_volume_by_id["pipette-id"] is None
-
- assert result.current_well == CurrentWell(
- pipette_id="pipette-id",
- labware_id="labware-id",
- well_name="well-name",
- )
-
-
@pytest.mark.parametrize(
"dispense_command",
[
@@ -225,7 +214,7 @@ def test_handles_blow_out(subject: PipetteStore) -> None:
),
],
)
-def test_pipette_volume_subtracts_dispense(
+def test_dispense_subtracts_volume(
subject: PipetteStore, dispense_command: cmd.Command
) -> None:
"""It should subtract volume from pipette after a dispense."""
@@ -241,28 +230,57 @@ def test_pipette_volume_subtracts_dispense(
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=load_command)
+ SucceedCommandAction(private_result=None, command=load_command)
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=aspirate_command)
+ SucceedCommandAction(private_result=None, command=aspirate_command)
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=dispense_command)
+ SucceedCommandAction(private_result=None, command=dispense_command)
)
assert subject.state.aspirated_volume_by_id["pipette-id"] == 21
subject.handle_action(
- UpdateCommandAction(private_result=None, command=dispense_command)
+ SucceedCommandAction(private_result=None, command=dispense_command)
)
assert subject.state.aspirated_volume_by_id["pipette-id"] == 0
+
+@pytest.mark.parametrize(
+ "blow_out_command",
+ [
+ create_blow_out_command("pipette-id", 1.23),
+ create_blow_out_in_place_command("pipette-id", 1.23),
+ ],
+)
+def test_blow_out_clears_volume(
+ subject: PipetteStore, blow_out_command: cmd.Command
+) -> None:
+ """It should wipe out the aspirated volume after a blowOut."""
+ load_command = create_load_pipette_command(
+ pipette_id="pipette-id",
+ pipette_name=PipetteNameType.P300_SINGLE,
+ mount=MountType.LEFT,
+ )
+ aspirate_command = create_aspirate_command(
+ pipette_id="pipette-id",
+ volume=42,
+ flow_rate=1.23,
+ )
+
subject.handle_action(
- UpdateCommandAction(private_result=None, command=dispense_command)
+ SucceedCommandAction(private_result=None, command=load_command)
+ )
+ subject.handle_action(
+ SucceedCommandAction(private_result=None, command=aspirate_command)
+ )
+ subject.handle_action(
+ SucceedCommandAction(private_result=None, command=blow_out_command)
)
- assert subject.state.aspirated_volume_by_id["pipette-id"] == 0
+ assert subject.state.aspirated_volume_by_id["pipette-id"] is None
@pytest.mark.parametrize(
@@ -360,11 +378,11 @@ def test_movement_commands_update_current_well(
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=load_pipette_command)
+ SucceedCommandAction(private_result=None, command=load_pipette_command)
)
- subject.handle_action(UpdateCommandAction(private_result=None, command=command))
+ subject.handle_action(SucceedCommandAction(private_result=None, command=command))
- assert subject.state.current_well == expected_location
+ assert subject.state.current_location == expected_location
@pytest.mark.parametrize(
@@ -444,14 +462,14 @@ def test_movement_commands_without_well_clear_current_well(
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=load_pipette_command)
+ SucceedCommandAction(private_result=None, command=load_pipette_command)
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=move_command)
+ SucceedCommandAction(private_result=None, command=move_command)
)
- subject.handle_action(UpdateCommandAction(private_result=None, command=command))
+ subject.handle_action(SucceedCommandAction(private_result=None, command=command))
- assert subject.state.current_well is None
+ assert subject.state.current_location is None
@pytest.mark.parametrize(
@@ -497,14 +515,14 @@ def test_heater_shaker_command_without_movement(
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=load_pipette_command)
+ SucceedCommandAction(private_result=None, command=load_pipette_command)
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=move_command)
+ SucceedCommandAction(private_result=None, command=move_command)
)
- subject.handle_action(UpdateCommandAction(private_result=None, command=command))
+ subject.handle_action(SucceedCommandAction(private_result=None, command=command))
- assert subject.state.current_well == CurrentWell(
+ assert subject.state.current_location == CurrentWell(
pipette_id="pipette-id",
labware_id="labware-id",
well_name="well-name",
@@ -608,16 +626,16 @@ def test_move_labware_clears_current_well(
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=load_pipette_command)
+ SucceedCommandAction(private_result=None, command=load_pipette_command)
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=move_to_well_command)
+ SucceedCommandAction(private_result=None, command=move_to_well_command)
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=move_labware_command)
+ SucceedCommandAction(private_result=None, command=move_labware_command)
)
- assert subject.state.current_well == expected_current_well
+ assert subject.state.current_location == expected_current_well
def test_set_movement_speed(subject: PipetteStore) -> None:
@@ -629,7 +647,7 @@ def test_set_movement_speed(subject: PipetteStore) -> None:
mount=MountType.LEFT,
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=load_pipette_command)
+ SucceedCommandAction(private_result=None, command=load_pipette_command)
)
subject.handle_action(
SetPipetteMovementSpeedAction(pipette_id=pipette_id, speed=123.456)
@@ -666,10 +684,13 @@ def test_add_pipette_config(
nominal_tip_overlap={"default": 5},
home_position=8.9,
nozzle_offset_z=10.11,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE),
+ back_left_corner_offset=Point(x=1, y=2, z=3),
+ front_right_corner_offset=Point(x=4, y=5, z=6),
),
)
subject.handle_action(
- UpdateCommandAction(command=command, private_result=private_result)
+ SucceedCommandAction(command=command, private_result=private_result)
)
assert subject.state.static_config_by_id["pipette-id"] == StaticPipetteConfig(
@@ -683,6 +704,15 @@ def test_add_pipette_config(
nominal_tip_overlap={"default": 5},
home_position=8.9,
nozzle_offset_z=10.11,
+ bounding_nozzle_offsets=BoundingNozzlesOffsets(
+ back_left_offset=Point(x=0, y=0, z=0),
+ front_right_offset=Point(x=0, y=0, z=0),
+ ),
+ default_nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE),
+ pipette_bounding_box_offsets=PipetteBoundingBoxOffsets(
+ back_left_corner=Point(x=1, y=2, z=3),
+ front_right_corner=Point(x=4, y=5, z=6),
+ ),
)
assert subject.state.flow_rates_by_id["pipette-id"].default_aspirate == {"a": 1.0}
assert subject.state.flow_rates_by_id["pipette-id"].default_dispense == {"b": 2.0}
@@ -761,9 +791,9 @@ def test_movement_commands_update_deck_point(
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=load_pipette_command)
+ SucceedCommandAction(private_result=None, command=load_pipette_command)
)
- subject.handle_action(UpdateCommandAction(private_result=None, command=command))
+ subject.handle_action(SucceedCommandAction(private_result=None, command=command))
assert subject.state.current_deck_point == CurrentDeckPoint(
mount=MountType.LEFT, deck_point=DeckPoint(x=11, y=22, z=33)
@@ -842,17 +872,17 @@ def test_homing_commands_clear_deck_point(
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=load_pipette_command)
+ SucceedCommandAction(private_result=None, command=load_pipette_command)
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=move_command)
+ SucceedCommandAction(private_result=None, command=move_command)
)
assert subject.state.current_deck_point == CurrentDeckPoint(
mount=MountType.LEFT, deck_point=DeckPoint(x=1, y=2, z=3)
)
- subject.handle_action(UpdateCommandAction(private_result=None, command=command))
+ subject.handle_action(SucceedCommandAction(private_result=None, command=command))
assert subject.state.current_deck_point == CurrentDeckPoint(
mount=None, deck_point=None
@@ -879,18 +909,18 @@ def test_prepare_to_aspirate_marks_pipette_ready(
pipette_id="pipette-id", tip_volume=42, tip_length=101, tip_diameter=8.0
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=load_pipette_command)
+ SucceedCommandAction(private_result=None, command=load_pipette_command)
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=pick_up_tip_command)
+ SucceedCommandAction(private_result=None, command=pick_up_tip_command)
)
- subject.handle_action(UpdateCommandAction(private_result=None, command=previous))
+ subject.handle_action(SucceedCommandAction(private_result=None, command=previous))
prepare_to_aspirate_command = create_prepare_to_aspirate_command(
pipette_id="pipette-id"
)
subject.handle_action(
- UpdateCommandAction(private_result=None, command=prepare_to_aspirate_command)
+ SucceedCommandAction(private_result=None, command=prepare_to_aspirate_command)
)
assert subject.state.aspirated_volume_by_id["pipette-id"] == 0.0
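Editor's note — the renamed tests above (test_aspirate_adds_volume, test_dispense_subtracts_volume, test_blow_out_clears_volume) fold the in-place command variants into @pytest.mark.parametrize so a single body covers both the well-targeted and in-place paths. A minimal sketch of that pattern under toy assumptions (VolumeStore is a hypothetical stand-in, not PipetteStore):

from dataclasses import dataclass, field
from typing import Dict

import pytest


@dataclass
class VolumeStore:
    """A toy store that accumulates aspirated volume per pipette."""

    volume_by_id: Dict[str, float] = field(default_factory=dict)

    def handle_aspirate(self, pipette_id: str, volume: float) -> None:
        self.volume_by_id[pipette_id] = self.volume_by_id.get(pipette_id, 0.0) + volume


@pytest.mark.parametrize("volume", [42.0, 0.0, 1.5])
def test_aspirate_adds_volume(volume: float) -> None:
    """Each parametrized case runs as its own test with a shared body."""
    subject = VolumeStore()
    subject.handle_aspirate("pipette-id", volume)
    subject.handle_aspirate("pipette-id", volume)
    assert subject.volume_by_id["pipette-id"] == pytest.approx(2 * volume)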
diff --git a/api/tests/opentrons/protocol_engine/state/test_pipette_view.py b/api/tests/opentrons/protocol_engine/state/test_pipette_view.py
index 5721beb5b18..96c7905dcd4 100644
--- a/api/tests/opentrons/protocol_engine/state/test_pipette_view.py
+++ b/api/tests/opentrons/protocol_engine/state/test_pipette_view.py
@@ -1,12 +1,14 @@
"""Tests for pipette state accessors in the protocol_engine state store."""
+from collections import OrderedDict
+
import pytest
-from typing import cast, Dict, List, Optional
+from typing import cast, Dict, List, Optional, Tuple, NamedTuple
from opentrons_shared_data.pipette.dev_types import PipetteNameType
from opentrons_shared_data.pipette import pipette_definition
from opentrons.config.defaults_ot2 import Z_RETRACT_DISTANCE
-from opentrons.types import MountType, Mount as HwMount
+from opentrons.types import MountType, Mount as HwMount, Point
from opentrons.hardware_control.dev_types import PipetteDict
from opentrons.protocol_engine import errors
from opentrons.protocol_engine.types import (
@@ -14,7 +16,7 @@
MotorAxis,
FlowRates,
DeckPoint,
- CurrentWell,
+ CurrentPipetteLocation,
TipGeometry,
)
from opentrons.protocol_engine.state.pipettes import (
@@ -23,15 +25,34 @@
CurrentDeckPoint,
HardwarePipette,
StaticPipetteConfig,
+ BoundingNozzlesOffsets,
+ PipetteBoundingBoxOffsets,
)
-from opentrons.hardware_control.nozzle_manager import NozzleMap
+from opentrons.hardware_control.nozzle_manager import NozzleMap, NozzleConfigurationType
from opentrons.protocol_engine.errors import TipNotAttachedError, PipetteNotLoadedError
+from ..pipette_fixtures import (
+ NINETY_SIX_ROWS,
+ NINETY_SIX_COLS,
+ NINETY_SIX_MAP,
+ EIGHT_CHANNEL_ROWS,
+ EIGHT_CHANNEL_COLS,
+ EIGHT_CHANNEL_MAP,
+ get_default_nozzle_map,
+)
+
+_SAMPLE_NOZZLE_BOUNDS_OFFSETS = BoundingNozzlesOffsets(
+ back_left_offset=Point(x=10, y=20, z=30), front_right_offset=Point(x=40, y=50, z=60)
+)
+_SAMPLE_PIPETTE_BOUNDING_BOX_OFFSETS = PipetteBoundingBoxOffsets(
+ back_left_corner=Point(x=10, y=20, z=30), front_right_corner=Point(x=40, y=50, z=60)
+)
+
def get_pipette_view(
pipettes_by_id: Optional[Dict[str, LoadedPipette]] = None,
aspirated_volume_by_id: Optional[Dict[str, Optional[float]]] = None,
- current_well: Optional[CurrentWell] = None,
+ current_well: Optional[CurrentPipetteLocation] = None,
current_deck_point: CurrentDeckPoint = CurrentDeckPoint(
mount=None, deck_point=None
),
@@ -45,7 +66,7 @@ def get_pipette_view(
state = PipetteState(
pipettes_by_id=pipettes_by_id or {},
aspirated_volume_by_id=aspirated_volume_by_id or {},
- current_well=current_well,
+ current_location=current_well,
current_deck_point=current_deck_point,
attached_tip_by_id=attached_tip_by_id or {},
movement_speed_by_id=movement_speed_by_id or {},
@@ -251,6 +272,9 @@ def test_get_pipette_working_volume(
nominal_tip_overlap={},
home_position=0,
nozzle_offset_z=0,
+ bounding_nozzle_offsets=_SAMPLE_NOZZLE_BOUNDS_OFFSETS,
+ default_nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE),
+ pipette_bounding_box_offsets=_SAMPLE_PIPETTE_BOUNDING_BOX_OFFSETS,
)
},
)
@@ -278,6 +302,9 @@ def test_get_pipette_working_volume_raises_if_tip_volume_is_none(
nominal_tip_overlap={},
home_position=0,
nozzle_offset_z=0,
+ bounding_nozzle_offsets=_SAMPLE_NOZZLE_BOUNDS_OFFSETS,
+ default_nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE),
+ pipette_bounding_box_offsets=_SAMPLE_PIPETTE_BOUNDING_BOX_OFFSETS,
)
},
)
@@ -314,6 +341,9 @@ def test_get_pipette_available_volume(
nominal_tip_overlap={},
home_position=0,
nozzle_offset_z=0,
+ bounding_nozzle_offsets=_SAMPLE_NOZZLE_BOUNDS_OFFSETS,
+ default_nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE),
+ pipette_bounding_box_offsets=_SAMPLE_PIPETTE_BOUNDING_BOX_OFFSETS,
),
"pipette-id-none": StaticPipetteConfig(
min_volume=1,
@@ -326,6 +356,9 @@ def test_get_pipette_available_volume(
nominal_tip_overlap={},
home_position=0,
nozzle_offset_z=0,
+ bounding_nozzle_offsets=_SAMPLE_NOZZLE_BOUNDS_OFFSETS,
+ default_nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE),
+ pipette_bounding_box_offsets=_SAMPLE_PIPETTE_BOUNDING_BOX_OFFSETS,
),
},
)
@@ -434,6 +467,9 @@ def test_get_static_config(
nominal_tip_overlap={},
home_position=10.12,
nozzle_offset_z=12.13,
+ bounding_nozzle_offsets=_SAMPLE_NOZZLE_BOUNDS_OFFSETS,
+ default_nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE),
+ pipette_bounding_box_offsets=_SAMPLE_PIPETTE_BOUNDING_BOX_OFFSETS,
)
subject = get_pipette_view(
@@ -481,6 +517,9 @@ def test_get_nominal_tip_overlap(
},
home_position=0,
nozzle_offset_z=0,
+ bounding_nozzle_offsets=_SAMPLE_NOZZLE_BOUNDS_OFFSETS,
+ default_nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE),
+ pipette_bounding_box_offsets=_SAMPLE_PIPETTE_BOUNDING_BOX_OFFSETS,
)
subject = get_pipette_view(static_config_by_id={"pipette-id": config})
@@ -512,3 +551,218 @@ def test_get_motor_axes(
assert subject.get_z_axis("pipette-id") == expected_z_axis
assert subject.get_plunger_axis("pipette-id") == expected_plunger_axis
+
+
+def test_nozzle_configuration_getters() -> None:
+ """Test that pipette view returns correct nozzle configuration data."""
+ nozzle_map = NozzleMap.build(
+ physical_nozzles=OrderedDict({"A1": Point(0, 0, 0)}),
+ physical_rows=OrderedDict({"A": ["A1"]}),
+ physical_columns=OrderedDict({"1": ["A1"]}),
+ starting_nozzle="A1",
+ back_left_nozzle="A1",
+ front_right_nozzle="A1",
+ )
+ subject = get_pipette_view(nozzle_layout_by_id={"pipette-id": nozzle_map})
+ assert subject.get_nozzle_layout_type("pipette-id") == NozzleConfigurationType.FULL
+ assert subject.get_is_partially_configured("pipette-id") is False
+ assert subject.get_primary_nozzle("pipette-id") == "A1"
+
+
+class _PipetteSpecs(NamedTuple):
+ tip_length: float
+ bounding_box_offsets: PipetteBoundingBoxOffsets
+ nozzle_map: NozzleMap
+ destination_position: Point
+ nozzle_bounds_result: Tuple[Point, Point, Point, Point]
+
+
+_pipette_spec_cases = [
+ _PipetteSpecs(
+ # 8-channel P300, full configuration
+ tip_length=42,
+ bounding_box_offsets=PipetteBoundingBoxOffsets(
+ back_left_corner=Point(0.0, 31.5, 35.52),
+ front_right_corner=Point(0.0, -31.5, 35.52),
+ ),
+ nozzle_map=NozzleMap.build(
+ physical_nozzles=EIGHT_CHANNEL_MAP,
+ physical_rows=EIGHT_CHANNEL_ROWS,
+ physical_columns=EIGHT_CHANNEL_COLS,
+ starting_nozzle="A1",
+ back_left_nozzle="A1",
+ front_right_nozzle="H1",
+ ),
+ destination_position=Point(100, 200, 300),
+ nozzle_bounds_result=(
+ (
+ Point(x=100.0, y=200.0, z=342.0),
+ Point(x=100.0, y=137.0, z=342.0),
+ Point(x=100.0, y=200.0, z=342.0),
+ Point(x=100.0, y=137.0, z=342.0),
+ )
+ ),
+ ),
+ _PipetteSpecs(
+ # 8-channel P300, single configuration
+ tip_length=42,
+ bounding_box_offsets=PipetteBoundingBoxOffsets(
+ back_left_corner=Point(0.0, 31.5, 35.52),
+ front_right_corner=Point(0.0, -31.5, 35.52),
+ ),
+ nozzle_map=NozzleMap.build(
+ physical_nozzles=EIGHT_CHANNEL_MAP,
+ physical_rows=EIGHT_CHANNEL_ROWS,
+ physical_columns=EIGHT_CHANNEL_COLS,
+ starting_nozzle="H1",
+ back_left_nozzle="H1",
+ front_right_nozzle="H1",
+ ),
+ destination_position=Point(100, 200, 300),
+ nozzle_bounds_result=(
+ (
+ Point(x=100.0, y=263.0, z=342.0),
+ Point(x=100.0, y=200.0, z=342.0),
+ Point(x=100.0, y=263.0, z=342.0),
+ Point(x=100.0, y=200.0, z=342.0),
+ )
+ ),
+ ),
+ _PipetteSpecs(
+ # 96-channel P1000, full configuration
+ tip_length=42,
+ bounding_box_offsets=PipetteBoundingBoxOffsets(
+ back_left_corner=Point(-36.0, -25.5, -259.15),
+ front_right_corner=Point(63.0, -88.5, -259.15),
+ ),
+ nozzle_map=NozzleMap.build(
+ physical_nozzles=NINETY_SIX_MAP,
+ physical_rows=NINETY_SIX_ROWS,
+ physical_columns=NINETY_SIX_COLS,
+ starting_nozzle="A1",
+ back_left_nozzle="A1",
+ front_right_nozzle="H12",
+ ),
+ destination_position=Point(100, 200, 300),
+ nozzle_bounds_result=(
+ (
+ Point(x=100.0, y=200.0, z=342.0),
+ Point(x=199.0, y=137.0, z=342.0),
+ Point(x=199.0, y=200.0, z=342.0),
+ Point(x=100.0, y=137.0, z=342.0),
+ )
+ ),
+ ),
+ _PipetteSpecs(
+ # 96-channel P1000, A1 COLUMN configuration
+ tip_length=42,
+ bounding_box_offsets=PipetteBoundingBoxOffsets(
+ back_left_corner=Point(-36.0, -25.5, -259.15),
+ front_right_corner=Point(63.0, -88.5, -259.15),
+ ),
+ nozzle_map=NozzleMap.build(
+ physical_nozzles=NINETY_SIX_MAP,
+ physical_rows=NINETY_SIX_ROWS,
+ physical_columns=NINETY_SIX_COLS,
+ starting_nozzle="A1",
+ back_left_nozzle="A1",
+ front_right_nozzle="H1",
+ ),
+ destination_position=Point(100, 200, 300),
+ nozzle_bounds_result=(
+ Point(100, 200, 342),
+ Point(199, 137, 342),
+ Point(199, 200, 342),
+ Point(100, 137, 342),
+ ),
+ ),
+ _PipetteSpecs(
+ # 96-channel P1000, A12 COLUMN configuration
+ tip_length=42,
+ bounding_box_offsets=PipetteBoundingBoxOffsets(
+ back_left_corner=Point(-36.0, -25.5, -259.15),
+ front_right_corner=Point(63.0, -88.5, -259.15),
+ ),
+ nozzle_map=NozzleMap.build(
+ physical_nozzles=NINETY_SIX_MAP,
+ physical_rows=NINETY_SIX_ROWS,
+ physical_columns=NINETY_SIX_COLS,
+ starting_nozzle="A12",
+ back_left_nozzle="A12",
+ front_right_nozzle="H12",
+ ),
+ destination_position=Point(100, 200, 300),
+ nozzle_bounds_result=(
+ Point(1, 200, 342),
+ Point(100, 137, 342),
+ Point(100, 200, 342),
+ Point(1, 137, 342),
+ ),
+ ),
+ _PipetteSpecs(
+ # 96-channel P1000, ROW configuration
+ tip_length=42,
+ bounding_box_offsets=PipetteBoundingBoxOffsets(
+ back_left_corner=Point(-36.0, -25.5, -259.15),
+ front_right_corner=Point(63.0, -88.5, -259.15),
+ ),
+ nozzle_map=NozzleMap.build(
+ physical_nozzles=NINETY_SIX_MAP,
+ physical_rows=NINETY_SIX_ROWS,
+ physical_columns=NINETY_SIX_COLS,
+ starting_nozzle="A1",
+ back_left_nozzle="A1",
+ front_right_nozzle="A12",
+ ),
+ destination_position=Point(100, 200, 300),
+ nozzle_bounds_result=(
+ Point(100, 200, 342),
+ Point(199, 137, 342),
+ Point(199, 200, 342),
+ Point(100, 137, 342),
+ ),
+ ),
+]
+
+
+@pytest.mark.parametrize(
+ argnames=_PipetteSpecs._fields,
+ argvalues=_pipette_spec_cases,
+)
+def test_get_nozzle_bounds_at_location(
+ tip_length: float,
+ bounding_box_offsets: PipetteBoundingBoxOffsets,
+ nozzle_map: NozzleMap,
+ destination_position: Point,
+ nozzle_bounds_result: Tuple[Point, Point, Point, Point],
+) -> None:
+ """It should get the pipette's nozzle's bounds at the given location."""
+ subject = get_pipette_view(
+ nozzle_layout_by_id={"pipette-id": nozzle_map},
+ attached_tip_by_id={
+ "pipette-id": TipGeometry(length=tip_length, diameter=123, volume=123),
+ },
+ static_config_by_id={
+ "pipette-id": StaticPipetteConfig(
+ min_volume=1,
+ max_volume=9001,
+ channels=5,
+ model="blah",
+ display_name="bleh",
+ serial_number="",
+ tip_configuration_lookup_table={},
+ nominal_tip_overlap={},
+ home_position=0,
+ nozzle_offset_z=0,
+ default_nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE),
+ bounding_nozzle_offsets=_SAMPLE_NOZZLE_BOUNDS_OFFSETS,
+ pipette_bounding_box_offsets=bounding_box_offsets,
+ )
+ },
+ )
+ assert (
+ subject.get_pipette_bounds_at_specified_move_to_position(
+ pipette_id="pipette-id", destination_position=destination_position
+ )
+ == nozzle_bounds_result
+ )
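Editor's note — the new test_get_nozzle_bounds_at_location drives pytest.mark.parametrize from a NamedTuple (argnames=_PipetteSpecs._fields), so every case's values stay labeled by field name instead of living in anonymous positional tuples. A short sketch of the same technique with stand-in geometry (the tip-length arithmetic below is illustrative, not the engine's bounds calculation):

from typing import NamedTuple, Tuple

import pytest


class _BoundsSpec(NamedTuple):
    tip_length: float
    destination: Tuple[float, float, float]
    expected_top_z: float


_cases = [
    _BoundsSpec(tip_length=42.0, destination=(100.0, 200.0, 300.0), expected_top_z=342.0),
    _BoundsSpec(tip_length=0.0, destination=(0.0, 0.0, 10.0), expected_top_z=10.0),
]


@pytest.mark.parametrize(argnames=_BoundsSpec._fields, argvalues=_cases)
def test_top_of_tip_height(
    tip_length: float,
    destination: Tuple[float, float, float],
    expected_top_z: float,
) -> None:
    """Each NamedTuple case unpacks into one argument per field."""
    assert destination[2] + tip_length == pytest.approx(expected_top_z)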
diff --git a/api/tests/opentrons/protocol_engine/state/test_state_store.py b/api/tests/opentrons/protocol_engine/state/test_state_store.py
index 44c42fe50b2..515cbbd81e1 100644
--- a/api/tests/opentrons/protocol_engine/state/test_state_store.py
+++ b/api/tests/opentrons/protocol_engine/state/test_state_store.py
@@ -1,11 +1,11 @@
"""Tests for the top-level StateStore/StateView."""
-from typing import Callable, Optional
+from typing import Callable, Union
from datetime import datetime
import pytest
from decoy import Decoy
-from opentrons_shared_data.deck.dev_types import DeckDefinitionV4
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5
from opentrons.protocol_engine.actions import PlayAction
from opentrons.protocol_engine.state import State, StateStore, Config
@@ -32,7 +32,7 @@ def engine_config() -> Config:
@pytest.fixture
def subject(
change_notifier: ChangeNotifier,
- ot2_standard_deck_def: DeckDefinitionV4,
+ ot2_standard_deck_def: DeckDefinitionV5,
engine_config: Config,
) -> StateStore:
"""Get a StateStore test subject."""
@@ -55,7 +55,11 @@ def test_has_state(subject: StateStore) -> None:
def test_state_is_immutable(subject: StateStore) -> None:
"""It should treat the state as immutable."""
result_1 = subject.state
- subject.handle_action(PlayAction(requested_at=datetime(year=2021, month=1, day=1)))
+ subject.handle_action(
+ PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ )
+ )
result_2 = subject.state
assert result_1 is not result_2
@@ -68,58 +72,70 @@ def test_notify_on_state_change(
) -> None:
"""It should notify state changes when actions are handled."""
decoy.verify(change_notifier.notify(), times=0)
- subject.handle_action(PlayAction(requested_at=datetime(year=2021, month=1, day=1)))
+ subject.handle_action(
+ PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ )
+ )
decoy.verify(change_notifier.notify(), times=1)
-async def test_wait_for_state(
+async def test_wait_for(
decoy: Decoy,
change_notifier: ChangeNotifier,
subject: StateStore,
) -> None:
"""It should return an awaitable that signals state changes."""
- check_condition: Callable[..., Optional[str]] = decoy.mock()
+ check_condition: Callable[..., Union[str, int]] = decoy.mock(name="check_condition")
decoy.when(check_condition("foo", bar="baz")).then_return(
- None,
- None,
+ 0,
+ 0,
"hello world",
)
-
result = await subject.wait_for(check_condition, "foo", bar="baz")
assert result == "hello world"
+ decoy.verify(await change_notifier.wait(), times=2)
+ decoy.reset()
+
+ decoy.when(check_condition("foo", bar="baz")).then_return(
+ "hello world",
+ "hello world again",
+ 0,
+ )
+ result = await subject.wait_for_not(check_condition, "foo", bar="baz")
+ assert result == 0
decoy.verify(await change_notifier.wait(), times=2)
-async def test_wait_for_state_short_circuit(
+async def test_wait_for_already_satisfied(
decoy: Decoy,
subject: StateStore,
change_notifier: ChangeNotifier,
) -> None:
- """It should short-circuit the change notifier if condition is satisfied."""
- check_condition: Callable[..., Optional[str]] = decoy.mock()
+ """It should return immediately and skip the change notifier."""
+ check_condition: Callable[..., Union[str, int]] = decoy.mock(name="check_condition")
decoy.when(check_condition("foo", bar="baz")).then_return("hello world")
-
result = await subject.wait_for(check_condition, "foo", bar="baz")
assert result == "hello world"
-
decoy.verify(await change_notifier.wait(), times=0)
-
-async def test_wait_for_already_true(decoy: Decoy, subject: StateStore) -> None:
- """It should signal immediately if condition is already met."""
- check_condition = decoy.mock()
- decoy.when(check_condition()).then_return(True)
- await subject.wait_for(check_condition)
+ decoy.when(check_condition("foo", bar="baz")).then_return(0)
+ result = await subject.wait_for_not(check_condition, "foo", bar="baz")
+ assert result == 0
+ decoy.verify(await change_notifier.wait(), times=0)
async def test_wait_for_raises(decoy: Decoy, subject: StateStore) -> None:
"""It should raise if the condition function raises."""
- check_condition = decoy.mock()
+ check_condition = decoy.mock(name="check_condition")
decoy.when(check_condition()).then_raise(ValueError("oh no"))
with pytest.raises(ValueError, match="oh no"):
await subject.wait_for(check_condition)
+
+ with pytest.raises(ValueError, match="oh no"):
+ await subject.wait_for_not(check_condition)
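Editor's note — the reworked tests above pin down the StateStore waiting contract: wait_for returns the first truthy result from the condition callable, wait_for_not returns the first falsy one, and both re-check only after the change notifier fires. A hedged sketch of that polling shape (an illustration of the contract these tests describe, not the actual StateStore implementation):

from typing import Awaitable, Callable, TypeVar

_ResultT = TypeVar("_ResultT")


async def _wait_until(
    check: Callable[[], _ResultT],
    wait_for_change: Callable[[], Awaitable[None]],
    *,
    truthiness: bool,
) -> _ResultT:
    """Return the first check() result whose truthiness matches `truthiness`."""
    while True:
        result = check()
        if bool(result) is truthiness:
            return result
        # Re-evaluate only after the state reports a change.
        await wait_for_change()

In this sketch, wait_for corresponds to truthiness=True and wait_for_not to truthiness=False.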
diff --git a/api/tests/opentrons/protocol_engine/state/test_tip_state.py b/api/tests/opentrons/protocol_engine/state/test_tip_state.py
index 44da21f97c6..23e30362a0a 100644
--- a/api/tests/opentrons/protocol_engine/state/test_tip_state.py
+++ b/api/tests/opentrons/protocol_engine/state/test_tip_state.py
@@ -1,4 +1,6 @@
"""Tests for tip state store and selectors."""
+from collections import OrderedDict
+
import pytest
from typing import Optional
@@ -9,12 +11,21 @@
)
from opentrons_shared_data.pipette import pipette_definition
+from opentrons.hardware_control.nozzle_manager import NozzleMap
from opentrons.protocol_engine import actions, commands
from opentrons.protocol_engine.state.tips import TipStore, TipView
from opentrons.protocol_engine.types import FlowRates, DeckPoint
from opentrons.protocol_engine.resources.pipette_data_provider import (
LoadedStaticPipetteData,
)
+from opentrons.types import Point
+from opentrons_shared_data.pipette.dev_types import PipetteNameType
+from ..pipette_fixtures import (
+ NINETY_SIX_MAP,
+ NINETY_SIX_COLS,
+ NINETY_SIX_ROWS,
+ get_default_nozzle_map,
+)
_tip_rack_parameters = LabwareParameters.construct(isTiprack=True) # type: ignore[call-arg]
@@ -104,17 +115,51 @@ def drop_tip_in_place_command() -> commands.DropTipInPlace:
],
)
def test_get_next_tip_returns_none(
- load_labware_command: commands.LoadLabware, subject: TipStore
+ load_labware_command: commands.LoadLabware,
+ subject: TipStore,
+ supported_tip_fixture: pipette_definition.SupportedTipsDefinition,
) -> None:
"""It should start at the first tip in the labware."""
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_labware_command)
+ actions.SucceedCommandAction(private_result=None, command=load_labware_command)
+ )
+ load_pipette_command = commands.LoadPipette.construct( # type: ignore[call-arg]
+ result=commands.LoadPipetteResult(pipetteId="pipette-id")
+ )
+ load_pipette_private_result = commands.LoadPipettePrivateResult(
+ pipette_id="pipette-id",
+ serial_number="pipette-serial",
+ config=LoadedStaticPipetteData(
+ channels=96,
+ max_volume=15,
+ min_volume=3,
+ model="gen a",
+ display_name="display name",
+ flow_rates=FlowRates(
+ default_aspirate={},
+ default_dispense={},
+ default_blow_out={},
+ ),
+ tip_configuration_lookup_table={15: supported_tip_fixture},
+ nominal_tip_overlap={},
+ nozzle_offset_z=1.23,
+ home_position=4.56,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P1000_96),
+ back_left_corner_offset=Point(0, 0, 0),
+ front_right_corner_offset=Point(0, 0, 0),
+ ),
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=load_pipette_private_result, command=load_pipette_command
+ )
)
result = TipView(subject.state).get_next_tip(
labware_id="cool-labware",
num_tips=1,
starting_tip_name=None,
+ nozzle_map=None,
)
assert result is None
@@ -122,17 +167,59 @@ def test_get_next_tip_returns_none(
@pytest.mark.parametrize("input_tip_amount", [1, 8, 96])
def test_get_next_tip_returns_first_tip(
- load_labware_command: commands.LoadLabware, subject: TipStore, input_tip_amount: int
+ load_labware_command: commands.LoadLabware,
+ subject: TipStore,
+ input_tip_amount: int,
+ supported_tip_fixture: pipette_definition.SupportedTipsDefinition,
) -> None:
"""It should start at the first tip in the labware."""
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_labware_command)
+ actions.SucceedCommandAction(private_result=None, command=load_labware_command)
+ )
+ load_pipette_command = commands.LoadPipette.construct( # type: ignore[call-arg]
+ result=commands.LoadPipetteResult(pipetteId="pipette-id")
+ )
+ pipette_name_type = PipetteNameType.P1000_96
+ if input_tip_amount == 1:
+ pipette_name_type = PipetteNameType.P300_SINGLE_GEN2
+ elif input_tip_amount == 8:
+ pipette_name_type = PipetteNameType.P300_MULTI_GEN2
+ else:
+ pipette_name_type = PipetteNameType.P1000_96
+ load_pipette_private_result = commands.LoadPipettePrivateResult(
+ pipette_id="pipette-id",
+ serial_number="pipette-serial",
+ config=LoadedStaticPipetteData(
+ channels=input_tip_amount,
+ max_volume=15,
+ min_volume=3,
+ model="gen a",
+ display_name="display name",
+ flow_rates=FlowRates(
+ default_aspirate={},
+ default_dispense={},
+ default_blow_out={},
+ ),
+ tip_configuration_lookup_table={15: supported_tip_fixture},
+ nominal_tip_overlap={},
+ nozzle_offset_z=1.23,
+ home_position=4.56,
+ nozzle_map=get_default_nozzle_map(pipette_name_type),
+ back_left_corner_offset=Point(0, 0, 0),
+ front_right_corner_offset=Point(0, 0, 0),
+ ),
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=load_pipette_private_result, command=load_pipette_command
+ )
)
result = TipView(subject.state).get_next_tip(
labware_id="cool-labware",
num_tips=input_tip_amount,
starting_tip_name=None,
+ nozzle_map=None,
)
assert result == "A1"
@@ -144,16 +231,49 @@ def test_get_next_tip_used_starting_tip(
subject: TipStore,
input_tip_amount: int,
result_well_name: str,
+ supported_tip_fixture: pipette_definition.SupportedTipsDefinition,
) -> None:
"""It should start searching at the given starting tip."""
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_labware_command)
+ actions.SucceedCommandAction(private_result=None, command=load_labware_command)
+ )
+ load_pipette_command = commands.LoadPipette.construct( # type: ignore[call-arg]
+ result=commands.LoadPipetteResult(pipetteId="pipette-id")
+ )
+ load_pipette_private_result = commands.LoadPipettePrivateResult(
+ pipette_id="pipette-id",
+ serial_number="pipette-serial",
+ config=LoadedStaticPipetteData(
+ channels=input_tip_amount,
+ max_volume=15,
+ min_volume=3,
+ model="gen a",
+ display_name="display name",
+ flow_rates=FlowRates(
+ default_aspirate={},
+ default_dispense={},
+ default_blow_out={},
+ ),
+ tip_configuration_lookup_table={15: supported_tip_fixture},
+ nominal_tip_overlap={},
+ nozzle_offset_z=1.23,
+ home_position=4.56,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE_GEN2),
+ back_left_corner_offset=Point(0, 0, 0),
+ front_right_corner_offset=Point(0, 0, 0),
+ ),
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=load_pipette_private_result, command=load_pipette_command
+ )
)
result = TipView(subject.state).get_next_tip(
labware_id="cool-labware",
num_tips=input_tip_amount,
starting_tip_name="B1",
+ nozzle_map=None,
)
assert result == result_well_name
@@ -185,16 +305,34 @@ def test_get_next_tip_skips_picked_up_tip(
) -> None:
"""It should get the next tip in the column if one has been picked up."""
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_labware_command)
+ actions.SucceedCommandAction(private_result=None, command=load_labware_command)
)
load_pipette_command = commands.LoadPipette.construct( # type: ignore[call-arg]
result=commands.LoadPipetteResult(pipetteId="pipette-id")
)
+ channels_num = input_tip_amount
+ if input_starting_tip is not None:
+ pipette_name_type = PipetteNameType.P1000_96
+ if input_tip_amount == 1:
+ pipette_name_type = PipetteNameType.P300_SINGLE_GEN2
+ elif input_tip_amount == 8:
+ pipette_name_type = PipetteNameType.P300_MULTI_GEN2
+ else:
+ pipette_name_type = PipetteNameType.P1000_96
+ else:
+ channels_num = get_next_tip_tips
+ pipette_name_type = PipetteNameType.P1000_96
+ if get_next_tip_tips == 1:
+ pipette_name_type = PipetteNameType.P300_SINGLE_GEN2
+ elif get_next_tip_tips == 8:
+ pipette_name_type = PipetteNameType.P300_MULTI_GEN2
+ else:
+ pipette_name_type = PipetteNameType.P1000_96
load_pipette_private_result = commands.LoadPipettePrivateResult(
pipette_id="pipette-id",
serial_number="pipette-serial",
config=LoadedStaticPipetteData(
- channels=input_tip_amount,
+ channels=channels_num,
max_volume=15,
min_volume=3,
model="gen a",
@@ -208,21 +346,25 @@ def test_get_next_tip_skips_picked_up_tip(
nominal_tip_overlap={},
nozzle_offset_z=1.23,
home_position=4.56,
+ nozzle_map=get_default_nozzle_map(pipette_name_type),
+ back_left_corner_offset=Point(0, 0, 0),
+ front_right_corner_offset=Point(0, 0, 0),
),
)
subject.handle_action(
- actions.UpdateCommandAction(
+ actions.SucceedCommandAction(
private_result=load_pipette_private_result, command=load_pipette_command
)
)
subject.handle_action(
- actions.UpdateCommandAction(command=pick_up_tip_command, private_result=None)
+ actions.SucceedCommandAction(command=pick_up_tip_command, private_result=None)
)
result = TipView(subject.state).get_next_tip(
labware_id="cool-labware",
num_tips=get_next_tip_tips,
starting_tip_name=input_starting_tip,
+ nozzle_map=load_pipette_private_result.config.nozzle_map,
)
assert result == result_well_name
@@ -231,16 +373,48 @@ def test_get_next_tip_skips_picked_up_tip(
def test_get_next_tip_with_starting_tip(
subject: TipStore,
load_labware_command: commands.LoadLabware,
+ supported_tip_fixture: pipette_definition.SupportedTipsDefinition,
) -> None:
"""It should return the starting tip, and then the following tip after that."""
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_labware_command)
+ actions.SucceedCommandAction(private_result=None, command=load_labware_command)
+ )
+ load_pipette_command = commands.LoadPipette.construct( # type: ignore[call-arg]
+ result=commands.LoadPipetteResult(pipetteId="pipette-id")
+ )
+ load_pipette_private_result = commands.LoadPipettePrivateResult(
+ pipette_id="pipette-id",
+ serial_number="pipette-serial",
+ config=LoadedStaticPipetteData(
+ channels=1,
+ max_volume=15,
+ min_volume=3,
+ model="gen a",
+ display_name="display name",
+ flow_rates=FlowRates(
+ default_aspirate={},
+ default_dispense={},
+ default_blow_out={},
+ ),
+ tip_configuration_lookup_table={15: supported_tip_fixture},
+ nominal_tip_overlap={},
+ nozzle_offset_z=1.23,
+ home_position=4.56,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE_GEN2),
+ back_left_corner_offset=Point(x=1, y=2, z=3),
+ front_right_corner_offset=Point(x=4, y=5, z=6),
+ ),
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=load_pipette_private_result, command=load_pipette_command
+ )
)
-
result = TipView(subject.state).get_next_tip(
labware_id="cool-labware",
num_tips=1,
starting_tip_name="B2",
+ nozzle_map=load_pipette_private_result.config.nozzle_map,
)
assert result == "B2"
@@ -257,13 +431,14 @@ def test_get_next_tip_with_starting_tip(
)
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=pick_up_tip)
+ actions.SucceedCommandAction(private_result=None, command=pick_up_tip)
)
result = TipView(subject.state).get_next_tip(
labware_id="cool-labware",
num_tips=1,
starting_tip_name="B2",
+ nozzle_map=load_pipette_private_result.config.nozzle_map,
)
assert result == "C2"
@@ -272,16 +447,49 @@ def test_get_next_tip_with_starting_tip(
def test_get_next_tip_with_starting_tip_8_channel(
subject: TipStore,
load_labware_command: commands.LoadLabware,
+ supported_tip_fixture: pipette_definition.SupportedTipsDefinition,
) -> None:
"""It should return the starting tip, and then the following tip after that."""
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_labware_command)
+ actions.SucceedCommandAction(private_result=None, command=load_labware_command)
+ )
+ load_pipette_command = commands.LoadPipette.construct( # type: ignore[call-arg]
+ result=commands.LoadPipetteResult(pipetteId="pipette-id")
+ )
+ load_pipette_private_result = commands.LoadPipettePrivateResult(
+ pipette_id="pipette-id",
+ serial_number="pipette-serial",
+ config=LoadedStaticPipetteData(
+ channels=8,
+ max_volume=15,
+ min_volume=3,
+ model="gen a",
+ display_name="display name",
+ flow_rates=FlowRates(
+ default_aspirate={},
+ default_dispense={},
+ default_blow_out={},
+ ),
+ tip_configuration_lookup_table={15: supported_tip_fixture},
+ nominal_tip_overlap={},
+ nozzle_offset_z=1.23,
+ home_position=4.56,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_MULTI_GEN2),
+ back_left_corner_offset=Point(0, 0, 0),
+ front_right_corner_offset=Point(0, 0, 0),
+ ),
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=load_pipette_private_result, command=load_pipette_command
+ )
)
result = TipView(subject.state).get_next_tip(
labware_id="cool-labware",
num_tips=8,
starting_tip_name="A2",
+ nozzle_map=None,
)
assert result == "A2"
@@ -298,31 +506,171 @@ def test_get_next_tip_with_starting_tip_8_channel(
)
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=pick_up_tip)
+ actions.SucceedCommandAction(private_result=None, command=pick_up_tip)
)
result = TipView(subject.state).get_next_tip(
labware_id="cool-labware",
num_tips=8,
starting_tip_name="A2",
+ nozzle_map=None,
)
assert result == "A3"
+def test_get_next_tip_with_1_channel_followed_by_8_channel(
+ subject: TipStore,
+ load_labware_command: commands.LoadLabware,
+ supported_tip_fixture: pipette_definition.SupportedTipsDefinition,
+) -> None:
+ """It should return the first tip of column 2 for the 8 channel after performing a single tip pickup on column 1."""
+ subject.handle_action(
+ actions.SucceedCommandAction(private_result=None, command=load_labware_command)
+ )
+ load_pipette_command = commands.LoadPipette.construct( # type: ignore[call-arg]
+ result=commands.LoadPipetteResult(pipetteId="pipette-id")
+ )
+ load_pipette_private_result = commands.LoadPipettePrivateResult(
+ pipette_id="pipette-id",
+ serial_number="pipette-serial",
+ config=LoadedStaticPipetteData(
+ channels=1,
+ max_volume=15,
+ min_volume=3,
+ model="gen a",
+ display_name="display name",
+ flow_rates=FlowRates(
+ default_aspirate={},
+ default_dispense={},
+ default_blow_out={},
+ ),
+ tip_configuration_lookup_table={15: supported_tip_fixture},
+ nominal_tip_overlap={},
+ nozzle_offset_z=1.23,
+ home_position=4.56,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE_GEN2),
+ back_left_corner_offset=Point(0, 0, 0),
+ front_right_corner_offset=Point(0, 0, 0),
+ ),
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=load_pipette_private_result, command=load_pipette_command
+ )
+ )
+ load_pipette_command2 = commands.LoadPipette.construct( # type: ignore[call-arg]
+ result=commands.LoadPipetteResult(pipetteId="pipette-id2")
+ )
+ load_pipette_private_result2 = commands.LoadPipettePrivateResult(
+ pipette_id="pipette-id2",
+ serial_number="pipette-serial2",
+ config=LoadedStaticPipetteData(
+ channels=8,
+ max_volume=15,
+ min_volume=3,
+ model="gen a",
+ display_name="display name2",
+ flow_rates=FlowRates(
+ default_aspirate={},
+ default_dispense={},
+ default_blow_out={},
+ ),
+ tip_configuration_lookup_table={15: supported_tip_fixture},
+ nominal_tip_overlap={},
+ nozzle_offset_z=1.23,
+ home_position=4.56,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_MULTI_GEN2),
+ back_left_corner_offset=Point(0, 0, 0),
+ front_right_corner_offset=Point(0, 0, 0),
+ ),
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=load_pipette_private_result2, command=load_pipette_command2
+ )
+ )
+
+ result = TipView(subject.state).get_next_tip(
+ labware_id="cool-labware",
+ num_tips=1,
+ starting_tip_name=None,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE_GEN2),
+ )
+
+ assert result == "A1"
+
+ pick_up_tip2 = commands.PickUpTip.construct( # type: ignore[call-arg]
+ params=commands.PickUpTipParams.construct(
+ pipetteId="pipette-id2",
+ labwareId="cool-labware",
+ wellName="A1",
+ ),
+ result=commands.PickUpTipResult.construct(
+ position=DeckPoint(x=0, y=0, z=0), tipLength=1.23
+ ),
+ )
+
+ subject.handle_action(
+ actions.SucceedCommandAction(private_result=None, command=pick_up_tip2)
+ )
+
+ result = TipView(subject.state).get_next_tip(
+ labware_id="cool-labware",
+ num_tips=8,
+ starting_tip_name=None,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_MULTI_GEN2),
+ )
+
+ assert result == "A2"
+
+
def test_get_next_tip_with_starting_tip_out_of_tips(
subject: TipStore,
load_labware_command: commands.LoadLabware,
+ supported_tip_fixture: pipette_definition.SupportedTipsDefinition,
) -> None:
"""It should return the starting tip of H12 and then None after that."""
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_labware_command)
+ actions.SucceedCommandAction(private_result=None, command=load_labware_command)
+ )
+ load_pipette_command = commands.LoadPipette.construct( # type: ignore[call-arg]
+ result=commands.LoadPipetteResult(pipetteId="pipette-id")
+ )
+ load_pipette_private_result = commands.LoadPipettePrivateResult(
+ pipette_id="pipette-id",
+ serial_number="pipette-serial",
+ config=LoadedStaticPipetteData(
+ channels=1,
+ max_volume=15,
+ min_volume=3,
+ model="gen a",
+ display_name="display name",
+ flow_rates=FlowRates(
+ default_aspirate={},
+ default_dispense={},
+ default_blow_out={},
+ ),
+ tip_configuration_lookup_table={15: supported_tip_fixture},
+ nominal_tip_overlap={},
+ nozzle_offset_z=1.23,
+ home_position=4.56,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE_GEN2),
+ back_left_corner_offset=Point(0, 0, 0),
+ front_right_corner_offset=Point(0, 0, 0),
+ ),
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=load_pipette_private_result, command=load_pipette_command
+ )
)
result = TipView(subject.state).get_next_tip(
labware_id="cool-labware",
num_tips=1,
starting_tip_name="H12",
+ nozzle_map=None,
)
assert result == "H12"
@@ -339,13 +687,14 @@ def test_get_next_tip_with_starting_tip_out_of_tips(
)
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=pick_up_tip)
+ actions.SucceedCommandAction(private_result=None, command=pick_up_tip)
)
result = TipView(subject.state).get_next_tip(
labware_id="cool-labware",
num_tips=1,
starting_tip_name="H12",
+ nozzle_map=None,
)
assert result is None
@@ -354,16 +703,49 @@ def test_get_next_tip_with_starting_tip_out_of_tips(
def test_get_next_tip_with_column_and_starting_tip(
subject: TipStore,
load_labware_command: commands.LoadLabware,
+ supported_tip_fixture: pipette_definition.SupportedTipsDefinition,
) -> None:
"""It should return the first tip in a column, taking starting tip into account."""
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_labware_command)
+ actions.SucceedCommandAction(private_result=None, command=load_labware_command)
+ )
+ load_pipette_command = commands.LoadPipette.construct( # type: ignore[call-arg]
+ result=commands.LoadPipetteResult(pipetteId="pipette-id")
+ )
+ load_pipette_private_result = commands.LoadPipettePrivateResult(
+ pipette_id="pipette-id",
+ serial_number="pipette-serial",
+ config=LoadedStaticPipetteData(
+ channels=8,
+ max_volume=15,
+ min_volume=3,
+ model="gen a",
+ display_name="display name",
+ flow_rates=FlowRates(
+ default_aspirate={},
+ default_dispense={},
+ default_blow_out={},
+ ),
+ tip_configuration_lookup_table={15: supported_tip_fixture},
+ nominal_tip_overlap={},
+ nozzle_offset_z=1.23,
+ home_position=4.56,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_MULTI_GEN2),
+ back_left_corner_offset=Point(0, 0, 0),
+ front_right_corner_offset=Point(0, 0, 0),
+ ),
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=load_pipette_private_result, command=load_pipette_command
+ )
)
result = TipView(subject.state).get_next_tip(
labware_id="cool-labware",
num_tips=8,
starting_tip_name="D1",
+ nozzle_map=None,
)
assert result == "A2"
@@ -377,7 +759,7 @@ def test_reset_tips(
) -> None:
"""It should be able to reset tip tracking state."""
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_labware_command)
+ actions.SucceedCommandAction(private_result=None, command=load_labware_command)
)
load_pipette_command = commands.LoadPipette.construct( # type: ignore[call-arg]
result=commands.LoadPipetteResult(pipetteId="pipette-id")
@@ -386,7 +768,7 @@ def test_reset_tips(
pipette_id="pipette-id",
serial_number="pipette-serial",
config=LoadedStaticPipetteData(
- channels=8,
+ channels=1,
max_volume=15,
min_volume=3,
model="gen a",
@@ -400,17 +782,20 @@ def test_reset_tips(
nominal_tip_overlap={},
nozzle_offset_z=1.23,
home_position=4.56,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE_GEN2),
+ back_left_corner_offset=Point(x=1, y=2, z=3),
+ front_right_corner_offset=Point(x=4, y=5, z=6),
),
)
subject.handle_action(
- actions.UpdateCommandAction(
+ actions.SucceedCommandAction(
private_result=load_pipette_private_result, command=load_pipette_command
)
)
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=pick_up_tip_command)
+ actions.SucceedCommandAction(private_result=None, command=pick_up_tip_command)
)
subject.handle_action(actions.ResetTipsAction(labware_id="cool-labware"))
@@ -418,6 +803,7 @@ def test_reset_tips(
labware_id="cool-labware",
num_tips=1,
starting_tip_name=None,
+ nozzle_map=None,
)
assert result == "A1"
@@ -448,15 +834,19 @@ def test_handle_pipette_config_action(
nominal_tip_overlap={},
nozzle_offset_z=1.23,
home_position=4.56,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE_GEN2),
+ back_left_corner_offset=Point(x=1, y=2, z=3),
+ front_right_corner_offset=Point(x=4, y=5, z=6),
),
)
subject.handle_action(
- actions.UpdateCommandAction(
+ actions.SucceedCommandAction(
private_result=load_pipette_private_result, command=load_pipette_command
)
)
- assert TipView(subject.state).get_pipette_channels(pipette_id="pipette-id") == 8
+ assert TipView(subject.state).get_pipette_channels("pipette-id") == 8
+ assert TipView(subject.state).get_pipette_active_channels("pipette-id") == 8
@pytest.mark.parametrize(
@@ -473,7 +863,7 @@ def test_has_tip_not_tip_rack(
) -> None:
"""It should return False if labware isn't a tip rack."""
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_labware_command)
+ actions.SucceedCommandAction(private_result=None, command=load_labware_command)
)
result = TipView(state=subject.state).has_clean_tip("cool-labware", "A1")
@@ -486,7 +876,7 @@ def test_has_tip_tip_rack(
) -> None:
"""It should return False if labware isn't a tip rack."""
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_labware_command)
+ actions.SucceedCommandAction(private_result=None, command=load_labware_command)
)
result = TipView(state=subject.state).has_clean_tip("cool-labware", "A1")
@@ -504,7 +894,7 @@ def test_drop_tip(
) -> None:
"""It should be clear tip length when a tip is dropped."""
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=load_labware_command)
+ actions.SucceedCommandAction(private_result=None, command=load_labware_command)
)
load_pipette_command = commands.LoadPipette.construct( # type: ignore[call-arg]
result=commands.LoadPipetteResult(pipetteId="pipette-id")
@@ -527,36 +917,446 @@ def test_drop_tip(
nominal_tip_overlap={},
nozzle_offset_z=1.23,
home_position=4.56,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE_GEN2),
+ back_left_corner_offset=Point(x=1, y=2, z=3),
+ front_right_corner_offset=Point(x=4, y=5, z=6),
),
)
subject.handle_action(
- actions.UpdateCommandAction(
+ actions.SucceedCommandAction(
private_result=load_pipette_private_result, command=load_pipette_command
)
)
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=pick_up_tip_command)
+ actions.SucceedCommandAction(private_result=None, command=pick_up_tip_command)
)
result = TipView(subject.state).get_tip_length("pipette-id")
assert result == 1.23
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=drop_tip_command)
+ actions.SucceedCommandAction(private_result=None, command=drop_tip_command)
)
result = TipView(subject.state).get_tip_length("pipette-id")
assert result == 0
subject.handle_action(
- actions.UpdateCommandAction(private_result=None, command=pick_up_tip_command)
+ actions.SucceedCommandAction(private_result=None, command=pick_up_tip_command)
)
result = TipView(subject.state).get_tip_length("pipette-id")
assert result == 1.23
subject.handle_action(
- actions.UpdateCommandAction(
+ actions.SucceedCommandAction(
private_result=None, command=drop_tip_in_place_command
)
)
result = TipView(subject.state).get_tip_length("pipette-id")
assert result == 0
+
+
+@pytest.mark.parametrize(
+ argnames=["nozzle_map", "expected_channels"],
+ argvalues=[
+ (
+ NozzleMap.build(
+ physical_nozzles=OrderedDict({"A1": Point(0, 0, 0)}),
+ physical_rows=OrderedDict({"A": ["A1"]}),
+ physical_columns=OrderedDict({"1": ["A1"]}),
+ starting_nozzle="A1",
+ back_left_nozzle="A1",
+ front_right_nozzle="A1",
+ ),
+ 1,
+ ),
+ (
+ NozzleMap.build(
+ physical_nozzles=NINETY_SIX_MAP,
+ physical_rows=NINETY_SIX_ROWS,
+ physical_columns=NINETY_SIX_COLS,
+ starting_nozzle="A1",
+ back_left_nozzle="A1",
+ front_right_nozzle="H12",
+ ),
+ 96,
+ ),
+ (
+ NozzleMap.build(
+ physical_nozzles=NINETY_SIX_MAP,
+ physical_rows=NINETY_SIX_ROWS,
+ physical_columns=NINETY_SIX_COLS,
+ starting_nozzle="A1",
+ back_left_nozzle="A1",
+ front_right_nozzle="E1",
+ ),
+ 5,
+ ),
+ ],
+)
+def test_active_channels(
+ subject: TipStore,
+ supported_tip_fixture: pipette_definition.SupportedTipsDefinition,
+ nozzle_map: NozzleMap,
+ expected_channels: int,
+) -> None:
+ """Should update active channels after pipette configuration change."""
+ # Load pipette to update state
+ load_pipette_command = commands.LoadPipette.construct( # type: ignore[call-arg]
+ result=commands.LoadPipetteResult(pipetteId="pipette-id")
+ )
+ load_pipette_private_result = commands.LoadPipettePrivateResult(
+ pipette_id="pipette-id",
+ serial_number="pipette-serial",
+ config=LoadedStaticPipetteData(
+ channels=9,
+ max_volume=15,
+ min_volume=3,
+ model="gen a",
+ display_name="display name",
+ flow_rates=FlowRates(
+ default_aspirate={},
+ default_dispense={},
+ default_blow_out={},
+ ),
+ tip_configuration_lookup_table={15: supported_tip_fixture},
+ nominal_tip_overlap={},
+ nozzle_offset_z=1.23,
+ home_position=4.56,
+ nozzle_map=nozzle_map,
+ back_left_corner_offset=Point(x=1, y=2, z=3),
+ front_right_corner_offset=Point(x=4, y=5, z=6),
+ ),
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=load_pipette_private_result, command=load_pipette_command
+ )
+ )
+
+ # Configure nozzle for partial configuration
+ configure_nozzle_layout_cmd = commands.ConfigureNozzleLayout.construct( # type: ignore[call-arg]
+ result=commands.ConfigureNozzleLayoutResult()
+ )
+ configure_nozzle_private_result = commands.ConfigureNozzleLayoutPrivateResult(
+ pipette_id="pipette-id",
+ nozzle_map=nozzle_map,
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=configure_nozzle_private_result,
+ command=configure_nozzle_layout_cmd,
+ )
+ )
+ assert (
+ TipView(subject.state).get_pipette_active_channels("pipette-id")
+ == expected_channels
+ )
+
+
+def test_next_tip_uses_active_channels(
+ subject: TipStore,
+ supported_tip_fixture: pipette_definition.SupportedTipsDefinition,
+ load_labware_command: commands.LoadLabware,
+ pick_up_tip_command: commands.PickUpTip,
+) -> None:
+ """Test that tip tracking logic uses pipette's active channels."""
+ # Load labware
+ subject.handle_action(
+ actions.SucceedCommandAction(private_result=None, command=load_labware_command)
+ )
+
+ # Load pipette
+ load_pipette_command = commands.LoadPipette.construct( # type: ignore[call-arg]
+ result=commands.LoadPipetteResult(pipetteId="pipette-id")
+ )
+ load_pipette_private_result = commands.LoadPipettePrivateResult(
+ pipette_id="pipette-id",
+ serial_number="pipette-serial",
+ config=LoadedStaticPipetteData(
+ channels=96,
+ max_volume=15,
+ min_volume=3,
+ model="gen a",
+ display_name="display name",
+ flow_rates=FlowRates(
+ default_aspirate={},
+ default_dispense={},
+ default_blow_out={},
+ ),
+ tip_configuration_lookup_table={15: supported_tip_fixture},
+ nominal_tip_overlap={},
+ nozzle_offset_z=1.23,
+ home_position=4.56,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P300_SINGLE_GEN2),
+ back_left_corner_offset=Point(x=1, y=2, z=3),
+ front_right_corner_offset=Point(x=4, y=5, z=6),
+ ),
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=load_pipette_private_result, command=load_pipette_command
+ )
+ )
+
+ # Configure nozzle for partial configuration
+ configure_nozzle_layout_cmd = commands.ConfigureNozzleLayout.construct( # type: ignore[call-arg]
+ result=commands.ConfigureNozzleLayoutResult()
+ )
+ configure_nozzle_private_result = commands.ConfigureNozzleLayoutPrivateResult(
+ pipette_id="pipette-id",
+ nozzle_map=NozzleMap.build(
+ physical_nozzles=NINETY_SIX_MAP,
+ physical_rows=NINETY_SIX_ROWS,
+ physical_columns=NINETY_SIX_COLS,
+ starting_nozzle="A12",
+ back_left_nozzle="A12",
+ front_right_nozzle="H12",
+ ),
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=configure_nozzle_private_result,
+ command=configure_nozzle_layout_cmd,
+ )
+ )
+ # Pick up partial tips
+ subject.handle_action(
+ actions.SucceedCommandAction(command=pick_up_tip_command, private_result=None)
+ )
+
+ result = TipView(subject.state).get_next_tip(
+ labware_id="cool-labware",
+ num_tips=5,
+ starting_tip_name=None,
+ nozzle_map=None,
+ )
+ assert result == "A2"
+
+
+def test_next_tip_automatic_tip_tracking_with_partial_configurations(
+ subject: TipStore,
+ supported_tip_fixture: pipette_definition.SupportedTipsDefinition,
+ load_labware_command: commands.LoadLabware,
+ pick_up_tip_command: commands.PickUpTip,
+) -> None:
+ """Test tip tracking logic using multiple pipette configurations."""
+ # Load labware
+ subject.handle_action(
+ actions.SucceedCommandAction(private_result=None, command=load_labware_command)
+ )
+
+ # Load pipette
+ load_pipette_command = commands.LoadPipette.construct( # type: ignore[call-arg]
+ result=commands.LoadPipetteResult(pipetteId="pipette-id")
+ )
+ load_pipette_private_result = commands.LoadPipettePrivateResult(
+ pipette_id="pipette-id",
+ serial_number="pipette-serial",
+ config=LoadedStaticPipetteData(
+ channels=96,
+ max_volume=15,
+ min_volume=3,
+ model="gen a",
+ display_name="display name",
+ flow_rates=FlowRates(
+ default_aspirate={},
+ default_dispense={},
+ default_blow_out={},
+ ),
+ tip_configuration_lookup_table={15: supported_tip_fixture},
+ nominal_tip_overlap={},
+ nozzle_offset_z=1.23,
+ home_position=4.56,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P1000_96),
+ back_left_corner_offset=Point(x=1, y=2, z=3),
+ front_right_corner_offset=Point(x=4, y=5, z=6),
+ ),
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=load_pipette_private_result, command=load_pipette_command
+ )
+ )
+
+ def _assert_and_pickup(well: str, nozzle_map: NozzleMap) -> None:
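+        """Assert that the next available tip matches the expected well, then pick it up."""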
+ result = TipView(subject.state).get_next_tip(
+ labware_id="cool-labware",
+ num_tips=0,
+ starting_tip_name=None,
+ nozzle_map=nozzle_map,
+ )
+ assert result == well
+
+ pick_up_tip = commands.PickUpTip.construct( # type: ignore[call-arg]
+ params=commands.PickUpTipParams.construct(
+ pipetteId="pipette-id",
+ labwareId="cool-labware",
+ wellName=result,
+ ),
+ result=commands.PickUpTipResult.construct(
+ position=DeckPoint(x=0, y=0, z=0), tipLength=1.23
+ ),
+ )
+
+ subject.handle_action(
+ actions.SucceedCommandAction(private_result=None, command=pick_up_tip)
+ )
+
+ # Configure nozzle for partial configurations
+ configure_nozzle_layout_cmd = commands.ConfigureNozzleLayout.construct( # type: ignore[call-arg]
+ result=commands.ConfigureNozzleLayoutResult()
+ )
+
+ def _reconfigure_nozzle_layout(start: str, back_l: str, front_r: str) -> NozzleMap:
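+        """Apply a ConfigureNozzleLayout result for the given nozzle span and return its nozzle map."""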
+ configure_nozzle_private_result = commands.ConfigureNozzleLayoutPrivateResult(
+ pipette_id="pipette-id",
+ nozzle_map=NozzleMap.build(
+ physical_nozzles=NINETY_SIX_MAP,
+ physical_rows=NINETY_SIX_ROWS,
+ physical_columns=NINETY_SIX_COLS,
+ starting_nozzle=start,
+ back_left_nozzle=back_l,
+ front_right_nozzle=front_r,
+ ),
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=configure_nozzle_private_result,
+ command=configure_nozzle_layout_cmd,
+ )
+ )
+ return configure_nozzle_private_result.nozzle_map
+
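+    # Step through partial layouts and check the auto-picked starting well for each.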
+ map = _reconfigure_nozzle_layout("A1", "A1", "H3")
+ _assert_and_pickup("A10", map)
+ map = _reconfigure_nozzle_layout("A1", "A1", "F2")
+ _assert_and_pickup("C8", map)
+
+ # Configure to single tip pickups
+ map = _reconfigure_nozzle_layout("H12", "H12", "H12")
+ _assert_and_pickup("A1", map)
+ map = _reconfigure_nozzle_layout("H1", "H1", "H1")
+ _assert_and_pickup("A9", map)
+ map = _reconfigure_nozzle_layout("A12", "A12", "A12")
+ _assert_and_pickup("H1", map)
+ map = _reconfigure_nozzle_layout("A1", "A1", "A1")
+ _assert_and_pickup("B9", map)
+
+
+def test_next_tip_automatic_tip_tracking_tiprack_limits(
+ subject: TipStore,
+ supported_tip_fixture: pipette_definition.SupportedTipsDefinition,
+ load_labware_command: commands.LoadLabware,
+ pick_up_tip_command: commands.PickUpTip,
+) -> None:
+ """Test tip tracking logic to ensure once a tiprack is consumed it returns None when consuming tips using multiple pipette configurations."""
+ # Load labware
+ subject.handle_action(
+ actions.SucceedCommandAction(private_result=None, command=load_labware_command)
+ )
+
+ # Load pipette
+ load_pipette_command = commands.LoadPipette.construct( # type: ignore[call-arg]
+ result=commands.LoadPipetteResult(pipetteId="pipette-id")
+ )
+ load_pipette_private_result = commands.LoadPipettePrivateResult(
+ pipette_id="pipette-id",
+ serial_number="pipette-serial",
+ config=LoadedStaticPipetteData(
+ channels=96,
+ max_volume=15,
+ min_volume=3,
+ model="gen a",
+ display_name="display name",
+ flow_rates=FlowRates(
+ default_aspirate={},
+ default_dispense={},
+ default_blow_out={},
+ ),
+ tip_configuration_lookup_table={15: supported_tip_fixture},
+ nominal_tip_overlap={},
+ nozzle_offset_z=1.23,
+ home_position=4.56,
+ nozzle_map=get_default_nozzle_map(PipetteNameType.P1000_96),
+ back_left_corner_offset=Point(x=1, y=2, z=3),
+ front_right_corner_offset=Point(x=4, y=5, z=6),
+ ),
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=load_pipette_private_result, command=load_pipette_command
+ )
+ )
+
+ def _get_next_and_pickup(nozzle_map: NozzleMap) -> str | None:
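+        """Return the next available tip for this layout and, if one exists, pick it up."""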
+ result = TipView(subject.state).get_next_tip(
+ labware_id="cool-labware",
+ num_tips=0,
+ starting_tip_name=None,
+ nozzle_map=nozzle_map,
+ )
+ if result is not None:
+ pick_up_tip = commands.PickUpTip.construct( # type: ignore[call-arg]
+ params=commands.PickUpTipParams.construct(
+ pipetteId="pipette-id",
+ labwareId="cool-labware",
+ wellName=result,
+ ),
+ result=commands.PickUpTipResult.construct(
+ position=DeckPoint(x=0, y=0, z=0), tipLength=1.23
+ ),
+ )
+
+ subject.handle_action(
+ actions.SucceedCommandAction(private_result=None, command=pick_up_tip)
+ )
+
+ return result
+
+ # Configure nozzle for partial configurations
+ configure_nozzle_layout_cmd = commands.ConfigureNozzleLayout.construct( # type: ignore[call-arg]
+ result=commands.ConfigureNozzleLayoutResult()
+ )
+
+ def _reconfigure_nozzle_layout(start: str, back_l: str, front_r: str) -> NozzleMap:
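+        """Apply a ConfigureNozzleLayout result for the given nozzle span and return its nozzle map."""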
+ configure_nozzle_private_result = commands.ConfigureNozzleLayoutPrivateResult(
+ pipette_id="pipette-id",
+ nozzle_map=NozzleMap.build(
+ physical_nozzles=NINETY_SIX_MAP,
+ physical_rows=NINETY_SIX_ROWS,
+ physical_columns=NINETY_SIX_COLS,
+ starting_nozzle=start,
+ back_left_nozzle=back_l,
+ front_right_nozzle=front_r,
+ ),
+ )
+ subject.handle_action(
+ actions.SucceedCommandAction(
+ private_result=configure_nozzle_private_result,
+ command=configure_nozzle_layout_cmd,
+ )
+ )
+ return configure_nozzle_private_result.nozzle_map
+
+ map = _reconfigure_nozzle_layout("A1", "A1", "A1")
+ for x in range(96):
+ _get_next_and_pickup(map)
+ assert _get_next_and_pickup(map) is None
+
+ subject.handle_action(actions.ResetTipsAction(labware_id="cool-labware"))
+ map = _reconfigure_nozzle_layout("A12", "A12", "A12")
+ for x in range(96):
+ _get_next_and_pickup(map)
+ assert _get_next_and_pickup(map) is None
+
+ subject.handle_action(actions.ResetTipsAction(labware_id="cool-labware"))
+ map = _reconfigure_nozzle_layout("H1", "H1", "H1")
+ for x in range(96):
+ _get_next_and_pickup(map)
+ assert _get_next_and_pickup(map) is None
+
+ subject.handle_action(actions.ResetTipsAction(labware_id="cool-labware"))
+ map = _reconfigure_nozzle_layout("H12", "H12", "H12")
+ for x in range(96):
+ _get_next_and_pickup(map)
+ assert _get_next_and_pickup(map) is None
diff --git a/api/tests/opentrons/protocol_engine/test_create_protocol_engine.py b/api/tests/opentrons/protocol_engine/test_create_protocol_engine.py
index c099b0c4521..2f7a0cae441 100644
--- a/api/tests/opentrons/protocol_engine/test_create_protocol_engine.py
+++ b/api/tests/opentrons/protocol_engine/test_create_protocol_engine.py
@@ -1,9 +1,10 @@
"""Smoke tests for the ProtocolEngine creation factory."""
import pytest
-from pytest_lazyfixture import lazy_fixture # type: ignore[import]
+from pytest_lazyfixture import lazy_fixture # type: ignore[import-untyped]
-from opentrons_shared_data.deck.dev_types import DeckDefinitionV4
+from opentrons_shared_data.deck.dev_types import DeckDefinitionV5
from opentrons_shared_data.robot.dev_types import RobotType
+from opentrons_shared_data.deck import load as load_deck
from opentrons.calibration_storage.helpers import uri_from_details
from opentrons.hardware_control import API as HardwareAPI
@@ -18,6 +19,30 @@
from opentrons.protocol_engine.types import DeckSlotLocation, LoadedLabware
from opentrons.types import DeckSlotName
+from opentrons.protocols.api_support.deck_type import (
+ STANDARD_OT2_DECK,
+ SHORT_TRASH_DECK,
+ STANDARD_OT3_DECK,
+)
+
+
+@pytest.fixture(scope="session")
+def ot2_standard_deck_def() -> DeckDefinitionV5:
+ """Get the OT-2 standard deck definition."""
+ return load_deck(STANDARD_OT2_DECK, 5)
+
+
+@pytest.fixture(scope="session")
+def ot2_short_trash_deck_def() -> DeckDefinitionV5:
+ """Get the OT-2 with short trash standard deck definition."""
+ return load_deck(SHORT_TRASH_DECK, 5)
+
+
+@pytest.fixture(scope="session")
+def ot3_standard_deck_def() -> DeckDefinitionV5:
+ """Get the OT-2 standard deck definition."""
+ return load_deck(STANDARD_OT3_DECK, 5)
+
@pytest.mark.parametrize(
(
@@ -47,7 +72,7 @@ async def test_create_engine_initializes_state_with_no_fixed_trash(
hardware_api: HardwareAPI,
robot_type: RobotType,
deck_type: DeckType,
- expected_deck_def: DeckDefinitionV4,
+ expected_deck_def: DeckDefinitionV5,
) -> None:
"""It should load deck geometry data into the store on create."""
engine = await create_protocol_engine(
@@ -102,7 +127,7 @@ async def test_create_engine_initializes_state_with_fixed_trash(
hardware_api: HardwareAPI,
robot_type: RobotType,
deck_type: DeckType,
- expected_deck_def: DeckDefinitionV4,
+ expected_deck_def: DeckDefinitionV5,
expected_fixed_trash_def: LabwareDefinition,
expected_fixed_trash_slot: DeckSlotName,
) -> None:
diff --git a/api/tests/opentrons/protocol_engine/test_plugins.py b/api/tests/opentrons/protocol_engine/test_plugins.py
index 0da44ab62bc..471a689e265 100644
--- a/api/tests/opentrons/protocol_engine/test_plugins.py
+++ b/api/tests/opentrons/protocol_engine/test_plugins.py
@@ -29,7 +29,9 @@ def test_configure(
decoy: Decoy, state_view: StateView, action_dispatcher: ActionDispatcher
) -> None:
"""The engine should be able to configure the plugin."""
- action = PlayAction(requested_at=datetime(year=2021, month=1, day=1))
+ action = PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ )
subject = _MyPlugin()
subject._configure(
diff --git a/api/tests/opentrons/protocol_engine/test_protocol_engine.py b/api/tests/opentrons/protocol_engine/test_protocol_engine.py
index d8928126495..4816708fa57 100644
--- a/api/tests/opentrons/protocol_engine/test_protocol_engine.py
+++ b/api/tests/opentrons/protocol_engine/test_protocol_engine.py
@@ -2,12 +2,13 @@
import inspect
from datetime import datetime
from typing import Any
+from unittest.mock import sentinel
import pytest
from decoy import Decoy
from opentrons_shared_data.robot.dev_types import RobotType
-from opentrons.ordered_set import OrderedSet
+from opentrons.protocol_engine.actions.actions import ResumeFromRecoveryAction
from opentrons.types import DeckSlotName
from opentrons.hardware_control import HardwareControlAPI, OT2HardwareControlAPI
@@ -16,7 +17,9 @@
from opentrons.protocols.models import LabwareDefinition
from opentrons.protocol_engine import ProtocolEngine, commands, slot_standardization
-from opentrons.protocol_engine.errors.exceptions import EStopActivatedError
+from opentrons.protocol_engine.errors.exceptions import (
+ CommandNotAllowedError,
+)
from opentrons.protocol_engine.types import (
DeckType,
LabwareOffset,
@@ -28,6 +31,7 @@
ModuleModel,
Liquid,
PostRunHardwareState,
+ AddressableAreaLocation,
)
from opentrons.protocol_engine.execution import (
QueueWorker,
@@ -43,6 +47,7 @@
ActionDispatcher,
AddLabwareOffsetAction,
AddLabwareDefinitionAction,
+ AddAddressableAreaAction,
AddLiquidAction,
AddModuleAction,
PlayAction,
@@ -54,7 +59,6 @@
QueueCommandAction,
HardwareStoppedAction,
ResetTipsAction,
- FailCommandAction,
)
@@ -125,9 +129,9 @@ def _mock_slot_standardization_module(
def _mock_hash_command_params_module(
decoy: Decoy, monkeypatch: pytest.MonkeyPatch
) -> None:
- hash_command_params = commands.hash_command_params
+ hash_command_params = commands.hash_protocol_command_params
monkeypatch.setattr(
- commands, "hash_command_params", decoy.mock(func=hash_command_params)
+ commands, "hash_protocol_command_params", decoy.mock(func=hash_command_params)
)
@@ -179,7 +183,9 @@ def test_add_command(
original_request = commands.WaitForResumeCreate(
params=commands.WaitForResumeParams()
)
- standardized_request = commands.HomeCreate(params=commands.HomeParams())
+ standardized_request = commands.HomeCreate(
+ params=commands.HomeParams(), intent=commands.CommandIntent.PROTOCOL
+ )
queued = commands.Home(
id="command-id",
key="command-key",
@@ -199,9 +205,13 @@ def test_add_command(
decoy.when(model_utils.generate_id()).then_return("command-id")
decoy.when(model_utils.get_timestamp()).then_return(created_at)
- decoy.when(state_store.commands.get_latest_command_hash()).then_return("abc")
+ decoy.when(state_store.commands.get_latest_protocol_command_hash()).then_return(
+ "abc"
+ )
decoy.when(
- commands.hash_command_params(create=standardized_request, last_hash="abc")
+ commands.hash_protocol_command_params(
+ create=standardized_request, last_hash="abc"
+ )
).then_return("123")
def _stub_queued(*_a: object, **_k: object) -> None:
@@ -241,6 +251,105 @@ def _stub_queued(*_a: object, **_k: object) -> None:
assert result == queued
+def test_add_fixit_command(
+ decoy: Decoy,
+ state_store: StateStore,
+ action_dispatcher: ActionDispatcher,
+ model_utils: ModelUtils,
+ subject: ProtocolEngine,
+) -> None:
+ """It should add a fixit command to the state from a request."""
+ created_at = datetime(year=2021, month=1, day=1)
+ original_request = commands.WaitForResumeCreate(
+ params=commands.WaitForResumeParams()
+ )
+ standardized_request = commands.HomeCreate(
+ params=commands.HomeParams(), intent=commands.CommandIntent.FIXIT
+ )
+ queued = commands.Home(
+ id="command-id",
+ key="command-key",
+ status=commands.CommandStatus.QUEUED,
+ createdAt=created_at,
+ params=commands.HomeParams(),
+ )
+
+ robot_type: RobotType = "OT-3 Standard"
+ decoy.when(state_store.config).then_return(
+ Config(robot_type=robot_type, deck_type=DeckType.OT3_STANDARD)
+ )
+
+ decoy.when(
+ slot_standardization.standardize_command(original_request, robot_type)
+ ).then_return(standardized_request)
+
+ decoy.when(model_utils.generate_id()).then_return("command-id")
+ decoy.when(model_utils.get_timestamp()).then_return(created_at)
+
+ def _stub_queued(*_a: object, **_k: object) -> None:
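+        # Make the queued command visible in state once the queue action has been dispatched.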
+ decoy.when(state_store.commands.get("command-id")).then_return(queued)
+
+ decoy.when(
+ state_store.commands.validate_action_allowed(
+ QueueCommandAction(
+ command_id="command-id",
+ created_at=created_at,
+ request=standardized_request,
+ request_hash=None,
+ )
+ )
+ ).then_return(
+ QueueCommandAction(
+ command_id="command-id-validated",
+ created_at=created_at,
+ request=standardized_request,
+ request_hash=None,
+ )
+ )
+
+ decoy.when(
+ action_dispatcher.dispatch(
+ QueueCommandAction(
+ command_id="command-id-validated",
+ created_at=created_at,
+ request=standardized_request,
+ request_hash=None,
+ )
+ ),
+ ).then_do(_stub_queued)
+
+ result = subject.add_command(original_request)
+ assert result == queued
+
+
+def test_add_fixit_command_raises(
+ decoy: Decoy,
+ state_store: StateStore,
+ action_dispatcher: ActionDispatcher,
+ model_utils: ModelUtils,
+ subject: ProtocolEngine,
+) -> None:
+ """It should raise if a failedCommandId is supplied without a fixit command."""
+ original_request = commands.WaitForResumeCreate(
+ params=commands.WaitForResumeParams()
+ )
+ standardized_request = commands.HomeCreate(
+ params=commands.HomeParams(), intent=commands.CommandIntent.PROTOCOL
+ )
+
+ robot_type: RobotType = "OT-3 Standard"
+ decoy.when(state_store.config).then_return(
+ Config(robot_type=robot_type, deck_type=DeckType.OT3_STANDARD)
+ )
+
+ decoy.when(
+ slot_standardization.standardize_command(original_request, robot_type)
+ ).then_return(standardized_request)
+
+ with pytest.raises(CommandNotAllowedError):
+ subject.add_command(original_request, "id-123")
+
+
async def test_add_and_execute_command(
decoy: Decoy,
state_store: StateStore,
@@ -329,6 +438,99 @@ def _stub_completed(*_a: object, **_k: object) -> bool:
assert result == completed
+async def test_add_and_execute_command_wait_for_recovery(
+ decoy: Decoy,
+ state_store: StateStore,
+ action_dispatcher: ActionDispatcher,
+ model_utils: ModelUtils,
+ subject: ProtocolEngine,
+) -> None:
+ """It should add and execute a command from a request."""
+ created_at = datetime(year=2021, month=1, day=1)
+ original_request = commands.WaitForResumeCreate(
+ params=commands.WaitForResumeParams()
+ )
+ standardized_request = commands.HomeCreate(params=commands.HomeParams())
+ queued = commands.Home(
+ id="command-id",
+ key="command-key",
+ status=commands.CommandStatus.QUEUED,
+ createdAt=created_at,
+ params=commands.HomeParams(),
+ )
+ completed = commands.Home(
+ id="command-id",
+ key="command-key",
+ status=commands.CommandStatus.SUCCEEDED,
+ createdAt=created_at,
+ params=commands.HomeParams(),
+ )
+
+ robot_type: RobotType = "OT-3 Standard"
+ decoy.when(state_store.config).then_return(
+ Config(robot_type=robot_type, deck_type=DeckType.OT3_STANDARD)
+ )
+
+ decoy.when(
+ slot_standardization.standardize_command(original_request, robot_type)
+ ).then_return(standardized_request)
+
+ decoy.when(model_utils.generate_id()).then_return("command-id")
+ decoy.when(model_utils.get_timestamp()).then_return(created_at)
+
+ def _stub_queued(*_a: object, **_k: object) -> None:
+ decoy.when(state_store.commands.get("command-id")).then_return(queued)
+
+ def _stub_completed(*_a: object, **_k: object) -> bool:
+ decoy.when(state_store.commands.get("command-id")).then_return(completed)
+ return True
+
+ decoy.when(
+ state_store.commands.validate_action_allowed(
+ QueueCommandAction(
+ command_id="command-id",
+ created_at=created_at,
+ request=standardized_request,
+ request_hash=None,
+ )
+ )
+ ).then_return(
+ QueueCommandAction(
+ command_id="command-id-validated",
+ created_at=created_at,
+ request=standardized_request,
+ request_hash=None,
+ )
+ )
+
+ decoy.when(
+ action_dispatcher.dispatch(
+ QueueCommandAction(
+ command_id="command-id-validated",
+ created_at=created_at,
+ request=standardized_request,
+ request_hash=None,
+ )
+ )
+ ).then_do(_stub_queued)
+
+ decoy.when(
+ await state_store.wait_for(
+ condition=state_store.commands.get_command_is_final,
+ command_id="command-id",
+ ),
+ ).then_do(_stub_completed)
+
+ result = await subject.add_and_execute_command_wait_for_recovery(original_request)
+ assert result == completed
+ decoy.verify(
+ await state_store.wait_for_not(
+ state_store.commands.get_recovery_in_progress_for_command,
+ "command-id",
+ )
+ )
+
+
def test_play(
decoy: Decoy,
state_store: StateStore,
@@ -343,15 +545,23 @@ def test_play(
)
decoy.when(
state_store.commands.validate_action_allowed(
- PlayAction(requested_at=datetime(year=2021, month=1, day=1))
+ PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ )
),
- ).then_return(PlayAction(requested_at=datetime(year=2022, month=2, day=2)))
+ ).then_return(
+ PlayAction(
+ requested_at=datetime(year=2022, month=2, day=2), deck_configuration=[]
+ )
+ )
- subject.play()
+ subject.play(deck_configuration=[])
decoy.verify(
action_dispatcher.dispatch(
- PlayAction(requested_at=datetime(year=2022, month=2, day=2))
+ PlayAction(
+ requested_at=datetime(year=2022, month=2, day=2), deck_configuration=[]
+ )
),
hardware_api.resume(HardwarePauseType.PAUSE),
)
@@ -371,17 +581,25 @@ def test_play_blocked_by_door(
)
decoy.when(
state_store.commands.validate_action_allowed(
- PlayAction(requested_at=datetime(year=2021, month=1, day=1))
+ PlayAction(
+ requested_at=datetime(year=2021, month=1, day=1), deck_configuration=[]
+ )
),
- ).then_return(PlayAction(requested_at=datetime(year=2022, month=2, day=2)))
+ ).then_return(
+ PlayAction(
+ requested_at=datetime(year=2022, month=2, day=2), deck_configuration=[]
+ )
+ )
decoy.when(state_store.commands.get_is_door_blocking()).then_return(True)
- subject.play()
+ subject.play(deck_configuration=[])
decoy.verify(hardware_api.resume(HardwarePauseType.PAUSE), times=0)
decoy.verify(
action_dispatcher.dispatch(
- PlayAction(requested_at=datetime(year=2022, month=2, day=2))
+ PlayAction(
+ requested_at=datetime(year=2022, month=2, day=2), deck_configuration=[]
+ )
),
hardware_api.pause(HardwarePauseType.PAUSE),
)
@@ -401,7 +619,7 @@ def test_pause(
state_store.commands.validate_action_allowed(expected_action),
).then_return(expected_action)
- subject.pause()
+ subject.request_pause()
decoy.verify(
action_dispatcher.dispatch(expected_action),
@@ -409,6 +627,24 @@ def test_pause(
)
+def test_resume_from_recovery(
+ decoy: Decoy,
+ state_store: StateStore,
+ action_dispatcher: ActionDispatcher,
+ subject: ProtocolEngine,
+) -> None:
+ """It should dispatch a ResumeFromRecoveryAction."""
+ expected_action = ResumeFromRecoveryAction()
+
+ decoy.when(
+ state_store.commands.validate_action_allowed(expected_action)
+ ).then_return(expected_action)
+
+ subject.resume_from_recovery()
+
+ decoy.verify(action_dispatcher.dispatch(expected_action))
+
+
@pytest.mark.parametrize("drop_tips_after_run", [True, False])
@pytest.mark.parametrize("set_run_status", [True, False])
@pytest.mark.parametrize(
@@ -438,8 +674,8 @@ async def test_finish(
"""It should be able to gracefully tell the engine it's done."""
completed_at = datetime(2021, 1, 1, 0, 0)
- decoy.when(model_utils.get_timestamp()).then_return(completed_at)
decoy.when(state_store.commands.state.stopped_by_estop).then_return(False)
+ decoy.when(model_utils.get_timestamp()).then_return(completed_at)
await subject.finish(
drop_tips_after_run=drop_tips_after_run,
@@ -658,7 +894,8 @@ async def test_wait_until_complete(
decoy.verify(
await state_store.wait_for(
condition=state_store.commands.get_all_commands_final
- )
+ ),
+ state_store.commands.raise_fatal_command_error(),
)
@@ -677,7 +914,7 @@ async def test_stop(
state_store.commands.validate_action_allowed(expected_action),
).then_return(expected_action)
- await subject.stop()
+ await subject.request_stop()
decoy.verify(
action_dispatcher.dispatch(expected_action),
@@ -703,7 +940,7 @@ async def test_stop_for_legacy_core_protocols(
decoy.when(hardware_api.is_movement_execution_taskified()).then_return(True)
- await subject.stop()
+ await subject.request_stop()
decoy.verify(
action_dispatcher.dispatch(expected_action),
@@ -712,95 +949,53 @@ async def test_stop_for_legacy_core_protocols(
)
-@pytest.mark.parametrize("maintenance_run", [True, False])
-async def test_estop_during_command(
+async def test_estop(
decoy: Decoy,
action_dispatcher: ActionDispatcher,
queue_worker: QueueWorker,
state_store: StateStore,
subject: ProtocolEngine,
- model_utils: ModelUtils,
- maintenance_run: bool,
) -> None:
"""It should be able to stop the engine."""
- timestamp = datetime(2021, 1, 1, 0, 0)
- command_id = "command_fake_id"
- error_id = "fake_error_id"
- fake_command_set = OrderedSet(["fake-id-1", "fake-id-1"])
-
- decoy.when(model_utils.get_timestamp()).then_return(timestamp)
- decoy.when(model_utils.generate_id()).then_return(error_id)
- decoy.when(state_store.commands.get_is_stopped()).then_return(False)
- decoy.when(state_store.commands.state.running_command_id).then_return(command_id)
- decoy.when(state_store.commands.state.queued_command_ids).then_return(
- fake_command_set
- )
-
- expected_action = FailCommandAction(
- command_id=command_id,
- error_id=error_id,
- failed_at=timestamp,
- error=EStopActivatedError(message="Estop Activated"),
- )
- expected_action_2 = FailCommandAction(
- command_id=fake_command_set.head(),
- error_id=error_id,
- failed_at=timestamp,
- error=EStopActivatedError(message="Estop Activated"),
- )
+ expected_action = StopAction(from_estop=True)
+ validated_action = sentinel.validated_action
+ decoy.when(
+ state_store.commands.validate_action_allowed(expected_action),
+ ).then_return(validated_action)
- subject.estop(maintenance_run=maintenance_run)
+ subject.estop()
decoy.verify(
- action_dispatcher.dispatch(action=expected_action),
- action_dispatcher.dispatch(action=expected_action_2),
+ action_dispatcher.dispatch(action=validated_action),
queue_worker.cancel(),
)
-@pytest.mark.parametrize("maintenance_run", [True, False])
-async def test_estop_without_command(
+async def test_estop_noops_if_invalid(
decoy: Decoy,
action_dispatcher: ActionDispatcher,
queue_worker: QueueWorker,
state_store: StateStore,
subject: ProtocolEngine,
- model_utils: ModelUtils,
- maintenance_run: bool,
) -> None:
- """It should be able to stop the engine."""
- timestamp = datetime(2021, 1, 1, 0, 0)
- error_id = "fake_error_id"
-
- decoy.when(model_utils.get_timestamp()).then_return(timestamp)
- decoy.when(model_utils.generate_id()).then_return(error_id)
- decoy.when(state_store.commands.get_is_stopped()).then_return(False)
- decoy.when(state_store.commands.state.running_command_id).then_return(None)
-
- expected_stop = StopAction(from_estop=True)
- expected_hardware_stop = HardwareStoppedAction(
- completed_at=timestamp,
- finish_error_details=FinishErrorDetails(
- error=EStopActivatedError(message="Estop Activated"),
- error_id=error_id,
- created_at=timestamp,
- ),
- )
-
+ """It should no-op if a stop is invalid right now.."""
+ expected_action = StopAction(from_estop=True)
decoy.when(
- state_store.commands.validate_action_allowed(expected_stop),
- ).then_return(expected_stop)
+ state_store.commands.validate_action_allowed(expected_action),
+ ).then_raise(RuntimeError("unable to stop; this machine craves flesh"))
- subject.estop(maintenance_run=maintenance_run)
+ subject.estop() # Should not raise.
decoy.verify(
- action_dispatcher.dispatch(expected_stop), times=1 if maintenance_run else 0
+ action_dispatcher.dispatch(), # type: ignore
+ ignore_extra_args=True,
+ times=0,
)
decoy.verify(
- action_dispatcher.dispatch(expected_hardware_stop),
- times=1 if maintenance_run else 0,
+ queue_worker.cancel(),
+ ignore_extra_args=True,
+ times=0,
)
- decoy.verify(queue_worker.cancel(), times=1 if maintenance_run else 0)
def test_add_plugin(
@@ -905,6 +1100,25 @@ def _stub_get_definition_uri(*args: Any, **kwargs: Any) -> None:
assert result == "some/definition/uri"
+def test_add_addressable_area(
+ decoy: Decoy,
+ action_dispatcher: ActionDispatcher,
+ subject: ProtocolEngine,
+) -> None:
+ """It should dispatch an AddAddressableArea action."""
+ subject.add_addressable_area(addressable_area_name="my_funky_area")
+
+ decoy.verify(
+ action_dispatcher.dispatch(
+ AddAddressableAreaAction(
+ addressable_area=AddressableAreaLocation(
+ addressableAreaName="my_funky_area"
+ )
+ )
+ )
+ )
+
+
def test_add_liquid(
decoy: Decoy,
action_dispatcher: ActionDispatcher,
diff --git a/api/tests/opentrons/protocol_reader/_input_file.py b/api/tests/opentrons/protocol_reader/_input_file.py
index 85939ecc0bc..d28d994f981 100644
--- a/api/tests/opentrons/protocol_reader/_input_file.py
+++ b/api/tests/opentrons/protocol_reader/_input_file.py
@@ -2,7 +2,7 @@
from dataclasses import dataclass
from io import BytesIO
-from typing import IO
+from typing import BinaryIO
from opentrons.protocol_reader import AbstractInputFile
@@ -12,7 +12,7 @@ class InputFile(AbstractInputFile):
"""An implementation of AbstractInputFile to use for test input."""
filename: str
- file: IO[bytes]
+ file: BinaryIO
@classmethod
def make(cls, filename: str, contents: bytes) -> InputFile:
diff --git a/api/tests/opentrons/protocol_runner/smoke_tests/test_legacy_command_mapper.py b/api/tests/opentrons/protocol_runner/smoke_tests/test_legacy_command_mapper.py
index c5e381e56a1..c8950cbe090 100644
--- a/api/tests/opentrons/protocol_runner/smoke_tests/test_legacy_command_mapper.py
+++ b/api/tests/opentrons/protocol_runner/smoke_tests/test_legacy_command_mapper.py
@@ -5,6 +5,7 @@
"""
from datetime import datetime
from pathlib import Path
+from textwrap import dedent
from typing import List
import pytest
@@ -36,7 +37,7 @@ async def simulate_and_get_commands(protocol_file: Path) -> List[commands.Comman
robot_type="OT-2 Standard",
protocol_config=protocol_source.config,
)
- result = await subject.run(protocol_source)
+ result = await subject.run(deck_configuration=[], protocol_source=protocol_source)
assert result.state_summary.errors == []
assert result.state_summary.status == EngineStatus.SUCCEEDED
return result.commands
@@ -165,6 +166,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
startedAt=matchers.IsA(datetime),
completedAt=matchers.IsA(datetime),
params=commands.HomeParams(axes=None),
+ notes=[],
result=commands.HomeResult(),
)
assert commands_result[1] == commands.LoadLabware.construct(
@@ -180,6 +182,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
namespace="opentrons",
version=1,
),
+ notes=[],
result=tiprack_1_result_captor,
)
assert commands_result[2] == commands.LoadLabware.construct(
@@ -195,6 +198,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
namespace="opentrons",
version=1,
),
+ notes=[],
result=tiprack_2_result_captor,
)
assert commands_result[3] == commands.LoadModule.construct(
@@ -209,6 +213,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_4),
moduleId="module-0",
),
+ notes=[],
result=module_1_result_captor,
)
assert commands_result[4] == commands.LoadLabware.construct(
@@ -224,6 +229,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
namespace="opentrons",
version=1,
),
+ notes=[],
result=well_plate_1_result_captor,
)
assert commands_result[5] == commands.LoadLabware.construct(
@@ -239,6 +245,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
namespace="opentrons",
version=1,
),
+ notes=[],
result=module_plate_1_result_captor,
)
@@ -252,6 +259,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
params=commands.LoadPipetteParams(
pipetteName=PipetteNameType.P300_SINGLE, mount=MountType.LEFT
),
+ notes=[],
result=pipette_left_result_captor,
)
@@ -265,6 +273,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
params=commands.LoadPipetteParams(
pipetteName=PipetteNameType.P300_MULTI, mount=MountType.RIGHT
),
+ notes=[],
result=pipette_right_result_captor,
)
@@ -289,6 +298,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
labwareId=tiprack_1_id,
wellName="A1",
),
+ notes=[],
result=commands.PickUpTipResult(
tipVolume=300.0, tipLength=51.83, position=DeckPoint(x=0, y=0, z=0)
),
@@ -305,6 +315,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
labwareId=tiprack_2_id,
wellName="A1",
),
+ notes=[],
result=commands.PickUpTipResult(
tipVolume=300.0, tipLength=51.83, position=DeckPoint(x=0, y=0, z=0)
),
@@ -322,6 +333,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
labwareId="fixedTrash",
wellName="A1",
),
+ notes=[],
result=commands.DropTipResult(position=DeckPoint(x=0, y=0, z=0)),
)
@@ -337,6 +349,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
labwareId=tiprack_1_id,
wellName="B1",
),
+ notes=[],
result=commands.PickUpTipResult(
tipVolume=300.0, tipLength=51.83, position=DeckPoint(x=0, y=0, z=0)
),
@@ -355,6 +368,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
volume=40,
flowRate=150,
),
+ notes=[],
result=commands.AspirateResult(volume=40, position=DeckPoint(x=0, y=0, z=0)),
)
assert commands_result[13] == commands.Dispense.construct(
@@ -371,6 +385,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
volume=35,
flowRate=360,
),
+ notes=[],
result=commands.DispenseResult(volume=35, position=DeckPoint(x=0, y=0, z=0)),
)
assert commands_result[14] == commands.Aspirate.construct(
@@ -387,6 +402,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
volume=40,
flowRate=150.0,
),
+ notes=[],
result=commands.AspirateResult(volume=40, position=DeckPoint(x=0, y=0, z=0)),
)
assert commands_result[15] == commands.Dispense.construct(
@@ -403,6 +419,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
volume=35,
flowRate=300,
),
+ notes=[],
result=commands.DispenseResult(volume=35, position=DeckPoint(x=0, y=0, z=0)),
)
assert commands_result[16] == commands.BlowOut.construct(
@@ -418,6 +435,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
wellName="B1",
flowRate=1000.0,
),
+ notes=[],
result=commands.BlowOutResult(position=DeckPoint(x=0, y=0, z=0)),
)
assert commands_result[17] == commands.Aspirate.construct(
@@ -434,6 +452,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
volume=50,
flowRate=150,
),
+ notes=[],
result=commands.AspirateResult(volume=50, position=DeckPoint(x=0, y=0, z=0)),
)
assert commands_result[18] == commands.Dispense.construct(
@@ -450,6 +469,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
volume=50,
flowRate=300,
),
+ notes=[],
result=commands.DispenseResult(volume=50, position=DeckPoint(x=0, y=0, z=0)),
)
assert commands_result[19] == commands.BlowOut.construct(
@@ -465,6 +485,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
wellName="B1",
flowRate=1000.0,
),
+ notes=[],
result=commands.BlowOutResult(position=DeckPoint(x=0, y=0, z=0)),
)
assert commands_result[20] == commands.Aspirate.construct(
@@ -481,6 +502,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
volume=300,
flowRate=150,
),
+ notes=[],
result=commands.AspirateResult(volume=300, position=DeckPoint(x=0, y=0, z=0)),
)
assert commands_result[21] == commands.Dispense.construct(
@@ -497,6 +519,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
volume=300,
flowRate=300,
),
+ notes=[],
result=commands.DispenseResult(volume=300, position=DeckPoint(x=0, y=0, z=0)),
)
assert commands_result[22] == commands.BlowOut.construct(
@@ -512,6 +535,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
wellName="B1",
flowRate=1000.0,
),
+ notes=[],
result=commands.BlowOutResult(position=DeckPoint(x=0, y=0, z=0)),
)
# TODO:(jr, 15.08.2022): this should map to move_to when move_to is mapped in a followup ticket RSS-62
@@ -526,6 +550,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
legacyCommandText="Moving to (100, 100, 10)",
legacyCommandType="command.MOVE_TO",
),
+ notes=[],
result=commands.CustomResult(),
)
# TODO:(jr, 15.08.2022): aspirate commands with no labware get filtered
@@ -541,6 +566,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
legacyCommandText="Aspirating 300.0 uL from (100, 100, 10) at 150.0 uL/sec",
legacyCommandType="command.ASPIRATE",
),
+ notes=[],
result=commands.CustomResult(),
)
# TODO:(jr, 15.08.2022): dispense commands with no labware get filtered
@@ -556,6 +582,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
legacyCommandText="Dispensing 300.0 uL into (100, 100, 10) at 300.0 uL/sec",
legacyCommandType="command.DISPENSE",
),
+ notes=[],
result=commands.CustomResult(),
)
# TODO:(jr, 15.08.2022): blow_out commands with no labware get filtered
@@ -571,6 +598,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
legacyCommandText="Blowing out at (100, 100, 10)",
legacyCommandType="command.BLOW_OUT",
),
+ notes=[],
result=commands.CustomResult(),
)
assert commands_result[27] == commands.Aspirate.construct(
@@ -587,6 +615,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
volume=50,
flowRate=150,
),
+ notes=[],
result=commands.AspirateResult(volume=50, position=DeckPoint(x=0, y=0, z=0)),
)
assert commands_result[28] == commands.Dispense.construct(
@@ -603,6 +632,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
volume=50,
flowRate=300,
),
+ notes=[],
result=commands.DispenseResult(volume=50, position=DeckPoint(x=0, y=0, z=0)),
)
# TODO:(jr, 15.08.2022): aspirate commands with no labware get filtered
@@ -618,6 +648,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
legacyCommandText="Aspirating 50.0 uL from Opentrons 96 Well Aluminum Block with NEST Well Plate 100 µL on 3 at 150.0 uL/sec",
legacyCommandType="command.ASPIRATE",
),
+ notes=[],
result=commands.CustomResult(),
)
# TODO:(jr, 15.08.2022): dispense commands with no labware get filtered
@@ -633,6 +664,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
legacyCommandText="Dispensing 50.0 uL into Opentrons 96 Well Aluminum Block with NEST Well Plate 100 µL on 3 at 300.0 uL/sec",
legacyCommandType="command.DISPENSE",
),
+ notes=[],
result=commands.CustomResult(),
)
assert commands_result[31] == commands.DropTip.construct(
@@ -647,6 +679,7 @@ async def test_big_protocol_commands(big_protocol_file: Path) -> None:
labwareId=tiprack_1_id,
wellName="A1",
),
+ notes=[],
result=commands.DropTipResult(position=DeckPoint(x=0, y=0, z=0)),
)
@@ -721,3 +754,46 @@ async def test_zero_volume_dispense_commands(
labwareId=load_well_plate.result.labwareId,
wellName="D7",
)
+
+
+async def test_air_gap(tmp_path: Path) -> None:
+ """An `air_gap()` should be mapped to an `aspirate`.
+
+ This covers RQA-2621.
+ """
+ path = tmp_path / "protocol.py"
+ path.write_text(
+ dedent(
+ """\
+ metadata = {"apiLevel": "2.13"}
+ def run(protocol):
+ # Prep:
+ tip_rack = protocol.load_labware("opentrons_96_tiprack_300ul", 1)
+ well_plate = protocol.load_labware("biorad_96_wellplate_200ul_pcr", 2)
+ pipette = protocol.load_instrument("p300_single_gen2", mount="left", tip_racks=[tip_rack])
+ pipette.pick_up_tip()
+
+ # Test:
+ pipette.move_to(well_plate["A1"].top())
+ pipette.air_gap(100)
+ """
+ )
+ )
+ result_commands = await simulate_and_get_commands(path)
+ [
+ initial_home,
+ load_tip_rack,
+ load_well_plate,
+ load_pipette,
+ pick_up_tip,
+ move_to_well,
+ air_gap_aspirate,
+ ] = result_commands
+ assert isinstance(initial_home, commands.Home)
+ assert isinstance(load_tip_rack, commands.LoadLabware)
+ assert isinstance(load_well_plate, commands.LoadLabware)
+ assert isinstance(load_pipette, commands.LoadPipette)
+ assert isinstance(pick_up_tip, commands.PickUpTip)
+ # TODO(mm, 2024-04-23): This commands.Custom looks wrong. This should be a commands.MoveToWell.
+ assert isinstance(move_to_well, commands.Custom)
+ assert isinstance(air_gap_aspirate, commands.Aspirate)
diff --git a/api/tests/opentrons/protocol_runner/smoke_tests/test_legacy_custom_labware.py b/api/tests/opentrons/protocol_runner/smoke_tests/test_legacy_custom_labware.py
index 32456b98af1..e9c9371fe2a 100644
--- a/api/tests/opentrons/protocol_runner/smoke_tests/test_legacy_custom_labware.py
+++ b/api/tests/opentrons/protocol_runner/smoke_tests/test_legacy_custom_labware.py
@@ -56,7 +56,7 @@ async def test_legacy_custom_labware(custom_labware_protocol_files: List[Path])
robot_type="OT-2 Standard",
protocol_config=protocol_source.config,
)
- result = await subject.run(protocol_source)
+ result = await subject.run(deck_configuration=[], protocol_source=protocol_source)
expected_labware = LoadedLabware.construct(
id=matchers.Anything(),
diff --git a/api/tests/opentrons/protocol_runner/smoke_tests/test_legacy_module_commands.py b/api/tests/opentrons/protocol_runner/smoke_tests/test_legacy_module_commands.py
index dd7d74885fe..afc9c500c29 100644
--- a/api/tests/opentrons/protocol_runner/smoke_tests/test_legacy_module_commands.py
+++ b/api/tests/opentrons/protocol_runner/smoke_tests/test_legacy_module_commands.py
@@ -64,7 +64,7 @@ async def test_runner_with_modules_in_legacy_python(
robot_type="OT-2 Standard",
protocol_config=protocol_source.config,
)
- result = await subject.run(protocol_source)
+ result = await subject.run(deck_configuration=[], protocol_source=protocol_source)
commands_result = result.commands
assert len(commands_result) == 6
@@ -82,6 +82,7 @@ async def test_runner_with_modules_in_legacy_python(
startedAt=matchers.IsA(datetime),
completedAt=matchers.IsA(datetime),
params=commands.HomeParams(axes=None),
+ notes=[],
result=commands.HomeResult(),
)
assert commands_result[1] == commands.LoadLabware.construct(
@@ -92,6 +93,7 @@ async def test_runner_with_modules_in_legacy_python(
startedAt=matchers.IsA(datetime),
completedAt=matchers.IsA(datetime),
params=matchers.Anything(),
+ notes=[],
result=matchers.Anything(),
)
@@ -103,6 +105,7 @@ async def test_runner_with_modules_in_legacy_python(
startedAt=matchers.IsA(datetime),
completedAt=matchers.IsA(datetime),
params=matchers.Anything(),
+ notes=[],
result=temp_module_result_captor,
)
@@ -114,6 +117,7 @@ async def test_runner_with_modules_in_legacy_python(
startedAt=matchers.IsA(datetime),
completedAt=matchers.IsA(datetime),
params=matchers.Anything(),
+ notes=[],
result=mag_module_result_captor,
)
@@ -125,6 +129,7 @@ async def test_runner_with_modules_in_legacy_python(
startedAt=matchers.IsA(datetime),
completedAt=matchers.IsA(datetime),
params=matchers.Anything(),
+ notes=[],
result=thermocycler_result_captor,
)
@@ -136,6 +141,7 @@ async def test_runner_with_modules_in_legacy_python(
startedAt=matchers.IsA(datetime),
completedAt=matchers.IsA(datetime),
params=matchers.Anything(),
+ notes=[],
result=heater_shaker_result_captor,
)
diff --git a/api/tests/opentrons/protocol_runner/smoke_tests/test_protocol_runner.py b/api/tests/opentrons/protocol_runner/smoke_tests/test_protocol_runner.py
index 459361a5972..7253a6e2f91 100644
--- a/api/tests/opentrons/protocol_runner/smoke_tests/test_protocol_runner.py
+++ b/api/tests/opentrons/protocol_runner/smoke_tests/test_protocol_runner.py
@@ -43,7 +43,11 @@ async def test_runner_with_python(
robot_type="OT-2 Standard",
protocol_config=protocol_source.config,
)
- result = await subject.run(protocol_source)
+ result = await subject.run(
+ deck_configuration=[],
+ protocol_source=protocol_source,
+ run_time_param_values=None,
+ )
commands_result = result.commands
pipettes_result = result.state_summary.pipettes
labware_result = result.state_summary.labware
@@ -92,6 +96,7 @@ async def test_runner_with_python(
labwareId=labware_id_captor.value,
wellName="A1",
),
+ notes=[],
result=commands.PickUpTipResult(
tipVolume=300.0,
tipLength=51.83,
@@ -114,7 +119,7 @@ async def test_runner_with_json(json_protocol_file: Path) -> None:
subject = await create_simulating_runner(
robot_type="OT-2 Standard", protocol_config=protocol_source.config
)
- result = await subject.run(protocol_source)
+ result = await subject.run(deck_configuration=[], protocol_source=protocol_source)
commands_result = result.commands
pipettes_result = result.state_summary.pipettes
@@ -153,6 +158,7 @@ async def test_runner_with_json(json_protocol_file: Path) -> None:
labwareId="labware-id",
wellName="A1",
),
+ notes=[],
result=commands.PickUpTipResult(
tipVolume=300.0,
tipLength=51.83,
@@ -176,7 +182,11 @@ async def test_runner_with_legacy_python(legacy_python_protocol_file: Path) -> N
robot_type="OT-2 Standard",
protocol_config=protocol_source.config,
)
- result = await subject.run(protocol_source)
+ result = await subject.run(
+ deck_configuration=[],
+ protocol_source=protocol_source,
+ run_time_param_values=None,
+ )
commands_result = result.commands
pipettes_result = result.state_summary.pipettes
@@ -216,6 +226,7 @@ async def test_runner_with_legacy_python(legacy_python_protocol_file: Path) -> N
labwareId=labware_id_captor.value,
wellName="A1",
),
+ notes=[],
result=commands.PickUpTipResult(
tipVolume=300.0, tipLength=51.83, position=DeckPoint(x=0, y=0, z=0)
),
@@ -235,7 +246,11 @@ async def test_runner_with_legacy_json(legacy_json_protocol_file: Path) -> None:
subject = await create_simulating_runner(
robot_type="OT-2 Standard", protocol_config=protocol_source.config
)
- result = await subject.run(protocol_source)
+ result = await subject.run(
+ deck_configuration=[],
+ protocol_source=protocol_source,
+ run_time_param_values=None,
+ )
commands_result = result.commands
pipettes_result = result.state_summary.pipettes
@@ -276,6 +291,7 @@ async def test_runner_with_legacy_json(legacy_json_protocol_file: Path) -> None:
labwareId=labware_id_captor.value,
wellName="A1",
),
+ notes=[],
result=commands.PickUpTipResult(
tipVolume=300.0, tipLength=51.83, position=DeckPoint(x=0, y=0, z=0)
),
diff --git a/api/tests/opentrons/protocol_runner/test_legacy_command_mapper.py b/api/tests/opentrons/protocol_runner/test_legacy_command_mapper.py
index e5995136685..38a9cec60d8 100644
--- a/api/tests/opentrons/protocol_runner/test_legacy_command_mapper.py
+++ b/api/tests/opentrons/protocol_runner/test_legacy_command_mapper.py
@@ -7,7 +7,7 @@
from decoy import matchers, Decoy
from opentrons.hardware_control.dev_types import PipetteDict
-from opentrons.commands.types import CommentMessage, PauseMessage, CommandMessage
+from opentrons.legacy_commands.types import CommentMessage, PauseMessage, CommandMessage
from opentrons.protocol_engine import (
DeckSlotLocation,
ModuleLocation,
@@ -16,6 +16,7 @@
commands as pe_commands,
actions as pe_actions,
)
+from opentrons.protocol_engine.error_recovery_policy import ErrorRecoveryType
from opentrons.protocol_engine.resources import (
ModuleDataProvider,
pipette_data_provider,
@@ -69,20 +70,22 @@ def test_map_before_command() -> None:
result = subject.map_command(legacy_command)
assert result == [
- pe_actions.UpdateCommandAction(
- private_result=None,
- command=pe_commands.Custom.construct(
- id="command.COMMENT-0",
+ pe_actions.QueueCommandAction(
+ command_id="command.COMMENT-0",
+ created_at=matchers.IsA(datetime),
+ request=pe_commands.CustomCreate(
key="command.COMMENT-0",
- status=pe_commands.CommandStatus.RUNNING,
- createdAt=matchers.IsA(datetime),
- startedAt=matchers.IsA(datetime),
params=LegacyCommandParams(
legacyCommandType="command.COMMENT",
legacyCommandText="hello world",
),
),
- )
+ request_hash=None,
+ ),
+ pe_actions.RunCommandAction(
+ command_id="command.COMMENT-0",
+ started_at=matchers.IsA(datetime),
+ ),
]
@@ -109,7 +112,7 @@ def test_map_after_command() -> None:
result = subject.map_command(legacy_command_end)
assert result == [
- pe_actions.UpdateCommandAction(
+ pe_actions.SucceedCommandAction(
private_result=None,
command=pe_commands.Custom.construct(
id="command.COMMENT-0",
@@ -123,6 +126,7 @@ def test_map_after_command() -> None:
legacyCommandText="hello world",
),
result=pe_commands.CustomResult(),
+ notes=[],
),
)
]
@@ -152,12 +156,15 @@ def test_map_after_with_error_command() -> None:
assert result == [
pe_actions.FailCommandAction(
command_id="command.COMMENT-0",
+ running_command=matchers.Anything(),
error_id=matchers.IsA(str),
failed_at=matchers.IsA(datetime),
error=matchers.ErrorMatching(
LegacyContextCommandError,
match="oh no",
),
+ notes=[],
+ type=ErrorRecoveryType.FAIL_RUN,
)
]
@@ -202,35 +209,37 @@ def test_command_stack() -> None:
]
assert result == [
- pe_actions.UpdateCommandAction(
- private_result=None,
- command=pe_commands.Custom.construct(
- id="command.COMMENT-0",
+ pe_actions.QueueCommandAction(
+ command_id="command.COMMENT-0",
+ created_at=matchers.IsA(datetime),
+ request=pe_commands.CustomCreate(
key="command.COMMENT-0",
- status=pe_commands.CommandStatus.RUNNING,
- createdAt=matchers.IsA(datetime),
- startedAt=matchers.IsA(datetime),
params=LegacyCommandParams(
legacyCommandType="command.COMMENT",
legacyCommandText="hello",
),
),
+ request_hash=None,
),
- pe_actions.UpdateCommandAction(
- private_result=None,
- command=pe_commands.Custom.construct(
- id="command.COMMENT-1",
+ pe_actions.RunCommandAction(
+ command_id="command.COMMENT-0", started_at=matchers.IsA(datetime)
+ ),
+ pe_actions.QueueCommandAction(
+ command_id="command.COMMENT-1",
+ created_at=matchers.IsA(datetime),
+ request=pe_commands.CustomCreate(
key="command.COMMENT-1",
- status=pe_commands.CommandStatus.RUNNING,
- createdAt=matchers.IsA(datetime),
- startedAt=matchers.IsA(datetime),
params=LegacyCommandParams(
legacyCommandType="command.COMMENT",
legacyCommandText="goodbye",
),
),
+ request_hash=None,
+ ),
+ pe_actions.RunCommandAction(
+ command_id="command.COMMENT-1", started_at=matchers.IsA(datetime)
),
- pe_actions.UpdateCommandAction(
+ pe_actions.SucceedCommandAction(
private_result=None,
command=pe_commands.Custom.construct(
id="command.COMMENT-0",
@@ -244,13 +253,17 @@ def test_command_stack() -> None:
legacyCommandText="hello",
),
result=pe_commands.CustomResult(),
+ notes=[],
),
),
pe_actions.FailCommandAction(
command_id="command.COMMENT-1",
+ running_command=matchers.Anything(),
error_id=matchers.IsA(str),
failed_at=matchers.IsA(datetime),
error=matchers.ErrorMatching(LegacyContextCommandError, "oh no"),
+ notes=[],
+ type=ErrorRecoveryType.FAIL_RUN,
),
]
@@ -267,32 +280,55 @@ def test_map_labware_load(minimal_labware_def: LabwareDefinition) -> None:
offset_id="labware-offset-id-123",
labware_display_name="My special labware",
)
- expected_output = pe_commands.LoadLabware.construct(
- id=matchers.IsA(str),
- key=matchers.IsA(str),
- status=pe_commands.CommandStatus.SUCCEEDED,
- createdAt=matchers.IsA(datetime),
- startedAt=matchers.IsA(datetime),
- completedAt=matchers.IsA(datetime),
- params=pe_commands.LoadLabwareParams.construct(
- location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
- namespace="some_namespace",
- loadName="some_load_name",
- version=123,
- displayName="My special labware",
- labwareId=None,
+
+ expected_id_and_key = "commands.LOAD_LABWARE-0"
+ expected_params = pe_commands.LoadLabwareParams(
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
+ namespace="some_namespace",
+ loadName="some_load_name",
+ version=123,
+ displayName="My special labware",
+ labwareId=None,
+ )
+ expected_queue = pe_actions.QueueCommandAction(
+ command_id=expected_id_and_key,
+ created_at=matchers.IsA(datetime),
+ request=pe_commands.LoadLabwareCreate(
+ key=expected_id_and_key,
+ params=expected_params,
),
- result=pe_commands.LoadLabwareResult.construct(
- labwareId=matchers.IsA(str),
- # Trusting that the exact fields within in the labware definition
- # get passed through correctly.
- definition=matchers.Anything(),
- offsetId="labware-offset-id-123",
+ request_hash=None,
+ )
+ expected_run = pe_actions.RunCommandAction(
+ command_id=expected_id_and_key,
+ started_at=matchers.IsA(datetime),
+ )
+ expected_succeed = pe_actions.SucceedCommandAction(
+ command=pe_commands.LoadLabware.construct(
+ id=expected_id_and_key,
+ key=expected_id_and_key,
+ params=expected_params,
+ status=pe_commands.CommandStatus.SUCCEEDED,
+ createdAt=matchers.IsA(datetime),
+ startedAt=matchers.IsA(datetime),
+ completedAt=matchers.IsA(datetime),
+ result=pe_commands.LoadLabwareResult.construct(
+ labwareId=matchers.IsA(str),
+                # Trusting that the exact fields within the labware definition
+ # get passed through correctly.
+ definition=matchers.Anything(),
+ offsetId="labware-offset-id-123",
+ ),
+ notes=[],
),
+ private_result=None,
+ )
+ result_queue, result_run, result_succeed = LegacyCommandMapper().map_equipment_load(
+ input
)
- output = LegacyCommandMapper().map_equipment_load(input)
- assert output[0] == expected_output
- assert output[1] is None
+ assert result_queue == expected_queue
+ assert result_run == expected_run
+ assert result_succeed == expected_succeed
def test_map_instrument_load(decoy: Decoy) -> None:
@@ -306,30 +342,51 @@ def test_map_instrument_load(decoy: Decoy) -> None:
pipette_config = cast(LoadedStaticPipetteData, {"config": True})
decoy.when(
- pipette_data_provider.get_pipette_static_config(pipette_dict)
+ pipette_data_provider.get_pipette_static_config(pipette_dict, "v0"),
).then_return(pipette_config)
- result = LegacyCommandMapper().map_equipment_load(input)
- pipette_id_captor = matchers.Captor()
-
- assert result[0] == pe_commands.LoadPipette.construct(
- id=matchers.IsA(str),
- key=matchers.IsA(str),
- status=pe_commands.CommandStatus.SUCCEEDED,
- createdAt=matchers.IsA(datetime),
- startedAt=matchers.IsA(datetime),
- completedAt=matchers.IsA(datetime),
- params=pe_commands.LoadPipetteParams.construct(
- pipetteName=PipetteNameType.P1000_SINGLE_GEN2, mount=MountType.LEFT
+ expected_id_and_key = "commands.LOAD_PIPETTE-0"
+ expected_params = pe_commands.LoadPipetteParams.construct(
+ pipetteName=PipetteNameType.P1000_SINGLE_GEN2, mount=MountType.LEFT
+ )
+ expected_queue = pe_actions.QueueCommandAction(
+ command_id=expected_id_and_key,
+ created_at=matchers.IsA(datetime),
+ request=pe_commands.LoadPipetteCreate(
+ key=expected_id_and_key, params=expected_params
),
- result=pe_commands.LoadPipetteResult.construct(pipetteId=pipette_id_captor),
+ request_hash=None,
)
- assert result[1] == pe_commands.LoadPipettePrivateResult(
- pipette_id="pipette-0",
- serial_number="fizzbuzz",
- config=pipette_config,
+ expected_run = pe_actions.RunCommandAction(
+ command_id=expected_id_and_key, started_at=matchers.IsA(datetime)
+ )
+ expected_succeed = pe_actions.SucceedCommandAction(
+ command=pe_commands.LoadPipette.construct(
+ id=expected_id_and_key,
+ key=expected_id_and_key,
+ status=pe_commands.CommandStatus.SUCCEEDED,
+ createdAt=matchers.IsA(datetime),
+ startedAt=matchers.IsA(datetime),
+ completedAt=matchers.IsA(datetime),
+ params=expected_params,
+ result=pe_commands.LoadPipetteResult(pipetteId="pipette-0"),
+ notes=[],
+ ),
+ private_result=pe_commands.LoadPipettePrivateResult(
+ pipette_id="pipette-0", serial_number="fizzbuzz", config=pipette_config
+ ),
)
+ [
+ result_queue,
+ result_run,
+ result_succeed,
+ ] = LegacyCommandMapper().map_equipment_load(input)
+
+ assert result_queue == expected_queue
+ assert result_run == expected_run
+ assert result_succeed == expected_succeed
+
def test_map_module_load(
decoy: Decoy,
@@ -349,30 +406,50 @@ def test_map_module_load(
module_data_provider.get_definition(ModuleModel.TEMPERATURE_MODULE_V2)
).then_return(test_definition)
- expected_output = pe_commands.LoadModule.construct(
- id=matchers.IsA(str),
- key=matchers.IsA(str),
- status=pe_commands.CommandStatus.SUCCEEDED,
- createdAt=matchers.IsA(datetime),
- startedAt=matchers.IsA(datetime),
- completedAt=matchers.IsA(datetime),
- params=pe_commands.LoadModuleParams.construct(
- model=ModuleModel.TEMPERATURE_MODULE_V1,
- location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
- moduleId=matchers.IsA(str),
+ expected_id_and_key = "commands.LOAD_MODULE-0"
+ expected_params = pe_commands.LoadModuleParams.construct(
+ model=ModuleModel.TEMPERATURE_MODULE_V1,
+ location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
+ moduleId=matchers.IsA(str),
+ )
+ expected_queue = pe_actions.QueueCommandAction(
+ command_id=expected_id_and_key,
+ created_at=matchers.IsA(datetime),
+ request=pe_commands.LoadModuleCreate(
+ key=expected_id_and_key, params=expected_params
),
- result=pe_commands.LoadModuleResult.construct(
- moduleId=matchers.IsA(str),
- serialNumber="module-serial",
- definition=test_definition,
- model=ModuleModel.TEMPERATURE_MODULE_V2,
+ request_hash=None,
+ )
+ expected_run = pe_actions.RunCommandAction(
+ command_id=expected_id_and_key, started_at=matchers.IsA(datetime)
+ )
+ expected_succeed = pe_actions.SucceedCommandAction(
+ command=pe_commands.LoadModule.construct(
+ id=expected_id_and_key,
+ key=expected_id_and_key,
+ status=pe_commands.CommandStatus.SUCCEEDED,
+ createdAt=matchers.IsA(datetime),
+ startedAt=matchers.IsA(datetime),
+ completedAt=matchers.IsA(datetime),
+ params=expected_params,
+ result=pe_commands.LoadModuleResult.construct(
+ moduleId=matchers.IsA(str),
+ serialNumber="module-serial",
+ definition=test_definition,
+ model=ModuleModel.TEMPERATURE_MODULE_V2,
+ ),
+ notes=[],
),
+ private_result=None,
)
- output = LegacyCommandMapper(
+
+ [result_queue, result_run, result_succeed] = LegacyCommandMapper(
module_data_provider=module_data_provider
).map_equipment_load(input)
- assert output[0] == expected_output
- assert output[1] is None
+
+ assert result_queue == expected_queue
+ assert result_run == expected_run
+ assert result_succeed == expected_succeed
def test_map_module_labware_load(minimal_labware_def: LabwareDefinition) -> None:
@@ -388,33 +465,56 @@ def test_map_module_labware_load(minimal_labware_def: LabwareDefinition) -> None
offset_id="labware-offset-id-123",
)
- expected_output = pe_commands.LoadLabware.construct(
- id=matchers.IsA(str),
- key=matchers.IsA(str),
- status=pe_commands.CommandStatus.SUCCEEDED,
- createdAt=matchers.IsA(datetime),
- startedAt=matchers.IsA(datetime),
- completedAt=matchers.IsA(datetime),
- params=pe_commands.LoadLabwareParams.construct(
- location=ModuleLocation(moduleId="module-123"),
- namespace="some_namespace",
- loadName="some_load_name",
- version=123,
- displayName="My very special module labware",
- labwareId=None,
+ expected_id_and_key = "commands.LOAD_LABWARE-0"
+ expected_params = pe_commands.LoadLabwareParams.construct(
+ location=ModuleLocation(moduleId="module-123"),
+ namespace="some_namespace",
+ loadName="some_load_name",
+ version=123,
+ displayName="My very special module labware",
+ labwareId=None,
+ )
+ expected_queue = pe_actions.QueueCommandAction(
+ command_id=expected_id_and_key,
+ created_at=matchers.IsA(datetime),
+ request=pe_commands.LoadLabwareCreate(
+ key=expected_id_and_key,
+ params=expected_params,
),
- result=pe_commands.LoadLabwareResult.construct(
- labwareId=matchers.IsA(str),
- definition=matchers.Anything(),
- offsetId="labware-offset-id-123",
+ request_hash=None,
+ )
+ expected_run = pe_actions.RunCommandAction(
+        command_id=expected_id_and_key,
+ started_at=matchers.IsA(datetime),
+ )
+ expected_succeed = pe_actions.SucceedCommandAction(
+ command=pe_commands.LoadLabware.construct(
+ id=expected_id_and_key,
+ key=expected_id_and_key,
+ params=expected_params,
+ status=pe_commands.CommandStatus.SUCCEEDED,
+ createdAt=matchers.IsA(datetime),
+ startedAt=matchers.IsA(datetime),
+ completedAt=matchers.IsA(datetime),
+ result=pe_commands.LoadLabwareResult.construct(
+ labwareId=matchers.IsA(str),
+                # Trusting that the exact fields in the labware definition
+                # get passed through correctly.
+ definition=matchers.Anything(),
+ offsetId="labware-offset-id-123",
+ ),
+ notes=[],
),
+ private_result=None,
)
+
subject = LegacyCommandMapper()
subject._module_id_by_slot = {DeckSlotName.SLOT_1: "module-123"}
- output = subject.map_equipment_load(load_input)
+ result_queue, result_run, result_succeed = subject.map_equipment_load(load_input)
- assert output[0] == expected_output
- assert output[1] is None
+ assert result_queue == expected_queue
+ assert result_run == expected_run
+ assert result_succeed == expected_succeed
def test_map_pause() -> None:
@@ -441,18 +541,20 @@ def test_map_pause() -> None:
]
assert result == [
- pe_actions.UpdateCommandAction(
- private_result=None,
- command=pe_commands.WaitForResume.construct(
- id="command.PAUSE-0",
+ pe_actions.QueueCommandAction(
+ command_id="command.PAUSE-0",
+ created_at=matchers.IsA(datetime),
+ request=pe_commands.WaitForResumeCreate(
key="command.PAUSE-0",
- status=pe_commands.CommandStatus.RUNNING,
- createdAt=matchers.IsA(datetime),
- startedAt=matchers.IsA(datetime),
params=pe_commands.WaitForResumeParams(message="hello world"),
),
+ request_hash=None,
+ ),
+ pe_actions.RunCommandAction(
+ command_id="command.PAUSE-0",
+ started_at=matchers.IsA(datetime),
),
- pe_actions.UpdateCommandAction(
+ pe_actions.SucceedCommandAction(
private_result=None,
command=pe_commands.WaitForResume.construct(
id="command.PAUSE-0",
@@ -462,6 +564,7 @@ def test_map_pause() -> None:
startedAt=matchers.IsA(datetime),
completedAt=matchers.IsA(datetime),
params=pe_commands.WaitForResumeParams(message="hello world"),
+ notes=[],
),
),
pe_actions.PauseAction(source=pe_actions.PauseSource.PROTOCOL),
@@ -476,6 +579,7 @@ def test_map_pause() -> None:
"command.DISTRIBUTE",
"command.TRANSFER",
"command.RETURN_TIP",
+ "command.AIR_GAP",
],
)
def test_filter_higher_order_commands(command_type: str) -> None:
diff --git a/api/tests/opentrons/protocol_runner/test_legacy_context_plugin.py b/api/tests/opentrons/protocol_runner/test_legacy_context_plugin.py
index ac9a46112ff..f11676bcd37 100644
--- a/api/tests/opentrons/protocol_runner/test_legacy_context_plugin.py
+++ b/api/tests/opentrons/protocol_runner/test_legacy_context_plugin.py
@@ -5,7 +5,10 @@
from datetime import datetime
from typing import Callable
-from opentrons.commands.types import CommandMessage as LegacyCommand, PauseMessage
+from opentrons.legacy_commands.types import (
+ CommandMessage as LegacyCommand,
+ PauseMessage,
+)
from opentrons.protocol_engine import (
StateView,
actions as pe_actions,
@@ -93,7 +96,9 @@ async def test_broker_subscribe_unsubscribe(
subject: LegacyContextPlugin,
) -> None:
"""It should subscribe to the brokers on setup and unsubscribe on teardown."""
- command_broker_unsubscribe: Callable[[], None] = decoy.mock()
+ command_broker_unsubscribe: Callable[[], None] = decoy.mock(
+ name="command_broker_unsubscribe"
+ )
equipment_broker_subscription_context = decoy.mock(cls=_ContextManager)
decoy.when(
@@ -132,7 +137,7 @@ async def test_command_broker_messages(
command_handler_captor = matchers.Captor()
decoy.when(
mock_legacy_broker.subscribe(topic="command", handler=command_handler_captor)
- ).then_return(decoy.mock())
+ ).then_return(decoy.mock(name="command_broker_unsubscribe"))
decoy.when(
mock_equipment_broker.subscribed(callback=matchers.Anything())
).then_enter_with(None)
@@ -158,7 +163,9 @@ async def test_command_broker_messages(
decoy.when(
mock_legacy_command_mapper.map_command(command=legacy_command)
- ).then_return([pe_actions.UpdateCommandAction(engine_command, private_result=None)])
+ ).then_return(
+ [pe_actions.SucceedCommandAction(engine_command, private_result=None)]
+ )
await to_thread.run_sync(handler, legacy_command)
@@ -166,7 +173,7 @@ async def test_command_broker_messages(
decoy.verify(
mock_action_dispatcher.dispatch(
- pe_actions.UpdateCommandAction(engine_command, private_result=None)
+ pe_actions.SucceedCommandAction(engine_command, private_result=None)
)
)
@@ -185,7 +192,7 @@ async def test_equipment_broker_messages(
labware_handler_captor = matchers.Captor()
decoy.when(
mock_legacy_broker.subscribe(topic="command", handler=matchers.Anything())
- ).then_return(decoy.mock())
+ ).then_return(decoy.mock(name="command_broker_unsubscribe"))
decoy.when(
mock_equipment_broker.subscribed(callback=labware_handler_captor)
).then_enter_with(None)
@@ -215,7 +222,9 @@ async def test_equipment_broker_messages(
decoy.when(
mock_legacy_command_mapper.map_equipment_load(load_info=load_info)
- ).then_return((engine_command, None))
+ ).then_return(
+ [pe_actions.SucceedCommandAction(command=engine_command, private_result=None)]
+ )
await to_thread.run_sync(handler, load_info)
@@ -223,6 +232,6 @@ async def test_equipment_broker_messages(
decoy.verify(
mock_action_dispatcher.dispatch(
- pe_actions.UpdateCommandAction(command=engine_command, private_result=None)
+ pe_actions.SucceedCommandAction(command=engine_command, private_result=None)
),
)
diff --git a/api/tests/opentrons/protocol_runner/test_protocol_runner.py b/api/tests/opentrons/protocol_runner/test_protocol_runner.py
index 7965fc3bc1f..68e215bf3dd 100644
--- a/api/tests/opentrons/protocol_runner/test_protocol_runner.py
+++ b/api/tests/opentrons/protocol_runner/test_protocol_runner.py
@@ -1,6 +1,8 @@
"""Tests for the PythonAndLegacyRunner, JsonRunner & LiveRunner classes."""
+from datetime import datetime
+
import pytest
-from pytest_lazyfixture import lazy_fixture # type: ignore[import]
+from pytest_lazyfixture import lazy_fixture # type: ignore[import-untyped]
from decoy import Decoy, matchers
from pathlib import Path
from typing import List, cast, Optional, Union, Type
@@ -18,7 +20,12 @@
from opentrons.util.broker import Broker
from opentrons import protocol_reader
-from opentrons.protocol_engine import ProtocolEngine, Liquid, commands as pe_commands
+from opentrons.protocol_engine import (
+ ProtocolEngine,
+ Liquid,
+ commands as pe_commands,
+ errors as pe_errors,
+)
from opentrons.protocol_reader import (
ProtocolSource,
JsonProtocolConfig,
@@ -169,7 +176,7 @@ def live_runner_subject(
(None, LiveRunner),
],
)
-async def test_create_protocol_runner(
+def test_create_protocol_runner(
protocol_engine: ProtocolEngine,
hardware_api: HardwareAPI,
task_queue: TaskQueue,
@@ -203,16 +210,16 @@ async def test_create_protocol_runner(
(lazy_fixture("live_runner_subject")),
],
)
-async def test_play_starts_run(
+def test_play_starts_run(
decoy: Decoy,
protocol_engine: ProtocolEngine,
task_queue: TaskQueue,
subject: AnyRunner,
) -> None:
"""It should start a protocol run with play."""
- subject.play()
+ subject.play(deck_configuration=[])
- decoy.verify(protocol_engine.play(), times=1)
+ decoy.verify(protocol_engine.play(deck_configuration=[]), times=1)
@pytest.mark.parametrize(
@@ -223,7 +230,7 @@ async def test_play_starts_run(
(lazy_fixture("live_runner_subject")),
],
)
-async def test_pause(
+def test_pause(
decoy: Decoy,
protocol_engine: ProtocolEngine,
subject: AnyRunner,
@@ -231,7 +238,7 @@ async def test_pause(
"""It should pause a protocol run with pause."""
subject.pause()
- decoy.verify(protocol_engine.pause(), times=1)
+ decoy.verify(protocol_engine.request_pause(), times=1)
@pytest.mark.parametrize(
@@ -254,7 +261,7 @@ async def test_stop(
subject.play()
await subject.stop()
- decoy.verify(await protocol_engine.stop(), times=1)
+ decoy.verify(await protocol_engine.request_stop(), times=1)
@pytest.mark.parametrize(
@@ -286,6 +293,25 @@ async def test_stop_when_run_never_started(
)
+@pytest.mark.parametrize(
+ "subject",
+ [
+ (lazy_fixture("json_runner_subject")),
+ (lazy_fixture("legacy_python_runner_subject")),
+ (lazy_fixture("live_runner_subject")),
+ ],
+)
+def test_resume_from_recovery(
+ decoy: Decoy,
+ protocol_engine: ProtocolEngine,
+ subject: AnyRunner,
+) -> None:
+ """It should call `resume_from_recovery()` on the underlying engine."""
+ subject.resume_from_recovery()
+
+ decoy.verify(protocol_engine.resume_from_recovery(), times=1)
+
+
async def test_run_json_runner(
decoy: Decoy,
hardware_api: HardwareAPI,
@@ -299,16 +325,106 @@ async def test_run_json_runner(
)
assert json_runner_subject.was_started() is False
- await json_runner_subject.run()
+ await json_runner_subject.run(deck_configuration=[])
assert json_runner_subject.was_started() is True
decoy.verify(
- protocol_engine.play(),
+ protocol_engine.play(deck_configuration=[]),
task_queue.start(),
await task_queue.join(),
)
+async def test_run_json_runner_stop_requested_stops_enqueuing(
+ decoy: Decoy,
+ hardware_api: HardwareAPI,
+ protocol_engine: ProtocolEngine,
+ task_queue: TaskQueue,
+ json_runner_subject: JsonRunner,
+ json_file_reader: JsonFileReader,
+ json_translator: JsonTranslator,
+) -> None:
+ """It should run a protocol to completion."""
+ labware_definition = LabwareDefinition.construct() # type: ignore[call-arg]
+ json_protocol_source = ProtocolSource(
+ directory=Path("/dev/null"),
+ main_file=Path("/dev/null/abc.json"),
+ files=[],
+ metadata={},
+ robot_type="OT-2 Standard",
+ config=JsonProtocolConfig(schema_version=6),
+ content_hash="abc123",
+ )
+
+ commands: List[pe_commands.CommandCreate] = [
+ pe_commands.HomeCreate(params=pe_commands.HomeParams()),
+ pe_commands.WaitForDurationCreate(
+ params=pe_commands.WaitForDurationParams(seconds=10)
+ ),
+ pe_commands.LoadLiquidCreate(
+ params=pe_commands.LoadLiquidParams(
+ liquidId="water-id", labwareId="labware-id", volumeByWell={"A1": 30}
+ )
+ ),
+ ]
+
+ liquids: List[Liquid] = [
+ Liquid(id="water-id", displayName="water", description="water desc")
+ ]
+
+ json_protocol = ProtocolSchemaV6.construct() # type: ignore[call-arg]
+
+ decoy.when(
+ await protocol_reader.extract_labware_definitions(json_protocol_source)
+ ).then_return([labware_definition])
+ decoy.when(json_file_reader.read(json_protocol_source)).then_return(json_protocol)
+ decoy.when(json_translator.translate_commands(json_protocol)).then_return(commands)
+ decoy.when(json_translator.translate_liquids(json_protocol)).then_return(liquids)
+ decoy.when(
+ await protocol_engine.add_and_execute_command(
+ pe_commands.HomeCreate(params=pe_commands.HomeParams()),
+ )
+ ).then_return(
+ pe_commands.Home.construct(status=pe_commands.CommandStatus.SUCCEEDED) # type: ignore[call-arg]
+ )
+ decoy.when(
+ await protocol_engine.add_and_execute_command(
+ pe_commands.WaitForDurationCreate(
+ params=pe_commands.WaitForDurationParams(seconds=10)
+ ),
+ )
+ ).then_return(
+ pe_commands.WaitForDuration.construct( # type: ignore[call-arg]
+ error=pe_errors.ErrorOccurrence.from_failed(
+ id="some-id",
+ createdAt=datetime(year=2021, month=1, day=1),
+ error=pe_errors.ProtocolEngineError(),
+ )
+ )
+ )
+
+ await json_runner_subject.load(json_protocol_source)
+
+ run_func_captor = matchers.Captor()
+
+ decoy.verify(
+ protocol_engine.add_labware_definition(labware_definition),
+ protocol_engine.add_liquid(
+ id="water-id", name="water", description="water desc", color=None
+ ),
+ protocol_engine.add_command(
+ request=pe_commands.HomeCreate(params=pe_commands.HomeParams(axes=None))
+ ),
+ task_queue.set_run_func(func=run_func_captor),
+ )
+
+ # Verify that the run func calls the right things:
+ run_func = run_func_captor.value
+
+ with pytest.raises(pe_errors.ProtocolEngineError):
+ await run_func()
+
+
@pytest.mark.parametrize(
"schema_version, json_protocol",
[
@@ -366,6 +482,8 @@ async def test_load_json_runner(
await json_runner_subject.load(json_protocol_source)
+ run_func_captor = matchers.Captor()
+
decoy.verify(
protocol_engine.add_labware_definition(labware_definition),
protocol_engine.add_liquid(
@@ -374,24 +492,30 @@ async def test_load_json_runner(
protocol_engine.add_command(
request=pe_commands.HomeCreate(params=pe_commands.HomeParams(axes=None))
),
- protocol_engine.add_command(
+ task_queue.set_run_func(func=run_func_captor),
+ )
+
+ # Verify that the run func calls the right things:
+ run_func = run_func_captor.value
+ await run_func()
+ decoy.verify(
+ await protocol_engine.add_and_execute_command(
request=pe_commands.WaitForResumeCreate(
params=pe_commands.WaitForResumeParams(message="hello")
- )
+ ),
),
- protocol_engine.add_command(
+ await protocol_engine.add_and_execute_command(
request=pe_commands.WaitForResumeCreate(
params=pe_commands.WaitForResumeParams(message="goodbye")
- )
+ ),
),
- protocol_engine.add_command(
+ await protocol_engine.add_and_execute_command(
request=pe_commands.LoadLiquidCreate(
params=pe_commands.LoadLiquidParams(
liquidId="water-id", labwareId="labware-id", volumeByWell={"A1": 30}
)
),
),
- task_queue.set_run_func(func=protocol_engine.wait_until_complete),
)
@@ -456,21 +580,33 @@ async def test_load_legacy_python(
await legacy_python_runner_subject.load(
legacy_protocol_source,
python_parse_mode=PythonParseMode.ALLOW_LEGACY_METADATA_AND_REQUIREMENTS,
+ run_time_param_values=None,
)
+ run_func_captor = matchers.Captor()
+
decoy.verify(
protocol_engine.add_labware_definition(labware_definition),
protocol_engine.add_plugin(matchers.IsA(LegacyContextPlugin)),
- protocol_engine.add_command(
+ task_queue.set_run_func(run_func_captor),
+ )
+
+ assert broker_captor.value is legacy_python_runner_subject.broker
+
+ # Verify that the run func calls the right things:
+ run_func = run_func_captor.value
+ await run_func()
+ decoy.verify(
+ await protocol_engine.add_and_execute_command(
request=pe_commands.HomeCreate(params=pe_commands.HomeParams(axes=None))
),
- task_queue.set_run_func(
- func=legacy_executor.execute,
+ await legacy_executor.execute(
protocol=legacy_protocol,
context=legacy_context,
+ parameter_context=legacy_python_runner_subject._parameter_context,
+ run_time_param_values=None,
),
)
- assert broker_captor.value is legacy_python_runner_subject.broker
async def test_load_python_with_pe_papi_core(
@@ -526,6 +662,7 @@ async def test_load_python_with_pe_papi_core(
await legacy_python_runner_subject.load(
legacy_protocol_source,
python_parse_mode=PythonParseMode.ALLOW_LEGACY_METADATA_AND_REQUIREMENTS,
+ run_time_param_values=None,
)
decoy.verify(protocol_engine.add_plugin(matchers.IsA(LegacyContextPlugin)), times=0)
@@ -587,18 +724,29 @@ async def test_load_legacy_json(
await legacy_python_runner_subject.load(
legacy_protocol_source,
python_parse_mode=PythonParseMode.ALLOW_LEGACY_METADATA_AND_REQUIREMENTS,
+ run_time_param_values=None,
)
+ run_func_captor = matchers.Captor()
+
decoy.verify(
protocol_engine.add_labware_definition(labware_definition),
protocol_engine.add_plugin(matchers.IsA(LegacyContextPlugin)),
- protocol_engine.add_command(
+ task_queue.set_run_func(run_func_captor),
+ )
+
+ # Verify that the run func calls the right things:
+ run_func = run_func_captor.value
+ await run_func()
+ decoy.verify(
+ await protocol_engine.add_and_execute_command(
request=pe_commands.HomeCreate(params=pe_commands.HomeParams(axes=None))
),
- task_queue.set_run_func(
- func=legacy_executor.execute,
+ await legacy_executor.execute(
protocol=legacy_protocol,
context=legacy_context,
+ parameter_context=legacy_python_runner_subject._parameter_context,
+ run_time_param_values=None,
),
)
@@ -616,11 +764,11 @@ async def test_run_python_runner(
)
assert legacy_python_runner_subject.was_started() is False
- await legacy_python_runner_subject.run()
+ await legacy_python_runner_subject.run(deck_configuration=[])
assert legacy_python_runner_subject.was_started() is True
decoy.verify(
- protocol_engine.play(),
+ protocol_engine.play(deck_configuration=[]),
task_queue.start(),
await task_queue.join(),
)
@@ -639,12 +787,12 @@ async def test_run_live_runner(
)
assert live_runner_subject.was_started() is False
- await live_runner_subject.run()
+ await live_runner_subject.run(deck_configuration=[])
assert live_runner_subject.was_started() is True
decoy.verify(
await hardware_api.home(),
- protocol_engine.play(),
+ protocol_engine.play(deck_configuration=[]),
task_queue.start(),
await task_queue.join(),
)
diff --git a/api/tests/opentrons/protocol_runner/test_task_queue.py b/api/tests/opentrons/protocol_runner/test_task_queue.py
index 0359a0eb50b..6bf5cb90c7a 100644
--- a/api/tests/opentrons/protocol_runner/test_task_queue.py
+++ b/api/tests/opentrons/protocol_runner/test_task_queue.py
@@ -6,26 +6,28 @@
async def test_set_run_func(decoy: Decoy) -> None:
"""It should be able to add a task for the "run" phase."""
- run_func = decoy.mock(is_async=True)
- cleanup_func = decoy.mock(is_async=True)
+ run_func = decoy.mock(name="run_func", is_async=True)
+ cleanup_func = decoy.mock(name="cleanup_func", is_async=True)
- subject = TaskQueue(cleanup_func=cleanup_func)
+    subject = TaskQueue()
+ subject.set_cleanup_func(func=cleanup_func)
subject.set_run_func(func=run_func)
subject.start()
await subject.join()
decoy.verify(
await run_func(),
- await cleanup_func(error=None),
+ await cleanup_func(None),
)
async def test_passes_args(decoy: Decoy) -> None:
"""It should pass kwargs to the run phase function."""
- run_func = decoy.mock(is_async=True)
- cleanup_func = decoy.mock(is_async=True)
+ run_func = decoy.mock(name="run_func", is_async=True)
+ cleanup_func = decoy.mock(name="cleanup_func", is_async=True)
- subject = TaskQueue(cleanup_func=cleanup_func)
+    subject = TaskQueue()
+ subject.set_cleanup_func(func=cleanup_func)
subject.set_run_func(func=run_func, hello="world")
subject.start()
await subject.join()
@@ -35,24 +37,26 @@ async def test_passes_args(decoy: Decoy) -> None:
async def test_cleanup_gets_run_error(decoy: Decoy) -> None:
"""It should verify "cleanup" func gets error raised in "run" func."""
- run_func = decoy.mock(is_async=True)
- cleanup_func = decoy.mock(is_async=True)
+ run_func = decoy.mock(name="run_func", is_async=True)
+ cleanup_func = decoy.mock(name="cleanup_func", is_async=True)
error = RuntimeError("Oh no!")
decoy.when(await run_func()).then_raise(error)
- subject = TaskQueue(cleanup_func=cleanup_func)
+    subject = TaskQueue()
+ subject.set_cleanup_func(func=cleanup_func)
subject.set_run_func(func=run_func)
subject.start()
await subject.join()
- decoy.verify(await cleanup_func(error=error))
+ decoy.verify(await cleanup_func(error))
async def test_join_waits_for_start(decoy: Decoy) -> None:
"""It should wait until the queue is started when join is called."""
- cleanup_func = decoy.mock(is_async=True)
- subject = TaskQueue(cleanup_func=cleanup_func)
+ cleanup_func = decoy.mock(name="cleanup_func", is_async=True)
+    subject = TaskQueue()
+ subject.set_cleanup_func(func=cleanup_func)
join_task = asyncio.create_task(subject.join())
await asyncio.sleep(0)
@@ -64,14 +68,15 @@ async def test_join_waits_for_start(decoy: Decoy) -> None:
async def test_start_runs_stuff_once(decoy: Decoy) -> None:
"""Calling `start` should no-op if already started."""
- run_func = decoy.mock(is_async=True)
- cleanup_func = decoy.mock(is_async=True)
+ run_func = decoy.mock(name="run_func", is_async=True)
+ cleanup_func = decoy.mock(name="cleanup_func", is_async=True)
- subject = TaskQueue(cleanup_func=cleanup_func)
+    subject = TaskQueue()
+ subject.set_cleanup_func(func=cleanup_func)
subject.set_run_func(func=run_func)
subject.start()
subject.start()
await subject.join()
decoy.verify(await run_func(), times=1)
- decoy.verify(await cleanup_func(error=None), times=1)
+ decoy.verify(await cleanup_func(None), times=1)
diff --git a/api/tests/opentrons/protocols/api_support/test_instrument.py b/api/tests/opentrons/protocols/api_support/test_instrument.py
index 3e8391cc512..304ef97f0a6 100644
--- a/api/tests/opentrons/protocols/api_support/test_instrument.py
+++ b/api/tests/opentrons/protocols/api_support/test_instrument.py
@@ -1,6 +1,11 @@
import pytest
+import logging
+from typing import Optional
from opentrons.protocol_api import ProtocolContext
-from opentrons.protocols.api_support.instrument import validate_takes_liquid
+from opentrons.protocols.api_support.instrument import (
+ validate_takes_liquid,
+ validate_tiprack,
+)
from opentrons.types import Location, Point
@@ -125,3 +130,32 @@ def test_validate_takes_liquid_adapter(ctx):
reject_module=False,
reject_adapter=True,
)
+
+
+@pytest.mark.parametrize(
+ argnames=["pipette_name", "log_value"],
+ argvalues=[
+ ["p1000_96", None],
+ [
+ "p50_single_flex",
+ "The pipette p50_single_flex and its tip rack opentrons_flex_96_tiprack_200ul appear to be mismatched. Please check your protocol.",
+ ],
+ [
+ "p20_single_gen2",
+ "The pipette p20_single_gen2 and its tip rack opentrons_flex_96_tiprack_200ul appear to be mismatched. Please check your protocol.",
+ ],
+ ],
+)
+def test_validate_tiprack(
+    ctx: ProtocolContext,
+    caplog: pytest.LogCaptureFixture,
+    pipette_name: str,
+    log_value: Optional[str],
+) -> None:
+ tip_rack = ctx.load_labware("opentrons_flex_96_tiprack_200ul", 2)
+
+ with caplog.at_level(logging.WARNING):
+ log = logging.getLogger(__name__)
+ validate_tiprack(pipette_name, tip_rack, log=log)
+
+ if log_value:
+ assert caplog.messages[0] == log_value
+ else:
+ assert caplog.records == []
diff --git a/api/tests/opentrons/protocols/duration/test_estimator.py b/api/tests/opentrons/protocols/duration/test_estimator.py
index 92614869641..594f1cfad57 100644
--- a/api/tests/opentrons/protocols/duration/test_estimator.py
+++ b/api/tests/opentrons/protocols/duration/test_estimator.py
@@ -3,7 +3,7 @@
import math
import pytest
-from opentrons.commands import types
+from opentrons.legacy_commands import types
from opentrons.protocol_api import InstrumentContext
from opentrons.protocols.duration.estimator import (
DurationEstimator,
diff --git a/api/tests/opentrons/protocols/execution/test_execute_python.py b/api/tests/opentrons/protocols/execution/test_execute_python.py
index fa84be9a7e7..cc8e7e9b306 100644
--- a/api/tests/opentrons/protocols/execution/test_execute_python.py
+++ b/api/tests/opentrons/protocols/execution/test_execute_python.py
@@ -35,6 +35,7 @@ def starargs(*args):
execute_python._runfunc_ok(starargs)
+@pytest.mark.ot2_only
@pytest.mark.parametrize("protocol_file", ["testosaur_v2.py"])
def test_execute_ok(protocol, protocol_file, ctx):
proto = parse(protocol.text, protocol.filename)
diff --git a/robot-server/session.pdf b/api/tests/opentrons/protocols/parameters/__init__.py
similarity index 100%
rename from robot-server/session.pdf
rename to api/tests/opentrons/protocols/parameters/__init__.py
diff --git a/api/tests/opentrons/protocols/parameters/test_parameter_definition.py b/api/tests/opentrons/protocols/parameters/test_parameter_definition.py
new file mode 100644
index 00000000000..556ce016672
--- /dev/null
+++ b/api/tests/opentrons/protocols/parameters/test_parameter_definition.py
@@ -0,0 +1,337 @@
+"""Tests for the Parameter Definitions."""
+import inspect
+
+import pytest
+from decoy import Decoy
+
+from opentrons.protocols.parameters import validation as mock_validation
+from opentrons.protocols.parameters.types import ParameterValueError
+from opentrons.protocols.parameters.parameter_definition import (
+ create_int_parameter,
+ create_float_parameter,
+ create_bool_parameter,
+ create_str_parameter,
+)
+
+from opentrons.protocol_engine.types import (
+ NumberParameter,
+ BooleanParameter,
+ EnumParameter,
+ EnumChoice,
+)
+
+
+@pytest.fixture(autouse=True)
+def _patch_parameter_validation(decoy: Decoy, monkeypatch: pytest.MonkeyPatch) -> None:
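+    """Replace each function in the validation module with a Decoy mock."""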
+ for name, func in inspect.getmembers(mock_validation, inspect.isfunction):
+ monkeypatch.setattr(mock_validation, name, decoy.mock(func=func))
+
+
+def test_create_int_parameter_min_and_max(decoy: Decoy) -> None:
+ """It should create an int parameter definition with a minimum and maximum."""
+ decoy.when(mock_validation.ensure_display_name("foo")).then_return("my cool name")
+ decoy.when(mock_validation.ensure_variable_name("bar")).then_return("my variable")
+ decoy.when(mock_validation.ensure_description("a b c")).then_return("1 2 3")
+ decoy.when(mock_validation.ensure_unit_string_length("test")).then_return("microns")
+
+ parameter_def = create_int_parameter(
+ display_name="foo",
+ variable_name="bar",
+ default=42,
+ minimum=1,
+ maximum=100,
+ description="a b c",
+ unit="test",
+ )
+
+ decoy.verify(
+ mock_validation.validate_options(42, 1, 100, None, int),
+ mock_validation.validate_type(42, int),
+ )
+
+ assert parameter_def._display_name == "my cool name"
+ assert parameter_def.variable_name == "my variable"
+ assert parameter_def._description == "1 2 3"
+ assert parameter_def._unit == "microns"
+ assert parameter_def._allowed_values is None
+ assert parameter_def._minimum == 1
+ assert parameter_def._maximum == 100
+ assert parameter_def.value == 42
+
+
+def test_create_int_parameter_choices(decoy: Decoy) -> None:
+ """It should create an int parameter definition with choices."""
+ decoy.when(mock_validation.ensure_display_name("foo")).then_return("my cool name")
+ decoy.when(mock_validation.ensure_variable_name("bar")).then_return("my variable")
+ decoy.when(mock_validation.ensure_description(None)).then_return("1 2 3")
+ decoy.when(mock_validation.ensure_unit_string_length(None)).then_return("microns")
+
+ parameter_def = create_int_parameter(
+ display_name="foo",
+ variable_name="bar",
+ default=42,
+ choices=[{"display_name": "uhh", "value": 42}],
+ )
+
+ decoy.verify(
+ mock_validation.validate_options(
+ 42, None, None, [{"display_name": "uhh", "value": 42}], int
+ ),
+ mock_validation.validate_type(42, int),
+ )
+
+ assert parameter_def._display_name == "my cool name"
+ assert parameter_def.variable_name == "my variable"
+ assert parameter_def._description == "1 2 3"
+ assert parameter_def._unit == "microns"
+ assert parameter_def._allowed_values == {42}
+ assert parameter_def._minimum is None
+ assert parameter_def._maximum is None
+ assert parameter_def.value == 42
+
+
+def test_int_parameter_default_raises_not_in_range() -> None:
+ """It should raise an error if the default is not between min or max"""
+ with pytest.raises(ParameterValueError, match="between"):
+ create_int_parameter(
+ display_name="foo",
+ variable_name="bar",
+ default=9000,
+ minimum=9001,
+ maximum=10000,
+ )
+
+
+def test_create_float_parameter_min_and_max(decoy: Decoy) -> None:
+ """It should create a float parameter definition with a minimum and maximum."""
+ decoy.when(mock_validation.ensure_display_name("foo")).then_return("my cool name")
+ decoy.when(mock_validation.ensure_variable_name("bar")).then_return("my variable")
+ decoy.when(mock_validation.ensure_description("a b c")).then_return("1 2 3")
+ decoy.when(mock_validation.ensure_unit_string_length("test")).then_return("microns")
+
+ parameter_def = create_float_parameter(
+ display_name="foo",
+ variable_name="bar",
+ default=4.2,
+ minimum=1.0,
+ maximum=10.5,
+ description="a b c",
+ unit="test",
+ )
+
+ decoy.verify(
+ mock_validation.validate_options(4.2, 1.0, 10.5, None, float),
+ mock_validation.validate_type(4.2, float),
+ )
+
+ assert parameter_def._display_name == "my cool name"
+ assert parameter_def.variable_name == "my variable"
+ assert parameter_def._description == "1 2 3"
+ assert parameter_def._unit == "microns"
+ assert parameter_def._allowed_values is None
+ assert parameter_def._minimum == 1.0
+ assert parameter_def._maximum == 10.5
+ assert parameter_def.value == 4.2
+
+
+def test_create_float_parameter_choices(decoy: Decoy) -> None:
+ """It should create a float parameter definition with choices."""
+ decoy.when(mock_validation.ensure_display_name("foo")).then_return("my cool name")
+ decoy.when(mock_validation.ensure_variable_name("bar")).then_return("my variable")
+
+ parameter_def = create_float_parameter(
+ display_name="foo",
+ variable_name="bar",
+ default=4.2,
+ choices=[{"display_name": "urr", "value": 4.2}],
+ )
+
+ decoy.verify(
+ mock_validation.validate_options(
+ 4.2, None, None, [{"display_name": "urr", "value": 4.2}], float
+ ),
+ mock_validation.validate_type(4.2, float),
+ )
+
+ assert parameter_def._display_name == "my cool name"
+ assert parameter_def.variable_name == "my variable"
+ assert parameter_def._allowed_values == {4.2}
+ assert parameter_def._minimum is None
+ assert parameter_def._maximum is None
+ assert parameter_def.value == 4.2
+
+
+def test_float_parameter_default_raises_not_in_range() -> None:
+ """It should raise an error if the default is not between min or max"""
+ with pytest.raises(ParameterValueError, match="between"):
+ create_float_parameter(
+ display_name="foo",
+ variable_name="bar",
+ default=9000.1,
+ minimum=1,
+ maximum=9000,
+ )
+
+
+def test_create_bool_parameter(decoy: Decoy) -> None:
+ """It should create a boolean parameter"""
+ decoy.when(mock_validation.ensure_display_name("foo")).then_return("my cool name")
+ decoy.when(mock_validation.ensure_variable_name("bar")).then_return("my variable")
+ decoy.when(mock_validation.ensure_description("describe this")).then_return("1 2 3")
+
+ parameter_def = create_bool_parameter(
+ display_name="foo",
+ variable_name="bar",
+ default=False,
+ choices=[{"display_name": "uhh", "value": False}],
+ description="describe this",
+ )
+
+ decoy.verify(
+ mock_validation.validate_options(
+ False, None, None, [{"display_name": "uhh", "value": False}], bool
+ ),
+ mock_validation.validate_type(False, bool),
+ )
+
+ assert parameter_def._display_name == "my cool name"
+ assert parameter_def.variable_name == "my variable"
+ assert parameter_def._description == "1 2 3"
+ assert parameter_def._unit is None
+ assert parameter_def._allowed_values == {False}
+ assert parameter_def._minimum is None
+ assert parameter_def._maximum is None
+ assert parameter_def.value is False
+
+
+def test_create_str_parameter(decoy: Decoy) -> None:
+ """It should create a string parameter"""
+ decoy.when(mock_validation.ensure_display_name("foo")).then_return("my cool name")
+ decoy.when(mock_validation.ensure_variable_name("bar")).then_return("my variable")
+ decoy.when(mock_validation.ensure_description("describe this")).then_return("1 2 3")
+
+ parameter_def = create_str_parameter(
+ display_name="foo",
+ variable_name="bar",
+ default="omega",
+ choices=[{"display_name": "alpha", "value": "omega"}],
+ description="describe this",
+ )
+
+ decoy.verify(
+ mock_validation.validate_options(
+ "omega", None, None, [{"display_name": "alpha", "value": "omega"}], str
+ ),
+ mock_validation.validate_type("omega", str),
+ )
+
+ assert parameter_def._display_name == "my cool name"
+ assert parameter_def.variable_name == "my variable"
+ assert parameter_def._description == "1 2 3"
+ assert parameter_def._unit is None
+ assert parameter_def._allowed_values == {"omega"}
+ assert parameter_def._minimum is None
+ assert parameter_def._maximum is None
+ assert parameter_def.value == "omega"
+
+
+def test_str_parameter_default_raises_not_in_allowed_values() -> None:
+ """It should raise an error if the default is not between min or max"""
+ with pytest.raises(ParameterValueError, match="allowed values"):
+ create_str_parameter(
+ display_name="foo",
+ variable_name="bar",
+ default="waldo",
+ choices=[{"display_name": "where's", "value": "odlaw"}],
+ )
+
+
+def test_as_protocol_engine_boolean_parameter(decoy: Decoy) -> None:
+ """It should return a protocol engine BooleanParameter model."""
+ decoy.when(mock_validation.ensure_display_name("foo")).then_return("my cool name")
+ decoy.when(mock_validation.ensure_variable_name("bar")).then_return("my variable")
+ decoy.when(mock_validation.ensure_description("describe this")).then_return("1 2 3")
+
+ parameter_def = create_bool_parameter(
+ display_name="foo",
+ variable_name="bar",
+ default=False,
+ choices=[{"display_name": "uhh", "value": False}],
+ description="describe this",
+ )
+
+ assert parameter_def.as_protocol_engine_type() == BooleanParameter(
+ type="bool",
+ displayName="my cool name",
+ variableName="my variable",
+ description="1 2 3",
+ value=False,
+ default=False,
+ )
+
+
+def test_as_protocol_engine_enum_parameter(decoy: Decoy) -> None:
+ """It should return a protocol engine EnumParameter model."""
+ decoy.when(mock_validation.ensure_display_name("foo")).then_return("my cool name")
+ decoy.when(mock_validation.ensure_variable_name("bar")).then_return("my variable")
+
+ parameter_def = create_str_parameter(
+ display_name="foo",
+ variable_name="bar",
+ default="red",
+ choices=[
+ {"display_name": "Lapis lazuli", "value": "blue"},
+ {"display_name": "Vermilion", "value": "red"},
+ {"display_name": "Celadon", "value": "green"},
+ ],
+ )
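+    # Override the current value and stub the type-string conversion to check
+    # that both are passed straight through to the EnumParameter model.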
+ parameter_def.value = "green"
+ decoy.when(mock_validation.convert_type_string_for_enum(str)).then_return("float")
+
+ assert parameter_def.as_protocol_engine_type() == EnumParameter(
+ type="float",
+ displayName="my cool name",
+ variableName="my variable",
+ choices=[
+ EnumChoice(displayName="Lapis lazuli", value="blue"),
+ EnumChoice(displayName="Vermilion", value="red"),
+ EnumChoice(displayName="Celadon", value="green"),
+ ],
+ value="green",
+ default="red",
+ )
+
+
+def test_as_protocol_engine_number_parameter(decoy: Decoy) -> None:
+ """It should return a protocol engine NumberParameter model."""
+ decoy.when(mock_validation.ensure_display_name("foo")).then_return("my cool name")
+ decoy.when(mock_validation.ensure_variable_name("bar")).then_return("my variable")
+ decoy.when(mock_validation.ensure_description("a b c")).then_return("1 2 3")
+ decoy.when(mock_validation.ensure_unit_string_length("test")).then_return("microns")
+
+ parameter_def = create_int_parameter(
+ display_name="foo",
+ variable_name="bar",
+ default=42,
+ minimum=1,
+ maximum=100,
+ description="a b c",
+ unit="test",
+ )
+
+ parameter_def.value = 60
+ decoy.when(mock_validation.convert_type_string_for_num_param(int)).then_return(
+ "int"
+ )
+
+ assert parameter_def.as_protocol_engine_type() == NumberParameter(
+ type="int",
+ displayName="my cool name",
+ variableName="my variable",
+ description="1 2 3",
+ suffix="microns",
+ min=1.0,
+ max=100.0,
+ value=60.0,
+ default=42.0,
+ )
diff --git a/api/tests/opentrons/protocols/parameters/test_validation.py b/api/tests/opentrons/protocols/parameters/test_validation.py
new file mode 100644
index 00000000000..0ff337eb91d
--- /dev/null
+++ b/api/tests/opentrons/protocols/parameters/test_validation.py
@@ -0,0 +1,305 @@
+import pytest
+from typing import Optional, List, Union
+
+from opentrons.protocols.parameters.types import (
+ AllowedTypes,
+ ParameterChoice,
+ ParameterNameError,
+ ParameterValueError,
+ ParameterDefinitionError,
+)
+
+from opentrons.protocols.parameters import validation as subject
+
+
+def test_validate_variable_name_unique() -> None:
+ """It should no-op if the name is unique or if it's not a string, and raise if it is not."""
+ subject.validate_variable_name_unique("one of a kind", {"fee", "foo", "fum"})
+ subject.validate_variable_name_unique({}, {"fee", "foo", "fum"}) # type: ignore[arg-type]
+ with pytest.raises(ParameterNameError):
+ subject.validate_variable_name_unique("copy", {"paste", "copy", "cut"})
+
+
+def test_ensure_display_name() -> None:
+ """It should ensure the display name is within the character limit."""
+ result = subject.ensure_display_name("abc")
+ assert result == "abc"
+
+
+def test_ensure_display_name_raises() -> None:
+ """It should raise if the display name is too long."""
+ with pytest.raises(ParameterNameError):
+ subject.ensure_display_name("Lorem ipsum dolor sit amet nam.")
+
+
+def test_ensure_description() -> None:
+    """It should ensure the description is within the character limit."""
+ result = subject.ensure_description("123456789")
+ assert result == "123456789"
+
+
+def test_ensure_description_raises() -> None:
+ """It should raise if the description is too long."""
+ with pytest.raises(ParameterNameError):
+ subject.ensure_description(
+ "Lorem ipsum dolor sit amet, consectetur adipiscing elit."
+ " Fusce eget elementum nunc, quis sodales sed."
+ )
+
+
+def test_ensure_unit_string_length() -> None:
+ """It should ensure the unit name is within the character limit."""
+ result = subject.ensure_unit_string_length("ul")
+ assert result == "ul"
+
+
+def test_ensure_unit_string_length_raises() -> None:
+ """It should raise if the unit name is too long."""
+ with pytest.raises(ParameterNameError):
+ subject.ensure_unit_string_length("newtons per square foot")
+
+
+@pytest.mark.parametrize(
+ "variable_name",
+ [
+ "x",
+ "my_cool_variable",
+ "_secret_variable",
+ ],
+)
+def test_ensure_variable_name(variable_name: str) -> None:
+ """It should ensure the variable name is a valid python variable name."""
+ result = subject.ensure_variable_name(variable_name)
+ assert result == variable_name
+
+
+@pytest.mark.parametrize(
+ "variable_name",
+ [
+ "3d_vector",
+ "my cool variable name",
+ "ca$h_money",
+ ],
+)
+def test_ensure_variable_name_raises(variable_name: str) -> None:
+ """It should raise if the variable name is not valid."""
+ with pytest.raises(ParameterNameError, match="underscore"):
+ subject.ensure_variable_name(variable_name)
+
+
+@pytest.mark.parametrize(
+ "variable_name",
+ [
+ "def",
+ "class",
+ "lambda",
+ ],
+)
+def test_ensure_variable_name_raises_keyword(variable_name: str) -> None:
+ """It should raise if the variable name is a python keyword."""
+ with pytest.raises(ParameterNameError, match="keyword"):
+ subject.ensure_variable_name(variable_name)
+
+
+def test_validate_options() -> None:
+ """It should not raise when given valid constraints"""
+ subject.validate_options(123, 1, 100, None, int)
+ subject.validate_options(123, 100, 100, None, int)
+ subject.validate_options(
+ 123, None, None, [{"display_name": "abc", "value": 456}], int
+ )
+ subject.validate_options(12.3, 1.1, 100.9, None, float)
+ subject.validate_options(12.3, 1.1, 1.1, None, float)
+ subject.validate_options(
+ 12.3, None, None, [{"display_name": "abc", "value": 45.6}], float
+ )
+ subject.validate_options(
+ True, None, None, [{"display_name": "abc", "value": False}], bool
+ )
+ subject.validate_options(
+ "x", None, None, [{"display_name": "abc", "value": "y"}], str
+ )
+
+
+def test_validate_options_raises_value_error() -> None:
+ """It should raise if the value of the default does not match the type."""
+ with pytest.raises(ParameterValueError):
+ subject.validate_options(123, 1, 100, None, str)
+
+
+def test_validate_options_raises_name_error() -> None:
+ """It should raise if the display name of a choice is too long."""
+ with pytest.raises(ParameterNameError):
+ subject.validate_options(
+ "foo",
+ None,
+ None,
+ [{"display_name": "Lorem ipsum dolor sit amet nam.", "value": "a"}],
+ str,
+ )
+
+
+@pytest.mark.parametrize(
+ ["value", "param_type", "result"],
+ [
+ (1.0, int, 1),
+ (1.1, int, 1.1),
+ (2, float, 2.0),
+ (2.0, float, 2.0),
+ (2.2, float, 2.2),
+ ("3.0", str, "3.0"),
+ (0.0, bool, False),
+ (1, bool, True),
+ (3.0, bool, 3.0),
+ (True, bool, True),
+ ],
+)
+def test_ensure_value_type(
+ value: Union[float, bool, str], param_type: type, result: AllowedTypes
+) -> None:
+ """It should ensure that if applicable, the value is coerced into the expected type"""
+ assert result == subject.ensure_value_type(value, param_type)
+
+
+@pytest.mark.parametrize(
+ ["value", "result"],
+ [
+ (1, 1.0),
+ (2.0, 2.0),
+ (3.3, 3.3),
+ ],
+)
+def test_ensure_float_value(value: Union[float, int], result: float) -> None:
+ """It should ensure that if applicable, the value is coerced into a float."""
+ assert result == subject.ensure_float_value(value)
+
+
+@pytest.mark.parametrize(
+ ["value", "result"],
+ [
+ (1, 1.0),
+ (2.0, 2.0),
+ (3.3, 3.3),
+ (None, None),
+ ],
+)
+def test_ensure_optional_float_value(
+    value: Optional[Union[float, int]], result: Optional[float]
+) -> None:
+    """It should coerce the value into a float if applicable, or pass None through."""
+ assert result == subject.ensure_optional_float_value(value)
+
+
+@pytest.mark.parametrize(
+ ["choices", "result"],
+ [
+ ([], []),
+ (None, None),
+ (
+ [{"display_name": "foo", "value": 1}],
+ [{"display_name": "foo", "value": 1.0}],
+ ),
+ (
+ [{"display_name": "foo", "value": 2.0}],
+ [{"display_name": "foo", "value": 2.0}],
+ ),
+ (
+ [{"display_name": "foo", "value": 3.3}],
+ [{"display_name": "foo", "value": 3.3}],
+ ),
+ (
+ [{"display_name": "foo", "value": "4"}],
+ [{"display_name": "foo", "value": "4"}],
+ ),
+ (
+ [{"display_name": "foo", "value": True}],
+ [{"display_name": "foo", "value": True}],
+ ),
+ ],
+)
+def test_ensure_float_choices(
+ choices: Optional[List[ParameterChoice]], result: Optional[List[ParameterChoice]]
+) -> None:
+ """It should ensure that if applicable, the value in a choice is coerced into a float."""
+ assert result == subject.ensure_float_choices(choices)
+
+
+@pytest.mark.parametrize(
+ ["param_type", "result"],
+ [(int, "int"), (float, "float"), (str, "str")],
+)
+def test_convert_type_string_for_enum(param_type: type, result: str) -> None:
+ """It should convert the type into a string for the EnumParameter model."""
+ assert result == subject.convert_type_string_for_enum(param_type)
+
+
+def test_convert_type_string_for_enum_raises() -> None:
+ """It should raise if given a bool to convert to an enum type string."""
+ with pytest.raises(ParameterValueError):
+ subject.convert_type_string_for_enum(bool)
+
+
+@pytest.mark.parametrize(["param_type", "result"], [(int, "int"), (float, "float")])
+def test_convert_type_string_for_num_param(param_type: type, result: str) -> None:
+ """It should convert the type into a string for the NumberParameter model."""
+ assert result == subject.convert_type_string_for_num_param(param_type)
+
+
+@pytest.mark.parametrize(
+ "param_type",
+ [str, bool],
+)
+def test_convert_type_string_for_num_param_raises(param_type: type) -> None:
+ """It should raise if given a bool or str to convert to a number type string."""
+ with pytest.raises(ParameterValueError):
+ subject.convert_type_string_for_num_param(param_type)
+
+
+@pytest.mark.parametrize(
+ ["default", "minimum", "maximum", "choices", "parameter_type", "error_text"],
+ [
+ (123, None, None, None, int, "provide either"),
+ (
+ 123,
+ 1,
+ None,
+ [{"display_name": "abc", "value": 123}],
+ int,
+ "maximum values cannot",
+ ),
+ (
+ 123,
+ None,
+ 100,
+ [{"display_name": "abc", "value": 123}],
+ int,
+ "maximum values cannot",
+ ),
+ (123, None, None, [{"display_name": "abc"}], int, "dictionary with keys"),
+ (123, None, None, [{"value": 123}], int, "dictionary with keys"),
+ (
+ 123,
+ None,
+ None,
+ [{"display_name": "abc", "value": "123"}],
+ int,
+ "must be of type",
+ ),
+ (123, 1, None, None, int, "maximum must also"),
+ (123, None, 100, None, int, "minimum must also"),
+ (123, 100, 1, None, int, "Maximum must be greater"),
+ (123, 1.1, 100, None, int, "Minimum is type"),
+ (123, 1, 100.5, None, int, "Maximum is type"),
+ (123.0, "1.0", 100.0, None, float, "Minimum is type"),
+ ("blah", 1, 100, None, str, "Only parameters of type float or int"),
+ ],
+)
+def test_validate_options_raise_definition_error(
+ default: AllowedTypes,
+ minimum: Optional[AllowedTypes],
+ maximum: Optional[AllowedTypes],
+ choices: Optional[List[ParameterChoice]],
+ parameter_type: type,
+ error_text: str,
+) -> None:
+ """It should raise if the parameter definition constraints are not valid."""
+ with pytest.raises(ParameterDefinitionError, match=error_text):
+ subject.validate_options(default, minimum, maximum, choices, parameter_type)
diff --git a/api/tests/opentrons/protocols/test_parse.py b/api/tests/opentrons/protocols/test_parse.py
index cc86621601a..11a39507238 100644
--- a/api/tests/opentrons/protocols/test_parse.py
+++ b/api/tests/opentrons/protocols/test_parse.py
@@ -1,6 +1,6 @@
import json
from textwrap import dedent
-from typing import Any, Callable, Optional, Union
+from typing import Any, Callable, Optional, Union, Literal
import pytest
from opentrons_shared_data.robot.dev_types import RobotType
@@ -407,7 +407,7 @@ def run(ctx): pass
@pytest.mark.parametrize("filename", ["protocol.py", None])
def test_parse_python_details(
protocol_source: str,
- protocol_text_kind: str,
+ protocol_text_kind: Literal["str", "bytes"],
filename: Optional[str],
expected_api_level: APIVersion,
expected_robot_type: RobotType,
@@ -423,8 +423,11 @@ def test_parse_python_details(
parsed = parse(text, filename)
assert isinstance(parsed, PythonProtocol)
- assert parsed.text == protocol_source
- assert isinstance(parsed.text, str)
+ assert parsed.text == text
+ if protocol_text_kind == "str":
+ assert isinstance(parsed.text, str)
+ else:
+ assert isinstance(parsed.text, bytes)
assert parsed.filename == filename
assert parsed.contents.co_filename == (
@@ -454,13 +457,13 @@ def test_parse_json_details(
get_json_protocol_fixture: Callable[..., Any],
fixture_version: str,
fixture_name: str,
- protocol_text_kind: str,
+ protocol_text_kind: Literal["str", "bytes"],
filename: str,
) -> None:
protocol = get_json_protocol_fixture(
fixture_version=fixture_version, fixture_name=fixture_name, decode=False
)
- if protocol_text_kind == "text":
+ if protocol_text_kind == "str":
protocol_text: Union[bytes, str] = protocol
else:
protocol_text = protocol.encode("utf-8")
diff --git a/api/tests/opentrons/system/test_wifi.py b/api/tests/opentrons/system/test_wifi.py
index b6b3a994d68..da807cb2bdb 100644
--- a/api/tests/opentrons/system/test_wifi.py
+++ b/api/tests/opentrons/system/test_wifi.py
@@ -190,16 +190,16 @@ async def test_key_lifecycle(wifi_keys_tempdir):
f.write(str(random.getrandbits(2048)))
# TODO(mc, 2021-09-12): use pathlib
- with open(path, "rb") as f: # type: ignore[assignment]
- add_response = wifi.add_key(fn, f.read()) # type: ignore[arg-type]
+ with open(path, "rb") as f:
+ add_response = wifi.add_key(fn, f.read())
assert add_response.created is True
assert add_response.key.file == fn
results[fn] = add_response
# We should not be able to upload a duplicate
# TODO(mc, 2021-09-12): use pathlib
- with open(os.path.join(source_td, "test1.pem"), "rb") as f: # type: ignore[assignment]
- add_response = wifi.add_key("test1.pem", f.read()) # type: ignore[arg-type]
+ with open(os.path.join(source_td, "test1.pem"), "rb") as f:
+ add_response = wifi.add_key("test1.pem", f.read())
assert add_response.created is False
# We should be able to see them all
diff --git a/api/tests/opentrons/test_execute.py b/api/tests/opentrons/test_execute.py
index 71dd37a0bea..77563083337 100644
--- a/api/tests/opentrons/test_execute.py
+++ b/api/tests/opentrons/test_execute.py
@@ -9,6 +9,7 @@
from typing import TYPE_CHECKING, Any, Callable, Generator, List, TextIO, cast
import pytest
+from _pytest.fixtures import SubRequest
from opentrons_shared_data import get_shared_data_root, load_shared_data
from opentrons_shared_data.pipette.dev_types import PipetteModel
@@ -31,13 +32,13 @@
@pytest.fixture(params=[APIVersion(2, 0), ENGINE_CORE_API_VERSION])
-def api_version(request: pytest.FixtureRequest) -> APIVersion:
+def api_version(request: SubRequest) -> APIVersion:
"""Return an API version to test with.
Newer API versions execute through Protocol Engine, and older API versions don't.
The two codepaths are very different, so we need to test them both.
"""
- return request.param # type: ignore[attr-defined,no-any-return]
+ return cast(APIVersion, request.param)
@pytest.fixture
diff --git a/api/tests/opentrons/test_legacy_broker.py b/api/tests/opentrons/test_legacy_broker.py
index 2351f73e348..719fe43052d 100644
--- a/api/tests/opentrons/test_legacy_broker.py
+++ b/api/tests/opentrons/test_legacy_broker.py
@@ -2,8 +2,8 @@
from typing import List, NamedTuple, cast
-from opentrons.commands.types import CommandMessage
-from opentrons.commands.publisher import CommandPublisher, publish
+from opentrons.legacy_commands.types import CommandMessage
+from opentrons.legacy_commands.publisher import CommandPublisher, publish
def _my_command(arg1: int, arg2: str = "", arg3: str = "") -> CommandMessage:
diff --git a/api/tests/opentrons/test_ordered_set.py b/api/tests/opentrons/test_ordered_set.py
index 0bb620f92be..b5ce910f314 100644
--- a/api/tests/opentrons/test_ordered_set.py
+++ b/api/tests/opentrons/test_ordered_set.py
@@ -126,14 +126,13 @@ def test_clear() -> None:
def test_head() -> None:
"""It should return the head of the set."""
subject = OrderedSet([1, 2])
-
assert subject.head() == 1
- subject.remove(1)
+ subject.remove(1)
assert subject.head() == 2
- subject.remove(2)
- with pytest.raises(IndexError):
+ subject.remove(2)
+ with pytest.raises(IndexError, match="Set is empty"):
subject.head()
assert subject.head(default_value=42) == 42
@@ -145,3 +144,13 @@ def test_difference() -> None:
b = {1, 9}
assert (a - OrderedSet(b)) == (a - b) == OrderedSet([3, 4, 5, 2, 6, 5, 3, 5, 8, 7])
+
+
+def test_repr() -> None:
+ """It should return a meaningful repr string."""
+ subject = OrderedSet([1, 2, 3])
+ result = repr(subject)
+ assert "OrderedSet" in result
+ assert "1" in result
+ assert "2" in result
+ assert "3" in result
diff --git a/api/tests/opentrons/test_simulate.py b/api/tests/opentrons/test_simulate.py
index 598d46bba0e..6750bf850b0 100644
--- a/api/tests/opentrons/test_simulate.py
+++ b/api/tests/opentrons/test_simulate.py
@@ -8,6 +8,7 @@
from typing import TYPE_CHECKING, Callable, Generator, List, TextIO, cast
import pytest
+from _pytest.fixtures import SubRequest
from opentrons_shared_data import get_shared_data_root, load_shared_data
@@ -26,13 +27,13 @@
@pytest.fixture(params=[APIVersion(2, 0), ENGINE_CORE_API_VERSION])
-def api_version(request: pytest.FixtureRequest) -> APIVersion:
+def api_version(request: SubRequest) -> APIVersion:
"""Return an API version to test with.
Newer API versions execute through Protocol Engine, and older API versions don't.
The two codepaths are very different, so we need to test them both.
"""
- return request.param # type: ignore[attr-defined,no-any-return]
+ return cast(APIVersion, request.param)
@pytest.mark.parametrize(
@@ -89,6 +90,13 @@ def test_simulate_without_filename(protocol: Protocol, protocol_file: str) -> No
"Dropping tip into H12 of Opentrons OT-2 96 Tip Rack 1000 µL on slot 1",
],
),
+ (
+ "ot2_drop_tip.py",
+ [
+ "Picking up tip from A1 of Opentrons OT-2 96 Tip Rack 300 µL on slot 5",
+ "Dropping tip into Trash Bin on slot 12",
+ ],
+ ),
],
)
def test_simulate_function_apiv2_run_log(
diff --git a/api/tests/opentrons/util/test_async_helpers.py b/api/tests/opentrons/util/test_async_helpers.py
index 14f9e1a0436..d33293eb75e 100644
--- a/api/tests/opentrons/util/test_async_helpers.py
+++ b/api/tests/opentrons/util/test_async_helpers.py
@@ -85,7 +85,7 @@ async def __aexit__(
).result()
# The loop should be closed and unusable now that the context manager has exited.
- assert loop_in_thread.is_closed
+ assert loop_in_thread.is_closed()
with pytest.raises(RuntimeError, match="Event loop is closed"):
loop_in_thread.call_soon_threadsafe(lambda: None)
diff --git a/api/tests/opentrons/util/test_linal.py b/api/tests/opentrons/util/test_linal.py
index fa90bfe9a90..5c186d20903 100755
--- a/api/tests/opentrons/util/test_linal.py
+++ b/api/tests/opentrons/util/test_linal.py
@@ -2,6 +2,7 @@
from opentrons.util.linal import solve, add_z, apply_transform, solve_attitude
from numpy.linalg import inv
import numpy as np
+from numpy.typing import NDArray
def test_solve() -> None:
@@ -22,10 +23,10 @@ def test_solve() -> None:
X = solve(expected, actual)
- expected2 = np.array(
+ expected2: NDArray[np.double] = np.array(
[cos(theta + pi / 2) * scale + 0.5, sin(theta + pi / 2) * scale + 0.25, 1]
)
- result = np.dot(X, np.array([[0], [1], [1]])).transpose() # type: ignore[no-untyped-call]
+ result = np.dot(X, np.array([[0], [1], [1]])).transpose()
assert np.isclose(expected2, result).all()
@@ -35,9 +36,11 @@ def test_add_z() -> None:
y = 10
z = 20
- xy_array = np.array([[1, 0, x], [0, 1, y], [0, 0, 1]])
+ xy_array: NDArray[np.double] = np.array([[1, 0, x], [0, 1, y], [0, 0, 1]])
- expected = np.array([[1, 0, 0, x], [0, 1, 0, y], [0, 0, 1, z], [0, 0, 0, 1]])
+ expected: NDArray[np.double] = np.array(
+ [[1, 0, 0, x], [0, 1, 0, y], [0, 0, 1, z], [0, 0, 0, 1]]
+ )
result = add_z(xy_array, z)
assert (result == expected).all()
@@ -61,5 +64,5 @@ def test_apply_transform() -> None:
expected = (round(x - x_delta, 2), round(y - y_delta, 2), round(z))
- result = apply_transform(inv(transform), (1, 2, 3)) # type: ignore[no-untyped-call]
+ result = apply_transform(inv(transform), (1, 2, 3))
assert np.isclose(result, expected, atol=0.1).all()
diff --git a/api/tests/opentrons/util/test_performance_helpers.py b/api/tests/opentrons/util/test_performance_helpers.py
new file mode 100644
index 00000000000..57a42ef6a71
--- /dev/null
+++ b/api/tests/opentrons/util/test_performance_helpers.py
@@ -0,0 +1,28 @@
+"""Tests for performance_helpers."""
+
+from pathlib import Path
+from opentrons_shared_data.performance.dev_types import RobotContextState
+from opentrons.util.performance_helpers import (
+ StubbedTracker,
+ _get_robot_context_tracker,
+)
+
+
+def test_return_function_unchanged() -> None:
+ """Test that the function is returned unchanged when using StubbedTracker."""
+ tracker = StubbedTracker(Path("/path/to/storage"), True)
+
+ def func_to_track() -> None:
+ pass
+
+ assert (
+ tracker.track(RobotContextState.ANALYZING_PROTOCOL)(func_to_track)
+ is func_to_track
+ )
+
+
+def test_singleton_tracker() -> None:
+ """Test that the tracker is a singleton."""
+ tracker = _get_robot_context_tracker()
+ tracker2 = _get_robot_context_tracker()
+ assert tracker is tracker2
diff --git a/app-shell-odd/Makefile b/app-shell-odd/Makefile
index 629a6b63478..543ed2de95f 100644
--- a/app-shell-odd/Makefile
+++ b/app-shell-odd/Makefile
@@ -9,7 +9,7 @@ SHELL := bash
PATH := $(shell cd .. && yarn bin):$(PATH)
# dev server port
-PORT ?= 8090
+PORT ?= 5173
# dep directories for production build
# TODO(mc, 2018-08-07): figure out a better way to do this
@@ -24,7 +24,7 @@ ssh_opts ?= $(default_ssh_opts)
builder := yarn electron-builder \
--config electron-builder.config.js \
--publish never
-
+
electron := yarn electron . \
--devtools \
--log.level.console="debug" \
@@ -56,7 +56,7 @@ clean:
.PHONY: lib
lib: export NODE_ENV := production
lib:
- OPENTRONS_PROJECT=$(OPENTRONS_PROJECT) webpack --profile
+ OPENTRONS_PROJECT=$(OPENTRONS_PROJECT) vite build
.PHONY: deps
deps:
@@ -83,7 +83,7 @@ push-ot3: dist-ot3
.PHONY: dev
dev: export NODE_ENV := development
dev:
- webpack
+ vite build
$(electron)
.PHONY: test
diff --git a/app-shell-odd/electron-builder.config.js b/app-shell-odd/electron-builder.config.js
index 491e9ddcba7..d5cd4ac7eea 100644
--- a/app-shell-odd/electron-builder.config.js
+++ b/app-shell-odd/electron-builder.config.js
@@ -2,7 +2,7 @@
module.exports = {
appId: 'com.opentrons.odd',
- electronVersion: '21.3.1',
+ electronVersion: '27.0.0',
npmRebuild: false,
files: [
'**/*',
diff --git a/app-shell-odd/package.json b/app-shell-odd/package.json
index bf0191b8c48..e080060ca7c 100644
--- a/app-shell-odd/package.json
+++ b/app-shell-odd/package.json
@@ -29,11 +29,11 @@
]
},
"devDependencies": {
- "@opentrons/app": "link:../app",
- "@opentrons/discovery-client": "link:../discovery-client",
- "@opentrons/shared-data": "link:../shared-data"
+ "@opentrons/app": "link:../app"
},
"dependencies": {
+ "@opentrons/discovery-client": "link:../discovery-client",
+ "@opentrons/shared-data": "link:../shared-data",
"@thi.ng/paths": "1.6.5",
"@types/dateformat": "^3.0.1",
"@types/fs-extra": "9.0.13",
@@ -42,7 +42,6 @@
"@types/uuid": "^3.4.7",
"ajv": "6.12.3",
"dateformat": "3.0.3",
- "electron-debug": "3.0.1",
"electron-devtools-installer": "3.2.0",
"electron-store": "5.1.1",
"electron-updater": "4.1.2",
@@ -52,6 +51,7 @@
"get-stream": "5.1.0",
"lodash": "4.17.21",
"merge-options": "1.0.1",
+ "mqtt": "4.3.8",
"node-fetch": "2.6.7",
"node-stream-zip": "1.8.2",
"pump": "3.0.0",
diff --git a/app-shell-odd/src/__mocks__/log.ts b/app-shell-odd/src/__mocks__/log.ts
index eb498dd5963..7b3cdc8dcfe 100644
--- a/app-shell-odd/src/__mocks__/log.ts
+++ b/app-shell-odd/src/__mocks__/log.ts
@@ -1,4 +1,3 @@
// mock logger
// NOTE: importing mock to avoid copy-paste
-// eslint-disable-next-line jest/no-mocks-import
export * from '@opentrons/app/src/__mocks__/logger'
diff --git a/app-shell-odd/src/__tests__/discovery.test.ts b/app-shell-odd/src/__tests__/discovery.test.ts
index 77b2f26957d..ea7d1f0f51a 100644
--- a/app-shell-odd/src/__tests__/discovery.test.ts
+++ b/app-shell-odd/src/__tests__/discovery.test.ts
@@ -1,81 +1,84 @@
// tests for the app-shell's discovery module
import { app } from 'electron'
import Store from 'electron-store'
-import { when } from 'jest-when'
+import { vi, it, expect, describe, beforeEach, afterEach } from 'vitest'
import * as DiscoveryClient from '@opentrons/discovery-client'
-import {
- startDiscovery,
- finishDiscovery,
-} from '@opentrons/app/src/redux/discovery'
+import { startDiscovery, finishDiscovery } from '../actions'
import { registerDiscovery } from '../discovery'
import * as Cfg from '../config'
-jest.mock('electron')
-jest.mock('electron-store')
-jest.mock('@opentrons/discovery-client')
-jest.mock('../config')
-
-const createDiscoveryClient = DiscoveryClient.createDiscoveryClient as jest.MockedFunction<
- typeof DiscoveryClient.createDiscoveryClient
->
-
-const getFullConfig = Cfg.getFullConfig as jest.MockedFunction<
- typeof Cfg.getFullConfig
->
-
-const getOverrides = Cfg.getOverrides as jest.MockedFunction<
- typeof Cfg.getOverrides
->
-
-const handleConfigChange = Cfg.handleConfigChange as jest.MockedFunction<
- typeof Cfg.handleConfigChange
->
-
-const appOnce = app.once as jest.MockedFunction<typeof app.once>
-
-const MockStore = Store as jest.MockedClass<typeof Store>
+vi.mock('electron')
+vi.mock('electron-store')
+vi.mock('../usb')
+vi.mock('@opentrons/discovery-client')
+vi.mock('../config')
+vi.mock('../system-info')
+vi.mock('../log', () => {
+ return {
+ createLogger: () => {
+ return { debug: () => null }
+ },
+ }
+})
+let mockGet = vi.fn(property => {
+ return []
+})
+let mockOnDidChange = vi.fn()
+let mockDelete = vi.fn()
+let mockSet = vi.fn()
describe('app-shell/discovery', () => {
- const dispatch = jest.fn()
+ const dispatch = vi.fn()
const mockClient = {
- start: jest.fn(),
- stop: jest.fn(),
- getRobots: jest.fn(),
- removeRobot: jest.fn(),
+ start: vi.fn(),
+ stop: vi.fn(),
+ getRobots: vi.fn(),
+ removeRobot: vi.fn(),
}
const emitListChange = (): void => {
- const lastCall =
- createDiscoveryClient.mock.calls[
- createDiscoveryClient.mock.calls.length - 1
- ]
+ const lastCall = vi.mocked(DiscoveryClient.createDiscoveryClient).mock
+ .calls[
+ vi.mocked(DiscoveryClient.createDiscoveryClient).mock.calls.length - 1
+ ]
const { onListChange } = lastCall[0]
onListChange([])
}
beforeEach(() => {
- getFullConfig.mockReturnValue(({
+ mockGet = vi.fn(property => {
+ return []
+ })
+ mockDelete = vi.fn()
+ mockOnDidChange = vi.fn()
+ mockSet = vi.fn()
+ vi.mocked(Store).mockImplementation(() => {
+ return {
+ get: mockGet,
+ set: mockSet,
+ delete: mockDelete,
+ onDidAnyChange: mockOnDidChange,
+ } as any
+ })
+ vi.mocked(Cfg.getFullConfig).mockReturnValue(({
discovery: { disableCache: false, candidates: [] },
} as unknown) as Cfg.Config)
- getOverrides.mockReturnValue({})
- createDiscoveryClient.mockReturnValue(mockClient)
-
- when(MockStore.prototype.get).calledWith('robots', []).mockReturnValue([])
- when(MockStore.prototype.get)
- .calledWith('services', null)
- .mockReturnValue(null)
+ vi.mocked(Cfg.getOverrides).mockReturnValue({})
+ vi.mocked(DiscoveryClient.createDiscoveryClient).mockReturnValue(mockClient)
})
afterEach(() => {
- jest.resetAllMocks()
+ vi.resetAllMocks()
})
it('registerDiscovery creates a DiscoveryClient', () => {
registerDiscovery(dispatch)
- expect(createDiscoveryClient).toHaveBeenCalledWith(
+ expect(
+ vi.mocked(DiscoveryClient.createDiscoveryClient)
+ ).toHaveBeenCalledWith(
expect.objectContaining({
onListChange: expect.any(Function),
})
@@ -95,14 +98,14 @@ describe('app-shell/discovery', () => {
})
it('calls client.stop when electron app emits "will-quit"', () => {
- expect(appOnce).toHaveBeenCalledTimes(0)
+ expect(vi.mocked(app.once)).toHaveBeenCalledTimes(0)
registerDiscovery(dispatch)
expect(mockClient.stop).toHaveBeenCalledTimes(0)
- expect(appOnce).toHaveBeenCalledTimes(1)
+ expect(vi.mocked(app.once)).toHaveBeenCalledTimes(1)
- const [event, handler] = appOnce.mock.calls[0]
+ const [event, handler] = vi.mocked(app.once).mock.calls[0]
expect(event).toEqual('will-quit')
// trigger event handler
@@ -168,7 +171,7 @@ describe('app-shell/discovery', () => {
mockClient.getRobots.mockReturnValue([{ name: 'foo' }, { name: 'bar' }])
emitListChange()
- expect(MockStore.prototype.set).toHaveBeenLastCalledWith('robots', [
+ expect(vi.mocked(mockSet)).toHaveBeenLastCalledWith('robots', [
{ name: 'foo' },
{ name: 'bar' },
])
@@ -177,9 +180,9 @@ describe('app-shell/discovery', () => {
it('loads robots from cache on client initialization', () => {
const mockRobot = { name: 'foo' }
- MockStore.prototype.get.mockImplementation(key => {
+ vi.mocked(mockGet).mockImplementation((key: string) => {
if (key === 'robots') return [mockRobot]
- return null
+ return null as any
})
registerDiscovery(dispatch)
@@ -263,13 +266,13 @@ describe('app-shell/discovery', () => {
},
]
- MockStore.prototype.get.mockImplementation(key => {
+ vi.mocked(mockGet).mockImplementation((key: string) => {
if (key === 'services') return services
- return null
+ return null as any
})
registerDiscovery(dispatch)
- expect(MockStore.prototype.delete).toHaveBeenCalledWith('services')
+ expect(mockDelete).toHaveBeenCalledWith('services')
expect(mockClient.start).toHaveBeenCalledWith(
expect.objectContaining({
initialRobots: [
@@ -339,7 +342,7 @@ describe('app-shell/discovery', () => {
it('does not update services from store when caching disabled', () => {
// cache has been disabled
- getFullConfig.mockReturnValue(({
+ vi.mocked(Cfg.getFullConfig).mockReturnValue(({
discovery: {
candidates: [],
disableCache: true,
@@ -347,9 +350,9 @@ describe('app-shell/discovery', () => {
} as unknown) as Cfg.Config)
// discovery.json contains 1 entry
- MockStore.prototype.get.mockImplementation(key => {
+ mockGet.mockImplementation((key: string) => {
if (key === 'robots') return [{ name: 'foo' }]
- return null
+ return null as any
})
registerDiscovery(dispatch)
@@ -364,7 +367,7 @@ describe('app-shell/discovery', () => {
it('should clear cache and suspend caching when caching becomes disabled', () => {
// Cache enabled initially
- getFullConfig.mockReturnValue(({
+ vi.mocked(Cfg.getFullConfig).mockReturnValue(({
discovery: {
candidates: [],
disableCache: false,
@@ -372,33 +375,33 @@ describe('app-shell/discovery', () => {
} as unknown) as Cfg.Config)
// discovery.json contains 1 entry
- MockStore.prototype.get.mockImplementation(key => {
+ mockGet.mockImplementation((key: string) => {
if (key === 'robots') return [{ name: 'foo' }]
- return null
+ return null as any
})
registerDiscovery(dispatch)
// the 'discovery.disableCache' change handler
- const changeHandler = handleConfigChange.mock.calls[1][1]
+ const changeHandler = vi.mocked(Cfg.handleConfigChange).mock.calls[1][1]
const disableCache = true
changeHandler(disableCache, false)
- expect(MockStore.prototype.set).toHaveBeenCalledWith('robots', [])
+ expect(mockSet).toHaveBeenCalledWith('robots', [])
// new services discovered
- MockStore.prototype.set.mockClear()
+ mockSet.mockClear()
mockClient.getRobots.mockReturnValue([{ name: 'foo' }, { name: 'bar' }])
emitListChange()
// but discovery.json should not update
- expect(MockStore.prototype.set).toHaveBeenCalledTimes(0)
+ expect(mockSet).toHaveBeenCalledTimes(0)
})
})
describe('manual addresses', () => {
it('loads candidates from config on client initialization', () => {
- getFullConfig.mockReturnValue(({
+ vi.mocked(Cfg.getFullConfig).mockReturnValue(({
discovery: { cacheDisabled: false, candidates: ['1.2.3.4'] },
} as unknown) as Cfg.Config)
@@ -415,7 +418,7 @@ describe('app-shell/discovery', () => {
// ensures config override works with only one candidate specified
it('candidates in config can be single string value', () => {
- getFullConfig.mockReturnValue(({
+ vi.mocked(Cfg.getFullConfig).mockReturnValue(({
discovery: { cacheDisabled: false, candidates: '1.2.3.4' },
} as unknown) as Cfg.Config)
diff --git a/app-shell-odd/src/__tests__/http.test.ts b/app-shell-odd/src/__tests__/http.test.ts
index 3016a66b6f9..7b2c72578c0 100644
--- a/app-shell-odd/src/__tests__/http.test.ts
+++ b/app-shell-odd/src/__tests__/http.test.ts
@@ -1,19 +1,18 @@
import fetch from 'node-fetch'
import isError from 'lodash/isError'
+import { describe, it, vi, expect, beforeEach } from 'vitest'
-import { HTTP_API_VERSION } from '@opentrons/app/src/redux/robot-api/constants'
+import { HTTP_API_VERSION } from '../constants'
import * as Http from '../http'
import type { Request, Response } from 'node-fetch'
-jest.mock('../config')
-jest.mock('node-fetch')
-
-const mockFetch = fetch as jest.MockedFunction<typeof fetch>
+vi.mock('../config')
+vi.mock('node-fetch')
describe('app-shell main http module', () => {
beforeEach(() => {
- jest.clearAllMocks()
+ vi.clearAllMocks()
})
const SUCCESS_SPECS = [
@@ -84,12 +83,12 @@ describe('app-shell main http module', () => {
const { name, method, request, requestOptions, response, expected } = spec
it(`it should handle when ${name}`, () => {
- mockFetch.mockResolvedValueOnce((response as unknown) as Response)
+ vi.mocked(fetch).mockResolvedValueOnce((response as unknown) as Response)
// @ts-expect-error(mc, 2021-02-17): rewrite as integration tests and
// avoid mocking node-fetch
return method((request as unknown) as Request).then((result: string) => {
- expect(mockFetch).toHaveBeenCalledWith(request, requestOptions)
+ expect(vi.mocked(fetch)).toHaveBeenCalledWith(request, requestOptions)
expect(result).toEqual(expected)
})
})
@@ -100,9 +99,11 @@ describe('app-shell main http module', () => {
it(`it should handle when ${name}`, () => {
if (isError(response)) {
- mockFetch.mockRejectedValueOnce(response)
+ vi.mocked(fetch).mockRejectedValueOnce(response)
} else {
- mockFetch.mockResolvedValueOnce((response as unknown) as Response)
+ vi.mocked(fetch).mockResolvedValueOnce(
+ (response as unknown) as Response
+ )
}
return expect(method((request as unknown) as Request)).rejects.toThrow(
diff --git a/app-shell-odd/src/__tests__/update.test.ts b/app-shell-odd/src/__tests__/update.test.ts
index ffa8f3e6742..26adb67684b 100644
--- a/app-shell-odd/src/__tests__/update.test.ts
+++ b/app-shell-odd/src/__tests__/update.test.ts
@@ -1,50 +1,47 @@
// app-shell self-update tests
-import { when, resetAllWhenMocks } from 'jest-when'
+import { when } from 'vitest-when'
+import { describe, it, vi, beforeEach, afterEach, expect } from 'vitest'
import * as http from '../http'
import { registerUpdate, FLEX_MANIFEST_URL } from '../update'
import * as Cfg from '../config'
import type { Dispatch } from '../types'
-jest.unmock('electron-updater')
-jest.mock('electron-updater')
-jest.mock('../log')
-jest.mock('../config')
-jest.mock('../http')
-jest.mock('fs-extra')
-
-const getConfig = Cfg.getConfig as jest.MockedFunction<typeof Cfg.getConfig>
-const fetchJson = http.fetchJson as jest.MockedFunction<typeof http.fetchJson>
+vi.unmock('electron-updater')
+vi.mock('electron-updater')
+vi.mock('../log')
+vi.mock('../config')
+vi.mock('../http')
+vi.mock('fs-extra')
describe('update', () => {
let dispatch: Dispatch
let handleAction: Dispatch
beforeEach(() => {
- dispatch = jest.fn()
+ dispatch = vi.fn()
handleAction = registerUpdate(dispatch)
})
afterEach(() => {
- jest.resetAllMocks()
- resetAllWhenMocks()
+ vi.resetAllMocks()
})
it('handles shell:CHECK_UPDATE with available update', () => {
- when(getConfig)
+ when(vi.mocked(Cfg.getConfig))
// @ts-expect-error getConfig mock not recognizing correct type overload
.calledWith('update')
- .mockReturnValue({
+ .thenReturn({
channel: 'latest',
} as any)
- when(fetchJson)
+ when(vi.mocked(http.fetchJson))
.calledWith(FLEX_MANIFEST_URL)
- .mockResolvedValue({ production: { '5.0.0': {}, '6.0.0': {} } })
+ .thenResolve({ production: { '5.0.0': {}, '6.0.0': {} } })
handleAction({ type: 'shell:CHECK_UPDATE', meta: { shell: true } })
- expect(getConfig).toHaveBeenCalledWith('update')
+ expect(vi.mocked(Cfg.getConfig)).toHaveBeenCalledWith('update')
- expect(fetchJson).toHaveBeenCalledWith(FLEX_MANIFEST_URL)
+ expect(vi.mocked(http.fetchJson)).toHaveBeenCalledWith(FLEX_MANIFEST_URL)
})
})
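The update.test.ts hunk above captures the stubbing translation used across this jest-to-vitest migration: `when(mock).calledWith(...).mockReturnValue(...)` becomes `when(vi.mocked(fn)).calledWith(...).thenReturn(...)`, `mockResolvedValue` becomes `thenResolve`, and the separate `resetAllWhenMocks()` teardown is folded into `vi.resetAllMocks()`. A minimal self-contained sketch of the new pattern, using a hypothetical `./answer` module that is not part of this diff:

import { describe, it, expect, vi, afterEach } from 'vitest'
import { when } from 'vitest-when'
import { getAnswer } from './answer' // hypothetical async helper, auto-mocked below

vi.mock('./answer')

describe('vitest-when stubbing', () => {
  afterEach(() => {
    // mirrors the afterEach in the hunk above; no separate resetAllWhenMocks() call
    vi.resetAllMocks()
  })

  it('resolves the stubbed value only for matching arguments', async () => {
    when(vi.mocked(getAnswer)).calledWith('life').thenResolve(42)

    await expect(getAnswer('life')).resolves.toBe(42)
  })
})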
diff --git a/app-shell-odd/src/actions.ts b/app-shell-odd/src/actions.ts
new file mode 100644
index 00000000000..d1427d8468d
--- /dev/null
+++ b/app-shell-odd/src/actions.ts
@@ -0,0 +1,447 @@
+import type {
+ AddCustomLabwareAction,
+ AddCustomLabwareFailureAction,
+ AddCustomLabwareFileAction,
+ AddNewLabwareNameAction,
+ ChangeCustomLabwareDirectoryAction,
+ CheckedLabwareFile,
+ ClearAddCustomLabwareFailureAction,
+ ClearNewLabwareNameAction,
+ CustomLabwareListAction,
+ CustomLabwareListActionSource,
+ CustomLabwareListFailureAction,
+ DeleteCustomLabwareFileAction,
+ DuplicateLabwareFile,
+ FailedLabwareFile,
+ OpenCustomLabwareDirectoryAction,
+} from '@opentrons/app/src/redux/custom-labware/types'
+import type {
+ ResetConfigValueAction,
+ UpdateConfigValueAction,
+} from '@opentrons/app/src/redux/config'
+import type {
+ AddProtocolAction,
+ AddProtocolFailureAction,
+ AnalyzeProtocolAction,
+ AnalyzeProtocolFailureAction,
+ AnalyzeProtocolSuccessAction,
+ ClearAddProtocolFailureAction,
+ FetchProtocolsAction,
+ OpenProtocolDirectoryAction,
+ ProtocolListActionSource,
+ RemoveProtocolAction,
+ StoredProtocolData,
+ StoredProtocolDir,
+ UpdateProtocolListAction,
+ UpdateProtocolListFailureAction,
+ ViewProtocolSourceFolder,
+} from '@opentrons/app/src/redux/protocol-storage'
+import {
+ ADD_CUSTOM_LABWARE,
+ ADD_CUSTOM_LABWARE_FAILURE,
+ ADD_CUSTOM_LABWARE_FILE,
+ ADD_NEW_LABWARE_NAME,
+ ADD_PROTOCOL,
+ ADD_PROTOCOL_FAILURE,
+ ANALYZE_PROTOCOL,
+ ANALYZE_PROTOCOL_FAILURE,
+ ANALYZE_PROTOCOL_SUCCESS,
+ APP_RESTART,
+ CHANGE_CUSTOM_LABWARE_DIRECTORY,
+ CLEAR_ADD_CUSTOM_LABWARE_FAILURE,
+ CLEAR_ADD_PROTOCOL_FAILURE,
+ CLEAR_NEW_LABWARE_NAME,
+ CONFIG_INITIALIZED,
+ CUSTOM_LABWARE_LIST,
+ CUSTOM_LABWARE_LIST_FAILURE,
+ DELETE_CUSTOM_LABWARE_FILE,
+ FETCH_PROTOCOLS,
+ LABWARE_DIRECTORY_CONFIG_PATH,
+ NETWORK_INTERFACES_CHANGED,
+ OPEN_CUSTOM_LABWARE_DIRECTORY,
+ OPEN_PROTOCOL_DIRECTORY,
+ POLL,
+ RELOAD_UI,
+ REMOVE_PROTOCOL,
+ RESET_VALUE,
+ SEND_LOG,
+ SYSTEM_INFO_INITIALIZED,
+ UPDATE_PROTOCOL_LIST,
+ UPDATE_PROTOCOL_LIST_FAILURE,
+ UPDATE_VALUE,
+ USB_DEVICE_ADDED,
+ USB_DEVICE_REMOVED,
+ USB_HTTP_REQUESTS_START,
+ USB_HTTP_REQUESTS_STOP,
+ VALUE_UPDATED,
+ VIEW_PROTOCOL_SOURCE_FOLDER,
+ NOTIFY_SUBSCRIBE,
+ ROBOT_MASS_STORAGE_DEVICE_ADDED,
+ ROBOT_MASS_STORAGE_DEVICE_ENUMERATED,
+ ROBOT_MASS_STORAGE_DEVICE_REMOVED,
+ UPDATE_BRIGHTNESS,
+ DISCOVERY_START,
+ DISCOVERY_FINISH,
+ SEND_READY_STATUS,
+} from './constants'
+import type {
+ InitializedAction,
+ NetworkInterface,
+ NetworkInterfacesChangedAction,
+ UsbDevice,
+ UsbDeviceAddedAction,
+ UsbDeviceRemovedAction,
+} from '@opentrons/app/src/redux/system-info/types'
+import type {
+ ConfigInitializedAction,
+ ConfigValueUpdatedAction,
+ Config,
+ StartDiscoveryAction,
+ FinishDiscoveryAction,
+ RobotSystemAction,
+} from './types'
+import type {
+ AppRestartAction,
+ NotifySubscribeAction,
+ NotifyTopic,
+ ReloadUiAction,
+ RobotMassStorageDeviceAdded,
+ RobotMassStorageDeviceEnumerated,
+ RobotMassStorageDeviceRemoved,
+ SendLogAction,
+ UpdateBrightnessAction,
+ UsbRequestsAction,
+} from '@opentrons/app/src/redux/shell/types'
+
+// config file has been initialized
+export const configInitialized = (config: Config): ConfigInitializedAction => ({
+ type: CONFIG_INITIALIZED,
+ payload: { config },
+})
+
+// config value has been updated
+export const configValueUpdated = (
+ path: string,
+ value: unknown
+): ConfigValueUpdatedAction => ({
+ type: VALUE_UPDATED,
+ payload: { path, value },
+})
+
+export const customLabwareList = (
+ payload: CheckedLabwareFile[],
+ source: CustomLabwareListActionSource = POLL
+): CustomLabwareListAction => ({
+ type: CUSTOM_LABWARE_LIST,
+ payload,
+ meta: { source },
+})
+
+export const customLabwareListFailure = (
+ message: string,
+ source: CustomLabwareListActionSource = POLL
+): CustomLabwareListFailureAction => ({
+ type: CUSTOM_LABWARE_LIST_FAILURE,
+ payload: { message },
+ meta: { source },
+})
+
+export const changeCustomLabwareDirectory = (): ChangeCustomLabwareDirectoryAction => ({
+ type: CHANGE_CUSTOM_LABWARE_DIRECTORY,
+ meta: { shell: true },
+})
+
+export const addCustomLabware = (
+ overwrite: DuplicateLabwareFile | null = null
+): AddCustomLabwareAction => ({
+ type: ADD_CUSTOM_LABWARE,
+ payload: { overwrite },
+ meta: { shell: true },
+})
+
+export const addCustomLabwareFile = (
+ filePath: string
+): AddCustomLabwareFileAction => ({
+ type: ADD_CUSTOM_LABWARE_FILE,
+ payload: { filePath },
+ meta: { shell: true },
+})
+
+export const deleteCustomLabwareFile = (
+ filePath: string
+): DeleteCustomLabwareFileAction => ({
+ type: DELETE_CUSTOM_LABWARE_FILE,
+ payload: { filePath },
+ meta: { shell: true },
+})
+
+export const addCustomLabwareFailure = (
+ labware: FailedLabwareFile | null = null,
+ message: string | null = null
+): AddCustomLabwareFailureAction => ({
+ type: ADD_CUSTOM_LABWARE_FAILURE,
+ payload: { labware, message },
+})
+
+export const clearAddCustomLabwareFailure = (): ClearAddCustomLabwareFailureAction => ({
+ type: CLEAR_ADD_CUSTOM_LABWARE_FAILURE,
+})
+
+export const addNewLabwareName = (
+ filename: string
+): AddNewLabwareNameAction => ({
+ type: ADD_NEW_LABWARE_NAME,
+ payload: { filename },
+})
+
+export const clearNewLabwareName = (): ClearNewLabwareNameAction => ({
+ type: CLEAR_NEW_LABWARE_NAME,
+})
+
+export const openCustomLabwareDirectory = (): OpenCustomLabwareDirectoryAction => ({
+ type: OPEN_CUSTOM_LABWARE_DIRECTORY,
+ meta: { shell: true },
+})
+
+// request a config value reset to default
+export const resetConfigValue = (path: string): ResetConfigValueAction => ({
+ type: RESET_VALUE,
+ payload: { path },
+ meta: { shell: true },
+})
+
+export const resetCustomLabwareDirectory = (): ResetConfigValueAction => {
+ return resetConfigValue(LABWARE_DIRECTORY_CONFIG_PATH)
+}
+
+// request a config value update
+export const updateConfigValue = (
+ path: string,
+ value: unknown
+): UpdateConfigValueAction => ({
+ type: UPDATE_VALUE,
+ payload: { path, value },
+ meta: { shell: true },
+})
+
+// action creators
+
+export const fetchProtocols = (): FetchProtocolsAction => ({
+ type: FETCH_PROTOCOLS,
+ meta: { shell: true },
+})
+
+export const updateProtocolList = (
+ payload: StoredProtocolData[],
+ source: ProtocolListActionSource = POLL
+): UpdateProtocolListAction => ({
+ type: UPDATE_PROTOCOL_LIST,
+ payload,
+ meta: { source },
+})
+
+export const updateProtocolListFailure = (
+ message: string,
+ source: ProtocolListActionSource = POLL
+): UpdateProtocolListFailureAction => ({
+ type: UPDATE_PROTOCOL_LIST_FAILURE,
+ payload: { message },
+ meta: { source },
+})
+
+export const addProtocol = (protocolFilePath: string): AddProtocolAction => ({
+ type: ADD_PROTOCOL,
+ payload: { protocolFilePath },
+ meta: { shell: true },
+})
+
+export const removeProtocol = (protocolKey: string): RemoveProtocolAction => ({
+ type: REMOVE_PROTOCOL,
+ payload: { protocolKey },
+ meta: { shell: true },
+})
+
+export const addProtocolFailure = (
+ protocol: StoredProtocolDir | null = null,
+ message: string | null = null
+): AddProtocolFailureAction => ({
+ type: ADD_PROTOCOL_FAILURE,
+ payload: { protocol, message },
+})
+
+export const clearAddProtocolFailure = (): ClearAddProtocolFailureAction => ({
+ type: CLEAR_ADD_PROTOCOL_FAILURE,
+})
+
+export const openProtocolDirectory = (): OpenProtocolDirectoryAction => ({
+ type: OPEN_PROTOCOL_DIRECTORY,
+ meta: { shell: true },
+})
+
+export const analyzeProtocol = (
+ protocolKey: string
+): AnalyzeProtocolAction => ({
+ type: ANALYZE_PROTOCOL,
+ payload: { protocolKey },
+ meta: { shell: true },
+})
+
+export const analyzeProtocolSuccess = (
+ protocolKey: string
+): AnalyzeProtocolSuccessAction => ({
+ type: ANALYZE_PROTOCOL_SUCCESS,
+ payload: { protocolKey },
+ meta: { shell: true },
+})
+
+export const analyzeProtocolFailure = (
+ protocolKey: string
+): AnalyzeProtocolFailureAction => ({
+ type: ANALYZE_PROTOCOL_FAILURE,
+ payload: { protocolKey },
+ meta: { shell: true },
+})
+
+export const viewProtocolSourceFolder = (
+ protocolKey: string
+): ViewProtocolSourceFolder => ({
+ type: VIEW_PROTOCOL_SOURCE_FOLDER,
+ payload: { protocolKey },
+ meta: { shell: true },
+})
+
+export const initialized = (
+ usbDevices: UsbDevice[],
+ networkInterfaces: NetworkInterface[]
+): InitializedAction => ({
+ type: SYSTEM_INFO_INITIALIZED,
+ payload: { usbDevices, networkInterfaces },
+ meta: { shell: true },
+})
+
+export const usbDeviceAdded = (usbDevice: UsbDevice): UsbDeviceAddedAction => ({
+ type: USB_DEVICE_ADDED,
+ payload: { usbDevice },
+ meta: { shell: true },
+})
+
+export const usbDeviceRemoved = (
+ usbDevice: UsbDevice
+): UsbDeviceRemovedAction => ({
+ type: USB_DEVICE_REMOVED,
+ payload: { usbDevice },
+ meta: { shell: true },
+})
+
+export const networkInterfacesChanged = (
+ networkInterfaces: NetworkInterface[]
+): NetworkInterfacesChangedAction => ({
+ type: NETWORK_INTERFACES_CHANGED,
+ payload: { networkInterfaces },
+})
+
+export const usbRequestsStart = (): UsbRequestsAction => ({
+ type: USB_HTTP_REQUESTS_START,
+ meta: { shell: true },
+})
+
+export const usbRequestsStop = (): UsbRequestsAction => ({
+ type: USB_HTTP_REQUESTS_STOP,
+ meta: { shell: true },
+})
+
+export const appRestart = (message: string): AppRestartAction => ({
+ type: APP_RESTART,
+ payload: {
+ message: message,
+ },
+ meta: { shell: true },
+})
+
+export const reloadUi = (message: string): ReloadUiAction => ({
+ type: RELOAD_UI,
+ payload: {
+ message: message,
+ },
+ meta: { shell: true },
+})
+
+export const sendLog = (message: string): SendLogAction => ({
+ type: SEND_LOG,
+ payload: {
+ message: message,
+ },
+ meta: { shell: true },
+})
+
+export const updateBrightness = (message: string): UpdateBrightnessAction => ({
+ type: UPDATE_BRIGHTNESS,
+ payload: {
+ message: message,
+ },
+ meta: { shell: true },
+})
+
+export const robotMassStorageDeviceRemoved = (
+ rootPath: string
+): RobotMassStorageDeviceRemoved => ({
+ type: ROBOT_MASS_STORAGE_DEVICE_REMOVED,
+ payload: {
+ rootPath,
+ },
+ meta: { shell: true },
+})
+
+export const robotMassStorageDeviceAdded = (
+ rootPath: string
+): RobotMassStorageDeviceAdded => ({
+ type: ROBOT_MASS_STORAGE_DEVICE_ADDED,
+ payload: {
+ rootPath,
+ },
+ meta: { shell: true },
+})
+
+export const robotMassStorageDeviceEnumerated = (
+ rootPath: string,
+ filePaths: string[]
+): RobotMassStorageDeviceEnumerated => ({
+ type: ROBOT_MASS_STORAGE_DEVICE_ENUMERATED,
+ payload: {
+ rootPath,
+ filePaths,
+ },
+ meta: { shell: true },
+})
+
+export const notifySubscribeAction = (
+ hostname: string,
+ topic: NotifyTopic
+): NotifySubscribeAction => ({
+ type: NOTIFY_SUBSCRIBE,
+ payload: {
+ hostname,
+ topic,
+ },
+ meta: { shell: true },
+})
+
+export function startDiscovery(
+ timeout: number | null = null
+): StartDiscoveryAction {
+ return {
+ type: DISCOVERY_START,
+ payload: { timeout },
+ meta: { shell: true },
+ }
+}
+
+export function finishDiscovery(): FinishDiscoveryAction {
+ return { type: DISCOVERY_FINISH, meta: { shell: true } }
+}
+
+export const sendReadyStatus = (status: boolean): RobotSystemAction => ({
+ type: SEND_READY_STATUS,
+ payload: { shellReady: status },
+ meta: { shell: true },
+})
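Every creator in this file returns a plain, serializable action: a `type` constant from `./constants`, an optional `payload`, and `meta: { shell: true }` when the action should be handled by a shell-side handler rather than the renderer. For example, the protocol-storage creator defined above produces the following (the file path is purely illustrative):

import { addProtocol } from './actions'

const action = addProtocol('/data/protocols/example.py')
// {
//   type: 'protocolStorage:ADD_PROTOCOL',
//   payload: { protocolFilePath: '/data/protocols/example.py' },
//   meta: { shell: true },
// }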
diff --git a/app-shell-odd/src/config/__fixtures__/index.ts b/app-shell-odd/src/config/__fixtures__/index.ts
index 08725e1cd2d..b3ff0cbfbd7 100644
--- a/app-shell-odd/src/config/__fixtures__/index.ts
+++ b/app-shell-odd/src/config/__fixtures__/index.ts
@@ -8,13 +8,16 @@ import type {
ConfigV18,
ConfigV19,
ConfigV20,
+ ConfigV21,
} from '@opentrons/app/src/redux/config/types'
+const PKG_VERSION: string = _PKG_VERSION_
+
export const MOCK_CONFIG_V12: ConfigV12 = {
version: 12,
devtools: false,
reinstallDevtools: false,
- update: { channel: _PKG_VERSION_.includes('beta') ? 'beta' : 'latest' },
+ update: { channel: PKG_VERSION.includes('beta') ? 'beta' : 'latest' },
log: { level: { file: 'debug', console: 'info' } },
ui: {
width: 1024,
@@ -129,3 +132,8 @@ export const MOCK_CONFIG_V20: ConfigV20 = {
},
},
}
+
+export const MOCK_CONFIG_V21: ConfigV21 = {
+ ...MOCK_CONFIG_V20,
+ version: 21,
+}
diff --git a/app-shell-odd/src/config/__tests__/migrate.test.ts b/app-shell-odd/src/config/__tests__/migrate.test.ts
index b752b9437de..0dcdfbc658a 100644
--- a/app-shell-odd/src/config/__tests__/migrate.test.ts
+++ b/app-shell-odd/src/config/__tests__/migrate.test.ts
@@ -1,4 +1,5 @@
// config migration tests
+import { describe, it, expect } from 'vitest'
import {
MOCK_CONFIG_V12,
MOCK_CONFIG_V13,
@@ -9,10 +10,11 @@ import {
MOCK_CONFIG_V18,
MOCK_CONFIG_V19,
MOCK_CONFIG_V20,
+ MOCK_CONFIG_V21,
} from '../__fixtures__'
import { migrate } from '../migrate'
-const NEWEST_VERSION = 20
+const NEWEST_VERSION = 21
describe('config migration', () => {
it('should migrate version 12 to latest', () => {
@@ -20,7 +22,7 @@ describe('config migration', () => {
const result = migrate(v12Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 13 to latest', () => {
@@ -28,7 +30,7 @@ describe('config migration', () => {
const result = migrate(v13Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 14 to latest', () => {
@@ -36,7 +38,7 @@ describe('config migration', () => {
const result = migrate(v14Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 15 to latest', () => {
@@ -44,7 +46,7 @@ describe('config migration', () => {
const result = migrate(v15Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 16 to latest', () => {
@@ -52,7 +54,7 @@ describe('config migration', () => {
const result = migrate(v16Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 17 to latest', () => {
@@ -60,7 +62,7 @@ describe('config migration', () => {
const result = migrate(v17Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 18 to latest', () => {
@@ -68,7 +70,7 @@ describe('config migration', () => {
const result = migrate(v18Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 19 to latest', () => {
@@ -76,14 +78,21 @@ describe('config migration', () => {
const result = migrate(v19Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
- it('should keep version 20', () => {
+ it('should migrate version 20 to latest', () => {
const v20Config = MOCK_CONFIG_V20
const result = migrate(v20Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(v20Config)
+ expect(result).toEqual(MOCK_CONFIG_V21)
+ })
+ it('should keep version 21', () => {
+ const v21Config = MOCK_CONFIG_V21
+ const result = migrate(v21Config)
+
+ expect(result.version).toBe(NEWEST_VERSION)
+ expect(result).toEqual(v21Config)
})
})
diff --git a/app-shell-odd/src/config/__tests__/update.test.ts b/app-shell-odd/src/config/__tests__/update.test.ts
index 136c7bc8a97..518d6db9587 100644
--- a/app-shell-odd/src/config/__tests__/update.test.ts
+++ b/app-shell-odd/src/config/__tests__/update.test.ts
@@ -1,3 +1,4 @@
+import { describe, it, expect } from 'vitest'
import * as Cfg from '@opentrons/app/src/redux/config'
import { shouldUpdate, getNextValue } from '../update'
diff --git a/app-shell-odd/src/config/index.ts b/app-shell-odd/src/config/index.ts
index ae9e650acc7..7c8d3f1ce8a 100644
--- a/app-shell-odd/src/config/index.ts
+++ b/app-shell-odd/src/config/index.ts
@@ -6,8 +6,9 @@ import forEach from 'lodash/forEach'
import mergeOptions from 'merge-options'
import yargsParser from 'yargs-parser'
-import { UI_INITIALIZED } from '@opentrons/app/src/redux/shell/actions'
-import * as Cfg from '@opentrons/app/src/redux/config'
+import { UI_INITIALIZED } from '../constants'
+import * as Cfg from '../constants'
+import { configInitialized, configValueUpdated } from '../actions'
import systemd from '../systemd'
import { createLogger } from '../log'
import { DEFAULTS_V12, migrate } from './migrate'
@@ -65,7 +66,7 @@ const log = (): Logger => _log ?? (_log = createLogger('config'))
export function registerConfig(dispatch: Dispatch): (action: Action) => void {
return function handleIncomingAction(action: Action) {
if (action.type === UI_INITIALIZED) {
- dispatch(Cfg.configInitialized(getFullConfig()))
+ dispatch(configInitialized(getFullConfig()))
} else if (
action.type === Cfg.UPDATE_VALUE ||
action.type === Cfg.RESET_VALUE ||
@@ -103,7 +104,7 @@ export function registerConfig(dispatch: Dispatch): (action: Action) => void {
log().debug('Updating config', { path, nextValue })
store().set(path, nextValue)
- dispatch(Cfg.configValueUpdated(path, nextValue))
+ dispatch(configValueUpdated(path, nextValue))
} else {
log().debug(`config path in overrides; not updating`, { path })
}
diff --git a/app-shell-odd/src/config/migrate.ts b/app-shell-odd/src/config/migrate.ts
index 4aed0cdf1bf..9a05df79594 100644
--- a/app-shell-odd/src/config/migrate.ts
+++ b/app-shell-odd/src/config/migrate.ts
@@ -1,7 +1,6 @@
import path from 'path'
import { app } from 'electron'
import uuid from 'uuid/v4'
-import { CONFIG_VERSION_LATEST } from '@opentrons/app/src/redux/config'
import type {
Config,
@@ -14,17 +13,21 @@ import type {
ConfigV18,
ConfigV19,
ConfigV20,
+ ConfigV21,
} from '@opentrons/app/src/redux/config/types'
// format
// base config v12 defaults
// any default values for later config versions are specified in the migration
// functions for those versions below
+const CONFIG_VERSION_LATEST = 21 // update this after each config version bump
+
+const PKG_VERSION: string = _PKG_VERSION_
export const DEFAULTS_V12: ConfigV12 = {
version: 12,
devtools: false,
reinstallDevtools: false,
- update: { channel: _PKG_VERSION_.includes('beta') ? 'beta' : 'latest' },
+ update: { channel: PKG_VERSION.includes('beta') ? 'beta' : 'latest' },
log: { level: { file: 'debug', console: 'info' } },
ui: {
width: 1024,
@@ -169,6 +172,21 @@ const toVersion20 = (prevConfig: ConfigV19): ConfigV20 => {
}
}
+const toVersion21 = (prevConfig: ConfigV20): ConfigV21 => {
+ return {
+ ...prevConfig,
+ version: 21 as const,
+ onDeviceDisplaySettings: {
+ ...prevConfig.onDeviceDisplaySettings,
+ unfinishedUnboxingFlowRoute:
+ prevConfig.onDeviceDisplaySettings.unfinishedUnboxingFlowRoute ===
+ '/dashboard'
+ ? null
+ : prevConfig.onDeviceDisplaySettings.unfinishedUnboxingFlowRoute,
+ },
+ }
+}
+
const MIGRATIONS: [
(prevConfig: ConfigV12) => ConfigV13,
(prevConfig: ConfigV13) => ConfigV14,
@@ -177,7 +195,8 @@ const MIGRATIONS: [
(prevConfig: ConfigV16) => ConfigV17,
(prevConfig: ConfigV17) => ConfigV18,
(prevConfig: ConfigV18) => ConfigV19,
- (prevConfig: ConfigV19) => ConfigV20
+ (prevConfig: ConfigV19) => ConfigV20,
+ (prevConfig: ConfigV20) => ConfigV21
] = [
toVersion13,
toVersion14,
@@ -187,6 +206,7 @@ const MIGRATIONS: [
toVersion18,
toVersion19,
toVersion20,
+ toVersion21,
]
export const DEFAULTS: Config = migrate(DEFAULTS_V12)
@@ -202,6 +222,7 @@ export function migrate(
| ConfigV18
| ConfigV19
| ConfigV20
+ | ConfigV21
): Config {
let result = prevConfig
// loop through the migrations, skipping any migrations that are unnecessary
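The migration list above is applied as a chain: `migrate()` starts at the stored config's `version` and runs each entry in `MIGRATIONS` the config hasn't reached yet, so a v12 config passes through every step while a v20 config only runs `toVersion21`. A stripped-down sketch of that looping pattern with simplified stand-in types (illustrative only; the real types and implementation live in this file):

// Simplified stand-in for the real ConfigV12..ConfigV21 types.
interface VersionedConfig {
  version: number
  [key: string]: unknown
}

const BASE_VERSION = 12
const SKETCH_MIGRATIONS: Array<(prev: VersionedConfig) => VersionedConfig> = [
  prev => ({ ...prev, version: 13 }),
  prev => ({ ...prev, version: 14 }),
  // ...one entry per bump, ending with the equivalent of toVersion21
]

function migrateSketch(prevConfig: VersionedConfig): VersionedConfig {
  let result = prevConfig
  // Skip the migrations this config has already been through.
  for (let i = result.version - BASE_VERSION; i < SKETCH_MIGRATIONS.length; i++) {
    result = SKETCH_MIGRATIONS[i](result)
  }
  return result
}

// A config stored at version 13 only runs the second step onward.
migrateSketch({ version: 13 }) // -> { version: 14 } in this two-step sketch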
diff --git a/app-shell-odd/src/config/update.ts b/app-shell-odd/src/config/update.ts
index 6340e249967..894aff585c8 100644
--- a/app-shell-odd/src/config/update.ts
+++ b/app-shell-odd/src/config/update.ts
@@ -9,7 +9,7 @@ import {
RESET_VALUE,
ADD_UNIQUE_VALUE,
SUBTRACT_VALUE,
-} from '@opentrons/app/src/redux/config'
+} from '../constants'
import { DEFAULTS } from './migrate'
diff --git a/app-shell-odd/src/constants.ts b/app-shell-odd/src/constants.ts
new file mode 100644
index 00000000000..788fdf70cd7
--- /dev/null
+++ b/app-shell-odd/src/constants.ts
@@ -0,0 +1,257 @@
+import type {
+ UI_INITIALIZED_TYPE,
+ CONFIG_INITIALIZED_TYPE,
+ CONFIG_UPDATE_VALUE_TYPE,
+ CONFIG_RESET_VALUE_TYPE,
+ CONFIG_TOGGLE_VALUE_TYPE,
+ CONFIG_ADD_UNIQUE_VALUE_TYPE,
+ CONFIG_SUBTRACT_VALUE_TYPE,
+ CONFIG_VALUE_UPDATED_TYPE,
+ POLL_TYPE,
+ INITIAL_TYPE,
+ ADD_LABWARE_TYPE,
+ DELETE_LABWARE_TYPE,
+ OVERWRITE_LABWARE_TYPE,
+ CHANGE_DIRECTORY_TYPE,
+ FETCH_CUSTOM_LABWARE_TYPE,
+ CUSTOM_LABWARE_LIST_TYPE,
+ CUSTOM_LABWARE_LIST_FAILURE_TYPE,
+ CHANGE_CUSTOM_LABWARE_DIRECTORY_TYPE,
+ ADD_CUSTOM_LABWARE_TYPE,
+ ADD_CUSTOM_LABWARE_FILE_TYPE,
+ ADD_CUSTOM_LABWARE_FAILURE_TYPE,
+ CLEAR_ADD_CUSTOM_LABWARE_FAILURE_TYPE,
+ ADD_NEW_LABWARE_NAME_TYPE,
+ CLEAR_NEW_LABWARE_NAME_TYPE,
+ OPEN_CUSTOM_LABWARE_DIRECTORY_TYPE,
+ DELETE_CUSTOM_LABWARE_FILE_TYPE,
+ INVALID_LABWARE_FILE_TYPE,
+ DUPLICATE_LABWARE_FILE_TYPE,
+ OPENTRONS_LABWARE_FILE_TYPE,
+ VALID_LABWARE_FILE_TYPE,
+ OPEN_PYTHON_DIRECTORY_TYPE,
+ CHANGE_PYTHON_PATH_OVERRIDE_TYPE,
+ FETCH_PROTOCOLS_TYPE,
+ UPDATE_PROTOCOL_LIST_TYPE,
+ UPDATE_PROTOCOL_LIST_FAILURE_TYPE,
+ ADD_PROTOCOL_TYPE,
+ REMOVE_PROTOCOL_TYPE,
+ ADD_PROTOCOL_FAILURE_TYPE,
+ CLEAR_ADD_PROTOCOL_FAILURE_TYPE,
+ OPEN_PROTOCOL_DIRECTORY_TYPE,
+ ANALYZE_PROTOCOL_TYPE,
+ ANALYZE_PROTOCOL_SUCCESS_TYPE,
+ ANALYZE_PROTOCOL_FAILURE_TYPE,
+ VIEW_PROTOCOL_SOURCE_FOLDER_TYPE,
+ PROTOCOL_ADDITION_TYPE,
+ OPENTRONS_USB_TYPE,
+ SYSTEM_INFO_INITIALIZED_TYPE,
+ USB_DEVICE_ADDED_TYPE,
+ USB_DEVICE_REMOVED_TYPE,
+ NETWORK_INTERFACES_CHANGED_TYPE,
+ U2E_DRIVER_OUTDATED_MESSAGE_TYPE,
+ U2E_DRIVER_DESCRIPTION_TYPE,
+ U2E_DRIVER_OUTDATED_CTA_TYPE,
+ DISCOVERY_START_TYPE,
+ DISCOVERY_FINISH_TYPE,
+ DISCOVERY_UPDATE_LIST_TYPE,
+ DISCOVERY_REMOVE_TYPE,
+ CLEAR_CACHE_TYPE,
+ USB_HTTP_REQUESTS_START_TYPE,
+ USB_HTTP_REQUESTS_STOP_TYPE,
+ APP_RESTART_TYPE,
+ RELOAD_UI_TYPE,
+ SEND_LOG_TYPE,
+} from './types'
+
+// these constants are all copied over from the app
+
+export const UI_INITIALIZED: UI_INITIALIZED_TYPE = 'shell:UI_INITIALIZED'
+export const CONFIG_INITIALIZED: CONFIG_INITIALIZED_TYPE = 'config:INITIALIZED'
+export const UPDATE_VALUE: CONFIG_UPDATE_VALUE_TYPE = 'config:UPDATE_VALUE'
+export const RESET_VALUE: CONFIG_RESET_VALUE_TYPE = 'config:RESET_VALUE'
+export const TOGGLE_VALUE: CONFIG_TOGGLE_VALUE_TYPE = 'config:TOGGLE_VALUE'
+export const ADD_UNIQUE_VALUE: CONFIG_ADD_UNIQUE_VALUE_TYPE =
+ 'config:ADD_UNIQUE_VALUE'
+export const SUBTRACT_VALUE: CONFIG_SUBTRACT_VALUE_TYPE =
+ 'config:SUBTRACT_VALUE'
+export const VALUE_UPDATED: CONFIG_VALUE_UPDATED_TYPE = 'config:VALUE_UPDATED'
+
+// custom labware
+
+export const FETCH_CUSTOM_LABWARE: FETCH_CUSTOM_LABWARE_TYPE =
+ 'labware:FETCH_CUSTOM_LABWARE'
+
+export const CUSTOM_LABWARE_LIST: CUSTOM_LABWARE_LIST_TYPE =
+ 'labware:CUSTOM_LABWARE_LIST'
+
+export const CUSTOM_LABWARE_LIST_FAILURE: CUSTOM_LABWARE_LIST_FAILURE_TYPE =
+ 'labware:CUSTOM_LABWARE_LIST_FAILURE'
+
+export const CHANGE_CUSTOM_LABWARE_DIRECTORY: CHANGE_CUSTOM_LABWARE_DIRECTORY_TYPE =
+ 'labware:CHANGE_CUSTOM_LABWARE_DIRECTORY'
+
+export const ADD_CUSTOM_LABWARE: ADD_CUSTOM_LABWARE_TYPE =
+ 'labware:ADD_CUSTOM_LABWARE'
+
+export const ADD_CUSTOM_LABWARE_FILE: ADD_CUSTOM_LABWARE_FILE_TYPE =
+ 'labware:ADD_CUSTOM_LABWARE_FILE'
+
+export const ADD_CUSTOM_LABWARE_FAILURE: ADD_CUSTOM_LABWARE_FAILURE_TYPE =
+ 'labware:ADD_CUSTOM_LABWARE_FAILURE'
+
+export const CLEAR_ADD_CUSTOM_LABWARE_FAILURE: CLEAR_ADD_CUSTOM_LABWARE_FAILURE_TYPE =
+ 'labware:CLEAR_ADD_CUSTOM_LABWARE_FAILURE'
+
+export const ADD_NEW_LABWARE_NAME: ADD_NEW_LABWARE_NAME_TYPE =
+ 'labware:ADD_NEW_LABWARE_NAME'
+
+export const CLEAR_NEW_LABWARE_NAME: CLEAR_NEW_LABWARE_NAME_TYPE =
+ 'labware:CLEAR_NEW_LABWARE_NAME'
+
+export const OPEN_CUSTOM_LABWARE_DIRECTORY: OPEN_CUSTOM_LABWARE_DIRECTORY_TYPE =
+ 'labware:OPEN_CUSTOM_LABWARE_DIRECTORY'
+
+export const DELETE_CUSTOM_LABWARE_FILE: DELETE_CUSTOM_LABWARE_FILE_TYPE =
+ 'labware:DELETE_CUSTOM_LABWARE_FILE'
+// action meta literals
+
+export const POLL: POLL_TYPE = 'poll'
+export const INITIAL: INITIAL_TYPE = 'initial'
+export const ADD_LABWARE: ADD_LABWARE_TYPE = 'addLabware'
+export const DELETE_LABWARE: DELETE_LABWARE_TYPE = 'deleteLabware'
+export const OVERWRITE_LABWARE: OVERWRITE_LABWARE_TYPE = 'overwriteLabware'
+export const CHANGE_DIRECTORY: CHANGE_DIRECTORY_TYPE = 'changeDirectory'
+
+// other constants
+
+export const LABWARE_DIRECTORY_CONFIG_PATH = 'labware.directory'
+
+export const INVALID_LABWARE_FILE: INVALID_LABWARE_FILE_TYPE =
+ 'INVALID_LABWARE_FILE'
+
+export const DUPLICATE_LABWARE_FILE: DUPLICATE_LABWARE_FILE_TYPE =
+ 'DUPLICATE_LABWARE_FILE'
+
+export const OPENTRONS_LABWARE_FILE: OPENTRONS_LABWARE_FILE_TYPE =
+ 'OPENTRONS_LABWARE_FILE'
+
+export const VALID_LABWARE_FILE: VALID_LABWARE_FILE_TYPE = 'VALID_LABWARE_FILE'
+
+export const OPEN_PYTHON_DIRECTORY: OPEN_PYTHON_DIRECTORY_TYPE =
+ 'protocol-analysis:OPEN_PYTHON_DIRECTORY'
+
+export const CHANGE_PYTHON_PATH_OVERRIDE: CHANGE_PYTHON_PATH_OVERRIDE_TYPE =
+ 'protocol-analysis:CHANGE_PYTHON_PATH_OVERRIDE'
+
+export const FETCH_PROTOCOLS: FETCH_PROTOCOLS_TYPE =
+ 'protocolStorage:FETCH_PROTOCOLS'
+
+export const UPDATE_PROTOCOL_LIST: UPDATE_PROTOCOL_LIST_TYPE =
+ 'protocolStorage:UPDATE_PROTOCOL_LIST'
+
+export const UPDATE_PROTOCOL_LIST_FAILURE: UPDATE_PROTOCOL_LIST_FAILURE_TYPE =
+ 'protocolStorage:UPDATE_PROTOCOL_LIST_FAILURE'
+
+export const ADD_PROTOCOL: ADD_PROTOCOL_TYPE = 'protocolStorage:ADD_PROTOCOL'
+
+export const REMOVE_PROTOCOL: REMOVE_PROTOCOL_TYPE =
+ 'protocolStorage:REMOVE_PROTOCOL'
+
+export const ADD_PROTOCOL_FAILURE: ADD_PROTOCOL_FAILURE_TYPE =
+ 'protocolStorage:ADD_PROTOCOL_FAILURE'
+
+export const CLEAR_ADD_PROTOCOL_FAILURE: CLEAR_ADD_PROTOCOL_FAILURE_TYPE =
+ 'protocolStorage:CLEAR_ADD_PROTOCOL_FAILURE'
+
+export const OPEN_PROTOCOL_DIRECTORY: OPEN_PROTOCOL_DIRECTORY_TYPE =
+ 'protocolStorage:OPEN_PROTOCOL_DIRECTORY'
+
+export const ANALYZE_PROTOCOL: ANALYZE_PROTOCOL_TYPE =
+ 'protocolStorage:ANALYZE_PROTOCOL'
+
+export const ANALYZE_PROTOCOL_SUCCESS: ANALYZE_PROTOCOL_SUCCESS_TYPE =
+ 'protocolStorage:ANALYZE_PROTOCOL_SUCCESS'
+
+export const ANALYZE_PROTOCOL_FAILURE: ANALYZE_PROTOCOL_FAILURE_TYPE =
+ 'protocolStorage:ANALYZE_PROTOCOL_FAILURE'
+
+export const VIEW_PROTOCOL_SOURCE_FOLDER: VIEW_PROTOCOL_SOURCE_FOLDER_TYPE =
+ 'protocolStorage:VIEW_PROTOCOL_SOURCE_FOLDER'
+
+export const PROTOCOL_ADDITION: PROTOCOL_ADDITION_TYPE = 'protocolAddition'
+
+export const OPENTRONS_USB: OPENTRONS_USB_TYPE = 'opentrons-usb'
+
+export const U2E_DRIVER_UPDATE_URL =
+ 'https://www.realtek.com/en/component/zoo/category/network-interface-controllers-10-100-1000m-gigabit-ethernet-usb-3-0-software'
+
+// driver statuses
+
+export const NOT_APPLICABLE: 'NOT_APPLICABLE' = 'NOT_APPLICABLE'
+export const UNKNOWN: 'UNKNOWN' = 'UNKNOWN'
+export const UP_TO_DATE: 'UP_TO_DATE' = 'UP_TO_DATE'
+export const OUTDATED: 'OUTDATED' = 'OUTDATED'
+
+// action types
+
+export const SYSTEM_INFO_INITIALIZED: SYSTEM_INFO_INITIALIZED_TYPE =
+ 'systemInfo:INITIALIZED'
+
+export const USB_DEVICE_ADDED: USB_DEVICE_ADDED_TYPE =
+ 'systemInfo:USB_DEVICE_ADDED'
+
+export const USB_DEVICE_REMOVED: USB_DEVICE_REMOVED_TYPE =
+ 'systemInfo:USB_DEVICE_REMOVED'
+
+export const NETWORK_INTERFACES_CHANGED: NETWORK_INTERFACES_CHANGED_TYPE =
+ 'systemInfo:NETWORK_INTERFACES_CHANGED'
+
+export const USB_HTTP_REQUESTS_START: USB_HTTP_REQUESTS_START_TYPE =
+ 'shell:USB_HTTP_REQUESTS_START'
+export const USB_HTTP_REQUESTS_STOP: USB_HTTP_REQUESTS_STOP_TYPE =
+ 'shell:USB_HTTP_REQUESTS_STOP'
+export const APP_RESTART: APP_RESTART_TYPE = 'shell:APP_RESTART'
+export const RELOAD_UI: RELOAD_UI_TYPE = 'shell:RELOAD_UI'
+export const SEND_LOG: SEND_LOG_TYPE = 'shell:SEND_LOG'
+
+export const UPDATE_BRIGHTNESS: 'shell:UPDATE_BRIGHTNESS' =
+ 'shell:UPDATE_BRIGHTNESS'
+export const ROBOT_MASS_STORAGE_DEVICE_ADDED: 'shell:ROBOT_MASS_STORAGE_DEVICE_ADDED' =
+ 'shell:ROBOT_MASS_STORAGE_DEVICE_ADDED'
+export const ROBOT_MASS_STORAGE_DEVICE_REMOVED: 'shell:ROBOT_MASS_STORAGE_DEVICE_REMOVED' =
+ 'shell:ROBOT_MASS_STORAGE_DEVICE_REMOVED'
+export const ROBOT_MASS_STORAGE_DEVICE_ENUMERATED: 'shell:ROBOT_MASS_STORAGE_DEVICE_ENUMERATED' =
+ 'shell:ROBOT_MASS_STORAGE_DEVICE_ENUMERATED'
+export const NOTIFY_SUBSCRIBE: 'shell:NOTIFY_SUBSCRIBE' =
+ 'shell:NOTIFY_SUBSCRIBE'
+
+// copy
+// TODO(mc, 2020-05-11): i18n
+export const U2E_DRIVER_OUTDATED_MESSAGE: U2E_DRIVER_OUTDATED_MESSAGE_TYPE =
+ 'There is an updated Realtek USB-to-Ethernet adapter driver available for your computer.'
+export const U2E_DRIVER_DESCRIPTION: U2E_DRIVER_DESCRIPTION_TYPE =
+ 'The OT-2 uses this adapter for its USB connection to the Opentrons App.'
+export const U2E_DRIVER_OUTDATED_CTA: U2E_DRIVER_OUTDATED_CTA_TYPE =
+ "Please update your computer's driver to ensure a reliable connection to your OT-2."
+
+export const DISCOVERY_START: DISCOVERY_START_TYPE = 'discovery:START'
+
+export const DISCOVERY_FINISH: DISCOVERY_FINISH_TYPE = 'discovery:FINISH'
+
+export const DISCOVERY_UPDATE_LIST: DISCOVERY_UPDATE_LIST_TYPE =
+ 'discovery:UPDATE_LIST'
+
+export const DISCOVERY_REMOVE: DISCOVERY_REMOVE_TYPE = 'discovery:REMOVE'
+
+export const CLEAR_CACHE: CLEAR_CACHE_TYPE = 'discovery:CLEAR_CACHE'
+
+export const HTTP_API_VERSION: 3 = 3
+
+export const SEND_READY_STATUS: 'shell:SEND_READY_STATUS' =
+ 'shell:SEND_READY_STATUS'
+
+export const FAILURE_STATUSES = {
+ ECONNREFUSED: 'ECONNREFUSED',
+ ECONNFAILED: 'ECONNFAILED',
+} as const
diff --git a/app-shell-odd/src/dialogs/__tests__/dialogs.test.ts b/app-shell-odd/src/dialogs/__tests__/dialogs.test.ts
index a0f4bfa0333..d3ad23a05d3 100644
--- a/app-shell-odd/src/dialogs/__tests__/dialogs.test.ts
+++ b/app-shell-odd/src/dialogs/__tests__/dialogs.test.ts
@@ -1,11 +1,8 @@
import Electron from 'electron'
-
+import { describe, it, expect, vi } from 'vitest'
import * as Dialogs from '..'
-jest.mock('electron')
-
-const mockShowOpenDialog = Electron.dialog
- .showOpenDialog as jest.MockedFunction<typeof Electron.dialog.showOpenDialog>
+vi.mock('electron')
const mockMainWindow = ({
mainWindow: true,
@@ -14,32 +11,41 @@ const mockMainWindow = ({
describe('dialog boxes', () => {
describe('showOpenDirectoryDialog', () => {
it('directory select with cancel', () => {
- mockShowOpenDialog.mockResolvedValue({ canceled: true, filePaths: [] })
+ vi.mocked(Electron.dialog.showOpenDialog).mockResolvedValue({
+ canceled: true,
+ filePaths: [],
+ })
return Dialogs.showOpenDirectoryDialog(mockMainWindow).then(filePaths => {
- expect(mockShowOpenDialog).toHaveBeenCalledWith(mockMainWindow, {
- properties: ['openDirectory', 'createDirectory'],
- })
+ expect(vi.mocked(Electron.dialog.showOpenDialog)).toHaveBeenCalledWith(
+ mockMainWindow,
+ {
+ properties: ['openDirectory', 'createDirectory'],
+ }
+ )
expect(filePaths).toEqual([])
})
})
it('directory select with files', () => {
- mockShowOpenDialog.mockResolvedValue({
+ vi.mocked(Electron.dialog.showOpenDialog).mockResolvedValue({
canceled: false,
filePaths: ['/path/to/dir'],
})
return Dialogs.showOpenDirectoryDialog(mockMainWindow).then(filePaths => {
- expect(mockShowOpenDialog).toHaveBeenCalledWith(mockMainWindow, {
- properties: ['openDirectory', 'createDirectory'],
- })
+ expect(vi.mocked(Electron.dialog.showOpenDialog)).toHaveBeenCalledWith(
+ mockMainWindow,
+ {
+ properties: ['openDirectory', 'createDirectory'],
+ }
+ )
expect(filePaths).toEqual(['/path/to/dir'])
})
})
it('directory select with default location', () => {
- mockShowOpenDialog.mockResolvedValue({
+ vi.mocked(Electron.dialog.showOpenDialog).mockResolvedValue({
canceled: false,
filePaths: ['/path/to/dir'],
})
@@ -47,10 +53,13 @@ describe('dialog boxes', () => {
return Dialogs.showOpenDirectoryDialog(mockMainWindow, {
defaultPath: '/foo',
}).then(filePaths => {
- expect(mockShowOpenDialog).toHaveBeenCalledWith(mockMainWindow, {
- properties: ['openDirectory', 'createDirectory'],
- defaultPath: '/foo',
- })
+ expect(vi.mocked(Electron.dialog.showOpenDialog)).toHaveBeenCalledWith(
+ mockMainWindow,
+ {
+ properties: ['openDirectory', 'createDirectory'],
+ defaultPath: '/foo',
+ }
+ )
expect(filePaths).toEqual(['/path/to/dir'])
})
})
@@ -58,32 +67,41 @@ describe('dialog boxes', () => {
describe('showOpenFileDialog', () => {
it('file select with cancel', () => {
- mockShowOpenDialog.mockResolvedValue({ canceled: true, filePaths: [] })
+ vi.mocked(Electron.dialog.showOpenDialog).mockResolvedValue({
+ canceled: true,
+ filePaths: [],
+ })
return Dialogs.showOpenFileDialog(mockMainWindow).then(filePaths => {
- expect(mockShowOpenDialog).toHaveBeenCalledWith(mockMainWindow, {
- properties: ['openFile'],
- })
+ expect(vi.mocked(Electron.dialog.showOpenDialog)).toHaveBeenCalledWith(
+ mockMainWindow,
+ {
+ properties: ['openFile'],
+ }
+ )
expect(filePaths).toEqual([])
})
})
it('file select with files', () => {
- mockShowOpenDialog.mockResolvedValue({
+ vi.mocked(Electron.dialog.showOpenDialog).mockResolvedValue({
canceled: false,
filePaths: ['/path/to/file.json'],
})
return Dialogs.showOpenFileDialog(mockMainWindow).then(filePaths => {
- expect(mockShowOpenDialog).toHaveBeenCalledWith(mockMainWindow, {
- properties: ['openFile'],
- })
+ expect(vi.mocked(Electron.dialog.showOpenDialog)).toHaveBeenCalledWith(
+ mockMainWindow,
+ {
+ properties: ['openFile'],
+ }
+ )
expect(filePaths).toEqual(['/path/to/file.json'])
})
})
it('file select with filters', () => {
- mockShowOpenDialog.mockResolvedValue({
+ vi.mocked(Electron.dialog.showOpenDialog).mockResolvedValue({
canceled: false,
filePaths: ['/path/to/file.json'],
})
@@ -92,7 +110,9 @@ describe('dialog boxes', () => {
return Dialogs.showOpenFileDialog(mockMainWindow, options).then(
filePaths => {
- expect(mockShowOpenDialog).toHaveBeenCalledWith(mockMainWindow, {
+ expect(
+ vi.mocked(Electron.dialog.showOpenDialog)
+ ).toHaveBeenCalledWith(mockMainWindow, {
properties: ['openFile'],
filters: [{ name: 'JSON', extensions: ['json'] }],
})
@@ -102,7 +122,7 @@ describe('dialog boxes', () => {
})
it('file select with default location', () => {
- mockShowOpenDialog.mockResolvedValue({
+ vi.mocked(Electron.dialog.showOpenDialog).mockResolvedValue({
canceled: false,
filePaths: ['/path/to/file.json'],
})
@@ -110,10 +130,13 @@ describe('dialog boxes', () => {
return Dialogs.showOpenFileDialog(mockMainWindow, {
defaultPath: '/foo',
}).then(filePaths => {
- expect(mockShowOpenDialog).toHaveBeenCalledWith(mockMainWindow, {
- properties: ['openFile'],
- defaultPath: '/foo',
- })
+ expect(vi.mocked(Electron.dialog.showOpenDialog)).toHaveBeenCalledWith(
+ mockMainWindow,
+ {
+ properties: ['openFile'],
+ defaultPath: '/foo',
+ }
+ )
expect(filePaths).toEqual(['/path/to/file.json'])
})
})
diff --git a/app-shell-odd/src/discovery.ts b/app-shell-odd/src/discovery.ts
index bbe84cc14a9..20aa74eebca 100644
--- a/app-shell-odd/src/discovery.ts
+++ b/app-shell-odd/src/discovery.ts
@@ -9,13 +9,13 @@ import {
DEFAULT_PORT,
} from '@opentrons/discovery-client'
-import { UI_INITIALIZED } from '@opentrons/app/src/redux/shell/actions'
import {
+ UI_INITIALIZED,
DISCOVERY_START,
DISCOVERY_FINISH,
DISCOVERY_REMOVE,
CLEAR_CACHE,
-} from '@opentrons/app/src/redux/discovery/actions'
+} from './constants'
import { getFullConfig, handleConfigChange } from './config'
import { createLogger } from './log'
diff --git a/app-shell-odd/src/http.ts b/app-shell-odd/src/http.ts
index a1594465dc4..008cd80133f 100644
--- a/app-shell-odd/src/http.ts
+++ b/app-shell-odd/src/http.ts
@@ -6,7 +6,7 @@ import pump from 'pump'
import _fetch from 'node-fetch'
import FormData from 'form-data'
-import { HTTP_API_VERSION } from '@opentrons/app/src/redux/robot-api/constants'
+import { HTTP_API_VERSION } from './constants'
import type { Request, RequestInit, Response } from 'node-fetch'
diff --git a/app-shell-odd/src/main.ts b/app-shell-odd/src/main.ts
index d860bdeb444..eaea1768078 100644
--- a/app-shell-odd/src/main.ts
+++ b/app-shell-odd/src/main.ts
@@ -1,8 +1,9 @@
// electron main entry point
import { app, ipcMain } from 'electron'
+import dns from 'dns'
import fse from 'fs-extra'
import path from 'path'
-import { createUi } from './ui'
+import { createUi, waitForRobotServerAndShowMainWindow } from './ui'
import { createLogger } from './log'
import { registerDiscovery } from './discovery'
import {
@@ -22,10 +23,22 @@ import {
} from './config'
import systemd from './systemd'
import { watchForMassStorage } from './usb'
+import {
+ registerNotify,
+ establishBrokerConnection,
+ closeBrokerConnection,
+} from './notifications'
import type { BrowserWindow } from 'electron'
import type { Dispatch, Logger } from './types'
+/**
+ * node 17 introduced a change to default IP resolving to prefer IPv6 which causes localhost requests to fail
+ * setting the default to IPv4 fixes the issue
+ * https://github.com/node-fetch/node-fetch/issues/1624
+ */
+dns.setDefaultResultOrder('ipv4first')
+
systemd.sendStatus('starting app')
const config = getConfig()
const log = createLogger('main')
@@ -49,7 +62,14 @@ if (config.devtools) app.once('ready', installDevtools)
app.once('window-all-closed', () => {
log.debug('all windows closed, quitting the app')
- app.quit()
+ closeBrokerConnection()
+ .then(() => {
+ app.quit()
+ })
+ .catch(error => {
+ log.warn('Failed to properly close MQTT connections:', error)
+ app.quit()
+ })
})
function startUp(): void {
@@ -80,7 +100,7 @@ function startUp(): void {
mainWindow = createUi(dispatch)
rendererLogger = createRendererLogger()
-
+ void establishBrokerConnection()
mainWindow.once('closed', () => (mainWindow = null))
log.info('Fetching latest software version')
@@ -95,6 +115,7 @@ function startUp(): void {
registerRobotSystemUpdate(dispatch),
registerAppRestart(),
registerUpdateBrightness(),
+ registerNotify(dispatch, mainWindow),
]
ipcMain.on('dispatch', (_, action) => {
@@ -105,10 +126,18 @@ function startUp(): void {
log.silly('Global references', { mainWindow, rendererLogger })
ipcMain.once('dispatch', () => {
+ log.info('First dispatch, showing')
systemd.sendStatus('started')
systemd.ready()
const stopWatching = watchForMassStorage(dispatch)
ipcMain.once('quit', stopWatching)
+ // TODO: This is where we render the main window for the first time. See ui.ts
+ // in the createUI function for more.
+ if (!!!mainWindow) {
+ log.error('mainWindow went away before show')
+ } else {
+ waitForRobotServerAndShowMainWindow(dispatch, mainWindow)
+ }
})
}
diff --git a/app-shell-odd/src/notifications/connect.ts b/app-shell-odd/src/notifications/connect.ts
new file mode 100644
index 00000000000..67df09de466
--- /dev/null
+++ b/app-shell-odd/src/notifications/connect.ts
@@ -0,0 +1,121 @@
+import mqtt from 'mqtt'
+
+import { connectionStore } from './store'
+import {
+ sendDeserialized,
+ sendDeserializedGenericError,
+ deserializeExpectedMessages,
+} from './deserialize'
+import { unsubscribe } from './unsubscribe'
+import { notifyLog } from './notifyLog'
+
+import type { NotifyTopic } from '@opentrons/app/src/redux/shell/types'
+
+// MQTT is somewhat particular about the clientId format and will connect erratically if an unexpected string is supplied.
+const CLIENT_ID = 'odd-' + Math.random().toString(16).slice(2, 8) // Derived from mqttjs
+const connectOptions: mqtt.IClientOptions = {
+ clientId: CLIENT_ID,
+ port: 1883,
+ keepalive: 60,
+ protocolVersion: 5,
+ reconnectPeriod: 1000,
+ connectTimeout: 30 * 1000,
+ clean: true,
+ resubscribe: true,
+}
+
+export function connectAsync(brokerURL: string): Promise<mqtt.MqttClient> {
+ const client = mqtt.connect(brokerURL, connectOptions)
+
+ return new Promise((resolve, reject) => {
+ // Listeners added to client to trigger promise resolution
+ const promiseListeners: {
+ [key: string]: (...args: any[]) => void
+ } = {
+ connect: () => {
+ removePromiseListeners()
+ return resolve(client)
+ },
+ // A connection error event will close the connection without a retry.
+ error: (error: Error | string) => {
+ removePromiseListeners()
+ const clientEndPromise = new Promise((resolve, reject) =>
+ client.end(true, {}, () => resolve(error))
+ )
+ return clientEndPromise.then(() => reject(error))
+ },
+ end: () => promiseListeners.error(`Couldn't connect to ${brokerURL}`),
+ }
+
+ function removePromiseListeners(): void {
+ Object.keys(promiseListeners).forEach(eventName => {
+ client.removeListener(eventName, promiseListeners[eventName])
+ })
+ }
+
+ Object.keys(promiseListeners).forEach(eventName => {
+ client.on(eventName, promiseListeners[eventName])
+ })
+ })
+}
+
+export function establishListeners(): void {
+ const client = connectionStore.client as mqtt.MqttClient
+ const { ip, robotName } = connectionStore
+
+ client.on(
+ 'message',
+ (topic: NotifyTopic, message: Buffer, packet: mqtt.IPublishPacket) => {
+ deserializeExpectedMessages(message.toString())
+ .then(deserializedMessage => {
+ const messageContainsUnsubFlag = 'unsubscribe' in deserializedMessage
+ if (messageContainsUnsubFlag) {
+ void unsubscribe(topic).catch((error: Error) =>
+ notifyLog.debug(error.message)
+ )
+ }
+
+ notifyLog.debug('Received notification data from main via IPC', {
+ ip,
+ topic,
+ })
+
+ sendDeserialized(topic, deserializedMessage)
+ })
+ .catch(error => notifyLog.debug(`${error.message}`))
+ }
+ )
+
+ client.on('reconnect', () => {
+ notifyLog.debug(`Attempting to reconnect to ${robotName} on ${ip}`)
+ })
+ // handles transport layer errors only
+ client.on('error', error => {
+ notifyLog.warn(`Error - ${error.name}: ${error.message}`)
+ sendDeserializedGenericError('ALL_TOPICS')
+ client.end()
+ })
+
+ client.on('end', () => {
+ notifyLog.debug(`Closed connection to ${robotName} on ${ip}`)
+ // Marking the connection as failed with a generic error status lets the connection re-establish in the future
+ // and tells the browser to fall back to polling (assuming this 'end' event isn't caused by the app closing).
+ void connectionStore.setErrorStatus()
+ })
+
+ client.on('disconnect', packet => {
+ notifyLog.warn(
+ `Disconnected from ${robotName} on ${ip} with code ${
+ packet.reasonCode ?? 'undefined'
+ }`
+ )
+ sendDeserializedGenericError('ALL_TOPICS')
+ })
+}
+
+export function closeConnectionForcefully(): Promise<void> {
+ const { client } = connectionStore
+ return new Promise((resolve, reject) =>
+ client?.end(true, {}, () => resolve())
+ )
+}
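`connectAsync` above turns the callback/event interface of the mqtt client into a promise by installing a temporary set of listeners and removing all of them as soon as any one of them settles the promise, so a later 'error' or 'end' event cannot settle it a second time. The same pattern in isolation, against a plain Node EventEmitter rather than an mqtt client (a sketch, not code from this diff):

import { EventEmitter } from 'events'

// Resolve on 'connect', reject on 'error' or 'end'; whichever fires first
// tears down the whole temporary listener set.
function onceConnected(emitter: EventEmitter): Promise<void> {
  return new Promise((resolve, reject) => {
    const listeners: Record<string, (...args: any[]) => void> = {
      connect: () => {
        removeListeners()
        resolve()
      },
      error: (error: Error) => {
        removeListeners()
        reject(error)
      },
      end: () => listeners.error(new Error('connection closed before connect')),
    }

    function removeListeners(): void {
      Object.keys(listeners).forEach(eventName => {
        emitter.removeListener(eventName, listeners[eventName])
      })
    }

    Object.keys(listeners).forEach(eventName => {
      emitter.on(eventName, listeners[eventName])
    })
  })
}

Removing the temporary listeners before settling is what lets the real implementation attach its longer-lived 'error', 'end', and 'disconnect' handlers later in `establishListeners` without double-handling the initial connection failure.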
diff --git a/app-shell-odd/src/notifications/deserialize.ts b/app-shell-odd/src/notifications/deserialize.ts
new file mode 100644
index 00000000000..01fd4bc933b
--- /dev/null
+++ b/app-shell-odd/src/notifications/deserialize.ts
@@ -0,0 +1,62 @@
+import isEqual from 'lodash/isEqual'
+
+import { connectionStore } from './store'
+
+import type {
+ NotifyBrokerResponses,
+ NotifyRefetchData,
+ NotifyResponseData,
+ NotifyTopic,
+ NotifyUnsubscribeData,
+} from '@opentrons/app/src/redux/shell/types'
+import { FAILURE_STATUSES } from '../constants'
+
+const VALID_NOTIFY_RESPONSES: [NotifyRefetchData, NotifyUnsubscribeData] = [
+ { refetch: true },
+ { unsubscribe: true },
+]
+
+export function sendDeserialized(
+ topic: NotifyTopic,
+ message: NotifyResponseData
+): void {
+ try {
+ const browserWindow = connectionStore.getBrowserWindow()
+ browserWindow?.webContents.send(
+ 'notify',
+ connectionStore.ip,
+ topic,
+ message
+ )
+ } catch {} // Prevents shell erroring during app shutdown event.
+}
+
+export function sendDeserializedGenericError(topic: NotifyTopic): void {
+ sendDeserialized(topic, FAILURE_STATUSES.ECONNFAILED)
+}
+
+export function deserializeExpectedMessages(
+ message: string
+): Promise<NotifyBrokerResponses> {
+ return new Promise((resolve, reject) => {
+ let deserializedMessage: NotifyResponseData | Record<string, unknown>
+ const error = new Error(
+ `Unexpected data received from notify broker: ${message}`
+ )
+
+ try {
+ deserializedMessage = JSON.parse(message)
+ } catch {
+ reject(error)
+ }
+
+ const isValidNotifyResponse = VALID_NOTIFY_RESPONSES.some(model =>
+ isEqual(model, deserializedMessage)
+ )
+ if (!isValidNotifyResponse) {
+ reject(error)
+ } else {
+ resolve(JSON.parse(message))
+ }
+ })
+}
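
The deserializer only accepts broker payloads that are structurally identical to one of the allow-listed shapes; anything else rejects with the same error. A self-contained sketch of that parse-then-allow-list check (local names here are illustrative):

```ts
import isEqual from 'lodash/isEqual'

type ExpectedMessage = { refetch: true } | { unsubscribe: true }

const ALLOWED: ExpectedMessage[] = [{ refetch: true }, { unsubscribe: true }]

function parseExpected(raw: string): ExpectedMessage {
  const fail = (): never => {
    throw new Error(`Unexpected data received from notify broker: ${raw}`)
  }

  let parsed: unknown
  try {
    parsed = JSON.parse(raw)
  } catch {
    return fail()
  }
  // Only payloads that deep-equal an allow-listed shape are accepted.
  return ALLOWED.some(shape => isEqual(shape, parsed))
    ? (parsed as ExpectedMessage)
    : fail()
}

// parseExpected('{"refetch":true}') -> { refetch: true }
// parseExpected('{"anything":1}')   -> throws
```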
diff --git a/app-shell-odd/src/notifications/index.ts b/app-shell-odd/src/notifications/index.ts
new file mode 100644
index 00000000000..cce5758de72
--- /dev/null
+++ b/app-shell-odd/src/notifications/index.ts
@@ -0,0 +1,65 @@
+import { connectionStore } from './store'
+import {
+ connectAsync,
+ establishListeners,
+ closeConnectionForcefully,
+} from './connect'
+import { subscribe } from './subscribe'
+import { notifyLog } from './notifyLog'
+
+import type { BrowserWindow } from 'electron'
+import type { Action, Dispatch } from '../types'
+
+// Manages the MQTT broker connection through a connection store. Subscriptions are handled "lazily" - a component must
+// dispatch a subscribe action before a subscription request is made to the broker. Unsubscribe requests only occur if
+// the broker sends an "unsubscribe" flag. Pending subs and unsubs are used to prevent unnecessary network and broker load.
+
+export function registerNotify(
+ dispatch: Dispatch,
+ mainWindow: BrowserWindow
+): (action: Action) => unknown {
+ // Because of the ODD's start sequence, the browser window will always be defined before relevant actions occur.
+ if (connectionStore.getBrowserWindow() == null) {
+ connectionStore.setBrowserWindow(mainWindow)
+ }
+
+ return function handleAction(action: Action) {
+ switch (action.type) {
+ case 'shell:NOTIFY_SUBSCRIBE':
+ return subscribe(action.payload.topic)
+ }
+ }
+}
+
+export function establishBrokerConnection(): Promise<void> {
+ const { ip, robotName } = connectionStore
+
+ return connectAsync(`mqtt://${connectionStore.ip}`)
+ .then(client => {
+ notifyLog.debug(`Successfully connected to ${robotName} on ${ip}`)
+ void connectionStore
+ .setConnected(client)
+ .then(() => establishListeners())
+ .catch((error: Error) => notifyLog.debug(error.message))
+ })
+ .catch((error: Error) => {
+ notifyLog.warn(
+ `Failed to connect to ${robotName} on ${ip} - ${error.name}: ${error.message} `
+ )
+ void connectionStore.setErrorStatus()
+ })
+}
+
+export function closeBrokerConnection(): Promise<void> {
+ return new Promise((resolve, reject) => {
+ setTimeout(() => {
+ reject(Error('Failed to close the connection within the time limit.'))
+ }, 2000)
+
+ notifyLog.debug(
+ `Stopping notify service connection for host ${connectionStore.robotName}`
+ )
+ const closeConnection = closeConnectionForcefully()
+ closeConnection.then(resolve).catch(reject)
+ })
+}
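
`closeBrokerConnection` races the forceful disconnect against a 2-second timer by wiring both into one promise. The same race can be expressed with `Promise.race`; a sketch under the assumption that the forceful close already returns a promise (the helper name is illustrative):

```ts
function withTimeout<T>(
  promise: Promise<T>,
  ms: number,
  message: string
): Promise<T> {
  const timeout = new Promise<never>((_resolve, reject) => {
    setTimeout(() => reject(new Error(message)), ms)
  })
  // Whichever settles first wins; the other outcome is ignored.
  return Promise.race([promise, timeout])
}

// Hypothetical usage mirroring closeBrokerConnection:
// await withTimeout(
//   closeConnectionForcefully(),
//   2000,
//   'Failed to close the connection within the time limit.'
// )
```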
diff --git a/app-shell-odd/src/notifications/notifyLog.ts b/app-shell-odd/src/notifications/notifyLog.ts
new file mode 100644
index 00000000000..35507fa2c2a
--- /dev/null
+++ b/app-shell-odd/src/notifications/notifyLog.ts
@@ -0,0 +1,3 @@
+import { createLogger } from '../log'
+
+export const notifyLog = createLogger('notify')
diff --git a/app-shell-odd/src/notifications/store.ts b/app-shell-odd/src/notifications/store.ts
new file mode 100644
index 00000000000..9553fba3af4
--- /dev/null
+++ b/app-shell-odd/src/notifications/store.ts
@@ -0,0 +1,128 @@
+import type mqtt from 'mqtt'
+
+import { FAILURE_STATUSES } from '../constants'
+
+import type { NotifyTopic } from '@opentrons/app/src/redux/shell/types'
+import type { BrowserWindow } from 'electron'
+
+type FailedConnStatus = typeof FAILURE_STATUSES[keyof typeof FAILURE_STATUSES]
+
+/**
+ * @description Manages the internal state of MQTT connections to various robot hosts.
+ */
+class ConnectionStore {
+ public readonly ip = '127.0.0.1'
+
+ public readonly robotName = 'LOCALHOST'
+
+ public client: mqtt.MqttClient | null = null
+
+ private readonly subscriptions: Set<NotifyTopic> = new Set()
+
+ private readonly pendingSubs: Set<NotifyTopic> = new Set()
+
+ private readonly pendingUnsubs: Set<NotifyTopic> = new Set()
+
+ private unreachableStatus: FailedConnStatus | null = null
+
+ private browserWindow: BrowserWindow | null = null
+
+ public getBrowserWindow(): BrowserWindow | null {
+ return this.browserWindow
+ }
+
+ /**
+ * @returns {FailedConnStatus} "ECONNREFUSED" is a proxy for a port block error and is only returned once
+ * for analytics reasons. Afterward, a generic "ECONNFAILED" is returned.
+ */
+ public getFailedConnectionStatus(): FailedConnStatus | null {
+ const failureStatus = this.unreachableStatus
+ if (failureStatus === FAILURE_STATUSES.ECONNREFUSED) {
+ this.unreachableStatus = FAILURE_STATUSES.ECONNFAILED
+ }
+ return failureStatus
+ }
+
+ public setBrowserWindow(window: BrowserWindow): void {
+ this.browserWindow = window
+ }
+
+ public setConnected(client: mqtt.MqttClient): Promise<void> {
+ return new Promise((resolve, reject) => {
+ if (this.client == null) {
+ this.client = client
+ resolve()
+ } else {
+ reject(new Error(`Connection already exists for ${this.robotName}`))
+ }
+ })
+ }
+
+ /**
+ * @description Marks the host as unreachable. Don't report ECONNREFUSED, since while this is a good enough proxy
+ * for port block events, it's not perfect, and a port block event can never actually occur on the ODD.
+ */
+ public setErrorStatus(): Promise<void> {
+ return new Promise((resolve, reject) => {
+ this.unreachableStatus = FAILURE_STATUSES.ECONNFAILED
+ resolve()
+ })
+ }
+
+ public setSubStatus(
+ topic: NotifyTopic,
+ status: 'pending' | 'subscribed'
+ ): Promise<void> {
+ return new Promise((resolve, reject) => {
+ if (status === 'pending') {
+ this.pendingSubs.add(topic)
+ } else {
+ this.subscriptions.add(topic)
+ this.pendingSubs.delete(topic)
+ }
+ resolve()
+ })
+ }
+
+ public setUnsubStatus(
+ topic: NotifyTopic,
+ status: 'pending' | 'unsubscribed'
+ ): Promise<void> {
+ return new Promise((resolve, reject) => {
+ if (this.subscriptions.has(topic)) {
+ if (status === 'pending') {
+ this.pendingUnsubs.add(topic)
+ } else {
+ this.pendingUnsubs.delete(topic)
+ this.subscriptions.delete(topic)
+ }
+ }
+ resolve()
+ })
+ }
+
+ public isConnectedToBroker(): boolean {
+ return this.client?.connected ?? false
+ }
+
+ public isPendingSub(topic: NotifyTopic): boolean {
+ return this.pendingSubs.has(topic)
+ }
+
+ public isActiveSub(topic: NotifyTopic): boolean {
+ return this.subscriptions.has(topic)
+ }
+
+ public isPendingUnsub(topic: NotifyTopic): boolean {
+ return this.pendingUnsubs.has(topic)
+ }
+
+ /**
+ * @description A broker connection is terminated if it is errored or not present in the store.
+ */
+ public isConnectionTerminated(): boolean {
+ return this.unreachableStatus != null
+ }
+}
+
+export const connectionStore = new ConnectionStore()
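
The store keeps pending and active topic sets separate so duplicate subscribe/unsubscribe requests can be skipped before they ever reach the broker. A usage sketch of that lifecycle against the singleton (the demo function is illustrative, not shipped code):

```ts
import { connectionStore } from './store'

import type { NotifyTopic } from '@opentrons/app/src/redux/shell/types'

// Walks one topic through the pending -> subscribed -> unsubscribed states
// tracked by the singleton store above.
async function demoSubscriptionLifecycle(topic: NotifyTopic): Promise<void> {
  // 1. Mark the subscription as in-flight; callers seeing isPendingSub()
  //    skip issuing a duplicate SUBSCRIBE for the same topic.
  await connectionStore.setSubStatus(topic, 'pending')
  console.assert(connectionStore.isPendingSub(topic))

  // 2. On SUBACK from the broker, promote it to an active subscription.
  await connectionStore.setSubStatus(topic, 'subscribed')
  console.assert(connectionStore.isActiveSub(topic))

  // 3. Unsubscribing follows the same pending -> settled shape.
  await connectionStore.setUnsubStatus(topic, 'pending')
  await connectionStore.setUnsubStatus(topic, 'unsubscribed')
  console.assert(!connectionStore.isActiveSub(topic))
}
```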
diff --git a/app-shell-odd/src/notifications/subscribe.ts b/app-shell-odd/src/notifications/subscribe.ts
new file mode 100644
index 00000000000..6e334cb89c9
--- /dev/null
+++ b/app-shell-odd/src/notifications/subscribe.ts
@@ -0,0 +1,120 @@
+import mqtt from 'mqtt'
+
+import { connectionStore } from './store'
+import { sendDeserialized, sendDeserializedGenericError } from './deserialize'
+import { notifyLog } from './notifyLog'
+
+import type { NotifyTopic } from '@opentrons/app/src/redux/shell/types'
+
+/**
+ * @property {number} qos - "Quality of Service" level 1, "at least once" delivery. Because React Query does not trigger
+ a render update when duplicate data is received, we can avoid the additional overhead of guaranteeing "exactly once" delivery.
+ */
+const subscribeOptions: mqtt.IClientSubscribeOptions = {
+ qos: 1,
+}
+
+const CHECK_CONNECTION_INTERVAL = 500
+
+export function subscribe(topic: NotifyTopic): Promise<void> {
+ if (connectionStore.isConnectionTerminated()) {
+ const errorMessage = connectionStore.getFailedConnectionStatus()
+ if (errorMessage != null) {
+ sendDeserialized(topic, errorMessage)
+ }
+ return Promise.resolve()
+ } else {
+ return waitUntilActiveOrErrored('client')
+ .then(() => {
+ const { client } = connectionStore
+ if (client == null) {
+ return Promise.reject(new Error('Expected hostData, received null.'))
+ }
+
+ if (
+ !connectionStore.isActiveSub(topic) &&
+ !connectionStore.isPendingSub(topic)
+ ) {
+ connectionStore
+ .setSubStatus(topic, 'pending')
+ .then(
+ () =>
+ new Promise<void>(() => {
+ client.subscribe(topic, subscribeOptions, subscribeCb)
+ })
+ )
+ .catch((error: Error) => notifyLog.debug(error.message))
+ } else {
+ void waitUntilActiveOrErrored('subscription', topic).catch(
+ (error: Error) => {
+ notifyLog.debug(error.message)
+ sendDeserializedGenericError(topic)
+ }
+ )
+ }
+ })
+ .catch((error: Error) => {
+ notifyLog.debug(error.message)
+ sendDeserializedGenericError(topic)
+ })
+ }
+
+ function subscribeCb(error: Error, result: mqtt.ISubscriptionGrant[]): void {
+ const { robotName, ip } = connectionStore
+
+ if (error != null) {
+ sendDeserializedGenericError(topic)
+ notifyLog.debug(
+ `Failed to subscribe to ${robotName} on ${ip} to topic: ${topic}`
+ )
+ } else {
+ notifyLog.debug(
+ `Successfully subscribed to ${robotName} on ${ip} to topic: ${topic}`
+ )
+ connectionStore
+ .setSubStatus(topic, 'subscribed')
+ .catch((error: Error) => notifyLog.debug(error.message))
+ }
+ }
+}
+
+// Check every 500ms for 2 seconds before failing.
+function waitUntilActiveOrErrored(
+ connection: 'client' | 'subscription',
+ topic?: NotifyTopic
+): Promise<void> {
+ return new Promise((resolve, reject) => {
+ if (connection === 'subscription') {
+ if (topic == null) {
+ reject(
+ new Error(
+ 'Must specify a topic when connection is type "subscription".'
+ )
+ )
+ }
+ }
+
+ const MAX_RETRIES = 4
+ let counter = 0
+ const intervalId = setInterval(() => {
+ const hasReceivedAck =
+ connection === 'client'
+ ? connectionStore.isConnectedToBroker()
+ : connectionStore.isActiveSub(topic as NotifyTopic)
+ if (hasReceivedAck) {
+ clearInterval(intervalId)
+ resolve()
+ }
+
+ counter++
+ if (counter === MAX_RETRIES) {
+ clearInterval(intervalId)
+ reject(
+ new Error(
+ `Maximum number of retries exceeded for ${connectionStore.robotName} on ${connectionStore.ip}.`
+ )
+ )
+ }
+ }, CHECK_CONNECTION_INTERVAL)
+ })
+}
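
`waitUntilActiveOrErrored` is an interval poll: check the store every `CHECK_CONNECTION_INTERVAL` milliseconds and give up after `MAX_RETRIES` checks. The same idea in a generic form, which may make the timing easier to see (a sketch, not the shipped helper):

```ts
// Poll `predicate` every `intervalMs`; resolve once it passes, reject after
// `maxRetries` failed checks (4 checks * 500 ms = the 2-second budget above).
function pollUntil(
  predicate: () => boolean,
  intervalMs = 500,
  maxRetries = 4
): Promise<void> {
  return new Promise<void>((resolve, reject) => {
    let attempts = 0
    const intervalId = setInterval(() => {
      if (predicate()) {
        clearInterval(intervalId)
        resolve()
        return
      }
      attempts += 1
      if (attempts >= maxRetries) {
        clearInterval(intervalId)
        reject(new Error('Maximum number of retries exceeded.'))
      }
    }, intervalMs)
  })
}

// e.g. pollUntil(() => connectionStore.isConnectedToBroker())
```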
diff --git a/app-shell-odd/src/notifications/unsubscribe.ts b/app-shell-odd/src/notifications/unsubscribe.ts
new file mode 100644
index 00000000000..da9d0935ed2
--- /dev/null
+++ b/app-shell-odd/src/notifications/unsubscribe.ts
@@ -0,0 +1,36 @@
+import { connectionStore } from './store'
+import { notifyLog } from './notifyLog'
+
+import type { NotifyTopic } from '@opentrons/app/src/redux/shell/types'
+
+export function unsubscribe(topic: NotifyTopic): Promise<void> {
+ return new Promise((resolve, reject) => {
+ if (!connectionStore.isPendingUnsub(topic)) {
+ connectionStore
+ .setUnsubStatus(topic, 'pending')
+ .then(() => {
+ const { client } = connectionStore
+ if (client == null) {
+ return reject(new Error('Expected hostData, received null.'))
+ }
+
+ client.unsubscribe(topic, {}, (error, result) => {
+ const { robotName, ip } = connectionStore
+ if (error != null) {
+ notifyLog.debug(
+ `Failed to unsubscribe to ${robotName} on ${ip} from topic: ${topic}`
+ )
+ } else {
+ notifyLog.debug(
+ `Successfully unsubscribed to ${robotName} on ${ip} from topic: ${topic}`
+ )
+ connectionStore
+ .setUnsubStatus(topic, 'unsubscribed')
+ .catch((error: Error) => notifyLog.debug(error.message))
+ }
+ })
+ })
+ .catch((error: Error) => notifyLog.debug(error.message))
+ }
+ })
+}
diff --git a/app-shell-odd/src/preload.ts b/app-shell-odd/src/preload.ts
index 3748885b730..590164ce665 100644
--- a/app-shell-odd/src/preload.ts
+++ b/app-shell-odd/src/preload.ts
@@ -2,5 +2,5 @@
// defines subset of Electron API that renderer process is allowed to access
// for security reasons
import { ipcRenderer } from 'electron'
-
+// @ts-expect-error can't get TS to recognize global.d.ts
global.APP_SHELL_REMOTE = { ipcRenderer }
diff --git a/app-shell-odd/src/restart.ts b/app-shell-odd/src/restart.ts
index 9bf400b1a4b..d9bbf76836e 100644
--- a/app-shell-odd/src/restart.ts
+++ b/app-shell-odd/src/restart.ts
@@ -1,4 +1,4 @@
-import { APP_RESTART } from '@opentrons/app/src/redux/shell/actions'
+import { APP_RESTART } from './constants'
import systemd from './systemd'
import { createLogger } from './log'
diff --git a/app-shell-odd/src/system-update/__tests__/release-files.test.ts b/app-shell-odd/src/system-update/__tests__/release-files.test.ts
index 8ecafec06fd..bd2a421b910 100644
--- a/app-shell-odd/src/system-update/__tests__/release-files.test.ts
+++ b/app-shell-odd/src/system-update/__tests__/release-files.test.ts
@@ -1,10 +1,13 @@
// TODO(mc, 2020-06-11): test all release-files functions
+import { vi, describe, it, expect, afterAll } from 'vitest'
import path from 'path'
import { promises as fs } from 'fs'
import fse from 'fs-extra'
import tempy from 'tempy'
import { cleanupReleaseFiles } from '../release-files'
+vi.mock('electron-store')
+vi.mock('../../log')
describe('system release files utilities', () => {
const tempDirs: string[] = []
@@ -14,8 +17,8 @@ describe('system release files utilities', () => {
return dir
}
- afterAll(() => {
- return Promise.all(tempDirs.map(d => fse.remove(d)))
+ afterAll(async () => {
+ await Promise.all(tempDirs.map(d => fse.remove(d)))
})
describe('cleanupReleaseFiles', () => {
diff --git a/app-shell-odd/src/system-update/__tests__/release-manifest.test.ts b/app-shell-odd/src/system-update/__tests__/release-manifest.test.ts
index 28b84050df1..89091d2731c 100644
--- a/app-shell-odd/src/system-update/__tests__/release-manifest.test.ts
+++ b/app-shell-odd/src/system-update/__tests__/release-manifest.test.ts
@@ -1,55 +1,42 @@
-import { when, resetAllWhenMocks } from 'jest-when'
-import fse from 'fs-extra'
+import { describe, it, vi, beforeEach, afterEach, expect } from 'vitest'
import * as Http from '../../http'
import * as Dirs from '../directories'
import { downloadAndCacheReleaseManifest } from '../release-manifest'
-jest.mock('fs-extra')
-jest.mock('../../http')
-jest.mock('../directories')
+vi.mock('../../http')
+vi.mock('../directories')
+vi.mock('../../log')
+vi.mock('electron-store')
+const fetchJson = Http.fetchJson
+const getManifestCacheDir = Dirs.getManifestCacheDir
-const fetchJson = Http.fetchJson as jest.MockedFunction<typeof Http.fetchJson>
-const outputJson = fse.outputJson as jest.MockedFunction<typeof fse.outputJson>
-const readJson = fse.readJson as jest.MockedFunction<typeof fse.readJson>
-const getManifestCacheDir = Dirs.getManifestCacheDir as jest.MockedFunction<
- typeof Dirs.getManifestCacheDir
->
const MOCK_DIR = 'mock_dir'
const MANIFEST_URL = 'http://example.com/releases.json'
-const MOCK_MANIFEST = {}
+const MOCK_MANIFEST = {} as any
describe('release manifest utilities', () => {
beforeEach(() => {
- getManifestCacheDir.mockReturnValue(MOCK_DIR)
- when(fetchJson).calledWith(MANIFEST_URL).mockResolvedValue(MOCK_MANIFEST)
- when(outputJson)
- // @ts-expect-error outputJson takes additional optional arguments which is tweaking jest-when
- .calledWith(MOCK_DIR, MOCK_MANIFEST)
- // @ts-expect-error outputJson takes additional optional arguments which is tweaking jest-when
- .mockResolvedValue()
- when(readJson)
- // @ts-expect-error readJson takes additional optional arguments which is tweaking jest-when
- .calledWith(MOCK_DIR)
- // @ts-expect-error readJson takes additional optional arguments which is tweaking jest-when
- .mockResolvedValue(MOCK_MANIFEST)
+ vi.mocked(getManifestCacheDir).mockReturnValue(MOCK_DIR)
+ vi.mocked(fetchJson).mockResolvedValue(MOCK_MANIFEST)
})
afterEach(() => {
- resetAllWhenMocks()
- jest.resetAllMocks()
+ vi.resetAllMocks()
})
- it('should download and save the manifest from a url', () => {
- return downloadAndCacheReleaseManifest(MANIFEST_URL).then(manifest => {
- expect(manifest).toBe(MOCK_MANIFEST)
- expect(outputJson).toHaveBeenCalledWith(MOCK_DIR, MOCK_MANIFEST)
- })
+ it('should download and save the manifest from a url', async () => {
+ await expect(
+ downloadAndCacheReleaseManifest(MANIFEST_URL)
+ ).resolves.toEqual(MOCK_MANIFEST)
+ expect(fetchJson).toHaveBeenCalledWith(MANIFEST_URL)
})
- it('should pull the manifest from the file if the manifest download fails', () => {
- when(fetchJson).calledWith(MANIFEST_URL).mockRejectedValue('oh no!')
- return downloadAndCacheReleaseManifest(MANIFEST_URL).then(manifest =>
- expect(manifest).toBe(MOCK_MANIFEST)
- )
+ it('should pull the manifest from the file if the manifest download fails', async () => {
+ const error = new Error('Failed to download')
+ vi.mocked(fetchJson).mockRejectedValue(error)
+ await expect(
+ downloadAndCacheReleaseManifest(MANIFEST_URL)
+ ).resolves.toEqual(MOCK_MANIFEST)
+ expect(fetchJson).toHaveBeenCalledWith(MANIFEST_URL)
})
})
diff --git a/app-shell-odd/src/system-update/index.ts b/app-shell-odd/src/system-update/index.ts
index 15f64186e0d..9b5286c212b 100644
--- a/app-shell-odd/src/system-update/index.ts
+++ b/app-shell-odd/src/system-update/index.ts
@@ -4,7 +4,7 @@ import { ensureDir } from 'fs-extra'
import { readFile } from 'fs/promises'
import StreamZip from 'node-stream-zip'
import Semver from 'semver'
-import { UI_INITIALIZED } from '@opentrons/app/src/redux/shell/actions'
+import { UI_INITIALIZED } from '../constants'
import { createLogger } from '../log'
import {
getLatestSystemUpdateUrls,
diff --git a/app-shell-odd/src/types.ts b/app-shell-odd/src/types.ts
index 0b04485ee0f..b210a9cd399 100644
--- a/app-shell-odd/src/types.ts
+++ b/app-shell-odd/src/types.ts
@@ -4,6 +4,8 @@ import type {
Error as PlainError,
} from '@opentrons/app/src/redux/types'
import type { Logger } from '@opentrons/app/src/logger'
+
+import type { Config } from '@opentrons/app/src/redux/config/types'
export type { Action, PlainError }
export type Dispatch = (action: Action) => void
@@ -20,3 +22,116 @@ export interface Manifest {
}
}
}
+
+export type { Config }
+
+export interface Overrides {
+ [field: string]: unknown | Overrides
+}
+
+// copied types below from the app so the app shell odd does not pull in the app
+// in its bundle
+
+export type UI_INITIALIZED_TYPE = 'shell:UI_INITIALIZED'
+export type CONFIG_INITIALIZED_TYPE = 'config:INITIALIZED'
+export type CONFIG_UPDATE_VALUE_TYPE = 'config:UPDATE_VALUE'
+export type CONFIG_RESET_VALUE_TYPE = 'config:RESET_VALUE'
+export type CONFIG_TOGGLE_VALUE_TYPE = 'config:TOGGLE_VALUE'
+export type CONFIG_ADD_UNIQUE_VALUE_TYPE = 'config:ADD_UNIQUE_VALUE'
+export type CONFIG_SUBTRACT_VALUE_TYPE = 'config:SUBTRACT_VALUE'
+export type CONFIG_VALUE_UPDATED_TYPE = 'config:VALUE_UPDATED'
+
+export type POLL_TYPE = 'poll'
+export type INITIAL_TYPE = 'initial'
+export type ADD_LABWARE_TYPE = 'addLabware'
+export type DELETE_LABWARE_TYPE = 'deleteLabware'
+export type OVERWRITE_LABWARE_TYPE = 'overwriteLabware'
+export type CHANGE_DIRECTORY_TYPE = 'changeDirectory'
+
+export type FETCH_CUSTOM_LABWARE_TYPE = 'labware:FETCH_CUSTOM_LABWARE'
+export type CUSTOM_LABWARE_LIST_TYPE = 'labware:CUSTOM_LABWARE_LIST'
+export type CUSTOM_LABWARE_LIST_FAILURE_TYPE = 'labware:CUSTOM_LABWARE_LIST_FAILURE'
+export type CHANGE_CUSTOM_LABWARE_DIRECTORY_TYPE = 'labware:CHANGE_CUSTOM_LABWARE_DIRECTORY'
+export type ADD_CUSTOM_LABWARE_TYPE = 'labware:ADD_CUSTOM_LABWARE'
+export type ADD_CUSTOM_LABWARE_FILE_TYPE = 'labware:ADD_CUSTOM_LABWARE_FILE'
+export type ADD_CUSTOM_LABWARE_FAILURE_TYPE = 'labware:ADD_CUSTOM_LABWARE_FAILURE'
+export type CLEAR_ADD_CUSTOM_LABWARE_FAILURE_TYPE = 'labware:CLEAR_ADD_CUSTOM_LABWARE_FAILURE'
+export type ADD_NEW_LABWARE_NAME_TYPE = 'labware:ADD_NEW_LABWARE_NAME'
+export type CLEAR_NEW_LABWARE_NAME_TYPE = 'labware:CLEAR_NEW_LABWARE_NAME'
+export type OPEN_CUSTOM_LABWARE_DIRECTORY_TYPE = 'labware:OPEN_CUSTOM_LABWARE_DIRECTORY'
+export type DELETE_CUSTOM_LABWARE_FILE_TYPE = 'labware:DELETE_CUSTOM_LABWARE_FILE'
+export type INVALID_LABWARE_FILE_TYPE = 'INVALID_LABWARE_FILE'
+export type DUPLICATE_LABWARE_FILE_TYPE = 'DUPLICATE_LABWARE_FILE'
+export type OPENTRONS_LABWARE_FILE_TYPE = 'OPENTRONS_LABWARE_FILE'
+export type VALID_LABWARE_FILE_TYPE = 'VALID_LABWARE_FILE'
+export type OPEN_PYTHON_DIRECTORY_TYPE = 'protocol-analysis:OPEN_PYTHON_DIRECTORY'
+export type CHANGE_PYTHON_PATH_OVERRIDE_TYPE = 'protocol-analysis:CHANGE_PYTHON_PATH_OVERRIDE'
+
+export type FETCH_PROTOCOLS_TYPE = 'protocolStorage:FETCH_PROTOCOLS'
+export type UPDATE_PROTOCOL_LIST_TYPE = 'protocolStorage:UPDATE_PROTOCOL_LIST'
+export type UPDATE_PROTOCOL_LIST_FAILURE_TYPE = 'protocolStorage:UPDATE_PROTOCOL_LIST_FAILURE'
+export type ADD_PROTOCOL_TYPE = 'protocolStorage:ADD_PROTOCOL'
+export type REMOVE_PROTOCOL_TYPE = 'protocolStorage:REMOVE_PROTOCOL'
+export type ADD_PROTOCOL_FAILURE_TYPE = 'protocolStorage:ADD_PROTOCOL_FAILURE'
+export type CLEAR_ADD_PROTOCOL_FAILURE_TYPE = 'protocolStorage:CLEAR_ADD_PROTOCOL_FAILURE'
+export type OPEN_PROTOCOL_DIRECTORY_TYPE = 'protocolStorage:OPEN_PROTOCOL_DIRECTORY'
+export type ANALYZE_PROTOCOL_TYPE = 'protocolStorage:ANALYZE_PROTOCOL'
+export type ANALYZE_PROTOCOL_SUCCESS_TYPE = 'protocolStorage:ANALYZE_PROTOCOL_SUCCESS'
+export type ANALYZE_PROTOCOL_FAILURE_TYPE = 'protocolStorage:ANALYZE_PROTOCOL_FAILURE'
+export type VIEW_PROTOCOL_SOURCE_FOLDER_TYPE = 'protocolStorage:VIEW_PROTOCOL_SOURCE_FOLDER'
+
+export type PROTOCOL_ADDITION_TYPE = 'protocolAddition'
+
+export type OPENTRONS_USB_TYPE = 'opentrons-usb'
+
+export type SYSTEM_INFO_INITIALIZED_TYPE = 'systemInfo:INITIALIZED'
+
+export type USB_DEVICE_ADDED_TYPE = 'systemInfo:USB_DEVICE_ADDED'
+
+export type USB_DEVICE_REMOVED_TYPE = 'systemInfo:USB_DEVICE_REMOVED'
+
+export type NETWORK_INTERFACES_CHANGED_TYPE = 'systemInfo:NETWORK_INTERFACES_CHANGED'
+export type USB_HTTP_REQUESTS_START_TYPE = 'shell:USB_HTTP_REQUESTS_START'
+export type USB_HTTP_REQUESTS_STOP_TYPE = 'shell:USB_HTTP_REQUESTS_STOP'
+export type APP_RESTART_TYPE = 'shell:APP_RESTART'
+export type RELOAD_UI_TYPE = 'shell:RELOAD_UI'
+export type SEND_LOG_TYPE = 'shell:SEND_LOG'
+
+// copy
+// TODO(mc, 2020-05-11): i18n
+export type U2E_DRIVER_OUTDATED_MESSAGE_TYPE = 'There is an updated Realtek USB-to-Ethernet adapter driver available for your computer.'
+export type U2E_DRIVER_DESCRIPTION_TYPE = 'The OT-2 uses this adapter for its USB connection to the Opentrons App.'
+export type U2E_DRIVER_OUTDATED_CTA_TYPE = "Please update your computer's driver to ensure a reliable connection to your OT-2."
+
+export type DISCOVERY_START_TYPE = 'discovery:START'
+export type DISCOVERY_FINISH_TYPE = 'discovery:FINISH'
+export type DISCOVERY_UPDATE_LIST_TYPE = 'discovery:UPDATE_LIST'
+export type DISCOVERY_REMOVE_TYPE = 'discovery:REMOVE'
+export type CLEAR_CACHE_TYPE = 'discovery:CLEAR_CACHE'
+
+export interface ConfigInitializedAction {
+ type: CONFIG_INITIALIZED_TYPE
+ payload: { config: Config }
+}
+
+export interface ConfigValueUpdatedAction {
+ type: CONFIG_VALUE_UPDATED_TYPE
+ payload: { path: string; value: any }
+}
+
+export interface StartDiscoveryAction {
+ type: 'discovery:START'
+ payload: { timeout: number | null }
+ meta: { shell: true }
+}
+
+export interface FinishDiscoveryAction {
+ type: 'discovery:FINISH'
+ meta: { shell: true }
+}
+
+export interface RobotSystemAction {
+ type: 'shell:SEND_READY_STATUS'
+ payload: { shellReady: boolean }
+ meta: { shell: true }
+}
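
These literal types exist so the shell can reference the app's action names without importing `@opentrons/app` at runtime and dragging it into the bundle. The pattern pairs a string-literal type with a matching constant; a sketch using `UI_INITIALIZED` (assuming the shell's own `constants` module exports it, as the other hunks here import it from `./constants`):

```ts
// The type and the value share one literal, so a typo in either is a
// compile error, and no runtime import of @opentrons/app is needed.
export type UI_INITIALIZED_TYPE = 'shell:UI_INITIALIZED'
export const UI_INITIALIZED: UI_INITIALIZED_TYPE = 'shell:UI_INITIALIZED'

export interface UiInitializedAction {
  type: UI_INITIALIZED_TYPE
}

// Consumers can still narrow on the literal:
export function isUiInitialized(action: { type: string }): boolean {
  return action.type === UI_INITIALIZED
}
```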
diff --git a/app-shell-odd/src/ui.ts b/app-shell-odd/src/ui.ts
index ec5f3e1ae0a..76e3dc6df36 100644
--- a/app-shell-odd/src/ui.ts
+++ b/app-shell-odd/src/ui.ts
@@ -1,7 +1,7 @@
// sets up the main window ui
-import { app, shell, BrowserWindow } from 'electron'
+import { app, BrowserWindow } from 'electron'
import path from 'path'
-import { sendReadyStatus } from '@opentrons/app/src/redux/shell'
+import { sendReadyStatus } from './actions'
import { getConfig } from './config'
import { createLogger } from './log'
import systemd from './systemd'
@@ -44,44 +44,43 @@ const WINDOW_OPTS = {
export function createUi(dispatch: Dispatch): BrowserWindow {
log.debug('Creating main window', { options: WINDOW_OPTS })
- const mainWindow = new BrowserWindow(WINDOW_OPTS).once(
- 'ready-to-show',
- () => {
- log.debug('Main window ready to show')
- mainWindow.show()
- process.env.NODE_ENV !== 'development' &&
- waitForRobotServerAndShowMainWIndow(dispatch)
- }
- )
+ const mainWindow = new BrowserWindow(WINDOW_OPTS)
+ // TODO: In the app, we immediately do .once('ready-to-show', () => { mainWindow.show() }). We don't do that
+ // here because in electron 27.0.0 for some reason ready-to-show isn't firing, so instead we use "the app sent
+ // something via IPC" as our signifier that the window can be shown. This happens in main.ts.
+ // This is a worrying thing to have to do, and it would be good to stop doing it. We'll have to change this
+ // further when we upgrade past 27.
log.info(`Loading ${url}`)
// eslint-disable-next-line @typescript-eslint/no-floating-promises
mainWindow.loadURL(url, { extraHeaders: 'pragma: no-cache\n' })
- // open new windows (<a target="_blank">) in browser
- mainWindow.webContents.on('new-window', (event, url) => {
- log.debug('Opening external link', { url })
- event.preventDefault()
- // eslint-disable-next-line @typescript-eslint/no-floating-promises
- shell.openExternal(url)
+ // never allow external links to open
+ mainWindow.webContents.setWindowOpenHandler(() => {
+ return { action: 'deny' }
})
return mainWindow
}
-export function waitForRobotServerAndShowMainWIndow(dispatch: Dispatch): void {
- setTimeout(function () {
- systemd
- .getisRobotServerReady()
- .then((isReady: boolean) => {
- dispatch(sendReadyStatus(isReady))
- if (!isReady) {
- waitForRobotServerAndShowMainWIndow(dispatch)
- }
- })
- .catch(e => {
- log.debug('Could not get status of robot server service', { e })
- waitForRobotServerAndShowMainWIndow(dispatch)
- })
- }, 1500)
+export function waitForRobotServerAndShowMainWindow(
+ dispatch: Dispatch,
+ mainWindow: BrowserWindow
+): void {
+ mainWindow.show()
+ process.env.NODE_ENV !== 'development' &&
+ setTimeout(function () {
+ systemd
+ .getisRobotServerReady()
+ .then((isReady: boolean) => {
+ dispatch(sendReadyStatus(isReady))
+ if (!isReady) {
+ waitForRobotServerAndShowMainWindow(dispatch, mainWindow)
+ }
+ })
+ .catch(e => {
+ log.debug('Could not get status of robot server service', { e })
+ waitForRobotServerAndShowMainWindow(dispatch, mainWindow)
+ })
+ }, 1500)
}
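
The window-open change above replaces the deprecated `new-window` event with Electron's `setWindowOpenHandler`, denying every request instead of opening links externally. A minimal sketch of that deny-all policy using the documented Electron API (the wrapper function is illustrative):

```ts
import { BrowserWindow } from 'electron'

function denyAllPopups(win: BrowserWindow): void {
  // Every window.open() / target="_blank" navigation attempt is refused.
  win.webContents.setWindowOpenHandler(() => ({ action: 'deny' }))
}
```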
diff --git a/app-shell-odd/src/update.ts b/app-shell-odd/src/update.ts
index b59670f8c2b..d1ea2f154b3 100644
--- a/app-shell-odd/src/update.ts
+++ b/app-shell-odd/src/update.ts
@@ -1,8 +1,5 @@
import semver from 'semver'
-import {
- UI_INITIALIZED,
- UPDATE_BRIGHTNESS,
-} from '@opentrons/app/src/redux/shell/actions'
+import { UI_INITIALIZED, UPDATE_BRIGHTNESS } from './constants'
import { createLogger } from './log'
import { getConfig } from './config'
import {
@@ -17,11 +14,15 @@ import type { ReleaseSetUrls } from './system-update/types'
const log = createLogger('update')
-export const FLEX_MANIFEST_URL = _OPENTRONS_PROJECT_.includes('robot-stack')
- ? 'https://builds.opentrons.com/ot3-oe/releases.json'
- : 'https://ot3-development.builds.opentrons.com/ot3-oe/releases.json'
+const OPENTRONS_PROJECT: string = _OPENTRONS_PROJECT_
+
+export const FLEX_MANIFEST_URL =
+ OPENTRONS_PROJECT && OPENTRONS_PROJECT.includes('robot-stack')
+ ? 'https://builds.opentrons.com/ot3-oe/releases.json'
+ : 'https://ot3-development.builds.opentrons.com/ot3-oe/releases.json'
-let LATEST_OT_SYSTEM_VERSION = _PKG_VERSION_
+const PKG_VERSION = _PKG_VERSION_
+let LATEST_OT_SYSTEM_VERSION = PKG_VERSION
const channelFinder = (version: string, channel: string): boolean => {
// return the latest alpha/beta if a user subscribes to alpha/beta updates
@@ -59,7 +60,7 @@ export const updateLatestVersion = (): Promise<string> => {
})
.find(verson => channelFinder(verson, channel))
const changed = LATEST_OT_SYSTEM_VERSION !== latestAvailableVersion
- LATEST_OT_SYSTEM_VERSION = latestAvailableVersion ?? _PKG_VERSION_
+ LATEST_OT_SYSTEM_VERSION = latestAvailableVersion ?? PKG_VERSION
if (changed) {
log.info(
`Update: latest version available from ${FLEX_MANIFEST_URL} is ${latestAvailableVersion}`
@@ -79,7 +80,7 @@ export const getLatestVersion = (): string => {
return LATEST_OT_SYSTEM_VERSION
}
-export const getCurrentVersion = (): string => _PKG_VERSION_
+export const getCurrentVersion = (): string => PKG_VERSION
export const isUpdateAvailable = (): boolean =>
getLatestVersion() !== getCurrentVersion()
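
`channelFinder` picks the newest build whose prerelease tag matches the user's update channel (alpha/beta) and falls back to untagged releases on the stable channel. A rough sketch of that selection using the `semver` package's real `prerelease`, `valid`, and `gt` helpers; the exact matching rules in the shipped code may differ:

```ts
import semver from 'semver'

function matchesChannel(version: string, channel: string): boolean {
  const pre = semver.prerelease(version) // e.g. ['alpha', 2] or null
  if (channel === 'alpha' || channel === 'beta') {
    // Alpha/beta subscribers accept builds tagged with their channel.
    return pre?.includes(channel) ?? false
  }
  // The stable channel only accepts untagged releases.
  return pre === null
}

function latestForChannel(
  versions: string[],
  channel: string
): string | undefined {
  return versions
    .filter(v => semver.valid(v) != null)
    .sort((a, b) => (semver.gt(a, b) ? -1 : 1)) // newest first
    .find(v => matchesChannel(v, channel))
}

// latestForChannel(['7.2.2', '7.3.0-alpha.1'], 'alpha')  -> '7.3.0-alpha.1'
// latestForChannel(['7.2.2', '7.3.0-alpha.1'], 'latest') -> '7.2.2'
```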
diff --git a/app-shell-odd/src/usb.ts b/app-shell-odd/src/usb.ts
index 1d84abb733c..69629eff161 100644
--- a/app-shell-odd/src/usb.ts
+++ b/app-shell-odd/src/usb.ts
@@ -7,7 +7,7 @@ import {
robotMassStorageDeviceAdded,
robotMassStorageDeviceEnumerated,
robotMassStorageDeviceRemoved,
-} from '@opentrons/app/src/redux/shell/actions'
+} from './actions'
const FLEX_USB_MOUNT_DIR = '/media/'
const FLEX_USB_DEVICE_DIR = '/dev/'
const FLEX_USB_MOUNT_FILTER = /sd[a-z]+[0-9]+$/
diff --git a/app-shell-odd/typings/global.d.ts b/app-shell-odd/typings/global.d.ts
index 8513596d045..3b470870c2b 100644
--- a/app-shell-odd/typings/global.d.ts
+++ b/app-shell-odd/typings/global.d.ts
@@ -1,11 +1,4 @@
-import type { IpcRenderer } from 'electron'
-
declare global {
- const _PKG_VERSION_: string
- const _PKG_PRODUCT_NAME_: string
- const _PKG_BUGS_URL_: string
- const _OPENTRONS_PROJECT_: string
-
namespace NodeJS {
export interface Global {
APP_SHELL_REMOTE: {
@@ -14,3 +7,8 @@ declare global {
}
}
}
+
+declare const _PKG_VERSION_: string
+declare const _PKG_PRODUCT_NAME_: string
+declare const _PKG_BUGS_URL_: string
+declare const _OPENTRONS_PROJECT_: string
diff --git a/app-shell-odd/vite.config.ts b/app-shell-odd/vite.config.ts
new file mode 100644
index 00000000000..7848c92bd8d
--- /dev/null
+++ b/app-shell-odd/vite.config.ts
@@ -0,0 +1,89 @@
+import { versionForProject } from '../scripts/git-version.mjs'
+import pkg from './package.json'
+import path from 'path'
+import { defineConfig } from 'vite'
+import react from '@vitejs/plugin-react'
+import postCssImport from 'postcss-import'
+import postCssApply from 'postcss-apply'
+import postColorModFunction from 'postcss-color-mod-function'
+import postCssPresetEnv from 'postcss-preset-env'
+import lostCss from 'lost'
+import type { UserConfig } from 'vite'
+
+export default defineConfig(
+ async (): Promise<UserConfig> => {
+ const project = process.env.OPENTRONS_PROJECT ?? 'robot-stack'
+ const version = await versionForProject(project)
+ return {
+ publicDir: false,
+ build: {
+ // Relative to the root
+ ssr: 'src/main.ts',
+ outDir: 'lib',
+ commonjsOptions: {
+ transformMixedEsModules: true,
+ esmExternals: true,
+ },
+ lib: {
+ entry: {
+ main: 'src/main.ts',
+ preload: 'src/preload.ts',
+ },
+
+ formats: ['cjs'],
+ },
+ },
+ plugins: [
+ react({
+ include: '**/*.tsx',
+ babel: {
+ // Use babel.config.js files
+ configFile: true,
+ },
+ }),
+ ],
+ optimizeDeps: {
+ esbuildOptions: {
+ target: 'CommonJs',
+ },
+ },
+ css: {
+ postcss: {
+ plugins: [
+ postCssImport({ root: 'src/' }),
+ postCssApply(),
+ postColorModFunction(),
+ postCssPresetEnv({ stage: 0 }),
+ lostCss(),
+ ],
+ },
+ },
+ define: {
+ 'process.env': process.env,
+ global: 'globalThis',
+ _PKG_VERSION_: JSON.stringify(version),
+ _PKG_PRODUCT_NAME_: JSON.stringify(pkg.productName),
+ _PKG_BUGS_URL_: JSON.stringify(pkg.bugs.url),
+ _OPENTRONS_PROJECT_: JSON.stringify(project),
+ },
+ resolve: {
+ alias: {
+ '@opentrons/components/styles': path.resolve(
+ '../components/src/index.module.css'
+ ),
+ '@opentrons/components': path.resolve('../components/src/index.ts'),
+ '@opentrons/shared-data': path.resolve('../shared-data/js/index.ts'),
+ '@opentrons/step-generation': path.resolve(
+ '../step-generation/src/index.ts'
+ ),
+ '@opentrons/discovery-client': path.resolve(
+ '../discovery-client/src/index.ts'
+ ),
+ '@opentrons/usb-bridge/node-client': path.resolve(
+ '../usb-bridge/node-client/src/index.ts'
+ ),
+ },
+ },
+ }
+ }
+)
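
The `define` block substitutes identifiers such as `_PKG_VERSION_` with literal values at build time, which is why `typings/global.d.ts` now declares them as bare ambient constants instead of properties on `NodeJS.Global`. A small sketch of how consuming code sees those injected values (the example function is illustrative):

```ts
// Ambient declarations (see typings/global.d.ts). Nothing exists at runtime
// under these names; Vite replaces each identifier with a string literal.
declare const _PKG_VERSION_: string
declare const _OPENTRONS_PROJECT_: string

export function describeBuild(): string {
  // After `vite build` this is roughly `return "robot-stack" + "@" + "7.3.0"`.
  return `${_OPENTRONS_PROJECT_}@${_PKG_VERSION_}`
}
```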
diff --git a/app-shell-odd/webpack.config.js b/app-shell-odd/webpack.config.js
deleted file mode 100644
index c10c6569a91..00000000000
--- a/app-shell-odd/webpack.config.js
+++ /dev/null
@@ -1,44 +0,0 @@
-'use strict'
-
-const path = require('path')
-const webpackMerge = require('webpack-merge')
-const { DefinePlugin } = require('webpack')
-const { nodeBaseConfig } = require('@opentrons/webpack-config')
-const { versionForProject } = require('../scripts/git-version')
-const pkg = require('./package.json')
-
-const ENTRY_MAIN = path.join(__dirname, 'src/main.ts')
-const ENTRY_PRELOAD = path.join(__dirname, 'src/preload.ts')
-const OUTPUT_PATH = path.join(__dirname, 'lib')
-
-const project = process.env.OPENTRONS_PROJECT ?? 'robot-stack'
-
-module.exports = async () => {
- const version = await versionForProject(project)
-
- const COMMON_CONFIG = {
- output: { path: OUTPUT_PATH },
- plugins: [
- new DefinePlugin({
- _PKG_VERSION_: JSON.stringify(version),
- _PKG_PRODUCT_NAME_: JSON.stringify(pkg.productName),
- _PKG_BUGS_URL_: JSON.stringify(pkg.bugs.url),
- _OPENTRONS_PROJECT_: JSON.stringify(project),
- }),
- ],
- }
-
- return [
- // main process (runs in electron)
- webpackMerge(nodeBaseConfig, COMMON_CONFIG, {
- target: 'electron-main',
- entry: { main: ENTRY_MAIN },
- }),
-
- // preload script (runs in the browser window)
- webpackMerge(nodeBaseConfig, COMMON_CONFIG, {
- target: 'electron-preload',
- entry: { preload: ENTRY_PRELOAD },
- }),
- ]
-}
diff --git a/app-shell/Makefile b/app-shell/Makefile
index afde0f518d3..ec86ee924ff 100644
--- a/app-shell/Makefile
+++ b/app-shell/Makefile
@@ -9,7 +9,7 @@ SHELL := bash
PATH := $(shell cd .. && yarn bin):$(PATH)
# dev server port
-PORT ?= 8090
+PORT ?= 3000
# dep directories for production build
# TODO(mc, 2018-08-07): figure out a better way to do this
@@ -31,7 +31,7 @@ publish_dir := dist/publish
# make test tests=src/__tests__/http.test.ts would run only the
# specified test
tests ?= $(SRC_PATH)/src
-cov_opts ?= --coverage=true --ci=true --collectCoverageFrom='app-shell/src/**/*.(js|ts|tsx)'
+cov_opts ?= --coverage=true
test_opts ?=
# Other SSH args for robot
@@ -59,7 +59,7 @@ no_python_bundle ?=
builder := yarn electron-builder \
--config electron-builder.config.js \
- --config.electronVersion=21.3.1 \
+ --config.electronVersion=27.0.0 \
--publish never
@@ -89,7 +89,7 @@ setup:
.PHONY: clean
clean:
- shx rm -rf lib dist python
+ yarn shx rm -rf lib dist python
# artifacts
#####################################################################
@@ -97,7 +97,7 @@ clean:
.PHONY: lib
lib: export NODE_ENV := production
lib:
- webpack --profile
+ vite build
.PHONY: deps
deps:
@@ -109,6 +109,7 @@ package-deps: clean lib deps
package dist-posix dist-osx dist-linux dist-win: export NODE_ENV := production
package dist-posix dist-osx dist-linux dist-win: export BUILD_ID := $(build_id)
package dist-posix dist-osx dist-linux dist-win: export NO_PYTHON := $(if $(no_python_bundle),true,false)
+package dist-posix dist-osx dist-linux dist-win: export USE_HARD_LINKS := false
.PHONY: package
package: package-deps
@@ -182,7 +183,7 @@ dev-app-update.yml:
dev: export NODE_ENV := development
dev: export OPENTRONS_PROJECT := $(OPENTRONS_PROJECT)
dev: clean-dev-autoupdate ./dev-app-update.yml
- webpack
+ vite build
$(electron)
.PHONY: test
diff --git a/app-shell/__mocks__/usb-detection.js b/app-shell/__mocks__/usb-detection.js
deleted file mode 100644
index a982b3d9cdc..00000000000
--- a/app-shell/__mocks__/usb-detection.js
+++ /dev/null
@@ -1,14 +0,0 @@
-'use strict'
-
-const EventEmitter = require('events')
-const detector = new EventEmitter()
-
-detector.startMonitoring = jest.fn()
-detector.stopMonitoring = jest.fn()
-detector.find = jest.fn()
-
-afterEach(() => {
- detector.removeAllListeners()
-})
-
-module.exports = detector
diff --git a/app-shell/build/release-notes-internal.md b/app-shell/build/release-notes-internal.md
index 3c53342b57c..e6925397157 100644
--- a/app-shell/build/release-notes-internal.md
+++ b/app-shell/build/release-notes-internal.md
@@ -1,9 +1,53 @@
For more details about this release, please see the full [technical changelog][].
[technical change log]: https://github.com/Opentrons/opentrons/releases
+## Internal Release 1.5.0-alpha.1
+
+This internal release is from the `edge` branch to contain rapid dev on new features for 7.3.0. This release is for internal testing purposes and if used may require a factory reset of the robot to return to a stable version.
+
+
+
+---
+
+## Internal Release 1.5.0-alpha.0
+
+This internal release is from the `edge` branch to contain rapid dev on new features for 7.3.0. This release is for internal testing purposes and if used may require a factory reset of the robot to return to a stable version.
+
+
+
---
-# Internal Release 1.0.0
+## Internal Release 1.4.0-alpha.1
+
+This internal release is from the `edge` branch to contain rapid dev on new features for 7.3.0. This release is for internal testing purposes and if used may require a factory reset of the robot to return to a stable version.
+
+### Notable bug fixes
+
+App and robot update prompts should now function properly. However, updating from 1.4.0-alpha.0 to 1.4.0-alpha.1 will still present issues, as the fix is not in 1.4.0-alpha.0. After installing 1.4.0-alpha.1, switch your update channel to "latest" to receive the latest stable internal release prompt, which validates the fix.
+
+### All changes
+
+
+
+---
+
+## Internal Release 1.4.0-alpha.0
+
+This internal release is from the `edge` branch to contain rapid dev on new features for 7.3.0. This release is for internal testing purposes and if used may require a factory reset of the robot to return to a stable version.
+
+
+
+---
+
+## Internal Release 1.3.0-alpha.0
+
+This internal release is from the `edge` branch to contain rapid dev on new features for 7.3.0. This release is for internal testing purposes and if used may require a factory reset of the robot to return to a stable version.
+
+
+
+---
+
+# Internal Release 1.1.0
This is 1.0.0, an internal release for the app supporting the Opentrons Flex.
@@ -11,10 +55,13 @@ This is still pretty early in the process, so some things are known not to work,
## New Stuff In This Release
-- Support for running labware position check using the calibration adapter for added accuracy (note: not an automated flow, just uses the adapter instead of a tip)
-- Support for 96-channel pipettes in protocols
-- Early provisional support for deck configuration and trash chutes in protocols
-
+- There is now UI for configuring the loaded deck fixtures such as trash chutes on your Flex.
+- Support for analyzing python protocol API 2.16 and JSON protocol V8
+- Labware Position Check now uses the calibration probe (the same one used for pipette and module calibration) instead of a tip; this should increase the accuracy of LPC.
+- Connecting a Flex to a Wi-Fi network while the app is connected to it via USB should now work.
+- The app should generally be better about figuring out what kind of robot a protocol is for, and displaying the correct deck layout accordingly.
+
+## Known Issues
+- Labware renders are slightly askew towards the top right.
diff --git a/app-shell/build/release-notes.md b/app-shell/build/release-notes.md
index c55ecba0291..78aa19b0142 100644
--- a/app-shell/build/release-notes.md
+++ b/app-shell/build/release-notes.md
@@ -6,6 +6,123 @@ log][]. For a list of currently known issues, please see the [Opentrons issue tr
---
+## Opentrons App Changes in 7.3.0
+
+Welcome to the v7.3.0 release of the Opentrons App! This release adds support for Python protocols with runtime parameters, letting you change the behavior of a protocol each time you run it.
+
+Note: After updating, the app will prompt you to reanalyze all previously imported protocols. This is a one-time step and should not affect protocol behavior.
+
+### New Features
+
+Runtime Parameters
+
+- Available runtime parameters are shown on the protocol details screen.
+- Both the Opentrons App and touchscreen let you enter new parameter values during run setup.
+- The app highlights changed parameter values so you can confirm them before starting the run.
+- The run preview (before the run) and run log (after the run) reflect changes to steps based on your chosen parameter values.
+
+Modules in Deck Configuration
+
+- You can now specify what slots modules occupy on Flex in deck configuration.
+- When moving, Flex will avoid modules specified in deck configuration but not loaded in the protocol.
+- Deck configuration must be compatible with the protocol's requirements before you start a run.
+
+### Improved Features
+
+- Lists of robots are now sorted alphabetically.
+
+### Removals
+
+- Removed the "Use older protocol analysis method" advanced setting for OT-2. If you need this type of analysis, use `opentrons_simulate` on the command line.
+
+### Bug Fixes
+
+- All run log steps now appear in the same font size.
+- The app now properly sends custom labware definitions, along with the corresponding Python protocol, to Flex robots connected via USB.
+
+### Known Issues
+
+- Previously saved labware offset data may not be available when setting up a run via a USB connection from a Windows computer. Re-run Labware Position Check or use a Wi-Fi connection instead.
+- If you apply labware offset data for a particular type of labware, and then load a different type of labware in its place via a runtime parameter, the new labware type will have default offsets (0.0 on all axes). Re-run Labware Position Check to set offsets for the new labware.
+
+---
+
+## Opentrons App Changes in 7.2.2
+
+Welcome to the v7.2.2 release of the Opentrons App!
+
+There are no changes to the Opentrons App in v7.2.2, but it is required for updating the robot software to improve some features.
+
+---
+
+## Opentrons App Changes in 7.2.1
+
+Welcome to the v7.2.1 release of the Opentrons App!
+
+### Bug Fixes
+
+- Fixed a memory leak that could cause the app to crash.
+
+---
+
+## Opentrons App Changes in 7.2.0
+
+Welcome to the v7.2.0 release of the Opentrons App!
+
+The Linux version of the Opentrons App now requires Ubuntu 20.04 or newer.
+
+### New Features
+
+- Added a warning in case you need to manually remove tips from a pipette after power cycling the robot.
+
+### Improved Features
+
+- Commands involving the trash bin or waste chute now appear in the run preview.
+- The app will prompt you to reanalyze protocols that haven't been analyzed in such a long time that intervening changes to the app could affect their behavior.
+
+### Bug Fixes
+
+- The OT-2 now consistently applies tip length calibration. There used to be a height discrepancy between Labware Position Check and protocol runs. If you previously compensated for the inconsistent pipette height with labware offsets, re-run Labware Position Check to avoid pipette crashes.
+- The OT-2 now accurately calculates the position of the Thermocycler. If you previously compensated for the incorrect position with labware offsets, re-run Labware Position Check to avoid pipette crashes.
+
+### Known Issues
+
+- It's possible to start conflicting instrument detachment workflows when controlling one robot from multiple computers. Verify that the robot is idle before starting instrument detachment.
+- Robots may fail to reconnect after renaming them over a USB connection on Windows.
+
+---
+
+## Opentrons App Changes in 7.1.1
+
+Welcome to the v7.1.1 release of the Opentrons App!
+
+### Bug Fixes
+
+- The app properly displays Flex 1-Channel 1000 µL pipettes.
+
+---
+
+## Opentrons App Changes in 7.1.0
+
+Welcome to the v7.1.0 release of the Opentrons App! This release includes new deck and pipette functionality for Opentrons Flex, a new workflow for dropping tips after a protocol is canceled, and other improvements.
+
+### New Features
+
+- Specify the deck configuration of Flex, including the movable trash bin, waste chute, and staging area slots.
+- Resolve conflicts between the hardware a protocol requires and the current deck configuration as part of run setup.
+- Run protocols that use the Flex 96-Channel Pipette, including partial tip pickup.
+- Choose where to dispense liquid and drop tips held by a pipette when a protocol is canceled.
+
+### Improved Features
+
+- Labware Position Check on Flex uses the pipette calibration probe, instead of a tip, for greater accuracy.
+
+### Bug Fixes
+
+- Labware Position Check no longer tries to check the same labware in the same position twice, which was leading to errors.
+
+---
+
## Opentrons App Changes in 7.0.2
Welcome to the v7.0.2 release of the Opentrons App!
diff --git a/app-shell/electron-builder.config.js b/app-shell/electron-builder.config.js
index 4d265ac5e3c..aa61720338b 100644
--- a/app-shell/electron-builder.config.js
+++ b/app-shell/electron-builder.config.js
@@ -1,6 +1,5 @@
'use strict'
const path = require('path')
-const { versionForProject } = require('../scripts/git-version')
const {
OT_APP_DEPLOY_BUCKET,
@@ -25,7 +24,7 @@ const publishConfig =
module.exports = async () => ({
appId:
project === 'robot-stack' ? 'com.opentrons.app' : 'com.opentrons.appot3',
- electronVersion: '21.3.1',
+ electronVersion: '27.0.0',
npmRebuild: false,
releaseInfo: {
releaseNotesFile:
@@ -45,7 +44,9 @@ module.exports = async () => ({
},
],
extraMetadata: {
- version: await versionForProject(project),
+ version: await (
+ await import('../scripts/git-version.mjs')
+ ).versionForProject(project),
productName: project === 'robot-stack' ? 'Opentrons' : 'Opentrons-OT3',
},
extraResources: USE_PYTHON ? ['python'] : [],
diff --git a/app-shell/package.json b/app-shell/package.json
index abe24f5da46..457dc15eb55 100644
--- a/app-shell/package.json
+++ b/app-shell/package.json
@@ -29,14 +29,14 @@
]
},
"devDependencies": {
- "@opentrons/app": "link:../app",
- "@opentrons/discovery-client": "link:../discovery-client",
- "@opentrons/shared-data": "link:../shared-data",
- "@opentrons/usb-bridge/node-client": "link:../usb-bridge/node-client",
"electron-notarize": "^1.2.1",
"electron-publisher-s3": "^20.17.2"
},
"dependencies": {
+ "@opentrons/app": "link:../app",
+ "@opentrons/discovery-client": "link:../discovery-client",
+ "@opentrons/shared-data": "link:../shared-data",
+ "@opentrons/usb-bridge/node-client": "link:../usb-bridge/node-client",
"@thi.ng/paths": "1.6.5",
"@types/dateformat": "^3.0.1",
"@types/fs-extra": "9.0.13",
@@ -44,9 +44,12 @@
"@types/pump": "^1.1.0",
"@types/uuid": "^3.4.7",
"ajv": "6.12.3",
+ "axios": "^0.21.1",
"dateformat": "3.0.3",
- "electron-context-menu": "^3.5.0",
+ "electron-context-menu": "3.6.1",
"electron-debug": "3.0.1",
+ "electron-is-dev": "1.2.0",
+ "electron-localshortcut": "3.2.1",
"electron-devtools-installer": "3.2.0",
"electron-store": "5.1.1",
"electron-updater": "4.1.2",
@@ -54,18 +57,18 @@
"form-data": "2.5.0",
"fs-extra": "10.0.0",
"get-stream": "5.1.0",
+ "lodash": "4.17.21",
"merge-options": "1.0.1",
+ "mqtt": "4.3.8",
"node-fetch": "2.6.7",
"node-stream-zip": "1.8.2",
"pump": "3.0.0",
"semver": "5.5.0",
"serialport": "10.5.0",
"tempy": "1.0.1",
+ "usb": "^2.11.0",
"uuid": "3.2.1",
"winston": "3.1.0",
"yargs-parser": "13.1.2"
- },
- "optionalDependencies": {
- "usb-detection": "4.14.1"
}
}
diff --git a/app-shell/src/__fixtures__/config.ts b/app-shell/src/__fixtures__/config.ts
new file mode 100644
index 00000000000..640fa1df429
--- /dev/null
+++ b/app-shell/src/__fixtures__/config.ts
@@ -0,0 +1,270 @@
+import type {
+ ConfigV0,
+ ConfigV1,
+ ConfigV2,
+ ConfigV3,
+ ConfigV4,
+ ConfigV5,
+ ConfigV6,
+ ConfigV7,
+ ConfigV8,
+ ConfigV9,
+ ConfigV10,
+ ConfigV11,
+ ConfigV12,
+ ConfigV13,
+ ConfigV14,
+ ConfigV15,
+ ConfigV16,
+ ConfigV17,
+ ConfigV18,
+ ConfigV19,
+ ConfigV20,
+ ConfigV21,
+} from '@opentrons/app/src/redux/config/types'
+
+export const MOCK_CONFIG_V0: ConfigV0 = {
+ version: 0, // Default key added on boot if missing in configs
+ devtools: false,
+ reinstallDevtools: false,
+ update: {
+ channel: 'latest',
+ },
+ buildroot: {
+ manifestUrl:
+ 'https://opentrons-buildroot-ci.s3.us-east-2.amazonaws.com/releases.json',
+ },
+ log: {
+ level: {
+ file: 'debug',
+ console: 'info',
+ },
+ },
+ ui: {
+ width: 1024,
+ height: 768,
+ url: {
+ protocol: 'file:',
+ path: 'ui/index.html',
+ },
+ webPreferences: {
+ webSecurity: true,
+ },
+ },
+ analytics: {
+ appId: 'mock-mixpanel-id',
+ optedIn: true,
+ seenOptIn: false,
+ },
+
+ // deprecated warning flag
+ p10WarningSeen: {
+ 'some-id': true,
+ },
+
+ // user support (intercom)
+ support: {
+ userId: 'mock-intercom-id',
+ createdAt: 1589744281,
+ name: 'Unknown User',
+ email: null,
+ },
+ discovery: {
+ candidates: [],
+ },
+ labware: {
+ directory: '/Users/ot/Library/Application Support/Opentrons/labware',
+ },
+ alerts: {
+ ignored: [],
+ },
+}
+
+export const MOCK_CONFIG_V1: ConfigV1 = {
+ ...MOCK_CONFIG_V0,
+ version: 1,
+ discovery: {
+ ...MOCK_CONFIG_V0.discovery,
+ disableCache: false,
+ },
+}
+
+export const MOCK_CONFIG_V2: ConfigV2 = {
+ ...MOCK_CONFIG_V1,
+ version: 2,
+ calibration: {
+ useTrashSurfaceForTipCal: null,
+ },
+}
+
+export const MOCK_CONFIG_V3: ConfigV3 = {
+ ...MOCK_CONFIG_V2,
+ version: 3,
+ support: {
+ ...MOCK_CONFIG_V2.support,
+ name: null,
+ email: null,
+ },
+}
+
+export const MOCK_CONFIG_V4: ConfigV4 = {
+ ...MOCK_CONFIG_V3,
+ version: 4,
+ labware: {
+ ...MOCK_CONFIG_V3.labware,
+ showLabwareOffsetCodeSnippets: false,
+ },
+}
+
+export const MOCK_CONFIG_V5: ConfigV5 = {
+ ...MOCK_CONFIG_V4,
+ version: 5,
+ python: {
+ pathToPythonOverride: null,
+ },
+}
+
+export const MOCK_CONFIG_V6: ConfigV6 = {
+ ...MOCK_CONFIG_V5,
+ version: 6,
+ modules: {
+ heaterShaker: {
+ isAttached: false,
+ },
+ },
+}
+
+export const MOCK_CONFIG_V7: ConfigV7 = {
+ ...MOCK_CONFIG_V6,
+ version: 7,
+ ui: {
+ ...MOCK_CONFIG_V6.ui,
+ width: 800,
+ minWidth: 600,
+ height: 760,
+ },
+}
+
+export const MOCK_CONFIG_V8: ConfigV8 = {
+ ...MOCK_CONFIG_V7,
+ version: 8,
+ ui: {
+ ...MOCK_CONFIG_V7.ui,
+ width: 1024,
+ height: 768,
+ },
+}
+
+export const MOCK_CONFIG_V9: ConfigV9 = {
+ ...MOCK_CONFIG_V8,
+ version: 9,
+ isOnDevice: false,
+}
+
+export const MOCK_CONFIG_V10: ConfigV10 = {
+ ...MOCK_CONFIG_V9,
+ version: 10,
+ protocols: { sendAllProtocolsToOT3: false },
+}
+
+export const MOCK_CONFIG_V11: ConfigV11 = {
+ ...MOCK_CONFIG_V10,
+ version: 11,
+ protocols: {
+ ...MOCK_CONFIG_V10.protocols,
+ protocolsStoredSortKey: null,
+ },
+}
+
+export const MOCK_CONFIG_V12: ConfigV12 = (() => {
+ const { buildroot, ...restOfV11Config } = { ...MOCK_CONFIG_V11 } as ConfigV11
+ return {
+ ...restOfV11Config,
+ version: 12 as const,
+ robotSystemUpdate: {
+ manifestUrls: {
+ OT2: 'some-fake-manifest',
+ OT3: 'some-fake-manifest-ot3',
+ },
+ },
+ }
+})()
+
+export const MOCK_CONFIG_V13: ConfigV13 = {
+ ...MOCK_CONFIG_V12,
+ version: 13,
+ protocols: {
+ ...MOCK_CONFIG_V12.protocols,
+ protocolsOnDeviceSortKey: null,
+ },
+}
+
+export const MOCK_CONFIG_V14: ConfigV14 = {
+ ...MOCK_CONFIG_V13,
+ version: 14,
+ protocols: {
+ ...MOCK_CONFIG_V13.protocols,
+ pinnedProtocolIds: [],
+ },
+}
+
+export const MOCK_CONFIG_V15: ConfigV15 = {
+ ...MOCK_CONFIG_V14,
+ version: 15,
+ onDeviceDisplaySettings: {
+ sleepMs: 60 * 1000 * 60 * 24 * 7,
+ brightness: 4,
+ textSize: 1,
+ },
+}
+
+export const MOCK_CONFIG_V16: ConfigV16 = {
+ ...MOCK_CONFIG_V15,
+ version: 16,
+ onDeviceDisplaySettings: {
+ ...MOCK_CONFIG_V15.onDeviceDisplaySettings,
+ unfinishedUnboxingFlowRoute: null,
+ },
+}
+
+export const MOCK_CONFIG_V17: ConfigV17 = {
+ ...MOCK_CONFIG_V16,
+ version: 17,
+ protocols: {
+ ...MOCK_CONFIG_V16.protocols,
+ applyHistoricOffsets: true,
+ },
+}
+
+export const MOCK_CONFIG_V18: ConfigV18 = {
+ ...(() => {
+ const { robotSystemUpdate, version, ...rest } = MOCK_CONFIG_V17
+ return rest
+ })(),
+ version: 18,
+}
+
+export const MOCK_CONFIG_V19: ConfigV19 = {
+ ...MOCK_CONFIG_V18,
+ version: 19,
+ update: {
+ ...MOCK_CONFIG_V18.update,
+ hasJustUpdated: false,
+ },
+}
+
+export const MOCK_CONFIG_V20: ConfigV20 = {
+ ...MOCK_CONFIG_V19,
+ version: 20,
+ robotSystemUpdate: {
+ manifestUrls: {
+ OT2:
+ 'https://opentrons-buildroot-ci.s3.us-east-2.amazonaws.com/releases.json',
+ },
+ },
+}
+
+export const MOCK_CONFIG_V21: ConfigV21 = {
+ ...MOCK_CONFIG_V20,
+ version: 21,
+}
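
Each `MOCK_CONFIG_VN` fixture spreads the previous version and layers on only the keys that migration introduced (or strips the ones it dropped), mirroring how the real config migrations compose. A hedged sketch of a single step in that shape; field names other than `version` are illustrative, not the app's actual schema:

```ts
interface ConfigV20Like {
  version: 20
  update: { channel: string; hasJustUpdated: boolean }
}

interface ConfigV21Like extends Omit<ConfigV20Like, 'version'> {
  version: 21
}

// A migration step only touches what changed between versions; everything
// else is carried forward by the spread (V21 above only bumps the number).
function toVersion21(prev: ConfigV20Like): ConfigV21Like {
  return {
    ...prev,
    version: 21,
  }
}
```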
diff --git a/app-shell/src/__fixtures__/index.ts b/app-shell/src/__fixtures__/index.ts
new file mode 100644
index 00000000000..90f50c9a737
--- /dev/null
+++ b/app-shell/src/__fixtures__/index.ts
@@ -0,0 +1,2 @@
+export * from './config'
+export * from './robots'
diff --git a/app-shell/src/__fixtures__/robots.ts b/app-shell/src/__fixtures__/robots.ts
new file mode 100644
index 00000000000..183dc7d0ff3
--- /dev/null
+++ b/app-shell/src/__fixtures__/robots.ts
@@ -0,0 +1,123 @@
+import { HEALTH_STATUS_NOT_OK, HEALTH_STATUS_OK } from '../constants'
+
+export const mockLegacyHealthResponse = {
+ name: 'opentrons-dev',
+ api_version: '1.2.3',
+ fw_version: '4.5.6',
+ system_version: '7.8.9',
+ robot_model: 'OT-2 Standard',
+}
+
+export const mockLegacyServerHealthResponse = {
+ name: 'opentrons-dev',
+ apiServerVersion: '1.2.3',
+ serialNumber: '12345',
+ updateServerVersion: '1.2.3',
+ smoothieVersion: '4.5.6',
+ systemVersion: '7.8.9',
+}
+
+export const MOCK_DISCOVERY_ROBOTS = [
+ {
+ name: 'opentrons-dev',
+ health: mockLegacyHealthResponse,
+ serverHealth: mockLegacyServerHealthResponse,
+ addresses: [
+ {
+ ip: '10.14.19.50',
+ port: 31950,
+ seen: true,
+ healthStatus: HEALTH_STATUS_OK,
+ serverHealthStatus: HEALTH_STATUS_OK,
+ healthError: null,
+ serverHealthError: null,
+ advertisedModel: null,
+ },
+ ],
+ },
+ {
+ name: 'opentrons-dev2',
+ health: mockLegacyHealthResponse,
+ serverHealth: mockLegacyServerHealthResponse,
+ addresses: [
+ {
+ ip: '10.14.19.51',
+ port: 31950,
+ seen: true,
+ healthStatus: HEALTH_STATUS_OK,
+ serverHealthStatus: HEALTH_STATUS_OK,
+ healthError: null,
+ serverHealthError: null,
+ advertisedModel: null,
+ },
+ ],
+ },
+ {
+ name: 'opentrons-dev3',
+ health: mockLegacyHealthResponse,
+ serverHealth: mockLegacyServerHealthResponse,
+ addresses: [
+ {
+ ip: '10.14.19.52',
+ port: 31950,
+ seen: true,
+ healthStatus: HEALTH_STATUS_NOT_OK,
+ serverHealthStatus: HEALTH_STATUS_NOT_OK,
+ healthError: null,
+ serverHealthError: null,
+ advertisedModel: null,
+ },
+ ],
+ },
+ {
+ name: 'opentrons-dev4',
+ health: mockLegacyHealthResponse,
+ serverHealth: mockLegacyServerHealthResponse,
+ addresses: [
+ {
+ ip: '10.14.19.53',
+ port: 31950,
+ seen: true,
+ healthStatus: HEALTH_STATUS_OK,
+ serverHealthStatus: HEALTH_STATUS_OK,
+ healthError: null,
+ serverHealthError: null,
+ advertisedModel: null,
+ },
+ ],
+ },
+]
+
+export const MOCK_STORE_ROBOTS = [
+ {
+ robotName: 'opentrons-dev',
+ ip: '10.14.19.50',
+ },
+ {
+ robotName: 'opentrons-dev2',
+ ip: '10.14.19.51',
+ },
+ {
+ robotName: 'opentrons-dev3',
+ ip: '10.14.19.52',
+ },
+ {
+ robotName: 'opentrons-dev4',
+ ip: '10.14.19.53',
+ },
+]
+
+export const MOCK_HEALTHY_ROBOTS = [
+ {
+ robotName: 'opentrons-dev',
+ ip: '10.14.19.50',
+ },
+ {
+ robotName: 'opentrons-dev2',
+ ip: '10.14.19.51',
+ },
+ {
+ robotName: 'opentrons-dev4',
+ ip: '10.14.19.53',
+ },
+]
diff --git a/app-shell/src/__mocks__/log.ts b/app-shell/src/__mocks__/log.ts
deleted file mode 100644
index eb498dd5963..00000000000
--- a/app-shell/src/__mocks__/log.ts
+++ /dev/null
@@ -1,4 +0,0 @@
-// mock logger
-// NOTE: importing mock to avoid copy-paste
-// eslint-disable-next-line jest/no-mocks-import
-export * from '@opentrons/app/src/__mocks__/logger'
diff --git a/app-shell/src/__tests__/discovery.test.ts b/app-shell/src/__tests__/discovery.test.ts
index fa1236e9df5..166020c2125 100644
--- a/app-shell/src/__tests__/discovery.test.ts
+++ b/app-shell/src/__tests__/discovery.test.ts
@@ -2,7 +2,7 @@
import { app } from 'electron'
import Store from 'electron-store'
import noop from 'lodash/noop'
-import { when } from 'jest-when'
+import { vi, it, expect, describe, beforeEach, afterEach } from 'vitest'
import * as DiscoveryClient from '@opentrons/discovery-client'
import {
@@ -12,78 +12,84 @@ import {
import { registerDiscovery } from '../discovery'
import * as Cfg from '../config'
import * as SysInfo from '../system-info'
+import { getSerialPortHttpAgent } from '../usb'
+
+vi.mock('electron')
+vi.mock('electron-store')
+vi.mock('../usb')
+vi.mock('@opentrons/discovery-client')
+vi.mock('../config')
+vi.mock('../system-info')
+vi.mock('../log', () => {
+ return {
+ createLogger: () => {
+ return { debug: () => null }
+ },
+ }
+})
+vi.mock('../notifications')
-jest.mock('electron')
-jest.mock('electron-store')
-jest.mock('@opentrons/discovery-client')
-jest.mock('../config')
-jest.mock('../system-info')
-
-const createDiscoveryClient = DiscoveryClient.createDiscoveryClient as jest.MockedFunction<
- typeof DiscoveryClient.createDiscoveryClient
->
-
-const getFullConfig = Cfg.getFullConfig as jest.MockedFunction<
- typeof Cfg.getFullConfig
->
-
-const getOverrides = Cfg.getOverrides as jest.MockedFunction<
- typeof Cfg.getOverrides
->
-
-const handleConfigChange = Cfg.handleConfigChange as jest.MockedFunction<
- typeof Cfg.handleConfigChange
->
-
-const createNetworkInterfaceMonitor = SysInfo.createNetworkInterfaceMonitor as jest.MockedFunction<
- typeof SysInfo.createNetworkInterfaceMonitor
->
-
-const appOnce = app.once as jest.MockedFunction<typeof app.once>
-
-const MockStore = Store as jest.MockedClass<typeof Store>
-
+let mockGet = vi.fn(property => {
+ return []
+})
+let mockOnDidChange = vi.fn()
+let mockDelete = vi.fn()
+let mockSet = vi.fn()
describe('app-shell/discovery', () => {
- const dispatch = jest.fn()
+ const dispatch = vi.fn()
const mockClient = {
- start: jest.fn(),
- stop: jest.fn(),
- getRobots: jest.fn(),
- removeRobot: jest.fn(),
+ start: vi.fn(),
+ stop: vi.fn(),
+ getRobots: vi.fn(),
+ removeRobot: vi.fn(),
}
const emitListChange = (): void => {
- const lastCall =
- createDiscoveryClient.mock.calls[
- createDiscoveryClient.mock.calls.length - 1
- ]
+ const lastCall = vi.mocked(DiscoveryClient.createDiscoveryClient).mock
+ .calls[
+ vi.mocked(DiscoveryClient.createDiscoveryClient).mock.calls.length - 1
+ ]
const { onListChange } = lastCall[0]
onListChange([])
}
beforeEach(() => {
- getFullConfig.mockReturnValue(({
+ mockGet = vi.fn(property => {
+ return []
+ })
+ mockDelete = vi.fn()
+ mockOnDidChange = vi.fn()
+ mockSet = vi.fn()
+ vi.mocked(Store).mockImplementation(() => {
+ return {
+ get: mockGet,
+ set: mockSet,
+ delete: mockDelete,
+ onDidAnyChange: mockOnDidChange,
+ } as any
+ })
+ vi.mocked(Cfg.getFullConfig).mockReturnValue(({
discovery: { disableCache: false, candidates: [] },
} as unknown) as Cfg.Config)
- getOverrides.mockReturnValue({})
- createNetworkInterfaceMonitor.mockReturnValue({ stop: noop })
- createDiscoveryClient.mockReturnValue(mockClient)
-
- when(MockStore.prototype.get).calledWith('robots', []).mockReturnValue([])
- when(MockStore.prototype.get)
- .calledWith('services', null)
- .mockReturnValue(null)
+ vi.mocked(Cfg.getOverrides).mockReturnValue({})
+ vi.mocked(SysInfo.createNetworkInterfaceMonitor).mockReturnValue({
+ stop: noop,
+ })
+ vi.mocked(DiscoveryClient.createDiscoveryClient).mockReturnValue(mockClient)
+ vi.mocked(getSerialPortHttpAgent).mockReturnValue({} as any)
})
afterEach(() => {
- jest.resetAllMocks()
+ vi.resetAllMocks()
})
it('registerDiscovery creates a DiscoveryClient', () => {
registerDiscovery(dispatch)
- expect(createDiscoveryClient).toHaveBeenCalledWith(
+ expect(
+ vi.mocked(DiscoveryClient.createDiscoveryClient)
+ ).toHaveBeenCalledWith(
expect.objectContaining({
onListChange: expect.any(Function),
})
@@ -103,14 +109,14 @@ describe('app-shell/discovery', () => {
})
it('calls client.stop when electron app emits "will-quit"', () => {
- expect(appOnce).toHaveBeenCalledTimes(0)
+ expect(vi.mocked(app.once)).toHaveBeenCalledTimes(0)
registerDiscovery(dispatch)
expect(mockClient.stop).toHaveBeenCalledTimes(0)
- expect(appOnce).toHaveBeenCalledTimes(1)
+ expect(vi.mocked(app.once)).toHaveBeenCalledTimes(1)
- const [event, handler] = appOnce.mock.calls[0]
+ const [event, handler] = vi.mocked(app.once).mock.calls[0]
expect(event).toEqual('will-quit')
// trigger event handler
@@ -176,7 +182,7 @@ describe('app-shell/discovery', () => {
mockClient.getRobots.mockReturnValue([{ name: 'foo' }, { name: 'bar' }])
emitListChange()
- expect(MockStore.prototype.set).toHaveBeenLastCalledWith('robots', [
+ expect(vi.mocked(mockSet)).toHaveBeenLastCalledWith('robots', [
{ name: 'foo' },
{ name: 'bar' },
])
@@ -185,9 +191,9 @@ describe('app-shell/discovery', () => {
it('loads robots from cache on client initialization', () => {
const mockRobot = { name: 'foo' }
- MockStore.prototype.get.mockImplementation(key => {
+ vi.mocked(mockGet).mockImplementation((key: string) => {
if (key === 'robots') return [mockRobot]
- return null
+ return null as any
})
registerDiscovery(dispatch)
@@ -271,13 +277,13 @@ describe('app-shell/discovery', () => {
},
]
- MockStore.prototype.get.mockImplementation(key => {
+ vi.mocked(mockGet).mockImplementation((key: string) => {
if (key === 'services') return services
- return null
+ return null as any
})
registerDiscovery(dispatch)
- expect(MockStore.prototype.delete).toHaveBeenCalledWith('services')
+ expect(mockDelete).toHaveBeenCalledWith('services')
expect(mockClient.start).toHaveBeenCalledWith(
expect.objectContaining({
initialRobots: [
@@ -347,7 +353,7 @@ describe('app-shell/discovery', () => {
it('does not update services from store when caching disabled', () => {
// cache has been disabled
- getFullConfig.mockReturnValue(({
+ vi.mocked(Cfg.getFullConfig).mockReturnValue(({
discovery: {
candidates: [],
disableCache: true,
@@ -355,9 +361,9 @@ describe('app-shell/discovery', () => {
} as unknown) as Cfg.Config)
// discovery.json contains 1 entry
- MockStore.prototype.get.mockImplementation(key => {
+ mockGet.mockImplementation((key: string) => {
if (key === 'robots') return [{ name: 'foo' }]
- return null
+ return null as any
})
registerDiscovery(dispatch)
@@ -372,7 +378,7 @@ describe('app-shell/discovery', () => {
it('should clear cache and suspend caching when caching becomes disabled', () => {
// Cache enabled initially
- getFullConfig.mockReturnValue(({
+ vi.mocked(Cfg.getFullConfig).mockReturnValue(({
discovery: {
candidates: [],
disableCache: false,
@@ -380,33 +386,33 @@ describe('app-shell/discovery', () => {
} as unknown) as Cfg.Config)
// discovery.json contains 1 entry
- MockStore.prototype.get.mockImplementation(key => {
+ mockGet.mockImplementation((key: string) => {
if (key === 'robots') return [{ name: 'foo' }]
- return null
+ return null as any
})
registerDiscovery(dispatch)
// the 'discovery.disableCache' change handler
- const changeHandler = handleConfigChange.mock.calls[1][1]
+ const changeHandler = vi.mocked(Cfg.handleConfigChange).mock.calls[1][1]
const disableCache = true
changeHandler(disableCache, false)
- expect(MockStore.prototype.set).toHaveBeenCalledWith('robots', [])
+ expect(mockSet).toHaveBeenCalledWith('robots', [])
// new services discovered
- MockStore.prototype.set.mockClear()
+ mockSet.mockClear()
mockClient.getRobots.mockReturnValue([{ name: 'foo' }, { name: 'bar' }])
emitListChange()
// but discovery.json should not update
- expect(MockStore.prototype.set).toHaveBeenCalledTimes(0)
+ expect(mockSet).toHaveBeenCalledTimes(0)
})
})
describe('manual addresses', () => {
it('loads candidates from config on client initialization', () => {
- getFullConfig.mockReturnValue(({
+ vi.mocked(Cfg.getFullConfig).mockReturnValue(({
discovery: { cacheDisabled: false, candidates: ['1.2.3.4'] },
} as unknown) as Cfg.Config)
@@ -423,7 +429,7 @@ describe('app-shell/discovery', () => {
// ensures config override works with only one candidate specified
it('candidates in config can be single string value', () => {
- getFullConfig.mockReturnValue(({
+ vi.mocked(Cfg.getFullConfig).mockReturnValue(({
discovery: { cacheDisabled: false, candidates: '1.2.3.4' },
} as unknown) as Cfg.Config)
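The recurring change in this file repeats throughout the diff: `jest.MockedFunction` casts and `jest-when` stubs are replaced by vitest's `vi.mocked()` helper. A short hedged sketch of the pattern, with an illustrative module path and return value rather than code taken verbatim from the diff:

    import { vi } from 'vitest'
    import { getFullConfig } from '../config'

    vi.mock('../config')

    // vi.mocked() attaches mock typings to the auto-mocked import, so no
    // `as jest.MockedFunction<typeof getFullConfig>` cast is needed
    vi.mocked(getFullConfig).mockReturnValue({
      discovery: { disableCache: false, candidates: [] },
    } as any)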
diff --git a/app-shell/src/__tests__/http.test.ts b/app-shell/src/__tests__/http.test.ts
index 3016a66b6f9..5bb4c6675d7 100644
--- a/app-shell/src/__tests__/http.test.ts
+++ b/app-shell/src/__tests__/http.test.ts
@@ -1,19 +1,18 @@
import fetch from 'node-fetch'
import isError from 'lodash/isError'
+import { describe, it, expect, vi, beforeEach } from 'vitest'
import { HTTP_API_VERSION } from '@opentrons/app/src/redux/robot-api/constants'
import * as Http from '../http'
import type { Request, Response } from 'node-fetch'
-jest.mock('../config')
-jest.mock('node-fetch')
-
-const mockFetch = fetch as jest.MockedFunction<typeof fetch>
+vi.mock('../config')
+vi.mock('node-fetch')
describe('app-shell main http module', () => {
beforeEach(() => {
- jest.clearAllMocks()
+ vi.clearAllMocks()
})
const SUCCESS_SPECS = [
@@ -84,12 +83,12 @@ describe('app-shell main http module', () => {
const { name, method, request, requestOptions, response, expected } = spec
it(`it should handle when ${name}`, () => {
- mockFetch.mockResolvedValueOnce((response as unknown) as Response)
+ vi.mocked(fetch).mockResolvedValueOnce((response as unknown) as Response)
// @ts-expect-error(mc, 2021-02-17): rewrite as integration tests and
// avoid mocking node-fetch
return method((request as unknown) as Request).then((result: string) => {
- expect(mockFetch).toHaveBeenCalledWith(request, requestOptions)
+ expect(vi.mocked(fetch)).toHaveBeenCalledWith(request, requestOptions)
expect(result).toEqual(expected)
})
})
@@ -100,9 +99,11 @@ describe('app-shell main http module', () => {
it(`it should handle when ${name}`, () => {
if (isError(response)) {
- mockFetch.mockRejectedValueOnce(response)
+ vi.mocked(fetch).mockRejectedValueOnce(response)
} else {
- mockFetch.mockResolvedValueOnce((response as unknown) as Response)
+ vi.mocked(fetch).mockResolvedValueOnce(
+ (response as unknown) as Response
+ )
}
return expect(method((request as unknown) as Request)).rejects.toThrow(
diff --git a/app-shell/src/__tests__/update.test.ts b/app-shell/src/__tests__/update.test.ts
index c131318ea5b..19d22e65b8f 100644
--- a/app-shell/src/__tests__/update.test.ts
+++ b/app-shell/src/__tests__/update.test.ts
@@ -1,45 +1,44 @@
// app-shell self-update tests
import * as ElectronUpdater from 'electron-updater'
+import { describe, it, vi, beforeEach, afterEach, expect } from 'vitest'
import { UPDATE_VALUE } from '@opentrons/app/src/redux/config'
import { registerUpdate } from '../update'
import * as Cfg from '../config'
import type { Dispatch } from '../types'
-jest.unmock('electron-updater')
-jest.mock('electron-updater')
-jest.mock('../log')
-jest.mock('../config')
-
-const getConfig = Cfg.getConfig as jest.MockedFunction<typeof Cfg.getConfig>
-
-const autoUpdater = ElectronUpdater.autoUpdater as jest.Mocked<
- typeof ElectronUpdater.autoUpdater
->
+vi.unmock('electron-updater')
+vi.mock('electron-updater')
+vi.mock('../log')
+vi.mock('../config')
describe('update', () => {
let dispatch: Dispatch
let handleAction: Dispatch
beforeEach(() => {
- dispatch = jest.fn()
+ dispatch = vi.fn()
handleAction = registerUpdate(dispatch)
})
afterEach(() => {
- jest.resetAllMocks()
+ vi.resetAllMocks()
;(ElectronUpdater as any).__mockReset()
})
it('handles shell:CHECK_UPDATE with available update', () => {
- getConfig.mockReturnValue('dev' as any)
+ vi.mocked(Cfg.getConfig).mockReturnValue('dev' as any)
handleAction({ type: 'shell:CHECK_UPDATE', meta: { shell: true } })
- expect(getConfig).toHaveBeenCalledWith('update.channel')
- expect(autoUpdater.channel).toEqual('dev')
- expect(autoUpdater.checkForUpdates).toHaveBeenCalledTimes(1)
+ expect(vi.mocked(Cfg.getConfig)).toHaveBeenCalledWith('update.channel')
+ expect(vi.mocked(ElectronUpdater.autoUpdater).channel).toEqual('dev')
+ expect(
+ vi.mocked(ElectronUpdater.autoUpdater).checkForUpdates
+ ).toHaveBeenCalledTimes(1)
- autoUpdater.emit('update-available', { version: '1.0.0' })
+ vi.mocked(ElectronUpdater.autoUpdater).emit('update-available', {
+ version: '1.0.0',
+ })
expect(dispatch).toHaveBeenCalledWith({
type: 'shell:CHECK_UPDATE_RESULT',
@@ -49,7 +48,9 @@ describe('update', () => {
it('handles shell:CHECK_UPDATE with no available update', () => {
handleAction({ type: 'shell:CHECK_UPDATE', meta: { shell: true } })
- autoUpdater.emit('update-not-available', { version: '1.0.0' })
+ vi.mocked(ElectronUpdater.autoUpdater).emit('update-not-available', {
+ version: '1.0.0',
+ })
expect(dispatch).toHaveBeenCalledWith({
type: 'shell:CHECK_UPDATE_RESULT',
@@ -59,7 +60,7 @@ describe('update', () => {
it('handles shell:CHECK_UPDATE with error', () => {
handleAction({ type: 'shell:CHECK_UPDATE', meta: { shell: true } })
- autoUpdater.emit('error', new Error('AH'))
+ vi.mocked(ElectronUpdater.autoUpdater).emit('error', new Error('AH'))
expect(dispatch).toHaveBeenCalledWith({
type: 'shell:CHECK_UPDATE_RESULT',
@@ -77,13 +78,15 @@ describe('update', () => {
meta: { shell: true },
})
- expect(autoUpdater.downloadUpdate).toHaveBeenCalledTimes(1)
+ expect(
+ vi.mocked(ElectronUpdater.autoUpdater).downloadUpdate
+ ).toHaveBeenCalledTimes(1)
const progress = {
percent: 20,
}
- autoUpdater.emit('download-progress', progress)
+ vi.mocked(ElectronUpdater.autoUpdater).emit('download-progress', progress)
expect(dispatch).toHaveBeenCalledWith({
type: 'shell:DOWNLOAD_PERCENTAGE',
@@ -92,7 +95,9 @@ describe('update', () => {
},
})
- autoUpdater.emit('update-downloaded', { version: '1.0.0' })
+ vi.mocked(ElectronUpdater.autoUpdater).emit('update-downloaded', {
+ version: '1.0.0',
+ })
expect(dispatch).toHaveBeenCalledWith({
type: 'shell:DOWNLOAD_UPDATE_RESULT',
@@ -110,7 +115,7 @@ describe('update', () => {
type: 'shell:DOWNLOAD_UPDATE',
meta: { shell: true },
})
- autoUpdater.emit('error', new Error('AH'))
+ vi.mocked(ElectronUpdater.autoUpdater).emit('error', new Error('AH'))
expect(dispatch).toHaveBeenCalledWith({
type: 'shell:DOWNLOAD_UPDATE_RESULT',
@@ -120,6 +125,8 @@ describe('update', () => {
it('handles shell:APPLY_UPDATE', () => {
handleAction({ type: 'shell:APPLY_UPDATE', meta: { shell: true } })
- expect(autoUpdater.quitAndInstall).toHaveBeenCalledTimes(1)
+ expect(
+ vi.mocked(ElectronUpdater.autoUpdater).quitAndInstall
+ ).toHaveBeenCalledTimes(1)
})
})
diff --git a/app-shell/src/config/__fixtures__/index.ts b/app-shell/src/config/__fixtures__/index.ts
deleted file mode 100644
index 848753aa993..00000000000
--- a/app-shell/src/config/__fixtures__/index.ts
+++ /dev/null
@@ -1,264 +0,0 @@
-import type {
- ConfigV0,
- ConfigV1,
- ConfigV2,
- ConfigV3,
- ConfigV4,
- ConfigV5,
- ConfigV6,
- ConfigV7,
- ConfigV8,
- ConfigV9,
- ConfigV10,
- ConfigV11,
- ConfigV12,
- ConfigV13,
- ConfigV14,
- ConfigV15,
- ConfigV16,
- ConfigV17,
- ConfigV18,
- ConfigV19,
- ConfigV20,
-} from '@opentrons/app/src/redux/config/types'
-
-export const MOCK_CONFIG_V0: ConfigV0 = {
- version: 0, // Default key added on boot if missing in configs
- devtools: false,
- reinstallDevtools: false,
- update: {
- channel: 'latest',
- },
- buildroot: {
- manifestUrl:
- 'https://opentrons-buildroot-ci.s3.us-east-2.amazonaws.com/releases.json',
- },
- log: {
- level: {
- file: 'debug',
- console: 'info',
- },
- },
- ui: {
- width: 1024,
- height: 768,
- url: {
- protocol: 'file:',
- path: 'ui/index.html',
- },
- webPreferences: {
- webSecurity: true,
- },
- },
- analytics: {
- appId: 'mock-mixpanel-id',
- optedIn: true,
- seenOptIn: false,
- },
-
- // deprecated warning flag
- p10WarningSeen: {
- 'some-id': true,
- },
-
- // user support (intercom)
- support: {
- userId: 'mock-intercom-id',
- createdAt: 1589744281,
- name: 'Unknown User',
- email: null,
- },
- discovery: {
- candidates: [],
- },
- labware: {
- directory: '/Users/ot/Library/Application Support/Opentrons/labware',
- },
- alerts: {
- ignored: [],
- },
-}
-
-export const MOCK_CONFIG_V1: ConfigV1 = {
- ...MOCK_CONFIG_V0,
- version: 1,
- discovery: {
- ...MOCK_CONFIG_V0.discovery,
- disableCache: false,
- },
-}
-
-export const MOCK_CONFIG_V2: ConfigV2 = {
- ...MOCK_CONFIG_V1,
- version: 2,
- calibration: {
- useTrashSurfaceForTipCal: null,
- },
-}
-
-export const MOCK_CONFIG_V3: ConfigV3 = {
- ...MOCK_CONFIG_V2,
- version: 3,
- support: {
- ...MOCK_CONFIG_V2.support,
- name: null,
- email: null,
- },
-}
-
-export const MOCK_CONFIG_V4: ConfigV4 = {
- ...MOCK_CONFIG_V3,
- version: 4,
- labware: {
- ...MOCK_CONFIG_V3.labware,
- showLabwareOffsetCodeSnippets: false,
- },
-}
-
-export const MOCK_CONFIG_V5: ConfigV5 = {
- ...MOCK_CONFIG_V4,
- version: 5,
- python: {
- pathToPythonOverride: null,
- },
-}
-
-export const MOCK_CONFIG_V6: ConfigV6 = {
- ...MOCK_CONFIG_V5,
- version: 6,
- modules: {
- heaterShaker: {
- isAttached: false,
- },
- },
-}
-
-export const MOCK_CONFIG_V7: ConfigV7 = {
- ...MOCK_CONFIG_V6,
- version: 7,
- ui: {
- ...MOCK_CONFIG_V6.ui,
- width: 800,
- minWidth: 600,
- height: 760,
- },
-}
-
-export const MOCK_CONFIG_V8: ConfigV8 = {
- ...MOCK_CONFIG_V7,
- version: 8,
- ui: {
- ...MOCK_CONFIG_V7.ui,
- width: 1024,
- height: 768,
- },
-}
-
-export const MOCK_CONFIG_V9: ConfigV9 = {
- ...MOCK_CONFIG_V8,
- version: 9,
- isOnDevice: false,
-}
-
-export const MOCK_CONFIG_V10: ConfigV10 = {
- ...MOCK_CONFIG_V9,
- version: 10,
- protocols: { sendAllProtocolsToOT3: false },
-}
-
-export const MOCK_CONFIG_V11: ConfigV11 = {
- ...MOCK_CONFIG_V10,
- version: 11,
- protocols: {
- ...MOCK_CONFIG_V10.protocols,
- protocolsStoredSortKey: null,
- },
-}
-
-export const MOCK_CONFIG_V12: ConfigV12 = (() => {
- const { buildroot, ...restOfV11Config } = { ...MOCK_CONFIG_V11 } as ConfigV11
- return {
- ...restOfV11Config,
- version: 12 as const,
- robotSystemUpdate: {
- manifestUrls: {
- OT2: 'some-fake-manifest',
- OT3: 'some-fake-manifest-ot3',
- },
- },
- }
-})()
-
-export const MOCK_CONFIG_V13: ConfigV13 = {
- ...MOCK_CONFIG_V12,
- version: 13,
- protocols: {
- ...MOCK_CONFIG_V12.protocols,
- protocolsOnDeviceSortKey: null,
- },
-}
-
-export const MOCK_CONFIG_V14: ConfigV14 = {
- ...MOCK_CONFIG_V13,
- version: 14,
- protocols: {
- ...MOCK_CONFIG_V13.protocols,
- pinnedProtocolIds: [],
- },
-}
-
-export const MOCK_CONFIG_V15: ConfigV15 = {
- ...MOCK_CONFIG_V14,
- version: 15,
- onDeviceDisplaySettings: {
- sleepMs: 60 * 1000 * 60 * 24 * 7,
- brightness: 4,
- textSize: 1,
- },
-}
-
-export const MOCK_CONFIG_V16: ConfigV16 = {
- ...MOCK_CONFIG_V15,
- version: 16,
- onDeviceDisplaySettings: {
- ...MOCK_CONFIG_V15.onDeviceDisplaySettings,
- unfinishedUnboxingFlowRoute: null,
- },
-}
-
-export const MOCK_CONFIG_V17: ConfigV17 = {
- ...MOCK_CONFIG_V16,
- version: 17,
- protocols: {
- ...MOCK_CONFIG_V16.protocols,
- applyHistoricOffsets: true,
- },
-}
-
-export const MOCK_CONFIG_V18: ConfigV18 = {
- ...(() => {
- const { robotSystemUpdate, version, ...rest } = MOCK_CONFIG_V17
- return rest
- })(),
- version: 18,
-}
-
-export const MOCK_CONFIG_V19: ConfigV19 = {
- ...MOCK_CONFIG_V18,
- version: 19,
- update: {
- ...MOCK_CONFIG_V18.update,
- hasJustUpdated: false,
- },
-}
-
-export const MOCK_CONFIG_V20: ConfigV20 = {
- ...MOCK_CONFIG_V19,
- version: 20,
- robotSystemUpdate: {
- manifestUrls: {
- OT2:
- 'https://opentrons-buildroot-ci.s3.us-east-2.amazonaws.com/releases.json',
- },
- },
-}
diff --git a/app-shell/src/config/__tests__/migrate.test.ts b/app-shell/src/config/__tests__/migrate.test.ts
index 38bc6381f40..24dcd9fcd38 100644
--- a/app-shell/src/config/__tests__/migrate.test.ts
+++ b/app-shell/src/config/__tests__/migrate.test.ts
@@ -1,4 +1,5 @@
// config migration tests
+import { describe, it, expect } from 'vitest'
import {
MOCK_CONFIG_V0,
MOCK_CONFIG_V1,
@@ -21,10 +22,11 @@ import {
MOCK_CONFIG_V18,
MOCK_CONFIG_V19,
MOCK_CONFIG_V20,
-} from '../__fixtures__'
+ MOCK_CONFIG_V21,
+} from '../../__fixtures__'
import { migrate } from '../migrate'
-const NEWEST_VERSION = 20
+const NEWEST_VERSION = 21
describe('config migration', () => {
it('should migrate version 0 to latest', () => {
@@ -32,7 +34,7 @@ describe('config migration', () => {
const result = migrate(v0Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 1 to latest', () => {
@@ -40,7 +42,7 @@ describe('config migration', () => {
const result = migrate(v1Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 2 to latest', () => {
@@ -48,7 +50,7 @@ describe('config migration', () => {
const result = migrate(v2Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 3 to latest', () => {
@@ -56,7 +58,7 @@ describe('config migration', () => {
const result = migrate(v3Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 4 to latest', () => {
@@ -64,7 +66,7 @@ describe('config migration', () => {
const result = migrate(v4Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 5 to latest', () => {
@@ -72,7 +74,7 @@ describe('config migration', () => {
const result = migrate(v5Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 6 to latest', () => {
@@ -80,7 +82,7 @@ describe('config migration', () => {
const result = migrate(v6Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 7 to latest', () => {
@@ -88,7 +90,7 @@ describe('config migration', () => {
const result = migrate(v7Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 8 to latest', () => {
@@ -96,7 +98,7 @@ describe('config migration', () => {
const result = migrate(v8Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 9 to latest', () => {
@@ -104,7 +106,7 @@ describe('config migration', () => {
const result = migrate(v9Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 10 to latest', () => {
@@ -112,7 +114,7 @@ describe('config migration', () => {
const result = migrate(v10Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 11 to latest', () => {
@@ -120,7 +122,7 @@ describe('config migration', () => {
const result = migrate(v11Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 12 to latest', () => {
@@ -128,7 +130,7 @@ describe('config migration', () => {
const result = migrate(v12Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 13 to latest', () => {
@@ -136,7 +138,7 @@ describe('config migration', () => {
const result = migrate(v13Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 14 to latest', () => {
@@ -144,7 +146,7 @@ describe('config migration', () => {
const result = migrate(v14Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 15 to latest', () => {
@@ -152,7 +154,7 @@ describe('config migration', () => {
const result = migrate(v15Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 16 to latest', () => {
@@ -160,7 +162,7 @@ describe('config migration', () => {
const result = migrate(v16Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 17 to latest', () => {
@@ -168,26 +170,34 @@ describe('config migration', () => {
const result = migrate(v17Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 18 to latest', () => {
const v18Config = MOCK_CONFIG_V18
const result = migrate(v18Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
it('should migrate version 19 to latest', () => {
const v19Config = MOCK_CONFIG_V19
const result = migrate(v19Config)
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(MOCK_CONFIG_V20)
+ expect(result).toEqual(MOCK_CONFIG_V21)
})
- it('should keep version 20', () => {
+ it('should migrate version 20 to latest', () => {
const v20Config = MOCK_CONFIG_V20
const result = migrate(v20Config)
+
+ expect(result.version).toBe(NEWEST_VERSION)
+ expect(result).toEqual(MOCK_CONFIG_V21)
+ })
+ it('should keep version 21', () => {
+ const v21Config = MOCK_CONFIG_V21
+ const result = migrate(v21Config)
+
expect(result.version).toBe(NEWEST_VERSION)
- expect(result).toEqual(v20Config)
+ expect(result).toEqual(v21Config)
})
})
diff --git a/app-shell/src/config/__tests__/update.test.ts b/app-shell/src/config/__tests__/update.test.ts
index 136c7bc8a97..518d6db9587 100644
--- a/app-shell/src/config/__tests__/update.test.ts
+++ b/app-shell/src/config/__tests__/update.test.ts
@@ -1,3 +1,4 @@
+import { describe, it, expect } from 'vitest'
import * as Cfg from '@opentrons/app/src/redux/config'
import { shouldUpdate, getNextValue } from '../update'
diff --git a/app-shell/src/config/actions.ts b/app-shell/src/config/actions.ts
new file mode 100644
index 00000000000..eabc9b47a16
--- /dev/null
+++ b/app-shell/src/config/actions.ts
@@ -0,0 +1,421 @@
+import type {
+ AddCustomLabwareAction,
+ AddCustomLabwareFailureAction,
+ AddCustomLabwareFileAction,
+ AddNewLabwareNameAction,
+ ChangeCustomLabwareDirectoryAction,
+ CheckedLabwareFile,
+ ClearAddCustomLabwareFailureAction,
+ ClearNewLabwareNameAction,
+ CustomLabwareListAction,
+ CustomLabwareListActionSource,
+ CustomLabwareListFailureAction,
+ DeleteCustomLabwareFileAction,
+ DuplicateLabwareFile,
+ FailedLabwareFile,
+ OpenCustomLabwareDirectoryAction,
+} from '@opentrons/app/src/redux/custom-labware/types'
+import type {
+ ResetConfigValueAction,
+ UpdateConfigValueAction,
+} from '@opentrons/app/src/redux/config'
+import type {
+ AddProtocolAction,
+ AddProtocolFailureAction,
+ AnalyzeProtocolAction,
+ AnalyzeProtocolFailureAction,
+ AnalyzeProtocolSuccessAction,
+ ClearAddProtocolFailureAction,
+ FetchProtocolsAction,
+ OpenProtocolDirectoryAction,
+ ProtocolListActionSource,
+ RemoveProtocolAction,
+ StoredProtocolData,
+ StoredProtocolDir,
+ UpdateProtocolListAction,
+ UpdateProtocolListFailureAction,
+ ViewProtocolSourceFolder,
+} from '@opentrons/app/src/redux/protocol-storage'
+import {
+ ADD_CUSTOM_LABWARE,
+ ADD_CUSTOM_LABWARE_FAILURE,
+ ADD_CUSTOM_LABWARE_FILE,
+ ADD_NEW_LABWARE_NAME,
+ ADD_PROTOCOL,
+ ADD_PROTOCOL_FAILURE,
+ ANALYZE_PROTOCOL,
+ ANALYZE_PROTOCOL_FAILURE,
+ ANALYZE_PROTOCOL_SUCCESS,
+ APP_RESTART,
+ CHANGE_CUSTOM_LABWARE_DIRECTORY,
+ CLEAR_ADD_CUSTOM_LABWARE_FAILURE,
+ CLEAR_ADD_PROTOCOL_FAILURE,
+ CLEAR_NEW_LABWARE_NAME,
+ CONFIG_INITIALIZED,
+ CUSTOM_LABWARE_LIST,
+ CUSTOM_LABWARE_LIST_FAILURE,
+ DELETE_CUSTOM_LABWARE_FILE,
+ FETCH_PROTOCOLS,
+ LABWARE_DIRECTORY_CONFIG_PATH,
+ NETWORK_INTERFACES_CHANGED,
+ OPEN_CUSTOM_LABWARE_DIRECTORY,
+ OPEN_PROTOCOL_DIRECTORY,
+ POLL,
+ RELOAD_UI,
+ REMOVE_PROTOCOL,
+ RESET_VALUE,
+ SEND_LOG,
+ SYSTEM_INFO_INITIALIZED,
+ UPDATE_PROTOCOL_LIST,
+ UPDATE_PROTOCOL_LIST_FAILURE,
+ UPDATE_VALUE,
+ USB_DEVICE_ADDED,
+ USB_DEVICE_REMOVED,
+ USB_HTTP_REQUESTS_START,
+ USB_HTTP_REQUESTS_STOP,
+ VALUE_UPDATED,
+ VIEW_PROTOCOL_SOURCE_FOLDER,
+ NOTIFY_SUBSCRIBE,
+ ROBOT_MASS_STORAGE_DEVICE_ADDED,
+ ROBOT_MASS_STORAGE_DEVICE_ENUMERATED,
+ ROBOT_MASS_STORAGE_DEVICE_REMOVED,
+ UPDATE_BRIGHTNESS,
+} from '../constants'
+import type {
+ InitializedAction,
+ NetworkInterface,
+ NetworkInterfacesChangedAction,
+ UsbDevice,
+ UsbDeviceAddedAction,
+ UsbDeviceRemovedAction,
+} from '@opentrons/app/src/redux/system-info/types'
+import type {
+ ConfigInitializedAction,
+ ConfigValueUpdatedAction,
+} from '../types'
+import type { Config } from './types'
+import type {
+ AppRestartAction,
+ NotifySubscribeAction,
+ NotifyTopic,
+ ReloadUiAction,
+ RobotMassStorageDeviceAdded,
+ RobotMassStorageDeviceEnumerated,
+ RobotMassStorageDeviceRemoved,
+ SendLogAction,
+ UpdateBrightnessAction,
+ UsbRequestsAction,
+} from '@opentrons/app/src/redux/shell/types'
+
+// config file has been initialized
+export const configInitialized = (config: Config): ConfigInitializedAction => ({
+ type: CONFIG_INITIALIZED,
+ payload: { config },
+})
+
+// config value has been updated
+export const configValueUpdated = (
+ path: string,
+ value: unknown
+): ConfigValueUpdatedAction => ({
+ type: VALUE_UPDATED,
+ payload: { path, value },
+})
+
+export const customLabwareList = (
+ payload: CheckedLabwareFile[],
+ source: CustomLabwareListActionSource = POLL
+): CustomLabwareListAction => ({
+ type: CUSTOM_LABWARE_LIST,
+ payload,
+ meta: { source },
+})
+
+export const customLabwareListFailure = (
+ message: string,
+ source: CustomLabwareListActionSource = POLL
+): CustomLabwareListFailureAction => ({
+ type: CUSTOM_LABWARE_LIST_FAILURE,
+ payload: { message },
+ meta: { source },
+})
+
+export const changeCustomLabwareDirectory = (): ChangeCustomLabwareDirectoryAction => ({
+ type: CHANGE_CUSTOM_LABWARE_DIRECTORY,
+ meta: { shell: true },
+})
+
+export const addCustomLabware = (
+ overwrite: DuplicateLabwareFile | null = null
+): AddCustomLabwareAction => ({
+ type: ADD_CUSTOM_LABWARE,
+ payload: { overwrite },
+ meta: { shell: true },
+})
+
+export const addCustomLabwareFile = (
+ filePath: string
+): AddCustomLabwareFileAction => ({
+ type: ADD_CUSTOM_LABWARE_FILE,
+ payload: { filePath },
+ meta: { shell: true },
+})
+
+export const deleteCustomLabwareFile = (
+ filePath: string
+): DeleteCustomLabwareFileAction => ({
+ type: DELETE_CUSTOM_LABWARE_FILE,
+ payload: { filePath },
+ meta: { shell: true },
+})
+
+export const addCustomLabwareFailure = (
+ labware: FailedLabwareFile | null = null,
+ message: string | null = null
+): AddCustomLabwareFailureAction => ({
+ type: ADD_CUSTOM_LABWARE_FAILURE,
+ payload: { labware, message },
+})
+
+export const clearAddCustomLabwareFailure = (): ClearAddCustomLabwareFailureAction => ({
+ type: CLEAR_ADD_CUSTOM_LABWARE_FAILURE,
+})
+
+export const addNewLabwareName = (
+ filename: string
+): AddNewLabwareNameAction => ({
+ type: ADD_NEW_LABWARE_NAME,
+ payload: { filename },
+})
+
+export const clearNewLabwareName = (): ClearNewLabwareNameAction => ({
+ type: CLEAR_NEW_LABWARE_NAME,
+})
+
+export const openCustomLabwareDirectory = (): OpenCustomLabwareDirectoryAction => ({
+ type: OPEN_CUSTOM_LABWARE_DIRECTORY,
+ meta: { shell: true },
+})
+
+// request a config value reset to default
+export const resetConfigValue = (path: string): ResetConfigValueAction => ({
+ type: RESET_VALUE,
+ payload: { path },
+ meta: { shell: true },
+})
+
+export const resetCustomLabwareDirectory = (): ResetConfigValueAction => {
+ return resetConfigValue(LABWARE_DIRECTORY_CONFIG_PATH)
+}
+
+// request a config value update
+export const updateConfigValue = (
+ path: string,
+ value: unknown
+): UpdateConfigValueAction => ({
+ type: UPDATE_VALUE,
+ payload: { path, value },
+ meta: { shell: true },
+})
+
+// action creators
+
+export const fetchProtocols = (): FetchProtocolsAction => ({
+ type: FETCH_PROTOCOLS,
+ meta: { shell: true },
+})
+
+export const updateProtocolList = (
+ payload: StoredProtocolData[],
+ source: ProtocolListActionSource = POLL
+): UpdateProtocolListAction => ({
+ type: UPDATE_PROTOCOL_LIST,
+ payload,
+ meta: { source },
+})
+
+export const updateProtocolListFailure = (
+ message: string,
+ source: ProtocolListActionSource = POLL
+): UpdateProtocolListFailureAction => ({
+ type: UPDATE_PROTOCOL_LIST_FAILURE,
+ payload: { message },
+ meta: { source },
+})
+
+export const addProtocol = (protocolFilePath: string): AddProtocolAction => ({
+ type: ADD_PROTOCOL,
+ payload: { protocolFilePath },
+ meta: { shell: true },
+})
+
+export const removeProtocol = (protocolKey: string): RemoveProtocolAction => ({
+ type: REMOVE_PROTOCOL,
+ payload: { protocolKey },
+ meta: { shell: true },
+})
+
+export const addProtocolFailure = (
+ protocol: StoredProtocolDir | null = null,
+ message: string | null = null
+): AddProtocolFailureAction => ({
+ type: ADD_PROTOCOL_FAILURE,
+ payload: { protocol, message },
+})
+
+export const clearAddProtocolFailure = (): ClearAddProtocolFailureAction => ({
+ type: CLEAR_ADD_PROTOCOL_FAILURE,
+})
+
+export const openProtocolDirectory = (): OpenProtocolDirectoryAction => ({
+ type: OPEN_PROTOCOL_DIRECTORY,
+ meta: { shell: true },
+})
+
+export const analyzeProtocol = (
+ protocolKey: string
+): AnalyzeProtocolAction => ({
+ type: ANALYZE_PROTOCOL,
+ payload: { protocolKey },
+ meta: { shell: true },
+})
+
+export const analyzeProtocolSuccess = (
+ protocolKey: string
+): AnalyzeProtocolSuccessAction => ({
+ type: ANALYZE_PROTOCOL_SUCCESS,
+ payload: { protocolKey },
+ meta: { shell: true },
+})
+
+export const analyzeProtocolFailure = (
+ protocolKey: string
+): AnalyzeProtocolFailureAction => ({
+ type: ANALYZE_PROTOCOL_FAILURE,
+ payload: { protocolKey },
+ meta: { shell: true },
+})
+
+export const viewProtocolSourceFolder = (
+ protocolKey: string
+): ViewProtocolSourceFolder => ({
+ type: VIEW_PROTOCOL_SOURCE_FOLDER,
+ payload: { protocolKey },
+ meta: { shell: true },
+})
+
+export const initialized = (
+ usbDevices: UsbDevice[],
+ networkInterfaces: NetworkInterface[]
+): InitializedAction => ({
+ type: SYSTEM_INFO_INITIALIZED,
+ payload: { usbDevices, networkInterfaces },
+ meta: { shell: true },
+})
+
+export const usbDeviceAdded = (usbDevice: UsbDevice): UsbDeviceAddedAction => ({
+ type: USB_DEVICE_ADDED,
+ payload: { usbDevice },
+ meta: { shell: true },
+})
+
+export const usbDeviceRemoved = (
+ usbDevice: UsbDevice
+): UsbDeviceRemovedAction => ({
+ type: USB_DEVICE_REMOVED,
+ payload: { usbDevice },
+ meta: { shell: true },
+})
+
+export const networkInterfacesChanged = (
+ networkInterfaces: NetworkInterface[]
+): NetworkInterfacesChangedAction => ({
+ type: NETWORK_INTERFACES_CHANGED,
+ payload: { networkInterfaces },
+})
+
+export const usbRequestsStart = (): UsbRequestsAction => ({
+ type: USB_HTTP_REQUESTS_START,
+ meta: { shell: true },
+})
+
+export const usbRequestsStop = (): UsbRequestsAction => ({
+ type: USB_HTTP_REQUESTS_STOP,
+ meta: { shell: true },
+})
+
+export const appRestart = (message: string): AppRestartAction => ({
+ type: APP_RESTART,
+ payload: {
+ message: message,
+ },
+ meta: { shell: true },
+})
+
+export const reloadUi = (message: string): ReloadUiAction => ({
+ type: RELOAD_UI,
+ payload: {
+ message: message,
+ },
+ meta: { shell: true },
+})
+
+export const sendLog = (message: string): SendLogAction => ({
+ type: SEND_LOG,
+ payload: {
+ message: message,
+ },
+ meta: { shell: true },
+})
+
+export const updateBrightness = (message: string): UpdateBrightnessAction => ({
+ type: UPDATE_BRIGHTNESS,
+ payload: {
+ message: message,
+ },
+ meta: { shell: true },
+})
+
+export const robotMassStorageDeviceRemoved = (
+ rootPath: string
+): RobotMassStorageDeviceRemoved => ({
+ type: ROBOT_MASS_STORAGE_DEVICE_REMOVED,
+ payload: {
+ rootPath,
+ },
+ meta: { shell: true },
+})
+
+export const robotMassStorageDeviceAdded = (
+ rootPath: string
+): RobotMassStorageDeviceAdded => ({
+ type: ROBOT_MASS_STORAGE_DEVICE_ADDED,
+ payload: {
+ rootPath,
+ },
+ meta: { shell: true },
+})
+
+export const robotMassStorageDeviceEnumerated = (
+ rootPath: string,
+ filePaths: string[]
+): RobotMassStorageDeviceEnumerated => ({
+ type: ROBOT_MASS_STORAGE_DEVICE_ENUMERATED,
+ payload: {
+ rootPath,
+ filePaths,
+ },
+ meta: { shell: true },
+})
+
+export const notifySubscribeAction = (
+ hostname: string,
+ topic: NotifyTopic
+): NotifySubscribeAction => ({
+ type: NOTIFY_SUBSCRIBE,
+ payload: {
+ hostname,
+ topic,
+ },
+ meta: { shell: true },
+})
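A hedged usage sketch for these shell-side action creators; the dispatch wiring below is an assumption for illustration only and is not part of this diff:

    import { addProtocol, updateConfigValue } from './config/actions'

    // illustrative only: any redux-style dispatch available in the main
    // process can forward these plain action objects
    declare function dispatch(action: { type: string }): void

    dispatch(addProtocol('/path/to/protocol.py'))
    dispatch(updateConfigValue('devtools', true))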
diff --git a/app-shell/src/config/index.ts b/app-shell/src/config/index.ts
index 559cfa47584..232b8ab829f 100644
--- a/app-shell/src/config/index.ts
+++ b/app-shell/src/config/index.ts
@@ -5,11 +5,18 @@ import get from 'lodash/get'
import mergeOptions from 'merge-options'
import yargsParser from 'yargs-parser'
-import { UI_INITIALIZED } from '@opentrons/app/src/redux/shell/actions'
-import * as Cfg from '@opentrons/app/src/redux/config'
import { createLogger } from '../log'
+import {
+ ADD_UNIQUE_VALUE,
+ RESET_VALUE,
+ SUBTRACT_VALUE,
+ TOGGLE_VALUE,
+ UI_INITIALIZED,
+ UPDATE_VALUE,
+} from '../constants'
import { DEFAULTS_V0, migrate } from './migrate'
import { shouldUpdate, getNextValue } from './update'
+import { configInitialized, configValueUpdated } from './actions'
import type {
ConfigV0,
@@ -57,13 +64,13 @@ const log = (): Logger => _log ?? (_log = createLogger('config'))
export function registerConfig(dispatch: Dispatch): (action: Action) => void {
return function handleIncomingAction(action: Action) {
if (action.type === UI_INITIALIZED) {
- dispatch(Cfg.configInitialized(getFullConfig()))
+ dispatch(configInitialized(getFullConfig()))
} else if (
- action.type === Cfg.UPDATE_VALUE ||
- action.type === Cfg.RESET_VALUE ||
- action.type === Cfg.TOGGLE_VALUE ||
- action.type === Cfg.ADD_UNIQUE_VALUE ||
- action.type === Cfg.SUBTRACT_VALUE
+ action.type === UPDATE_VALUE ||
+ action.type === RESET_VALUE ||
+ action.type === TOGGLE_VALUE ||
+ action.type === ADD_UNIQUE_VALUE ||
+ action.type === SUBTRACT_VALUE
) {
const { path } = action.payload as { path: string }
@@ -75,7 +82,7 @@ export function registerConfig(dispatch: Dispatch): (action: Action) => void {
log().debug('Updating config', { path, nextValue })
store().set(path, nextValue)
- dispatch(Cfg.configValueUpdated(path, nextValue))
+ dispatch(configValueUpdated(path, nextValue))
} else {
log().debug(`config path in overrides; not updating`, { path })
}
diff --git a/app-shell/src/config/migrate.ts b/app-shell/src/config/migrate.ts
index d13b26ba7a6..53e37383cf5 100644
--- a/app-shell/src/config/migrate.ts
+++ b/app-shell/src/config/migrate.ts
@@ -1,8 +1,6 @@
import path from 'path'
import { app } from 'electron'
import uuid from 'uuid/v4'
-import { CONFIG_VERSION_LATEST } from '@opentrons/app/src/redux/config'
-
import type {
Config,
ConfigV0,
@@ -26,12 +24,15 @@ import type {
ConfigV18,
ConfigV19,
ConfigV20,
+ ConfigV21,
} from '@opentrons/app/src/redux/config/types'
// format
// base config v0 defaults
// any default values for later config versions are specified in the migration
// functions for those versions below
+const CONFIG_VERSION_LATEST = 21
+
export const DEFAULTS_V0: ConfigV0 = {
version: 0,
devtools: false,
@@ -39,7 +40,8 @@ export const DEFAULTS_V0: ConfigV0 = {
// app update config
update: {
- channel: _PKG_VERSION_.includes('beta') ? 'beta' : 'latest',
+ // @ts-expect-error can't get TS to recognize global.d.ts
+ channel: [].includes('beta') ? 'beta' : 'latest',
},
buildroot: {
@@ -373,6 +375,20 @@ const toVersion20 = (prevConfig: ConfigV19): ConfigV20 => {
}
return nextConfig
}
+const toVersion21 = (prevConfig: ConfigV20): ConfigV21 => {
+ return {
+ ...prevConfig,
+ version: 21 as const,
+ onDeviceDisplaySettings: {
+ ...prevConfig.onDeviceDisplaySettings,
+ unfinishedUnboxingFlowRoute:
+ prevConfig.onDeviceDisplaySettings.unfinishedUnboxingFlowRoute ===
+ '/dashboard'
+ ? null
+ : prevConfig.onDeviceDisplaySettings.unfinishedUnboxingFlowRoute,
+ },
+ }
+}
const MIGRATIONS: [
(prevConfig: ConfigV0) => ConfigV1,
@@ -394,7 +410,8 @@ const MIGRATIONS: [
(prevConfig: ConfigV16) => ConfigV17,
(prevConfig: ConfigV17) => ConfigV18,
(prevConfig: ConfigV18) => ConfigV19,
- (prevConfig: ConfigV19) => ConfigV20
+ (prevConfig: ConfigV19) => ConfigV20,
+ (prevConfig: ConfigV20) => ConfigV21
] = [
toVersion1,
toVersion2,
@@ -416,6 +433,7 @@ const MIGRATIONS: [
toVersion18,
toVersion19,
toVersion20,
+ toVersion21,
]
export const DEFAULTS: Config = migrate(DEFAULTS_V0)
@@ -443,6 +461,7 @@ export function migrate(
| ConfigV18
| ConfigV19
| ConfigV20
+ | ConfigV21
): Config {
const prevVersion = prevConfig.version
let result = prevConfig
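For orientation, a hedged sketch of how a versioned MIGRATIONS table like the one above is typically applied; the repository's real migrate() body is truncated in this hunk, so the helper name and loop below are illustrative only:

    interface Versioned {
      version: number
    }

    // illustrative only: migrations[n] upgrades a vN config to v(n + 1)
    function runMigrations(
      prevConfig: Versioned,
      migrations: Array<(config: any) => Versioned>,
      latestVersion: number
    ): Versioned {
      let result = prevConfig
      while (result.version < latestVersion) {
        result = migrations[result.version](result)
      }
      return result
    }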
diff --git a/app-shell/src/config/update.ts b/app-shell/src/config/update.ts
index 6340e249967..894aff585c8 100644
--- a/app-shell/src/config/update.ts
+++ b/app-shell/src/config/update.ts
@@ -9,7 +9,7 @@ import {
RESET_VALUE,
ADD_UNIQUE_VALUE,
SUBTRACT_VALUE,
-} from '@opentrons/app/src/redux/config'
+} from '../constants'
import { DEFAULTS } from './migrate'
diff --git a/app-shell/src/constants.ts b/app-shell/src/constants.ts
new file mode 100644
index 00000000000..3e86c503c83
--- /dev/null
+++ b/app-shell/src/constants.ts
@@ -0,0 +1,253 @@
+import type {
+ UI_INITIALIZED_TYPE,
+ CONFIG_INITIALIZED_TYPE,
+ CONFIG_UPDATE_VALUE_TYPE,
+ CONFIG_RESET_VALUE_TYPE,
+ CONFIG_TOGGLE_VALUE_TYPE,
+ CONFIG_ADD_UNIQUE_VALUE_TYPE,
+ CONFIG_SUBTRACT_VALUE_TYPE,
+ CONFIG_VALUE_UPDATED_TYPE,
+ POLL_TYPE,
+ INITIAL_TYPE,
+ ADD_LABWARE_TYPE,
+ DELETE_LABWARE_TYPE,
+ OVERWRITE_LABWARE_TYPE,
+ CHANGE_DIRECTORY_TYPE,
+ FETCH_CUSTOM_LABWARE_TYPE,
+ CUSTOM_LABWARE_LIST_TYPE,
+ CUSTOM_LABWARE_LIST_FAILURE_TYPE,
+ CHANGE_CUSTOM_LABWARE_DIRECTORY_TYPE,
+ ADD_CUSTOM_LABWARE_TYPE,
+ ADD_CUSTOM_LABWARE_FILE_TYPE,
+ ADD_CUSTOM_LABWARE_FAILURE_TYPE,
+ CLEAR_ADD_CUSTOM_LABWARE_FAILURE_TYPE,
+ ADD_NEW_LABWARE_NAME_TYPE,
+ CLEAR_NEW_LABWARE_NAME_TYPE,
+ OPEN_CUSTOM_LABWARE_DIRECTORY_TYPE,
+ DELETE_CUSTOM_LABWARE_FILE_TYPE,
+ INVALID_LABWARE_FILE_TYPE,
+ DUPLICATE_LABWARE_FILE_TYPE,
+ OPENTRONS_LABWARE_FILE_TYPE,
+ VALID_LABWARE_FILE_TYPE,
+ OPEN_PYTHON_DIRECTORY_TYPE,
+ CHANGE_PYTHON_PATH_OVERRIDE_TYPE,
+ FETCH_PROTOCOLS_TYPE,
+ UPDATE_PROTOCOL_LIST_TYPE,
+ UPDATE_PROTOCOL_LIST_FAILURE_TYPE,
+ ADD_PROTOCOL_TYPE,
+ REMOVE_PROTOCOL_TYPE,
+ ADD_PROTOCOL_FAILURE_TYPE,
+ CLEAR_ADD_PROTOCOL_FAILURE_TYPE,
+ OPEN_PROTOCOL_DIRECTORY_TYPE,
+ ANALYZE_PROTOCOL_TYPE,
+ ANALYZE_PROTOCOL_SUCCESS_TYPE,
+ ANALYZE_PROTOCOL_FAILURE_TYPE,
+ VIEW_PROTOCOL_SOURCE_FOLDER_TYPE,
+ PROTOCOL_ADDITION_TYPE,
+ OPENTRONS_USB_TYPE,
+ SYSTEM_INFO_INITIALIZED_TYPE,
+ USB_DEVICE_ADDED_TYPE,
+ USB_DEVICE_REMOVED_TYPE,
+ NETWORK_INTERFACES_CHANGED_TYPE,
+ U2E_DRIVER_OUTDATED_MESSAGE_TYPE,
+ U2E_DRIVER_DESCRIPTION_TYPE,
+ U2E_DRIVER_OUTDATED_CTA_TYPE,
+ DISCOVERY_START_TYPE,
+ DISCOVERY_FINISH_TYPE,
+ DISCOVERY_UPDATE_LIST_TYPE,
+ DISCOVERY_REMOVE_TYPE,
+ CLEAR_CACHE_TYPE,
+ USB_HTTP_REQUESTS_START_TYPE,
+ USB_HTTP_REQUESTS_STOP_TYPE,
+ APP_RESTART_TYPE,
+ RELOAD_UI_TYPE,
+ SEND_LOG_TYPE,
+} from './types'
+
+// these constants are all copied over from the app
+
+export const UI_INITIALIZED: UI_INITIALIZED_TYPE = 'shell:UI_INITIALIZED'
+export const CONFIG_INITIALIZED: CONFIG_INITIALIZED_TYPE = 'config:INITIALIZED'
+export const UPDATE_VALUE: CONFIG_UPDATE_VALUE_TYPE = 'config:UPDATE_VALUE'
+export const RESET_VALUE: CONFIG_RESET_VALUE_TYPE = 'config:RESET_VALUE'
+export const TOGGLE_VALUE: CONFIG_TOGGLE_VALUE_TYPE = 'config:TOGGLE_VALUE'
+export const ADD_UNIQUE_VALUE: CONFIG_ADD_UNIQUE_VALUE_TYPE =
+ 'config:ADD_UNIQUE_VALUE'
+export const SUBTRACT_VALUE: CONFIG_SUBTRACT_VALUE_TYPE =
+ 'config:SUBTRACT_VALUE'
+export const VALUE_UPDATED: CONFIG_VALUE_UPDATED_TYPE = 'config:VALUE_UPDATED'
+
+// custom labware
+
+export const FETCH_CUSTOM_LABWARE: FETCH_CUSTOM_LABWARE_TYPE =
+ 'labware:FETCH_CUSTOM_LABWARE'
+
+export const CUSTOM_LABWARE_LIST: CUSTOM_LABWARE_LIST_TYPE =
+ 'labware:CUSTOM_LABWARE_LIST'
+
+export const CUSTOM_LABWARE_LIST_FAILURE: CUSTOM_LABWARE_LIST_FAILURE_TYPE =
+ 'labware:CUSTOM_LABWARE_LIST_FAILURE'
+
+export const CHANGE_CUSTOM_LABWARE_DIRECTORY: CHANGE_CUSTOM_LABWARE_DIRECTORY_TYPE =
+ 'labware:CHANGE_CUSTOM_LABWARE_DIRECTORY'
+
+export const ADD_CUSTOM_LABWARE: ADD_CUSTOM_LABWARE_TYPE =
+ 'labware:ADD_CUSTOM_LABWARE'
+
+export const ADD_CUSTOM_LABWARE_FILE: ADD_CUSTOM_LABWARE_FILE_TYPE =
+ 'labware:ADD_CUSTOM_LABWARE_FILE'
+
+export const ADD_CUSTOM_LABWARE_FAILURE: ADD_CUSTOM_LABWARE_FAILURE_TYPE =
+ 'labware:ADD_CUSTOM_LABWARE_FAILURE'
+
+export const CLEAR_ADD_CUSTOM_LABWARE_FAILURE: CLEAR_ADD_CUSTOM_LABWARE_FAILURE_TYPE =
+ 'labware:CLEAR_ADD_CUSTOM_LABWARE_FAILURE'
+
+export const ADD_NEW_LABWARE_NAME: ADD_NEW_LABWARE_NAME_TYPE =
+ 'labware:ADD_NEW_LABWARE_NAME'
+
+export const CLEAR_NEW_LABWARE_NAME: CLEAR_NEW_LABWARE_NAME_TYPE =
+ 'labware:CLEAR_NEW_LABWARE_NAME'
+
+export const OPEN_CUSTOM_LABWARE_DIRECTORY: OPEN_CUSTOM_LABWARE_DIRECTORY_TYPE =
+ 'labware:OPEN_CUSTOM_LABWARE_DIRECTORY'
+
+export const DELETE_CUSTOM_LABWARE_FILE: DELETE_CUSTOM_LABWARE_FILE_TYPE =
+ 'labware:DELETE_CUSTOM_LABWARE_FILE'
+// action meta literals
+
+export const POLL: POLL_TYPE = 'poll'
+export const INITIAL: INITIAL_TYPE = 'initial'
+export const ADD_LABWARE: ADD_LABWARE_TYPE = 'addLabware'
+export const DELETE_LABWARE: DELETE_LABWARE_TYPE = 'deleteLabware'
+export const OVERWRITE_LABWARE: OVERWRITE_LABWARE_TYPE = 'overwriteLabware'
+export const CHANGE_DIRECTORY: CHANGE_DIRECTORY_TYPE = 'changeDirectory'
+
+// other constants
+
+export const LABWARE_DIRECTORY_CONFIG_PATH = 'labware.directory'
+
+export const INVALID_LABWARE_FILE: INVALID_LABWARE_FILE_TYPE =
+ 'INVALID_LABWARE_FILE'
+
+export const DUPLICATE_LABWARE_FILE: DUPLICATE_LABWARE_FILE_TYPE =
+ 'DUPLICATE_LABWARE_FILE'
+
+export const OPENTRONS_LABWARE_FILE: OPENTRONS_LABWARE_FILE_TYPE =
+ 'OPENTRONS_LABWARE_FILE'
+
+export const VALID_LABWARE_FILE: VALID_LABWARE_FILE_TYPE = 'VALID_LABWARE_FILE'
+
+export const OPEN_PYTHON_DIRECTORY: OPEN_PYTHON_DIRECTORY_TYPE =
+ 'protocol-analysis:OPEN_PYTHON_DIRECTORY'
+
+export const CHANGE_PYTHON_PATH_OVERRIDE: CHANGE_PYTHON_PATH_OVERRIDE_TYPE =
+ 'protocol-analysis:CHANGE_PYTHON_PATH_OVERRIDE'
+
+export const FETCH_PROTOCOLS: FETCH_PROTOCOLS_TYPE =
+ 'protocolStorage:FETCH_PROTOCOLS'
+
+export const UPDATE_PROTOCOL_LIST: UPDATE_PROTOCOL_LIST_TYPE =
+ 'protocolStorage:UPDATE_PROTOCOL_LIST'
+
+export const UPDATE_PROTOCOL_LIST_FAILURE: UPDATE_PROTOCOL_LIST_FAILURE_TYPE =
+ 'protocolStorage:UPDATE_PROTOCOL_LIST_FAILURE'
+
+export const ADD_PROTOCOL: ADD_PROTOCOL_TYPE = 'protocolStorage:ADD_PROTOCOL'
+
+export const REMOVE_PROTOCOL: REMOVE_PROTOCOL_TYPE =
+ 'protocolStorage:REMOVE_PROTOCOL'
+
+export const ADD_PROTOCOL_FAILURE: ADD_PROTOCOL_FAILURE_TYPE =
+ 'protocolStorage:ADD_PROTOCOL_FAILURE'
+
+export const CLEAR_ADD_PROTOCOL_FAILURE: CLEAR_ADD_PROTOCOL_FAILURE_TYPE =
+ 'protocolStorage:CLEAR_ADD_PROTOCOL_FAILURE'
+
+export const OPEN_PROTOCOL_DIRECTORY: OPEN_PROTOCOL_DIRECTORY_TYPE =
+ 'protocolStorage:OPEN_PROTOCOL_DIRECTORY'
+
+export const ANALYZE_PROTOCOL: ANALYZE_PROTOCOL_TYPE =
+ 'protocolStorage:ANALYZE_PROTOCOL'
+
+export const ANALYZE_PROTOCOL_SUCCESS: ANALYZE_PROTOCOL_SUCCESS_TYPE =
+ 'protocolStorage:ANALYZE_PROTOCOL_SUCCESS'
+
+export const ANALYZE_PROTOCOL_FAILURE: ANALYZE_PROTOCOL_FAILURE_TYPE =
+ 'protocolStorage:ANALYZE_PROTOCOL_FAILURE'
+
+export const VIEW_PROTOCOL_SOURCE_FOLDER: VIEW_PROTOCOL_SOURCE_FOLDER_TYPE =
+ 'protocolStorage:VIEW_PROTOCOL_SOURCE_FOLDER'
+
+export const PROTOCOL_ADDITION: PROTOCOL_ADDITION_TYPE = 'protocolAddition'
+
+export const OPENTRONS_USB: OPENTRONS_USB_TYPE = 'opentrons-usb'
+
+export const U2E_DRIVER_UPDATE_URL =
+ 'https://www.realtek.com/en/component/zoo/category/network-interface-controllers-10-100-1000m-gigabit-ethernet-usb-3-0-software'
+
+// driver statuses
+
+export const NOT_APPLICABLE: 'NOT_APPLICABLE' = 'NOT_APPLICABLE'
+export const UNKNOWN: 'UNKNOWN' = 'UNKNOWN'
+export const UP_TO_DATE: 'UP_TO_DATE' = 'UP_TO_DATE'
+export const OUTDATED: 'OUTDATED' = 'OUTDATED'
+
+// action types
+
+export const SYSTEM_INFO_INITIALIZED: SYSTEM_INFO_INITIALIZED_TYPE =
+ 'systemInfo:INITIALIZED'
+
+export const USB_DEVICE_ADDED: USB_DEVICE_ADDED_TYPE =
+ 'systemInfo:USB_DEVICE_ADDED'
+
+export const USB_DEVICE_REMOVED: USB_DEVICE_REMOVED_TYPE =
+ 'systemInfo:USB_DEVICE_REMOVED'
+
+export const NETWORK_INTERFACES_CHANGED: NETWORK_INTERFACES_CHANGED_TYPE =
+ 'systemInfo:NETWORK_INTERFACES_CHANGED'
+
+export const USB_HTTP_REQUESTS_START: USB_HTTP_REQUESTS_START_TYPE =
+ 'shell:USB_HTTP_REQUESTS_START'
+export const USB_HTTP_REQUESTS_STOP: USB_HTTP_REQUESTS_STOP_TYPE =
+ 'shell:USB_HTTP_REQUESTS_STOP'
+export const APP_RESTART: APP_RESTART_TYPE = 'shell:APP_RESTART'
+export const RELOAD_UI: RELOAD_UI_TYPE = 'shell:RELOAD_UI'
+export const SEND_LOG: SEND_LOG_TYPE = 'shell:SEND_LOG'
+
+export const UPDATE_BRIGHTNESS: 'shell:UPDATE_BRIGHTNESS' =
+ 'shell:UPDATE_BRIGHTNESS'
+export const ROBOT_MASS_STORAGE_DEVICE_ADDED: 'shell:ROBOT_MASS_STORAGE_DEVICE_ADDED' =
+ 'shell:ROBOT_MASS_STORAGE_DEVICE_ADDED'
+export const ROBOT_MASS_STORAGE_DEVICE_REMOVED: 'shell:ROBOT_MASS_STORAGE_DEVICE_REMOVED' =
+ 'shell:ROBOT_MASS_STORAGE_DEVICE_REMOVED'
+export const ROBOT_MASS_STORAGE_DEVICE_ENUMERATED: 'shell:ROBOT_MASS_STORAGE_DEVICE_ENUMERATED' =
+ 'shell:ROBOT_MASS_STORAGE_DEVICE_ENUMERATED'
+export const NOTIFY_SUBSCRIBE: 'shell:NOTIFY_SUBSCRIBE' =
+ 'shell:NOTIFY_SUBSCRIBE'
+
+// copy
+// TODO(mc, 2020-05-11): i18n
+export const U2E_DRIVER_OUTDATED_MESSAGE: U2E_DRIVER_OUTDATED_MESSAGE_TYPE =
+ 'There is an updated Realtek USB-to-Ethernet adapter driver available for your computer.'
+export const U2E_DRIVER_DESCRIPTION: U2E_DRIVER_DESCRIPTION_TYPE =
+ 'The OT-2 uses this adapter for its USB connection to the Opentrons App.'
+export const U2E_DRIVER_OUTDATED_CTA: U2E_DRIVER_OUTDATED_CTA_TYPE =
+ "Please update your computer's driver to ensure a reliable connection to your OT-2."
+
+export const DISCOVERY_START: DISCOVERY_START_TYPE = 'discovery:START'
+
+export const DISCOVERY_FINISH: DISCOVERY_FINISH_TYPE = 'discovery:FINISH'
+
+export const DISCOVERY_UPDATE_LIST: DISCOVERY_UPDATE_LIST_TYPE =
+ 'discovery:UPDATE_LIST'
+
+export const DISCOVERY_REMOVE: DISCOVERY_REMOVE_TYPE = 'discovery:REMOVE'
+
+export const CLEAR_CACHE: CLEAR_CACHE_TYPE = 'discovery:CLEAR_CACHE'
+export const HEALTH_STATUS_OK: 'ok' = 'ok'
+export const HEALTH_STATUS_NOT_OK: 'notOk' = 'notOk'
+export const FAILURE_STATUSES = {
+ ECONNREFUSED: 'ECONNREFUSED',
+ ECONNFAILED: 'ECONNFAILED',
+} as const
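The explicit literal type annotations on these constants are what keep downstream action unions narrowable. A small hedged illustration with invented example types (not taken from the app's real action definitions):

    const APP_RESTART_EXAMPLE: 'shell:APP_RESTART' = 'shell:APP_RESTART'

    interface ExampleRestartAction {
      type: typeof APP_RESTART_EXAMPLE // the literal 'shell:APP_RESTART', not string
      payload: { message: string }
    }

    type ExampleAction = ExampleRestartAction | { type: 'shell:RELOAD_UI' }

    function handleExample(action: ExampleAction): void {
      if (action.type === APP_RESTART_EXAMPLE) {
        // narrowed to ExampleRestartAction, so payload is safely accessible
        console.log(action.payload.message)
      }
    }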
diff --git a/app-shell/src/dialogs/__tests__/dialogs.test.ts b/app-shell/src/dialogs/__tests__/dialogs.test.ts
index a0f4bfa0333..2406a16d5a8 100644
--- a/app-shell/src/dialogs/__tests__/dialogs.test.ts
+++ b/app-shell/src/dialogs/__tests__/dialogs.test.ts
@@ -1,11 +1,8 @@
import Electron from 'electron'
-
+import { describe, it, vi, expect } from 'vitest'
import * as Dialogs from '..'
-jest.mock('electron')
-
-const mockShowOpenDialog = Electron.dialog
- .showOpenDialog as jest.MockedFunction<typeof Electron.dialog.showOpenDialog>
+vi.mock('electron')
const mockMainWindow = ({
mainWindow: true,
@@ -14,32 +11,41 @@ const mockMainWindow = ({
describe('dialog boxes', () => {
describe('showOpenDirectoryDialog', () => {
it('directory select with cancel', () => {
- mockShowOpenDialog.mockResolvedValue({ canceled: true, filePaths: [] })
+ vi.mocked(Electron.dialog.showOpenDialog).mockResolvedValue({
+ canceled: true,
+ filePaths: [],
+ })
return Dialogs.showOpenDirectoryDialog(mockMainWindow).then(filePaths => {
- expect(mockShowOpenDialog).toHaveBeenCalledWith(mockMainWindow, {
- properties: ['openDirectory', 'createDirectory'],
- })
+ expect(vi.mocked(Electron.dialog.showOpenDialog)).toHaveBeenCalledWith(
+ mockMainWindow,
+ {
+ properties: ['openDirectory', 'createDirectory'],
+ }
+ )
expect(filePaths).toEqual([])
})
})
it('directory select with files', () => {
- mockShowOpenDialog.mockResolvedValue({
+ vi.mocked(Electron.dialog.showOpenDialog).mockResolvedValue({
canceled: false,
filePaths: ['/path/to/dir'],
})
return Dialogs.showOpenDirectoryDialog(mockMainWindow).then(filePaths => {
- expect(mockShowOpenDialog).toHaveBeenCalledWith(mockMainWindow, {
- properties: ['openDirectory', 'createDirectory'],
- })
+ expect(vi.mocked(Electron.dialog.showOpenDialog)).toHaveBeenCalledWith(
+ mockMainWindow,
+ {
+ properties: ['openDirectory', 'createDirectory'],
+ }
+ )
expect(filePaths).toEqual(['/path/to/dir'])
})
})
it('directory select with default location', () => {
- mockShowOpenDialog.mockResolvedValue({
+ vi.mocked(Electron.dialog.showOpenDialog).mockResolvedValue({
canceled: false,
filePaths: ['/path/to/dir'],
})
@@ -47,10 +53,13 @@ describe('dialog boxes', () => {
return Dialogs.showOpenDirectoryDialog(mockMainWindow, {
defaultPath: '/foo',
}).then(filePaths => {
- expect(mockShowOpenDialog).toHaveBeenCalledWith(mockMainWindow, {
- properties: ['openDirectory', 'createDirectory'],
- defaultPath: '/foo',
- })
+ expect(vi.mocked(Electron.dialog.showOpenDialog)).toHaveBeenCalledWith(
+ mockMainWindow,
+ {
+ properties: ['openDirectory', 'createDirectory'],
+ defaultPath: '/foo',
+ }
+ )
expect(filePaths).toEqual(['/path/to/dir'])
})
})
@@ -58,32 +67,41 @@ describe('dialog boxes', () => {
describe('showOpenFileDialog', () => {
it('file select with cancel', () => {
- mockShowOpenDialog.mockResolvedValue({ canceled: true, filePaths: [] })
+ vi.mocked(Electron.dialog.showOpenDialog).mockResolvedValue({
+ canceled: true,
+ filePaths: [],
+ })
return Dialogs.showOpenFileDialog(mockMainWindow).then(filePaths => {
- expect(mockShowOpenDialog).toHaveBeenCalledWith(mockMainWindow, {
- properties: ['openFile'],
- })
+ expect(vi.mocked(Electron.dialog.showOpenDialog)).toHaveBeenCalledWith(
+ mockMainWindow,
+ {
+ properties: ['openFile'],
+ }
+ )
expect(filePaths).toEqual([])
})
})
it('file select with files', () => {
- mockShowOpenDialog.mockResolvedValue({
+ vi.mocked(Electron.dialog.showOpenDialog).mockResolvedValue({
canceled: false,
filePaths: ['/path/to/file.json'],
})
return Dialogs.showOpenFileDialog(mockMainWindow).then(filePaths => {
- expect(mockShowOpenDialog).toHaveBeenCalledWith(mockMainWindow, {
- properties: ['openFile'],
- })
+ expect(vi.mocked(Electron.dialog.showOpenDialog)).toHaveBeenCalledWith(
+ mockMainWindow,
+ {
+ properties: ['openFile'],
+ }
+ )
expect(filePaths).toEqual(['/path/to/file.json'])
})
})
it('file select with filters', () => {
- mockShowOpenDialog.mockResolvedValue({
+ vi.mocked(Electron.dialog.showOpenDialog).mockResolvedValue({
canceled: false,
filePaths: ['/path/to/file.json'],
})
@@ -92,7 +110,9 @@ describe('dialog boxes', () => {
return Dialogs.showOpenFileDialog(mockMainWindow, options).then(
filePaths => {
- expect(mockShowOpenDialog).toHaveBeenCalledWith(mockMainWindow, {
+ expect(
+ vi.mocked(Electron.dialog.showOpenDialog)
+ ).toHaveBeenCalledWith(mockMainWindow, {
properties: ['openFile'],
filters: [{ name: 'JSON', extensions: ['json'] }],
})
@@ -102,7 +122,7 @@ describe('dialog boxes', () => {
})
it('file select with default location', () => {
- mockShowOpenDialog.mockResolvedValue({
+ vi.mocked(Electron.dialog.showOpenDialog).mockResolvedValue({
canceled: false,
filePaths: ['/path/to/file.json'],
})
@@ -110,10 +130,13 @@ describe('dialog boxes', () => {
return Dialogs.showOpenFileDialog(mockMainWindow, {
defaultPath: '/foo',
}).then(filePaths => {
- expect(mockShowOpenDialog).toHaveBeenCalledWith(mockMainWindow, {
- properties: ['openFile'],
- defaultPath: '/foo',
- })
+ expect(vi.mocked(Electron.dialog.showOpenDialog)).toHaveBeenCalledWith(
+ mockMainWindow,
+ {
+ properties: ['openFile'],
+ defaultPath: '/foo',
+ }
+ )
expect(filePaths).toEqual(['/path/to/file.json'])
})
})
diff --git a/app-shell/src/discovery.ts b/app-shell/src/discovery.ts
index ed562fdd069..f7e90bf0fd9 100644
--- a/app-shell/src/discovery.ts
+++ b/app-shell/src/discovery.ts
@@ -24,6 +24,7 @@ import { OPENTRONS_USB } from '@opentrons/app/src/redux/discovery/constants'
import { getFullConfig, handleConfigChange } from './config'
import { createLogger } from './log'
import { getSerialPortHttpAgent } from './usb'
+import { handleNotificationConnectionsFor } from './notifications'
import type {
Address,
@@ -31,7 +32,6 @@ import type {
LegacyService,
DiscoveryClient,
} from '@opentrons/discovery-client'
-
import type { Action, Dispatch } from './types'
import type { ConfigV1 } from '@opentrons/app/src/redux/config/schema-types'
@@ -201,6 +201,7 @@ export function registerDiscovery(
function handleRobots(): void {
const robots = client.getRobots()
+ handleNotificationConnectionsFor(robots)
if (!disableCache) store.set('robots', robots)
diff --git a/app-shell/src/http.ts b/app-shell/src/http.ts
index 02fe50da3e1..8a3a8131ceb 100644
--- a/app-shell/src/http.ts
+++ b/app-shell/src/http.ts
@@ -6,8 +6,6 @@ import pump from 'pump'
import _fetch from 'node-fetch'
import FormData from 'form-data'
-import { HTTP_API_VERSION } from '@opentrons/app/src/redux/robot-api/constants'
-
import type { Request, RequestInit, Response } from 'node-fetch'
type RequestInput = Request | string
@@ -22,7 +20,7 @@ export function fetch(
init?: RequestInit
): Promise<Response> {
const opts = init ?? {}
- opts.headers = { ...opts.headers, 'Opentrons-Version': `${HTTP_API_VERSION}` }
+ opts.headers = { ...opts.headers, 'Opentrons-Version': '3' }
return _fetch(input, opts).then(response => {
if (!response.ok) {
diff --git a/app-shell/src/labware/__tests__/definitions.test.ts b/app-shell/src/labware/__tests__/definitions.test.ts
index 697fdc4aabe..a044e40409c 100644
--- a/app-shell/src/labware/__tests__/definitions.test.ts
+++ b/app-shell/src/labware/__tests__/definitions.test.ts
@@ -4,6 +4,7 @@ import path from 'path'
import fs from 'fs-extra'
import tempy from 'tempy'
import Electron from 'electron'
+import { describe, it, expect, afterAll, vi } from 'vitest'
import {
readLabwareDirectory,
@@ -12,11 +13,7 @@ import {
removeLabwareFile,
} from '../definitions'
-jest.mock('electron')
-
-const trashItem = Electron.shell.trashItem as jest.MockedFunction<
- typeof Electron.shell.trashItem
->
+vi.mock('electron')
describe('labware directory utilities', () => {
const tempDirs: string[] = []
@@ -26,7 +23,7 @@ describe('labware directory utilities', () => {
return dir
}
- afterAll(() => {
+ afterAll((): any => {
return Promise.all(tempDirs.map(d => fs.remove(d)))
})
@@ -217,7 +214,7 @@ describe('labware directory utilities', () => {
const dir = makeEmptyDir()
const filename = path.join(dir, 'foo.json')
- trashItem.mockResolvedValue()
+ vi.mocked(Electron.shell.trashItem).mockResolvedValue()
return removeLabwareFile(filename).then(() => {
expect(Electron.shell.trashItem).toHaveBeenCalledWith(filename)
@@ -229,7 +226,9 @@ describe('labware directory utilities', () => {
const filename = path.join(dir, 'foo.json')
const setup = fs.writeJson(filename, { name: 'a' })
- trashItem.mockRejectedValue(Error('something went wrong'))
+ vi.mocked(Electron.shell.trashItem).mockRejectedValue(
+ Error('something went wrong')
+ )
return setup
.then(() => removeLabwareFile(filename))
diff --git a/app-shell/src/labware/__tests__/dispatch.test.ts b/app-shell/src/labware/__tests__/dispatch.test.ts
index f88f271956d..9df83cded8c 100644
--- a/app-shell/src/labware/__tests__/dispatch.test.ts
+++ b/app-shell/src/labware/__tests__/dispatch.test.ts
@@ -1,5 +1,6 @@
import fse from 'fs-extra'
import electron from 'electron'
+import { describe, it, vi, expect, beforeEach, afterEach } from 'vitest'
import * as Cfg from '../../config'
import * as Dialogs from '../../dialogs'
import * as Defs from '../definitions'
@@ -10,57 +11,16 @@ import { uiInitialized } from '@opentrons/app/src/redux/shell/actions'
import * as CustomLabware from '@opentrons/app/src/redux/custom-labware'
import * as CustomLabwareFixtures from '@opentrons/app/src/redux/custom-labware/__fixtures__'
+import type { Mock } from 'vitest'
import type { Config } from '@opentrons/app/src/redux/config/types'
import type { Dispatch } from '../../types'
-jest.mock('fs-extra')
-jest.mock('electron')
-jest.mock('../../config')
-jest.mock('../../dialogs')
-jest.mock('../definitions')
-jest.mock('../validation')
-
-const ensureDir = fse.ensureDir as jest.MockedFunction<typeof fse.ensureDir>
-
-const getFullConfig = Cfg.getFullConfig as jest.MockedFunction<
- typeof Cfg.getFullConfig
->
-
-const handleConfigChange = Cfg.handleConfigChange as jest.MockedFunction<
- typeof Cfg.handleConfigChange
->
-
-const showOpenDirectoryDialog = Dialogs.showOpenDirectoryDialog as jest.MockedFunction<
- typeof Dialogs.showOpenDirectoryDialog
->
-
-const showOpenFileDialog = Dialogs.showOpenFileDialog as jest.MockedFunction<
- typeof Dialogs.showOpenFileDialog
->
-
-const readLabwareDirectory = Defs.readLabwareDirectory as jest.MockedFunction<
- typeof Defs.readLabwareDirectory
->
-
-const parseLabwareFiles = Defs.parseLabwareFiles as jest.MockedFunction<
- typeof Defs.parseLabwareFiles
->
-
-const addLabwareFile = Defs.addLabwareFile as jest.MockedFunction<
- typeof Defs.addLabwareFile
->
-
-const removeLabwareFile = Defs.removeLabwareFile as jest.MockedFunction<
- typeof Defs.removeLabwareFile
->
-
-const validateLabwareFiles = Val.validateLabwareFiles as jest.MockedFunction<
- typeof Val.validateLabwareFiles
->
-
-const validateNewLabwareFile = Val.validateNewLabwareFile as jest.MockedFunction<
- typeof Val.validateNewLabwareFile
->
+vi.mock('fs-extra')
+vi.mock('electron')
+vi.mock('../../config')
+vi.mock('../../dialogs')
+vi.mock('../definitions')
+vi.mock('../validation')
// wait a few ticks to let the mock Promises clear
const flush = (): Promise<void> =>
@@ -71,41 +31,43 @@ describe('labware module dispatches', () => {
const mockMainWindow = ({
browserWindow: true,
} as unknown) as electron.BrowserWindow
- let dispatch: jest.MockedFunction<Dispatch>
+ let dispatch: Mock
let handleAction: Dispatch
beforeEach(() => {
- getFullConfig.mockReturnValue({
+ vi.mocked(Cfg.getFullConfig).mockReturnValue({
labware: { directory: labwareDir },
} as Config)
- ensureDir.mockResolvedValue(undefined as never)
- addLabwareFile.mockResolvedValue()
- removeLabwareFile.mockResolvedValue()
- readLabwareDirectory.mockResolvedValue([])
- parseLabwareFiles.mockResolvedValue([])
- validateLabwareFiles.mockReturnValue([])
+ vi.mocked(fse.ensureDir).mockResolvedValue(undefined as never)
+ vi.mocked(Defs.addLabwareFile).mockResolvedValue()
+ vi.mocked(Defs.removeLabwareFile).mockResolvedValue()
+ vi.mocked(Defs.readLabwareDirectory).mockResolvedValue([])
+ vi.mocked(Defs.parseLabwareFiles).mockResolvedValue([])
+ vi.mocked(Val.validateLabwareFiles).mockReturnValue([])
- showOpenDirectoryDialog.mockResolvedValue([])
- showOpenFileDialog.mockResolvedValue([])
+ vi.mocked(Dialogs.showOpenDirectoryDialog).mockResolvedValue([])
+ vi.mocked(Dialogs.showOpenFileDialog).mockResolvedValue([])
- dispatch = jest.fn()
+ dispatch = vi.fn()
handleAction = registerLabware(dispatch, mockMainWindow)
})
afterEach(() => {
- jest.resetAllMocks()
+ vi.resetAllMocks()
})
it('ensures labware directory exists on FETCH_CUSTOM_LABWARE', () => {
handleAction(CustomLabware.fetchCustomLabware())
- expect(ensureDir).toHaveBeenCalledWith(labwareDir)
+ expect(vi.mocked(fse.ensureDir)).toHaveBeenCalledWith(labwareDir)
})
it('reads labware directory on FETCH_CUSTOM_LABWARE', () => {
handleAction(CustomLabware.fetchCustomLabware())
return flush().then(() =>
- expect(readLabwareDirectory).toHaveBeenCalledWith(labwareDir)
+ expect(vi.mocked(Defs.readLabwareDirectory)).toHaveBeenCalledWith(
+ labwareDir
+ )
)
})
@@ -113,7 +75,9 @@ describe('labware module dispatches', () => {
handleAction(uiInitialized())
return flush().then(() =>
- expect(readLabwareDirectory).toHaveBeenCalledWith(labwareDir)
+ expect(vi.mocked(Defs.readLabwareDirectory)).toHaveBeenCalledWith(
+ labwareDir
+ )
)
})
@@ -126,14 +90,20 @@ describe('labware module dispatches', () => {
{ filename: 'd.json', modified: 3, data: {} },
]
- readLabwareDirectory.mockResolvedValueOnce(mockDirectoryListing)
- parseLabwareFiles.mockResolvedValueOnce(mockParsedFiles)
+ vi.mocked(Defs.readLabwareDirectory).mockResolvedValueOnce(
+ mockDirectoryListing
+ )
+ vi.mocked(Defs.parseLabwareFiles).mockResolvedValueOnce(mockParsedFiles)
handleAction(CustomLabware.fetchCustomLabware())
return flush().then(() => {
- expect(parseLabwareFiles).toHaveBeenCalledWith(mockDirectoryListing)
- expect(validateLabwareFiles).toHaveBeenCalledWith(mockParsedFiles)
+ expect(vi.mocked(Defs.parseLabwareFiles)).toHaveBeenCalledWith(
+ mockDirectoryListing
+ )
+ expect(vi.mocked(Val.validateLabwareFiles)).toHaveBeenCalledWith(
+ mockParsedFiles
+ )
})
})
@@ -144,7 +114,7 @@ describe('labware module dispatches', () => {
CustomLabwareFixtures.mockValidLabware,
]
- validateLabwareFiles.mockReturnValueOnce(mockValidatedFiles)
+ vi.mocked(Val.validateLabwareFiles).mockReturnValueOnce(mockValidatedFiles)
handleAction(CustomLabware.fetchCustomLabware())
@@ -156,7 +126,7 @@ describe('labware module dispatches', () => {
})
it('dispatches CUSTOM_LABWARE_LIST_FAILURE if read fails', () => {
- readLabwareDirectory.mockRejectedValue(new Error('AH'))
+ vi.mocked(Defs.readLabwareDirectory).mockRejectedValue(new Error('AH'))
handleAction(CustomLabware.fetchCustomLabware())
@@ -171,15 +141,20 @@ describe('labware module dispatches', () => {
handleAction(CustomLabware.changeCustomLabwareDirectory())
return flush().then(() => {
- expect(showOpenDirectoryDialog).toHaveBeenCalledWith(mockMainWindow, {
- defaultPath: labwareDir,
- })
+ expect(vi.mocked(Dialogs.showOpenDirectoryDialog)).toHaveBeenCalledWith(
+ mockMainWindow,
+ {
+ defaultPath: labwareDir,
+ }
+ )
expect(dispatch).not.toHaveBeenCalled()
})
})
it('dispatches config:UPDATE on labware dir selection', () => {
- showOpenDirectoryDialog.mockResolvedValue(['/path/to/labware'])
+ vi.mocked(Dialogs.showOpenDirectoryDialog).mockResolvedValue([
+ '/path/to/labware',
+ ])
handleAction(CustomLabware.changeCustomLabwareDirectory())
@@ -193,16 +168,18 @@ describe('labware module dispatches', () => {
})
it('reads labware directory on config change', () => {
- expect(handleConfigChange).toHaveBeenCalledWith(
+ expect(vi.mocked(Cfg.handleConfigChange)).toHaveBeenCalledWith(
'labware.directory',
expect.any(Function)
)
- const changeHandler = handleConfigChange.mock.calls[0][1]
+ const changeHandler = vi.mocked(Cfg.handleConfigChange).mock.calls[0][1]
changeHandler('old', 'new')
return flush().then(() => {
- expect(readLabwareDirectory).toHaveBeenCalledWith(labwareDir)
+ expect(vi.mocked(Defs.readLabwareDirectory)).toHaveBeenCalledWith(
+ labwareDir
+ )
expect(dispatch).toHaveBeenCalledWith(
CustomLabware.customLabwareList([], 'changeDirectory')
)
@@ -210,13 +187,15 @@ describe('labware module dispatches', () => {
})
it('dispatches labware directory list error on config change', () => {
- const changeHandler = handleConfigChange.mock.calls[0][1]
+ const changeHandler = vi.mocked(Cfg.handleConfigChange).mock.calls[0][1]
- readLabwareDirectory.mockRejectedValue(new Error('AH'))
+ vi.mocked(Defs.readLabwareDirectory).mockRejectedValue(new Error('AH'))
changeHandler('old', 'new')
return flush().then(() => {
- expect(readLabwareDirectory).toHaveBeenCalledWith(labwareDir)
+ expect(vi.mocked(Defs.readLabwareDirectory)).toHaveBeenCalledWith(
+ labwareDir
+ )
expect(dispatch).toHaveBeenCalledWith(
CustomLabware.customLabwareListFailure('AH', 'changeDirectory')
)
@@ -227,16 +206,19 @@ describe('labware module dispatches', () => {
handleAction(CustomLabware.addCustomLabware())
return flush().then(() => {
- expect(showOpenFileDialog).toHaveBeenCalledWith(mockMainWindow, {
- defaultPath: '__mock-app-path__',
- filters: [
- {
- name: 'JSON Labware Definitions',
- extensions: ['json'],
- },
- ],
- properties: ['multiSelections'],
- })
+ expect(vi.mocked(Dialogs.showOpenFileDialog)).toHaveBeenCalledWith(
+ mockMainWindow,
+ {
+ defaultPath: '__mock-app-path__',
+ filters: [
+ {
+ name: 'JSON Labware Definitions',
+ extensions: ['json'],
+ },
+ ],
+ properties: ['multiSelections'],
+ }
+ )
expect(dispatch).not.toHaveBeenCalled()
})
})
@@ -250,20 +232,24 @@ describe('labware module dispatches', () => {
data: {},
}
- showOpenFileDialog.mockResolvedValue(['/path/to/labware.json'])
+ vi.mocked(Dialogs.showOpenFileDialog).mockResolvedValue([
+ '/path/to/labware.json',
+ ])
// validation of existing definitions
- validateLabwareFiles.mockReturnValueOnce(mockValidatedFiles)
+ vi.mocked(Val.validateLabwareFiles).mockReturnValueOnce(mockValidatedFiles)
// existing files mock return
- parseLabwareFiles.mockResolvedValue([])
+ vi.mocked(Defs.parseLabwareFiles).mockResolvedValue([])
// new file mock return
- parseLabwareFiles.mockResolvedValue([mockNewUncheckedFile])
+ vi.mocked(Defs.parseLabwareFiles).mockResolvedValue([mockNewUncheckedFile])
// new file (not needed for this test except to prevent a type error)
- validateNewLabwareFile.mockReturnValueOnce(mockValidatedFiles[0])
+ vi.mocked(Val.validateNewLabwareFile).mockReturnValueOnce(
+ mockValidatedFiles[0]
+ )
handleAction(CustomLabware.addCustomLabware())
return flush().then(() => {
- expect(validateNewLabwareFile).toHaveBeenCalledWith(
+ expect(vi.mocked(Val.validateNewLabwareFile)).toHaveBeenCalledWith(
mockValidatedFiles,
mockNewUncheckedFile
)
@@ -276,8 +262,8 @@ describe('labware module dispatches', () => {
mockInvalidFile
)
- showOpenFileDialog.mockResolvedValue(['c.json'])
- validateNewLabwareFile.mockReturnValueOnce(mockInvalidFile)
+ vi.mocked(Dialogs.showOpenFileDialog).mockResolvedValue(['c.json'])
+ vi.mocked(Val.validateNewLabwareFile).mockReturnValueOnce(mockInvalidFile)
handleAction(CustomLabware.addCustomLabware())
@@ -293,18 +279,20 @@ describe('labware module dispatches', () => {
'addLabware'
)
- showOpenFileDialog.mockResolvedValue([mockValidFile.filename])
- validateNewLabwareFile.mockReturnValueOnce(mockValidFile)
+ vi.mocked(Dialogs.showOpenFileDialog).mockResolvedValue([
+ mockValidFile.filename,
+ ])
+ vi.mocked(Val.validateNewLabwareFile).mockReturnValueOnce(mockValidFile)
// initial read
- validateLabwareFiles.mockReturnValueOnce([])
+ vi.mocked(Val.validateLabwareFiles).mockReturnValueOnce([])
// read after add
- validateLabwareFiles.mockReturnValueOnce([mockValidFile])
+ vi.mocked(Val.validateLabwareFiles).mockReturnValueOnce([mockValidFile])
handleAction(CustomLabware.addCustomLabware())
return flush().then(() => {
- expect(addLabwareFile).toHaveBeenCalledWith(
+ expect(vi.mocked(Defs.addLabwareFile)).toHaveBeenCalledWith(
mockValidFile.filename,
labwareDir
)
@@ -316,10 +304,10 @@ describe('labware module dispatches', () => {
const mockValidFile = CustomLabwareFixtures.mockValidLabware
const expectedAction = CustomLabware.addCustomLabwareFailure(null, 'AH')
- showOpenFileDialog.mockResolvedValue(['a.json'])
- validateNewLabwareFile.mockReturnValueOnce(mockValidFile)
- validateLabwareFiles.mockReturnValueOnce([])
- addLabwareFile.mockRejectedValue(new Error('AH'))
+ vi.mocked(Dialogs.showOpenFileDialog).mockResolvedValue(['a.json'])
+ vi.mocked(Val.validateNewLabwareFile).mockReturnValueOnce(mockValidFile)
+ vi.mocked(Val.validateLabwareFiles).mockReturnValueOnce([])
+ vi.mocked(Defs.addLabwareFile).mockRejectedValue(new Error('AH'))
handleAction(CustomLabware.addCustomLabware())
@@ -341,16 +329,20 @@ describe('labware module dispatches', () => {
)
// validation of existing definitions
- validateLabwareFiles.mockReturnValueOnce(mockExisting)
+ vi.mocked(Val.validateLabwareFiles).mockReturnValueOnce(mockExisting)
// validation after deletes
- validateLabwareFiles.mockReturnValueOnce(mockAfterDeletes)
+ vi.mocked(Val.validateLabwareFiles).mockReturnValueOnce(mockAfterDeletes)
handleAction(CustomLabware.addCustomLabware(duplicate))
return flush().then(() => {
- expect(removeLabwareFile).toHaveBeenCalledWith('/duplicate1.json')
- expect(removeLabwareFile).toHaveBeenCalledWith('/duplicate2.json')
- expect(addLabwareFile).toHaveBeenCalledWith(
+ expect(vi.mocked(Defs.removeLabwareFile)).toHaveBeenCalledWith(
+ '/duplicate1.json'
+ )
+ expect(vi.mocked(Defs.removeLabwareFile)).toHaveBeenCalledWith(
+ '/duplicate2.json'
+ )
+ expect(vi.mocked(Defs.addLabwareFile)).toHaveBeenCalledWith(
duplicate.filename,
labwareDir
)
@@ -366,8 +358,8 @@ describe('labware module dispatches', () => {
]
const expectedAction = CustomLabware.addCustomLabwareFailure(null, 'AH')
- validateLabwareFiles.mockReturnValueOnce(mockExisting)
- removeLabwareFile.mockRejectedValue(new Error('AH'))
+ vi.mocked(Val.validateLabwareFiles).mockReturnValueOnce(mockExisting)
+ vi.mocked(Defs.removeLabwareFile).mockRejectedValue(new Error('AH'))
handleAction(CustomLabware.addCustomLabware(duplicate))
diff --git a/app-shell/src/labware/__tests__/validation.test.ts b/app-shell/src/labware/__tests__/validation.test.ts
index de21b4e887b..68359deaeb4 100644
--- a/app-shell/src/labware/__tests__/validation.test.ts
+++ b/app-shell/src/labware/__tests__/validation.test.ts
@@ -1,10 +1,12 @@
+import { describe, it, expect } from 'vitest'
import { validateLabwareFiles, validateNewLabwareFile } from '../validation'
-import uncheckedLabwareA from '@opentrons/shared-data/labware/fixtures/2/fixture_96_plate.json'
-import uncheckedLabwareB from '@opentrons/shared-data/labware/fixtures/2/fixture_12_trough.json'
+import {
+ fixture96Plate as uncheckedLabwareA,
+ fixture12Trough as uncheckedLabwareB,
+} from '@opentrons/shared-data'
import type { CheckedLabwareFile } from '@opentrons/app/src/redux/custom-labware/types'
-
import type { LabwareDefinition2 } from '@opentrons/shared-data'
const validLabwareA = uncheckedLabwareA as LabwareDefinition2
diff --git a/app-shell/src/labware/compare.ts b/app-shell/src/labware/compare.ts
index aa1603e5415..41df216b467 100644
--- a/app-shell/src/labware/compare.ts
+++ b/app-shell/src/labware/compare.ts
@@ -1,5 +1,3 @@
-// import type { CheckedLabwareFile } from '@opentrons/app/src/redux/custom-labware/types'
-
// TODO(bc, 2021-02-22): this function needs to be rewritten to satisfy how TS prefers to
// consume the `CheckedLabwareFile` union type. revisit once `app/src` is all in TS
diff --git a/app-shell/src/labware/index.ts b/app-shell/src/labware/index.ts
index f46f9134527..e5bc4a30846 100644
--- a/app-shell/src/labware/index.ts
+++ b/app-shell/src/labware/index.ts
@@ -2,14 +2,27 @@ import fse from 'fs-extra'
import { app, shell } from 'electron'
import { getFullConfig, handleConfigChange } from '../config'
import { showOpenDirectoryDialog, showOpenFileDialog } from '../dialogs'
+import {
+ ADD_CUSTOM_LABWARE,
+ ADD_CUSTOM_LABWARE_FILE,
+ ADD_LABWARE,
+ CHANGE_CUSTOM_LABWARE_DIRECTORY,
+ CHANGE_DIRECTORY,
+ DELETE_CUSTOM_LABWARE_FILE,
+ DELETE_LABWARE,
+ FETCH_CUSTOM_LABWARE,
+ INITIAL,
+ LABWARE_DIRECTORY_CONFIG_PATH,
+ OPEN_CUSTOM_LABWARE_DIRECTORY,
+ OVERWRITE_LABWARE,
+ POLL,
+ UI_INITIALIZED,
+ VALID_LABWARE_FILE,
+} from '../constants'
import * as Definitions from './definitions'
import { validateLabwareFiles, validateNewLabwareFile } from './validation'
import { sameIdentity } from './compare'
-import { UI_INITIALIZED } from '@opentrons/app/src/redux/shell/actions'
-import * as CustomLabware from '@opentrons/app/src/redux/custom-labware'
-import * as ConfigActions from '@opentrons/app/src/redux/config'
-
import type {
UncheckedLabwareFile,
DuplicateLabwareFile,
@@ -19,6 +32,13 @@ import type {
import type { BrowserWindow } from 'electron'
import type { Action, Dispatch } from '../types'
+import {
+ addCustomLabwareFailure,
+ addNewLabwareName,
+ customLabwareList,
+ customLabwareListFailure,
+ updateConfigValue,
+} from '../config/actions'
const ensureDir: (dir: string) => Promise<void> = fse.ensureDir
@@ -40,10 +60,10 @@ const fetchAndValidateCustomLabware = (
): Promise<void> => {
return fetchValidatedCustomLabware()
.then(payload => {
- dispatch(CustomLabware.customLabwareList(payload, source))
+ dispatch(customLabwareList(payload, source))
})
.catch((error: Error) => {
- dispatch(CustomLabware.customLabwareListFailure(error.message, source))
+ dispatch(customLabwareListFailure(error.message, source))
})
}
@@ -65,9 +85,7 @@ const overwriteLabware = (
const dir = getFullConfig().labware.directory
return Definitions.addLabwareFile(next.filename, dir)
})
- .then(() =>
- fetchAndValidateCustomLabware(dispatch, CustomLabware.OVERWRITE_LABWARE)
- )
+ .then(() => fetchAndValidateCustomLabware(dispatch, OVERWRITE_LABWARE))
}
const copyLabware = (
@@ -82,27 +100,25 @@ const copyLabware = (
const next = validateNewLabwareFile(existing, newFile)
const dir = getFullConfig().labware.directory
- if (next.type !== CustomLabware.VALID_LABWARE_FILE) {
- return dispatch(CustomLabware.addCustomLabwareFailure(next))
+ if (next.type !== VALID_LABWARE_FILE) {
+ return dispatch(addCustomLabwareFailure(next))
}
return Definitions.addLabwareFile(next.filename, dir)
- .then(() =>
- fetchAndValidateCustomLabware(dispatch, CustomLabware.ADD_LABWARE)
- )
- .then(() => dispatch(CustomLabware.addNewLabwareName(newFile.filename)))
+ .then(() => fetchAndValidateCustomLabware(dispatch, ADD_LABWARE))
+ .then(() => dispatch(addNewLabwareName(newFile.filename)))
})
}
const deleteLabware = (dispatch: Dispatch, filePath: string): Promise => {
return Definitions.removeLabwareFile(filePath).then(() =>
- fetchAndValidateCustomLabware(dispatch, CustomLabware.DELETE_LABWARE)
+ fetchAndValidateCustomLabware(dispatch, DELETE_LABWARE)
)
}
export function getValidLabwareFilePaths(): Promise<string[]> {
return fetchValidatedCustomLabware().then(validatedLabware => {
return validatedLabware
- .filter(labware => labware.type === CustomLabware.VALID_LABWARE_FILE)
+ .filter(labware => labware.type === VALID_LABWARE_FILE)
.map(labware => labware.filename)
})
}
@@ -111,25 +127,22 @@ export function registerLabware(
dispatch: Dispatch,
mainWindow: BrowserWindow
): Dispatch {
- handleConfigChange(CustomLabware.LABWARE_DIRECTORY_CONFIG_PATH, () => {
+ handleConfigChange(LABWARE_DIRECTORY_CONFIG_PATH, () => {
// eslint-disable-next-line @typescript-eslint/no-floating-promises
- fetchAndValidateCustomLabware(dispatch, CustomLabware.CHANGE_DIRECTORY)
+ fetchAndValidateCustomLabware(dispatch, CHANGE_DIRECTORY)
})
return function handleActionForLabware(action: Action) {
switch (action.type) {
- case CustomLabware.FETCH_CUSTOM_LABWARE:
+ case FETCH_CUSTOM_LABWARE:
case UI_INITIALIZED: {
- const source =
- action.type === CustomLabware.FETCH_CUSTOM_LABWARE
- ? CustomLabware.POLL
- : CustomLabware.INITIAL
+ const source = action.type === FETCH_CUSTOM_LABWARE ? POLL : INITIAL
// eslint-disable-next-line @typescript-eslint/no-floating-promises
fetchAndValidateCustomLabware(dispatch, source)
break
}
- case CustomLabware.CHANGE_CUSTOM_LABWARE_DIRECTORY: {
+ case CHANGE_CUSTOM_LABWARE_DIRECTORY: {
const { labware: config } = getFullConfig()
const dialogOptions = { defaultPath: config.directory }
@@ -137,13 +150,13 @@ export function registerLabware(
showOpenDirectoryDialog(mainWindow, dialogOptions).then(filePaths => {
if (filePaths.length > 0) {
const dir = filePaths[0]
- dispatch(ConfigActions.updateConfigValue('labware.directory', dir))
+ dispatch(updateConfigValue('labware.directory', dir))
}
})
break
}
- case CustomLabware.ADD_CUSTOM_LABWARE: {
+ case ADD_CUSTOM_LABWARE: {
let addLabwareTask
// eslint-disable-next-line @typescript-eslint/strict-boolean-expressions
@@ -171,21 +184,21 @@ export function registerLabware(
}
addLabwareTask.catch((error: Error) => {
- dispatch(CustomLabware.addCustomLabwareFailure(null, error.message))
+ dispatch(addCustomLabwareFailure(null, error.message))
})
break
}
- case CustomLabware.ADD_CUSTOM_LABWARE_FILE: {
+ case ADD_CUSTOM_LABWARE_FILE: {
const filePath = action.payload.filePath
copyLabware(dispatch, [filePath]).catch((error: Error) => {
- dispatch(CustomLabware.addCustomLabwareFailure(null, error.message))
+ dispatch(addCustomLabwareFailure(null, error.message))
})
break
}
- case CustomLabware.DELETE_CUSTOM_LABWARE_FILE: {
+ case DELETE_CUSTOM_LABWARE_FILE: {
const filePath = action.payload.filePath
deleteLabware(dispatch, filePath).catch((error: Error) => {
console.error(error)
@@ -193,7 +206,7 @@ export function registerLabware(
break
}
- case CustomLabware.OPEN_CUSTOM_LABWARE_DIRECTORY: {
+ case OPEN_CUSTOM_LABWARE_DIRECTORY: {
const dir = getFullConfig().labware.directory
shell.openPath(dir)
break
diff --git a/app-shell/src/labware/validation.ts b/app-shell/src/labware/validation.ts
index 7ad1ee788ff..c46a93ae598 100644
--- a/app-shell/src/labware/validation.ts
+++ b/app-shell/src/labware/validation.ts
@@ -1,20 +1,19 @@
import Ajv from 'ajv'
import sortBy from 'lodash/sortBy'
-import labwareSchema from '@opentrons/shared-data/labware/schemas/2.json'
+import { labwareSchemaV2 as labwareSchema } from '@opentrons/shared-data'
import { sameIdentity } from './compare'
-import {
- INVALID_LABWARE_FILE,
- DUPLICATE_LABWARE_FILE,
- OPENTRONS_LABWARE_FILE,
- VALID_LABWARE_FILE,
-} from '@opentrons/app/src/redux/custom-labware/selectors'
-
import type { LabwareDefinition2 } from '@opentrons/shared-data'
import type {
UncheckedLabwareFile,
CheckedLabwareFile,
} from '@opentrons/app/src/redux/custom-labware/types'
+import {
+ DUPLICATE_LABWARE_FILE,
+ INVALID_LABWARE_FILE,
+ OPENTRONS_LABWARE_FILE,
+ VALID_LABWARE_FILE,
+} from '../constants'
const ajv = new Ajv()
const validateDefinition = ajv.compile(labwareSchema)
diff --git a/app-shell/src/main.ts b/app-shell/src/main.ts
index b1ef492b949..1f44b0607b9 100644
--- a/app-shell/src/main.ts
+++ b/app-shell/src/main.ts
@@ -1,8 +1,11 @@
// electron main entry point
import { app, ipcMain } from 'electron'
+import electronDebug from 'electron-debug'
+import dns from 'dns'
import contextMenu from 'electron-context-menu'
+import * as electronDevtoolsInstaller from 'electron-devtools-installer'
-import { createUi } from './ui'
+import { createUi, registerReloadUi } from './ui'
import { initializeMenu } from './menu'
import { createLogger } from './log'
import { registerProtocolAnalysis } from './protocol-analysis'
@@ -14,10 +17,18 @@ import { registerSystemInfo } from './system-info'
import { registerProtocolStorage } from './protocol-storage'
import { getConfig, getStore, getOverrides, registerConfig } from './config'
import { registerUsb } from './usb'
+import { registerNotify, closeAllNotifyConnections } from './notifications'
import type { BrowserWindow } from 'electron'
import type { Dispatch, Logger } from './types'
+/**
+ * Node 17 changed the default DNS result order to prefer IPv6, which causes localhost requests to fail.
+ * Setting the default back to IPv4-first fixes the issue.
+ * https://github.com/node-fetch/node-fetch/issues/1624
+ */
+dns.setDefaultResultOrder('ipv4first')
+
const config = getConfig()
const log = createLogger('main')
@@ -29,7 +40,7 @@ log.debug('App config', {
if (config.devtools) {
// eslint-disable-next-line @typescript-eslint/no-var-requires
- require('electron-debug')({ isEnabled: true, showDevTools: true })
+ electronDebug({ isEnabled: true, showDevTools: true })
}
// hold on to references so they don't get garbage collected
@@ -45,6 +56,14 @@ if (config.devtools) app.once('ready', installDevtools)
app.once('window-all-closed', () => {
log.debug('all windows closed, quitting the app')
app.quit()
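+ // Attempt to close MQTT connections on shutdown; quit whether or not the close succeeds.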
+ closeAllNotifyConnections()
+ .then(() => {
+ app.quit()
+ })
+ .catch(error => {
+ log.warn('Failed to properly close MQTT connections:', error)
+ app.quit()
+ })
})
function startUp(): void {
@@ -88,6 +107,8 @@ function startUp(): void {
registerSystemInfo(dispatch),
registerProtocolStorage(dispatch),
registerUsb(dispatch),
+ registerNotify(dispatch, mainWindow),
+ registerReloadUi(mainWindow),
]
ipcMain.on('dispatch', (_, action) => {
@@ -107,21 +128,32 @@ function createRendererLogger(): Logger {
return logger
}
-function installDevtools(): Promise<void> {
- // eslint-disable-next-line @typescript-eslint/no-var-requires
- const devtools = require('electron-devtools-installer')
- const extensions = [devtools.REACT_DEVELOPER_TOOLS, devtools.REDUX_DEVTOOLS]
- const install = devtools.default
+function installDevtools(): Promise<void> {
+ const extensions = [
+ electronDevtoolsInstaller.REACT_DEVELOPER_TOOLS,
+ electronDevtoolsInstaller.REDUX_DEVTOOLS,
+ ]
+ // @ts-expect-error the types for electron-devtools-installer are not correct
+ // when importing the default export via common JS. The installer is actually nested in
+ // another default object
+ const install = electronDevtoolsInstaller.default?.default
const forceReinstall = config.reinstallDevtools
log.debug('Installing devtools')
- return install(extensions, forceReinstall)
- .then(() => log.debug('Devtools extensions installed'))
- .catch((error: unknown) => {
- log.warn('Failed to install devtools extensions', {
- forceReinstall,
- error,
+ if (typeof install === 'function') {
+ return install(extensions, forceReinstall)
+ .then(() => log.debug('Devtools extensions installed'))
+ .catch((error: unknown) => {
+ log.warn('Failed to install devtools extensions', {
+ forceReinstall,
+ error,
+ })
})
- })
+ } else {
+ log.warn('could not resolve electron dev tools installer')
+ return Promise.reject(
+ new Error('could not resolve electron dev tools installer')
+ )
+ }
}
diff --git a/app-shell/src/menu.ts b/app-shell/src/menu.ts
index 90fc91943d8..52f04978934 100644
--- a/app-shell/src/menu.ts
+++ b/app-shell/src/menu.ts
@@ -1,10 +1,13 @@
/* eslint-disable @typescript-eslint/no-var-requires */
// application menu
-import { Menu } from 'electron'
+import { Menu, shell } from 'electron'
import type { MenuItemConstructorOptions } from 'electron'
import { LOG_DIR } from './log'
+const PRODUCT_NAME: string = _PKG_PRODUCT_NAME_
+const BUGS_URL: string = _PKG_BUGS_URL_
+
// file or application menu
const firstMenu: MenuItemConstructorOptions = {
role: process.platform === 'darwin' ? 'appMenu' : 'fileMenu',
@@ -23,20 +26,20 @@ const helpMenu: MenuItemConstructorOptions = {
label: 'Learn More',
click: () => {
// eslint-disable-next-line @typescript-eslint/no-floating-promises
- require('electron').shell.openExternal('https://opentrons.com/')
+ shell.openExternal('https://opentrons.com/')
},
},
{
- label: `View ${_PKG_PRODUCT_NAME_} App Logs`,
+ label: `View ${PRODUCT_NAME} App Logs`,
click: () => {
- require('electron').shell.openPath(LOG_DIR)
+ shell.openPath(LOG_DIR)
},
},
{
label: 'Report an Issue',
click: () => {
// eslint-disable-next-line @typescript-eslint/no-floating-promises
- require('electron').shell.openExternal(_PKG_BUGS_URL_)
+ shell.openExternal(BUGS_URL)
},
},
],
diff --git a/app-shell/src/notifications/__tests__/connect.test.ts b/app-shell/src/notifications/__tests__/connect.test.ts
new file mode 100644
index 00000000000..12c41464353
--- /dev/null
+++ b/app-shell/src/notifications/__tests__/connect.test.ts
@@ -0,0 +1,115 @@
+import { vi, describe, expect, it } from 'vitest'
+
+import {
+ getHealthyRobotDataForNotifyConnections,
+ cleanUpUnreachableRobots,
+ establishConnections,
+ closeConnectionsForcefullyFor,
+} from '../connect'
+import { connectionStore } from '../store'
+import { FAILURE_STATUSES } from '../../constants'
+import {
+ MOCK_DISCOVERY_ROBOTS,
+ MOCK_HEALTHY_ROBOTS,
+ MOCK_STORE_ROBOTS,
+} from '../../__fixtures__'
+
+vi.mock('electron-store')
+vi.mock('../notifyLog', () => {
+ return {
+ createLogger: () => {
+ return { debug: () => null }
+ },
+ notifyLog: { debug: vi.fn(), warn: vi.fn() },
+ }
+})
+
+describe('getHealthyRobotDataForNotifyConnections', () => {
+ it('should filter a list of discovery robots, only returning robots that have a health status of ok', () => {
+ const healthyRobots = getHealthyRobotDataForNotifyConnections(
+ MOCK_DISCOVERY_ROBOTS
+ )
+ expect(healthyRobots).toEqual(MOCK_HEALTHY_ROBOTS)
+ })
+})
+
+describe('cleanUpUnreachableRobots', () => {
+ it('should close connections forcefully for unreachable robots and resolve them', async () => {
+ MOCK_STORE_ROBOTS.forEach(robot => {
+ void connectionStore
+ .setPendingConnection(robot.robotName)
+ .then(() =>
+ connectionStore.setConnected(robot.robotName, vi.fn() as any)
+ )
+ })
+ const unreachableRobots = await cleanUpUnreachableRobots(
+ MOCK_HEALTHY_ROBOTS
+ )
+ expect(unreachableRobots).toEqual(['opentrons-dev3'])
+ })
+})
+
+describe('establishConnections', () => {
+ it('should not resolve any new connections if all reported robots are already in the connection store and connected', async () => {
+ connectionStore.clearStore()
+ MOCK_STORE_ROBOTS.forEach(robot => {
+ void connectionStore
+ .setPendingConnection(robot.robotName)
+ .then(() =>
+ connectionStore.setConnected(robot.robotName, vi.fn() as any)
+ )
+ })
+
+ const newRobots = await establishConnections(MOCK_HEALTHY_ROBOTS)
+ expect(newRobots).toEqual([])
+ })
+
+ it('should not attempt to connect to a robot that is known to have a blocked notification port', async () => {
+ await connectionStore.setErrorStatus(
+ '10.14.19.51',
+ FAILURE_STATUSES.ECONNREFUSED
+ )
+ connectionStore.clearStore()
+
+ const newRobots = await establishConnections(MOCK_HEALTHY_ROBOTS)
+ expect(newRobots).toEqual([
+ { ip: '10.14.19.50', robotName: 'opentrons-dev' },
+ { ip: '10.14.19.53', robotName: 'opentrons-dev4' },
+ ])
+ })
+
+ it('should not report a robot as new if it is connecting', async () => {
+ connectionStore.clearStore()
+ MOCK_STORE_ROBOTS.forEach(robot => {
+ void connectionStore.setPendingConnection(robot.robotName)
+ })
+
+ const newRobots = await establishConnections(MOCK_HEALTHY_ROBOTS)
+ expect(newRobots).toEqual([])
+ })
+
+ it('should create a new entry in the connection store for a new robot', async () => {
+ connectionStore.clearStore()
+ await establishConnections(MOCK_HEALTHY_ROBOTS)
+ console.log(connectionStore)
+ expect(connectionStore.getRobotNameByIP('10.14.19.50')).not.toBeNull()
+ })
+})
+
+describe('closeConnectionsForcefullyFor', () => {
+ it('should return an array of promises for each closing connection and resolve after closing connections', async () => {
+ connectionStore.clearStore()
+ MOCK_STORE_ROBOTS.forEach(robot => {
+ void connectionStore
+ .setPendingConnection(robot.robotName)
+ .then(() =>
+ connectionStore.setConnected(robot.robotName, vi.fn() as any)
+ )
+ })
+ const closingRobots = closeConnectionsForcefullyFor([
+ 'opentrons-dev',
+ 'opentrons-dev2',
+ ])
+ closingRobots.forEach(robot => expect(robot).toBeInstanceOf(Promise))
+ })
+})
diff --git a/app-shell/src/notifications/__tests__/deserialize.test.ts b/app-shell/src/notifications/__tests__/deserialize.test.ts
new file mode 100644
index 00000000000..ca9bab984fb
--- /dev/null
+++ b/app-shell/src/notifications/__tests__/deserialize.test.ts
@@ -0,0 +1,33 @@
+import { describe, expect, it } from 'vitest'
+
+import { deserializeExpectedMessages } from '../deserialize'
+
+import type { NotifyResponseData } from '@opentrons/app/src/redux/shell/types'
+
+const MOCK_VALID_RESPONSE: NotifyResponseData = { refetch: true }
+const MOCK_VALID_STRING_RESPONSE = JSON.stringify(MOCK_VALID_RESPONSE)
+const MOCK_INVALID_OBJECT = JSON.stringify({ test: 'MOCK_RESPONSE' })
+const MOCK_INVALID_STRING = 'MOCK_STRING'
+
+describe('closeConnectionsForcefullyFor', () => {
+ it('should resolve with the deserialized message if it is a valid notify response', async () => {
+ const response = await deserializeExpectedMessages(
+ MOCK_VALID_STRING_RESPONSE
+ )
+ expect(response).toEqual(MOCK_VALID_RESPONSE)
+ })
+
+ it('should reject with an error if the deserialized message is not a valid notify response', async () => {
+ const responsePromise = deserializeExpectedMessages(MOCK_INVALID_OBJECT)
+ await expect(responsePromise).rejects.toThrowError(
+ 'Unexpected data received from notify broker: {"test":"MOCK_RESPONSE"}'
+ )
+ })
+
+ it('should reject with an error if the message cannot be deserialized', async () => {
+ const responsePromise = deserializeExpectedMessages(MOCK_INVALID_STRING)
+ await expect(responsePromise).rejects.toThrowError(
+ 'Unexpected data received from notify broker: MOCK_STRING'
+ )
+ })
+})
diff --git a/app-shell/src/notifications/__tests__/notifications.test.ts b/app-shell/src/notifications/__tests__/notifications.test.ts
new file mode 100644
index 00000000000..5fdd521aa0b
--- /dev/null
+++ b/app-shell/src/notifications/__tests__/notifications.test.ts
@@ -0,0 +1,65 @@
+import { vi, describe, it, expect, beforeEach } from 'vitest'
+
+import { registerNotify, closeAllNotifyConnections } from '..'
+import { connectionStore } from '../store'
+import { subscribe } from '../subscribe'
+import { closeConnectionsForcefullyFor } from '../connect'
+
+import type { Mock } from 'vitest'
+
+vi.mock('electron-store')
+vi.mock('../store')
+vi.mock('../subscribe')
+vi.mock('../connect')
+vi.mock('../notifyLog', () => {
+ return {
+ createLogger: () => {
+ return { debug: () => null }
+ },
+ notifyLog: { debug: vi.fn() },
+ }
+})
+
+const MOCK_ACTION = {
+ type: 'shell:NOTIFY_SUBSCRIBE',
+ payload: { hostname: 'localhost', topic: 'ALL_TOPICS' },
+ meta: { shell: true },
+} as any
+
+describe('registerNotify', () => {
+ let dispatch: Mock
+ let mainWindow: Mock
+
+ beforeEach(() => {
+ dispatch = vi.fn()
+ mainWindow = vi.fn()
+ })
+
+ it('should set browser window when connectionStore has no browser window', () => {
+ registerNotify(dispatch, mainWindow as any)(MOCK_ACTION)
+
+ expect(connectionStore.setBrowserWindow).toHaveBeenCalledWith(mainWindow)
+ })
+
+ it('should subscribe when action type is shell:NOTIFY_SUBSCRIBE', () => {
+ registerNotify(dispatch, mainWindow as any)(MOCK_ACTION)
+
+ expect(vi.mocked(subscribe)).toHaveBeenCalledWith(
+ MOCK_ACTION.payload.hostname,
+ MOCK_ACTION.payload.topic
+ )
+ })
+})
+
+describe('closeAllNotifyConnections', () => {
+ it('should reject with an error when failed to close all connections within the time limit', async () => {
+ vi.useFakeTimers({ shouldAdvanceTime: true })
+ vi.mocked(closeConnectionsForcefullyFor).mockResolvedValue([])
+ const promise = closeAllNotifyConnections()
+ vi.advanceTimersByTime(2000)
+
+ await expect(promise).rejects.toThrowError(
+ 'Failed to close all connections within the time limit.'
+ )
+ })
+})
diff --git a/app-shell/src/notifications/__tests__/store.test.ts b/app-shell/src/notifications/__tests__/store.test.ts
new file mode 100644
index 00000000000..7192c8c2fa0
--- /dev/null
+++ b/app-shell/src/notifications/__tests__/store.test.ts
@@ -0,0 +1,348 @@
+import { describe, it, expect, beforeEach } from 'vitest'
+
+import { connectionStore } from '../store'
+
+const MOCK_IP = 'MOCK_IP'
+const MOCK_ROBOT = 'MOCK_ROBOT'
+const MOCK_WINDOW = {} as any
+const MOCK_CLIENT = { connected: true } as any
+const MOCK_TOPIC = 'MOCK_TOPIC' as any
+
+describe('ConnectionStore', () => {
+ beforeEach(() => {
+ connectionStore.clearStore()
+ })
+
+ describe('getBrowserWindow', () => {
+ it('should return the browser window', () => {
+ connectionStore.setBrowserWindow(MOCK_WINDOW)
+ expect(connectionStore.getBrowserWindow()).toBe(MOCK_WINDOW)
+ })
+ })
+
+ describe('getAllBrokersInStore', () => {
+ it('should return an empty array if there are no brokers in the store', () => {
+ expect(connectionStore.getAllBrokersInStore()).toEqual([])
+ })
+
+ it('should return an array of broker names in the store', async () => {
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ await connectionStore.setPendingConnection('robot2')
+ expect(connectionStore.getAllBrokersInStore()).toEqual([
+ MOCK_ROBOT,
+ 'robot2',
+ ])
+ })
+ })
+
+ describe('getClient', () => {
+ it('should return null if the given IP is not associated with a connection', () => {
+ expect(connectionStore.getClient(MOCK_IP)).toBeNull()
+ })
+
+ it('should return the client if the given IP is associated with a connection', async () => {
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ await connectionStore.setConnected(MOCK_ROBOT, MOCK_CLIENT)
+ expect(connectionStore.getClient(MOCK_IP)).toBe(MOCK_CLIENT)
+ })
+ })
+
+ describe('setErrorStatus and getFailedConnectionStatus', () => {
+ it('should return null if the given IP is not associated with a connection', () => {
+ expect(connectionStore.getFailedConnectionStatus(MOCK_IP)).toBeNull()
+ })
+
+ it('should return the unreachable status for the given IP', async () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ await connectionStore.setErrorStatus(MOCK_IP, 'ECONNFAILED')
+ expect(connectionStore.getFailedConnectionStatus(MOCK_IP)).toBe(
+ 'ECONNFAILED'
+ )
+ })
+
+ it('should return "ECONNFAILED" if the unreachable status for the given IP is "ECONNREFUSED" after the first error status check', async () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ await connectionStore.setErrorStatus(MOCK_IP, 'ECONNREFUSED')
+ expect(connectionStore.getFailedConnectionStatus(MOCK_IP)).toBe(
+ 'ECONNREFUSED'
+ )
+ expect(connectionStore.getFailedConnectionStatus(MOCK_IP)).toBe(
+ 'ECONNFAILED'
+ )
+ })
+
+ it('should throw an error if the given IP is not associated with a connection', async () => {
+ await expect(
+ connectionStore.setErrorStatus(MOCK_IP, 'Connection refused')
+ ).rejects.toThrowError('MOCK_IP is not associated with a connection')
+ })
+ })
+
+ describe('getRobotNameByIP', () => {
+ it('should return null if the given IP is not associated with a connection', () => {
+ expect(connectionStore.getRobotNameByIP(MOCK_IP)).toBeNull()
+ })
+
+ it('should return the robot name associated with the given IP', () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ expect(connectionStore.getRobotNameByIP(MOCK_IP)).toBe(MOCK_ROBOT)
+ })
+ })
+
+ describe('setBrowserWindow', () => {
+ it('should set the browser window', () => {
+ connectionStore.setBrowserWindow(MOCK_WINDOW)
+ expect(connectionStore.getBrowserWindow()).toBe(MOCK_WINDOW)
+ })
+ })
+
+ describe('setPendingConnection', () => {
+ it('should create a new connection if there is no connection currently connecting', async () => {
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ expect(connectionStore.getAllBrokersInStore()).toEqual([MOCK_ROBOT])
+ })
+
+ it('should reject with an error if there is already a connection currently connecting', async () => {
+ await expect(
+ connectionStore.setPendingConnection(MOCK_ROBOT)
+ ).resolves.toBeUndefined()
+ await expect(
+ connectionStore.setPendingConnection(MOCK_ROBOT)
+ ).rejects.toThrowError(
+ 'Cannot create a new connection while currently connecting.'
+ )
+ })
+ })
+
+ describe('setConnected', () => {
+ it('should set the client for the given robot name', async () => {
+ connectionStore.setPendingConnection(MOCK_ROBOT)
+ await connectionStore.setConnected(MOCK_ROBOT, MOCK_CLIENT)
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ expect(connectionStore.getClient(MOCK_IP)).toBe(MOCK_CLIENT)
+ })
+
+ it('should reject with an error if there is already a connection for the given robot name', async () => {
+ const MOCK_CLIENT_2 = {} as any
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ await connectionStore.setConnected(MOCK_ROBOT, MOCK_CLIENT)
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ await expect(
+ connectionStore.setConnected(MOCK_ROBOT, MOCK_CLIENT_2)
+ ).rejects.toThrowError('Connection already exists for MOCK_ROBOT')
+ })
+
+ it('should reject with an error if the given robot name is not associated with a connection', async () => {
+ await expect(
+ connectionStore.setConnected(MOCK_ROBOT, MOCK_CLIENT)
+ ).rejects.toThrowError('IP is not associated with a connection')
+ })
+ })
+
+ describe('setSubStatus', () => {
+ it('should set the pending sub status for the given IP and topic', async () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ await connectionStore.setConnected(MOCK_ROBOT, MOCK_CLIENT)
+ await connectionStore.setSubStatus(MOCK_IP, MOCK_TOPIC, 'pending')
+ expect(connectionStore.isPendingSub(MOCK_ROBOT, MOCK_TOPIC)).toBe(true)
+ })
+
+ it('should set the subscribed status for the given IP and topic', async () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ await connectionStore.setConnected(MOCK_ROBOT, MOCK_CLIENT)
+ await connectionStore.setSubStatus(MOCK_IP, MOCK_TOPIC, 'subscribed')
+ expect(connectionStore.isActiveSub(MOCK_ROBOT, MOCK_TOPIC)).toBe(true)
+ expect(connectionStore.isPendingSub(MOCK_ROBOT, MOCK_TOPIC)).toBe(false)
+ })
+
+ it('should throw an error if the given IP is not associated with a connection', async () => {
+ await expect(
+ connectionStore.setSubStatus(MOCK_IP, MOCK_TOPIC, 'pending')
+ ).rejects.toThrowError('IP is not associated with a connection')
+ })
+ })
+
+ describe('setUnsubStatus', () => {
+ it('should set the pending unsub status for the given IP and topic if it is currently subscribed', async () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ await connectionStore.setConnected(MOCK_ROBOT, MOCK_CLIENT)
+ await connectionStore.setSubStatus(MOCK_IP, MOCK_TOPIC, 'subscribed')
+ await connectionStore.setUnsubStatus(MOCK_IP, MOCK_TOPIC, 'pending')
+ expect(connectionStore.isPendingUnsub(MOCK_IP, MOCK_TOPIC)).toBe(true)
+ expect(connectionStore.isActiveSub(MOCK_ROBOT, MOCK_TOPIC)).toBe(true)
+ })
+
+ it('should set the unsubscribed status for the given IP and topic if it is currently subscribed', async () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ await connectionStore.setConnected(MOCK_ROBOT, MOCK_CLIENT)
+ await connectionStore.setSubStatus(MOCK_IP, MOCK_TOPIC, 'subscribed')
+ await connectionStore.setUnsubStatus(MOCK_IP, MOCK_TOPIC, 'unsubscribed')
+ expect(connectionStore.isActiveSub(MOCK_ROBOT, MOCK_TOPIC)).toBe(false)
+ expect(connectionStore.isPendingUnsub(MOCK_IP, MOCK_TOPIC)).toBe(false)
+ })
+
+ it('should reject with an error if the given IP is not associated with a connection', async () => {
+ await expect(
+ connectionStore.setUnsubStatus(MOCK_IP, MOCK_TOPIC, 'pending')
+ ).rejects.toThrowError('IP is not associated with a connection')
+ })
+ })
+
+ describe('associateIPWithRobotName', () => {
+ it('should associate the given IP with the given robot name', () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ expect(connectionStore.getRobotNameByIP(MOCK_IP)).toBe(MOCK_ROBOT)
+ })
+
+ it('should update the association if the IP is already associated with a different robot name', () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ connectionStore.associateIPWithRobotName(MOCK_IP, 'robot2')
+ expect(connectionStore.getRobotNameByIP(MOCK_IP)).toBe('robot2')
+ })
+ })
+
+ describe('clearStore', () => {
+ it('should clear all connections and robot names', async () => {
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ connectionStore.setBrowserWindow(MOCK_WINDOW)
+ expect(connectionStore.getAllBrokersInStore()).not.toEqual([])
+ expect(connectionStore.getBrowserWindow()).not.toBeNull()
+ connectionStore.clearStore()
+ expect(connectionStore.getAllBrokersInStore()).toEqual([])
+ expect(connectionStore.getBrowserWindow()).toBeNull()
+ })
+ })
+
+ describe('isConnectedToBroker', () => {
+ it('should return false if the given robot name is not associated with a connection', () => {
+ expect(connectionStore.isConnectedToBroker(MOCK_ROBOT)).toBe(false)
+ })
+
+ it('should return false if the connection client is null', async () => {
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ expect(connectionStore.isConnectedToBroker(MOCK_ROBOT)).toBe(false)
+ })
+
+ it('should return true if the connection client is not null', async () => {
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ await connectionStore.setConnected(MOCK_ROBOT, MOCK_CLIENT)
+ expect(connectionStore.isConnectedToBroker(MOCK_ROBOT)).toBe(true)
+ })
+ })
+
+ describe('isConnectingToBroker', () => {
+ it('should return false if the given robot name is not associated with a connection', () => {
+ expect(connectionStore.isConnectingToBroker(MOCK_ROBOT)).toBe(false)
+ })
+
+ it('should return false if the connection client is not null', () => {
+ connectionStore.setPendingConnection(MOCK_ROBOT)
+ connectionStore.setConnected(MOCK_ROBOT, MOCK_CLIENT)
+ expect(connectionStore.isConnectingToBroker(MOCK_ROBOT)).toBe(false)
+ })
+
+ it('should return true if the connection client is null and the connection is not terminated', () => {
+ connectionStore.setPendingConnection(MOCK_ROBOT)
+ expect(connectionStore.isConnectingToBroker(MOCK_ROBOT)).toBe(true)
+ })
+ })
+
+ describe('isPendingSub', () => {
+ it('should return false if the given IP is not associated with a connection', () => {
+ expect(connectionStore.isPendingSub(MOCK_ROBOT, MOCK_TOPIC)).toBe(false)
+ })
+
+ it('should return false if the topic is not pending', () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ expect(connectionStore.isPendingSub(MOCK_ROBOT, MOCK_TOPIC)).toBe(false)
+ })
+
+ it('should return true if the topic is pending', async () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ await connectionStore.setConnected(MOCK_ROBOT, MOCK_CLIENT)
+ connectionStore.setSubStatus(MOCK_IP, MOCK_TOPIC, 'pending')
+ expect(connectionStore.isPendingSub(MOCK_ROBOT, MOCK_TOPIC)).toBe(true)
+ })
+ })
+
+ describe('isActiveSub', () => {
+ it('should return false if the given IP is not associated with a connection', () => {
+ expect(connectionStore.isActiveSub(MOCK_ROBOT, MOCK_TOPIC)).toBe(false)
+ })
+
+ it('should return false if the topic is not subscribed', () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ expect(connectionStore.isActiveSub(MOCK_ROBOT, MOCK_TOPIC)).toBe(false)
+ })
+
+ it('should return true if the topic is subscribed', async () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ await connectionStore.setConnected(MOCK_ROBOT, MOCK_CLIENT)
+ await connectionStore.setSubStatus(MOCK_IP, MOCK_TOPIC, 'subscribed')
+ expect(connectionStore.isActiveSub(MOCK_ROBOT, MOCK_TOPIC)).toBe(true)
+ })
+ })
+
+ describe('isPendingUnsub', () => {
+ it('should return false if the given IP is not associated with a connection', () => {
+ expect(connectionStore.isPendingUnsub(MOCK_IP, MOCK_TOPIC)).toBe(false)
+ })
+
+ it('should return false if the topic is not pending', () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ expect(connectionStore.isPendingUnsub(MOCK_IP, MOCK_TOPIC)).toBe(false)
+ })
+
+ it('should return true if the topic is pending', async () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ await connectionStore.setConnected(MOCK_ROBOT, MOCK_CLIENT)
+ await connectionStore.setSubStatus(MOCK_IP, MOCK_TOPIC, 'subscribed')
+ await connectionStore.setUnsubStatus(MOCK_IP, MOCK_TOPIC, 'pending')
+ expect(connectionStore.isPendingUnsub(MOCK_IP, MOCK_TOPIC)).toBe(true)
+ })
+ })
+
+ describe('isConnectionTerminated', () => {
+ it('should return true if the given robot name is not associated with a connection', () => {
+ expect(connectionStore.isConnectionTerminated(MOCK_ROBOT)).toBe(true)
+ })
+
+ it('should return true if the unreachable status is not null', async () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ await connectionStore.setConnected(MOCK_ROBOT, MOCK_CLIENT)
+ await connectionStore.setErrorStatus(MOCK_IP, 'Connection refused')
+ expect(connectionStore.isConnectionTerminated(MOCK_ROBOT)).toBe(true)
+ })
+
+ it('should return false if the unreachable status is null', async () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ await connectionStore.setConnected(MOCK_ROBOT, MOCK_CLIENT)
+ expect(connectionStore.isConnectionTerminated(MOCK_ROBOT)).toBe(false)
+ })
+ })
+
+ describe('isKnownPortBlockedIP', () => {
+ it('should return false if the given IP is not in the known port blocked IPs set', () => {
+ expect(connectionStore.isKnownPortBlockedIP('MOCK_IP_2')).toBe(false)
+ })
+
+ it('should return true if the given IP is in the known port blocked IPs set', async () => {
+ connectionStore.associateIPWithRobotName(MOCK_IP, MOCK_ROBOT)
+ await connectionStore.setPendingConnection(MOCK_ROBOT)
+ connectionStore.setErrorStatus(MOCK_IP, 'ECONNREFUSED')
+ expect(connectionStore.isKnownPortBlockedIP(MOCK_IP)).toBe(true)
+ })
+ })
+})
diff --git a/app-shell/src/notifications/connect.ts b/app-shell/src/notifications/connect.ts
new file mode 100644
index 00000000000..bcaf24e6e3d
--- /dev/null
+++ b/app-shell/src/notifications/connect.ts
@@ -0,0 +1,209 @@
+import mqtt from 'mqtt'
+
+import { connectionStore } from './store'
+import {
+ sendDeserialized,
+ sendDeserializedGenericError,
+ deserializeExpectedMessages,
+} from './deserialize'
+import { unsubscribe } from './unsubscribe'
+import { notifyLog } from './notifyLog'
+import { FAILURE_STATUSES, HEALTH_STATUS_OK } from '../constants'
+
+import type { NotifyTopic } from '@opentrons/app/src/redux/shell/types'
+import type { DiscoveryClientRobot } from '@opentrons/discovery-client'
+
+// MQTT is somewhat particular about the clientId format and will connect erratically if an unexpected string is supplied.
+const CLIENT_ID = 'app-' + Math.random().toString(16).slice(2, 8) // Derived from mqttjs
+const connectOptions: mqtt.IClientOptions = {
+ clientId: CLIENT_ID,
+ port: 1883,
+ keepalive: 60,
+ protocolVersion: 5,
+ reconnectPeriod: 1000,
+ connectTimeout: 30 * 1000,
+ clean: true,
+ resubscribe: true,
+}
+
+export interface RobotData {
+ ip: string
+ robotName: string
+}
+
+// This is the discovery-client equivalent of "available" robots when viewing the Devices page in the app.
+export function getHealthyRobotDataForNotifyConnections(
+ robots: DiscoveryClientRobot[]
+): RobotData[] {
+ return robots.flatMap(robot =>
+ robot.addresses
+ .filter(address => address.healthStatus === HEALTH_STATUS_OK)
+ .map(address => ({ ip: address.ip, robotName: robot.name }))
+ )
+}
+
+/**
+ *
+ * @description Forcibly disconnect from brokers for robots that are no longer discoverable and
+ * remove their connections from the connection store.
+ */
+export function cleanUpUnreachableRobots(
+ healthyRobots: RobotData[]
+): Promise<string[]> {
+ return new Promise((resolve, reject) => {
+ const healthyRobotNames = healthyRobots.map(({ robotName }) => robotName)
+ const healthyRobotNamesSet = new Set(healthyRobotNames)
+ const unreachableRobots = connectionStore
+ .getAllBrokersInStore()
+ .filter(robotName => {
+ return !healthyRobotNamesSet.has(robotName)
+ })
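+ // Connection teardown happens in the background; resolve immediately with the names of the removed robots.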
+ void closeConnectionsForcefullyFor(unreachableRobots)
+ resolve(unreachableRobots)
+ })
+}
+
+export function establishConnections(
+ healthyRobots: RobotData[]
+): Promise<RobotData[]> {
+ return new Promise((resolve, reject) => {
+ const newConnections = healthyRobots.filter(({ ip, robotName }) => {
+ if (connectionStore.isConnectedToBroker(robotName)) {
+ return false
+ } else {
+ connectionStore.associateIPWithRobotName(ip, robotName)
+ // True when a robot is connecting.
+ if (!connectionStore.isConnectionTerminated(robotName)) {
+ return false
+ } else {
+ return !connectionStore.isKnownPortBlockedIP(ip)
+ }
+ }
+ })
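+ // Keep only robots that are not already connected, not mid-connection, and not known to block the notification port.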
+ newConnections.forEach(({ ip, robotName }) => {
+ void connectionStore
+ .setPendingConnection(robotName)
+ .then(() => {
+ connectAsync(`mqtt://${ip}`)
+ .then(client => {
+ notifyLog.debug(`Successfully connected to ${robotName} on ${ip}`)
+ void connectionStore
+ .setConnected(robotName, client)
+ .then(() => establishListeners(client, ip, robotName))
+ .catch((error: Error) => notifyLog.debug(error.message))
+ })
+ .catch((error: Error) => {
+ notifyLog.warn(
+ `Failed to connect to ${robotName} on ${ip} - ${error.name}: ${error.message} `
+ )
+ void connectionStore.setErrorStatus(ip, error.message)
+ })
+ })
+ .catch((error: Error) => notifyLog.debug(error.message))
+ })
+ resolve(newConnections)
+ })
+}
+
+function connectAsync(brokerURL: string): Promise<mqtt.MqttClient> {
+ const client = mqtt.connect(brokerURL, connectOptions)
+
+ return new Promise((resolve, reject) => {
+ // Listeners added to client to trigger promise resolution
+ const promiseListeners: {
+ [key: string]: (...args: any[]) => void
+ } = {
+ connect: () => {
+ removePromiseListeners()
+ return resolve(client)
+ },
+ // A connection error event will close the connection without a retry.
+ error: (error: Error | string) => {
+ removePromiseListeners()
+ const clientEndPromise = new Promise((resolve, reject) =>
+ client.end(true, {}, () => resolve(error))
+ )
+ return clientEndPromise.then(() => reject(error))
+ },
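+ // An 'end' event before a successful 'connect' is treated as a failed connection attempt.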
+ end: () => promiseListeners.error(`Couldn't connect to ${brokerURL}`),
+ }
+
+ function removePromiseListeners(): void {
+ Object.keys(promiseListeners).forEach(eventName => {
+ client.removeListener(eventName, promiseListeners[eventName])
+ })
+ }
+
+ Object.keys(promiseListeners).forEach(eventName => {
+ client.on(eventName, promiseListeners[eventName])
+ })
+ })
+}
+
+function establishListeners(
+ client: mqtt.MqttClient,
+ ip: string,
+ robotName: string
+): void {
+ client.on(
+ 'message',
+ (topic: NotifyTopic, message: Buffer, packet: mqtt.IPublishPacket) => {
+ deserializeExpectedMessages(message.toString())
+ .then(deserializedMessage => {
+ const messageContainsUnsubFlag = 'unsubscribe' in deserializedMessage
+ if (messageContainsUnsubFlag) {
+ void unsubscribe(ip, topic).catch((error: Error) =>
+ notifyLog.debug(error.message)
+ )
+ }
+
+ notifyLog.debug('Received notification data from main via IPC', {
+ ip,
+ topic,
+ })
+
+ sendDeserialized({ ip, topic, message: deserializedMessage })
+ })
+ .catch(error => notifyLog.debug(`${error.message}`))
+ }
+ )
+
+ client.on('reconnect', () => {
+ notifyLog.debug(`Attempting to reconnect to ${robotName} on ${ip}`)
+ })
+ // handles transport layer errors only
+ client.on('error', error => {
+ notifyLog.warn(`Error - ${error.name}: ${error.message}`)
+ sendDeserializedGenericError(ip, 'ALL_TOPICS')
+ client.end()
+ })
+
+ client.on('end', () => {
+ notifyLog.debug(`Closed connection to ${robotName} on ${ip}`)
+ // Marking the connection as failed with a generic error status lets the connection re-establish in the future
+ // and tells the browser to fall back to polling (assuming this 'end' event isn't caused by the app closing).
+ void connectionStore.setErrorStatus(ip, FAILURE_STATUSES.ECONNFAILED)
+ })
+
+ client.on('disconnect', packet => {
+ notifyLog.warn(
+ `Disconnected from ${robotName} on ${ip} with code ${
+ packet.reasonCode ?? 'undefined'
+ }`
+ )
+ sendDeserializedGenericError(ip, 'ALL_TOPICS')
+ })
+}
+
+export function closeConnectionsForcefullyFor(
+ robotNames: string[]
+): Array<Promise<void>> {
+ return robotNames.map(ip => {
+ const client = connectionStore.getClient(ip)
+ return new Promise((resolve, reject) => {
+ if (client != null) {
+ client.end(true, {}, () => resolve())
+ }
+ })
+ })
+}
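+// Note: closeConnectionsForcefullyFor only settles promises for robots that still have a client
+// in the store; closeAllNotifyConnections (see ./index.ts) wraps the resulting Promise.all in a
+// 2-second timeout so app shutdown is never blocked by an entry that cannot settle.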
diff --git a/app-shell/src/notifications/deserialize.ts b/app-shell/src/notifications/deserialize.ts
new file mode 100644
index 00000000000..53752b32a0f
--- /dev/null
+++ b/app-shell/src/notifications/deserialize.ts
@@ -0,0 +1,71 @@
+import isEqual from 'lodash/isEqual'
+
+import { connectionStore } from './store'
+
+import type {
+ NotifyBrokerResponses,
+ NotifyRefetchData,
+ NotifyResponseData,
+ NotifyTopic,
+ NotifyUnsubscribeData,
+} from '@opentrons/app/src/redux/shell/types'
+import { FAILURE_STATUSES } from '../constants'
+
+interface SendToBrowserParams {
+ ip: string
+ topic: NotifyTopic
+ message: NotifyResponseData
+}
+
+const VALID_NOTIFY_RESPONSES: [NotifyRefetchData, NotifyUnsubscribeData] = [
+ { refetch: true },
+ { unsubscribe: true },
+]
+
+export function sendDeserialized({
+ ip,
+ topic,
+ message,
+}: SendToBrowserParams): void {
+ try {
+ const browserWindow = connectionStore.getBrowserWindow()
+ browserWindow?.webContents.send('notify', ip, topic, message)
+ } catch {} // Prevents shell erroring during app shutdown event.
+}
+
+export function sendDeserializedGenericError(
+ ip: string,
+ topic: NotifyTopic
+): void {
+ sendDeserialized({
+ ip,
+ topic,
+ message: FAILURE_STATUSES.ECONNFAILED,
+ })
+}
+
+export function deserializeExpectedMessages(
+ message: string
+): Promise<NotifyBrokerResponses> {
+ return new Promise((resolve, reject) => {
+ let deserializedMessage: NotifyResponseData | Record<string, unknown>
+ const error = new Error(
+ `Unexpected data received from notify broker: ${message}`
+ )
+
+ try {
+ deserializedMessage = JSON.parse(message)
+ } catch {
+ reject(error)
+ }
+
+ const isValidNotifyResponse = VALID_NOTIFY_RESPONSES.some(model =>
+ isEqual(model, deserializedMessage)
+ )
+ if (!isValidNotifyResponse) {
+ reject(error)
+ } else {
+ resolve(JSON.parse(message))
+ }
+ })
+}
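+// Example payloads, assuming the broker sends its flags as plain JSON strings:
+//   deserializeExpectedMessages('{"refetch": true}')     -> resolves { refetch: true }
+//   deserializeExpectedMessages('{"unsubscribe": true}') -> resolves { unsubscribe: true }
+//   deserializeExpectedMessages('not-json')              -> rejects with the "Unexpected data" error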
diff --git a/app-shell/src/notifications/index.ts b/app-shell/src/notifications/index.ts
new file mode 100644
index 00000000000..221addea9f6
--- /dev/null
+++ b/app-shell/src/notifications/index.ts
@@ -0,0 +1,61 @@
+import { connectionStore } from './store'
+import {
+ establishConnections,
+ cleanUpUnreachableRobots,
+ getHealthyRobotDataForNotifyConnections,
+ closeConnectionsForcefullyFor,
+ RobotData,
+} from './connect'
+import { subscribe } from './subscribe'
+import { notifyLog } from './notifyLog'
+
+import type { DiscoveryClientRobot } from '@opentrons/discovery-client'
+import type { BrowserWindow } from 'electron'
+import type { Action, Dispatch } from '../types'
+
+// Manages MQTT broker connections through a connection store. Broker connections are added based on health status
+// reported by discovery-client and broker connectivity status reported by MQTT. Because a robot may have several IPs,
+// only the first reported IP that results in a successful broker connection maintains an active connection.
+// All associated IPs reference the active connection. Subscriptions are handled "lazily" - a component must
+// dispatch a subscribe action before a subscription request is made to the broker. Unsubscribe requests only occur if
+// the broker sends an "unsubscribe" flag. Pending subs and unsubs are used to prevent unnecessary network and broker load.
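+//
+// For example, a renderer window dispatches an action shaped like the following (the topic
+// string is illustrative):
+//   { type: 'shell:NOTIFY_SUBSCRIBE', payload: { hostname: '10.14.19.50', topic: 'robot-server/runs' } }
+// registerNotify() routes it to subscribe(), which only issues a broker SUBSCRIBE if that topic
+// is neither active nor pending for the robot behind that hostname.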
+
+export function registerNotify(
+ dispatch: Dispatch,
+ mainWindow: BrowserWindow
+): (action: Action) => unknown {
+ if (connectionStore.getBrowserWindow() == null) {
+ connectionStore.setBrowserWindow(mainWindow)
+ }
+
+ return function handleAction(action: Action) {
+ switch (action.type) {
+ case 'shell:NOTIFY_SUBSCRIBE':
+ return subscribe(action.payload.hostname, action.payload.topic)
+ }
+ }
+}
+
+export function handleNotificationConnectionsFor(
+ robots: DiscoveryClientRobot[]
+): RobotData[] {
+ const reachableRobots = getHealthyRobotDataForNotifyConnections(robots)
+ void cleanUpUnreachableRobots(reachableRobots)
+ void establishConnections(reachableRobots)
+
+ return reachableRobots
+}
+
+export function closeAllNotifyConnections(): Promise<unknown> {
+ return new Promise((resolve, reject) => {
+ setTimeout(() => {
+ reject(Error('Failed to close all connections within the time limit.'))
+ }, 2000)
+
+ notifyLog.debug('Stopping notify service connections')
+ const closeConnections = closeConnectionsForcefullyFor(
+ connectionStore.getAllBrokersInStore()
+ )
+ Promise.all(closeConnections).then(resolve).catch(reject)
+ })
+}
diff --git a/app-shell/src/notifications/notifyLog.ts b/app-shell/src/notifications/notifyLog.ts
new file mode 100644
index 00000000000..35507fa2c2a
--- /dev/null
+++ b/app-shell/src/notifications/notifyLog.ts
@@ -0,0 +1,3 @@
+import { createLogger } from '../log'
+
+export const notifyLog = createLogger('notify')
diff --git a/app-shell/src/notifications/store.ts b/app-shell/src/notifications/store.ts
new file mode 100644
index 00000000000..c9742ec6f90
--- /dev/null
+++ b/app-shell/src/notifications/store.ts
@@ -0,0 +1,269 @@
+import type mqtt from 'mqtt'
+
+import { FAILURE_STATUSES } from '../constants'
+
+import type { NotifyTopic } from '@opentrons/app/src/redux/shell/types'
+import type { BrowserWindow } from 'electron'
+
+type FailedConnStatus = typeof FAILURE_STATUSES[keyof typeof FAILURE_STATUSES]
+
+interface HostData {
+ client: mqtt.MqttClient | null
+ subscriptions: Set<NotifyTopic>
+ pendingSubs: Set<NotifyTopic>
+ pendingUnsubs: Set<NotifyTopic>
+ unreachableStatus: FailedConnStatus | null
+}
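+
+// Illustrative lifecycle of a HostData entry (robot name is an example, not a default):
+//   setPendingConnection('opentrons-dev')  -> { client: null, subscriptions: empty, unreachableStatus: null }
+//   setConnected('opentrons-dev', client)  -> client stored once the broker accepts the connection
+//   setSubStatus(ip, topic, 'pending')     -> topic added to pendingSubs
+//   setSubStatus(ip, topic, 'subscribed')  -> topic moved from pendingSubs into subscriptions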
+
+/**
+ * @description Manages the internal state of MQTT connections to various robot hosts.
+ */
+class ConnectionStore {
+ private hostsByRobotName: Record<string, HostData> = {}
+
+ private robotNamesByIP: Record<string, string> = {}
+
+ private browserWindow: BrowserWindow | null = null
+
+ private readonly knownPortBlockedIPs = new Set<string>()
+
+ public getBrowserWindow(): BrowserWindow | null {
+ return this.browserWindow
+ }
+
+ public getAllBrokersInStore(): string[] {
+ return Object.keys(this.hostsByRobotName)
+ }
+
+ public getClient(ip: string): mqtt.MqttClient | null {
+ const hostData = this.getHostDataByIP(ip)
+ if (hostData != null) {
+ return hostData.client
+ } else {
+ return null
+ }
+ }
+
+ /**
+ * @returns {FailedConnStatus} "ECONNREFUSED" is a proxy for a port block error and is only returned once
+ * for analytics reasons. Afterward, a generic "ECONNFAILED" is returned.
+ */
+ public getFailedConnectionStatus(ip: string): FailedConnStatus | null {
+ const robotName = this.getRobotNameByIP(ip)
+ if (robotName != null) {
+ const failureStatus = this.hostsByRobotName[robotName].unreachableStatus
+ if (failureStatus === FAILURE_STATUSES.ECONNREFUSED) {
+ this.hostsByRobotName[robotName].unreachableStatus =
+ FAILURE_STATUSES.ECONNFAILED
+ }
+ return failureStatus
+ } else {
+ return null
+ }
+ }
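+ // For example, after a port-blocked robot fails to connect at some IP:
+ //   getFailedConnectionStatus(ip) -> 'ECONNREFUSED'  (reported once so analytics can count it)
+ //   getFailedConnectionStatus(ip) -> 'ECONNFAILED'   (every later call until the entry is reset)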
+
+ public getRobotNameByIP(ip: string): string | null {
+ return this.robotNamesByIP[ip] ?? null
+ }
+
+ public setBrowserWindow(window: BrowserWindow): void {
+ this.browserWindow = window
+ }
+
+ public setPendingConnection(robotName: string): Promise<void> {
+ return new Promise((resolve, reject) => {
+ if (!this.isConnectingToBroker(robotName)) {
+ this.hostsByRobotName[robotName] = {
+ client: null,
+ subscriptions: new Set(),
+ pendingSubs: new Set(),
+ pendingUnsubs: new Set(),
+ unreachableStatus: null,
+ }
+ resolve()
+ } else {
+ reject(
+ new Error(
+ 'Cannot create a new connection while currently connecting.'
+ )
+ )
+ }
+ })
+ }
+
+ public setConnected(
+ robotName: string,
+ client: mqtt.MqttClient
+ ): Promise<void> {
+ return new Promise((resolve, reject) => {
+ if (robotName in this.hostsByRobotName) {
+ if (this.hostsByRobotName[robotName].client == null) {
+ this.hostsByRobotName[robotName].client = client
+ resolve()
+ } else {
+ reject(new Error(`Connection already exists for ${robotName}`))
+ }
+ } else {
+ reject(new Error('IP is not associated with a connection'))
+ }
+ })
+ }
+
+ /**
+ * @description Marks the host as unreachable with an error status derived from the MQTT returned error object.
+ */
+ public setErrorStatus(ip: string, errorMessage: string): Promise<void> {
+ return new Promise((resolve, reject) => {
+ const robotName = this.getRobotNameByIP(ip)
+ if (robotName != null && robotName in this.hostsByRobotName) {
+ if (this.hostsByRobotName[robotName].unreachableStatus == null) {
+ const errorStatus = errorMessage?.includes(
+ FAILURE_STATUSES.ECONNREFUSED
+ )
+ ? FAILURE_STATUSES.ECONNREFUSED
+ : FAILURE_STATUSES.ECONNFAILED
+
+ this.hostsByRobotName[robotName].unreachableStatus = errorStatus
+ if (errorStatus === FAILURE_STATUSES.ECONNREFUSED) {
+ this.knownPortBlockedIPs.add(ip)
+ }
+ }
+ resolve()
+ } else {
+ reject(new Error(`${ip} is not associated with a connection`))
+ }
+ })
+ }
+
+ public setSubStatus(
+ ip: string,
+ topic: NotifyTopic,
+ status: 'pending' | 'subscribed'
+ ): Promise<void> {
+ return new Promise((resolve, reject) => {
+ const robotName = this.getRobotNameByIP(ip)
+ if (robotName != null && robotName in this.hostsByRobotName) {
+ const { pendingSubs, subscriptions } = this.hostsByRobotName[robotName]
+ if (status === 'pending') {
+ pendingSubs.add(topic)
+ } else {
+ subscriptions.add(topic)
+ pendingSubs.delete(topic)
+ }
+ resolve()
+ } else {
+ reject(new Error('IP is not associated with a connection'))
+ }
+ })
+ }
+
+ public setUnsubStatus(
+ ip: string,
+ topic: NotifyTopic,
+ status: 'pending' | 'unsubscribed'
+ ): Promise<void> {
+ return new Promise((resolve, reject) => {
+ const robotName = this.getRobotNameByIP(ip)
+ if (robotName != null && robotName in this.hostsByRobotName) {
+ const { pendingUnsubs, subscriptions } = this.hostsByRobotName[
+ robotName
+ ]
+ if (subscriptions.has(topic)) {
+ if (status === 'pending') {
+ pendingUnsubs.add(topic)
+ } else {
+ pendingUnsubs.delete(topic)
+ subscriptions.delete(topic)
+ }
+ }
+ resolve()
+ } else {
+ reject(new Error('IP is not associated with a connection'))
+ }
+ })
+ }
+
+ public associateIPWithRobotName(ip: string, robotName: string): void {
+ const robotNameInStore = this.robotNamesByIP[ip]
+ if (robotNameInStore !== robotName) {
+ this.robotNamesByIP[ip] = robotName
+ }
+ }
+
+ /**
+ * @description Used for testing purposes.
+ */
+ public clearStore(): void {
+ this.hostsByRobotName = {}
+ this.robotNamesByIP = {}
+ this.browserWindow = null
+ }
+
+ public isConnectedToBroker(robotName: string): boolean {
+ return robotName != null
+ ? this.hostsByRobotName[robotName]?.client?.connected ?? false
+ : false
+ }
+
+ public isConnectingToBroker(robotName: string): boolean {
+ return (
+ robotName in this.hostsByRobotName &&
+ this.hostsByRobotName[robotName].client == null &&
+ !this.isConnectionTerminated(robotName)
+ )
+ }
+
+ public isPendingSub(robotName: string, topic: NotifyTopic): boolean {
+ if (robotName != null && robotName in this.hostsByRobotName) {
+ const { pendingSubs } = this.hostsByRobotName[robotName]
+ return pendingSubs.has(topic)
+ } else {
+ return false
+ }
+ }
+
+ public isActiveSub(robotName: string, topic: NotifyTopic): boolean {
+ if (robotName != null && robotName in this.hostsByRobotName) {
+ const { subscriptions } = this.hostsByRobotName[robotName]
+ return subscriptions.has(topic)
+ } else {
+ return false
+ }
+ }
+
+ public isPendingUnsub(ip: string, topic: NotifyTopic): boolean {
+ const robotName = this.getRobotNameByIP(ip)
+ if (robotName != null && robotName in this.hostsByRobotName) {
+ const { pendingUnsubs } = this.hostsByRobotName[robotName]
+ return pendingUnsubs.has(topic)
+ } else {
+ return false
+ }
+ }
+
+ /**
+ * @description A broker connection is terminated if it is errored or not present in the store.
+ */
+ public isConnectionTerminated(robotName: string): boolean {
+ if (robotName in this.hostsByRobotName) {
+ return this.hostsByRobotName[robotName].unreachableStatus != null
+ } else {
+ return true
+ }
+ }
+
+ public isKnownPortBlockedIP(ip: string): boolean {
+ return this.knownPortBlockedIPs.has(ip)
+ }
+
+ private getHostDataByIP(ip: string): HostData | null {
+ if (ip in this.robotNamesByIP) {
+ const robotName = this.robotNamesByIP[ip]
+ return this.hostsByRobotName[robotName] ?? null
+ } else {
+ return null
+ }
+ }
+}
+
+export const connectionStore = new ConnectionStore()
diff --git a/app-shell/src/notifications/subscribe.ts b/app-shell/src/notifications/subscribe.ts
new file mode 100644
index 00000000000..895a010406e
--- /dev/null
+++ b/app-shell/src/notifications/subscribe.ts
@@ -0,0 +1,136 @@
+import mqtt from 'mqtt'
+
+import { connectionStore } from './store'
+import { sendDeserialized, sendDeserializedGenericError } from './deserialize'
+import { notifyLog } from './notifyLog'
+
+import type { NotifyTopic } from '@opentrons/app/src/redux/shell/types'
+
+/**
+ * @property {number} qos: "Quality of Service" level 1, i.e. "at least once" delivery. Because we use React Query, which does not trigger
+ * a render update event if duplicate data is received, we can avoid the additional overhead of guaranteeing "exactly once" delivery.
+ */
+const subscribeOptions: mqtt.IClientSubscribeOptions = {
+ qos: 1,
+}
+
+const CHECK_CONNECTION_INTERVAL = 500
+
+export function subscribe(ip: string, topic: NotifyTopic): Promise<void> {
+ const robotName = connectionStore.getRobotNameByIP(ip)
+
+ if (robotName == null || connectionStore.isConnectionTerminated(robotName)) {
+ const errorMessage = connectionStore.getFailedConnectionStatus(ip)
+ if (errorMessage != null) {
+ sendDeserialized({
+ ip,
+ topic,
+ message: errorMessage,
+ })
+ }
+ return Promise.resolve()
+ } else {
+ return waitUntilActiveOrErrored({ connection: 'client', ip, robotName })
+ .then(() => {
+ const client = connectionStore.getClient(ip)
+ if (client == null) {
+ return Promise.reject(new Error('Expected hostData, received null.'))
+ }
+
+ if (
+ !connectionStore.isActiveSub(robotName, topic) &&
+ !connectionStore.isPendingSub(robotName, topic)
+ ) {
+ connectionStore
+ .setSubStatus(ip, topic, 'pending')
+ .then(
+ () =>
+ new Promise(() => {
+ client.subscribe(topic, subscribeOptions, subscribeCb)
+ })
+ )
+ .catch((error: Error) => notifyLog.debug(error.message))
+ } else {
+ void waitUntilActiveOrErrored({
+ connection: 'subscription',
+ ip,
+ robotName,
+ topic,
+ }).catch((error: Error) => {
+ notifyLog.debug(error.message)
+ sendDeserializedGenericError(ip, topic)
+ })
+ }
+ })
+ .catch((error: Error) => {
+ notifyLog.debug(error.message)
+ sendDeserializedGenericError(ip, topic)
+ })
+ }
+
+ function subscribeCb(error: Error, result: mqtt.ISubscriptionGrant[]): void {
+ if (error != null) {
+ sendDeserializedGenericError(ip, topic)
+ notifyLog.debug(
+ `Failed to subscribe to ${robotName} on ${ip} to topic: ${topic}`
+ )
+ } else {
+ notifyLog.debug(
+ `Successfully subscribed to ${robotName} on ${ip} to topic: ${topic}`
+ )
+ connectionStore
+ .setSubStatus(ip, topic, 'subscribed')
+ .catch((error: Error) => notifyLog.debug(error.message))
+ }
+ }
+}
+
+interface WaitUntilActiveOrErroredParams {
+ connection: 'client' | 'subscription'
+ ip: string
+ robotName: string
+ topic?: NotifyTopic
+}
+
+// Check every 500ms for 2 seconds before failing.
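+// (CHECK_CONNECTION_INTERVAL = 500 and MAX_RETRIES = 4 give roughly a two-second window; once it
+// lapses, subscribe()'s catch sends the browser a generic ECONNFAILED payload so it can fall back
+// to HTTP polling.)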
+function waitUntilActiveOrErrored({
+ connection,
+ ip,
+ robotName,
+ topic,
+}: WaitUntilActiveOrErroredParams): Promise<void> {
+ return new Promise((resolve, reject) => {
+ if (connection === 'subscription') {
+ if (topic == null) {
+ reject(
+ new Error(
+ 'Must specify a topic when connection is type "subscription".'
+ )
+ )
+ }
+ }
+
+ const MAX_RETRIES = 4
+ let counter = 0
+ const intervalId = setInterval(() => {
+ const hasReceivedAck =
+ connection === 'client'
+ ? connectionStore.isConnectedToBroker(robotName)
+ : connectionStore.isActiveSub(robotName, topic as NotifyTopic)
+ if (hasReceivedAck) {
+ clearInterval(intervalId)
+ resolve()
+ }
+
+ counter++
+ if (counter === MAX_RETRIES) {
+ clearInterval(intervalId)
+ reject(
+ new Error(
+ `Maximum number of retries exceeded for ${robotName} on ${ip}.`
+ )
+ )
+ }
+ }, CHECK_CONNECTION_INTERVAL)
+ })
+}
diff --git a/app-shell/src/notifications/unsubscribe.ts b/app-shell/src/notifications/unsubscribe.ts
new file mode 100644
index 00000000000..8a0f3d032cd
--- /dev/null
+++ b/app-shell/src/notifications/unsubscribe.ts
@@ -0,0 +1,36 @@
+import { connectionStore } from './store'
+import { notifyLog } from './notifyLog'
+
+import type { NotifyTopic } from '@opentrons/app/src/redux/shell/types'
+
+export function unsubscribe(ip: string, topic: NotifyTopic): Promise<void> {
+ return new Promise((resolve, reject) => {
+ if (!connectionStore.isPendingUnsub(ip, topic)) {
+ connectionStore
+ .setUnsubStatus(ip, topic, 'pending')
+ .then(() => {
+ const client = connectionStore.getClient(ip)
+ if (client == null) {
+ return reject(new Error('Expected hostData, received null.'))
+ }
+
+ client.unsubscribe(topic, {}, (error, result) => {
+ const robotName = connectionStore.getRobotNameByIP(ip)
+ if (error != null) {
+ notifyLog.debug(
+ `Failed to unsubscribe to ${robotName} on ${ip} from topic: ${topic}`
+ )
+ } else {
+ notifyLog.debug(
+ `Successfully unsubscribed to ${robotName} on ${ip} from topic: ${topic}`
+ )
+ connectionStore
+ .setUnsubStatus(ip, topic, 'unsubscribed')
+ .catch((error: Error) => notifyLog.debug(error.message))
+ }
+ })
+ })
+ .catch((error: Error) => notifyLog.debug(error.message))
+ }
+ })
+}
diff --git a/app-shell/src/preload.ts b/app-shell/src/preload.ts
index 3748885b730..cf1f4ef7bef 100644
--- a/app-shell/src/preload.ts
+++ b/app-shell/src/preload.ts
@@ -3,4 +3,5 @@
// for security reasons
import { ipcRenderer } from 'electron'
+// @ts-expect-error can't get TS to recognize global.d.ts
global.APP_SHELL_REMOTE = { ipcRenderer }
diff --git a/app-shell/src/protocol-analysis/__tests__/protocolAnalysis.test.ts b/app-shell/src/protocol-analysis/__tests__/protocolAnalysis.test.ts
index dfd8e074121..e83ed5d4c7a 100644
--- a/app-shell/src/protocol-analysis/__tests__/protocolAnalysis.test.ts
+++ b/app-shell/src/protocol-analysis/__tests__/protocolAnalysis.test.ts
@@ -1,4 +1,5 @@
-import { when, resetAllWhenMocks } from 'jest-when'
+import { vi, it, expect, describe, beforeEach } from 'vitest'
+import { when } from 'vitest-when'
import electron from 'electron'
import * as ProtocolAnalysis from '@opentrons/app/src/redux/protocol-analysis'
import * as Cfg from '@opentrons/app/src/redux/config'
@@ -9,6 +10,7 @@ import { getValidLabwareFilePaths } from '../../labware'
import { selectPythonPath, getPythonPath } from '../getPythonPath'
import { executeAnalyzeCli } from '../executeAnalyzeCli'
import { writeFailedAnalysis } from '../writeFailedAnalysis'
+import { createLogger } from '../../log'
import {
registerProtocolAnalysis,
@@ -17,37 +19,23 @@ import {
} from '..'
import { Dispatch } from '../../types'
-jest.mock('../../labware')
-jest.mock('../../dialogs')
-jest.mock('../getPythonPath')
-jest.mock('../executeAnalyzeCli')
-jest.mock('../writeFailedAnalysis')
-
-const mockGetConfig = getConfig as jest.MockedFunction<typeof getConfig>
-const mockSelectPythonPath = selectPythonPath as jest.MockedFunction<
- typeof selectPythonPath
->
-const mockGetPythonPath = getPythonPath as jest.MockedFunction<
- typeof getPythonPath
->
-const mockExecuteAnalyzeCli = executeAnalyzeCli as jest.MockedFunction<
- typeof executeAnalyzeCli
->
-const mockWriteFailedAnalysis = writeFailedAnalysis as jest.MockedFunction<
- typeof writeFailedAnalysis
->
-const mockGetValidLabwareFilePaths = getValidLabwareFilePaths as jest.MockedFunction<
- typeof getValidLabwareFilePaths
->
-const mockHandleConfigChange = handleConfigChange as jest.MockedFunction<
- typeof handleConfigChange
->
-const mockShowOpenDirectoryDialog = Dialogs.showOpenDirectoryDialog as jest.MockedFunction<
- typeof Dialogs.showOpenDirectoryDialog
->
-const mockOpenDirectoryInFileExplorer = Dialogs.openDirectoryInFileExplorer as jest.MockedFunction<
- typeof Dialogs.openDirectoryInFileExplorer
->
+vi.mock('../../labware')
+vi.mock('../../dialogs')
+vi.mock('../getPythonPath')
+vi.mock('../executeAnalyzeCli')
+vi.mock('../writeFailedAnalysis')
+vi.mock('electron-store')
+vi.mock('../../config')
+vi.mock('../../log', async importOriginal => {
+ const actual = await importOriginal()
+ return {
+ ...actual,
+ createLogger: () => ({
+ debug: vi.fn(),
+ error: vi.fn(),
+ }),
+ }
+})
// wait a few ticks to let the mock Promises clear
const flush = (): Promise<void> =>
@@ -57,32 +45,32 @@ describe('analyzeProtocolSource', () => {
const mockMainWindow = ({
browserWindow: true,
} as unknown) as electron.BrowserWindow
- let dispatch: jest.MockedFunction<Dispatch>
+ let dispatch = vi.fn()
let handleAction: Dispatch
beforeEach(() => {
- dispatch = jest.fn()
- mockGetConfig.mockReturnValue({
+ dispatch = vi.fn()
+ vi.mocked(getConfig).mockReturnValue({
python: { pathToPythonOverride: '/some/override/python' },
} as Config)
handleAction = registerProtocolAnalysis(dispatch, mockMainWindow)
})
- afterEach(() => {
- resetAllWhenMocks()
- })
-
it('should be able to initialize the Python path', () => {
- expect(mockSelectPythonPath).toHaveBeenCalledWith('/some/override/python')
- expect(mockHandleConfigChange).toHaveBeenCalledWith(
+ expect(vi.mocked(selectPythonPath)).toHaveBeenCalledWith(
+ '/some/override/python'
+ )
+ expect(vi.mocked(handleConfigChange)).toHaveBeenCalledWith(
'python.pathToPythonOverride',
expect.any(Function)
)
// the 'python.pathToPythonOverride' change handler
- const changeHandler = mockHandleConfigChange.mock.calls[0][1]
+ const changeHandler = vi.mocked(handleConfigChange).mock.calls[0][1]
changeHandler('/new/override/python', '/old/path/does/not/matter')
- expect(mockSelectPythonPath).toHaveBeenCalledWith('/new/override/python')
+ expect(vi.mocked(selectPythonPath)).toHaveBeenCalledWith(
+ '/new/override/python'
+ )
})
it('should get the Python path and execute the analyze CLI with custom labware', () => {
@@ -94,13 +82,13 @@ describe('analyzeProtocolSource', () => {
'/some/custom/labware/directory/fakeLabwareTwo.json',
]
- when(mockGetPythonPath).calledWith().mockResolvedValue(pythonPath)
- when(mockGetValidLabwareFilePaths)
+ when(vi.mocked(getPythonPath)).calledWith().thenResolve(pythonPath)
+ when(vi.mocked(getValidLabwareFilePaths))
.calledWith()
- .mockResolvedValue(labwarePaths)
+ .thenResolve(labwarePaths)
return analyzeProtocolSource(sourcePath, outputPath).then(() => {
- expect(mockExecuteAnalyzeCli).toHaveBeenCalledWith(
+ expect(vi.mocked(executeAnalyzeCli)).toHaveBeenCalledWith(
pythonPath,
outputPath,
[sourcePath, ...labwarePaths]
@@ -113,11 +101,14 @@ describe('analyzeProtocolSource', () => {
const outputPath = '/path/to/output.json'
const error = new Error('oh no')
- when(mockGetPythonPath).calledWith().mockRejectedValue(error)
- when(mockGetValidLabwareFilePaths).calledWith().mockResolvedValue([])
+ when(vi.mocked(getPythonPath)).calledWith().thenReject(error)
+ when(vi.mocked(getValidLabwareFilePaths)).calledWith().thenResolve([])
return analyzeProtocolSource(sourcePath, outputPath).then(() => {
- expect(mockWriteFailedAnalysis).toHaveBeenCalledWith(outputPath, 'oh no')
+ expect(vi.mocked(writeFailedAnalysis)).toHaveBeenCalledWith(
+ outputPath,
+ 'oh no'
+ )
})
})
@@ -127,37 +118,44 @@ describe('analyzeProtocolSource', () => {
const pythonPath = '/path/to/python'
const error = new Error('oh no')
- when(mockGetPythonPath).calledWith().mockResolvedValue(pythonPath)
- when(mockGetValidLabwareFilePaths).calledWith().mockResolvedValue([])
- when(mockExecuteAnalyzeCli)
+ when(vi.mocked(getPythonPath)).calledWith().thenResolve(pythonPath)
+ when(vi.mocked(getValidLabwareFilePaths)).calledWith().thenResolve([])
+ when(vi.mocked(executeAnalyzeCli))
.calledWith(pythonPath, outputPath, [sourcePath])
- .mockRejectedValue(error)
+ .thenReject(error)
return analyzeProtocolSource(sourcePath, outputPath).then(() => {
- expect(mockWriteFailedAnalysis).toHaveBeenCalledWith(outputPath, 'oh no')
+ expect(vi.mocked(writeFailedAnalysis)).toHaveBeenCalledWith(
+ outputPath,
+ 'oh no'
+ )
})
})
it('should open file picker in response to CHANGE_PYTHON_PATH_OVERRIDE and not call dispatch if no directory is returned from showOpenDirectoryDialog', () => {
- when(mockShowOpenDirectoryDialog)
+ when(vi.mocked(Dialogs.showOpenDirectoryDialog))
.calledWith(mockMainWindow)
- .mockResolvedValue([])
+ .thenResolve([])
handleAction(ProtocolAnalysis.changePythonPathOverrideConfig())
return flush().then(() => {
- expect(mockShowOpenDirectoryDialog).toHaveBeenCalledWith(mockMainWindow)
+ expect(vi.mocked(Dialogs.showOpenDirectoryDialog)).toHaveBeenCalledWith(
+ mockMainWindow
+ )
expect(dispatch).not.toHaveBeenCalled()
})
})
it('should open file picker in response to CHANGE_PYTHON_PATH_OVERRIDE and call dispatch with directory returned from showOpenDirectoryDialog', () => {
- when(mockShowOpenDirectoryDialog)
+ when(vi.mocked(Dialogs.showOpenDirectoryDialog))
.calledWith(mockMainWindow)
- .mockResolvedValue(['path/to/override'])
+ .thenResolve(['path/to/override'])
handleAction(ProtocolAnalysis.changePythonPathOverrideConfig())
return flush().then(() => {
- expect(mockShowOpenDirectoryDialog).toHaveBeenCalledWith(mockMainWindow)
+ expect(vi.mocked(Dialogs.showOpenDirectoryDialog)).toHaveBeenCalledWith(
+ mockMainWindow
+ )
expect(dispatch).toHaveBeenCalledWith(
Cfg.updateConfigValue(
CONFIG_PYTHON_PATH_TO_PYTHON_OVERRIDE,
@@ -168,15 +166,15 @@ describe('analyzeProtocolSource', () => {
})
it('should call openDirectoryInFileExplorer in response to OPEN_PYTHON_DIRECTORY', () => {
- when(mockOpenDirectoryInFileExplorer)
+ when(vi.mocked(Dialogs.openDirectoryInFileExplorer))
.calledWith('/some/override/python')
- .mockResolvedValue(null)
+ .thenResolve(null)
handleAction(ProtocolAnalysis.openPythonInterpreterDirectory())
return flush().then(() => {
- expect(mockOpenDirectoryInFileExplorer).toHaveBeenCalledWith(
- '/some/override/python'
- )
+ expect(
+ vi.mocked(Dialogs.openDirectoryInFileExplorer)
+ ).toHaveBeenCalledWith('/some/override/python')
})
})
})
diff --git a/app-shell/src/protocol-analysis/__tests__/writeFailedAnalysis.test.ts b/app-shell/src/protocol-analysis/__tests__/writeFailedAnalysis.test.ts
index 73dbf811479..4514887cb6d 100644
--- a/app-shell/src/protocol-analysis/__tests__/writeFailedAnalysis.test.ts
+++ b/app-shell/src/protocol-analysis/__tests__/writeFailedAnalysis.test.ts
@@ -1,5 +1,6 @@
import { readFile, rm } from 'fs/promises'
import tempy from 'tempy'
+import { describe, it, expect, beforeEach, afterEach } from 'vitest'
import { writeFailedAnalysis } from '../writeFailedAnalysis'
@@ -40,6 +41,7 @@ describe('write failed analysis', () => {
modules: [],
pipettes: [],
liquids: [],
+ runTimeParameters: [],
})
})
})
diff --git a/app-shell/src/protocol-analysis/index.ts b/app-shell/src/protocol-analysis/index.ts
index 34143c48de0..7264bb3819a 100644
--- a/app-shell/src/protocol-analysis/index.ts
+++ b/app-shell/src/protocol-analysis/index.ts
@@ -1,13 +1,15 @@
-import * as ProtocolAnalysis from '@opentrons/app/src/redux/protocol-analysis'
-import * as Cfg from '@opentrons/app/src/redux/config'
-
import { createLogger } from '../log'
import { getConfig, handleConfigChange } from '../config'
+import { updateConfigValue } from '../config/actions'
import { getValidLabwareFilePaths } from '../labware'
import {
showOpenDirectoryDialog,
openDirectoryInFileExplorer,
} from '../dialogs'
+import {
+ CHANGE_PYTHON_PATH_OVERRIDE,
+ OPEN_PYTHON_DIRECTORY,
+} from '../constants'
import { selectPythonPath, getPythonPath } from './getPythonPath'
import { executeAnalyzeCli } from './executeAnalyzeCli'
import { writeFailedAnalysis } from './writeFailedAnalysis'
@@ -33,20 +35,20 @@ export function registerProtocolAnalysis(
return function handleIncomingAction(action: Action): void {
switch (action.type) {
- case ProtocolAnalysis.OPEN_PYTHON_DIRECTORY: {
+ case OPEN_PYTHON_DIRECTORY: {
const dir = getConfig().python.pathToPythonOverride
openDirectoryInFileExplorer(dir).catch(err => {
log.debug('Error opening python directory', err.message)
})
break
}
- case ProtocolAnalysis.CHANGE_PYTHON_PATH_OVERRIDE: {
+ case CHANGE_PYTHON_PATH_OVERRIDE: {
showOpenDirectoryDialog(mainWindow)
.then(filePaths => {
if (filePaths.length > 0) {
const nextValue = filePaths[0]
dispatch(
- Cfg.updateConfigValue(
+ updateConfigValue(
CONFIG_PYTHON_PATH_TO_PYTHON_OVERRIDE,
nextValue
)
diff --git a/app-shell/src/protocol-analysis/writeFailedAnalysis.ts b/app-shell/src/protocol-analysis/writeFailedAnalysis.ts
index 519184a3d41..8723cd52d04 100644
--- a/app-shell/src/protocol-analysis/writeFailedAnalysis.ts
+++ b/app-shell/src/protocol-analysis/writeFailedAnalysis.ts
@@ -27,6 +27,7 @@ export function createFailedAnalysis(
pipettes: [],
modules: [],
liquids: [],
+ runTimeParameters: [],
// TODO(mc, 2022-05-04): this field does not make sense for an
// analysis that was unable to complete, but is required by
// ProtocolAnalysisOutput
diff --git a/app-shell/src/protocol-storage/__tests__/file-system.test.ts b/app-shell/src/protocol-storage/__tests__/file-system.test.ts
index c1aeb0071af..4da2cd23abe 100644
--- a/app-shell/src/protocol-storage/__tests__/file-system.test.ts
+++ b/app-shell/src/protocol-storage/__tests__/file-system.test.ts
@@ -4,8 +4,8 @@ import path from 'path'
import fs from 'fs-extra'
import tempy from 'tempy'
import Electron from 'electron'
+import { vi, describe, beforeEach, it, afterAll, expect } from 'vitest'
import uuid from 'uuid/v4'
-import { when } from 'jest-when'
import {
readDirectoriesWithinDirectory,
@@ -16,22 +16,15 @@ import {
PROTOCOLS_DIRECTORY_NAME,
PROTOCOLS_DIRECTORY_PATH,
} from '../file-system'
-import { getConfig } from '../../config'
import { analyzeProtocolSource } from '../../protocol-analysis'
-jest.mock('uuid/v4')
-jest.mock('electron')
-jest.mock('../../config')
-jest.mock('../../protocol-analysis')
+vi.mock('uuid/v4')
+vi.mock('electron')
+vi.mock('electron-store')
+vi.mock('../../protocol-analysis')
+vi.mock('../../log')
-const trashItem = Electron.shell.trashItem as jest.MockedFunction<
- typeof Electron.shell.trashItem
->
-const mockUuid = uuid as jest.MockedFunction<typeof uuid>
-const mockGetConfig = getConfig as jest.MockedFunction<typeof getConfig>
-const mockRunFileWithPython = analyzeProtocolSource as jest.MockedFunction<
- typeof analyzeProtocolSource
->
+const trashItem = Electron.shell.trashItem
describe('protocol storage directory utilities', () => {
let protocolsDir: string
@@ -43,14 +36,11 @@ describe('protocol storage directory utilities', () => {
}
beforeEach(() => {
protocolsDir = makeEmptyDir()
- mockGetConfig.mockReturnValue({
- python: { pathToPythonOverride: null },
- } as any)
- mockRunFileWithPython.mockReturnValue(Promise.resolve())
+ vi.mocked(analyzeProtocolSource).mockReturnValue(Promise.resolve())
})
- afterAll(() => {
- jest.resetAllMocks()
+ afterAll((): any => {
+ vi.resetAllMocks()
return Promise.all(tempDirs.map(d => fs.remove(d)))
})
@@ -185,13 +175,11 @@ describe('protocol storage directory utilities', () => {
describe('addProtocolFile', () => {
it('writes a protocol file to a new directory', () => {
let count = 0
- when(mockUuid)
- .calledWith()
- .mockImplementation(() => {
- const nextId = `${count}abc123`
- count = count + 1
- return nextId
- })
+ vi.mocked(uuid).mockImplementation(() => {
+ const nextId = `${count}abc123`
+ count = count + 1
+ return nextId
+ })
const sourceDir = makeEmptyDir()
const destDir = makeEmptyDir()
const sourceName = path.join(sourceDir, 'source.py')
@@ -223,7 +211,7 @@ describe('protocol storage directory utilities', () => {
const protocolId = 'def456'
const setup = fs.mkdir(path.join(protocolsDir, protocolId))
- trashItem.mockResolvedValue()
+ vi.mocked(trashItem).mockResolvedValue()
return setup
.then(() => removeProtocolByKey('def456', protocolsDir))
@@ -239,7 +227,7 @@ describe('protocol storage directory utilities', () => {
const protocolId = 'def456'
const setup = fs.mkdir(path.join(protocolsDir, protocolId))
- trashItem.mockRejectedValue(Error('something went wrong'))
+ vi.mocked(trashItem).mockRejectedValue(Error('something went wrong'))
return setup
.then(() => removeProtocolByKey('def456', protocolsDir))
diff --git a/app-shell/src/protocol-storage/__tests__/protocol-storage.test.ts b/app-shell/src/protocol-storage/__tests__/protocol-storage.test.ts
index 2fcc70cdb0b..3ac1a106dbe 100644
--- a/app-shell/src/protocol-storage/__tests__/protocol-storage.test.ts
+++ b/app-shell/src/protocol-storage/__tests__/protocol-storage.test.ts
@@ -3,6 +3,7 @@
import path from 'path'
import fs from 'fs-extra'
import tempy from 'tempy'
+import { describe, it, vi, beforeEach, afterEach, expect } from 'vitest'
import { PROTOCOLS_DIRECTORY_NAME } from '../file-system'
import {
@@ -11,6 +12,9 @@ import {
getParsedAnalysisFromPath,
} from '../'
+vi.mock('electron-store')
+vi.mock('../../log')
+
describe('protocol storage directory utilities', () => {
let protocolsDir: string
let mockAnalysisFilePath: string
@@ -20,21 +24,18 @@ describe('protocol storage directory utilities', () => {
beforeEach(() => {
mockAnalysisFilePath = tempy.file({ extension: 'json' })
protocolsDir = path.join('__mock-app-path__', PROTOCOLS_DIRECTORY_NAME)
- mockDispatch = jest.fn()
+ mockDispatch = vi.fn()
requiredRmdir = true
})
afterEach(() => {
return requiredRmdir
- ? Promise.all([
+ ? (Promise.all([
fs.rmdir(protocolsDir, { recursive: true }),
fs.rm(mockAnalysisFilePath, { force: true }),
- ])
+ ]) as any)
: fs.rm(mockAnalysisFilePath, { force: true })
})
- afterAll(() => {
- jest.resetAllMocks()
- })
describe('fetchProtocols', () => {
it('reads and parses directories', () => {
@@ -118,6 +119,7 @@ describe('protocol storage directory utilities', () => {
pipettes: [],
modules: [],
labware: [],
+ runTimeParameters: [],
})
})
})
diff --git a/app-shell/src/protocol-storage/index.ts b/app-shell/src/protocol-storage/index.ts
index 0ffcf9795c6..53ec7148861 100644
--- a/app-shell/src/protocol-storage/index.ts
+++ b/app-shell/src/protocol-storage/index.ts
@@ -1,17 +1,32 @@
import fse from 'fs-extra'
import path from 'path'
import { shell } from 'electron'
-import first from 'lodash/first'
-
-import { UI_INITIALIZED } from '@opentrons/app/src/redux/shell/actions'
-import * as ProtocolStorageActions from '@opentrons/app/src/redux/protocol-storage/actions'
+import {
+ ADD_PROTOCOL,
+ ANALYZE_PROTOCOL,
+ FETCH_PROTOCOLS,
+ INITIAL,
+ OPEN_PROTOCOL_DIRECTORY,
+ POLL,
+ PROTOCOL_ADDITION,
+ REMOVE_PROTOCOL,
+ UI_INITIALIZED,
+ VIEW_PROTOCOL_SOURCE_FOLDER,
+} from '../constants'
+import {
+ analyzeProtocol,
+ analyzeProtocolFailure,
+ analyzeProtocolSuccess,
+ updateProtocolList,
+ updateProtocolListFailure,
+} from '../config/actions'
import * as FileSystem from './file-system'
import { createFailedAnalysis } from '../protocol-analysis/writeFailedAnalysis'
+import type { ProtocolAnalysisOutput } from '@opentrons/shared-data'
import type { ProtocolListActionSource as ListSource } from '@opentrons/app/src/redux/protocol-storage/types'
import type { Action, Dispatch } from '../types'
-import { ProtocolAnalysisOutput } from '@opentrons/shared-data'
const ensureDir: (dir: string) => Promise<void> = fse.ensureDir
@@ -20,28 +35,18 @@ export const getUnixTimeFromAnalysisPath = (analysisPath: string): number =>
export const getParsedAnalysisFromPath = (
analysisPath: string
-): ProtocolAnalysisOutput => {
+): ProtocolAnalysisOutput | undefined => {
try {
return fse.readJsonSync(analysisPath)
} catch (error) {
- return createFailedAnalysis(
- error?.message ?? 'protocol analysis file cannot be parsed'
- )
+ const errorMessage =
+ error instanceof Error && error?.message != null
+ ? error.message
+ : 'protocol analysis file cannot be parsed'
+ return createFailedAnalysis(errorMessage)
}
}
-export const getProtocolSrcFilePaths = (
- protocolKey: string
-): Promise<string[]> => {
- const protocolDir = `${FileSystem.PROTOCOLS_DIRECTORY_PATH}/${protocolKey}`
- return ensureDir(protocolDir)
- .then(() => FileSystem.parseProtocolDirs([protocolDir]))
- .then(storedProtocols => {
- const storedProtocol = first(storedProtocols)
- return storedProtocol?.srcFilePaths ?? []
- })
-}
-
// Revert a v7.0.0 pre-parity stop-gap solution.
const migrateProtocolsFromTempDirectory = preParityMigrateProtocolsFrom(
FileSystem.PRE_V7_PARITY_DIRECTORY_PATH,
@@ -135,7 +140,7 @@ export const fetchProtocols = (
}, null)
const mostRecentAnalysis =
mostRecentAnalysisFilePath != null
- ? getParsedAnalysisFromPath(mostRecentAnalysisFilePath)
+ ? getParsedAnalysisFromPath(mostRecentAnalysisFilePath) ?? null
: null
return {
@@ -151,78 +156,58 @@ export const fetchProtocols = (
mostRecentAnalysis,
}
})
- dispatch(
- ProtocolStorageActions.updateProtocolList(storedProtocolsData, source)
- )
+ dispatch(updateProtocolList(storedProtocolsData, source))
})
.catch((error: Error) => {
- dispatch(
- ProtocolStorageActions.updateProtocolListFailure(error.message, source)
- )
+ dispatch(updateProtocolListFailure(error.message, source))
})
}
export function registerProtocolStorage(dispatch: Dispatch): Dispatch {
return function handleActionForProtocolStorage(action: Action) {
switch (action.type) {
- case ProtocolStorageActions.FETCH_PROTOCOLS:
+ case FETCH_PROTOCOLS:
case UI_INITIALIZED: {
- const source =
- action.type === ProtocolStorageActions.FETCH_PROTOCOLS
- ? ProtocolStorageActions.POLL
- : ProtocolStorageActions.INITIAL
+ const source = action.type === FETCH_PROTOCOLS ? POLL : INITIAL
fetchProtocols(dispatch, source)
break
}
- case ProtocolStorageActions.ADD_PROTOCOL: {
+ case ADD_PROTOCOL: {
FileSystem.addProtocolFile(
action.payload.protocolFilePath,
FileSystem.PROTOCOLS_DIRECTORY_PATH
).then(protocolKey => {
- fetchProtocols(dispatch, ProtocolStorageActions.PROTOCOL_ADDITION)
- dispatch(ProtocolStorageActions.analyzeProtocol(protocolKey))
+ fetchProtocols(dispatch, PROTOCOL_ADDITION)
+ dispatch(analyzeProtocol(protocolKey))
})
break
}
- case ProtocolStorageActions.ANALYZE_PROTOCOL: {
+ case ANALYZE_PROTOCOL: {
FileSystem.analyzeProtocolByKey(
action.payload.protocolKey,
FileSystem.PROTOCOLS_DIRECTORY_PATH
)
.then(() => {
- dispatch(
- ProtocolStorageActions.analyzeProtocolSuccess(
- action.payload.protocolKey
- )
- )
- return fetchProtocols(
- dispatch,
- ProtocolStorageActions.PROTOCOL_ADDITION
- )
+ dispatch(analyzeProtocolSuccess(action.payload.protocolKey))
+ return fetchProtocols(dispatch, PROTOCOL_ADDITION)
})
.catch((_e: Error) => {
- dispatch(
- ProtocolStorageActions.analyzeProtocolFailure(
- action.payload.protocolKey
- )
- )
+ dispatch(analyzeProtocolFailure(action.payload.protocolKey))
})
break
}
- case ProtocolStorageActions.REMOVE_PROTOCOL: {
+ case REMOVE_PROTOCOL: {
FileSystem.removeProtocolByKey(
action.payload.protocolKey,
FileSystem.PROTOCOLS_DIRECTORY_PATH
- ).then(() =>
- fetchProtocols(dispatch, ProtocolStorageActions.PROTOCOL_ADDITION)
- )
+ ).then(() => fetchProtocols(dispatch, PROTOCOL_ADDITION))
break
}
- case ProtocolStorageActions.VIEW_PROTOCOL_SOURCE_FOLDER: {
+ case VIEW_PROTOCOL_SOURCE_FOLDER: {
FileSystem.viewProtocolSourceFolder(
action.payload.protocolKey,
FileSystem.PROTOCOLS_DIRECTORY_PATH
@@ -230,7 +215,7 @@ export function registerProtocolStorage(dispatch: Dispatch): Dispatch {
break
}
- case ProtocolStorageActions.OPEN_PROTOCOL_DIRECTORY: {
+ case OPEN_PROTOCOL_DIRECTORY: {
shell.openPath(FileSystem.PROTOCOLS_DIRECTORY_PATH)
break
}
diff --git a/app-shell/src/robot-update/__tests__/release-files.test.ts b/app-shell/src/robot-update/__tests__/release-files.test.ts
index edac2db7667..9807ac82ac7 100644
--- a/app-shell/src/robot-update/__tests__/release-files.test.ts
+++ b/app-shell/src/robot-update/__tests__/release-files.test.ts
@@ -3,9 +3,14 @@ import path from 'path'
import { promises as fs } from 'fs'
import fse from 'fs-extra'
import tempy from 'tempy'
+import { vi, describe, it, afterAll, expect } from 'vitest'
import { cleanupReleaseFiles } from '../release-files'
+vi.mock('electron-updater')
+vi.mock('electron-store')
+vi.mock('../../log')
+
describe('robot update release files utilities', () => {
const tempDirs: string[] = []
const makeEmptyDir = (): string => {
@@ -15,7 +20,7 @@ describe('robot update release files utilities', () => {
}
afterAll(() => {
- return Promise.all(tempDirs.map(d => fse.remove(d)))
+ return Promise.all(tempDirs.map(d => fse.remove(d))) as any
})
describe('cleanupReleaseFiles', () => {
diff --git a/app-shell/src/robot-update/__tests__/release-manifest.test.ts b/app-shell/src/robot-update/__tests__/release-manifest.test.ts
index cdc08dafdce..26ee86ad812 100644
--- a/app-shell/src/robot-update/__tests__/release-manifest.test.ts
+++ b/app-shell/src/robot-update/__tests__/release-manifest.test.ts
@@ -1,11 +1,11 @@
import fse from 'fs-extra'
import tempy from 'tempy'
+import { describe, it, vi, expect, beforeEach, afterEach } from 'vitest'
+
import * as Http from '../../http'
import { downloadManifest } from '../release-manifest'
-jest.mock('../../http')
-
-const fetchJson = Http.fetchJson as jest.MockedFunction<typeof Http.fetchJson>
+vi.mock('../../http')
describe('release manifest utilities', () => {
let manifestFile: string
@@ -22,7 +22,7 @@ describe('release manifest utilities', () => {
const result = { mockResult: true }
const manifestUrl = 'http://example.com/releases.json'
- fetchJson.mockImplementation(
+ vi.mocked(Http.fetchJson).mockImplementation(
(url: unknown): Promise<any> => {
if (url === manifestUrl) return Promise.resolve(result)
return Promise.resolve()
@@ -38,7 +38,7 @@ describe('release manifest utilities', () => {
const result = { mockResult: true }
const manifestUrl = 'http://example.com/releases.json'
- fetchJson.mockResolvedValue(result)
+ vi.mocked(Http.fetchJson).mockResolvedValue(result)
return downloadManifest(manifestUrl, manifestFile)
.then(() => fse.readJson(manifestFile))
@@ -50,7 +50,7 @@ describe('release manifest utilities', () => {
const manifestUrl = 'http://example.com/releases.json'
fse.writeJsonSync(manifestFile, manifest)
- fetchJson.mockRejectedValue(new Error('AH'))
+ vi.mocked(Http.fetchJson).mockRejectedValue(new Error('AH'))
return downloadManifest(manifestUrl, manifestFile).then(result =>
expect(result).toEqual(manifest)
diff --git a/app-shell/src/robot-update/constants.ts b/app-shell/src/robot-update/constants.ts
index c022db6185c..22a494d07d7 100644
--- a/app-shell/src/robot-update/constants.ts
+++ b/app-shell/src/robot-update/constants.ts
@@ -4,6 +4,8 @@ import type { UpdateManifestUrls } from './types'
import type { RobotUpdateTarget } from '@opentrons/app/src/redux/robot-update/types'
import { CURRENT_VERSION } from '../update'
+const OPENTRONS_PROJECT: string = _OPENTRONS_PROJECT_
+
const UPDATE_MANIFEST_URLS_RELEASE = {
ot2: 'https://builds.opentrons.com/ot2-br/releases.json',
flex: 'https://builds.opentrons.com/ot3-oe/releases.json',
@@ -15,7 +17,7 @@ const UPDATE_MANIFEST_URLS_INTERNAL_RELEASE = {
}
export const getUpdateManifestUrls = (): UpdateManifestUrls =>
- _OPENTRONS_PROJECT_.includes('robot-stack')
+ OPENTRONS_PROJECT.includes('robot-stack')
? UPDATE_MANIFEST_URLS_RELEASE
: UPDATE_MANIFEST_URLS_INTERNAL_RELEASE
diff --git a/app-shell/src/robot-update/index.ts b/app-shell/src/robot-update/index.ts
index 4f4d2bc8350..c74d1f5b534 100644
--- a/app-shell/src/robot-update/index.ts
+++ b/app-shell/src/robot-update/index.ts
@@ -1,9 +1,8 @@
// robot update files
import path from 'path'
import { readFile, ensureDir } from 'fs-extra'
-
-import { UI_INITIALIZED } from '@opentrons/app/src/redux/shell/actions'
import { createLogger } from '../log'
+import { UI_INITIALIZED } from '../constants'
import { downloadManifest, getReleaseSet } from './release-manifest'
import {
@@ -27,7 +26,6 @@ import type {
RobotUpdateAction,
RobotUpdateTarget,
} from '@opentrons/app/src/redux/robot-update/types'
-import type { RobotHost } from '@opentrons/app/src/redux/robot-api/types'
const log = createLogger('robot-update/index')
@@ -72,7 +70,7 @@ export function registerRobotUpdate(dispatch: Dispatch): Dispatch {
break
case 'robotUpdate:START_PREMIGRATION': {
- const robot = action.payload as RobotHost
+ const robot = action.payload
log.info('Starting robot premigration', { robot })
@@ -180,12 +178,12 @@ export function getRobotSystemUpdateUrls(
.then(manifest => {
const urls = getReleaseSet(manifest, CURRENT_VERSION)
- if (urls === null) {
- log.warn('No release files in manifest', {
- version: CURRENT_VERSION,
- manifest,
- })
- }
+ // if (urls === null) {
+ // log.warn('No release files in manifest', {
+ // version: CURRENT_VERSION,
+ // manifest,
+ // })
+ // }
return urls
})
diff --git a/app-shell/src/robot-update/release-files.ts b/app-shell/src/robot-update/release-files.ts
index 0c84634eb59..50e2366632a 100644
--- a/app-shell/src/robot-update/release-files.ts
+++ b/app-shell/src/robot-update/release-files.ts
@@ -7,7 +7,7 @@ import { move, readdir, remove, readFile } from 'fs-extra'
import StreamZip from 'node-stream-zip'
import getStream from 'get-stream'
-import { RobotUpdateTarget } from '@opentrons/app/src/redux/robot-update/types'
+import type { RobotUpdateTarget } from '@opentrons/app/src/redux/robot-update/types'
import { createLogger } from '../log'
import { fetchToFile } from '../http'
diff --git a/app-shell/src/robot-update/update.ts b/app-shell/src/robot-update/update.ts
index f3b0eca15df..9bd39b57d35 100644
--- a/app-shell/src/robot-update/update.ts
+++ b/app-shell/src/robot-update/update.ts
@@ -3,10 +3,9 @@
import path from 'path'
-import { OPENTRONS_USB } from '@opentrons/app/src/redux/discovery/constants'
-
import { fetch, postFile } from '../http'
import { getSerialPortHttpAgent } from '../usb'
+import { OPENTRONS_USB } from '../constants'
import type { RobotHost } from '@opentrons/app/src/redux/robot-api/types'
import type {
diff --git a/app-shell/src/system-info/__tests__/dispatch.test.ts b/app-shell/src/system-info/__tests__/dispatch.test.ts
index 00a057900b5..4da4b838429 100644
--- a/app-shell/src/system-info/__tests__/dispatch.test.ts
+++ b/app-shell/src/system-info/__tests__/dispatch.test.ts
@@ -1,68 +1,63 @@
import noop from 'lodash/noop'
+import { vi, it, expect, describe, beforeEach, afterEach } from 'vitest'
import { app } from 'electron'
import * as Fixtures from '@opentrons/app/src/redux/system-info/__fixtures__'
import * as SystemInfo from '@opentrons/app/src/redux/system-info'
import { uiInitialized } from '@opentrons/app/src/redux/shell/actions'
import * as OS from '../../os'
-import * as UsbDevices from '../usb-devices'
-import * as NetworkInterfaces from '../network-interfaces'
+import { createLogger } from '../../log'
+import { createUsbDeviceMonitor, getWindowsDriverVersion } from '../usb-devices'
+import {
+ getActiveInterfaces,
+ createNetworkInterfaceMonitor,
+} from '../network-interfaces'
import { registerSystemInfo } from '..'
import type { Dispatch } from '../../types'
import type { UsbDeviceMonitor } from '../usb-devices'
import type { NetworkInterfaceMonitor } from '../network-interfaces'
-jest.mock('../../os')
-jest.mock('../usb-devices')
-jest.mock('../network-interfaces')
-
-const createUsbDeviceMonitor = UsbDevices.createUsbDeviceMonitor as jest.MockedFunction<
- typeof UsbDevices.createUsbDeviceMonitor
->
-
-const getWindowsDriverVersion = UsbDevices.getWindowsDriverVersion as jest.MockedFunction<
- typeof UsbDevices.getWindowsDriverVersion
->
-
-const getActiveInterfaces = NetworkInterfaces.getActiveInterfaces as jest.MockedFunction<
- typeof NetworkInterfaces.getActiveInterfaces
->
-
-const createNetworkInterfaceMonitor = NetworkInterfaces.createNetworkInterfaceMonitor as jest.MockedFunction<
- typeof NetworkInterfaces.createNetworkInterfaceMonitor
->
-
-const isWindows = OS.isWindows as jest.MockedFunction<typeof OS.isWindows>
-
-const appOnce = app.once as jest.MockedFunction<typeof app.once>
-
+vi.mock('../../os')
+vi.mock('../usb-devices')
+vi.mock('../network-interfaces')
+vi.mock('electron-store')
+vi.mock('../../log', async importOriginal => {
+ const actual = await importOriginal()
+ return {
+ ...actual,
+ createLogger: () => ({
+ debug: vi.fn(),
+ error: vi.fn(),
+ }),
+ }
+})
const flush = (): Promise<void> =>
new Promise(resolve => setTimeout(resolve, 0))
describe('app-shell::system-info module action tests', () => {
- const dispatch = jest.fn()
- const getAllDevices = jest.fn()
- const usbMonitor: UsbDeviceMonitor = { getAllDevices, stop: jest.fn() }
- const ifaceMonitor: NetworkInterfaceMonitor = { stop: jest.fn() }
+ const dispatch = vi.fn()
+ const getAllDevices = vi.fn()
+ const usbMonitor: UsbDeviceMonitor = { getAllDevices, stop: vi.fn() }
+ const ifaceMonitor: NetworkInterfaceMonitor = { stop: vi.fn() }
const { windowsDriverVersion: _, ...notRealtek } = Fixtures.mockUsbDevice
- const realtek0 = { ...notRealtek, manufacturer: 'Realtek' }
- const realtek1 = { ...notRealtek, manufacturer: 'realtek' }
+ const realtek0 = { ...notRealtek, manufacturerName: 'Realtek' }
+ const realtek1 = { ...notRealtek, manufacturerName: 'realtek' }
let handler: Dispatch
beforeEach(() => {
handler = registerSystemInfo(dispatch)
- isWindows.mockReturnValue(false)
- createUsbDeviceMonitor.mockReturnValue(usbMonitor)
- createNetworkInterfaceMonitor.mockReturnValue(ifaceMonitor)
+ vi.mocked(OS.isWindows).mockReturnValue(false)
+ vi.mocked(createUsbDeviceMonitor).mockReturnValue(usbMonitor)
+ vi.mocked(createNetworkInterfaceMonitor).mockReturnValue(ifaceMonitor)
getAllDevices.mockResolvedValue([realtek0])
- getActiveInterfaces.mockReturnValue([
+ vi.mocked(getActiveInterfaces).mockReturnValue([
Fixtures.mockNetworkInterface,
Fixtures.mockNetworkInterfaceV6,
])
})
afterEach(() => {
- jest.resetAllMocks()
+ vi.resetAllMocks()
})
it('sends initial USB device and network list on shell:UI_INITIALIZED', () => {
@@ -75,7 +70,7 @@ describe('app-shell::system-info module action tests', () => {
[Fixtures.mockNetworkInterface, Fixtures.mockNetworkInterfaceV6]
)
)
- expect(getWindowsDriverVersion).toHaveBeenCalledTimes(0)
+ expect(vi.mocked(getWindowsDriverVersion)).toHaveBeenCalledTimes(0)
})
})
@@ -85,14 +80,14 @@ describe('app-shell::system-info module action tests', () => {
return flush().then(() => {
expect(createUsbDeviceMonitor).toHaveBeenCalledTimes(1)
- expect(createNetworkInterfaceMonitor).toHaveBeenCalledTimes(1)
+ expect(vi.mocked(createNetworkInterfaceMonitor)).toHaveBeenCalledTimes(1)
expect(dispatch).toHaveBeenCalledTimes(2)
})
})
it('sends systemInfo:USB_DEVICE_ADDED when device added', () => {
handler(uiInitialized())
- const usbMonitorOptions = createUsbDeviceMonitor.mock.calls[0][0]
+ const usbMonitorOptions = vi.mocked(createUsbDeviceMonitor).mock.calls[0][0]
expect(usbMonitorOptions?.onDeviceAdd).toEqual(expect.any(Function))
const onDeviceAdd = usbMonitorOptions?.onDeviceAdd ?? noop
@@ -106,7 +101,7 @@ describe('app-shell::system-info module action tests', () => {
it('sends systemInfo:USB_DEVICE_REMOVED when device removed', () => {
handler(uiInitialized())
- const usbMonitorOptions = createUsbDeviceMonitor.mock.calls[0][0]
+ const usbMonitorOptions = vi.mocked(createUsbDeviceMonitor).mock.calls[0][0]
expect(usbMonitorOptions?.onDeviceRemove).toEqual(expect.any(Function))
const onDeviceRemove = usbMonitorOptions?.onDeviceRemove ?? noop
@@ -121,7 +116,8 @@ describe('app-shell::system-info module action tests', () => {
it('sends systemInfo:NETWORK_INTERFACES_CHANGED when ifaces change', () => {
handler(uiInitialized())
- const ifaceMonitorOpts = createNetworkInterfaceMonitor.mock.calls[0][0]
+ const ifaceMonitorOpts = vi.mocked(createNetworkInterfaceMonitor).mock
+ .calls[0][0]
expect(ifaceMonitorOpts.onInterfaceChange).toEqual(expect.any(Function))
const { onInterfaceChange } = ifaceMonitorOpts
@@ -144,7 +140,7 @@ describe('app-shell::system-info module action tests', () => {
it('stops monitoring on app quit', () => {
handler(uiInitialized())
- const appQuitHandler = appOnce.mock.calls.find(
+ const appQuitHandler = vi.mocked(app.once).mock.calls.find(
// @ts-expect-error(mc, 2021-02-17): event strings don't match, investigate
([event, handler]) => event === 'will-quit'
)?.[1]
@@ -157,8 +153,8 @@ describe('app-shell::system-info module action tests', () => {
describe('on windows', () => {
beforeEach(() => {
- isWindows.mockReturnValue(true)
- getWindowsDriverVersion.mockResolvedValue('1.2.3')
+ vi.mocked(OS.isWindows).mockReturnValue(true)
+ vi.mocked(getWindowsDriverVersion).mockResolvedValue('1.2.3')
})
it('should add Windows driver versions to Realtek devices on initialization', () => {
@@ -166,8 +162,12 @@ describe('app-shell::system-info module action tests', () => {
handler(uiInitialized())
return flush().then(() => {
- expect(getWindowsDriverVersion).toHaveBeenCalledWith(realtek0)
- expect(getWindowsDriverVersion).toHaveBeenCalledWith(realtek1)
+ expect(vi.mocked(getWindowsDriverVersion)).toHaveBeenCalledWith(
+ realtek0
+ )
+ expect(vi.mocked(getWindowsDriverVersion)).toHaveBeenCalledWith(
+ realtek1
+ )
expect(dispatch).toHaveBeenCalledWith(
SystemInfo.initialized(
@@ -185,12 +185,15 @@ describe('app-shell::system-info module action tests', () => {
it('should add Windows driver versions to Realtek devices on add', () => {
getAllDevices.mockResolvedValue([])
handler(uiInitialized())
- const usbMonitorOptions = createUsbDeviceMonitor.mock.calls[0][0]
+ const usbMonitorOptions = vi.mocked(createUsbDeviceMonitor).mock
+ .calls[0][0]
const onDeviceAdd = usbMonitorOptions?.onDeviceAdd ?? noop
onDeviceAdd(realtek0)
return flush().then(() => {
- expect(getWindowsDriverVersion).toHaveBeenCalledWith(realtek0)
+ expect(vi.mocked(getWindowsDriverVersion)).toHaveBeenCalledWith(
+ realtek0
+ )
expect(dispatch).toHaveBeenCalledWith(
SystemInfo.usbDeviceAdded({
diff --git a/app-shell/src/system-info/__tests__/network-interfaces.test.ts b/app-shell/src/system-info/__tests__/network-interfaces.test.ts
index 907177a104a..efa0206aaf4 100644
--- a/app-shell/src/system-info/__tests__/network-interfaces.test.ts
+++ b/app-shell/src/system-info/__tests__/network-interfaces.test.ts
@@ -1,16 +1,13 @@
import os from 'os'
import noop from 'lodash/noop'
+import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'
import {
getActiveInterfaces,
createNetworkInterfaceMonitor,
} from '../network-interfaces'
-jest.mock('os')
-
-const networkInterfaces = os.networkInterfaces as jest.MockedFunction<
- typeof os.networkInterfaces
->
+vi.mock('os')
const mockV4: os.NetworkInterfaceInfoIPv4 = {
address: '192.168.1.17',
@@ -33,17 +30,17 @@ const mockV6: os.NetworkInterfaceInfoIPv6 = {
describe('system-info::network-interfaces', () => {
beforeEach(() => {
- jest.useFakeTimers()
+ vi.useFakeTimers()
})
afterEach(() => {
- jest.resetAllMocks()
- jest.clearAllTimers()
- jest.useRealTimers()
+ vi.resetAllMocks()
+ vi.clearAllTimers()
+ vi.useRealTimers()
})
it('should return external network interfaces', () => {
- networkInterfaces.mockReturnValue({
+ vi.mocked(os.networkInterfaces).mockReturnValue({
en0: [mockV4, mockV6],
en1: [mockV6],
lo0: [
@@ -60,56 +57,56 @@ describe('system-info::network-interfaces', () => {
})
it('should be able to poll the attached network interfaces', () => {
- networkInterfaces.mockReturnValue({})
+ vi.mocked(os.networkInterfaces).mockReturnValue({})
const monitor = createNetworkInterfaceMonitor({
pollInterval: 30000,
onInterfaceChange: noop,
})
- expect(networkInterfaces).toHaveBeenCalledTimes(1)
- jest.advanceTimersByTime(30000)
- expect(networkInterfaces).toHaveBeenCalledTimes(2)
- jest.advanceTimersByTime(30000)
- expect(networkInterfaces).toHaveBeenCalledTimes(3)
+ expect(vi.mocked(os.networkInterfaces)).toHaveBeenCalledTimes(1)
+ vi.advanceTimersByTime(30000)
+ expect(vi.mocked(os.networkInterfaces)).toHaveBeenCalledTimes(2)
+ vi.advanceTimersByTime(30000)
+ expect(vi.mocked(os.networkInterfaces)).toHaveBeenCalledTimes(3)
monitor.stop()
- jest.advanceTimersByTime(30000)
- expect(networkInterfaces).toHaveBeenCalledTimes(3)
+ vi.advanceTimersByTime(30000)
+ expect(vi.mocked(os.networkInterfaces)).toHaveBeenCalledTimes(3)
})
it('should be able to signal interface changes', () => {
- const handleInterfaceChange = jest.fn()
+ const handleInterfaceChange = vi.fn()
- networkInterfaces.mockReturnValue({})
+ vi.mocked(os.networkInterfaces).mockReturnValue({})
createNetworkInterfaceMonitor({
pollInterval: 30000,
onInterfaceChange: handleInterfaceChange,
})
- networkInterfaces.mockReturnValueOnce({
+ vi.mocked(os.networkInterfaces).mockReturnValueOnce({
en0: [mockV4, mockV6],
})
- jest.advanceTimersByTime(30000)
+ vi.advanceTimersByTime(30000)
expect(handleInterfaceChange).toHaveBeenCalledWith([
{ name: 'en0', ...mockV4 },
{ name: 'en0', ...mockV6 },
])
handleInterfaceChange.mockClear()
- networkInterfaces.mockReturnValueOnce({
+ vi.mocked(os.networkInterfaces).mockReturnValueOnce({
en0: [mockV4, mockV6],
})
- jest.advanceTimersByTime(30000)
+ vi.advanceTimersByTime(30000)
expect(handleInterfaceChange).toHaveBeenCalledTimes(0)
handleInterfaceChange.mockClear()
- networkInterfaces.mockReturnValueOnce({
+ vi.mocked(os.networkInterfaces).mockReturnValueOnce({
en0: [mockV4, mockV6],
en1: [mockV4],
})
- jest.advanceTimersByTime(30000)
+ vi.advanceTimersByTime(30000)
expect(handleInterfaceChange).toHaveBeenCalledWith([
{ name: 'en0', ...mockV4 },
{ name: 'en0', ...mockV6 },
@@ -119,18 +116,18 @@ describe('system-info::network-interfaces', () => {
})
it('should be able to stop monitoring interface changes', () => {
- const handleInterfaceChange = jest.fn()
+ const handleInterfaceChange = vi.fn()
- networkInterfaces.mockReturnValue({})
+ vi.mocked(os.networkInterfaces).mockReturnValue({})
const monitor = createNetworkInterfaceMonitor({
pollInterval: 30000,
onInterfaceChange: handleInterfaceChange,
})
- networkInterfaces.mockReturnValueOnce({ en0: [mockV4] })
+ vi.mocked(os.networkInterfaces).mockReturnValueOnce({ en0: [mockV4] })
monitor.stop()
- jest.advanceTimersByTime(30000)
+ vi.advanceTimersByTime(30000)
expect(handleInterfaceChange).toHaveBeenCalledTimes(0)
})
})
diff --git a/app-shell/src/system-info/__tests__/usb-devices.test.ts b/app-shell/src/system-info/__tests__/usb-devices.test.ts
index 1c84dda857d..47177333333 100644
--- a/app-shell/src/system-info/__tests__/usb-devices.test.ts
+++ b/app-shell/src/system-info/__tests__/usb-devices.test.ts
@@ -1,71 +1,209 @@
import execa from 'execa'
-import usbDetection from 'usb-detection'
+import { usb } from 'usb'
+import { vi, it, expect, describe, afterEach } from 'vitest'
import * as Fixtures from '@opentrons/app/src/redux/system-info/__fixtures__'
+import { createLogger } from '../../log'
import { createUsbDeviceMonitor, getWindowsDriverVersion } from '../usb-devices'
+import { isWindows } from '../../os'
+
+vi.mock('execa')
+vi.mock('usb')
+vi.mock('electron-store')
+vi.mock('../../log', async importOriginal => {
+ const actual = await importOriginal()
+ return {
+ ...actual,
+ createLogger: () => ({
+ debug: vi.fn(),
+ error: vi.fn(),
+ warn: vi.fn(),
+ }),
+ }
+})
+
+const mockFixtureDevice = {
+ ...Fixtures.mockUsbDevice,
+ identifier: 'ec2c23ab245e0424059c3ad99e626cdb',
+}
+
+const mockDescriptor = {
+ busNumber: 3,
+ deviceAddress: 10,
+ deviceDescriptor: {
+ idVendor: Fixtures.mockUsbDevice.vendorId,
+ idProduct: Fixtures.mockUsbDevice.productId,
+ iSerialNumber: 0,
+ iManufacturer: 1,
+ iProduct: 2,
+ },
+}
+
+const getSerialIterator = () => {
+ const serials = ['sn1', 'sn2', 'sn3']
+ let idx = 0
+ return () => {
+ idx += 1
+ return serials[idx - 1]
+ }
+}
+
+const getManufacturerIterator = () => {
+ const mfrs = ['mfr1', 'mfr2', 'mfr3']
+ let idx = 0
+ return () => {
+ idx += 1
+ return mfrs[idx - 1]
+ }
+}
+
+const getProductIterator = () => {
+ const products = ['pr1', 'pr2', 'pr3']
+ let idx = 0
+ return () => {
+ idx += 1
+ return products[idx - 1]
+ }
+}
+
+const mockUSBDevice = {
+ ...mockDescriptor,
+ getStringDescriptor: vi.mocked(usb.Device),
+ open: vi.mocked(usb.Device),
+ close: vi.mocked(usb.Device),
+}
+
+if (!isWindows()) {
+ describe('app-shell::system-info::usb-devices::detection', () => {
+ const { windowsDriverVersion: _, ...mockDevice } = Fixtures.mockUsbDevice
+ afterEach(() => {
+ vi.resetAllMocks()
+ })
-jest.mock('execa')
-jest.mock('usb-detection')
+ it.skip('can return the list of all devices', async () => {
+ const mockDevices = [mockUSBDevice, mockUSBDevice, mockUSBDevice] as any
+ const serialIterator = getSerialIterator()
+ const mfrIterator = getManufacturerIterator()
+ const productIterator = getProductIterator()
+ vi.mocked(usb.getDeviceList).mockReturnValueOnce(mockDevices)
+ // @ts-expect-error Revisit after Vite migration.
+ vi.mocked(usb.Device).mockImplementation((descriptorId, callback) =>
+ callback(
+ undefined,
+ [serialIterator, mfrIterator, productIterator][descriptorId]()
+ )
+ )
-const usbDetectionFind = usbDetection.find as jest.MockedFunction<
- typeof usbDetection.find
->
+ const monitor = createUsbDeviceMonitor()
+ const result = monitor.getAllDevices()
+ const devices = await result
+
+ expect(devices).toEqual([
+ {
+ ...mockFixtureDevice,
+ manufacturerName: 'mfr1',
+ serialNumber: 'sn1',
+ productName: 'pr1',
+ },
+ {
+ ...mockFixtureDevice,
+ manufacturerName: 'mfr2',
+ serialNumber: 'sn2',
+ productName: 'pr2',
+ },
+ {
+ ...mockFixtureDevice,
+ manufacturerName: 'mfr3',
+ serialNumber: 'sn3',
+ productName: 'pr3',
+ },
+ ])
+ })
-const execaCommand = execa.command as jest.MockedFunction<
- typeof execa.command
->
+ it.skip('can notify when devices are added', () =>
+ new Promise((resolve, reject) => {
+ const onDeviceAdd = vi.fn()
+ onDeviceAdd.mockImplementation(device => {
+ try {
+ expect(device).toEqual({
+ ...mockFixtureDevice,
+ manufacturerName: 'mfr1',
+ serialNumber: 'sn1',
+ productName: 'pn1',
+ })
+ resolve()
+ } catch (error) {
+ reject(error)
+ }
+ })
+ let attachListener
+ vi.mocked(usb.on).mockImplementationOnce((event, listener) => {
+ if (event === 'attach') {
+ attachListener = listener
+ }
+ })
+ createUsbDeviceMonitor({ onDeviceAdd })
+ // @ts-expect-error Revisit after Vite migration.
+ vi.mocked(usb.Device).mockImplementation((descriptorId, callback) =>
+ callback(undefined, ['sn1', 'mfr1', 'pn1'][descriptorId])
+ )
+ if (attachListener) {
+ // @ts-expect-error: this is gross
+ attachListener(mockUSBDevice)
+ } else {
+ reject(new Error('attachListener was not defined'))
+ }
+ }))
+
+ it('can notify when devices are removed', () =>
+ new Promise((resolve, reject) => {
+ const onDeviceRemove = vi.fn()
+ onDeviceRemove.mockImplementation(device => {
+ try {
+ expect(device).toEqual({
+ vendorId: mockDevice.vendorId,
+ productId: mockDevice.productId,
+ identifier: 'ec2c23ab245e0424059c3ad99e626cdb',
+ manufacturerName: undefined,
+ productName: undefined,
+ serialNumber: undefined,
+ systemIdentifier: undefined,
+ })
+ resolve()
+ } catch (error) {
+ reject(error)
+ }
+ })
+
+ let detachListener
+
+ vi.mocked(usb.on).mockImplementationOnce((event, listener) => {
+ if (event === 'detach') {
+ detachListener = listener
+ }
+ })
+ vi.mocked(usb.Device).mockImplementation(() => {
+ throw new Error('Cannot open detached device')
+ })
+ createUsbDeviceMonitor({ onDeviceRemove })
+ if (detachListener) {
+ // @ts-expect-error: this is gross
+ detachListener(mockUSBDevice)
+ } else {
+ reject(new Error('detachListener was not created'))
+ }
+ }))
+ })
+}
describe('app-shell::system-info::usb-devices', () => {
const { windowsDriverVersion: _, ...mockDevice } = Fixtures.mockUsbDevice
afterEach(() => {
- jest.resetAllMocks()
- })
-
- it('can create a usb device monitor', () => {
- expect(usbDetection.startMonitoring).toHaveBeenCalledTimes(0)
- createUsbDeviceMonitor()
- expect(usbDetection.startMonitoring).toHaveBeenCalledTimes(1)
- })
-
- it('usb device monitor can be stopped', () => {
- const monitor = createUsbDeviceMonitor()
- monitor.stop()
- expect(usbDetection.stopMonitoring).toHaveBeenCalledTimes(1)
- })
-
- it('can return the list of all devices', async () => {
- const mockDevices = [
- { ...mockDevice, deviceName: 'foo' },
- { ...mockDevice, deviceName: 'bar' },
- { ...mockDevice, deviceName: 'baz' },
- ]
-
- usbDetectionFind.mockResolvedValueOnce(mockDevices)
-
- const monitor = createUsbDeviceMonitor()
- const result = monitor.getAllDevices()
-
- await expect(result).resolves.toEqual(mockDevices)
- })
-
- it('can notify when devices are added', () => {
- const onDeviceAdd = jest.fn()
- createUsbDeviceMonitor({ onDeviceAdd })
-
- usbDetection.emit('add', mockDevice)
-
- expect(onDeviceAdd).toHaveBeenCalledWith(mockDevice)
- })
-
- it('can notify when devices are removed', () => {
- const onDeviceRemove = jest.fn()
- createUsbDeviceMonitor({ onDeviceRemove })
-
- usbDetection.emit('remove', mockDevice)
-
- expect(onDeviceRemove).toHaveBeenCalledWith(mockDevice)
+ vi.resetAllMocks()
})
it('can get the Windows driver version of a device', () => {
- execaCommand.mockResolvedValue({ stdout: '1.2.3' } as any)
+ vi.mocked(execa.command).mockResolvedValue({ stdout: '1.2.3' } as any)
const device = {
...mockDevice,
@@ -89,7 +227,7 @@ describe('app-shell::system-info::usb-devices', () => {
})
it('returns null for unknown if command errors out', () => {
- execaCommand.mockRejectedValue('AH!')
+ vi.mocked(execa.command).mockRejectedValue('AH!')
return getWindowsDriverVersion(mockDevice).then(version => {
expect(version).toBe(null)
diff --git a/app-shell/src/system-info/index.ts b/app-shell/src/system-info/index.ts
index f42cf474f81..806e4432863 100644
--- a/app-shell/src/system-info/index.ts
+++ b/app-shell/src/system-info/index.ts
@@ -1,7 +1,6 @@
// system info module
import { app } from 'electron'
-import { UI_INITIALIZED } from '@opentrons/app/src/redux/shell/actions'
-import * as SystemInfo from '@opentrons/app/src/redux/system-info'
+import { UI_INITIALIZED } from '../constants'
import { createLogger } from '../log'
import { isWindows } from '../os'
import { createUsbDeviceMonitor, getWindowsDriverVersion } from './usb-devices'
@@ -12,11 +11,17 @@ import {
import type { UsbDevice } from '@opentrons/app/src/redux/system-info/types'
import type { Action, Dispatch } from '../types'
-import type { UsbDeviceMonitor, Device } from './usb-devices'
+import type { UsbDeviceMonitor } from './usb-devices'
import type {
NetworkInterface,
NetworkInterfaceMonitor,
} from './network-interfaces'
+import {
+ initialized,
+ networkInterfacesChanged,
+ usbDeviceAdded,
+ usbDeviceRemoved,
+} from '../config/actions'
export { createNetworkInterfaceMonitor }
export type { NetworkInterface, NetworkInterfaceMonitor }
@@ -26,15 +31,19 @@ const IFACE_POLL_INTERVAL_MS = 30000
const log = createLogger('system-info')
-const addDriverVersion = (device: Device): Promise<UsbDevice> => {
- if (isWindows() && RE_REALTEK.test(device.manufacturer)) {
+const addDriverVersion = (device: UsbDevice): Promise<UsbDevice> => {
+ if (
+ isWindows() &&
+ device.manufacturerName != null &&
+ RE_REALTEK.test(device.manufacturerName)
+ ) {
return getWindowsDriverVersion(device).then(windowsDriverVersion => ({
...device,
windowsDriverVersion,
}))
}
- return Promise.resolve({ ...device })
+ return Promise.resolve(device)
}
export function registerSystemInfo(
@@ -43,17 +52,17 @@ export function registerSystemInfo(
let usbMonitor: UsbDeviceMonitor
let ifaceMonitor: NetworkInterfaceMonitor
- const handleDeviceAdd = (device: Device): void => {
+ const handleDeviceAdd = (device: UsbDevice): void => {
// eslint-disable-next-line @typescript-eslint/no-floating-promises
- addDriverVersion(device).then(d => dispatch(SystemInfo.usbDeviceAdded(d)))
+ addDriverVersion(device).then(d => dispatch(usbDeviceAdded(d)))
}
- const handleDeviceRemove = (d: Device): void => {
- dispatch(SystemInfo.usbDeviceRemoved({ ...d }))
+ const handleDeviceRemove = (d: UsbDevice): void => {
+ dispatch(usbDeviceRemoved(d))
}
const handleIfacesChanged = (interfaces: NetworkInterface[]): void => {
- dispatch(SystemInfo.networkInterfacesChanged(interfaces))
+ dispatch(networkInterfacesChanged(interfaces))
}
app.once('will-quit', () => {
@@ -91,7 +100,7 @@ export function registerSystemInfo(
.getAllDevices()
.then(devices => Promise.all(devices.map(addDriverVersion)))
.then(devices => {
- dispatch(SystemInfo.initialized(devices, getActiveInterfaces()))
+ dispatch(initialized(devices, getActiveInterfaces()))
})
.catch((error: Error) =>
log.warn(`unable to start usb monitor with error: ${error.message}`)
diff --git a/app-shell/src/system-info/usb-devices.ts b/app-shell/src/system-info/usb-devices.ts
index 6000229ef9c..30ed5a53dc2 100644
--- a/app-shell/src/system-info/usb-devices.ts
+++ b/app-shell/src/system-info/usb-devices.ts
@@ -1,72 +1,332 @@
import assert from 'assert'
import execa from 'execa'
-import usbDetection from 'usb-detection'
+import { usb } from 'usb'
import { isWindows } from '../os'
import { createLogger } from '../log'
+import { createHmac } from 'crypto'
-import type { Device } from 'usb-detection'
-
-export type { Device }
+import type { UsbDevice } from '@opentrons/app/src/redux/system-info/types'
export type UsbDeviceMonitorOptions = Partial<{
- onDeviceAdd?: (device: Device) => unknown
- onDeviceRemove?: (device: Device) => unknown
+ onDeviceAdd?: (device: UsbDevice) => void
+ onDeviceRemove?: (device: UsbDevice) => void
}>
export interface UsbDeviceMonitor {
- getAllDevices: () => Promise<Device[]>
+ getAllDevices: () => Promise<UsbDevice[]>
stop: () => void
}
const log = createLogger('usb-devices')
+const decToHex = (number: number): string =>
+ number.toString(16).toUpperCase().padStart(4, '0')
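+// e.g. decToHex(0x0bda) === '0BDA' and decToHex(3) === '0003' (illustrative values)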
+const idVendor = (device: usb.Device): string =>
+ decToHex(device.deviceDescriptor.idVendor)
+const idProduct = (device: usb.Device): string =>
+ decToHex(device.deviceDescriptor.idProduct)
+
+const descriptorToDevice = (
+ descriptors: usb.Device,
+ manufacturerName?: string,
+ serialNumber?: string,
+ productName?: string,
+ systemIdentifier?: string
+): UsbDevice => ({
+ vendorId: descriptors.deviceDescriptor.idVendor,
+ productId: descriptors.deviceDescriptor.idProduct,
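+ // identifier below: a stable HMAC-MD5 digest of the bus number and device address, each rendered as 4-digit hex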
+ identifier: createHmac('md5', '')
+ .update(decToHex(descriptors.busNumber))
+ .update(decToHex(descriptors.deviceAddress))
+ .digest('hex'),
+ serialNumber,
+ manufacturerName,
+ productName,
+ systemIdentifier,
+})
+
+const getStringDescriptorPromise = (
+ device: usb.Device,
+ index: number
+): Promise<string> =>
+ new Promise((resolve, reject) => {
+ device.getStringDescriptor(index, (error?, value?) => {
+ // fyi if you do something in this callback that throws there's a good chance
+ // it will crash node. fyi things that might raise include calling half the
+ // built-ins since this executes in a weird extension environment. for instance
+ // log.info or in fact console.log will cause a hard crash here
+ !!error || !!!value ? reject(error ?? 'no value') : resolve(value)
+ })
+ })
+
+const orDefault = <T, U>(
+ promise: Promise<T>,
+ defaulter: (err: any) => U
+): Promise<T | U> =>
+ promise
+ .then((result: T): T => result)
+ .catch(
+ (err: any) =>
+ new Promise<U>(resolve => {
+ resolve(defaulter(err))
+ })
+ )
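+// e.g. orDefault(someLookup(), () => undefined) resolves to undefined instead of rejecting when someLookup() fails (someLookup is hypothetical)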
+
+const doUpstreamDeviceFromUsbDevice = (
+ device: usb.Device
+): Promise<UsbDevice[]> =>
+ isWindows()
+ ? upstreamDeviceFromUsbDeviceWinAPI(device)
+ : upstreamDeviceFromUsbDeviceLibUSB(device)
+
+function upstreamDeviceFromUsbDevice(device: usb.Device): Promise<UsbDevice[]> {
+ return doUpstreamDeviceFromUsbDevice(device).catch(err => {
+ log.error(
+ `Failed to get device information for vid=${idVendor(
+ device
+ )} pid=${idProduct(device)}: ${err}: friendly names unavailable`
+ )
+ return [descriptorToDevice(device)]
+ })
+}
+
+interface WmiObject {
+ Present: boolean
+ Manufacturer: string
+ Name: string
+ DeviceID: string
+}
+
+function upstreamDeviceFromUsbDeviceWinAPI(
+ device: usb.Device
+): Promise<UsbDevice[]> {
+ // Here begins an annotated series of interesting powershell interactions!
+ // We don't know the device ID of the device. For USB devices it's typically composed of
+ // the VID, the PID, and the serial, and we don't know the serial. (Also if there's two devices
+ // with the same vid+pid+serial, as with devices that hardcode serial to 1, then you get some
+ // random something-or-other in there so even if we had the serial we couldn't rely on it.)
+
+ // We also essentially have no way of linking this uniquely identifying information to that
+ // provided by libusb. Libusb provides usb-oriented identifiers like the bus address; windows
+ // provides identifiers about hubs and ports.
+
+ // This is basically why we have everything returning lists of devices - this function needs
+ // to tell people that it found multiple devices and it doesn't know which is which.
+
+ // We can get a json-formatted dump of information about all devices with the specified vid and
+ // pid
+ return execa
+ .command(
+ `Get-WmiObject Win32_PnpEntity -Filter "DeviceId like '%\\\\VID_${idVendor(
+ device
+ )}&PID_${idProduct(
+ device
+ )}%'" | Select-Object -Property * | ConvertTo-JSON -Compress`,
+ { shell: 'PowerShell.exe' }
+ )
+ .then(dump => {
+ // powershell helpfully will dump a json object when there's exactly one result and a json
+ // array when there's more than one result. isn't that really cool? this is actually fixed
+ // in any at-all modern powershell version, where ConvertTo-JSON has a flag -AsArray that
+ // forces array output, but you absolutely cannot rely on anything past like powershell
+ // 5.1 being present
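+ // e.g. a single match parses to one object shaped like the WmiObject interface below,
+ // while multiple matches parse to an array of such objects (illustrative of the two shapes)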
+ const parsePoshJsonOutputToWmiObjectArray = (
+ dump: string
+ ): WmiObject[] => {
+ if (dump[0] === '[') {
+ return JSON.parse(dump) as WmiObject[]
+ } else {
+ return [JSON.parse(dump) as WmiObject]
+ }
+ }
+ if (dump.stderr !== '') {
+ return Promise.reject(new Error(`Command failed: ${dump.stderr}`))
+ }
+ const getObjsWithCorrectPresence = (wmiDump: WmiObject[]): WmiObject[] =>
+ wmiDump.filter(obj => obj.Present)
+
+ const objsToQuery = getObjsWithCorrectPresence(
+ parsePoshJsonOutputToWmiObjectArray(dump.stdout.trim())
+ )
+ return objsToQuery.map(wmiObj =>
+ descriptorToDevice(
+ device,
+ wmiObj.Manufacturer,
+ // the serial number, or something kind of like a serial number in the case of devices
+ // with duplicate serial numbers, is the third element of the device id which is formed
+ // by concatenating stuff with \\ as a separator (and of course each \ must be escaped)
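+ // e.g. for a DeviceID of the form USB\...\<serial-ish segment>, the capture group picks out that last segment (illustrative)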
+ wmiObj.DeviceID.match(/.*\\\\.*\\\\(.*)/)?.at(1) ?? undefined,
+ wmiObj.Name,
+ wmiObj.DeviceID
+ )
+ )
+ })
+}
+
+function upstreamDeviceFromUsbDeviceLibUSB(
+ device: usb.Device
+): Promise<UsbDevice[]> {
+ return new Promise((resolve, reject) => {
+ try {
+ device.open(false)
+ } catch (err: any) {
+ log.error(
+ `Failed to open vid=${idVendor(device)} pid=${idProduct(
+ device
+ )}: ${err}`
+ )
+ reject(err)
+ }
+ resolve(device)
+ })
+ .then(() =>
+ Promise.all([
+ orDefault(
+ getStringDescriptorPromise(
+ device,
+ device.deviceDescriptor.iManufacturer
+ ),
+ (err: any): undefined => {
+ log.error(
+ `Failed to get manufacturer for vid=${idVendor(
+ device
+ )} pid=${idProduct(device)}: ${err}`
+ )
+ return undefined
+ }
+ ),
+ orDefault(
+ getStringDescriptorPromise(
+ device,
+ device.deviceDescriptor.iSerialNumber
+ ),
+ (err: any): undefined => {
+ log.error(
+ `Failed to get serial for vid=${idVendor(device)} pid=${idProduct(
+ device
+ )}: ${err}`
+ )
+ return undefined
+ }
+ ),
+ orDefault(
+ getStringDescriptorPromise(device, device.deviceDescriptor.iProduct),
+ (err: any): undefined => {
+ log.error(
+ `Failed to get product name for vid=${idVendor(
+ device
+ )} pid=${idProduct(device)}: ${err}`
+ )
+ return undefined
+ }
+ ),
+ ])
+ )
+ .then(([manufacturer, serialNumber, productName]) => {
+ return [
+ descriptorToDevice(device, manufacturer, serialNumber, productName),
+ ]
+ })
+ .finally(() => {
+ setImmediate(() => {
+ try {
+ device.close()
+ log.info(
+ `closed vid=${idVendor(device)}, pid=${idProduct(device)} ok`
+ )
+ } catch (err) {
+ log.info(
+ `failed to close vid=${idVendor(device)}, pid=${idProduct(
+ device
+ )}: ${err}`
+ )
+ }
+ })
+ })
+}
+
export function createUsbDeviceMonitor(
options: UsbDeviceMonitorOptions = {}
): UsbDeviceMonitor {
const { onDeviceAdd, onDeviceRemove } = options
- usbDetection.startMonitoring()
-
+ if (isWindows()) {
+ try {
+ log.info('Initializing USBDk backend on windows')
+ usb.useUsbDkBackend()
+ log.info('USBDk backend initialized')
+ } catch (err) {
+ log.error(`Could not initialize USBDk backend: ${err}`)
+ }
+ }
if (typeof onDeviceAdd === 'function') {
- usbDetection.on('add', onDeviceAdd)
+ usb.on('attach', device => {
+ upstreamDeviceFromUsbDevice(device).then(devices =>
+ devices.forEach(onDeviceAdd)
+ )
+ })
}
if (typeof onDeviceRemove === 'function') {
- usbDetection.on('remove', onDeviceRemove)
+ usb.on('detach', device => {
+ onDeviceRemove(descriptorToDevice(device))
+ })
}
return {
- getAllDevices: () => usbDetection.find(),
+ getAllDevices: () =>
+ new Promise((resolve, reject) => {
+ resolve(usb.getDeviceList())
+ })
+ .then(deviceList =>
+ Promise.all(deviceList.map(upstreamDeviceFromUsbDevice))
+ )
+ .then(upstreamDevices => upstreamDevices.flat()),
stop: () => {
if (typeof onDeviceAdd === 'function') {
- usbDetection.off('add', onDeviceAdd)
+ usb.removeAllListeners('attach')
}
if (typeof onDeviceRemove === 'function') {
- usbDetection.off('remove', onDeviceRemove)
+ usb.removeAllListeners('detach')
}
- usbDetection.stopMonitoring()
log.debug('usb detection monitoring stopped')
},
}
}
-const decToHex = (number: number): string =>
- number.toString(16).toUpperCase().padStart(4, '0')
+const deviceIdFromDetails = (device: UsbDevice): string | null => {
+ const {
+ vendorId: vidDecimal,
+ productId: pidDecimal,
+ serialNumber,
+ systemIdentifier,
+ } = device
+ if (systemIdentifier !== undefined) {
+ return systemIdentifier
+ }
+ const [vid, pid] = [decToHex(vidDecimal), decToHex(pidDecimal)]
+
+ // USBDevice serialNumber is string | undefined
+ if (serialNumber == null) {
+ return null
+ }
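+ // e.g. vendorId 0x0bda, productId 0x8152, serial '001000001' -> 'USB\VID_0BDA&PID_8152\001000001' (illustrative values)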
+ return `USB\\VID_${vid}&PID_${pid}\\${serialNumber}`
+}
export function getWindowsDriverVersion(
- device: Device
+ device: UsbDevice
): Promise<string | null> {
- const { vendorId: vidDecimal, productId: pidDecimal, serialNumber } = device
- const [vid, pid] = [decToHex(vidDecimal), decToHex(pidDecimal)]
-
+ console.log('getWindowsDriverVersion', device)
assert(
isWindows() || process.env.NODE_ENV === 'test',
`getWindowsDriverVersion cannot be called on ${process.platform}`
)
+ const deviceId = deviceIdFromDetails(device)
+
return execa
.command(
- `Get-PnpDeviceProperty -InstanceID "USB\\VID_${vid}&PID_${pid}\\${serialNumber}" -KeyName "DEVPKEY_Device_DriverVersion" | % { $_.Data }`,
+ `Get-PnpDeviceProperty -InstanceID "${deviceId}" -KeyName "DEVPKEY_Device_DriverVersion" | % { $_.Data }`,
{ shell: 'PowerShell.exe' }
)
.then(result => result.stdout.trim())
diff --git a/app-shell/src/types.ts b/app-shell/src/types.ts
index 44493b35b73..494549f8c3d 100644
--- a/app-shell/src/types.ts
+++ b/app-shell/src/types.ts
@@ -4,9 +4,100 @@ import type {
Error as PlainError,
} from '@opentrons/app/src/redux/types'
+import type { Config } from './config'
import type { Logger } from '@opentrons/app/src/logger'
export type { Action, PlainError }
export type Dispatch = (action: Action) => void
export type { Logger }
+
+// copied types below from the app so the app shell does not pull in the app
+// in its bundle
+
+export type UI_INITIALIZED_TYPE = 'shell:UI_INITIALIZED'
+export type CONFIG_INITIALIZED_TYPE = 'config:INITIALIZED'
+export type CONFIG_UPDATE_VALUE_TYPE = 'config:UPDATE_VALUE'
+export type CONFIG_RESET_VALUE_TYPE = 'config:RESET_VALUE'
+export type CONFIG_TOGGLE_VALUE_TYPE = 'config:TOGGLE_VALUE'
+export type CONFIG_ADD_UNIQUE_VALUE_TYPE = 'config:ADD_UNIQUE_VALUE'
+export type CONFIG_SUBTRACT_VALUE_TYPE = 'config:SUBTRACT_VALUE'
+export type CONFIG_VALUE_UPDATED_TYPE = 'config:VALUE_UPDATED'
+
+export type POLL_TYPE = 'poll'
+export type INITIAL_TYPE = 'initial'
+export type ADD_LABWARE_TYPE = 'addLabware'
+export type DELETE_LABWARE_TYPE = 'deleteLabware'
+export type OVERWRITE_LABWARE_TYPE = 'overwriteLabware'
+export type CHANGE_DIRECTORY_TYPE = 'changeDirectory'
+
+export type FETCH_CUSTOM_LABWARE_TYPE = 'labware:FETCH_CUSTOM_LABWARE'
+export type CUSTOM_LABWARE_LIST_TYPE = 'labware:CUSTOM_LABWARE_LIST'
+export type CUSTOM_LABWARE_LIST_FAILURE_TYPE = 'labware:CUSTOM_LABWARE_LIST_FAILURE'
+export type CHANGE_CUSTOM_LABWARE_DIRECTORY_TYPE = 'labware:CHANGE_CUSTOM_LABWARE_DIRECTORY'
+export type ADD_CUSTOM_LABWARE_TYPE = 'labware:ADD_CUSTOM_LABWARE'
+export type ADD_CUSTOM_LABWARE_FILE_TYPE = 'labware:ADD_CUSTOM_LABWARE_FILE'
+export type ADD_CUSTOM_LABWARE_FAILURE_TYPE = 'labware:ADD_CUSTOM_LABWARE_FAILURE'
+export type CLEAR_ADD_CUSTOM_LABWARE_FAILURE_TYPE = 'labware:CLEAR_ADD_CUSTOM_LABWARE_FAILURE'
+export type ADD_NEW_LABWARE_NAME_TYPE = 'labware:ADD_NEW_LABWARE_NAME'
+export type CLEAR_NEW_LABWARE_NAME_TYPE = 'labware:CLEAR_NEW_LABWARE_NAME'
+export type OPEN_CUSTOM_LABWARE_DIRECTORY_TYPE = 'labware:OPEN_CUSTOM_LABWARE_DIRECTORY'
+export type DELETE_CUSTOM_LABWARE_FILE_TYPE = 'labware:DELETE_CUSTOM_LABWARE_FILE'
+export type INVALID_LABWARE_FILE_TYPE = 'INVALID_LABWARE_FILE'
+export type DUPLICATE_LABWARE_FILE_TYPE = 'DUPLICATE_LABWARE_FILE'
+export type OPENTRONS_LABWARE_FILE_TYPE = 'OPENTRONS_LABWARE_FILE'
+export type VALID_LABWARE_FILE_TYPE = 'VALID_LABWARE_FILE'
+export type OPEN_PYTHON_DIRECTORY_TYPE = 'protocol-analysis:OPEN_PYTHON_DIRECTORY'
+export type CHANGE_PYTHON_PATH_OVERRIDE_TYPE = 'protocol-analysis:CHANGE_PYTHON_PATH_OVERRIDE'
+
+export type FETCH_PROTOCOLS_TYPE = 'protocolStorage:FETCH_PROTOCOLS'
+export type UPDATE_PROTOCOL_LIST_TYPE = 'protocolStorage:UPDATE_PROTOCOL_LIST'
+export type UPDATE_PROTOCOL_LIST_FAILURE_TYPE = 'protocolStorage:UPDATE_PROTOCOL_LIST_FAILURE'
+export type ADD_PROTOCOL_TYPE = 'protocolStorage:ADD_PROTOCOL'
+export type REMOVE_PROTOCOL_TYPE = 'protocolStorage:REMOVE_PROTOCOL'
+export type ADD_PROTOCOL_FAILURE_TYPE = 'protocolStorage:ADD_PROTOCOL_FAILURE'
+export type CLEAR_ADD_PROTOCOL_FAILURE_TYPE = 'protocolStorage:CLEAR_ADD_PROTOCOL_FAILURE'
+export type OPEN_PROTOCOL_DIRECTORY_TYPE = 'protocolStorage:OPEN_PROTOCOL_DIRECTORY'
+export type ANALYZE_PROTOCOL_TYPE = 'protocolStorage:ANALYZE_PROTOCOL'
+export type ANALYZE_PROTOCOL_SUCCESS_TYPE = 'protocolStorage:ANALYZE_PROTOCOL_SUCCESS'
+export type ANALYZE_PROTOCOL_FAILURE_TYPE = 'protocolStorage:ANALYZE_PROTOCOL_FAILURE'
+export type VIEW_PROTOCOL_SOURCE_FOLDER_TYPE = 'protocolStorage:VIEW_PROTOCOL_SOURCE_FOLDER'
+
+export type PROTOCOL_ADDITION_TYPE = 'protocolAddition'
+
+export type OPENTRONS_USB_TYPE = 'opentrons-usb'
+
+export type SYSTEM_INFO_INITIALIZED_TYPE = 'systemInfo:INITIALIZED'
+
+export type USB_DEVICE_ADDED_TYPE = 'systemInfo:USB_DEVICE_ADDED'
+
+export type USB_DEVICE_REMOVED_TYPE = 'systemInfo:USB_DEVICE_REMOVED'
+
+export type NETWORK_INTERFACES_CHANGED_TYPE = 'systemInfo:NETWORK_INTERFACES_CHANGED'
+export type USB_HTTP_REQUESTS_START_TYPE = 'shell:USB_HTTP_REQUESTS_START'
+export type USB_HTTP_REQUESTS_STOP_TYPE = 'shell:USB_HTTP_REQUESTS_STOP'
+export type APP_RESTART_TYPE = 'shell:APP_RESTART'
+export type RELOAD_UI_TYPE = 'shell:RELOAD_UI'
+export type SEND_LOG_TYPE = 'shell:SEND_LOG'
+
+// copy
+// TODO(mc, 2020-05-11): i18n
+export type U2E_DRIVER_OUTDATED_MESSAGE_TYPE = 'There is an updated Realtek USB-to-Ethernet adapter driver available for your computer.'
+export type U2E_DRIVER_DESCRIPTION_TYPE = 'The OT-2 uses this adapter for its USB connection to the Opentrons App.'
+export type U2E_DRIVER_OUTDATED_CTA_TYPE = "Please update your computer's driver to ensure a reliable connection to your OT-2."
+
+export type DISCOVERY_START_TYPE = 'discovery:START'
+export type DISCOVERY_FINISH_TYPE = 'discovery:FINISH'
+export type DISCOVERY_UPDATE_LIST_TYPE = 'discovery:UPDATE_LIST'
+export type DISCOVERY_REMOVE_TYPE = 'discovery:REMOVE'
+export type CLEAR_CACHE_TYPE = 'discovery:CLEAR_CACHE'
+
+export interface ConfigInitializedAction {
+ type: CONFIG_INITIALIZED_TYPE
+ payload: { config: Config }
+}
+
+export interface ConfigValueUpdatedAction {
+ type: CONFIG_VALUE_UPDATED_TYPE
+ payload: { path: string; value: any }
+}
diff --git a/app-shell/src/ui.ts b/app-shell/src/ui.ts
index 6bdd1240edf..6f7a2a360fd 100644
--- a/app-shell/src/ui.ts
+++ b/app-shell/src/ui.ts
@@ -1,9 +1,13 @@
// sets up the main window ui
import { app, shell, BrowserWindow } from 'electron'
import path from 'path'
+
import { getConfig } from './config'
+import { RELOAD_UI } from './constants'
import { createLogger } from './log'
+import type { Action } from './types'
+
const config = getConfig('ui')
const log = createLogger('ui')
@@ -52,12 +56,25 @@ export function createUi(): BrowserWindow {
mainWindow.loadURL(url, { extraHeaders: 'pragma: no-cache\n' })
// open new windows (<a target="_blank" ...) in browser windows
- mainWindow.webContents.on('new-window', (event, url) => {
- log.debug('Opening external link', { url })
- event.preventDefault()
- // eslint-disable-next-line @typescript-eslint/no-floating-promises
- shell.openExternal(url)
+ mainWindow.webContents.setWindowOpenHandler(({ url }) => {
+ // eslint-disable-next-line no-void
+ void shell.openExternal(url)
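+ // deny Electron's default window creation; the link has already been handed off to the system browser above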
+ return { action: 'deny' }
})
return mainWindow
}
+
+export function registerReloadUi(
+ browserWindow: BrowserWindow
+): (action: Action) => unknown {
+ return function handleAction(action: Action) {
+ switch (action.type) {
+ case RELOAD_UI:
+ log.info(`reloading UI: ${action.payload.message}`)
+ browserWindow.webContents.reload()
+
+ break
+ }
+ }
+}
diff --git a/app-shell/src/update.ts b/app-shell/src/update.ts
index c272581356a..afaac30020b 100644
--- a/app-shell/src/update.ts
+++ b/app-shell/src/update.ts
@@ -1,18 +1,18 @@
// app updater
-import { autoUpdater as updater } from 'electron-updater'
+import updater from 'electron-updater'
-import { UI_INITIALIZED } from '@opentrons/app/src/redux/shell/actions'
import { createLogger } from './log'
import { getConfig } from './config'
-import { UPDATE_VALUE } from '@opentrons/app/src/redux/config'
-
+import { UI_INITIALIZED, UPDATE_VALUE } from './constants'
import type { UpdateInfo } from '@opentrons/app/src/redux/shell/types'
import type { Action, Dispatch, PlainError } from './types'
-updater.logger = createLogger('update')
-updater.autoDownload = false
+const autoUpdater = updater.autoUpdater
+
+autoUpdater.logger = createLogger('update')
+autoUpdater.autoDownload = false
-export const CURRENT_VERSION: string = updater.currentVersion.version
+export const CURRENT_VERSION: string = autoUpdater.currentVersion.version
export function registerUpdate(
dispatch: Dispatch
@@ -27,7 +27,7 @@ export function registerUpdate(
return downloadUpdate(dispatch)
case 'shell:APPLY_UPDATE':
- return updater.quitAndInstall()
+ return autoUpdater.quitAndInstall()
}
}
}
@@ -44,23 +44,23 @@ function checkUpdate(dispatch: Dispatch): void {
done({ error: PlainObjectError(error), info: null, available: false })
}
- updater.once('update-available', onAvailable)
- updater.once('update-not-available', onNotAvailable)
- updater.once('error', onError)
+ autoUpdater.once('update-available', onAvailable)
+ autoUpdater.once('update-not-available', onNotAvailable)
+ autoUpdater.once('error', onError)
// @ts-expect-error(mc, 2021-02-16): do not use dot-path notation
- updater.channel = getConfig('update.channel')
+ autoUpdater.channel = getConfig('update.channel')
// eslint-disable-next-line @typescript-eslint/no-floating-promises
- updater.checkForUpdates()
+ autoUpdater.checkForUpdates()
function done(payload: {
info?: UpdateInfo | null
available?: boolean
error?: PlainError
}): void {
- updater.removeListener('update-available', onAvailable)
- updater.removeListener('update-not-available', onNotAvailable)
- updater.removeListener('error', onError)
+ autoUpdater.removeListener('update-available', onAvailable)
+ autoUpdater.removeListener('update-not-available', onNotAvailable)
+ autoUpdater.removeListener('error', onError)
dispatch({ type: 'shell:CHECK_UPDATE_RESULT', payload })
}
}
@@ -88,16 +88,16 @@ function downloadUpdate(dispatch: Dispatch): void {
done({ error: PlainObjectError(error) })
}
- updater.on('download-progress', onDownloading)
- updater.once('update-downloaded', onDownloaded)
- updater.once('error', onError)
+ autoUpdater.on('download-progress', onDownloading)
+ autoUpdater.once('update-downloaded', onDownloaded)
+ autoUpdater.once('error', onError)
// eslint-disable-next-line @typescript-eslint/no-floating-promises
- updater.downloadUpdate()
+ autoUpdater.downloadUpdate()
function done(payload: { error?: PlainError }): void {
- updater.removeListener('download-progress', onDownloading)
- updater.removeListener('update-downloaded', onDownloaded)
- updater.removeListener('error', onError)
+ autoUpdater.removeListener('download-progress', onDownloading)
+ autoUpdater.removeListener('update-downloaded', onDownloaded)
+ autoUpdater.removeListener('error', onError)
if (payload.error == null)
dispatch({
type: UPDATE_VALUE,
diff --git a/app-shell/src/usb.ts b/app-shell/src/usb.ts
index 816f06defa2..accdf5c00d7 100644
--- a/app-shell/src/usb.ts
+++ b/app-shell/src/usb.ts
@@ -1,18 +1,7 @@
import { ipcMain, IpcMainInvokeEvent } from 'electron'
import axios, { AxiosRequestConfig } from 'axios'
import FormData from 'form-data'
-import fs from 'fs'
-import path from 'path'
-import {
- usbRequestsStart,
- usbRequestsStop,
-} from '@opentrons/app/src/redux/shell'
-import {
- INITIALIZED as SYSTEM_INFO_INITIALIZED,
- USB_DEVICE_ADDED,
- USB_DEVICE_REMOVED,
-} from '@opentrons/app/src/redux/system-info/constants'
import {
fetchSerialPortList,
SerialPortHttpAgent,
@@ -21,8 +10,14 @@ import {
} from '@opentrons/usb-bridge/node-client'
import { createLogger } from './log'
-import { getProtocolSrcFilePaths } from './protocol-storage'
+import { usbRequestsStart, usbRequestsStop } from './config/actions'
+import {
+ SYSTEM_INFO_INITIALIZED,
+ USB_DEVICE_ADDED,
+ USB_DEVICE_REMOVED,
+} from './constants'
+import type { IPCSafeFormData } from '@opentrons/app/src/redux/shell/types'
import type { UsbDevice } from '@opentrons/app/src/redux/system-info/types'
import type { PortInfo } from '@opentrons/usb-bridge/node-client'
import type { Action, Dispatch } from './types'
@@ -34,27 +29,50 @@ let usbFetchInterval: NodeJS.Timeout
export function getSerialPortHttpAgent(): SerialPortHttpAgent | undefined {
return usbHttpAgent
}
-
-export function createSerialPortHttpAgent(path: string): void {
- const serialPortHttpAgent = new SerialPortHttpAgent({
- maxFreeSockets: 1,
- maxSockets: 1,
- maxTotalSockets: 1,
- keepAlive: true,
- keepAliveMsecs: 10000,
- path,
- logger: usbLog,
- timeout: 100000,
- })
-
- usbHttpAgent = serialPortHttpAgent
+export function createSerialPortHttpAgent(
+ path: string,
+ onComplete: (err: Error | null, agent?: SerialPortHttpAgent) => void
+): void {
+ if (usbHttpAgent != null) {
+ onComplete(
+ new Error('Tried to make a USB http agent when one already existed')
+ )
+ } else {
+ usbHttpAgent = new SerialPortHttpAgent(
+ {
+ maxFreeSockets: 1,
+ maxSockets: 1,
+ maxTotalSockets: 1,
+ keepAlive: true,
+ keepAliveMsecs: Infinity,
+ path,
+ logger: usbLog,
+ timeout: 100000,
+ },
+ (err, agent?) => {
+ if (err != null) {
+ usbHttpAgent = undefined
+ }
+ onComplete(err, agent)
+ }
+ )
+ }
}
-export function destroyUsbHttpAgent(): void {
+export function destroyAndStopUsbHttpRequests(dispatch: Dispatch): void {
if (usbHttpAgent != null) {
usbHttpAgent.destroy()
}
usbHttpAgent = undefined
+ ipcMain.removeHandler('usb:request')
+ dispatch(usbRequestsStop())
+ // handle any additional invocations of usb:request
+ ipcMain.handle('usb:request', () =>
+ Promise.resolve({
+ status: 400,
+ statusText: 'USB robot disconnected',
+ })
+ )
}
function isUsbDeviceOt3(device: UsbDevice): boolean {
@@ -64,37 +82,34 @@ function isUsbDeviceOt3(device: UsbDevice): boolean {
)
}
+function reconstructFormData(ipcSafeFormData: IPCSafeFormData): FormData {
+ const result = new FormData()
+ ipcSafeFormData.forEach(entry => {
+ entry.type === 'file'
+ ? result.append(entry.name, Buffer.from(entry.value), entry.filename)
+ : result.append(entry.name, entry.value)
+ })
+ return result
+}
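+// (IPCSafeFormData entries carry { type, name, value } plus filename for file entries; Buffers are rebuilt here because FormData cannot cross the IPC boundary intact; rationale inferred, not stated in this diff)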
+
async function usbListener(
_event: IpcMainInvokeEvent,
config: AxiosRequestConfig
): Promise<unknown> {
- try {
- // TODO(bh, 2023-05-03): remove mutation
- let { data } = config
- let formHeaders = {}
-
- // check for formDataProxy
- if (data?.formDataProxy != null) {
- // reconstruct FormData
- const formData = new FormData()
- const { protocolKey } = data.formDataProxy
-
- const srcFilePaths: string[] = await getProtocolSrcFilePaths(protocolKey)
-
- // create readable stream from file
- srcFilePaths.forEach(srcFilePath => {
- const readStream = fs.createReadStream(srcFilePath)
- formData.append('files', readStream, path.basename(srcFilePath))
- })
-
- formData.append('key', protocolKey)
-
- formHeaders = formData.getHeaders()
- data = formData
- }
-
- const usbHttpAgent = getSerialPortHttpAgent()
+ // TODO(bh, 2023-05-03): remove mutation
+ let { data } = config
+ let formHeaders = {}
+
+ // check for formDataProxy
+ if (data?.proxiedFormData != null) {
+ // reconstruct FormData
+ const formData = reconstructFormData(data.proxiedFormData)
+ formHeaders = formData.getHeaders()
+ data = formData
+ }
+ const usbHttpAgent = getSerialPortHttpAgent()
+ try {
const response = await axios.request({
httpAgent: usbHttpAgent,
...config,
@@ -102,13 +117,15 @@ async function usbListener(
headers: { ...config.headers, ...formHeaders },
})
return {
+ error: false,
data: response.data,
status: response.status,
statusText: response.statusText,
}
} catch (e) {
- // eslint-disable-next-line @typescript-eslint/restrict-template-expressions
- usbLog.debug(`usbListener error ${e?.message ?? 'unknown'}`)
+ if (e instanceof Error) {
+ console.log(`axios request error ${e?.message ?? 'unknown'}`)
+ }
}
}
@@ -119,42 +136,11 @@ function pollSerialPortAndCreateAgent(dispatch: Dispatch): void {
}
usbFetchInterval = setInterval(() => {
// already connected to an Opentrons robot via USB
- if (getSerialPortHttpAgent() != null) {
- return
- }
- usbLog.debug('fetching serialport list')
- fetchSerialPortList()
- .then((list: PortInfo[]) => {
- const ot3UsbSerialPort = list.find(
- port =>
- port.productId?.localeCompare(DEFAULT_PRODUCT_ID, 'en-US', {
- sensitivity: 'base',
- }) === 0 &&
- port.vendorId?.localeCompare(DEFAULT_VENDOR_ID, 'en-US', {
- sensitivity: 'base',
- }) === 0
- )
-
- if (ot3UsbSerialPort == null) {
- usbLog.debug('no OT-3 serial port found')
- return
- }
-
- createSerialPortHttpAgent(ot3UsbSerialPort.path)
- // remove any existing handler
- ipcMain.removeHandler('usb:request')
- ipcMain.handle('usb:request', usbListener)
-
- dispatch(usbRequestsStart())
- })
- .catch(e =>
- // eslint-disable-next-line @typescript-eslint/restrict-template-expressions
- usbLog.debug(`fetchSerialPortList error ${e?.message ?? 'unknown'}`)
- )
+ tryCreateAndStartUsbHttpRequests(dispatch)
}, 10000)
}
-function startUsbHttpRequests(dispatch: Dispatch): void {
+function tryCreateAndStartUsbHttpRequests(dispatch: Dispatch): void {
fetchSerialPortList()
.then((list: PortInfo[]) => {
const ot3UsbSerialPort = list.find(
@@ -167,19 +153,24 @@ function startUsbHttpRequests(dispatch: Dispatch): void {
}) === 0
)
- // retry if no OT-3 serial port found - usb-detection and serialport packages have race condition
+ // retry if no Flex serial port found - usb-detection and serialport packages have race condition
if (ot3UsbSerialPort == null) {
- usbLog.debug('no OT-3 serial port found, retrying')
- setTimeout(() => startUsbHttpRequests(dispatch), 1000)
+ usbLog.debug('No Flex serial port found.')
return
}
-
- createSerialPortHttpAgent(ot3UsbSerialPort.path)
- // remove any existing handler
- ipcMain.removeHandler('usb:request')
- ipcMain.handle('usb:request', usbListener)
-
- dispatch(usbRequestsStart())
+ if (usbHttpAgent == null) {
+ createSerialPortHttpAgent(ot3UsbSerialPort.path, (err, agent?) => {
+ if (err != null) {
+ const message = err?.message ?? err
+ usbLog.error(`Failed to create serial port: ${message}`)
+ }
+ if (agent) {
+ ipcMain.removeHandler('usb:request')
+ ipcMain.handle('usb:request', usbListener)
+ dispatch(usbRequestsStart())
+ }
+ })
+ }
})
.catch(e =>
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
@@ -192,27 +183,18 @@ export function registerUsb(dispatch: Dispatch): (action: Action) => unknown {
switch (action.type) {
case SYSTEM_INFO_INITIALIZED:
if (action.payload.usbDevices.find(isUsbDeviceOt3) != null) {
- startUsbHttpRequests(dispatch)
+ tryCreateAndStartUsbHttpRequests(dispatch)
}
pollSerialPortAndCreateAgent(dispatch)
break
case USB_DEVICE_ADDED:
if (isUsbDeviceOt3(action.payload.usbDevice)) {
- startUsbHttpRequests(dispatch)
+ tryCreateAndStartUsbHttpRequests(dispatch)
}
break
case USB_DEVICE_REMOVED:
if (isUsbDeviceOt3(action.payload.usbDevice)) {
- destroyUsbHttpAgent()
- ipcMain.removeHandler('usb:request')
- dispatch(usbRequestsStop())
- // handle any additional invocations of usb:request
- ipcMain.handle('usb:request', () =>
- Promise.resolve({
- status: 400,
- statusText: 'USB robot disconnected',
- })
- )
+ destroyAndStopUsbHttpRequests(dispatch)
}
break
}
diff --git a/app-shell/tsconfig.json b/app-shell/tsconfig.json
index 38724a7c56c..bb29d546ddb 100644
--- a/app-shell/tsconfig.json
+++ b/app-shell/tsconfig.json
@@ -15,7 +15,9 @@
"compilerOptions": {
"composite": true,
"rootDir": "src",
- "outDir": "lib"
+ "outDir": "lib",
+ "target": "esnext",
+ "module": "ESNext"
},
"include": ["typings", "src"]
}
diff --git a/app-shell/typings/global.d.ts b/app-shell/typings/global.d.ts
index 8513596d045..67f9a5a1955 100644
--- a/app-shell/typings/global.d.ts
+++ b/app-shell/typings/global.d.ts
@@ -1,16 +1,9 @@
-import type { IpcRenderer } from 'electron'
-
+/* eslint-disable no-var */
declare global {
- const _PKG_VERSION_: string
- const _PKG_PRODUCT_NAME_: string
- const _PKG_BUGS_URL_: string
- const _OPENTRONS_PROJECT_: string
-
- namespace NodeJS {
- export interface Global {
- APP_SHELL_REMOTE: {
- ipcRenderer: IpcRenderer
- }
- }
- }
+ var APP_SHELL_REMOTE: { ipcRenderer: IpcRenderer; [key: string]: any }
}
+
+declare const _PKG_VERSION_: string
+declare const _PKG_PRODUCT_NAME_: string
+declare const _PKG_BUGS_URL_: string
+declare const _OPENTRONS_PROJECT_: string
diff --git a/app-shell/typings/usb-detection.d.ts b/app-shell/typings/usb-detection.d.ts
index 194cb8cb6fb..783ab4bd1c8 100644
--- a/app-shell/typings/usb-detection.d.ts
+++ b/app-shell/typings/usb-detection.d.ts
@@ -1,6 +1,6 @@
-import 'usb-detection'
+// import 'usb-detection'
-declare module 'usb-detection' {
- export function off(event: string, handler: unknown): void
- export function emit(event: string, payload: unknown): void
-}
+// declare module 'usb-detection' {
+// export function off(event: string, handler: unknown): void
+// export function emit(event: string, payload: unknown): void
+// }
diff --git a/app-shell/vite.config.ts b/app-shell/vite.config.ts
new file mode 100644
index 00000000000..546fe19e23f
--- /dev/null
+++ b/app-shell/vite.config.ts
@@ -0,0 +1,63 @@
+import { versionForProject } from '../scripts/git-version.mjs'
+import pkg from './package.json'
+import path from 'path'
+import { defineConfig } from 'vite'
+import type { UserConfig } from 'vite'
+
+export default defineConfig(
+ async (): Promise<UserConfig> => {
+ const project = process.env.OPENTRONS_PROJECT ?? 'robot-stack'
+ const version = await versionForProject(project)
+ return {
+ // this makes imports relative rather than absolute
+ base: '',
+ publicDir: false,
+ build: {
+ // Relative to the root
+ ssr: 'src/main.ts',
+ outDir: 'lib',
+ commonjsOptions: {
+ transformMixedEsModules: true,
+ esmExternals: true,
+ exclude: [/node_modules/],
+ },
+ lib: {
+ entry: {
+ main: 'src/main.ts',
+ preload: 'src/preload.ts',
+ },
+
+ formats: ['cjs'],
+ },
+ },
+ optimizeDeps: {
+ esbuildOptions: {
+ target: 'CommonJs',
+ },
+ exclude: ['node_modules']
+ },
+ define: {
+ 'process.env': process.env,
+ global: 'globalThis',
+ _PKG_VERSION_: JSON.stringify(version),
+ _PKG_PRODUCT_NAME_: JSON.stringify(pkg.productName),
+ _PKG_BUGS_URL_: JSON.stringify(pkg.bugs.url),
+ _OPENTRONS_PROJECT_: JSON.stringify(project),
+ },
+ resolve: {
+ alias: {
+ '@opentrons/shared-data': path.resolve('../shared-data/js/index.ts'),
+ '@opentrons/step-generation': path.resolve(
+ '../step-generation/src/index.ts'
+ ),
+ '@opentrons/discovery-client': path.resolve(
+ '../discovery-client/src/index.ts'
+ ),
+ '@opentrons/usb-bridge/node-client': path.resolve(
+ '../usb-bridge/node-client/src/index.ts'
+ ),
+ },
+ },
+ }
+ }
+)
diff --git a/app-shell/webpack.config.js b/app-shell/webpack.config.js
deleted file mode 100644
index c10c6569a91..00000000000
--- a/app-shell/webpack.config.js
+++ /dev/null
@@ -1,44 +0,0 @@
-'use strict'
-
-const path = require('path')
-const webpackMerge = require('webpack-merge')
-const { DefinePlugin } = require('webpack')
-const { nodeBaseConfig } = require('@opentrons/webpack-config')
-const { versionForProject } = require('../scripts/git-version')
-const pkg = require('./package.json')
-
-const ENTRY_MAIN = path.join(__dirname, 'src/main.ts')
-const ENTRY_PRELOAD = path.join(__dirname, 'src/preload.ts')
-const OUTPUT_PATH = path.join(__dirname, 'lib')
-
-const project = process.env.OPENTRONS_PROJECT ?? 'robot-stack'
-
-module.exports = async () => {
- const version = await versionForProject(project)
-
- const COMMON_CONFIG = {
- output: { path: OUTPUT_PATH },
- plugins: [
- new DefinePlugin({
- _PKG_VERSION_: JSON.stringify(version),
- _PKG_PRODUCT_NAME_: JSON.stringify(pkg.productName),
- _PKG_BUGS_URL_: JSON.stringify(pkg.bugs.url),
- _OPENTRONS_PROJECT_: JSON.stringify(project),
- }),
- ],
- }
-
- return [
- // main process (runs in electron)
- webpackMerge(nodeBaseConfig, COMMON_CONFIG, {
- target: 'electron-main',
- entry: { main: ENTRY_MAIN },
- }),
-
- // preload script (runs in the browser window)
- webpackMerge(nodeBaseConfig, COMMON_CONFIG, {
- target: 'electron-preload',
- entry: { preload: ENTRY_PRELOAD },
- }),
- ]
-}
diff --git a/app-testing/.gitignore b/app-testing/.gitignore
index 6ba4abe0128..6ae4921e11a 100644
--- a/app-testing/.gitignore
+++ b/app-testing/.gitignore
@@ -1,2 +1,5 @@
.env
results
+analysis_results/*.json
+files/protocols/generated_protocols/*
+!files/protocols/generated_protocols/.keepme
diff --git a/app-testing/Makefile b/app-testing/Makefile
index f9c3770b66a..e1d9698d3cb 100644
--- a/app-testing/Makefile
+++ b/app-testing/Makefile
@@ -1,51 +1,77 @@
.PHONY: black
black:
- pipenv run python -m black .
+ python -m pipenv run python -m black .
.PHONY: black-check
black-check:
- pipenv run python -m black . --check
+ python -m pipenv run python -m black . --check
.PHONY: ruff
ruff:
- pipenv run python -m ruff . --fix
+ python -m pipenv run python -m ruff check . --fix
.PHONY: ruff-check
ruff-check:
- pipenv run python -m ruff .
+ python -m pipenv run python -m ruff check .
.PHONY: mypy
mypy:
- pipenv run python -m mypy conftest.py automation tests
+ python -m pipenv run python -m mypy conftest.py automation tests citools
.PHONY: lint
-lint:
- $(MAKE) black-check
- $(MAKE) ruff-check
- $(MAKE) mypy
+lint: black-check ruff-check mypy
.PHONY: format
-format:
+format:
+ @echo running black
$(MAKE) black
+ @echo running ruff
$(MAKE) ruff
+ @echo formatting the readme with yarn prettier
$(MAKE) format-readme
.PHONY: test-ci
test-ci:
- pipenv run python -m pytest -m "emulated_alpha"
+ python -m pipenv run python -m pytest -m "emulated_alpha"
+
+.PHONY: test-protocol-analysis
+test-protocol-analysis:
+ pipenv run python -m pytest -v tests/protocol_analyze_test.py
.PHONY: setup
-setup:
- pipenv install
+setup: install-pipenv
+ python -m pipenv install
.PHONY: teardown
teardown:
- pipenv --rm
+ python -m pipenv --rm
.PHONY: format-readme
format-readme:
yarn prettier --ignore-path .eslintignore --write app-testing/**/*.md
-.PHONY: print-protocols
-print-protocols:
- pipenv run python print_protocols.py
+.PHONY: install-pipenv
+install-pipenv:
+ python -m pip install -U pipenv
+
+.PHONY: snapshot-test
+snapshot-test:
+ python -m pipenv run pytest -k analyses_snapshot_test -vv
+
+.PHONY: snapshot-test-update
+snapshot-test-update:
+ python -m pipenv run pytest -k analyses_snapshot_test --snapshot-update
+
+TARGET ?= edge
+CACHEBUST := $(shell date +%s)
+
+.PHONY: build-opentrons-analysis
+build-opentrons-analysis:
+ @echo "Building docker image for $(TARGET)"
+ @echo "If you want to build a different version, run 'make build-opentrons-analysis TARGET='"
+ @echo "Cache is always busted to ensure latest version of the code is used"
+ docker build --build-arg OPENTRONS_VERSION=$(TARGET) --build-arg CACHEBUST=$(CACHEBUST) -t opentrons-analysis:$(TARGET) citools/.
+
+.PHONY: generate-protocols
+generate-protocols:
+ python -m pipenv run python -m automation.data.protocol_registry
diff --git a/app-testing/Pipfile b/app-testing/Pipfile
index 12dfe7880a0..43bb4dd2475 100644
--- a/app-testing/Pipfile
+++ b/app-testing/Pipfile
@@ -4,21 +4,20 @@ url = "https://pypi.org/simple"
verify_ssl = true
[packages]
-pytest = "==7.2.0"
-black = "==22.12.0"
-selenium = "==4.7.2"
-importlib-metadata = "==5.0.0"
-requests = "==2.28.1"
-python-dotenv = "==0.21.0"
-pytest-xdist = "==3.0.2"
-mypy = "==0.991"
-types-requests = "==2.28.11.5"
-rich = "==13.0.0"
-atomicwrites = "==1.4.1"
-pyreadline3 = "==3.4.1"
-pydantic = "==1.10.4"
-pygithub = "==1.57"
-ruff = "==0.0.236"
+pytest = "==8.1.1"
+black = "==24.3.0"
+selenium = "==4.19.0"
+importlib-metadata = "==7.1.0"
+requests = "==2.31.0"
+python-dotenv = "==1.0.1"
+mypy = "==1.9.0"
+types-requests = "==2.31.0.20240311"
+rich = "==13.7.1"
+pydantic = "==2.6.4"
+ruff = "==0.3.4"
+docker = "==7.0.0"
+syrupy = "==4.6.1"
+pytest-html = "==4.1.1"
[requires]
-python_version = "3.11"
+python_version = "3.12"
diff --git a/app-testing/Pipfile.lock b/app-testing/Pipfile.lock
index 38341adc09c..0672556f9cd 100644
--- a/app-testing/Pipfile.lock
+++ b/app-testing/Pipfile.lock
@@ -1,11 +1,11 @@
{
"_meta": {
"hash": {
- "sha256": "34f82d64c90df6aff09f96dc7c22e861d0b7d9826cfbdcea9e1e1eb287aefb1f"
+ "sha256": "b7ac4510c6e3aa343c669e1bd838183e905abb6f1701c6efbfb1c22f20cfae44"
},
"pipfile-spec": 6,
"requires": {
- "python_version": "3.11"
+ "python_version": "3.12"
},
"sources": [
{
@@ -16,170 +16,171 @@
]
},
"default": {
- "async-generator": {
+ "annotated-types": {
"hashes": [
- "sha256:01c7bf666359b4967d2cda0000cc2e4af16a0ae098cbffcb8472fb9e8ad6585b",
- "sha256:6ebb3d106c12920aaae42ccb6f787ef5eefdcdd166ea3d628fa8476abe712144"
+ "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43",
+ "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"
],
- "markers": "python_version >= '3.5'",
- "version": "==1.10"
- },
- "atomicwrites": {
- "hashes": [
- "sha256:81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11"
- ],
- "index": "pypi",
- "version": "==1.4.1"
+ "markers": "python_version >= '3.8'",
+ "version": "==0.6.0"
},
"attrs": {
"hashes": [
- "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836",
- "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"
+ "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30",
+ "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"
],
- "markers": "python_version >= '3.6'",
- "version": "==22.2.0"
+ "markers": "python_version >= '3.7'",
+ "version": "==23.2.0"
},
"black": {
"hashes": [
- "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320",
- "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351",
- "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350",
- "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f",
- "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf",
- "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148",
- "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4",
- "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d",
- "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc",
- "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d",
- "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2",
- "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"
+ "sha256:2818cf72dfd5d289e48f37ccfa08b460bf469e67fb7c4abb07edc2e9f16fb63f",
+ "sha256:41622020d7120e01d377f74249e677039d20e6344ff5851de8a10f11f513bf93",
+ "sha256:4acf672def7eb1725f41f38bf6bf425c8237248bb0804faa3965c036f7672d11",
+ "sha256:4be5bb28e090456adfc1255e03967fb67ca846a03be7aadf6249096100ee32d0",
+ "sha256:4f1373a7808a8f135b774039f61d59e4be7eb56b2513d3d2f02a8b9365b8a8a9",
+ "sha256:56f52cfbd3dabe2798d76dbdd299faa046a901041faf2cf33288bc4e6dae57b5",
+ "sha256:65b76c275e4c1c5ce6e9870911384bff5ca31ab63d19c76811cb1fb162678213",
+ "sha256:65c02e4ea2ae09d16314d30912a58ada9a5c4fdfedf9512d23326128ac08ac3d",
+ "sha256:6905238a754ceb7788a73f02b45637d820b2f5478b20fec82ea865e4f5d4d9f7",
+ "sha256:79dcf34b33e38ed1b17434693763301d7ccbd1c5860674a8f871bd15139e7837",
+ "sha256:7bb041dca0d784697af4646d3b62ba4a6b028276ae878e53f6b4f74ddd6db99f",
+ "sha256:7d5e026f8da0322b5662fa7a8e752b3fa2dac1c1cbc213c3d7ff9bdd0ab12395",
+ "sha256:9f50ea1132e2189d8dff0115ab75b65590a3e97de1e143795adb4ce317934995",
+ "sha256:a0c9c4a0771afc6919578cec71ce82a3e31e054904e7197deacbc9382671c41f",
+ "sha256:aadf7a02d947936ee418777e0247ea114f78aff0d0959461057cae8a04f20597",
+ "sha256:b5991d523eee14756f3c8d5df5231550ae8993e2286b8014e2fdea7156ed0959",
+ "sha256:bf21b7b230718a5f08bd32d5e4f1db7fc8788345c8aea1d155fc17852b3410f5",
+ "sha256:c45f8dff244b3c431b36e3224b6be4a127c6aca780853574c00faf99258041eb",
+ "sha256:c7ed6668cbbfcd231fa0dc1b137d3e40c04c7f786e626b405c62bcd5db5857e4",
+ "sha256:d7de8d330763c66663661a1ffd432274a2f92f07feeddd89ffd085b5744f85e7",
+ "sha256:e19cb1c6365fd6dc38a6eae2dcb691d7d83935c10215aef8e6c38edee3f77abd",
+ "sha256:e2af80566f43c85f5797365077fb64a393861a3730bd110971ab7a0c94e873e7"
],
"index": "pypi",
- "version": "==22.12.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==24.3.0"
},
"certifi": {
"hashes": [
- "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3",
- "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"
+ "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f",
+ "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"
],
"markers": "python_version >= '3.6'",
- "version": "==2022.12.7"
- },
- "cffi": {
- "hashes": [
- "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5",
- "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef",
- "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104",
- "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426",
- "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405",
- "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375",
- "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a",
- "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e",
- "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc",
- "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf",
- "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185",
- "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497",
- "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3",
- "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35",
- "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c",
- "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83",
- "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21",
- "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca",
- "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984",
- "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac",
- "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd",
- "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee",
- "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a",
- "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2",
- "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192",
- "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7",
- "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585",
- "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f",
- "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e",
- "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27",
- "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b",
- "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e",
- "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e",
- "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d",
- "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c",
- "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415",
- "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82",
- "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02",
- "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314",
- "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325",
- "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c",
- "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3",
- "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914",
- "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045",
- "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d",
- "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9",
- "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5",
- "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2",
- "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c",
- "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3",
- "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2",
- "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8",
- "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d",
- "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d",
- "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9",
- "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162",
- "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76",
- "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4",
- "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e",
- "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9",
- "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6",
- "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b",
- "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01",
- "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"
- ],
- "version": "==1.15.1"
+ "version": "==2024.2.2"
},
"charset-normalizer": {
"hashes": [
- "sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845",
- "sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f"
+ "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027",
+ "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087",
+ "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786",
+ "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8",
+ "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09",
+ "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185",
+ "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574",
+ "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e",
+ "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519",
+ "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898",
+ "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269",
+ "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3",
+ "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f",
+ "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6",
+ "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8",
+ "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a",
+ "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73",
+ "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc",
+ "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714",
+ "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2",
+ "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc",
+ "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce",
+ "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d",
+ "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e",
+ "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6",
+ "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269",
+ "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96",
+ "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d",
+ "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a",
+ "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4",
+ "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77",
+ "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d",
+ "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0",
+ "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed",
+ "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068",
+ "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac",
+ "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25",
+ "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8",
+ "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab",
+ "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26",
+ "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2",
+ "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db",
+ "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f",
+ "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5",
+ "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99",
+ "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c",
+ "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d",
+ "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811",
+ "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa",
+ "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a",
+ "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03",
+ "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b",
+ "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04",
+ "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c",
+ "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001",
+ "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458",
+ "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389",
+ "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99",
+ "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985",
+ "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537",
+ "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238",
+ "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f",
+ "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d",
+ "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796",
+ "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a",
+ "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143",
+ "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8",
+ "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c",
+ "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5",
+ "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5",
+ "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711",
+ "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4",
+ "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6",
+ "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c",
+ "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7",
+ "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4",
+ "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b",
+ "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae",
+ "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12",
+ "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c",
+ "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae",
+ "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8",
+ "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887",
+ "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b",
+ "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4",
+ "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f",
+ "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5",
+ "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33",
+ "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519",
+ "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"
],
- "markers": "python_full_version >= '3.6.0'",
- "version": "==2.1.1"
+ "markers": "python_full_version >= '3.7.0'",
+ "version": "==3.3.2"
},
"click": {
"hashes": [
- "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e",
- "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"
+ "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28",
+ "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"
],
"markers": "python_version >= '3.7'",
- "version": "==8.1.3"
+ "version": "==8.1.7"
},
- "colorama": {
+ "docker": {
"hashes": [
- "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44",
- "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"
+ "sha256:12ba681f2777a0ad28ffbcc846a69c31b4dfd9752b47eb425a274ee269c5e14b",
+ "sha256:323736fb92cd9418fc5e7133bc953e11a9da04f4483f828b527db553f1e7e5a3"
],
- "markers": "sys_platform == 'win32'",
- "version": "==0.4.6"
- },
- "commonmark": {
- "hashes": [
- "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60",
- "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"
- ],
- "version": "==0.9.1"
- },
- "deprecated": {
- "hashes": [
- "sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d",
- "sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d"
- ],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
- "version": "==1.2.13"
- },
- "execnet": {
- "hashes": [
- "sha256:8f694f3ba9cc92cab508b152dcfe322153975c29bda272e2fd7f3f00f36e47c5",
- "sha256:a295f7cc774947aac58dde7fdc85f4aa00c42adf5d8f5468fc630c1acf30a142"
- ],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
- "version": "==1.9.0"
+ "index": "pypi",
+ "markers": "python_version >= '3.8'",
+ "version": "==7.0.0"
},
"h11": {
"hashes": [
@@ -191,19 +192,20 @@
},
"idna": {
"hashes": [
- "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4",
- "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"
+ "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca",
+ "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"
],
"markers": "python_version >= '3.5'",
- "version": "==3.4"
+ "version": "==3.6"
},
"importlib-metadata": {
"hashes": [
- "sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab",
- "sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43"
+ "sha256:30962b96c0c223483ed6cc7280e7f0199feb01a0e40cfae4d4450fc6fab1f570",
+ "sha256:b78938b926ee8d5f020fc4772d487045805a55ddbad2ecf21c6d60938dc7fcd2"
],
"index": "pypi",
- "version": "==5.0.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==7.1.0"
},
"iniconfig": {
"hashes": [
@@ -213,185 +215,279 @@
"markers": "python_version >= '3.7'",
"version": "==2.0.0"
},
+ "jinja2": {
+ "hashes": [
+ "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa",
+ "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==3.1.3"
+ },
+ "markdown-it-py": {
+ "hashes": [
+ "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1",
+ "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==3.0.0"
+ },
+ "markupsafe": {
+ "hashes": [
+ "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf",
+ "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff",
+ "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f",
+ "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3",
+ "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532",
+ "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f",
+ "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617",
+ "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df",
+ "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4",
+ "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906",
+ "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f",
+ "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4",
+ "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8",
+ "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371",
+ "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2",
+ "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465",
+ "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52",
+ "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6",
+ "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169",
+ "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad",
+ "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2",
+ "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0",
+ "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029",
+ "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f",
+ "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a",
+ "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced",
+ "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5",
+ "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c",
+ "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf",
+ "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9",
+ "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb",
+ "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad",
+ "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3",
+ "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1",
+ "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46",
+ "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc",
+ "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a",
+ "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee",
+ "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900",
+ "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5",
+ "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea",
+ "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f",
+ "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5",
+ "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e",
+ "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a",
+ "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f",
+ "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50",
+ "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a",
+ "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b",
+ "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4",
+ "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff",
+ "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2",
+ "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46",
+ "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b",
+ "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf",
+ "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5",
+ "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5",
+ "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab",
+ "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd",
+ "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.1.5"
+ },
+ "mdurl": {
+ "hashes": [
+ "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8",
+ "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==0.1.2"
+ },
"mypy": {
"hashes": [
- "sha256:0714258640194d75677e86c786e80ccf294972cc76885d3ebbb560f11db0003d",
- "sha256:0c8f3be99e8a8bd403caa8c03be619544bc2c77a7093685dcf308c6b109426c6",
- "sha256:0cca5adf694af539aeaa6ac633a7afe9bbd760df9d31be55ab780b77ab5ae8bf",
- "sha256:1c8cd4fb70e8584ca1ed5805cbc7c017a3d1a29fb450621089ffed3e99d1857f",
- "sha256:1f7d1a520373e2272b10796c3ff721ea1a0712288cafaa95931e66aa15798813",
- "sha256:209ee89fbb0deed518605edddd234af80506aec932ad28d73c08f1400ef80a33",
- "sha256:26efb2fcc6b67e4d5a55561f39176821d2adf88f2745ddc72751b7890f3194ad",
- "sha256:37bd02ebf9d10e05b00d71302d2c2e6ca333e6c2a8584a98c00e038db8121f05",
- "sha256:3a700330b567114b673cf8ee7388e949f843b356a73b5ab22dd7cff4742a5297",
- "sha256:3c0165ba8f354a6d9881809ef29f1a9318a236a6d81c690094c5df32107bde06",
- "sha256:3d80e36b7d7a9259b740be6d8d906221789b0d836201af4234093cae89ced0cd",
- "sha256:4175593dc25d9da12f7de8de873a33f9b2b8bdb4e827a7cae952e5b1a342e243",
- "sha256:4307270436fd7694b41f913eb09210faff27ea4979ecbcd849e57d2da2f65305",
- "sha256:5e80e758243b97b618cdf22004beb09e8a2de1af481382e4d84bc52152d1c476",
- "sha256:641411733b127c3e0dab94c45af15fea99e4468f99ac88b39efb1ad677da5711",
- "sha256:652b651d42f155033a1967739788c436491b577b6a44e4c39fb340d0ee7f0d70",
- "sha256:6d7464bac72a85cb3491c7e92b5b62f3dcccb8af26826257760a552a5e244aa5",
- "sha256:74e259b5c19f70d35fcc1ad3d56499065c601dfe94ff67ae48b85596b9ec1461",
- "sha256:7d17e0a9707d0772f4a7b878f04b4fd11f6f5bcb9b3813975a9b13c9332153ab",
- "sha256:901c2c269c616e6cb0998b33d4adbb4a6af0ac4ce5cd078afd7bc95830e62c1c",
- "sha256:98e781cd35c0acf33eb0295e8b9c55cdbef64fcb35f6d3aa2186f289bed6e80d",
- "sha256:a12c56bf73cdab116df96e4ff39610b92a348cc99a1307e1da3c3768bbb5b135",
- "sha256:ac6e503823143464538efda0e8e356d871557ef60ccd38f8824a4257acc18d93",
- "sha256:b8472f736a5bfb159a5e36740847808f6f5b659960115ff29c7cecec1741c648",
- "sha256:b86ce2c1866a748c0f6faca5232059f881cda6dda2a893b9a8373353cfe3715a",
- "sha256:bc9ec663ed6c8f15f4ae9d3c04c989b744436c16d26580eaa760ae9dd5d662eb",
- "sha256:c9166b3f81a10cdf9b49f2d594b21b31adadb3d5e9db9b834866c3258b695be3",
- "sha256:d13674f3fb73805ba0c45eb6c0c3053d218aa1f7abead6e446d474529aafc372",
- "sha256:de32edc9b0a7e67c2775e574cb061a537660e51210fbf6006b0b36ea695ae9bb",
- "sha256:e62ebaad93be3ad1a828a11e90f0e76f15449371ffeecca4a0a0b9adc99abcef"
+ "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6",
+ "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913",
+ "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129",
+ "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc",
+ "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974",
+ "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374",
+ "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150",
+ "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03",
+ "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9",
+ "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02",
+ "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89",
+ "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2",
+ "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d",
+ "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3",
+ "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612",
+ "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e",
+ "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3",
+ "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e",
+ "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd",
+ "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04",
+ "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed",
+ "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185",
+ "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf",
+ "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b",
+ "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4",
+ "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f",
+ "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"
],
"index": "pypi",
- "version": "==0.991"
+ "markers": "python_version >= '3.8'",
+ "version": "==1.9.0"
},
"mypy-extensions": {
"hashes": [
- "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d",
- "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"
+ "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d",
+ "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"
],
- "version": "==0.4.3"
+ "markers": "python_version >= '3.5'",
+ "version": "==1.0.0"
},
"outcome": {
"hashes": [
- "sha256:6f82bd3de45da303cf1f771ecafa1633750a358436a8bb60e06a1ceb745d2672",
- "sha256:c4ab89a56575d6d38a05aa16daeaa333109c1f96167aba8901ab18b6b5e0f7f5"
+ "sha256:9dcf02e65f2971b80047b377468e72a268e15c0af3cf1238e6ff14f7f91143b8",
+ "sha256:e771c5ce06d1415e356078d3bdd68523f284b4ce5419828922b6871e65eda82b"
],
"markers": "python_version >= '3.7'",
- "version": "==1.2.0"
+ "version": "==1.3.0.post0"
},
"packaging": {
"hashes": [
- "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2",
- "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"
+ "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5",
+ "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"
],
"markers": "python_version >= '3.7'",
- "version": "==23.0"
+ "version": "==24.0"
},
"pathspec": {
"hashes": [
- "sha256:3a66eb970cbac598f9e5ccb5b2cf58930cd8e3ed86d393d541eaf2d8b1705229",
- "sha256:64d338d4e0914e91c1792321e6907b5a593f1ab1851de7fc269557a21b30ebbc"
+ "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08",
+ "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"
],
- "markers": "python_version >= '3.7'",
- "version": "==0.11.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==0.12.1"
},
"platformdirs": {
"hashes": [
- "sha256:83c8f6d04389165de7c9b6f0c682439697887bca0aa2f1c87ef1826be3584490",
- "sha256:e1fea1fe471b9ff8332e229df3cb7de4f53eeea4998d3b6bfff542115e998bd2"
+ "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068",
+ "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"
],
- "markers": "python_version >= '3.7'",
- "version": "==2.6.2"
+ "markers": "python_version >= '3.8'",
+ "version": "==4.2.0"
},
"pluggy": {
"hashes": [
- "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159",
- "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"
- ],
- "markers": "python_version >= '3.6'",
- "version": "==1.0.0"
- },
- "pycparser": {
- "hashes": [
- "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9",
- "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"
+ "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981",
+ "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"
],
- "version": "==2.21"
+ "markers": "python_version >= '3.8'",
+ "version": "==1.4.0"
},
"pydantic": {
"hashes": [
- "sha256:05a81b006be15655b2a1bae5faa4280cf7c81d0e09fcb49b342ebf826abe5a72",
- "sha256:0b53e1d41e97063d51a02821b80538053ee4608b9a181c1005441f1673c55423",
- "sha256:2b3ce5f16deb45c472dde1a0ee05619298c864a20cded09c4edd820e1454129f",
- "sha256:2e82a6d37a95e0b1b42b82ab340ada3963aea1317fd7f888bb6b9dfbf4fff57c",
- "sha256:301d626a59edbe5dfb48fcae245896379a450d04baeed50ef40d8199f2733b06",
- "sha256:39f4a73e5342b25c2959529f07f026ef58147249f9b7431e1ba8414a36761f53",
- "sha256:4948f264678c703f3877d1c8877c4e3b2e12e549c57795107f08cf70c6ec7774",
- "sha256:4b05697738e7d2040696b0a66d9f0a10bec0efa1883ca75ee9e55baf511909d6",
- "sha256:51bdeb10d2db0f288e71d49c9cefa609bca271720ecd0c58009bd7504a0c464c",
- "sha256:55b1625899acd33229c4352ce0ae54038529b412bd51c4915349b49ca575258f",
- "sha256:572066051eeac73d23f95ba9a71349c42a3e05999d0ee1572b7860235b850cc6",
- "sha256:6a05a9db1ef5be0fe63e988f9617ca2551013f55000289c671f71ec16f4985e3",
- "sha256:6dc1cc241440ed7ca9ab59d9929075445da6b7c94ced281b3dd4cfe6c8cff817",
- "sha256:6e7124d6855b2780611d9f5e1e145e86667eaa3bd9459192c8dc1a097f5e9903",
- "sha256:75d52162fe6b2b55964fbb0af2ee58e99791a3138588c482572bb6087953113a",
- "sha256:78cec42b95dbb500a1f7120bdf95c401f6abb616bbe8785ef09887306792e66e",
- "sha256:7feb6a2d401f4d6863050f58325b8d99c1e56f4512d98b11ac64ad1751dc647d",
- "sha256:8775d4ef5e7299a2f4699501077a0defdaac5b6c4321173bcb0f3c496fbadf85",
- "sha256:887ca463c3bc47103c123bc06919c86720e80e1214aab79e9b779cda0ff92a00",
- "sha256:9193d4f4ee8feca58bc56c8306bcb820f5c7905fd919e0750acdeeeef0615b28",
- "sha256:983e720704431a6573d626b00662eb78a07148c9115129f9b4351091ec95ecc3",
- "sha256:990406d226dea0e8f25f643b370224771878142155b879784ce89f633541a024",
- "sha256:9cbdc268a62d9a98c56e2452d6c41c0263d64a2009aac69246486f01b4f594c4",
- "sha256:a48f1953c4a1d9bd0b5167ac50da9a79f6072c63c4cef4cf2a3736994903583e",
- "sha256:a9a6747cac06c2beb466064dda999a13176b23535e4c496c9d48e6406f92d42d",
- "sha256:a9f2de23bec87ff306aef658384b02aa7c32389766af3c5dee9ce33e80222dfa",
- "sha256:b5635de53e6686fe7a44b5cf25fcc419a0d5e5c1a1efe73d49d48fe7586db854",
- "sha256:b6f9d649892a6f54a39ed56b8dfd5e08b5f3be5f893da430bed76975f3735d15",
- "sha256:b9a3859f24eb4e097502a3be1fb4b2abb79b6103dd9e2e0edb70613a4459a648",
- "sha256:cd8702c5142afda03dc2b1ee6bc358b62b3735b2cce53fc77b31ca9f728e4bc8",
- "sha256:d7b5a3821225f5c43496c324b0d6875fde910a1c2933d726a743ce328fbb2a8c",
- "sha256:d88c4c0e5c5dfd05092a4b271282ef0588e5f4aaf345778056fc5259ba098857",
- "sha256:eb992a1ef739cc7b543576337bebfc62c0e6567434e522e97291b251a41dad7f",
- "sha256:f2f7eb6273dd12472d7f218e1fef6f7c7c2f00ac2e1ecde4db8824c457300416",
- "sha256:fdf88ab63c3ee282c76d652fc86518aacb737ff35796023fae56a65ced1a5978",
- "sha256:fdf8d759ef326962b4678d89e275ffc55b7ce59d917d9f72233762061fd04a2d"
- ],
- "index": "pypi",
- "version": "==1.10.4"
- },
- "pygithub": {
- "hashes": [
- "sha256:5822febeac2391f1306c55a99af2bc8f86c8bf82ded000030cd02c18f31b731f",
- "sha256:c273f252b278fb81f1769505cc6921bdb6791e1cebd6ac850cc97dad13c31ff3"
+ "sha256:b1704e0847db01817624a6b86766967f552dd9dbf3afba4004409f908dcc84e6",
+ "sha256:cc46fce86607580867bdc3361ad462bab9c222ef042d3da86f2fb333e1d916c5"
],
"index": "pypi",
- "version": "==1.57"
+ "markers": "python_version >= '3.8'",
+ "version": "==2.6.4"
+ },
+ "pydantic-core": {
+ "hashes": [
+ "sha256:00ee1c97b5364b84cb0bd82e9bbf645d5e2871fb8c58059d158412fee2d33d8a",
+ "sha256:0d32576b1de5a30d9a97f300cc6a3f4694c428d956adbc7e6e2f9cad279e45ed",
+ "sha256:0df446663464884297c793874573549229f9eca73b59360878f382a0fc085979",
+ "sha256:0f56ae86b60ea987ae8bcd6654a887238fd53d1384f9b222ac457070b7ac4cff",
+ "sha256:13dcc4802961b5f843a9385fc821a0b0135e8c07fc3d9949fd49627c1a5e6ae5",
+ "sha256:162e498303d2b1c036b957a1278fa0899d02b2842f1ff901b6395104c5554a45",
+ "sha256:1b662180108c55dfbf1280d865b2d116633d436cfc0bba82323554873967b340",
+ "sha256:1cac689f80a3abab2d3c0048b29eea5751114054f032a941a32de4c852c59cad",
+ "sha256:21b888c973e4f26b7a96491c0965a8a312e13be108022ee510248fe379a5fa23",
+ "sha256:287073c66748f624be4cef893ef9174e3eb88fe0b8a78dc22e88eca4bc357ca6",
+ "sha256:2a1ef6a36fdbf71538142ed604ad19b82f67b05749512e47f247a6ddd06afdc7",
+ "sha256:2a72fb9963cba4cd5793854fd12f4cfee731e86df140f59ff52a49b3552db241",
+ "sha256:2acca2be4bb2f2147ada8cac612f8a98fc09f41c89f87add7256ad27332c2fda",
+ "sha256:2f583bd01bbfbff4eaee0868e6fc607efdfcc2b03c1c766b06a707abbc856187",
+ "sha256:33809aebac276089b78db106ee692bdc9044710e26f24a9a2eaa35a0f9fa70ba",
+ "sha256:36fa178aacbc277bc6b62a2c3da95226520da4f4e9e206fdf076484363895d2c",
+ "sha256:4204e773b4b408062960e65468d5346bdfe139247ee5f1ca2a378983e11388a2",
+ "sha256:4384a8f68ddb31a0b0c3deae88765f5868a1b9148939c3f4121233314ad5532c",
+ "sha256:456855f57b413f077dff513a5a28ed838dbbb15082ba00f80750377eed23d132",
+ "sha256:49d5d58abd4b83fb8ce763be7794d09b2f50f10aa65c0f0c1696c677edeb7cbf",
+ "sha256:4ac6b4ce1e7283d715c4b729d8f9dab9627586dafce81d9eaa009dd7f25dd972",
+ "sha256:4df8a199d9f6afc5ae9a65f8f95ee52cae389a8c6b20163762bde0426275b7db",
+ "sha256:500960cb3a0543a724a81ba859da816e8cf01b0e6aaeedf2c3775d12ee49cade",
+ "sha256:519ae0312616026bf4cedc0fe459e982734f3ca82ee8c7246c19b650b60a5ee4",
+ "sha256:578114bc803a4c1ff9946d977c221e4376620a46cf78da267d946397dc9514a8",
+ "sha256:5c5cbc703168d1b7a838668998308018a2718c2130595e8e190220238addc96f",
+ "sha256:6162f8d2dc27ba21027f261e4fa26f8bcb3cf9784b7f9499466a311ac284b5b9",
+ "sha256:704d35ecc7e9c31d48926150afada60401c55efa3b46cd1ded5a01bdffaf1d48",
+ "sha256:716b542728d4c742353448765aa7cdaa519a7b82f9564130e2b3f6766018c9ec",
+ "sha256:72282ad4892a9fb2da25defeac8c2e84352c108705c972db82ab121d15f14e6d",
+ "sha256:7233d65d9d651242a68801159763d09e9ec96e8a158dbf118dc090cd77a104c9",
+ "sha256:732da3243e1b8d3eab8c6ae23ae6a58548849d2e4a4e03a1924c8ddf71a387cb",
+ "sha256:75b81e678d1c1ede0785c7f46690621e4c6e63ccd9192af1f0bd9d504bbb6bf4",
+ "sha256:75f76ee558751746d6a38f89d60b6228fa174e5172d143886af0f85aa306fd89",
+ "sha256:7ee8d5f878dccb6d499ba4d30d757111847b6849ae07acdd1205fffa1fc1253c",
+ "sha256:7f752826b5b8361193df55afcdf8ca6a57d0232653494ba473630a83ba50d8c9",
+ "sha256:86b3d0033580bd6bbe07590152007275bd7af95f98eaa5bd36f3da219dcd93da",
+ "sha256:8d62da299c6ecb04df729e4b5c52dc0d53f4f8430b4492b93aa8de1f541c4aac",
+ "sha256:8e47755d8152c1ab5b55928ab422a76e2e7b22b5ed8e90a7d584268dd49e9c6b",
+ "sha256:9091632a25b8b87b9a605ec0e61f241c456e9248bfdcf7abdf344fdb169c81cf",
+ "sha256:936e5db01dd49476fa8f4383c259b8b1303d5dd5fb34c97de194560698cc2c5e",
+ "sha256:99b6add4c0b39a513d323d3b93bc173dac663c27b99860dd5bf491b240d26137",
+ "sha256:9c865a7ee6f93783bd5d781af5a4c43dadc37053a5b42f7d18dc019f8c9d2bd1",
+ "sha256:a425479ee40ff021f8216c9d07a6a3b54b31c8267c6e17aa88b70d7ebd0e5e5b",
+ "sha256:a4b2bf78342c40b3dc830880106f54328928ff03e357935ad26c7128bbd66ce8",
+ "sha256:a6b1bb0827f56654b4437955555dc3aeeebeddc47c2d7ed575477f082622c49e",
+ "sha256:aaf09e615a0bf98d406657e0008e4a8701b11481840be7d31755dc9f97c44053",
+ "sha256:b1f6f5938d63c6139860f044e2538baeee6f0b251a1816e7adb6cbce106a1f01",
+ "sha256:b29eeb887aa931c2fcef5aa515d9d176d25006794610c264ddc114c053bf96fe",
+ "sha256:b3992a322a5617ded0a9f23fd06dbc1e4bd7cf39bc4ccf344b10f80af58beacd",
+ "sha256:b5b6079cc452a7c53dd378c6f881ac528246b3ac9aae0f8eef98498a75657805",
+ "sha256:b60cc1a081f80a2105a59385b92d82278b15d80ebb3adb200542ae165cd7d183",
+ "sha256:b926dd38db1519ed3043a4de50214e0d600d404099c3392f098a7f9d75029ff8",
+ "sha256:bd87f48924f360e5d1c5f770d6155ce0e7d83f7b4e10c2f9ec001c73cf475c99",
+ "sha256:bda1ee3e08252b8d41fa5537413ffdddd58fa73107171a126d3b9ff001b9b820",
+ "sha256:be0ec334369316fa73448cc8c982c01e5d2a81c95969d58b8f6e272884df0074",
+ "sha256:c6119dc90483a5cb50a1306adb8d52c66e447da88ea44f323e0ae1a5fcb14256",
+ "sha256:c9803edf8e29bd825f43481f19c37f50d2b01899448273b3a7758441b512acf8",
+ "sha256:c9bd22a2a639e26171068f8ebb5400ce2c1bc7d17959f60a3b753ae13c632975",
+ "sha256:cbcc558401de90a746d02ef330c528f2e668c83350f045833543cd57ecead1ad",
+ "sha256:cf6204fe865da605285c34cf1172879d0314ff267b1c35ff59de7154f35fdc2e",
+ "sha256:d33dd21f572545649f90c38c227cc8631268ba25c460b5569abebdd0ec5974ca",
+ "sha256:d89ca19cdd0dd5f31606a9329e309d4fcbb3df860960acec32630297d61820df",
+ "sha256:d8f99b147ff3fcf6b3cc60cb0c39ea443884d5559a30b1481e92495f2310ff2b",
+ "sha256:d937653a696465677ed583124b94a4b2d79f5e30b2c46115a68e482c6a591c8a",
+ "sha256:dcca5d2bf65c6fb591fff92da03f94cd4f315972f97c21975398bd4bd046854a",
+ "sha256:ded1c35f15c9dea16ead9bffcde9bb5c7c031bff076355dc58dcb1cb436c4721",
+ "sha256:e3e70c94a0c3841e6aa831edab1619ad5c511199be94d0c11ba75fe06efe107a",
+ "sha256:e56f8186d6210ac7ece503193ec84104da7ceb98f68ce18c07282fcc2452e76f",
+ "sha256:e7774b570e61cb998490c5235740d475413a1f6de823169b4cf94e2fe9e9f6b2",
+ "sha256:e7c6ed0dc9d8e65f24f5824291550139fe6f37fac03788d4580da0d33bc00c97",
+ "sha256:ec08be75bb268473677edb83ba71e7e74b43c008e4a7b1907c6d57e940bf34b6",
+ "sha256:ecdf6bf5f578615f2e985a5e1f6572e23aa632c4bd1dc67f8f406d445ac115ed",
+ "sha256:ed25e1835c00a332cb10c683cd39da96a719ab1dfc08427d476bce41b92531fc",
+ "sha256:f4cb85f693044e0f71f394ff76c98ddc1bc0953e48c061725e540396d5c8a2e1",
+ "sha256:f53aace168a2a10582e570b7736cc5bef12cae9cf21775e3eafac597e8551fbe",
+ "sha256:f651dd19363c632f4abe3480a7c87a9773be27cfe1341aef06e8759599454120",
+ "sha256:fc4ad7f7ee1a13d9cb49d8198cd7d7e3aa93e425f371a68235f784e99741561f",
+ "sha256:fee427241c2d9fb7192b658190f9f5fd6dfe41e02f3c1489d2ec1e6a5ab1e04a"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==2.16.3"
},
"pygments": {
"hashes": [
- "sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297",
- "sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717"
- ],
- "markers": "python_version >= '3.6'",
- "version": "==2.14.0"
- },
- "pyjwt": {
- "hashes": [
- "sha256:69285c7e31fc44f68a1feb309e948e0df53259d579295e6cfe2b1792329f05fd",
- "sha256:d83c3d892a77bbb74d3e1a2cfa90afaadb60945205d1095d9221f04466f64c14"
+ "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c",
+ "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"
],
"markers": "python_version >= '3.7'",
- "version": "==2.6.0"
- },
- "pynacl": {
- "hashes": [
- "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858",
- "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d",
- "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93",
- "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1",
- "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92",
- "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff",
- "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba",
- "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394",
- "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b",
- "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"
- ],
- "markers": "python_version >= '3.6'",
- "version": "==1.5.0"
- },
- "pyreadline3": {
- "hashes": [
- "sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae",
- "sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb"
- ],
- "index": "pypi",
- "version": "==3.4.1"
+ "version": "==2.17.2"
},
"pysocks": {
"hashes": [
@@ -403,81 +499,97 @@
},
"pytest": {
"hashes": [
- "sha256:892f933d339f068883b6fd5a459f03d85bfcb355e4981e146d2c7616c21fef71",
- "sha256:c4014eb40e10f11f355ad4e3c2fb2c6c6d1919c73f3b5a433de4708202cade59"
+ "sha256:2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7",
+ "sha256:ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044"
],
"index": "pypi",
- "version": "==7.2.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==8.1.1"
},
- "pytest-xdist": {
+ "pytest-html": {
"hashes": [
- "sha256:688da9b814370e891ba5de650c9327d1a9d861721a524eb917e620eec3e90291",
- "sha256:9feb9a18e1790696ea23e1434fa73b325ed4998b0e9fcb221f16fd1945e6df1b"
+ "sha256:70a01e8ae5800f4a074b56a4cb1025c8f4f9b038bba5fe31e3c98eb996686f07",
+ "sha256:c8152cea03bd4e9bee6d525573b67bbc6622967b72b9628dda0ea3e2a0b5dd71"
],
"index": "pypi",
- "version": "==3.0.2"
+ "markers": "python_version >= '3.8'",
+ "version": "==4.1.1"
+ },
+ "pytest-metadata": {
+ "hashes": [
+ "sha256:c8e0844db684ee1c798cfa38908d20d67d0463ecb6137c72e91f418558dd5f4b",
+ "sha256:d2a29b0355fbc03f168aa96d41ff88b1a3b44a3b02acbe491801c98a048017c8"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==3.1.1"
},
"python-dotenv": {
"hashes": [
- "sha256:1684eb44636dd462b66c3ee016599815514527ad99965de77f43e0944634a7e5",
- "sha256:b77d08274639e3d34145dfa6c7008e66df0f04b7be7a75fd0d5292c191d79045"
+ "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca",
+ "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"
],
"index": "pypi",
- "version": "==0.21.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==1.0.1"
},
"requests": {
"hashes": [
- "sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983",
- "sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349"
+ "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f",
+ "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"
],
"index": "pypi",
- "version": "==2.28.1"
+ "markers": "python_version >= '3.7'",
+ "version": "==2.31.0"
},
"rich": {
"hashes": [
- "sha256:12b1d77ee7edf251b741531323f0d990f5f570a4e7c054d0bfb59fb7981ad977",
- "sha256:3aa9eba7219b8c575c6494446a59f702552efe1aa261e7eeb95548fa586e1950"
+ "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222",
+ "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"
],
"index": "pypi",
- "version": "==13.0.0"
+ "markers": "python_full_version >= '3.7.0'",
+ "version": "==13.7.1"
},
"ruff": {
"hashes": [
- "sha256:055438a8b3ba5de45e2bf606f2aae0c6b928b5fba3928ad008c3019440f40547",
- "sha256:1f9121c656021720391b1ddba843813569910adf7c08b4c0ed325a4418ff4acd",
- "sha256:2939a8360c45a76373554e426aae691b4dc785e1fec647a74626cb8c7ea9429c",
- "sha256:29654591e610630f3c3614a4ae16524ac4e0baa2680cf7fc8676fd047cb3f1c6",
- "sha256:3bea5d101c5918ff5b4bf997565667e4ea040fa9ec0ef98f63d5168c84219519",
- "sha256:4185c328c78adabfb0417a3e7f86395f8d5b3bcfb0e763d6cdde7246c3d08b35",
- "sha256:53495204366169766b501332909a245072f5bc6a976972150f4df5fef961c119",
- "sha256:64826b12171080be6731d5d46494fbb51b90cf6593134bed5a76f424dc98a480",
- "sha256:6c21e3f42eb2d943d8437b365b7ec3c6d7d98a6c742cf08fd385671744737e5e",
- "sha256:71f87d4e841b18c8b2c1a1f9c04e822285cfac7786fcde4ae34d4b107ab0bc81",
- "sha256:7cd622e5d4f6aa356b4193840a2ca39cf6eb6d37de80c4e54a79f61f7c6b52ef",
- "sha256:a5765fe2434e85f6c058cdda2538e18a7c6c87594ee63f3999fe39f558ee7c63",
- "sha256:be96ee774c3e30c2aa0314bf73e3f5144e8e1d5971d13e60bf2f4a07112de6f9",
- "sha256:ce82f88a2e8ad530a8ae81886b4bcbb33ba12643e873bcd65c2193fdf96bf49f",
- "sha256:d4d8f3646f678c0148ddc1477151f14068d35609681663a916aae643ae724a0f",
- "sha256:e3b09c10cabae034babdc0864985101f417d26ad70fccdd0022593b9cbd8e0b6"
+ "sha256:3f3860057590e810c7ffea75669bdc6927bfd91e29b4baa9258fd48b540a4365",
+ "sha256:519cf6a0ebed244dce1dc8aecd3dc99add7a2ee15bb68cf19588bb5bf58e0488",
+ "sha256:60c870a7d46efcbc8385d27ec07fe534ac32f3b251e4fc44b3cbfd9e09609ef4",
+ "sha256:64abeed785dad51801b423fa51840b1764b35d6c461ea8caef9cf9e5e5ab34d9",
+ "sha256:6810563cc08ad0096b57c717bd78aeac888a1bfd38654d9113cb3dc4d3f74232",
+ "sha256:6fc14fa742e1d8f24910e1fff0bd5e26d395b0e0e04cc1b15c7c5e5fe5b4af91",
+ "sha256:986f2377f7cf12efac1f515fc1a5b753c000ed1e0a6de96747cdf2da20a1b369",
+ "sha256:98e98300056445ba2cc27d0b325fd044dc17fcc38e4e4d2c7711585bd0a958ed",
+ "sha256:af27ac187c0a331e8ef91d84bf1c3c6a5dea97e912a7560ac0cef25c526a4102",
+ "sha256:bb0acfb921030d00070539c038cd24bb1df73a2981e9f55942514af8b17be94e",
+ "sha256:c4fd98e85869603e65f554fdc5cddf0712e352fe6e61d29d5a6fe087ec82b76c",
+ "sha256:cf133dd744f2470b347f602452a88e70dadfbe0fcfb5fd46e093d55da65f82f7",
+ "sha256:cf187a7e7098233d0d0c71175375c5162f880126c4c716fa28a8ac418dcf3378",
+ "sha256:d3ee7880f653cc03749a3bfea720cf2a192e4f884925b0cf7eecce82f0ce5854",
+ "sha256:de0d5069b165e5a32b3c6ffbb81c350b1e3d3483347196ffdf86dc0ef9e37dd6",
+ "sha256:df52972138318bc7546d92348a1ee58449bc3f9eaf0db278906eb511889c4b50",
+ "sha256:f0f4484c6541a99862b693e13a151435a279b271cff20e37101116a21e2a1ad1"
],
"index": "pypi",
- "version": "==0.0.236"
+ "markers": "python_version >= '3.7'",
+ "version": "==0.3.4"
},
"selenium": {
"hashes": [
- "sha256:06a1c7d9f313130b21c3218ddd8852070d0e7419afdd31f96160cd576555a5ce",
- "sha256:3aefa14a28a42e520550c1cd0f29cf1d566328186ea63aa9a3e01fb265b5894d"
+ "sha256:5b4f49240d61e687a73f7968ae2517d403882aae3550eae2a229c745e619f1d9",
+ "sha256:d9dfd6d0b021d71d0a48b865fe7746490ba82b81e9c87b212360006629eb1853"
],
"index": "pypi",
- "version": "==4.7.2"
+ "markers": "python_version >= '3.8'",
+ "version": "==4.19.0"
},
"sniffio": {
"hashes": [
- "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101",
- "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"
+ "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2",
+ "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"
],
"markers": "python_version >= '3.7'",
- "version": "==1.3.0"
+ "version": "==1.3.1"
},
"sortedcontainers": {
"hashes": [
@@ -486,122 +598,58 @@
],
"version": "==2.4.0"
},
+ "syrupy": {
+ "hashes": [
+ "sha256:203e52f9cb9fa749cf683f29bd68f02c16c3bc7e7e5fe8f2fc59bdfe488ce133",
+ "sha256:37a835c9ce7857eeef86d62145885e10b3cb9615bc6abeb4ce404b3f18e1bb36"
+ ],
+ "index": "pypi",
+ "markers": "python_version < '4' and python_full_version >= '3.8.1'",
+ "version": "==4.6.1"
+ },
"trio": {
"hashes": [
- "sha256:ce68f1c5400a47b137c5a4de72c7c901bd4e7a24fbdebfe9b41de8c6c04eaacf",
- "sha256:f1dd0780a89bfc880c7c7994519cb53f62aacb2c25ff487001c0052bd721cdf0"
+ "sha256:9b41f5993ad2c0e5f62d0acca320ec657fdb6b2a2c22b8c7aed6caf154475c4e",
+ "sha256:e6458efe29cc543e557a91e614e2b51710eba2961669329ce9c862d50c6e8e81"
],
- "markers": "python_version >= '3.7'",
- "version": "==0.22.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==0.25.0"
},
"trio-websocket": {
"hashes": [
- "sha256:5b558f6e83cc20a37c3b61202476c5295d1addf57bd65543364e0337e37ed2bc",
- "sha256:a3d34de8fac26023eee701ed1e7bf4da9a8326b61a62934ec9e53b64970fd8fe"
+ "sha256:18c11793647703c158b1f6e62de638acada927344d534e3c7628eedcb746839f",
+ "sha256:520d046b0d030cf970b8b2b2e00c4c2245b3807853ecd44214acd33d74581638"
],
- "markers": "python_version >= '3.5'",
- "version": "==0.9.2"
+ "markers": "python_version >= '3.7'",
+ "version": "==0.11.1"
},
"types-requests": {
"hashes": [
- "sha256:091d4a5a33c1b4f20d8b1b952aa8fa27a6e767c44c3cf65e56580df0b05fd8a9",
- "sha256:a7df37cc6fb6187a84097da951f8e21d335448aa2501a6b0a39cbd1d7ca9ee2a"
+ "sha256:47872893d65a38e282ee9f277a4ee50d1b28bd592040df7d1fdaffdf3779937d",
+ "sha256:b1c1b66abfb7fa79aae09097a811c4aa97130eb8831c60e47aee4ca344731ca5"
],
"index": "pypi",
- "version": "==2.28.11.5"
- },
- "types-urllib3": {
- "hashes": [
- "sha256:ed6b9e8a8be488796f72306889a06a3fc3cb1aa99af02ab8afb50144d7317e49",
- "sha256:eec5556428eec862b1ac578fb69aab3877995a99ffec9e5a12cf7fbd0cc9daee"
- ],
- "version": "==1.26.25.4"
+ "markers": "python_version >= '3.8'",
+ "version": "==2.31.0.20240311"
},
"typing-extensions": {
"hashes": [
- "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa",
- "sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"
+ "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475",
+ "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"
],
- "markers": "python_version >= '3.7'",
- "version": "==4.4.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==4.10.0"
},
"urllib3": {
+ "extras": [
+ "socks"
+ ],
"hashes": [
- "sha256:076907bf8fd355cde77728471316625a4d2f7e713c125f51953bb5b3eecf4f72",
- "sha256:75edcdc2f7d85b137124a6c3c9fc3933cdeaa12ecb9a6a959f22797a0feca7e1"
- ],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
- "version": "==1.26.14"
- },
- "wrapt": {
- "hashes": [
- "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3",
- "sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b",
- "sha256:02b41b633c6261feff8ddd8d11c711df6842aba629fdd3da10249a53211a72c4",
- "sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2",
- "sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656",
- "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3",
- "sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff",
- "sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310",
- "sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a",
- "sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57",
- "sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069",
- "sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383",
- "sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe",
- "sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87",
- "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d",
- "sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b",
- "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907",
- "sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f",
- "sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0",
- "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28",
- "sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1",
- "sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853",
- "sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc",
- "sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3",
- "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3",
- "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164",
- "sha256:6e743de5e9c3d1b7185870f480587b75b1cb604832e380d64f9504a0535912d1",
- "sha256:709fe01086a55cf79d20f741f39325018f4df051ef39fe921b1ebe780a66184c",
- "sha256:7b7c050ae976e286906dd3f26009e117eb000fb2cf3533398c5ad9ccc86867b1",
- "sha256:7d2872609603cb35ca513d7404a94d6d608fc13211563571117046c9d2bcc3d7",
- "sha256:7ef58fb89674095bfc57c4069e95d7a31cfdc0939e2a579882ac7d55aadfd2a1",
- "sha256:80bb5c256f1415f747011dc3604b59bc1f91c6e7150bd7db03b19170ee06b320",
- "sha256:81b19725065dcb43df02b37e03278c011a09e49757287dca60c5aecdd5a0b8ed",
- "sha256:833b58d5d0b7e5b9832869f039203389ac7cbf01765639c7309fd50ef619e0b1",
- "sha256:88bd7b6bd70a5b6803c1abf6bca012f7ed963e58c68d76ee20b9d751c74a3248",
- "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c",
- "sha256:8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456",
- "sha256:8d649d616e5c6a678b26d15ece345354f7c2286acd6db868e65fcc5ff7c24a77",
- "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef",
- "sha256:9736af4641846491aedb3c3f56b9bc5568d92b0692303b5a305301a95dfd38b1",
- "sha256:988635d122aaf2bdcef9e795435662bcd65b02f4f4c1ae37fbee7401c440b3a7",
- "sha256:9cca3c2cdadb362116235fdbd411735de4328c61425b0aa9f872fd76d02c4e86",
- "sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4",
- "sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d",
- "sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d",
- "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8",
- "sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5",
- "sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471",
- "sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00",
- "sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68",
- "sha256:b9b7a708dd92306328117d8c4b62e2194d00c365f18eff11a9b53c6f923b01e3",
- "sha256:d1967f46ea8f2db647c786e78d8cc7e4313dbd1b0aca360592d8027b8508e24d",
- "sha256:d52a25136894c63de15a35bc0bdc5adb4b0e173b9c0d07a2be9d3ca64a332735",
- "sha256:d77c85fedff92cf788face9bfa3ebaa364448ebb1d765302e9af11bf449ca36d",
- "sha256:d79d7d5dc8a32b7093e81e97dad755127ff77bcc899e845f41bf71747af0c569",
- "sha256:dbcda74c67263139358f4d188ae5faae95c30929281bc6866d00573783c422b7",
- "sha256:ddaea91abf8b0d13443f6dac52e89051a5063c7d014710dcb4d4abb2ff811a59",
- "sha256:dee0ce50c6a2dd9056c20db781e9c1cfd33e77d2d569f5d1d9321c641bb903d5",
- "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb",
- "sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b",
- "sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f",
- "sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462",
- "sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015",
- "sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af"
- ],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
- "version": "==1.14.1"
+ "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d",
+ "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==2.2.1"
},
"wsproto": {
"hashes": [
@@ -613,11 +661,11 @@
},
"zipp": {
"hashes": [
- "sha256:83a28fcb75844b5c0cdaf5aa4003c2d728c77e05f5aeabe8e95e56727005fbaa",
- "sha256:a7a22e05929290a67401440b39690ae6563279bced5f314609d9d03798f56766"
+ "sha256:206f5a15f2af3dbaee80769fb7dc6f249695e940acca08dfb2a4769fe61e538b",
+ "sha256:2884ed22e7d8961de1c9a05142eb69a247f120291bc0206a00a7642f09b5b715"
],
- "markers": "python_version >= '3.7'",
- "version": "==3.11.0"
+ "markers": "python_version >= '3.8'",
+ "version": "==3.18.1"
}
},
"develop": {}
diff --git a/app-testing/README.md b/app-testing/README.md
index 19e8f937ee3..ec2e149ec18 100644
--- a/app-testing/README.md
+++ b/app-testing/README.md
@@ -11,31 +11,35 @@ Slices of the tests will be selected as candidates for automation and then perfo
## Steps
1. Have pyenv installed per [DEV_SETUP.md](../DEV_SETUP.md)
- 1. Install python 3.11
+ 1. Install python 3.12
2. Install the Opentrons application on your machine.
1.
2. This could also be done by building the installer on a branch and installing the App.
1. for Mac
1. `make -C app-shell dist-osx`
3. Install Chromedriver
- = 1. in the app-testing directory 1. `sudo ./ci-tools/mac_get_chromedriver.sh 21.3.1` per the version of electron in the repository root package.json (`/opentrons/package.json`) for electron 1. Windows `sudo .\ci-tools\windows_get_chromedriver.ps1 -version 21.3.1` 1. if you experience `wget: command not found` 1. brew install wget and try again 2. when you run `chromedriver --version` 1. It should work 2. It should output the below. The chromedriver version must match Electron version we build into the App. 1. ChromeDriver 106.0.5249.181 (7e86549ea18ccbc17d7b600e3cd4190f45db35c7-refs/heads/main@{#1045491})
+ 1. in the app-testing directory
+ 2. `sudo ./citools/{YOUR_OS}_get_chromedriver.sh 21.3.1`, passing the version of electron pinned in the repository root [package.json](/opentrons/package.json)
+ 3. windows example using sudo (scoop install sudo): `sudo .\citools\windows_get_chromedriver.ps1 21.3.1`
+ 4. run `chromedriver --version` to verify
4. Create .env from example.env `cp example.env .env`
1. Fill in values (if there are secrets)
2. Make sure the paths work on your machine
5. Install pipenv globally against the python version you are using in this module.
1. pip install -U pipenv
+ 2. note: the rest of the monorepo uses pipenv but pinned at `pipenv==2021.5.29`
6. In the app-testing directory (make, python, pipenv required on your path)
1. `make teardown`
2. `make setup`
7. Run all tests
1. `make test`
8. Run specific test(s)
- 1. `pipenv run python -m pytest -k test_labware_landing`
- 1. [See docs on pytest -k flag](https://docs.pytest.org/en/6.2.x/usage.html#specifying-tests-selecting-tests)
+ 1. `pipenv run python -m pytest -k protocol_analyze_test`
+ 1. [See docs on pytest -k flag](https://docs.pytest.org/en/7.4.x/usage.html#specifying-tests-selecting-tests)
## Tools
-python 3.11.0 - manage python with [pyenv](https://realpython.com/intro-to-pyenv)
+python 3.12.0 - manage python with [pyenv](https://realpython.com/intro-to-pyenv)
[pipenv](https://pipenv.pypa.io/en/latest/)
## Locator Tool
@@ -59,39 +63,78 @@ pipenv run python -i locators.py
> sometimes chromedriver does not cleanly exit.
> `pkill -x chromedriver`
-## Emulation
+## Gotchas
+
+- Only have 1 robot connected at once.
+ - Build locators like you have more than 1 to future proof.
+
+### Analyses Snapshot Test
+
+> The primary test in this module.
+
+The analyses snapshot test runs protocol analysis using the `TARGET` branch or tag, then compares the results against the snapshots found on the `TEST_SOURCE` branch or tag.
+
+#### Protocol Files Location
+
+The set of protocols to analyze is defined inside the `app-testing/.env` file, under the `APP_ANALYSIS_TEST_PROTOCOLS` and `APP_ANALYSIS_TEST_PROTOCOLS_WITH_OVERRIDES` variables.
+
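+A hypothetical sketch of the format (assuming both variables take comma-delimited protocol file stems, as the old analysis test did; the stems below are placeholders, not real files in this repo):
+
+```bash
+# app-testing/.env (illustrative only)
+APP_ANALYSIS_TEST_PROTOCOLS="Flex_S_v2_18_Example_One,OT2_S_v2_18_Example_Two"
+APP_ANALYSIS_TEST_PROTOCOLS_WITH_OVERRIDES="Flex_X_v2_18_Example_Overrides"
+```
+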
+#### Protocol Files with Overrides
+
+Sometimes we want a set of protocols that are only slightly different from each other. This is especially helpful for negative test cases. We can have a protocol that does different things depending on the value of a variable, and then override that variable to test different scenarios (see the sketch after the list below).
+
+The best way to learn this is by example. Look at:
+
+- `app-testing/files/protocols/Flex_X_v2_18_NO_PIPETTES_Overrides_BadTypesInRTP.py`
+- `app-testing/automation/data/protocols_with_overrides.py`
+- `make generate-protocols`
+- see the protocols generated in `app-testing/files/generated_protocols/`
+
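+A rough sketch of the idea (hypothetical names and values, not the exact convention the generator uses): the protocol exposes a module-level variable, and `make generate-protocols` writes one copy of the file per override value into `app-testing/files/generated_protocols/`.
+
+```python
+# Illustrative sketch only: a protocol whose behavior depends on an override variable.
+type_to_test = "wrong_type"  # the generator rewrites this line for each scenario
+
+requirements = {"robotType": "Flex", "apiLevel": "2.18"}
+
+
+def run(ctx):
+    if type_to_test == "wrong_type":
+        # deliberately misbehave so that analysis in the app raises an error
+        ...
+```
+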
+#### Analysis Snapshots Location
-We have made the choice to setup all test runs local and in CI against this emulator [config](./ci-tools/ot2_with_all_modules.yaml)
+Analysis snapshots are located inside the `app-testing/tests/__snapshots__/analyses_snapshot_test` folder. These files are generated; to update them, see below.
-To use locally setup the [emulator](https://github.com/Opentrons/opentrons-emulation)
+#### Running Analysis Snapshot Tests Locally
-run our expected config
+> Note: Passing `TARGET` can be done as below or in the `.env` file.
-```shell
-make run file_path=$MONOREPOPATH/app-testing/ci-tools/ot2_with_all_modules.yaml
+To run analysis snapshot tests locally, you must first build the Docker image by running the following command:
+
+```bash
+TARGET="" make build-opentrons-analysis
```
-ctrl-c to stop
+Then to run the analysis snapshot test, you can run the following command:
+
+```bash
+TARGET="" make snapshot-test
+```
+
+This will run the analyses snapshot test using the `TARGET` branch or tag, and compare the results against your local analysis snapshots located inside `app-testing/tests/__snapshots__/analyses_snapshot_test`.
+
+#### Updating Analysis Snapshots
-remove the containers (this resets calibration, stopping them does not)
+If you want to update the analysis snapshots, you can run the following command:
-```shell
-make remove file_path=$MONOREPOPATH/app-testing/ci-tools/ot2_with_all_modules.yaml
+```bash
+TARGET="" make snapshot-test-update
```
-## Gotchas
+This runs the analysis snapshot test using the `TARGET` branch or tag and updates the local analysis snapshots in `app-testing/tests/__snapshots__/analyses_snapshot_test`.
-- Only have 1 robot connected at once.
- - Build locators like you have more than 1 to future proof.
+#### Running Analysis Snapshot Tests on CI
+
+To run analysis snapshot tests on CI, run the `Analyses Snapshot Test` workflow dispatch job. This job requires two inputs: `TARGET` and `TEST_SOURCE`.
+
+Suppose you want to see whether the latest version of the `chore_release-v7.2.0` release branch has any differences compared to the current analysis snapshots.
+
+`TARGET` is `chore_release-v7.2.0`: "I want to run analysis against `chore_release-v7.2.0`."
-## Analysis Test
+`TEST_SOURCE` varies depending on what you want to compare against. The question to ask is: "Where are the snapshots you want to compare against?"
-The analysis test `pipenv run pytest -k test_analyses` is driven by the comma delimited string variable `APP_ANALYSIS_TEST_PROTOCOLS` in `.env`
-This allows us to run one or many.
+- If you want to compare against the current analysis snapshots for this release, then `TEST_SOURCE` is `chore_release-v7.2.0`.
+- If you want to compare against the previous release branch, then `TEST_SOURCE` is `chore_release-v7.1.0`.
+- If you want to compare your in-progress release branch against the previous release branch, then `TEST_SOURCE` is ``.
-### Adding protocols
+##### Run the Workflow Dispatch job
-1. add the protocol file named according to the naming convention in the files/protocols appropriate folder
-1. add the protocol stem to `protocol_files.py`
-1. add the protocol data as a property to `protocols.py`
-1. run `make print-protocols`
+- `gh workflow run 'Analyses Snapshot Test' --ref chore_release-v7.2.0 -f TARGET=chore_release-v7.2.0 -f TEST_SOURCE=chore_release-v7.1.0`
diff --git a/app-testing/analysis_results/.keep-me b/app-testing/analysis_results/.keep-me
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/app-testing/automation/data/protocol.py b/app-testing/automation/data/protocol.py
index e8ef4db3bf6..71c33ed0ce1 100644
--- a/app-testing/automation/data/protocol.py
+++ b/app-testing/automation/data/protocol.py
@@ -1,21 +1,24 @@
"""Model of a protocol for testing."""
+
+import hashlib
import os
from pathlib import Path
from typing import Literal, Optional
from pydantic import BaseModel, Field
-from automation.data.protocol_files import names
from automation.resources.robot_data import module_types
+GENERATED_PROTOCOLS_FOLDER = "generated_protocols"
+OVERRIDE_MONIKER = "_Override_"
+
class Protocol(BaseModel):
"""Model to describe a protocol used in a test."""
- file_name: names = Field(description="file name not including extension")
+ file_stem: str = Field(description="file name not including extension")
file_extension: Literal["json", "py"] = Field(description="file extension of the protocol")
- protocol_name: str = Field(description="the protocol name which will appear in the protocol name field in the app")
- robot: Literal["OT-2", "OT-3"] = Field(description="the robot type which will appear in the robot field in the app")
+ robot: Literal["OT2", "Flex"] = Field(description="the robot type which will appear in the robot field in the app")
app_error: bool = Field(description="will analysis with the app raise an error")
robot_error: bool = Field(description="will analysis with the robot raise an error")
app_analysis_error: Optional[str] = Field(description="the exact error shown in the app popout", default=None)
@@ -24,16 +27,28 @@ class Protocol(BaseModel):
instruments: Optional[list[str]] = Field(description="list of instruments that will show in the app", default=None)
modules: Optional[list[module_types]] = Field(description="list of modules that will show in the app", default=None)
description: Optional[str] = Field(description="Details about this protocol", default=None)
+ expected_test_failure: bool = Field(description="Is this test expected to fail", default=False)
+ expected_test_reason: Optional[str] = Field(description="Reason test is failing", default=None)
+ override_variable_name: Optional[str] = Field(description="The variable name to override", default=None)
+ override_value: Optional[str] = Field(description="The value of the override", default=None)
+ from_override: bool = Field(description="Is this protocol generated from an override", default=False)
@property
def file_path(self) -> Path:
"""Path of the file."""
+ if self.from_override:
+ return Path(
+ Path(__file__).resolve().parent.parent.parent,
+ os.getenv("FILES_FOLDER", "files"),
+ "protocols",
+ GENERATED_PROTOCOLS_FOLDER,
+ f"{self.file_stem}.{self.file_extension}",
+ )
return Path(
Path(__file__).resolve().parent.parent.parent,
os.getenv("FILES_FOLDER", "files"),
"protocols",
- f"{self.file_extension}",
- f"{self.file_name}.{self.file_extension}",
+ f"{self.file_stem}.{self.file_extension}",
)
@property
@@ -50,3 +65,11 @@ def labware_paths(self) -> list[Path]:
)
for p in self.custom_labware
]
+
+ @property
+ def short_sha(self) -> str:
+ """Short sha of the file."""
+ # Hash the string using SHA-1
+ hash_object = hashlib.sha1(self.file_stem.encode())
+ # Convert to hexadecimal and truncate
+ return hash_object.hexdigest()[:10]
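For orientation, here is a standalone sketch of the id scheme added above: `short_sha` is just the first ten hex characters of the SHA-1 of the protocol's file stem, giving each protocol a stable, filename-friendly identifier. The sketch mirrors the property rather than importing the repo's model.

```python
import hashlib


def short_sha(file_stem: str) -> str:
    """First 10 hex characters of the SHA-1 of the stem, mirroring Protocol.short_sha."""
    return hashlib.sha1(file_stem.encode()).hexdigest()[:10]


print(short_sha("Flex_X_v2_18_NO_PIPETTES_Overrides_BadTypesInRTP"))
```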
diff --git a/app-testing/automation/data/protocol_files.py b/app-testing/automation/data/protocol_files.py
deleted file mode 100644
index 277d31ea3dc..00000000000
--- a/app-testing/automation/data/protocol_files.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""Define the possible names of protocol files to use in testing."""
-from typing import Literal
-
-names = Literal[
- "OT2_P1000SLeft_None_6_1_SimpleTransfer",
- "OT2_P20S_P300M_HS_6_1_HS_WithCollision_Error",
- "OT2_P20S_P300M_NoMods_6_1_TransferReTransferLiquid",
- "OT2_P20SRight_None_6_1_SimpleTransferError",
- "OT2_P300M_P20S_HS_6_1_Smoke620release",
- "OT2_P300M_P20S_MM_HS_TD_TC_6_1_AllMods_Error",
- "OT2_P300M_P20S_MM_TM_TC1_5_2_6_PD40",
- "OT2_P300M_P20S_MM_TM_TC1_5_2_6_PD40Error",
- "OT2_P300M_P20S_NoMod_6_1_MixTransferManyLiquids",
- "OT2_P300M_P300S_HS_6_1_HS_NormalUseWithTransfer",
- "OT2_P300SG1_None_5_2_6_Gen1PipetteSimple",
- "OT2_P300SLeft_MM_TM_TM_5_2_6_MOAMTemps",
- "OT2_None_None_2_12_Python310SyntaxRobotAnalysisOnlyError",
- "OT2_None_None_2_13_PythonSyntaxError",
- "OT2_P10S_P300M_TC1_TM_MM_2_11_Swift",
- "OT2_P20S_None_2_7_Walkthrough",
- "OT2_P300M_P20S_None_2_12_FailOnRun",
- "OT2_P300M_P20S_TC_HS_TM_2_13_SmokeTestV3",
- "OT2_P300M_P20S_TC_HS_TM_2_14_SmokeTestV3",
- "OT2_P300M_P20S_TC_HS_TM_2_15_SmokeTestV3",
- "OT2_P300M_P20S_TC_MM_TM_2_13_Smoke620Release",
- "OT2_P300MLeft_MM_TM_2_4_Zymo",
- "OT2_P300S_Thermocycler_Moam_Error",
- "OT2_P300S_Twinning_Error",
- "OT2_P300SLeft_MM1_MM_2_2_EngageMagHeightFromBase",
- "OT2_P300SLeft_MM1_MM_TM_2_3_Mix",
- "OT3_P100_96_HS_TM_2_15_Quick_Zymo_RNA_Bacteria",
- "OT3_P1000_96_HS_TM_MM_2_15_ABR5_6_HDQ_Bacteria_ParkTips_96_channel",
- "OT3_P1000_96_HS_TM_MM_2_15_MagMaxRNACells96Ch",
- "OT3_P1000_96_HS_TM_TC_MM_2_15_ABR5_6_Illumina_DNA_Prep_96x_Head_PART_III",
- "OT3_P1000_96_None_2_15_ABR5_6_IDT_xGen_EZ_96x_Head_PART_I_III_ABR",
- "OT3_P1000MLeft_P50MRight_HS_MM_TC_TM_2_15_ABR3_Illumina_DNA_Enrichment_v4",
- "OT3_P1000MLeft_P50MRight_HS_MM_TC_TM_2_15_ABR3_Illumina_DNA_Enrichment",
- "OT3_P1000MLeft_P50MRight_HS_TM_MM_TC_2_15_ABR4_Illumina_DNA_Prep_24x",
- "OT3_P1000SRight_None_2_15_ABR_Simple_Normalize_Long_Right",
- "OT3_P50MLeft_P1000MRight_None_2_15_ABRKAPALibraryQuantLongv2",
-]
diff --git a/app-testing/automation/data/protocol_registry.py b/app-testing/automation/data/protocol_registry.py
new file mode 100644
index 00000000000..d22bd1620cc
--- /dev/null
+++ b/app-testing/automation/data/protocol_registry.py
@@ -0,0 +1,84 @@
+import os
+from pathlib import Path
+from typing import Optional
+
+from rich.console import Console
+from rich.panel import Panel
+
+from automation.data.protocol import Protocol
+from automation.data.protocol_with_overrides import ProtocolWithOverrides
+from automation.data.protocols import Protocols
+from automation.data.protocols_with_overrides import ProtocolsWithOverrides
+
+
+class ProtocolRegistry:
+ def __init__(self) -> None:
+ self.protocols: Protocols = Protocols()
+ self.protocols_with_overrides: ProtocolsWithOverrides = ProtocolsWithOverrides()
+ self.protocols_to_test: Optional[list[Protocol]] = self._what_protocols()
+
+ def _what_protocols(self) -> Optional[list[Protocol]]:
+ protocol_names: Optional[str] = os.environ.get("APP_ANALYSIS_TEST_PROTOCOLS")
+ override_protocol_names: Optional[str] = os.environ.get("APP_ANALYSIS_TEST_PROTOCOLS_WITH_OVERRIDES")
+ protocols_to_test: list[Protocol] = []
+ if protocol_names:
+ for protocol_name in [x.strip() for x in protocol_names.split(",")]:
+ protocol: Protocol = getattr(self.protocols, protocol_name) # raises
+ protocols_to_test.append(protocol)
+ if override_protocol_names:
+ for protocol_with_overrides__name in [x.strip() for x in override_protocol_names.split(",")]:
+ protocol_with_overrides: ProtocolWithOverrides = getattr(
+ self.protocols_with_overrides, protocol_with_overrides__name
+ ) # raises
+ if protocol_with_overrides.protocols is not None:
+ protocols_to_test.extend(protocol_with_overrides.protocols)
+ if protocols_to_test == []:
+ return None
+ return protocols_to_test
+
+ def all_defined_protocols(self) -> list[Protocol]:
+ return [getattr(self.protocols, prop) for prop in dir(self.protocols) if "__" not in prop]
+
+ def all_defined_protocols_with_overrides(self) -> list[ProtocolWithOverrides]:
+ return [getattr(self.protocols_with_overrides, prop) for prop in dir(self.protocols_with_overrides) if "__" not in prop]
+
+
+def all_stems() -> set[str]:
+ dir_path = Path(Path(__file__).resolve().parent.parent.parent, os.getenv("FILES_FOLDER", "files"), "protocols")
+ file_stems = {file.stem for file in dir_path.glob("*.py")}
+ return file_stems
+
+
+def main() -> None:
+ console = Console()
+ protocol_registry = ProtocolRegistry()
+ console.print("protocols for APP_ANALYSIS_TEST_PROTOCOLS")
+ console.print(Panel("Formatted for .env"))
+ regular_stems = sorted([p.file_stem for p in protocol_registry.all_defined_protocols()])
+ console.print('APP_ANALYSIS_TEST_PROTOCOLS="')
+ console.print(",\n".join(regular_stems))
+ console.print('"')
+ override_stems = sorted([p.file_stem for p in protocol_registry.all_defined_protocols_with_overrides()])
+ console.print('APP_ANALYSIS_TEST_PROTOCOLS_WITH_OVERRIDES="')
+ console.print(",\n".join(override_stems))
+ console.print('"')
+
+ all_files = all_stems()
+ filtered_stems = {stem for stem in all_files if "overrides" not in stem.lower()}
+ found_override_stems = {stem for stem in all_files if "overrides" in stem.lower()}
+ # Finding and displaying differences
+ differences = filtered_stems - set(regular_stems)
+ if differences:
+ console.print(f"Stems in actual files not in mapping: {differences}")
+ else:
+ console.print("No differences between files and mapping.")
+
+ differences = found_override_stems - set(override_stems)
+ if differences:
+ console.print(f"Override Stems in actual files not in mapping: {differences}")
+ else:
+ console.print("No differences between actual override protocols and the mapping.")
+
+
+if __name__ == "__main__":
+ main()
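The lookup in `_what_protocols` above is plain attribute reflection: split the comma-delimited env var into names, resolve each name with `getattr` against the container of defined protocols, and let a bad name raise. Below is a minimal, self-contained sketch of that pattern; the `DummyProtocols` container and its attribute value are hypothetical stand-ins.

```python
import os


class DummyProtocols:
    """Hypothetical stand-in for automation.data.protocols.Protocols."""

    OT2_S_v6_P1000S_None_SimpleTransfer = "a Protocol instance would live here"


os.environ["APP_ANALYSIS_TEST_PROTOCOLS"] = "OT2_S_v6_P1000S_None_SimpleTransfer"
names = [x.strip() for x in os.environ["APP_ANALYSIS_TEST_PROTOCOLS"].split(",")]
selected = [getattr(DummyProtocols, name) for name in names]  # AttributeError if a name is misspelled
print(selected)
```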
diff --git a/app-testing/automation/data/protocol_with_overrides.py b/app-testing/automation/data/protocol_with_overrides.py
new file mode 100644
index 00000000000..f1410ac5f32
--- /dev/null
+++ b/app-testing/automation/data/protocol_with_overrides.py
@@ -0,0 +1,56 @@
+"""Model of a protocol for testing."""
+
+from pathlib import Path
+from typing import Any, Optional
+
+from pydantic import Field
+
+from automation.data.protocol import GENERATED_PROTOCOLS_FOLDER, OVERRIDE_MONIKER, Protocol
+
+
+class ProtocolWithOverrides(Protocol):
+ """Model to describe a protocol that uses a base protocol to generate multiple Protocol classes"""
+
+ overrides: list[str] = Field(description="A list of test options to iterate on, suitable to concatenate in a filename")
+ protocols: Optional[list[Protocol]] = Field(description="A list of the generated protocols", default=None)
+
+ def __init__(self, **data: Any) -> None:
+ super().__init__(**data)
+ self.create_protocols()
+
+ def create_protocols(self) -> None:
+ with open(self.file_path, "r") as file:
+ original_content = file.read()
+ protocols: list[Protocol] = []
+ for override in self.overrides:
+ # Create the new file name with the override appended before the extension
+ new_file_stem: str = f"{self.file_stem}{OVERRIDE_MONIKER}{override}"
+ new_file_name = f"{new_file_stem}.{self.file_extension}"
+ # Create the full path for the new file
+ # all generated files live at files/protocols/$GENERATED_PROTOCOLS_FOLDER
+ new_file_path = Path(self.file_path.parent, GENERATED_PROTOCOLS_FOLDER, new_file_name)
+ # Prepare the override string to prepend
+ override_string = f'{self.override_variable_name} = "{override}"\n'
+ # Write the new file with the override string prepended
+ with open(new_file_path, "w") as new_file:
+ new_file.write(override_string + original_content)
+
+ protocol = Protocol(
+ file_stem=new_file_stem,
+ file_extension=self.file_extension,
+ robot=self.robot,
+ app_error=self.app_error,
+ robot_error=self.robot_error,
+ app_analysis_error=self.app_analysis_error,
+ robot_analysis_error=self.robot_analysis_error,
+ custom_labware=self.custom_labware,
+ instruments=self.instruments,
+ modules=self.modules,
+ description=self.description,
+ expected_test_failure=self.expected_test_failure,
+ expected_test_reason=self.expected_test_reason,
+ from_override=True,
+ override_value=override,
+ )
+ protocols.append(protocol)
+ self.protocols = protocols
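A condensed sketch of what `create_protocols` does for each override: read the base source once, prepend a single assignment, and write the copy into the generated-protocols folder. The sketch uses a temporary directory and a one-line fake protocol body so it stays self-contained.

```python
import tempfile
from pathlib import Path

OVERRIDE_MONIKER = "_Override_"  # matches the constant in protocol.py
base_stem, ext = "Flex_X_v2_18_NO_PIPETTES_Overrides_DefaultOutOfRangeRTP", "py"
override_variable_name = "type_to_test"
overrides = ["default_greater_than_maximum", "default_less_than_minimum"]

with tempfile.TemporaryDirectory() as tmp:
    base_path = Path(tmp, f"{base_stem}.{ext}")
    base_path.write_text('print("base protocol body")\n')  # stand-in for the real protocol
    generated_dir = Path(tmp, "generated_protocols")
    generated_dir.mkdir()
    original_content = base_path.read_text()
    for override in overrides:
        new_stem = f"{base_stem}{OVERRIDE_MONIKER}{override}"
        new_path = Path(generated_dir, f"{new_stem}.{ext}")
        new_path.write_text(f'{override_variable_name} = "{override}"\n' + original_content)
        print(new_path.name)
```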
diff --git a/app-testing/automation/data/protocols.py b/app-testing/automation/data/protocols.py
index 139739b784d..136dacfe481 100644
--- a/app-testing/automation/data/protocols.py
+++ b/app-testing/automation/data/protocols.py
@@ -1,331 +1,744 @@
"""Map for protocol files available for testing."""
+
from automation.data.protocol import Protocol
class Protocols:
"""Describe protocols available for testing."""
- # The name of the property must match the file_name property
+ # The name of the property must match the file_stem property
# and be in protocol_files.names
##########################################################################################################
# Begin JSON Protocols ###################################################################################
##########################################################################################################
- OT2_P1000SLeft_None_6_1_SimpleTransfer: Protocol = Protocol(
- file_name="OT2_P1000SLeft_None_6_1_SimpleTransfer",
- file_extension="json",
- protocol_name="Need Pipette",
- robot="OT-2",
- app_error=False,
- robot_error=False,
- )
- OT2_P20S_P300M_HS_6_1_HS_WithCollision_Error: Protocol = Protocol(
- file_name="OT2_P20S_P300M_HS_6_1_HS_WithCollision_Error",
+ OT2_S_v6_P1000S_None_SimpleTransfer: Protocol = Protocol(
+ file_stem="OT2_S_v6_P1000S_None_SimpleTransfer",
file_extension="json",
- protocol_name="HS Collision",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
- description="""This protocol gives an error in PD.8-Channel pipette cannot access labware8-Channel pipettes cannot access labware or tip racks to the left or right of a Heater-Shaker GEN1 module. Move labware to a different slot to access it with an 8-Channel pipette.If you export it anyway there are NOT analysis errors in the app side analysis.TODO on if there are robot side analysis errors but do not expect them?""", # noqa: E501
)
- OT2_P20S_P300M_NoMods_6_1_TransferReTransferLiquid: Protocol = Protocol(
- file_name="OT2_P20S_P300M_NoMods_6_1_TransferReTransferLiquid",
+
+ OT2_S_v6_P20S_P300M_TransferReTransferLiquid: Protocol = Protocol(
+ file_stem="OT2_S_v6_P20S_P300M_TransferReTransferLiquid",
file_extension="json",
- protocol_name="Transfer- Multi liquid (retransfer)",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
)
- OT2_P20SRight_None_6_1_SimpleTransferError: Protocol = Protocol(
- file_name="OT2_P20SRight_None_6_1_SimpleTransferError",
+ OT2_X_v6_P20S_None_SimpleTransfer: Protocol = Protocol(
+ file_stem="OT2_X_v6_P20S_None_SimpleTransfer",
file_extension="json",
- protocol_name="Have Pipette",
- robot="OT-2",
+ robot="OT2",
app_error=True,
robot_error=True,
app_analysis_error="Cannot aspirate more than pipette max volume",
robot_analysis_error="?",
)
- OT2_P300M_P20S_HS_6_1_Smoke620release: Protocol = Protocol(
- file_name="OT2_P300M_P20S_HS_6_1_Smoke620release",
+
+ OT2_S_v6_P300M_P20S_HS_Smoke620release: Protocol = Protocol(
+ file_stem="OT2_S_v6_P300M_P20S_HS_Smoke620release",
file_extension="json",
- protocol_name="H/S normal use",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
)
- OT2_P300M_P20S_MM_HS_TD_TC_6_1_AllMods_Error: Protocol = Protocol(
- file_name="OT2_P300M_P20S_MM_HS_TD_TC_6_1_AllMods_Error",
+
+ OT2_X_v6_P300M_P20S_HS_MM_TM_TC_AllMods: Protocol = Protocol(
+ file_stem="OT2_X_v6_P300M_P20S_HS_MM_TM_TC_AllMods",
file_extension="json",
- protocol_name="All mods",
- robot="OT-2",
+ robot="OT2",
app_error=True,
robot_error=True,
app_analysis_error="Heater-Shaker cannot open its labware latch while it is shaking.",
robot_analysis_error="?",
)
- OT2_P300M_P20S_MM_TM_TC1_5_2_6_PD40: Protocol = Protocol(
- file_name="OT2_P300M_P20S_MM_TM_TC1_5_2_6_PD40",
+
+ OT2_S_v4_P300M_P20S_MM_TM_TC1_PD40: Protocol = Protocol(
+ file_stem="OT2_S_v4_P300M_P20S_MM_TM_TC1_PD40",
file_extension="json",
- protocol_name="script_pur_sample_1",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
)
- OT2_P300M_P20S_MM_TM_TC1_5_2_6_PD40Error: Protocol = Protocol(
- file_name="OT2_P300M_P20S_MM_TM_TC1_5_2_6_PD40Error",
+
+ OT2_X_v4_P300M_P20S_MM_TC1_TM_e2eTests: Protocol = Protocol(
+ file_stem="OT2_X_v4_P300M_P20S_MM_TC1_TM_e2eTests",
file_extension="json",
- protocol_name="script_pur_sample_1",
- robot="OT-2",
+ robot="OT2",
app_error=True,
robot_error=True,
app_analysis_error="Cannot aspirate more than pipette max volume",
robot_analysis_error="?",
)
- OT2_P300M_P20S_NoMod_6_1_MixTransferManyLiquids: Protocol = Protocol(
- file_name="OT2_P300M_P20S_NoMod_6_1_MixTransferManyLiquids",
+
+ OT2_S_v6_P300M_P20S_MixTransferManyLiquids: Protocol = Protocol(
+ file_stem="OT2_S_v6_P300M_P20S_MixTransferManyLiquids",
file_extension="json",
- protocol_name="Mix/transfer- several liquids",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
)
- OT2_P300M_P300S_HS_6_1_HS_NormalUseWithTransfer: Protocol = Protocol(
- file_name="OT2_P300M_P300S_HS_6_1_HS_NormalUseWithTransfer",
+
+ OT2_S_v6_P300M_P300S_HS_HS_NormalUseWithTransfer: Protocol = Protocol(
+ file_stem="OT2_S_v6_P300M_P300S_HS_HS_NormalUseWithTransfer",
file_extension="json",
- protocol_name="H/S normal use",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
)
- OT2_P300SG1_None_5_2_6_Gen1PipetteSimple: Protocol = Protocol(
- file_name="OT2_P300SG1_None_5_2_6_Gen1PipetteSimple",
+
+ OT2_S_v3_P300SGen1_None_Gen1PipetteSimple: Protocol = Protocol(
+ file_stem="OT2_S_v3_P300SGen1_None_Gen1PipetteSimple",
file_extension="json",
- protocol_name="gen1 pipette",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
)
- OT2_P300SLeft_MM_TM_TM_5_2_6_MOAMTemps: Protocol = Protocol(
- file_name="OT2_P300SLeft_MM_TM_TM_5_2_6_MOAMTemps",
+
+ OT2_S_v4_P300S_None_MM_TM_TM_MOAMTemps: Protocol = Protocol(
+ file_stem="OT2_S_v4_P300S_None_MM_TM_TM_MOAMTemps",
file_extension="json",
- protocol_name="MoaM",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
)
+ Flex_X_v8_P1000_96_HS_GRIP_TC_TM_GripperCollisionWithTips: Protocol = Protocol(
+ file_stem="Flex_X_v8_P1000_96_HS_GRIP_TC_TM_GripperCollisionWithTips",
+ file_extension="json",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="Gripper collision with tips",
+ )
+
############################################################################################################
# Begin Python Protocols ###################################################################################
############################################################################################################
- OT2_None_None_2_12_Python310SyntaxRobotAnalysisOnlyError: Protocol = Protocol(
- file_name="OT2_None_None_2_12_Python310SyntaxRobotAnalysisOnlyError",
+ OT2_S_v2_12_NO_PIPETTES_Python310SyntaxRobotAnalysisOnlyError: Protocol = Protocol(
+ file_stem="OT2_S_v2_12_NO_PIPETTES_Python310SyntaxRobotAnalysisOnlyError",
file_extension="py",
- protocol_name="🛠 3.10 only Python 🛠",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=True,
robot_analysis_error="?",
)
- OT2_None_None_2_13_PythonSyntaxError: Protocol = Protocol(
- file_name="OT2_None_None_2_13_PythonSyntaxError",
+
+ OT2_X_v2_13_None_None_PythonSyntaxError: Protocol = Protocol(
+ file_stem="OT2_X_v2_13_None_None_PythonSyntaxError",
file_extension="py",
- protocol_name="bad import",
- robot="OT-2",
+ robot="OT2",
app_error=True,
robot_error=True,
app_analysis_error="No module named 'superspecialmagic'",
robot_analysis_error="?",
)
- OT2_P10S_P300M_TC1_TM_MM_2_11_Swift: Protocol = Protocol(
- file_name="OT2_P10S_P300M_TC1_TM_MM_2_11_Swift",
+
+ OT2_S_v2_11_P10S_P300M_MM_TC1_TM_Swift: Protocol = Protocol(
+ file_stem="OT2_S_v2_11_P10S_P300M_MM_TC1_TM_Swift",
file_extension="py",
- protocol_name="OT2_P10S_P300M_TC1_TM_MM_2_11_Swift.py",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
)
- OT2_P20S_None_2_7_Walkthrough: Protocol = Protocol(
- file_name="OT2_P20S_None_2_7_Walkthrough",
+
+ OT2_S_v2_7_P20S_None_Walkthrough: Protocol = Protocol(
+ file_stem="OT2_S_v2_7_P20S_None_Walkthrough",
+ file_extension="py",
+ robot="OT2",
+ app_error=False,
+ robot_error=False,
+ )
+
+ OT2_S_v2_16_P300M_P20S_aspirateDispenseMix0Volume: Protocol = Protocol(
+ file_stem="OT2_S_v2_16_P300M_P20S_aspirateDispenseMix0Volume",
file_extension="py",
- protocol_name="OT-2 Guided Walk-through",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
)
- OT2_P300M_P20S_None_2_12_FailOnRun: Protocol = Protocol(
- file_name="OT2_P300M_P20S_None_2_12_FailOnRun",
+
+ OT2_S_v2_12_P300M_P20S_FailOnRun: Protocol = Protocol(
+ file_stem="OT2_S_v2_12_P300M_P20S_FailOnRun",
file_extension="py",
- protocol_name="Will fail on run",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
)
- OT2_P300M_P20S_TC_HS_TM_2_13_SmokeTestV3: Protocol = Protocol(
- file_name="OT2_P300M_P20S_TC_HS_TM_2_13_SmokeTestV3",
+ OT2_S_v2_13_P300M_P20S_HS_TC_TM_SmokeTestV3: Protocol = Protocol(
+ file_stem="OT2_S_v2_13_P300M_P20S_HS_TC_TM_SmokeTestV3",
file_extension="py",
- protocol_name="🛠️ 2.13 Smoke Test V3 🪄",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
custom_labware=["cpx_4_tuberack_100ul"],
)
- OT2_P300M_P20S_TC_HS_TM_2_14_SmokeTestV3: Protocol = Protocol(
- file_name="OT2_P300M_P20S_TC_HS_TM_2_14_SmokeTestV3",
+
+ OT2_S_v2_14_P300M_P20S_HS_TC_TM_SmokeTestV3: Protocol = Protocol(
+ file_stem="OT2_S_v2_14_P300M_P20S_HS_TC_TM_SmokeTestV3",
file_extension="py",
- protocol_name="🛠️ 2.14 Smoke Test V3 🪄",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
custom_labware=["cpx_4_tuberack_100ul"],
)
- OT2_P300M_P20S_TC_HS_TM_2_15_SmokeTestV3: Protocol = Protocol(
- file_name="OT2_P300M_P20S_TC_HS_TM_2_15_SmokeTestV3",
+
+ OT2_S_v2_15_P300M_P20S_HS_TC_TM_SmokeTestV3: Protocol = Protocol(
+ file_stem="OT2_S_v2_15_P300M_P20S_HS_TC_TM_SmokeTestV3",
file_extension="py",
- protocol_name="🛠️ 2.15 Smoke Test V3 🪄",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
custom_labware=["cpx_4_tuberack_100ul"],
)
- OT2_P300M_P20S_TC_MM_TM_2_13_Smoke620Release: Protocol = Protocol(
- file_name="OT2_P300M_P20S_TC_MM_TM_2_13_Smoke620Release",
+
+ OT2_S_v2_16_P300M_P20S_HS_TC_TM_SmokeTestV3: Protocol = Protocol(
+ file_stem="OT2_S_v2_16_P300M_P20S_HS_TC_TM_SmokeTestV3",
file_extension="py",
- protocol_name="🛠 Logo-Modules-CustomLabware 🛠",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
custom_labware=["cpx_4_tuberack_100ul"],
)
- OT2_P300MLeft_MM_TM_2_4_Zymo: Protocol = Protocol(
- file_name="OT2_P300MLeft_MM_TM_2_4_Zymo",
+
+ OT2_S_v2_17_P300M_P20S_HS_TC_TM_SmokeTestV3: Protocol = Protocol(
+ file_stem="OT2_S_v2_17_P300M_P20S_HS_TC_TM_SmokeTestV3",
file_extension="py",
- protocol_name="Zymo Direct-zol96 Magbead RNA",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
)
- OT2_P300S_Thermocycler_Moam_Error: Protocol = Protocol(
- file_name="OT2_P300S_Thermocycler_Moam_Error",
+
+ OT2_S_v2_13_P300M_P20S_MM_TC_TM_Smoke620Release: Protocol = Protocol(
+ file_stem="OT2_S_v2_13_P300M_P20S_MM_TC_TM_Smoke620Release",
file_extension="py",
- protocol_name="OT2_P300S_Thermocycler_Moam_Error.py",
- robot="OT-2",
+ robot="OT2",
+ app_error=False,
+ robot_error=False,
+ custom_labware=["cpx_4_tuberack_100ul"],
+ )
+
+ OT2_S_v2_4_P300M_None_MM_TM_Zymo: Protocol = Protocol(
+ file_stem="OT2_S_v2_4_P300M_None_MM_TM_Zymo",
+ file_extension="py",
+ robot="OT2",
+ app_error=False,
+ robot_error=False,
+ )
+
+ OT2_X_v2_11_P300S_TC1_TC2_ThermocyclerMoamError: Protocol = Protocol(
+ file_stem="OT2_X_v2_11_P300S_TC1_TC2_ThermocyclerMoamError",
+ file_extension="py",
+ robot="OT2",
app_error=True,
robot_error=True,
app_analysis_error="DeckConflictError [line 19]: thermocyclerModuleV2 in slot 7 prevents thermocyclerModuleV1 from using slot 7.", # noqa: E501
robot_analysis_error="?",
)
- OT2_P300S_Twinning_Error: Protocol = Protocol(
- file_name="OT2_P300S_Twinning_Error",
+
+ OT2_X_v2_7_P300S_TwinningError: Protocol = Protocol(
+ file_stem="OT2_X_v2_7_P300S_TwinningError",
file_extension="py",
- protocol_name="My Protocol",
- robot="OT-2",
+ robot="OT2",
app_error=True,
robot_error=True,
app_analysis_error="AttributeError [line 24]: 'InstrumentContext' object has no attribute 'pair_with'",
robot_analysis_error="?",
)
- OT2_P300SLeft_MM1_MM_2_2_EngageMagHeightFromBase: Protocol = Protocol(
- file_name="OT2_P300SLeft_MM1_MM_2_2_EngageMagHeightFromBase",
+
+ OT2_S_v2_2_P300S_None_MM1_MM2_EngageMagHeightFromBase: Protocol = Protocol(
+ file_stem="OT2_S_v2_2_P300S_None_MM1_MM2_EngageMagHeightFromBase",
+ file_extension="py",
+ robot="OT2",
+ app_error=False,
+ robot_error=False,
+ )
+
+ OT2_S_v2_3_P300S_None_MM1_MM2_TM_Mix: Protocol = Protocol(
+ file_stem="OT2_S_v2_3_P300S_None_MM1_MM2_TM_Mix",
+ file_extension="py",
+ robot="OT2",
+ app_error=False,
+ robot_error=False,
+ )
+
+ OT2_S_v2_16_P300M_P20S_HS_TC_TM_aspirateDispenseMix0Volume: Protocol = Protocol(
+ file_stem="OT2_S_v2_16_P300M_P20S_HS_TC_TM_aspirateDispenseMix0Volume",
+ file_extension="py",
+ robot="OT2",
+ app_error=False,
+ robot_error=False,
+ )
+
+ OT2_S_v2_15_P300M_P20S_HS_TC_TM_dispense_changes: Protocol = Protocol(
+ file_stem="OT2_S_v2_15_P300M_P20S_HS_TC_TM_dispense_changes",
+ file_extension="py",
+ robot="OT2",
+ app_error=False,
+ robot_error=False,
+ )
+
+ OT2_S_v2_16_P300M_P20S_HS_TC_TM_dispense_changes: Protocol = Protocol(
+ file_stem="OT2_S_v2_16_P300M_P20S_HS_TC_TM_dispense_changes",
+ file_extension="py",
+ robot="OT2",
+ app_error=False,
+ robot_error=False,
+ )
+
+ OT2_S_v2_17_P300M_P20S_HS_TC_TM_dispense_changes: Protocol = Protocol(
+ file_stem="OT2_S_v2_17_P300M_P20S_HS_TC_TM_dispense_changes",
+ file_extension="py",
+ robot="OT2",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="ValueError [line 15]: Cannot dispense more than pipette max volume", # noqa: E501
+ )
+
+ OT2_S_v2_14_NO_PIPETTES_TC_VerifyThermocyclerLoadedSlots: Protocol = Protocol(
+ file_stem="OT2_S_v2_14_NO_PIPETTES_TC_VerifyThermocyclerLoadedSlots",
+ file_extension="py",
+ robot="OT2",
+ app_error=False,
+ robot_error=False,
+ )
+
+ OT2_S_v2_15_NO_PIPETTES_TC_VerifyThermocyclerLoadedSlots: Protocol = Protocol(
+ file_stem="OT2_S_v2_15_NO_PIPETTES_TC_VerifyThermocyclerLoadedSlots",
+ file_extension="py",
+ robot="OT2",
+ app_error=False,
+ robot_error=False,
+ )
+
+ OT2_S_v2_16_NO_PIPETTES_TC_VerifyThermocyclerLoadedSlots: Protocol = Protocol(
+ file_stem="OT2_S_v2_16_NO_PIPETTES_TC_VerifyThermocyclerLoadedSlots",
+ file_extension="py",
+ robot="OT2",
+ app_error=False,
+ robot_error=False,
+ )
+
+ OT2_S_v2_17_NO_PIPETTES_TC_VerifyThermocyclerLoadedSlots: Protocol = Protocol(
+ file_stem="OT2_S_v2_17_NO_PIPETTES_TC_VerifyThermocyclerLoadedSlots",
file_extension="py",
- protocol_name="OT2_P300SLeft_MM1_MM_2_2_EngageMagHeightFromBase.py",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
)
- OT2_P300SLeft_MM1_MM_TM_2_3_Mix: Protocol = Protocol(
- file_name="OT2_P300SLeft_MM1_MM_TM_2_3_Mix",
+
+ OT2_X_v2_16_None_None_HS_HeaterShakerConflictWithTrashBin1: Protocol = Protocol(
+ file_stem="OT2_X_v2_16_None_None_HS_HeaterShakerConflictWithTrashBin1",
+ file_extension="py",
+ robot="OT2",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="DeckConflictError [line 19]: trash_bin in slot 12 prevents heater_shaker in slot 11 from using slot 11.", # noqa: E501
+ )
+
+ OT2_X_v2_16_None_None_HS_HeaterShakerConflictWithTrashBin2: Protocol = Protocol(
+ file_stem="OT2_X_v2_16_None_None_HS_HeaterShakerConflictWithTrashBin2",
+ file_extension="py",
+ robot="OT2",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="DeckConflictError [line 19]: trash_bin in slot 12 prevents heater_shaker in slot 11 from using slot 11.", # noqa: E501
+ )
+
+ OT2_S_v2_16_NO_PIPETTES_verifyDoesNotDeadlock: Protocol = Protocol(
+ file_stem="OT2_S_v2_16_NO_PIPETTES_verifyDoesNotDeadlock",
file_extension="py",
- protocol_name="OT2_P300SLeft_MM1_MM_TM_2_3_Mix.py",
- robot="OT-2",
+ robot="OT2",
app_error=False,
robot_error=False,
)
- OT3_P100_96_HS_TM_2_15_Quick_Zymo_RNA_Bacteria: Protocol = Protocol(
- file_name="OT3_P100_96_HS_TM_2_15_Quick_Zymo_RNA_Bacteria",
+
+ OT2_S_v2_16_P300S_None_verifyNoFloatingPointErrorInPipetting: Protocol = Protocol(
+ file_stem="OT2_S_v2_16_P300S_None_verifyNoFloatingPointErrorInPipetting",
file_extension="py",
- protocol_name="Quick Zymo Magbead RNA Extraction with Lysis: Bacteria 96 Channel Deletion Test",
- robot="OT-3",
+ robot="OT2",
+ app_error=False,
+ robot_error=False,
+ )
+
+ Flex_S_v2_15_P1000_96_GRIP_HS_TM_QuickZymoMagbeadRNAExtraction: Protocol = Protocol(
+ file_stem="Flex_S_v2_15_P1000_96_GRIP_HS_TM_QuickZymoMagbeadRNAExtraction",
+ file_extension="py",
+ robot="Flex",
app_error=False,
robot_error=False,
custom_labware=["opentrons_ot3_96_tiprack_1000ul_rss"],
)
- OT3_P1000_96_HS_TM_MM_2_15_ABR5_6_HDQ_Bacteria_ParkTips_96_channel: Protocol = Protocol(
- file_name="OT3_P1000_96_HS_TM_MM_2_15_ABR5_6_HDQ_Bacteria_ParkTips_96_channel",
+ Flex_S_v2_15_P1000_96_GRIP_HS_MB_TM_OmegaHDQDNAExtraction: Protocol = Protocol(
+ file_stem="Flex_S_v2_15_P1000_96_GRIP_HS_MB_TM_OmegaHDQDNAExtraction",
file_extension="py",
- protocol_name="Omega HDQ DNA Extraction: Bacteria 96 FOR ABR TESTING",
- robot="OT-3",
+ robot="Flex",
app_error=False,
robot_error=False,
custom_labware=["opentrons_ot3_96_tiprack_1000ul_rss"],
)
- OT3_P1000_96_HS_TM_MM_2_15_MagMaxRNACells96Ch: Protocol = Protocol(
- file_name="OT3_P1000_96_HS_TM_MM_2_15_MagMaxRNACells96Ch",
+ Flex_S_v2_15_P1000_96_GRIP_HS_MB_TM_MagMaxRNAExtraction: Protocol = Protocol(
+ file_stem="Flex_S_v2_15_P1000_96_GRIP_HS_MB_TM_MagMaxRNAExtraction",
file_extension="py",
- protocol_name="MagMax RNA Extraction: Cells 96 ABR TESTING",
- robot="OT-3",
+ robot="Flex",
app_error=False,
robot_error=False,
custom_labware=["opentrons_ot3_96_tiprack_200ul_rss"],
)
- OT3_P1000_96_HS_TM_TC_MM_2_15_ABR5_6_Illumina_DNA_Prep_96x_Head_PART_III: Protocol = Protocol(
- file_name="OT3_P1000_96_HS_TM_TC_MM_2_15_ABR5_6_Illumina_DNA_Prep_96x_Head_PART_III",
+ Flex_S_v2_15_P1000_96_GRIP_HS_MB_TC_TM_IlluminaDNAPrep96PART3: Protocol = Protocol(
+ file_stem="Flex_S_v2_15_P1000_96_GRIP_HS_MB_TC_TM_IlluminaDNAPrep96PART3",
file_extension="py",
- protocol_name="Illumina DNA Prep 96x Head PART III",
- robot="OT-3",
+ robot="Flex",
app_error=False,
robot_error=False,
custom_labware=["opentrons_ot3_96_tiprack_200ul_rss", "opentrons_ot3_96_tiprack_50ul_rss"],
)
- OT3_P1000_96_None_2_15_ABR5_6_IDT_xGen_EZ_96x_Head_PART_I_III_ABR: Protocol = Protocol(
- file_name="OT3_P1000_96_None_2_15_ABR5_6_IDT_xGen_EZ_96x_Head_PART_I_III_ABR",
+ Flex_S_v2_15_P1000_96_GRIP_HS_MB_TC_TM_IDTXgen96Part1to3: Protocol = Protocol(
+ file_stem="Flex_S_v2_15_P1000_96_GRIP_HS_MB_TC_TM_IDTXgen96Part1to3",
file_extension="py",
- protocol_name="IDT xGen EZ 96x Head PART I-III ABR",
- robot="OT-3",
+ robot="Flex",
app_error=False,
robot_error=False,
custom_labware=["opentrons_ot3_96_tiprack_50ul_rss", "opentrons_ot3_96_tiprack_200ul_rss"],
)
- OT3_P1000MLeft_P50MRight_HS_MM_TC_TM_2_15_ABR3_Illumina_DNA_Enrichment_v4: Protocol = Protocol(
- file_name="OT3_P1000MLeft_P50MRight_HS_MM_TC_TM_2_15_ABR3_Illumina_DNA_Enrichment_v4",
+ Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAEnrichmentv4: Protocol = Protocol(
+ file_stem="Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAEnrichmentv4",
file_extension="py",
- protocol_name="Illumina DNA Enrichment v4",
- robot="OT-3",
+ robot="Flex",
app_error=False,
robot_error=False,
)
- OT3_P1000MLeft_P50MRight_HS_MM_TC_TM_2_15_ABR3_Illumina_DNA_Enrichment: Protocol = Protocol(
- file_name="OT3_P1000MLeft_P50MRight_HS_MM_TC_TM_2_15_ABR3_Illumina_DNA_Enrichment",
+ Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAEnrichment: Protocol = Protocol(
+ file_stem="Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAEnrichment",
file_extension="py",
- protocol_name="Illumina DNA Enrichment",
- robot="OT-3",
+ robot="Flex",
app_error=False,
robot_error=False,
)
- OT3_P1000MLeft_P50MRight_HS_TM_MM_TC_2_15_ABR4_Illumina_DNA_Prep_24x: Protocol = Protocol(
- file_name="OT3_P1000MLeft_P50MRight_HS_TM_MM_TC_2_15_ABR4_Illumina_DNA_Prep_24x",
+ Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAPrep24x: Protocol = Protocol(
+ file_stem="Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAPrep24x",
file_extension="py",
- protocol_name="Illumina DNA Prep 24x",
- robot="OT-3",
+ robot="Flex",
app_error=False,
robot_error=False,
)
- OT3_P1000SRight_None_2_15_ABR_Simple_Normalize_Long_Right: Protocol = Protocol(
- file_name="OT3_P1000SRight_None_2_15_ABR_Simple_Normalize_Long_Right",
+ Flex_S_v2_15_P1000S_None_SimpleNormalizeLongRight: Protocol = Protocol(
+ file_stem="Flex_S_v2_15_P1000S_None_SimpleNormalizeLongRight",
file_extension="py",
- protocol_name="OT3 ABR Simple Normalize Long",
- robot="OT-3",
+ robot="Flex",
app_error=False,
robot_error=False,
custom_labware=["opentrons_ot3_96_tiprack_200ul_rss"],
)
- OT3_P50MLeft_P1000MRight_None_2_15_ABRKAPALibraryQuantLongv2: Protocol = Protocol(
- file_name="OT3_P50MLeft_P1000MRight_None_2_15_ABRKAPALibraryQuantLongv2",
+ Flex_S_v2_15_P50M_P1000M_KAPALibraryQuantLongv2: Protocol = Protocol(
+ file_stem="Flex_S_v2_15_P50M_P1000M_KAPALibraryQuantLongv2",
+ file_extension="py",
+ robot="Flex",
+ app_error=False,
+ robot_error=False,
+ )
+
+ Flex_X_v2_16_NO_PIPETTES_TrashBinInCol2: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_NO_PIPETTES_TrashBinInCol2",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="InvalidTrashBinLocationError [line 15]: Invalid location for trash bin: C2. Valid slots: Any slot in column 1 or 3.", # noqa: E501
+ )
+
+ Flex_X_v2_16_NO_PIPETTES_TrashBinInStagingAreaCol3: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_NO_PIPETTES_TrashBinInStagingAreaCol3",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="ProtocolCommandFailedError [line 21]: Error 4000 GENERAL_ERROR (ProtocolCommandFailedError): IncompatibleAddressableAreaError: Cannot use Trash Bin in C3, not compatible with one or more of the following fixtures: Slot C4", # noqa: E501
+ expected_test_failure=True,
+ expected_test_reason="Analysis does not throw error when modules or fixtures are in staging area column 3.", # noqa: E501
+ )
+
+ Flex_X_v2_16_NO_PIPETTES_TrashBinInStagingAreaCol4: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_NO_PIPETTES_TrashBinInStagingAreaCol4",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="ValueError [line 15]: Staging areas not permitted for trash bin.", # noqa: E501
+ )
+
+ Flex_X_v2_16_P1000_96_DropTipsWithNoTrash: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_P1000_96_DropTipsWithNoTrash",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="NoTrashDefinedError [line 24]: Error 4000 GENERAL_ERROR (NoTrashDefinedError): No trash container has been defined in this protocol.", # noqa: E501
+ )
+
+ Flex_X_v2_16_NO_PIPETTES_TM_ModuleInStagingAreaCol3: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_NO_PIPETTES_TM_ModuleInStagingAreaCol3",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="InvalidModuleError [line 19]: Error 4000 GENERAL_ERROR (InvalidModuleError): Cannot use temperature module in C3, not compatible with one or more of the following fixtures: Slot C4", # noqa: E501
+ expected_test_failure=True,
+ expected_test_reason="Analysis does not throw error when modules or fixtures are in staging area column 3.", # noqa: E501
+ )
+
+ Flex_X_v2_16_NO_PIPETTES_TM_ModuleInStagingAreaCol4: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_NO_PIPETTES_TM_ModuleInStagingAreaCol4",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="ValueError [line 15]: Cannot load a module onto a staging slot.", # noqa: E501
+ )
+
+ Flex_X_v2_16_P1000_96_TM_ModuleAndWasteChuteConflict: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_P1000_96_TM_ModuleAndWasteChuteConflict",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="ProtocolCommandFailedError [line 25]: Error 4000 GENERAL_ERROR (ProtocolCommandFailedError): IncompatibleAddressableAreaError: Cannot use Waste Chute, not compatible with one or more of the following fixtures: Slot D3", # noqa: E501
+ )
+
+ Flex_X_v2_16_NO_PIPETTES_AccessToFixedTrashProp: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_NO_PIPETTES_AccessToFixedTrashProp",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="APIVersionError [line 15]: Fixed Trash is not supported on Flex protocols in API Version 2.16 and above.", # noqa: E501
+ )
+
+ Flex_X_v2_16_P1000_96_GRIP_DropLabwareIntoTrashBin: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_P1000_96_GRIP_DropLabwareIntoTrashBin",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="ProtocolCommandFailedError [line 20]: Error 4000 GENERAL_ERROR (ProtocolCommandFailedError): IncompatibleAddressableAreaError: Cannot use Slot C3, not compatible with one or more of the following fixtures: Trash Bin in C3", # noqa: E501
+ )
+
+ Flex_X_v2_16_P300MGen2_None_OT2PipetteInFlexProtocol: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_P300MGen2_None_OT2PipetteInFlexProtocol",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="ProtocolCommandFailedError [line 22]: Error 4000 GENERAL_ERROR (ProtocolCommandFailedError): InvalidSpecificationForRobotTypeError: Cannot load a Gen2 pipette on a Flex.", # noqa: E501
+ )
+
+ Flex_X_v2_16_NO_PIPETTES_MM_MagneticModuleInFlexProtocol: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_NO_PIPETTES_MM_MagneticModuleInFlexProtocol",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="ValueError [line 15]: A magneticModuleType cannot be loaded into slot C1", # noqa: E501
+ )
+
+ Flex_X_v2_16_NO_PIPETTES_TM_ModuleInCol2: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_NO_PIPETTES_TM_ModuleInCol2",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="ValueError [line 15]: A temperatureModuleType cannot be loaded into slot C2", # noqa: E501
+ )
+
+ Flex_S_v2_16_P1000_96_GRIP_HS_MB_TC_TM_DeckConfiguration1NoFixtures: Protocol = Protocol(
+ file_stem="Flex_S_v2_16_P1000_96_GRIP_HS_MB_TC_TM_DeckConfiguration1NoFixtures",
+ file_extension="py",
+ robot="Flex",
+ app_error=False,
+ robot_error=False,
+ )
+
+ Flex_S_v2_16_P1000_96_GRIP_DeckConfiguration1NoModules: Protocol = Protocol(
+ file_stem="Flex_S_v2_16_P1000_96_GRIP_DeckConfiguration1NoModules",
+ file_extension="py",
+ robot="Flex",
+ app_error=False,
+ robot_error=False,
+ )
+
+ Flex_S_v2_16_P1000_96_GRIP_DeckConfiguration1NoModulesNoFixtures: Protocol = Protocol(
+ file_stem="Flex_S_v2_16_P1000_96_GRIP_DeckConfiguration1NoModulesNoFixtures",
+ file_extension="py",
+ robot="Flex",
+ app_error=False,
+ robot_error=False,
+ )
+
+ Flex_S_v2_16_P1000_96_GRIP_HS_MB_TC_TM_DeckConfiguration1: Protocol = Protocol(
+ file_stem="Flex_S_v2_16_P1000_96_GRIP_HS_MB_TC_TM_DeckConfiguration1",
+ file_extension="py",
+ robot="Flex",
+ app_error=False,
+ robot_error=False,
+ )
+
+ Flex_S_v2_16_P1000_96_GRIP_HS_MB_TC_TM_Smoke: Protocol = Protocol(
+ file_stem="Flex_S_v2_16_P1000_96_GRIP_HS_MB_TC_TM_Smoke",
+ file_extension="py",
+ robot="Flex",
+ app_error=False,
+ robot_error=False,
+ )
+
+ Flex_S_v2_15_NO_PIPETTES_TC_verifyThermocyclerLoadedSlots: Protocol = Protocol(
+ file_stem="Flex_S_v2_15_NO_PIPETTES_TC_verifyThermocyclerLoadedSlots",
+ file_extension="py",
+ robot="Flex",
+ app_error=False,
+ robot_error=False,
+ )
+
+ Flex_S_v2_16_NO_PIPETTES_TC_verifyThermocyclerLoadedSlots: Protocol = Protocol(
+ file_stem="Flex_S_v2_16_NO_PIPETTES_TC_verifyThermocyclerLoadedSlots",
+ file_extension="py",
+ robot="Flex",
+ app_error=False,
+ robot_error=False,
+ )
+
+ Flex_S_v2_17_NO_PIPETTES_TC_verifyThermocyclerLoadedSlots: Protocol = Protocol(
+ file_stem="Flex_S_v2_17_NO_PIPETTES_TC_verifyThermocyclerLoadedSlots",
+ file_extension="py",
+ robot="Flex",
+ app_error=False,
+ robot_error=False,
+ )
+
+ Flex_X_v2_16_NO_PIPETTES_TC_TrashBinAndThermocyclerConflict: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_NO_PIPETTES_TC_TrashBinAndThermocyclerConflict",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="IncompatibleAddressableAreaError [line 15]: Cannot use Trash Bin in C3, not compatible with one or more of the following fixtures: Thermocycler in C3", # noqa: E501
+ )
+
+ Flex_X_v2_16_P1000_96_TC_pipetteCollisionWithThermocyclerLidClips: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_P1000_96_TC_pipetteCollisionWithThermocyclerLidClips",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="IncompatibleAddressableAreaError [line 15]: Cannot use Slot C3, not compatible with one or more of the following fixtures: Thermocycler in C3", # noqa: E501
+ )
+
+ Flex_X_v2_16_P1000_96_TC_pipetteCollisionWithThermocyclerLid: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_P1000_96_TC_pipetteCollisionWithThermocyclerLid",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="IncompatibleAddressableAreaError [line 15]: Cannot use Slot C3, not compatible with one or more of the following fixtures: Thermocycler in C3", # noqa: E501
+ )
+
+ Flex_S_v2_16_P1000_96_TC_PartialTipPickupSingle: Protocol = Protocol(
+ file_stem="Flex_S_v2_16_P1000_96_TC_PartialTipPickupSingle",
+ file_extension="py",
+ robot="Flex",
+ app_error=False,
+ robot_error=False,
+ )
+
+ Flex_S_v2_16_P1000_96_TC_PartialTipPickupColumn: Protocol = Protocol(
+ file_stem="Flex_S_v2_16_P1000_96_TC_PartialTipPickupColumn",
+ file_extension="py",
+ robot="Flex",
+ app_error=False,
+ robot_error=False,
+ )
+
+ Flex_X_v2_16_P1000_96_TC_PartialTipPickupTryToReturnTip: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_P1000_96_TC_PartialTipPickupTryToReturnTip",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="ValueError [line 15]: Cannot return tip in partial tip pickup mode.", # noqa: E501
+ )
+
+ Flex_X_v2_16_P1000_96_TC_PartialTipPickupThermocyclerLidConflict: Protocol = Protocol(
+ file_stem="Flex_X_v2_16_P1000_96_TC_PartialTipPickupThermocyclerLidConflict",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=False,
+ app_analysis_error="IncompatibleAddressableAreaError [line 15]: Cannot use Slot C3, not compatible with one or more of the following fixtures: Thermocycler in C3", # noqa: E501
+ )
+
+ Flex_S_v2_16_P1000_96_GRIP_HS_MB_TC_TM_TriggerPrepareForMountMovement: Protocol = Protocol(
+ file_stem="Flex_S_v2_16_P1000_96_GRIP_HS_MB_TC_TM_TriggerPrepareForMountMovement",
+ file_extension="py",
+ robot="Flex",
+ app_error=False,
+ robot_error=False,
+ )
+
+ Flex_S_v2_18_NO_PIPETTES_GoldenRTP: Protocol = Protocol(
+ file_stem="Flex_S_v2_18_NO_PIPETTES_GoldenRTP",
+ file_extension="py",
+ robot="Flex",
+ app_error=False,
+ robot_error=False,
+ )
+
+ Flex_X_v2_18_NO_PIPETTES_DescriptionTooLongRTP: Protocol = Protocol(
+ file_stem="Flex_X_v2_18_NO_PIPETTES_DescriptionTooLongRTP",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=True,
+ )
+
+ OT2_X_v2_18_None_None_duplicateRTPVariableName: Protocol = Protocol(
+ file_stem="OT2_X_v2_18_None_None_duplicateRTPVariableName",
+ file_extension="py",
+ robot="OT2",
+ app_error=True,
+ robot_error=True,
+ )
+
+ OT2_S_v2_18_None_None_duplicateChoiceValue: Protocol = Protocol(
+ file_stem="OT2_S_v2_18_None_None_duplicateChoiceValue",
+ file_extension="py",
+ robot="OT2",
+ app_error=False,
+ robot_error=False,
+ )
+
+ OT2_X_v2_18_None_None_StrRTPwith_unit: Protocol = Protocol(
+ file_stem="OT2_X_v2_18_None_None_StrRTPwith_unit",
+ file_extension="py",
+ robot="OT2",
+ app_error=True,
+ robot_error=True,
+ )
+
+ OT2_X_v2_18_None_None_NoRTPdisplay_name: Protocol = Protocol(
+ file_stem="OT2_X_v2_18_None_None_NoRTPdisplay_name",
+ file_extension="py",
+ robot="OT2",
+ app_error=True,
+ robot_error=True,
+ )
+
+ OT2_S_v2_18_NO_PIPETTES_GoldenRTP_OT2: Protocol = Protocol(
+ file_stem="OT2_S_v2_18_NO_PIPETTES_GoldenRTP_OT2",
file_extension="py",
- protocol_name="OT3 ABR KAPA Library Quant v2",
- robot="OT-3",
+ robot="OT2",
app_error=False,
robot_error=False,
)
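Because `Protocols` is a plain class with one attribute per protocol, enumerating everything it defines is simple attribute reflection, which is exactly what `all_defined_protocols` in the registry relies on. A toy equivalent that does not import the repo:

```python
class ToyProtocols:
    """Hypothetical stand-in shaped like the Protocols class: one attribute per protocol."""

    OT2_S_v6_P1000S_None_SimpleTransfer = "Protocol(...)"
    Flex_S_v2_18_NO_PIPETTES_GoldenRTP = "Protocol(...)"


defined = [getattr(ToyProtocols, prop) for prop in dir(ToyProtocols) if "__" not in prop]
print(defined)  # two entries, one per defined attribute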
diff --git a/app-testing/automation/data/protocols_with_overrides.py b/app-testing/automation/data/protocols_with_overrides.py
new file mode 100644
index 00000000000..2c6133180ad
--- /dev/null
+++ b/app-testing/automation/data/protocols_with_overrides.py
@@ -0,0 +1,46 @@
+from automation.data.protocol_with_overrides import ProtocolWithOverrides
+
+
+class ProtocolsWithOverrides:
+ Flex_X_v2_18_NO_PIPETTES_Overrides_BadTypesInRTP: ProtocolWithOverrides = ProtocolWithOverrides(
+ file_stem="Flex_X_v2_18_NO_PIPETTES_Overrides_BadTypesInRTP",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=True,
+ override_variable_name="type_to_test",
+ overrides=[
+ "wrong_type_in_display_name",
+ "wrong_type_in_variable_name",
+ "wrong_type_in_choice_display_name",
+ "wrong_type_in_choice_value",
+ "wrong_type_in_default",
+ "wrong_type_in_description",
+ "wrong_type_in_minimum",
+ "wrong_type_in_maximum",
+ "wrong_type_in_unit", # we going unit or suffix?
+ ],
+ )
+
+ Flex_X_v2_18_NO_PIPETTES_Overrides_DefaultOutOfRangeRTP: ProtocolWithOverrides = ProtocolWithOverrides(
+ file_stem="Flex_X_v2_18_NO_PIPETTES_Overrides_DefaultOutOfRangeRTP",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=True,
+ override_variable_name="type_to_test",
+ overrides=[
+ "default_greater_than_maximum",
+ "default_less_than_minimum",
+ ],
+ )
+
+ Flex_X_v2_18_NO_PIPETTES_Overrides_DefaultChoiceNoMatchChoice: ProtocolWithOverrides = ProtocolWithOverrides(
+ file_stem="Flex_X_v2_18_NO_PIPETTES_Overrides_DefaultChoiceNoMatchChoice",
+ file_extension="py",
+ robot="Flex",
+ app_error=True,
+ robot_error=True,
+ override_variable_name="type_to_test",
+ overrides=["str_default_no_matching_choices", "float_default_no_matching_choices", "int_default_no_matching_choices"],
+ )
diff --git a/app-testing/automation/driver/base.py b/app-testing/automation/driver/base.py
index 2a5c2398233..3ec9317b835 100644
--- a/app-testing/automation/driver/base.py
+++ b/app-testing/automation/driver/base.py
@@ -2,6 +2,7 @@
Expose clear information upon failure.
"""
+
from __future__ import annotations
import os
@@ -57,16 +58,17 @@ def apply_border_to_locator(
def apply_style(argument: str) -> None:
"""Execute the javascript to apply the style."""
- self.driver.execute_script(
- "arguments[0].setAttribute('style', arguments[1]);", finder(), argument
- ) # type: ignore
+ self.driver.execute_script("arguments[0].setAttribute('style', arguments[1]);", finder(), argument) # type: ignore
original_style = finder().get_attribute("style")
apply_style(f"border: {border_size_px}px solid {color};")
if screenshot:
self.take_screenshot(message=screenshot_message)
time.sleep(effect_time_sec)
- apply_style(original_style)
+ if original_style is None:
+ apply_style("")
+ else:
+ apply_style(original_style)
def apply_border_to_element(
self,
@@ -81,16 +83,17 @@ def apply_border_to_element(
def apply_style(argument: str) -> None:
"""Execute the javascript to apply the style."""
- self.driver.execute_script(
- "arguments[0].setAttribute('style', arguments[1]);", element, argument
- ) # type: ignore
+ self.driver.execute_script("arguments[0].setAttribute('style', arguments[1]);", element, argument) # type: ignore
original_style = element.get_attribute("style")
apply_style(f"border: {border_size_px}px solid {color};")
if screenshot:
self.take_screenshot(message=screenshot_message)
time.sleep(effect_time_sec)
- apply_style(original_style)
+ if original_style is None:
+ apply_style("")
+ else:
+ apply_style(original_style)
def highlight_element(self, finder: Callable[..., WebElement]) -> None:
"""Highlight an element."""
@@ -118,9 +121,7 @@ def take_screenshot(self, message: str = "") -> None:
os.makedirs(directory_for_results)
note = "" if (message == "") else f"_{message}".replace(" ", "_")
file_name = (
- f"{str(time.time_ns())[:-3]}_{self.execution_id}".replace("/", "_").replace("::", "__").replace(".py", "")
- + note
- + ".png"
+ f"{str(time.time_ns())[:-3]}_{self.execution_id}".replace("/", "_").replace("::", "__").replace(".py", "") + note + ".png"
)
screenshot_full_path: str = str(Path(directory_for_results, file_name))
self.console.print(f"screenshot saved: {file_name}", style="white on blue")
@@ -188,9 +189,7 @@ def create_finder(
)
def finder() -> Any:
- return WebDriverWait(self.driver, timeout_sec, ignored_exceptions=ignored_exceptions).until(
- expected_condition(element.locator)
- )
+ return WebDriverWait(self.driver, timeout_sec, ignored_exceptions=ignored_exceptions).until(expected_condition(element.locator))
return finder
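The `finder` pattern reformatted above is worth a note: rather than caching a `WebElement` (which can go stale), the page objects build a zero-argument callable that re-runs an explicit wait on every call. Here is a hedged sketch of that pattern, assuming selenium is installed; the locator and timeout are placeholders rather than values from the repo.

```python
from typing import Callable

from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait


def make_finder(driver: WebDriver, timeout_sec: float = 10.0) -> Callable[[], WebElement]:
    """Return a callable that re-locates the element on every invocation."""
    locator = (By.ID, "hypothetical-element-id")  # placeholder locator

    def finder() -> WebElement:
        return WebDriverWait(driver, timeout_sec).until(EC.element_to_be_clickable(locator))

    return finder
```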
diff --git a/app-testing/automation/driver/drag_drop.py b/app-testing/automation/driver/drag_drop.py
index 0605be6e02e..90d24748a4a 100644
--- a/app-testing/automation/driver/drag_drop.py
+++ b/app-testing/automation/driver/drag_drop.py
@@ -1,4 +1,5 @@
"""Inject javascript to utilize drag and drop functionality."""
+
from pathlib import Path
from selenium.webdriver.chrome.webdriver import WebDriver
diff --git a/app-testing/automation/driver/wait.py b/app-testing/automation/driver/wait.py
index 90274d43d21..5213239df3b 100644
--- a/app-testing/automation/driver/wait.py
+++ b/app-testing/automation/driver/wait.py
@@ -2,6 +2,7 @@
https://stackoverflow.com/questions/2785821/is-there-an-easy-way-in-python-to-wait-until-certain-condition-is-true
"""
+
import time
from typing import Any, Callable, Optional
diff --git a/app-testing/automation/menus/left_menu.py b/app-testing/automation/menus/left_menu.py
index 1875bc0fa12..a567a38a6f6 100644
--- a/app-testing/automation/menus/left_menu.py
+++ b/app-testing/automation/menus/left_menu.py
@@ -1,4 +1,5 @@
"""Left Menu Locators."""
+
from typing import Literal
from rich.console import Console
diff --git a/app-testing/automation/pages/app_settings.py b/app-testing/automation/pages/app_settings.py
index b7eb0289cfc..fc734321ba4 100644
--- a/app-testing/automation/pages/app_settings.py
+++ b/app-testing/automation/pages/app_settings.py
@@ -1,4 +1,5 @@
"""Model for the App Settings page that displays info and settings for the app."""
+
from typing import Optional
from rich.console import Console
@@ -471,8 +472,8 @@ def click_enable_developer_tools_toggle(self) -> None:
tests. The click works but has no effect.
"""
button = self.get_enable_developer_tools_toggle()
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(button).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(button).perform()
self.base.click(self.enable_developer_tools_toggle)
# Elements for Feature Flag
diff --git a/app-testing/automation/pages/deck_calibrate.py b/app-testing/automation/pages/deck_calibrate.py
index f2b6aa5eeb4..fc5941ec89c 100644
--- a/app-testing/automation/pages/deck_calibrate.py
+++ b/app-testing/automation/pages/deck_calibrate.py
@@ -1,4 +1,5 @@
"""Model for the screens of deck calibration."""
+
from enum import Enum
from typing import Optional
diff --git a/app-testing/automation/pages/device_landing.py b/app-testing/automation/pages/device_landing.py
index 425ec0d3916..3cb6dc9a570 100644
--- a/app-testing/automation/pages/device_landing.py
+++ b/app-testing/automation/pages/device_landing.py
@@ -1,4 +1,5 @@
"""Model for the App page that displays info and settings for the app."""
+
import time
from typing import List, Optional
@@ -164,7 +165,10 @@ def get_lights_status(self) -> bool:
if not button: # None check but the finder throws so should never be hit
return False
# get the status of the toggle
- return button.get_attribute("aria-checked").lower() == "true"
+ aria: str | None = button.get_attribute("aria-checked")
+ if not aria: # None check but the finder throws so *should* never be hit
+ return False
+ return aria.lower() == "true"
def set_lights(self, on: bool) -> bool:
"""Set the lights toggle. Return a bool of the condition: final light state == the desired state."""
@@ -540,8 +544,8 @@ def is_deck_calibrated(self) -> bool:
def get_pipette_calibration_overflow_1(self) -> WebElement:
"""Get the first pipette three dot menu button."""
scroll: WebElement = self.base.clickable_wrapper(self.pipette_calibration_overflow_1, 3)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(scroll).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(scroll).perform()
return scroll
def click_pipette_calibration_overflow_1(self) -> None:
@@ -556,8 +560,8 @@ def click_pipette_calibration_overflow_1(self) -> None:
def get_pipette_calibration_overflow_2(self) -> WebElement:
"""Get the first pipette three dot menu button."""
scroll: WebElement = self.base.clickable_wrapper(self.pipette_calibration_overflow_2, 3)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(scroll).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(scroll).perform()
return scroll
def click_pipette_calibration_overflow_2(self) -> None:
@@ -573,8 +577,8 @@ def click_pipette_calibration_overflow_2(self) -> None:
def click_pipette_offset_calibrate_button(self) -> None:
"""Click the calibrate button."""
scroll: WebElement = self.base.clickable_wrapper(self.calibrate_pipette_offset_button, 3)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(scroll).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(scroll).perform()
self.base.click(self.calibrate_pipette_offset_button)
# pipette calibration
@@ -665,9 +669,9 @@ def click_continue_to_pipette_offset(self) -> None:
def shift_down_arrow_key(self) -> None:
"""Send the keystroke shift + down arrow key."""
- actions = ActionChains(self.base.driver) # type: ignore
- actions.send_keys(Keys.LEFT_SHIFT + Keys.ARROW_DOWN) # type: ignore
- actions.perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.send_keys(Keys.LEFT_SHIFT + Keys.ARROW_DOWN)
+ actions.perform()
save_calibration_move_to_slot_1: Element = Element(
(By.XPATH, '//button[text()="save calibration and move to slot 1"]'),
@@ -683,9 +687,9 @@ def click_save_calibration_move_to_slot_1(self) -> None:
def up_arrow_key(self) -> None:
"""Send the keystroke arrow up key."""
- actions = ActionChains(self.base.driver) # type: ignore
- actions.send_keys(Keys.ARROW_UP) # type: ignore
- actions.perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.send_keys(Keys.ARROW_UP)
+ actions.perform()
save_calibration: Element = Element(
(By.XPATH, '//button[text()="save calibration"]'),
diff --git a/app-testing/automation/pages/labware_landing.py b/app-testing/automation/pages/labware_landing.py
index b3ef87940d9..2ed609f2881 100644
--- a/app-testing/automation/pages/labware_landing.py
+++ b/app-testing/automation/pages/labware_landing.py
@@ -1,4 +1,5 @@
"""Model for the Labware Landing page that displays labware info for the app."""
+
from typing import Optional
from rich.console import Console
diff --git a/app-testing/automation/pages/labware_position_check.py b/app-testing/automation/pages/labware_position_check.py
index fdbbd12a0d7..1db45492a38 100644
--- a/app-testing/automation/pages/labware_position_check.py
+++ b/app-testing/automation/pages/labware_position_check.py
@@ -388,8 +388,8 @@ def __init__(self, driver: WebDriver, console: Console, execution_id: str) -> No
def get_labware_position_check_button(self) -> WebElement:
"""Button to locate LPC button."""
button = self.base.clickable_wrapper(self.labware_setup_position_check_button, 2)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(button).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(button).perform()
return button
def get_labware_success_toast(self) -> WebElement:
@@ -423,8 +423,8 @@ def get_labware_position_check_complete(self) -> WebElement:
def click_labware_position_button(self) -> None:
"""Click labware position button."""
button = self.base.clickable_wrapper(self.labware_setup_position_check_button, 2)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(button).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(button).perform()
self.base.click(self.labware_setup_position_check_button)
def get_introScreen_labware_position_check_overview(self) -> WebElement:
@@ -506,8 +506,8 @@ def click_down_jog_button(self) -> None:
def get_confirm_position_button_pickup_tip(self) -> WebElement:
"""Locator for confirm position button pickup."""
toggle: WebElement = self.base.clickable_wrapper(self.confirm_position_button_pickup_tip, 5)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(toggle).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(toggle).perform()
return toggle
def click_confirm_position_button_pickup_tip(self) -> None:
@@ -518,8 +518,8 @@ def click_confirm_position_button_pickup_tip(self) -> None:
def get_confirm_position_moveto_slot_2(self) -> WebElement:
"""Locator for confirm position moveto slot."""
toggle: WebElement = self.base.clickable_wrapper(self.confirm_position_moveto_slot_2, 5)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(toggle).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(toggle).perform()
return toggle
def click_confirm_position_moveto_slot_2(self) -> None:
@@ -530,8 +530,8 @@ def click_confirm_position_moveto_slot_2(self) -> None:
def get_confirm_position_moveto_slot_5(self) -> WebElement:
"""Locator for confirm position move to slot."""
toggle: WebElement = self.base.clickable_wrapper(self.confirm_position_moveto_slot_5, 5)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(toggle).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(toggle).perform()
return toggle
def click_confirm_position_moveto_slot_5(self) -> None:
@@ -542,8 +542,8 @@ def click_confirm_position_moveto_slot_5(self) -> None:
def get_confirm_position_moveto_slot_6(self) -> WebElement:
"""Locator for confirm position moveto slot."""
toggle: WebElement = self.base.clickable_wrapper(self.confirm_position_moveto_slot_6, 5)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(toggle).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(toggle).perform()
return toggle
def click_confirm_position_moveto_slot_6(self) -> None:
diff --git a/app-testing/automation/pages/labware_setup.py b/app-testing/automation/pages/labware_setup.py
index e0dae8f9918..818a41a3518 100644
--- a/app-testing/automation/pages/labware_setup.py
+++ b/app-testing/automation/pages/labware_setup.py
@@ -1,4 +1,5 @@
"""Model for the screen of Labware Setup."""
+
from rich.console import Console
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
@@ -133,29 +134,29 @@ def get_thermocycler_module_modal_text(self) -> WebElement:
def get_close_button(self) -> WebElement:
"""Locator for close button."""
toggle: WebElement = self.base.clickable_wrapper(LabwareSetup.close_button)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(toggle).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(toggle).perform()
return toggle
def click_close_button(self) -> None:
"""Click close button."""
toggle: WebElement = self.base.clickable_wrapper(LabwareSetup.close_button)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(toggle).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(toggle).perform()
self.base.click(LabwareSetup.close_button)
def get_proceed_to_run_button(self) -> WebElement:
"""Locator for proceed to run button."""
scroll: WebElement = self.base.clickable_wrapper(LabwareSetup.proceed_to_run_button)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(scroll).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(scroll).perform()
return scroll
def click_proceed_to_run_button(self) -> None:
"""Click proceed to run."""
scroll: WebElement = self.base.clickable_wrapper(LabwareSetup.proceed_to_run_button)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(scroll).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(scroll).perform()
self.base.click(LabwareSetup.proceed_to_run_button)
def get_start_run_button(self) -> WebElement:
diff --git a/app-testing/automation/pages/modal.py b/app-testing/automation/pages/modal.py
index ea4bd67cb64..8cdcf9811a4 100644
--- a/app-testing/automation/pages/modal.py
+++ b/app-testing/automation/pages/modal.py
@@ -1,4 +1,5 @@
"""Model for the App page that displays info and settings for the app."""
+
from typing import Optional
from rich.console import Console
diff --git a/app-testing/automation/pages/module_setup.py b/app-testing/automation/pages/module_setup.py
index 2afd0ad6ae6..7c7656bc710 100644
--- a/app-testing/automation/pages/module_setup.py
+++ b/app-testing/automation/pages/module_setup.py
@@ -1,6 +1,5 @@
"""Model for the screen of module setup."""
-
from rich.console import Console
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
@@ -69,29 +68,29 @@ def get_temperature_module(self) -> WebElement:
def get_module_setup_text_locator(self) -> WebElement:
"""Locator for module setup text."""
toggle: WebElement = self.base.clickable_wrapper(ModuleSetup.module_setup_text_locator)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(toggle).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(toggle).perform()
return toggle
def get_magnetic_module(self) -> WebElement:
"""Locator for magnetic module on deckmap."""
toggle: WebElement = self.base.clickable_wrapper(ModuleSetup.magnetic_module)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(toggle).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(toggle).perform()
return toggle
def get_proceed_to_labware_setup(self) -> WebElement:
"""Locator for proceed to labware setup."""
toggle: WebElement = self.base.clickable_wrapper(ModuleSetup.proceed_to_labware_setup)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(toggle).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(toggle).perform()
return toggle
def click_proceed_to_labware_setup(self) -> None:
"""Proceed to labware setup."""
toggle: WebElement = self.base.clickable_wrapper(ModuleSetup.proceed_to_labware_setup)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(toggle).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(toggle).perform()
self.base.click(ModuleSetup.proceed_to_labware_setup)
def click_proceed_to_module_setup(self) -> None:
@@ -101,6 +100,6 @@ def click_proceed_to_module_setup(self) -> None:
def click_module_setup_text(self) -> None:
"""Click module setup text."""
toggle: WebElement = self.base.clickable_wrapper(ModuleSetup.module_setup_text_locator)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(toggle).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(toggle).perform()
self.base.click(ModuleSetup.module_setup_text_locator)
diff --git a/app-testing/automation/pages/setup_calibration.py b/app-testing/automation/pages/setup_calibration.py
index 7006ab18242..ea9589f027f 100644
--- a/app-testing/automation/pages/setup_calibration.py
+++ b/app-testing/automation/pages/setup_calibration.py
@@ -90,9 +90,7 @@ def __init__(self, driver: WebDriver, console: Console, execution_id: str) -> No
"close robot calibration button",
)
- proceed_to_module_setup_cta: Element = Element(
- (By.ID, "RobotCalStep_proceedButton"), "proceed to module setup button"
- )
+ proceed_to_module_setup_cta: Element = Element((By.ID, "RobotCalStep_proceedButton"), "proceed to module setup button")
def get_setup_for_run(self) -> WebElement:
"""Search for the setup for run text."""
@@ -129,8 +127,8 @@ def get_robot_calibration_help_modal_text(self) -> WebElement:
def get_robot_calibration_close_button(self) -> WebElement:
"""Robot claibration close button."""
close: WebElement = self.base.clickable_wrapper(SetupCalibration.close_robot_calibration_button)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(close).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(close).perform()
return close
def click_robot_calibration_help_link(self) -> None:
@@ -140,8 +138,8 @@ def click_robot_calibration_help_link(self) -> None:
def click_robot_calibration_close_button(self) -> None:
"""Click robot calibration close."""
close: WebElement = self.base.clickable_wrapper(SetupCalibration.close_robot_calibration_button)
- actions = ActionChains(self.base.driver) # type: ignore
- actions.move_to_element(close).perform() # type: ignore
+ actions = ActionChains(self.base.driver)
+ actions.move_to_element(close).perform()
self.base.click(SetupCalibration.close_robot_calibration_button)
def get_required_tip_length_calibration(self) -> WebElement:
diff --git a/app-testing/automation/resources/ot_robot.py b/app-testing/automation/resources/ot_robot.py
index a132e19cdcb..2a7e7587140 100644
--- a/app-testing/automation/resources/ot_robot.py
+++ b/app-testing/automation/resources/ot_robot.py
@@ -1,4 +1,5 @@
"""Model the the Opentrons Robot."""
+
from typing import List
import requests
diff --git a/app-testing/automation/resources/robot_data.py b/app-testing/automation/resources/robot_data.py
index b248029bcf2..c84f9735a12 100644
--- a/app-testing/automation/resources/robot_data.py
+++ b/app-testing/automation/resources/robot_data.py
@@ -1,4 +1,5 @@
"""Robot data."""
+
from dataclasses import dataclass
from typing import Literal
diff --git a/app-testing/ci-tools/ot2_with_all_modules.yaml b/app-testing/ci-tools/ot2_with_all_modules.yaml
deleted file mode 100644
index d9c1957446c..00000000000
--- a/app-testing/ci-tools/ot2_with_all_modules.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-######################
-# System Description #
-######################
-
-# This system builds an OT2 and 1 of each module.
-
- # - name: Substitute current sha into yaml
- # id: sub-step
- # uses: Opentrons/opentrons-emulation@release-v2.3.1
- # with:
- # command: yaml-sub
- # substitutions: >-
- # [
- # ["otie", "source-location", "${{ github.sha }}"],
- # ["t00-hot-to-handle", "source-location", "${{ github.sha }}"],
- # ["fatal-attraction", "source-location", "${{ github.sha }}"],
- # ["temperamental", "source-location", "${{ github.sha }}"],
- # ["maggy", "source-location", "${{ github.sha }}"],
- # ]
- # input-file: ${{ github.workspace }}/ot3-firmware/emulation_setups/ci/ot3_only.yaml
- # output-file-location: ${{ github.workspace }}/output.yaml
-
-system-unique-id: ot2-with-all-modules
-robot:
- id: otie
- hardware: ot2
- source-type: remote
- source-location: latest
- emulation-level: firmware
- robot-server-source-type: remote
- robot-server-source-location: latest
- exposed-port: 31950
- hardware-specific-attributes:
- left-pipette:
- model: p300_multi_v2.1
- id: p300multi
- right-pipette:
- model: p20_single_v2.2
- id: p20single
-modules:
- - id: shakey-and-warm
- hardware: heater-shaker-module
- source-type: remote
- source-location: latest
- emulation_level: firmware
- - id: t00-hot-to-handle
- hardware: thermocycler-module
- source-type: remote
- source-location: latest
- emulation_level: firmware
- - id: fatal-attraction
- hardware: magnetic-module
- source-type: remote
- source-location: latest
- emulation_level: firmware
- - id: temperamental
- hardware: temperature-module
- source-type: remote
- source-location: latest
- emulation_level: firmware
- - id: maggy
- hardware: magnetic-module
- source-type: remote
- source-location: latest
- emulation_level: firmware
diff --git a/app-testing/citools/Dockerfile b/app-testing/citools/Dockerfile
new file mode 100644
index 00000000000..71522ec11b2
--- /dev/null
+++ b/app-testing/citools/Dockerfile
@@ -0,0 +1,27 @@
+# Use 3.10 just like the app does
+FROM python:3.10-slim-bullseye
+
+# Update packages and install git
+RUN apt-get update && \
+ apt-get upgrade -y && \
+ apt-get install -y git libsystemd-dev
+
+# Define a build argument for the commit/tag/hash to clone
+ARG OPENTRONS_VERSION=edge
+
+# Set the working directory in the container
+WORKDIR /opentrons
+
+# Clone the Opentrons repository at the specified commit or tag
+ARG CACHEBUST=1
+RUN git clone --branch $OPENTRONS_VERSION --depth 1 https://github.com/Opentrons/opentrons .
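+
+# Example build (the image/tag naming here is an assumption; generate_analyses.py looks for images named "opentrons-analysis:<tag>"):
+#   docker build --build-arg OPENTRONS_VERSION=edge --build-arg CACHEBUST="$(date +%s)" -t opentrons-analysis:edge .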
+
+# Install packages from local directories
+RUN python -m pip install -U ./shared-data/python
+RUN python -m pip install -U ./hardware[flex]
+RUN python -m pip install -U ./api
+RUN python -m pip install -U pandas==1.4.3
+
+# The default command to run when starting the container
+CMD ["tail", "-f", "/dev/null"]
+
diff --git a/app-testing/citools/__init__.py b/app-testing/citools/__init__.py
new file mode 100644
index 00000000000..e11ed81c96c
--- /dev/null
+++ b/app-testing/citools/__init__.py
@@ -0,0 +1 @@
+"""Package for all automation tools."""
diff --git a/app-testing/citools/generate_analyses.py b/app-testing/citools/generate_analyses.py
new file mode 100644
index 00000000000..67ad42c44c7
--- /dev/null
+++ b/app-testing/citools/generate_analyses.py
@@ -0,0 +1,330 @@
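+"""Generate protocol analyses by running `opentrons.cli analyze` inside the opentrons-analysis Docker container."""
+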
+import json
+import os
+import signal
+import time
+from contextlib import contextmanager
+from dataclasses import dataclass
+from datetime import datetime, timezone
+from enum import Enum, auto
+from pathlib import Path
+from typing import Any, Dict, Generator, List, Optional
+
+import docker # type: ignore
+from automation.data.protocol import Protocol
+from rich.console import Console
+from rich.traceback import install
+
+install(show_locals=True)
+IMAGE = "opentrons-analysis"
+CONTAINER_LABWARE = "/var/lib/ot"
+HOST_LABWARE = Path(Path(__file__).parent.parent, "files", "labware")
+HOST_PROTOCOLS_ROOT = Path(Path(__file__).parent.parent, "files", "protocols")
+CONTAINER_PROTOCOLS_ROOT = "/var/lib/ot/protocols"
+CONTAINER_RESULTS = "/var/lib/ot/analysis_results"
+HOST_RESULTS = Path(Path(__file__).parent.parent, "analysis_results")
+ANALYSIS_SUFFIX = "analysis.json"
+
+console = Console()
+
+
+@contextmanager
+def timeout(seconds: int) -> Generator[None, None, None]:
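+    """Abort the wrapped block by raising TimeoutError after `seconds` seconds (uses SIGALRM, so Unix-only)."""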
+ # Signal handler function
+ def raise_timeout(signum, frame) -> None: # type: ignore[no-untyped-def]
+ raise TimeoutError
+
+ # Set the signal handler for the alarm signal
+ signal.signal(signal.SIGALRM, raise_timeout)
+ signal.alarm(seconds) # Set the alarm
+ try:
+ yield
+ finally:
+ signal.alarm(0) # Disable the alarm
+
+
+class ProtocolType(Enum):
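+    """Kind of protocol file: Protocol Designer JSON or Python."""
+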
+ PROTOCOL_DESIGNER = auto()
+ PYTHON = auto()
+
+
+@dataclass
+class AnalyzedProtocol:
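+    """A protocol file paired with its analysis output paths and the results captured during analysis."""
+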
+ host_protocol_file: Path
+ container_protocol_file: Path
+ host_analysis_file: Path
+ container_analysis_file: Path
+ tag: str
+ analysis_execution_time: Optional[float] = None
+ command_exit_code: Optional[int] = None
+ command_output: Optional[str] = None
+ analysis: Optional[Dict[str, Any]] = None
+
+ @property
+ def analysis_file_exists(self) -> bool:
+ return self.host_analysis_file.exists()
+
+ def create_failed_analysis(self) -> Dict[str, Any]:
+ created_at = datetime.now(timezone.utc).isoformat()
+
+ return {
+ "createdAt": created_at,
+ "errors": [
+ {
+ "analysis_execution_time": self.analysis_execution_time,
+ "command_output": self.command_output,
+ "command_exit_code": self.command_exit_code,
+ },
+ ],
+ "files": [],
+ "metadata": [],
+ "commands": [],
+ "labware": [],
+ "pipettes": [],
+ "modules": [],
+ "liquids": [],
+ "config": {},
+ "runTimeParameters": [],
+ }
+
+ def write_failed_analysis(self) -> None:
+ analysis = self.create_failed_analysis()
+ with open(self.host_analysis_file, "w") as file:
+ json.dump(analysis, file, indent=4)
+
+ def set_analysis(self) -> None:
+ if self.analysis_file_exists:
+ with open(self.host_analysis_file, "r") as file:
+ self.analysis = json.load(file)
+ else:
+ self.write_failed_analysis()
+
+ @property
+ def protocol_file_name(self) -> str:
+ return self.host_protocol_file.name
+
+ @property
+ def protocol_type(self) -> str:
+ return (ProtocolType.PYTHON if self.host_protocol_file.suffix == ".py" else ProtocolType.PROTOCOL_DESIGNER).name.title()
+
+ def set_analysis_execution_time(self, analysis_execution_time: float) -> None:
+ self.analysis_execution_time = analysis_execution_time
+
+
+def stop_and_restart_container(image_name: str, timeout: int = 60) -> docker.models.containers.Container:
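+    """Stop any running containers for the image and start a fresh one.
+
+    Mounts the labware, protocol, and results volumes and waits up to `timeout` seconds for readiness.
+    """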
+ client = docker.from_env()
+ volumes = {
+ str(HOST_LABWARE): {"bind": CONTAINER_LABWARE, "mode": "rw"},
+ str(HOST_RESULTS): {"bind": CONTAINER_RESULTS, "mode": "rw"},
+ str(HOST_PROTOCOLS_ROOT): {"bind": CONTAINER_PROTOCOLS_ROOT, "mode": "rw"},
+ }
+
+ # Find the running container using the specified image
+ containers = client.containers.list(filters={"ancestor": image_name, "status": "running"})
+
+ if containers:
+ console.print("Stopping the running container(s)...")
+ for container in containers:
+ container.stop(timeout=10)
+
+ # Start a new container with the specified volume
+ console.print("Starting a new container.")
+ container = client.containers.run(image_name, detach=True, volumes=volumes)
+
+    # Wait for the container to be ready by polling until the labware mount is listable
+ start_time = time.time()
+ while time.time() - start_time < timeout:
+ exit_code, output = container.exec_run(f"ls -al {CONTAINER_LABWARE}")
+ if exit_code == 0:
+ console.print("Container is ready.")
+ break
+ else:
+ console.print("Waiting for container to be ready...")
+ time.sleep(5)
+ else:
+ console.print("Timeout waiting for container to be ready. Proceeding anyway.")
+ return container
+
+
+def stop_and_remove_containers(image_name: str) -> None:
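+    """Stop and remove every container created from the given image."""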
+ client = docker.from_env()
+
+ # Find all containers created from the specified image
+ containers = client.containers.list(all=True, filters={"ancestor": image_name})
+
+ for container in containers:
+ try:
+ # Stop the container if it's running
+ if container.status == "running":
+ console.print(f"Stopping container {container.short_id}...")
+ container.stop(timeout=10)
+
+ # Remove the container
+ console.print(f"Removing container {container.short_id}...")
+ container.remove()
+ except docker.errors.ContainerError as e:
+ console.print(f"Error stopping/removing container {container.short_id}: {e}")
+
+
+def has_designer_application(json_file_path: Path) -> bool:
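+    """Return True if the JSON file contains a designerApplication key (i.e. is a Protocol Designer protocol)."""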
+ try:
+ with open(json_file_path, "r", encoding="utf-8") as file:
+ data = json.load(file)
+ return "designerApplication" in data
+ except json.JSONDecodeError:
+ # Handle the exception if the file is not a valid JSON
+ console.print(f"Invalid JSON file: {json_file_path}")
+ return False
+
+
+def host_analysis_path(protocol_file: Path, tag: str) -> Path:
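+    """Build the host-side path for a protocol's analysis JSON."""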
+ return Path(HOST_RESULTS, f"{protocol_file.stem}_{tag}_{ANALYSIS_SUFFIX}")
+
+
+def container_analysis_path(protocol_file: Path, tag: str) -> Path:
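+    """Build the in-container path for a protocol's analysis JSON."""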
+ return Path(CONTAINER_RESULTS, f"{protocol_file.stem}_{tag}_{ANALYSIS_SUFFIX}")
+
+
+def generate_protocols(tag: str) -> List[AnalyzedProtocol]:
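+    """Discover Protocol Designer (.json) and Python (.py) protocols under the protocols root and pair each with its analysis paths."""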
+ def find_pd_protocols() -> List[AnalyzedProtocol]:
+ # Check if the provided path is a valid directory
+ if not HOST_PROTOCOLS_ROOT.is_dir():
+ raise NotADirectoryError(f"The path {HOST_PROTOCOLS_ROOT} is not a valid directory.")
+
+ # Recursively find all .json files
+ json_files = list(HOST_PROTOCOLS_ROOT.rglob("*.json"))
+ filtered_json_files = [file for file in json_files if has_designer_application(file)]
+ pd_protocols: List[AnalyzedProtocol] = []
+ for path in filtered_json_files:
+ relative_path = path.relative_to(HOST_PROTOCOLS_ROOT)
+ updated_path = Path(CONTAINER_PROTOCOLS_ROOT, relative_path)
+ pd_protocols.append(
+ AnalyzedProtocol(path, updated_path, host_analysis_path(path, tag), container_analysis_path(path, tag), tag)
+ )
+ return pd_protocols
+
+ def find_python_protocols() -> List[AnalyzedProtocol]:
+ # Check if the provided path is a valid directory
+ if not HOST_PROTOCOLS_ROOT.is_dir():
+ raise NotADirectoryError(f"The path {HOST_PROTOCOLS_ROOT} is not a valid directory.")
+
+ # Recursively find all .py files
+ python_files = list(HOST_PROTOCOLS_ROOT.rglob("*.py"))
+ py_protocols: List[AnalyzedProtocol] = []
+
+ for path in python_files:
+ relative_path = path.relative_to(HOST_PROTOCOLS_ROOT)
+ container_path = Path(CONTAINER_PROTOCOLS_ROOT, relative_path)
+ py_protocols.append(
+ AnalyzedProtocol(path, container_path, host_analysis_path(path, tag), container_analysis_path(path, tag), tag=tag)
+ )
+ return py_protocols
+
+ return find_pd_protocols() + find_python_protocols()
+
+
+def remove_all_files_in_directory(directory: Path) -> None:
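+    """Delete all files and symlinks directly inside the directory; subdirectories are left in place."""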
+ for filename in os.listdir(directory):
+ file_path = os.path.join(directory, filename)
+ try:
+ if os.path.isfile(file_path) or os.path.islink(file_path):
+ os.unlink(file_path)
+ elif os.path.isdir(file_path):
+ pass # Currently, subdirectories are not removed
+ except Exception as e:
+ console.print(f"Failed to delete {file_path}. Reason: {e}")
+
+
+def container_custom_labware_paths() -> List[str]:
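+    """Return container paths for the custom labware .json files found in the host labware directory."""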
+ if HOST_LABWARE.is_dir():
+ return [os.path.join(CONTAINER_LABWARE, file) for file in os.listdir(HOST_LABWARE) if file.endswith(".json")]
+ return []
+
+
+def analyze(protocol: AnalyzedProtocol, container: docker.models.containers.Container) -> bool:
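+    """Run `opentrons.cli analyze` for one protocol inside the container; return True if the command completed."""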
+ # Run the analyze command
+ command = f"python -I -m opentrons.cli analyze --json-output {protocol.container_analysis_file} {protocol.container_protocol_file} {' '.join(map(str, container_custom_labware_paths()))}" # noqa: E501
+ start_time = time.time()
+    timeout_duration = 30  # seconds
+    # Pre-declare so the generic exception handler below cannot hit a NameError
+    # if exec_run itself raises before these are assigned.
+    exit_code = None
+    result = None
+    try:
+ with timeout(timeout_duration):
+ command_result = container.exec_run(cmd=command)
+ exit_code = command_result.exit_code
+ result = command_result.output
+ protocol.command_output = result.decode("utf-8")
+ protocol.command_exit_code = exit_code
+ protocol.set_analysis()
+ protocol.set_analysis_execution_time(time.time() - start_time)
+ return True
+ except TimeoutError:
+ console.print(f"Command execution exceeded {timeout_duration} seconds and was aborted.")
+ logs = container.logs()
+ # Decode and print the logs
+ console.print(f"container logs{logs.decode('utf-8')}")
+ except KeyboardInterrupt:
+ console.print("Execution was interrupted by the user.")
+ raise
+ except Exception as e:
+ console.print(f"An unexpected error occurred: {e}")
+ protocol.command_output = result.decode("utf-8")
+ console.print(f"Command output: {protocol.command_output}")
+ protocol.command_exit_code = exit_code
+ console.print(f"Exit code: {protocol.command_exit_code}")
+ protocol.set_analysis()
+ return False
+ protocol.command_output = None
+ protocol.command_exit_code = None
+ protocol.analysis = None
+ protocol.set_analysis_execution_time(time.time() - start_time)
+ return False
+
+
+def analyze_many(protocol_files: List[AnalyzedProtocol], container: docker.models.containers.Container) -> None:
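+    """Analyze each protocol in the list and report the accumulated analysis time."""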
+ for file in protocol_files:
+ analyze(file, container)
+ accumulated_time = sum(protocol.analysis_execution_time for protocol in protocol_files if protocol.analysis_execution_time is not None)
+ console.print(f"{len(protocol_files)} protocols with total analysis time of {accumulated_time:.2f} seconds.\n")
+
+
+def analyze_against_image(tag: str) -> List[AnalyzedProtocol]:
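+    """Analyze all discovered protocols against the given image tag, then stop and remove the container."""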
+ image_name = f"{IMAGE}:{tag}"
+ protocols = generate_protocols(tag)
+ protocols_to_process = protocols
+ # protocols_to_process = protocols[:1] # For testing
+ try:
+ console.print(f"Analyzing {len(protocols_to_process)} protocol(s) against {image_name}...")
+ container = stop_and_restart_container(image_name)
+ analyze_many(protocols_to_process, container)
+ finally:
+ stop_and_remove_containers(image_name)
+ return protocols_to_process
+
+
+def generate_analyses_from_test(tag: str, protocols: List[Protocol]) -> None:
+ """Generate analyses from the tests."""
+ try:
+ image_name = f"{IMAGE}:{tag}"
+ protocols_to_process: List[AnalyzedProtocol] = []
+ # convert the protocols to AnalyzedProtocol
+ for test_protocol in protocols:
+ host_protocol_file = Path(test_protocol.file_path)
+ container_protocol_file = Path(CONTAINER_PROTOCOLS_ROOT, host_protocol_file.relative_to(HOST_PROTOCOLS_ROOT))
+ host_analysis_file = host_analysis_path(host_protocol_file, tag)
+ container_analysis_file = container_analysis_path(host_protocol_file, tag)
+ protocols_to_process.append(
+ AnalyzedProtocol(host_protocol_file, container_protocol_file, host_analysis_file, container_analysis_file, tag)
+ )
+ console.print(f"Analyzing {len(protocols_to_process)} protocol(s) against {tag}...")
+ container = stop_and_restart_container(image_name)
+ # Analyze the protocols
+ for protocol_to_analyze in protocols_to_process:
+ console.print(f"Analyzing {protocol_to_analyze.host_protocol_file}...")
+ analyzed = analyze(protocol_to_analyze, container)
+            if not analyzed:  # Fail fast
+                console.print("Analysis failed. Exiting.")
+                stop_and_remove_containers(image_name)
+                return
+ accumulated_time = sum(
+ protocol.analysis_execution_time for protocol in protocols_to_process if protocol.analysis_execution_time is not None
+ )
+ console.print(f"{len(protocols_to_process)} protocols with total analysis time of {accumulated_time:.2f} seconds.\n")
+ finally:
+ stop_and_remove_containers(image_name)
diff --git a/app-testing/ci-tools/linux_get_chromedriver.sh b/app-testing/citools/linux_get_chromedriver.sh
similarity index 100%
rename from app-testing/ci-tools/linux_get_chromedriver.sh
rename to app-testing/citools/linux_get_chromedriver.sh
diff --git a/app-testing/ci-tools/mac_get_chromedriver.sh b/app-testing/citools/mac_get_chromedriver.sh
similarity index 100%
rename from app-testing/ci-tools/mac_get_chromedriver.sh
rename to app-testing/citools/mac_get_chromedriver.sh
diff --git a/app-testing/ci-tools/windows_get_chromedriver.ps1 b/app-testing/citools/windows_get_chromedriver.ps1
similarity index 100%
rename from app-testing/ci-tools/windows_get_chromedriver.ps1
rename to app-testing/citools/windows_get_chromedriver.ps1
diff --git a/app-testing/citools/write_failed_analysis.py b/app-testing/citools/write_failed_analysis.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/app-testing/conftest.py b/app-testing/conftest.py
index afc55ea719e..dc12b965acc 100644
--- a/app-testing/conftest.py
+++ b/app-testing/conftest.py
@@ -1,4 +1,5 @@
"""Pytest setup."""
+
import os
from typing import Generator, List, Optional
@@ -20,24 +21,14 @@
traceback.install(console=_console)
-# Check to see if we have a dotenv file and use it
+# Load with override=False so System Environment Variables take precedence over values in the .env file.
+# This is important for CI.
if find_dotenv():
- load_dotenv(find_dotenv())
-
-
-def pytest_collection_modifyitems(items): # type: ignore # noqa: ANN201,ANN001
- """Order tests."""
- # When running all tests calibrate the robot first.
- # Most other tests require this.
- MODULE_ORDER = ["tests.calibrate_test"]
- module_mapping = {item: item.module.__name__ for item in items}
- sorted_items = items.copy()
- # Iteratively move tests of each module to the end of the test queue
- for module in MODULE_ORDER:
- sorted_items = [it for it in sorted_items if module_mapping[it] == module] + [
- it for it in sorted_items if module_mapping[it] != module
- ]
- items[:] = sorted_items
+ load_dotenv(find_dotenv(), override=False)
+elif find_dotenv(filename="example.env"): # example.env has our defaults
+ load_dotenv(find_dotenv(filename="example.env"), override=False)
+else:
+ raise AssertionError("No .env or example.env file found.")
def _chrome_options() -> Options:
@@ -47,14 +38,14 @@ def _chrome_options() -> Options:
assert executable_path is not None, "EXECUTABLE_PATH environment variable must be set"
_console.print(f"EXECUTABLE_PATH is {executable_path}", style="white on blue")
options.binary_location = executable_path
- options.add_argument("whitelisted-ips=''") # type: ignore
- options.add_argument("disable-xss-auditor") # type: ignore
- options.add_argument("disable-web-security") # type: ignore
- options.add_argument("allow-running-insecure-content") # type: ignore
- options.add_argument("no-sandbox") # type: ignore
- options.add_argument("disable-setuid-sandbox") # type: ignore
- options.add_argument("disable-popup-blocking") # type: ignore
- options.add_argument("allow-elevated-browser") # type: ignore
+ options.add_argument("whitelisted-ips=''")
+ options.add_argument("disable-xss-auditor")
+ options.add_argument("disable-web-security")
+ options.add_argument("allow-running-insecure-content")
+ options.add_argument("no-sandbox")
+ options.add_argument("disable-setuid-sandbox")
+ options.add_argument("disable-popup-blocking")
+ options.add_argument("allow-elevated-browser")
return options
diff --git a/app-testing/example.env b/app-testing/example.env
index d5a97f48962..749dddc4cf6 100644
--- a/app-testing/example.env
+++ b/app-testing/example.env
@@ -9,48 +9,110 @@ ROBOT_BASE_URL="http://localhost:31950"
# slow down execution and highlight found elements
SLOWMO=TrUe
HIGHLIGHT_SECONDS=.3 # default is 2
-UPDATE_CHANNEL="beta" # latest beta alpha
+UPDATE_CHANNEL="alpha" # latest beta alpha
LOCALHOST=false
+# Analyses Snapshot test target
+TARGET=edge
# run all tests
# possible values in \automation\data\protocol_files.py
# dynamically generate with make print-protocols
-# OT-2 Protocols
-# APP_ANALYSIS_TEST_PROTOCOLS="OT2_P1000SLeft_None_6_1_SimpleTransfer,
-# OT2_P20SRight_None_6_1_SimpleTransferError,
-# OT2_P20S_P300M_HS_6_1_HS_WithCollision_Error,
-# OT2_P20S_P300M_NoMods_6_1_TransferReTransferLiquid,
-# OT2_P300M_P20S_HS_6_1_Smoke620release,
-# OT2_P300M_P20S_MM_HS_TD_TC_6_1_AllMods_Error,
-# OT2_P300M_P20S_MM_TM_TC1_5_2_6_PD40,
-# OT2_P300M_P20S_MM_TM_TC1_5_2_6_PD40Error,
-# OT2_P300M_P20S_NoMod_6_1_MixTransferManyLiquids,
-# OT2_P300M_P300S_HS_6_1_HS_NormalUseWithTransfer,
-# OT2_P300SG1_None_5_2_6_Gen1PipetteSimple,
-# OT2_P300SLeft_MM_TM_TM_5_2_6_MOAMTemps,
-# OT2_None_None_2_12_Python310SyntaxRobotAnalysisOnlyError,
-# OT2_None_None_2_13_PythonSyntaxError,
-# OT2_P10S_P300M_TC1_TM_MM_2_11_Swift,
-# OT2_P20S_None_2_7_Walkthrough,
-# OT2_P300MLeft_MM_TM_2_4_Zymo,
-# OT2_P300M_P20S_None_2_12_FailOnRun,
-# OT2_P300M_P20S_TC_MM_TM_6_13_Smoke620Release,
-# OT2_P300SLeft_MM1_MM_2_2_EngageMagHeightFromBase,
-# OT2_P300SLeft_MM1_MM_TM_2_3_Mix,
-# OT2_P300S_Thermocycler_Moam_Error,
-# OT2_P300S_Twinning_Error"
-# Flex Protocols
-APP_ANALYSIS_TEST_PROTOCOLS="OT3_P1000_96_HS_TM_MM_2_15_MagMaxRNACells96Ch,
-OT3_P1000SRight_None_2_15_ABR_Simple_Normalize_Long_Right,
-OT3_P50MLeft_P1000MRight_None_2_15_ABRKAPALibraryQuantLongv2,
-OT3_P1000MLeft_P50MRight_HS_MM_TC_TM_2_15_ABR3_Illumina_DNA_Enrichment_v4,
-OT3_P1000MLeft_P50MRight_HS_MM_TC_TM_2_15_ABR3_Illumina_DNA_Enrichment,
-OT3_P1000MLeft_P50MRight_HS_TM_MM_TC_2_15_ABR4_Illumina_DNA_Prep_24x,
-OT3_P1000_96_HS_TM_MM_2_15_ABR5_6_HDQ_Bacteria_ParkTips_96_channel,
-OT3_P1000_96_None_2_15_ABR5_6_IDT_xGen_EZ_96x_Head_PART_I_III_ABR,
-OT3_P1000_96_HS_TM_TC_MM_2_15_ABR5_6_Illumina_DNA_Prep_96x_Head_PART_III,
-OT3_P100_96_HS_TM_2_15_Quick_Zymo_RNA_Bacteria"
+APP_ANALYSIS_TEST_PROTOCOLS="
+Flex_S_v2_15_NO_PIPETTES_TC_verifyThermocyclerLoadedSlots,
+Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAEnrichment,
+Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAEnrichmentv4,
+Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAPrep24x,
+Flex_S_v2_15_P1000S_None_SimpleNormalizeLongRight,
+Flex_S_v2_15_P1000_96_GRIP_HS_MB_TC_TM_IDTXgen96Part1to3,
+Flex_S_v2_15_P1000_96_GRIP_HS_MB_TC_TM_IlluminaDNAPrep96PART3,
+Flex_S_v2_15_P1000_96_GRIP_HS_MB_TM_MagMaxRNAExtraction,
+Flex_S_v2_15_P1000_96_GRIP_HS_MB_TM_OmegaHDQDNAExtraction,
+Flex_S_v2_15_P1000_96_GRIP_HS_TM_QuickZymoMagbeadRNAExtraction,
+Flex_S_v2_15_P50M_P1000M_KAPALibraryQuantLongv2,
+Flex_S_v2_16_NO_PIPETTES_TC_verifyThermocyclerLoadedSlots,
+Flex_S_v2_16_P1000_96_GRIP_DeckConfiguration1NoModules,
+Flex_S_v2_16_P1000_96_GRIP_DeckConfiguration1NoModulesNoFixtures,
+Flex_S_v2_16_P1000_96_GRIP_HS_MB_TC_TM_DeckConfiguration1,
+Flex_S_v2_16_P1000_96_GRIP_HS_MB_TC_TM_DeckConfiguration1NoFixtures,
+Flex_S_v2_16_P1000_96_GRIP_HS_MB_TC_TM_Smoke,
+Flex_S_v2_16_P1000_96_GRIP_HS_MB_TC_TM_TriggerPrepareForMountMovement,
+Flex_S_v2_16_P1000_96_TC_PartialTipPickupColumn,
+Flex_S_v2_16_P1000_96_TC_PartialTipPickupSingle,
+Flex_S_v2_17_NO_PIPETTES_TC_verifyThermocyclerLoadedSlots,
+Flex_S_v2_18_NO_PIPETTES_GoldenRTP,
+Flex_X_v2_16_NO_PIPETTES_AccessToFixedTrashProp,
+Flex_X_v2_16_NO_PIPETTES_MM_MagneticModuleInFlexProtocol,
+Flex_X_v2_16_NO_PIPETTES_TC_TrashBinAndThermocyclerConflict,
+Flex_X_v2_16_NO_PIPETTES_TM_ModuleInCol2,
+Flex_X_v2_16_NO_PIPETTES_TM_ModuleInStagingAreaCol3,
+Flex_X_v2_16_NO_PIPETTES_TM_ModuleInStagingAreaCol4,
+Flex_X_v2_16_NO_PIPETTES_TrashBinInCol2,
+Flex_X_v2_16_NO_PIPETTES_TrashBinInStagingAreaCol3,
+Flex_X_v2_16_NO_PIPETTES_TrashBinInStagingAreaCol4,
+Flex_X_v2_16_P1000_96_DropTipsWithNoTrash,
+Flex_X_v2_16_P1000_96_GRIP_DropLabwareIntoTrashBin,
+Flex_X_v2_16_P1000_96_TC_PartialTipPickupThermocyclerLidConflict,
+Flex_X_v2_16_P1000_96_TC_PartialTipPickupTryToReturnTip,
+Flex_X_v2_16_P1000_96_TC_pipetteCollisionWithThermocyclerLid,
+Flex_X_v2_16_P1000_96_TC_pipetteCollisionWithThermocyclerLidClips,
+Flex_X_v2_16_P1000_96_TM_ModuleAndWasteChuteConflict,
+Flex_X_v2_16_P300MGen2_None_OT2PipetteInFlexProtocol,
+Flex_X_v2_18_NO_PIPETTES_DescriptionTooLongRTP,
+Flex_X_v8_P1000_96_HS_GRIP_TC_TM_GripperCollisionWithTips,
+OT2_S_v2_11_P10S_P300M_MM_TC1_TM_Swift,
+OT2_S_v2_12_NO_PIPETTES_Python310SyntaxRobotAnalysisOnlyError,
+OT2_S_v2_12_P300M_P20S_FailOnRun,
+OT2_S_v2_13_P300M_P20S_HS_TC_TM_SmokeTestV3,
+OT2_S_v2_13_P300M_P20S_MM_TC_TM_Smoke620Release,
+OT2_S_v2_14_NO_PIPETTES_TC_VerifyThermocyclerLoadedSlots,
+OT2_S_v2_14_P300M_P20S_HS_TC_TM_SmokeTestV3,
+OT2_S_v2_15_NO_PIPETTES_TC_VerifyThermocyclerLoadedSlots,
+OT2_S_v2_15_P300M_P20S_HS_TC_TM_SmokeTestV3,
+OT2_S_v2_15_P300M_P20S_HS_TC_TM_dispense_changes,
+OT2_S_v2_16_NO_PIPETTES_TC_VerifyThermocyclerLoadedSlots,
+OT2_S_v2_16_NO_PIPETTES_verifyDoesNotDeadlock,
+OT2_S_v2_16_P300M_P20S_HS_TC_TM_SmokeTestV3,
+OT2_S_v2_16_P300M_P20S_HS_TC_TM_aspirateDispenseMix0Volume,
+OT2_S_v2_16_P300M_P20S_HS_TC_TM_dispense_changes,
+OT2_S_v2_16_P300M_P20S_aspirateDispenseMix0Volume,
+OT2_S_v2_16_P300S_None_verifyNoFloatingPointErrorInPipetting,
+OT2_S_v2_17_NO_PIPETTES_TC_VerifyThermocyclerLoadedSlots,
+OT2_S_v2_17_P300M_P20S_HS_TC_TM_SmokeTestV3,
+OT2_S_v2_17_P300M_P20S_HS_TC_TM_dispense_changes,
+OT2_S_v2_18_NO_PIPETTES_GoldenRTP_OT2,
+OT2_S_v2_18_None_None_duplicateChoiceValue,
+OT2_S_v2_2_P300S_None_MM1_MM2_EngageMagHeightFromBase,
+OT2_S_v2_3_P300S_None_MM1_MM2_TM_Mix,
+OT2_S_v2_4_P300M_None_MM_TM_Zymo,
+OT2_S_v2_7_P20S_None_Walkthrough,
+OT2_S_v3_P300SGen1_None_Gen1PipetteSimple,
+OT2_S_v4_P300M_P20S_MM_TM_TC1_PD40,
+OT2_S_v4_P300S_None_MM_TM_TM_MOAMTemps,
+OT2_S_v6_P1000S_None_SimpleTransfer,
+OT2_S_v6_P20S_P300M_TransferReTransferLiquid,
+OT2_S_v6_P300M_P20S_HS_Smoke620release,
+OT2_S_v6_P300M_P20S_MixTransferManyLiquids,
+OT2_S_v6_P300M_P300S_HS_HS_NormalUseWithTransfer,
+OT2_X_v2_11_P300S_TC1_TC2_ThermocyclerMoamError,
+OT2_X_v2_13_None_None_PythonSyntaxError,
+OT2_X_v2_16_None_None_HS_HeaterShakerConflictWithTrashBin1,
+OT2_X_v2_16_None_None_HS_HeaterShakerConflictWithTrashBin2,
+OT2_X_v2_18_None_None_NoRTPdisplay_name,
+OT2_X_v2_18_None_None_StrRTPwith_unit,
+OT2_X_v2_18_None_None_duplicateRTPVariableName,
+OT2_X_v2_7_P300S_TwinningError,
+OT2_X_v4_P300M_P20S_MM_TC1_TM_e2eTests,
+OT2_X_v6_P20S_None_SimpleTransfer,
+OT2_X_v6_P300M_P20S_HS_MM_TM_TC_AllMods
+"
+APP_ANALYSIS_TEST_PROTOCOLS_WITH_OVERRIDES="
+Flex_X_v2_18_NO_PIPETTES_Overrides_BadTypesInRTP,
+Flex_X_v2_18_NO_PIPETTES_Overrides_DefaultChoiceNoMatchChoice,
+Flex_X_v2_18_NO_PIPETTES_Overrides_DefaultOutOfRangeRTP
+"
# run one
-# APP_ANALYSIS_TEST_PROTOCOLS="OT2_P1000SLeft_None_6_1_SimpleTransfer"
-FILES_FOLDER="files"
\ No newline at end of file
+# APP_ANALYSIS_TEST_PROTOCOLS="Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAEnrichmentv4"
+# APP_ANALYSIS_TEST_PROTOCOLS_WITH_OVERRIDES="Flex_X_v2_18_NO_PIPETTES_Overrides_BadTypesInRTP"
+
+FILES_FOLDER="files"
diff --git a/app-testing/files/examples/description_too_long_2.18.py b/app-testing/files/examples/description_too_long_2.18.py
new file mode 100644
index 00000000000..8b63fe2afd1
--- /dev/null
+++ b/app-testing/files/examples/description_too_long_2.18.py
@@ -0,0 +1,59 @@
+metadata = {
+ "protocolName": "Description Too Long 2.18",
+}
+
+requirements = {"robotType": "Flex", "apiLevel": "2.18"}
+
+
+# change me to test that a bad description is caught
+# for each type of parameter we can add.
+type_to_test = 1
+
+
+def add_parameters(parameters):
+ too_long: str = "This is a description that is longer than 30 characters."
+ match type_to_test:
+ case 1:
+ parameters.add_int(
+ display_name="Dilutions",
+ variable_name="dilutions",
+ default=1,
+ minimum=1,
+ maximum=3,
+ description=too_long,
+ )
+ case 2:
+ parameters.add_float(
+ display_name="Mixing Volume in µL",
+ variable_name="mixing_volume",
+ default=150.0,
+ choices=[
+ {"display_name": "Low Volume ⬇️", "value": 100.0},
+ {"display_name": "Medium Volume 🟰", "value": 150.0},
+ {"display_name": "High Volume ⬆️", "value": 200.0},
+ ],
+ description=too_long,
+ )
+ case 3:
+ parameters.add_str(
+ display_name="Pipette Name",
+ variable_name="pipette",
+ choices=[
+ {"display_name": "Single channel 50µL", "value": "flex_1channel_50"},
+ {"display_name": "Eight Channel 50µL", "value": "flex_8channel_50"},
+ ],
+ default="flex_1channel_50",
+ description=too_long,
+ )
+ case 4:
+ parameters.add_bool(
+ display_name="Dry Run",
+ variable_name="dry_run",
+ default=False,
+ description=too_long,
+ )
+
+
+def run(context):
+ for variable_name, value in context.params.get_all().items():
+ context.comment(f"variable {variable_name} has value {value}")
diff --git a/app-testing/files/examples/invalid_properties_2.18.py b/app-testing/files/examples/invalid_properties_2.18.py
new file mode 100644
index 00000000000..eb8c1d0e745
--- /dev/null
+++ b/app-testing/files/examples/invalid_properties_2.18.py
@@ -0,0 +1,24 @@
+metadata = {
+ "protocolName": "Add invalid properties to an RTP",
+}
+
+requirements = {"robotType": "Flex", "apiLevel": "2.18"}
+
+
+def add_parameters(parameters):
+ parameters.add_int(
+ display_name="Washes",
+ variable_name="washes",
+ default=6,
+ description="How many washes to perform.",
+ choices=[
+ {"display_name": "1X", "value": 6},
+ {"display_name": "2X", "value": 12},
+ ],
+ magic="🪄🪄🪄🪄",
+ )
+
+
+def run(context):
+ for variable_name, value in context.params.get_all().items():
+ context.comment(f"variable {variable_name} has value {value}")
diff --git a/app-testing/files/examples/invalid_rtp.py b/app-testing/files/examples/invalid_rtp.py
new file mode 100644
index 00000000000..5d5fb9d314b
--- /dev/null
+++ b/app-testing/files/examples/invalid_rtp.py
@@ -0,0 +1,78 @@
+from dataclasses import dataclass, fields
+from typing import List, Type, Union
+
+
+def not_my_type(the_type: Type) -> List[Union[str, float, int, bool, dict, list, tuple, set, frozenset]]:
+ """
+ Returns a list of values of all local variables that do not match the type specified by 'the_type'.
+
+ Args:
+ the_type: The type (e.g., int, str, list) to be excluded from the return value.
+
+ Returns:
+ A list of values of local variables not matching 'the_type'.
+ """
+ none: None = None
+ string: str = "string"
+ integer: int = 1
+ the_float: float = 1.0
+ the_dict: dict = {}
+ the_list: list = []
+ the_tuple: tuple = ()
+ the_set: set = set()
+ the_frozenset: frozenset = frozenset()
+
+ # Collect values that are not of 'the_type'.
+ return [value for value in locals().values() if not isinstance(value, the_type)]
+
+
+@dataclass
+class ErrorVariableNames:
+ dunder: str = "__dunder"
+ leading_underscore: str = "_leading_underscore" # maybe
+ leading_space: str = " space"
+ trailing_space: str = "space "
+ middle_space: str = "middle space"
+ asterisk: str = "*asterisk"
+ period: str = ".period"
+ the_def: str = "def"
+ the_class: str = "class"
+ the_return: str = "return"
+ the_yield: str = "yield"
+ the_raise: str = "raise"
+ the_except: str = "except"
+ the_import: str = "import"
+ the_from: str = "from"
+ the_as: str = "as"
+ the_with: str = "with"
+ the_if: str = "if"
+ the_else: str = "else"
+ the_elif: str = "elif"
+ the_while: str = "while"
+ the_for: str = "for"
+ the_in: str = "in"
+ the_is: str = "is"
+ the_not: str = "not"
+ the_and: str = "and"
+ the_or: str = "or"
+ the_lambda: str = "lambda"
+ the_global: str = "global"
+ the_nonlocal: str = "nonlocal"
+ the_del: str = "del"
+ the_pass: str = "pass"
+ the_break: str = "break"
+ the_continue: str = "continue"
+ the_try: str = "try"
+ the_and: str = "and"
+ the_none: str = "None"
+ the_true: str = "True"
+ the_false: str = "False"
+ the_as: str = "as"
+ the_assert: str = "assert"
+ the_async: str = "async"
+ the_await: str = "await"
+
+ def get_values(self):
+ return [getattr(self, field.name) for field in fields(self)]
diff --git a/app-testing/files/protocols/Flex_S_v2_15_NO_PIPETTES_TC_verifyThermocyclerLoadedSlots.py b/app-testing/files/protocols/Flex_S_v2_15_NO_PIPETTES_TC_verifyThermocyclerLoadedSlots.py
new file mode 100644
index 00000000000..8f56e560552
--- /dev/null
+++ b/app-testing/files/protocols/Flex_S_v2_15_NO_PIPETTES_TC_verifyThermocyclerLoadedSlots.py
@@ -0,0 +1,14 @@
+# Pulled from: https://github.com/Opentrons/opentrons/pull/14491
+
+
+requirements = {
+ "robotType": "Flex",
+ "apiLevel": "2.15",
+}
+
+
+def run(protocol):
+ thermocycler = protocol.load_module("thermocycler module gen2")
+
+ assert protocol.loaded_modules == {"B1": thermocycler}
+ assert protocol.deck["A1"] == thermocycler
diff --git a/app-testing/files/protocols/Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAEnrichment.py b/app-testing/files/protocols/Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAEnrichment.py
new file mode 100644
index 00000000000..e4b55c71752
--- /dev/null
+++ b/app-testing/files/protocols/Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAEnrichment.py
@@ -0,0 +1,794 @@
+from opentrons import protocol_api
+from opentrons import types
+
+metadata = {
+ "protocolName": "Illumina DNA Enrichment",
+ "author": "Opentrons ",
+ "source": "Protocol Library",
+}
+
+requirements = {
+ "robotType": "OT-3",
+ "apiLevel": "2.15",
+}
+
+# SCRIPT SETTINGS
+DRYRUN = "YES" # YES or NO, DRYRUN = 'YES' will return tips, skip incubation times, shorten mix, for testing purposes
+USE_GRIPPER = True
+
+
+# PROTOCOL SETTINGS
+SAMPLES = "8x" # 8x
+HYBRIDDECK = True
+HYBRIDTIME = 1.6 # Hours
+
+# PROTOCOL BLOCKS
+STEP_VOLPOOL = 1
+STEP_CAPTURE = 1
+STEP_WASH = 1
+STEP_PCR = 1
+STEP_PCRDECK = 1
+STEP_POSTPCR = 1
+STEP_CLEANUP = 1
+
+############################################################################################################################################
+############################################################################################################################################
+############################################################################################################################################
+
+
+def run(protocol: protocol_api.ProtocolContext):
+ global DRYRUN
+
+ protocol.comment("THIS IS A DRY RUN") if DRYRUN == "YES" else protocol.comment("THIS IS A REACTION RUN")
+
+ # DECK SETUP AND LABWARE
+ # ========== FIRST ROW ===========
+ heatershaker = protocol.load_module("heaterShakerModuleV1", "1")
+ sample_plate_2 = heatershaker.load_labware("nest_96_wellplate_2ml_deep")
+ tiprack_200_1 = protocol.load_labware("opentrons_flex_96_tiprack_200ul", "2")
+ temp_block = protocol.load_module("temperature module gen2", "3")
+ reagent_plate = temp_block.load_labware("nest_96_wellplate_100ul_pcr_full_skirt")
+ # ========== SECOND ROW ==========
+ MAG_PLATE_SLOT = protocol.load_module("magneticBlockV1", "4")
+ reservoir = protocol.load_labware("nest_96_wellplate_2ml_deep", "5")
+ tiprack_200_2 = protocol.load_labware("opentrons_flex_96_tiprack_200ul", "6")
+ # ========== THIRD ROW ===========
+ thermocycler = protocol.load_module("thermocycler module gen2")
+ sample_plate_1 = thermocycler.load_labware("nest_96_wellplate_100ul_pcr_full_skirt")
+ tiprack_20 = protocol.load_labware("opentrons_flex_96_tiprack_50ul", "9")
+ # ========== FOURTH ROW ==========
+
+ # reagent
+
+ AMPure = reservoir["A1"]
+ SMB = reservoir["A2"]
+ EEW = reservoir["A3"]
+ EtOH = reservoir["A4"]
+ RSB = reservoir["A5"]
+ Liquid_trash = reservoir["A12"]
+
+ EEW_1 = sample_plate_1.wells_by_name()["A8"]
+ EEW_2 = sample_plate_1.wells_by_name()["A9"]
+ EEW_3 = sample_plate_1.wells_by_name()["A10"]
+ EEW_4 = sample_plate_1.wells_by_name()["A11"]
+
+ NHB2 = reagent_plate.wells_by_name()["A1"]
+ Panel = reagent_plate.wells_by_name()["A2"]
+ EHB2 = reagent_plate.wells_by_name()["A3"]
+ Elute = reagent_plate.wells_by_name()["A4"]
+ ET2 = reagent_plate.wells_by_name()["A5"]
+ PPC = reagent_plate.wells_by_name()["A6"]
+ EPM = reagent_plate.wells_by_name()["A7"]
+
+ # pipette
+ p1000 = protocol.load_instrument("flex_8channel_1000", "left", tip_racks=[tiprack_200_1, tiprack_200_2])
+ p50 = protocol.load_instrument("flex_8channel_50", "right", tip_racks=[tiprack_20])
+
+ # tip and sample tracking
+ sample_well = "A3"
+
+ WASHES = [EEW_1, EEW_2, EEW_3, EEW_4]
+
+ def grip_offset(action, item, slot=None):
+ """Grip offset."""
+ from opentrons.types import Point
+
+ # EDIT these values
+ # NOTE: we are still testing to determine our software's defaults
+ # but we also expect users will want to edit these
+ _pick_up_offsets = {
+ "deck": Point(),
+ "mag-plate": Point(),
+ "heater-shaker": Point(z=1.0),
+ "temp-module": Point(),
+ "thermo-cycler": Point(),
+ }
+ # EDIT these values
+ # NOTE: we are still testing to determine our software's defaults
+ # but we also expect users will want to edit these
+ _drop_offsets = {
+ "deck": Point(),
+ "mag-plate": Point(z=0.5),
+ "heater-shaker": Point(y=-0.5),
+ "temp-module": Point(),
+ "thermo-cycler": Point(),
+ }
+ # do NOT edit these values
+ # NOTE: these values will eventually be in our software
+ # and will not need to be inside a protocol
+ _hw_offsets = {
+ "deck": Point(),
+ "mag-plate": Point(z=2.5),
+ "heater-shaker-right": Point(z=2.5),
+ "heater-shaker-left": Point(z=2.5),
+ "temp-module": Point(z=5.0),
+ "thermo-cycler": Point(z=2.5),
+ }
+ # make sure arguments are correct
+ action_options = ["pick-up", "drop"]
+ item_options = list(_hw_offsets.keys())
+ item_options.remove("heater-shaker-left")
+ item_options.remove("heater-shaker-right")
+ item_options.append("heater-shaker")
+ if action not in action_options:
+ raise ValueError(f'"{action}" not recognized, available options: {action_options}')
+ if item not in item_options:
+ raise ValueError(f'"{item}" not recognized, available options: {item_options}')
+ if item == "heater-shaker":
+ assert slot, 'argument slot= is required when using "heater-shaker"'
+ if slot in [1, 4, 7, 10]:
+ side = "left"
+ elif slot in [3, 6, 9, 12]:
+ side = "right"
+ else:
+ raise ValueError("heater shaker must be on either left or right side")
+ hw_offset = _hw_offsets[f"{item}-{side}"]
+ else:
+ hw_offset = _hw_offsets[item]
+ if action == "pick-up":
+ offset = hw_offset + _pick_up_offsets[item]
+ else:
+ offset = hw_offset + _drop_offsets[item]
+
+ # convert from Point() to dict()
+ return {"x": offset.x, "y": offset.y, "z": offset.z}
+
+ ############################################################################################################################################
+ ############################################################################################################################################
+ ############################################################################################################################################
+ # commands
+ heatershaker.open_labware_latch()
+ if DRYRUN == "NO":
+ protocol.comment("SETTING THERMO and TEMP BLOCK Temperature")
+ thermocycler.set_block_temperature(4)
+ thermocycler.set_lid_temperature(100)
+ temp_block.set_temperature(4)
+ thermocycler.open_lid()
+ protocol.pause("Ready")
+ heatershaker.close_labware_latch()
+
+ if STEP_VOLPOOL == 1:
+ protocol.comment("==============================================")
+ protocol.comment("--> Quick Vol Pool")
+ protocol.comment("==============================================")
+
+ if STEP_CAPTURE == 1:
+ protocol.comment("==============================================")
+ protocol.comment("--> Capture")
+ protocol.comment("==============================================")
+
+ protocol.comment("--> Adding NHB2")
+ NHB2Vol = 50
+ p50.pick_up_tip()
+ p50.aspirate(NHB2Vol, NHB2.bottom())
+ p50.dispense(NHB2Vol, sample_plate_1[sample_well].bottom())
+ p50.return_tip()
+
+ protocol.comment("--> Adding Panel")
+ PanelVol = 10
+ p50.pick_up_tip()
+ p50.aspirate(PanelVol, Panel.bottom())
+ p50.dispense(PanelVol, sample_plate_1[sample_well].bottom())
+ p50.return_tip()
+
+ protocol.comment("--> Adding EHB2")
+ EHB2Vol = 10
+ EHB2MixRep = 10 if DRYRUN == "NO" else 1
+ EHB2MixVol = 90
+ p1000.pick_up_tip()
+ p1000.aspirate(EHB2Vol, EHB2.bottom())
+ p1000.dispense(EHB2Vol, sample_plate_1[sample_well].bottom())
+ p1000.move_to(sample_plate_1[sample_well].bottom())
+ p1000.mix(EHB2MixRep, EHB2MixVol)
+ p1000.return_tip()
+
+ if HYBRIDDECK == True:
+ protocol.comment("Hybridize on Deck")
+ ############################################################################################################################################
+ thermocycler.close_lid()
+ if DRYRUN == "NO":
+ profile_TAGSTOP = [
+ {"temperature": 98, "hold_time_minutes": 5},
+ {"temperature": 97, "hold_time_minutes": 1},
+ {"temperature": 95, "hold_time_minutes": 1},
+ {"temperature": 93, "hold_time_minutes": 1},
+ {"temperature": 91, "hold_time_minutes": 1},
+ {"temperature": 89, "hold_time_minutes": 1},
+ {"temperature": 87, "hold_time_minutes": 1},
+ {"temperature": 85, "hold_time_minutes": 1},
+ {"temperature": 83, "hold_time_minutes": 1},
+ {"temperature": 81, "hold_time_minutes": 1},
+ {"temperature": 79, "hold_time_minutes": 1},
+ {"temperature": 77, "hold_time_minutes": 1},
+ {"temperature": 75, "hold_time_minutes": 1},
+ {"temperature": 73, "hold_time_minutes": 1},
+ {"temperature": 71, "hold_time_minutes": 1},
+ {"temperature": 69, "hold_time_minutes": 1},
+ {"temperature": 67, "hold_time_minutes": 1},
+ {"temperature": 65, "hold_time_minutes": 1},
+ {"temperature": 63, "hold_time_minutes": 1},
+ {"temperature": 62, "hold_time_minutes": HYBRIDTIME * 60},
+ ]
+ thermocycler.execute_profile(steps=profile_TAGSTOP, repetitions=1, block_max_volume=100)
+ thermocycler.set_block_temperature(10)
+ thermocycler.open_lid()
+ ############################################################################################################################################
+ else:
+ protocol.comment("Hybridize off Deck")
+
+ if STEP_CAPTURE == 1:
+ if DRYRUN == "NO":
+ heatershaker.set_and_wait_for_temperature(62)
+
+ protocol.comment("--> Heating EEW")
+ EEWVol = 120
+ p1000.pick_up_tip()
+ for loop, X in enumerate(["A8", "A9", "A10", "A11"]):
+ p1000.aspirate(EEWVol + 1, EEW.bottom(z=0.25), rate=0.25)
+ p1000.dispense(EEWVol + 5, sample_plate_1[sample_well].bottom(z=1))
+ p1000.return_tip() # <---------------- Tip Return
+
+ protocol.comment("--> Transfer Hybridization")
+ TransferSup = 100
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_1[sample_well].bottom(z=0.25))
+ p1000.aspirate(TransferSup + 1, rate=0.25)
+ p1000.dispense(TransferSup + 5, sample_plate_2[sample_well].bottom(z=1))
+ p1000.return_tip()
+
+ thermocycler.close_lid()
+
+ protocol.comment("--> ADDING SMB")
+ SMBVol = 250
+ SampleVol = 100
+ SMBMixRep = 15 * 60 if DRYRUN == "NO" else 0.1 * 60
+ SMBPremix = 3 if DRYRUN == "NO" else 1
+ # ========NEW SINGLE TIP DISPENSE===========
+ p1000.pick_up_tip()
+ p1000.mix(SMBMixRep, 200, SMB.bottom(z=1))
+ p1000.aspirate(SMBVol / 2, SMB.bottom(z=1), rate=0.25)
+ p1000.dispense(SMBVol / 2, sample_plate_2[sample_well].top(z=2), rate=0.25)
+ p1000.aspirate(SMBVol / 2, SMB.bottom(z=1), rate=0.25)
+ p1000.dispense(SMBVol / 2, sample_plate_2[sample_well].bottom(z=1), rate=0.25)
+ p1000.default_speed = 5
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=5))
+ for Mix in range(2):
+ p1000.aspirate(100, rate=0.5)
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=1))
+ p1000.aspirate(80, rate=0.5)
+ p1000.dispense(80, rate=0.5)
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=5))
+ p1000.dispense(100, rate=0.5)
+ Mix += 1
+ p1000.blow_out(sample_plate_2[sample_well].top(z=2))
+ p1000.default_speed = 400
+ p1000.move_to(sample_plate_2[sample_well].top(z=5))
+ p1000.move_to(sample_plate_2[sample_well].top(z=0))
+ p1000.move_to(sample_plate_2[sample_well].top(z=5))
+ p1000.return_tip()
+ # ========NEW HS MIX=========================
+ protocol.delay(SMBMixRep)
+
+ # ============================================================================================
+ # GRIPPER MOVE sample_plate_2 FROM heatershaker TO MAGPLATE
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=MAG_PLATE_SLOT,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "heater-shaker", 1),
+ drop_offset=grip_offset("drop", "mag-plate"),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ thermocycler.open_lid()
+
+ if DRYRUN == "NO":
+ protocol.delay(minutes=2)
+
+ protocol.comment("==============================================")
+ protocol.comment("--> WASH")
+ protocol.comment("==============================================")
+
+ protocol.comment("--> Remove SUPERNATANT")
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_2[sample_well].bottom(4))
+ p1000.aspirate(200, rate=0.25)
+ p1000.dispense(200, Liquid_trash)
+ p1000.aspirate(200, rate=0.25)
+ p1000.dispense(200, Liquid_trash)
+ p1000.move_to(Liquid_trash.top(z=5))
+ protocol.delay(minutes=0.1)
+ p1000.blow_out(Liquid_trash.top(z=5))
+ p1000.aspirate(20)
+ p1000.return_tip()
+
+ # ============================================================================================
+ # GRIPPER MOVE sample_plate_2 FROM MAGPLATE TO heatershaker
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=heatershaker,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "mag-plate"),
+ drop_offset=grip_offset("drop", "heater-shaker", 1),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ protocol.comment("--> Repeating 3 washes")
+ washreps = 3
+ for wash in range(washreps):
+ protocol.comment("--> Adding EEW")
+ EEWVol = 200
+ p1000.pick_up_tip()
+ p1000.aspirate(EEWVol, WASHES[wash].bottom())
+ p1000.dispense(EEWVol, sample_plate_2[sample_well].bottom())
+ p1000.return_tip()
+
+ heatershaker.close_labware_latch()
+ heatershaker.set_and_wait_for_shake_speed(rpm=1600)
+ protocol.delay(seconds=4 * 60)
+ heatershaker.deactivate_shaker()
+ heatershaker.open_labware_latch()
+
+ if DRYRUN == "NO":
+ protocol.delay(seconds=5 * 60)
+
+ # ============================================================================================
+ # GRIPPER MOVE sample_plate_2 FROM heatershaker TO MAGPLATE
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=MAG_PLATE_SLOT,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "heater-shaker", 1),
+ drop_offset=grip_offset("drop", "mag-plate"),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ protocol.comment("--> Removing Supernatant")
+ RemoveSup = 200
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=3.5))
+ p1000.aspirate(RemoveSup - 100, rate=0.25)
+ protocol.delay(minutes=0.1)
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=0.5))
+ p1000.aspirate(100, rate=0.25)
+ p1000.move_to(sample_plate_2[sample_well].top(z=2))
+ p1000.dispense(200, Liquid_trash.top(z=0))
+ protocol.delay(minutes=0.1)
+ p1000.blow_out(Liquid_trash.top(z=0))
+ p1000.aspirate(20)
+ p1000.return_tip()
+
+ # ============================================================================================
+ # GRIPPER MOVE sample_plate_2 FROM MAGPLATE TO heatershaker
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=heatershaker,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "mag-plate"),
+ drop_offset=grip_offset("drop", "heater-shaker", 1),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ protocol.comment("--> Adding EEW")
+ EEWVol = 200
+ p1000.pick_up_tip()
+ p1000.aspirate(EEWVol, WASHES[3].bottom())
+ p1000.dispense(EEWVol, sample_plate_2[sample_well].bottom())
+ p1000.return_tip()
+
+ heatershaker.set_and_wait_for_shake_speed(rpm=1600)
+ if DRYRUN == "NO":
+ protocol.delay(seconds=4 * 60)
+ heatershaker.deactivate_shaker()
+
+ protocol.comment("--> Transfer Hybridization")
+ TransferSup = 200
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=0.25))
+ p1000.aspirate(TransferSup, rate=0.25)
+ sample_well = "A4"
+ p1000.dispense(TransferSup, sample_plate_2[sample_well].bottom(z=1))
+ p1000.return_tip()
+
+ protocol.delay(seconds=5 * 60)
+
+ # ============================================================================================
+ # GRIPPER MOVE sample_plate_2 FROM heatershaker TO MAGPLATE
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=MAG_PLATE_SLOT,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "heater-shaker", 1),
+ drop_offset=grip_offset("drop", "mag-plate"),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ protocol.comment("--> Removing Supernatant")
+ RemoveSup = 200
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=3.5))
+ p1000.aspirate(RemoveSup - 100, rate=0.25)
+ protocol.delay(minutes=0.1)
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=0.5))
+ p1000.aspirate(100, rate=0.25)
+ p1000.move_to(sample_plate_2[sample_well].top(z=2))
+ p1000.dispense(200, Liquid_trash.top(z=0))
+ protocol.delay(minutes=0.1)
+ p1000.blow_out(Liquid_trash.top(z=0))
+ p1000.aspirate(20)
+ p1000.return_tip()
+
+ protocol.comment("--> Removing Residual")
+ p50.pick_up_tip()
+ p50.move_to(sample_plate_2[sample_well].bottom(z=0))
+ p50.aspirate(50, rate=0.25)
+ p50.default_speed = 200
+ p50.dispense(100, Liquid_trash.top(z=0))
+ protocol.delay(minutes=0.1)
+ p50.blow_out()
+ p50.default_speed = 400
+ p50.move_to(Liquid_trash.top(z=-5))
+ p50.move_to(Liquid_trash.top(z=0))
+ p50.return_tip()
+
+ protocol.comment("==============================================")
+ protocol.comment("--> ELUTE")
+ protocol.comment("==============================================")
+
+ protocol.comment("--> Adding EE1")
+ EluteVol = 23
+ p50.pick_up_tip()
+ p50.aspirate(EluteVol, Elute.bottom())
+ p50.dispense(EluteVol, sample_plate_2[sample_well].bottom())
+ p50.return_tip()
+
+ # ============================================================================================
+ # GRIPPER MOVE sample_plate_2 FROM MAGPLATE TO heatershaker
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=heatershaker,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "mag-plate"),
+ drop_offset=grip_offset("drop", "heater-shaker", 1),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ heatershaker.close_labware_latch()
+ heatershaker.set_and_wait_for_shake_speed(rpm=1600)
+ if DRYRUN == "NO":
+ protocol.delay(seconds=2 * 60)
+ heatershaker.deactivate_shaker()
+ heatershaker.open_labware_latch()
+
+ if DRYRUN == "NO":
+ protocol.delay(minutes=2)
+
+ # ============================================================================================
+ # GRIPPER MOVE sample_plate_2 FROM heatershaker TO MAGPLATE
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=MAG_PLATE_SLOT,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "heater-shaker", 1),
+ drop_offset=grip_offset("drop", "mag-plate"),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ protocol.comment("--> Transfer Elution")
+ TransferSup = 21
+ p50.pick_up_tip()
+ p50.move_to(sample_plate_2[sample_well].bottom(z=0.25))
+ p50.aspirate(TransferSup + 1, rate=0.25)
+ sample_well = "A5"
+ p50.dispense(TransferSup + 5, sample_plate_1[sample_well].bottom(z=1))
+ p50.return_tip()
+
+ protocol.comment("--> Adding ET2")
+ ET2Vol = 4
+ ET2MixRep = 10 if DRYRUN == "NO" else 1
+ ET2MixVol = 20
+ p50.pick_up_tip()
+ p50.aspirate(ET2Vol, ET2.bottom())
+ p50.dispense(ET2Vol, sample_plate_1[sample_well].bottom())
+ p50.move_to(sample_plate_1[sample_well].bottom())
+ p50.mix(ET2MixRep, ET2MixVol)
+ p50.return_tip()
+
+ if STEP_PCR == 1:
+ protocol.comment("==============================================")
+ protocol.comment("--> AMPLIFICATION")
+ protocol.comment("==============================================")
+
+ protocol.comment("--> Adding PPC")
+ PPCVol = 5
+ p50.pick_up_tip()
+ p50.aspirate(PPCVol, PPC.bottom())
+ p50.dispense(PPCVol, sample_plate_1[sample_well].bottom())
+ p50.return_tip()
+
+ protocol.comment("--> Adding EPM")
+ EPMVol = 20
+ EPMMixRep = 10 if DRYRUN == "NO" else 1
+ EPMMixVol = 45
+ p50.pick_up_tip()
+ p50.aspirate(EPMVol, EPM.bottom())
+ p50.dispense(EPMVol, sample_plate_1[sample_well].bottom())
+ p50.move_to(sample_plate_1[sample_well].bottom())
+ p50.mix(EPMMixRep, EPMMixVol)
+ p50.return_tip()
+
+ heatershaker.deactivate_heater()
+
+ if STEP_PCRDECK == 1:
+ if DRYRUN == "NO":
+ ############################################################################################################################################
+ protocol.pause("Seal, Run PCR (60min)")
+ if DRYRUN == "NO":
+ thermocycler.close_lid()
+ profile_PCR_1 = [{"temperature": 98, "hold_time_seconds": 45}]
+ thermocycler.execute_profile(steps=profile_PCR_1, repetitions=1, block_max_volume=50)
+ profile_PCR_2 = [
+ {"temperature": 98, "hold_time_seconds": 30},
+ {"temperature": 60, "hold_time_seconds": 30},
+ {"temperature": 72, "hold_time_seconds": 30},
+ ]
+ thermocycler.execute_profile(steps=profile_PCR_2, repetitions=12, block_max_volume=50)
+ profile_PCR_3 = [{"temperature": 72, "hold_time_minutes": 1}]
+ thermocycler.execute_profile(steps=profile_PCR_3, repetitions=1, block_max_volume=50)
+ thermocycler.set_block_temperature(10)
+ ############################################################################################################################################
+ thermocycler.open_lid()
+
+ if STEP_CLEANUP == 1:
+ protocol.comment("==============================================")
+ protocol.comment("--> Cleanup")
+ protocol.comment("==============================================")
+
+ # ============================================================================================
+ # GRIPPER MOVE sample_plate_2 FROM MAGPLATE TO heatershaker
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=heatershaker,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "mag-plate"),
+ drop_offset=grip_offset("drop", "heater-shaker", 1),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ protocol.comment("--> Transfer Elution")
+ TransferSup = 45
+ p50.pick_up_tip()
+ p50.move_to(sample_plate_1[sample_well].bottom(z=0.25))
+ p50.aspirate(TransferSup + 1, rate=0.25)
+ sample_well = "A5"
+ p50.dispense(TransferSup + 5, sample_plate_2[sample_well].bottom(z=1))
+ p50.return_tip()
+
+ protocol.comment("--> ADDING AMPure (0.8x)")
+ AMPureVol = 40.5
+ SampleVol = 45
+ AMPureMixRep = 5 * 60 if DRYRUN == "NO" else 0.1 * 60
+ AMPurePremix = 3 if DRYRUN == "NO" else 1
+ # ========NEW SINGLE TIP DISPENSE===========
+ p1000.pick_up_tip()
+ p1000.mix(AMPurePremix, AMPureVol + 10, AMPure.bottom(z=1))
+ p1000.aspirate(AMPureVol, AMPure.bottom(z=1), rate=0.25)
+ p1000.dispense(AMPureVol, sample_plate_2[sample_well].bottom(z=1), rate=0.25)
+ p1000.default_speed = 5
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=5))
+ for Mix in range(2):
+ p1000.aspirate(60, rate=0.5)
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=1))
+ p1000.aspirate(60, rate=0.5)
+ p1000.dispense(60, rate=0.5)
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=5))
+ p1000.dispense(30, rate=0.5)
+ Mix += 1
+ p1000.blow_out(sample_plate_2[sample_well].top(z=2))
+ p1000.default_speed = 400
+ p1000.move_to(sample_plate_2[sample_well].top(z=5))
+ p1000.move_to(sample_plate_2[sample_well].top(z=0))
+ p1000.move_to(sample_plate_2[sample_well].top(z=5))
+ p1000.return_tip()
+ # ========NEW HS MIX=========================
+ heatershaker.set_and_wait_for_shake_speed(rpm=1800)
+ protocol.delay(AMPureMixRep)
+ heatershaker.deactivate_shaker()
+
+ # ============================================================================================
+ # GRIPPER MOVE PLATE FROM HEATER SHAKER TO MAG PLATE
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=MAG_PLATE_SLOT,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "heater-shaker", 1),
+ drop_offset=grip_offset("drop", "mag-plate"),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ if DRYRUN == "NO":
+ protocol.delay(minutes=4)
+
+ protocol.comment("--> Removing Supernatant")
+ RemoveSup = 200
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=3.5))
+ p1000.aspirate(RemoveSup - 100, rate=0.25)
+ protocol.delay(minutes=0.1)
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=0.5))
+ p1000.aspirate(100, rate=0.25)
+ p1000.default_speed = 5
+ p1000.move_to(sample_plate_2[sample_well].top(z=2))
+ p1000.default_speed = 200
+ p1000.dispense(200, Liquid_trash.top(z=0))
+ protocol.delay(minutes=0.1)
+ p1000.blow_out()
+ p1000.default_speed = 400
+ p1000.move_to(Liquid_trash.top(z=-5))
+ p1000.move_to(Liquid_trash.top(z=0))
+ p1000.return_tip()
+
+ for X in range(2):
+ protocol.comment("--> ETOH Wash")
+ ETOHMaxVol = 150
+ p1000.pick_up_tip()
+ p1000.aspirate(ETOHMaxVol, EtOH.bottom(z=1))
+ p1000.move_to(EtOH.top(z=0))
+ p1000.move_to(EtOH.top(z=-5))
+ p1000.move_to(EtOH.top(z=0))
+ p1000.move_to(sample_plate_2[sample_well].top(z=-2))
+ p1000.dispense(ETOHMaxVol, rate=1)
+ protocol.delay(minutes=0.1)
+ p1000.blow_out()
+ p1000.move_to(sample_plate_2[sample_well].top(z=5))
+ p1000.move_to(sample_plate_2[sample_well].top(z=0))
+ p1000.move_to(sample_plate_2[sample_well].top(z=5))
+ p1000.return_tip()
+
+ if DRYRUN == "NO":
+ protocol.delay(minutes=0.5)
+
+ protocol.comment("--> Remove ETOH Wash")
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=3.5))
+ p1000.aspirate(RemoveSup - 100, rate=0.25)
+ protocol.delay(minutes=0.1)
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=0.5))
+ p1000.aspirate(100, rate=0.25)
+ p1000.default_speed = 5
+ p1000.move_to(sample_plate_2[sample_well].top(z=2))
+ p1000.default_speed = 200
+ p1000.dispense(200, Liquid_trash.top(z=0))
+ protocol.delay(minutes=0.1)
+ p1000.blow_out()
+ p1000.default_speed = 400
+ p1000.move_to(Liquid_trash.top(z=-5))
+ p1000.move_to(Liquid_trash.top(z=0))
+ p1000.return_tip()
+
+ if DRYRUN == "NO":
+ protocol.delay(minutes=2)
+
+ protocol.comment("--> Removing Residual ETOH")
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=0))
+ p1000.aspirate(50, rate=0.25)
+ p1000.default_speed = 200
+ p1000.dispense(100, Liquid_trash.top(z=0))
+ protocol.delay(minutes=0.1)
+ p1000.blow_out()
+ p1000.default_speed = 400
+ p1000.move_to(Liquid_trash.top(z=-5))
+ p1000.move_to(Liquid_trash.top(z=0))
+ p1000.return_tip()
+
+ if DRYRUN == "NO":
+ protocol.delay(minutes=1)
+
+ # ============================================================================================
+ # GRIPPER MOVE PLATE FROM MAG PLATE TO HEATER SHAKER
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=heatershaker,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "mag-plate"),
+ drop_offset=grip_offset("drop", "heater-shaker", 1),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ protocol.comment("--> Adding RSB")
+ RSBVol = 32
+ RSBMixRep = 1 * 60 if DRYRUN == "NO" else 0.1 * 60
+ p1000.pick_up_tip()
+ p1000.aspirate(RSBVol, RSB.bottom(z=1))
+
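+ # Dispense and re-aspirate the RSB at four points offset from the well center (about 1 mm
+ # in +x, +y, -x and -y, 4 mm below center), likely to wash the bead pellet off the well wall.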
+ p1000.move_to((sample_plate_2.wells_by_name()[sample_well].center().move(types.Point(x=1.3 * 0.8, y=0, z=-4))))
+ p1000.dispense(RSBVol, rate=1)
+ p1000.move_to(sample_plate_2.wells_by_name()[sample_well].bottom(z=1))
+ p1000.aspirate(RSBVol, rate=1)
+ p1000.move_to((sample_plate_2.wells_by_name()[sample_well].center().move(types.Point(x=0, y=1.3 * 0.8, z=-4))))
+ p1000.dispense(RSBVol, rate=1)
+ p1000.move_to(sample_plate_2.wells_by_name()[sample_well].bottom(z=1))
+ p1000.aspirate(RSBVol, rate=1)
+ p1000.move_to((sample_plate_2.wells_by_name()[sample_well].center().move(types.Point(x=1.3 * -0.8, y=0, z=-4))))
+ p1000.dispense(RSBVol, rate=1)
+ p1000.move_to(sample_plate_2.wells_by_name()[sample_well].bottom(z=1))
+ p1000.aspirate(RSBVol, rate=1)
+ p1000.move_to((sample_plate_2.wells_by_name()[sample_well].center().move(types.Point(x=0, y=1.3 * -0.8, z=-4))))
+ p1000.dispense(RSBVol, rate=1)
+ p1000.move_to(sample_plate_2.wells_by_name()[sample_well].bottom(z=1))
+ p1000.aspirate(RSBVol, rate=1)
+ p1000.dispense(RSBVol, rate=1)
+
+ p1000.blow_out(sample_plate_2.wells_by_name()[sample_well].center())
+ p1000.move_to(sample_plate_2.wells_by_name()[sample_well].top(z=5))
+ p1000.move_to(sample_plate_2.wells_by_name()[sample_well].top(z=0))
+ p1000.move_to(sample_plate_2.wells_by_name()[sample_well].top(z=5))
+ p1000.return_tip()
+ heatershaker.set_and_wait_for_shake_speed(rpm=1600)
+ protocol.delay(RSBMixRep)
+ heatershaker.deactivate_shaker()
+
+ # ============================================================================================
+ # GRIPPER MOVE PLATE FROM HEATER SHAKER TO MAG PLATE
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=MAG_PLATE_SLOT,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "heater-shaker", 1),
+ drop_offset=grip_offset("drop", "mag-plate"),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ if DRYRUN == "NO":
+ protocol.delay(minutes=3)
+
+ protocol.comment("--> Transferring Supernatant")
+ TransferSup = 30
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_2[sample_well].bottom(z=0.25))
+ p1000.aspirate(TransferSup + 1, rate=0.25)
+ p1000.dispense(TransferSup + 5, sample_plate_2["A7"].bottom(z=1))
+ p1000.return_tip()
diff --git a/app-testing/files/protocols/Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAEnrichmentv4.py b/app-testing/files/protocols/Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAEnrichmentv4.py
new file mode 100644
index 00000000000..92b7018f773
--- /dev/null
+++ b/app-testing/files/protocols/Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAEnrichmentv4.py
@@ -0,0 +1,1066 @@
+from opentrons import protocol_api
+from opentrons import types
+
+metadata = {
+ "protocolName": "Illumina DNA Enrichment v4",
+ "author": "Opentrons ",
+ "source": "Protocol Library",
+}
+
+requirements = {
+ "robotType": "OT-3",
+ "apiLevel": "2.15",
+}
+
+# SCRIPT SETTINGS
+DRYRUN = True # True = skip incubation times, shorten mix, for testing purposes
+USE_GRIPPER = True # True = Uses Gripper, False = Manual Move
+TIP_TRASH = False # True = Used tips go in Trash, False = Used tips go back into rack
+HYBRID_PAUSE = True # True = sets a pause on the Hybridization
+
+# PROTOCOL SETTINGS
+COLUMNS = 3 # 1-3
+HYBRIDDECK = True
+HYBRIDTIME = 1.6 # Hours
+
+# PROTOCOL BLOCKS
+STEP_VOLPOOL = 0
+STEP_HYB = 0
+STEP_CAPTURE = 1
+STEP_WASH = 1
+STEP_PCR = 1
+STEP_PCRDECK = 1
+STEP_CLEANUP = 1
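+# Each STEP_ flag gates the corresponding block below: 1 = run that block, 0 = skip it.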
+
+############################################################################################################################################
+############################################################################################################################################
+############################################################################################################################################
+
+p200_tips = 0
+p50_tips = 0
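+# Running tallies of used tips; tipcheck() resets the racks (pausing first on non-ABR runs) once a set is exhausted.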
+
+ABR_TEST = True
+if ABR_TEST == True:
+ DRYRUN = True # Overrides to only DRYRUN
+ TIP_TRASH = False # Overrides to only REUSING TIPS
+ RUN = 3 # Repetitions
+else:
+ RUN = 1
+
+
+def run(protocol: protocol_api.ProtocolContext):
+ global p200_tips
+ global p50_tips
+
+ if ABR_TEST == True:
+ protocol.comment("THIS IS A ABR RUN WITH " + str(RUN) + " REPEATS")
+ protocol.comment("THIS IS A DRY RUN") if DRYRUN == True else protocol.comment("THIS IS A REACTION RUN")
+ protocol.comment("USED TIPS WILL GO IN TRASH") if TIP_TRASH == True else protocol.comment("USED TIPS WILL BE RE-RACKED")
+
+ # DECK SETUP AND LABWARE
+ # ========== FIRST ROW ===========
+ heatershaker = protocol.load_module("heaterShakerModuleV1", "1")
+ sample_plate_2 = heatershaker.load_labware("nest_96_wellplate_2ml_deep")
+ reservoir = protocol.load_labware("nest_96_wellplate_2ml_deep", "2")
+ temp_block = protocol.load_module("temperature module gen2", "3")
+ reagent_plate = temp_block.load_labware("nest_96_wellplate_100ul_pcr_full_skirt")
+ # ========== SECOND ROW ==========
+ MAG_PLATE_SLOT = 4
+ tiprack_200_1 = protocol.load_labware("opentrons_flex_96_tiprack_200ul", "5")
+ tiprack_50_1 = protocol.load_labware("opentrons_flex_96_tiprack_50ul", "6")
+ # ========== THIRD ROW ===========
+ thermocycler = protocol.load_module("thermocycler module gen2")
+ sample_plate_1 = thermocycler.load_labware("nest_96_wellplate_100ul_pcr_full_skirt")
+ tiprack_200_2 = protocol.load_labware("opentrons_flex_96_tiprack_200ul", "8")
+ tiprack_50_2 = protocol.load_labware("opentrons_flex_96_tiprack_50ul", "9")
+ # ========== FOURTH ROW ==========
+ tiprack_200_3 = protocol.load_labware("opentrons_flex_96_tiprack_200ul", "11")
+
+ # reagent
+ AMPure = reservoir["A1"]
+ SMB = reservoir["A2"]
+
+ EtOH = reservoir["A4"]
+ RSB = reservoir["A5"]
+ Liquid_trash_well_1 = reservoir["A9"]
+ Liquid_trash_well_2 = reservoir["A10"]
+ Liquid_trash_well_3 = reservoir["A11"]
+ Liquid_trash_well_4 = reservoir["A12"]
+
+ # Will Be distributed during the protocol
+ EEW_1 = sample_plate_2.wells_by_name()["A10"]
+ EEW_2 = sample_plate_2.wells_by_name()["A11"]
+ EEW_3 = sample_plate_2.wells_by_name()["A12"]
+
+ NHB2 = reagent_plate.wells_by_name()["A1"]
+ Panel = reagent_plate.wells_by_name()["A2"]
+ EHB2 = reagent_plate.wells_by_name()["A3"]
+ Elute = reagent_plate.wells_by_name()["A4"]
+ ET2 = reagent_plate.wells_by_name()["A5"]
+ PPC = reagent_plate.wells_by_name()["A6"]
+ EPM = reagent_plate.wells_by_name()["A7"]
+
+ # pipette
+ p1000 = protocol.load_instrument("flex_8channel_1000", "left", tip_racks=[tiprack_200_1, tiprack_200_2, tiprack_200_3])
+ p50 = protocol.load_instrument("flex_8channel_50", "right", tip_racks=[tiprack_50_1, tiprack_50_2])
+
+ # tip and sample tracking
+ if COLUMNS == 1:
+ column_1_list = ["A1"] # Plate 1
+ column_2_list = ["A1"] # Plate 2
+ column_3_list = ["A4"] # Plate 2
+ column_4_list = ["A4"] # Plate 1
+ column_5_list = ["A7"] # Plate 2
+ column_6_list = ["A7"] # Plate 1
+ WASHES = [EEW_1]
+ if COLUMNS == 2:
+ column_1_list = ["A1", "A2"] # Plate 1
+ column_2_list = ["A1", "A2"] # Plate 2
+ column_3_list = ["A4", "A5"] # Plate 2
+ column_4_list = ["A4", "A5"] # Plate 1
+ column_5_list = ["A7", "A8"] # Plate 2
+ column_6_list = ["A7", "A8"] # Plate 1
+ WASHES = [EEW_1, EEW_2]
+ if COLUMNS == 3:
+ column_1_list = ["A1", "A2", "A3"] # Plate 1
+ column_2_list = ["A1", "A2", "A3"] # Plate 2
+ column_3_list = ["A4", "A5", "A6"] # Plate 2
+ column_4_list = ["A4", "A5", "A6"] # Plate 1
+ column_5_list = ["A7", "A8", "A9"] # Plate 2
+ column_6_list = ["A7", "A8", "A9"] # Plate 1
+ WASHES = [EEW_1, EEW_2, EEW_3]
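+ # column_1/4/6 wells live on sample_plate_1 (thermocycler) and column_2/3/5 wells on
+ # sample_plate_2 (heater-shaker/magnet); each stage transfers the samples between the two plates.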
+
+ def tipcheck():
+ # Reset tip tracking once a rack set is exhausted (pausing first for a manual swap on non-ABR runs).
+ global p200_tips
+ global p50_tips
+ if p200_tips >= 3 * 12:
+ if ABR_TEST == True:
+ p1000.reset_tipracks()
+ else:
+ protocol.pause("RESET p200 TIPS")
+ p1000.reset_tipracks()
+ p200_tips = 0
+ if p50_tips >= 2 * 12:
+ if ABR_TEST == True:
+ p50.reset_tipracks()
+ else:
+ protocol.pause("RESET p50 TIPS")
+ p50.reset_tipracks()
+ p50_tips = 0
+
+ def grip_offset(action, item, slot=None):
+ """Grip offset."""
+ from opentrons.types import Point
+
+ # EDIT these values
+ # NOTE: we are still testing to determine our software's defaults
+ # but we also expect users will want to edit these
+ _pick_up_offsets = {
+ "deck": Point(),
+ "mag-plate": Point(),
+ "heater-shaker": Point(z=1.0),
+ "temp-module": Point(),
+ "thermo-cycler": Point(),
+ }
+ # EDIT these values
+ # NOTE: we are still testing to determine our software's defaults
+ # but we also expect users will want to edit these
+ _drop_offsets = {
+ "deck": Point(),
+ "mag-plate": Point(x=0.1, y=-0.25, z=0.5),
+ "heater-shaker": Point(y=-0.5),
+ "temp-module": Point(),
+ "thermo-cycler": Point(),
+ }
+ # do NOT edit these values
+ # NOTE: these values will eventually be in our software
+ # and will not need to be inside a protocol
+ _hw_offsets = {
+ "deck": Point(),
+ "mag-plate": Point(z=34.5),
+ "heater-shaker-right": Point(z=2.5),
+ "heater-shaker-left": Point(z=2.5),
+ "temp-module": Point(z=5.0),
+ "thermo-cycler": Point(z=2.5),
+ }
+ # make sure arguments are correct
+ action_options = ["pick-up", "drop"]
+ item_options = list(_hw_offsets.keys())
+ item_options.remove("heater-shaker-left")
+ item_options.remove("heater-shaker-right")
+ item_options.append("heater-shaker")
+ if action not in action_options:
+ raise ValueError(f'"{action}" not recognized, available options: {action_options}')
+ if item not in item_options:
+ raise ValueError(f'"{item}" not recognized, available options: {item_options}')
+ if item == "heater-shaker":
+ assert slot, 'argument slot= is required when using "heater-shaker"'
+ if slot in [1, 4, 7, 10]:
+ side = "left"
+ elif slot in [3, 6, 9, 12]:
+ side = "right"
+ else:
+ raise ValueError("heater shaker must be on either left or right side")
+ hw_offset = _hw_offsets[f"{item}-{side}"]
+ else:
+ hw_offset = _hw_offsets[item]
+ if action == "pick-up":
+ offset = hw_offset + _pick_up_offsets[item]
+ else:
+ offset = hw_offset + _drop_offsets[item]
+
+ # convert from Point() to dict()
+ return {"x": offset.x, "y": offset.y, "z": offset.z}
+
+ ############################################################################################################################################
+ ############################################################################################################################################
+ ############################################################################################################################################
+ # commands
+ for loop in range(RUN):
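+ # One full enrichment workflow per repetition; with ABR_TEST the reagents and samples
+ # are restored at the end of the loop so the next repetition can reuse the same deck.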
+ thermocycler.open_lid()
+ heatershaker.open_labware_latch()
+ if DRYRUN == False:
+ if STEP_HYB == 1:
+ protocol.comment("SETTING THERMO and TEMP BLOCK Temperature")
+ thermocycler.set_block_temperature(4)
+ thermocycler.set_lid_temperature(100)
+ temp_block.set_temperature(4)
+ else:
+ protocol.comment("SETTING THERMO and TEMP BLOCK Temperature")
+ thermocycler.set_block_temperature(58)
+ thermocycler.set_lid_temperature(58)
+ heatershaker.set_and_wait_for_temperature(58)
+ protocol.pause("Ready")
+ heatershaker.close_labware_latch()
+ Liquid_trash = Liquid_trash_well_1
+
+ # Sample Plate contains 30ul of DNA
+
+ if STEP_VOLPOOL == 1:
+ protocol.comment("==============================================")
+ protocol.comment("--> Quick Vol Pool")
+ protocol.comment("==============================================")
+
+ if STEP_HYB == 1:
+ protocol.comment("==============================================")
+ protocol.comment("--> HYB")
+ protocol.comment("==============================================")
+
+ protocol.comment("--> Adding NHB2")
+ NHB2Vol = 50
+ for loop, X in enumerate(column_1_list):
+ p50.pick_up_tip()
+ p50.aspirate(NHB2Vol, NHB2.bottom())
+ p50.dispense(NHB2Vol, sample_plate_1[X].bottom())
+ p50.return_tip() if TIP_TRASH == False else p50.drop_tip()
+ p50_tips += 1
+ tipcheck()
+
+ protocol.comment("--> Adding Panel")
+ PanelVol = 10
+ for loop, X in enumerate(column_1_list):
+ p50.pick_up_tip()
+ p50.aspirate(PanelVol, Panel.bottom())
+ p50.dispense(PanelVol, sample_plate_1[X].bottom())
+ p50.return_tip() if TIP_TRASH == False else p50.drop_tip()
+ p50_tips += 1
+ tipcheck()
+
+ protocol.comment("--> Adding EHB2")
+ EHB2Vol = 10
+ EHB2MixRep = 10 if DRYRUN == False else 1
+ EHB2MixVol = 90
+ for loop, X in enumerate(column_1_list):
+ p1000.pick_up_tip()
+ p1000.aspirate(EHB2Vol, EHB2.bottom())
+ p1000.dispense(EHB2Vol, sample_plate_1[X].bottom())
+ p1000.move_to(sample_plate_1[X].bottom())
+ p1000.mix(EHB2MixRep, EHB2MixVol)
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+
+ if HYBRIDDECK == True:
+ protocol.comment("Hybridize on Deck")
+ ############################################################################################################################################
+ thermocycler.close_lid()
+ if DRYRUN == False:
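+ # Touchdown hybridization: 5 min at 98C, then ~2C steps from 97C down to 63C
+ # (1 min each), finishing with a 62C hold for HYBRIDTIME hours.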
+ profile_TAGSTOP = [
+ {"temperature": 98, "hold_time_minutes": 5},
+ {"temperature": 97, "hold_time_minutes": 1},
+ {"temperature": 95, "hold_time_minutes": 1},
+ {"temperature": 93, "hold_time_minutes": 1},
+ {"temperature": 91, "hold_time_minutes": 1},
+ {"temperature": 89, "hold_time_minutes": 1},
+ {"temperature": 87, "hold_time_minutes": 1},
+ {"temperature": 85, "hold_time_minutes": 1},
+ {"temperature": 83, "hold_time_minutes": 1},
+ {"temperature": 81, "hold_time_minutes": 1},
+ {"temperature": 79, "hold_time_minutes": 1},
+ {"temperature": 77, "hold_time_minutes": 1},
+ {"temperature": 75, "hold_time_minutes": 1},
+ {"temperature": 73, "hold_time_minutes": 1},
+ {"temperature": 71, "hold_time_minutes": 1},
+ {"temperature": 69, "hold_time_minutes": 1},
+ {"temperature": 67, "hold_time_minutes": 1},
+ {"temperature": 65, "hold_time_minutes": 1},
+ {"temperature": 63, "hold_time_minutes": 1},
+ {"temperature": 62, "hold_time_minutes": HYBRIDTIME * 60},
+ ]
+ thermocycler.execute_profile(steps=profile_TAGSTOP, repetitions=1, block_max_volume=100)
+ thermocycler.set_block_temperature(62)
+ if HYBRID_PAUSE == True:
+ protocol.comment("HYBRIDIZATION PAUSED")
+ thermocycler.set_block_temperature(10)
+ thermocycler.open_lid()
+ ############################################################################################################################################
+ else:
+ protocol.comment("Hybridize off Deck")
+
+ if STEP_CAPTURE == 1:
+ protocol.comment("==============================================")
+ protocol.comment("--> Capture")
+ protocol.comment("==============================================")
+ # Standard Setup
+
+ if DRYRUN == False:
+ protocol.comment("SETTING THERMO and TEMP BLOCK Temperature")
+ thermocycler.set_block_temperature(58)
+ thermocycler.set_lid_temperature(58)
+
+ if DRYRUN == False:
+ heatershaker.set_and_wait_for_temperature(58)
+
+ protocol.comment("--> Transfer Hybridization")
+ TransferSup = 100
+ for loop, X in enumerate(column_1_list):
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_1[X].bottom(z=0.25))
+ p1000.aspirate(TransferSup + 1, rate=0.25)
+ p1000.dispense(TransferSup + 5, sample_plate_2[column_2_list[loop]].bottom(z=1))
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+
+ thermocycler.close_lid()
+
+ protocol.comment("--> ADDING SMB")
+ SMBVol = 250
+ SampleVol = 100
+ SMBMixRPM = 2000
+ SMBMixRep = 5 * 60 if DRYRUN == False else 0.1 * 60
+ SMBPremix = 3 if DRYRUN == False else 1
+ # ==============================
+ for loop, X in enumerate(column_2_list):
+ p1000.pick_up_tip()
+ p1000.mix(SMBPremix, 200, SMB.bottom(z=1))
+ p1000.aspirate(SMBVol / 2, SMB.bottom(z=1), rate=0.25)
+ p1000.dispense(SMBVol / 2, sample_plate_2[X].top(z=-7), rate=0.25)
+ p1000.aspirate(SMBVol / 2, SMB.bottom(z=1), rate=0.25)
+ p1000.dispense(SMBVol / 2, sample_plate_2[X].bottom(z=1), rate=0.25)
+ p1000.default_speed = 5
+ p1000.move_to(sample_plate_2[X].bottom(z=5))
+ for Mix in range(2):
+ p1000.aspirate(100, rate=0.5)
+ p1000.move_to(sample_plate_2[X].bottom(z=1))
+ p1000.aspirate(80, rate=0.5)
+ p1000.dispense(80, rate=0.5)
+ p1000.move_to(sample_plate_2[X].bottom(z=5))
+ p1000.dispense(100, rate=0.5)
+ Mix += 1
+ p1000.blow_out(sample_plate_2[X].top(z=-7))
+ p1000.default_speed = 400
+ p1000.move_to(sample_plate_2[X].top(z=5))
+ p1000.move_to(sample_plate_2[X].top(z=0))
+ p1000.move_to(sample_plate_2[X].top(z=5))
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+ # ==============================
+ heatershaker.set_and_wait_for_shake_speed(rpm=SMBMixRPM)
+ protocol.delay(SMBMixRep)
+ heatershaker.deactivate_shaker()
+
+ # ============================================================================================
+ # GRIPPER MOVE sample_plate_2 FROM heatershaker TO MAGPLATE
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=MAG_PLATE_SLOT,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "heater-shaker", 1),
+ drop_offset=grip_offset("drop", "mag-plate"),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ thermocycler.open_lid()
+
+ if DRYRUN == False:
+ protocol.delay(minutes=2)
+
+ protocol.comment("==============================================")
+ protocol.comment("--> WASH")
+ protocol.comment("==============================================")
+ # Setting Labware to Resume at Cleanup 1
+
+ protocol.comment("--> Remove SUPERNATANT")
+ for loop, X in enumerate(column_2_list):
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_2[X].bottom(4))
+ p1000.aspirate(200, rate=0.25)
+ p1000.dispense(200, Liquid_trash.top(z=-7))
+ p1000.move_to(sample_plate_2[X].bottom(0.5))
+ p1000.aspirate(200, rate=0.25)
+ p1000.dispense(200, Liquid_trash.top(z=-7))
+ p1000.move_to(Liquid_trash.top(z=-7))
+ protocol.delay(minutes=0.1)
+ p1000.blow_out(Liquid_trash.top(z=-7))
+ p1000.aspirate(20)
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+
+ Liquid_trash = Liquid_trash_well_2
+
+ # ============================================================================================
+ # GRIPPER MOVE sample_plate_2 FROM MAGPLATE TO heatershaker
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=heatershaker,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "mag-plate"),
+ drop_offset=grip_offset("drop", "heater-shaker", 1),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ protocol.comment("--> Repeating 3 washes")
+ washreps = 3
+ washcount = 0
+ for wash in range(washreps):
+ protocol.comment("--> Adding EEW")
+ EEWVol = 200
+ for loop, X in enumerate(column_2_list):
+ p1000.pick_up_tip()
+ p1000.aspirate(EEWVol, WASHES[loop].bottom())
+ p1000.dispense(EEWVol, sample_plate_2[X].bottom())
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+ heatershaker.close_labware_latch()
+ heatershaker.set_and_wait_for_shake_speed(rpm=1800)
+ if DRYRUN == False:
+ protocol.delay(seconds=4 * 60)
+ heatershaker.deactivate_shaker()
+ heatershaker.open_labware_latch()
+
+ if DRYRUN == False:
+ protocol.delay(seconds=5 * 60)
+
+ # ============================================================================================
+ # GRIPPER MOVE sample_plate_2 FROM heatershaker TO MAGPLATE
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=MAG_PLATE_SLOT,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "heater-shaker", 1),
+ drop_offset=grip_offset("drop", "mag-plate"),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ if DRYRUN == False:
+ protocol.delay(seconds=1 * 60)
+
+ if washcount > 2:
+ Liquid_trash = Liquid_trash_well_3
+
+ protocol.comment("--> Removing Supernatant")
+ RemoveSup = 200
+ for loop, X in enumerate(column_2_list):
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_2[X].bottom(z=3.5))
+ p1000.aspirate(RemoveSup - 100, rate=0.25)
+ protocol.delay(minutes=0.1)
+ p1000.move_to(sample_plate_2[X].bottom(z=0.5))
+ p1000.aspirate(100, rate=0.25)
+ p1000.move_to(sample_plate_2[X].top(z=0.5))
+ p1000.dispense(200, Liquid_trash.top(z=-7))
+ protocol.delay(minutes=0.1)
+ p1000.blow_out(Liquid_trash.top(z=-7))
+ p1000.aspirate(20)
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+
+ # ============================================================================================
+ # GRIPPER MOVE sample_plate_2 FROM MAGPLATE TO heatershaker
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=heatershaker,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "mag-plate"),
+ drop_offset=grip_offset("drop", "heater-shaker", 1),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ washcount += 1
+
+ protocol.comment("--> Adding EEW")
+ EEWVol = 200
+ for loop, X in enumerate(column_2_list):
+ p1000.pick_up_tip()
+ p1000.aspirate(EEWVol, WASHES[loop].bottom())
+ p1000.dispense(EEWVol, sample_plate_2[X].bottom())
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+
+ heatershaker.set_and_wait_for_shake_speed(rpm=1800)
+ if DRYRUN == False:
+ protocol.delay(seconds=4 * 60)
+ heatershaker.deactivate_shaker()
+
+ if DRYRUN == False:
+ protocol.delay(seconds=1 * 60)
+
+ protocol.comment("--> Transfer Hybridization")
+ TransferSup = 200
+ for loop, X in enumerate(column_2_list):
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_2[X].bottom(z=0.25))
+ p1000.aspirate(TransferSup, rate=0.25)
+ p1000.dispense(TransferSup, sample_plate_2[column_3_list[loop]].bottom(z=1))
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+
+ if DRYRUN == False:
+ protocol.delay(seconds=5 * 60)
+
+ # ============================================================================================
+ # GRIPPER MOVE sample_plate_2 FROM heatershaker TO MAGPLATE
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=MAG_PLATE_SLOT,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "heater-shaker", 1),
+ drop_offset=grip_offset("drop", "mag-plate"),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ if DRYRUN == False:
+ protocol.delay(seconds=1 * 60)
+
+ protocol.comment("--> Removing Supernatant")
+ RemoveSup = 150
+ for loop, X in enumerate(column_3_list):
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_2[X].bottom(z=3.5))
+ p1000.aspirate(RemoveSup - 100, rate=0.25)
+ protocol.delay(minutes=0.1)
+ p1000.move_to(sample_plate_2[X].bottom(z=0.5))
+ p1000.aspirate(100, rate=0.25)
+ p1000.move_to(sample_plate_2[X].top(z=0.5))
+ p1000.dispense(200, Liquid_trash.top(z=-7))
+ protocol.delay(minutes=0.1)
+ p1000.blow_out(Liquid_trash.top(z=-7))
+ p1000.aspirate(20)
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+
+ protocol.comment("--> Removing Residual")
+ for loop, X in enumerate(column_3_list):
+ p50.pick_up_tip()
+ p50.move_to(sample_plate_2[X].bottom(z=0))
+ p50.aspirate(50, rate=0.25)
+ p50.default_speed = 200
+ p50.dispense(100, Liquid_trash.top(z=-7))
+ protocol.delay(minutes=0.1)
+ p50.blow_out()
+ p50.default_speed = 400
+ p50.move_to(Liquid_trash.top(z=-7))
+ p50.move_to(Liquid_trash.top(z=0))
+ p50.return_tip() if TIP_TRASH == False else p50.drop_tip()
+ p50_tips += 1
+ tipcheck()
+
+ protocol.comment("==============================================")
+ protocol.comment("--> ELUTE")
+ protocol.comment("==============================================")
+
+ protocol.comment("--> Adding Elute")
+ EluteVol = 23
+ for loop, X in enumerate(column_3_list):
+ p50.pick_up_tip()
+ p50.aspirate(EluteVol, Elute.bottom())
+ p50.dispense(EluteVol, sample_plate_2[X].bottom())
+ p50.return_tip() if TIP_TRASH == False else p50.drop_tip()
+ p50_tips += 1
+ tipcheck()
+
+ # ============================================================================================
+ # GRIPPER MOVE sample_plate_2 FROM MAGPLATE TO heatershaker
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=heatershaker,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "mag-plate"),
+ drop_offset=grip_offset("drop", "heater-shaker", 1),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ heatershaker.close_labware_latch()
+ heatershaker.set_and_wait_for_shake_speed(rpm=1800)
+ if DRYRUN == False:
+ protocol.delay(seconds=2 * 60)
+ heatershaker.deactivate_shaker()
+ heatershaker.open_labware_latch()
+
+ if DRYRUN == False:
+ protocol.delay(minutes=2)
+
+ # ============================================================================================
+ # GRIPPER MOVE sample_plate_2 FROM heatershaker TO MAGPLATE
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=MAG_PLATE_SLOT,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "heater-shaker", 1),
+ drop_offset=grip_offset("drop", "mag-plate"),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ protocol.comment("--> Transfer Elution")
+ TransferSup = 21
+ for loop, X in enumerate(column_3_list):
+ p50.pick_up_tip()
+ p50.move_to(sample_plate_2[X].bottom(z=0.25))
+ p50.aspirate(TransferSup + 1, rate=0.25)
+ p50.dispense(TransferSup + 5, sample_plate_1[column_4_list[loop]].bottom(z=1))
+ p50.return_tip() if TIP_TRASH == False else p50.drop_tip()
+ p50_tips += 1
+ tipcheck()
+
+ protocol.comment("--> Adding ET2")
+ ET2Vol = 4
+ ET2MixRep = 10 if DRYRUN == False else 1
+ ET2MixVol = 20
+ for loop, X in enumerate(column_4_list):
+ p50.pick_up_tip()
+ p50.aspirate(ET2Vol, ET2.bottom())
+ p50.dispense(ET2Vol, sample_plate_1[X].bottom())
+ p50.move_to(sample_plate_1[X].bottom())
+ p50.mix(ET2MixRep, ET2MixVol)
+ p50.return_tip() if TIP_TRASH == False else p50.drop_tip()
+ p50_tips += 1
+ tipcheck()
+
+ if STEP_PCR == 1:
+ protocol.comment("==============================================")
+ protocol.comment("--> AMPLIFICATION")
+ protocol.comment("==============================================")
+
+ protocol.comment("--> Adding PPC")
+ PPCVol = 5
+ for loop, X in enumerate(column_4_list):
+ p50.pick_up_tip()
+ p50.aspirate(PPCVol, PPC.bottom())
+ p50.dispense(PPCVol, sample_plate_1[X].bottom())
+ p50.return_tip() if TIP_TRASH == False else p50.drop_tip()
+ p50_tips += 1
+ tipcheck()
+
+ protocol.comment("--> Adding EPM")
+ EPMVol = 20
+ EPMMixRep = 10 if DRYRUN == False else 1
+ EPMMixVol = 45
+ for loop, X in enumerate(column_4_list):
+ p50.pick_up_tip()
+ p50.aspirate(EPMVol, EPM.bottom())
+ p50.dispense(EPMVol, sample_plate_1[X].bottom())
+ p50.move_to(sample_plate_1[X].bottom())
+ p50.mix(EPMMixRep, EPMMixVol)
+ p50.return_tip() if TIP_TRASH == False else p50.drop_tip()
+ p50_tips += 1
+ tipcheck()
+
+ if DRYRUN == False:
+ heatershaker.deactivate_heater()
+
+ if STEP_PCRDECK == 1:
+ if DRYRUN == False:
+ ############################################################################################################################################
+ if DRYRUN == False:
+ thermocycler.close_lid()
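+ # PCR: 45 s at 98C, then 12 cycles of 98C/60C/72C (30 s each), a 1 min 72C extension,
+ # and a 10C hold.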
+ profile_PCR_1 = [{"temperature": 98, "hold_time_seconds": 45}]
+ thermocycler.execute_profile(steps=profile_PCR_1, repetitions=1, block_max_volume=50)
+ profile_PCR_2 = [
+ {"temperature": 98, "hold_time_seconds": 30},
+ {"temperature": 60, "hold_time_seconds": 30},
+ {"temperature": 72, "hold_time_seconds": 30},
+ ]
+ thermocycler.execute_profile(steps=profile_PCR_2, repetitions=12, block_max_volume=50)
+ profile_PCR_3 = [{"temperature": 72, "hold_time_minutes": 1}]
+ thermocycler.execute_profile(steps=profile_PCR_3, repetitions=1, block_max_volume=50)
+ thermocycler.set_block_temperature(10)
+ ############################################################################################################################################
+
+ thermocycler.open_lid()
+
+ if STEP_CLEANUP == 1:
+ protocol.comment("==============================================")
+ protocol.comment("--> Cleanup")
+ protocol.comment("==============================================")
+
+ # ============================================================================================
+ # GRIPPER MOVE sample_plate_2 FROM MAGPLATE TO heatershaker
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=heatershaker,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "mag-plate"),
+ drop_offset=grip_offset("drop", "heater-shaker", 1),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ protocol.comment("--> Transfer Elution")
+ TransferSup = 45
+ for loop, X in enumerate(column_4_list):
+ p50.pick_up_tip()
+ p50.move_to(sample_plate_1[X].bottom(z=0.25))
+ p50.aspirate(TransferSup + 1, rate=0.25)
+ p50.dispense(TransferSup + 5, sample_plate_2[column_5_list[loop]].bottom(z=1))
+ p50.return_tip() if TIP_TRASH == False else p50.drop_tip()
+ p50_tips += 1
+ tipcheck()
+
+ Liquid_trash = Liquid_trash_well_4
+
+ protocol.comment("--> ADDING AMPure (0.8x)")
+ AMPureVol = 40.5
+ SampleVol = 45
+ AMPureMixRep = 5 * 60 if DRYRUN == False else 0.1 * 60
+ AMPurePremix = 3 if DRYRUN == False else 1
+ # ========NEW SINGLE TIP DISPENSE===========
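+ # Single tip per column: premix the AMPure reservoir, dispense 0.8x beads into the well,
+ # mix in place, then shake the whole plate on the heater-shaker at 1800 rpm.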
+ for loop, X in enumerate(column_5_list):
+ p1000.pick_up_tip()
+ p1000.mix(AMPurePremix, AMPureVol + 10, AMPure.bottom(z=1))
+ p1000.aspirate(AMPureVol, AMPure.bottom(z=1), rate=0.25)
+ p1000.dispense(AMPureVol, sample_plate_2[X].bottom(z=1), rate=0.25)
+ p1000.default_speed = 5
+ p1000.move_to(sample_plate_2[X].bottom(z=5))
+ for Mix in range(2):
+ p1000.aspirate(60, rate=0.5)
+ p1000.move_to(sample_plate_2[X].bottom(z=1))
+ p1000.aspirate(60, rate=0.5)
+ p1000.dispense(60, rate=0.5)
+ p1000.move_to(sample_plate_2[X].bottom(z=5))
+ p1000.dispense(30, rate=0.5)
+ Mix += 1
+ p1000.blow_out(sample_plate_2[X].top(z=2))
+ p1000.default_speed = 400
+ p1000.move_to(sample_plate_2[X].top(z=5))
+ p1000.move_to(sample_plate_2[X].top(z=0))
+ p1000.move_to(sample_plate_2[X].top(z=5))
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+ # ========NEW HS MIX=========================
+ heatershaker.set_and_wait_for_shake_speed(rpm=1800)
+ protocol.delay(AMPureMixRep)
+ heatershaker.deactivate_shaker()
+
+ # ============================================================================================
+ # GRIPPER MOVE PLATE FROM HEATER SHAKER TO MAG PLATE
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=MAG_PLATE_SLOT,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "heater-shaker", 1),
+ drop_offset=grip_offset("drop", "mag-plate"),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ if DRYRUN == False:
+ protocol.delay(minutes=4)
+
+ protocol.comment("--> Removing Supernatant")
+ RemoveSup = 200
+ for loop, X in enumerate(column_5_list):
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_2[X].bottom(z=3.5))
+ p1000.aspirate(RemoveSup - 100, rate=0.25)
+ protocol.delay(minutes=0.1)
+ p1000.move_to(sample_plate_2[X].bottom(z=0.5))
+ p1000.aspirate(100, rate=0.25)
+ p1000.default_speed = 5
+ p1000.move_to(sample_plate_2[X].top(z=2))
+ p1000.default_speed = 200
+ p1000.dispense(200, Liquid_trash.top(z=-7))
+ protocol.delay(minutes=0.1)
+ p1000.blow_out()
+ p1000.default_speed = 400
+ p1000.move_to(Liquid_trash.top(z=-7))
+ p1000.move_to(Liquid_trash.top(z=0))
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+
+ for X in range(2):
+ protocol.comment("--> ETOH Wash")
+ ETOHMaxVol = 150
+ for loop, X in enumerate(column_5_list):
+ p1000.pick_up_tip()
+ p1000.aspirate(ETOHMaxVol, EtOH.bottom(z=1))
+ p1000.move_to(EtOH.top(z=0))
+ p1000.move_to(EtOH.top(z=-5))
+ p1000.move_to(EtOH.top(z=0))
+ p1000.move_to(sample_plate_2[X].top(z=-2))
+ p1000.dispense(ETOHMaxVol, rate=1)
+ protocol.delay(minutes=0.1)
+ p1000.blow_out()
+ p1000.move_to(sample_plate_2[X].top(z=5))
+ p1000.move_to(sample_plate_2[X].top(z=0))
+ p1000.move_to(sample_plate_2[X].top(z=5))
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+
+ if DRYRUN == False:
+ protocol.delay(minutes=0.5)
+
+ protocol.comment("--> Remove ETOH Wash")
+ for loop, X in enumerate(column_5_list):
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_2[X].bottom(z=3.5))
+ p1000.aspirate(RemoveSup - 100, rate=0.25)
+ protocol.delay(minutes=0.1)
+ p1000.move_to(sample_plate_2[X].bottom(z=0.5))
+ p1000.aspirate(100, rate=0.25)
+ p1000.default_speed = 5
+ p1000.move_to(sample_plate_2[X].top(z=2))
+ p1000.default_speed = 200
+ p1000.dispense(200, Liquid_trash.top(z=-7))
+ protocol.delay(minutes=0.1)
+ p1000.blow_out()
+ p1000.default_speed = 400
+ p1000.move_to(Liquid_trash.top(z=-7))
+ p1000.move_to(Liquid_trash.top(z=0))
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+
+ if DRYRUN == False:
+ protocol.delay(minutes=2)
+
+ protocol.comment("--> Removing Residual ETOH")
+ for loop, X in enumerate(column_5_list):
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_2[X].bottom(z=0))
+ p1000.aspirate(50, rate=0.25)
+ p1000.default_speed = 200
+ p1000.dispense(100, Liquid_trash.top(z=-7))
+ protocol.delay(minutes=0.1)
+ p1000.blow_out()
+ p1000.default_speed = 400
+ p1000.move_to(Liquid_trash.top(z=-7))
+ p1000.move_to(Liquid_trash.top(z=0))
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+
+ if DRYRUN == False:
+ protocol.delay(minutes=1)
+
+ # ============================================================================================
+ # GRIPPER MOVE PLATE FROM MAG PLATE TO HEATER SHAKER
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=heatershaker,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "mag-plate"),
+ drop_offset=grip_offset("drop", "heater-shaker", 1),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ protocol.comment("--> Adding RSB")
+ RSBVol = 32
+ RSBMixRep = 1 * 60 if DRYRUN == False else 0.1 * 60
+ for loop, X in enumerate(column_5_list):
+ p1000.pick_up_tip()
+ p1000.aspirate(RSBVol, RSB.bottom(z=1))
+
+ p1000.move_to((sample_plate_2.wells_by_name()[X].center().move(types.Point(x=1.3 * 0.8, y=0, z=-4))))
+ p1000.dispense(RSBVol, rate=1)
+ p1000.move_to(sample_plate_2.wells_by_name()[X].bottom(z=1))
+ p1000.aspirate(RSBVol, rate=1)
+ p1000.move_to((sample_plate_2.wells_by_name()[X].center().move(types.Point(x=0, y=1.3 * 0.8, z=-4))))
+ p1000.dispense(RSBVol, rate=1)
+ p1000.move_to(sample_plate_2.wells_by_name()[X].bottom(z=1))
+ p1000.aspirate(RSBVol, rate=1)
+ p1000.move_to((sample_plate_2.wells_by_name()[X].center().move(types.Point(x=1.3 * -0.8, y=0, z=-4))))
+ p1000.dispense(RSBVol, rate=1)
+ p1000.move_to(sample_plate_2.wells_by_name()[X].bottom(z=1))
+ p1000.aspirate(RSBVol, rate=1)
+ p1000.move_to((sample_plate_2.wells_by_name()[X].center().move(types.Point(x=0, y=1.3 * -0.8, z=-4))))
+ p1000.dispense(RSBVol, rate=1)
+ p1000.move_to(sample_plate_2.wells_by_name()[X].bottom(z=1))
+ p1000.aspirate(RSBVol, rate=1)
+ p1000.dispense(RSBVol, rate=1)
+
+ p1000.blow_out(sample_plate_2.wells_by_name()[X].center())
+ p1000.move_to(sample_plate_2.wells_by_name()[X].top(z=5))
+ p1000.move_to(sample_plate_2.wells_by_name()[X].top(z=0))
+ p1000.move_to(sample_plate_2.wells_by_name()[X].top(z=5))
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+ if DRYRUN == False:
+ heatershaker.set_and_wait_for_shake_speed(rpm=1600)
+ protocol.delay(RSBMixRep)
+ heatershaker.deactivate_shaker()
+
+ # ============================================================================================
+ # GRIPPER MOVE PLATE FROM HEATER SHAKER TO MAG PLATE
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=MAG_PLATE_SLOT,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "heater-shaker", 1),
+ drop_offset=grip_offset("drop", "mag-plate"),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
+ if DRYRUN == False:
+ protocol.delay(minutes=3)
+
+ protocol.comment("--> Transferring Supernatant")
+ TransferSup = 30
+ for loop, X in enumerate(column_5_list):
+ p1000.pick_up_tip()
+ p1000.move_to(sample_plate_2[X].bottom(z=0.25))
+ p1000.aspirate(TransferSup + 1, rate=0.25)
+ p1000.dispense(TransferSup + 5, sample_plate_1[column_6_list[loop]].bottom(z=1))
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+
+ if ABR_TEST == True:
+ protocol.comment("==============================================")
+ protocol.comment("--> Resetting Run")
+ protocol.comment("==============================================")
+
+ # ============================================================================================
+ # GRIPPER MOVE PLATE FROM MAG PLATE TO HEATER SHAKER
+ heatershaker.open_labware_latch()
+ protocol.move_labware(
+ labware=sample_plate_2,
+ new_location=heatershaker,
+ use_gripper=USE_GRIPPER,
+ pick_up_offset=grip_offset("pick-up", "mag-plate"),
+ drop_offset=grip_offset("drop", "heater-shaker", 1),
+ )
+ heatershaker.close_labware_latch()
+ # ============================================================================================
+
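+ # Test-only bookkeeping: liquid is pipetted from the trash wells back into the reagent
+ # wells so the deck matches its starting state for the next repetition.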
+ p1000.pick_up_tip()
+ # Resetting NHB2
+ p1000.aspirate(COLUMNS * 50, Liquid_trash_well_1.bottom(z=1))
+ p1000.dispense(COLUMNS * 50, NHB2.bottom(z=1))
+ # Resetting Panel
+ p1000.aspirate(COLUMNS * 10, Liquid_trash_well_1.bottom(z=1))
+ p1000.dispense(COLUMNS * 10, Panel.bottom(z=1))
+ # Resetting EHB2
+ p1000.aspirate(COLUMNS * 10, Liquid_trash_well_1.bottom(z=1))
+ p1000.dispense(COLUMNS * 10, EHB2.bottom(z=1))
+ # Resetting SMB
+ for X in range(COLUMNS):
+ p1000.aspirate(125, Liquid_trash_well_1.bottom(z=1))
+ p1000.dispense(125, SMB.bottom(z=1))
+ p1000.aspirate(125, Liquid_trash_well_1.bottom(z=1))
+ p1000.dispense(125, SMB.bottom(z=1))
+
+ # Resetting TWB
+ for X in range(COLUMNS):
+ p1000.aspirate(200, Liquid_trash_well_2.bottom(z=1))
+ p1000.dispense(200, EEW_1.bottom(z=1))
+ p1000.aspirate(200, Liquid_trash_well_2.bottom(z=1))
+ p1000.dispense(200, EEW_1.bottom(z=1))
+ p1000.aspirate(200, Liquid_trash_well_2.bottom(z=1))
+ p1000.dispense(200, EEW_2.bottom(z=1))
+ p1000.aspirate(200, Liquid_trash_well_2.bottom(z=1))
+ p1000.dispense(200, EEW_2.bottom(z=1))
+ p1000.aspirate(200, Liquid_trash_well_2.bottom(z=1))
+ p1000.dispense(200, EEW_3.bottom(z=1))
+ p1000.aspirate(200, Liquid_trash_well_2.bottom(z=1))
+ p1000.dispense(200, EEW_3.bottom(z=1))
+ p1000.aspirate(200, Liquid_trash_well_3.bottom(z=1))
+ p1000.dispense(200, EEW_1.bottom(z=1))
+ p1000.aspirate(200, Liquid_trash_well_3.bottom(z=1))
+ p1000.dispense(200, EEW_1.bottom(z=1))
+ p1000.aspirate(200, Liquid_trash_well_3.bottom(z=1))
+ p1000.dispense(200, EEW_2.bottom(z=1))
+ p1000.aspirate(200, Liquid_trash_well_3.bottom(z=1))
+ p1000.dispense(200, EEW_2.bottom(z=1))
+ p1000.aspirate(200, Liquid_trash_well_3.bottom(z=1))
+ p1000.dispense(200, EEW_3.bottom(z=1))
+ p1000.aspirate(200, Liquid_trash_well_3.bottom(z=1))
+ p1000.dispense(200, EEW_3.bottom(z=1))
+ # Resetting ETOH
+ for X in range(COLUMNS):
+ p1000.aspirate(150, Liquid_trash_well_4.bottom(z=1))
+ p1000.dispense(150, EtOH.bottom(z=1))
+ p1000.aspirate(150, Liquid_trash_well_4.bottom(z=1))
+ p1000.dispense(150, EtOH.bottom(z=1))
+ # Resetting AMPURE
+ for X in range(COLUMNS):
+ p1000.aspirate(COLUMNS * 40.5, Liquid_trash_well_4.bottom(z=1))
+ p1000.dispense(COLUMNS * 40.5, AMPure.bottom(z=1))
+ # Resetting Elute
+ p1000.aspirate(COLUMNS * 25, Liquid_trash_well_4.bottom(z=1))
+ p1000.dispense(COLUMNS * 25, Elute.bottom(z=1))
+ # Resetting EPM
+ p1000.aspirate(COLUMNS * 40, Liquid_trash_well_4.bottom(z=1))
+ p1000.dispense(COLUMNS * 40, EPM.bottom(z=1))
+ p1000.return_tip() if TIP_TRASH == False else p1000.drop_tip()
+ p200_tips += 1
+ tipcheck()
+
+ p50.pick_up_tip()
+ # Resetting ET2
+ p50.aspirate(COLUMNS * 4, Liquid_trash_well_4.bottom(z=1))
+ p50.dispense(COLUMNS * 4, ET2.bottom(z=1))
+ # Resetting PPC
+ p50.aspirate(COLUMNS * 5, Liquid_trash_well_4.bottom(z=1))
+ p50.dispense(COLUMNS * 5, PPC.bottom(z=1))
+ # Removing Final Samples
+ for loop, X in enumerate(column_6_list):
+ p50.aspirate(32, sample_plate_1[X].bottom(z=1))
+ p50.dispense(32, Liquid_trash_well_4.bottom(z=1))
+ # Resetting Samples
+ for loop, X in enumerate(column_1_list):
+ p50.aspirate(30, Liquid_trash_well_4.bottom(z=1))
+ p50.dispense(30, sample_plate_1[X].bottom(z=1))
+
+ p50.return_tip() if TIP_TRASH == False else p50.drop_tip()
+ p50_tips += 1
+ tipcheck()
diff --git a/app-testing/files/protocols/py/OT3_P1000MLeft_P50MRight_HS_TM_MM_TC_2_15_ABR4_Illumina_DNA_Prep_24x.py b/app-testing/files/protocols/Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAPrep24x.py
similarity index 100%
rename from app-testing/files/protocols/py/OT3_P1000MLeft_P50MRight_HS_TM_MM_TC_2_15_ABR4_Illumina_DNA_Prep_24x.py
rename to app-testing/files/protocols/Flex_S_v2_15_P1000M_P50M_GRIP_HS_MB_TC_TM_IlluminaDNAPrep24x.py
diff --git a/app-testing/files/protocols/Flex_S_v2_15_P1000S_None_SimpleNormalizeLongRight.py b/app-testing/files/protocols/Flex_S_v2_15_P1000S_None_SimpleNormalizeLongRight.py
new file mode 100644
index 00000000000..3b8ebfa3b15
--- /dev/null
+++ b/app-testing/files/protocols/Flex_S_v2_15_P1000S_None_SimpleNormalizeLongRight.py
@@ -0,0 +1,250 @@
+import inspect
+from dataclasses import replace
+
+from opentrons import protocol_api, types
+
+metadata = {
+ "protocolName": "OT3 ABR Simple Normalize Long",
+ "author": "Opentrons Engineering