diff --git a/Dockerfile b/Dockerfile
index fb6f27305eb..4c9b2882cf4 100755
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,32 +1,15 @@
+ARG base_image=resin/raspberrypi3-alpine-python:3.6-slim-20180120
 # Use this for local development on intel machines
 # FROM resin/amd64-alpine-python:3.6-slim-20180123
+
 # Use this for running on a robot
-FROM resin/raspberrypi3-alpine-python:3.6-slim-20180120
-
-ENV RUNNING_ON_PI=1
-# This is used by D-Bus clients such as Network Manager cli, announce_mdns
-# connecting to Host OS services
-ENV DBUS_SYSTEM_BUS_ADDRESS=unix:path=/host/run/dbus/system_bus_socket
-# Add persisted data directory where new python packages are being installed
-ENV PYTHONPATH=$PYTHONPATH/data/packages/usr/local/lib/python3.6/site-packages
-ENV PATH=$PATH:/data/packages/usr/local/bin
-# Port name for connecting to smoothie over serial, i.e. /dev/ttyAMA0
-ENV OT_SMOOTHIE_ID=AMA
-ENV OT_SERVER_PORT=31950
-ENV OT_UPDATE_PORT=34000
-# File path to unix socket API server is listening
-ENV OT_SERVER_UNIX_SOCKET_PATH=/tmp/aiohttp.sock
-
-# Static IPv6 used on Ethernet interface for USB connectivity
-ENV ETHERNET_NETWORK_PREFIX=169.254
-ENV ETHERNET_NETWORK_PREFIX_LENGTH=16
+FROM $base_image
 
 # See compute/README.md for details. Make sure to keep them in sync
 RUN apk add --update \
     util-linux \
     vim \
-    radvd \
     dropbear \
     dropbear-scp \
     gnupg \
@@ -59,14 +42,14 @@ RUN pip install --force-reinstall \
 # Copy server files and data into the container. Note: any directories that
 # you wish to copy into the container must be excluded from the .dockerignore
 # file, or you will encounter a copy error
-ENV LABWARE_DEF /etc/labware
-ENV AUDIO_FILES /etc/audio
-ENV USER_DEFN_ROOT /data/user_storage/opentrons_data/labware
+
+COPY ./compute/container_setup.sh /usr/local/bin/container_setup.sh
+
 COPY ./shared-data/robot-data /etc/robot-data
-COPY ./compute/conf/jupyter_notebook_config.py /root/.jupyter/
 COPY ./shared-data/definitions /etc/labware
-COPY ./audio/ /etc/audio
 COPY ./api /tmp/api
+# Make our shared data available for the api setup.py
+COPY ./shared-data /tmp/shared-data
 COPY ./api-server-lib /tmp/api-server-lib
 COPY ./update-server /tmp/update-server
 COPY ./compute/avahi_tools /tmp/avahi_tools
@@ -81,6 +64,7 @@ RUN pipenv install /tmp/api-server-lib --system && \
     rm -rf /tmp/api && \
     rm -rf /tmp/api-server-lib && \
     rm -rf /tmp/update-server && \
+    rm -rf /tmp/shared-data && \
     rm -rf /tmp/avahi_tools
 
 # Redirect nginx logs to stdout and stderr
@@ -90,49 +74,12 @@ RUN ln -sf /dev/stdout /var/log/nginx/access.log && \
 
 # Use udev rules file from opentrons_data
 RUN ln -sf /data/user_storage/opentrons_data/95-opentrons-modules.rules /etc/udev/rules.d/95-opentrons-modules.rules
 
-# GPG public key to verify signed packages
-COPY ./compute/opentrons.asc .
-RUN gpg --import opentrons.asc && rm opentrons.asc
-
-# Everything you want in /usr/local/bin goes into compute/scripts
-COPY ./compute/scripts/* /usr/local/bin/
-
-# All configuration files live in compute/etc and dispatched here
-COPY ./compute/conf/inetd.conf /etc/
-COPY ./compute/conf/nginx.conf /etc/nginx/nginx.conf
-COPY ./compute/static /usr/share/nginx/html
-
 # Logo for login shell
 COPY ./compute/opentrons.motd /etc/motd
 
-# Replace placeholders with actual environment variable values
-RUN sed -i "s/{OT_SERVER_PORT}/$OT_SERVER_PORT/g" /etc/nginx/nginx.conf && \
-    sed -i "s#{OT_SERVER_UNIX_SOCKET_PATH}#$OT_SERVER_UNIX_SOCKET_PATH#g" /etc/nginx/nginx.conf
-
-# All newly installed packages will go to persistent storage
-ENV PIP_ROOT /data/packages
-
 # Generate keys for dropbear
-RUN ssh_key_gen.sh
-
-# Generate the id that we will later check to see if that's the
-# new container and that local Opentrons API package should be deleted
-# and persist all environment variables from the docker definition,
-# because they are sometimes not picked up from PID 1
-RUN echo "export CONTAINER_ID=$(uuidgen)" >> /etc/profile && \
-    echo "export OT_SETTINGS_DIR=$OT_SETTINGS_DIR" >> /etc/profile && \
-    echo "export OT_SERVER_PORT=$OT_SERVER_PORT" >> /etc/profile && \
-    echo "export OT_SERVER_UNIX_SOCKET_PATH=$OT_SERVER_UNIX_SOCKET_PATH" >> /etc/profile && \
-    echo "export PIP_ROOT=$PIP_ROOT" >> /etc/profile && \
-    echo "export LABWARE_DEF=$LABWARE_DEF" >> /etc/profile && \
-    echo "export USER_DEFN_ROOT=$USER_DEFN_ROOT" >> /etc/profile && \
-    echo "export AUDIO_FILES=$AUDIO_FILES" >> /etc/profile && \
-    echo "export PIPENV_VENV_IN_PROJECT=$PIPENV_VENV_IN_PROJECT" >> /etc/profile && \
-    echo "export DBUS_SYSTEM_BUS_ADDRESS=$DBUS_SYSTEM_BUS_ADDRESS" >> /etc/profile && \
-    echo "export PYTHONPATH=$PYTHONPATH" >> /etc/profile && \
-    echo "export PATH=$PATH" >> /etc/profile && \
-    echo "export RUNNING_ON_PI=$RUNNING_ON_PI" >> /etc/profile && \
-    echo "export OT_SMOOTHIE_ID=$OT_SMOOTHIE_ID" >> /etc/profile
+COPY ./compute/ssh_key_gen.sh /tmp/
+RUN /tmp/ssh_key_gen.sh
 
 # Updates, HTTPS (for future use), API, SSH for link-local over USB
 EXPOSE 80 443 31950
@@ -142,10 +89,28 @@
 STOPSIGNAL SIGTERM
 
 # For backward compatibility, udev is enabled by default
 ENV UDEV on
 
+RUN echo "export CONTAINER_ID=$(uuidgen)" | tee -a /etc/profile.d/opentrons.sh
+
+# The one link we still have to make in the Dockerfile, to make sure we get
+# our environment variables
+COPY ./compute/find_python_module_path.py /usr/local/bin/
+RUN ln -sf /data/system/ot-environ.sh /etc/profile.d/00-persistent-ot-environ.sh &&\
+    ln -sf `find_python_module_path.py opentrons`/resources/ot-environ.sh /etc/profile.d/01-builtin-ot-environ.sh
+
+
+# This configuration is used by both the build and the runtime, so it has to
+# be here. When building a container for local use, set this to 0.
+# If set to 0, ENABLE_VIRTUAL_SMOOTHIE will be set automatically at runtime.
+ARG running_on_pi=1
+ENV RUNNING_ON_PI=$running_on_pi
+
+ARG data_mkdir_path_slash_if_none=/
+RUN mkdir -p $data_mkdir_path_slash_if_none
+
 # For interactive one-off use:
 #   docker run --name opentrons -it opentrons /bin/sh
 # or uncomment:
 # CMD ["python", "-c", "while True: pass"]
-CMD ["bash", "-c", "source /etc/profile && setup.sh && exec start.sh"]
+CMD ["bash", "-lc", "container_setup.sh && setup.sh && exec start.sh"]
 
 # Using Resin base image's default entrypoint and init system - tini
diff --git a/Makefile b/Makefile
index f75944fd420..61d0b879a35 100755
--- a/Makefile
+++ b/Makefile
@@ -58,6 +58,10 @@ push-api:
 	$(MAKE) -C $(API_DIR) push
 	$(MAKE) -C $(API_DIR) restart
 
+.PHONY: api-local-container
+api-local-container:
+	docker build --no-cache --build-arg base_image=resin/amd64-alpine-python:3.6-slim-20180123 --build-arg running_on_pi=0 --build-arg data_mkdir_path_slash_if_none=/data/system .
+
 # all tests
 .PHONY: test
 test: test-py test-js
diff --git a/RESIN_README.md b/RESIN_README.md
deleted file mode 100644
index bdaa68cbee9..00000000000
--- a/RESIN_README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-This document provides information on resin.io in the context of Opentrons.
-Last updated: 9/24/17 - Jared Greene
-
-Overview:
-resin.io is a service that is used for fleet management. Opentrons uses it to update and support our automated pipetting systems.
-Resin works upon resin.os which is a custom operating system they build ontop of Yocto linux distro. It is a stripped down version of
-linux and, most importantly, runs docker. Two docker containers run within each resin-supported device:
-1) the resin supervisor container
-2) the opentrons server container
-
-The resin supervisor container monitors the opentrons container and device. It also connects with the resin system
-which allows multiple forms for support; including secure remote updating.
-
-The opentrons server container runs the opentrons server and api which accepts incoming client connects and
-allows clients to control the robot and upload/run protocols on it.
-
-
-Getting Started [INTERNAL]:
-Let's walk through our first api update / deployment.
-For the sake of this walkthrough, we assume that you want to push an updated server image to
-all the devices on an existing resin application (a fleet of devices).
-
-1) Make an account on resin.io (make sure you set up an ssh key)
-2) Commit your changes
-3) then build your docker image and deploy it to all of the devices with:
-   `git push resin [CURRENT_BRANCH_NAME]:master`
-
-Done! This will push this update out to all devices on this application (as long as the image is successfully built).
-
-If you want to do something more complicated like pushing updates to a single device, creating a new application, or adding
-a new device to a new or existing fleet then check out the resin docs at: https://docs.resin.io/introduction/
-
-
-
diff --git a/api/MANIFEST.in b/api/MANIFEST.in
index 86f4d84a801..0f7e297d524 100755
--- a/api/MANIFEST.in
+++ b/api/MANIFEST.in
@@ -5,3 +5,4 @@ include opentrons/config/smoothie/config_one_pro_plus
 include opentrons/config/modules/avrdude.conf
 include opentrons/config/modules/95-opentrons-modules.rules
 include opentrons/config/pipette-config.json
+recursive-include opentrons/resources *
diff --git a/compute/conf/api.service b/api/opentrons/resources/api.service
similarity index 100%
rename from compute/conf/api.service
rename to api/opentrons/resources/api.service
diff --git a/compute/conf/inetd.conf b/api/opentrons/resources/inetd.conf
old mode 100755
new mode 100644
similarity index 100%
rename from compute/conf/inetd.conf
rename to api/opentrons/resources/inetd.conf
diff --git a/compute/conf/jupyter_notebook_config.py b/api/opentrons/resources/jupyter/jupyter_notebook_config.py
similarity index 100%
rename from compute/conf/jupyter_notebook_config.py
rename to api/opentrons/resources/jupyter/jupyter_notebook_config.py
diff --git a/compute/conf/nginx.conf b/api/opentrons/resources/nginx.conf
old mode 100755
new mode 100644
similarity index 75%
rename from compute/conf/nginx.conf
rename to api/opentrons/resources/nginx.conf
index 78b5c420ea5..72dc3a25542
--- a/compute/conf/nginx.conf
+++ b/api/opentrons/resources/nginx.conf
@@ -14,8 +14,8 @@ http {
     server {
         # Values in {} get replaced by environment variable during
         # container build
-        listen [::]:{OT_SERVER_PORT};
-        listen 0.0.0.0:{OT_SERVER_PORT};
+        listen [::]:31950;
+        listen 0.0.0.0:31950;
 
         client_body_in_file_only off;
         client_body_buffer_size 128k;
@@ -30,7 +30,7 @@ http {
             proxy_set_header Upgrade $http_upgrade;
             proxy_set_header Connection "upgrade";
             proxy_read_timeout 1h;
-            proxy_pass http://unix:{OT_SERVER_UNIX_SOCKET_PATH};
+            proxy_pass http://unix:/tmp/aiohttp.sock;
         }
 
         location /server {
@@ -41,15 +41,4 @@ http {
             proxy_pass http://127.0.0.1:34000;
         }
     }
-
-    server {
-        root /usr/share/nginx/html;
-
-        client_max_body_size 100m;
-        listen [::] default_server ipv6only=on;
-
-        location / {
-            index index.htm index.html;
-        }
-    }
 }
diff --git a/api/opentrons/resources/ot-environ.sh b/api/opentrons/resources/ot-environ.sh
new file mode 100644
index 00000000000..75749d9a14a
--- /dev/null
+++ b/api/opentrons/resources/ot-environ.sh
@@ -0,0 +1,33 @@
+# Set up the environment for the OT2.
+# This is sourced by the system login shell profile by symlinks placed in
+# /etc/profile.d by the Dockerfile.
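+#
+# Sketch of the resulting layout (link names per the Dockerfile; the second
+# target below is resolved at build time by find_python_module_path.py):
+#   /etc/profile.d/00-persistent-ot-environ.sh -> /data/system/ot-environ.sh
+#   /etc/profile.d/01-builtin-ot-environ.sh -> <opentrons package>/resources/ot-environ.sh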
+
+if [ -z "$OT_ENVIRON_SET_UP" ]; then
+    echo "[ $0 ] Configuring environment"
+
+    # Make sure pip installs things into /data
+    export PIP_ROOT=/data/packages
+
+    export OT_CONFIG_PATH=/data/system
+
+    # Required for proper pipenv operation
+    export PIPENV_VENV_IN_PROJECT=true
+    # This is used by D-Bus clients such as the NetworkManager CLI and
+    # announce_mdns connecting to Host OS services
+    export DBUS_SYSTEM_BUS_ADDRESS=unix:path=/host/run/dbus/system_bus_socket
+    export PYTHONPATH=$PYTHONPATH:/data/packages/usr/local/lib/python3.6/site-packages
+    export PATH=$PATH:/data/packages/usr/local/bin:$OT_CONFIG_PATH/scripts
+
+    # TODO(seth, 8/15/2018): These are almost certainly unused and should be hardcoded
+    # if they are in fact still used
+    export OT_SETTINGS_DIR=""
+    export OT_SERVER_UNIX_SOCKET_PATH=/tmp/aiohttp.sock
+    export LABWARE_DEF=/etc/labware
+    export AUDIO_FILES=/etc/audio
+    export USER_DEFN_ROOT=/data/user_storage/opentrons_data/labware
+    export OT_SMOOTHIE_ID=AMA
+    export OT_ENVIRON_SET_UP=1
+    echo "[ $0 ] Environment configuration done"
+else
+    echo "[ $0 ] Environment already configured"
+fi
diff --git a/compute/scripts/announce_mdns.py b/api/opentrons/resources/scripts/announce_mdns.py
similarity index 100%
rename from compute/scripts/announce_mdns.py
rename to api/opentrons/resources/scripts/announce_mdns.py
diff --git a/compute/scripts/calibrate b/api/opentrons/resources/scripts/calibrate
similarity index 100%
rename from compute/scripts/calibrate
rename to api/opentrons/resources/scripts/calibrate
diff --git a/compute/scripts/lpc21isp b/api/opentrons/resources/scripts/lpc21isp
similarity index 100%
rename from compute/scripts/lpc21isp
rename to api/opentrons/resources/scripts/lpc21isp
diff --git a/compute/scripts/miniterm b/api/opentrons/resources/scripts/miniterm
old mode 100644
new mode 100755
similarity index 100%
rename from compute/scripts/miniterm
rename to api/opentrons/resources/scripts/miniterm
diff --git a/compute/scripts/mount_usb.py b/api/opentrons/resources/scripts/mount_usb.py
old mode 100644
new mode 100755
similarity index 100%
rename from compute/scripts/mount_usb.py
rename to api/opentrons/resources/scripts/mount_usb.py
diff --git a/compute/scripts/pipette_memory b/api/opentrons/resources/scripts/pipette_memory
old mode 100644
new mode 100755
similarity index 100%
rename from compute/scripts/pipette_memory
rename to api/opentrons/resources/scripts/pipette_memory
diff --git a/api/opentrons/resources/scripts/provision-api-resources b/api/opentrons/resources/scripts/provision-api-resources
new file mode 100755
index 00000000000..2204667b940
--- /dev/null
+++ b/api/opentrons/resources/scripts/provision-api-resources
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+""" Copy everything here (except this script) into /data/system.
+
+This should be run
+- On the first boot of a new container (handled by `container_setup.sh`)
+- When a new version of the API server is installed by runapp (handled by
+  `setup.py` in the API server wheel)
+"""
+
+import os
+import shutil
+import sys
+
+sys.path.append('/usr/local/bin')
+import find_python_module_path
+
+def provision():
+    """ Should be called the first time a given version of the server is run
+    in a container.
+
+    Should not be called if the server is not running in a container.
+ """ + provision_from_module = find_python_module_path.find_module('opentrons') + provision_from_resources = os.path.join(provision_from_module, 'resources') + print("Provisioning config and initialization from {}" + .format(provision_from_resources)) + config_dir = os.environ.get('OT_CONFIG_PATH', '/data/system') + if os.path.exists(config_dir): + shutil.rmtree(config_dir) + shutil.copytree(provision_from_resources, config_dir) + +if __name__ == '__main__': + provision() diff --git a/api/opentrons/resources/scripts/setup.sh b/api/opentrons/resources/scripts/setup.sh new file mode 100755 index 00000000000..cabd012081d --- /dev/null +++ b/api/opentrons/resources/scripts/setup.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +echo "[ $0 ] API server setup beginning" + +if [ ! -z $RUNNING_ON_PI ] ; then + echo "[ $0 ] Container running on raspi detected, running system setup" + mount_usb.py + setup_gpio.py + + # Cleanup any connections. This will leave only wlan0 + nmcli --terse --fields uuid,device connection show | sed -rn "s/(.*):(--)/\1/p" | xargs nmcli connection del || true + nmcli --terse --fields uuid,device connection show | sed -rn "s/(.*):(eth0)/\1/p" | xargs nmcli connection del || true + + + # nmcli makes an async call which might not finish before next network-related + # operation starts. There is no graceful way to await for D-BUS event in shell + # hence sleep is added to avoid race condition + sleep 1 + nmcli con add con-name "static-eth0" ifname eth0 type ethernet ipv4.method link-local +else + echo "[ $0 ] Container running locally" +fi + +echo "[$0 ] Creating config file links (OT_CONFIG_PATH=$OT_CONFIG_PATH )..." + +ln -sf $OT_CONFIG_PATH/jupyter /root/.jupyter +ln -sf $OT_CONFIG_PATH/audio /etc/audio +rm /etc/nginx/nginx.conf +ln -sf $OT_CONFIG_PATH/nginx.conf /etc/nginx/nginx.conf +ln -sf $OT_CONFIG_PATH/inetd.conf /etc/inetd.conf +mkdir -p /run/nginx + +echo "[ $0 ] API server setup done" diff --git a/compute/scripts/setup_gpio.py b/api/opentrons/resources/scripts/setup_gpio.py similarity index 96% rename from compute/scripts/setup_gpio.py rename to api/opentrons/resources/scripts/setup_gpio.py index aa1833665d7..6eae464cd58 100755 --- a/compute/scripts/setup_gpio.py +++ b/api/opentrons/resources/scripts/setup_gpio.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python from time import sleep from opentrons.drivers.rpi_drivers import gpio diff --git a/compute/scripts/start.sh b/api/opentrons/resources/scripts/start.sh similarity index 51% rename from compute/scripts/start.sh rename to api/opentrons/resources/scripts/start.sh index f04cf47ca9a..2bf0969d9d1 100755 --- a/compute/scripts/start.sh +++ b/api/opentrons/resources/scripts/start.sh @@ -1,36 +1,42 @@ #!/usr/bin/env bash - +echo "[ $0 ] API server starting" # mdns announcement -announce_mdns.py & +if [ ! 
+    echo "[ $0 ] Starting mDNS announcement"
+    announce_mdns.py &
+fi
 
+echo "[ $0 ] Starting nginx"
 # serve static pages and proxy HTTP services
 nginx
 
+echo "[ $0 ] Starting inetd"
 # enable SSH over ethernet
 inetd -e /etc/inetd.conf
 
+echo "[ $0 ] Running user boot scripts"
 # If user boot script exists, run it
 mkdir -p /data/boot.d
 run-parts /data/boot.d
 
-echo "Starting Opentrons update server"
-python -m otupdate --debug --port $OT_UPDATE_PORT &
+echo "[ $0 ] Starting Opentrons update server"
+python -m otupdate --debug --port 34000 &
 
-echo "Starting Jupyter Notebook server"
+echo "[ $0 ] Starting Jupyter Notebook server"
 mkdir -p /data/user_storage/opentrons_data/jupyter
 jupyter notebook --allow-root &
 
 # Check if config exists, and alert if not found
-echo "Checking for deck calibration data..."
+echo "[ $0 ] Checking for deck calibration data..."
 config_path=`python -c "from opentrons import config; print(config.get_config_index().get('deckCalibrationFile'))"`
 if [ ! -e "$config_path" ]; then
     echo $config_path
-    echo "Config file not found. Please perform factory calibration and then restart robot"
+    echo "[ $0 ] Config file not found. Please perform factory calibration and then restart robot"
 fi
 
 export ENABLE_NETWORKING_ENDPOINTS=true
-echo "Starting Opentrons API server"
+echo "[ $0 ] Starting Opentrons API server"
 python -m opentrons.server.main -U $OT_SERVER_UNIX_SOCKET_PATH opentrons.server.main:init
-echo "Server exited unexpectedly. Please power-cycle the machine, and contact Opentrons support."
+echo "[ $0 ] Server exited unexpectedly. Please power-cycle the machine, and contact Opentrons support."
 while true; do sleep 1; done
diff --git a/compute/scripts/switches_lights b/api/opentrons/resources/scripts/switches_lights
old mode 100644
new mode 100755
similarity index 100%
rename from compute/scripts/switches_lights
rename to api/opentrons/resources/scripts/switches_lights
diff --git a/api/pylama.ini b/api/pylama.ini
index 2ea617cd36a..0ef2eb90d1b 100755
--- a/api/pylama.ini
+++ b/api/pylama.ini
@@ -26,5 +26,8 @@ skip = 1
 [pylama:opentrons\_version.py]
 skip = 1
 
+[pylama:opentrons/resources/jupyter/jupyter_notebook_config.py]
+skip = 1
+
diff --git a/api/setup.py b/api/setup.py
index 1bef688b759..d1eb297a36d 100755
--- a/api/setup.py
+++ b/api/setup.py
@@ -44,7 +44,6 @@ def get_version():
     'numpy==1.12.1',
     'urwid==1.3.1']
 
-
 def read(*parts):
     """
     Build an absolute path from *parts* and return the contents of the
@@ -59,6 +58,8 @@ def read(*parts):
     config_src = os.path.join(
         '..', 'shared-data', 'robot-data', pipette_config_filename)
     config_dst = os.path.join('opentrons', 'config')
+    # If you add more copies like this in setup.py you must add them to the
+    # Dockerfile as well, since this doesn't work during a docker build
     try:
         pipette_config_file = os.path.join(config_dst, pipette_config_filename)
         if os.path.exists(pipette_config_file):
@@ -88,3 +89,15 @@ def read(*parts):
         tests_require=['pytest'],
         include_package_data=True
     )
+    if os.environ.get('RUNNING_ON_PI'):
+        # This only applies to software updates: when `pip install` is invoked
+        # on a running robot, not when `pip install` is invoked in the
+        # Dockerfile and not when the server starts up on a robot.
+        resource_dir = os.path.join(HERE, 'opentrons', 'resources')
+        provision = os.path.join(resource_dir, 'provision.py')
+        # We use a subprocess that invokes another python here to avoid
+        # importing the opentrons module that we're about to install, since
+        # this is side-effect-heavy.
+        import sys
+        import subprocess
+        subprocess.check_call([sys.executable, provision], stdout=sys.stdout)
diff --git a/compute/README.md b/compute/README.md
index 4cea70814fe..64c1dcc3310 100644
--- a/compute/README.md
+++ b/compute/README.md
@@ -44,6 +44,10 @@ robot. The script must:
 Scripts in that directory will be executed in alphabetical order. The most
 straight-forward way to guarantee execution order is to prefix file names with
 numbers (e.g.: `00-configure-wifi` and `05-do-something-else`).
 
+### System Configuration and Initialization
+The system is organized so that as little configuration as possible resides in the Dockerfile; rather, we try to locate configuration and initialization in scripts that are bundled into the API server wheel and can therefore be updated without a resin application update. The only configuration absolutely required to be in the container itself is now in `container_setup.sh`. This script detects the first time a container runs on a device and instructs the API server to, among other things documented in the API subproject, copy its configuration and initialization scripts to `/data/system`.
+
+
 ## More info about Resin
 
 ### Application
@@ -78,9 +82,6 @@ See `/Dockerfile` for details.
 
 Directory structure:
 
   * `avahi_tools` — avahi D-Bus client to advertise over mdns
-  * `conf` — service configuration files (see `Dockerfile` for destinations on container image)
-  * `scripts` — copied to `/usr/local/bin` on container image
-  * `static` — static pages to support auto-update
 
 Services:
 
   * `nginx` — serve update page (static/) and proxy `POST` to `/upload`
diff --git a/compute/container_setup.sh b/compute/container_setup.sh
new file mode 100755
index 00000000000..4d664da5426
--- /dev/null
+++ b/compute/container_setup.sh
@@ -0,0 +1,23 @@
+#! /usr/bin/env bash
+
+echo "[ $0 ] Running container setup"
+
+OT_CONFIG_PATH=/data/system
+# Clean up the opentrons package dir if it's the first start of a new container
+touch /data/id
+previous_id=$(cat /data/id)
+current_id=$CONTAINER_ID
+if [ "$previous_id" != "$current_id" ] ; then
+    echo "[ $0 ] First start of a new container (new id < $current_id > old id < $previous_id >). Deleting local Opentrons installation"
+    rm -rf /data/packages/usr/local/lib/python3.6/site-packages/opentrons*
+    rm -rf /data/packages/usr/local/lib/python3.6/site-packages/ot2serverlib*
+    rm -rf /data/packages/usr/local/lib/python3.6/site-packages/otupdate*
+    provision=`find_python_module_path.py opentrons`/resources/scripts/provision-api-resources
+    echo "[ $0 ] provisioning with $provision"
+    python "$provision"
+    echo "$current_id" > /data/id
+else
+    echo "[ $0 ] IDs < $previous_id > match, no container change"
+fi
+
+echo "[ $0 ] Container setup complete"
diff --git a/compute/find_python_module_path.py b/compute/find_python_module_path.py
new file mode 100755
index 00000000000..6c132224893
--- /dev/null
+++ b/compute/find_python_module_path.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+""" Simple python script to find the path of a module _without_ importing it.
+
+This allows us to keep around the path to opentrons (and therefore its contents)
+without having to do the bootstrapping of global state and possible database
+migrations that happen when it is actually loaded.
+
+Usage: find_python_module_path.py <module_name>.
If the module is not found, raises an +exception +""" +import importlib +import os +import sys + +def find_module(mod_name): + mod = importlib.find_loader(mod_name) + if not mod: + raise RuntimeError('Module not found') + return os.path.dirname(mod.get_filename()) + +if __name__ == '__main__': + print(find_module(sys.argv[1])) diff --git a/compute/opentrons.motd b/compute/opentrons.motd index b2603fa01e2..b18c9d1cfe5 100644 --- a/compute/opentrons.motd +++ b/compute/opentrons.motd @@ -1,15 +1,15 @@ - - - @@@@@ @@@@@ - @@@@ @@@@ - @@@ @@ @@@ @@@ - @@@ @@@@ @@@ @@@@@@ @@@@@@@ @@@@@@ @@@@@@@ @@@@@ @@@@@ @@@@@@ @@@@@@@ @@@@@ - @@ @@@@@@@@ &@@ @@ (@@ @@ @@ %@@ @@ @@ @@ @@@ @@ @@@ @@ @@ @@ @@ - @@ .@@@ @ #@@ @@ @@ @@ @@ @@@@@@@@ @@ @@ @@@ @@ @@, @@ @@ @@ @@@ - @@@ @ @ @@@ @@@ @@ @@@@ @@ @@ @@ @@ @@@ @@ @@ @@@ @@ @@ @@ - @@@ @@..@@ @@@ @@@@ @@ %@& (@/ @@@ %@@* - @@@@ @@@@ @@ - @@@@@@@@@@@@@@ - (@@@@@@. - - + + + @@@@@ @@@@@ + @@@@ @@@@ + @@@ @@ @@@ @@@@@@ @@@@@ + @@@ @@@@ @@@ @@@@@@ &@' '@@ + @@ @@@@@@@@ &@@ @@ @@ + @@ .@@@ @ #@@ @@ @@ + @@@ @ @ @@@ @@ @@ + @@@ @@..@@ @@@ @@ @@ + @@@@ @@@@ @@ @@@@@& + @@@@@@@@@@@@@@ ## &@@@@@# + (@@@@@@. + + diff --git a/compute/scripts/setup.sh b/compute/scripts/setup.sh deleted file mode 100755 index 07328cdcc53..00000000000 --- a/compute/scripts/setup.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash - -python /usr/local/bin/mount_usb.py -python /usr/local/bin/setup_gpio.py - -# Keep all IPv6 addresses on an interface down event. If set static -# global addresses with no expiration time are not flushed. -# -# This allows us to bind to Ethernet adapter's address even if the link -# us down: i.e. the robot is not connected over USB -# -# See: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/tree/Documentation/networking/ip-sysctl.txt?id=refs/tags/v4.9 -echo 1 > /proc/sys/net/ipv6/conf/eth0/keep_addr_on_down - -# Disable duplicate address detection (DAD) -# Leaving DAD enabled sometimes results in static address being reset -# to a random value after robot restart with USB plugged in, -# because host computer remembers it being used and reports collision. -echo 0 > /proc/sys/net/ipv6/conf/eth0/accept_dad - -# Cleanup any connections. This will leave only wlan0 -nmcli --terse --fields uuid,device connection show | sed -rn 's/(.*):(--)/\1/p' | xargs nmcli connection del || true -nmcli --terse --fields uuid,device connection show | sed -rn 's/(.*):(eth0)/\1/p' | xargs nmcli connection del || true - -# nmcli makes an async call which might not finish before next network-related -# operation starts. There is no graceful way to await for D-BUS event in shell -# hence sleep is added to avoid race condition -sleep 1 -nmcli con add con-name "static-eth0" ifname eth0 type ethernet ipv4.method link-local - -# Clean up opentrons package dir if it's a first start of a new container -touch /data/id -previous_id=$(cat /data/id) -current_id=$CONTAINER_ID -if [ "$previous_id" != "$current_id" ] ; then - echo 'First start of a new container. 
Deleting local Opentrons installation'
-    rm -rf /data/packages/usr/local/lib/python3.6/site-packages/opentrons*
-    rm -rf /data/packages/usr/local/lib/python3.6/site-packages/ot2serverlib*
-    rm -rf /data/packages/usr/local/lib/python3.6/site-packages/otupdate*
-    echo "$current_id" > /data/id
-fi
diff --git a/compute/scripts/ssh_key_gen.sh b/compute/ssh_key_gen.sh
similarity index 100%
rename from compute/scripts/ssh_key_gen.sh
rename to compute/ssh_key_gen.sh
diff --git a/compute/static/index.html b/compute/static/index.html
deleted file mode 100644
index 59bcd80f8e7..00000000000
--- a/compute/static/index.html
+++ /dev/null
@@ -1,15 +0,0 @@
-<html>
-<head>
-<title>Package Upload</title>
-</head>
-<body>
-
-Select files to upload
-
-<form>
-<input type="file">
-<input type="submit">
-</form>
-
-</body>
-</html>
\ No newline at end of file
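With the static upload page gone, wheel uploads go straight to the update server, which nginx proxies under `/server` (upstream 127.0.0.1:34000, exposed on 31950). A minimal sketch of exercising the `update_api` handler below; the exact `/server/update` path is an assumption based on the nginx `location /server` block and the handler name, and `ROBOT_IP` is a placeholder:

    curl -X POST -F 'whl=@opentrons-api.whl' http://ROBOT_IP:31950/server/update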
diff --git a/update-server/otupdate/install.py b/update-server/otupdate/install.py
index 0ad57eef4db..d27217dcd12 100644
--- a/update-server/otupdate/install.py
+++ b/update-server/otupdate/install.py
@@ -3,6 +3,7 @@
 import shutil
 import asyncio
 import logging
+import traceback
 from aiohttp import web
 
 log = logging.getLogger(__name__)
@@ -156,6 +157,22 @@ async def install_py(python, data, loop) -> (dict, int):
     return {'message': msg, 'filename': filename}, returncode
 
 
+async def _provision_container(python, loop) -> (dict, int):
+    provision_command = 'provision-api-resources'
+    proc = await asyncio.create_subprocess_shell(
+        provision_command, stdout=asyncio.subprocess.PIPE,
+        stderr=asyncio.subprocess.PIPE, loop=loop)
+    sub_stdout = await proc.stdout.read()
+    sub_stderr = await proc.stderr.read()
+    await proc.communicate()
+    rc = proc.returncode
+    if rc != 0:
+        res = sub_stderr.decode().strip()
+    else:
+        res = sub_stdout.decode().strip()
+    return {'message': res, 'filename': ''}, rc
+
+
 async def update_api(request: web.Request) -> web.Response:
     """
     This handler accepts a POST request with Content-Type: multipart/form-data
@@ -173,6 +190,12 @@ async def update_api(request: web.Request) -> web.Response:
         res0, rc0 = await install_py(
             sys.executable, data['whl'], request.loop)
         reslist = [res0]
+        if rc0 == 0 and os.environ.get('RUNNING_ON_PI'):
+            resprov, rcprov = await _provision_container(
+                sys.executable, request.loop)
+            reslist.append(resprov)
+        else:
+            rcprov = 0
         if 'serverlib' in data.keys():
             res1, rc1 = await install_py(
                 sys.executable, data['serverlib'], request.loop)
@@ -189,14 +212,14 @@ async def update_api(request: web.Request) -> web.Response:
             'message': [r['message'] for r in reslist],
             'filename': [r['filename'] for r in reslist]
         }
-        returncode = rc0 + rc1 + rc2
+        returncode = rc0 + rc1 + rc2 + rcprov
         if returncode == 0:
             status = 200
         else:
             status = 400
     except Exception as e:
         res = {'message': 'Exception {} raised by update of {}: {}'.format(
-            type(e), data, e.__traceback__)}
+            type(e), data, traceback.format_tb(e.__traceback__))}
         status = 500
     return web.json_response(res, status=status)
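For local development, the new `api-local-container` Makefile target builds this same Dockerfile with `running_on_pi=0`. A minimal sketch of exercising the first-boot provisioning path by hand, assuming you tag the image `opentrons-local` (the Makefile target as written does not pass `-t`):

    docker build \
        --build-arg base_image=resin/amd64-alpine-python:3.6-slim-20180123 \
        --build-arg running_on_pi=0 \
        --build-arg data_mkdir_path_slash_if_none=/data/system \
        -t opentrons-local .
    # A login shell sources /etc/profile.d, picking up CONTAINER_ID and the
    # ot-environ.sh variables; container_setup.sh then sees a fresh id and runs
    # provision-api-resources, which copies opentrons/resources to /data/system.
    docker run -it --rm opentrons-local /bin/sh -lc \
        'container_setup.sh && ls /data/system/scripts'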