
Commit

switched deployment method to use custom docker compose file and docker image
csegarragonz committed Jan 19, 2021
1 parent 1f6e8a3 commit 4efc512
Showing 9 changed files with 152 additions and 46 deletions.
17 changes: 17 additions & 0 deletions bin/clean_mpi_native.sh
@@ -0,0 +1,17 @@
#!/bin/bash

set -e

THIS_DIR=$(dirname $(readlink -f $0))
PROJ_ROOT=${THIS_DIR}/..

pushd ${PROJ_ROOT} >> /dev/null

source ./docker/mpi-native.env

docker-compose \
--file ${COMPOSE_FILE} \
--env-file ${ENV_FILE} \
down

popd >> /dev/null
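
The script resolves its own paths via `readlink`, so it can be invoked from anywhere on the host; a minimal usage sketch:

```bash
# Tear down the MPI native docker-compose deployment
./bin/clean_mpi_native.sh
```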
28 changes: 20 additions & 8 deletions tasks/mpi.py → bin/mpi_native_dev.py
100644 → 100755
@@ -1,11 +1,13 @@
#!/usr/bin/python3

import argparse
from copy import copy
from os import environ
from os.path import exists, join
from subprocess import run

from tasks.util.env import PROJ_ROOT, FAABRIC_INSTALL_PREFIX

from invoke import task
PROJ_ROOT = "/code/faabric"
FAABRIC_INSTALL_PREFIX = "/build/faabric/install"

EXAMPLES_DIR = join(PROJ_ROOT, "examples")
BUILD_DIR = join(EXAMPLES_DIR, "build")
@@ -15,7 +17,6 @@

# As it appears in the docker-compose file
FAABRIC_WORKER_NAME = "cli"
MPI_DEFAULT_WORLD_SIZE = 1


def _find_mpi_hosts():
@@ -41,8 +42,7 @@ def _find_mpi_hosts():


# TODO: eventually move all MPI examples to ./examples/mpi
@task
def execute(ctx, example, clean=False, np=MPI_DEFAULT_WORLD_SIZE):
def execute(example, np, clean=False):
"""
Runs an MPI example
"""
@@ -90,8 +90,7 @@ def execute(ctx, example, clean=False, np=MPI_DEFAULT_WORLD_SIZE):
run(exe_cmd, shell=True)


@task
def clean(ctx, force=False):
def clean(force=False):
"""
Clean environment from failed deployments
"""
@@ -100,3 +99,16 @@ def clean(ctx, force=False):
)
print(docker_cmd)
run(docker_cmd, shell=True, check=True)

if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run MPI in development.')
parser.add_argument("example", default="mpi_helloworld")
parser.add_argument("--np", type=int, default=3)
parser.add_argument("--clean", action="store_true", default=False)
args = parser.parse_args()

if args.clean:
clean()
else:
print(args.example, args.np)
execute(args.example, args.np)
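
Given the hard-coded `/code/faabric` paths, the renamed script appears intended to be run directly inside the development container rather than through `invoke`; a usage sketch with illustrative values:

```bash
# Run an example with a world size of 4
./bin/mpi_native_dev.py mpi_helloworld --np 4

# Clean the environment after a failed deployment (the positional argument is
# still required by argparse, but is ignored when --clean is passed)
./bin/mpi_native_dev.py mpi_helloworld --clean
```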
25 changes: 25 additions & 0 deletions bin/run_mpi_native.sh
@@ -0,0 +1,25 @@
#!/bin/bash

set -e

THIS_DIR=$(dirname $(readlink -f $0))
PROJ_ROOT=${THIS_DIR}/..

pushd ${PROJ_ROOT} >> /dev/null

source ./docker/mpi-native.env

# Start the deployment, scaling the worker service to the world size minus
# one (the master container accounts for the remaining rank)
docker-compose \
--file ${COMPOSE_FILE} \
--env-file ${ENV_FILE} \
up -d \
--scale worker=$((${MPI_WORLD_SIZE} -1)) \
--force-recreate

docker-compose \
--file ${COMPOSE_FILE} \
--env-file ${ENV_FILE} \
logs master worker

popd >> /dev/null
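
The `--scale` arithmetic is worth spelling out: the master service occupies one MPI rank, so only `MPI_WORLD_SIZE - 1` additional worker containers are started. A worked example with an illustrative world size, assuming the commands run from the repository root:

```bash
# With MPI_WORLD_SIZE=3 the deployment consists of one redis container, one
# master container and (3 - 1) = 2 worker containers, i.e. 3 MPI ranks total
docker-compose \
    --file ./docker/mpi-native-docker-compose.yml \
    --env-file ./docker/mpi-native.env \
    ps
```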
30 changes: 30 additions & 0 deletions docker/faabric-mpi-native.dockerfile
@@ -0,0 +1,30 @@
FROM faasm/grpc-root:0.0.16
ARG FAABRIC_VERSION

# Note - the version of grpc-root here can be quite behind as it's rebuilt very
# rarely

# Python tooling and Redis CLI utilities
RUN apt install -y \
libpython3-dev \
python3-dev \
python3-pip \
python3-venv \
redis-tools

# Put the code in place
WORKDIR /code
# RUN git clone -b v${FAABRIC_VERSION} https://github.com/faasm/faabric
RUN git clone -b standalone-mpi https://github.com/csegarragonz/faabric

WORKDIR /code/faabric

RUN pip3 install invoke

# Build MPI native lib
RUN inv dev.cmake --shared
RUN inv dev.cc faabricmpi_native --shared
RUN inv dev.install faabricmpi_native --shared

# Build examples
RUN inv examples.build
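
The image is normally produced by the new `inv container.build-mpi-native` task added below; a hypothetical manual equivalent, assuming the tag should match `FAABRIC_MPI_NATIVE_IMAGE` in `docker/mpi-native.env`, would be:

```bash
# Manual build from the repository root (tag and version are illustrative)
docker build \
    --file docker/faabric-mpi-native.dockerfile \
    --build-arg FAABRIC_VERSION=0.0.16 \
    --tag faasm/faabric-mpi-native:0.0.16 \
    .
```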
34 changes: 34 additions & 0 deletions docker/mpi-native-docker-compose.yml
@@ -0,0 +1,34 @@
version: "3"

services:
redis:
image: redis

worker:
image: ${FAABRIC_MPI_NATIVE_IMAGE}
entrypoint: /code/faabric/examples/build/${MPI_EXAMPLE}
working_dir: /code/faabric
privileged: true
environment:
- LD_LIBRARY_PATH=/usr/local/lib:/build/faabric/install/lib
- FUNCTION_STORAGE=local
- LOG_LEVEL=debug
- REDIS_STATE_HOST=redis
- REDIS_QUEUE_HOST=redis
depends_on:
- redis

master:
image: ${FAABRIC_MPI_NATIVE_IMAGE}
entrypoint: ['/code/faabric/examples/build/${MPI_EXAMPLE}', 'master', '${MPI_WORLD_SIZE}']
working_dir: /code/faabric
privileged: true
environment:
- LD_LIBRARY_PATH=/usr/local/lib:/build/faabric/install/lib
- FUNCTION_STORAGE=local
- LOG_LEVEL=debug
- REDIS_STATE_HOST=redis
- REDIS_QUEUE_HOST=redis
depends_on:
- redis
- worker
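
With the default values in `docker/mpi-native.env`, the two entrypoints resolve roughly as sketched below; the worker behaviour (joining the world started by the master) is inferred from the example binaries rather than stated in this commit:

```bash
# master container: runs the example as the MPI master with the world size
/code/faabric/examples/build/mpi_helloworld master 1

# worker containers: run the same binary with no arguments
/code/faabric/examples/build/mpi_helloworld
```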
10 changes: 10 additions & 0 deletions docker/mpi-native.env
@@ -0,0 +1,10 @@
FAABRIC_VERSION=0.0.16
FAABRIC_MPI_NATIVE_IMAGE=faasm/faabric-mpi-native:0.0.16
COMPOSE_PROJECT_NAME=faabric-mpi

COMPOSE_FILE="./docker/mpi-native-docker-compose.yml"
ENV_FILE="./docker/mpi-native.env"

# Deployment-specific
MPI_WORLD_SIZE=1
MPI_EXAMPLE=mpi_helloworld
43 changes: 7 additions & 36 deletions docs/native_mpi.md
@@ -5,45 +5,16 @@ used in [Faasm](https://github.com/faasm/faasm). This way, you can test the
compliance of your MPI application with our API (a subset of the standard)
without the burden of cross-compiling to WebAssembly.

To run native MPI applications you need to compile the dynamic library and
slightly modify the original source code.

## Compiling the library

Compilation should be smooth if you are running our recommended containerised
[development environment](../README.md). You may access the container running
`./bin/cli.sh`.

Then, to compile the library:
```bash
inv dev.cmake --shared
inv dev.cc faabricmpi_native --shared
inv dev.install faabricmpi_native --shared
To run native MPI applications you first need to modify your binary to match
the examples provided, and then build the worker image by running:
```

## Running the binary

To run an example, run this command _outside_ the container:
```bash
# The --clean flag re-creates _all_ containers
inv mpi.execute mpi_helloworld --np 5 --clean
```

To clean the cluster and set the development one again:
```bash
inv mpi.clean
```

Using the `--force` flag will recreate _all_ containers, thereby ending any
sessions you may have open:
```bash
inv mpi.clean --force
inv container.build-mpi-native
```

## Debugging
Then you may run arbitrary deployments by setting the right values in
`docker/mpi-native.env` and running `./bin/run_mpi_native.sh`.

If at some point you reach an unstable state of the cluster, stop it completely
using:
You may stop and remove all the deployment's containers with:
```bash
docker-compose down
docker-compose -f docker/mpi-native-docker-compose.yml --env-file docker/mpi-native.env down
```
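
Putting the new workflow together, a typical host-side session might look like the sketch below; the values shown for the env file are illustrative:

```bash
# 1. Build the worker image
inv container.build-mpi-native

# 2. Edit docker/mpi-native.env, e.g.
#      MPI_WORLD_SIZE=3
#      MPI_EXAMPLE=mpi_helloworld

# 3. Start the deployment and print the master/worker logs
./bin/run_mpi_native.sh

# 4. Tear everything down afterwards
./bin/clean_mpi_native.sh
```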
2 changes: 0 additions & 2 deletions tasks/__init__.py
@@ -5,13 +5,11 @@
from . import dev
from . import examples
from . import git
from . import mpi

ns = Collection(
call,
container,
dev,
examples,
git,
mpi,
)
9 changes: 9 additions & 0 deletions tasks/container.py
@@ -6,6 +6,7 @@

FAABRIC_IMAGE_NAME = "faabric"
GRPC_IMAGE_NAME = "grpc-root"
MPI_NATIVE_IMAGE_NAME = "faabric-mpi-native"


def _get_docker_tag(img_name):
@@ -65,6 +66,14 @@ def build_grpc(ctx, nocache=False, push=False):
_do_container_build(GRPC_IMAGE_NAME, nocache=nocache, push=push)


@task
def build_mpi_native(ctx, nocache=False, push=False):
"""
Build current MPI native container
"""
_do_container_build(MPI_NATIVE_IMAGE_NAME, nocache=nocache, push=push)


@task
def push(ctx):
"""
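The new task follows the same flag conventions as the existing container build tasks, so its usage would presumably be:

```bash
# Build the MPI native image locally
inv container.build-mpi-native

# Rebuild without the cache and push to the registry
inv container.build-mpi-native --nocache --push
```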
