Initial functional tests #77

Merged Dec 6, 2024 · 15 commits
22 changes: 22 additions & 0 deletions tests/README.md
@@ -0,0 +1,22 @@
# Running tests

Install dependencies and run the tests:
```bash
pip install -r requirements.txt
pytest
```

Run tests with GPU:
```bash
pytest --gpu
```

Run tests with a persistent model cache (models are downloaded into it on the first run):
```bash
pytest --model-cache ./cache
```

All subsequent runs can then skip the download:
```bash
pytest --model-cache ./cache --skip-download
```
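
The flags combine; for example, a GPU run that reuses an already-populated cache:
```bash
pytest --gpu --model-cache ./cache --skip-download
```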
29 changes: 9 additions & 20 deletions tests/conftest.py
@@ -4,6 +4,10 @@

 import pytest
 
+def pytest_addoption(parser):
+    parser.addoption("--gpu", action="store_true")
+    parser.addoption("--skip-download", action="store_true")
+    parser.addoption("--model-cache", action="store", default="")
 
 def pytest_configure(config):
     config.addinivalue_line(
@@ -12,23 +16,8 @@ def pytest_configure(config):
     )
 
 
-def pytest_runtest_setup(item):
-    for mark in item.iter_markers():
-        if "gpu" in mark.name:
-            if sys.platform.startswith("linux"):
-                process = subprocess.run(
-                    ["/bin/bash", "-c", 'lspci | grep -E "VGA|3D"'],
-                    stdout=subprocess.PIPE,
-                    stderr=subprocess.STDOUT,
-                    shell=False,
-                )
-                if process.returncode != 0:
-                    pytest.skip("Test requires Intel GPU device on the host machine")
-            elif sys.platform.startswith("win") and "win" not in item.config.getoption(
-                "--image_os"
-            ):
-                wsl = shutil.which("wsl")
-                if not wsl:
-                    pytest.skip(
-                        "Test requires Intel GPU device and configured WSL2 on the host machine"
-                    )
+def pytest_collection_modifyitems(config, items):
+    for item in items:
+        if "gpu" in item.keywords:
+            if not config.getoption("--gpu"):
+                item.add_marker(pytest.mark.skip("Test requires --gpu flag to be set and Intel GPU device on the host machine"))
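
With this change, GPU tests are collected but skipped unless explicitly opted in. A minimal sketch of a test that picks up the marker (the test name and body are hypothetical):

```python
import pytest

@pytest.mark.gpu  # skipped unless pytest is invoked with --gpu
def test_inference_on_gpu():
    ...  # hypothetical body; the real GPU tests live under tests/functional/
```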
25 changes: 16 additions & 9 deletions tests/functional/conftest.py
@@ -10,9 +10,8 @@

 CONTAINER_NAME = "openvino_backend_pytest"
 
-
 @pytest.fixture(scope="class")
-def triton_server(model_repository):
+def triton_server(model_repository, request):
     port = 0
     try:
         sock = socket.socket()
@@ -24,8 +23,9 @@ def triton_server(model_repository):
         raise Exception(f"Not expected exception found in port manager: {e.errno}")
     image_name = os.environ.get("TIS_IMAGE_NAME")
     image_name = image_name if image_name is not None else "tritonserver:latest"
+    gpu = '--device /dev/dri --group-add=$(stat -c "%g" /dev/dri/render* )' if request.config.getoption("--gpu") else ""
     subprocess.run(
-        f'docker run -p {port}:8001 -d -v {model_repository}:/model_repository --name={CONTAINER_NAME} --device /dev/dri --group-add=$(stat -c "%g" /dev/dri/render* ) {image_name} bin/tritonserver --model-repository /model_repository',
+        f'docker run -p {port}:8001 -d -v {model_repository}:/model_repository --name={CONTAINER_NAME} {gpu} {image_name} bin/tritonserver --model-repository /model_repository',
         capture_output=True,
         shell=True,
     )
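
The fixture reserves a port by binding to port 0, letting the OS pick a free one, and then passes it to `docker run -p`. The trick in isolation:

```python
import socket

sock = socket.socket()
sock.bind(("", 0))            # port 0 asks the OS for any free port
port = sock.getsockname()[1]  # the port that was actually assigned
sock.close()                  # released here, then reused for -p {port}:8001
```

There is a small window between closing the socket and starting the container in which another process could claim the port; for a test fixture this is usually an acceptable trade-off.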
@@ -47,14 +47,21 @@ def setup_model(cache, repo, name, gpu=False):


 @pytest.fixture(scope="session")
-def model_cache():
-    dir = tempfile.TemporaryDirectory()
-    cache = dir.name
-    for model in models:
-        MODEL_CONFIG[model]["fetch"](model, cache)
+def model_cache(request):
+    input_dir = request.config.getoption('--model-cache')
+    dir = None
+    if input_dir == "":
+        dir = tempfile.TemporaryDirectory()
+        cache = dir.name
+    else:
+        cache = input_dir
+    if not request.config.getoption("--skip-download"):
+        for model in models:
+            MODEL_CONFIG[model]["fetch"](model, cache)
     yield cache
 
-    dir.cleanup()
+    if dir is not None:
+        dir.cleanup()
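
For context, `MODEL_CONFIG` maps each model name to, among other things, a `fetch` callable taking the model name and the cache directory. A hypothetical entry (the names, URL, and layout are illustrative, not the suite's actual config):

```python
import os
import urllib.request

def fetch_model(name, cache):
    # hypothetical fetcher: download the model once into the cache directory
    model_dir = os.path.join(cache, name)
    os.makedirs(model_dir, exist_ok=True)
    urllib.request.urlretrieve(
        "https://example.com/models/" + name + ".onnx",  # placeholder URL
        os.path.join(model_dir, "model.onnx"),
    )

MODEL_CONFIG = {"resnet50": {"fetch": fetch_model}}
```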


@pytest.fixture(scope="class", params=[CPU, pytest.param(GPU, marks=pytest.mark.gpu)])
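
The class-scoped fixture above parametrizes each test class over both devices, with the GPU case carrying the `gpu` marker so it obeys `--gpu`. A minimal self-contained sketch (the fixture name and the string constants are assumptions):

```python
import pytest

CPU, GPU = "CPU", "GPU"  # assumed device identifiers

@pytest.fixture(scope="class", params=[CPU, pytest.param(GPU, marks=pytest.mark.gpu)])
def device(request):
    # every test class using this fixture runs once per device;
    # the GPU run is skipped unless pytest is invoked with --gpu
    return request.param
```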