Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: add pytest for e2e testing #1188

Merged
merged 4 commits into from
Sep 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 0 additions & 2 deletions engine/controllers/command_line_parser.cc
Original file line number Diff line number Diff line change
Expand Up @@ -138,15 +138,13 @@ bool CommandLineParser::SetupCommand(int argc, char** argv) {
});

auto install_cmd = engines_cmd->add_subcommand("install", "Install engine");
install_cmd->callback([] { CLI_LOG("Engine name can't be empty!"); });
for (auto& engine : engine_service_.kSupportEngines) {
std::string engine_name{engine};
EngineInstall(install_cmd, engine_name, version);
}

auto uninstall_cmd =
engines_cmd->add_subcommand("uninstall", "Uninstall engine");
uninstall_cmd->callback([] { CLI_LOG("Engine name can't be empty!"); });
for (auto& engine : engine_service_.kSupportEngines) {
std::string engine_name{engine};
EngineUninstall(uninstall_cmd, engine_name);
Expand Down
9 changes: 9 additions & 0 deletions engine/e2e-test/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
import pytest

# These imports look unused, but they pull the Test* classes into this
# module's namespace — presumably so pytest collects every e2e suite when
# this file alone is passed to pytest.main (verify collection behavior).
from test_api_engine_list import TestApiEngineList
from test_cli_engine_get import TestCliEngineGet
from test_cli_engine_install import TestCliEngineInstall
from test_cli_engine_list import TestCliEngineList
from test_cli_engine_uninstall import TestCliEngineUninstall

if __name__ == "__main__":
    # Entry point: run the whole e2e suite verbosely (`python main.py`).
    pytest.main([__file__, "-v"])
22 changes: 22 additions & 0 deletions engine/e2e-test/test_api_engine_list.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
import pytest
import requests
from test_runner import start_server, stop_server


class TestApiEngineList:
    """E2E test for the engines listing HTTP API."""

    @pytest.fixture(autouse=True)
    def setup_and_teardown(self):
        """Start the API server before each test and stop it afterwards."""
        if not start_server():
            raise Exception("Failed to start server")

        yield

        stop_server()

    def test_engines_list_api_run_successfully(self):
        # GET /engines must answer with HTTP 200 while the server is up.
        resp = requests.get("http://localhost:3928/engines")
        assert resp.status_code == 200
56 changes: 56 additions & 0 deletions engine/e2e-test/test_cli_engine_get.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
import platform

import pytest
from test_runner import run


class TestCliEngineGet:
    """E2E tests for `cortex engines get <engine>` compatibility reporting."""

    def _get(self, engine_name):
        # Shared helper: run `engines get` for one engine and return the
        # (exit_code, output, error) triple from the CLI.
        return run("Get engine", ["engines", "get", engine_name])

    @pytest.mark.skipif(platform.system() != "Windows", reason="Windows-specific test")
    def test_engines_get_tensorrt_llm_should_not_be_incompatible(self):
        exit_code, output, error = self._get("cortex.tensorrt-llm")
        assert exit_code == 0, f"Get engine failed with error: {error}"
        assert "Incompatible" not in output, "cortex.tensorrt-llm should be Ready or Not Installed on Windows"

    @pytest.mark.skipif(platform.system() != "Windows", reason="Windows-specific test")
    def test_engines_get_onnx_should_not_be_incompatible(self):
        exit_code, output, error = self._get("cortex.onnx")
        assert exit_code == 0, f"Get engine failed with error: {error}"
        assert "Incompatible" not in output, "cortex.onnx should be Ready or Not Installed on Windows"

    def test_engines_get_llamacpp_should_not_be_incompatible(self):
        # No skipif: llamacpp compatibility is asserted on every platform.
        exit_code, output, error = self._get("cortex.llamacpp")
        assert exit_code == 0, f"Get engine failed with error: {error}"
        assert "Incompatible" not in output, "cortex.llamacpp should be compatible for Windows, MacOs and Linux"

    @pytest.mark.skipif(platform.system() != "Darwin", reason="macOS-specific test")
    def test_engines_get_tensorrt_llm_should_be_incompatible_on_macos(self):
        exit_code, output, error = self._get("cortex.tensorrt-llm")
        assert exit_code == 0, f"Get engine failed with error: {error}"
        assert "Incompatible" in output, "cortex.tensorrt-llm should be Incompatible on MacOS"

    @pytest.mark.skipif(platform.system() != "Darwin", reason="macOS-specific test")
    def test_engines_get_onnx_should_be_incompatible_on_macos(self):
        exit_code, output, error = self._get("cortex.onnx")
        assert exit_code == 0, f"Get engine failed with error: {error}"
        assert "Incompatible" in output, "cortex.onnx should be Incompatible on MacOS"

    @pytest.mark.skipif(platform.system() != "Linux", reason="Linux-specific test")
    def test_engines_get_onnx_should_be_incompatible_on_linux(self):
        exit_code, output, error = self._get("cortex.onnx")
        assert exit_code == 0, f"Get engine failed with error: {error}"
        assert "Incompatible" in output, "cortex.onnx should be Incompatible on Linux"
30 changes: 30 additions & 0 deletions engine/e2e-test/test_cli_engine_install.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
import platform

import pytest
from test_runner import run


class TestCliEngineInstall:
    """E2E tests for `cortex engines install <engine>` on the CLI."""

    def test_engines_install_llamacpp_should_be_successfully(self):
        # Happy path: install should print a download message and exit 0.
        exit_code, output, error = run(
            "Install Engine", ["engines", "install", "cortex.llamacpp"]
        )
        assert "Download" in output, "Should display downloading message"
        assert exit_code == 0, f"Install engine failed with error: {error}"

    @pytest.mark.skipif(platform.system() != "Darwin", reason="macOS-specific test")
    def test_engines_install_onnx_on_macos_should_be_failed(self):
        # "Failed" means the CLI reports "No variant found" in its output;
        # the process itself still exits 0, hence the assertion below.
        exit_code, output, error = run(
            "Install Engine", ["engines", "install", "cortex.onnx"]
        )
        assert "No variant found" in output, "Should display error message"
        assert exit_code == 0, f"Install engine failed with error: {error}"

    @pytest.mark.skipif(platform.system() != "Darwin", reason="macOS-specific test")
    def test_engines_install_onnx_on_tensorrt_should_be_failed(self):
        # NOTE(review): method name looks like a copy-paste slip — this test
        # installs cortex.tensorrt-llm, not onnx. Consider renaming to
        # test_engines_install_tensorrt_on_macos_should_be_failed (renaming
        # changes the pytest test ID, so coordinate with CI filters).
        exit_code, output, error = run(
            "Install Engine", ["engines", "install", "cortex.tensorrt-llm"]
        )
        assert "No variant found" in output, "Should display error message"
        assert exit_code == 0, f"Install engine failed with error: {error}"
24 changes: 24 additions & 0 deletions engine/e2e-test/test_cli_engine_list.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
import platform

import pytest
from test_runner import run


class TestCliEngineList:
    """E2E tests for `cortex engines list` — one thin test per platform."""

    def _assert_list_shows_llamacpp(self):
        # Shared body: the command must exit 0 and mention llama.cpp.
        exit_code, output, error = run("List engines", ["engines", "list"])
        assert exit_code == 0, f"List engines failed with error: {error}"
        assert "llama.cpp" in output

    @pytest.mark.skipif(platform.system() != "Windows", reason="Windows-specific test")
    def test_engines_list_run_successfully_on_windows(self):
        self._assert_list_shows_llamacpp()

    @pytest.mark.skipif(platform.system() != "Darwin", reason="macOS-specific test")
    def test_engines_list_run_successfully_on_macos(self):
        self._assert_list_shows_llamacpp()

    @pytest.mark.skipif(platform.system() != "Linux", reason="Linux-specific test")
    def test_engines_list_run_successfully_on_linux(self):
        self._assert_list_shows_llamacpp()
24 changes: 24 additions & 0 deletions engine/e2e-test/test_cli_engine_uninstall.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
import pytest
from test_runner import run


class TestCliEngineUninstall:
    """E2E tests for `cortex engines uninstall <engine>` on the CLI."""

    @pytest.fixture(autouse=True)
    def setup_and_teardown(self):
        # Setup
        # Preinstall llamacpp engine so there is something to uninstall
        run("Install Engine", ["engines", "install", "cortex.llamacpp"])

        yield

        # Teardown
        # Clean up: make sure the engine is gone even if the test body failed
        run("Uninstall Engine", ["engines", "uninstall", "cortex.llamacpp"])

    def test_engines_uninstall_llamacpp_should_be_successfully(self):
        exit_code, output, error = run(
            "Uninstall engine", ["engines", "uninstall", "cortex.llamacpp"]
        )
        assert "Engine cortex.llamacpp uninstalled successfully!" in output
        # Fixed copy-paste in the failure message: it previously read
        # "Install engine failed" although this is the uninstall test.
        assert exit_code == 0, f"Uninstall engine failed with error: {error}"
137 changes: 137 additions & 0 deletions engine/e2e-test/test_runner.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,137 @@
import platform
import queue
import select
import subprocess
import threading
import time
from typing import List

# You might want to change the path of the executable based on your build directory
executable_windows_path = "build\\Debug\\cortex.exe"
executable_unix_path = "build/cortex"

# Timeout
# Upper bound (seconds) for a single CLI command and for server startup.
timeout = 5  # secs
# Substring searched for in the server's output to detect a successful start.
start_server_success_message = "Server started"


# Get the executable path based on the platform
def getExecutablePath() -> str:
    """Return the cortex binary path appropriate for the host OS."""
    on_windows = platform.system() == "Windows"
    return executable_windows_path if on_windows else executable_unix_path


# Execute a command
def run(test_name: str, arguments: List[str]):
    """Run the cortex binary with `arguments` and return
    (exit_code, stdout, stderr). Raises subprocess.TimeoutExpired if the
    command takes longer than the module-level `timeout`."""
    command = [getExecutablePath()] + arguments
    print("Running:", test_name)
    print("Command:", command)

    completed = subprocess.run(
        command, capture_output=True, text=True, timeout=timeout
    )
    return completed.returncode, completed.stdout, completed.stderr


# Start the API server
# Wait for `Server started` message or failed
def start_server() -> bool:
    """Dispatch to the platform-specific server starter and report success."""
    starter = (
        start_server_windows
        if platform.system() == "Windows"
        else start_server_nix
    )
    return starter()


def start_server_nix() -> bool:
    """POSIX implementation: spawn the server and poll its stdout/stderr
    with select() until the success message appears or `timeout` elapses.

    Returns True once the server announces it has started, False otherwise.
    On timeout the spawned process is killed so it does not leak into
    subsequent tests (previously it was left running).
    """
    executable = getExecutablePath()
    process = subprocess.Popen(
        executable, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
    )

    start_time = time.time()
    while time.time() - start_time < timeout:
        # Use select to check if there's data to read from stdout or stderr
        readable, _, _ = select.select([process.stdout, process.stderr], [], [], 0.1)

        for stream in readable:
            line = stream.readline()
            if line and start_server_success_message in line:
                # have to wait a bit for server to really up and accept connection
                print("Server started found, wait 0.3 sec..")
                time.sleep(0.3)
                return True

        # The process exited before announcing a successful start.
        if process.poll() is not None:
            return False

    # Timed out: kill the half-started server so it cannot interfere with
    # later tests.
    process.kill()
    return False


def start_server_windows() -> bool:
    """Windows implementation: spawn the server and read stdout/stderr from
    background threads (select() does not work on pipes on Windows).

    Returns True once the success message is seen on either stream, False
    when the process exits early or the timeout elapses. Fixes over the
    previous version:
      * readline sentinel is "" (text mode), not b"" — the old bytes
        sentinel never matched, so reader threads spun forever at EOF;
      * the stderr success path returned `(True, process)` instead of the
        declared bool (it only worked because a tuple is truthy);
      * both success paths now wait 0.3 s for the server to accept
        connections, matching the nix implementation;
      * the process is killed on timeout so it does not leak.
    """
    executable = getExecutablePath()
    process = subprocess.Popen(
        executable,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
        bufsize=1,
        universal_newlines=True,
    )

    q_out = queue.Queue()
    q_err = queue.Queue()

    def enqueue_output(out, q):
        # Streams are text-mode, so EOF is the empty string "".
        for line in iter(out.readline, ""):
            q.put(line)
        out.close()

    # Start threads to read stdout and stderr
    t_out = threading.Thread(target=enqueue_output, args=(process.stdout, q_out))
    t_err = threading.Thread(target=enqueue_output, args=(process.stderr, q_err))
    t_out.daemon = True
    t_err.daemon = True
    t_out.start()
    t_err.start()

    def drain(q, label):
        # Pop at most one line from `q`; report whether it carries the
        # success message.
        try:
            line = q.get_nowait()
        except queue.Empty:
            return False
        print(f"{label}: {line.strip()}")
        return start_server_success_message in line

    # only wait for defined timeout
    start_time = time.time()
    while time.time() - start_time < timeout:
        if drain(q_out, "STDOUT") or drain(q_err, "STDERR"):
            # found the message; wait a bit so the server is really
            # accepting connections before tests hit it
            time.sleep(0.3)
            return True

        # Check if the process has ended
        if process.poll() is not None:
            return False

        time.sleep(0.1)

    # Timed out: kill the half-started server so it does not leak.
    process.kill()
    return False


# Stop the API server
# Issues the CLI `stop` command via run(); bounded by run()'s timeout.
def stop_server():
    run("Stop server", ["stop"])
6 changes: 4 additions & 2 deletions engine/main.cc
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,8 @@

void RunServer() {
auto config = file_manager_utils::GetCortexConfig();
LOG_INFO << "Host: " << config.apiServerHost << " Port: " << config.apiServerPort << "\n";
LOG_INFO << "Host: " << config.apiServerHost
<< " Port: " << config.apiServerPort << "\n";

// Create logs/ folder and setup log to file
std::filesystem::create_directory(config.logFolderPath + "/" +
Expand Down Expand Up @@ -72,7 +73,8 @@ void RunServer() {
LOG_INFO << "Server started, listening at: " << config.apiServerHost << ":"
<< config.apiServerPort;
LOG_INFO << "Please load your model";
drogon::app().addListener(config.apiServerHost, std::stoi(config.apiServerPort));
drogon::app().addListener(config.apiServerHost,
std::stoi(config.apiServerPort));
drogon::app().setThreadNum(drogon_thread_num);
LOG_INFO << "Number of thread is:" << drogon::app().getThreadNum();

Expand Down
2 changes: 0 additions & 2 deletions engine/services/engine_service.cc
Original file line number Diff line number Diff line change
Expand Up @@ -71,8 +71,6 @@ std::vector<EngineInfo> EngineService::GetEngineInfoList() const {
}

void EngineService::UninstallEngine(const std::string& engine) {
CTL_INF("Uninstall engine " + engine);

// TODO: Unload the model which is currently running on engine_

// TODO: Unload engine if is loaded
Expand Down
Loading