Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Initial functional tests #77

Merged
merged 15 commits into from
Dec 6, 2024
Empty file added tests/__init__.py
mc-nv marked this conversation as resolved.
Show resolved Hide resolved
Empty file.
20 changes: 20 additions & 0 deletions tests/configs/dynamic.pbtxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
input [
{
name: "data"
data_type: TYPE_FP32
dims: [ -1, 3, -1, -1]
}
]
output [
{
name: "detection_out"
data_type: TYPE_FP32
dims: [ 1, 1, -1, 7]
}
]
parameters: {
key: "RESHAPE_IO_LAYERS"
value: {
string_value:"yes"
}
}
28 changes: 28 additions & 0 deletions tests/configs/dynamic_gpu.pbtxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
input: [
{
name: "data"
data_type: TYPE_FP32
dims: [ -1, 3, -1, -1]
}
]
output: [
{
name: "detection_out"
data_type: TYPE_FP32
dims: [ 1, 1, -1, 7]
}
]
parameters: [
{
key: "RESHAPE_IO_LAYERS"
value: {
string_value:"yes"
}
},
{
key: "TARGET_DEVICE"
value: {
string_value: "GPU"
}
}
]
14 changes: 14 additions & 0 deletions tests/configs/ir.pbtxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
parameters: [
{
key: "NUM_STREAMS"
value: {
string_value: "1"
}
},
{
key: "PERFORMANCE_HINT"
value: {
string_value: "LATENCY"
}
}
]
20 changes: 20 additions & 0 deletions tests/configs/ir_gpu.pbtxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
parameters: [
{
key: "NUM_STREAMS"
value: {
string_value: "1"
}
},
{
key: "PERFORMANCE_HINT"
value: {
string_value: "LATENCY"
}
},
{
key: "TARGET_DEVICE"
value: {
string_value: "GPU"
}
}
]
23 changes: 23 additions & 0 deletions tests/configs/onnx.pbtxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
backend: "openvino"
input: [
{
name: "gpu_0/data_0"
data_type: TYPE_FP32
dims: [ -1, 3, 224, 224]
}
]
parameters: [
{
key: "NUM_STREAMS"
value: {
string_value: "1"
}
},
{
key: "PERFORMANCE_HINT"
value: {
string_value: "LATENCY"
}
}
]
default_model_filename: "model.onnx"
30 changes: 30 additions & 0 deletions tests/configs/onnx_gpu.pbtxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
backend: "openvino"
input: [
{
name: "gpu_0/data_0"
data_type: TYPE_FP32
dims: [ -1, 3, 224, 224]
}
]
parameters: [
{
key: "NUM_STREAMS"
value: {
string_value: "1"
}
},
{
key: "PERFORMANCE_HINT"
value: {
string_value: "LATENCY"
}
},
{
key: "TARGET_DEVICE"
value: {
string_value: "GPU"
}
}

]
default_model_filename: "model.onnx"
31 changes: 31 additions & 0 deletions tests/configs/paddle.pbtxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
backend: "openvino"
input: [
{
name: "inputs"
data_type: TYPE_FP32
dims: [ -1, 3, 224, 224]
}
]
output: [
{
name: "save_infer_model/scale_0.tmp_1"
data_type: TYPE_FP32
dims: [ -1, 1000]
}
]

parameters: [
{
key: "NUM_STREAMS"
value: {
string_value: "1"
}
},
{
key: "PERFORMANCE_HINT"
value: {
string_value: "LATENCY"
}
}
]
default_model_filename: "model.pdmodel"
31 changes: 31 additions & 0 deletions tests/configs/paddle_gpu.pbtxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
backend: "openvino"
input: [
{
name: "inputs"
data_type: TYPE_FP32
dims: [ -1, 3, 224, 224]
}
]
output: [
{
name: "save_infer_model/scale_0.tmp_1"
data_type: TYPE_FP32
dims: [ -1, 1000]
}
]

parameters: [
{
key: "NUM_STREAMS"
value: {
string_value: "1"
}
},
{
key: "PERFORMANCE_HINT"
value: {
string_value: "LATENCY"
}
}
]
default_model_filename: "model.pdmodel"
30 changes: 30 additions & 0 deletions tests/configs/pb.pbtxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
backend: "openvino"
bstrzele marked this conversation as resolved.
Show resolved Hide resolved
parameters: [
{
key: "NUM_STREAMS"
value: {
string_value: "1"
}
},
{
key: "PERFORMANCE_HINT"
value: {
string_value: "LATENCY"
}
}
]
input [
{
name: "input_1"
data_type: TYPE_FP32
dims: [-1, 224, 224, 3 ]
}
]
output [
{
name: "activation_49"
data_type: TYPE_FP32
dims: [-1, 1001]
}
]
default_model_filename: "model.saved_model"
16 changes: 16 additions & 0 deletions tests/configs/tflite.pbtxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
backend: "openvino"
parameters: [
{
key: "NUM_STREAMS"
value: {
string_value: "1"
}
},
{
key: "PERFORMANCE_HINT"
value: {
string_value: "LATENCY"
}
}
]
default_model_filename: "model.tflite"
22 changes: 22 additions & 0 deletions tests/configs/tflite_gpu.pbtxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
backend: "openvino"
parameters: [
{
key: "NUM_STREAMS"
value: {
string_value: "1"
}
},
{
key: "PERFORMANCE_HINT"
value: {
string_value: "LATENCY"
}
},
{
key: "TARGET_DEVICE"
value: {
string_value: "GPU"
}
}
]
default_model_filename: "model.tflite"
22 changes: 22 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
import shutil
import subprocess
import sys

import pytest

def pytest_configure(config):
    """Register the custom 'gpu' marker so pytest does not warn on it."""
    config.addinivalue_line('markers', 'gpu: run tests on GPU device')

def pytest_runtest_setup(item):
    """Skip tests marked 'gpu' when the host lacks the required GPU setup.

    On Linux, a PCI scan (``lspci | grep -E "VGA|3D"``) must find a display
    device; grep's non-zero exit code means none was found.  On Windows
    hosts testing a non-Windows image, WSL2 (detected via ``wsl`` on PATH)
    is required for GPU passthrough.

    NOTE(review): the original module never imported ``shutil`` although it
    is used below, which would raise NameError on the Windows path; the
    import block has been fixed accordingly.
    """
    for mark in item.iter_markers():
        # Substring match preserved from the original; it also matches any
        # marker whose name merely contains 'gpu' — presumably intentional.
        if 'gpu' in mark.name:
            if sys.platform.startswith('linux'):
                # shell=False with an explicit bash -c list: the pipeline is a
                # fixed string, so no untrusted input reaches the shell.
                process = subprocess.run(['/bin/bash', '-c', 'lspci | grep -E "VGA|3D"'],
                                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                         shell=False)
                if process.returncode != 0:
                    pytest.skip('Test requires Intel GPU device on the host machine')
            elif sys.platform.startswith('win') and 'win' not in item.config.getoption('--image_os'):
                wsl = shutil.which('wsl')
                if not wsl:
                    pytest.skip('Test requires Intel GPU device and configured WSL2 on the host machine')
21 changes: 21 additions & 0 deletions tests/functional/common.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import numpy as np
import grpc
import os
import tritonclient.grpc as grpcclient
from tritonclient.grpc import service_pb2_grpc

def prepare_inputs(input_name, shape):
    """Build a one-element list holding an FP32 InferInput filled with ones."""
    tensor = grpcclient.InferInput(input_name, shape, "FP32")
    tensor.set_data_from_numpy(np.ones(shape, dtype=np.float32))
    return [tensor]

def prepare_grpc_stub(port):
    """Open an insecure gRPC channel to localhost:<port> and return a stub."""
    target = f"localhost:{port}"
    return service_pb2_grpc.GRPCInferenceServiceStub(grpc.insecure_channel(target))

def prepare_triton_client(port):
    """Create a Triton gRPC client; verbose output when LOG_LEVEL=DEBUG."""
    verbose = os.environ.get("LOG_LEVEL") == "DEBUG"
    return grpcclient.InferenceServerClient(url=f"localhost:{port}", verbose=verbose)
Loading