Merge pull request #166 from thom311/th/print-results-tests
[th/print-results-tests] add unit test for `print_results.py` and refactor tests

thom311 authored Nov 27, 2024
2 parents 6a3a529 + 9d73582 commit 9c0c5af
Showing 7 changed files with 171 additions and 138 deletions.
17 changes: 7 additions & 10 deletions print_results.py
@@ -1,21 +1,14 @@
#!/usr/bin/env python3

import argparse
import json
import sys
import traceback

from ktoolbox import common

import tftbase


def read_test_result(filename: str) -> tftbase.TestResultCollection:
with open(filename, "r") as f:
jdata = json.load(f)

return common.dataclass_from_dict(tftbase.TestResultCollection, jdata)


def print_result(test_result: tftbase.TestResult) -> None:
if not test_result.success:
msg = f"failed: {test_result.msg or 'unspecified failure'}"
@@ -54,7 +47,7 @@ def main() -> None:

common.log_config_logger(args.verbose, "tft", "ktoolbox")

test_results = read_test_result(args.result)
test_results = tftbase.TestResultCollection.read_from_file(args.result)

group_passing, group_failing = tftbase.GroupedResult.grouped_from(test_results)

@@ -86,4 +79,8 @@ def main() -> None:


if __name__ == "__main__":
main()
try:
main()
except Exception:
traceback.print_exc()
sys.exit(2)
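
For context, a minimal sketch of how the refactored test suite drives this script end to end (it mirrors the `_run_print_results` helper added in `tests/test_evaluator.py` below; the example input path is an assumption):

```python
import subprocess
import sys

# Run print_results.py on an evaluator output file (a serialized
# TestResultCollection). With the new __main__ handling above, an unexpected
# exception prints a traceback and exits with status 2; the refactored test
# only accepts exit codes 0 and 1 for well-formed inputs.
res = subprocess.run(
    [sys.executable, "print_results.py", "tests/input1-RESULTS"],
    text=True,
    capture_output=True,
)
print(res.stdout)
assert res.returncode in (0, 1)
```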
2 changes: 1 addition & 1 deletion requirements.txt
@@ -3,4 +3,4 @@ jc
jinja2
paramiko
pyserial
git+https://github.com/thom311/ktoolbox@1ed46e23e058521d6ed7811dbbe6e97f244dafd9
git+https://github.com/thom311/ktoolbox@1d59ea4b5861fde27bc1e895378e8d23b52a92f7
File renamed without changes.
1 change: 1 addition & 0 deletions tests/input6-RESULTS

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions tests/input6.json

Large diffs are not rendered by default.

281 changes: 154 additions & 127 deletions tests/test_evaluator.py
@@ -1,3 +1,4 @@
import dataclasses
import filecmp
import json
import os
@@ -8,6 +9,9 @@

from pathlib import Path
from typing import Any
from typing import Optional

from ktoolbox import common

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

@@ -20,171 +24,194 @@
TestCaseType = tftbase.TestCaseType


current_dir = os.path.dirname(__file__)
parent_dir = os.path.dirname(current_dir)

config_path = os.path.join(parent_dir, "eval-config.yaml")
config_path2 = os.path.join(parent_dir, "tests/eval-config-2.yaml")
evaluator_file = os.path.join(parent_dir, "evaluator.py")
test_dir = os.path.dirname(__file__)
source_dir = os.path.dirname(test_dir)

COMMON_COMMAND = ["python", evaluator_file, config_path]

TEST_INPUT_FILES = [
"input1.json",
"input2.json",
"input3.json",
"input4.json",
"input5.json",
]

def _test_file(filename: str) -> str:
return os.path.join(test_dir, filename)

def run_subprocess(
command: list[str], **kwargs: Any
) -> subprocess.CompletedProcess[str]:
full_command = COMMON_COMMAND + command
result = subprocess.run(full_command, text=True, **kwargs)
print("STDOUT:", result.stdout)
print("STDERR:", result.stderr)
return result

def _source_file(filename: str) -> str:
return os.path.join(source_dir, filename)

def test_evaluator_valid_input() -> None:
log_path = os.path.join(current_dir, "input1.json")
compare_path1 = os.path.join(current_dir, "output1a.json")
output_path1 = os.path.join(current_dir, "test-output1.json")

run_subprocess(
[log_path, output_path1],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=False,
)
EVAL_CONFIG_FILE = _source_file("eval-config.yaml")

assert filecmp.cmp(
output_path1, compare_path1
), f"{output_path1} does not match {compare_path1}"
EVALUATOR_EXEC = _source_file("evaluator.py")
PRINT_RESULTS_EXEC = _source_file("print_results.py")

Path(output_path1).unlink()

@dataclasses.dataclass(frozen=True)
class TestConfigFile:
filename: str
is_valid: bool = dataclasses.field(default=True)
expected_outputfile: Optional[str] = dataclasses.field(default=None)

def test_evaluator_invalid_test_case_id() -> None:
log_path = os.path.join(current_dir, "input2.json")

with pytest.raises(subprocess.CalledProcessError):
run_subprocess(
[log_path],
check=True,
)
TEST_CONFIG_FILES = [
TestConfigFile(_test_file("input1.json"), expected_outputfile="input1-RESULTS"),
TestConfigFile(_test_file("input2.json"), is_valid=False),
TestConfigFile(_test_file("input3.json"), is_valid=False),
TestConfigFile(_test_file("input4.json"), is_valid=False),
TestConfigFile(_test_file("input5.json")),
TestConfigFile(_test_file("input6.json"), expected_outputfile="input6-RESULTS"),
]

TEST_EVAL_CONFIG_FILES = [
EVAL_CONFIG_FILE,
_test_file("eval-config-2.yaml"),
]

def test_evaluator_invalid_test_type() -> None:
log_path = os.path.join(current_dir, "input3.json")

with pytest.raises(subprocess.CalledProcessError):
run_subprocess(
[log_path],
check=True,
)
def _run_subprocess(
command: list[str],
**kwargs: Any,
) -> subprocess.CompletedProcess[str]:
if "check" not in kwargs:
kwargs["check"] = True
result = subprocess.run(command, text=True, **kwargs)
print("STDOUT:", result.stdout)
print("STDERR:", result.stderr)
return result


def test_evaluator_invalid_pod_type() -> None:
log_path = os.path.join(current_dir, "input4.json")
def _run_evaluator(filename: str, outfile: str) -> subprocess.CompletedProcess[str]:
return _run_subprocess(
[
sys.executable,
EVALUATOR_EXEC,
EVAL_CONFIG_FILE,
filename,
outfile,
]
)

with pytest.raises(subprocess.CalledProcessError):
run_subprocess(
[log_path],
check=True,
)

def _run_print_results(filename: str) -> subprocess.CompletedProcess[str]:
return _run_subprocess(
[
sys.executable,
PRINT_RESULTS_EXEC,
filename,
],
check=False,
)

def test_eval_config() -> None:
def _check(filename: str) -> None:
assert os.path.exists(filename)

with open(filename, encoding="utf-8") as file:
conf_dict = yaml.safe_load(file)
@pytest.mark.parametrize("test_eval_config", TEST_EVAL_CONFIG_FILES)
def test_eval_config(test_eval_config: str) -> None:
filename = test_eval_config
assert os.path.exists(filename)

c = evalConfig.Config.parse(conf_dict)
with open(filename, encoding="utf-8") as file:
conf_dict = yaml.safe_load(file)

assert (
c.configs[TestType.IPERF_UDP]
.test_cases[TestCaseType.HOST_TO_NODE_PORT_TO_HOST_SAME_NODE]
.normal.threshold
== 5
)
c = evalConfig.Config.parse(conf_dict)

for test_type in TestType:
if test_type not in c.configs:
continue
assert (
c.configs[TestType.IPERF_UDP]
.test_cases[TestCaseType.HOST_TO_NODE_PORT_TO_HOST_SAME_NODE]
.normal.threshold
== 5
)

assert test_type in (
TestType.IPERF_UDP,
TestType.IPERF_TCP,
)
for test_type in TestType:
if test_type not in c.configs:
continue

d = c.configs[test_type].test_cases
assert test_type in (
TestType.IPERF_UDP,
TestType.IPERF_TCP,
)

for test_case_type in TestCaseType:
assert test_case_type in d
d = c.configs[test_type].test_cases

dump = c.serialize()
assert c == evalConfig.Config.parse(dump)
for test_case_type in TestCaseType:
assert test_case_type in d

c2 = c.configs[TestType.IPERF_UDP]
assert c2.test_type == TestType.IPERF_UDP
assert isinstance(c2.serialize(), list)
assert c2 == evalConfig.TestTypeData.parse(
1, "", TestType.IPERF_UDP, c2.serialize()
)
dump = c.serialize()
assert c == evalConfig.Config.parse(dump)

c3 = c2.test_cases[TestCaseType.POD_TO_HOST_DIFF_NODE]
assert c3.test_case_type == TestCaseType.POD_TO_HOST_DIFF_NODE
assert c3.serialize_json() == json.dumps(
{
"id": "POD_TO_HOST_DIFF_NODE",
"Normal": {"threshold": 5.0},
"Reverse": {"threshold": 5.0},
}
)
assert c3.yamlpath == ".IPERF_UDP[1]"
assert c3.normal.yamlpath == ".IPERF_UDP[1].Normal"
c2 = c.configs[TestType.IPERF_UDP]
assert c2.test_type == TestType.IPERF_UDP
assert isinstance(c2.serialize(), list)
assert c2 == evalConfig.TestTypeData.parse(
1, "", TestType.IPERF_UDP, c2.serialize()
)

_check(config_path)
_check(config_path2)
c3 = c2.test_cases[TestCaseType.POD_TO_HOST_DIFF_NODE]
assert c3.test_case_type == TestCaseType.POD_TO_HOST_DIFF_NODE
assert c3.serialize_json() == json.dumps(
{
"id": "POD_TO_HOST_DIFF_NODE",
"Normal": {"threshold": 5.0},
"Reverse": {"threshold": 5.0},
}
)
assert c3.yamlpath == ".IPERF_UDP[1]"
assert c3.normal.yamlpath == ".IPERF_UDP[1].Normal"


def test_output_list_parse() -> None:
for test_input_file in TEST_INPUT_FILES:
filename = os.path.join(current_dir, test_input_file)
assert os.path.isfile(filename)
@pytest.mark.parametrize("test_input_file", TEST_CONFIG_FILES)
def test_output_list_parse(
test_input_file: TestConfigFile,
tmp_path: Path,
) -> None:
filename = test_input_file.filename
assert os.path.isfile(filename)

with open(filename, "r") as f:
data = f.read()
with open(filename, "r") as f:
data = f.read()

file_is_good = True
if test_input_file in ("input2.json", "input3.json", "input4.json"):
with pytest.raises(RuntimeError):
tftbase.output_list_parse_file(filename)
if not test_input_file.is_valid:
with pytest.raises(RuntimeError):
tftbase.output_list_parse_file(filename)
# The file is invalid, but we can patch the content to make it valid.
data = data.replace('"invalid_test_case_id"', '"POD_TO_POD_SAME_NODE"')
data = data.replace('"invalid_test_type"', '"IPERF_TCP"')
data = data.replace('"invalid_pod_type"', '"SRIOV"')

file_is_good = False
data = data.replace('"invalid_test_case_id"', '"POD_TO_POD_SAME_NODE"')
data = data.replace('"invalid_test_type"', '"IPERF_TCP"')
data = data.replace('"invalid_pod_type"', '"SRIOV"')
def _check(output: list[tftbase.TftAggregateOutput]) -> None:
assert isinstance(output, list)
assert output

def _check(output: list[tftbase.TftAggregateOutput]) -> None:
assert isinstance(output, list)
assert output
jdata = json.loads(data)

jdata = json.loads(data)
output = tftbase.output_list_parse(jdata, filename=filename)
_check(output)

output = tftbase.output_list_parse(jdata, filename=filename)
if test_input_file.is_valid:
output = tftbase.output_list_parse_file(filename)
_check(output)

if file_is_good:
output = tftbase.output_list_parse_file(filename)
_check(output)
data2 = tftbase.output_list_serialize(output)
output2 = tftbase.output_list_parse(data2)
_check(output2)
assert output == output2

outputfile = str(tmp_path / "outputfile.json")
if test_input_file.is_valid:
_run_evaluator(filename, outputfile)
else:
with pytest.raises(subprocess.CalledProcessError):
_run_evaluator(filename, outputfile)

if not test_input_file.is_valid:
assert not os.path.exists(outputfile)
else:
assert os.path.exists(outputfile)

test_collection1 = common.dataclass_from_file(
tftbase.TestResultCollection, outputfile
)
assert isinstance(test_collection1, tftbase.TestResultCollection)

if test_input_file.expected_outputfile is not None:
assert filecmp.cmp(
outputfile,
_test_file(test_input_file.expected_outputfile),
), f"{repr(outputfile)} does not match {repr(_test_file(test_input_file.expected_outputfile))}"

data2 = tftbase.output_list_serialize(output)
output2 = tftbase.output_list_parse(data2)
_check(output2)
assert output == output2
res = _run_print_results(outputfile)
assert res.returncode in (0, 1)
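
For readers less familiar with the pattern this refactor moves to, here is a minimal, self-contained sketch of `@pytest.mark.parametrize` driving a frozen dataclass of test cases (hypothetical names, independent of the inputs above):

```python
import dataclasses

import pytest


@dataclasses.dataclass(frozen=True)
class Case:
    value: int
    is_valid: bool = True


CASES = [Case(1), Case(-1, is_valid=False)]


@pytest.mark.parametrize("case", CASES)
def test_case(case: Case) -> None:
    # Each entry in CASES becomes its own test item, reported and
    # selectable individually (e.g. `pytest -k case1`).
    if case.is_valid:
        assert case.value > 0
    else:
        assert case.value <= 0
```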
7 changes: 7 additions & 0 deletions tftbase.py
@@ -357,6 +357,13 @@ class TestResultCollection:
plugin_passing: list[PluginResult]
plugin_failing: list[PluginResult]

@staticmethod
def read_from_file(filename: str | Path) -> "TestResultCollection":
return common.dataclass_from_file(
TestResultCollection,
filename,
)


@common.strict_dataclass
@dataclasses.dataclass(frozen=True, kw_only=True, unsafe_hash=True)
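
A minimal usage sketch of the new helper (the input path is illustrative; this mirrors how the refactored `print_results.py` now loads its results):

```python
import tftbase

# read_from_file() wraps common.dataclass_from_file() and replaces the
# read_test_result() helper that print_results.py previously defined locally.
results = tftbase.TestResultCollection.read_from_file("tests/input1-RESULTS")
print(len(results.plugin_passing), "plugin results passing,",
      len(results.plugin_failing), "failing")
```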
