From 3788e4bd28fe64ab1662c0867d4b12e90cf7f182 Mon Sep 17 00:00:00 2001
From: Venktesh Shivam Patel
Date: Fri, 9 Feb 2024 14:13:29 +0000
Subject: [PATCH] Add reload and req-resp performance tests for ing and vs
 (#5048)

---
 perf-tests/README.md                  |  34 +++++---
 perf-tests/conftest.py                |   2 +-
 perf-tests/suite/common.py            |  42 ++++++++++
 perf-tests/suite/ing_request_perf.py  |  26 ++++++
 perf-tests/suite/test_ingress_perf.py | 113 ++++++++++++++++++++++++++
 perf-tests/suite/test_vs_perf.py      |  80 ++++++++++++++++++
 perf-tests/suite/vs_request_perf.py   |  28 +++++++
 7 files changed, 311 insertions(+), 14 deletions(-)
 create mode 100644 perf-tests/suite/common.py
 create mode 100644 perf-tests/suite/ing_request_perf.py
 create mode 100644 perf-tests/suite/test_ingress_perf.py
 create mode 100644 perf-tests/suite/test_vs_perf.py
 create mode 100644 perf-tests/suite/vs_request_perf.py

diff --git a/perf-tests/README.md b/perf-tests/README.md
index ee93def9f9..e4d3163a35 100644
--- a/perf-tests/README.md
+++ b/perf-tests/README.md
@@ -11,33 +11,41 @@ to find out about various configuration options.
 
 ### Prerequisites
 
-- Minikube.
-- Python3 (in a virtualenv)
+- Any k8s platform of your choice (kind, minikube, GKE, AKS, etc.)
+- Python3 and Pytest (in a virtualenv)
 
-#### Step 1 - Create a Minikube Cluster
-
-```bash
-minikube start
-```
+#### Step 1 - Create a cluster on the platform of your choice
 
 #### Step 2 - Run the Performance Tests
 
-**Note**: if you have the Ingress Controller deployed in the cluster, please uninstall it first, making sure to remove
+**Note**: if you already have the Ingress Controller deployed in the cluster, please uninstall it first, making sure to remove
 its namespace and RBAC resources.
 
 Run the tests:
 
 - Use local Python3 installation (advised to use pyenv/virtualenv):
 
-  ```bash
+  ```shell
   cd perf-tests
   pip install -r ../tests/requirements.txt --no-deps
-  pytest -v -s -m ap_perf --count=<int> --users=<int> --hatch-rate=<int> --time=<int>
   ```
 
-The tests will use the Ingress Controller for NGINX with the image built from `debian-image-nap-plus`. See the section
-below to learn how to configure the tests including the image and the type of NGINX -- NGINX or NGINX Plus. Refer the
-[Configuring the Tests](#configuring-the-tests) section for valid arguments.
+  For Ingress and VS performance tests:
+
+  ```shell
+  pytest -v -s -m perf --count=<int> --users=<int> --hatch-rate=<int> --time=<int>
+  ```
+
+  For AppProtect performance tests:
+
+  ```shell
+  pytest -v -s -m ap_perf --count=<int> --users=<int> --hatch-rate=<int> --time=<int>
+  ```
+
+The tests can use the Ingress Controller for NGINX with the image built from `debian-image-nap-plus`, `debian-image-plus`,
+or `debian-image`.
+See the section below to learn how to configure the tests, including the image and the type of NGINX -- NGINX or
+NGINX Plus. Refer to the [Configuring the Tests](#configuring-the-tests) section for valid arguments.
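+For example, a run with 10 users hatched at 5 per second over a 60-second session, repeated once, could look like
+this (the values here are illustrative, not recommendations):
+
+  ```shell
+  pytest -v -s -m perf --count=1 --users=10 --hatch-rate=5 --time=60
+  ```
+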
 ## Configuring the Tests
diff --git a/perf-tests/conftest.py b/perf-tests/conftest.py
index f27202e064..b60dc9a05d 100644
--- a/perf-tests/conftest.py
+++ b/perf-tests/conftest.py
@@ -132,7 +132,7 @@ def pytest_collection_modifyitems(config, items) -> None:
         for item in items:
             if "skip_for_nginx_plus" in item.keywords:
                 item.add_marker(skip_for_nginx_plus)
-    if "-ap" not in config.getoption("--image"):
+    if "-nap" not in config.getoption("--image"):
         appprotect = pytest.mark.skip(reason="Skip AppProtect test in non-AP image")
         for item in items:
             if "appprotect" in item.keywords:
diff --git a/perf-tests/suite/common.py b/perf-tests/suite/common.py
new file mode 100644
index 0000000000..fa67001231
--- /dev/null
+++ b/perf-tests/suite/common.py
@@ -0,0 +1,42 @@
+import re
+import subprocess
+from datetime import datetime
+
+import requests
+
+
+def collect_prom_reload_metrics(metric_list, scenario, ip, port) -> None:
+    # scrape the controller metrics endpoint and record the last reload duration
+    req_url = f"http://{ip}:{port}/metrics"
+    resp = requests.get(req_url)
+    resp_decoded = resp.content.decode("utf-8")
+    reload_metric = ""
+    for line in resp_decoded.splitlines():
+        if "last_reload_milliseconds{class" in line:
+            reload_metric = re.findall(r"\d+", line)[0]
+    metric_list.append(
+        {
+            f"Reload time ({scenario}) ": f"{reload_metric}ms",
+            "TimeStamp": str(datetime.utcnow()),
+        }
+    )
+
+
+def run_perf(url, setup_users, setup_rate, setup_time, resource):
+    # drive request/response load with locust using the per-resource locustfile
+    subprocess.run(
+        [
+            "locust",
+            "-f",
+            f"suite/{resource}_request_perf.py",
+            "--headless",
+            "--host",
+            url,
+            "--csv",
+            f"{resource}_response_times",
+            "-u",
+            setup_users,  # total no. of users
+            "-r",
+            setup_rate,  # no. of users hatched per second
+            "-t",
+            setup_time,  # locust session duration in seconds
+        ]
+    )
diff --git a/perf-tests/suite/ing_request_perf.py b/perf-tests/suite/ing_request_perf.py
new file mode 100644
index 0000000000..e3c81f7590
--- /dev/null
+++ b/perf-tests/suite/ing_request_perf.py
@@ -0,0 +1,26 @@
+import os
+
+import yaml
+from locust import HttpUser, task
+
+host = ""
+
+
+class TestResponse(HttpUser):
+    # Locust user class driving the request/response load
+    def on_start(self):
+        # get the request host from the smoke-ingress yaml before the test starts
+        ing_yaml = os.path.join(os.path.dirname(__file__), "../../tests/data/smoke/standard/smoke-ingress.yaml")
+        with open(ing_yaml) as f:
+            docs = yaml.safe_load_all(f)
+            for dep in docs:
+                self.host = dep["spec"]["rules"][0]["host"]
+        print("Setup finished")
+
+    @task
+    def send_request(self):
+        response = self.client.get(url="", headers={"host": self.host}, verify=False)
+        print(response.text)
+
+    # wait 400-1400 ms between tasks (legacy min_wait/max_wait locust API)
+    min_wait = 400
+    max_wait = 1400
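For context, `collect_prom_reload_metrics` above keys off a single line of Prometheus exposition text and pulls the
first run of digits out of it. A minimal sketch of that matching logic, with a hypothetical metrics line (the exact
metric name and label set on a given controller build may differ):

```python
import re

# Hypothetical line from the controller's /metrics output; the name and labels
# are assumptions for illustration only.
line = 'nginx_ingress_controller_nginx_last_reload_milliseconds{class="nginx"} 141'

if "last_reload_milliseconds{class" in line:
    # The first digit run is the reload duration, since the matched metric
    # name itself contains no digits.
    print(re.findall(r"\d+", line)[0])  # -> "141"
```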
diff --git a/perf-tests/suite/test_ingress_perf.py b/perf-tests/suite/test_ingress_perf.py
new file mode 100644
index 0000000000..ae1d162c1e
--- /dev/null
+++ b/perf-tests/suite/test_ingress_perf.py
@@ -0,0 +1,113 @@
+import json
+
+import pytest
+import requests
+from common import collect_prom_reload_metrics, run_perf
+from settings import TEST_DATA
+from suite.utils.resources_utils import (
+    create_example_app,
+    create_items_from_yaml,
+    create_secret_from_yaml,
+    delete_common_app,
+    delete_items_from_yaml,
+    delete_secret,
+    ensure_connection,
+    ensure_connection_to_public_endpoint,
+    wait_before_test,
+    wait_until_all_pods_are_ready,
+)
+from suite.utils.yaml_utils import get_first_ingress_host_from_yaml
+
+reload = []
+
+
+class Setup:
+    """
+    Encapsulate the performance test setup details.
+
+    Attributes:
+        req_url (str): URL of the backend exposed through the Ingress Controller
+    """
+
+    def __init__(self, req_url):
+        self.req_url = req_url
+
+
+@pytest.fixture(scope="class")
+def setup(request, kube_apis, ingress_controller_endpoint, test_namespace) -> Setup:
+    print("------------------------- Deploy prerequisites -----------------------------------")
+    secret_name = create_secret_from_yaml(kube_apis.v1, test_namespace, f"{TEST_DATA}/smoke/smoke-secret.yaml")
+
+    create_example_app(kube_apis, "simple", test_namespace)
+    wait_until_all_pods_are_ready(kube_apis.v1, test_namespace)
+    req_url = f"https://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.port_ssl}/backend1"
+    ensure_connection_to_public_endpoint(
+        ingress_controller_endpoint.public_ip,
+        ingress_controller_endpoint.port,
+        ingress_controller_endpoint.port_ssl,
+    )
+
+    def fin():
+        print("Clean up simple app")
+        delete_common_app(kube_apis, "simple", test_namespace)
+        delete_secret(kube_apis.v1, secret_name, test_namespace)
+        with open("reload_ing.json", "w+") as f:
+            json.dump(reload, f, ensure_ascii=False, indent=4)
+
+    request.addfinalizer(fin)
+    return Setup(req_url)
+
+
+@pytest.fixture
+def setup_users(request):
+    return request.config.getoption("--users")
+
+
+@pytest.fixture
+def setup_rate(request):
+    return request.config.getoption("--hatch-rate")
+
+
+@pytest.fixture
+def setup_time(request):
+    return request.config.getoption("--time")
+
+
+@pytest.mark.perf
+@pytest.mark.parametrize(
+    "ingress_controller",
+    [
+        {
+            "extra_args": [
+                "-enable-prometheus-metrics",
+            ]
+        }
+    ],
+    indirect=["ingress_controller"],
+)
+class TestIngressPerf:
+    def test_perf(
+        self,
+        kube_apis,
+        ingress_controller_endpoint,
+        test_namespace,
+        ingress_controller,
+        setup,
+        setup_users,
+        setup_rate,
+        setup_time,
+    ):
+        create_items_from_yaml(kube_apis, f"{TEST_DATA}/smoke/standard/smoke-ingress.yaml", test_namespace)
+        ingress_host = get_first_ingress_host_from_yaml(f"{TEST_DATA}/smoke/standard/smoke-ingress.yaml")
+        wait_before_test()
+        ensure_connection(setup.req_url, 200, {"host": ingress_host})
+        resp = requests.get(setup.req_url, headers={"host": ingress_host}, verify=False)
+        assert resp.status_code == 200
+        collect_prom_reload_metrics(
+            reload,
+            "Ingress resource",
+            ingress_controller_endpoint.public_ip,
+            ingress_controller_endpoint.metrics_port,
+        )
+        run_perf(setup.req_url, setup_users, setup_rate, setup_time, "ing")
+        delete_items_from_yaml(kube_apis, f"{TEST_DATA}/smoke/standard/smoke-ingress.yaml", test_namespace)
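Both test modules persist the collected reload metrics through their finalizers (`reload_ing.json` above,
`reload_vs.json` below). Given the dictionary built in `collect_prom_reload_metrics`, the file would look roughly
like this; the duration and timestamp are made-up values, and the trailing space in the key comes from the f-string
as written:

```json
[
    {
        "Reload time (Ingress resource) ": "141ms",
        "TimeStamp": "2024-02-09 14:13:29.123456"
    }
]
```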
diff --git a/perf-tests/suite/test_vs_perf.py b/perf-tests/suite/test_vs_perf.py
new file mode 100644
index 0000000000..9d80baf732
--- /dev/null
+++ b/perf-tests/suite/test_vs_perf.py
@@ -0,0 +1,80 @@
+import json
+
+import pytest
+import requests
+from common import collect_prom_reload_metrics, run_perf
+from suite.utils.resources_utils import wait_before_test
+
+reload = []
+
+
+@pytest.fixture(scope="class")
+def collect(request, kube_apis, ingress_controller_endpoint, test_namespace) -> None:
+    def fin():
+        with open("reload_vs.json", "w+") as f:
+            json.dump(reload, f, ensure_ascii=False, indent=4)
+
+    request.addfinalizer(fin)
+
+
+@pytest.fixture
+def setup_users(request):
+    return request.config.getoption("--users")
+
+
+@pytest.fixture
+def setup_rate(request):
+    return request.config.getoption("--hatch-rate")
+
+
+@pytest.fixture
+def setup_time(request):
+    return request.config.getoption("--time")
+
+
+@pytest.mark.perf
+@pytest.mark.parametrize(
+    "crd_ingress_controller, virtual_server_setup",
+    [
+        (
+            {
+                "type": "complete",
+                "extra_args": ["-enable-custom-resources", "-enable-prometheus-metrics"],
+            },
+            {
+                "example": "virtual-server",
+                "app_type": "simple",
+            },
+        )
+    ],
+    indirect=True,
+)
+class TestVirtualServerPerf:
+    def test_vs_perf(
+        self,
+        kube_apis,
+        ingress_controller_endpoint,
+        crd_ingress_controller,
+        virtual_server_setup,
+        collect,
+        setup_rate,
+        setup_time,
+        setup_users,
+    ):
+        wait_before_test()
+        resp = requests.get(
+            virtual_server_setup.backend_1_url,
+            headers={"host": virtual_server_setup.vs_host},
+        )
+        assert resp.status_code == 200
+        collect_prom_reload_metrics(
+            reload,
+            "VS resource",
+            ingress_controller_endpoint.public_ip,
+            ingress_controller_endpoint.metrics_port,
+        )
+
+        run_perf(virtual_server_setup.backend_1_url, setup_users, setup_rate, setup_time, "vs")
diff --git a/perf-tests/suite/vs_request_perf.py b/perf-tests/suite/vs_request_perf.py
new file mode 100644
index 0000000000..4b7bc142dd
--- /dev/null
+++ b/perf-tests/suite/vs_request_perf.py
@@ -0,0 +1,28 @@
+import os
+
+import yaml
+from locust import HttpUser, task
+
+host = ""
+
+
+class TestResponse(HttpUser):
+    # Locust user class driving the request/response load
+    def on_start(self):
+        # get the request host from the virtual-server yaml before the test starts
+        vs_yaml = os.path.join(
+            os.path.dirname(__file__), "../../tests/data/virtual-server/standard/virtual-server.yaml"
+        )
+        with open(vs_yaml) as f:
+            docs = yaml.safe_load_all(f)
+            for dep in docs:
+                self.host = dep["spec"]["host"]
+        print("Setup finished")
+
+    @task
+    def send_request(self):
+        response = self.client.get(url="", headers={"host": self.host}, verify=False)
+        print(response.text)
+
+    # wait 400-1400 ms between tasks (legacy min_wait/max_wait locust API)
+    min_wait = 400
+    max_wait = 1400
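To iterate on one of these locustfiles without the pytest harness, the command that `run_perf` assembles can be run
by hand from `perf-tests/`; the host URL and load parameters below are placeholders, not values from this change:

```shell
cd perf-tests
locust -f suite/vs_request_perf.py --headless \
  --host https://<public-ip>:<ssl-port>/backend1 \
  --csv vs_response_times \
  -u 10 -r 5 -t 60
```

As in the pytest-driven runs, the `--csv` prefix makes locust write the aggregated results into the working
directory (`vs_response_times_stats.csv` and related files).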