diff --git a/perfkitbenchmarker/benchmark_spec.py b/perfkitbenchmarker/benchmark_spec.py
index eb63a2e19..6952c7f61 100644
--- a/perfkitbenchmarker/benchmark_spec.py
+++ b/perfkitbenchmarker/benchmark_spec.py
@@ -179,7 +179,7 @@ def __init__(
     self.uuid = '%s-%s' % (FLAGS.run_uri, uuid.uuid4())
     self.always_call_cleanup = pkb_flags.ALWAYS_CALL_CLEANUP.value
     self.dpb_service: dpb_service.BaseDpbService = None
-    self.container_cluster = None
+    self.container_cluster: container_service.BaseContainerCluster = None
     self.key = None
     self.relational_db = None
     self.non_relational_db = None
diff --git a/perfkitbenchmarker/container_service.py b/perfkitbenchmarker/container_service.py
index 892b92827..c4f6d0502 100644
--- a/perfkitbenchmarker/container_service.py
+++ b/perfkitbenchmarker/container_service.py
@@ -955,6 +955,23 @@ def WaitForResource(
       run_cmd.append('--all')
     RunKubectlCommand(run_cmd, timeout=timeout + 10)
 
+  @staticmethod
+  def WaitForSucceeded(
+      resource_name: str,
+      namespace: str | None = None,
+      timeout: int = vm_util.DEFAULT_TIMEOUT,
+  ):
+    """Waits for a resource to complete (i.e. .status.phase=='Succeeded')."""
+    run_cmd = [
+        'wait',
+        '--for=jsonpath={.status.phase}=Succeeded',
+        f'--timeout={timeout}s',
+        resource_name,
+    ]
+    if namespace:
+      run_cmd.append(f'--namespace={namespace}')
+    RunKubectlCommand(run_cmd, timeout=timeout + 10)
+
   @staticmethod
   def WaitForRollout(
       resource_name: str, timeout: int = vm_util.DEFAULT_TIMEOUT
diff --git a/perfkitbenchmarker/data/container/kubernetes_hpa/fib.yaml.j2 b/perfkitbenchmarker/data/container/kubernetes_hpa/fib.yaml.j2
new file mode 100644
index 000000000..ace760934
--- /dev/null
+++ b/perfkitbenchmarker/data/container/kubernetes_hpa/fib.yaml.j2
@@ -0,0 +1,88 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: fib
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: fib
+  namespace: fib
+spec:
+  selector:
+    matchLabels:
+      app: "fib"
+  template:
+    metadata:
+      labels:
+        app: "fib"
+    spec:
+      containers:
+      - name: "fib"
+        image: {{ fib_image }}
+        imagePullPolicy: "Always"
+        resources:
+          requests:
+            cpu: "1000m"
+            memory: "128Mi"
+          limits:
+            cpu: "2000m"
+            memory: "128Mi"
+        ports:
+        - containerPort: 5000
+          name: "web"
+          protocol: "TCP"
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: "fib"
+  namespace: "fib"
+spec:
+  scaleTargetRef:
+    apiVersion: "apps/v1"
+    kind: "Deployment"
+    name: "fib"
+  minReplicas: 5
+  maxReplicas: 250
+  metrics:
+  - type: "Resource"
+    resource:
+      name: "cpu"
+      target:
+        type: "Utilization"
+        averageUtilization: 70
+  behavior:
+    scaleDown:
+      stabilizationWindowSeconds: 60
+      policies:
+      - periodSeconds: 15
+        type: "Percent"
+        value: 100
+      selectPolicy: "Min"
+    scaleUp:
+      stabilizationWindowSeconds: 0
+      policies:
+      - periodSeconds: 15
+        type: "Percent"
+        value: 100
+      - periodSeconds: 15
+        type: "Pods"
+        value: 1000
+      selectPolicy: "Max"
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: "fib"
+  namespace: "fib"
+spec:
+  selector:
+    app: "fib"
+  type: LoadBalancer
+  externalTrafficPolicy: Cluster
+  ports:
+  - name: "tcp-port"
+    protocol: "TCP"
+    port: 5000
+    targetPort: 5000
diff --git a/perfkitbenchmarker/data/docker/fibonacci/Dockerfile b/perfkitbenchmarker/data/docker/fibonacci/Dockerfile
new file mode 100644
index 000000000..b5bfdf504
--- /dev/null
+++ b/perfkitbenchmarker/data/docker/fibonacci/Dockerfile
@@ -0,0 +1,11 @@
+# Some combinations of python 3.13/C++17 cause build failures in pandas:
+# https://github.com/cython/cython/issues/5790
+# Avoid it by just picking 3.12.
+FROM --platform=linux/amd64 python:3.12 as build
+
+WORKDIR /
+COPY requirements.txt requirements.txt
+RUN pip install -r requirements.txt
+EXPOSE 5000
+COPY . .
+ENTRYPOINT [ "./entrypoint.sh" ]
diff --git a/perfkitbenchmarker/data/docker/fibonacci/entrypoint.sh b/perfkitbenchmarker/data/docker/fibonacci/entrypoint.sh
new file mode 100755
index 000000000..0e15c9511
--- /dev/null
+++ b/perfkitbenchmarker/data/docker/fibonacci/entrypoint.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+gunicorn perf_server:app -w 4 --threads 2 --bind 0.0.0.0:5000
diff --git a/perfkitbenchmarker/data/docker/fibonacci/perf_server.py b/perfkitbenchmarker/data/docker/fibonacci/perf_server.py
new file mode 100644
index 000000000..2b957d2b2
--- /dev/null
+++ b/perfkitbenchmarker/data/docker/fibonacci/perf_server.py
@@ -0,0 +1,38 @@
+"""Toy flask app to inefficiently calculate Fibonacci numbers."""
+
+import socket
+import time
+from flask import Flask
+
+app = Flask(__name__)
+hostname = socket.gethostname()
+
+
+def calculate_fibonacci(n):
+  """Returns the nth Fibonacci number (inefficient for the sake of CPU load).
+
+  Args:
+    n: nth Fibonacci number to be calculated.
+  """
+  if n <= 1:
+    return n
+  else:
+    return calculate_fibonacci(n - 1) + calculate_fibonacci(n - 2)
+
+
+@app.route('/calculate')
+def do_calculation():
+  start_time = time.time()
+  result = calculate_fibonacci(30)  # Adjust the Fibonacci number for load
+  end_time = time.time()
+
+  return [{
+      'result': result,
+      'calculation_time': end_time - start_time,
+      'timestamp': start_time,
+      'pod_id': hostname,
+  }]
+
+
+if __name__ == '__main__':
+  app.run(debug=True, host='0.0.0.0', port=5000)
diff --git a/perfkitbenchmarker/data/locust/rampup.py b/perfkitbenchmarker/data/locust/rampup.py
new file mode 100644
index 000000000..2fbd5b75a
--- /dev/null
+++ b/perfkitbenchmarker/data/locust/rampup.py
@@ -0,0 +1,47 @@
+"""Locust file to simulate a "stepped" rampup of load."""
+
+import locust
+
+
+class Rampup(locust.HttpUser):
+  # Send 1QPS (per user)
+  wait_time = locust.constant_throughput(1)
+
+  @locust.task
+  def rampup(self):
+    # Close the connection after each request (or else users won't get load
+    # balanced to new pods.)
+    headers = {"Connection": "close"}
+
+    self.client.get("/calculate", headers=headers)
+
+
+class StagesShape(locust.LoadTestShape):
+  """Locust LoadTestShape to simulate a "stepped" rampup."""
+
+  # pyformat: disable
+  # pylint: disable=bad-whitespace
+  _stages = [
+      {"endtime": 60,  "users": 1},    # 1 rps for 1m
+      {"endtime": 360, "users": 20},   # 20 rps for 5m
+      {"endtime": 420, "users": 40},   # 40 rps for 1m
+      {"endtime": 480, "users": 60},   # 60 rps for 1m
+      {"endtime": 540, "users": 90},   # 90 rps for 1m
+      {"endtime": 660, "users": 120},  # 120 rps for 2m
+      {"endtime": 780, "users": 150},  # 150 rps for 2m
+      {"endtime": 900, "users": 1},    # 1 rps for 2m
+      # --------------
+      # Total: 15m
+  ]
+  # pyformat: enable
+
+  def tick(self):
+    run_time = self.get_run_time()
+
+    for stage in self._stages:
+      if run_time < stage["endtime"]:
+        user_count = stage["users"]
+        spawn_rate = 100  # spawn all new users roughly immediately (over 1s)
+        return (user_count, spawn_rate)
+
+    return None
diff --git a/perfkitbenchmarker/data/locust/simple.py b/perfkitbenchmarker/data/locust/simple.py
new file mode 100644
index 000000000..7506279d5
--- /dev/null
+++ b/perfkitbenchmarker/data/locust/simple.py
@@ -0,0 +1,15 @@
+"""Locust file to flood the SUT."""
+
+from locust import HttpUser
+from locust import task
+
+
+class Simple(HttpUser):
+
+  @task
+  def simple(self):
+    # Close the connection after each request (or else users won't get load
+    # balanced to new pods.)
+    headers = {"Connection": "close"}
+
+    self.client.get("/calculate", headers=headers)
diff --git a/perfkitbenchmarker/linux_benchmarks/kubernetes_hpa_benchmark.py b/perfkitbenchmarker/linux_benchmarks/kubernetes_hpa_benchmark.py
new file mode 100644
index 000000000..baa994fee
--- /dev/null
+++ b/perfkitbenchmarker/linux_benchmarks/kubernetes_hpa_benchmark.py
@@ -0,0 +1,141 @@
+# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Runs a locust based hpa benchmark on a k8s cluster.""" + +import functools +from typing import Any, Dict, List + +from absl import flags +from perfkitbenchmarker import background_tasks +from perfkitbenchmarker import benchmark_spec as bm_spec +from perfkitbenchmarker import configs +from perfkitbenchmarker import container_service +from perfkitbenchmarker.linux_packages import locust +from perfkitbenchmarker.sample import Sample + +FLAGS = flags.FLAGS + +flags.DEFINE_string( + 'kubernetes_hpa_runtime_class_name', + None, + 'A custom runtimeClassName to apply to the pods.', +) + +BENCHMARK_NAME = 'kubernetes_hpa' +BENCHMARK_CONFIG = """ +kubernetes_hpa: + description: Benchmarks how quickly hpa reacts to load + vm_groups: + default: + vm_spec: *default_dual_core + vm_count: 1 + container_specs: + kubernetes_fib: + image: fibonacci + container_registry: {} + container_cluster: + cloud: GCP + type: Kubernetes + vm_count: 1 + vm_spec: *default_dual_core + nodepools: + fibpool: + vm_count: 3 + vm_spec: + GCP: + machine_type: n2-standard-4 + AWS: + machine_type: m6i.xlarge + Azure: + machine_type: Standard_D4s_v5 +""" + + +def GetConfig(user_config: Dict[str, Any]) -> Dict[str, Any]: + """Load and return benchmark config. + + Args: + user_config: user supplied configuration (flags and config file) + + Returns: + loaded benchmark configuration + """ + config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME) + + return config + + +def _PrepareCluster(benchmark_spec: bm_spec.BenchmarkSpec): + """Prepares a cluster to run the hpa benchmark.""" + cluster: container_service.KubernetesCluster = ( + benchmark_spec.container_cluster + ) + fib_image = benchmark_spec.container_specs['kubernetes_fib'].image + + cluster.ApplyManifest( + 'container/kubernetes_hpa/fib.yaml.j2', + fib_image=fib_image, + runtime_class_name=FLAGS.kubernetes_hpa_runtime_class_name, + ) + + cluster.WaitForResource('deploy/fib', 'available', namespace='fib') + + +def _PrepareLocust(benchmark_spec: bm_spec.BenchmarkSpec): + """Prepares a vm to run locust.""" + vm = benchmark_spec.vms[0] + locust.Install(vm) + locust.Prep(vm, locust.Locustfile.RAMPUP) + + +def Prepare(benchmark_spec: bm_spec.BenchmarkSpec): + """Install fib workload (and associated hpa) on the K8s Cluster. + + Args: + benchmark_spec: The benchmark specification. Contains all data that is + required to run the benchmark. 
+  """
+
+  prepare_fns = [
+      functools.partial(_PrepareCluster, benchmark_spec),
+      functools.partial(_PrepareLocust, benchmark_spec),
+  ]
+
+  background_tasks.RunThreaded(lambda f: f(), prepare_fns)
+
+
+def Run(benchmark_spec: bm_spec.BenchmarkSpec) -> List[Sample]:
+  """Run the locust load test against the fib service."""
+
+  # Get the SUT address
+  stdout, _, _ = container_service.RunKubectlCommand([
+      'get',
+      '-n',
+      'fib',
+      'svc/fib',
+      '-o',
+      "jsonpath='{.status.loadBalancer.ingress[0].ip}'",
+  ])
+  addr = 'http://' + stdout.strip() + ':5000'
+
+  # Run locust against the SUT
+  vm = benchmark_spec.vms[0]
+  samples = locust.Run(vm, addr)
+
+  return list(samples)
+
+
+def Cleanup(benchmark_spec):
+  """Cleanup."""
+  del benchmark_spec
diff --git a/perfkitbenchmarker/linux_packages/locust.py b/perfkitbenchmarker/linux_packages/locust.py
new file mode 100644
index 000000000..a88d1169b
--- /dev/null
+++ b/perfkitbenchmarker/linux_packages/locust.py
@@ -0,0 +1,131 @@
+"""Utilities for managing a locust benchmark on a VM."""
+
+import csv
+import enum
+import re
+from typing import Iterable, TYPE_CHECKING
+
+from perfkitbenchmarker import data
+from perfkitbenchmarker.sample import Sample
+
+if TYPE_CHECKING:
+  from perfkitbenchmarker import linux_virtual_machine  # pylint: disable=g-import-not-at-top
+
+
+class Locustfile(enum.Enum):
+  SIMPLE = 'locust/simple.py'
+  RAMPUP = 'locust/rampup.py'
+
+  def GetPath(self):
+    return data.ResourcePath(self.value)
+
+
+def Install(vm: 'linux_virtual_machine.BaseLinuxVirtualMachine') -> None:
+  """Installs locust on the given VM.
+
+  Installs locust on the indicated VM. Does not start locust.
+
+  Running this a second time will idempotently install locust, but will have no
+  other effect. (If locust is already running at the time, it will not interrupt
+  it.)
+
+  Args:
+    vm: Already running VM where locust should be installed.
+
+  Raises:
+    errors.VirtualMachine.RemoteCommandError: If an error occurred on the VM.
+  """
+  vm.RunCommand(['sudo', 'apt', 'update'])
+  vm.RunCommand(['sudo', 'apt', 'install', 'python3-locust', '-y'])
+
+
+def Prep(
+    vm: 'linux_virtual_machine.BaseLinuxVirtualMachine',
+    locustfile_path: str | Locustfile,
+) -> None:
+  """Prepares a locustfile to run on the given VM.
+
+  Prepares the locustfile, but does not start (or install) locust.
+
+  Running this a second time will idempotently replace the locustfile, but will
+  have no other effect. (If locust is already running at the time, it will not
+  interrupt it.)
+
+  Args:
+    vm: Already running VM where locust should be installed.
+    locustfile_path: Path of the locustfile; see
+      https://docs.locust.io/en/stable/writing-a-locustfile.html. NB: Two
+      pre-defined locust files exist in this module that you can use:
+      Locustfile.SIMPLE, Locustfile.RAMPUP.
+
+  Raises:
+    errors.VirtualMachine.RemoteCommandError: If an error occurred on the VM.
+  """
+  if isinstance(locustfile_path, Locustfile):
+    locustfile_path = locustfile_path.GetPath()
+  vm.RemoteCopy(locustfile_path, 'locustfile.py')
+
+
+def Run(
+    vm: 'linux_virtual_machine.BaseLinuxVirtualMachine', target_host: str
+) -> Iterable[Sample]:
+  """Runs locust.
+
+  This won't return until the test is complete. (Test length is defined by the
+  locustfile.) This can be called repeatedly, in which case, the test will run
+  again. This should not be called repeatedly *in parallel*.
+
+  Args:
+    vm: Already running VM where locust/locustfile has already been installed.
+    target_host: The SUT. Can be an IP address or hostname. Must include the
+      scheme, e.g. 'http://192.168.0.1:8080'.
+
+  Yields:
+    Samples corresponding to the locust results.
+
+  Raises:
+    errors.VirtualMachine.RemoteCommandError: If an error occurred on the VM.
+      (Notably, if `Install()` and `Prep()` were not previously called to set
+      up locust, a RemoteCommandError will be raised.)
+  """
+  vm.RunCommand([
+      'locust',
+      '-f',
+      'locustfile.py',
+      '--host',
+      target_host,
+      '--autostart',
+      '--csv',
+      'test1',
+      '--autoquit',
+      '5',
+  ])
+  stdout, _, _ = vm.RunCommand(['cat', 'test1_stats_history.csv'])
+  yield from _ConvertLocustResultsToSamples(stdout)
+
+
+def _ConvertLocustResultsToSamples(locust_results: str) -> Iterable[Sample]:
+  lines = locust_results.splitlines()
+  reader = csv.DictReader(lines)
+
+  for row in reader:
+    for field in reader.fieldnames:
+      if field in ['Timestamp', 'Type', 'Name']:
+        continue
+      if row[field] == 'N/A':
+        continue
+
+      yield Sample(
+          metric='locust/' + _SanitizeFieldName(field),
+          value=float(row[field]),
+          unit='',
+          metadata={},
+          timestamp=int(row['Timestamp']),
+      )
+
+
+def _SanitizeFieldName(field: str) -> str:
+  field = re.sub(' ', '_', field)
+  field = re.sub('%', 'p', field)
+  field = re.sub('/', '_per_', field)
+  return field
diff --git a/tests/linux_packages/locust_test.py b/tests/linux_packages/locust_test.py
new file mode 100644
index 000000000..3246e8680
--- /dev/null
+++ b/tests/linux_packages/locust_test.py
@@ -0,0 +1,28 @@
+import unittest
+from perfkitbenchmarker import sample
+from perfkitbenchmarker.linux_packages import locust
+
+
+class LocustTest(unittest.TestCase):
+
+  def test_convert_locust_to_samples(self):
+    locust_output = """Timestamp,Type,Name,field1,field2
+12345678,ignored,also ignored,1,10
+12345679,ignored,also ignored,2,20
+12345680,ignored,also ignored,N/A,30"""
+
+    samples = locust._ConvertLocustResultsToSamples(locust_output)
+    self.assertCountEqual(
+        [
+            sample.Sample("locust/field1", 1, "", {}, 12345678),
+            sample.Sample("locust/field2", 10, "", {}, 12345678),
+            sample.Sample("locust/field1", 2, "", {}, 12345679),
+            sample.Sample("locust/field2", 20, "", {}, 12345679),
+            sample.Sample("locust/field2", 30, "", {}, 12345680),
+        ],
+        samples,
+    )
+
+
+if __name__ == "__main__":
+  unittest.main()
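The new WaitForSucceeded helper added to container_service.py is not exercised elsewhere in this patch. Below is a minimal, illustrative sketch (not part of the patch) of how a caller might use it, assuming the method is exposed on KubernetesCluster alongside WaitForResource and WaitForRollout, and assuming a hypothetical pod named 'locust-runner' that runs to completion in the 'fib' namespace.

# Illustrative sketch only -- not part of the patch above.
# Assumes WaitForSucceeded lands on KubernetesCluster (like its neighbours
# WaitForResource/WaitForRollout) and that a pod named 'locust-runner'
# (hypothetical) exists in the 'fib' namespace.
from perfkitbenchmarker import container_service

# Block until the pod reports .status.phase == 'Succeeded', waiting up to
# 10 minutes. Under the hood this runs:
#   kubectl wait --for=jsonpath={.status.phase}=Succeeded --timeout=600s \
#     pod/locust-runner --namespace=fib
container_service.KubernetesCluster.WaitForSucceeded(
    'pod/locust-runner', namespace='fib', timeout=600
)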