diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..075c2c1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,23 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' + +--- + +**Version information** +The pypi version or branch of snappi-cyperf + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior. Prefer code snippets, pytest test cases. + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..cb561cf --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: '' +assignees: '' + +--- + +**Is your feature request related to a use case? Please describe.** +A clear and concise description of what the use case is. + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. 
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 0000000..f02ef7d --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,51 @@ +name: cicd + +on: [push] + +jobs: + build: + runs-on: ubuntu-20.04 + strategy: + max-parallel: 1 + matrix: + python-version: [3.9] + + steps: + - name: Checkout source + uses: actions/checkout@v2 + with: + ref: ${{ github.head_ref }} + submodules: recursive + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + rm -rf .env + python do.py setup + python do.py init + - name: Build distribution + run: | + python do.py dist + - name: Install package on clean env + run: | + rm -rf .env + python do.py setup + python do.py install + python do.py init + - name: Get package version + id: get_version + run: | + echo "::set-output name=version::v$(python do.py version)" + - name: Check tag for current version + uses: mukunku/tag-exists-action@v1.0.0 + id: check_tag + with: + tag: ${{ steps.get_version.outputs.version }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Publish package + if: github.ref == 'refs/heads/main' && steps.check_tag.outputs.exists == 'false' + run: | + PYPI_USERNAME=__token__ PYPI_PASSWORD=${{ secrets.PYPI_API_TOKEN }} python do.py release \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..260daa8 --- /dev/null +++ b/.gitignore @@ -0,0 +1,18 @@ +__pycache__/ +.pytest_cache/ +.pytype/ +*.pyc + +.env/ +env/ +build/ +dist/ +*.egg-info/ + +*.log +*.pcap +*.cap +logs/ +scratch/ +.coverage +cov_report \ No newline at end of file diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..62c2974 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,134 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge 
to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. 
+ +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +open-traffic-generator@googlegroups.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. 
This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. 
+ +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations + diff --git a/do.py b/do.py new file mode 100644 index 0000000..a4ca2b8 --- /dev/null +++ b/do.py @@ -0,0 +1,276 @@ +import fnmatch +import os +import re +import sys +import shutil +import subprocess + + +def setup(): + run( + [ + py() + " -m pip install --upgrade pip", + py() + " -m pip install --upgrade virtualenv", + py() + " -m virtualenv .env", + ] + ) + + +def init(): + run( + [ + py() + " -m pip install -r requirements.txt", + ] + ) + + +def lint(): + paths = [pkg()[0], "tests", "setup.py", "do.py"] + + run( + [ + py() + " -m black " + " ".join(paths), + py() + " -m flake8 " + " ".join(paths), + ] + ) + + +def test(): + coverage_threshold = 67 + # args = [ + # '--location="https://10.39.71.97:443"', + # ( + # '--ports="10.39.65.230;6;1 10.39.65.230;6;2 10.39.65.230;6;3' + # ' 10.39.65.230;6;4"' + # ), + # '--media="fiber"', + # "tests", + # '-m "not e2e and not l1_manual"', + # '--cov=./snappi_cyperf --cov-report term' + # ' --cov-report html:cov_report', + # ] + args = [ + "tests", + '-m "not e2e and not l1_manual and not uhd"', + "--cov=./snappi_cyperf --cov-report term" " --cov-report html:cov_report", + ] + run( + [ + py() + " -m pip install pytest-cov", + py() + " -m pytest -sv {}".format(" ".join(args)), + ] + ) + import re + + with open("./cov_report/index.html") as fp: + out = fp.read() + result = re.findall(r"data-ratio.*?[>](\d+)\b", out)[0] + if int(result) < coverage_threshold: + raise Exception( + "Coverage thresold[{0}] is NOT achieved[{1}]".format( + coverage_threshold, result + ) + ) + else: + print( + "Coverage thresold[{0}] is achieved[{1}]".format( + coverage_threshold, result + ) + ) + + +def dist(): + clean() + run( + [ + py() + " 
setup.py sdist bdist_wheel --universal", + ] + ) + print(os.listdir("dist")) + + +def install(): + wheel = "{}-{}-py2.py3-none-any.whl".format(*pkg()) + run(["{} -m pip install snappi/snappi-1.6.2-py2.py3-none-any.whl".format(py())]) + # run( + # [ + # "{} -m pip install --upgrade --force-reinstall {}[testing]".format( + # py(), os.path.join("dist", wheel) + # ), + # ] + # ) + + +def generate_checksum(file): + hash_sha256 = hashlib.sha256() + with open(file, "rb") as f: + for chunk in iter(lambda: f.read(4096), b""): + hash_sha256.update(chunk) + return hash_sha256.hexdigest() + + +def generate_distribution_checksum(): + tar_name = "{}-{}.tar.gz".format(*pkg()) + tar_file = os.path.join("dist", tar_name) + tar_sha = os.path.join("dist", tar_name + ".sha.txt") + with open(tar_sha, "w") as f: + f.write(generate_checksum(tar_file)) + wheel_name = "{}-{}-py2.py3-none-any.whl".format(*pkg()) + wheel_file = os.path.join("dist", wheel_name) + wheel_sha = os.path.join("dist", wheel_name + ".sha.txt") + with open(wheel_sha, "w") as f: + f.write(generate_checksum(wheel_file)) + + +# addimg release + + +def release(): + run( + [ + py() + " -m pip install --upgrade twine", + "{} -m twine upload -u {} -p {} dist/*.whl dist/*.tar.gz".format( + py(), + os.environ["PYPI_USERNAME"], + os.environ["PYPI_PASSWORD"], + ), + ] + ) + + +def clean(): + """ + Removes filenames or dirnames matching provided patterns. + """ + pwd_patterns = [ + ".pytype", + "dist", + "build", + "*.egg-info", + ] + recursive_patterns = [ + ".pytest_cache", + ".coverage", + "__pycache__", + "*.pyc", + "*.log", + ] + + for pattern in pwd_patterns: + for path in pattern_find(".", pattern, recursive=False): + rm_path(path) + + for pattern in recursive_patterns: + for path in pattern_find(".", pattern, recursive=True): + rm_path(path) + + +def version(): + print(pkg()[-1]) + + +def pkg(): + """ + Returns name of python package in current directory and its version. 
+ """ + try: + return pkg.pkg + except AttributeError: + with open("setup.py") as f: + out = f.read() + name = re.findall(r"pkg_name = \"(.+)\"", out)[0] + version = re.findall(r"version = \"(.+)\"", out)[0] + + pkg.pkg = (name, version) + return pkg.pkg + + +def rm_path(path): + """ + Removes a path if it exists. + """ + if os.path.exists(path): + if os.path.isdir(path): + shutil.rmtree(path) + else: + os.remove(path) + + +def pattern_find(src, pattern, recursive=True): + """ + Recursively searches for a dirname or filename matching given pattern and + returns all the matches. + """ + matches = [] + + if not recursive: + for name in os.listdir(src): + if fnmatch.fnmatch(name, pattern): + matches.append(os.path.join(src, name)) + return matches + + for dirpath, dirnames, filenames in os.walk(src): + for names in [dirnames, filenames]: + for name in names: + if fnmatch.fnmatch(name, pattern): + matches.append(os.path.join(dirpath, name)) + + return matches + + +def py(): + """ + Returns path to python executable to be used. + """ + try: + return py.path + except AttributeError: + py.path = os.path.join(".env", "bin", "python") + if not os.path.exists(py.path): + py.path = sys.executable + + # since some paths may contain spaces + py.path = '"' + py.path + '"' + return py.path + + +def run(commands): + """ + Executes a list of commands in a native shell and raises exception upon + failure. 
+ """ + try: + for cmd in commands: + print(cmd) + if sys.platform != "win32": + cmd = cmd.encode("utf-8", errors="ignore") + subprocess.check_call(cmd, shell=True) + except Exception: + sys.exit(1) + + +def get_workflow_id(): + import requests + + cmd = ( + "https://api.github.com/repos/open-traffic-generator/snappi-cyperf/actions/runs" + ) + res = requests.get(cmd) + workflow_id = res.json()["workflow_runs"][0]["workflow_id"] + return workflow_id + + +def install_requests(path): + cmd = "{} -m pip install requests".format(path) + subprocess.check_call(cmd, shell=True) + + +def main(): + if len(sys.argv) >= 2: + globals()[sys.argv[1]](*sys.argv[2:]) + else: + print("usage: python do.py [args]") + + +if __name__ == "__main__": + main() diff --git a/readme.md b/readme.md index 1367c76..3a3c374 100644 --- a/readme.md +++ b/readme.md @@ -1,5 +1,14 @@ # snappi extension for Cyperf +[![license](https://img.shields.io/badge/license-MIT-green.svg)](https://en.wikipedia.org/wiki/MIT_License) +[![Project Status: Active – The project has reached a stable, usable state and is being actively developed.](https://www.repostatus.org/badges/latest/active.svg)](https://www.repostatus.org/#active) +[![Build](https://github.com/open-traffic-generator/snappi-cyperf/workflows/Build/badge.svg)](https://github.com/open-traffic-generator/snappi-cyperf/actions) +[![pypi](https://img.shields.io/pypi/v/snappi_cyperf.svg)](https://pypi.org/project/snappi_cyperf) +[![python](https://img.shields.io/pypi/pyversions/snappi_cyperf.svg)](https://pypi.python.org/pypi/snappi_cyperf) +[![Total alerts](https://img.shields.io/lgtm/alerts/g/open-traffic-generator/snappi-cyperf.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/open-traffic-generator/snappi-cyperf/alerts/) +[![Language grade: 
Python](https://img.shields.io/lgtm/grade/python/g/open-traffic-generator/snappi-cyperf.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/open-traffic-generator/snappi-cyperf/context:python) +[![downloads](https://pepy.tech/badge/snappi-cyperf)](https://pepy.tech/project/snappi-cyperf) + This extension allows executing tests written using [snappi](https://github.com/open-traffic-generator/snappi) against Cyperf, (one of) Keysight's implementation of [Open Traffic Generator](https://github.com/open-traffic-generator/models/releases). @@ -14,12 +23,126 @@ python -m pip install --upgrade "snappi[cyperf]" ## Start scripting ```python -# TODO: add complete description and snippet +import sys + +""" +Configure a raw TCP flow with, +- tx port as source to rx port as destination +- frame count 10000, each of size 128 bytes +- transmit rate of 1000 packets per second +Validate, +- frames transmitted and received for configured flow is as expected +""" import snappi + # host is Cyperf API Server -api = snappi.api(location='https://localhost:8444', ext='cyperf') -# new config +api = snappi.api(location="https://localhost:443", ext="cyperf") config = api.config() -``` +# port location is agent-ip +tx = config.ports.port(name="tx", location="10.39.44.147")[-1] +rx = config.ports.port(name="rx", location="10.39.44.190")[-1] +# configure layer 1 properties +(d1, d2) = config.devices.device(name="d1").device(name="d2") +(e1,) = d1.ethernets.ethernet(name="d1.e1") +e1.connection.port_name = "tx" +e1.mac = "01:02:03:04:05:06" +e1.step = "00:00:00:00:00:01" +e1.count = 1 +e1.mtu = 1488 + +(e2,) = d2.ethernets.ethernet(name="d2.e2") +e2.connection.port_name = "rx" +e2.mac = "01:02:03:04:06:06" +e2.step = "00:00:00:00:00:01" +e2.count = 2 +e2.mtu = 1488 + +(vlan1,) = e1.vlans.vlan(name="vlan1") +vlan1.id = 1 +vlan1.priority = 1 +vlan1.tpid = "x8100" +vlan1.count = 1 +vlan1.step = 1 +vlan1.per_count = 1 + +(vlan2,) = e2.vlans.vlan(name="vlan2") +vlan2.id = 1 
+vlan2.priority = 1 +vlan2.tpid = "x8100" +vlan2.count = 1 +vlan2.step = 1 +vlan2.per_count = 1 + +(ip1,) = e1.ipv4_addresses.ipv4(name="e1.ipv4") +ip1.address = "10.0.0.10" +ip1.gateway = "10.0.0.1" +ip1.step = "0.0.0.1" +ip1.count = 1 +ip1.prefix = 16 + +(ip2,) = e2.ipv4_addresses.ipv4(name="e2.ipv4") +ip2.address = "10.0.0.20" +ip2.gateway = "10.0.0.1" +ip2.step = "0.0.0.1" +ip2.count = 1 +ip2.prefix = 16 + +# TCP/UDP configs + +(t1,) = d1.tcps.tcp(name="Tcp1") +t1.ip_interface_name = ip1.name +t1.receive_buffer_size = 1111 +t1.transmit_buffer_size = 1112 +t1.retransmission_minimum_timeout = 100 +t1.retransmission_maximum_timeout = 1001 +t1.minimum_source_port = 100 +t1.maximum_source_port = 101 + +(t2,) = d2.tcps.tcp(name="Tcp2") +t2.ip_interface_name = ip2.name +t2.receive_buffer_size = 2222 +t2.transmit_buffer_size = 2221 +t2.retransmission_minimum_timeout = 200 +t2.retransmission_maximum_timeout = 2002 +t2.minimum_source_port = 200 +t2.maximum_source_port = 202 + +(http_1,) = d1.https.http(name="HTTP1") +http_1.profile = "Chrome" +http_1.version = "HTTP11" +http_1.connection_persistence = "ConnectionPersistenceStandard" +(http_client,) = http_1.clients.client() +http_client.cookie_reject_probability = False +http_client.max_persistent_requests = 1 + +(http_2,) = d2.https.http(name="HTTP2") +http_2.profile = "Apache" +http_2.version = "HTTP11" +http_2.connection_persistence = "ConnectionPersistenceEnabled" +(http_server,) = http_2.servers.server() + +(tp1,) = config.trafficprofile.trafficprofile() +(segment1, segment2) = tp1.segment.segment().segment() +segment1.name = "Linear segment1" +segment1.duration = 40 +segment2.name = "Linear segment2" +segment2.duration = 70 +tp1.timeline = [segment1.name, segment2.name] +tp1.objective_type = ["Connections per second", "Simulated users"] +tp1.objective_value = [100, 200] +(obj1, obj2) = tp1.objectives.objective().objective() + +print("In test before set_config") +response = api.set_config(config) +print("In test 
after set_config") +print(response) +api.close() + +cs = api.control_state() +cs.app.state = "start" # cs.app.state.START +response1 = api.set_control_state(cs) +print(response1) +cs.app.state = "stop" # cs.app.state.START +api.set_control_state(cs) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..65d4ec8 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,17 @@ +mock +ipaddr==2.2.0 +netaddr==0.8.0 +ipaddress==1.0.23 +flake8 +dpkt +requests +black; python_version > '3.6' +PyYAML +flask +urllib3 +simplejson +requests +tabulate +pandas +requests_toolbelt +timer \ No newline at end of file diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..4cc76aa --- /dev/null +++ b/setup.py @@ -0,0 +1,46 @@ +""" +To build distribution: python setup.py sdist bdist_wheel --universal +""" + +import os +import setuptools + +pkg_name = "snappi_cyperf" +version = "0.0.1" + +# read long description from readme.md +base_dir = os.path.abspath(os.path.dirname(__file__)) +with open(os.path.join(base_dir, "readme.md")) as fd: + long_description = fd.read() +print(setuptools.find_packages()) +setuptools.setup( + name=pkg_name, + version=version, + description="The Snappi Cyperf Open Traffic Generator Python Package", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/open-traffic-generator/snappi-cyperf", + author="dipendughosh", + author_email="dipendu.ghosh@keysight.com", + license="MIT", + classifiers=[ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Topic :: Software Development :: Testing :: Traffic Generation", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3", + ], + keywords="snappi cyperf testing open traffic generator automation", + include_package_data=True, + packages=setuptools.find_packages(), + python_requires=">=2.7, <4", + install_requires=["requests"], + 
extras_require={ + "testing": [ + "pytest", + "mock", + "dpkt==1.9.4", + ] + }, +) diff --git a/snappi/snappi-1.6.2-py2.py3-none-any.whl b/snappi/snappi-1.6.2-py2.py3-none-any.whl new file mode 100644 index 0000000..fa93814 Binary files /dev/null and b/snappi/snappi-1.6.2-py2.py3-none-any.whl differ diff --git a/snappi_cyperf/RESTasV3.py b/snappi_cyperf/RESTasV3.py new file mode 100644 index 0000000..cfaab6b --- /dev/null +++ b/snappi_cyperf/RESTasV3.py @@ -0,0 +1,3074 @@ +import os +import io +import sys +import glob +import time +import urllib +import urllib3 +import requests +import simplejson as json +from zipfile import ZipFile +from datetime import datetime +from requests_toolbelt import MultipartEncoder + + +class RESTasV3: + + def __init__(self, ipAddress, username, password, client_id, verify=True): + print("In RESTasV3 init") + self.ipAddress = ipAddress + self.username = username + self.password = password + self.client_id = client_id + self.verify = verify + self.connect_to_mdw() + self.startTime = None + self.startingStartTime = None + self.configuringStartTime = None + self.startTrafficTime = None + self.stopTrafficTime = None + self.stopTime = None + self.configID = None + self.sessionID = None + self.config = None + self.app_list = None + self.strike_list = None + self.attack_list = None + self.testDuration = 60 + + def connect_to_mdw(self): + self.session = requests.Session() + urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + self.session.verify = False + self.host = "https://{}".format(self.ipAddress) + self.cookie = self.get_automation_token() + self.headers = {"authorization": self.cookie} + + def __sendPost(self, url, payload, customHeaders=None, files=None, debug=True): + print("url:-", url) + expectedResponse = [200, 201, 202, 204] + print("POST at URL: {} with payload: {}".format(url, payload)) + payload = json.dumps(payload) if customHeaders is None else payload + response = self.session.post( + "{}{}".format(self.host, 
url), + headers=customHeaders if customHeaders else self.headers, + data=payload, + files=files, + verify=False, + ) + if debug: + print( + "POST response message: {}, response code: {}".format( + response.content, response.status_code + ) + ) + if response.status_code == 401: + print("Token has expired, resending request") + self.refresh_access_token() + response = self.session.post( + "{}{}".format(self.host, url), + headers=customHeaders if customHeaders else self.headers, + data=payload, + files=files, + verify=False, + ) + print( + "POST response message: {}, response code: {}".format( + response.content, response.status_code + ) + ) + if self.verify and response.status_code not in expectedResponse: + raise Exception( + "Unexpected response code. Actual: {} Expected: {}".format( + response.status_code, expectedResponse + ) + ) + + return response + + def __sendGet(self, url, expectedResponse, customHeaders=None, debug=True): + print("GET at URL: {}".format(url)) + response = self.session.get( + "{}{}".format(self.host, url), + headers=customHeaders if customHeaders else self.headers, + ) + if debug: + print( + "GET response message: {}, response code: {}".format( + response.content, response.status_code + ) + ) + if response.status_code == 401: + print("Token has expired, resending request") + self.refresh_access_token() + response = self.session.get( + "{}{}".format(self.host, url), + headers=customHeaders if customHeaders else self.headers, + ) + print( + "GET response message: {}, response code: {}".format( + response.content, response.status_code + ) + ) + if self.verify and response.status_code != expectedResponse: + raise Exception( + "Unexpected response code. 
Actual: {} Expected: {}".format( + response.status_code, expectedResponse + ) + ) + + return response + + def __sendPut(self, url, payload, customHeaders=None, debug=True): + print("PUT at URL: {} with payload: {}".format(url, payload)) + expectedResponse = [200, 204] + response = self.session.put( + "{}{}".format(self.host, url), + headers=customHeaders if customHeaders else self.headers, + data=json.dumps(payload), + ) + if debug: + print( + "PUT response message: {}, response code: {}".format( + response.content, response.status_code + ) + ) + if response.status_code == 401: + print("Token has expired, resending request") + self.refresh_access_token() + response = self.session.put( + "{}{}".format(self.host, url), + headers=customHeaders if customHeaders else self.headers, + data=json.dumps(payload), + ) + print( + "PUT response message: {}, response code: {}".format( + response.content, response.status_code + ) + ) + if self.verify and response.status_code not in expectedResponse: + raise Exception( + "Unexpected response code. 
Actual: {} Expected: {}".format( + response.status_code, expectedResponse + ) + ) + + return response + + def __sendPatch(self, url, payload, debug=True): + print("PATCH at URL: {} with payload: {}".format(url, payload)) + expectedResponse = [200, 204] + response = self.session.patch( + "{}{}".format(self.host, url), + headers=self.headers, + data=json.dumps(payload), + ) + if debug: + print( + "PATCH response message: {}, response code: {}".format( + response.content, response.status_code + ) + ) + if response.status_code == 401: + print("Token has expired, resending request") + self.refresh_access_token() + response = self.session.patch( + "{}{}".format(self.host, url), + headers=self.headers, + data=json.dumps(payload), + ) + print( + "PATCH response message: {}, response code: {}".format( + response.content, response.status_code + ) + ) + if self.verify and response.status_code not in expectedResponse: + raise Exception( + "Unexpected response code. Actual: {} Expected: {}".format( + response.status_code, expectedResponse + ) + ) + + return response + + def __sendDelete(self, url, headers=None, debug=True): + print("DELETE at URL: {}".format(url)) + expectedResponse = [200, 202, 204] + response = self.session.delete("%s%s" % (self.host, url), headers=headers) + if debug: + print( + "DELETE response message: {}, response code: {}".format( + response.content, response.status_code + ) + ) + if response.status_code == 401: + print("Token has expired, resending request") + self.refresh_access_token() + response = self.session.delete("%s%s" % (self.host, url), headers=headers) + print( + "DELETE response message: {}, response code: {}".format( + response.content, response.status_code + ) + ) + if self.verify and response.status_code not in expectedResponse: + raise Exception( + "Unexpected response code. 
Actual: {} Expected: {}".format( + response.status_code, expectedResponse + ) + ) + + return response + + def get_automation_token(self): + apiPath = "/auth/realms/keysight/protocol/openid-connect/token" + headers = {"content-type": "application/x-www-form-urlencoded"} + payload = { + "username": self.username, + "password": self.password, + "grant_type": "password", + "client_id": self.client_id, + } + + response = self.__sendPost(apiPath, payload, customHeaders=headers) + if self.verify: + if response.headers.get("content-type") == "application/json": + response = response.json() + print("Access Token: {}".format(response["access_token"])) + return response["access_token"] + else: + raise Exception("Fail to obtain authentication token") + return response + + def add_new_user(self, **kwargs): + """ + A method to add new users to keycloak + """ + apiPath = "/auth/admin/realms/keysight/users" + + user_credentials = { + "enabled": True, + "attributes": {}, + "groups": [], + "emailVerified": "", + "username": "luke_cywalker", + "email": "perfsaber@lightmail.com", + "firstName": "Luke", + "lastName": "CyWalker", + } + customHeaders = { + "Authorization": f"Bearer {self.cookie}", + "Content-Type": "application/json", + "Accept": "application/json", + } + bad_keys = [k for k in kwargs.keys() if k not in user_credentials] + if not bad_keys: + user_credentials.update(kwargs) + new_user = json.dumps(user_credentials) + response = self.__sendPost( + apiPath, payload=new_user, customHeaders=customHeaders + ) + print(response.text) + return response + + def __get_configured_users(self, start_point=0, max_no_of_users_in_resp=20): + apiPath = f"/auth/admin/realms/keysight/users?briefRepresentation=true&first={start_point}&max={max_no_of_users_in_resp}" + customHeaders = { + "Authorization": f"Bearer {self.cookie}", + "Content-Type": "application/json", + } + response = self.__sendGet(apiPath, 200, customHeaders=customHeaders).json() + return response + + def 
get_user_id_from_username(self, username): + configured_users = self.__get_configured_users() + for user in configured_users: + if user["username"] == username: + return user["id"] + else: + continue + raise f"{username} is not a configured user on this MDW" + + def change_user_role(self, username, user_role): + user_id = self.get_user_id_from_username(username) + apiPath = ( + f"/auth/admin/realms/keysight/users/{user_id}/role-mappings/realm/available" + ) + customHeaders = { + "Authorization": f"Bearer {self.cookie}", + "Content-Type": "application/json", + "Accept": "application/json", + } + user_roles = [ + "cyperf-admin", + "cyperf-user", + "uma_authorization", + "offline_access", + ] + if user_role not in user_roles: + raise f"The role {user_role} you are trying to assign does not exist" + available_user_roles = self.__sendGet( + apiPath, 200, customHeaders=customHeaders + ).json() + user_role_payload = [] + for index in range(0, len(available_user_roles)): + if available_user_roles[index]["name"] == user_role: + user_role_payload.append(available_user_roles[index]) + apiPath = f"/auth/admin/realms/keysight/users/{user_id}/role-mappings/realm" + new_role = json.dumps(user_role_payload) + response = self.__sendPost( + apiPath, payload=new_role, customHeaders=customHeaders + ) + return response + + def change_user_password(self, username, password): + user_id = self.get_user_id_from_username(username) + customHeaders = { + "Authorization": f"Bearer {self.cookie}", + "Content-Type": "application/json", + } + apiPath = f"/auth/admin/realms/keysight/users/{user_id}/reset-password" + payload = {"type": "password", "value": password, "temporary": False} + self.__sendPut(apiPath, payload=payload, customHeaders=customHeaders) + + def delete_user_by_username(self, username): + apiPath = "/auth/admin/realms/keysight/users" + configured_users = self.__get_configured_users() + user_id = "" + for user in configured_users: + if user["username"] == username: + user_id = 
user["id"]
+        # BUG FIX: fail loudly for an unknown username; an empty user_id used
+        # to issue DELETE against the users collection URL itself.
+        if not user_id:
+            raise Exception(f"{username} is not a configured user on this MDW")
+        customHeaders = {
+            "Authorization": f"Bearer {self.cookie}",
+            "Content-Type": "application/json",
+            "Accept": "application/json",
+        }
+        response = self.__sendDelete(apiPath + "/" + user_id, headers=customHeaders)
+        return response
+
+    def get_cluster_info(self):
+        """
+        This method can be used to get versions of all the charts available in the mdw.
+        E.g.: self.get_cluster_info()['ati-updates'] will return the ati version currently installed on the mdw machine
+        """
+        apiPath = "/api/v2/deployment/helm/cluster/releases"
+        response = self.__sendGet(apiPath, 200).json()
+        charts = {}
+        for chart in response:
+            charts[chart["name"]] = chart["chartDeployment"]["chartVersion"]
+        return charts
+
+    def refresh_access_token(self):
+        """Fetch a fresh automation token and install it on self.headers."""
+        access_token = self.get_automation_token()
+        self.headers = {"authorization": access_token}
+        print("Authentication token refreshed!")
+
+    def setup(self, config=None):
+        """Import *config* (or fall back to the built-in two-arm base config),
+        open a session for it and cache the session configuration."""
+        if config:
+            self.configID = self.import_config(config)
+        else:
+            self.configID = "appsec-two-arm-base"
+        self.sessionID = self.open_config()
+        self.config = self.get_session_config()
+
+    def get_session(self, session_id):
+        """Return the session object identified by *session_id*."""
+        apiPath = "/api/v2/sessions/{}".format(session_id)
+        response = self.__sendGet(apiPath, 200).json()
+        return response
+
+    def create_session(self, configID):
+        """Open a new session for *configID* and remember its session id."""
+        apiPath = "/api/v2/sessions"
+        self.configID = configID
+        response = self.__sendPost(apiPath, payload={"configUrl": self.configID})
+        if response:
+            self.sessionID = response.json()[0]["id"]
+        print("create_session response - ", response)
+        return response
+
+    def delete_session(self, session_id):
+        """
+        Delete a session by its id
+        :param session_id: The id got from getSessions
+        """
+        apiPath = "/api/v2/sessions/{}".format(session_id)
+        self.__sendDelete(apiPath, headers=self.headers)
+
+    def delete_current_session(self):
+        """
+        Delete the current session
+        return: None
+        """
+        apiPath = "/api/v2/sessions/{}".format(self.sessionID)
+        self.__sendDelete(apiPath, 
headers=self.headers)
+
+    def get_all_sessions(self):
+        """Return the list of all sessions currently opened on the app."""
+        apiPath = "/api/v2/sessions"
+        response = self.__sendGet(apiPath, 200).json()
+        return response
+
+    def delete_all_sessions(self):
+        """
+        Delete all the current sessions opened on the application
+        :return: None
+        """
+        print("Deleting all sessions...")
+        # Iterate the sessions directly instead of indexing by range; a
+        # failed delete is reported but does not stop the sweep.
+        for session in self.get_all_sessions():
+            try:
+                self.delete_session(session["id"])
+            except Exception as e:
+                print("{} could not be deleted because: {}".format(session["id"], e))
+        if len(self.get_all_sessions()) > 0:
+            raise Exception("Not all sessions could be deleted!")
+        else:
+            print("No sessions opened!")
+
+    def get_test_details(self, session_id):
+        """Return the test object attached to *session_id*."""
+        apiPath = "/api/v2/sessions/{}/test".format(session_id)
+        response = self.__sendGet(apiPath, 200).json()
+        return response
+
+    def set_license_server(self, licenseServerIP):
+        """Register *licenseServerIP* as a license server."""
+        apiPath = "/api/v2/license-servers"
+        self.__sendPost(apiPath, payload={"hostName": licenseServerIP})
+
+    def get_license_servers(self):
+        """Return the configured license servers."""
+        apiPath = "/api/v2/license-servers"
+        return self.__sendGet(apiPath, 200).json()
+
+    def wait_event_success(self, apiPath, timeout):
+        """Poll *apiPath* until its state is SUCCESS or *timeout* expires.
+
+        Returns the event response on success, or None on timeout.
+        """
+        counter = 1
+        while timeout > 0:
+            response = self.__sendGet(apiPath, 200).json()
+            if response["state"] == "SUCCESS":
+                return response
+            timeout -= counter
+            time.sleep(counter)
+        return None
+
+    def activate_license(self, activation_code, quantity=1, timeout=60):
+        """Activate *quantity* licenses for *activation_code*.
+
+        Raises TimeoutError if activation does not succeed within *timeout* s.
+        """
+        apiPath = "/api/v2/licensing/operations/activate"
+        response = self.__sendPost(
+            apiPath, payload=[{"activationCode": activation_code, "quantity": quantity}]
+        ).json()
+        apiPath = "/api/v2/licensing/operations/activate/{}".format(response["id"])
+        if not self.wait_event_success(apiPath, timeout):
+            raise TimeoutError(
+                "Failed to activate license. 
Timeout reached = {} seconds".format(
+                    timeout
+                )
+            )
+
+    def deactivate_license(self, activation_code, quantity=1, timeout=60):
+        """Deactivate *quantity* licenses for *activation_code*.
+
+        A not-installed activation code is reported and tolerated; any other
+        failure to reach SUCCESS within *timeout* seconds raises TimeoutError.
+        """
+        apiPath = "/api/v2/licensing/operations/deactivate"
+        response = self.__sendPost(
+            apiPath, payload=[{"activationCode": activation_code, "quantity": quantity}]
+        ).json()
+        apiPath = "/api/v2/licensing/operations/deactivate/{}".format(response["id"])
+        if (
+            "The Activation Code : '{}' is not installed.".format(activation_code)
+            == self.__sendGet(apiPath, 200).json()["message"]
+        ):
+            print("License code {} is not installed".format(activation_code))
+        elif not self.wait_event_success(apiPath, timeout):
+            raise TimeoutError(
+                "Failed to deactivate license. Timeout reached = {} seconds".format(
+                    timeout
+                )
+            )
+
+    def get_license_statistics(self, timeout=30):
+        """Trigger and return the counted-feature license statistics."""
+        apiPath = "/api/v2/licensing/operations/retrieve-counted-feature-stats"
+        response = self.__sendPost(apiPath, payload={}).json()
+        apiPath = (
+            "/api/v2/licensing/operations/retrieve-counted-feature-stats/{}".format(
+                response["id"]
+            )
+        )
+        if not self.wait_event_success(apiPath, timeout):
+            raise TimeoutError(
+                "Failed to obtain license stats. Timeout reached = {} seconds".format(
+                    timeout
+                )
+            )
+        apiPath = "/api/v2/licensing/operations/retrieve-counted-feature-stats/{}/result".format(
+            response["id"]
+        )
+        response = self.__sendGet(apiPath, 200).json()
+        return response
+
+    def nats_update_route(self, nats_address):
+        """Point the NATS broker route at *nats_address*."""
+        apiPath = "/api/v2/brokers"
+        self.__sendPost(apiPath, payload={"host": nats_address})
+
+    def import_config(self, config):
+        """Import a .json or .zip test config; returns the new config ID."""
+        apiPath = "/api/v2/configs"
+        if config.endswith(".json"):
+            # BUG FIX: use context managers so the config file handles are
+            # closed instead of leaked.
+            with open(config, "r") as config_file:
+                payload = json.loads(config_file.read())
+            response = self.__sendPost(apiPath, payload)
+        elif config.endswith(".zip"):
+            with open(config, "rb") as archive:
+                zip_file_path = {"archive": (config, archive, "application/zip")}
+                response = self.__sendPost(
+                    apiPath, None, customHeaders=self.headers, files=zip_file_path
+                )
+        else:
+            raise Exception("Config type not supported. 
Requires zip or json.") + if response: + print( + "Config successfully imported, config ID: {}".format( + response.json()[0]["id"] + ) + ) + return response.json()[0]["id"] + else: + raise Exception("Failed to import test config") + + def export_config(self, export_path=None): + config_id = self.configID + apiPath = "/api/v2/configs/{}?include=all&resolveDependencies=true".format( + config_id + ) + customHeaders = self.headers + customHeaders["Accept"] = "application/zip" + response = self.__sendGet(apiPath, 200, customHeaders=customHeaders) + + file_name = response.headers.get("content-disposition").split("=")[1].strip('"') + + if export_path: + file_name = os.path.join(export_path, file_name) + print("Export path/file: {}".format(file_name)) + with open(file_name, "wb") as archive: + archive.write(response.content) + return file_name + + def export_config_by_name(self, export_path=None, config_name=None): + config_id = self.get_config_id(config_name) + apiPath = "/api/v2/configs/{}?include=all&resolveDependencies=true".format( + config_id + ) + customHeaders = self.headers + customHeaders["Accept"] = "application/zip" + response = self.__sendGet(apiPath, 200, customHeaders=customHeaders) + + file_name = response.headers.get("content-disposition").split("=")[1].strip('"') + + if export_path: + file_name = os.path.join(export_path, file_name) + print("Export path/file: {}".format(file_name)) + with open(file_name, "wb") as archive: + archive.write(response.content) + return file_name + + def open_config(self): + apiPath = "/api/v2/sessions" + response = self.__sendPost(apiPath, payload={"configUrl": self.configID}) + if response: + print( + "Config successfully opened, session ID: {}".format( + response.json()[0]["id"] + ) + ) + return response.json()[0]["id"] + + def get_session_config(self, sessionId=None): + sessionId = sessionId if sessionId else self.sessionID + apiPath = "/api/v2/sessions/{}/config?include=all".format(sessionId) + return 
self.__sendGet(apiPath, 200, debug=False).json() + + def delete_config(self, config_id): + """ + Delete a config after you've specified its id + :param config_id: The id of the config + :return: None + """ + apiPath = "/api/v2/configs/{}".format(config_id) + self.__sendDelete(apiPath, self.headers) + + def delete_config_by_name(self, config_name=None): + config_id = self.get_config_id(config_name) + apiPath = "/api/v2/configs/{}".format(config_id) + self.__sendDelete(apiPath, self.headers) + + def add_network_segment(self): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment".format( + self.sessionID + ) + self.__sendPost(apiPath, {}) + + def wait_agents_connect(self, agents_nr=3, timeout=300): + response = [] + init_timeout = timeout + print("Waiting for agents to connect to the CyPerf controller...") + while timeout > 0: + response = self.get_agents() + if len(response) >= agents_nr: + print( + "There are {} agents connected to the CyPerf controller".format( + len(response) + ) + ) + return True + else: + time.sleep(10) + timeout -= 10 + else: + raise Exception( + "Expected {} agents connected after {}s. 
Got only {}.".format( + agents_nr, init_timeout, len(response) + ) + ) + + def get_agents(self): + apiPath = "/api/v2/agents" + return self.__sendGet(apiPath, 200).json() + + def check_agents_status(self, timeout=60): + waiting_time = 0 + print("Waiting for agents to be available") + while True: + agents_status = [agent["Status"] for agent in self.get_agents()] + if all(status == "STOPPED" for status in agents_status): + if waiting_time <= timeout: + print("Agents are available") + return True + else: + raise Exception( + "The agents were not available for the next test in {}s, but after {}s".format( + timeout, waiting_time + ) + ) + time.sleep(10) + waiting_time += 10 + + def get_agents_ids(self, agentIPs=None, wait=None): + if wait: + self.wait_agents_connect() + agentsIDs = list() + response = self.get_agents() + print("Found {} agents".format(len(response))) + if type(agentIPs) is str: + agentIPs = [agentIPs] + for agentIP in agentIPs: + for agent in response: + if agent["IP"] == agentIP: + print("agent_IP: {}, agent_ID: {}".format(agent["IP"], agent["id"])) + agentsIDs.append(agent["id"]) + break + return agentsIDs + + def get_agents_ips(self, wait=None): + if wait: + self.wait_agents_connect() + agentsIPs = list() + response = self.get_agents() + print("Found {} agents".format(len(response))) + # fixme B2B only - ClientAgent is excluded in AWS scenario + for agent in response: + agentsIPs.append(agent["IP"]) + print("Agents IP List: {}".format(agentsIPs)) + return agentsIPs + + def assign_agents(self): + agents_ips = self.get_agents_ips() + self.assign_agents_by_ip(agents_ips=agents_ips[0], network_segment=1) + self.assign_agents_by_ip(agents_ips=agents_ips[1], network_segment=2) + + def assign_agents_by_ip(self, agents_ips, network_segment): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/agentAssignments".format( + self.sessionID, network_segment + ) + payload = {"ByID": [], "ByTag": []} + agents_ids = 
self.get_agents_ids(agentIPs=agents_ips) + for agent_id in agents_ids: + payload["ByID"].append({"agentId": agent_id}) + self.__sendPatch(apiPath, payload) + + def test_warmup_value(self, value): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/ObjectivesAndTimeline/AdvancedSettings".format( + self.sessionID + ) + payload = {"WarmUpPeriod": int(value)} + self.__sendPatch(apiPath, payload) + + def assign_agents_by_tag(self, agents_tags, network_segment): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/agentAssignments".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"ByID": [], "ByTag": [agents_tags]}) + + def set_traffic_capture( + self, + agents_ips, + network_segment, + is_enabled=True, + capture_latest_packets=False, + max_capture_size=104857600, + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/agentAssignments".format( + self.sessionID, network_segment + ) + payload = {"ByID": []} + capture_settings = { + "captureEnabled": is_enabled, + "captureLatestPackets": capture_latest_packets, + "maxCaptureSize": max_capture_size, + } + agents_ids = self.get_agents_ids(agentIPs=agents_ips) + for agent_id in agents_ids: + payload["ByID"].append( + {"agentId": agent_id, "captureSettings": capture_settings} + ) + self.__sendPatch(apiPath, payload) + + def get_capture_files(self, captureLocation, exportTimeout=180): + self.get_result_ended() + test_id = self.get_test_id() + apiPath = "/api/v2/results/{}/operations/generate-results".format(test_id) + response = self.__sendPost(apiPath, None).json() + apiPath = response["url"][len(self.host) :] + response = self.wait_event_success(apiPath, timeout=exportTimeout) + if not response: + raise TimeoutError( + "Failed to download Captures. 
Timeout reached = {} seconds".format( + exportTimeout + ) + ) + apiPath = response["resultUrl"] + response = self.__sendGet(apiPath, 200, debug=False) + zf = ZipFile(io.BytesIO(response.content), "r") + zf.extractall(captureLocation) + for arh in glob.iglob(os.path.join(captureLocation, "*.zip")): + files = os.path.splitext(os.path.basename(arh))[0] + zf = ZipFile(arh) + zf.extractall(path=os.path.join(captureLocation, "pcaps", files)) + return response + + def add_dut(self): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment".format( + self.sessionID + ) + response = self.__sendPost(apiPath, payload={}).json() + return response + + def delete_dut(self, network_segment): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}".format( + self.sessionID, network_segment + ) + self.__sendDelete(apiPath, self.headers) + + def set_dut(self, active=True, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"active": active}) + + def check_if_dut_is_active(self, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}".format( + self.sessionID, network_segment + ) + response = self.__sendGet(apiPath, 200).json() + return response["active"] + + def set_dut_host(self, host, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"host": host}) + + def set_active_dut_configure_host(self, hostname, active=True, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"active": active, "ServerDUTHost": hostname}) + + def set_client_http_proxy(self, host, client_port, 
connect_mode, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"active": True}) + self.__sendPatch(apiPath, payload={"ConfigSettings": "ADVANCED_MODE"}) + self.__sendPatch(apiPath, payload={"ClientDUTActive": True}) + self.__sendPatch(apiPath, payload={"ClientDUTHost": host}) + self.__sendPatch(apiPath, payload={"ClientDUTPort": int(client_port)}) + self.__sendPatch(apiPath, payload={"HttpForwardProxyMode": connect_mode}) + self.__sendPatch(apiPath, payload={"ServerDUTActive": False}) + + def set_http_health_check(self, enabled=True, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPHealthCheck".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"Enabled": enabled}) + + def set_http_health_check_port(self, port, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPHealthCheck".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"Port": port}) + + def set_http_health_check_url(self, target_url, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPHealthCheck".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath + "/Params/1", payload={"Value": target_url}) + + def set_http_health_check_payload(self, payload_file, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPHealthCheck".format( + self.sessionID, network_segment + ) + if isinstance(payload_file, float): + self.__sendPatch(apiPath + "/Params/2", payload={"Value": payload_file}) + else: + self.set_custom_payload(apiPath + "/Params/2", payload_file) + + def set_http_health_check_version(self, http_version, network_segment=1): + apiPath = 
"/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPHealthCheck".format(
+            self.sessionID, network_segment
+        )
+        self.__sendPatch(apiPath + "/Params/3", payload={"Value": http_version})
+
+    def set_https_health_check(self, enabled=True, network_segment=1):
+        """Enable or disable the HTTPS health check on a DUT segment."""
+        apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPSHealthCheck".format(
+            self.sessionID, network_segment
+        )
+        self.__sendPatch(apiPath, payload={"Enabled": enabled})
+
+    def set_https_health_check_port(self, port, network_segment=1):
+        """Set the port probed by the HTTPS health check."""
+        apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPSHealthCheck".format(
+            self.sessionID, network_segment
+        )
+        self.__sendPatch(apiPath, payload={"Port": port})
+
+    def set_https_health_check_url(self, target_url, network_segment=1):
+        """Set the target URL (Params/1) of the HTTPS health check."""
+        apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPSHealthCheck".format(
+            self.sessionID, network_segment
+        )
+        self.__sendPatch(apiPath + "/Params/1", payload={"Value": target_url})
+
+    def set_https_health_check_payload(self, payload_file, network_segment=1):
+        """Set the payload (Params/2): a float is patched directly, anything
+        else is treated as a custom payload file."""
+        apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPSHealthCheck".format(
+            self.sessionID, network_segment
+        )
+        if isinstance(payload_file, float):
+            self.__sendPatch(apiPath + "/Params/2", payload={"Value": payload_file})
+        else:
+            self.set_custom_payload(apiPath + "/Params/2", payload_file)
+
+    def set_https_health_check_version(self, https_version, network_segment=1):
+        """Set the protocol version (Params/3) of the HTTPS health check."""
+        # BUG FIX: this previously patched the HTTPHealthCheck endpoint;
+        # the HTTPS variant must target HTTPSHealthCheck.
+        apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPSHealthCheck".format(
+            self.sessionID, network_segment
+        )
+        self.__sendPatch(apiPath + "/Params/3", payload={"Value": https_version})
+
+    def set_tcp_health_check(self, enabled=True, network_segment=1):
+        """Enable or disable the TCP health check on a DUT segment."""
+        apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/TCPHealthCheck".format(
+            self.sessionID, network_segment
+        )
+        
self.__sendPatch(apiPath, payload={"Enabled": enabled}) + + def set_tcp_health_check_port(self, port, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/TCPHealthCheck".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"Port": port}) + + def set_client_recieve_buffer_size_attack_profile(self, value): + apiPath = "/api/v2/sessions/{}/config/config/AttackProfiles/1/TrafficSettings/DefaultTransportProfile/ClientTcpProfile".format( + self.sessionID + ) + self.__sendPatch(apiPath, payload={"RxBuffer": value}) + + def set_client_tcp_profile(self, tcp_profile): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ClientTcpProfile".format( + self.sessionID + ) + self.__sendPatch(apiPath, tcp_profile) + + def set_server_tcp_profile(self, tcp_profile): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ServerTcpProfile".format( + self.sessionID + ) + self.__sendPatch(apiPath, tcp_profile) + + def get_client_tcp_profile(self): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ClientTcpProfile".format( + self.sessionID + ) + response = self.__sendGet(apiPath, 200).json() + return response + + def get_server_tcp_profile(self): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ServerTcpProfile".format( + self.sessionID + ) + response = self.__sendGet(apiPath, 200).json() + return response + + def set_client_http_profile(self, http_profile): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ClientHTTPProfile".format( + self.sessionID + ) + self.__sendPatch(apiPath, http_profile) + + def set_server_http_profile(self, http_profile): + apiPath = 
"/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ServerHTTPProfile".format( + self.sessionID + ) + self.__sendPatch(apiPath, http_profile) + + def get_client_http_profile(self): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ClientHTTPProfile".format( + self.sessionID + ) + response = self.__sendGet(apiPath, 200).json() + return response + + def get_server_http_profile(self): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ServerHTTPProfile".format( + self.sessionID + ) + response = self.__sendGet(apiPath, 200).json() + return response + + def get_config_config(self): + apiPath = ( + "/api/v2/sessions/{}/config/config/TrafficProfiles/1/Applications".format( + self.sessionID + ) + ) + response = self.__sendGet(apiPath, 200).json() + return response + + def set_client_transmit_buffer_size_attack_profile(self, value): + apiPath = "/api/v2/sessions/{}/config/config/AttackProfiles/1/TrafficSettings/DefaultTransportProfile/ClientTcpProfile".format( + self.sessionID + ) + self.__sendPatch(apiPath, payload={"TxBuffer": value}) + + def set_client_recieve_buffer_size_traffic_profile(self, value): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ClientTcpProfile".format( + self.sessionID + ) + self.__sendPatch(apiPath, payload={"RxBuffer": value}) + + def set_client_transmit_buffer_size_traffic_profile(self, value): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ClientTcpProfile".format( + self.sessionID + ) + self.__sendPatch(apiPath, payload={"TxBuffer": value}) + + def set_server_recieve_buffer_size_attack_profile(self, value): + apiPath = "/api/v2/sessions/{}/config/config/AttackProfiles/1/TrafficSettings/DefaultTransportProfile/ServerTcpProfile".format( + self.sessionID + ) + self.__sendPatch(apiPath, 
payload={"RxBuffer": value}) + + def set_server_transmit_buffer_size_attack_profile(self, value): + apiPath = "/api/v2/sessions/{}/config/config/AttackProfiles/1/TrafficSettings/DefaultTransportProfile/ServerTcpProfile".format( + self.sessionID + ) + self.__sendPatch(apiPath, payload={"TxBuffer": value}) + + def set_server_recieve_buffer_size_traffic_profile(self, value): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ServerTcpProfile".format( + self.sessionID + ) + self.__sendPatch(apiPath, payload={"RxBuffer": value}) + + def set_server_transmit_buffer_size_traffic_profile(self, value): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ServerTcpProfile".format( + self.sessionID + ) + self.__sendPatch(apiPath, payload={"TxBuffer": value}) + + def set_ip_network_segment(self, active=True, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"active": active}) + + def set_network_tags(self, tags="Client", network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"networkTags": [tags]}) + + def set_application_client_network_tags(self, tags, app_nr): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/Applications/{}/operations/modify-tags-recursively".format( + self.sessionID, app_nr + ) + self.__sendPost( + apiPath, payload={"SelectTags": True, "ClientNetworkTags": [tags]} + ) + + def remove_application_client_network_tags(self, tags, app_nr): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/Applications/{}/operations/modify-tags-recursively".format( + self.sessionID, app_nr + ) + self.__sendPost( + apiPath, payload={"SelectTags": False, "ClientNetworkTags": 
[tags]} + ) + + def set_network_min_agents(self, min_agents=1, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"minAgents": min_agents}) + + def add_eth_range(self, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment".format( + self.sessionID + ) + response = self.__sendPost(apiPath, payload={}).json() + return response[-1]["id"] + + def add_ip_range(self, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges".format( + self.sessionID, network_segment + ) + response = self.__sendPost(apiPath, payload={}).json() + return response[-1]["id"] + + def delete_ip_range(self, network_segment=1, ip_range=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment, ip_range + ) + self.__sendDelete(apiPath, self.headers) + + def set_ip_range_automatic_ip( + self, + ip_auto=True, + network_segment=1, + ip_range=1, + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment, ip_range + ) + self.__sendPatch(apiPath, payload={"IpAuto": ip_auto}) + + def set_ip_range_ip_start( + self, + ip_start, + network_segment=1, + ip_range=1, + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment, ip_range + ) + self.__sendPatch(apiPath, payload={"IpStart": ip_start}) + self.__sendPatch(apiPath, payload={"IpAuto": False}) + + def set_ip_range_ip_start_if_enabled( + self, + ip_start, + network_segment=1, + ip_range=1, + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment, ip_range + ) + 
self.__sendPatch(apiPath, payload={"IpStart": ip_start}) + + def set_ip_range_ip_increment( + self, ip_increment="0.0.0.1", network_segment=1, ip_range=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment, ip_range + ) + self.__sendPatch(apiPath, payload={"IpIncr": ip_increment}) + + def set_ip_range_ip_count(self, count=1, network_segment=1, ip_range=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment, ip_range + ) + self.__sendPatch(apiPath, payload={"Count": count}) + + def set_ip_range_max_count_per_agent( + self, max_count_per_agent=1, network_segment=1, ip_range=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment, ip_range + ) + self.__sendPatch(apiPath, payload={"maxCountPerAgent": max_count_per_agent}) + + def set_ip_range_automatic_netmask( + self, netmask_auto=True, network_segment=1, ip_range=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment, ip_range + ) + self.__sendPatch(apiPath, payload={"NetMaskAuto": netmask_auto}) + + def set_ip_range_netmask(self, netmask=16, network_segment=1, ip_range=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment, ip_range + ) + self.__sendPatch(apiPath, payload={"NetMask": netmask}) + + def set_ip_range_automatic_gateway( + self, gateway_auto=True, network_segment=1, ip_range=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment, ip_range + ) + self.__sendPatch(apiPath, payload={"GwAuto": gateway_auto}) + + def set_ip_range_gateway(self, gateway="10.0.0.1", 
network_segment=1, ip_range=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment, ip_range + ) + self.__sendPatch(apiPath, payload={"GwStart": gateway}) + self.__sendPatch(apiPath, payload={"GwAuto": False}) + + def set_ip_range_gateway_if_enabled( + self, gateway="10.0.0.1", network_segment=1, ip_range=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment, ip_range + ) + self.__sendPatch(apiPath, payload={"GwStart": gateway}) + + def set_ip_range_network_tags(self, tags, network_segment=1, ip_range=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment, ip_range + ) + self.__sendPatch(apiPath, payload={"networkTags": [tags]}) + + def set_ip_range_mss(self, mss=1460, network_segment=1, ip_range=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment, ip_range + ) + self.__sendPatch(apiPath, payload={"Mss": mss}) + + def set_eth_range( + self, + payload, + network_segment=1, + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/EthRange".format( + self.sessionID, network_segment + ) + self.__sendPatch( + apiPath, + payload, + ) + + def get_eth_range( + self, + network_segment=1, + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}".format( + self.sessionID, network_segment + ) + response = self.__sendGet(apiPath, 200).json() + return response + + def set_ip_range( + self, + payload, + network_segment=1, + ip_range=1, + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment, ip_range + ) + self.__sendPatch( + apiPath, + payload, + ) + + def 
set_ip_range_innervlan_range( + self, + payload, + network_segment=1, + ip_range=1, + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}/InnerVlanRange".format( + self.sessionID, network_segment, ip_range + ) + self.__sendPatch( + apiPath, + payload, + ) + + def set_eth_range_mac_auto_false(self, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/EthRange".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"MacAuto": False}) + + def set_eth_range_mac_start(self, mac_start, network_segment=1): + self.set_eth_range_mac_auto_false(network_segment) + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/EthRange".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"MacStart": mac_start}) + + def set_eth_range_mac_increment(self, mac_increment, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/EthRange".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"MacIncr": mac_increment}) + + def set_eth_range_one_mac_per_ip_false(self, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/EthRange".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"OneMacPerIP": False}) + + def set_eth_range_max_mac_count(self, count, network_segment=1): + self.set_eth_range_one_mac_per_ip_false(network_segment) + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/EthRange".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"Count": count}) + + def set_eth_range_max_mac_count_per_agent( + self, max_count_per_agent, network_segment=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/EthRange".format( + self.sessionID, network_segment 
+ ) + self.__sendPatch(apiPath, payload={"maxCountPerAgent": max_count_per_agent}) + + def set_dns_resolver(self, name_server, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/DNSResolver".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"nameServers": [{"name": name_server}]}) + + def set_dns_resolver_cache_timeout(self, timeout=0, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/DNSResolver".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"cacheTimeout": timeout}) + + def set_dut_connections(self, connections, network_segment=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}".format( + self.sessionID, network_segment + ) + self.__sendPatch(apiPath, payload={"DUTConnections": connections}) + + def set_profile_duration(self, profile_type, value): + apiPath = "/api/v2/sessions/{}/config/config/{}/1/ObjectivesAndTimeline/TimelineSegments/1".format( + self.sessionID, profile_type + ) + self.__sendPatch(apiPath, payload={"Duration": value}) + + def get_iteration_count_info(self, ap_id=1): + apiPath = "/api/v2/sessions/{}/config/config/AttackProfiles/{}/ObjectivesAndTimeline/TimelineSegments/1".format( + self.sessionID, ap_id + ) + config_type = self.get_config_type() + if config_type["traffic"]: + print("Parameter not available in traffic profile") + if config_type["attack"]: + response = self.__sendGet(apiPath, 200).json() + return response["IterationCount"] + + def get_profile_duration(self, profile_type): + apiPath = "/api/v2/sessions/{}/config/config/{}/1/ObjectivesAndTimeline/TimelineSegments/1".format( + self.sessionID, profile_type + ) + response = self.__sendGet(apiPath, 200).json() + return response["Duration"] + + def set_test_duration(self, value): + config_type = self.get_config_type() + if config_type["traffic"]: + 
self.set_profile_duration(profile_type="TrafficProfiles", value=int(value)) + if config_type["attack"]: + self.set_profile_duration(profile_type="AttackProfiles", value=int(value)) + self.testDuration = int(value) + + def read_test_duration(self): + TrafficProfilesDuration = 0 + AttackProfilesDuration = 0 + config_type = self.get_config_type() + if config_type["traffic"]: + TrafficProfilesDuration = self.get_profile_duration( + profile_type="TrafficProfiles" + ) + if config_type["attack"]: + AttackProfilesDuration = self.get_profile_duration( + profile_type="AttackProfiles" + ) + self.testDuration = max(TrafficProfilesDuration, AttackProfilesDuration) + + def send_modified_config(self): + apiPath = "/api/v2/sessions/{}/config".format(self.sessionID) + self.__sendPut(apiPath, self.config) + + def get_CPUCoresNR(self): + agent_info = self.get_agents() + l = [i["cpuInfo"] for i in agent_info] + count = list(map(len, l)) + return count[0] + + def get_CC_min_value(self, configured_cc_value, agent_cpu_cores): + favoured_sessions_no = 1 # static value + global_cc_min_value = 0 + app_info = {} + config = self.get_session_config() + traffic_profiles = config["Config"]["TrafficProfiles"][0]["Applications"] + total_weight = sum(app["ObjectiveWeight"] for app in traffic_profiles) + for i in range(len(config["Config"]["TrafficProfiles"][0]["Applications"])): + app_info[i] = { + "weight": config["Config"]["TrafficProfiles"][0]["Applications"][i][ + "ObjectiveWeight" + ], + "connections_no": len( + config["Config"]["TrafficProfiles"][0]["Applications"][i][ + "Connections" + ] + ), + } + for app_no, app_details in app_info.items(): + individual_app_cc_value = configured_cc_value * ( + app_details["weight"] / total_weight + ) + individual_app_cc_min_value = min( + individual_app_cc_value + - 2 * app_details["connections_no"] * agent_cpu_cores, + individual_app_cc_value + - min( + max(0.1 * individual_app_cc_value, agent_cpu_cores), + 40 * agent_cpu_cores, + ), + ) + 
min_alowed_cc_value = ( + app_details["connections_no"] * favoured_sessions_no + + app_details["connections_no"] + ) * agent_cpu_cores + global_cc_min_value += int( + max(individual_app_cc_min_value, min_alowed_cc_value) + ) + return global_cc_min_value + + def get_config_type(self): + self.config = self.get_session_config() + config_type = { + "traffic": False, + "traffic_profiles": [], + "tls_applications": [], + "tp_primary_obj": None, + "primary_obj_adv_time_params": [], + "tp_secondary_obj": None, + "tp_ssl": False, + "attack": False, + "attack_profiles": [], + "tls_attacks": [], + "att_obj": False, + "at_ssl": False, + "dut": False, + "tunnel_count_per_outer_ip": None, + "redirect_info": {}, + "ipsec": None, + "CPUCoresNr": self.get_CPUCoresNR(), + "test_duration": self.testDuration, + } + if len(self.config["Config"]["TrafficProfiles"]) > 0: + config_type["traffic"] = ( + True if self.config["Config"]["TrafficProfiles"][0]["Active"] else False + ) + tp_profiles = self.config["Config"]["TrafficProfiles"][0] + redirect_info = {} + for application in tp_profiles["Applications"]: + if application["ProtocolID"] not in config_type["traffic_profiles"]: + ###TODO: THIS IS A PROVISORY WORKAROUND BECAUSE CONFIG DOES NOT HOLD INFORMATION OF ENABLED APPS + try: + if application["Active"] == True: + config_type["traffic_profiles"].append( + application["ProtocolID"] + ) + if ( + application["ClientTLSProfile"]["tls12Enabled"] + or application["ClientTLSProfile"]["tls13Enabled"] + ): + config_type["tls_applications"].append( + application["ProtocolID"] + ) + except KeyError: + config_type["traffic_profiles"].append( + application["ProtocolID"] + ) + if ( + application["ClientTLSProfile"]["tls12Enabled"] + or application["ClientTLSProfile"]["tls13Enabled"] + ): + config_type["tls_applications"].append( + application["ProtocolID"] + ) + if application["ProtocolID"] == "HTTP": + for param in application["Params"]: + if param["Name"] == "Follow HTTP Redirects": + 
redirect_info[application["Name"]] = param["Value"] + config_type["redirect_info"] = redirect_info + objectives = tp_profiles["ObjectivesAndTimeline"] + objective_dm = { + "type": objectives["PrimaryObjective"]["Type"], + "unit": objectives["TimelineSegments"][0]["PrimaryObjectiveUnit"], + "value": objectives["TimelineSegments"][0]["PrimaryObjectiveValue"], + "steady_step_duration": objectives["TimelineSegments"][0]["Duration"], + } + if objectives["PrimaryObjective"]["Type"] == "Concurrent connections": + objective_dm["concurrent_connections_min"] = self.get_CC_min_value( + objectives["TimelineSegments"][0]["PrimaryObjectiveValue"], + config_type["CPUCoresNr"], + ) + config_type["tp_primary_obj"] = objective_dm + advance_timeline_params = [None, None] + if objectives["PrimaryObjective"]["Timeline"][0]["Enabled"]: + step_ramp_up = { + "Duration": objectives["PrimaryObjective"]["Timeline"][0][ + "Duration" + ], + "NumberOfSteps": objectives["PrimaryObjective"]["Timeline"][0][ + "NumberOfSteps" + ], + } + advance_timeline_params[0] = step_ramp_up + if objectives["PrimaryObjective"]["Timeline"][2]["Enabled"]: + step_ramp_down = { + "Duration": objectives["PrimaryObjective"]["Timeline"][2][ + "Duration" + ], + "NumberOfSteps": objectives["PrimaryObjective"]["Timeline"][2][ + "NumberOfSteps" + ], + } + advance_timeline_params[1] = step_ramp_down + config_type["primary_objective_adv_time_params"] = advance_timeline_params + if ( + len(objectives["TimelineSegments"][0]["SecondaryObjectiveValues"]) > 0 + and len(objectives["SecondaryObjectives"]) > 0 + ): + objective_dm = { + "type": objectives["SecondaryObjectives"][0]["Type"], + "unit": objectives["TimelineSegments"][0][ + "SecondaryObjectiveValues" + ][0]["Unit"], + "value": objectives["TimelineSegments"][0][ + "SecondaryObjectiveValues" + ][0]["Value"], + } + if ( + objectives["SecondaryObjectives"][0]["Type"] + == "Concurrent connections" + ): + objective_dm["concurrent_connections_min"] = self.get_CC_min_value( + 
objectives["TimelineSegments"][0]["SecondaryObjectiveValues"][ + 0 + ]["Value"], + config_type["CPUCoresNr"], + ) + config_type["tp_secondary_obj"] = objective_dm + if tp_profiles["TrafficSettings"]["DefaultTransportProfile"][ + "ClientTLSProfile" + ]["version"]: + config_type["tp_ssl"] = True + + if len(self.config["Config"]["AttackProfiles"]) > 0: + config_type["attack"] = ( + True if self.config["Config"]["AttackProfiles"][0]["Active"] else False + ) + at_profiles = self.config["Config"]["AttackProfiles"][0] + for attack in at_profiles["Attacks"]: + if attack["ProtocolID"] not in config_type["attack_profiles"]: + ###TODO: THIS IS A PROVISORY WORKAROUND BECAUSE CONFIG DOES NOT HOLD INFORMATION OF ENABLED ATTACKS + try: + if attack["Active"] == True: + config_type["attack_profiles"].append(attack["ProtocolID"]) + if ( + attack["ClientTLSProfile"]["tls12Enabled"] + or attack["ClientTLSProfile"]["tls13Enabled"] + ): + config_type["tls_attacks"].append(attack["ProtocolID"]) + except KeyError: + config_type["attack_profiles"].append(attack["ProtocolID"]) + if ( + attack["ClientTLSProfile"]["tls12Enabled"] + or attack["ClientTLSProfile"]["tls13Enabled"] + ): + config_type["tls_attacks"].append(attack["ProtocolID"]) + objective_dm = { + "attack_rate": at_profiles["ObjectivesAndTimeline"]["TimelineSegments"][ + 0 + ]["AttackRate"], + "max_concurrent_attack": at_profiles["ObjectivesAndTimeline"][ + "TimelineSegments" + ][0]["MaxConcurrentAttack"], + } + config_type["att_obj"] = objective_dm + if at_profiles["TrafficSettings"]["DefaultTransportProfile"][ + "ClientTLSProfile" + ]["version"]: + config_type["tp_ssl"] = True + + if self.config["Config"]["NetworkProfiles"][0]["DUTNetworkSegment"][0][ + "active" + ]: + config_type["dut"] = True + + tunnel_stacks = self.config["Config"]["NetworkProfiles"][0]["IPNetworkSegment"][ + 0 + ]["TunnelStacks"] + if len(tunnel_stacks) > 0: + config_type["tunnel_count_per_outer_ip"] = tunnel_stacks[0]["TunnelRange"][ + 
"TunnelCountPerOuterIP" + ] + + ipsec_stacks = self.config["Config"]["NetworkProfiles"][0]["IPNetworkSegment"][ + 0 + ]["IPSecStacks"] + if len(ipsec_stacks) > 0: + config_type["ipsec"] = { + "host_count_per_tunnel": ipsec_stacks[0]["EmulatedSubConfig"][ + "HostCountPerTunnel" + ], + "outer_ip_count": ipsec_stacks[0]["OuterIPRange"]["Count"], + "rekey_margin": ipsec_stacks[0]["RekeyMargin"], + "lifetime_phase_1": ipsec_stacks[0]["IPSecRange"]["IKEPhase1Config"][ + "Lifetime" + ], + "lifetime_phase_2": ipsec_stacks[0]["IPSecRange"]["IKEPhase2Config"][ + "Lifetime" + ], + } + return config_type + + def start_test(self, initializationTimeout=60): + apiPath = "/api/v2/sessions/{}/test-run/operations/start".format(self.sessionID) + response = self.__sendPost(apiPath, payload={}).json() + self.startTime = self.__getEpochTime() + self.read_test_duration() + print("Waiting for the test to start...") + response = self.get_test_status() + actual_duration = 0 + counter = 1 + while actual_duration < initializationTimeout: + response = self.get_test_status() + if response["status"] == "STARTING" and not self.startingStartTime: + self.startingStartTime = self.__getEpochTime() + if response["status"] == "CONFIGURING" and not self.configuringStartTime: + self.configuringStartTime = self.__getEpochTime() + if response["status"] == "STARTED": + if not self.startTrafficTime: + self.startTrafficTime = self.__getEpochTime() + return self.startTrafficTime + if response["status"] == "ERROR": + raise Exception( + "Error when starting the test! {}".format( + self.get_test_details(self.sessionID) + ) + ) + actual_duration += counter + time.sleep(counter) + else: + raise Exception( + "ERROR! 
Test could not start in {} seconds, test state: {}".format( + initializationTimeout, response + ) + ) + + def stop_test(self, stopTimeout=60): + apiPath = "/api/v2/sessions/{}/test-run/operations/stop".format(self.sessionID) + response = self.__sendPost(apiPath, payload={}).json() + stopID = response["id"] + print("Stop ID : {}".format(stopID)) + progressPath = "/api/v2/sessions/{}/test-run/operations/stop/{}".format( + self.sessionID, stopID + ) + self.__sendGet(progressPath, 200).json() + counter = 2 + iteration = 0 + while iteration < stopTimeout: + response = self.__sendGet(progressPath, 200).json() + print("Stop Test Progress: {}".format(response["progress"])) + if response["state"] == "SUCCESS": + break + if response["state"] == "ERROR": + raise Exception( + "Error when stopping the test! {}".format( + self.get_test_details(self.sessionID) + ) + ) + iteration += counter + time.sleep(counter) + + def abort_test(self, abortTimeout=60): + apiPath = "/api/v2/sessions/{}/test-run/operations/abort".format(self.sessionID) + response = self.__sendPost(apiPath, payload={}).json() + stopID = response["id"] + print("Abort ID : {}".format(stopID)) + progressPath = "/api/v2/sessions/{}/test-run/operations/abort/{}".format( + self.sessionID, stopID + ) + self.__sendGet(progressPath, 200).json() + counter = 2 + iteration = 0 + while iteration < abortTimeout: + response = self.__sendGet(progressPath, 200).json() + print("Abort Test Progress: {}".format(response["progress"])) + if response["state"] == "SUCCESS": + break + if response["state"] == "ERROR": + raise Exception( + "Error when aborting the test! 
{}".format( + self.get_test_details(self.sessionID) + ) + ) + iteration += counter + time.sleep(counter) + + def get_error_notifications_for_session(self, notif_type="error"): + apiPath = "/api/v2/notifications?exclude=links&includeSeen=true&severity={}&sessionId={}".format( + notif_type, self.sessionID + ) + return self.__sendGet(apiPath, 200).json() + + def get_error_notifications_for_test(self, notif_type="error"): + apiPath = "/api/v2/notifications?exclude=links&includeSeen=true&severity={}&sessionId={}".format( + notif_type, self.sessionID + ) + error_notifs = self.__sendGet(apiPath, 200).json() + test_id = self.get_test_id() + return [ + notif + for notif in error_notifs + if "TestId" in notif["tags"] and notif["tags"]["TestId"] == test_id + ] + + def get_test_status(self): + apiPath = "/api/v2/sessions/{}/test".format(self.sessionID) + return self.__sendGet(apiPath, 200).json() + + def wait_test_finished(self, timeout=300): + print("Waiting for the test to finish...") + response = self.get_test_status() + actual_duration = 0 + counter = 1 + while actual_duration < self.testDuration + timeout: + response = self.get_test_status() + if response["status"] == "STOPPING" and not self.stopTrafficTime: + self.stopTrafficTime = self.__getEpochTime() + if response["status"] == "STOPPED": + if response["testElapsed"] >= response["testDuration"]: + print("Test gracefully finished") + self.stopTime = self.__getEpochTime() + return self.stopTime + else: + raise Exception( + "Error! Test stopped before reaching the configured duration = {}; Elapsed = {}".format( + response["testDuration"], response["testElapsed"] + ) + ) + else: + print( + "Test duration = {}; Elapsed = {}".format( + response["testDuration"], response["testElapsed"] + ) + ) + actual_duration += counter + time.sleep(counter) + else: + print( + "Test did not stop after timeout {}s. Test status= {}. 
Force stopping the test!".format( + timeout, response["status"] + ) + ) + self.stop_test() + raise Exception( + "Error! Test failed to stop after timeout {}s.".format(timeout) + ) + + @staticmethod + def __getEpochTime(): + pattern = "%d.%m.%Y %H:%M:%S" + timeH = datetime.now().strftime(pattern) + epoch = int(time.mktime(time.strptime(timeH, pattern))) + return epoch + + def get_test_id(self): + apiPath = "/api/v2/sessions/{}/test".format(self.sessionID) + response = self.__sendGet(apiPath, 200).json() + return response["testId"] + + def get_available_stats_name(self): + apiPath = "/api/v2/results/{}/stats".format(self.get_test_id()) + response = self.__sendGet(apiPath, 200).json() + available_stats = [] + for stat in response: + available_stats.append(stat["name"]) + return available_stats + + def get_stats_values(self, statName): + print("Get the values for {}".format(statName)) + apiPath = "/api/v2/results/{}/stats/{}".format(self.get_test_id(), statName) + response = self.__sendGet(apiPath, 200).json() + return response + + def get_all_stats(self, csvLocation, exportTimeout=180): + test_id = self.get_test_id() + apiPath = "/api/v2/results/{}/operations/generate-csv".format(test_id) + response = self.__sendPost(apiPath, None).json() + apiPath = response["url"][len(self.host) :] + response = self.wait_event_success(apiPath, timeout=exportTimeout) + if not response: + raise TimeoutError( + "Failed to download CSVs. 
Timeout reached = {} seconds".format( + exportTimeout + ) + ) + apiPath = response["resultUrl"] + response = self.__sendGet(apiPath, 200, debug=False) + zf = ZipFile(io.BytesIO(response.content), "r") + zf.extractall(csvLocation) + return response + + def get_result_ended(self, timeout=5): + apiPath = "/api/v2/results/{}".format(self.get_test_id()) + while timeout > 0: + print("Pending result availability...") + response = self.__sendGet(apiPath, 200).json() + result_end_time = response["endTime"] + result_availability = result_end_time > 0 + if result_availability: + print("Result may now be downloaded...") + return result_availability + else: + time.sleep(1) + timeout -= 1 + raise Exception("Result are not available for {}".format(self.get_test_id())) + + def get_applications(self): + apiPath = "/api/v2/resources/apps?include=all" + response = self.__sendGet(apiPath, 200, debug=False).json() + return response + + def get_applications_by_pages(self, take=50, skip=0): + apiPath = "/api/v2/resources/apps?take={}&skip={}&exclude=links".format( + take, skip + ) + response = self.__sendGet(apiPath, 200, debug=False).json() + return response + + def get_configured_applications(self): + """ + return: returns a list of tuples containing application name(index 0) / configured app position in UI(index 1) + """ + configured = self.get_session_config(self.sessionID) + applications = configured["Config"]["TrafficProfiles"][0]["Applications"] + configured_applications = [app["Name"] for app in applications] + apps_in_test = [] + for item in configured_applications: + application_position = item.split()[-1] + application_name = " ".join( + [word for word in item.split() if not word.isdigit()] + ) + apps_in_test.append((application_name, application_position)) + return apps_in_test + + def switch_application_order(self, new_apps_order): + """ + Gets current order of the apps and reorders them the same order as the given list. 
+ + :parameter new_apps_order: a list with new indexes representing the new order of the apps. + :return: list + """ + configured = self.get_session_config(self.sessionID) + applications = configured["Config"]["TrafficProfiles"][0]["Applications"] + reordered_applications = [applications[index] for index in new_apps_order] + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1".format( + self.sessionID + ) + self.__sendPatch(apiPath, payload={"Applications": reordered_applications}) + + def get_applications_with_filter_applied( + self, *args, filter_mode="and", take=50, skip=0 + ): + filter_by = ",".join(args) + apiPath = f"/api/v2/resources/apps?take={take}&skip={skip}&exclude=links&searchCol=Name,Description&searchVal={filter_by}&filterMode={filter_mode}" + result = self.__sendGet(apiPath, 200).json() + filtered_applications = [ + (item["Name"], item["Description"]) for item in result["data"] + ] + return filtered_applications + + def get_attacks_with_filter_applied( + self, *args, filter_mode="and", take=50, skip=0 + ): + filter_by = ",".join(args) + apiPath = f"/api/v2/resources/attacks?take={take}&skip={skip}&exclude=links&include=Metadata&searchCol=Name,Description,Direction,Severity,Keywords,References&searchVal={filter_by}&filterMode={filter_mode}" + result = self.__sendGet(apiPath, 200).json() + filtered_applications = [ + ( + item["Name"], + item["Description"], + item["Metadata"]["Direction"], + item["Metadata"]["Severity"], + ) + for item in result["data"] + ] + return filtered_applications + + def get_attacks(self, filter_by, take=50, skip=0): + apiPath = f"/api/v2/resources/attacks?take={take}&skip={skip}&exclude=links&include=Metadata&searchCol=Name&searchVal={urllib.parse.quote(filter_by)}&filterMode=and" + response = self.__sendGet(apiPath, 200, debug=False).json() + return response + + def get_strikes(self): + apiPath = "/api/v2/resources/strikes?include=all" + response = self.__sendGet(apiPath, 200).json() + return response + + def 
get_application_id(self, app_name): + if not self.app_list: + self.app_list = self.get_applications() + print("Getting application {} ID...".format(app_name)) + for app in self.app_list: + if app["Name"] == app_name: + print("Application ID = {}".format(app["id"])) + return app["id"] + + def get_strike_id(self, strike_name): + if not self.strike_list: + self.strike_list = self.get_strikes() + for strike in self.strike_list: + if strike["Name"] == strike_name: + print("Strike ID = {}".format(strike["id"])) + return strike["id"] + + def get_attack_id(self, attack_name): + self.attack_list = self.get_attacks(attack_name) + for attack in self.attack_list["data"]: + if attack_name == attack["Name"]: + return attack["id"] + + def set_agent_optimization_mode(self, mode: str, tp_id=1): + """ + Configures the agent optimization mode. + Currently the 2 modes supported: + RATE_MODE, BALANCED_MODE + """ + if mode in ["RATE_MODE", "BALANCED_MODE"]: + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/AdvancedSettings".format( + self.sessionID, tp_id + ) + self.__sendPatch(apiPath, payload={"AgentOptimizationMode": mode}) + else: + raise ValueError("{} is not supported".format(mode)) + + def set_attack_warmup_period(self, warmup_period, ap_id=1): + """ + Sets the warmup period for the Attack profile + """ + apiPath = "/api/v2/sessions/{}/config/config/AttackProfiles/{}/ObjectivesAndTimeline/TimelineSegments/1".format( + self.sessionID, ap_id + ) + self.__sendPatch(apiPath, payload={"WarmUpPeriod": int(warmup_period)}) + + def set_traffic_warmup_period(self, warmup_period, tp_id=1): + """ + Sets the warmup period for the Traffic profile + """ + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/AdvancedSettings".format( + self.sessionID, tp_id + ) + self.__sendPatch(apiPath, payload={"WarmUpPeriod": int(warmup_period)}) + + def add_attack(self, attack_name, ap_id=1): + app_id = 
self.get_attack_id(attack_name=attack_name) + apiPath = "/api/v2/sessions/{}/config/config/AttackProfiles/{}/Attacks".format( + self.sessionID, ap_id + ) + response = self.__sendPost( + apiPath, payload={"ExternalResourceURL": app_id} + ).json() + return response[-1]["id"] + + def add_strike_as_attack(self, strike_name, ap_id=1): + app_id = self.get_strike_id(strike_name=strike_name) + apiPath = "/api/v2/sessions/{}/config/config/AttackProfiles/{}/Attacks".format( + self.sessionID, ap_id + ) + response = self.__sendPost(apiPath, payload={"ProtocolID": app_id}).json() + return response[-1]["id"] + + def create_customized_attack( + self, application_name, strike_name, insert_at_position + ): + app_id = self.get_application_id(app_name=application_name) + api_path = "/api/v2/sessions/{}/config/config/AttackProfiles/1/Attacks/operations/create".format( + self.sessionID + ) + payload = { + "Actions": [ + {"ProtocolID": strike_name, "InsertAtIndex": insert_at_position} + ], + "ResourceURL": f"api/v2/resources/apps/{app_id}", + } + response = self.__sendPost(api_path, payload=payload).json() + status_url = response["url"].split("/")[-1] + api_path = "/api/v2/sessions/{}/config/config/AttackProfiles/1/Attacks/operations/create/{}".format( + self.sessionID, status_url + ) + for index in range(10): + response = self.__sendGet(api_path, 200).json() + time.sleep(1) + if response["state"] == "SUCCESS": + return + else: + continue + raise Exception("Application action was not added after 10 seconds") + + def insert_attack_action_at_exact_position( + self, attack_id, action_id, insert_at_position + ): + api_path = f"/api/v2/sessions/{self.sessionID}/config/config/AttackProfiles/1/Attacks/{attack_id}/Tracks/1/operations/add-actions" + response = self.__sendPost( + api_path, + payload={ + "Actions": [ + {"ActionID": action_id, "InsertAtIndex": insert_at_position} + ] + }, + ).json() + status_url = response["url"].split("/")[-1] + api_path = 
f"/api/v2/sessions/{self.sessionID}/config/config/AttackProfiles/1/Attacks/{attack_id}/Tracks/1/operations/add-actions/{status_url}" + for index in range(10): + response = self.__sendGet(api_path, 200).json() + time.sleep(1) + if response["state"] == "SUCCESS": + return response["result"][insert_at_position]["id"] + else: + continue + + def add_application(self, app_name, tp_id=1): + app_id = self.get_application_id(app_name=app_name) + apiPath = ( + "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/Applications".format( + self.sessionID, tp_id + ) + ) + response = self.__sendPost( + apiPath, payload={"ExternalResourceURL": app_id} + ).json() + return response[-1]["id"] + + def get_application_profile(self, tp_id=1): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}".format( + self.sessionID, tp_id + ) + response = self.__sendGet(apiPath, 200, debug=False).json() + return response + + # Not working need to check + def add_application_profile(self, tp_id=1): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles".format( + self.sessionID + ) + response = self.__sendPost( + apiPath, payload={"id": 1, "Active": True, "Name": "Application Profile"} + ).json() + return response[-1]["id"] + + def insert_application_at_action_exact_position(self, app_id, action_id, position): + api_path = f"/api/v2/sessions/{self.sessionID}/config/config/TrafficProfiles/1/Applications/{app_id}/Tracks/1/operations/add-actions" + response = self.__sendPost( + api_path, + payload={"Actions": [{"ActionID": action_id, "InsertAtIndex": position}]}, + ).json() + status_url = response["url"].split("/")[-1] + api_path = f"/api/v2/sessions/{self.sessionID}/config/config/TrafficProfiles/1/Applications/{app_id}/Tracks/1/operations/add-actions/{status_url}" + for index in range(10): + response = self.__sendGet(api_path, 200).json() + time.sleep(1) + if response["state"] == "SUCCESS": + return response["result"][position]["id"] + else: + continue + raise 
Exception("Application action was not added after 10 seconds") + + def add_application_action(self, app_id, action_name, tp_id=1): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/Applications/{}/Tracks/1/Actions".format( + self.sessionID, tp_id, app_id + ) + self.__sendPost(apiPath, payload={"Name": action_name}).json() + + def set_application_action_value( + self, app_id, action_id, param_id, value, file_value=None, source=None, tp_id=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/Applications/{}/Tracks/1/Actions/{}/Params/{}".format( + self.sessionID, tp_id, app_id, action_id, param_id + ) + payload = {"Value": value, "FileValue": file_value, "Source": source} + self.__sendPatch(apiPath, payload) + + def get_application_actions(self, app_id): + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/TrafficProfiles/1/Applications/{app_id}/Tracks/1/Actions" + response = self.__sendGet(apiPath, 200, debug=False).json() + return response + + def set_application_actions_values( + self, payload, app_id, action_id, param_id, tp_id=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/Applications/{}/Tracks/1/Actions/{}/Params/{}".format( + self.sessionID, tp_id, app_id, action_id, param_id + ) + self.__sendPatch(apiPath, payload) + + def get_application_actions_values(self, app_id, action_id): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/Applications/{}/Tracks/1/Actions/{}/Params".format( + self.sessionID, app_id, action_id + ) + response = self.__sendGet(apiPath, 200, debug=False).json() + return response + + def delete_application_action(self, app_id, action_id, tp_id=1): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/Applications/{}/Tracks/1/Actions/{}".format( + self.sessionID, tp_id, app_id, action_id + ) + self.__sendDelete(apiPath, self.headers) + + def add_attack_action(self, att_id, action_name, ap_id=1): + apiPath = 
"/api/v2/sessions/{}/config/config/AttackProfiles/{}/Attacks/{}/Tracks/1/Actions".format( + self.sessionID, ap_id, att_id + ) + self.__sendPost(apiPath, payload={"Name": action_name}).json() + + def add_attack_profile(self): + apiPath = "/api/v2/sessions/{}/config/config/AttackProfiles".format( + self.sessionID + ) + response = self.__sendPost(apiPath, payload={}).json() + return response[-1]["id"] + + def add_traffic_profile(self): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles".format( + self.sessionID + ) + response = self.__sendPost(apiPath, payload={}).json() + return response[-1]["id"] + + def set_traffic_profile_timeline( + self, duration, objective_value, objective_unit=None, pr_id=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/TimelineSegments/1".format( + self.sessionID, pr_id + ) + payload = { + "Duration": duration, + "PrimaryObjectiveValue": objective_value, + "PrimaryObjectiveUnit": objective_unit, + } + self.__sendPatch(apiPath, payload) + + def set_application_simulated_users_timeline( + self, max_su_per_second, max_pending_su, objective_unit=None, pr_id=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/ObjectivesAndTimeline/PrimaryObjective".format( + self.sessionID + ) + payload = { + "MaxSimulatedUsersPerInterval": max_su_per_second, + "MaxPendingSimulatedUsers": max_pending_su, + } + self.__sendPatch(apiPath, payload) + + def set_primary_objective(self, payload, tp_id=1): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/PrimaryObjective".format( + self.sessionID, tp_id + ) + self.__sendPatch(apiPath, payload) + + def get_primary_objective(self, tp_id=1): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/PrimaryObjective".format( + self.sessionID, tp_id + ) + response = self.__sendGet(apiPath, 200, debug=False).json() + return response + + def set_secondary_objective(self, payload, 
tp_id=1): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/SecondaryObjective".format( + self.sessionID, tp_id + ) + self.__sendPatch(apiPath, payload) + + def get_secondary_objective(self, tp_id=1): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/SecondaryObjective".format( + self.sessionID, tp_id + ) + response = self.__sendGet(apiPath, 200, debug=False).json() + return response + + def add_primary_objective(self, objective, tp_id=1): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/PrimaryObjective".format( + self.sessionID, tp_id + ) + self.__sendPatch(apiPath, payload={"Type": objective, "Unit": ""}) + + def add_secondary_objective(self, tp_id=1): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/SecondaryObjectives".format( + self.sessionID, tp_id + ) + self.__sendPost(apiPath, payload={}).json() + + def add_secondary_objective_value( + self, objective, objective_value, objective_unit=None, tp_id=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/SecondaryObjectives/1".format( + self.sessionID, tp_id + ) + self.__sendPatch(apiPath, payload={"Type": objective}) + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/TimelineSegments/1/SecondaryObjectiveValues/1".format( + self.sessionID, tp_id + ) + self.__sendPatch(apiPath, payload={"Value": objective_value}) + if objective_unit: + self.__sendPatch(apiPath, payload={"Unit": objective_unit}) + + def set_traffic_profile_client_tls(self, version, status, pr_id=1): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/TrafficSettings/DefaultTransportProfile/ClientTLSProfile".format( + self.sessionID, pr_id + ) + self.__sendPatch(apiPath, payload={version: status}) + + def set_traffic_profile_server_tls(self, version, status, pr_id=1): + apiPath = 
"/api/v2/sessions/{}/config/config/TrafficProfiles/{}/TrafficSettings/DefaultTransportProfile/ServerTLSProfile".format( + self.sessionID, pr_id + ) + self.__sendPatch(apiPath, payload={version: status}) + + def set_attack_profile_timeline( + self, + duration, + objective_value, + max_concurrent_attacks=None, + iteration_count=0, + ap_id=1, + ): + apiPath = "/api/v2/sessions/{}/config/config/AttackProfiles/{}/ObjectivesAndTimeline/TimelineSegments/1".format( + self.sessionID, ap_id + ) + payload = { + "Duration": duration, + "AttackRate": objective_value, + "MaxConcurrentAttack": max_concurrent_attacks, + "IterationCount": iteration_count, + } + self.__sendPatch(apiPath, payload) + + def set_attack_profile_client_tls(self, version, status, pr_id=1): + apiPath = "/api/v2/sessions/{}/config/config/AttackProfiles/{}/TrafficSettings/DefaultTransportProfile/ClientTLSProfile".format( + self.sessionID, pr_id + ) + self.__sendPatch(apiPath, payload={version: status}) + + def set_attack_profile_server_tls(self, version, status, pr_id=1): + apiPath = "/api/v2/sessions/{}/config/config/AttackProfiles/{}/TrafficSettings/DefaultTransportProfile/ServerTLSProfile".format( + self.sessionID, pr_id + ) + self.__sendPatch(apiPath, payload={version: status}) + + def set_custom_payload(self, apiPath, fileName): + resp = self.__sendPatch(apiPath, payload={"Source": "PayloadProfile"}) + if resp.status_code != 204: + print("Error patching payload type: {}".format(resp.json())) + uploadUrl = "/api/v2/resources/payloads" + payload = self.get_resource(uploadUrl, name=os.path.basename(fileName)) + if not payload: + payloadFile = open(fileName, "rb") + resp = self.__sendPost( + uploadUrl, + payload=None, + customHeaders=self.headers, + files={"file": payloadFile}, + ).json() + payload = { + "FileValue": { + "fileName": resp["fileName"], + "resourceURL": resp["resourceURL"], + } + } + else: + payload = { + "FileValue": { + "fileName": payload["name"], + "resourceURL": 
payload["links"][0]["href"], + } + } + self.__sendPatch(apiPath, payload=payload) + + def set_application_custom_payload(self, appName, actionName, paramName, fileName): + config = self.get_session_config()["Config"] + applicationsByName = { + app["Name"]: app for app in config["TrafficProfiles"][0]["Applications"] + } + httpApp = applicationsByName[appName] + actionsByName = { + action["Name"]: action for action in httpApp["Tracks"][0]["Actions"] + } + postAction = actionsByName[actionName] + actionParametersByName = { + param["Name"]: param for param in postAction["Params"] + } + bodyParam = actionParametersByName[paramName] + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/Applications/{}/Tracks/1/Actions/{}/Params/{}".format( + self.sessionID, httpApp["id"], postAction["id"], bodyParam["id"] + ) + self.set_custom_payload(apiPath, fileName) + + def set_custom_playlist(self, apiPath, fileName, value=None): + resp = self.__sendPatch(apiPath, payload={"Source": "Playlist"}) + if resp.status_code != 204: + print("Error patching payload type: {}".format(resp.json())) + uploadUrl = "/api/v2/resources/playlists" + playlist = self.get_resource(uploadUrl, name=os.path.basename(fileName)) + if not playlist: + playlistFile = open(fileName, "rb") + resp = self.__sendPost( + uploadUrl, + payload=None, + customHeaders=self.headers, + files={"file": playlistFile}, + ).json() + payload = { + "FileValue": { + "fileName": resp["fileName"], + "resourceURL": resp["resourceURL"], + "Value": value, + } + } + else: + payload = { + "FileValue": { + "fileName": playlist["name"], + "resourceURL": playlist["links"][0]["href"], + "Value": value, + } + } + self.__sendPatch(apiPath, payload=payload) + + def set_attack_custom_playlist( + self, attackName, actionName, paramName, fileName, value="Query" + ): + config = self.get_session_config()["Config"] + attacksByName = { + app["Name"]: app for app in config["AttackProfiles"][0]["Attacks"] + } + attack = 
attacksByName[attackName] + actionsByName = { + action["Name"]: action for action in attack["Tracks"][0]["Actions"] + } + postAction = actionsByName[actionName] + actionParametersByName = { + param["Name"]: param for param in postAction["Params"] + } + bodyParam = actionParametersByName[paramName] + apiPath = "/api/v2/sessions/{}/config/config/AttackProfiles/1/Attacks/{}/Tracks/1/Actions/{}/Params/{}".format( + self.sessionID, attack["id"], postAction["id"], bodyParam["id"] + ) + self.set_custom_playlist(apiPath, fileName, value) + + def get_all_configs(self): + apiPath = "/api/v2/configs" + response = self.__sendGet(apiPath, 200).json() + return response + + def get_config_id(self, test_name): + configs = self.get_all_configs() + for config in configs: + if config["displayName"] == test_name: + print("Config ID = {}".format(config["id"])) + return config["id"] + + def get_resource(self, apiPath, name): + resp = self.__sendGet(apiPath, 200).json() + for resource in resp: + if resource["name"] == name: + return resource + + def save_config(self, test_name, timeout=10): + apiPath = "/api/v2/sessions/{}/config/operations/save".format(self.sessionID) + response = self.__sendPost(apiPath, payload={"Name": test_name}).json() + apiPath = "/api/v2/sessions/{}/config/operations/save/{}".format( + self.sessionID, response["id"] + ) + if not self.wait_event_success(apiPath, timeout): + raise TimeoutError( + "Could not save copy for test= {}. 
Timeout reached = {} seconds".format( + test_name, timeout + ) + ) + + def load_config(self, test_name): + configID = self.get_config_id(test_name=test_name) + apiPath = "/api/v2/sessions" + response = self.__sendPost(apiPath, payload={"configUrl": configID}).json() + if response: + print( + "Test= {} was loaded with ID= {}".format(test_name, response[-1]["id"]) + ) + self.sessionID = response[-1]["id"] + return + else: + raise Exception("Failed to load test= {}".format(test_name)) + + def collect_diagnostics(self, timeout=600): + apiPath = "/api/v2/diagnostics/operations/export" + response = self.__sendPost( + apiPath, payload={"componentList": [], "sessionId": self.sessionID} + ).json() + apiPath = "/api/v2/diagnostics/operations/export/{}".format(response["id"]) + response = self.wait_event_success(apiPath, timeout) + + return response["id"] + + def set_diagnostics_level(self, log_level): + apiPath = "/api/v2/log-config" + response = self.__sendPut(apiPath, payload={"level": log_level}) + + return response + + def add_tunnel_stack(self, network_segment_number=1): + """ + Add a tunnel stack + """ + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/NetworkProfiles/1/IPNetworkSegment/{network_segment_number}/TunnelStacks" + self.__sendPost(apiPath, payload={}) + + def get_tunnel_stack(self): + """ + Obtain information about tunnel stack configuration on client. + """ + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/NetworkProfiles/1/IPNetworkSegment/1/TunnelStacks" + return self.__sendGet(apiPath, 200).json() + + def get_tunnel_outer_ip(self): + """ + Obtain information about tunnel stack configuration on client. + """ + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/NetworkProfiles/1/IPNetworkSegment/1/TunnelStacks/1/OuterIPRange" + return self.__sendGet(apiPath, 200).json() + + def set_tunnel_stack_gateway_vpn_ip( + self, tunnel_type, gw_vpn_ip, network_segment_number=1 + ): + """ + Set the value for VPN gateway IP. 
+ + :params tunnel_type: (str) CiscoAnyConnectSettings, FortinetSettings, PANGPSettings + """ + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/NetworkProfiles/1/IPNetworkSegment/{network_segment_number}/TunnelStacks/1/TunnelRange/{tunnel_type}" + response = self.get_tunnel_stack() + if response: + self.__sendPatch(apiPath, payload={"VPNGateway": gw_vpn_ip}) + else: + self.add_tunnel_stack() + self.__sendPatch(apiPath, payload={"VPNGateway": gw_vpn_ip}) + + def set_tunnel_stack_type(self, tunnel_type, network_segment_number=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/TunnelStacks/1/TunnelRange".format( + self.sessionID, network_segment_number + ) + self.__sendPatch(apiPath, payload={"VendorType": tunnel_type}) + + def set_tunnel_count(self, number_of_tunnels, network_segment_number=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/TunnelStacks/1/TunnelRange".format( + self.sessionID, network_segment_number + ) + resp = self.__sendPatch( + apiPath, payload={"TunnelCountPerOuterIP": int(number_of_tunnels)} + ) + if resp.status_code != 204: + print("Error setting VPN tunnel count per outer IP: {}".format(resp.json())) + + def set_tunnel_establisment_timeout(self, tunnel_timeout, network_segment_number=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/TunnelStacks/1/TunnelRange".format( + self.sessionID, network_segment_number + ) + self.__sendPatch( + apiPath, payload={"TunnelEstablishmentTimeout": tunnel_timeout} + ) + + def set_pan_tunnel_portal_hostname(self, portal_hostname, network_segment_number=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/TunnelStacks/1/TunnelRange/PANGPSettings".format( + self.sessionID, network_segment_number + ) + self.__sendPatch(apiPath, payload={"PortalHostname": portal_hostname}) + + def set_tunnel_pan_vpn_gateways( + self, tunnel_type, vpn_gateways, 
network_segment_number=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/TunnelStacks/1/TunnelRange/{}".format( + self.sessionID, network_segment_number, tunnel_type + ) + self.__sendPatch( + apiPath, payload={"VPNGateways": list(vpn_gateways.split(" "))} + ) + + def set_tunnel_cisco_vpn_gateway( + self, tunnel_type, vpn_gateways, network_segment_number=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/TunnelStacks/1/TunnelRange/{}".format( + self.sessionID, network_segment_number, tunnel_type + ) + self.__sendPatch(apiPath, payload={"VPNGateway": vpn_gateways}) + + def set_tunnel_cisco_connection_profiles( + self, connection_profiles, network_segment_number=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/TunnelStacks/1/TunnelRange/CiscoAnyConnectSettings".format( + self.sessionID, network_segment_number + ) + self.__sendPatch( + apiPath, + payload={"ConnectionProfiles": list(connection_profiles.split(" "))}, + ) + + def set_tunnel_auth_settings( + self, tunnel_type, field, value, source, network_segment_number=1 + ): + # Field can be UsernamesParam or PasswordsParam + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/TunnelStacks/1/TunnelRange/{}/AuthSettings/{}".format( + self.sessionID, network_segment_number, tunnel_type, field + ) + self.__sendPatch(apiPath, payload={"Value": value, "Source": source}) + + def set_tunnel_outer_ip_gateway(self, gateway_ip, network_segment_number=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/TunnelStacks/1/OuterIPRange".format( + self.sessionID, network_segment_number + ) + self.__sendPatch(apiPath, payload={"GwAuto": False}) + resp = self.__sendPatch(apiPath, payload={"GwStart": gateway_ip}) + if resp.status_code != 204: + print("Error network tags in Inncer IP range: {}".format(resp.json())) + + def 
set_tunnel_automatic_gateway( + self, gateway_auto=True, network_segment=1, tunnel_stack=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/TunnelStacks/{}/OuterIPRange".format( + self.sessionID, network_segment, tunnel_stack + ) + self.__sendPatch(apiPath, payload={"GwAuto": gateway_auto}) + + def set_tunnel_outer_ip_range( + self, ip_start, count, max_count, network_segment_number=1, ip_incr="0.0.0.1" + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/TunnelStacks/1/OuterIPRange".format( + self.sessionID, network_segment_number + ) + self.__sendPatch(apiPath, payload={"IpAuto": False}) + self.__sendPatch(apiPath, payload={"IpStart": ip_start}) + self.__sendPatch(apiPath, payload={"IpIncr": ip_incr}) + self.__sendPatch(apiPath, payload={"Count": count}) + self.__sendPatch(apiPath, payload={"maxCountPerAgent": max_count}) + + def set_tunnel_outer_ip_start(self, ip_start, network_segment_number=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/TunnelStacks/1/OuterIPRange".format( + self.sessionID, network_segment_number + ) + self.__sendPatch(apiPath, payload={"IpStart": ip_start}) + + def set_tunnel_stack_dns_servers(self, servers, network_segment_number=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/TunnelStacks/1/TunnelRange/DNSResolver".format( + self.sessionID, network_segment_number + ) + serverList = list(map(lambda x: {"name": x}, list(servers.split(",")))) + self.__sendPatch(apiPath, payload={"nameServers": serverList}) + + def delete_ip_stack(self, network_segment_number=1, ip_stack_number=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment_number, ip_stack_number + ) + self.__sendDelete(apiPath, self.headers) + + def set_tunnel_inner_ip_network_tags(self, network_tags, network_segment_number=1): + apiPath = 
"/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/TunnelStacks/1/InnerIPRange".format( + self.sessionID, network_segment_number + ) + self.__sendPatch(apiPath, payload={"networkTags": network_tags}) + + def set_tunnel_udp_port( + self, tunnel_type, encapsulation_type, udp_port, network_segment_number=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/TunnelStacks/1/TunnelRange/{}/{}".format( + self.sessionID, network_segment_number, tunnel_type, encapsulation_type + ) + self.__sendPatch(apiPath, payload={"UdpPort": udp_port}) + + def __get_status_for_advance_timeline(self): + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/TrafficProfiles/1/ObjectivesAndTimeline/PrimaryObjective/Timeline" + result = self.__sendGet(apiPath, 200) + return result.json() + + def set_primary_timeline(self, payload, segment_id, tp_id=1): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/PrimaryObjective/Timeline/{}".format( + self.sessionID, tp_id, segment_id + ) + self.__sendPatch(apiPath, payload) + + def get_primary_timeline(self): + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/TrafficProfiles/1/ObjectivesAndTimeline/PrimaryObjective/Timeline" + result = self.__sendGet(apiPath, 200) + return result.json() + + def get_specific_value_from_given_ramp_segment(self, segment_type, given_key): + acceptable_keys = [ + "Duration", + "SegmentType", + "Enabled", + "AutomaticObjectiveValue", + "NumberOfSteps", + "PrimaryObjectiveValue", + "SecondaryObjectiveValues", + "ObjectiveValue", + "ObjectiveUnit", + ] + if given_key not in acceptable_keys: + raise Exception( + f"Cannot find key {given_key}, make sure the key exists in this list {acceptable_keys}" + ) + response = self.__get_status_for_advance_timeline() + return response[segment_type - 1][given_key] + + def enable_step_ramp_down_or_up(self, segment_type=None): + """ + A method to activate the step ramp-up feature 
+ + :params segment_type: default set to enable for both ramp up or down, could receive a string "up"/"down" if + ... only one segment is to be enabled. + """ + if segment_type: + self.__setStepRamp(True, segment_type) + else: + for segment in [1, 3]: + self.__setStepRamp(True, segment) + + def disable_step_ramp_down_or_up(self, segment_type=None): + """ + A method to activate the step ramp-up feature + + :params segment_type: default set to enable for both ramp up or down, could receive a string "up"/"down" if + ... only one segment is to be enabled. + """ + if segment_type: + self.__setStepRamp(False, segment_type) + else: + for segment in [1, 3]: + self.__setStepRamp(False, segment) + + def __setStepRamp(self, action, segment_type): + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/TrafficProfiles/1/ObjectivesAndTimeline/PrimaryObjective/Timeline/{segment_type}" + self.__sendPatch(apiPath, payload={"Enabled": action}) + + def set_specific_value_for_given_ramp_segment(self, segment_type, key, value): + """ + A method to set different parameters at the timeline segment + + :params key: one of the endpoint keys, must fit in the acceptable_keys list + :params value: any value + :params segment_type: specify which one of the RampUpDown segment to use must be "UP" / "DOWN" + """ + acceptable_keys = [ + "Duration", + "NumberOfSteps", + "ObjectiveValue", + "ObjectiveUnit", + ] + if key not in acceptable_keys: + raise Exception( + f"Cannot find key {key}, make sure the key exists in this list {acceptable_keys}" + ) + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/TrafficProfiles/1/ObjectivesAndTimeline/PrimaryObjective/Timeline/{segment_type}" + self.__sendPatch(apiPath, payload={key: value}) + if self.get_specific_value_from_given_ramp_segment(segment_type, key) != value: + raise Exception( + f"An error has occured, setting {key} to {value} did not work!" 
+ ) + return True + + def delete_added_application(self, app_id): + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/TrafficProfiles/1/Applications/{app_id}" + self.__sendDelete(apiPath, self.headers) + + def get_disk_usage_info(self): + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/ExpectedDiskSpace" + result = self.__sendGet(apiPath, 200) + return result.json() + + def __set_traffic_profile(self, option): + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/TrafficProfiles/1" + self.__sendPatch(apiPath, payload={"Active": option}) + + def __set_attack_profile(self, option): + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/AttackProfiles/1" + self.__sendPatch(apiPath, payload={"Active": option}) + + def enable_traffic_profile(self): + self.__set_traffic_profile(True) + + def disable_traffic_profile(self): + self.__set_traffic_profile(False) + + def enable_attack_profile(self): + self.__set_attack_profile(True) + + def disable_attack_profile(self): + self.__set_attack_profile(False) + + def enable_application_inherit_tls(self, app_id): + """ + enable inherit tls for any added application + """ + pass + self.__set_application_inherit_tls_status(True, app_id) + + def disable_application_inherit_tls(self, app_id): + """ + disable inherit tls for any added application + + :param app_id: application index in the added order + """ + self.__set_application_inherit_tls_status(False, app_id) + + def enable_attack_inherit_tls(self, attack_id): + """ + enable inherit tls for any added attack + + :param attack_id: attack index in the added order + """ + self.__set_attack_inherit_tls_status(True, attack_id) + + def disable_attack_inherit_tls(self, attack_id): + """ + enable inherit tls for any added attack + + :param attack_id: attack index in the added order + """ + self.__set_attack_inherit_tls_status(False, attack_id) + + def __set_application_inherit_tls_status(self, status, app_id): + apiPath = 
f"/api/v2/sessions/{self.sessionID}/config/config/TrafficProfiles/1/Applications/{app_id}" + self.__sendPatch(apiPath, payload={"InheritTLS": status}) + + def __set_attack_inherit_tls_status(self, status, attack_id): + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/AttackProfiles/1/Attacks/{attack_id}" + self.__sendPatch(apiPath, payload={"InheritTLS": status}) + + def clear_agent_ownership(self, agents_ips=None): + agents_ids = [] + result_id = self.get_current_session_result() + if agents_ips: + for agent_ip in agents_ips: + agents_ids.append(self.get_agents_ids(agent_ip)[0]) + apiPath = "/api/v2/agents/operations/release" + agents_to_release = [] + for agent_id in agents_ids: + agents_to_release.append({"agentId": agent_id}) + payload = {"sessionId": result_id, "agentsData": agents_to_release} + response = self.__sendPost(apiPath, payload=payload).json() + status_url = response["id"] + api_path = f"/api/v2/agents/operations/release/{status_url}" + for index in range(10): + response = self.__sendGet(api_path, 200).json() + time.sleep(1) + if response["state"] == "SUCCESS": + return True + else: + continue + + def __get_results_list(self): + apiPath = "/api/v2/results?exclude=links" + result = self.__sendGet(apiPath, 200) + return result.json() + + def get_current_session_result(self): + sessions_results = self.__get_results_list() + for index in range(0, len(sessions_results)): + if sessions_results[index]["activeSession"] == self.sessionID: + return sessions_results[index]["testName"] + else: + continue + + def configure_attack_client_tls_settings( + self, attack_id, config_endpoint, config_change + ): + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/AttackProfiles/1/Attacks/{attack_id}/ClientTLSProfile" + available_endpoints = [ + "tls12Enabled", + "tls13Enabled", + "ciphers12", + "ciphers13", + "immediateClose", + "middleBoxEnabled", + ] + if config_endpoint not in available_endpoints: + raise ("The endpoint you are trying to 
configure doesn't exist!") + self.__sendPatch(apiPath, payload={config_endpoint: config_change}) + + def configure_attack_server_tls_settings( + self, attack_id, config_endpoint, config_change + ): + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/AttackProfiles/1/Attacks/{attack_id}/ServerTLSProfile" + available_endpoints = [ + "tls12Enabled", + "tls13Enabled", + "ciphers12", + "ciphers13", + "immediateClose", + "middleBoxEnabled", + ] + if config_endpoint not in available_endpoints: + raise ("The endpoint you are trying to configure doesn't exist!") + self.__sendPatch(apiPath, payload={config_endpoint: config_change}) + + def configure_application_client_tls_settings( + self, app_id, config_endpoint, config_change + ): + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/TrafficProfiles/1/Applications/{app_id}/ClientTLSProfile" + available_endpoints = [ + "tls12Enabled", + "tls13Enabled", + "ciphers12", + "ciphers13", + "immediateClose", + "middleBoxEnabled", + ] + if config_endpoint not in available_endpoints: + raise ("The endpoint you are trying to configure doesn't exist!") + self.__sendPatch(apiPath, payload={config_endpoint: config_change}) + + def configure_application_server_tls_settings( + self, app_id, config_endpoint, config_change + ): + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/TrafficProfiles/1/Applications/{app_id}/ServerTLSProfile" + available_endpoints = [ + "tls12Enabled", + "tls13Enabled", + "ciphers12", + "ciphers13", + "immediateClose", + "middleBoxEnabled", + ] + if config_endpoint not in available_endpoints: + raise ("The endpoint you are trying to configure doesn't exist!") + self.__sendPatch(apiPath, payload={config_endpoint: config_change}) + + def enable_configured_application(self, app_id): + self.__set_configured_applications(True, app_id) + + def disable_configured_application(self, app_id): + self.__set_configured_applications(False, app_id) + + def enable_configured_attack(self, app_id): + 
self.__set_configured_attacks(True, app_id) + + def disable_configured_attack(self, attack_id): + self.__set_configured_attacks(False, attack_id) + + def __set_configured_applications(self, option, application): + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/TrafficProfiles/1/Applications/{application}" + self.__sendPatch(apiPath, payload={"Active": option}) + + def __set_configured_attacks(self, option, attack): + apiPath = f"/api/v2/sessions/{self.sessionID}/config/config/AttackProfiles/1/Attacks/{attack}" + self.__sendPatch(apiPath, payload={"Active": option}) + + def set_sack(self, profile, tcp_profile, value=True): + apiPath = "/api/v2/sessions/{}/config/config/{}/1/TrafficSettings/DefaultTransportProfile/{}".format( + self.sessionID, profile, tcp_profile + ) + self.__sendPatch(apiPath, payload={"SackEnabled": value}) + + def add_ipsec_stack(self, network_segment_number=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPSecStacks".format( + self.sessionID, network_segment_number + ) + self.__sendPost(apiPath, {}) + + def set_ipsec_stack_role(self, network_segment_number, ipstack_role): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPSecStacks/1".format( + self.sessionID, network_segment_number + ) + self.__sendPatch(apiPath, payload={"StackRole": ipstack_role}) + + def set_ipsec_tunnel_reattempt_count( + self, tunnel_reattempt_count, network_segment_number + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPSecStacks/1".format( + self.sessionID, network_segment_number + ) + self.__sendPatch(apiPath, payload={"RetryCount": tunnel_reattempt_count}) + + def set_ipsec_emulated_subnet_settings( + self, + startIP, + increment, + prefix=24, + host_count_per_tunnel=1, + network_segment_number=1, + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPSecStacks/1/EmulatedSubConfig".format( + 
self.sessionID, network_segment_number + ) + payload = { + "Start": startIP, + "Increment": increment, + "Prefix": prefix, + "HostCountPerTunnel": host_count_per_tunnel, + } + self.__sendPatch(apiPath, payload) + + def set_ph1_ipsec_algorithms( + self, ph1_algorithms, network_segment_number=1, ipsec_stack_number=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPSecStacks/{}/IPSecRange/IKEPhase1Config".format( + self.sessionID, network_segment_number, ipsec_stack_number + ) + algorithms = list(ph1_algorithms.split(" ")) + payload = { + "EncAlgorithm": algorithms[0], + "HashAlgorithm": algorithms[1], + "DHGroup": algorithms[2], + "PrfAlgorithm": algorithms[3], + } + self.__sendPatch(apiPath, payload) + + def set_ph2_ipsec_algorithms( + self, ph2_algorithms, network_segment_number=1, ipsec_stack_number=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPSecStacks/{}/IPSecRange/IKEPhase2Config".format( + self.sessionID, network_segment_number, ipsec_stack_number + ) + algorithms = list(ph2_algorithms.split(" ")) + payload = { + "EncAlgorithm": algorithms[0], + "HashAlgorithm": algorithms[1], + "PfsGroup": algorithms[2], + } + self.__sendPatch(apiPath, payload) + + def set_ipsec_public_peer( + self, public_peer_ip, network_segment_number=1, ipsec_stack_number=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPSecStacks/{}/IPSecRange".format( + self.sessionID, network_segment_number, ipsec_stack_number + ) + self.__sendPatch(apiPath, {"PublicPeer": public_peer_ip}) + + def set_ipsec_public_peer_increment( + self, public_peer_increment, network_segment_number=1, ipsec_stack_number=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPSecStacks/{}/IPSecRange".format( + self.sessionID, network_segment_number, ipsec_stack_number + ) + self.__sendPatch(apiPath, {"PublicPeerIncrement": public_peer_increment}) + 
+ def set_ipsec_protected_subnet_start( + self, protected_subnet_start, network_segment_number=1, ipsec_stack_number=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPSecStacks/{}/IPSecRange/ProtectedSubConfig".format( + self.sessionID, network_segment_number, ipsec_stack_number + ) + self.__sendPatch(apiPath, {"Start": protected_subnet_start}) + + def set_ipsec_protected_subnet_increment( + self, increment, network_segment_number=1, ipsec_stack_number=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPSecStacks/{}/IPSecRange/ProtectedSubConfig".format( + self.sessionID, network_segment_number, ipsec_stack_number + ) + self.__sendPatch(apiPath, {"Increment": increment}) + + def set_ipsec_preshared_key( + self, sharedkey, network_segment_number, ipsec_stack_number=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPSecStacks/{}/IPSecRange/AuthSettings".format( + self.sessionID, network_segment_number, ipsec_stack_number + ) + self.__sendPatch(apiPath, {"SharedKey": sharedkey}) + + def set_ipsec_outer_ip_range_start( + self, outer_ip_range_start, network_segment_number=1, ipsec_stack_number=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPSecStacks/{}/OuterIPRange".format( + self.sessionID, network_segment_number, ipsec_stack_number + ) + self.__sendPatch(apiPath, {"IpStart": outer_ip_range_start}) + + def set_ipsec_outer_ip_range_gateway( + self, outer_ip_range_gateway, network_segment_number=1, ipsec_stack_number=1 + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPSecStacks/{}/OuterIPRange".format( + self.sessionID, network_segment_number, ipsec_stack_number + ) + self.__sendPatch(apiPath, {"GwStart": outer_ip_range_gateway}) + + def set_ipsec_outer_ip_range_params( + self, + startIP, + increment, + count, + maxcountperagent, + netmask, + gateway, + 
network_segment_number, + ipsec_stack_number=1, + ): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPSecStacks/{}/OuterIPRange".format( + self.sessionID, network_segment_number, ipsec_stack_number + ) + payload = { + "IpAuto": False, + "IpStart": startIP, + "IpIncr": increment, + "Count": count, + "maxCountPerAgent": maxcountperagent, + "NetMask": netmask, + "GwStart": gateway, + } + self.__sendPatch(apiPath, payload) + + def check_ER_status(self, network_segment_number=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/EmulatedRouter".format( + self.sessionID, network_segment_number + ) + response = self.__sendGet(apiPath, 200).json() + return response["Enabled"] + + def get_IP_stack_IP_start(self, network_segment_number=1, IP_range=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}".format( + self.sessionID, network_segment_number, IP_range + ) + response = self.__sendGet(apiPath, 200).json() + return response["IpStart"] + + def get_TLS_VPN_IP_start(self, network_segment_number=1, tunnel_stack=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/TunnelStacks/{}/OuterIPRange".format( + self.sessionID, network_segment_number, tunnel_stack + ) + response = self.__sendGet(apiPath, 200).json() + return response["IpStart"] + + def get_IPsec_IP_start(self, network_segment_number=1, IPsec_stack=1): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPSecStacks/{}/OuterIPRange".format( + self.sessionID, network_segment_number, IPsec_stack + ) + response = self.__sendGet(apiPath, 200).json() + return response["IpStart"] + + def create_session_precanned_config(self, config_name): + config_lookup = { + config["displayName"]: config["id"] for config in self.get_all_configs() + } + config_id = config_lookup.get(config_name) + if config_id is None: + raise ValueError( + f"Pre-canned 
config with name '{config_name}' was not found" + ) + self.configID = config_id + self.sessionID = self.open_config() + + def set_base_path_url(self, server_test_ip): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/1/PepDUT/AuthProfileParams/3".format( + self.sessionID + ) + concated_ip = "/http/" + server_test_ip + self.__sendPatch(apiPath, payload={"Value": concated_ip}) + + def set_okta_credentials_ep(self, okta_username, okta_password): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/1/PepDUT/AuthProfileParams/4".format( + self.sessionID + ) + self.__sendPatch(apiPath, payload={"Value": okta_username}) + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/1/PepDUT/AuthProfileParams/5".format( + self.sessionID + ) + self.__sendPatch(apiPath, payload={"Value": okta_password}) + + def set_okta_credentials_gp(self, okta_username, okta_password): + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/1/PepDUT/AuthProfileParams/1".format( + self.sessionID + ) + self.__sendPatch(apiPath, payload={"Value": okta_username}) + apiPath = "/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/1/PepDUT/AuthProfileParams/2".format( + self.sessionID + ) + self.__sendPatch(apiPath, payload={"Value": okta_password}) + + def export_controller(self, export_path=None, file_name="mycontroller.zip"): + apiPath = "/api/v2/controller-migration/operations/export" + payload = { + "keycloak": True, + "config": True, + "licenseServers": True, + "externalNatsBrokers": True, + "results": True, + } + result = self.__sendPost(apiPath, payload=payload).json() + apiPath = apiPath + "/" + str(result["id"]) + for index in range(36): + time.sleep(10) + result = self.__sendGet(apiPath, 200).json() + if result["state"] == "SUCCESS": + download_url = result["resultUrl"] + break + elif result["state"] == "FAILURE": + raise "Export controller operation did not 
succeeded after 5 minutes" + customHeaders = self.headers + customHeaders["Accept"] = "application/zip" + response = self.session.get( + "{}{}".format(self.host, "/" + download_url), + headers=customHeaders, + stream=True, + ) + if export_path: + file_name = os.path.join(export_path, file_name) + with open(file_name, "wb") as fd: + for chunk in response.iter_content(chunk_size=256): + fd.write(chunk) + + def import_controller(self, import_path=None, file_name="fakenews.zip"): + apiPath = "/api/v2/controller-migration/operations/import" + customHeaders = self.headers + customHeaders["Accept"] = "application/json" + if import_path: + file_to_open = import_path + file_name + else: + file_to_open = file_name + mp_encoder = MultipartEncoder( + fields={ + "request": json.dumps( + { + "keycloak": True, + "config": True, + "licenseServers": True, + "externalNatsBrokers": True, + "results": True, + } + ), + "file": (file_name, open(file_to_open, "rb"), "application/zip"), + } + ) + + customHeaders["content-type"] = mp_encoder.content_type + result = self.__sendPost( + apiPath, payload=mp_encoder, customHeaders=self.headers + ).json() + print(f"This is post {result}") + + apiPath = apiPath + "/" + str(result["id"]) + for index in range(360): + time.sleep(3) + result = self.__sendGet(apiPath, 200).json() + if result["state"] == "SUCCESS": + self.session.close() + elif result["state"] == "ERROR": + self.session.close() + raise "Import was not succesful" diff --git a/snappi_cyperf/Test_l47.py b/snappi_cyperf/Test_l47.py new file mode 100644 index 0000000..d827955 --- /dev/null +++ b/snappi_cyperf/Test_l47.py @@ -0,0 +1,149 @@ +import sys + +# sys.path.append("C:\\Users\\waseebai\\Documents\\GitHub\\snappi\\artifacts\\snappi") +sys.path.insert(0, "/home/dipendu/otg/open_traffic_generator/snappi/artifacts/snappi") + +import snappi + +api = snappi.api(location="http://127.0.0.1:5000", verify=False) +config = api.config() + +# port_1_ip = config.ports.port(name="p1", 
location="10.39.44.120")[-1] +# port_2_ip = config.ports.port(name="p2", location="10.39.44.195")[-1] + +tx = config.ports.port(name="tx", location="10.39.44.147")[-1] +rx = config.ports.port(name="rx", location="10.39.44.190")[-1] +# port_1_tag = config.ports.port(name="p1", location="user:port1")[-1] +# port_2_tag = config.ports.port(name="p2", location="user:port2")[-1] + +(d1, d2) = config.devices.device(name="d1").device(name="d2") +(e1,) = d1.ethernets.ethernet(name="d1.e1") +e1.connection.port_name = "tx" +e1.mac = "01:02:03:04:05:06" +e1.step = "00:00:00:00:00:01" +e1.count = 1 +# e1.max_count = 100 +e1.mtu = 1488 + +(e2,) = d2.ethernets.ethernet(name="d2.e2") +e2.connection.port_name = "rx" +e2.mac = "01:02:03:04:06:06" +e2.step = "00:00:00:00:00:01" +e2.count = 2 +# e2.max_count = 100 +e2.mtu = 1488 + +(vlan1,) = e1.vlans.vlan(name="vlan1") +vlan1.id = 1 +vlan1.priority = 1 +vlan1.tpid = "x8100" +vlan1.count = 1 +vlan1.step = 1 +vlan1.per_count = 1 + +(vlan2,) = e2.vlans.vlan(name="vlan2") +vlan2.id = 1 +vlan2.priority = 1 +vlan2.tpid = "x8100" +vlan2.count = 1 +vlan2.step = 1 +vlan2.per_count = 1 + +(ip1,) = e1.ipv4_addresses.ipv4(name="e1.ipv4") +ip1.address = "10.0.0.10" +ip1.gateway = "10.0.0.1" +ip1.step = "0.0.0.1" +ip1.count = 1 +# ip1.max_count = 1 +ip1.prefix = 16 + +(ip2,) = e2.ipv4_addresses.ipv4(name="e2.ipv4") +ip2.address = "10.0.0.20" +ip2.gateway = "10.0.0.1" +ip2.step = "0.0.0.1" +ip2.count = 1 +# ip2.max_count = 1 +ip2.prefix = 16 + +# TCP/UDP configs + +(t1,) = d1.tcps.tcp(name="Tcp1") +t1.ip_interface_name = ip1.name +t1.receive_buffer_size = 1111 +t1.transmit_buffer_size = 1112 +t1.retransmission_minimum_timeout = 100 +t1.retransmission_maximum_timeout = 1001 +t1.minimum_source_port = 100 +t1.maximum_source_port = 101 +# t1.data_content_source = "SyntheticData" +# t1.data_content_value = "30000" +# t1.direction = "client_to_server" + +(t2,) = d2.tcps.tcp(name="Tcp2") +t2.ip_interface_name = ip2.name +t2.receive_buffer_size = 2222 
+t2.transmit_buffer_size = 2221 +t2.retransmission_minimum_timeout = 200 +t2.retransmission_maximum_timeout = 2002 +t2.minimum_source_port = 200 +t2.maximum_source_port = 202 + +(http_1,) = d1.https.http(name="HTTP1") +http_1.profile = "Chrome" +http_1.version = "HTTP11" +http_1.connection_persistence = "ConnectionPersistenceStandard" +(http_client,) = http_1.clients.client() +http_client.cookie_reject_probability = False +http_client.max_persistent_requests = 1 + +(http_2,) = d2.https.http(name="HTTP2") +http_2.profile = "Apache" +http_2.version = "HTTP11" +http_2.connection_persistence = "ConnectionPersistenceEnabled" +(http_server,) = http_2.servers.server() + +# (get_a, post_a) = http_client.methods.method().method() +# (get1,) = get_a.get.get() + +# get1.destination = "Traffic2_HTTPServer1:80" +# get1.page = "./1b.html" +# get1.destination = "Traffic2_HTTPServer1:80" #real http server ip or emulated http object get1.destination = "http2:80"\ +# get1.name_value_args = "name1=val1" +# for http server emulation +# get1.destination = http_2.name +# (post1,) = post1_a.post.post() +# post1.destination = "Traffic2_HTTPServer1:80" +# post1.page = "./1b.html" +# (delete1,) = delete_a.delete.delete() +# delete1.destination = "Traffic2_HTTPServer1:80" +# delete1.page = "./1b.html" + +(tp1,) = config.trafficprofile.trafficprofile() +(segment1, segment2) = tp1.segment.segment().segment() +segment1.name = "Linear segment1" +segment1.duration = 40 +# segment1.enable_ramp_up = True +# segment1.ramp_down_time = 50 +# segment1.ramp_down_value = 100 +# segment1.enable_ramp_down = True +# segment1.ramp_up_time = 60 +# segment1.ramp_up_value = 100 +segment2.name = "Linear segment2" +segment2.duration = 70 +tp1.timeline = [segment1.name, segment2.name] +tp1.objective_type = ["Connections per second", "Simulated users"] +tp1.objective_value = [100, 200] +(obj1, obj2) = tp1.objectives.objective().objective() + +print("In test before set_config") +response = api.set_config(config) 
+print("In test after set_config") +print(response) +api.close() + +cs = api.control_state() +cs.app.state = "start" # cs.app.state.START +response1 = api.set_control_state(cs) +print(response1) +# cs.app.state = "stop" # cs.app.state.START +# api.set_control_state(cs) diff --git a/snappi_cyperf/__init__.py b/snappi_cyperf/__init__.py new file mode 100644 index 0000000..60f26d0 --- /dev/null +++ b/snappi_cyperf/__init__.py @@ -0,0 +1 @@ +from snappi_cyperf.cyperfapi import Api diff --git a/snappi_cyperf/common.py b/snappi_cyperf/common.py new file mode 100644 index 0000000..418f432 --- /dev/null +++ b/snappi_cyperf/common.py @@ -0,0 +1,41 @@ +from snappi_cyperf.logger import get_cyperf_logger + + +class Common(object): + def __init__(self): + self.logger = get_cyperf_logger(__name__) + + def get_ip_name(self, name): + """ + return the IP config from statefull flow endpoints. + """ + for app in self._api._apps: + for flow in app.stateful_flows: + if flow.client.protocol == name: + for endpoint in flow.client.endpoints: + return "e1.ipv4" + elif flow.server.protocol == name: + for endpoint in flow.server.endpoints: + return "e2.ipv4" + + def get_protocol_ip(self, apps_obj): + return_list = [] + for app in apps_obj.apps: + for flow in app.stateful_flows: + if flow.client.endpoints and flow.server.endpoints: + temp_dict, client, server = {}, {}, {} + for endpoint in flow.client.endpoints: + temp_dict[flow.client.protocol] = endpoint.ip_interface_name + for endpoint in flow.server.endpoints: + temp_dict[flow.server.protocol] = endpoint.dest.name + # temp_dict['client'] = client + # temp_dict['server'] = server + # return_list.append(temp_dict) + return temp_dict + + def get_community_url(self, url): + import re + + match = re.match(r"(.*\/communityList\/)(\d+)", url) + if match: + return match.group(1) + match.group(2) + "/" diff --git a/snappi_cyperf/cyperfapi.py b/snappi_cyperf/cyperfapi.py new file mode 100644 index 0000000..1e4a241 --- /dev/null +++ 
b/snappi_cyperf/cyperfapi.py @@ -0,0 +1,397 @@ +import inspect +import json +import time, re +import sys +import logging + +# import ixrestutils as http_transport +from collections import namedtuple + +# import sys +# sys.path.insert(0, "/home/dipendu/otg/open_traffic_generator/snappi/artifacts/snappi") +import snappi +from snappi_cyperf.RESTasV3 import RESTasV3 +import snappi_cyperf.ixrestutils as http_transport +from snappi_cyperf.ports import port +from snappi_cyperf.interface import interfaces +from snappi_cyperf.tcp import tcp_config +from snappi_cyperf.http_config import http_config +from snappi_cyperf.objectiveandtimeline import objectiveandtimeline + +# from snappi_cyperf.http_config import client_config + +# from snappi_cyperf.http_server_config import server_config +from snappi_cyperf.logger import setup_cyperf_logger +from snappi_cyperf.common import Common +from snappi_cyperf.exceptions import Snappil47Exception + + +# from protocols import protocols +# from snappi_cyperf.chassis import chassis +# from stats import stats + +WAP_USERNAME = "admin" +WAP_PASSWORD = "CyPerf&Keysight#1" +WAP_CLIENT_ID = "clt-wap" +TOKEN_ENDPOINT = "/auth/realms/keysight/protocol/openid-connect/token" + + +class Api(snappi.Api): + """ """ + + def __init__( + self, + host, + version, + username, + password, + gateway_port=8080, + log_level="info", + **kwargs, + ): + """Create a session + - address (str): The ip address of the TestPlatform to connect to + where test sessions will be created or connected to. + - port (str): The rest port of the TestPlatform to connect to. 
+ - username (str): The username to be used for authentication + - password (str): The password to be used for authentication + """ + print("In cyperfAPI init") + super().__init__(**kwargs) + # host='https://127.0.0.1:11009' if host is None else host + # ) + self.rest = RESTasV3( + ipAddress=host, + username=WAP_USERNAME, + password=WAP_PASSWORD, + client_id=WAP_CLIENT_ID, + ) + self._host = host + self._address, self._port = self._get_addr_port(self._host) + self._username = username + self._password = password + self._config_url = {} + self._network_segments = {} + self._ip_ranges = {} + self.configID = None + self.session_id = None + + self.cyperf_version = version + self._gateway_port = gateway_port + self._l4config = None + self._assistant = None + self._config_type = self.config() + self.interfaces = interfaces(self) + self.tcp = tcp_config(self) + self.common = Common() + self.http = http_config(self) + self.objectiveandtimeline = objectiveandtimeline(self) + # self.http_sr = server_config(self) + self.port = port(self) + self._log_level = ( + logging.INFO if kwargs.get("loglevel") is None else kwargs.get("loglevel") + ) + self.logger = setup_cyperf_logger(self.log_level, module_name=__name__) + # self.protocols = protocols(self) + # self.chassis = chassis(self) + # self.stats = stats(self) + + def _get_addr_port(self, host): + items = host.split("/") + items = items[-1].split(":") + + addr = items[0] + if len(items) == 2: + return addr, items[-1] + else: + if host.startswith("https"): + return addr, "443" + else: + return addr, "80" + + @property + def _config(self): + return self._config + + @property + def log_level(self): + return self._log_level + + def _request_detail(self): + request_detail = snappi.Warning() + errors = self._errors + warnings = list() + if errors: + Snappil47Exception(errors) + warnings.append("") + request_detail.warnings = warnings + return request_detail + + def set_config(self, config): + """Set or update the configuration""" + 
try: + if isinstance(config, (type(self._config_type), str)) is False: + raise TypeError("The content must be of type Union[Config, str]") + + if isinstance(config, str) is True: + config = self._config_type.deserialize(config) + self._config_objects = {} + self._device_encap = {} + self._cyperf_objects = {} + self._connect() + # self._ip_list = self.common.get_protocol_ip(config) + self._l47config = config + self.interfaces.config(self.rest) + # self.tcp.config(self.rest) + # self.http.config(self.rest) + # self.objectiveandtimeline.config(self.rest) + self.port.config(self.rest) + # self._apply_config() + except Exception as err: + self.logger.info(f"error:{err}") + raise Snappil47Exception(err) + return self._request_detail() + + def add_error(self, error): + """Add an error to the global errors""" + if isinstance(error, str) is False: + self._errors.append("%s %s" % (type(error), str(error))) + else: + self._errors.append(error) + + def get_config(self): + return self._l47config + + def set_control_state(self, config): + try: + if config.app.state == "start": + self.start_test(self.rest) + + elif config.app.state == "stop": + self.stop_test(self.rest) + elif config.app.state == "abort": + self.abort_test(self.rest) + except Exception as err: + self.logger.info(f"error:{err}") + raise Snappil47Exception(err) + return self._request_detail() + + def stats(self, config): + """Set or update the configuration""" + try: + if isinstance(config, (type(self._config_type), str)) is False: + raise TypeError("The content must be of type Union[Config, str]") + + if isinstance(config, str) is True: + config = self._config_type.deserialize(config) + self._config = config + except Exception as err: + print(err) + + def _apply_config(self): + """ + Apply configs + """ + + url = self._cyperf + "cyperf/test/operations/applyConfiguration" + payload = {} + reply = self._request("POST", url, payload, option=1) + if not reply.ok: + raise Exception(reply.text) + 
self._wait_for_action_to_finish(reply, url) + + def start_test(self, rest): + """ + start test + """ + rest.start_test() + + def stop_test(self, rest): + """ + stop test + """ + rest.stop_test() + + def abort_test(self, rest): + """ + abort test + """ + rest.abort_test() + + def get_current_state(self): + """ + This method gets the test current state. (for example - running, unconfigured, ..) + """ + url = "%s/cyperf/test/activeTest" % (self._cyperf) + reply = self._request("GET", url, option=1) + return reply["currentState"] + + def strip_api_and_version_from_url(self, url): + """ + #remove the slash (if any) at the beginning of the url + """ + if url[0] == "/": + url = url[1:] + url_elements = url.split("/") + if "api" in url: + # strip the api/v0 part of the url + url_elements = url_elements[2:] + return "/".join(url_elements) + + def session_assistance(self): + """ """ + # self.connection = http_transport.get_connection( + # self._host, self._gateway_port, http_redirect=True + # ) + # session_url = "sessions" + # data = {"cyperfVersion": self.cyperf_version} + # data = json.dumps(data) + # reply = self.connection.http_post(url=session_url, data=data) + # if not reply.ok: + # raise Exception(reply.text) + # try: + # new_obj_path = reply.headers["location"] + # except: + # raise Exception( + # "Location header is not present. Please check if the action was created successfully." 
+ # ) + # session_id = new_obj_path.split("/")[-1] + # self._cyperf = "%s/%s" % (session_url, session_id) + # start_session_url = "%s/operations/start" % (self._cyperf) + # reply = self.connection.http_post(url=start_session_url, data={}) + # if not reply.ok: + # Snappil47Exception.status_code = reply.status_code + # raise Exception(reply.text) + # action_result_url = reply.headers.get("location") + # if action_result_url: + # action_result_url = self.strip_api_and_version_from_url(action_result_url) + # action_finished = False + # while not action_finished: + # action_status_obj = self.connection.http_get(url=action_result_url) + # if action_status_obj.state == "finished": + # if action_status_obj.status == "Successful": + # action_finished = True + # self._cyperf = self._cyperf + "/" + # self.logger.info( + # "Connected to L47, Session ID:%s" % (self._cyperf) + # ) + # msg = "Successfully connected to cyperf" + # # self.add_error(msg) + # self.warning(msg) + # else: + # error_msg = ( + # "Error while executing action '%s'." 
% start_session_url + # ) + # if action_status_obj.status == "Error": + # error_msg += action_status_obj.error + # raise Exception(error_msg) + # else: + # time.sleep(0.1) + # self.configID = "appsec-appmix-and-attack" + # self.configID = "appsec-empty-config" + self.configID = "appsec-appmix" + response = self.rest.create_session(self.configID) + if response: + self.session_id = response.json()[0]["id"] + print("Config successfully opened, session ID: {}".format(self.session_id)) + + def _connect(self): + """Connect to s Cyperf API Server.""" + self._errors = [] + self.logger = setup_cyperf_logger(self.log_level, module_name=__name__) + self.logger.info("Connecting to Cyperf") + if self._assistant is None: + self.session_assistance() + + def _request( + self, method, url, payload=None, params=None, headers=None, option=None + ): + """ """ + data = json.dumps(payload) + response = self.connection._request( + method=method, + url=url, + data=data, + params=params, + headers=headers, + option=option, + ) + return response + + def _wait_for_action_to_finish(self, reply_obj, action_url): + """ + This method waits for an action to finish executing. after a POST request is sent in order to start an action, + The HTTP reply will contain, in the header, a 'location' field, that contains an URL. + The action URL contains the status of the action. we perform a GET on that URL every 0.5 seconds until the action finishes with a success. + If the action fails, we will throw an error and print the action's error message. 
+ Args: + - reply_obj the reply object holding the location + - rt_handle - the url pointing to the operation + """ + action_result_url = reply_obj.headers.get("location") + if action_result_url: + action_result_url = self.strip_api_and_version_from_url(action_result_url) + action_finished = False + while not action_finished: + action_status_obj = self._request("GET", action_result_url) + # action_status_obj = rt_handle.invoke('http_get', url=action_result_url) + if action_status_obj.state == "finished": + if action_status_obj.status == "Successful": + action_finished = True + else: + error_msg = "Error while executing action '%s'." % action_url + if action_status_obj.status == "Error": + error_msg += action_status_obj.error + raise Exception(error_msg) + else: + time.sleep(0.1) + + def strip_api_and_version_from_url(self, url): + """ + #remove the slash (if any) at the beginning of the url + """ + if url[0] == "/": + url = url[1:] + url_elements = url.split("/") + if "api" in url: + # strip the api/v0 part of the url + url_elements = url_elements[2:] + return "/".join(url_elements) + + def _get_url(self, parent, child): + """ """ + try: + pattern = re.search(r"(.*)(\{id\})(.*)", child) + if pattern: + url1 = parent + pattern.group(1) + url2 = pattern.group(3) + return (url1, url2) + except Exception: + raise Exception( + "can not get url for parent %s and child %s" % (parent, child) + ) + + def _set_payload(self, snappi_obj, attr_map): + properties = snappi_obj._properties + payload = {} + for snappi_attr, ixl_map in attr_map.items(): + value = snappi_obj.get(snappi_attr) + payload[ixl_map] = value + + # for prop_name in properties: + + # if prop_name != 'url' and isinstance(properties[prop_name], (str, int, float, list)): + # prop_name_new = self._convert_camel(prop_name) + # payload[prop_name_new] = properties[prop_name] + return payload + + def _convert_camel(self, argument): + word_regex_pattern = re.compile("[^A-Za-z]+") + words = 
word_regex_pattern.split(argument) + return "".join(w.lower() if i == 0 else w.title() for i, w in enumerate(words)) + + def info(self, message): + print("working %s" % message) + + def warning(self, message): + logging.warning(message) diff --git a/snappi_cyperf/exceptions.py b/snappi_cyperf/exceptions.py new file mode 100644 index 0000000..ccc47c8 --- /dev/null +++ b/snappi_cyperf/exceptions.py @@ -0,0 +1,70 @@ +# import errors as err +import sys +import traceback + +# sys.path.insert(0, "/home/dipendu/otg/open_traffic_generator/snappi/artifacts/snappi") +import snappi + + +class Snappil47Exception(Exception): + def __init__(self, *args): + super(Snappil47Exception, self).__init__(*args) + self._args = args + self._message = None + self._status_code = None + self._url = None + self.process_exception() + + @property + def args(self): + return ( + self._status_code, + ([self._message] if not isinstance(self._message, list) else self._message), + self._url, + ) + + @property + def message(self): + return self._message.args[0] + + @property + def url(self): + return self._url + + @property + def status_code(self): + return self._status_code + + def process_exception(self): + # import pdb + + # pdb.set_trace() + if isinstance(self._args[0].args, tuple) and len(self._args[0].args) == 1: + if "Max retries exceeded" in str(self._args[0].args[0]): + self._status_code = 500 + self._message = self._args[0] + return (self._message, self._status_code) + if isinstance(self._args[0].args[0], (str, list)): + self._status_code = ( + 400 if self._status_code is None else self._status_code + ) + self._message = self._args[0].args[0] + return (self._message, self._status_code) + if isinstance( + self._args[0], + (NameError, TypeError, ValueError, KeyError, AttributeError), + ): + self._status_code = 400 + self._message = self._args[0].args + return (self._status_code, self._message) + if isinstance(self._args[0], (ImportError, RuntimeError)): + self._status_code = 400 + 
self._message = self._args[0] + return (self._status_code, self._message) + else: + self._status_code = 400 + self._message = self._args + return (self._status_code, self._message) + else: + self._message = self._args + return diff --git a/snappi_cyperf/hero.py b/snappi_cyperf/hero.py new file mode 100644 index 0000000..c12f81e --- /dev/null +++ b/snappi_cyperf/hero.py @@ -0,0 +1,161 @@ +import sys +import pprint +import socket +import struct +import macaddress +import ipaddress +from copy import deepcopy +from munch import DefaultMunch + +# utils +socket_inet_ntoa = socket.inet_ntoa +struct_pack = struct.pack + + +class MAC(macaddress.MAC): + formats = ("xx:xx:xx:xx:xx:xx",) + macaddress.MAC.formats + + +maca = MAC # optimization so the . does not get executed multiple times + +dflt_params = { # CONFIG VALUE # DEFAULT VALUE + "SCHEMA_VER": "0.0.4", + "DC_START": "220.0.1.1", # '220.0.1.2' + "DC_STEP": "0.0.1.0", # '0.0.1.0' + "LOOPBACK": "221.0.0.1", # '221.0.0.1' + "PAL": "221.1.0.0", # '221.1.0.1' + "PAR": "221.2.0.0", # '221.2.0.1' + "GATEWAY": "222.0.0.1", # '222.0.0.1' + "DPUS": 8, # 1 + "ENI_START": 1, # 1 + "ENI_COUNT": 256, # 32 + "ENI_STEP": 1, # 1 + "ENI_L2R_STEP": 0, # 1000 + "VNET_PER_ENI": 1, # 16 TODO: partialy implemented + "ACL_NSG_COUNT": 5, # 5 (per direction per ENI) + "ACL_RULES_NSG": 1000, # 1000 + "IP_PER_ACL_RULE": 1, # 100 + "ACL_MAPPED_PER_NSG": 500, # 500, efective is 250 because denny are skiped + "MAC_L_START": "00:1A:C5:00:00:01", + "MAC_R_START": "00:1B:6E:00:00:01", + "MAC_STEP_ENI": "00:00:00:18:00:00", # '00:00:00:18:00:00' + "MAC_STEP_NSG": "00:00:00:02:00:00", + "MAC_STEP_ACL": "00:00:00:00:01:00", + "IP_L_START": "1.1.0.1", # local, eni + "IP_R_START": "1.4.0.1", # remote, the world + "IP_STEP1": "0.0.0.1", + "IP_STEP_ENI": "0.64.0.0", + "IP_STEP_NSG": "0.2.0.0", + "IP_STEP_ACL": "0.0.1.0", + "IP_STEPE": "0.0.0.2", + "TOTAL_OUTBOUND_ROUTES": 25600000, # ENI_COUNT * 100K +} + +params_dict = deepcopy(dflt_params) + 
+cooked_params_dict = {} +for ip in [ + "IP_STEP1", + "IP_STEP_ENI", + "IP_STEP_NSG", + "IP_STEP_ACL", + "IP_STEPE", + "IP_L_START", + "IP_R_START", + "PAL", + "PAR", + "GATEWAY", +]: + cooked_params_dict[ip] = int(ipaddress.ip_address((params_dict[ip]))) +for mac in [ + "MAC_L_START", + "MAC_R_START", + "MAC_STEP_ENI", + "MAC_STEP_NSG", + "MAC_STEP_ACL", +]: + cooked_params_dict[mac] = int(maca(params_dict[mac])) + + +params = DefaultMunch.fromDict(params_dict) +cooked_params = DefaultMunch.fromDict(cooked_params_dict) + + +p = params +ip_int = cooked_params + + +################################################################################### + + +sys.path.insert(0, "/home/dipendu/otg/open_traffic_generator/snappi/artifacts/snappi") +import snappi + + +ixl_api = snappi.api(location="http://127.0.0.1:5000", verify=False) +# ixl_api = snappi.api(location="http://127.0.0.1:5000", verify=False, ext="ixload") +ixlc = ixl_api.config() + +port_1 = ixlc.ports.port(name="p1", location="10.39.44.147")[-1] +port_2 = ixlc.ports.port(name="p2", location="10.39.44.190")[-1] + +c_device = ixlc.devices.add(name="client") +s_device = ixlc.devices.add(name="server") + +# c_device.port = port_1 +# S_device.port = port_2 + + +for eni_index, eni in enumerate( + range(p.ENI_START, p.ENI_START + p.ENI_COUNT * p.ENI_STEP, p.ENI_STEP) +): # Per ENI + + # vtep_remote = socket_inet_ntoa(struct_pack('>L', ip_int.PAR + ip_int.IP_STEP1 * eni_index)) + # gateway_ip = socket_inet_ntoa(struct_pack('>L', ip_int.GATEWAY + ip_int.IP_STEP1 * eni_index)) + remote_ip_a_eni = ip_int.IP_R_START + eni_index * ip_int.IP_STEP_ENI + remote_mac_a_eni = ip_int.MAC_R_START + eni_index * ip_int.MAC_STEP_ENI + gateway_mac = str(maca(remote_mac_a_eni)) + + # add mapping for ENI itself SAI_OBJECT_TYPE_ENI_ETHER_ADDRESS_MAP_ENTRY + + r_vni_id = eni + p.ENI_L2R_STEP + + c_eth = c_device.ethernets.add() + c_eth.name = "NETMAC%d" % eni + c_eth.mac = str(maca(remote_mac_a_eni)) + + c_vlan = c_eth.vlans.add() + 
c_vlan.name = "NETVLAN%d" % eni + c_vlan.id = r_vni_id + + c_ip = c_eth.ipv4_addresses.add() + c_ip.name = "NETIP%d" % eni + c_ip.address = socket_inet_ntoa(struct_pack(">L", remote_ip_a_eni)) + c_ip.gateway = "0.0.0.0" + c_ip.prefix = 10 + + eni_ip = socket_inet_ntoa( + struct_pack(">L", ip_int.IP_L_START + eni_index * ip_int.IP_STEP_ENI) + ) + eni_mac = str(maca(ip_int.MAC_L_START + eni_index * ip_int.MAC_STEP_ENI)) + + # import pdb; pdb.set_trace() + + s_eth = s_device.ethernets.add() + s_eth.name = "ENIMAC%d" % eni + s_eth.mac = eni_mac + + s_vlan = s_eth.vlans.add() + s_vlan.name = "ENIVLAN%d" % eni + s_vlan.id = eni + + s_ip = s_eth.ipv4_addresses.add() + s_ip.name = "ENIIP%d" % eni + s_ip.address = eni_ip + s_ip.gateway = "0.0.0.0" + s_ip.prefix = 10 + + +# import pdb; pdb.set_trace() +response = ixl_api.set_config(ixlc) +print(response) diff --git a/snappi_cyperf/hero_1.py b/snappi_cyperf/hero_1.py new file mode 100644 index 0000000..0b14c9c --- /dev/null +++ b/snappi_cyperf/hero_1.py @@ -0,0 +1,161 @@ +import sys +import pprint +import socket +import struct +import macaddress +import ipaddress +from copy import deepcopy +from munch import DefaultMunch + +# utils +socket_inet_ntoa = socket.inet_ntoa +struct_pack = struct.pack + + +class MAC(macaddress.MAC): + formats = ("xx:xx:xx:xx:xx:xx",) + macaddress.MAC.formats + + +maca = MAC # optimization so the . 
does not get executed multiple times + +dflt_params = { # CONFIG VALUE # DEFAULT VALUE + "SCHEMA_VER": "0.0.4", + "DC_START": "220.0.1.1", # '220.0.1.2' + "DC_STEP": "0.0.1.0", # '0.0.1.0' + "LOOPBACK": "221.0.0.1", # '221.0.0.1' + "PAL": "221.1.0.0", # '221.1.0.1' + "PAR": "221.2.0.0", # '221.2.0.1' + "GATEWAY": "222.0.0.1", # '222.0.0.1' + "DPUS": 8, # 1 + "ENI_START": 1, # 1 + "ENI_COUNT": 2, # 32 + "ENI_STEP": 1, # 1 + "ENI_L2R_STEP": 0, # 1000 + "VNET_PER_ENI": 1, # 16 TODO: partialy implemented + "ACL_NSG_COUNT": 5, # 5 (per direction per ENI) + "ACL_RULES_NSG": 1000, # 1000 + "IP_PER_ACL_RULE": 1, # 100 + "ACL_MAPPED_PER_NSG": 500, # 500, efective is 250 because denny are skiped + "MAC_L_START": "00:1A:C5:00:00:01", + "MAC_R_START": "00:1B:6E:00:00:01", + "MAC_STEP_ENI": "00:00:00:18:00:00", # '00:00:00:18:00:00' + "MAC_STEP_NSG": "00:00:00:02:00:00", + "MAC_STEP_ACL": "00:00:00:00:01:00", + "IP_L_START": "1.1.0.1", # local, eni + "IP_R_START": "1.4.0.1", # remote, the world + "IP_STEP1": "0.0.0.1", + "IP_STEP_ENI": "0.64.0.0", + "IP_STEP_NSG": "0.2.0.0", + "IP_STEP_ACL": "0.0.1.0", + "IP_STEPE": "0.0.0.2", + "TOTAL_OUTBOUND_ROUTES": 200, # ENI_COUNT * 100K +} + +params_dict = deepcopy(dflt_params) + +cooked_params_dict = {} +for ip in [ + "IP_STEP1", + "IP_STEP_ENI", + "IP_STEP_NSG", + "IP_STEP_ACL", + "IP_STEPE", + "IP_L_START", + "IP_R_START", + "PAL", + "PAR", + "GATEWAY", +]: + cooked_params_dict[ip] = int(ipaddress.ip_address((params_dict[ip]))) +for mac in [ + "MAC_L_START", + "MAC_R_START", + "MAC_STEP_ENI", + "MAC_STEP_NSG", + "MAC_STEP_ACL", +]: + cooked_params_dict[mac] = int(maca(params_dict[mac])) + + +params = DefaultMunch.fromDict(params_dict) +cooked_params = DefaultMunch.fromDict(cooked_params_dict) + + +p = params +ip_int = cooked_params + + +################################################################################### + + +sys.path.insert(0, "/home/dipendu/otg/open_traffic_generator/snappi/artifacts/snappi") +import snappi + + 
+ixl_api = snappi.api(location="http://127.0.0.1:5000", verify=False) +# ixl_api = snappi.api(location="http://127.0.0.1:5000", verify=False, ext="ixload") +ixlc = ixl_api.config() + +port_1 = ixlc.ports.port(name="p1", location="10.39.44.147")[-1] +port_2 = ixlc.ports.port(name="p2", location="10.39.44.190")[-1] + +c_device = ixlc.devices.add(name="client") +s_device = ixlc.devices.add(name="server") + +# c_device.port = port_1 +# S_device.port = port_2 + + +for eni_index, eni in enumerate( + range(p.ENI_START, p.ENI_START + p.ENI_COUNT * p.ENI_STEP, p.ENI_STEP) +): # Per ENI + + # vtep_remote = socket_inet_ntoa(struct_pack('>L', ip_int.PAR + ip_int.IP_STEP1 * eni_index)) + # gateway_ip = socket_inet_ntoa(struct_pack('>L', ip_int.GATEWAY + ip_int.IP_STEP1 * eni_index)) + remote_ip_a_eni = ip_int.IP_R_START + eni_index * ip_int.IP_STEP_ENI + remote_mac_a_eni = ip_int.MAC_R_START + eni_index * ip_int.MAC_STEP_ENI + gateway_mac = str(maca(remote_mac_a_eni)) + + # add mapping for ENI itself SAI_OBJECT_TYPE_ENI_ETHER_ADDRESS_MAP_ENTRY + + r_vni_id = eni + p.ENI_L2R_STEP + + c_eth = c_device.ethernets.add() + c_eth.name = "NETMAC%d" % eni + c_eth.mac = str(maca(remote_mac_a_eni)) + + c_vlan = c_eth.vlans.add() + c_vlan.name = "NETVLAN%d" % eni + c_vlan.id = r_vni_id + + c_ip = c_eth.ipv4_addresses.add() + c_ip.name = "NETIP%d" % eni + c_ip.address = socket_inet_ntoa(struct_pack(">L", remote_ip_a_eni)) + c_ip.gateway = "0.0.0.0" + c_ip.prefix = 10 + + eni_ip = socket_inet_ntoa( + struct_pack(">L", ip_int.IP_L_START + eni_index * ip_int.IP_STEP_ENI) + ) + eni_mac = str(maca(ip_int.MAC_L_START + eni_index * ip_int.MAC_STEP_ENI)) + + # import pdb; pdb.set_trace() + + s_eth = s_device.ethernets.add() + s_eth.name = "ENIMAC%d" % eni + s_eth.mac = eni_mac + + s_vlan = s_eth.vlans.add() + s_vlan.name = "ENIVLAN%d" % eni + s_vlan.id = eni + + s_ip = s_eth.ipv4_addresses.add() + s_ip.name = "ENIIP%d" % eni + s_ip.address = eni_ip + s_ip.gateway = "0.0.0.0" + s_ip.prefix = 10 + + 
"""Demo script (hero_2): build a DASH-style scaled snappi config and push it
to a snappi server listening at http://127.0.0.1:5000.

NOTE(review): depends on third-party packages ``macaddress`` and ``munch``
and on a locally built ``snappi`` whose path is hard-coded below.
"""
import sys
import pprint
import socket
import struct
import macaddress
import ipaddress
from copy import deepcopy
from munch import DefaultMunch

# utils - bind the hot helpers to module-level names once.
socket_inet_ntoa = socket.inet_ntoa
struct_pack = struct.pack


class MAC(macaddress.MAC):
    # Accept colon-separated MACs in addition to the library's stock formats.
    formats = ("xx:xx:xx:xx:xx:xx",) + macaddress.MAC.formats


maca = MAC  # optimization so the . does not get executed multiple times

# Scenario knobs.  The trailing comment on each entry records the default
# used by the full-scale scenario this demo was reduced from.
dflt_params = {  # CONFIG VALUE # DEFAULT VALUE
    "SCHEMA_VER": "0.0.4",
    "DC_START": "220.0.1.1",  # '220.0.1.2'
    "DC_STEP": "0.0.1.0",  # '0.0.1.0'
    "LOOPBACK": "221.0.0.1",  # '221.0.0.1'
    "PAL": "221.1.0.0",  # '221.1.0.1'
    "PAR": "221.2.0.0",  # '221.2.0.1'
    "GATEWAY": "222.0.0.1",  # '222.0.0.1'
    "DPUS": 8,  # 1
    "ENI_START": 1,  # 1
    "ENI_COUNT": 2,  # 32
    "ENI_STEP": 1,  # 1
    "ENI_L2R_STEP": 0,  # 1000
    "VNET_PER_ENI": 1,  # 16 TODO: partialy implemented
    "ACL_NSG_COUNT": 5,  # 5 (per direction per ENI)
    "ACL_RULES_NSG": 1000,  # 1000
    "IP_PER_ACL_RULE": 1,  # 100
    "ACL_MAPPED_PER_NSG": 500,  # 500, efective is 250 because denny are skiped
    "MAC_L_START": "00:1A:C5:00:00:01",
    "MAC_R_START": "00:1B:6E:00:00:01",
    "MAC_STEP_ENI": "00:00:00:18:00:00",  # '00:00:00:18:00:00'
    "MAC_STEP_NSG": "00:00:00:02:00:00",
    "MAC_STEP_ACL": "00:00:00:00:01:00",
    "IP_L_START": "1.1.0.1",  # local, eni
    "IP_R_START": "1.4.0.1",  # remote, the world
    "IP_STEP1": "0.0.0.1",
    "IP_STEP_ENI": "0.64.0.0",
    "IP_STEP_NSG": "0.2.0.0",
    "IP_STEP_ACL": "0.0.1.0",
    "IP_STEPE": "0.0.0.2",
    "TOTAL_OUTBOUND_ROUTES": 200,  # ENI_COUNT * 100K
}

params_dict = deepcopy(dflt_params)

# Pre-convert dotted-quad IPs and MAC strings to plain integers once, so the
# per-ENI loops below can use integer arithmetic for address stepping.
cooked_params_dict = {}
for ip in [
    "IP_STEP1",
    "IP_STEP_ENI",
    "IP_STEP_NSG",
    "IP_STEP_ACL",
    "IP_STEPE",
    "IP_L_START",
    "IP_R_START",
    "PAL",
    "PAR",
    "GATEWAY",
]:
    cooked_params_dict[ip] = int(ipaddress.ip_address((params_dict[ip])))
for mac in [
    "MAC_L_START",
    "MAC_R_START",
    "MAC_STEP_ENI",
    "MAC_STEP_NSG",
    "MAC_STEP_ACL",
]:
    cooked_params_dict[mac] = int(maca(params_dict[mac]))


# Munch wrappers give attribute-style access (p.ENI_COUNT, ip_int.PAL, ...).
params = DefaultMunch.fromDict(params_dict)
cooked_params = DefaultMunch.fromDict(cooked_params_dict)


p = params
ip_int = cooked_params


###################################################################################


sys.path.insert(0, "/home/dipendu/otg/open_traffic_generator/snappi/artifacts/snappi")
import snappi


ixl_api = snappi.api(location="http://127.0.0.1:5000", verify=False)
# ixl_api = snappi.api(location="http://127.0.0.1:5000", verify=False, ext="ixload")
ixlc = ixl_api.config()

port_1 = ixlc.ports.port(name="p1", location="10.39.44.147")[-1]
port_2 = ixlc.ports.port(name="p2", location="10.39.44.190")[-1]

c_device = ixlc.devices.add(name="client")
s_device = ixlc.devices.add(name="server")

# NOTE(review): port binding is commented out - devices are not attached to
# the ports declared above.  Confirm whether this is intentional.
# c_device.port = port_1
# S_device.port = port_2

for eni_index2, eni2 in enumerate(
    range(p.ENI_START, p.ENI_START + p.ENI_COUNT * p.ENI_STEP, p.ENI_STEP)
):  # Per ENI
    # One client/server ethernet pair per ENI; MACs step by MAC_STEP_ENI.
    remote_mac_a_eni = ip_int.MAC_R_START + eni_index2 * ip_int.MAC_STEP_ENI
    gateway_mac = str(maca(remote_mac_a_eni))

    c_eth = c_device.ethernets.add()
    c_eth.name = "NETMAC%d" % eni2
    c_eth.mac = str(maca(remote_mac_a_eni))

    eni_mac = str(maca(ip_int.MAC_L_START + eni_index2 * ip_int.MAC_STEP_ENI))

    s_eth = s_device.ethernets.add()
    s_eth.name = "ENIMAC%d" % eni2
    s_eth.mac = eni_mac

    # NOTE(review): the inner loop iterates the same ENI range again, so each
    # ethernet receives ENI_COUNT vlan/ip pairs - confirm this nesting is the
    # intended scale-out and not an accidental O(n^2) duplication.
    for eni_index, eni in enumerate(
        range(p.ENI_START, p.ENI_START + p.ENI_COUNT * p.ENI_STEP, p.ENI_STEP)
    ):  # Per ENI

        # vtep_remote = socket_inet_ntoa(struct_pack('>L', ip_int.PAR + ip_int.IP_STEP1 * eni_index))
        # gateway_ip = socket_inet_ntoa(struct_pack('>L', ip_int.GATEWAY + ip_int.IP_STEP1 * eni_index))
        remote_ip_a_eni = ip_int.IP_R_START + eni_index * ip_int.IP_STEP_ENI

        # add mapping for ENI itself SAI_OBJECT_TYPE_ENI_ETHER_ADDRESS_MAP_ENTRY

        # Remote side uses the local-to-remote VNI offset.
        r_vni_id = eni + p.ENI_L2R_STEP

        c_vlan = c_eth.vlans.add()
        c_vlan.name = "NETVLAN%d" % eni
        c_vlan.id = r_vni_id

        c_ip = c_eth.ipv4_addresses.add()
        c_ip.name = "NETIP%d" % eni
        c_ip.address = socket_inet_ntoa(struct_pack(">L", remote_ip_a_eni))
        c_ip.gateway = "0.0.0.0"
        c_ip.prefix = 10

        eni_ip = socket_inet_ntoa(
            struct_pack(">L", ip_int.IP_L_START + eni_index * ip_int.IP_STEP_ENI)
        )

        s_vlan = s_eth.vlans.add()
        s_vlan.name = "ENIVLAN%d" % eni
        s_vlan.id = eni

        s_ip = s_eth.ipv4_addresses.add()
        s_ip.name = "ENIIP%d" % eni
        s_ip.address = eni_ip
        s_ip.gateway = "0.0.0.0"
        s_ip.prefix = 10


# Push the assembled configuration to the snappi server and echo the reply.
response = ixl_api.set_config(ixlc)
print(response)
"priority_flow_control_class" : "pfcClass", + # "precedence_tos" : "precedenceTOS", + # "delay_tos" : "delayTOS", + # "throughput_tos" : "throughputTOS", + # "reliability_tos" : "reliabilityTOS", + # "url_stats_count": "urlStatsCount", + # "disable_priority_flow_control": "disablePfc", + # "enable_vlan_priority": "enableVlanPriority", + # "vlan_priority": "vlanPriority", + # "esm": "esm", + # "enable_esm": "enableEsm", + # "time_to_live_value": "ttlValue", + # "tcp_close_option" : "tcpCloseOption", + # "enable_integrity_check_support" : "enableIntegrityCheckSupport", + # "type_of_service": "tos", + # "high_perf_with_simulated_user": "highPerfWithSU", + "profile": "Name", + "version": "HTTPVersion", + "connection_persistence": "ConnectionPersistence", + } + _HTTP_CLIENTS = { + # "browser_emulation_name": "Name", + # "version": "httpVersion", + # "cookie_jar_size": "cookieJarSize", + # "cookie_reject_probability": "cookieRejectProbability", + # "enable_cookie_support": "enableCookieSupport", + # "command_timeout": "commandTimeout", + # "command_timeout_ms": "commandTimeout_ms", + # "enable_proxy": "enableHttpProxy", + # "proxy" : "httpProxy", + # "keep_alive": "keepAlive", + # "max_sessions": "maxSessions", + # "max_streams": "maxStreams", + # "max_pipeline": "maxPipeline", + "max_persistent_requests": "ConnectionsMaxTransactions", + # "exact_transactions": "exactTransactions", + # "follow_http_redirects": "followHttpRedirects", + # "enable_decompress_support": "enableDecompressSupport", + # "enable_per_conn_cookie_support": "enablePerConnCookieSupport", + # "ip_preference" : "ipPreference", + # "enable_large_header": "enableLargeHeader", + # "max_header_len": "maxHeaderLen", + # "per_header_percent_dist": "perHeaderPercentDist", + # "enable_auth": "enableAuth", + # "piggy_back_ack": "piggybackAck", + # "tcp_fast_open": "tcpFastOpen", + # "content_length_deviation_tolerance": "contentLengthDeviationTolerance", + # "disable_dns_resolution_cache": 
"disableDnsResolutionCache", + # "enable_consecutive_ips_per_session": "enableConsecutiveIpsPerSession", + # "enable_achieve_cc_first": "enableAchieveCCFirst", + # "enable_traffic_distribution_for_cc": "enableTrafficDistributionForCC", + # "browser_emulation_name": "browserEmulationName", + } + _HTTP_SERVERS = { + # "rst_timeout": "rstTimeout", + # "enable_http2": "enableHTTP2", + # "port": "httpPort", + # "request_timeout": "requestTimeout", + # "maximum_response_delay": "maxResponseDelay", + # "minimum_response_delay": "minResponseDelay", + # "dont_expect_upgrade": "dontExpectUpgrade", + # "enable_per_server_per_url_stat": "enablePerServerPerURLstat", + # "url_page_size": "urlPageSize", + # "enable_chunk_encoding": "enableChunkEncoding", + # # "integrity_check_option" : "integrityCheckOption", + # "enable_md5_checksum": "enableMD5Checksum", + } + _HTTP_GET = { + # "destination": "destination", + # "page": "pageObject", + # "abort" : "abort", + # "profile": "profile", + # "name_value_args": "namevalueargs", + # "enable_direct_server_return": "enableDi", + } + + _HTTP_DELETE = { + # "destination": "destination", + # "page": "pageObject", + # "abort": "abort", + # "profile": "profile", + } + + _HTTP_POST = { + # "destination": "destination", + # "page": "pageObject", + # "abort": "abort", + # "profile": "profile", + # "name_value_args": "namevalueargs", + # "arguments": "arguments", + # "sending_chunk_size": "sendingChunkSize", + # "send_md5_chksum_header": "sendMD5ChkSumHeader", + } + _HTTP_PUT = { + # "destination": "destination", + # "page": "pageObject", + # "abort": "abort", + # "profile": "profile", + # "name_value_args": "namevalueargs", + # "arguments": "arguments", + # "sending_chunk_size": "sendingChunkSize", + # "send_md5_check_sum_header": "sendMD5ChkSumHeader", + } + _HTTP_HEADER = { + # "destination": "destination", + # "page": "pageObject", + # "abort": "abort", + # "profile": "profile", + # "name_value_args": "namevalueargs", + } + + _TCP = 
{"keep_alive_time": "tcp_keepalive_time"} + + def __init__(self, cyperfapi): + self._api = cyperfapi + + def config(self, rest): + """ """ + print("config 1") + self._devices_config = self._api._l47config.devices + app_id = rest.add_application("HTTP App") + # response = rest.get_application_actions(app_id) + # print("HTTP Actions :- ", response) + # response = rest.get_application_actions_values(app_id, 1) + # print("HTTP Action Get values :- ", response) + # response = rest.get_application_actions_values(app_id, 2) + # print("HTTP Action POST values :- ", response) + print("config 2") + with Timer(self._api, "HTTP client Configurations"): + self._create_client_app(rest) + self._create_server_app(rest) + + def _create_client_app(self, rest): + """Add any scenarios to the api server that do not already exist""" + print("_create_client_app") + for device in self._devices_config: + self._create_http_client(device, rest) + + def _create_http_client(self, device, rest): + """Add any scenarios to the api server that do not already exist""" + for http_client in device.https: + payload = self._api._set_payload(http_client, http_config._HTTP_CLIENT) + print("payload ", payload) + for http_clients in http_client.clients: + new_payload = self._api._set_payload( + http_clients, http_config._HTTP_CLIENTS + ) + payload["ExternalResourceURL"] = ( + "/api/v2/resources/http-profiles/" + payload["Name"] + ) + payload.update(new_payload) + print("client payload - ", payload) + rest.set_client_http_profile(payload) + name_payload = {} + name_payload["Name"] = payload["Name"] + rest.set_client_http_profile(name_payload) + # for client in app_config.http_client: + # ip_object = self._api.common.get_ip_name(client.client.name) + # url = self._api._config_url.get(self._api._ip_list.get(client.client.name)) + # url = self._api.common.get_community_url(url) + # protocol_url = url+"activityList/" + # options = {} + # options.update({'protocolAndType': "HTTP Client"}) + # response = 
self._api._request('POST', protocol_url, options) + # protocol_url = protocol_url+response + # self._api._config_url[client.client.name] = protocol_url + # payload = self._api._set_payload(client.client, client_config._HTTP_CLIENT) + # response = self._api._request('PATCH', protocol_url+"/agent", payload) + # self._update_tcp_client(app_config, client) + # self._create_method(app_config, client, protocol_url) + # return + + def _create_server_app(self, rest): + """Add any scenarios to the api server that do not already exist""" + for device in self._devices_config: + self._create_http_server(device, rest) + + def _create_http_server(self, device, rest): + """Add any scenarios to the api server that do not already exist""" + # + for http_server in device.https: + payload = self._api._set_payload(http_server, http_config._HTTP_SERVER) + for http_servers in http_server.servers: + payload["ExternalResourceURL"] = ( + "/api/v2/resources/http-profiles/" + payload["Name"] + ) + print("server payload - ", payload) + rest.set_server_http_profile(payload) + + def _update_tcp_client(self, app_config, client): + # ip_object = self._api.common.get_ip_name(client.client.name) + for tcp in app_config.tcp: + # url = self._api._config_url.get(ip_object) + url = self._api._config_url.get(self._api._ip_list.get(client.client.name)) + url = self._api.common.get_community_url(url) + tcp_child_url = "%snetwork/globalPlugins" % url + response_list = self._api._request("GET", tcp_child_url) + for index in range(len(response_list)): + if response_list[index]["itemType"] == "TCPPlugin": + tcp_url = "%s/%s" % ( + tcp_child_url, + response_list[index]["objectID"], + ) + payload = self._api._set_payload(tcp, client_config._TCP) + response = self._api._request("PATCH", tcp_url, payload) + + def _create_method(self, http_client, protocol_url): + for method in http_client.methods: + for post in method.post: + payload = self._api._set_payload(post, client_config._HTTP_POST) + 
payload.update({"commandType": "POST"}) + command_url = protocol_url + "/agent/actionList" + response = self._api._request("POST", command_url, payload) + for get in method.get: + payload = self._api._set_payload(get, client_config._HTTP_GET) + payload.update({"commandType": "GET"}) + command_url = protocol_url + "/agent/actionList" + response = self._api._request("POST", command_url, payload) + for delete in method.delete: + payload = self._api._set_payload(delete, client_config._HTTP_DELETE) + payload.update({"commandType": "DELETE"}) + command_url = protocol_url + "/agent/actionList" + response = self._api._request("POST", command_url, payload) + for put in method.put: + payload = self._api._set_payload(put, client_config._HTTP_PUT) + payload.update({"commandType": "PUT"}) + command_url = protocol_url + "/agent/actionList" + response = self._api._request("POST", command_url, payload) + for header in method.header: + payload = self._api._set_payload(header, client_config._HTTP_HEADER) + payload.update({"commandType": "HEAD"}) + command_url = protocol_url + "/agent/actionList" + response = self._api._request("POST", command_url, payload) + + +class server_config: + """ """ + + _HTTP_SERVER = { + "enable_tos": "enableTos", + # "priority_flow_control_class" : "pfcClass", + # "precedence_tos" : "precedenceTOS", + # "delay_tos" : "delayTOS", + # "throughput_tos" : "throughputTOS", + "url_stats_count": "urlStatsCount", + "disable_priority_flow_control": "disablePfc", + "enable_vlan_priority": "enableVlanPriority", + "vlan_priority": "vlanPriority", + "esm": "esm", + "enable_esm": "enableEsm", + "time_to_live_value": "ttlValue", + # "tcp_close_option" : "tcpCloseOption", + "type_of_service": "tos", + "high_perf_with_simulated_user": "highPerfWithSU", + } + + _HTTP_SERVERS = { + "profile": "Name", + "version": "HTTPVersion", + "connection_persistence": "ConnectionPersistence", + "rst_timeout": "rstTimeout", + "enable_http2": "enableHTTP2", + "port": "httpPort", + 
"request_timeout": "requestTimeout", + "maximum_response_delay": "maxResponseDelay", + "minimum_response_delay": "minResponseDelay", + "dont_expect_upgrade": "dontExpectUpgrade", + "enable_per_server_per_url_stat": "enablePerServerPerURLstat", + "url_page_size": "urlPageSize", + "enable_chunk_encoding": "enableChunkEncoding", + # "integrity_check_option" : "integrityCheckOption", + "enable_md5_checksum": "enableMD5Checksum", + } + + _TCP = {"keep_alive_time": "tcp_keepalive_time"} + + def __init__(self, cyperfapi): + self._api = cyperfapi + + def config(self): + """ """ + self._devices_config = self._api._l47config.devices + with Timer(self._api, "HTTP server Configurations"): + self._create_server_app() + + def _create_server_app(self): + """Add any scenarios to the api server that do not already exist""" + for device in self._devices_config: + self._create_http_server(device) + + def _create_http_server(self, device): + """Add any scenarios to the api server that do not already exist""" + for http in device.https: + for http_server in http.servers: + # ip_object = self._ip_list(server.server.name) + # ip_object = "e2.ipv4" + url = self._api._config_url.get(http.tcp_name) + url = self._api.common.get_community_url(url) + # url = self._api._cyperf+"cyperf/test/activeTest/communityList/1" + protocol_url = url + "activityList/" + options = {} + options.update({"protocolAndType": "HTTP Server"}) + response = self._api._request("POST", protocol_url, options) + protocol_url = protocol_url + response + # self._api._config_url[server.server.name] = protocol_url + payload = self._api._set_payload(http, server_config._HTTP_SERVER) + response = self._api._request("PATCH", protocol_url + "/agent", payload) + payload = self._api._set_payload( + http_server, server_config._HTTP_SERVERS + ) + response = self._api._request("PATCH", protocol_url + "/agent", payload) + del http + # self._update_tcp_server(app_config, server) + + def _update_tcp_server(self, app_config, server): + # 
ip_object = self._api.common.get_ip_name(self._server_config, server.server.name) + for tcp in app_config.tcp: + url = self._api._config_url.get(self._api._ip_list.get(server.server.name)) + # url = self._api._config_url.get(ip_object) + url = self._api.common.get_community_url(url) + tcp_child_url = "%snetwork/globalPlugins" % url + response_list = self._api._request("GET", tcp_child_url) + for index in range(len(response_list)): + if response_list[index]["itemType"] == "TCPPlugin": + tcp_url = "%s/%s" % ( + tcp_child_url, + response_list[index]["objectID"], + ) + payload = self._api._set_payload(tcp, server_config._TCP) + response = self._api._request("PATCH", tcp_url, payload) diff --git a/snappi_cyperf/interface.py b/snappi_cyperf/interface.py new file mode 100644 index 0000000..eaeab57 --- /dev/null +++ b/snappi_cyperf/interface.py @@ -0,0 +1,139 @@ +import json +import re +import time +from snappi_cyperf.timer import Timer + + +class interfaces(object): + """Transforms OpenAPI objects into IxNetwork objects + - Lag to /lag + Args + ---- + - cyperfapi (Api): instance of the Api class + + """ + + _ETHERNET = { + "mac": "MacStart", + "step": "MacIncr", + "count": "Count", + "max_count": "maxCountPerAgent", + } + + _IP = { + "address": "IpStart", + "gateway": "GwStart", + "prefix": "NetMask", + "name": "networkTags", + "step": "IpIncr", + "count": "Count", + "max_count": "maxCountPerAgent", + } + + _MTU_MSS = { + "mtu": "Mss", + } + + _VLAN = { + "id": "VlanId", + "step": "VlanIncr", + "count": "Count", + "per_count": "CountPerAgent", + "priority": "Priority", + "tpid": "TagProtocolId", + } + + def __init__(self, cyperfapi): + self._api = cyperfapi + self._device_cnt = 0 + self._network_segment_cnt = 0 + self._ip_range_cnt = 0 + self._total_ip_networks = 0 + + def config(self, rest): + """T""" + self._devices_config = self._api._l47config.devices + with Timer(self._api, "Interface Configuration"): + self._create_devices(rest) + + def _create_devices(self, rest): 
    def _create_devices(self, rest):
        """Add any scenarios to the api server that do not already exist.

        First pass: count the configured ethernets and pre-create one extra
        CyPerf ethernet range for every ethernet beyond the first two.
        NOTE(review): this assumes a fresh session starts with two network
        segments already present - confirm against the CyPerf REST API.
        """
        self._total_ip_networks = 1
        for device in self._devices_config:
            for ethernet in device.ethernets:
                if self._total_ip_networks > 2:
                    response = rest.add_eth_range(self._network_segment_cnt)
                self._total_ip_networks = self._total_ip_networks + 1
        self._modify_devices(rest)

    def _modify_devices(self, rest):
        # Second pass: devices are numbered from 1; each device starts at the
        # network segment matching its own ordinal.
        self._device_cnt = 1
        for device in self._devices_config:
            self._network_segment_cnt = self._device_cnt
            self._create_ethernet(device, rest)
            self._device_cnt = self._device_cnt + 1

    def _create_ethernet(self, device, rest):
        """Add any scenarios to the api server that do not already exist."""
        for ethernet in device.ethernets:
            # Remember which segment serves this ethernet (and its port) so
            # later stages (vlan/ip) can address the same segment.
            self._api._network_segments[ethernet.name] = self._network_segment_cnt
            self._api._network_segments[ethernet.connection.port_name] = (
                self._network_segment_cnt
            )
            payload = self._api._set_payload(ethernet, interfaces._ETHERNET)
            payload["MacAuto"] = False
            payload["OneMacPerIP"] = False
            # NOTE(review): parity check pairs odd devices with odd segments
            # (client side) and even with even (server side) - confirm.
            if self._device_cnt % 2 == self._network_segment_cnt % 2:
                rest.set_eth_range(
                    payload,
                    self._network_segment_cnt,
                )
            self._create_ipv4(ethernet, rest)
            # Segments alternate client/server, hence the stride of 2.
            self._network_segment_cnt = self._network_segment_cnt + 2

    def _create_ipv4(self, ethernet, rest):
        """
        Add any ipv4 to the api server that do not already exist
        """
        ipv4_addresses = ethernet.get("ipv4_addresses")
        if ipv4_addresses is None:
            return
        self._ip_range_cnt = 1
        for ipv4 in ethernet.ipv4_addresses:
            self._api._ip_ranges[ipv4.name] = self._ip_range_cnt
            payload = self._api._set_payload(ipv4, interfaces._IP)
            payload["IpAuto"] = False
            payload["NetMaskAuto"] = False
            payload["GwAuto"] = False
            payload.update(self._api._set_payload(ethernet, interfaces._MTU_MSS))
            mss = payload["Mss"]
            # NOTE(review): MSS derived as MTU - 28; the usual IPv4 TCP
            # overhead is 40 bytes (20 IP + 20 TCP) - confirm intended.
            payload["Mss"] = mss - 28
            # CyPerf expects networkTags as a list, not a scalar.
            network_tag = payload["networkTags"]
            payload["networkTags"] = [network_tag]
            # The first IP range exists by default; subsequent ones must be
            # created and addressed by the id the POST returns.
            response = self._ip_range_cnt
            if self._ip_range_cnt > 1:
                response = rest.add_ip_range(self._network_segment_cnt)
            rest.set_ip_range(
                payload,
                self._network_segment_cnt,
                response,
            )
            self._create_vlan(ethernet, ipv4, rest)
            self._ip_range_cnt = self._ip_range_cnt + 1

    def _create_vlan(self, ethernet, ipv4, rest):
        """
        Add any vlan of this ethernet/ip range to the api server that do not
        already exist.
        """
        vlans = ethernet.get("vlans")
        if vlans is None:
            return
        for vlan in ethernet.vlans:
            payload = self._api._set_payload(vlan, interfaces._VLAN)
            payload["VlanEnabled"] = True
            # 33024 == 0x8100, the 802.1Q TPID.
            payload["TagProtocolId"] = 33024
            rest.set_ip_range_innervlan_range(
                payload,
                self._api._network_segments[ethernet.name],
                self._api._ip_ranges[ipv4.name],
            )
class Connection(object):
    """
    Class that executes the HTTP requests to the application instance.
    It handles creating the HTTP session and executing HTTP methods.
    """

    kHeaderContentType = "content-type"
    kContentJson = "application/json"
    kApiKeyHeader = "X-Api-Key"
    kApiKey = ""  # class-wide API key, set once via set_api_key()

    def __init__(self, siteUrl, apiVersion, http_redirect=False):
        """
        Args:
        - siteUrl is the actual url to which the Connection instance will be made.
        - apiVersion is the actual version of the REST API that the Connection instance will use.
        The HTTP session will be created when the first http request is made.
        """
        self.http_session = None
        self.http_redirect = http_redirect
        self.url = Connection.urljoin(siteUrl, "api")
        self.url = Connection.urljoin(self.url, apiVersion)

    @staticmethod
    def set_api_key(api_key):
        """Set the X-Api-Key header value used by all subsequent requests."""
        Connection.kApiKey = api_key

    def _get_http_session(self):
        """
        Lazy initializer for the HTTP session; created on first use.
        """
        if self.http_session is None:
            self.http_session = requests.Session()
            if not self.http_redirect:
                from requests.adapters import HTTPAdapter
                from requests.packages.urllib3.poolmanager import PoolManager
                import ssl

                # SECURITY(review): TLSv1 is deprecated and insecure; kept
                # for compatibility with older appliances - confirm and
                # upgrade to PROTOCOL_TLS_CLIENT when possible.
                http_adapter = HTTPAdapter()
                http_adapter.poolmanager = PoolManager(ssl_version=ssl.PROTOCOL_TLSv1)
                self.http_session.mount("https://", http_adapter)
        return self.http_session

    @classmethod
    def urljoin(cls, base, end):
        """Join two URLs. If the second URL is absolute, the base is ignored.

        Differs from urllib's urljoin in that it
        1. appends a / to base if not present.
        2. casts end to a str as a convenience
        """
        if base and not base.endswith("/"):
            base = base + "/"
        return urljoin(base, str(end))

    def _request(self, method, url="", data="", option=None, params=None, headers=None):
        """Dispatch *method* ('POST' | 'GET' | 'PATCH' | 'DELETE').

        For POST, returns the new object's id parsed from the Location
        header, or the raw reply when *option* is truthy.  Raises Exception
        on any non-OK reply.
        """
        if method == "POST":
            reply = self.http_post(url, data, params, headers)
            if not reply.ok:
                raise Exception(reply.text)
            try:
                new_obj_path = reply.headers["location"]
            except KeyError as exc:
                # BUG FIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; narrowed and chained.
                raise Exception(
                    "Location header is not present. Please check if the action was created successfully."
                ) from exc
            if option:
                return reply
            return new_obj_path.split("/")[-1]
        elif method == "GET":
            return self.http_get(url=url, option=option)
        elif method == "PATCH":
            reply = self.http_patch(url, data, params, headers)
            if not reply.ok:
                raise Exception(reply.text)
            return reply
        elif method == "DELETE":
            reply = self.http_delete(url, data, params, headers)
            if not reply.ok:
                raise Exception(reply.text)
            return reply

    def http_request(self, method, url="", data="", params=None, headers=None):
        """
        Args:

        - Method (mandatory) represents the HTTP method that will be executed.
        - url (optional) is the url that will be appended to the application url.
        - data (optional) is the data that needs to be sent along with the HTTP method as the JSON payload
        - params (optional) the payload python dict not necessary if data is used.
        - headers (optional) these are the HTTP headers that will be sent along with the request. If left blank will use default

        Method for making a HTTP request. The method type (GET, POST, PATCH, DELETE) will be sent as a parameter.
        Along with the url and request data. The HTTP response is returned
        """
        params = {} if params is None else params
        headers = {} if headers is None else headers
        headers[Connection.kHeaderContentType] = Connection.kContentJson
        if Connection.kApiKey != "":
            headers[Connection.kApiKeyHeader] = Connection.kApiKey
        if isinstance(data, dict):
            data = json.dumps(data)
        abs_url = Connection.urljoin(self.url, url)
        # NOTE(review): str(data) turns a non-dict, non-str payload (e.g. a
        # list) into its Python repr, not JSON - confirm callers only pass
        # dicts or pre-serialized strings.
        result = self._get_http_session().request(
            method,
            abs_url,
            data=str(data),
            params=params,
            headers=headers,
            verify=False,
        )
        return result

    def http_get(
        self, url="", data="", option=None, params=None, headers=None, error_codes=None
    ):
        """
        Method for calling HTTP GET. This will return a WebObject that has the fields returned
        in JSON format by the GET operation (or the raw json when *option* is truthy).
        """
        params = {} if params is None else params
        headers = {} if headers is None else headers
        error_codes = [] if error_codes is None else error_codes
        reply = self.http_request("GET", url, data, params, headers)
        if reply.status_code in error_codes:
            raise Exception(
                "Error on executing GET request on url %s: %s" % (url, reply.text)
            )
        if option:
            return reply.json()
        else:
            return _format_response(reply.json(), url)

    def http_options(
        self, url="", data="", params=None, headers=None, error_codes=None
    ):
        """
        Method for calling HTTP Option. This will return a WebObject that has the fields returned
        in JSON format by the Option operation.
        """
        params = {} if params is None else params
        headers = {} if headers is None else headers
        error_codes = [] if error_codes is None else error_codes
        reply = self.http_request("OPTIONS", url, data, params, headers)
        if reply.status_code in error_codes:
            raise Exception(
                "Error on executing Options request on url %s: %s" % (url, reply.text)
            )
        return _format_response(reply.json(), url)

    def http_post(self, url="", data="", params=None, headers=None):
        """
        Method for calling HTTP POST. Will return the HTTP reply.
        """
        params = {} if params is None else params
        headers = {} if headers is None else headers
        return self.http_request("POST", url, data, params, headers)

    def http_patch(self, url="", data="", params=None, headers=None):
        """
        Method for calling HTTP PATCH. Will return the HTTP reply.
        """
        params = {} if params is None else params
        headers = {} if headers is None else headers
        return self.http_request("PATCH", url, data, params, headers)

    def http_delete(self, url="", data="", params=None, headers=None):
        """
        Method for calling HTTP DELETE. Will return the HTTP reply.
        """
        params = {} if params is None else params
        headers = {} if headers is None else headers
        return self.http_request("DELETE", url, data, params, headers)
class WebObject(object):
    """
    Attribute-style wrapper over a JSON object received on a GET request.
    A reply of the form {"caption": "http"} yields an object for which
    obj.caption == "http".
    """

    def __init__(self, **entries):
        """
        Build the wrapper from a property/value mapping; nested values are
        converted recursively via _format_response.
        """
        self.json_options = {}
        for name, raw in entries.items():
            converted = _format_response(raw)
            self.json_options[name] = converted
            setattr(self, name, converted)

    def copy_data(self, new_obj):
        """
        Replace this object's data with the fields of *new_obj*.
        """
        self.json_options = {}
        for name, obj in new_obj.json_options.items():
            self.json_options[name] = obj
            setattr(self, name, obj)

    def get_options(self):
        """
        Get the JSON dictionary which represents the WebObject Instance
        """
        return self.json_options
host="10.39.44.178", + version="10.00.0.152", + username="admin", + password="CyPerf&Keysight#1", +) +# API = cyperfapi.Api(host="10.39.47.88", version="10.00.0.152") + + +@app.route("/config", methods=["POST"]) +def set_config(): + # print("server ----> ", request.data) + CONFIG.deserialize(request.data.decode("utf-8")) + try: + response = API.set_config(CONFIG) + warn = snappi.Warning() + warn.warnings = ["Successfully configured Cyperf"] + return Response(warn.serialize(), mimetype="application/json", status=200) + except Exception as err: + error = snappi.Error() + error.code = err._status_code + error.kind = "validation" + error.errors = [err._message] + print(err) + return Response( + status=400, + response=error.serialize(), + headers={"Content-Type": "application/json"}, + ) + return Response(err.status_code) + # return Response(response=err.message, mimetype="application/json", status=err.status_code) + # try: + # response = API.set_config(CONFIG) + # except Exception as error: + # print("error") + + # warn = snappi.Warning() + # warn.warnings = ["Successfully configured Cyperf"] + # return Response(warn.serialize(), mimetype="application/json", status=200) + + +@app.route("/config", methods=["GET"]) +def get_config(): + try: + config = API.get_config() + return Response(config.serialize(), mimetype="application/json", status=200) + + except Exception as error: + print("error") + + +@app.route("/control/state", methods=["POST"]) +def set_control_state(): + CS.deserialize(request.data.decode("utf-8")) + + try: + + config = API.set_control_state(CS) + return Response(config.serialize(), mimetype="application/json", status=200) + # return Response(config.serialize(), + # mimetype='application/json', + # status=200) + except Exception as err: + error = snappi.Error() + error.code = err._status_code + error.kind = "validation" + error.errors = [err._message] + print(err) + return Response( + status=400, + response=error.serialize(), + headers={"Content-Type": 
"application/json"}, + ) + + +@app.route("/monitor/metrics", methods=["POST"]) +def get_metrics(): + GM.deserialize(request.data.decode("utf-8")) + + try: + + config = API.metrics_request(CS) + return Response(config.serialize(), mimetype="application/json", status=200) + except Exception as error: + print(error) + + +# main driver function +if __name__ == "__main__": + app.run() diff --git a/snappi_cyperf/logger.py b/snappi_cyperf/logger.py new file mode 100644 index 0000000..a5c2cd7 --- /dev/null +++ b/snappi_cyperf/logger.py @@ -0,0 +1,30 @@ +import sys +import logging + +APP_LOGGER_NAME = "snappi_cyperf" + + +def setup_cyperf_logger(log_level, file_name=None, module_name=None): + logger = logging.getLogger(APP_LOGGER_NAME) + logger.setLevel(log_level) + formatter = logging.Formatter( + fmt="%(asctime)s [%(name)s] [%(levelname)s] %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + ) + sh = logging.StreamHandler(sys.stdout) + sh.setFormatter(formatter) + if len(logger.handlers) > 0: + del logger.handlers[:] + logger.addHandler(sh) + if file_name: + fh = logging.FileHandler(file_name) + fh.setFormatter(formatter) + logger.addHandler(fh) + if module_name is not None: + logger = get_cyperf_logger(module_name) + return logger + + +def get_cyperf_logger(module_name): + module_name = ".".join(str(module_name).split(".")[1:]) + return logging.getLogger(APP_LOGGER_NAME).getChild(module_name) diff --git a/snappi_cyperf/objectiveandtimeline.py b/snappi_cyperf/objectiveandtimeline.py new file mode 100644 index 0000000..6852131 --- /dev/null +++ b/snappi_cyperf/objectiveandtimeline.py @@ -0,0 +1,138 @@ +import ipaddress +import json +import re +import time +from snappi_cyperf.timer import Timer + + +class objectiveandtimeline(object): + """ + Args + ---- + - Cyperfapi (Api): instance of the Api class + + """ + + def __init__(self, cyperfapi): + self._api = cyperfapi + + def config(self, rest): + """T""" + self._config = self._api._l47config + with Timer(self._api, "Traffic 
Profile"): + self._create_objectives(rest) + + def _create_objectives(self, rest): + trafficprofile_config = self._config.trafficprofile + for tp in trafficprofile_config: + primary_objective = True + for ( + segment, + timeline, + objective_type, + objective_value, + objectives, + ) in zip( + tp.segment, + tp.timeline, + tp.objective_type, + tp.objective_value, + tp.objectives, + ): + objective_payload = self._get_objective_payload( + objective_type, objective_value, objectives, primary_objective + ) + + if primary_objective: + rest.set_primary_objective(objective_payload) + timeline_payload = self._get_timeline_payload( + segment, + objective_value, + objectives, + ) + id = 1 + for payload in timeline_payload: + rest.set_primary_timeline(payload, id) + id = id + 1 + primary_objective = False + else: + rest.set_secondary_objective(objective_payload) + rest.set_secondary_objective(objective_payload) + + def _get_objective_payload( + self, objective_type, objective_value, objectives, primary_objective + ): + payload = {} + payload["Type"] = objective_type + if objectives.simulated_user.max_pending_user != None: + payload["MaxPendingSimulatedUsers"] = ( + objectives.simulated_user.max_pending_user + ) + if objectives.simulated_user.max_user_per_second != None: + payload["MaxSimulatedUsersPerInterval"] = ( + objectives.simulated_user.max_user_per_second + ) + if not primary_objective: + payload["Enabled"] = True + if objective_value != None: + payload["ObjectiveValue"] = objective_value + if objectives.throughput.throughput_unit != None: + payload["ObjectiveUnit"] = objectives.throughput.throughput_unit + + return payload + + def _get_timeline_payload( + self, + segment, + objective_value, + objectives, + ): + step_up_payload = self._get_segment_payload( + segment.enable_ramp_up, + "1", + "StepUpSegment", + segment.ramp_up_time, + segment.ramp_up_value, + "", + ) + step_steady_payload = self._get_segment_payload( + True, + "2", + "SteadySegment", + segment.duration, + 
objective_value, + objectives.throughput.throughput_unit, + ) + step_down_payload = self._get_segment_payload( + segment.enable_ramp_down, + "3", + "StepDownSegment", + segment.ramp_down_time, + segment.ramp_down_value, + "", + ) + + payload = [step_up_payload, step_steady_payload, step_down_payload] + + return payload + + def _get_segment_payload( + self, + segment_enabled, + segment_id, + segment_type, + segment_duration, + segment_value, + segment_unit, + ): + payload = {} + payload["Enabled"] = segment_enabled + payload["id"] = segment_id + payload["SegmentType"] = segment_type + payload["Duration"] = segment_duration + if segment_value != None: + payload["ObjectiveValue"] = segment_value + if segment_unit != None: + payload["ObjectiveUnit"] = segment_unit + + return payload diff --git a/snappi_cyperf/ports.py b/snappi_cyperf/ports.py new file mode 100644 index 0000000..a27585f --- /dev/null +++ b/snappi_cyperf/ports.py @@ -0,0 +1,45 @@ +import ipaddress +import json +import re +import time +from snappi_cyperf.timer import Timer + + +class port(object): + """ + Args + ---- + - Cyperfapi (Api): instance of the Api class + + """ + + def __init__(self, cyperfapi): + self._api = cyperfapi + + def config(self, rest): + """T""" + self._config = self._api._l47config + with Timer(self._api, "Port Configuration"): + port_config = self._config.ports + for port in port_config: + if self._is_valid_ip(port.location): + self._assign_agents_by_ip( + rest, port.location, self._api._network_segments[port.name] + ) + else: + self._assign_agents_by_tag( + rest, port.location, self._api._network_segments[port.name] + ) + + def _is_valid_ip(self, ip): + try: + ipaddress.ip_address(ip) + return True + except ValueError: + return False + + def _assign_agents_by_ip(self, rest, location, network_segment): + rest.assign_agents_by_ip(location, network_segment) + + def _assign_agents_by_tag(self, rest, location, network_segment): + rest.assign_agents_by_tag(location, network_segment) diff 
--git a/snappi_cyperf/tcp.py b/snappi_cyperf/tcp.py new file mode 100644 index 0000000..6866164 --- /dev/null +++ b/snappi_cyperf/tcp.py @@ -0,0 +1,74 @@ +import json +import re +import time +from snappi_cyperf.timer import Timer +from snappi_cyperf.common import Common + + +class tcp_config(Common): + """Transforms OpenAPI objects into IxNetwork objects + - Lag to /lag + Args + ---- + - Cyperfapi (Api): instance of the Api class + + """ + + _TCP = { + "receive_buffer_size": "RxBuffer", + "transmit_buffer_size": "TxBuffer", + "retransmission_minimum_timeout": "MinRto", + "retransmission_maximum_timeout": "MaxRto", + "minimum_source_port": "MinSrcPort", + "maximum_source_port": "MaxSrcPort", + } + + def __init__(self, cyperfapi): + self._api = cyperfapi + + def config(self, rest): + """ """ + self._devices_config = self._api._l47config.devices + with Timer(self._api, "Tcp Configurations"): + self._update_tcp(rest) + + def _update_tcp(self, rest): + """Add any scenarios to the api server that do not already exist""" + app_id = rest.add_application("TCP App") + response = rest.get_application_actions(app_id) + action_id = response[-1]["id"] + client = True + for device in self._devices_config: + # + self._update_tcp_config(device, client, rest, app_id, action_id) + client = False + + def _update_tcp_config(self, device, client, rest, app_id, action_id): + """Add any scenarios to the api server that do not already exist""" + for tcp in device.tcps: + payload = self._api._set_payload(tcp, tcp_config._TCP) + payload["DeferAccept"] = True + payload["PingPong"] = True + payload["CloseWithReset"] = False + payload["EcnEnabled"] = False + payload["TimestampHdrEnabled"] = True + payload["RecycleTwEnabled"] = True + payload["ReuseTwEnabled"] = True + payload["SackEnabled"] = False + payload["WscaleEnabled"] = False + payload["PmtuDiscDisabled"] = False + payload["Reordering"] = False + if client: + rest.set_client_tcp_profile(payload) + # self._update_tcp_properties(tcp, rest, 
app_id, action_id) + else: + rest.set_server_tcp_profile(payload) + + # def _update_tcp_properties(self, tcp, rest, app_id, action_id): + # payload_data_content = {} + # payload_data_content["Source"] = tcp.data_content_source + # payload_data_content["Value"] = tcp.data_content_value + # rest.set_application_actions_values(payload_data_content, app_id, action_id, 0) + # payload_direction = {} + # payload_direction["Value"] = tcp.direction + # rest.set_application_actions_values(payload_direction, app_id, action_id, 1) diff --git a/snappi_cyperf/timer.py b/snappi_cyperf/timer.py new file mode 100644 index 0000000..10ea15c --- /dev/null +++ b/snappi_cyperf/timer.py @@ -0,0 +1,16 @@ +import time + + +class Timer: + def __init__(self, api, msg): + self._api = api + self._msg = msg + + def __enter__(self): + """Start a new timer as a context manager""" + self._start = time.time() + return self + + def __exit__(self, *exc_info): + """Stop the context manager timer""" + self._api.info(self._msg + " %.3fs" % (time.time() - self._start)) diff --git a/tests/Test_l47.py b/tests/Test_l47.py new file mode 100644 index 0000000..a045f7e --- /dev/null +++ b/tests/Test_l47.py @@ -0,0 +1,122 @@ +import sys + +""" +Configure a raw TCP flow with, +- tx port as source to rx port as destination +- frame count 10000, each of size 128 bytes +- transmit rate of 1000 packets per second +Validate, +- frames transmitted and received for configured flow is as expected +""" + +import snappi + +# host is Cyperf API Server +api = snappi.api(location="http://127.0.0.1:5000", verify=False) +config = api.config() +# port location is agent-ip +tx = config.ports.port(name="tx", location="10.39.44.147")[-1] +rx = config.ports.port(name="rx", location="10.39.44.190")[-1] + +(d1, d2) = config.devices.device(name="d1").device(name="d2") +(e1,) = d1.ethernets.ethernet(name="d1.e1") +e1.connection.port_name = "tx" +e1.mac = "01:02:03:04:05:06" +e1.step = "00:00:00:00:00:01" +e1.count = 1 +e1.mtu = 1488 + 
# Ethernet settings for the rx-side device.
(e2,) = d2.ethernets.ethernet(name="d2.e2")
e2.connection.port_name = "rx"
e2.mac = "01:02:03:04:06:06"
e2.step = "00:00:00:00:00:01"
e2.count = 2
e2.mtu = 1488

# One VLAN per ethernet interface.
(vlan1,) = e1.vlans.vlan(name="vlan1")
vlan1.id = 1
vlan1.priority = 1
vlan1.tpid = "x8100"
vlan1.count = 1
vlan1.step = 1
vlan1.per_count = 1

(vlan2,) = e2.vlans.vlan(name="vlan2")
vlan2.id = 1
vlan2.priority = 1
vlan2.tpid = "x8100"
vlan2.count = 1
vlan2.step = 1
vlan2.per_count = 1

# IPv4 addressing, both sides on the same /16 with a shared gateway.
(ip1,) = e1.ipv4_addresses.ipv4(name="e1.ipv4")
ip1.address = "10.0.0.10"
ip1.gateway = "10.0.0.1"
ip1.step = "0.0.0.1"
ip1.count = 1
ip1.prefix = 16

(ip2,) = e2.ipv4_addresses.ipv4(name="e2.ipv4")
ip2.address = "10.0.0.20"
ip2.gateway = "10.0.0.1"
ip2.step = "0.0.0.1"
ip2.count = 1
ip2.prefix = 16

# TCP/UDP configs

(t1,) = d1.tcps.tcp(name="Tcp1")
t1.ip_interface_name = ip1.name
t1.receive_buffer_size = 1111
t1.transmit_buffer_size = 1112
t1.retransmission_minimum_timeout = 100
t1.retransmission_maximum_timeout = 1001
t1.minimum_source_port = 100
t1.maximum_source_port = 101

(t2,) = d2.tcps.tcp(name="Tcp2")
t2.ip_interface_name = ip2.name
t2.receive_buffer_size = 2222
t2.transmit_buffer_size = 2221
t2.retransmission_minimum_timeout = 200
t2.retransmission_maximum_timeout = 2002
t2.minimum_source_port = 200
t2.maximum_source_port = 202

# HTTP: d1 acts as the client, d2 as the server.
(http_1,) = d1.https.http(name="HTTP1")
http_1.profile = "Chrome"
http_1.version = "HTTP11"
http_1.connection_persistence = "ConnectionPersistenceStandard"
(http_client,) = http_1.clients.client()
http_client.cookie_reject_probability = False
http_client.max_persistent_requests = 1

(http_2,) = d2.https.http(name="HTTP2")
http_2.profile = "Apache"
http_2.version = "HTTP11"
http_2.connection_persistence = "ConnectionPersistenceEnabled"
(http_server,) = http_2.servers.server()

# Traffic profile with two timeline segments.
(tp1,) = config.trafficprofile.trafficprofile()
(segment1, segment2) = tp1.segment.segment().segment()
segment1.name = "Linear segment1"
segment1.duration = 40
segment2.name = "Linear segment2"
segment2.duration = 70
tp1.timeline = [segment1.name, segment2.name]
tp1.objective_type = ["Connections per second", "Simulated users"]
tp1.objective_value = [100, 200]
(obj1, obj2) = tp1.objectives.objective().objective()

print("In test before set_config")
response = api.set_config(config)
print("In test after set_config")
print(response)
# NOTE(review): api is closed here but used again below for control-state
# calls — confirm snappi allows use after close(), or move close() to the end.
api.close()

cs = api.control_state()
cs.app.state = "start"  # cs.app.state.START
response1 = api.set_control_state(cs)
print(response1)
cs.app.state = "stop"  # cs.app.state.STOP (comment fixed; previously said START)
api.set_control_state(cs)