Tests #403

Merged 12 commits on Apr 24, 2019

2 changes: 1 addition & 1 deletion .travis.yml
@@ -3,7 +3,7 @@
language: python

python:
- "2.7"
- "3.6"

sudo: false

2 changes: 1 addition & 1 deletion setup.py
@@ -36,7 +36,7 @@
'classifiers': ['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python :: 2.7'],
'Programming Language :: Python :: 3'],
'zip_safe': False
}

1 change: 0 additions & 1 deletion templates/brocade_netiron_show_monitor_config.template

This file was deleted.

3 changes: 1 addition & 2 deletions templates/index
@@ -83,8 +83,7 @@ brocade_netiron_show_running-config_interface.template, .*, brocade_netiron, sh[
brocade_netiron_show_lldp_neighbors_detail.template, .*, brocade_netiron, sh[[ow]] ll[[dp]] n[[eighbors]] d[[etail]]
brocade_netiron_show_running-config_vlan.template, .*, brocade_netiron, sh[[ow]] ru[[nning-config]] v[[lan]]
brocade_netiron_show_interfaces_brief.template, .*, brocade_netiron, sh[[ow]] in[[terfaces]] b[[rief]]
brocade_netiron_show_monitor_actual.template, .*, brocade_netiron, sh[[ow]] mon[[itor]] actual
brocade_netiron_show_monitor_config.template, .*, brocade_netiron, sh[[ow]] mon[[itor]] config
brocade_netiron_show_monitor_actual.template, .*, brocade_netiron, sh[[ow]] mon[[itor]] (?:ac|co)
brocade_netiron_show_interfaces.template, .*, brocade_netiron, sh[[ow]] in[[terfaces]]
brocade_netiron_show_lag_brief.template, .*, brocade_netiron, sh[[ow]] lag b[[rief]]
brocade_netiron_show_metro.template, .*, brocade_netiron, sh[[ow]] met[[ro-ring]]
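
The consolidated entry above merges the two deleted monitor lines into one by using a non-capturing alternation: (?:ac|co) matches the start of both "actual" and "config". A rough illustration of how that entry behaves, assuming the usual expansion of the [[..]] shorthand into nested optional groups; the hand-expanded pattern below is an approximation for illustration, not the library's own code:

import re

# Hand-expanded approximation of the index shorthand: "sh[[ow]]" is taken to
# mean "sh(o(w)?)?", i.e. any abbreviation of "show" down to "sh".
pattern = re.compile(r"sh(?:o(?:w)?)? mon(?:i(?:t(?:o(?:r)?)?)?)? (?:ac|co)")

for command in ("show monitor actual", "show monitor config", "sh mon ac"):
    assert pattern.match(command)

# Commands outside the two monitor sub-commands fall through this entry.
assert pattern.match("show monitor traffic") is None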
16 changes: 16 additions & 0 deletions tests/__init__.py
@@ -1 +1,17 @@
"""tests."""

import os
import csv

from ntc_templates.parse import _get_template_dir


def load_index_data():
"""Load data from index file."""
index_data = []
with open('%s%sindex' % (_get_template_dir(), os.sep)) as indexfs:
data = csv.reader(indexfs)
for row in data:
if len(row) > 2 and row[0] != 'Template':
index_data.append(row)
return index_data
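
The new load_index_data helper gives the test modules one shared way to read templates/index; the per-test copies of the same loop are removed further down. A short usage sketch, assuming the index keeps template names in the first column and platform names in the third, which is how the tests below consume it:

from tests import load_index_data

for row in load_index_data():
    template, platform = row[0].strip(), row[2].strip()
    # e.g. 'brocade_netiron_show_lag_brief.template' belongs to 'brocade_netiron'
    print(template, platform)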

This file was deleted.

This file was deleted.

19 changes: 0 additions & 19 deletions tests/ntc_template_test_helper.py

This file was deleted.

54 changes: 23 additions & 31 deletions tests/test_index_order.py
@@ -6,19 +6,10 @@
import glob
import csv

from tests import load_index_data


def load_indexdata():
"""Load data from index file."""
index_data = []
with open('./templates/index') as indexfs:
data = csv.reader(indexfs)
for row in data:
if len(row) > 2 and row[0] != 'Template':
index_data.append(row)
return index_data

def check_order(current_os, prior_os,cmd_len, prior_len, os_choices, used_os, cmd, prior_cmd):
def check_order(current_os, prior_os, cmd_len, prior_len, os_choices, used_os, cmd, prior_cmd):
add_os_check = []

if current_os not in used_os and used_os is not None:
@@ -27,36 +18,41 @@ def check_order(current_os, prior_os,cmd_len, prior_len, os_choices, used_os, cm

if used_os != sorted(used_os):
msg = "OS's are not in alphabetical order, current order: '{}'".format(used_os)
return False , msg
return False, msg
elif add_os_check != sorted(add_os_check):
msg = "OS's are not in alphabetical order, current order: '{}'".format(add_os_check)
return False , msg
return False, msg

if current_os not in os_choices:
msg = "'{}' is not one of the valid options '{}'".format(current_os, used_os)
return False, msg

if not prior_os and prior_len == 0:
# Starting Point
return True , ''
return True, ''
elif current_os == prior_os and cmd_len == prior_len and cmd == min(prior_cmd, cmd):
msg = "OS is the same and command same length, please set {} before {} to be in alphabetical order".format(cmd, prior_cmd)
msg = (
"OS is the same and command same length, "
"please set {} before {} to be in alphabetical order".format(cmd, prior_cmd)
)
return False, msg
elif current_os == prior_os and cmd_len <= prior_len:
# OS is the same as previous, and cmd_len is smaller or equal to prior so good
return True , ''
return True, ''
elif current_os != prior_os and current_os not in used_os:
# prior OS has changed, do not need to check for length yet
return True , ''
return True, ''
elif current_os == prior_os and cmd_len > prior_len:
msg = "Current Command len '{}' larger than previous '{}', for command '{}'".format(cmd_len, prior_len, cmd )
return False , msg
msg = "Current Command len '{}' larger than previous '{}', for command '{}'".format(
cmd_len, prior_len, cmd
)
return False, msg
elif current_os != prior_os and current_os in used_os:
msg = "'{}' OS was already used in list on command '{}'".format(current_os, cmd)
return False , msg
return False, msg
else:
msg = "Failed for unknown reason"
return False , msg
return False, msg

def test_index_ordering():

@@ -67,34 +63,30 @@ def test_index_ordering():
'cisco_xe', 'cisco_xr', 'dell_force10', 'enterasys', 'extreme', 'f5_ltm', 'fortinet',
'hp_comware', 'hp_procurve', 'huawei', 'juniper', 'juniper_junos', 'juniper_screenos',
'alcatel_sros', 'linux', 'ovs_linux', 'paloalto_panos', 'quanta_mesh',
'ubiquiti_edgeswitch', 'vmware_nsxv', 'vyatta_vyos', 'vyos'
'ubiquiti_edgeswitch', 'vmware_nsxv', 'vyatta_vyos', 'vyos',
]

prior_os = ""
prior_len = 0
prior_cmd = ""
used_os = []

index = load_indexdata()
index = load_index_data()
for row in index:
template = row[0].strip()
os = '_'.join(template.split('_')[:2])
cmd = '_'.join(template.split('_')[2:])
cmd_len = len(cmd)
check_val, check_msg = check_order(os, prior_os, cmd_len, prior_len, os_choices, used_os, cmd, prior_cmd)
check_val, check_msg = check_order(
os, prior_os, cmd_len, prior_len, os_choices, used_os, cmd, prior_cmd
)
if not check_val:
#assertFalse(check_val, msg=check_msg)
print("Error on line: {}".format(row))
print("Error Message: {}".format(check_msg))
assert check_val != False
assert check_val
if os not in used_os:
used_os.append(os)
prior_len = cmd_len
prior_cmd = cmd
prior_os = os

def main():
test_index_ordering()

if __name__ == "__main__":
main()
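
check_order encodes the ordering rules for templates/index: each platform's entries form one contiguous block, the blocks are alphabetical, and within a block commands are listed from longest to shortest, with same-length commands in alphabetical order. A toy illustration of the length rule; the shadowing rationale in the comment is an assumption about why the order matters, not something the test asserts:

# Longer, more specific commands come first so that a short abbreviation entry
# cannot shadow a more specific template when the index is scanned top to bottom.
commands = [
    'sh[[ow]] in[[terfaces]] b[[rief]]',
    'sh[[ow]] in[[terfaces]]',
]
assert all(len(a) >= len(b) for a, b in zip(commands, commands[1:]))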
59 changes: 24 additions & 35 deletions tests/test_structured_data_against_parsed_reference_files.py
@@ -1,18 +1,25 @@
#!/usr/bin/env python

"""Run tests against all the *.raw files."""
import glob

import pytest
import yaml
from ntc_template_test_helper import return_test_files

from ntc_templates.parse import parse_output


# Populate test_collection with a list of all the .raw template files we want
# to run tests against
test_collection = return_test_files()
def return_test_files():
"""Return a list of all the *.raw files to run tests against."""
platform_dirs = glob.glob('tests/*')
platforms = (glob.glob('%s/*' % platform) for platform in platform_dirs)
template_dirs = (item for sublist in platforms for item in sublist)
test_commands = (glob.glob('%s/*.raw' % template_dir) for template_dir in template_dirs)

return (item for sublist in test_commands for item in sublist)

@pytest.fixture(scope='function', params=test_collection)

@pytest.fixture(scope='function', params=return_test_files())
def load_template_test(request):
"""Return each *.raw file to run tests on."""
return request.param
@@ -28,42 +35,44 @@ def raw_template_test(raw_file):
rawoutput = data.read()
structured = parse_output(platform=platform, command=command, data=rawoutput)
with open(parsed_file, 'r') as data:
parsed_data = yaml.load(data.read())
parsed_data = yaml.safe_load(data.read())

return structured, parsed_data['parsed_sample']


def test_correct_number_of_entries(load_template_test):
def test_raw_data_against_mock(load_template_test):
processed, reference = raw_template_test(load_template_test)

correct_number_of_entries_test(processed, reference)
all_entries_have_the_same_keys_test(processed, reference)
correct_data_in_entries_test(processed, reference)


def correct_number_of_entries_test(processed, reference):
"""Test that the number of entries returned are the same as the control.

This will create a test for each of the files in the test_collection
variable.
"""
processed, reference = raw_template_test(load_template_test)

assert len(processed) == len(reference)


def test_that_all_entries_have_the_same_keys(load_template_test):
def all_entries_have_the_same_keys_test(processed, reference):
"""Test that the keys of the returned data are the same as the control.

This will create a test for each of the files in the test_collection
variable.
"""
processed, reference = raw_template_test(load_template_test)

for i in range(len(processed)):
assert sorted(processed[i].keys()) == sorted(reference[i].keys())


def test_correct_data_in_entries(load_template_test):
def correct_data_in_entries_test(processed, reference):
"""Test that the actual data in each entry is the same as the control.

This will create a test for each of the files in the test_collection
variable.
"""
processed, reference = raw_template_test(load_template_test)

# Can be uncommented if we don't care that the parsed data isn't
# in the same order as the raw data
# reference = sorted(reference)
@@ -72,23 +81,3 @@ def test_correct_data_in_entries(load_template_test):
for i in range(len(reference)):
for key in reference[i].keys():
assert processed[i][key] == reference[i][key]


def test_that_all_entries_dicts_match(load_template_test):
"""Test that the values of the dicts returned are the same as the control.

This test swaps place with the processed and reference variable so it's not run
in the same order as test_correct_data_in_entries to catch dicts with extra keys

This will create a test for each of the files in the test_collection
variable.
"""
processed, reference = raw_template_test(load_template_test)

# Can be uncommented if we don't care that the parsed data isn't
# in the same order as the raw data
# reference = sorted(reference)
# processed = sorted(processed)

for i in range(len(processed)):
assert processed[i] == reference[i]
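
The switch from yaml.load to yaml.safe_load above keeps the parsed reference files from instantiating arbitrary Python objects; safe_load only builds plain containers and scalars, which is all the parsed_sample data needs. A minimal sketch with made-up sample data:

import yaml

document = """
parsed_sample:
  - interface: Ethernet1/1
    status: up
"""

data = yaml.safe_load(document)
assert data["parsed_sample"][0]["status"] == "up"

# Plain yaml.load without an explicit Loader historically used the unsafe
# loader, which also resolves tags such as !!python/object/apply: and can
# therefore construct arbitrary objects; safe_load rejects those outright.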
55 changes: 25 additions & 30 deletions tests/test_testcases_exists.py
@@ -1,44 +1,39 @@
"""Ensure that testcases exist for all templates."""
import csv
import glob
import os
import glob
import re

from ntc_templates.parse import _get_template_dir
from tests import load_index_data

KNOWN_MISSING_TESTS = [

KNOWN_MISSING_TESTS = {
'cisco_ios_show_vlan',
'cisco_nxos_show_interface_brief',
'cisco_nxos_show_ip_ospf_neighbor_vrf',
'cisco_xr_show_controllers',
'.*vyos.*_os_show_interfaces',
'.*vyos.*_os_show_arp',
]


def load_indexdata():
"""Load data from index file."""
index_data = []
with open('%s%sindex' % (_get_template_dir(), os.sep)) as indexfs:
data = csv.reader(indexfs)
for row in data:
if len(row) > 2 and row[0] != 'Template':
index_data.append(row)
return index_data
}


def test_verify_parsed_and_reference_data_exists():
"""Verify that at least one test exists for all entries in the index file."""
index = sorted(load_indexdata())
coverage = {}
"""Verify that at least one test exists for all entries in the index file.

TODO:
Add test cases for ``KNOWN_MISSING_TESTS`` and remove related conditional.
Remove "_ssh" from ``cisco_wlc_ssh`` and rely on vendor_platform_command syntax
instead of using regex on the directories.
"""
index = sorted(load_index_data())
for row in index:
template = row[0].strip()
template_short = template.split('.')[0]
platform = row[2].strip()
cut = len(platform) + 1
command = template[cut:].split('.')[0]
cases = 'tests/%s/%s/*.raw' % (platform, command)
test_list = glob.glob(cases)
coverage['%s_%s' % (platform, command)] = len(test_list)

for test in coverage:
if coverage[test] == 0 and test not in KNOWN_MISSING_TESTS:
assert test == 'No test cases found'
for directory in os.listdir("tests"):
if re.match(platform, directory):
platform_directory = directory
break
cut = len(platform_directory) + 1
command = template_short[cut:]
if template_short not in KNOWN_MISSING_TESTS:
cases = 'tests/%s/%s/*.raw' % (platform_directory, command)
test_list = glob.glob(cases)
assert len(test_list) != 0, 'Could not find tests for %s' % template
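
The directory lookup above pairs each platform from the index with a tests/ sub-directory via re.match, so a platform such as cisco_wlc still resolves to the cisco_wlc_ssh directory mentioned in the TODO. A small sketch with a hypothetical directory listing:

import re

platform = "cisco_wlc"
directories = ["arista_eos", "cisco_wlc_ssh", "cisco_xr"]  # hypothetical listing

platform_directory = next(d for d in directories if re.match(platform, d))
assert platform_directory == "cisco_wlc_ssh"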