diff --git a/olivertwist/config/model.py b/olivertwist/config/model.py
index bf1c8a5..25bfeec 100644
--- a/olivertwist/config/model.py
+++ b/olivertwist/config/model.py
@@ -6,7 +6,7 @@
 """

-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from enum import Enum
 from typing import Optional, List

@@ -23,6 +23,7 @@ class RuleConfig(JsonSchemaMixin):
     id: str
     enabled: bool
     severity: Optional[Severity] = Severity.ERROR
+    ignore: Optional[List[str]] = field(default_factory=list)


 @dataclass
diff --git a/olivertwist/main.py b/olivertwist/main.py
index 50100b5..740b2d6 100644
--- a/olivertwist/main.py
+++ b/olivertwist/main.py
@@ -18,7 +18,7 @@
 from olivertwist.config.io import ConfigIO
 from olivertwist.manifest import Manifest
 from olivertwist.metricengine.engine import MetricEngine
-from olivertwist.reporter.adapter import to_html_report
+from olivertwist.reporter.adapter import to_report
 from olivertwist.reporter.model import MyEncoder
 from olivertwist.reporter.reporter import output_json, render_html_report
 from olivertwist.reporter.terminal import report_to_terminal
@@ -66,7 +66,7 @@ def check(input, config, html=True, browser=False):
     results = rule_engine.run(manifest)
     report_to_terminal(results)
     metric_results = MetricEngine().run(manifest)
-    report = to_html_report(results, metric_results)
+    report = to_report(results, metric_results)
     oliver_twist = json.loads(MyEncoder().encode(report))
     output_json(oliver_twist)
     if html or browser:
diff --git a/olivertwist/reporter/adapter.py b/olivertwist/reporter/adapter.py
index 7375691..e1762ac 100644
--- a/olivertwist/reporter/adapter.py
+++ b/olivertwist/reporter/adapter.py
@@ -1,3 +1,6 @@
+# -*- coding: utf-8 -*-
+"""Adapter to transform list of results into a Report."""
+from collections import defaultdict
 from typing import Dict, List

 from olivertwist.manifest import Node
@@ -14,22 +17,11 @@
-def to_html_report(
+def to_report(
     domain_results: List[Result], metric_results: List[MetricResult]
 ) -> Report:
     models: List[ReportModel] = __organise_by_model(domain_results, metric_results)
-    summary: ReportSummary = __derive_summary(models)
-    return Report(summary, models)
-
-
-def __derive_summary(models: List[ReportModel]) -> ReportSummary:
-    # TODO: summary.skipped?
-    skipped = 0
-    errored = len([a_model for a_model in models if a_model.summary.errored > 0])
-    warned = len([a_model for a_model in models if a_model.summary.warned > 0])
-    passed = len(models) - errored - warned
-
-    return ReportSummary(passed, skipped, errored, warned)
+    return Report.from_models(models)


 def __organise_by_model(
@@ -78,7 +70,7 @@ def __html_rules_by_model_name(
     errored_rules_by_model: Dict[str, List[Rule]],
     warned_rules_by_model: Dict[str, List[Rule]],
 ) -> Dict[str, List[ReportRule]]:
-    result: Dict[str, List[ReportRule]] = {}
+    result: Dict[str, List[ReportRule]] = defaultdict(list)
     for model_name, domain_rules in passed_rules_by_model.items():
         html_rules: List[ReportRule] = [
             ReportRule(domain_rule.id, domain_rule.name, ReportStatus.PASSED)
             for domain_rule in domain_rules
         ]
@@ -91,41 +83,43 @@
             ReportRule(domain_rule.id, domain_rule.name, ReportStatus.ERRORED)
             for domain_rule in domain_rules
         ]
-        result.setdefault(model_name, []).extend(html_rules)
+        result[model_name].extend(html_rules)
     for model_name, domain_rules in warned_rules_by_model.items():
         html_rules: List[ReportRule] = [
             ReportRule(domain_rule.id, domain_rule.name, ReportStatus.WARNED)
             for domain_rule in domain_rules
         ]
-        result.setdefault(model_name, []).extend(html_rules)
+        result[model_name].extend(html_rules)
     return result


 def __passed_rules_by_model_name(domain: List[Result]) -> Dict[str, List[Rule]]:
-    passed_rules: Dict[str, List[Rule]] = {}
+    passed_rules: Dict[str, List[Rule]] = defaultdict(list)
     for a_domain in domain:
         for node in a_domain.passes:
-            passed_rules.setdefault(node.id, []).append(a_domain.rule)
+            passed_rules[node.id].append(a_domain.rule)
     return passed_rules


 def __errored_rules_by_model_name(domain: List[Result]) -> Dict[str, List[Rule]]:
-    failed_rules: Dict[str, List[Rule]] = {}
+    failed_rules: Dict[str, List[Rule]] = defaultdict(list)
     for a_domain in domain:
         if a_domain.has_errors:
             for node in a_domain.failures:
-                failed_rules.setdefault(node.id, []).append(a_domain.rule)
+                failed_rules[node.id].append(a_domain.rule)
+
     return failed_rules


 def __warned_rules_by_model(domain: List[Result]) -> Dict[str, List[Rule]]:
-    failed_rules: Dict[str, List[Rule]] = {}
+    failed_rules: Dict[str, List[Rule]] = defaultdict(list)
     for a_domain in domain:
         if a_domain.has_warnings:
             for node in a_domain.failures:
-                failed_rules.setdefault(node.id, []).append(a_domain.rule)
+                failed_rules[node.id].append(a_domain.rule)
+
     return failed_rules
@@ -135,6 +129,7 @@ def __domain_node_by_model_name(domain: List[Result]) -> Dict[str, Node]:
         all_nodes: List[Node] = a_domain.passes + a_domain.failures
         for duped_node in all_nodes:
             deduped_nodes[duped_node.id] = duped_node
+
     return deduped_nodes
diff --git a/olivertwist/reporter/model.py b/olivertwist/reporter/model.py
index d23a69a..25b401d 100644
--- a/olivertwist/reporter/model.py
+++ b/olivertwist/reporter/model.py
@@ -1,3 +1,6 @@
+# -*- coding: utf-8 -*-
+"""Domain objects for reporting."""
+
 from dataclasses import dataclass
 from enum import Enum
 from json import JSONEncoder
@@ -35,7 +38,7 @@ class ReportMetrics:
     def __init__(
         self,
         name: str,
-        score: int,
+        score: float,
     ) -> None:
         self.name = name
         self.score = score
@@ -56,6 +59,16 @@ def __init__(
         self.errored = errored
         self.warned = warned

+    @classmethod
+    def from_models(cls, models: List["ReportModel"]):
+        # TODO: Implement summary.skipped
+        skipped = 0
+        errored = len([a_model for a_model in models if a_model.summary.errored > 0])
+        warned = len([a_model for a_model in models if a_model.summary.warned > 0])
+        passed = len(models) - errored - warned
+
+        return cls(passed, skipped, errored, warned)
+

 @dataclass
 class ReportModel:
@@ -81,3 +94,7 @@ class Report:
     def __init__(self, summary: ReportSummary, models: List[ReportModel]) -> None:
         self.summary = summary
         self.models = models
+
+    @classmethod
+    def from_models(cls, models: List[ReportModel]):
+        return cls(ReportSummary.from_models(models), models)
diff --git a/tests/config/examples/config_with_ignore.yml b/tests/config/examples/config_with_ignore.yml
new file mode 100644
index 0000000..79ffac4
--- /dev/null
+++ b/tests/config/examples/config_with_ignore.yml
@@ -0,0 +1,6 @@
+version: 1
+universal:
+  - id: no-rejoin-models
+    enabled: true
+    ignore:
+      - model.oliver_twist.example_1
diff --git a/tests/config/test_io.py b/tests/config/test_io.py
index 6970de4..8968696 100644
--- a/tests/config/test_io.py
+++ b/tests/config/test_io.py
@@ -24,6 +24,7 @@
 PATH_TO_INVALID_CONFIG = PATH_TO_CONFIGS / "invalid_config.yml"
 PATH_TO_DUPLICATE_CONFIG = PATH_TO_CONFIGS / "duplicate_config.yml"
 PATH_TO_NO_VERSION_CONFIG = PATH_TO_CONFIGS / "no_version_config.yml"
+PATH_TO_VALID_CONFIG_WITH_IGNORE = PATH_TO_CONFIGS / "config_with_ignore.yml"


 @pytest.fixture
@@ -69,6 +70,13 @@ def test_parsing_missing_config_file():
         ConfigIO.read(path_to_non_existent_config)


+def test_parsing_config_with_ignore():
+    config = ConfigIO.read(PATH_TO_VALID_CONFIG_WITH_IGNORE)
+    rule_config = config.get_config_for_rule_id("no-rejoin-models")
+
+    assert "model.oliver_twist.example_1" in rule_config.ignore
+
+
 def test_serializing_config(config: Config):
     work_dir = mkdtemp()
     path = Path(work_dir) / "olivertwist.yml"
diff --git a/tests/reporter/test_adapter.py b/tests/reporter/test_adapter.py
index ec7c1d2..eb4b03c 100644
--- a/tests/reporter/test_adapter.py
+++ b/tests/reporter/test_adapter.py
@@ -1,8 +1,10 @@
 from typing import List

+import pytest
+
 from olivertwist.manifest import Node
 from olivertwist.metricengine.result import MetricResult
-from olivertwist.reporter.adapter import to_html_report
+from olivertwist.reporter.adapter import to_report
 from olivertwist.reporter.model import (
     ReportSummary,
     ReportModel,
@@ -15,50 +17,47 @@
 from olivertwist.ruleengine.result import Result
 from olivertwist.ruleengine.rule import Rule

-expected_html_report: Report = Report(
-    ReportSummary(1, 0, 1, 1),
-    [
-        ReportModel(
-            "model1",
-            "",
-            "model1",
-            [
-                ReportMetrics("degree_centrality", 0),
-                ReportMetrics("in_degree_centrality", 0),
-                ReportMetrics("out_degree_centrality", 0),
-                ReportMetrics("closeness_centrality", 0),
-                ReportMetrics("betweenness_centrality", 0),
-                ReportMetrics("pagerank", 0),
-            ],
-            [ReportRule("a-rule", "a rule name", ReportStatus.PASSED)],
-            ReportSummary(1, 0, 0, 0),
-        ),
-        ReportModel(
-            "model2",
-            "",
-            "model2",
-            [
-                ReportMetrics("degree_centrality", 0),
-                ReportMetrics("in_degree_centrality", 0),
-                ReportMetrics("out_degree_centrality", 0),
-                ReportMetrics("closeness_centrality", 0),
-                ReportMetrics("betweenness_centrality", 0),
-                ReportMetrics("pagerank", 0),
-            ],
-            [
-                ReportRule(
-                    "a-failed-rule",
-                    "a rule name",
-                    ReportStatus.ERRORED,
-                )
-            ],
-            ReportSummary(0, 0, 1, 1),
-        ),
-    ],
-)
+
+@pytest.fixture()
+def expected_report() -> Report:
+    metrics = [
+        ReportMetrics("degree_centrality", 0),
+        ReportMetrics("in_degree_centrality", 0),
+        ReportMetrics("out_degree_centrality", 0),
+        ReportMetrics("closeness_centrality", 0),
+        ReportMetrics("betweenness_centrality", 0),
+        ReportMetrics("pagerank", 0),
+    ]
+    return Report(
+        ReportSummary(1, 0, 1, 1),
+        [
+            ReportModel(
+                model_key="model1",
+                file_path="",
+                model_name="Model 1",
+                metrics=metrics,
+                rules=[ReportRule("a-rule", "a rule name", ReportStatus.PASSED)],
+                summary=ReportSummary(1, 0, 0, 0),
+            ),
+            ReportModel(
+                model_key="model2",
+                file_path="",
+                model_name="Model 2",
+                metrics=metrics,
+                rules=[
+                    ReportRule(
+                        "a-failed-rule",
+                        "a rule name",
+                        ReportStatus.ERRORED,
+                    )
+                ],
+                summary=ReportSummary(0, 0, 1, 1),
+            ),
+        ],
+    )


-def test_should_convert_to_html_output():
+def test_should_convert_to_html_output(expected_report: Report):
     rule: Rule = Rule("a-failed-rule", "a rule name", None)
     model1 = Node({"unique_id": "model1"})
     model2 = Node({"unique_id": "model2"})
@@ -70,14 +69,14 @@
         MetricResult(model2, 0, 0, 0, 0, 0, 0),
     ]

-    actual: Report = to_html_report(domain_results, metric_results)
+    actual: Report = to_report(domain_results, metric_results)

-    assert actual == expected_html_report
+    assert actual == expected_report


-def test_json_serialisation():
-    actual = MyEncoder().encode(expected_html_report)
+def test_json_serialisation(expected_report: Report):
+    actual = MyEncoder().encode(expected_report)

-    expected_serialisation = """{"summary": {"passed": 1, "skipped": 0, "errored": 1, "warned": 1}, "models": [{"model_key": "model1", "file_path": "", "model_name": "model1", "metrics": [{"name": "degree_centrality", "score": 0, "pretty_name": "Degree centrality"}, {"name": "in_degree_centrality", "score": 0, "pretty_name": "In degree centrality"}, {"name": "out_degree_centrality", "score": 0, "pretty_name": "Out degree centrality"}, {"name": "closeness_centrality", "score": 0, "pretty_name": "Closeness centrality"}, {"name": "betweenness_centrality", "score": 0, "pretty_name": "Betweenness centrality"}, {"name": "pagerank", "score": 0, "pretty_name": "Pagerank"}], "rules": [{"id": "a-rule", "name": "a rule name", "status": "passed"}], "summary": {"passed": 1, "skipped": 0, "errored": 0, "warned": 0}}, {"model_key": "model2", "file_path": "", "model_name": "model2", "metrics": [{"name": "degree_centrality", "score": 0, "pretty_name": "Degree centrality"}, {"name": "in_degree_centrality", "score": 0, "pretty_name": "In degree centrality"}, {"name": "out_degree_centrality", "score": 0, "pretty_name": "Out degree centrality"}, {"name": "closeness_centrality", "score": 0, "pretty_name": "Closeness centrality"}, {"name": "betweenness_centrality", "score": 0, "pretty_name": "Betweenness centrality"}, {"name": "pagerank", "score": 0, "pretty_name": "Pagerank"}], "rules": [{"id": "a-failed-rule", "name": "a rule name", "status": "errored"}], "summary": {"passed": 0, "skipped": 0, "errored": 1, "warned": 1}}]}"""
+    expected_serialisation = """{"summary": {"passed": 1, "skipped": 0, "errored": 1, "warned": 1}, "models": [{"model_key": "model1", "file_path": "", "model_name": "Model 1", "metrics": [{"name": "degree_centrality", "score": 0, "pretty_name": "Degree centrality"}, {"name": "in_degree_centrality", "score": 0, "pretty_name": "In degree centrality"}, {"name": "out_degree_centrality", "score": 0, "pretty_name": "Out degree centrality"}, {"name": "closeness_centrality", "score": 0, "pretty_name": "Closeness centrality"}, {"name": "betweenness_centrality", "score": 0, "pretty_name": "Betweenness centrality"}, {"name": "pagerank", "score": 0, "pretty_name": "Pagerank"}], "rules": [{"id": "a-rule", "name": "a rule name", "status": "passed"}], "summary": {"passed": 1, "skipped": 0, "errored": 0, "warned": 0}}, {"model_key": "model2", "file_path": "", "model_name": "Model 2", "metrics": [{"name": "degree_centrality", "score": 0, "pretty_name": "Degree centrality"}, {"name": "in_degree_centrality", "score": 0, "pretty_name": "In degree centrality"}, {"name": "out_degree_centrality", "score": 0, "pretty_name": "Out degree centrality"}, {"name": "closeness_centrality", "score": 0, "pretty_name": "Closeness centrality"}, {"name": "betweenness_centrality", "score": 0, "pretty_name": "Betweenness centrality"}, {"name": "pagerank", "score": 0, "pretty_name": "Pagerank"}], "rules": [{"id": "a-failed-rule", "name": "a rule name", "status": "errored"}], "summary": {"passed": 0, "skipped": 0, "errored": 1, "warned": 1}}]}"""

     assert actual == expected_serialisation