Add ignore section to config for each rule #32

Draft · wants to merge 5 commits into main
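This change adds an optional ignore list to each rule entry in olivertwist.yml so that named models can be excluded from a rule. Only the config field and its parsing are part of this diff; the sketch below is a hypothetical illustration of how a rule engine might consult the new field (the is_ignored helper is illustrative, not code from this PR, and assumes RuleConfig is constructed as the dataclass in the diff suggests):

from olivertwist.config.model import RuleConfig

def is_ignored(rule_config: RuleConfig, node_id: str) -> bool:
    # Hypothetical helper: a node is skipped for a rule when its id
    # appears in the rule's ignore list from the config file.
    return node_id in (rule_config.ignore or [])

rule_config = RuleConfig(
    id="no-rejoin-models",
    enabled=True,
    ignore=["model.oliver_twist.example_1"],
)
assert is_ignored(rule_config, "model.oliver_twist.example_1")
assert not is_ignored(rule_config, "model.oliver_twist.example_2")

The example config file and parsing test added further down exercise exactly this field.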
3 changes: 2 additions & 1 deletion olivertwist/config/model.py
@@ -6,7 +6,7 @@

"""

from dataclasses import dataclass
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional, List

@@ -23,6 +23,7 @@ class RuleConfig(JsonSchemaMixin):
id: str
enabled: bool
severity: Optional[Severity] = Severity.ERROR
ignore: Optional[List[str]] = field(default_factory=list)


@dataclass
4 changes: 2 additions & 2 deletions olivertwist/main.py
@@ -18,7 +18,7 @@
from olivertwist.config.io import ConfigIO
from olivertwist.manifest import Manifest
from olivertwist.metricengine.engine import MetricEngine
from olivertwist.reporter.adapter import to_html_report
from olivertwist.reporter.adapter import to_report
from olivertwist.reporter.model import MyEncoder
from olivertwist.reporter.reporter import output_json, render_html_report
from olivertwist.reporter.terminal import report_to_terminal
@@ -66,7 +66,7 @@ def check(input, config, html=True, browser=False):
results = rule_engine.run(manifest)
report_to_terminal(results)
metric_results = MetricEngine().run(manifest)
report = to_html_report(results, metric_results)
report = to_report(results, metric_results)
oliver_twist = json.loads(MyEncoder().encode(report))
output_json(oliver_twist)
if html or browser:
39 changes: 17 additions & 22 deletions olivertwist/reporter/adapter.py
@@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
"""Adapter to transform list of results into a Report."""
from collections import defaultdict
from typing import Dict, List

from olivertwist.manifest import Node
@@ -14,22 +17,11 @@
from olivertwist.ruleengine.rule import Rule


def to_html_report(
def to_report(
domain_results: List[Result], metric_results: List[MetricResult]
) -> Report:
models: List[ReportModel] = __organise_by_model(domain_results, metric_results)
summary: ReportSummary = __derive_summary(models)
return Report(summary, models)


def __derive_summary(models: List[ReportModel]) -> ReportSummary:
# TODO: summary.skipped?
skipped = 0
errored = len([a_model for a_model in models if a_model.summary.errored > 0])
warned = len([a_model for a_model in models if a_model.summary.warned > 0])
passed = len(models) - errored - warned

return ReportSummary(passed, skipped, errored, warned)
return Report.from_models(models)


def __organise_by_model(
@@ -78,7 +70,7 @@ def __html_rules_by_model_name(
errored_rules_by_model: Dict[str, List[Rule]],
warned_rules_by_model: Dict[str, List[Rule]],
) -> Dict[str, List[ReportRule]]:
result: Dict[str, List[ReportRule]] = {}
result: Dict[str, List[ReportRule]] = defaultdict(list)
for model_name, domain_rules in passed_rules_by_model.items():
html_rules: List[ReportRule] = [
ReportRule(domain_rule.id, domain_rule.name, ReportStatus.PASSED)
@@ -91,41 +83,43 @@ def __html_rules_by_model_name(
ReportRule(domain_rule.id, domain_rule.name, ReportStatus.ERRORED)
for domain_rule in domain_rules
]
result.setdefault(model_name, []).extend(html_rules)
result[model_name].extend(html_rules)

for model_name, domain_rules in warned_rules_by_model.items():
html_rules: List[ReportRule] = [
ReportRule(domain_rule.id, domain_rule.name, ReportStatus.WARNED)
for domain_rule in domain_rules
]
result.setdefault(model_name, []).extend(html_rules)
result[model_name].extend(html_rules)

return result


def __passed_rules_by_model_name(domain: List[Result]) -> Dict[str, List[Rule]]:
passed_rules: Dict[str, List[Rule]] = {}
passed_rules: Dict[str, List[Rule]] = defaultdict(list)
for a_domain in domain:
for node in a_domain.passes:
passed_rules.setdefault(node.id, []).append(a_domain.rule)
passed_rules[node.id].append(a_domain.rule)
return passed_rules


def __errored_rules_by_model_name(domain: List[Result]) -> Dict[str, List[Rule]]:
failed_rules: Dict[str, List[Rule]] = {}
failed_rules: Dict[str, List[Rule]] = defaultdict(list)
for a_domain in domain:
if a_domain.has_errors:
for node in a_domain.failures:
failed_rules.setdefault(node.id, []).append(a_domain.rule)
failed_rules[node.id].append(a_domain.rule)

return failed_rules


def __warned_rules_by_model(domain: List[Result]) -> Dict[str, List[Rule]]:
failed_rules: Dict[str, List[Rule]] = {}
failed_rules: Dict[str, List[Rule]] = defaultdict(list)
for a_domain in domain:
if a_domain.has_warnings:
for node in a_domain.failures:
failed_rules.setdefault(node.id, []).append(a_domain.rule)
failed_rules[node.id].append(a_domain.rule)

return failed_rules


@@ -135,6 +129,7 @@ def __domain_node_by_model_name(domain: List[Result]) -> Dict[str, Node]:
all_nodes: List[Node] = a_domain.passes + a_domain.failures
for duped_node in all_nodes:
deduped_nodes[duped_node.id] = duped_node

return deduped_nodes


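The adapter refactor above replaces dict plus setdefault with collections.defaultdict(list). Both group rules per model identically; defaultdict just removes the repeated setdefault boilerplate. A small standalone illustration of the equivalence (the sample data is made up):

from collections import defaultdict

pairs = [("model1", "rule-a"), ("model1", "rule-b"), ("model2", "rule-a")]

# Before: create the list on first access with setdefault.
grouped_before = {}
for model, rule in pairs:
    grouped_before.setdefault(model, []).append(rule)

# After: defaultdict(list) creates the list automatically on first access.
grouped_after = defaultdict(list)
for model, rule in pairs:
    grouped_after[model].append(rule)

# Both yield {"model1": ["rule-a", "rule-b"], "model2": ["rule-a"]}
assert grouped_before == dict(grouped_after)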
19 changes: 18 additions & 1 deletion olivertwist/reporter/model.py
@@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
"""Domain objects for reporting."""

from dataclasses import dataclass
from enum import Enum
from json import JSONEncoder
@@ -35,7 +38,7 @@ class ReportMetrics:
def __init__(
self,
name: str,
score: int,
score: float,
) -> None:
self.name = name
self.score = score
@@ -56,6 +59,16 @@ def __init__(
self.errored = errored
self.warned = warned

@classmethod
def from_models(cls, models: List["ReportModel"]):
# TODO: Implement summary.skipped
skipped = 0
errored = len([a_model for a_model in models if a_model.summary.errored > 0])
warned = len([a_model for a_model in models if a_model.summary.warned > 0])
passed = len(models) - errored - warned

return cls(passed, skipped, errored, warned)


@dataclass
class ReportModel:
@@ -81,3 +94,7 @@ class Report:
def __init__(self, summary: ReportSummary, models: List[ReportModel]) -> None:
self.summary = summary
self.models = models

@classmethod
def from_models(cls, models: List[ReportModel]):
return cls(ReportSummary.from_models(models), models)
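The summary derivation that previously lived in the adapter's __derive_summary now sits on the report objects as from_models classmethods, added above. A minimal sketch of how they compose, assuming ReportModel takes the keyword arguments used in the updated test_adapter.py further down (skipped stays hard-coded to 0, per the TODO):

from olivertwist.reporter.model import Report, ReportModel, ReportSummary

models = [
    ReportModel(
        model_key="model1",
        file_path="",
        model_name="Model 1",
        metrics=[],
        rules=[],
        summary=ReportSummary(1, 0, 0, 0),
    ),
    ReportModel(
        model_key="model2",
        file_path="",
        model_name="Model 2",
        metrics=[],
        rules=[],
        summary=ReportSummary(0, 0, 1, 0),
    ),
]

report = Report.from_models(models)
# One model errored, none warned, so the remaining model counts as passed.
assert report.summary.errored == 1
assert report.summary.warned == 0
assert report.summary.passed == 1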
6 changes: 6 additions & 0 deletions tests/config/examples/config_with_ignore.yml
@@ -0,0 +1,6 @@
version: 1
universal:
- id: no-rejoin-models
enabled: true
ignore:
- model.oliver_twist.example_1
8 changes: 8 additions & 0 deletions tests/config/test_io.py
@@ -24,6 +24,7 @@
PATH_TO_INVALID_CONFIG = PATH_TO_CONFIGS / "invalid_config.yml"
PATH_TO_DUPLICATE_CONFIG = PATH_TO_CONFIGS / "duplicate_config.yml"
PATH_TO_NO_VERSION_CONFIG = PATH_TO_CONFIGS / "no_version_config.yml"
PATH_TO_VALID_CONFIG_WITH_IGNORE = PATH_TO_CONFIGS / "config_with_ignore.yml"


@pytest.fixture
@@ -69,6 +70,13 @@ def test_parsing_missing_config_file():
ConfigIO.read(path_to_non_existent_config)


def test_parsing_config_with_ignore():
config = ConfigIO.read(PATH_TO_VALID_CONFIG_WITH_IGNORE)
rule_config = config.get_config_for_rule_id("no-rejoin-models")

assert "model.oliver_twist.example_1" in rule_config.ignore


def test_serializing_config(config: Config):
work_dir = mkdtemp()
path = Path(work_dir) / "olivertwist.yml"
95 changes: 47 additions & 48 deletions tests/reporter/test_adapter.py
@@ -1,8 +1,10 @@
from typing import List

import pytest

from olivertwist.manifest import Node
from olivertwist.metricengine.result import MetricResult
from olivertwist.reporter.adapter import to_html_report
from olivertwist.reporter.adapter import to_report
from olivertwist.reporter.model import (
ReportSummary,
ReportModel,
@@ -15,50 +17,47 @@
from olivertwist.ruleengine.result import Result
from olivertwist.ruleengine.rule import Rule

expected_html_report: Report = Report(
ReportSummary(1, 0, 1, 1),
[
ReportModel(
"model1",
"",
"model1",
[
ReportMetrics("degree_centrality", 0),
ReportMetrics("in_degree_centrality", 0),
ReportMetrics("out_degree_centrality", 0),
ReportMetrics("closeness_centrality", 0),
ReportMetrics("betweenness_centrality", 0),
ReportMetrics("pagerank", 0),
],
[ReportRule("a-rule", "a rule name", ReportStatus.PASSED)],
ReportSummary(1, 0, 0, 0),
),
ReportModel(
"model2",
"",
"model2",
[
ReportMetrics("degree_centrality", 0),
ReportMetrics("in_degree_centrality", 0),
ReportMetrics("out_degree_centrality", 0),
ReportMetrics("closeness_centrality", 0),
ReportMetrics("betweenness_centrality", 0),
ReportMetrics("pagerank", 0),
],
[
ReportRule(
"a-failed-rule",
"a rule name",
ReportStatus.ERRORED,
)
],
ReportSummary(0, 0, 1, 1),
),
],
)

@pytest.fixture()
def expected_report() -> Report:
metrics = [
ReportMetrics("degree_centrality", 0),
ReportMetrics("in_degree_centrality", 0),
ReportMetrics("out_degree_centrality", 0),
ReportMetrics("closeness_centrality", 0),
ReportMetrics("betweenness_centrality", 0),
ReportMetrics("pagerank", 0),
]
return Report(
ReportSummary(1, 0, 1, 1),
[
ReportModel(
model_key="model1",
file_path="",
model_name="Model 1",
metrics=metrics,
rules=[ReportRule("a-rule", "a rule name", ReportStatus.PASSED)],
summary=ReportSummary(1, 0, 0, 0),
),
ReportModel(
model_key="model2",
file_path="",
model_name="Model 2",
metrics=metrics,
rules=[
ReportRule(
"a-failed-rule",
"a rule name",
ReportStatus.ERRORED,
)
],
summary=ReportSummary(0, 0, 1, 1),
),
],
)


def test_should_convert_to_html_output():
def test_should_convert_to_html_output(expected_report: Report):
rule: Rule = Rule("a-failed-rule", "a rule name", None)
model1 = Node({"unique_id": "model1"})
model2 = Node({"unique_id": "model2"})
@@ -70,14 +69,14 @@ def test_should_convert_to_html_output():
MetricResult(model2, 0, 0, 0, 0, 0, 0),
]

actual: Report = to_html_report(domain_results, metric_results)
actual: Report = to_report(domain_results, metric_results)

assert actual == expected_html_report
assert actual == expected_report


def test_json_serialisation():
actual = MyEncoder().encode(expected_html_report)
def test_json_serialisation(expected_report: Report):
actual = MyEncoder().encode(expected_report)

expected_serialisation = """{"summary": {"passed": 1, "skipped": 0, "errored": 1, "warned": 1}, "models": [{"model_key": "model1", "file_path": "", "model_name": "model1", "metrics": [{"name": "degree_centrality", "score": 0, "pretty_name": "Degree centrality"}, {"name": "in_degree_centrality", "score": 0, "pretty_name": "In degree centrality"}, {"name": "out_degree_centrality", "score": 0, "pretty_name": "Out degree centrality"}, {"name": "closeness_centrality", "score": 0, "pretty_name": "Closeness centrality"}, {"name": "betweenness_centrality", "score": 0, "pretty_name": "Betweenness centrality"}, {"name": "pagerank", "score": 0, "pretty_name": "Pagerank"}], "rules": [{"id": "a-rule", "name": "a rule name", "status": "passed"}], "summary": {"passed": 1, "skipped": 0, "errored": 0, "warned": 0}}, {"model_key": "model2", "file_path": "", "model_name": "model2", "metrics": [{"name": "degree_centrality", "score": 0, "pretty_name": "Degree centrality"}, {"name": "in_degree_centrality", "score": 0, "pretty_name": "In degree centrality"}, {"name": "out_degree_centrality", "score": 0, "pretty_name": "Out degree centrality"}, {"name": "closeness_centrality", "score": 0, "pretty_name": "Closeness centrality"}, {"name": "betweenness_centrality", "score": 0, "pretty_name": "Betweenness centrality"}, {"name": "pagerank", "score": 0, "pretty_name": "Pagerank"}], "rules": [{"id": "a-failed-rule", "name": "a rule name", "status": "errored"}], "summary": {"passed": 0, "skipped": 0, "errored": 1, "warned": 1}}]}"""
expected_serialisation = """{"summary": {"passed": 1, "skipped": 0, "errored": 1, "warned": 1}, "models": [{"model_key": "model1", "file_path": "", "model_name": "Model 1", "metrics": [{"name": "degree_centrality", "score": 0, "pretty_name": "Degree centrality"}, {"name": "in_degree_centrality", "score": 0, "pretty_name": "In degree centrality"}, {"name": "out_degree_centrality", "score": 0, "pretty_name": "Out degree centrality"}, {"name": "closeness_centrality", "score": 0, "pretty_name": "Closeness centrality"}, {"name": "betweenness_centrality", "score": 0, "pretty_name": "Betweenness centrality"}, {"name": "pagerank", "score": 0, "pretty_name": "Pagerank"}], "rules": [{"id": "a-rule", "name": "a rule name", "status": "passed"}], "summary": {"passed": 1, "skipped": 0, "errored": 0, "warned": 0}}, {"model_key": "model2", "file_path": "", "model_name": "Model 2", "metrics": [{"name": "degree_centrality", "score": 0, "pretty_name": "Degree centrality"}, {"name": "in_degree_centrality", "score": 0, "pretty_name": "In degree centrality"}, {"name": "out_degree_centrality", "score": 0, "pretty_name": "Out degree centrality"}, {"name": "closeness_centrality", "score": 0, "pretty_name": "Closeness centrality"}, {"name": "betweenness_centrality", "score": 0, "pretty_name": "Betweenness centrality"}, {"name": "pagerank", "score": 0, "pretty_name": "Pagerank"}], "rules": [{"id": "a-failed-rule", "name": "a rule name", "status": "errored"}], "summary": {"passed": 0, "skipped": 0, "errored": 1, "warned": 1}}]}"""

assert actual == expected_serialisation