convert jira issues - removing backward compatible, adding links #30714

Merged: 17 commits, Nov 8, 2023
Changes from 4 commits
1 change: 0 additions & 1 deletion .gitlab/ci/.gitlab-ci.bucket-upload.yml
@@ -730,7 +730,6 @@ sync-buckets-between-projects:
- gke
extends:
- .bucket-upload-rule
- .default-job-settings
stage: upload-to-marketplace
needs: ["upload-packs-to-marketplace", "upload-packs-to-marketplace-v2", "upload-packs-to-xpanse-marketplace"]
when: always
2 changes: 1 addition & 1 deletion .gitlab/ci/.gitlab-ci.on-push.yml
@@ -476,7 +476,7 @@ jobs-done-check-on-merge:
- .push-rule
- .jobs-done-check
needs:
- merge-dev-secrets
- merge-dev-secrets
- run-unittests-and-lint
- run-validations
- test-upload-flow
17 changes: 10 additions & 7 deletions Tests/Marketplace/print_test_modeling_rule_summary.py
@@ -11,7 +11,8 @@
TEST_SUITE_CELL_EXPLANATION
from Tests.scripts.jira_issues import JIRA_SERVER_URL, JIRA_VERIFY_SSL, JIRA_PROJECT_ID, JIRA_ISSUE_TYPE, JIRA_COMPONENT, \
JIRA_API_KEY, jira_server_information, jira_search_all_by_query, generate_query_by_component_and_issue_type, JIRA_LABELS
from Tests.scripts.test_modeling_rule_report import TEST_MODELING_RULES_BASE_HEADERS, calculate_test_modeling_rule_results
from Tests.scripts.test_modeling_rule_report import TEST_MODELING_RULES_BASE_HEADERS, calculate_test_modeling_rule_results, \
write_test_modeling_rule_to_jira_mapping
from Tests.scripts.utils import logging_wrapper as logging
from Tests.scripts.utils.log_util import install_logging

@@ -55,17 +56,19 @@ def print_test_modeling_rule_summary(artifacts_path: Path, without_jira: bool) -
calculate_test_modeling_rule_results(test_modeling_rules_results_files, issues)
)

write_test_modeling_rule_to_jira_mapping(artifacts_path, jira_tickets_for_modeling_rule)

if modeling_rules_to_test_suite:
logging.info(f"Found {len(jira_tickets_for_modeling_rule)} Jira tickets out of {len(modeling_rules_to_test_suite)} "
"Test modeling rules")

headers, column_align, tabulate_data, xml, total_errors = calculate_results_table(jira_tickets_for_modeling_rule,
modeling_rules_to_test_suite,
server_versions,
TEST_MODELING_RULES_BASE_HEADERS,
without_jira=without_jira)
column_align, tabulate_data, xml, total_errors = calculate_results_table(jira_tickets_for_modeling_rule,
modeling_rules_to_test_suite,
server_versions,
TEST_MODELING_RULES_BASE_HEADERS,
without_jira=without_jira)

table = tabulate(tabulate_data, headers, tablefmt="pretty", colalign=column_align)
table = tabulate(tabulate_data, headers="firstrow", tablefmt="pretty", colalign=column_align)
logging.info(f"Test Modeling rule Results: {TEST_SUITE_CELL_EXPLANATION}\n{table}")
return total_errors != 0

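Note on the tabulate change above: `calculate_results_table` now embeds the header row as the first element of `tabulate_data`, so callers pass `headers="firstrow"` instead of a separate `headers` list. A minimal sketch of the pattern, with illustrative column names and data that are not taken from the real reports:

```python
from tabulate import tabulate

# Headers now travel inside the data as its first row.
headers = ["Test Modeling Rule", "Jira Ticket", "Result"]
tabulate_data = [headers]
tabulate_data.append(["MyRule", "CIAC-1234", "passed"])
tabulate_data.append(["Total", "", "1/0"])

# headers="firstrow" tells tabulate to treat tabulate_data[0] as the header row.
table = tabulate(tabulate_data, headers="firstrow", tablefmt="pretty",
                 colalign=["left", "left", "center"])
print(table)
```

Keeping the headers inside the data is what lets the pandas transpose added in the Tests/scripts/common.py hunk below carry them along with the body.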
69 changes: 20 additions & 49 deletions Tests/Marketplace/print_test_playbook_summary.py
@@ -1,4 +1,5 @@
import argparse
import json
import sys
import traceback
from pathlib import Path
@@ -11,7 +12,8 @@
from Tests.scripts.common import calculate_results_table, TEST_PLAYBOOKS_REPORT_FILE_NAME, get_test_results_files, \
TEST_SUITE_CELL_EXPLANATION
from Tests.scripts.jira_issues import JIRA_SERVER_URL, JIRA_VERIFY_SSL, JIRA_PROJECT_ID, JIRA_ISSUE_TYPE, JIRA_COMPONENT, \
JIRA_API_KEY, jira_server_information, generate_query_by_component_and_issue_type, jira_search_all_by_query, JIRA_LABELS
JIRA_API_KEY, jira_server_information, generate_query_by_component_and_issue_type, jira_search_all_by_query, JIRA_LABELS, \
jira_ticket_to_json_data
from Tests.scripts.test_playbooks_report import calculate_test_playbooks_results, \
TEST_PLAYBOOKS_BASE_HEADERS, get_jira_tickets_for_playbooks
from Tests.scripts.utils import logging_wrapper as logging
Expand Down Expand Up @@ -39,41 +41,6 @@ def read_file_contents(file_path: Path) -> list | None:
return None


def print_test_playbooks_summary_without_junit_report(artifacts_path: Path) -> bool:
"""
Takes the information stored in the files and prints it in a human-readable way.
"""
instance_path = Path(artifacts_path) / "instance_Server Master"
failed_tests_path = instance_path / "failed_tests.txt"
succeeded_tests_path = instance_path / "succeeded_tests.txt"
succeeded_playbooks = read_file_contents(succeeded_tests_path)
failed_playbooks = read_file_contents(failed_tests_path)

# if one of the files isn't existing, we want to fail.
if succeeded_playbooks is None or failed_playbooks is None:
return True

succeeded_count = len(succeeded_playbooks)
failed_count = len(failed_playbooks)

logging.info("TEST RESULTS:")
logging.info(f"Number of playbooks tested - {succeeded_count + failed_count}")

if succeeded_count:
logging.success(f"Number of succeeded tests - {succeeded_count}")
logging.success("Successful Tests:")
for playbook_id in succeeded_playbooks:
logging.success(f"\t- {playbook_id}")

if failed_count:
logging.error(f"Number of failed tests - {failed_count}:")
logging.error("Failed Tests:")
for playbook_id in failed_playbooks:
logging.error(f"\t- {playbook_id}")
return True
return False


def filter_skipped_playbooks(playbooks_results: dict[str, dict[str, TestSuite]]) -> list[str]:
filtered_playbooks_ids = []
for playbook_id, playbook_results in playbooks_results.items():
@@ -90,15 +57,15 @@ def filter_skipped_playbooks(playbooks_results: dict[str, dict[str, TestSuite]])
return filtered_playbooks_ids


def print_test_playbooks_summary(artifacts_path: Path, without_jira: bool) -> tuple[bool, bool]:
def print_test_playbooks_summary(artifacts_path: Path, without_jira: bool) -> bool:
test_playbooks_report = artifacts_path / TEST_PLAYBOOKS_REPORT_FILE_NAME

# iterate over the artifacts path and find all the test playbook result files
if not (test_playbooks_result_files_list := get_test_results_files(artifacts_path, TEST_PLAYBOOKS_REPORT_FILE_NAME)):
# Write an empty report file to avoid failing the build artifacts collection.
JUnitXml().write(test_playbooks_report.as_posix(), pretty=True)
logging.error(f"Could not find any test playbook result files in {artifacts_path}")
return False, False
return True

logging.info(f"Found {len(test_playbooks_result_files_list)} test playbook result files")
playbooks_results, server_versions = calculate_test_playbooks_results(test_playbooks_result_files_list)
@@ -124,16 +91,22 @@ def print_test_playbooks_summary(artifacts_path: Path, without_jira: bool) -> tu
jira_tickets_for_playbooks = get_jira_tickets_for_playbooks(playbooks_ids, issues)
logging.info(f"Found {len(jira_tickets_for_playbooks)} Jira tickets out of {len(playbooks_ids)} filtered playbooks")

headers, column_align, tabulate_data, xml, total_errors = calculate_results_table(jira_tickets_for_playbooks,
playbooks_results,
server_versions,
TEST_PLAYBOOKS_BASE_HEADERS,
without_jira=without_jira)
column_align, tabulate_data, xml, total_errors = calculate_results_table(jira_tickets_for_playbooks,
playbooks_results,
server_versions,
TEST_PLAYBOOKS_BASE_HEADERS,
without_jira=without_jira)
logging.info(f"Writing test playbook report to {test_playbooks_report}")
xml.write(test_playbooks_report.as_posix(), pretty=True)
table = tabulate(tabulate_data, headers, tablefmt="pretty", colalign=column_align)
with open(artifacts_path / "playbook_to_jira_mapping.json", "w") as playbook_to_jira_mapping_file:
playbook_to_jira_mapping = {playbook_id: jira_ticket_to_json_data(jira_ticket)
for playbook_id, jira_ticket in jira_tickets_for_playbooks.items()}
playbook_to_jira_mapping_file.write(json.dumps(playbook_to_jira_mapping, indent=4, sort_keys=True,
default=str))

table = tabulate(tabulate_data, headers="firstrow", tablefmt="pretty", colalign=column_align)
logging.info(f"Test Playbook Results: {TEST_SUITE_CELL_EXPLANATION}\n{table}")
return True, total_errors != 0
return total_errors != 0


def main():
@@ -142,10 +115,8 @@ def main():
options = options_handler()
artifacts_path = Path(options.artifacts_path)
logging.info(f"Printing test playbook summary - artifacts path: {artifacts_path}")
junit_result_exist, errors_found = print_test_playbooks_summary(artifacts_path, options.without_jira)
if not junit_result_exist:
errors_found = print_test_playbooks_summary_without_junit_report(artifacts_path)
if errors_found:

if print_test_playbooks_summary(artifacts_path, options.without_jira):
logging.critical("Test playbook summary found errors")
sys.exit(1)

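The new playbook_to_jira_mapping.json write serializes each playbook's Jira ticket via `jira_ticket_to_json_data`, which is imported from Tests/scripts/jira_issues.py and not shown in this diff. A hedged sketch of the idea; the fields returned by the helper below are an assumption for illustration, not the real implementation:

```python
import json
from pathlib import Path

from jira import Issue


def jira_ticket_to_json_data(ticket: Issue) -> dict:
    # Hypothetical shape; the real helper lives in Tests/scripts/jira_issues.py.
    return {
        "key": ticket.key,
        "url": ticket.permalink(),
    }


def write_playbook_to_jira_mapping(artifacts_path: Path,
                                   jira_tickets_for_playbooks: dict[str, Issue]) -> None:
    mapping = {playbook_id: jira_ticket_to_json_data(ticket)
               for playbook_id, ticket in jira_tickets_for_playbooks.items()}
    with open(artifacts_path / "playbook_to_jira_mapping.json", "w") as mapping_file:
        # default=str covers any ticket field json can't serialize natively.
        mapping_file.write(json.dumps(mapping, indent=4, sort_keys=True, default=str))
```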
11 changes: 8 additions & 3 deletions Tests/scripts/common.py
@@ -1,6 +1,7 @@
from pathlib import Path
from typing import Any

import pandas as pd
from jira import Issue
from junitparser import TestSuite, JUnitXml

@@ -105,7 +106,7 @@ def calculate_results_table(jira_tickets_for_result: dict[str, Issue],
without_jira: bool = False,
with_skipped: bool = False,
multiline_headers: bool = True,
transpose: bool = False) -> tuple[list[str], list[str], list[list[Any]], JUnitXml, int]:
transpose: bool = False) -> tuple[list[str], list[list[Any]], JUnitXml, int]:
xml = JUnitXml()
headers_multiline_char = "\n" if multiline_headers else " "
headers = [h.replace("\n", headers_multiline_char) for h in base_headers]
@@ -119,7 +120,7 @@ def calculate_results_table(jira_tickets_for_result: dict[str, Issue],
server_version if transpose else f"{server_version}{headers_multiline_char}({TEST_SUITE_DATA_CELL_HEADER})"
)
column_align.append("center")
tabulate_data = []
tabulate_data = [headers]
total_row: list[Any] = ([""] * fixed_headers_length + [TestSuiteStatistics(no_color)
for _ in range(len(server_versions_list))])
total_errors = 0
@@ -180,7 +181,11 @@ def calculate_results_table(jira_tickets_for_result: dict[str, Issue],
total_row[0] = (green_text(TOTAL_HEADER) if total_errors == 0 else red_text(TOTAL_HEADER)) \
if not no_color else TOTAL_HEADER
tabulate_data.append(total_row)
return headers, column_align, tabulate_data, xml, total_errors

if transpose:
tabulate_data = pd.DataFrame(tabulate_data, index=None).transpose().to_numpy()

return column_align, tabulate_data, xml, total_errors


def get_all_failed_results(results: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]]:
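The `transpose` branch added above depends on the header row now being part of `tabulate_data`: pandas transposes headers and body together, so the old first column becomes the new header row and `headers="firstrow"` still renders correctly. An illustrative sketch with made-up data:

```python
import pandas as pd
from tabulate import tabulate

tabulate_data = [
    ["Playbook", "Server A", "Server B"],  # header row (made-up server names)
    ["playbook-A", "1/0", "1/0"],
    ["playbook-B", "0/1", "1/0"],
]

transposed = pd.DataFrame(tabulate_data, index=None).transpose().to_numpy()
# transposed[0] is now ["Playbook", "playbook-A", "playbook-B"], so the old
# first column becomes a valid header row for tabulate(headers="firstrow").
print(tabulate(transposed, headers="firstrow", tablefmt="pretty"))
```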
15 changes: 9 additions & 6 deletions Tests/scripts/convert_test_modeling_result_to_jira_issues.py
@@ -17,7 +17,8 @@
jira_server_information, jira_search_all_by_query, generate_query_by_component_and_issue_type
from Tests.scripts.test_modeling_rule_report import (create_jira_issue_for_test_modeling_rule,
TEST_MODELING_RULES_BASE_HEADERS,
calculate_test_modeling_rule_results)
calculate_test_modeling_rule_results,
write_test_modeling_rule_to_jira_mapping)
from Tests.scripts.utils import logging_wrapper as logging
from Tests.scripts.utils.log_util import install_logging

@@ -76,16 +77,18 @@ def main():
logging.info(f"Found {len(jira_tickets_for_modeling_rule)} Jira tickets out "
f"of {len(modeling_rules_to_test_suite)} Test modeling rules")

write_test_modeling_rule_to_jira_mapping(options.artifacts_path, jira_tickets_for_modeling_rule)

# Search if we have too many test modeling rules that failed beyond the max allowed limit to open, if so we print the
# list and exit. This is to avoid opening too many Jira issues.
failed_test_modeling_rule = get_all_failed_results(modeling_rules_to_test_suite)

if len(failed_test_modeling_rule) >= options.max_failures_to_handle:
headers, column_align, tabulate_data, _, _ = calculate_results_table(jira_tickets_for_modeling_rule,
failed_test_modeling_rule,
server_versions,
TEST_MODELING_RULES_BASE_HEADERS)
table = tabulate(tabulate_data, headers, tablefmt="pretty", colalign=column_align)
column_align, tabulate_data, _, _ = calculate_results_table(jira_tickets_for_modeling_rule,
failed_test_modeling_rule,
server_versions,
TEST_MODELING_RULES_BASE_HEADERS)
table = tabulate(tabulate_data, headers="firstrow", tablefmt="pretty", colalign=column_align)
logging.info(f"Test Modeling rule Results: {TEST_SUITE_CELL_EXPLANATION}\n{table}")
logging.critical(f"Found {len(failed_test_modeling_rule)} failed test modeling rule, "
f"which is more than the max allowed limit of {options.max_failures_to_handle} to handle.")
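`write_test_modeling_rule_to_jira_mapping` is imported from Tests/scripts/test_modeling_rule_report, but its body is outside this diff. A plausible sketch, assuming it mirrors the playbook mapping written in print_test_playbook_summary.py; the output file name and serialized fields are assumptions:

```python
import json
from pathlib import Path


def write_test_modeling_rule_to_jira_mapping(artifacts_path: Path,
                                             jira_tickets_for_modeling_rule: dict) -> None:
    # Assumed file name, by analogy with playbook_to_jira_mapping.json.
    mapping_path = Path(artifacts_path) / "test_modeling_rule_to_jira_mapping.json"
    mapping = {rule: str(ticket) for rule, ticket in jira_tickets_for_modeling_rule.items()}
    with open(mapping_path, "w") as mapping_file:
        mapping_file.write(json.dumps(mapping, indent=4, sort_keys=True, default=str))
```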