diff --git a/.gitignore b/.gitignore
index 19c954ea8f3..aae2dbd6cf1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -131,3 +131,9 @@ node_modules/
 
 # Python Compile Outputs
 *.pyc
+
+# IntelliJ
+.idea/
+
+# vscode python env files
+.env
\ No newline at end of file
diff --git a/eng/test-configuration.json b/eng/test-configuration.json
new file mode 100644
index 00000000000..27fdfceca0d
--- /dev/null
+++ b/eng/test-configuration.json
@@ -0,0 +1,5 @@
+{
+  "version": 1,
+  "defaultOnFailure": "rerun",
+  "localRerunCount": 1
+}
\ No newline at end of file
diff --git a/src/Microsoft.DotNet.Helix/Sdk.Tests/Microsoft.DotNet.Helix.Sdk.Tests/HelpersTests.cs b/src/Microsoft.DotNet.Helix/Sdk.Tests/Microsoft.DotNet.Helix.Sdk.Tests/HelpersTests.cs
index a971acd4da5..b14dab3163f 100644
--- a/src/Microsoft.DotNet.Helix/Sdk.Tests/Microsoft.DotNet.Helix.Sdk.Tests/HelpersTests.cs
+++ b/src/Microsoft.DotNet.Helix/Sdk.Tests/Microsoft.DotNet.Helix.Sdk.Tests/HelpersTests.cs
@@ -1,5 +1,8 @@
+using System;
+using System.IO;
 using Xunit;
 using System.Net;
+using Newtonsoft.Json;
 
 namespace Microsoft.DotNet.Helix.Sdk.Tests
 {
@@ -35,5 +38,18 @@ public void VerifyNonEncodedFowardSlashIsConverted()
 
             Assert.Equal(workItemNameExpected, actual);
         }
+
+        [Fact]
+        public void FailOnceThenPass()
+        {
+            string target = Path.Combine(Environment.GetEnvironmentVariable("HELIX_WORKITEM_ROOT") ?? Environment.GetEnvironmentVariable("TEMP"), "my-test-file-123456.snt");
+            bool exists = File.Exists(target);
+            if (!exists)
+            {
+                File.WriteAllText(target, "Test failed once");
+            }
+
+            Assert.True(exists, $"File should exist: {target}");
+        }
     }
 }
diff --git a/src/Microsoft.DotNet.Helix/Sdk/SendHelixJob.cs b/src/Microsoft.DotNet.Helix/Sdk/SendHelixJob.cs
index 6212882bbea..abbf3ef3d80 100644
--- a/src/Microsoft.DotNet.Helix/Sdk/SendHelixJob.cs
+++ b/src/Microsoft.DotNet.Helix/Sdk/SendHelixJob.cs
@@ -2,6 +2,7 @@
 using System.Collections.Generic;
 using System.Globalization;
 using System.IO;
+using System.IO.Compression;
 using System.Linq;
 using System.Text;
 using System.Threading;
@@ -33,6 +34,7 @@ public static class MetadataNames
             public const string Uri = "Uri";
             public const string Destination = "Destination";
             public const string IncludeDirectoryName = "IncludeDirectoryName";
+            public const string AsArchive = "AsArchive";
         }
 
         /// 
@@ -521,16 +523,43 @@ private IJobDefinition AddCorrelationPayload(IJobDefinition def, ITaskItem corre
             if (Directory.Exists(path))
             {
                 string includeDirectoryNameStr = correlationPayload.GetMetadata(MetadataNames.IncludeDirectoryName);
-                bool.TryParse(includeDirectoryNameStr, out bool includeDirectoryName);
+                if (!bool.TryParse(includeDirectoryNameStr, out bool includeDirectoryName))
+                {
+                    includeDirectoryName = false;
+                }
 
-                Log.LogMessage(MessageImportance.Low, $"Adding Correlation Payload Directory '{path}', destination '{destination}'");
+                Log.LogMessage(
+                    MessageImportance.Low,
+                    $"Adding Correlation Payload Directory '{path}', destination '{destination}'"
+                );
 
                 return def.WithCorrelationPayloadDirectory(path, includeDirectoryName, destination);
+
             }
 
             if (File.Exists(path))
             {
-                Log.LogMessage(MessageImportance.Low, $"Adding Correlation Payload Archive '{path}', destination '{destination}'");
-                return def.WithCorrelationPayloadArchive(path, destination);
+                string asArchiveStr = correlationPayload.GetMetadata(MetadataNames.AsArchive);
+                if (!bool.TryParse(asArchiveStr, out bool asArchive))
+                {
+                    // With no other information, default to true, since that was the previous behavior
+                    // before we added the option
+                    asArchive = true;
+                }
+
+                if (asArchive)
+                {
+                    Log.LogMessage(
+                        MessageImportance.Low,
+                        $"Adding Correlation Payload Archive '{path}', destination '{destination}'"
+                    );
+                    return def.WithCorrelationPayloadArchive(path, destination);
+                }
+
+                Log.LogMessage(
+                    MessageImportance.Low,
+                    $"Adding Correlation Payload File '{path}', destination '{destination}'"
+                );
+                return def.WithCorrelationPayloadFiles(path);
             }
 
             Log.LogError(FailureCategory.Build, $"Correlation Payload '{path}' not found.");
diff --git a/src/Microsoft.DotNet.Helix/Sdk/tools/Microsoft.DotNet.Helix.Sdk.MonoQueue.targets b/src/Microsoft.DotNet.Helix/Sdk/tools/Microsoft.DotNet.Helix.Sdk.MonoQueue.targets
index b271e821239..7f8604a40fa 100644
--- a/src/Microsoft.DotNet.Helix/Sdk/tools/Microsoft.DotNet.Helix.Sdk.MonoQueue.targets
+++ b/src/Microsoft.DotNet.Helix/Sdk/tools/Microsoft.DotNet.Helix.Sdk.MonoQueue.targets
@@ -3,7 +3,11 @@
     false
-
+
+
+      $(RepositoryEngineeringDir)/test-configuration.json
+
+
@@ -36,6 +40,10 @@
+
+
+
+
diff --git a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/AzurePipelines.MonoQueue.targets b/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/AzurePipelines.MonoQueue.targets
index 9dc488962f1..d5fbcb4569f 100644
--- a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/AzurePipelines.MonoQueue.targets
+++ b/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/AzurePipelines.MonoQueue.targets
@@ -7,11 +7,11 @@
       $(HelixPostCommands);
-      /bin/sh $HELIX_CORRELATION_PAYLOAD/reporter/run.sh $(SYSTEM_TEAMFOUNDATIONCOLLECTIONURI) $(SYSTEM_TEAMPROJECT) $(TestRunId) $(SYSTEM_ACCESSTOKEN) || exit $?
+      $HELIX_PYTHONPATH $HELIX_CORRELATION_PAYLOAD/reporter/run.py $(SYSTEM_TEAMFOUNDATIONCOLLECTIONURI) $(SYSTEM_TEAMPROJECT) $(TestRunId) $(SYSTEM_ACCESSTOKEN) || exit $?
 
       $(HelixPostCommands);
-      call %HELIX_CORRELATION_PAYLOAD%\reporter\run.bat $(SYSTEM_TEAMFOUNDATIONCOLLECTIONURI) $(SYSTEM_TEAMPROJECT) $(TestRunId) $(SYSTEM_ACCESSTOKEN) || exit /b
+      %HELIX_PYTHONPATH% %HELIX_CORRELATION_PAYLOAD%\reporter\run.py $(SYSTEM_TEAMFOUNDATIONCOLLECTIONURI) $(SYSTEM_TEAMPROJECT) $(TestRunId) $(SYSTEM_ACCESSTOKEN) || exit /b
diff --git a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/azure_devops_result_publisher.py b/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/azure_devops_result_publisher.py
deleted file mode 100644
index 47fc6454009..00000000000
--- a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/azure_devops_result_publisher.py
+++ /dev/null
@@ -1,273 +0,0 @@
-import base64
-import os
-import logging
-import time
-from typing import Iterable, Mapping, List, Dict, Optional, Tuple
-from builtins import str as text
-from azure.devops.connection import Connection
-from msrest.authentication import BasicTokenAuthentication, BasicAuthentication
-from azure.devops.v5_1.test import TestClient
-from azure.devops.v5_1.test.models import TestCaseResult, TestAttachmentRequestModel, TestSubResult
-from azure.devops.exceptions import AzureDevOpsClientRequestError
-
-from helpers import get_env
-from defs import TestResult
-
-log = logging.getLogger(__name__)
-
-
-class AzureDevOpsTestResultPublisher:
-    def __init__(self, collection_uri, access_token, team_project, test_run_id=None):
-        """
-
-        :type collection_uri: str The team project collection uri
-        :type access_token: str The value of SYSTEM_ACCESSTOKEN from the azure pipelines build
-        """
-        self.collection_uri = collection_uri
-        self.access_token = access_token
-        self.team_project = team_project
-        self.test_run_id = test_run_id
-        self.work_item_name = get_env("HELIX_WORKITEM_FRIENDLYNAME")
-        pass
-
-    def upload_batch(self, results: Iterable[TestResult]):
-        results_with_attachments = {r.name: r for r in results if r is not None and r.attachments}
-
-        (test_case_results, test_name_order) = self.convert_results(results)
-
-        self.publish_results(test_case_results, test_name_order, results_with_attachments)
-
-    def is_data_driven_test(self, r: str) -> bool:
-        return r.endswith(")")
-
-    def get_ddt_base_name(self, r: str) -> str:
-        return r.split('(', 1)[0]
-
-    def send_attachment(self, test_client, attachment, published_result):
-        try:
-            # Python 3 will throw a TypeError exception because b64encode expects bytes
-            stream = base64.b64encode(text(attachment.text))
-        except TypeError:
-            # stream has to be a string but b64encode takes and returns bytes on Python 3
-            stream = base64.b64encode(bytes(attachment.text, "utf-8")).decode("utf-8")
-
-        test_client.create_test_result_attachment(
-            TestAttachmentRequestModel(
-                file_name=text(attachment.name),
-                stream=stream,
-            ), self.team_project, self.test_run_id, published_result.id)
-
-    def send_sub_attachment(self, test_client, attachment, published_result, sub_result_id):
-        stream = base64.b64encode(bytes(attachment.text, "utf-8")).decode("utf-8")
-
-        test_client.create_test_sub_result_attachment(
-            TestAttachmentRequestModel(
-                file_name=text(attachment.name),
-                stream=stream,
-            ), self.team_project, self.test_run_id, published_result.id, sub_result_id)
-
-    def publish_results(self, test_case_results: Iterable[TestCaseResult], test_result_order: Dict[str, List[str]],
-                        results_with_attachments: Mapping[str, TestResult]) -> None:
-        connection = self.get_connection()
-        test_client = connection.get_client("azure.devops.v5_1.test.TestClient")  # type: TestClient
-
-        tries_left = 10
-        succeeded = False
-        test_run_ended = False
-
-        while tries_left > 0 and not test_run_ended and not succeeded:
-            try:
-                published_results = test_client. \
-                    add_test_results_to_test_run(list(test_case_results),
-                                                 self.team_project,
-                                                 self.test_run_id)  # type: List[TestCaseResult]
-                succeeded = True
-
-            except AzureDevOpsClientRequestError as ex:
-                # Odd syntax here is to deal with checking substrings of the list of args in this exception
-                hit_503 = len([element for element in ex.args if ('invalid status code of 503' in element)]) != 0
-                test_run_ended = len([element for element in ex.args if ('It may have been deleted' in element)]) != 0
-                if hit_503:
-                    tries_left -= 1
-                    log.warning("Hit HTTP 503 from Azure DevOps. Will wait three seconds and try again.")
-                    time.sleep(3)
-                elif test_run_ended:  # Not exceptional, don't retry.
-                    tries_left = 0
-                else:
-                    raise ex
-
-        if test_run_ended:
-            log.info("Test run has ended, skipping attaching results as it would fail.")
-            return
-
-        # !succeeded means a 503 and not succeeding after 10 tries (otherwise we threw already), so give a nice error
-        if not succeeded:
-            raise Exception('Failed to report test results to Azure Dev Ops after retrying. Please contact dnceng.')
-
-        for published_result in published_results:
-
-            # Don't send attachments if the result was not accepted.
-            if published_result.id == -1:
-                continue
-
-            # Does the test result have an attachment with an exact matching name?
-            if published_result.automated_test_name in results_with_attachments:
-                log.debug("Result {0} has an attachment".format(published_result.automated_test_name))
-                result = results_with_attachments.get(published_result.automated_test_name)
-
-                for attachment in result.attachments:
-                    self.send_attachment(test_client, attachment, published_result)
-
-            # Does the test result have an attachment with a sub-result matching name?
-            # The data structure returned from AzDO does not contain a subresult's name, only an
-            # index. The order of results is meant to be the same as was posted. This assumes that
-            # is true , and uses the order of test names recorded earlier to look-up the attachments.
-            elif published_result.sub_results is not None:
-                sub_results_order = test_result_order[published_result.automated_test_name]
-
-                # Sanity check
-                if len(sub_results_order) != len(published_result.sub_results):
-                    log.warning(
-                        "Returned subresults list length does not match expected. Attachments may not pair correctly.")
-
-                for (name, sub_result) in zip(sub_results_order, published_result.sub_results):
-                    if name in results_with_attachments:
-                        result = results_with_attachments.get(name)
-                        for attachment in result.attachments:
-                            self.send_sub_attachment(test_client, attachment, published_result, sub_result.id)
-
-    def convert_results(self, results: Iterable[TestResult]) -> Tuple[Iterable[TestCaseResult], Dict[str, List[str]]]:
-        comment = "{{ \"HelixJobId\": \"{}\", \"HelixWorkItemName\": \"{}\" }}".format(
-            os.getenv("HELIX_CORRELATION_ID"),
-            os.getenv("HELIX_WORKITEM_FRIENDLYNAME"),
-        )
-
-        def convert_to_sub_test(r: TestResult) -> Optional[TestSubResult]:
-            if r.result == "Pass":
-                return TestSubResult(
-                    comment=comment,
-                    display_name=text(r.name),
-                    duration_in_ms=r.duration_seconds * 1000,
-                    outcome="Passed"
-                )
-            if r.result == "Fail":
-                return TestSubResult(
-                    comment=comment,
-                    display_name=text(r.name),
-                    duration_in_ms=r.duration_seconds * 1000,
-                    outcome="Failed",
-                    stack_trace=text(r.stack_trace) if r.stack_trace is not None else None,
-                    error_message=text(r.failure_message)
-                )
-            if r.result == "Skip":
-                return TestSubResult(
-                    comment=comment,
-                    display_name=text(r.name),
-                    duration_in_ms=r.duration_seconds * 1000,
-                    outcome="NotExecuted"
-                )
-            log.warning("Unexpected result value {} for {}".format(r.result, r.name))
-            return None
-
-        def convert_result(r: TestResult) -> Optional[TestCaseResult]:
-            if r.result == "Pass":
-                return TestCaseResult(
-                    test_case_title=text(r.name),
-                    automated_test_name=text(r.name),
-                    automated_test_type=text(r.kind),
-                    automated_test_storage=self.work_item_name,
-                    priority=1,
-                    duration_in_ms=r.duration_seconds * 1000,
-                    outcome="Passed",
-                    state="Completed",
-                    comment=comment,
-                )
-            if r.result == "Fail":
-                return TestCaseResult(
-                    test_case_title=text(r.name),
-                    automated_test_name=text(r.name),
-                    automated_test_type=text(r.kind),
-                    automated_test_storage=self.work_item_name,
-                    priority=1,
-                    duration_in_ms=r.duration_seconds * 1000,
-                    outcome="Failed",
-                    state="Completed",
-                    error_message=text(r.failure_message),
-                    stack_trace=text(r.stack_trace) if r.stack_trace is not None else None,
-                    comment=comment,
-                )
-
-            if r.result == "Skip":
-                return TestCaseResult(
-                    test_case_title=text(r.name),
-                    automated_test_name=text(r.name),
-                    automated_test_type=text(r.kind),
-                    automated_test_storage=self.work_item_name,
-                    priority=1,
-                    duration_in_ms=r.duration_seconds * 1000,
-                    outcome="NotExecuted",
-                    state="Completed",
-                    error_message=text(r.skip_reason),
-                    comment=comment,
-                )
-
-            log.warning("Unexpected result value {} for {}".format(r.result, r.name))
-            return None
-
-        unconverted_results = list(results)  # type: List[TestResult]
-        log.debug("Count of unconverted_results: {0}".format(len(unconverted_results)))
-
-        # Find all DDTs, determine parent, and add to dictionary
-        data_driven_tests = {}  # type: Dict[str, TestCaseResult]
-        non_data_driven_tests = []  # type: List[TestCaseResult]
-        test_name_ordering = {}  # type: Dict[str, List[str]]
-
-        for r in unconverted_results:
-            if r is None:
-                continue
-
-            if not self.is_data_driven_test(r.name):
-                non_data_driven_tests.append(convert_result(r))
-                test_name_ordering[r.name] = []
-                continue
-
-            # Must be a DDT
-            base_name = self.get_ddt_base_name(r.name)
-
-            if base_name in data_driven_tests:
-                sub_test = convert_to_sub_test(r)
-                if sub_test is None:
-                    continue
-
-                data_driven_tests[base_name].sub_results.append(sub_test)
-                test_name_ordering[base_name].append(r.name)
-
-                # Mark parent test as Failed if any subresult is Failed
-                if sub_test.outcome == "Failed":
-                    data_driven_tests[base_name].outcome = "Failed"
-
-            else:
-                cr = convert_result(r)
-                csr = convert_to_sub_test(r)
-
-                if cr is None or csr is None:
-                    continue
-
-                data_driven_tests[base_name] = cr
-                data_driven_tests[base_name].automated_test_name = base_name
-                data_driven_tests[base_name].result_group_type = "dataDriven"
-                data_driven_tests[base_name].sub_results = [csr]
-                test_name_ordering[base_name] = [r.name]
-
-        return (list(data_driven_tests.values()) + non_data_driven_tests, test_name_ordering)
-
-    def get_connection(self) -> Connection:
-        credentials = self.get_credentials()
-        return Connection(self.collection_uri, credentials)
-
-    def get_credentials(self) -> BasicTokenAuthentication:
-        if self.access_token:
-            return BasicTokenAuthentication({'access_token': self.access_token})
-
-        token = get_env("VSTS_PAT")
-        return BasicAuthentication("ignored", token)
diff --git a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/defs.py b/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/defs.py
deleted file mode 100644
index 53cc7f8b5c5..00000000000
--- a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/defs.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from typing import List
-
-
-class TestResult:
-    def __init__(self, name, kind, type_name, method, duration, result, exception_type, failure_message, stack_trace,
-                 skip_reason, attachments):
-        """
-
-        :type name: unicode
-        :type kind: unicode
-        :type type_name: unicode
-        :type method: unicode
-        :type duration: float
-        :type result: unicode
-        :type exception_type: unicode
-        :type failure_message: unicode
-        :type stack_trace: unicode
-        :type skip_reason: unicode
-        :type attachments: List[TestResultAttachment]
-        """
-        self._name = name
-        self._kind = kind
-        self._type = type_name
-        self._method = method
-        self._duration_seconds = duration
-        self._result = result
-        self._exception_type = exception_type
-        self._failure_message = failure_message
-        self._stack_trace = stack_trace
-        self._skip_reason = skip_reason
-        self._attachments = attachments
-
-    @property
-    def name(self):
-        return self._name
-
-    @property
-    def kind(self):
-        return self._kind
-
-    @property
-    def type(self):
-        return self._type
-
-    @property
-    def method(self):
-        return self._method
-
-    @property
-    def duration_seconds(self):
-        return self._duration_seconds
-
-    @property
-    def result(self):
-        return self._result
-
-    @property
-    def exception_type(self):
-        return self._exception_type
-
-    @property
-    def failure_message(self):
-        return self._failure_message
-
-    @property
-    def stack_trace(self):
-        return self._stack_trace
-
-    @property
-    def skip_reason(self):
-        return self._skip_reason
-
-    @property
-    def output(self):
-        return self._output
-
-    @property
-    def attachments(self):
-        return self._attachments
-
-
-class TestResultAttachment:
-    def __init__(self, name, text):
-        """
-
-        :type name: unicode
-        :type text: unicode
-        """
-        self._name = name
-        self._text = text
-
-    @property
-    def name(self):
-        return self._name
-
-    @property
-    def text(self):
-        return self._text
diff --git a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/formats/junit.py b/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/formats/junit.py
index e5bdbf554cc..629f31b2165 100644
--- a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/formats/junit.py
+++ b/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/formats/junit.py
@@ -1,6 +1,6 @@
 import xml.etree.ElementTree
 from .result_format import ResultFormat
-from defs import TestResult, TestResultAttachment
+from helix.public import TestResult, TestResultAttachment
 
 
 class JUnitFormat(ResultFormat):
diff --git a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/formats/result_format.py b/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/formats/result_format.py
index 43aa2a2bb40..8d7cc966311 100644
--- a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/formats/result_format.py
+++ b/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/formats/result_format.py
@@ -1,5 +1,5 @@
 from abc import ABCMeta, abstractmethod, abstractproperty
-from defs import TestResult
+from helix.public import TestResult
 from typing import Iterable
 
 
@@ -10,15 +10,13 @@ def __init__(self):
         pass
 
     @abstractproperty
-    def name(self):
+    def name(self) -> str:
        pass
 
    @abstractproperty
-    def acceptable_file_suffixes(self):
-        # type: () -> Iterable[str]
+    def acceptable_file_suffixes(self) -> Iterable[str]:
        pass
 
    @abstractmethod
-    def read_results(self, path):
-        # type: (str) -> Iterable[TestResult]
+    def read_results(self, path) -> Iterable[TestResult]:
        pass
diff --git a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/formats/trx.py b/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/formats/trx.py
index 9fabb0d88db..cb2afbec916 100644
--- a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/formats/trx.py
+++ b/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/formats/trx.py
@@ -1,7 +1,7 @@
 import glob
 import xml.etree.ElementTree
 from .result_format import ResultFormat
-from defs import TestResult, TestResultAttachment
+from helix.public import TestResult, TestResultAttachment
 
 
 class TRXFormat(ResultFormat):
diff --git a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/formats/xunit.py b/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/formats/xunit.py
index b46a11f1964..bbf8ee6f96e 100644
--- a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/formats/xunit.py
+++ b/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/formats/xunit.py
@@ -2,7 +2,7 @@
 import xml.etree.ElementTree
 
 from .result_format import ResultFormat
-from defs import TestResult, TestResultAttachment
+from helix.public import TestResult, TestResultAttachment
 
 _unescape_char_map = {
     'r': '\r',
diff --git a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/run.bat b/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/run.bat
deleted file mode 100644
index 3a3c0f9d1ba..00000000000
--- a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/run.bat
+++ /dev/null
@@ -1,40 +0,0 @@
-
-set ENV_PATH=%USERPROFILE%\.azdo-env
-set TMP_ENV_PATH=%USERPROFILE%\.azdo-env-tmp
-
-REM Removing pythonpath forces a clean installation of the Azure DevOps client, but subsequent commands may use HELIX libraries
-set _OLD_PYTHONPATH=%PYTHONPATH%
-set PYTHONPATH=
-
-echo %date%-%time%
-
-if NOT EXIST %ENV_PATH%\Scripts\python.exe (
-  rmdir /Q /S %TMP_ENV_PATH%
-  rmdir /Q /S %ENV_PATH%
-  %HELIX_PYTHONPATH% -m virtualenv --no-site-packages %TMP_ENV_PATH%
-  rename %TMP_ENV_PATH% .azdo-env
-)
-REM On certain slow machines python.exe keeps a handle open just long enough to break the rename; retry if so
-set /a renameAttemptNumber=1
-:retryloop
-if NOT EXIST %ENV_PATH%\Scripts\python.exe (
-set /a renameAttemptNumber+=1
-echo Error renaming venv folder; waiting 5 seconds and retrying up to 10x Attempt: %renameAttemptNumber%
-ping -n 6 127.0.0.1 > nul
-rename %TMP_ENV_PATH% .azdo-env
-IF %renameAttemptNumber% GEQ 10 GOTO :renamingdone
-GOTO :retryloop
-)
-:renamingdone
-
-%ENV_PATH%\Scripts\python.exe -c "import azure.devops" || %ENV_PATH%\Scripts\python.exe -m pip install azure-devops==5.0.0b9
-
-%ENV_PATH%\Scripts\python.exe -c "import future" || %ENV_PATH%\Scripts\python.exe -m pip install future==0.17.1
-
-echo %date%-%time%
-%ENV_PATH%\Scripts\python.exe -B %~dp0run.py %*
-set _uploaderExitCode=%ERRORLEVEL%
-echo %date%-%time%
-
-set PYTHONPATH=%_OLD_PYTHONPATH%
-exit /b %_uploaderExitCode%
diff --git a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/run.py b/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/run.py
index e862eb2335d..00f77cf7cba 100644
--- a/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/run.py
+++ b/src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/run.py
@@ -4,52 +4,15 @@
 import time
 import traceback
 import logging
+import shutil
 from queue import Queue
 from threading import Thread, Lock
-from typing import Tuple, Optional, List
-
+from typing import Tuple, Optional
+
+from helpers import get_env
 from test_results_reader import read_results
-from helpers import batch, get_env
-from azure_devops_result_publisher import AzureDevOpsTestResultPublisher
-
-workerFailedLock = Lock()
-workerFailed = False
-
-
-class UploadWorker(Thread):
-    def __init__(self, queue, idx, collection_uri, team_project, test_run_id, access_token):
-        super(UploadWorker, self).__init__()
-        self.queue = queue
-        self.idx = idx
-        self.publisher = AzureDevOpsTestResultPublisher(
-            collection_uri=collection_uri,
-            access_token=access_token,
-            team_project=team_project,
-            test_run_id=test_run_id,
-        )
-        self.total_uploaded = 0
-
-    def __print(self, msg):
-        sys.stdout.write('Worker {}: {}\n'.format(self.idx, msg))
-        sys.stdout.flush()
-
-    def __process(self, batch):
-        self.publisher.upload_batch(batch)
-        self.total_uploaded = self.total_uploaded + len(batch)
-
-    def run(self):
-        global workerFailed, workerFailedLock
-        while True:
-            try:
-                item = self.queue.get()
-                self.__process(item)
-            except:
-                self.__print("got error: {}".format(traceback.format_exc()))
-                with workerFailedLock:
-                    workerFailed = True
-            finally:
-                self.queue.task_done()
+from helix.public import DefaultTestReporter, AzureDevOpsReportingParameters, PackingTestReporter
 
 
 def process_args() -> Tuple[str, str, str, Optional[str]]:
@@ -70,109 +33,36 @@ def process_args() -> Tuple[str, str, str, Optional[str]]:
     return collection_uri, team_project, test_run_id, access_token
 
-# This reporter will be phased out soon, but until then we need to deal with ADO outages and failures from client lib
-# Currently only understands XUnit TestResults.xml (should not be around long enough to need more)
-# See https://github.com/dotnet/arcade/issues/7371 for details
-def check_passed_to_workaround_ado_api_failure(dirs_to_check: List[str]) -> bool:
-    print("Reporting has failed. Running mitigation for https://github.com/dotnet/arcade/issues/7371")
-    found_a_result = False
-    acceptable_xunit_file_names = [
-        "testResults.xml",
-        "test-results.xml",
-        "test_results.xml",
-        "TestResults.xUnit.xml"
-    ]
-
-    failure_count_found = 0
-
-    for dir_name in dirs_to_check:
-        print("Searching '{}' for test results files".format(dir_name))
-        for root, dirs, files in os.walk(dir_name):
-            for file_name in files:
-                if file_name in acceptable_xunit_file_names:
-                    file_path = os.path.join(root, file_name)
-                    print('Found results file {} '.format(file_path))
-                    found_a_result = True
-                    failure_count_found += get_failure_count(file_path)
-
-    if found_a_result:
-        if failure_count_found == 0:
-            print("Reporter script has failed, but XUnit test results show no failures.")
-            return True
-        else:
-            print("Reporter script has failed, and we were able to find XUnit test results with failures ({})"
-                  .format(str(failure_count_found)))
-    else:
-        print("Tried to mitigate but no results files found.")
-    return False
-
-
-def get_failure_count(test_results_path: str):
-    fail_count = 0
-    with open(test_results_path, encoding="utf-8") as result_file:
-        total_regex = re.compile(r'failed="(\d+)"')
-        for line in result_file:
-            if '
 Iterable[TestResult]:
     found = False
     for dir in dirs_to_check:
-        print("Searching '{}' for test results files".format(dir))
+        log.info("Searching '{}' for test results files".format(dir))
         for root, dirs, files in os.walk(dir):
             for file_name in files:
                 for f in all_formats:
                     if file_name.endswith(tuple(f.acceptable_file_suffixes)):
                         file_path = os.path.join(root, file_name)
-                        print('Found results file {} with format {}'.format(file_path, f.name))
+                        log.info('Found results file {} with format {}'.format(file_path, f.name))
                         found = True
                         file_results = (add_logs(tr, log_list) for tr in f.read_results(file_path))
                         for result in file_results:
                             yield result
     if not found:
-        print('No results file found in any of the following formats: {}'.format(', '.join((f.name for f in all_formats))))
+        log.warn('No results file found in any of the following formats: {}'.format(', '.join((f.name for f in all_formats))))
        yield add_logs(__no_results_result(), log_list)
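
For reviewers, a hedged sketch of how the new `AsArchive` metadata consumed by `SendHelixJob.AddCorrelationPayload` might be set from a consuming project. The `HelixCorrelationPayload` item name is the standard Helix SDK correlation-payload item; the `Include` path below is a made-up example and is not part of this patch.

```xml
<ItemGroup>
  <!-- Hypothetical usage: send a single file as a correlation payload without treating it as an archive.
       When AsArchive is omitted or not parseable, SendHelixJob defaults to true and calls
       WithCorrelationPayloadArchive (the previous behavior); AsArchive="false" routes the file
       through WithCorrelationPayloadFiles instead. -->
  <HelixCorrelationPayload Include="$(OutputPath)tools\my-tool.exe" AsArchive="false" />
</ItemGroup>
```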