From bd7d9d82781b5c0ee80a6c54b027aec9053cf1f5 Mon Sep 17 00:00:00 2001 From: jackdewinter Date: Thu, 2 Jan 2025 22:18:02 -0800 Subject: [PATCH] https://github.com/jackdewinter/pymarkdown/issues/1299 (#1300) * https://github.com/jackdewinter/pymarkdown/issues/1299 --- .gitignore | 1 + main.py | 14 +- newdocs/src/changelog.md | 3 +- perf_sample.cmd | 163 ++++++++++++++ perf_series.cmd | 230 ++++++++++++++++++++ publish/coverage.json | 8 +- publish/test-results.json | 2 +- pymarkdown/plugin_manager/plugin_manager.py | 14 +- run.cmd | 13 +- test/resources/performance/README.md | 1 + test/resources/{ => performance}/sample.md | 0 test/rules/test_plugin_manager.py | 83 +++++++ 12 files changed, 517 insertions(+), 15 deletions(-) create mode 100644 perf_sample.cmd create mode 100644 perf_series.cmd create mode 100644 test/resources/performance/README.md rename test/resources/{ => performance}/sample.md (100%) diff --git a/.gitignore b/.gitignore index 756bc0ebb..d413b25ab 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ dist/ pymarkdownlnt.egg-info/ p0.prof ff/ +.pycache/ diff --git a/main.py b/main.py index 5b15b5dc2..77d625344 100644 --- a/main.py +++ b/main.py @@ -2,6 +2,9 @@ Module to provide for a simple bootstrap for the project. """ +import cProfile +import os + from pymarkdown.main import PyMarkdownLint @@ -14,7 +17,16 @@ def main(self): """ Main entrance point. 
""" - PyMarkdownLint().main() + performance_run_indicator = ( + os.getenv("PYMARKDOWNLINT__PERFRUN", "0").strip().lower() + ) + if performance_run_indicator in ("1", "true"): + cProfile.run( + "from pymarkdown.main import PyMarkdownLint; PyMarkdownLint().main()", + "p0.prof", + ) + else: + PyMarkdownLint().main() if __name__ == "__main__": diff --git a/newdocs/src/changelog.md b/newdocs/src/changelog.md index 7b0199e98..63ed431f0 100644 --- a/newdocs/src/changelog.md +++ b/newdocs/src/changelog.md @@ -5,7 +5,8 @@ ### Added -None +- [Issue 1299](https://github.com/jackdewinter/pymarkdown/issues/1299) + - adding scripts to capture performance numbers to enable tuning ### Fixed diff --git a/perf_sample.cmd b/perf_sample.cmd new file mode 100644 index 000000000..4c2159439 --- /dev/null +++ b/perf_sample.cmd @@ -0,0 +1,163 @@ +@echo off +setlocal EnableDelayedExpansion +pushd %~dp0 + +rem Set needed environment variables. +set PLONG_TEMPFILE=%TEMP%\temp_plong_%RANDOM%.txt +set TEST_FILE_DIRECTORY=build\ptest +set PERF_OUTPUT=%TEMP%\temp_pout_%RANDOM%.txt + +rem Look for options on the command line. + +set VIEW_MODE= +set NUM_REPEATS=10 +set CSV_OUTPUT= +set NO_RULES_MODE= +:process_arguments +if "%1" == "-h" ( + echo Command: %0 [options] + echo Usage: + echo - Execute a clean build for this project. + echo Options: + echo -h This message. + echo -c {file} Append results to file in CSV format. + echo -r {num} Number of repititions of the test document to merge together as a positive integer. + echo -nr Take measurements without processing any rules. + echo -v View the measured performance metrics. + GOTO real_end +) else if "%1" == "-v" ( + set VIEW_MODE=1 +) else if "%1" == "-nr" ( + set NO_RULES_MODE=1 +) else if "%1" == "-r" ( + set NUM_REPEATS=%2 + if not defined NUM_REPEATS ( + echo Option -r requires a positive integer argument to follow it. 
+ goto error_end + ) + shift +) else if "%1" == "-c" ( + set CSV_OUTPUT=%2 + if not defined CSV_OUTPUT ( + echo Option -c requires a filename argument to follow it. + goto error_end + ) + shift +) else if "%1" == "" ( + goto after_process_arguments +) else ( + echo Argument '%1' not understood. Stopping. + echo Type '%0 -h' to see valid arguments. + goto error_end +) +shift +goto process_arguments +:after_process_arguments + +SET "var="&for /f "delims=0123456789" %%i in ("%NUM_REPEATS%") do set var=%%i +if defined var ( + echo Option -r is followed by '%NUM_REPEATS%' which is not an integer. + goto error_end +) + +if %NUM_REPEATS% lss 1 ( + echo Option -r is followed by '%NUM_REPEATS%' which is not a positive integer. + goto error_end +) + +set SINGLE_TEST_SOURCE_FILE=test\resources\performance\sample.md +set SINGLE_TEST_DESTINATION_FILE=%TEST_FILE_DIRECTORY%\test.md + +rem Announce what this script does. +echo {Profiling of project started.} + +rem Make sure we have a directory to create the test files for profiling in. +if not exist "%TEST_FILE_DIRECTORY%" ( + mkdir "%TEST_FILE_DIRECTORY%" > %PLONG_TEMPFILE% 2>&1 + if ERRORLEVEL 1 ( + type %PLONG_TEMPFILE% + echo. + echo {Creating test directory failed.} + goto error_end + ) +) + +rem Erase all existing contents of the profiling test directory. +erase /s /q %TEST_FILE_DIRECTORY%\* > %PLONG_TEMPFILE% 2>&1 +if ERRORLEVEL 1 ( + type %PLONG_TEMPFILE% + echo. + echo {Removing files in test directory failed.} + goto error_end +) + +rem Create a composite document with NUM_REPEATS copies of the source document. +echo Creating single document with %NUM_REPEATS% copies of '%SINGLE_TEST_SOURCE_FILE%'. +FOR /L %%A IN (1,1,%NUM_REPEATS%) DO ( + type %SINGLE_TEST_SOURCE_FILE% >> %SINGLE_TEST_DESTINATION_FILE% +) + +rem Remove any __pycache__ related files... +echo Resetting Python caches... 
+set PYTHONPYCACHEPREFIX=C:\enlistments\pymarkdown\.pycache +python3 -Bc "import pathlib; [p.unlink() for p in pathlib.Path('.').rglob('*.py[co]')]" +python3 -Bc "import pathlib; [p.rmdir() for p in pathlib.Path('.').rglob('__pycache__')]" + +rem ... and then take the steps to properly create any needed caching. +python -m compileall pymarkdown > nul 2>&1 +set SINGLE_TEST_SOURCE_FILE=%SINGLE_TEST_SOURCE_FILE:\=\\% +python -OO -c "import subprocess; subprocess.run(['run.cmd','scan','%SINGLE_TEST_SOURCE_FILE%'])" > %PERF_OUTPUT% + +if defined NO_RULES_MODE ( + set "XNO_RULES_MODE='--disable-rules','*'," +) + +echo Scanning created document... +set SINGLE_TEST_DESTINATION_FILE=%SINGLE_TEST_DESTINATION_FILE:\=\\% +python -OO -c "import subprocess,os,time; my_env = os.environ.copy(); my_env['PYMARKDOWNLINT__PERFRUN'] = '1'; start_time = time.time(); subprocess.run(['run.cmd',%XNO_RULES_MODE%'scan','%SINGLE_TEST_DESTINATION_FILE%'], env=my_env); print(time.time() - start_time)" > %PERF_OUTPUT% + +echo Document scanning completed. + +rem Calculate the statistics to report for this profiling run +type %PERF_OUTPUT% | find /c /v "" > %PLONG_TEMPFILE% +for /f "delims=" %%x in (%PLONG_TEMPFILE%) do set LINES_IN_PROF_OUTPUT=%%x +set OLD_LINES_IN_PROF_OUTPUT=%LINES_IN_PROF_OUTPUT% +set /a LINES_IN_PROF_OUTPUT -=1 +more +%LINES_IN_PROF_OUTPUT% %PERF_OUTPUT% > %PLONG_TEMPFILE% +for /f "delims=" %%x in (%PLONG_TEMPFILE%) do set FRANK=%%x +if %LINES_IN_PROF_OUTPUT% LEQ 0 goto bob +set /a OLD_LINES_IN_PROF_OUTPUT -= 3 +:bob + +rem Generate the CSV output to append to the file, or just report the stats to the console. +if defined CSV_OUTPUT ( + echo %NUM_REPEATS%,%OLD_LINES_IN_PROF_OUTPUT%,%FRANK% >> !CSV_OUTPUT! +) else ( + echo Repeats in File: %NUM_REPEATS% + echo Lines in output: %OLD_LINES_IN_PROF_OUTPUT% + echo Execution time: %FRANK% +) + +rem If in view mode, use SnakeViz to visualize. 
+if defined VIEW_MODE ( + snakeviz p0.prof +) + +rem Cleanly exit the script +:good_end + +echo. +set PC_EXIT_CODE=0 +echo {Profiling of project succeeded.} +goto real_end + +:error_end +set PC_EXIT_CODE=1 +echo {Profiling of project failed.} + +:real_end +erase /f /q %PLONG_TEMPFILE% > nul 2>&1 +erase /f /q %PERF_OUTPUT% > nul 2>&1 +set PLONG_TEMPFILE= +popd +exit /B %PC_EXIT_CODE% diff --git a/perf_series.cmd b/perf_series.cmd new file mode 100644 index 000000000..7601d6d52 --- /dev/null +++ b/perf_series.cmd @@ -0,0 +1,230 @@ +@echo off +setlocal EnableDelayedExpansion +pushd %~dp0 + +rem Set needed environment variables. +set PBOY_TEMPFILE=%TEMP%\temp_pboy_%RANDOM%.txt + +rem Look for options on the command line. + +set VIEW_MODE= +set NUM_MINIMUM=1 +set NUM_MAXIMUM=2 +set NUM_COUNT=1 +set ALTERNATE_REPEAT_LIST= +set WAS_0=%0 +set NO_RULES_MODE= +set TEST_SERIES_TAG= +:process_arguments +if "%1" == "-h" ( + echo Command: %WAS_0% [options] + echo Usage: + echo - Execute a series of performance sampling runs for this project. + echo Options: + echo -h This message. + echo -s Repeat count to start at. + echo -e Repeat count to end at. + echo -l List of _ separated repeat counts to use instead of -s and -e. + echo -c Count of times for each series of repeats. + echo -t Tag to associate with this series of tests. + echo -nr Take measurements without processing any rules {Parser only.} + echo. + echo Example: + echo To run a series of tests, from 10 to 15 repeats: + echo %WAS_0% -s 10 -e 15 + echo To run a series of tests, from 10 to 15 repeats, twice: + echo %WAS_0% -s 10 -e 15 -c 2 + echo To run a series of tests, only 10 and 15 repeats, twice: + echo %WAS_0% -l 10_15 -c 2 + GOTO real_end +) else if "%1" == "-t" ( + set TEST_SERIES_TAG=%2 + if not defined TEST_SERIES_TAG ( + echo Option -t requires an alphabetic argument to follow it. 
+ goto error_end + ) + shift +) else if "%1" == "-s" ( + set NUM_MINIMUM=%2 + if not defined NUM_MINIMUM ( + echo Option -s requires a positive integer argument to follow it. + goto error_end + ) + shift +) else if "%1" == "-e" ( + set NUM_MAXIMUM=%2 + if not defined NUM_MAXIMUM ( + echo Option -e requires a positive integer argument to follow it. + goto error_end + ) + shift +) else if "%1" == "-c" ( + set NUM_COUNT=%2 + if not defined NUM_COUNT ( + echo Option -c requires a positive integer argument to follow it. + goto error_end + ) + shift +) else if "%1" == "-l" ( + set ALTERNATE_REPEAT_LIST=%2 + if not defined ALTERNATE_REPEAT_LIST ( + echo Option -l requires a underscore separated list of repeat counts to follow it. + goto error_end + ) + shift +) else if "%1" == "-nr" ( + set NO_RULES_MODE=1 +) else if "%1" == "" ( + goto after_process_arguments +) else ( + echo Argument '%1' not understood. Stopping. + echo Type '%0 -h' to see valid arguments. + goto error_end +) +shift +goto process_arguments +:after_process_arguments + +if defined TEST_SERIES_TAG ( + SET "var="&for /f "delims=abcdefghijklmnopqrstuvwxyz" %%i in ("%TEST_SERIES_TAG%") do set var=%%i + if defined var ( + echo Option -t is followed by '%TEST_SERIES_TAG%' which is not an alphabetic tag. + goto error_end + ) +) + +SET "var="&for /f "delims=0123456789" %%i in ("%NUM_MINIMUM%") do set var=%%i +if defined var ( + echo Option -s is followed by '%NUM_MINIMUM%' which is not an integer. + goto error_end +) + +if %NUM_MINIMUM% lss 1 ( + echo Option -s is followed by '%NUM_MINIMUM%' which is not a positive integer. + goto error_end +) + +SET "var="&for /f "delims=0123456789" %%i in ("%NUM_MAXIMUM%") do set var=%%i +if defined var ( + echo Option -e is followed by '%NUM_MAXIMUM%' which is not an integer. + goto error_end +) + +if %NUM_MAXIMUM% lss 1 ( + echo Option -e is followed by '%NUM_MAXIMUM%' which is not a positive integer. 
+ goto error_end +) + +SET "var="&for /f "delims=0123456789" %%i in ("%NUM_COUNT%") do set var=%%i +if defined var ( + echo Option -c is followed by '%NUM_COUNT%' which is not an integer. + goto error_end +) + +if %NUM_COUNT% lss 1 ( + echo Option -c is followed by '%NUM_COUNT%' which is not a positive integer. + goto error_end +) + +@REM echo on +if not defined ALTERNATE_REPEAT_LIST goto no_alternate_repeat_list +set WHATS_LEFT_OF_ALTERNATE_REPEAT_LIST=%ALTERNATE_REPEAT_LIST% +:grab_next_count_test +for /f "tokens=1,* delims=_" %%a in ("%WHATS_LEFT_OF_ALTERNATE_REPEAT_LIST%") do ( + set NEXT_ITEM=%%a + set WHATS_LEFT_OF_ALTERNATE_REPEAT_LIST=%%b + + SET "var="&for /f "delims=0123456789" %%i in ("!NEXT_ITEM!") do set var=%%i + if defined var ( + echo Option -l has an element '!NEXT_ITEM!' which is not an integer. + goto error_end + ) + + if !NEXT_ITEM! lss 1 ( + echo Option -l has an element '!NEXT_ITEM!' which is not a positive integer. + goto error_end + ) +) +if "%WHATS_LEFT_OF_ALTERNATE_REPEAT_LIST%" neq "" goto grab_next_count_test +:no_alternate_repeat_list + +rem Announce what this script does. +echo {Batch profiling of project started.} + +if defined TEST_SERIES_TAG ( + set DEST_FILE=build\series-%TEST_SERIES_TAG%.csv +) else ( + set DEST_FILE=build\series.csv +) + +set PERF_SAMPLE_OPTIONS= +if defined NO_RULES_MODE ( + set PERF_SAMPLE_OPTIONS=-nr +) + +erase /f /q %DEST_FILE% > nul 2>&1 + +set PASS_NUMBER=1 +:skip_to_next_pass + +if not defined ALTERNATE_REPEAT_LIST goto exec_by_repeat_count + +set WHATS_LEFT_OF_ALTERNATE_REPEAT_LIST=%ALTERNATE_REPEAT_LIST% +:grab_next_count +for /f "tokens=1,* delims=_" %%a in ("%WHATS_LEFT_OF_ALTERNATE_REPEAT_LIST%") do ( + set WHATS_LEFT_OF_ALTERNATE_REPEAT_LIST=%%b + + echo Sample Pass %PASS_NUMBER%, Repetitions Count %%a + call perf_sample.cmd -r %%a %PERF_SAMPLE_OPTIONS% -c %DEST_FILE% > %PBOY_TEMPFILE% + if ERRORLEVEL 1 ( + type %PBOY_TEMPFILE% + echo. 
+ echo {Executing profile run failed.} + goto error_end + ) + +) +if "%WHATS_LEFT_OF_ALTERNATE_REPEAT_LIST%" neq "" goto grab_next_count +goto pass_completed + +:exec_by_repeat_count +set REPEAT_COUNT=%NUM_MINIMUM% + +:skip_to_next_repeat_count +call perf_sample.cmd -r %REPEAT_COUNT% %PERF_SAMPLE_OPTIONS% -c %DEST_FILE% > %PBOY_TEMPFILE% +if ERRORLEVEL 1 ( + type %PBOY_TEMPFILE% + echo. + echo {Executing profile run failed.} + goto error_end +) + +set /a REPEAT_COUNT +=1 +if %REPEAT_COUNT% leq %NUM_MAXIMUM% ( + goto skip_to_next_repeat_count +) + +:pass_completed +set /a PASS_NUMBER +=1 +if %PASS_NUMBER% leq %NUM_COUNT% ( + goto skip_to_next_pass +) + +rem Cleanly exit the script +:good_end + +echo. +set PC_EXIT_CODE=0 +echo {Batch profiling of project succeeded.} +echo CSV file '%DEST_FILE%' written with sample timings. +goto real_end + +:error_end +set PC_EXIT_CODE=1 +echo {Batch profiling of project failed.} + +:real_end +erase /f /q %PBOY_TEMPFILE% > nul 2>&1 +set PBOY_TEMPFILE= +popd +exit /B %PC_EXIT_CODE% diff --git a/publish/coverage.json b/publish/coverage.json index 874bb22f4..e63b4daab 100644 --- a/publish/coverage.json +++ b/publish/coverage.json @@ -2,12 +2,12 @@ "projectName": "pymarkdown", "reportSource": "pytest", "branchLevel": { - "totalMeasured": 7981, - "totalCovered": 7981 + "totalMeasured": 7983, + "totalCovered": 7983 }, "lineLevel": { - "totalMeasured": 21587, - "totalCovered": 21587 + "totalMeasured": 21590, + "totalCovered": 21590 } } diff --git a/publish/test-results.json b/publish/test-results.json index 33e199de7..305289f08 100644 --- a/publish/test-results.json +++ b/publish/test-results.json @@ -1508,7 +1508,7 @@ }, { "name": "test.rules.test_plugin_manager", - "totalTests": 57, + "totalTests": 58, "failedTests": 0, "errorTests": 0, "skippedTests": 4, diff --git a/pymarkdown/plugin_manager/plugin_manager.py b/pymarkdown/plugin_manager/plugin_manager.py index 4b83ab4a2..51f1c9d16 100644 --- a/pymarkdown/plugin_manager/plugin_manager.py +++ 
b/pymarkdown/plugin_manager/plugin_manager.py @@ -47,6 +47,7 @@ class PluginManager: __name_regex = re.compile("^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$") __filter_regex = re.compile("^[a-zA-Z0-9-]+$") __version_regex = re.compile("^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)$") + __disable_rules_wildcard = "*" def __init__( self, @@ -561,11 +562,14 @@ def __handle_command_line_settings( LOGGER.debug( "Disabled on command line: %s", str(command_line_disabled_rules) ) - for next_identifier in plugin_object.plugin_identifiers: - if next_identifier in command_line_disabled_rules: - new_value = False - LOGGER.debug("Plugin is disabled from command line.") - break + if PluginManager.__disable_rules_wildcard in command_line_disabled_rules: + new_value = False + else: + for next_identifier in plugin_object.plugin_identifiers: + if next_identifier in command_line_disabled_rules: + new_value = False + LOGGER.debug("Plugin is disabled from command line.") + break if new_value is None and command_line_enabled_rules: LOGGER.debug("Enabled on command line: %s", str(command_line_enabled_rules)) for next_identifier in plugin_object.plugin_identifiers: diff --git a/run.cmd b/run.cmd index ad86fc69e..ebd1c1812 100644 --- a/run.cmd +++ b/run.cmd @@ -1,5 +1,12 @@ -pipenv run python main.py %1 %2 %3 %4 %5 %6 %7 %8 %9 +@ echo off + +@REM If we are doing a performance run, make sure to use optimized python. +set PYTHON_PERFORMANCE_ARGUMENTS= +if "%PYMARKDOWNLINT__PERFRUN%" == "1" ( + set PYTHON_PERFORMANCE_ARGUMENTS=-OO +) + +pipenv run python %PYTHON_PERFORMANCE_ARGUMENTS% main.py %1 %2 %3 %4 %5 %6 %7 %8 %9 + set RETURN_CODE=%ERRORLEVEL% -echo !RETURN_CODE! 
-echo %RETURN_CODE% exit /b %RETURN_CODE% \ No newline at end of file diff --git a/test/resources/performance/README.md b/test/resources/performance/README.md new file mode 100644 index 000000000..d5b72b6ac --- /dev/null +++ b/test/resources/performance/README.md @@ -0,0 +1 @@ +https://daringfireball.net/projects/markdown/syntax.text --> sample.md \ No newline at end of file diff --git a/test/resources/sample.md b/test/resources/performance/sample.md similarity index 100% rename from test/resources/sample.md rename to test/resources/performance/sample.md diff --git a/test/rules/test_plugin_manager.py b/test/rules/test_plugin_manager.py index b206c40cd..121594d30 100644 --- a/test/rules/test_plugin_manager.py +++ b/test/rules/test_plugin_manager.py @@ -1449,6 +1449,89 @@ def test_markdown_with_plugins_list_only_all(): ) +def test_markdown_with_plugins_list_after_disable_all_rules(): + """ + Test to make sure that `plugins list` lists all plugins after disabling all rules. + """ + + # Arrange + scanner = MarkdownScanner() + supplied_arguments = ["--disable-rules", "*", "plugins", "list"] + + expected_return_code = 0 + expected_output = """ + ID NAMES ENABLED ENABLED VERSION FIX + (DEFAULT) (CURRENT) + + md001 heading-increment, header-incr True False 0.6.0 Yes + ement + md002 first-heading-h1, first-header False False 0.6.0 No + -h1 + md003 heading-style, header-style True False 0.6.0 No + md004 ul-style True False 0.6.0 Yes + md005 list-indent True False 0.5.1 Yes + md006 ul-start-left False False 0.5.1 Yes + md007 ul-indent True False 0.6.0 Yes + md009 no-trailing-spaces True False 0.6.0 Yes + md010 no-hard-tabs True False 0.6.0 Yes + md011 no-reversed-links True False 0.5.0 No + md012 no-multiple-blanks True False 0.7.0 Yes + md013 line-length True False 0.6.0 No + md014 commands-show-output True False 0.5.0 No + md018 no-missing-space-atx True False 0.5.0 No + md019 no-multiple-space-atx True False 0.5.1 Yes + md020 no-missing-space-closed-atx True False 0.5.0 No 
+ md021 no-multiple-space-closed-atx True False 0.5.1 Yes + md022 blanks-around-headings, blanks True False 0.6.0 No + -around-headers + md023 heading-start-left, header-sta True False 0.5.2 Yes + rt-left + md024 no-duplicate-heading, no-dupli True False 0.6.0 No + cate-header + md025 single-title, single-h1 True False 0.6.0 No + md026 no-trailing-punctuation True False 0.6.0 No + md027 no-multiple-space-blockquote True False 0.5.1 Yes + md028 no-blanks-blockquote True False 0.5.0 No + md029 ol-prefix True False 0.6.0 Yes + md030 list-marker-space True False 0.6.0 Yes + md031 blanks-around-fences True False 0.7.0 Yes + md032 blanks-around-lists True False 0.5.0 No + md033 no-inline-html True False 0.6.0 No + md034 no-bare-urls True False 0.5.0 No + md035 hr-style True False 0.6.0 Yes + md036 no-emphasis-as-heading, no-emp True False 0.6.0 No + hasis-as-header + md037 no-space-in-emphasis True False 0.5.1 Yes + md038 no-space-in-code True False 0.5.1 Yes + md039 no-space-in-links True False 0.5.2 Yes + md040 fenced-code-language True False 0.5.0 No + md041 first-line-heading, first-line True False 0.6.0 No + -h1 + md042 no-empty-links True False 0.5.0 No + md043 required-headings, required-he True False 0.6.0 No + aders + md044 proper-names True False 0.7.0 Yes + md045 no-alt-text True False 0.5.0 No + md046 code-block-style True False 0.7.0 Yes + md047 single-trailing-newline True False 0.5.1 Yes + md048 code-fence-style True False 0.6.0 Yes + pml100 disallowed-html False False 0.6.0 No + pml101 list-anchored-indent False False 0.6.0 No + +""" + expected_error = "" + + # Act + execute_results = scanner.invoke_main( + arguments=supplied_arguments, suppress_first_line_heading_rule=False + ) + + # Assert + execute_results.assert_results( + expected_output, expected_error, expected_return_code + ) + + def test_markdown_with_plugins_list_and_filter_by_id_ends_with_nine(): """ Test to make sure that `plugins list` lists all plugins with the specified id filter.