Build(deps-dev): Bump friendsofphp/php-cs-fixer from 3.34.0 to 3.34.1 #2279
Workflow file for this run
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ** Dynamic analysis **
#
#  Dynamic analysis requires the Share community platform to run during evaluation. Currently, Catroweb's dynamic tests
#  are mainly UI tests and a few unit tests. Hence, the tests take multiple hours. To reduce the run time, the tests
#  are executed in multiple independent jobs. However, all jobs use the same Catroweb Docker image to reduce build time.
#
name: 'Dynamic analysis'
# Run on every creation, update and merge of a pull request.
# However, prevent checks from being initiated twice if the pull request comes from a branch on the same repository (e.g. Dependabot).
on:
  pull_request:
    branches:
      - '**'       # all branches
      - '!master'  # excludes master to prevent double test runs during release prep.
jobs:
  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Build:
  #
  #   - In order to save computation time, the "app.catroweb" image is only built once during this build phase.
  #     Other jobs can download this image to reduce their build time. With several jobs plus the matrix build, the total
  #     computation time for this workflow can be reduced significantly. This is important since we do not have unlimited
  #     resources/machines to run the jobs.
  #
  build:
    name: Build catroweb image
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Build catroweb app image
        run: |
          cd docker
          docker-compose -f docker-compose.test.yaml build app.catroweb
          docker save app.catroweb > catroweb-image.tar
      - name: Upload app.catroweb image
        uses: actions/upload-artifact@v3
        with:
          name: catroweb-image
          path: docker/catroweb-image.tar
  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # PhpUnit:
  #
  #   - The tests are executed inside our Docker setup since we have many integration tests which access the database, etc.
  #     One might consider strictly separating integration and unit tests. Unit tests could then be executed with
  #     composer scripts alone, reducing the runtime to a few seconds: no build needed, and dependencies can be easily
  #     cached (see the sketch below).
  #
  #   - A code coverage report is pushed to the artifacts where it can be downloaded directly on GitHub.
  #     Keep in mind the report does not include the tests written for Behat.
  #
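  # A minimal, hypothetical sketch of the composer-scripts-only idea mentioned above. The "test:unit" script name and
  # the separate "unit" PhpUnit test suite are assumptions, not part of this repository:
  #
  #   # composer.json (excerpt):  "scripts": { "test:unit": "bin/phpunit --testsuite unit" }
  #
  #   - name: Unit tests without Docker
  #     run: |
  #       composer install --no-progress --prefer-dist
  #       composer test:unit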
  tests_phpunit:
    name: PhpUnit
    needs: build
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Pull the latest images to build (Faster than caching)
        run: |
          cd docker
          docker-compose -f docker-compose.test.yaml pull
      - name: Download app.catroweb image
        uses: actions/download-artifact@v3
        with:
          name: catroweb-image
          path: docker
      # Artificial wait required for the elastic services to be fully set up
      - name: Build
        run: |
          cd docker
          docker load < catroweb-image.tar
          docker-compose -f docker-compose.test.yaml up -d
          sleep 60
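      # A sturdier alternative (sketch only, not used here): poll the service until it responds instead of a fixed
      # sleep. The host name, port, and the availability of curl inside the container are assumptions that would need
      # to match docker-compose.test.yaml:
      #   until docker exec app.catroweb curl -sf http://elasticsearch:9200 >/dev/null; do sleep 5; done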
      - name: PhpUnit tests
        run: docker exec app.catroweb bin/phpunit --coverage-html tests/TestReports/CoverageReports/PhpUnit --coverage-clover=tests/TestReports/CoverageReports/PhpUnit/coverage.xml
      - name: Upload code coverage report
        uses: actions/upload-artifact@v3
        if: always()
        with:
          name: PhpUnitTestReport
          path: tests/TestReports/CoverageReports/PhpUnit
      - uses: codecov/codecov-action@v3
        with:
          # token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
          file: tests/TestReports/CoverageReports/PhpUnit/coverage.xml
          flags: phpunit # optional
          name: codecov-umbrella # optional
  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Behat:
  #
  #   - This job runs all Behat suites in parallel using a matrix strategy. This is done since integration tests which
  #     interact with a browser and the GUI are extremely slow. With a total run time of over an hour, this approach
  #     reduces the run time drastically. The disadvantage: we can't easily create a coverage report for the Behat
  #     scenarios. That is considered bad practice anyway, since Behat is intended to deliver scenario automation in BDD.
  #
  #   - Behat and especially UI tests using Mink tend to be flaky.
  #     A test will only be marked as failed if it fails more than 3 times in a row.
  #     Flaky tests should be reduced to a minimum in the codebase!
  #
  #   - Behat only reruns failed tests - not pending/missing tests or those with exceptions!
  #     A pending/missing test will NOT result in a failed pipeline!
  #     This is the reason why the explicit checks of the log file had to be added.
  #
  #   - To ease debugging, besides a log file, screenshots of failing tests are uploaded as artifacts.
  #
  #   Notes:
  #     - Check the behat.yaml when changing / creating new suites
  #     - Suites will finish their work even if another suite fails (fail-fast: false)
  #
  tests_behat:
    name: Behat
    needs: build
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        testSuite:
          - api-authentication
          - api-mediapackage
          - api-projects
          - api-user
          - api-notifications
          - api-utility
          - api-translation
          - api-tag
          - api-deprecated-general
          - api-deprecated-project
          - api-deprecated-profile
          - api-deprecated-upload
          - web-achievements
          - web-admin
          - web-apk-generation
          - web-authentication
          - web-code-statistics
          - web-code-view
          - web-comments
          - web-general
          - web-media-library
          - web-notifications
          - web-profile
          - web-project
          - web-project-details
          - web-project-list
          - web-reactions
          - web-recommendations
          - web-remix-system
          - web-scratch-integration
          - web-search
          - web-studio
          - web-top-bar
          - web-translation
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Pull the latest images to build (Faster than caching)
        run: |
          cd docker
          docker-compose -f docker-compose.test.yaml pull
      - name: Download app.catroweb image
        uses: actions/download-artifact@v3
        with:
          name: catroweb-image
          path: docker
      - name: Build
        run: |
          cd docker
          docker load < catroweb-image.tar
          docker-compose -f docker-compose.test.yaml up -d
          sleep 30
      # Test Run
      - name: Behat ${{ matrix.testSuite }} tests
        id: test-run
        continue-on-error: true
        # - The output of the tests is piped to stdout and into the log file.
        # - A return code != 0 stops the execution of further commands in the pipeline.
        #   "tee" always returns 0, even if the Behat test fails. Therefore we need to exit with the first entry of
        #   the pipe status, which contains the correct exit code.
        # - If the tests pass we can set the output status to success, which allows the reruns to be executed only when needed.
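        # Illustrative only (not executed): how the PIPESTATUS trick preserves the real exit code of the test command:
        #   false | tee out.log        # the pipeline's exit status is tee's, i.e. 0
        #   ( exit ${PIPESTATUS[0]} )  # re-raises the exit code of "false" (1), failing the step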
        run: |
          echo ::set-output name=status::failure
          docker exec app.catroweb chmod -R 777 var
          docker exec app.catroweb bin/behat -s ${{ matrix.testSuite }} -f pretty \
            |& tee tests/TestReports/Behat/${{ matrix.testSuite }}.log ; \
            ( exit ${PIPESTATUS[0]} )
          echo ::set-output name=status::success
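      # Note: the "::set-output" workflow command used above and in the reruns is deprecated; on newer runners the
      # equivalent would be writing to the step's output file instead (a sketch, not applied here):
      #   echo "status=success" >> "$GITHUB_OUTPUT"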
      # Missing steps are not rerun by Behat; without this check they would be lost in the process.
      # We must explicitly fail the pipeline if the log contains undefined steps.
      - name: Check that suite has NO missing steps
        if: always()
        id: missing-check
        run: |
          if grep -q 'has missing steps. Define them with these snippets:' tests/TestReports/Behat/${{ matrix.testSuite }}.log; then
            cat tests/TestReports/Behat/${{ matrix.testSuite }}.log;
            false;
          fi
      # Pending steps are not rerun by Behat; without this check they would be lost in the process.
      # We must explicitly fail the pipeline if the log contains pending steps.
      - name: Check that suite has NO pending steps
        if: always()
        id: pending-check
        run: |
          if grep -q 'pending)' tests/TestReports/Behat/${{ matrix.testSuite }}.log; then
            cat tests/TestReports/Behat/${{ matrix.testSuite }}.log;
            false;
          fi
      # Chrome exceptions are problems that can't be fixed with a rerun. However, we can try to run the whole suite one more time.
      - name: Check that suite has NO chrome exceptions
        if: always()
        id: chrome-exception-check
        run: |
          if grep -q '\[DMore\\ChromeDriver\\StreamReadException\]' tests/TestReports/Behat/${{ matrix.testSuite }}.log; then
            cat tests/TestReports/Behat/${{ matrix.testSuite }}.log;
            docker exec app.catroweb bin/behat -s ${{ matrix.testSuite }} -f pretty;
          fi
      - uses: codecov/codecov-action@v3
        with:
          # token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
          file: tests/TestReports/CoverageReports/Behat/coverage.xml
          flags: behat # optional
          name: codecov-umbrella # optional
          # fail_ci_if_error: true # optional (default = false)
      # Rerun #1
      - name: 1. Rerun for Behat ${{ matrix.testSuite }} tests
        if: steps.test-run.outputs.status != 'success'
        id: test-rerun-1
        continue-on-error: true
        run: |
          echo ::set-output name=status::failure
          docker exec app.catroweb chmod -R 777 var
          docker exec app.catroweb bin/behat -s ${{ matrix.testSuite }} --rerun
          echo ::set-output name=status::success
      # Rerun #2
      - name: 2. Rerun for Behat ${{ matrix.testSuite }} tests
        if: steps.test-run.outputs.status != 'success' && steps.test-rerun-1.outputs.status != 'success'
        id: test-rerun-2
        continue-on-error: true
        run: |
          echo ::set-output name=status::failure
          docker exec app.catroweb bin/behat -s ${{ matrix.testSuite }} --rerun
          echo ::set-output name=status::success
      # Rerun #3
      - name: 3. Rerun for Behat ${{ matrix.testSuite }} tests
        if: steps.test-run.outputs.status != 'success' && steps.test-rerun-1.outputs.status != 'success' && steps.test-rerun-2.outputs.status != 'success'
        id: test-rerun-3
        run: |
          docker exec app.catroweb bin/behat -f pretty -s ${{ matrix.testSuite }} --rerun
      ## Failure debugging
      - if: failure()
        run: |
          docker ps -a
          echo "--- App ---"
          docker logs app.catroweb
          echo "--- DB ---"
          docker logs db.catroweb.test
          echo "--- Chrome ---"
          docker logs chrome.catroweb
      - uses: actions/upload-artifact@v3
        if: failure()
        with:
          name: screenshots_${{ matrix.testSuite }}
          path: tests/TestReports/TestScreenshots
      - uses: actions/upload-artifact@v3
        if: failure()
        with:
          name: logs_${{ matrix.testSuite }}
          path: var/log/test