name: promptflow-evals-e2e-test-azure

on:
  schedule:
    - cron: "40 10 * * *"  # 2:40 PST every day
  pull_request:
    paths:
      - src/promptflow-evals/**
      - .github/workflows/promptflow-evals-e2e-test-azure.yml
  workflow_dispatch:

env:
  IS_IN_CI_PIPELINE: "true"
  WORKING_DIRECTORY: ${{ github.workspace }}/src/promptflow-evals
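# WORKING_DIRECTORY points at the promptflow-evals package; the poetry and pytest steps below run from there.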
jobs:
  test:
    strategy:
      matrix:
        os: [ubuntu-latest, windows-latest, macos-13]
        python-version: ['3.8', '3.9', '3.10', '3.11']
      fail-fast: false
    # snok/install-poetry needs this to support Windows
    defaults:
      run:
        shell: bash
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
      - name: set test mode
        # Always run in replay mode for now, until the test resource needed for live mode is figured out
        run: echo "PROMPT_FLOW_TEST_MODE=replay" >> $GITHUB_ENV
        # run: echo "PROMPT_FLOW_TEST_MODE=$(if [[ "${{ github.event_name }}" == "pull_request" ]]; then echo replay; else echo live; fi)" >> $GITHUB_ENV
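        # The commented-out variant above would select replay mode for pull requests and live mode otherwise.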
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - uses: snok/install-poetry@v1
      - name: install test dependency group
        run: poetry install --only test
        working-directory: ${{ env.WORKING_DIRECTORY }}
      - name: install promptflow packages in editable mode
        run: |
          poetry run pip install -e ../promptflow
          poetry run pip install -e ../promptflow-core
          poetry run pip install -e ../promptflow-devkit
          poetry run pip install -e ../promptflow-tracing
          poetry run pip install -e ../promptflow-tools
          poetry run pip install -e ../promptflow-azure
          poetry run pip install -e ../promptflow-evals
        working-directory: ${{ env.WORKING_DIRECTORY }}
      - name: install recording
        run: poetry run pip install -e ../promptflow-recording
        working-directory: ${{ env.WORKING_DIRECTORY }}
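      # Rough local equivalent of the install steps above (a sketch, assuming poetry is available and the
      # current directory is src/promptflow-evals):
      #   poetry install --only test
      #   poetry run pip install -e ../promptflow -e ../promptflow-core -e ../promptflow-devkit \
      #     -e ../promptflow-tracing -e ../promptflow-tools -e ../promptflow-azure \
      #     -e ../promptflow-evals -e ../promptflow-recording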
      - name: generate end-to-end test config from secret
        run: echo '${{ secrets.PF_EVALS_E2E_TEST_CONFIG }}' >> connections.json
        working-directory: ${{ env.WORKING_DIRECTORY }}
      - uses: azure/login@v2
        with:
          creds: ${{ secrets.PF_EVALS_SP_CREDENTIALS }}
          enable-AzPSSession: true
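      # The azuretest suite is expected to pick up connections.json written above and, outside replay mode,
      # the Azure CLI session established by azure/login.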
      - name: run e2e tests
        id: run_e2e_tests_azure
        run: |
          poetry run pytest -m azuretest --cov=promptflow --cov-config=pyproject.toml --cov-report=term --cov-report=html --cov-report=xml
          poetry run python ../../scripts/code_qa/report_to_app_insights.py --activity e2e_tests_azure --junit-xml test-results.xml --git-hub-action-run-id ${{ github.run_id }} --git-hub-workflow ${{ github.workflow }} --git-hub-action ${{ github.action }} --git-branch ${{ github.ref }}
        working-directory: ${{ env.WORKING_DIRECTORY }}
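      # Rough local equivalent (a sketch, assuming connections.json and an Azure login are already in place):
      #   poetry run pytest -m azuretest --cov=promptflow --cov-config=pyproject.toml --cov-report=xml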
      - name: upload coverage report
        uses: actions/upload-artifact@v4
        with:
          name: report-${{ matrix.os }}-py${{ matrix.python-version }}
          path: |
            ${{ env.WORKING_DIRECTORY }}/*.xml
            ${{ env.WORKING_DIRECTORY }}/htmlcov/
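      # Artifacts are named report-<os>-py<version>; the report job below reads coverage.xml from the
      # ubuntu-latest / py3.11 artifact.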
  report:
    needs: test
    runs-on: ubuntu-latest
    permissions:
      checks: write
      pull-requests: write
      contents: read
      issues: read
    steps:
      - uses: actions/download-artifact@v4
        with:
          path: artifacts
      - uses: EnricoMi/publish-unit-test-result-action@v2
        with:
          check_name: promptflow-evals test result
          comment_title: promptflow-evals test result
          files: "artifacts/**/test-results.xml"  # align with `--junit-xml` in pyproject.toml
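      # Coverage summary comes from the ubuntu-latest / py3.11 artifact only; `thresholds: 40 80` supplies the
      # lower and upper percentage thresholds the action uses for badge and health coloring.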
      - uses: irongut/[email protected]
        with:
          filename: "artifacts/report-ubuntu-latest-py3.11/coverage.xml"
          badge: true
          fail_below_min: false
          format: markdown
          hide_complexity: true
          output: both
          thresholds: 40 80