[Tracing] Add trace tests to tracing_test (#2252)
# Description

Add tracing tests to tracing_test.

# All Promptflow Contribution checklist:
- [x] **The pull request does not introduce [breaking changes].**
- [ ] **CHANGELOG is updated for new features, bug fixes or other significant changes.**
- [x] **I have read the [contribution guidelines](../CONTRIBUTING.md).**
- [ ] **Create an issue and link to the pull request to get dedicated review from promptflow team. Learn more: [suggested workflow](../CONTRIBUTING.md#suggested-workflow).**

## General Guidelines and Best Practices
- [x] Title of the pull request is clear and informative.
- [x] There are a small number of commits, each of which has an informative message. This means that previously merged commits do not appear in the history of the PR. For more information on cleaning up the commits in your PR, [see this page](https://github.com/Azure/azure-powershell/blob/master/documentation/development-docs/cleaning-up-commits.md).

### Testing Guidelines
- [x] Pull request includes test coverage for the included changes.

---------

Co-authored-by: Lina Tang <[email protected]>
Showing 6 changed files with 555 additions and 0 deletions.
.github/workflows/promptflow-tracing-e2e-test.yml (152 additions & 0 deletions)
```yaml
name: promptflow-tracing-e2e-test

on:
  schedule:
    - cron: "40 17 * * *"  # Every day starting at 1:40 BJT

  pull_request:
    paths:
      - src/promptflow/**
      - scripts/building/**
      - .github/workflows/promptflow-tracing-e2e-test.yml

  workflow_dispatch:

env:
  packageSetupType: promptflow_with_extra
  testWorkingDirectory: ${{ github.workspace }}/src/promptflow
  PYTHONPATH: ${{ github.workspace }}/src/promptflow
  IS_IN_CI_PIPELINE: "true"

jobs:
  build:
    strategy:
      fail-fast: false
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: Display and Set Environment Variables
        run: |
          env | sort >> $GITHUB_OUTPUT
        id: display_env
        shell: bash -el {0}
      - name: Python Setup - ubuntu-latest - Python Version 3.9
        uses: "./.github/actions/step_create_python_environment"
        with:
          pythonVersion: 3.9
      - name: Build wheel
        uses: "./.github/actions/step_sdk_setup"
        with:
          setupType: promptflow_with_extra
          scriptPath: ${{ env.testWorkingDirectory }}
      - name: Upload Wheel
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: wheel
          path: |
            ${{ github.workspace }}/src/promptflow/dist/*.whl
            ${{ github.workspace }}/src/promptflow-tools/dist/*.whl

  tracing_tests:
    needs: build
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
        pythonVersion: ['3.8', '3.9', '3.10', '3.11']
    runs-on: ${{ matrix.os }}
    steps:
      - name: checkout
        uses: actions/checkout@v4

      - name: Display and Set Environment Variables
        run: |
          env | sort >> $GITHUB_OUTPUT
        id: display_env
        shell: bash -el {0}

      - name: Python Setup - ${{ matrix.os }} - Python Version ${{ matrix.pythonVersion }}
        uses: "./.github/actions/step_create_python_environment"
        with:
          pythonVersion: ${{ matrix.pythonVersion }}

      - name: Download Artifacts
        uses: actions/download-artifact@v3
        with:
          name: wheel
          path: artifacts

      - name: Install wheel
        shell: pwsh
        working-directory: artifacts
        run: |
          Set-PSDebug -Trace 1
          pip install -r ${{ github.workspace }}/src/promptflow/dev_requirements.txt
          gci ./promptflow -Recurse | % {if ($_.Name.Contains('.whl')) {python -m pip install "$($_.FullName)"}}
          gci ./promptflow-tools -Recurse | % {if ($_.Name.Contains('.whl')) {python -m pip install $_.FullName}}
          pip freeze

      - name: Azure Login
        uses: azure/login@v1
        with:
          creds: ${{ secrets.AZURE_CREDENTIALS }}

      - name: Generate Configs
        uses: "./.github/actions/step_generate_configs"
        with:
          targetFolder: ${{ env.testWorkingDirectory }}

      - name: Get number of CPU cores
        uses: SimenB/github-actions-cpu-cores@v1
        id: cpu-cores

      - name: run promptflow-tracing test
        shell: pwsh
        working-directory: ${{ env.testWorkingDirectory }}
        run: |
          python "../../scripts/building/run_coverage_tests.py" `
            -p promptflow `
            -t ${{ github.workspace }}/src/promptflow/tests/tracing_test/e2etests `
            -l eastus `
            -m "e2etest" `
            -n ${{ steps.cpu-cores.outputs.count }} `
            --coverage-config ${{ github.workspace }}/src/promptflow/tests/tracing_test/.coveragerc `
            -o "${{ env.testWorkingDirectory }}/test-results-tracing.xml"

      - name: Upload Test Results
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: Test Results (Python ${{ matrix.pythonVersion }}) (OS ${{ matrix.os }})
          path: |
            ${{ env.testWorkingDirectory }}/*.xml
            ${{ env.testWorkingDirectory }}/htmlcov/

  publish-test-results-tracing-test:
    needs: tracing_tests
    if: always()

    runs-on: ubuntu-latest
    permissions:
      checks: write
      pull-requests: write
      contents: read
      issues: read

    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: Publish Test Results
        uses: "./.github/actions/step_publish_test_results"
        with:
          testActionFileName: promptflow-tracing-e2e-test.yml
          testResultTitle: promptflow-tracing e2e test result
          osVersion: ubuntu-latest
          pythonVersion: 3.9
          coverageThreshold: 40
          context: test/tracing
```
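For local iteration, the `run promptflow-tracing test` step above can in principle be reproduced without the CI wrapper. The sketch below is only an assumed equivalent of `scripts/building/run_coverage_tests.py`: the test path, the `e2etest` marker, and the `.coveragerc` location are taken from the workflow, while running through `pytest.main()` with `pytest-xdist` (`-n`) and `pytest-cov` (`--cov`) is an illustrative guess at what the wrapper script does, not its documented behavior.

```python
# Hypothetical local equivalent of the "run promptflow-tracing test" CI step.
# Assumes pytest-xdist (for -n) and pytest-cov (for --cov*) are installed, as in
# dev_requirements.txt, and that this is run from the repository root.
import multiprocessing

import pytest

if __name__ == "__main__":
    exit_code = pytest.main([
        "src/promptflow/tests/tracing_test/e2etests",                  # -t in the workflow
        "-m", "e2etest",                                               # same marker as CI
        "-n", str(multiprocessing.cpu_count()),                        # mirrors steps.cpu-cores
        "--cov=promptflow",                                            # -p promptflow
        "--cov-config=src/promptflow/tests/tracing_test/.coveragerc",  # --coverage-config
        "--junit-xml=test-results-tracing.xml",                        # -o in the workflow
    ])
    raise SystemExit(exit_code)
```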
Empty file.
Empty file.
src/promptflow/tests/tracing_test/e2etests/simple_functions.py (85 additions & 0 deletions)

```python
import asyncio
from time import sleep
from typing import Union

from openai import AsyncAzureOpenAI, AzureOpenAI

from promptflow.contracts.types import PromptTemplate
from promptflow.tracing import trace


@trace
def is_valid_name(name):
    sleep(0.5)
    return len(name) > 0


@trace
def get_user_name(user_id):
    sleep(0.5)
    user_name = f"User {user_id}"
    if not is_valid_name(user_name):
        raise ValueError(f"Invalid user name: {user_name}")

    return user_name


@trace
def format_greeting(user_name):
    sleep(0.5)
    return f"Hello, {user_name}!"


@trace
def greetings(user_id):
    user_name = get_user_name(user_id)
    greeting = format_greeting(user_name)
    return greeting


@trace
async def dummy_llm(prompt: str, model: str):
    await asyncio.sleep(0.5)
    return "dummy_output"


@trace
async def dummy_llm_tasks_async(prompt: str, models: list):
    tasks = []
    for model in models:
        tasks.append(asyncio.create_task(dummy_llm(prompt, model)))
    done, _ = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
    return [task.result() for task in done]


@trace
def render_prompt_template(prompt: PromptTemplate, **kwargs):
    for k, v in kwargs.items():
        prompt = prompt.replace(f"{{{{{k}}}}}", str(v))
    return prompt


@trace
def openai_chat(connection: dict, prompt: str, stream: bool = False):
    client = AzureOpenAI(**connection)

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt}
    ]
    response = client.chat.completions.create(model="gpt-35-turbo", messages=messages, stream=stream)
    return response.choices[0].message.content or ""


@trace
def openai_completion(connection: dict, prompt: str):
    client = AzureOpenAI(**connection)
    response = client.completions.create(model="text-ada-001", prompt=prompt)
    return response.choices[0].text or ""


@trace
async def openai_embedding_async(connection: dict, input: Union[str, list]):
    client = AsyncAzureOpenAI(**connection)
    resp = await client.embeddings.create(model="text-embedding-ada-002", input=input)
    return resp.data[0].embedding
```
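The helpers above are fixtures for the tracing e2e tests rather than tests themselves. As a rough illustration of how such a test might consume them, the sketch below assumes that `@trace` emits OpenTelemetry spans named after the decorated functions on the globally registered tracer provider; the exporter wiring and the span-name assertions are illustrative assumptions, not the suite's actual fixture code.

```python
# A minimal sketch (not the real test file) of asserting on spans produced by the
# traced helpers above, using an in-memory OpenTelemetry exporter.
# Assumption: promptflow's @trace creates one span per decorated call, named after
# the function, on the global tracer provider.
import asyncio

from opentelemetry import trace as otel_trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

from simple_functions import dummy_llm_tasks_async, greetings

exporter = InMemorySpanExporter()
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(exporter))
otel_trace.set_tracer_provider(provider)


def test_greetings_emits_nested_spans():
    exporter.clear()
    greetings("42")
    names = {span.name for span in exporter.get_finished_spans()}
    # greetings() fans out to get_user_name() -> is_valid_name() and format_greeting(),
    # so each traced helper should appear as its own span.
    assert {"greetings", "get_user_name", "is_valid_name", "format_greeting"} <= names


def test_dummy_llm_tasks_async_emits_one_span_per_model():
    exporter.clear()
    asyncio.run(dummy_llm_tasks_async("hello", ["model_a", "model_b"]))
    names = [span.name for span in exporter.get_finished_spans()]
    assert names.count("dummy_llm") == 2
```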