Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Tracing] Add trace tests to tracing_test #2252

Merged
merged 4 commits into from
Mar 11, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
152 changes: 152 additions & 0 deletions .github/workflows/promptflow-tracing-e2e-test.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,152 @@
name: promptflow-tracing-e2e-test

on:
  schedule:
    # 17:40 UTC == 01:40 BJT (UTC+8) the following day.
    - cron: "40 17 * * *"
  pull_request:
    paths:
      - src/promptflow/**
      - scripts/building/**
      - .github/workflows/promptflow-tracing-e2e-test.yml
  workflow_dispatch:

env:
  packageSetupType: promptflow_with_extra
  testWorkingDirectory: ${{ github.workspace }}/src/promptflow
  PYTHONPATH: ${{ github.workspace }}/src/promptflow
  IS_IN_CI_PIPELINE: "true"

jobs:
  # Build the promptflow wheels once, then fan out to the test matrix.
  build:
    strategy:
      fail-fast: false
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: Display and Set Environment Variables
        run: |
          env | sort >> $GITHUB_OUTPUT
        id: display_env
        shell: bash -el {0}
      - name: Python Setup - ubuntu-latest - Python Version 3.9
        uses: "./.github/actions/step_create_python_environment"
        with:
          # Quoted: an unquoted version is a YAML float (e.g. 3.10 -> 3.1).
          pythonVersion: "3.9"
      - name: Build wheel
        uses: "./.github/actions/step_sdk_setup"
        with:
          setupType: promptflow_with_extra
          scriptPath: ${{ env.testWorkingDirectory }}
      - name: Upload Wheel
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: wheel
          path: |
            ${{ github.workspace }}/src/promptflow/dist/*.whl
            ${{ github.workspace }}/src/promptflow-tools/dist/*.whl

  # Run the tracing e2e tests against the built wheels on every supported Python.
  tracing_tests:
    needs: build
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
        pythonVersion: ["3.8", "3.9", "3.10", "3.11"]
    runs-on: ${{ matrix.os }}
    steps:
      - name: checkout
        uses: actions/checkout@v4

      - name: Display and Set Environment Variables
        run: |
          env | sort >> $GITHUB_OUTPUT
        id: display_env
        shell: bash -el {0}

      - name: Python Setup - ${{ matrix.os }} - Python Version ${{ matrix.pythonVersion }}
        uses: "./.github/actions/step_create_python_environment"
        with:
          pythonVersion: ${{ matrix.pythonVersion }}

      - name: Download Artifacts
        uses: actions/download-artifact@v3
        with:
          name: wheel
          path: artifacts

      - name: Install wheel
        shell: pwsh
        working-directory: artifacts
        run: |
          Set-PSDebug -Trace 1
          pip install -r ${{ github.workspace }}/src/promptflow/dev_requirements.txt
          gci ./promptflow -Recurse | % {if ($_.Name.Contains('.whl')) {python -m pip install "$($_.FullName)"}}
          gci ./promptflow-tools -Recurse | % {if ($_.Name.Contains('.whl')) {python -m pip install $_.FullName}}
          pip freeze

      - name: Azure Login
        uses: azure/login@v1
        with:
          creds: ${{ secrets.AZURE_CREDENTIALS }}

      - name: Generate Configs
        uses: "./.github/actions/step_generate_configs"
        with:
          targetFolder: ${{ env.testWorkingDirectory }}

      - name: Get number of CPU cores
        uses: SimenB/github-actions-cpu-cores@v1
        id: cpu-cores

      - name: run promptflow-tracing test
        shell: pwsh
        working-directory: ${{ env.testWorkingDirectory }}
        run: |
          python "../../scripts/building/run_coverage_tests.py" `
            -p promptflow `
            -t ${{ github.workspace }}/src/promptflow/tests/tracing_test/e2etests `
            -l eastus `
            -m "e2etest" `
            -n ${{ steps.cpu-cores.outputs.count }} `
            --coverage-config ${{ github.workspace }}/src/promptflow/tests/tracing_test/.coveragerc `
            -o "${{ env.testWorkingDirectory }}/test-results-tracing.xml"

      - name: Upload Test Results
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: Test Results (Python ${{ matrix.pythonVersion }}) (OS ${{ matrix.os }})
          path: |
            ${{ env.testWorkingDirectory }}/*.xml
            ${{ env.testWorkingDirectory }}/htmlcov/

  # Aggregate and publish the matrix test results back to the PR.
  publish-test-results-tracing-test:
    needs: tracing_tests
    if: always()

    runs-on: ubuntu-latest
    permissions:
      checks: write
      pull-requests: write
      contents: read
      issues: read

    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: Publish Test Results
        uses: "./.github/actions/step_publish_test_results"
        with:
          testActionFileName: promptflow-tracing-e2e-test.yml
          testResultTitle: promptflow-tracing e2e test result
          osVersion: ubuntu-latest
          pythonVersion: "3.9"
          coverageThreshold: 40
          context: test/tracing
Empty file.
Empty file.
85 changes: 85 additions & 0 deletions src/promptflow/tests/tracing_test/e2etests/simple_functions.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
import asyncio
from time import sleep
from typing import Union

from openai import AsyncAzureOpenAI, AzureOpenAI

from promptflow.contracts.types import PromptTemplate
from promptflow.tracing import trace


@trace
def is_valid_name(name):
    """Return True when ``name`` is non-empty, after a short simulated delay."""
    sleep(0.5)
    return len(name) != 0


@trace
def get_user_name(user_id):
    """Build a display name from ``user_id``.

    Raises:
        ValueError: if the generated name fails validation.
    """
    sleep(0.5)
    candidate = f"User {user_id}"
    if is_valid_name(candidate):
        return candidate
    raise ValueError(f"Invalid user name: {candidate}")


@trace
def format_greeting(user_name):
    """Return a greeting string for ``user_name`` after a short simulated delay."""
    sleep(0.5)
    greeting_text = f"Hello, {user_name}!"
    return greeting_text


@trace
def greetings(user_id):
    """Resolve the user's name and return a formatted greeting for them."""
    return format_greeting(get_user_name(user_id))


@trace
async def dummy_llm(prompt: str, model: str):
    """Stand-in for an LLM call: sleep briefly, then return a fixed output.

    Bug fix: the original called ``asyncio.sleep(0.5)`` without ``await``,
    which merely creates and discards the coroutine (no delay, and a
    "coroutine was never awaited" RuntimeWarning).
    """
    await asyncio.sleep(0.5)
    return "dummy_output"


@trace
async def dummy_llm_tasks_async(prompt: str, models: list):
    """Run ``dummy_llm`` concurrently for each model and return the results.

    Uses ``asyncio.gather`` so results come back in the same order as
    ``models`` (the original ``asyncio.wait`` returned a *set* of done tasks,
    so result order was nondeterministic) and exceptions propagate eagerly.
    """
    tasks = [asyncio.create_task(dummy_llm(prompt, model)) for model in models]
    return list(await asyncio.gather(*tasks))


@trace
def render_prompt_template(prompt: PromptTemplate, **kwargs):
    """Substitute each ``{{key}}`` placeholder in ``prompt`` with ``str(value)``."""
    rendered = prompt
    for key, value in kwargs.items():
        placeholder = "{{" + key + "}}"
        rendered = rendered.replace(placeholder, str(value))
    return rendered


@trace
def openai_chat(connection: dict, prompt: str, stream: bool = False):
    """Send ``prompt`` as a chat completion and return the assistant's reply text.

    Bug fix: with ``stream=True`` the SDK returns an iterator of chunks, so
    the original ``response.choices[0].message`` raised AttributeError.
    Streaming responses are now consumed chunk by chunk via ``delta.content``.
    """
    client = AzureOpenAI(**connection)

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt}
    ]
    response = client.chat.completions.create(model="gpt-35-turbo", messages=messages, stream=stream)
    if stream:
        # Streaming chunks carry incremental text in choices[0].delta.content;
        # some chunks (e.g. the final one) may have empty choices or None content.
        return "".join(
            chunk.choices[0].delta.content or ""
            for chunk in response
            if chunk.choices
        )
    return response.choices[0].message.content or ""


@trace
def openai_completion(connection: dict, prompt: str):
    """Run a legacy text completion for ``prompt`` and return the generated text."""
    client = AzureOpenAI(**connection)
    result = client.completions.create(model="text-ada-001", prompt=prompt)
    first_choice = result.choices[0]
    return first_choice.text or ""


@trace
async def openai_embedding_async(connection: dict, input: Union[str, list]):
    """Request an embedding for ``input`` and return the first result's vector."""
    client = AsyncAzureOpenAI(**connection)
    response = await client.embeddings.create(model="text-embedding-ada-002", input=input)
    first_item = response.data[0]
    return first_item.embedding
Loading
Loading