Merge pull request #685 from janhq/feat/cortex_openai_coverage
feat: CI for OpenAI coverage testing
hiro-v authored Jun 12, 2024
2 parents 9b2c8bb + 352afa4 commit c717df2
Showing 5 changed files with 255 additions and 0 deletions.
125 changes: 125 additions & 0 deletions .github/workflows/cortex-js-openai-coverage.yml
@@ -0,0 +1,125 @@
name: Test - OpenAI API Coverage - Nightly / Manual
on:
  schedule:
    - cron: '0 20 * * 1,2,3' # At 8 PM UTC on Monday, Tuesday, and Wednesday, which is 3 AM UTC+7 on Tuesday, Wednesday, and Thursday
  workflow_dispatch:
    inputs:
      endpoints:
        description: 'comma-separated list of endpoint keys from coverage/endpoint_mapping.json, e.g. /chat/completions,/models'
        required: false
        default: all
        type: string

      branch:
        description: 'Target GitHub branch (default: dev)'
        required: false
        default: dev
        type: string

env:
  OPENAI_API_PYTHON_TAG: v1.23.2
  TARGET_BRANCH: ${{ github.event.inputs.branch }}

jobs:
  openai-python-tests:
    runs-on: [self-hosted, Linux, ubuntu-desktop]
    steps:
      - name: Getting the repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          ref: ${{ env.TARGET_BRANCH }}

      - name: Installing node
        uses: actions/setup-node@v4
        with:
          node-version: 20

      - name: "Cleanup cache"
        continue-on-error: true
        run: |
          npm cache clean --force

      - name: Install dependencies
        run: |
          npm install -g @stoplight/prism-cli

      - run: yarn install && yarn build
        working-directory: ./cortex-js

      - name: Run and extract openapi.json
        run: |
          node cortex-js/dist/src/command.js serve --host 127.0.0.1 --port 4010 > cortex.log & cortex_pid=$!
          sleep 3
          wget --no-verbose -O api.json http://127.0.0.1:4010/api-json
          kill $cortex_pid

      - name: Create python virtual environment and run test
        run: |
          python3 -m venv /tmp/jan
          source /tmp/jan/bin/activate
          # Clone the openai-python repo at the pinned tag
          git clone https://github.com/openai/openai-python.git -b $OPENAI_API_PYTHON_TAG
          cd openai-python
          pip install -r requirements-dev.lock
          pip install pytest-reportportal pytest-html
          # Provide pytest.ini and append the ReportPortal credentials
          mv ../coverage/pytest.ini pytest.ini
          echo "rp_api_key=${{ secrets.RP_API_KEY }}" >> pytest.ini
          echo "rp_endpoint=${{ secrets.RP_ENDPOINT }}" >> pytest.ini
          # Drop in the conftest and the endpoint mapping
          mv ../coverage/conftest.py tests/conftest.py
          mv ../coverage/endpoint_mapping.json tests/endpoints_mapping.json
          prism mock ../api.json > prism.log & prism_pid=$!
          pytest --endpoint "$ENDPOINTS" --reportportal --html=report.html && kill $prism_pid
          deactivate
        env:
          ENDPOINTS: ${{ github.event.inputs.endpoints }}

      - name: Collect RP artifacts
        run: |
          wget --no-verbose -O total-coverage.json "${{ secrets.RP_ENDPOINT }}/api/v1/openai-api-test/widget/27" --header 'authorization: bearer ${{ secrets.RP_API_KEY }}'
          wget --no-verbose -O today-endpoint.json "${{ secrets.RP_ENDPOINT }}/api/v1/openai-api-test/widget/multilevel/32" --header 'authorization: bearer ${{ secrets.RP_API_KEY }}'
          cat total-coverage.json
          current_date=$(date +"%m-%d-%Y")
          cp today-endpoint.json $current_date.json
          cat $current_date.json

      - name: Upload report json files to S3
        run: |
          current_date=$(date +"%m-%d-%Y")
          aws s3api put-object --endpoint-url https://${{ secrets.CLOUDFLARE_ACCOUNT_ID }}.r2.cloudflarestorage.com --bucket ${{ secrets.CLOUDFLARE_R2_BUCKET_NAME }} --key "openai-api-collection-test/$current_date.json" --body "./$current_date.json" --content-type "application/json"
          aws s3api put-object --endpoint-url https://${{ secrets.CLOUDFLARE_ACCOUNT_ID }}.r2.cloudflarestorage.com --bucket ${{ secrets.CLOUDFLARE_R2_BUCKET_NAME }} --key "openai-api-collection-test/total-coverage.json" --body "./total-coverage.json" --content-type "application/json"
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.CLOUDFLARE_R2_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.CLOUDFLARE_R2_SECRET_ACCESS_KEY }}
          AWS_DEFAULT_REGION: auto
          AWS_EC2_METADATA_DISABLED: "true"

      - name: Upload Artifact
        uses: actions/upload-artifact@v2
        with:
          name: report
          path: |
            openai-python/report.html
            openai-python/prism.log
            openai-python/cortex.log
            openai-python/total-coverage.json
            openai-python/today-endpoint.json
            api.json

      - name: Clean up
        if: always()
        run: |
          rm -rf /tmp/jan
          rm -rf openai-python
          rm -rf report.html
          rm -rf report.zip
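
For a manual run, the workflow_dispatch inputs above can be supplied from the GitHub CLI. A minimal sketch, assuming an authenticated gh CLI pointed at this repository; the endpoint values are keys from coverage/endpoint_mapping.json:

# Trigger a run against the dev branch, limited to two endpoints
gh workflow run cortex-js-openai-coverage.yml -f endpoints="/chat/completions,/models" -f branch=dev

# Or run the full suite (the endpoints input defaults to "all")
gh workflow run cortex-js-openai-coverage.yml
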
7 changes: 7 additions & 0 deletions .gitignore
@@ -4,3 +4,10 @@ dist
*.lock
node_modules
.turbo

# CI - Test - Coverage
cortex.log
api.log
prism.log
api.json
openai-python/*
40 changes: 40 additions & 0 deletions coverage/conftest.py
@@ -0,0 +1,40 @@
import json

import pytest


def pytest_addoption(parser):
    # --endpoint takes a comma-separated list of endpoint markers; "all" disables filtering
    parser.addoption(
        "--endpoint", action="store", default="all", help="comma-separated list of endpoints to test (default: all)"
    )


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "endpoint(endpoint): this mark selects the test based on its endpoint"
    )


def pytest_runtest_setup(item):
    # skip tests whose endpoint marker does not match any of the requested endpoints
    getoption = item.config.getoption("--endpoint").split(",")
    if getoption not in (["all"], [""]):
        endpoint_names = [mark.args[0] for mark in item.iter_markers(name="endpoint")]
        if not endpoint_names or not set(getoption).intersection(set(endpoint_names)):
            pytest.skip("Test skipped because endpoint is {!r}".format(endpoint_names))


def pytest_collection_modifyitems(items):
    # load the JSON file
    with open("tests/endpoints_mapping.json", "r") as json_file:
        endpoints_file_mapping = json.load(json_file)

    # create a dictionary to map filenames to endpoints
    filename_to_endpoint = {}
    for endpoint, files in endpoints_file_mapping.items():
        for filename in files:
            filename_to_endpoint[filename] = endpoint

    # add the markers based on the JSON file
    for item in items:
        # map the file name to its endpoint, else fall back to the filename itself
        filename = item.fspath.basename
        marker = filename_to_endpoint.get(filename, filename)
        item.add_marker(pytest.mark.endpoint(marker, filename=filename))
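
The same filter can be exercised locally against the Prism mock, outside CI. A rough sketch, assuming api.json has already been extracted as in the workflow above, the commands run from inside the openai-python checkout, and the dev dependencies plus pytest-html are installed (the ReportPortal flags are omitted here):

# serve the extracted OpenAPI spec on Prism's default port (4010)
prism mock ../api.json > prism.log & prism_pid=$!

# run only the tests mapped to the two requested endpoints
pytest --endpoint "/embeddings,/chat/completions" --html=report.html

kill $prism_pid
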
75 changes: 75 additions & 0 deletions coverage/endpoint_mapping.json
@@ -0,0 +1,75 @@
{
  "/embeddings": [
    "test_embedding.py"
  ],
  "/audio/translations": [
    "test_translations.py"
  ],
  "/audio/transcriptions": [
    "test_transcriptions.py"
  ],
  "/moderations": [
    "test_moderations.py"
  ],
  "/images/generations": [
    "test_images.py"
  ],
  "/batches": [
    "test_batches.py"
  ],
  "/vector_stores": [
    "test_vector_stores.py"
  ],
  "/fine_tuning/jobs": [
    "test_jobs.py",
    "test_checkpoints.py"
  ],
  "/assistants": [
    "test_assistants.py"
  ],
  "/threads/{thread_id}/runs": [
    "test_runs.py"
  ],
  "/threads/{thread_id}/runs/{run_id}/steps": [
    "test_steps.py"
  ],
  "/vector_stores/{vector_store_id}/file_batches": [
    "test_file_batches.py"
  ],
  "/messages": [
    "test_messages.py"
  ],
  "/vector_stores/{vector_store_id}/files": [
    "test_files.py"
  ],
  "/chat/completions": [
    "test_completions.py"
  ],
  "/threads": [
    "test_threads.py"
  ],
  "/audio/speech": [
    "test_speech.py"
  ],
  "/models": [
    "test_models.py"
  ],
  "native_client_sdk_only": [
    "test_streaming.py"
  ],
  "utils": [
    "test_response.py",
    "test_client.py",
    "test_extract_files.py",
    "test_typing.py",
    "test_legacy_response.py",
    "test_module_client.py",
    "test_old_api.py",
    "test_proxy.py",
    "test_qs.py",
    "test_required_args.py",
    "test_transform.py",
    "test_azure.py",
    "test_deepcopy.py"
  ]
}
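
Each key above becomes the endpoint marker that conftest.py attaches to the listed test files, so a single --endpoint value can select several files, while a file absent from the mapping falls back to its own filename as its marker. A brief sketch of that selection behaviour (the unmapped filename below is hypothetical):

# runs both test_jobs.py and test_checkpoints.py, which share the /fine_tuning/jobs key
pytest --endpoint "/fine_tuning/jobs"

# a file not listed in the mapping is selected by its own filename (hypothetical name)
pytest --endpoint "test_unmapped_example.py"
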
8 changes: 8 additions & 0 deletions coverage/pytest.ini
@@ -0,0 +1,8 @@
[pytest]
rp_project = openai-api-test
rp_launch = OpenAI Collection Test
rp_launch_description = Full collection to ensure compatibility with OpenAI API
rp_launch_attributes = 'CI'
filterwarnings = ignore::pytest.PytestUnknownMarkWarning
log_format = %(asctime)s %(levelname)s %(message)s
log_date_format = %Y-%m-%d %H:%M:%S
