Skip to content

Commit

Permalink
Merge branch 'main' into prometheus-for-api
Browse files Browse the repository at this point in the history
  • Loading branch information
gphorvath authored Sep 30, 2024
2 parents fb1e2f2 + e2ce0f4 commit 24a6938
Show file tree
Hide file tree
Showing 23 changed files with 148 additions and 107 deletions.
11 changes: 11 additions & 0 deletions src/leapfrogai_ui/src/app.css
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,17 @@
scrollbar-color: #4b5563 #1f2937;
}

/* Override TailwindCSS default Preflight styles for lists in messages */
#message-content-container {
ul {
margin: revert;
padding: revert;
li {
list-style: square;
}
}
}

/*TODO - can we get rid of some of these?*/
@layer utilities {
.content {
Expand Down
18 changes: 10 additions & 8 deletions src/leapfrogai_ui/src/lib/components/Message.svelte
Original file line number Diff line number Diff line change
Expand Up @@ -178,14 +178,16 @@
{#if message.role !== 'user' && !messageText}
<MessagePendingSkeleton size="sm" class="mt-4" darkColor="bg-gray-500" />
{:else}
<!--eslint-disable-next-line svelte/no-at-html-tags -- We use DOMPurify to sanitize the code snippet-->
{@html DOMPurify.sanitize(md.render(messageText), {
CUSTOM_ELEMENT_HANDLING: {
tagNameCheck: /^code-block$/,
attributeNameCheck: /^(code|language)$/,
allowCustomizedBuiltInElements: false
}
})}
<div id="message-content-container">
<!--eslint-disable-next-line svelte/no-at-html-tags -- We use DOMPurify to sanitize the code snippet-->
{@html DOMPurify.sanitize(md.render(messageText), {
CUSTOM_ELEMENT_HANDLING: {
tagNameCheck: /^code-block$/,
attributeNameCheck: /^(code|language)$/,
allowCustomizedBuiltInElements: false
}
})}
</div>
<div class="flex flex-col items-start">
{#each getCitations(message, $page.data.files) as { component: Component, props }}
<svelte:component this={Component} {...props} />
Expand Down
3 changes: 3 additions & 0 deletions tests/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -74,3 +74,6 @@ test-api-unit:

test-load:
python -m locust -f $$(pwd)/tests/load/loadtest.py --web-port 8089

test-conformance:
PYTHONPATH=$$(pwd) pytest -vv -s tests/conformance
20 changes: 20 additions & 0 deletions tests/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -107,3 +107,23 @@ python -m pytest tests/e2e/test_llama.py -v
# Cleanup after yourself
k3d cluster delete uds
```

## Conformance Testing

We include a set of conformance tests to verify our spec against OpenAI to guarantee interoperability with tools that support OpenAI's API (MatterMost, Continue.dev, etc.) and SDKs (Vercel, Azure, etc.). To run these tests, the following environment variables need to be set:

```bash
LEAPFROGAI_API_KEY="<api key>" # this can be created via the LeapfrogAI UI or Supabase
LEAPFROGAI_API_URL="https://leapfrogai-api.uds.dev/openai/v1" # This is the default when using a UDS-bundle locally
LEAPFROGAI_MODEL="vllm" # or whatever model you have installed
OPENAI_API_KEY="<api key>" # you need a funded OpenAI account for this
OPENAI_MODEL="gpt-4o-mini" # or whatever model you prefer
```

To run the tests, from the root directory of the LeapfrogAI project:

```bash
make install # to ensure all python dependencies are installed

make test-conformance # runs the entire suite
```
File renamed without changes.
6 changes: 3 additions & 3 deletions tests/conformance/test_completions.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import pytest
from openai.types.beta.threads import Run, Message, TextContentBlock, Text

from .utils import client_config_factory
from tests.utils.client import client_config_factory


def make_mock_message_object(role, message_text):
Expand Down Expand Up @@ -37,12 +37,12 @@ def make_mock_message_simple(role, message_text):
def test_run_completion(client_name, test_messages):
# Setup
config = client_config_factory(client_name)
client = config["client"]
client = config.client

assistant = client.beta.assistants.create(
name="Test Assistant",
instructions="You must provide a response based on the attached files.",
model=config["model"],
model=config.model,
)
thread = client.beta.threads.create()

Expand Down
10 changes: 6 additions & 4 deletions tests/conformance/test_files.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,16 +6,17 @@
)
from openai.types.beta.vector_stores.vector_store_file import VectorStoreFile

from ..utils.client import client_config_factory, text_file_path
from tests.utils.client import client_config_factory
from tests.utils.data_path import data_path, TXT_DATA_FILE


@pytest.mark.parametrize("client_name", ["openai", "leapfrogai"])
def test_file_upload(client_name):
config = client_config_factory(client_name)
client = config.client # shorthand
client = config.client

vector_store = client.beta.vector_stores.create(name="Test data")
with open(text_file_path(), "rb") as file:
with open(data_path(TXT_DATA_FILE), "rb") as file:
vector_store_file = client.beta.vector_stores.files.upload(
vector_store_id=vector_store.id, file=file
)
Expand All @@ -24,13 +25,14 @@ def test_file_upload(client_name):
assert isinstance(vector_store_file, VectorStoreFile)


@pytest.mark.xfail(reason="File Batch Upload is not yet implemented in LeapfrogAI")
@pytest.mark.parametrize("client_name", ["openai", "leapfrogai"])
def test_file_delete(client_name):
config = client_config_factory(client_name)
client = config.client

vector_store = client.beta.vector_stores.create(name="Test data")
with open(text_file_path(), "rb") as file:
with open(data_path(TXT_DATA_FILE), "rb") as file:
vector_store_file = client.beta.vector_stores.files.upload(
vector_store_id=vector_store.id, file=file
)
Expand Down
2 changes: 1 addition & 1 deletion tests/conformance/test_messages.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

from openai.types.beta.threads.message import Message

from ..utils.client import client_config_factory
from tests.utils.client import client_config_factory


@pytest.mark.parametrize("client_name", ["openai", "leapfrogai"])
Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import pytest
from openai.types.beta.threads import Run, Message, TextContentBlock, Text

from .utils import client_config_factory
from tests.utils.client import client_config_factory


def make_mock_message_object(role, message_text):
Expand Down Expand Up @@ -37,12 +37,12 @@ def make_mock_message_simple(role, message_text):
def test_run_create(client_name, test_messages):
# Setup
config = client_config_factory(client_name)
client = config["client"]
client = config.client

assistant = client.beta.assistants.create(
name="Test Assistant",
instructions="You must provide a response based on the attached files.",
model=config["model"],
model=config.model,
)
thread = client.beta.threads.create()

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from openai.types.beta.thread import Thread
from openai.types.beta.threads import Message, TextContentBlock, Text

from ..utils.client import client_config_factory
from tests.utils.client import client_config_factory


def make_mock_message_object(role, message_text):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,13 @@
from openai.types.beta.threads.message import Message
import re

from ..utils.client import client_config_factory, text_file_path
from tests.utils.client import client_config_factory
from tests.utils.data_path import data_path, TXT_DATA_FILE


def make_vector_store_with_file(client):
vector_store = client.beta.vector_stores.create(name="Test data")
with open(text_file_path(), "rb") as file:
with open(data_path(TXT_DATA_FILE), "rb") as file:
client.beta.vector_stores.files.upload(
vector_store_id=vector_store.id, file=file
)
Expand Down Expand Up @@ -46,7 +47,7 @@ def validate_annotation_format(annotation):
@pytest.mark.parametrize("client_name", ["openai", "leapfrogai"])
def test_thread_file_annotations(client_name):
config = client_config_factory(client_name)
client = config.client # shorthand
client = config.client

vector_store = make_vector_store_with_file(client)
assistant = make_test_assistant(client, config.model, vector_store.id)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,13 @@
from openai.types.beta.vector_store import VectorStore
from openai.types.beta.vector_store_deleted import VectorStoreDeleted

from ..utils.client import client_config_factory
from tests.utils.client import client_config_factory


@pytest.mark.parametrize("client_name", ["openai", "leapfrogai"])
def test_vector_store_create(client_name):
config = client_config_factory(client_name)
client = config.client # shorthand
client = config.client

vector_store = client.beta.vector_stores.create(name="Test data")

Expand All @@ -19,7 +19,7 @@ def test_vector_store_create(client_name):
@pytest.mark.parametrize("client_name", ["openai", "leapfrogai"])
def test_vector_store_list(client_name):
config = client_config_factory(client_name)
client = config.client # shorthand
client = config.client

client.beta.vector_stores.create(name="Test data")

Expand Down
5 changes: 3 additions & 2 deletions tests/e2e/test_llm_generation.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
import os
from pathlib import Path
from typing import Iterable
import warnings

import pytest
from openai import InternalServerError, OpenAI
from openai.types.chat import ChatCompletionMessageParam
from tests.utils.data_path import data_path, WAV_FILE

DEFAULT_LEAPFROGAI_MODEL = "llama-cpp-python"

Expand Down Expand Up @@ -72,7 +72,8 @@ def test_embeddings(client: OpenAI, model_name: str):
def test_transcriptions(client: OpenAI, model_name: str):
with pytest.raises(InternalServerError) as excinfo:
client.audio.transcriptions.create(
model=model_name, file=Path("tests/data/0min12sec.wav")
model=model_name,
file=data_path(WAV_FILE),
)

assert str(excinfo.value) == "Internal Server Error"
6 changes: 3 additions & 3 deletions tests/e2e/test_text_embeddings.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
from pathlib import Path

import pytest
from openai import InternalServerError, OpenAI
from tests.utils.data_path import data_path, WAV_FILE

model_name = "text-embeddings"

Expand Down Expand Up @@ -41,6 +40,7 @@ def test_embeddings(client: OpenAI):
def test_transcriptions(client: OpenAI):
with pytest.raises(InternalServerError) as excinfo:
client.audio.transcriptions.create(
model=model_name, file=Path("tests/data/0min12sec.wav")
model=model_name,
file=data_path(WAV_FILE),
)
assert str(excinfo.value) == "Internal Server Error"
7 changes: 4 additions & 3 deletions tests/e2e/test_whisper.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import pytest
from openai import InternalServerError, OpenAI
import unicodedata
from tests.utils.data_path import data_path, WAV_FILE, WAV_FILE_ARABIC


def test_completions(client: OpenAI):
Expand Down Expand Up @@ -38,7 +39,7 @@ def test_embeddings(client: OpenAI):
def test_transcriptions(client: OpenAI):
transcription = client.audio.transcriptions.create(
model="whisper",
file=Path("tests/data/0min12sec.wav"),
file=data_path(WAV_FILE),
language="en",
prompt="This is a test transcription.",
response_format="json",
Expand All @@ -53,7 +54,7 @@ def test_transcriptions(client: OpenAI):
def test_translations(client: OpenAI):
translation = client.audio.translations.create(
model="whisper",
file=Path("tests/data/arabic-audio.wav"),
file=data_path(WAV_FILE_ARABIC),
prompt="This is a test translation.",
response_format="json",
temperature=0.0,
Expand All @@ -79,7 +80,7 @@ def test_non_english_transcription(client: OpenAI):
# Arabic transcription
arabic_transcription = client.audio.transcriptions.create(
model="whisper",
file=Path("tests/data/arabic-audio.wav"),
file=data_path(WAV_FILE_ARABIC),
response_format="json",
temperature=0.5,
timestamp_granularities=["word", "segment"],
Expand Down
7 changes: 3 additions & 4 deletions tests/integration/api/test_assistants.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
CreateAssistantRequest,
ModifyAssistantRequest,
)
from tests.utils.data_path import data_path, TXT_FILE

INSTRUCTOR_XL_EMBEDDING_SIZE: int = 768

Expand Down Expand Up @@ -92,9 +93,7 @@ class MissingEnvironmentVariable(Exception):
def read_testfile():
"""Read the test file content."""

with open(
os.path.dirname(__file__) + "/../../../tests/data/test.txt", "rb"
) as testfile:
with open(data_path(TXT_FILE), "rb") as testfile:
testfile_content = testfile.read()

return testfile_content
Expand All @@ -109,7 +108,7 @@ def create_file(read_testfile): # pylint: disable=redefined-outer-name, unused-

file_response = files_client.post(
"/openai/v1/files",
files={"file": ("test.txt", read_testfile, "text/plain")},
files={"file": (TXT_FILE, read_testfile, "text/plain")},
data={"purpose": "assistants"},
)

Expand Down
Loading

0 comments on commit 24a6938

Please sign in to comment.