test_ollama.yml
name: Endpoint (Ollama)
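# Base URL of the locally started Ollama server; 11434 is Ollama's default port.
# The endpoint tests below presumably read this variable to locate the server.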
env:
  OLLAMA_BASE_URL: "http://localhost:11434"
on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
jobs:
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - name: Checkout
        uses: actions/checkout@v4
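      # Install Ollama with the official install script, start the server in the
      # background, and pre-pull the chat and embedding models the tests exercise.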
      - name: Start Ollama Server
        run: |
          curl -fsSL https://ollama.com/install.sh | sh
          ollama serve &
          sleep 10 # wait for server
          ollama pull thewindmom/hermes-3-llama-3.1-8b
          ollama pull mxbai-embed-large
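      # Third-party action that sets up Python 3.12 and Poetry 1.8.2 (with dependency
      # caching); install-args passes the dev and ollama extras to the install step.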
- name: "Setup Python, Poetry and Dependencies"
uses: packetcoders/action-setup-cache-python-poetry@main
with:
python-version: "3.12"
poetry-version: "1.8.2"
install-args: "-E dev -E ollama"
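      # Run the Ollama-specific tests against the local server: the LLM endpoint,
      # the embedding endpoint, and the Ollama provider.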
      - name: Test LLM endpoint
        run: |
          poetry run pytest -s -vv tests/test_model_letta_perfomance.py::test_llm_endpoint_ollama
      - name: Test embedding endpoint
        run: |
          poetry run pytest -s -vv tests/test_model_letta_perfomance.py::test_embedding_endpoint_ollama
      - name: Test provider
        run: |
          poetry run pytest -s -vv tests/test_providers.py::test_ollama
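
For local debugging, a minimal sketch of the same flow outside CI, assuming Ollama is already installed: start the server, pull the two models, smoke-test Ollama's documented /api/generate and /api/embeddings routes with curl, then run one of the CI pytest targets. The prompts are placeholders; the exact assertions live in the test files referenced above.

export OLLAMA_BASE_URL="http://localhost:11434"
ollama serve &
sleep 10 # wait for server
ollama pull thewindmom/hermes-3-llama-3.1-8b
ollama pull mxbai-embed-large

# Quick manual checks against Ollama's HTTP API
curl -s "$OLLAMA_BASE_URL/api/generate" \
  -d '{"model": "thewindmom/hermes-3-llama-3.1-8b", "prompt": "Say hello", "stream": false}'
curl -s "$OLLAMA_BASE_URL/api/embeddings" \
  -d '{"model": "mxbai-embed-large", "prompt": "hello world"}'

# Same dependencies and test target the workflow uses
poetry install -E dev -E ollama
poetry run pytest -s -vv tests/test_model_letta_perfomance.py::test_llm_endpoint_ollama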