
Commit

update the Python slim base image to 3.11, update the packages
raunakkathuria committed Dec 27, 2024
1 parent 8702f88 commit 91a432e
Showing 3 changed files with 18 additions and 17 deletions.
6 changes: 3 additions & 3 deletions Dockerfile
@@ -1,5 +1,5 @@
 # Build stage
-FROM python:3.9-slim as builder
+FROM python:3.11-slim AS builder
 
 WORKDIR /app
 
@@ -13,7 +13,7 @@ COPY requirements.txt .
 RUN pip install --no-cache-dir -r requirements.txt
 
 # Runtime stage
-FROM python:3.9-slim
+FROM python:3.11-slim
 
 WORKDIR /app
 
@@ -53,4 +53,4 @@ HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
     CMD curl -f http://localhost:8000/health || exit 1
 
 # Run the application
-CMD ["uvicorn", "src.main:app", "--host", "0.0.0.0", "--port", "8000"]
+CMD ["uvicorn", "src.main:app", "--host", "0.0.0.0", "--port", "8000", "--log-level", "info"]
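Note: both stages now pin python:3.11-slim, and the builder stage's lowercase "as" is uppercased to "AS", which quiets BuildKit's FromAsCasing lint warning. With the 3.11 floor baked into the image, a fail-fast interpreter guard at application startup is a cheap safety net. A minimal sketch; its placement in src/main.py is an assumption, not part of this commit:

import sys

# Fail fast if the service is ever run on an interpreter older than the
# 3.11 floor that the Docker base image now pins.
if sys.version_info < (3, 11):
    raise RuntimeError(f"Python 3.11+ required, got {sys.version.split()[0]}")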
2 changes: 1 addition & 1 deletion README.md
@@ -149,7 +149,7 @@ The project includes two special files that help AI models (like LLMs) better un
 ## Prerequisites
 
 - Docker
-- Python 3.9+
+- Python 3.11+
 - NVIDIA GPU (optional)
 - For GPU support:
   1. Install NVIDIA Container Toolkit
27 changes: 14 additions & 13 deletions src/llm_chain/chain.py
@@ -1,6 +1,7 @@
-from langchain.llms import Ollama
-from langchain.prompts import PromptTemplate
-from langchain.chains import LLMChain
+from langchain_community.llms import Ollama
+from langchain_core.prompts import PromptTemplate
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.runnables import RunnablePassthrough
 from typing import List, Dict
 import logging
 from ..config import Config
@@ -34,7 +35,9 @@ def __init__(self, config: Config):
             Assistant Response:"""
             )
 
-            self.chain = LLMChain(llm=self.llm, prompt=self.prompt)
+            self.chain = (
+                RunnablePassthrough() | self.prompt | self.llm | StrOutputParser()
+            )
             logger.info(f"Initialized PaymentSupportChain with model {config.LLM_MODEL}")
         except Exception as e:
             logger.error(f"Failed to initialize PaymentSupportChain: {str(e)}")
@@ -48,16 +51,14 @@ def generate_response(
     ) -> str:
         try:
             logger.info(f"Generating response for question: {question}")
-            context_str = "\n".join([doc["content"] for doc in context])
-            history_str = "\n".join([
-                f"{msg.role}: {msg.content}" for msg in conversation_history
-            ])
+            context_str = self._format_context(context)
+            history_str = self._format_history(conversation_history)
 
-            response = self.chain.run(
-                context=context_str,
-                conversation_history=history_str,
-                question=question
-            )
+            response = self.chain.invoke({
+                "context": context_str,
+                "conversation_history": history_str,
+                "question": question
+            })
 
             logger.info("Successfully generated response")
             return response
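The chain.py changes migrate from the deprecated LLMChain class to a LangChain Expression Language (LCEL) pipeline composed with the | operator. A minimal standalone sketch of the same pattern follows; the model name, prompt wording, and the two formatting helpers are assumptions (the commit's _format_context and _format_history bodies are collapsed in the view above), and a locally running Ollama server is assumed.

from typing import Dict, List

from langchain_community.llms import Ollama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnablePassthrough


def format_context(context: List[Dict]) -> str:
    # Hypothetical stand-in for the commit's _format_context helper:
    # joins retrieved document bodies into one context string.
    return "\n".join(doc["content"] for doc in context)


def format_history(history: List[Dict]) -> str:
    # Hypothetical stand-in for _format_history: renders "role: content" pairs.
    return "\n".join(f"{msg['role']}: {msg['content']}" for msg in history)


prompt = PromptTemplate.from_template(
    "Context:\n{context}\n\n"
    "Conversation history:\n{conversation_history}\n\n"
    "Question: {question}\n\n"
    "Assistant Response:"
)

llm = Ollama(model="llama2")  # model name is an assumption; the repo reads it from config

# LCEL pipeline: input dict -> prompt formatting -> LLM call -> string output.
chain = RunnablePassthrough() | prompt | llm | StrOutputParser()

answer = chain.invoke({
    "context": format_context([{"content": "Refunds settle within 5 business days."}]),
    "conversation_history": format_history([{"role": "user", "content": "Hi"}]),
    "question": "How long do refunds take?",
})
print(answer)

One practical difference: LLMChain.run returned the generated text directly, while an LCEL chain outputs whatever its last runnable produces, so the trailing StrOutputParser guarantees a plain str even if the completion-style Ollama LLM is later swapped for a chat model that returns a message object. The leading RunnablePassthrough is optional here, since the prompt can consume the input dict directly; the commit appears to keep it as an explicit entry point.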
