Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add RAG agent #34

Merged
merged 1 commit into from
Jul 3, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 17 additions & 3 deletions submodules/moragents_dockers/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,21 @@
This repo contains multiple agents and a dapp that enables you to interact with the agents, all running locally and containerized with Docker.


## Usage

## Dependencies

* Docker
* Ollama

Pull the required models in Ollama:

```ollama pull llama3```

```ollama pull nomic-embed-text```


## Installation

Docker compose will build and run two containers. One will be for the agents, the other will be for the UI.

```docker-compose up```
Expand Down Expand Up @@ -44,6 +58,6 @@ A typical flow looks like this:
- If the user accepts the quote, the swap may proceed. The back-end will generate transactions which will be sent to the front-end to be signed by the user's wallet.
- If the allowance for the token being sold is too low, an approval transaction will be generated first

### RAG Agent



This agent will answer questions about an uploaded PDF file.
8 changes: 6 additions & 2 deletions submodules/moragents_dockers/agents/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,10 @@ scikit-learn
huggingface-hub
flask==2.2.2
Werkzeug==2.2.2
gradio
flask-cors
web3
pymupdf==1.22.5
faiss-cpu
langchain-text-splitters
langchain-core
langchain-community
51 changes: 51 additions & 0 deletions submodules/moragents_dockers/agents/src/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,16 @@
from config import Config
from swap_agent.src import agent as swap_agent
from data_agent.src import agent as data_agent
from rag_agent.src import agent as rag_agent
from llama_cpp import Llama
from llama_cpp.llama_tokenizer import LlamaHFTokenizer
import os
import logging
from langchain_community.llms import Ollama
from langchain_community.embeddings import OllamaEmbeddings
from langchain_core.prompts import ChatPromptTemplate
from rag_agent.src.config import Config as ollama_config



def load_llm():
Expand All @@ -24,6 +32,32 @@ def load_llm():
# Flask application serving all agent endpoints; CORS enabled for the UI.
app = Flask(__name__)
CORS(app)

# NOTE(review): upload_state, agent, messages and prompt below duplicate the
# state kept inside rag_agent.src.agent and do not appear to be used by the
# routes visible in this file — confirm and consider removing.
upload_state=False
# Uploaded PDFs are stored under ./uploads relative to the working directory.
UPLOAD_FOLDER = os.path.join(os.getcwd(), 'uploads')
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Cap incoming request bodies (i.e. file uploads) at MAX_LENGTH bytes.
app.config['MAX_CONTENT_LENGTH'] = ollama_config.MAX_LENGTH

# Ollama-backed chat model and embedding model used by the RAG agent routes;
# both talk to the Ollama server at ollama_config.URL.
llm_ollama = Ollama(model="llama3",base_url=ollama_config.URL)
embeddings = OllamaEmbeddings(model="nomic-embed-text",base_url=ollama_config.URL)

logging.basicConfig(level=logging.DEBUG)


agent = None
messages=[]
# Prompt template restricting the LLM to answer only from retrieved context.
prompt = ChatPromptTemplate.from_template(
"""
Answer the following question only based on the given context

<context>
{context}
</context>

Question: {input}
"""
)

@app.route('/swap_agent/', methods=['POST'])
def swap_agent_chat():
global llm
Expand Down Expand Up @@ -67,6 +101,23 @@ def data_agent_messages():
def data_agent_clear_messages():
return data_agent.clear_messages()

@app.route('/rag_agent/upload', methods=['POST'])
def rag_agent_upload():
    """Accept a PDF upload and build the RAG index (delegates to rag_agent).

    llm_ollama, UPLOAD_FOLDER and embeddings are module-level names that are
    only read here, so no `global` declaration is needed.
    """
    return rag_agent.upload_file(request, UPLOAD_FOLDER, llm_ollama, embeddings, ollama_config.MAX_FILE_SIZE)

@app.route('/rag_agent/', methods=['POST'])
def rag_agent_chat():
    """Answer a question about the previously uploaded document."""
    return rag_agent.chat(request)

@app.route('/rag_agent/messages', methods=['GET'])
def rag_agent_messages():
    """Return the RAG agent's chat history."""
    return rag_agent.get_messages()

@app.route('/rag_agent/clear_messages', methods=['GET'])
def rag_agent_clear_messages():
    """Reset the RAG agent's chat history."""
    return rag_agent.clear_messages()


# Run the development server, reachable from outside the container.
# NOTE(review): debug=True enables the Werkzeug debugger and reloader —
# confirm this is disabled for production deployments.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True)
3 changes: 1 addition & 2 deletions submodules/moragents_dockers/agents/src/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ class Config:
MODEL_REVISION = "functionary-small-v2.4.Q4_0.gguf"
MODEL_PATH = "model/"+MODEL_REVISION
DOWNLOAD_DIR = "model"
# Data agent
# API endpoints
COINGECKO_BASE_URL = "https://api.coingecko.com/api/v3"
DEFILLAMA_BASE_URL = "https://api.llama.fi"
PRICE_SUCCESS_MESSAGE = "The price of {coin_name} is ${price:,}"
Expand All @@ -24,7 +24,6 @@ class Config:
MARKET_CAP_SUCCESS_MESSAGE = "The market cap of {coin_name} is ${market_cap:,}"
MARKET_CAP_FAILURE_MESSAGE = "Failed to retrieve market cap. Please enter a valid coin name."
API_ERROR_MESSAGE = "I can't seem to access the API at the moment."
# Swap agent
INCH_URL = "https://api.1inch.dev/token"
QUOTE_URL = "https://api.1inch.dev/swap"
APIBASEURL = f"https://api.1inch.dev/swap/v6.0/"
Expand Down
97 changes: 97 additions & 0 deletions submodules/moragents_dockers/agents/src/rag_agent/src/agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
from flask import jsonify
import os
import logging
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import ChatPromptTemplate
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain
from werkzeug.utils import secure_filename


# DEBUG-level logging for this module; upload_file() and chat() below rely
# on it for error reporting.
logging.basicConfig(level=logging.DEBUG)


# Retrieval chain built by handle_file_upload(); None until a PDF is indexed.
agent = None
# Rolling chat history served by get_messages().
messages=[{'role':"assistant","content":"Please upload a file to begin"}]
# Becomes True once a document has been indexed; chat() refuses until then.
upload_state = False
# Prompt template restricting the LLM to answer only from retrieved context.
prompt = ChatPromptTemplate.from_template(
"""
Answer the following question only based on the given context

<context>
{context}
</context>

Question: {input}
"""
)

def handle_file_upload(file, UPLOAD_FOLDER, llm, embeddings):
    """Persist an uploaded PDF and build the retrieval QA chain for it.

    Saves the file under UPLOAD_FOLDER, loads and chunks the PDF, embeds the
    chunks into a FAISS index, and installs a retrieval chain into the
    module-level `agent` used by chat().

    Args:
        file: werkzeug file object from request.files.
        UPLOAD_FOLDER: directory to store the uploaded file in.
        llm: chat model used to answer questions over retrieved context.
        embeddings: embedding model used to build the FAISS index.
    """
    global agent  # `prompt` is only read, so it needs no global declaration
    # exist_ok makes a separate existence check redundant.
    os.makedirs(UPLOAD_FOLDER, exist_ok=True)
    filename = secure_filename(file.filename)
    file_path = os.path.join(UPLOAD_FOLDER, filename)
    file.save(file_path)
    # Load the PDF, split it into chunks and index them for retrieval.
    loader = PyMuPDFLoader(file_path)
    docs = loader.load()
    text_splitter = RecursiveCharacterTextSplitter()
    split_documents = text_splitter.split_documents(docs)
    vector_store = FAISS.from_documents(split_documents, embeddings)
    docs_chain = create_stuff_documents_chain(llm, prompt)
    retriever = vector_store.as_retriever()
    agent = create_retrieval_chain(retriever, docs_chain)


def upload_file(request, UPLOAD_FOLDER, llm, embeddings, MAX_SIZE):
    """Flask handler body: validate an uploaded file and index it for RAG.

    Rejects requests without a file (400) and files larger than MAX_SIZE
    (polite assistant message, as the frontend renders it in the chat).
    On success, marks upload_state so chat() will answer questions.

    Args:
        request: the Flask request carrying the multipart upload.
        UPLOAD_FOLDER: directory to store the uploaded file in.
        llm: chat model forwarded to handle_file_upload().
        embeddings: embedding model forwarded to handle_file_upload().
        MAX_SIZE: maximum accepted file size in bytes.
    """
    global upload_state
    if 'file' not in request.files:
        return jsonify({'error': 'No file part'}), 400
    file = request.files['file']
    if file.filename == '':
        return jsonify({'error': 'No selected file'}), 400
    # Determine the size by seeking to the end of the stream.
    file.seek(0, os.SEEK_END)
    file_length = file.tell()
    file.seek(0, 0)  # Reset the file pointer to the beginning
    if file_length > MAX_SIZE:
        # Derive the limit from MAX_SIZE instead of hard-coding "5 MB",
        # so the message stays correct if the configured cap changes.
        size_reply = f'please use a file less than {MAX_SIZE // (1024 * 1024)} MB'
        messages.append({"role": "assistant", "content": size_reply})
        return jsonify({"role": "assistant", "content": size_reply})
    try:
        handle_file_upload(file, UPLOAD_FOLDER, llm, embeddings)
        upload_state = True
        messages.append({"role": "assistant", "content": 'You have successfully uploaded the text'})
        return jsonify({"role": "assistant", "content": 'You have successfully uploaded the text'})
    except Exception as e:
        logging.error(f'Error during file upload: {str(e)}')
        return jsonify({'error': str(e)}), 500

def chat(request):
    """Flask handler body: answer a user question over the indexed document.

    Expects JSON of the form {"prompt": {"role": ..., "content": ...}}.
    Before any upload the answer is a fixed "please upload a file first"
    message; afterwards the retrieval chain in `agent` is invoked.

    Returns 400 when the JSON body is missing or lacks "prompt", 500 on
    unexpected errors.
    """
    global messages, upload_state, agent
    try:
        data = request.get_json()
        # get_json() may return None for a non-JSON body; treat that as a
        # missing-parameter error instead of letting `in` raise a 500.
        if data and 'prompt' in data:
            # A distinct local name avoids shadowing the module-level
            # `prompt` template used by the retrieval chain.
            user_input = data['prompt']['content']
            messages.append(data['prompt'])
            role = "assistant"
            response = agent.invoke({"input": user_input}) if upload_state else {"answer": "please upload a file first"}

            messages.append({"role": role, "content": response["answer"]})
            return jsonify({"role": role, "content": response["answer"]})
        else:
            return jsonify({"error": "Missing required parameters"}), 400
    except Exception as e:
        logging.error(f'Error in chat endpoint: {str(e)}')
        return jsonify({"Error": str(e)}), 500

def get_messages():
    """Return the full chat history as a JSON payload."""
    # Reading a module-level name needs no `global` declaration.
    return jsonify({"messages": messages})

def clear_messages():
    """Reset the chat history to the initial greeting and confirm."""
    global messages  # rebinding the module-level list requires `global`
    greeting = {"role": "assistant", "content": "Please upload a file to begin"}
    messages = [greeting]
    return jsonify({"response": "successfully cleared message history"})

10 changes: 10 additions & 0 deletions submodules/moragents_dockers/agents/src/rag_agent/src/config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
import logging
import os

# Logging configuration
logging.basicConfig(level=logging.INFO)

# Configuration object
class Config:
    """Runtime limits and the Ollama endpoint for the RAG agent."""

    MAX_FILE_SIZE = 5 * 1024 * 1024  # 5 MB upload cap enforced by upload_file()
    MAX_LENGTH = 16 * 1024 * 1024  # Flask MAX_CONTENT_LENGTH (16 MB request-body cap)
    # docker-compose exports BASE_URL with this same default; honour the env
    # var instead of hard-coding the host, keeping the old value as fallback.
    URL = os.getenv("BASE_URL", "http://host.docker.internal:11434")
11 changes: 8 additions & 3 deletions submodules/moragents_dockers/docker-compose-apple.yml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,11 @@ services:
restart: always
volumes:
- agents_data:/var/lib/agents
- ./agents/src:/app/src # Volume for swapagent src
- ./agents/src:/app/src
extra_hosts:
- "host.docker.internal:host-gateway"
environment:
- BASE_URL=http://host.docker.internal:11434

nginx:
build:
Expand All @@ -19,6 +23,7 @@ services:
target: nginx
ports:
- '3333:80'



volumes:
agents_data:
agents_data:
7 changes: 6 additions & 1 deletion submodules/moragents_dockers/docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,11 @@ services:
restart: always
volumes:
- agents_data:/var/lib/agents
- ./agents/src:/app/src # Volume for swapagent src
- ./agents/src:/app/src
extra_hosts:
- "host.docker.internal:host-gateway"
environment:
- BASE_URL=http://host.docker.internal:11434

nginx:
build:
Expand All @@ -21,5 +25,6 @@ services:
- '3333:80'



volumes:
agents_data:
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,7 @@ export const Chat: FC<ChatProps> = ({

await onSubmitMessage(message, file);
setMessage('');
setFile(null); // Clear the file state after upload
setShowSpinner(false);
}

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
import { Button, HStack, Modal, ModalBody, ModalContent, ModalFooter, ModalHeader, ModalOverlay, Spacer, Text } from '@chakra-ui/react';
import React, { FC, ComponentPropsWithoutRef } from 'react';

export interface SwapAgentModalProps extends ComponentPropsWithoutRef<'div'> {
  isOpen: boolean;
  onClose: () => void;
}

// Informational modal shown when the user switches to the Swap Agent.
// Removed unused imports (useEffect, useDisclosure, ModalCloseButton) —
// nothing in this component referenced them.
export const SwapAgentModal: FC<SwapAgentModalProps> = ({ isOpen, onClose }) => {
  return (
    <Modal isCentered onClose={onClose} isOpen={isOpen} motionPreset='slideInBottom' closeOnOverlayClick={false}>
      <ModalOverlay />
      <ModalContent sx={{ backgroundColor: '#353936', borderColor: '#313137', color: 'white', borderRadius: '8px', padding: 1 }}>
        <ModalHeader>Swap Agent</ModalHeader>
        <ModalBody>
          <Text sx={{ fontSize: '16px', lineHeight: '18px' }}>
            You have switched to the Swap Agent. Please ensure you have connected your wallet and selected the correct network.
          </Text>
        </ModalBody>
        <ModalFooter>
          <HStack>
            <Spacer />
            <Button onClick={onClose} variant={'greenCustom'} sx={{ pl: 7, pr: 7 }}>Close</Button>
          </HStack>
        </ModalFooter>
      </ModalContent>
    </Modal>
  );
};
10 changes: 4 additions & 6 deletions submodules/moragents_dockers/frontend/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -28,16 +28,14 @@ export const availableAgents: {
requirements: {
connectedWallet: false
}
}
/*
},
'rag-agent': {
'name': 'Functional Data Agent (RAG FOR FILES)',
'description': 'Mock of the Data Agent that supports files',
'endpoint': 'http://127.0.0.1:8081',
'name': 'PDF Agent',
'description': 'Ask questions about an uploaded PDF file',
'endpoint': 'http://127.0.0.1:8080/rag_agent',
requirements: {
connectedWallet: false
},
supportsFiles: true
}
*/
}