diff --git a/audio-to-text/Containerfile b/audio-to-text/Containerfile
index 28d2b58..701aead 100644
--- a/audio-to-text/Containerfile
+++ b/audio-to-text/Containerfile
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi9/python-311:1-72.1722518949
+FROM registry.access.redhat.com/ubi9/python-311:1-77.1726664316
 WORKDIR /locallm
 COPY requirements.txt /locallm/requirements.txt
 RUN pip install --upgrade pip && \
diff --git a/audio-to-text/whisper_client.py b/audio-to-text/whisper_client.py
index a22c0b1..225a38b 100644
--- a/audio-to-text/whisper_client.py
+++ b/audio-to-text/whisper_client.py
@@ -8,13 +8,17 @@ st.markdown("Upload an audio file you wish to have translated")
 endpoint = os.getenv("MODEL_ENDPOINT", default="http://0.0.0.0:8001")
 endpoint = f"{endpoint}/inference"
+endpoint_bearer = os.getenv("MODEL_ENDPOINT_BEARER")
+request_kwargs = {}
+if endpoint_bearer is not None:
+    request_kwargs["headers"] = {"Authorization": f"Bearer {endpoint_bearer}"}
 audio = st.file_uploader("", type=["wav","mp3","mp4","flac"], accept_multiple_files=False)
 
 # read audio file
 if audio:
     audio_bytes = audio.read()
     st.audio(audio_bytes, format='audio/wav', start_time=0)
-    files = {'file': audio_bytes}
-    response = requests.post(endpoint, files=files)
+    request_kwargs["files"] = {'file': audio_bytes}
+    response = requests.post(endpoint, **request_kwargs)
     response_json = response.json()
     st.subheader(f"Translated Text")
     st.text_area(label="", value=response_json['text'], height=300)
diff --git a/chatbot/Containerfile b/chatbot/Containerfile
index 29cff1a..c87ed47 100644
--- a/chatbot/Containerfile
+++ b/chatbot/Containerfile
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi9/python-311:1-72.1722518949
+FROM registry.access.redhat.com/ubi9/python-311:1-77.1726664316
 WORKDIR /chat
 COPY requirements.txt .
 RUN pip install --upgrade pip
diff --git a/chatbot/chatbot_ui.py b/chatbot/chatbot_ui.py
index afb0ffa..40f52b0 100644
--- a/chatbot/chatbot_ui.py
+++ b/chatbot/chatbot_ui.py
@@ -12,6 +12,10 @@
 model_service = os.getenv("MODEL_ENDPOINT",
                           "http://localhost:8001")
 model_service = f"{model_service}/v1"
+model_service_bearer = os.getenv("MODEL_ENDPOINT_BEARER")
+request_kwargs = {}
+if model_service_bearer is not None:
+    request_kwargs = {"headers": {"Authorization": f"Bearer {model_service_bearer}"}}
 
 @st.cache_resource(show_spinner=False)
 def checking_model_service():
@@ -20,8 +24,8 @@ def checking_model_service():
     ready = False
     while not ready:
         try:
-            request_cpp = requests.get(f'{model_service}/models')
-            request_ollama = requests.get(f'{model_service[:-2]}api/tags')
+            request_cpp = requests.get(f'{model_service}/models', **request_kwargs)
+            request_ollama = requests.get(f'{model_service[:-2]}api/tags', **request_kwargs)
             if request_cpp.status_code == 200:
                 server = "Llamacpp_Python"
                 ready = True
@@ -37,7 +41,7 @@
 
 def get_models():
     try:
-        response = requests.get(f"{model_service[:-2]}api/tags")
+        response = requests.get(f"{model_service[:-2]}api/tags", **request_kwargs)
         return [i["name"].split(":")[0] for i in
                 json.loads(response.content)["models"]]
     except:
@@ -76,7 +80,7 @@ def memory():
                                   options=models)
 
 llm = ChatOpenAI(base_url=model_service,
-                 api_key="sk-no-key-required",
+                 api_key="sk-no-key-required" if model_service_bearer is None else model_service_bearer,
                  model=model_name,
                  streaming=True,
                  callbacks=[StreamlitCallbackHandler(st.empty(),
diff --git a/codegen/Containerfile b/codegen/Containerfile
index b9de4a3..f57c27f 100644
--- a/codegen/Containerfile
+++ b/codegen/Containerfile
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi9/python-311:1-72.1722518949
+FROM registry.access.redhat.com/ubi9/python-311:1-77.1726664316
 WORKDIR /codegen
 COPY requirements.txt .
 RUN pip install --upgrade pip
diff --git a/codegen/codegen-app.py b/codegen/codegen-app.py
index 13d3345..b821edf 100644
--- a/codegen/codegen-app.py
+++ b/codegen/codegen-app.py
@@ -10,6 +10,10 @@
 model_service = os.getenv("MODEL_ENDPOINT",
                           "http://localhost:8001")
 model_service = f"{model_service}/v1"
+model_service_bearer = os.getenv("MODEL_ENDPOINT_BEARER")
+request_kwargs = {}
+if model_service_bearer is not None:
+    request_kwargs = {"headers": {"Authorization": f"Bearer {model_service_bearer}"}}
 
 @st.cache_resource(show_spinner=False)
 def checking_model_service():
@@ -18,7 +22,7 @@ def checking_model_service():
     ready = False
     while not ready:
         try:
-            request = requests.get(f'{model_service}/models')
+            request = requests.get(f'{model_service}/models', **request_kwargs)
             if request.status_code == 200:
                 ready = True
         except:
@@ -43,7 +47,7 @@ def checking_model_service():
 
 llm = ChatOpenAI(base_url=model_service,
                  model=model_name,
-                 api_key="EMPTY",
+                 api_key="EMPTY" if model_service_bearer is None else model_service_bearer,
                  streaming=True)
 
 # Define the Langchain chain
diff --git a/object-detection/Containerfile b/object-detection/Containerfile
index a5f6ae3..7b42097 100644
--- a/object-detection/Containerfile
+++ b/object-detection/Containerfile
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi9/python-311:1-72.1722518949
+FROM registry.access.redhat.com/ubi9/python-311:1-77.1726664316
 WORKDIR /locallm
 COPY requirements.txt /locallm/requirements.txt
 RUN pip install --upgrade pip && \
diff --git a/object-detection/object_detection_client.py b/object-detection/object_detection_client.py
index 10450c2..7f61451 100644
--- a/object-detection/object_detection_client.py
+++ b/object-detection/object_detection_client.py
@@ -7,8 +7,11 @@ st.title("🕵️‍♀️ Object Detection")
 
 endpoint =os.getenv("MODEL_ENDPOINT", default = "http://0.0.0.0:8000")
+endpoint_bearer = os.getenv("MODEL_ENDPOINT_BEARER")
 
 headers = {"accept": "application/json",
            "Content-Type": "application/json"}
+if endpoint_bearer:
+    headers["Authorization"] = f"Bearer {endpoint_bearer}"
 
 image = st.file_uploader("Upload Image")
 window = st.empty()
diff --git a/object-detection/requirements.txt b/object-detection/requirements.txt
index ad64d16..47d2ad5 100644
--- a/object-detection/requirements.txt
+++ b/object-detection/requirements.txt
@@ -16,7 +16,7 @@ MarkupSafe==2.1.5
 mdurl==0.1.2
 numpy==1.26.4
 packaging==24.0
-pandas==2.2.2
+pandas==2.2.3
 pillow==10.3.0
 protobuf==4.25.3
 pyarrow==15.0.2
@@ -37,4 +37,4 @@ toolz==0.12.1
 tornado==6.4.1
 typing_extensions==4.11.0
 tzdata==2024.1
-urllib3==2.2.2
+urllib3==2.2.3
diff --git a/rag/Containerfile b/rag/Containerfile
index 1aa72f5..593ca9f 100644
--- a/rag/Containerfile
+++ b/rag/Containerfile
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi9/python-311:1-72.1722518949
+FROM registry.access.redhat.com/ubi9/python-311:1-77.1726664316
 ### Update sqlite for chroma
 USER root
 RUN dnf remove sqlite3 -y
diff --git a/rag/rag_app.py b/rag/rag_app.py
index 756e274..b3591fc 100644
--- a/rag/rag_app.py
+++ b/rag/rag_app.py
@@ -12,6 +12,7 @@
 model_service = os.getenv("MODEL_ENDPOINT",
                           "http://0.0.0.0:8001")
 model_service = f"{model_service}/v1"
+model_service_bearer = os.getenv("MODEL_ENDPOINT_BEARER")
 model_name = os.getenv("MODEL_NAME", "")
 chunk_size = os.getenv("CHUNK_SIZE", 150)
 embedding_model = os.getenv("EMBEDDING_MODEL","BAAI/bge-base-en-v1.5")
@@ -75,7 +76,7 @@ def read_file(file):
 
 
 llm = ChatOpenAI(base_url=model_service,
-                 api_key="EMPTY",
+                 api_key="EMPTY" if model_service_bearer is None else model_service_bearer,
                  model=model_name,
                  streaming=True,
                  callbacks=[StreamlitCallbackHandler(st.container(),
diff --git a/rag/requirements.txt b/rag/requirements.txt
index 572acc0..e1a2865 100644
--- a/rag/requirements.txt
+++ b/rag/requirements.txt
@@ -1,6 +1,6 @@
 langchain-openai==0.1.7
 langchain==0.1.20
-chromadb==0.5.5
+chromadb==0.5.13
 sentence-transformers==2.7.0
 streamlit==1.34.0
 pypdf==4.2.0
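
Taken together, the Python changes above all follow one pattern: read an optional bearer token from MODEL_ENDPOINT_BEARER and, when it is set, attach it as an Authorization header on plain requests calls, or pass it as the api_key for ChatOpenAI. A minimal standalone sketch of that pattern follows, assuming only the environment-variable names used in this diff; the default URL and the /v1/models route are illustrative placeholders, not part of the patch:

    import os
    import requests

    # Same convention as the recipes: the token is optional, so requests to
    # unauthenticated model services keep working with no extra configuration.
    endpoint = os.getenv("MODEL_ENDPOINT", "http://0.0.0.0:8001")
    token = os.getenv("MODEL_ENDPOINT_BEARER")

    request_kwargs = {}
    if token is not None:
        request_kwargs["headers"] = {"Authorization": f"Bearer {token}"}

    # With MODEL_ENDPOINT_BEARER unset this is a plain GET; with it set, the
    # same call carries an "Authorization: Bearer <token>" header.
    response = requests.get(f"{endpoint}/v1/models", **request_kwargs)
    print(response.status_code)

Reusing the token as the ChatOpenAI api_key works for the same reason: the OpenAI-compatible client sends its API key as an Authorization: Bearer header, so the guarded endpoint sees the identical credential either way.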