diff --git a/cuda.Dockerfile b/cuda.Dockerfile
index 7ab0c3d..b4a86bf 100644
--- a/cuda.Dockerfile
+++ b/cuda.Dockerfile
@@ -13,7 +13,7 @@ ENV HOST=0.0.0.0 \
     CUDA_DOCKER_ARCH=all \
     LLAMA_CUBLAS=1 \
     GGML_CUDA=1
-RUN CMAKE_ARGS="-DGGML_CUDA=on" FORCE_CMAKE=1 pip install llama-cpp-python==0.2.85 --no-cache-dir
+RUN CMAKE_ARGS="-DGGML_CUDA=on" FORCE_CMAKE=1 pip install llama-cpp-python==0.2.87 --no-cache-dir
 RUN git clone https://github.com/Josh-XT/DeepSeek-VL deepseek
 RUN pip install torch==2.3.1+cu121 torchaudio==2.3.1+cu121 --index-url https://download.pytorch.org/whl/cu121
 COPY cuda-requirements.txt .
diff --git a/requirements.txt b/requirements.txt
index b4ff19b..c0bfe70 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -25,4 +25,4 @@ optimum
 onnx
 diffusers[torch]
 torchaudio==2.3.1
-llama-cpp-python==0.2.83
\ No newline at end of file
+llama-cpp-python==0.2.87
\ No newline at end of file