From e84a6da71d5d517df6adbc3538e34895db54444e Mon Sep 17 00:00:00 2001
From: letonghan
Date: Wed, 29 May 2024 11:34:14 +0800
Subject: [PATCH] refine output format of llm

Signed-off-by: letonghan
---
 comps/llms/README.md                  | 2 +-
 comps/llms/text-generation/tgi/llm.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/comps/llms/README.md b/comps/llms/README.md
index bdb4a4002..1408fecbc 100644
--- a/comps/llms/README.md
+++ b/comps/llms/README.md
@@ -39,7 +39,7 @@ curl http://${your_ip}:8008/generate \
 
 ```bash
 export TGI_LLM_ENDPOINT="http://${your_ip}:8008"
-python langchain/llm.py
+python text-generation/tgi/llm.py
 ```
 
 # 🚀Start Microservice with Docker
diff --git a/comps/llms/text-generation/tgi/llm.py b/comps/llms/text-generation/tgi/llm.py
index 2162e48b4..445999992 100644
--- a/comps/llms/text-generation/tgi/llm.py
+++ b/comps/llms/text-generation/tgi/llm.py
@@ -29,7 +29,7 @@ def post_process_text(text: str):
         return "data: \n\n"
     if text.isspace():
         return None
-    new_text = text.replace(" ", "@#$")
+    new_text = text.replace("Answer: ", "").replace("Human: ", "").replace(" ", "@#$")
     return f"data: {new_text}\n\n"