From 812c85cefbefc4d17df5a6e9377c867bac8eec9f Mon Sep 17 00:00:00 2001 From: qgao007 <108324932+qgao007@users.noreply.github.com> Date: Mon, 16 Sep 2024 20:01:22 -0600 Subject: [PATCH] Add Bias Detection Microservice (#659) * Add Bias Detection Microservice Signed-off-by: Qun Gao * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Adding Bias Detection Container to CI (#695) Signed-off-by: Abolfazl Shahbazi --------- Signed-off-by: Qun Gao Signed-off-by: Abolfazl Shahbazi Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Abolfazl Shahbazi --- .../docker/compose/guardrails-compose-cd.yaml | 4 + .../docker/compose/guardrails-compose.yaml | 10 ++ comps/guardrails/README.md | 1 + comps/guardrails/bias_detection/Dockerfile | 31 +++++++ comps/guardrails/bias_detection/README.md | 93 +++++++++++++++++++ .../bias_detection/bias_detection.py | 31 +++++++ .../bias_detection/requirements.txt | 15 +++ 7 files changed, 185 insertions(+) create mode 100644 comps/guardrails/bias_detection/Dockerfile create mode 100644 comps/guardrails/bias_detection/README.md create mode 100644 comps/guardrails/bias_detection/bias_detection.py create mode 100644 comps/guardrails/bias_detection/requirements.txt diff --git a/.github/workflows/docker/compose/guardrails-compose-cd.yaml b/.github/workflows/docker/compose/guardrails-compose-cd.yaml index 55d6f346e..86b906d48 100644 --- a/.github/workflows/docker/compose/guardrails-compose-cd.yaml +++ b/.github/workflows/docker/compose/guardrails-compose-cd.yaml @@ -6,6 +6,10 @@ services: build: dockerfile: comps/guardrails/pii_detection/Dockerfile image: ${REGISTRY:-opea}/guardrails-pii-detection:${TAG:-latest} + guardrails-bias-detection: + build: + dockerfile: comps/guardrails/bias_detection/Dockerfile + image: ${REGISTRY:-opea}/guardrails-bias-detection:${TAG:-latest} guardrails-toxicity-detection: build: dockerfile: comps/guardrails/toxicity_detection/Dockerfile diff --git a/.github/workflows/docker/compose/guardrails-compose.yaml b/.github/workflows/docker/compose/guardrails-compose.yaml index 81e516209..349676167 100644 --- a/.github/workflows/docker/compose/guardrails-compose.yaml +++ b/.github/workflows/docker/compose/guardrails-compose.yaml @@ -9,3 +9,13 @@ services: build: dockerfile: comps/guardrails/llama_guard/langchain/Dockerfile image: ${REGISTRY:-opea}/guardrails-tgi:${TAG:-latest} + + guardrails-bias-detection: + build: + dockerfile: comps/guardrails/bias_detection/Dockerfile + image: ${REGISTRY:-opea}/guardrails-bias-detection:${TAG:-latest} + + guardrails-toxicity-detection: + build: + dockerfile: comps/guardrails/toxicity_detection/Dockerfile + image: ${REGISTRY:-opea}/guardrails-toxicity-detection:${TAG:-latest} diff --git a/comps/guardrails/README.md b/comps/guardrails/README.md index 0a2686eb0..6be07d18b 100644 --- a/comps/guardrails/README.md +++ b/comps/guardrails/README.md @@ -7,5 +7,6 @@ The Guardrails service enhances the security of LLM-based applications by offeri | [Llama Guard](./llama_guard/langchain/README.md) | Provides guardrails for inputs and outputs to ensure safe interactions | | [PII Detection](./pii_detection/README.md) | Detects Personally Identifiable Information (PII) and Business Sensitive Information (BSI) | | [Toxicity Detection](./toxicity_detection/README.md) | Detects Toxic language (rude, disrespectful, or unreasonable language that is likely to make someone leave a discussion) | +| [Bias 
Detection](./bias_detection/README.md) | Detects Biased language (framing bias, epistemological bias, and demographic bias) |

 Additional safety-related microservices will be available soon.
diff --git a/comps/guardrails/bias_detection/Dockerfile b/comps/guardrails/bias_detection/Dockerfile
new file mode 100644
index 000000000..e0f116d9a
--- /dev/null
+++ b/comps/guardrails/bias_detection/Dockerfile
@@ -0,0 +1,31 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+FROM langchain/langchain:latest
+
+ENV LANG=C.UTF-8
+
+ARG ARCH="cpu"
+
+RUN apt-get update -y && apt-get install -y --no-install-recommends --fix-missing \
+    libgl1-mesa-glx \
+    libjemalloc-dev
+
+RUN useradd -m -s /bin/bash user && \
+    mkdir -p /home/user && \
+    chown -R user /home/user/
+
+USER user
+
+COPY comps /home/user/comps
+
+RUN pip install --no-cache-dir --upgrade pip && \
+    if [ ${ARCH} = "cpu" ]; then pip install --no-cache-dir torch --index-url https://download.pytorch.org/whl/cpu; fi && \
+    pip install --no-cache-dir -r /home/user/comps/guardrails/bias_detection/requirements.txt
+
+ENV PYTHONPATH=$PYTHONPATH:/home/user
+
+WORKDIR /home/user/comps/guardrails/bias_detection/
+
+ENTRYPOINT ["python", "bias_detection.py"]
diff --git a/comps/guardrails/bias_detection/README.md b/comps/guardrails/bias_detection/README.md
new file mode 100644
index 000000000..15935824f
--- /dev/null
+++ b/comps/guardrails/bias_detection/README.md
@@ -0,0 +1,93 @@
+# Bias Detection Microservice
+
+## Introduction
+
+The Bias Detection Microservice allows AI application developers to safeguard user input and LLM output from biased language in a RAG environment. By leveraging a smaller fine-tuned Transformer model for bias classification (e.g., DistilBERT, RoBERTa), we maintain a lightweight guardrails microservice without significantly sacrificing performance, making it readily deployable on both Intel Gaudi and Xeon.
+
+Bias erodes our collective trust and fuels social conflict. Bias can be defined as inappropriate subjectivity in the form of one of the following:
+
+- Framing bias -- using subjective words or phrases linked with a particular point of view
+- Epistemological bias -- linguistic features that subtly modify the believability of a proposition
+- Demographic bias -- text with presuppositions about particular genders, races, or other demographic categories
+
+## Future Development
+
+- Add a "neutralizing bias" microservice that neutralizes any detected bias in the RAG pipeline, further guarding RAG usage.
+
+## 🚀1. Start Microservice with Python (Option 1)
+
+### 1.1 Install Requirements
+
+```bash
+pip install -r requirements.txt
+```
+
+### 1.2 Start Bias Detection Microservice with Python Script
+
+```bash
+python bias_detection.py
+```
+
+## 🚀2. Start Microservice with Docker (Option 2)
+
+### 2.1 Prepare bias detection model
+
+The bias detection model is pulled from the Hugging Face Hub when the microservice starts, so prepare your Hugging Face API token first:
+
+```bash
+export HUGGINGFACEHUB_API_TOKEN=${YOUR_HF_TOKEN}
+```
+
+### 2.2 Build Docker Image
+
+```bash
+cd ../../../ # back to GenAIComps/ folder
+docker build -t opea/guardrails-bias-detection:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/guardrails/bias_detection/Dockerfile .
+```
+
+### 2.3 Run Docker Container with Microservice
+
+```bash
+docker run -d --rm --runtime=runc --name="guardrails-bias-detection" -p 9092:9092 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e HF_TOKEN=${HUGGINGFACEHUB_API_TOKEN} opea/guardrails-bias-detection:latest
+```
+
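Model download and loading can make the first start slow. If you prefer to wait programmatically rather than watching the container logs (next section), the sketch below polls the service from Python until it answers. It is a minimal, optional helper, assuming the container started above is reachable on localhost:9092 and serves the /v1/bias endpoint defined in bias_detection.py; the "hello world" probe text is only illustrative.

```python
# Optional readiness check: poll the bias detection service until it answers.
# Assumptions: container from the previous step is exposed on localhost:9092
# and serves the /v1/bias endpoint defined in bias_detection.py.
import time

import requests

url = "http://localhost:9092/v1/bias"
payload = {"text": "hello world"}

for attempt in range(30):
    try:
        resp = requests.post(url, json=payload, timeout=5)
        print(f"Service answered with HTTP {resp.status_code} after {attempt + 1} attempt(s).")
        break
    except requests.exceptions.RequestException:
        time.sleep(2)  # the model may still be downloading or loading
else:
    print("Service did not become ready in time.")
```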
+## 🚀3. Get Status of Microservice
+
+```bash
+docker container logs -f guardrails-bias-detection
+```
+
+## 🚀4. Consume Microservice Pre-LLM/Post-LLM
+
+Once the microservice starts, users can use the examples below (bash or python) to apply bias detection to either the user's query (Pre-LLM) or the LLM's response (Post-LLM).
+
+**Bash:**
+
+```bash
+curl localhost:9092/v1/bias \
+  -X POST \
+  -d '{"text":"John McCain exposed as an unprincipled politician"}' \
+  -H 'Content-Type: application/json'
+```
+
+Example Output (the `text` field of the returned document):
+
+```bash
+"Violated policies: bias, please check your input."
+```
+
+**Python Script:**
+
+```python
+import requests
+
+proxies = {"http": ""}
+url = "http://localhost:9092/v1/bias"
+data = {"text": "John McCain exposed as an unprincipled politician"}
+
+try:
+    # Send the text as a JSON body, matching the curl example above.
+    resp = requests.post(url=url, json=data, proxies=proxies)
+    print(resp.text)
+    resp.raise_for_status()  # Raise an exception for unsuccessful HTTP status codes
+    print("Request successful!")
+except requests.exceptions.RequestException as e:
+    print("An error occurred:", e)
+```
diff --git a/comps/guardrails/bias_detection/bias_detection.py b/comps/guardrails/bias_detection/bias_detection.py
new file mode 100644
index 000000000..2555ffb2c
--- /dev/null
+++ b/comps/guardrails/bias_detection/bias_detection.py
@@ -0,0 +1,31 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+from transformers import pipeline
+
+from comps import ServiceType, TextDoc, opea_microservices, register_microservice
+
+
+@register_microservice(
+    name="opea_service@bias_detection",
+    service_type=ServiceType.GUARDRAIL,
+    endpoint="/v1/bias",
+    host="0.0.0.0",
+    port=9092,
+    input_datatype=TextDoc,
+    output_datatype=TextDoc,
+)
+def llm_generate(input: TextDoc):
+    """Classify the input text and block downstream microservices if it is biased."""
+    input_text = input.text
+    result = bias_pipeline(input_text)
+    if result[0]["label"] == "BIASED":
+        # Blacklist all downstream nodes and return a policy-violation message.
+        return TextDoc(text="Violated policies: bias, please check your input.", downstream_black_list=[".*"])
+    else:
+        # Pass the original text through unchanged.
+        return TextDoc(text=input_text)
+
+
+if __name__ == "__main__":
+    # Load the bias classification model once at startup; llm_generate reuses this pipeline.
+    model = "valurank/distilroberta-bias"
+    bias_pipeline = pipeline("text-classification", model=model, tokenizer=model)
+    opea_microservices["opea_service@bias_detection"].start()
diff --git a/comps/guardrails/bias_detection/requirements.txt b/comps/guardrails/bias_detection/requirements.txt
new file mode 100644
index 000000000..64bfa169c
--- /dev/null
+++ b/comps/guardrails/bias_detection/requirements.txt
@@ -0,0 +1,15 @@
+aiohttp
+docarray[full]
+fastapi
+httpx
+huggingface_hub
+langchain-community
+langchain-huggingface
+opentelemetry-api
+opentelemetry-exporter-otlp
+opentelemetry-sdk
+prometheus-fastapi-instrumentator
+pyyaml
+requests
+shortuuid
+uvicorn
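For reference, the microservice added above wraps a Hugging Face `text-classification` pipeline around `valurank/distilroberta-bias` (see `bias_detection.py`). The standalone sketch below shows roughly what the underlying classifier returns outside the microservice; it assumes `transformers` and a torch backend are installed as in the Dockerfile, the second example sentence is only illustrative, and the exact labels and scores come from the model (the service branches only on the `BIASED` label), so treat the printed values as indicative rather than definitive.

```python
# Standalone sanity check of the classifier wrapped by bias_detection.py.
# Assumption: transformers and a torch backend are installed, as in the Dockerfile above.
from transformers import pipeline

model = "valurank/distilroberta-bias"
bias_pipeline = pipeline("text-classification", model=model, tokenizer=model)

examples = [
    "John McCain exposed as an unprincipled politician",  # the example used in the README above
    "The meeting is scheduled for 3pm on Tuesday.",  # illustrative, presumably neutral input
]

for text in examples:
    result = bias_pipeline(text)[0]
    # The microservice blocks downstream nodes only when result["label"] == "BIASED".
    print(f"{result['label']}  score={result['score']:.2f}  text={text!r}")
```

In the microservice itself this pipeline is created once at startup and reused for every request, so the per-request cost is a single forward pass of the classifier.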