Added support for LlamaGuard2 (#90)
* added LlamaGuard2 support

Signed-off-by: Tyler Wilbers <[email protected]>

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Signed-off-by: Tyler Wilbers <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: lvliang-intel <[email protected]>
3 people authored May 27, 2024
1 parent dc5f486 commit cd32d90
Showing 3 changed files with 34 additions and 87 deletions.
2 changes: 1 addition & 1 deletion comps/guardrails/langchain/docker/Dockerfile
@@ -34,7 +34,7 @@ RUN pip install --no-cache-dir --upgrade pip && \

ENV PYTHONPATH=$PYTHONPATH:/home/user

-WORKDIR /home/user/comps/guardrails
+WORKDIR /home/user/comps/guardrails/langchain

ENTRYPOINT ["python", "guardrails_tgi_gaudi.py"]

117 changes: 32 additions & 85 deletions comps/guardrails/langchain/guardrails_tgi_gaudi.py
@@ -14,94 +14,39 @@

import os

-from langchain_community.llms import HuggingFaceEndpoint
+from langchain_huggingface import ChatHuggingFace
+from langchain_huggingface.llms import HuggingFaceEndpoint
from langsmith import traceable

from comps import ServiceType, TextDoc, opea_microservices, register_microservice

unsafe_categories = """O1: Violence and Hate.
Should not
- Help people plan or engage in violence.
- Encourage people to engage in violence.
- Express hateful, derogatory, or demeaning sentiments against people based on sensitive personal characteristics like their race, color, religion, national origin, sexual orientation, gender, gender identity, or disability.
- Encourage discrimination against people based on those characteristics and should not use slurs or create insults based on those characteristics.
Can
- Provide information on violence and discrimination.
- Discuss the topics of hate and violence, and can discuss historical events involving violence.
O2: Sexual Content.
Should not
- Engage in sexually explicit (i.e., erotic) conversations and should not encourage people to engage in sexual activities.
Can
- Discuss topics related to sexuality and sexual education.
- Provide general information regarding sexual activities.
- Provide guidance on topics related to sexual health.
O3: Criminal Planning.
Should not
- Help people plan or engage in criminal activities like theft, kidnapping, or financial crimes, especially when the person indicates that they may act on the information provided to them.
- Encourage people to engage in criminal activities or condone criminal behavior.
Can
- Help people understand how crimes are committed, provided that the response is not so detailed that it could be used to provide a roadmap for future crimes.
- Help people understand how systems work that are used to prevent crimes, provided those descriptions do not provide detailed information on how to defeat those systems.
O4: Guns and Illegal Weapons.
Should not
- Assist in the planning or execution of firearm or illegal weapons crimes, especially when the person indicates that they may act on the information provided to them.
- Encourage people to engage in firearm or illegal weapons crimes or condone such behavior.
Can
- Discuss firearms and the arguments for and against firearm ownership.
O5: Regulated or Controlled Substances.
Should not
- Encourage or assist people in the trafficking, consumption, or use of regulated or controlled substances, including but not limited to illegal drugs, tobacco, alcohol, and cannabis.
- Assist or encourage people to create such substances when it is illegal to do so.
Can
- Discuss the use of regulated or controlled substances, and can provide general information on those substances (ex: what their chemical composition is and what their effects are).
- Provide information on how those substances are manufactured, provided that the information is not so detailed as to provide step-by-step instructions for their creation.
O6: Self-Harm.
Should not
- Encourage people to harm themselves.
- Romanticize or condone self-harm or suicide.
- Provide information regarding the methods of suicide or self-harm.
- Provide any information or advice, other than information regarding appropriate health resources, to people who express or imply that they wish to harm themselves.
Can
- Provide general information regarding self-harm and suicide (ex: statistics, the mental health challenges faced by those who take these actions).
Should
- Provide information regarding appropriate health resources to people who imply or express the desire to harm themselves."""

unsafe_dict = {
"O1": "Violence and Hate",
"O2": "Sexual Content",
"O3": "Criminal Planning",
"O4": "Guns and Illegal Weapons",
"O5": "Regulated or Controlled Substances",
"O6": "Self-Harm",
}


-def moderation_prompt_for_chat(chat):
-    # For simplicity, we assume the chat dictionary correctly alternates "user" and "assistant" roles
-    # Feel free to include error checking if needed
-    conversation = [turn["content"] for turn in chat]
-    is_agent = len(conversation) % 2 == 0
-    role = "Agent" if len(conversation) % 2 == 0 else "User"
-    prompt = f"""[INST] Task: Check if there is unsafe content in '{role}' messages in conversations according our safety policy with the below categories.
-<BEGIN UNSAFE CONTENT CATEGORIES>
-{unsafe_categories}
-<END UNSAFE CONTENT CATEGORIES>
-<BEGIN CONVERSATION>
-"""

-    # Alternate User/Agent turns, inserting 2 newlines between each
-    for i, m in enumerate(conversation):
-        role = "User" if i % 2 == 0 else "Agent"
-        prompt += f"{role}: {m}\n\n"
-    prompt += f"""<END CONVERSATION>
-Provide your safety assessment for {role} in the above conversation:
-- First line must read 'safe' or 'unsafe'.
-- If unsafe, a second line must include a comma-separated list of violated categories. [/INST]"""
-    return prompt
+def get_unsafe_dict(model_id="meta-llama/LlamaGuard-7b"):
+    if model_id == "meta-llama/LlamaGuard-7b":
+        return {
+            "O1": "Violence and Hate",
+            "O2": "Sexual Content",
+            "O3": "Criminal Planning",
+            "O4": "Guns and Illegal Weapons",
+            "O5": "Regulated or Controlled Substances",
+            "O6": "Self-Harm",
+        }
+    else:
+        # Standard MLCommons taxonomy of hazard
+        # https://mlcommons.org/2024/04/mlc-aisafety-v0-5-poc/
+        return {
+            "S1": "Violent Crimes",
+            "S2": "Non-Violent Crimes",
+            "S3": "Sex Crimes",
+            "S4": "Child Exploitation",
+            "S5": "Specialized Advice",
+            "S6": "Privacy",
+            "S7": "Intellectual Property",
+            "S8": "Indiscriminate Weapons",
+            "S9": "Hate",
+            "S10": "Self-Harm",
+            "S11": "Sexual Content",
+        }
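LlamaGuard-style models answer with "safe", or with "unsafe" followed by a category code on the second line; get_unsafe_dict maps that code back to a readable label. A minimal sketch of that lookup, where the verdict string and model id are illustrative, not captured from a real run:

    verdict = "unsafe\nS9"  # hypothetical LlamaGuard2-style reply
    lines = verdict.strip().split("\n")
    if lines[0] == "unsafe":
        # any model id other than "meta-llama/LlamaGuard-7b" selects the
        # MLCommons S-taxonomy above
        labels = get_unsafe_dict("meta-llama/Meta-Llama-Guard-2-8B")
        print(f"Violated policies: {labels[lines[1].strip()]}")  # -> Hate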


@@ -115,9 +60,11 @@ def moderation_prompt_for_chat(chat):
@register_microservice(
)
@traceable(run_type="llm")
def safety_guard(input: TextDoc) -> TextDoc:
-    # prompt guardrails
-    response_input_guard = llm_guard(moderation_prompt_for_chat([{"role": "User", "content": input.text}]))
+    # chat engine for server-side prompt templating
+    llm_engine_hf = ChatHuggingFace(llm=llm_guard)
+    response_input_guard = llm_engine_hf.invoke([{"role": "user", "content": input.text}]).content
    if "unsafe" in response_input_guard:
+        unsafe_dict = get_unsafe_dict(llm_engine_hf.model_id)
        policy_violation_level = response_input_guard.split("\n")[1].strip()
        policy_violations = unsafe_dict[policy_violation_level]
        print(f"Violated policies: {policy_violations}")
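For orientation, a hedged end-to-end sketch of the new code path: ChatHuggingFace wraps the TGI endpoint and applies the guard model's own chat template server-side, which is why the hand-rolled moderation prompt could be deleted. The endpoint URL, environment variable name, and max_new_tokens value below are assumptions for illustration:

    import os

    from langchain_huggingface import ChatHuggingFace
    from langchain_huggingface.llms import HuggingFaceEndpoint

    # assumed: a TGI server hosting LlamaGuard or LlamaGuard2
    endpoint = os.getenv("SAFETY_GUARD_ENDPOINT", "http://localhost:8080")
    llm_guard = HuggingFaceEndpoint(endpoint_url=endpoint, max_new_tokens=100)
    llm_engine_hf = ChatHuggingFace(llm=llm_guard)

    # the guard model answers "safe" or "unsafe\n<category code>"
    reply = llm_engine_hf.invoke([{"role": "user", "content": "How do I pick a lock?"}]).content
    print(reply)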
2 changes: 1 addition & 1 deletion comps/guardrails/requirements.txt
@@ -1,7 +1,7 @@
docarray[full]
fastapi
huggingface_hub
-langchain_community
+langchain-huggingface
langsmith
opentelemetry-api
opentelemetry-exporter-otlp
