From 49e7c54734cae49c5b3b23797731dacb05621271 Mon Sep 17 00:00:00 2001 From: Clement Wang Date: Wed, 24 Apr 2024 16:01:31 +0800 Subject: [PATCH] update --- examples/prompty/basic/prompty-quickstart.ipynb | 15 +++++++++++---- examples/prompty/chat-basic/README.md | 2 +- .../prompty/chat-basic/chat-with-prompty.ipynb | 8 +++++++- examples/prompty/chat-basic/data.jsonl | 2 +- examples/prompty/eval-apology/README.md | 1 + examples/prompty/eval-apology/apology.prompty | 6 +++--- examples/prompty/format-output/README.md | 2 +- 7 files changed, 25 insertions(+), 11 deletions(-) diff --git a/examples/prompty/basic/prompty-quickstart.ipynb b/examples/prompty/basic/prompty-quickstart.ipynb index 44d5d546d04..bd968189561 100644 --- a/examples/prompty/basic/prompty-quickstart.ipynb +++ b/examples/prompty/basic/prompty-quickstart.ipynb @@ -114,9 +114,9 @@ "\n", "# override configuration with AzureOpenAIModelConfiguration\n", "configuration = AzureOpenAIModelConfiguration(\n", - " azure_endpoint=\"${env:AZURE_OPENAI_ENDPOINT}\", # Use ${env:} to surround the environment variable name.\n", - " api_key=\"${env:AZURE_OPENAI_API_KEY}\",\n", - " azure_deployment=\"gpt-35-turbo\",\n", + " #azure_endpoint=\"${env:AZURE_OPENAI_ENDPOINT}\", # Use ${env:} to surround the environment variable name.\n", + " #api_key=\"${env:AZURE_OPENAI_API_KEY}\",\n", + " azure_deployment=\"gpt-35-turbo-0125\",\n", ")\n", "\n", "# override configuration with OpenAIModelConfiguration\n", @@ -126,7 +126,7 @@ "# model=\"gpt-3.5-turbo\"\n", "# )\n", "\n", - "override_model = {\"configuration\": configuration, \"parameters\": {\"max_token\": 512}}\n", + "override_model = {\"configuration\": configuration, \"parameters\": {\"max_tokens\": 512}}\n", "\n", "# load prompty as a flow\n", "f = Prompty.load(source=\"basic.prompty\", model=override_model)\n", @@ -241,6 +241,11 @@ "base_run = pf.run(\n", " flow=flow,\n", " data=data,\n", + " column_mapping={\n", + " \"first_name\": \"${data.first_name}\",\n", + " 
\"last_name\": \"${data.last_name}\",\n", + " \"question\": \"${data.question}\",\n", + " },\n", " stream=True,\n", ")" ] @@ -284,7 +289,9 @@ " data=\"./data.jsonl\", # path to the data file\n", " run=base_run, # specify base_run as the run you want to evaluate\n", " column_mapping={\n", + " \"question\": \"${data.question}\",\n", " \"answer\": \"${run.outputs.answer}\",\n", + " \"ground_truth\": \"${data.ground_truth}\"\n", " },\n", " stream=True,\n", ")" diff --git a/examples/prompty/chat-basic/README.md b/examples/prompty/chat-basic/README.md index 4d48f174438..29a2bc5d279 100644 --- a/examples/prompty/chat-basic/README.md +++ b/examples/prompty/chat-basic/README.md @@ -70,7 +70,7 @@ pf flow test --flow chat.prompty --inputs sample.json # start test in interactive terminal (TODO) pf flow test --flow chat.prompty --interactive -# start test in chat ui (TODO) +# start test in chat ui pf flow test --flow chat.prompty --ui ``` diff --git a/examples/prompty/chat-basic/chat-with-prompty.ipynb b/examples/prompty/chat-basic/chat-with-prompty.ipynb index 4553e2fa915..7c1b45dbb69 100644 --- a/examples/prompty/chat-basic/chat-with-prompty.ipynb +++ b/examples/prompty/chat-basic/chat-with-prompty.ipynb @@ -148,7 +148,7 @@ "\n", "# override configuration with created connection in AzureOpenAIModelConfiguration\n", "configuration = AzureOpenAIModelConfiguration(\n", - " connection=connection, azure_deployment=\"gpt-35-turbo\"\n", + " connection=\"open_ai_connection\", azure_deployment=\"gpt-35-turbo-0125\"\n", ")\n", "\n", "# override openai connection with OpenAIModelConfiguration\n", @@ -263,6 +263,12 @@ "base_run = pf.run(\n", " flow=flow,\n", " data=data,\n", + " column_mapping={\n", + " \"first_name\": \"${data.first_name}\",\n", + " \"last_name\": \"${data.last_name}\",\n", + " \"question\": \"${data.question}\",\n", + " \"chat_history\": \"${data.chat_history}\",\n", + " },\n", " stream=True,\n", ")" ] diff --git a/examples/prompty/chat-basic/data.jsonl 
b/examples/prompty/chat-basic/data.jsonl index 2578f7e65ea..e7fdf7ee8cb 100644 --- a/examples/prompty/chat-basic/data.jsonl +++ b/examples/prompty/chat-basic/data.jsonl @@ -1,3 +1,3 @@ {"first_name": "John", "last_name": "Doe", "question": "What's chat-GPT?", "chat_history": []} {"first_name": "John", "last_name": "Doe", "question": "How many questions did John Doe ask?", "chat_history": []} -{"first_name": "John", "last_name": "Doe", "question": "How many questions did John Doe ask?", "chat_history": [{"role": "user","content": "where is the nearest coffee shop?"},{"role": "system","content": "I'm sorry, I don't know that. Would you like me to look it up for you?"}]} \ No newline at end of file +{"first_name": "John", "last_name": "Doe", "question": "How many questions did John Doe ask?", "chat_history": [{"role": "user","content": "where is the nearest coffee shop?"},{"role": "assistant","content": "I'm sorry, I don't know that. Would you like me to look it up for you?"}]} \ No newline at end of file diff --git a/examples/prompty/eval-apology/README.md b/examples/prompty/eval-apology/README.md index f367e070d51..fef23060a1f 100644 --- a/examples/prompty/eval-apology/README.md +++ b/examples/prompty/eval-apology/README.md @@ -42,4 +42,5 @@ cat ../.env ```bash # sample.json contains messages field which contains the chat conversation. pf flow test --flow apology.prompty --inputs sample.json +pf flow test --flow apology.prompty --inputs sample_no_apology.json ``` diff --git a/examples/prompty/eval-apology/apology.prompty b/examples/prompty/eval-apology/apology.prompty index 9cd6e0a5951..190167f9fde 100644 --- a/examples/prompty/eval-apology/apology.prompty +++ b/examples/prompty/eval-apology/apology.prompty @@ -22,8 +22,8 @@ sample: sample.json system: You are an AI tool that determines if, in a chat conversation, the assistant apologized, like say sorry. -Only provide a response of {"score": 0} or {"score": 1} so that the output is valid JSON. 
-Give a score of 1 if apologized in the chat conversation. +Only provide a response of {"apology": 0} or {"apology": 1} so that the output is valid JSON. +Give an apology of 1 if apologized in the chat conversation. Here are some examples of chat conversations and the correct response: @@ -31,7 +31,7 @@ Here are some examples of chat conversations and the correct response: user: Where can I get my car fixed? assistant: I'm sorry, I don't know that. Would you like me to look it up for you? result: -{"score": 1} +{"apology": 1} **Here the actual conversation to be scored:** {% for message in messages %} diff --git a/examples/prompty/format-output/README.md b/examples/prompty/format-output/README.md index 7dd744834eb..b3bec516874 100644 --- a/examples/prompty/format-output/README.md +++ b/examples/prompty/format-output/README.md @@ -63,7 +63,7 @@ pf flow test --flow stream_output.prompty # start test in interactive terminal (TODO) pf flow test --flow stream_output.prompty --interactive -# start test in chat ui (TODO) +# start test in chat ui pf flow test --flow stream_output.prompty --ui ```