Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Executor] Enrich error message for flex flow #3054

Merged
merged 11 commits into the base branch from the source branch
Apr 29, 2024
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions examples/flex-flows/chat-stream/data.jsonl
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
{"question": "What is Prompt flow?", "statements": {"correctness": "should explain what's 'Prompt flow'"}}
{"question": "What is ChatGPT? Please explain with consise statement", "statements": { "correctness": "should explain what's ChatGPT", "consise": "It is a consise statement."}}
{"question": "What is Prompt flow?", "chat_history": [], "statements": { "correctness": "result should be 1", "consise": "It is a consise statement."}}
{"question": "What is ChatGPT? Please explain with consise statement", "chat_history": [], "statements": { "correctness": "result should be 1", "consise": "It is a consise statement."}}
{"question": "How many questions did user ask?", "chat_history": [{"role": "user","content": "where is the nearest coffee shop?"},{"role": "system","content": "I'm sorry, I don't know that. Would you like me to look it up for you?"}], "statements": { "correctness": "result should be 1", "consise": "It is a consise statement."}}
10 changes: 7 additions & 3 deletions src/promptflow-core/promptflow/executor/_script_executor.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,14 +147,16 @@ def _exec_line(
# For these cases, raise ScriptExecutionError, which is classified as UserError
# and shows stack trace in the error message to make it easy for user to troubleshoot.
error_type_and_message = f"({e.__class__.__name__}) {e}"
e = ScriptExecutionError(
ex = ScriptExecutionError(
message_format="Execution failure in '{func_name}': {error_type_and_message}",
func_name=self._func.__qualname__,
func_name=self._original_function.__qualname__,
error_type_and_message=error_type_and_message,
)
# make sure traceback is from the original exception
ex.__traceback__ = e.__traceback__
if not traces:
traces = Tracer.end_tracing(line_run_id)
run_tracker.end_run(line_run_id, ex=e, traces=traces)
run_tracker.end_run(line_run_id, ex=ex, traces=traces)
finally:
run_tracker.persist_flow_run(run_info)
return self._construct_line_result(output, run_info)
Expand Down Expand Up @@ -427,6 +429,8 @@ def _parse_entry_func(self):

def _initialize_function(self):
func = self._parse_entry_func()
# persist original function for detailed error message
self._original_function = func
D-W- marked this conversation as resolved.
Show resolved Hide resolved
# If the function is not decorated with trace, add trace for it.
if not hasattr(func, "__original_function"):
func = _traced(func, trace_type=TraceType.FLOW)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1853,6 +1853,27 @@ def assert_func(details_dict):
run = pf.runs.create_or_update(run=run)
assert_batch_run_result(run, pf, assert_func)

def test_flow_run_with_enriched_error_message(self, pf):
    """Run a flex flow whose entry always raises and verify the enriched
    error message names the original entry function (not the trace wrapper)
    and carries the original stack trace.
    """
    model_config = AzureOpenAIModelConfiguration(
        connection="azure_open_ai_connection", azure_deployment="gpt-35-turbo-0125"
    )
    flow_dir = Path(f"{EAGER_FLOWS_DIR}/stream_prompty")

    run = pf.run(
        flow=flow_dir,
        data=f"{EAGER_FLOWS_DIR}/stream_prompty/inputs.jsonl",
        column_mapping={
            "question": "${data.question}",
            "chat_history": "${data.chat_history}",
        },
        init={"model_config": model_config},
    )
    # Drill into the first line-level error recorded on the batch run.
    error = run._to_dict()["error"]["additionalInfo"][0]["info"]["errors"][0]["error"]
    # Message must reference the undecorated entry function by qualname.
    assert "Execution failure in 'ChatFlow.__call__" in error["message"]
    # Stack trace must come from the original exception raised in the flow.
    assert "raise Exception" in error["debugInfo"]["stackTrace"]


def assert_batch_run_result(run: Run, pf: PFClient, assert_func):
assert run.status == "Completed"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -279,7 +279,7 @@ def test_class_based_eager_flow_test_without_yaml(self):
flow_path = Path(f"{EAGER_FLOWS_DIR}/basic_callable_class_without_yaml/").absolute()
with _change_working_dir(flow_path):
result = _client._flows.test(
flow="simple_callable_class:MyFlow", inputs={"func_input": "input"}, init={"obj_input": "val"}
flow="callable_without_yaml:MyFlow", inputs={"func_input": "input"}, init={"obj_input": "val"}
)
assert result["func_input"] == "input"

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
entry: callable_without_yaml:MyFlow
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
---
name: Stream Chat
description: Chat with stream enabled.
model:
api: chat
configuration:
type: azure_openai
azure_deployment: gpt-35-turbo
parameters:
temperature: 0.2
stream: true
inputs:
question:
type: string
chat_history:
type: list
sample: sample.json
---

system:
You are a helpful assistant.

{% for item in chat_history %}
{{item.role}}:
{{item.content}}
{% endfor %}

user:
{{question}}
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
entry: stream_prompty:ChatFlow
environment:
# image: mcr.microsoft.com/azureml/promptflow/promptflow-python
python_requirements_txt: requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
{"question": "What is Prompt flow?", "chat_history":[]}
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
from pathlib import Path

from promptflow.tracing import trace
from promptflow.core import AzureOpenAIModelConfiguration, Prompty

# Directory containing this module; presumably used to resolve the adjacent
# .prompty template — not referenced in the visible code, TODO confirm.
BASE_DIR = Path(__file__).absolute().parent


class ChatFlow:
    """Test-fixture flow entry that always fails.

    Used by the executor tests to verify that an exception raised inside a
    flex-flow entry surfaces an enriched error message with the original
    stack trace.
    """

    def __init__(self, model_config: AzureOpenAIModelConfiguration):
        # Stored for interface parity with real chat flows; never read,
        # because __call__ raises unconditionally.
        self.model_config = model_config

    @trace
    def __call__(
        self, question: str = "What is ChatGPT?", chat_history: list = None
    ) -> str:
        """Flow entry function."""
        # Deliberate failure: the tests assert this raise appears in the
        # reported stack trace.
        raise Exception("Exception")


if __name__ == "__main__":
    from promptflow.tracing import start_trace

    start_trace()
    config = AzureOpenAIModelConfiguration(
        connection="open_ai_connection", azure_deployment="gpt-35-turbo"
    )
    flow = ChatFlow(model_config=config)
    result = flow("What's Azure Machine Learning?", [])

    # print result in stream manner
    for r in result:
        # Fix: print each streamed chunk `r`; the original printed the whole
        # `result` object on every iteration.
        print(r, end="")
Loading