[Executor] Enrich error message for flex flow #3054

Merged
merged 11 commits on Apr 29, 2024
4 changes: 2 additions & 2 deletions examples/flex-flows/chat-stream/data.jsonl
@@ -1,3 +1,3 @@
{"question": "What is Prompt flow?", "statements": {"correctness": "should explain what's 'Prompt flow'"}}
{"question": "What is ChatGPT? Please explain with consise statement", "statements": { "correctness": "should explain what's ChatGPT", "consise": "It is a consise statement."}}
{"question": "What is Prompt flow?", "chat_history": [], "statements": { "correctness": "result should be 1", "consise": "It is a consise statement."}}
{"question": "What is ChatGPT? Please explain with consise statement", "chat_history": [], "statements": { "correctness": "result should be 1", "consise": "It is a consise statement."}}
{"question": "How many questions did user ask?", "chat_history": [{"role": "user","content": "where is the nearest coffee shop?"},{"role": "system","content": "I'm sorry, I don't know that. Would you like me to look it up for you?"}], "statements": { "correctness": "result should be 1", "consise": "It is a consise statement."}}
20 changes: 2 additions & 18 deletions src/promptflow-core/promptflow/_core/_errors.py
@@ -2,11 +2,10 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

from traceback import TracebackException

from promptflow._utils.exception_utils import (
ADDITIONAL_INFO_USER_EXECUTION_ERROR,
is_pf_core_frame,
extract_stack_trace_without_core_frame,
last_frame_info,
remove_suffix,
)
@@ -92,22 +91,7 @@ def tool_traceback(self):

The traceback inside the promptflow's internal code will be taken off.
"""
exc = self.inner_exception
if exc and exc.__traceback__ is not None:
tb = exc.__traceback__.tb_next
if tb is not None:
# The first frames are always our code invoking the tool.
# We do not want to dump it to user code's traceback.
# So, skip these frames from pf core module.
while is_pf_core_frame(tb.tb_frame) and tb.tb_next is not None:
tb = tb.tb_next
# We don't use traceback.format_exception since its interface differs between 3.8 and 3.10.
# Use this internal class to adapt to different python versions.
te = TracebackException(type(exc), exc, tb)
formatted_tb = "".join(te.format())
return formatted_tb

return None
return extract_stack_trace_without_core_frame(exc=self.inner_exception)

@property
def additional_info(self):
19 changes: 19 additions & 0 deletions src/promptflow-core/promptflow/_utils/exception_utils.py
@@ -14,6 +14,7 @@

ADDITIONAL_INFO_USER_EXECUTION_ERROR = "ToolExecutionErrorDetails"
ADDITIONAL_INFO_USER_CODE_STACKTRACE = "UserCodeStackTrace"
ADDITIONAL_INFO_FLEX_FLOW_ERROR = "FlexFlowExecutionErrorDetails"

CAUSE_MESSAGE = "\nThe above exception was the direct cause of the following exception:\n\n"
CONTEXT_MESSAGE = "\nDuring handling of the above exception, another exception occurred:\n\n"
@@ -410,3 +411,21 @@ def remove_suffix(text: str, suffix: str = None):
return text

return text[: -len(suffix)]


def extract_stack_trace_without_core_frame(exc: Exception):
"""Extract the stack trace without the core frame."""
if exc and exc.__traceback__ is not None:
tb = exc.__traceback__.tb_next
if tb is not None:
# The first frames are always our code invoking the tool.
# We do not want to dump it to user code's traceback.
# So, skip these frames from pf core module.
while is_pf_core_frame(tb.tb_frame) and tb.tb_next is not None:
tb = tb.tb_next
# We don't use traceback.format_exception since its interface differs between 3.8 and 3.10.
# Use this internal class to adapt to different python versions.
te = TracebackException(type(exc), exc, tb)
formatted_tb = "".join(te.format())
return formatted_tb
return None
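For context on the refactor above: the logic previously inlined in `tool_traceback` now lives in `extract_stack_trace_without_core_frame`, which skips the leading promptflow-core frames before formatting the remaining traceback. Below is a minimal, self-contained sketch of the same idea; `_looks_like_core_frame`, `trimmed_stack_trace`, and `user_code` are illustrative names for this example only, not part of the promptflow API.

```python
from traceback import TracebackException

# Hypothetical stand-in for promptflow's is_pf_core_frame: treat frames coming
# from paths containing "promptflow/_core" as framework-internal.
def _looks_like_core_frame(frame):
    return "promptflow/_core" in frame.f_code.co_filename

def trimmed_stack_trace(exc):
    """Drop leading framework frames, then format the rest of the traceback."""
    if exc is None or exc.__traceback__ is None:
        return None
    tb = exc.__traceback__.tb_next  # the first frame is the caller invoking user code
    if tb is None:
        return None
    while _looks_like_core_frame(tb.tb_frame) and tb.tb_next is not None:
        tb = tb.tb_next
    # TracebackException keeps a stable interface across Python 3.8-3.10+.
    return "".join(TracebackException(type(exc), exc, tb).format())

def user_code():
    raise ValueError("boom")

try:
    user_code()
except ValueError as e:
    print(trimmed_stack_trace(e))
```

Running this prints a traceback that starts at `user_code` rather than at the frame that invoked it, which mirrors what the helper does for tool and flex-flow errors.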
33 changes: 31 additions & 2 deletions src/promptflow-core/promptflow/executor/_errors.py
@@ -4,7 +4,13 @@

from jinja2 import TemplateSyntaxError

from promptflow._utils.exception_utils import ExceptionPresenter, infer_error_code_from_class, remove_suffix
from promptflow._utils.exception_utils import (
ADDITIONAL_INFO_FLEX_FLOW_ERROR,
ExceptionPresenter,
extract_stack_trace_without_core_frame,
infer_error_code_from_class,
remove_suffix,
)
from promptflow.exceptions import (
ErrorTarget,
PromptflowException,
@@ -96,7 +102,30 @@ def __init__(


class ScriptExecutionError(UserErrorException):
pass
@property
def flow_traceback(self):
"""Return the traceback inside the flow's source code scope.

The traceback inside the promptflow's internal code will be taken off.
"""
return extract_stack_trace_without_core_frame(self.inner_exception)

@property
def additional_info(self):
"""Set the exception details as additional info."""
if not self.inner_exception:
# Only populate additional info when inner exception is present.
return None

info = {
"type": self.inner_exception.__class__.__name__,
"message": str(self.inner_exception),
"traceback": self.flow_traceback,
}

return {
ADDITIONAL_INFO_FLEX_FLOW_ERROR: info,
}


class NodeInputValidationError(InvalidFlowRequest):
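To show roughly what the new `ScriptExecutionError.additional_info` payload looks like to a consumer, here is a hedged sketch. `build_flex_flow_error_details` is a hypothetical helper invented for this illustration; the real property additionally trims promptflow-internal frames via `extract_stack_trace_without_core_frame` instead of using `traceback.format_exception` directly.

```python
import traceback

# Illustrative only: the shape of the enriched error details attached under the
# new ADDITIONAL_INFO_FLEX_FLOW_ERROR key ("FlexFlowExecutionErrorDetails").
def build_flex_flow_error_details(inner_exception):
    if not inner_exception:
        # Only populated when an inner exception is present.
        return None
    return {
        "FlexFlowExecutionErrorDetails": {
            "type": inner_exception.__class__.__name__,
            "message": str(inner_exception),
            # The PR uses extract_stack_trace_without_core_frame here, which
            # also strips the framework's own frames from the traceback.
            "traceback": "".join(traceback.format_exception(
                type(inner_exception), inner_exception, inner_exception.__traceback__
            )),
        }
    }

try:
    raise ValueError("user code failed")
except ValueError as e:
    details = build_flex_flow_error_details(e)
    print(details["FlexFlowExecutionErrorDetails"]["type"])  # ValueError
```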
18 changes: 15 additions & 3 deletions src/promptflow-core/promptflow/executor/_script_executor.py
@@ -72,6 +72,16 @@ def __init__(
self._message_format = MessageFormatType.BASIC
self._multimedia_processor = BasicMultimediaProcessor()

@classmethod
def _get_func_name(cls, func: Callable):
try:
original_func = getattr(func, "__original_function")
if isinstance(original_func, partial):
original_func = original_func.func
return original_func.__qualname__
except AttributeError:
return func.__qualname__

@contextlib.contextmanager
def _exec_line_context(self, run_id, line_number):
# TODO: refactor NodeLogManager, for script executor, we don't have node concept.
@@ -147,14 +157,15 @@ def _exec_line(
# For these cases, raise ScriptExecutionError, which is classified as UserError
# and shows stack trace in the error message to make it easy for user to troubleshoot.
error_type_and_message = f"({e.__class__.__name__}) {e}"
e = ScriptExecutionError(
ex = ScriptExecutionError(
message_format="Execution failure in '{func_name}': {error_type_and_message}",
func_name=self._func.__qualname__,
func_name=self._func_name,
error_type_and_message=error_type_and_message,
error=e,
)
if not traces:
traces = Tracer.end_tracing(line_run_id)
run_tracker.end_run(line_run_id, ex=e, traces=traces)
run_tracker.end_run(line_run_id, ex=ex, traces=traces)
finally:
run_tracker.persist_flow_run(run_info)
return self._construct_line_result(output, run_info)
@@ -450,6 +461,7 @@ def _initialize_function(self):
else:
self._func = func
self._func_async = sync_to_async(func)
self._func_name = self._get_func_name(func=func)
return func

def _initialize_aggr_function(self, flow_obj: object):
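As a standalone illustration of how `_get_func_name` resolves a readable entry name: wrapped callables (for example, tracing-decorated entries) are expected to expose the wrapped target on an `__original_function` attribute, possibly as a `functools.partial`. The names `get_func_name` and `traced` below are hypothetical stand-ins used only for this sketch.

```python
from functools import partial

def get_func_name(func):
    """Resolve the display name of a possibly wrapped callable."""
    try:
        original_func = getattr(func, "__original_function")
        if isinstance(original_func, partial):
            original_func = original_func.func
        return original_func.__qualname__
    except AttributeError:
        return func.__qualname__

class ClassEntry:
    def __call__(self):
        return "ok"

def func_entry():
    return "ok"

def traced(f):
    # Simulates a decorator that records the wrapped target, as a partial,
    # on the wrapper it returns.
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    wrapper.__original_function = partial(f)
    return wrapper

print(get_func_name(ClassEntry().__call__))  # ClassEntry.__call__
print(get_func_name(func_entry))             # func_entry
print(get_func_name(traced(func_entry)))     # func_entry
```

This is why `test_get_function_name` below expects `ClassEntry.__call__`, `func_entry`, and `func_entry_async` rather than the names of any wrapper functions.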
6 changes: 6 additions & 0 deletions src/promptflow-core/tests/core/e2etests/test_eager_flow.py
@@ -203,3 +203,9 @@ def test_aggregation_error(self):
aggr_result = executor._exec_aggregation(inputs=[line_result.output])
# exec aggregation won't fail with error
assert aggr_result.metrics == {}

def test_get_function_name(self):
expected_names = ["ClassEntry.__call__", "func_entry", "func_entry_async"]
for (entry, _, _), expected_name in zip(function_entries, expected_names):
executor = FlowExecutor.create(entry, {})
assert executor._func_name == expected_name
@@ -1853,6 +1853,27 @@ def assert_func(details_dict):
run = pf.runs.create_or_update(run=run)
assert_batch_run_result(run, pf, assert_func)

def test_flow_run_with_enriched_error_message(self, pf):
config = AzureOpenAIModelConfiguration(
connection="azure_open_ai_connection", azure_deployment="gpt-35-turbo-0125"
)
flow_path = Path(f"{EAGER_FLOWS_DIR}/stream_prompty")
init_config = {"model_config": config}

run = pf.run(
flow=flow_path,
data=f"{EAGER_FLOWS_DIR}/stream_prompty/inputs.jsonl",
column_mapping={
"question": "${data.question}",
"chat_history": "${data.chat_history}",
},
init=init_config,
)
run_dict = run._to_dict()
error = run_dict["error"]["additionalInfo"][0]["info"]["errors"][0]["error"]
assert "Execution failure in 'ChatFlow.__call__" in error["message"]
assert "raise Exception" in error["additionalInfo"][0]["info"]["traceback"]


def assert_batch_run_result(run: Run, pf: PFClient, assert_func):
assert run.status == "Completed"
@@ -279,7 +279,7 @@ def test_class_based_eager_flow_test_without_yaml(self):
flow_path = Path(f"{EAGER_FLOWS_DIR}/basic_callable_class_without_yaml/").absolute()
with _change_working_dir(flow_path):
result = _client._flows.test(
flow="simple_callable_class:MyFlow", inputs={"func_input": "input"}, init={"obj_input": "val"}
flow="callable_without_yaml:MyFlow", inputs={"func_input": "input"}, init={"obj_input": "val"}
)
assert result["func_input"] == "input"

@@ -0,0 +1 @@
entry: callable_without_yaml:MyFlow
@@ -0,0 +1,29 @@
---
name: Stream Chat
description: Chat with stream enabled.
model:
api: chat
configuration:
type: azure_openai
azure_deployment: gpt-35-turbo
parameters:
temperature: 0.2
stream: true
inputs:
question:
type: string
chat_history:
type: list
sample: sample.json
---

system:
You are a helpful assistant.

{% for item in chat_history %}
{{item.role}}:
{{item.content}}
{% endfor %}

user:
{{question}}
@@ -0,0 +1,5 @@
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
entry: stream_prompty:ChatFlow
environment:
# image: mcr.microsoft.com/azureml/promptflow/promptflow-python
python_requirements_txt: requirements.txt
@@ -0,0 +1 @@
{"question": "What is Prompt flow?", "chat_history":[]}
@@ -0,0 +1,34 @@
from pathlib import Path

from promptflow.tracing import trace
from promptflow.core import AzureOpenAIModelConfiguration, Prompty

BASE_DIR = Path(__file__).absolute().parent


class ChatFlow:
def __init__(self, model_config: AzureOpenAIModelConfiguration):
self.model_config = model_config

@trace
def __call__(
self, question: str = "What is ChatGPT?", chat_history: list = None
) -> str:
"""Flow entry function."""

raise Exception("Exception")


if __name__ == "__main__":
from promptflow.tracing import start_trace

start_trace()
config = AzureOpenAIModelConfiguration(
connection="open_ai_connection", azure_deployment="gpt-35-turbo"
)
flow = ChatFlow(model_config=config)
result = flow("What's Azure Machine Learning?", [])

# print result in stream manner
for r in result:
print(result, end="")