Add simulator. #14742
28 fail, 13 skipped, 632 pass in 57m 38s
Annotations
github-actions / SDK CLI Test Result [task/addSimulator](https://github.com/microsoft/promptflow/actions/workflows/promptflow-sdk-cli-test.yml?query=branch:task/addSimulator++)
All 4 runs failed: test_pf_flow_test_with_non_english_input_output (tests.sdk_cli_test.e2etests.test_cli.TestCli)
artifacts/Test Results (Python 3.8, 3.9, 3.10, 3.11) (OS ubuntu-latest)/test-results.xml [took 0s each]
Raw output
SystemExit: 1
args = (Namespace(action='flow', collection=None, config=None, debug=False, detail=None, environment_variables=None, flow='/h...ser=False, sub_action='test', ui=False, url_params=None, user_agent=None, variant=None, verbose=False, version=False),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>
@wraps(func)
def wrapper(*args, **kwargs):
try:
telemetry_logger = get_telemetry_logger()
with log_activity(
telemetry_logger,
activity_name,
activity_type=ActivityType.PUBLICAPI,
custom_dimensions=custom_dimensions,
):
> return func(*args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:282:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:88: in run_command
raise ex
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:62: in run_command
dispatch_flow_commands(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:78: in dispatch_flow_commands
test_flow(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:464: in test_flow
_test_flow_standard(args, pf_client, inputs, environment_variables)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:546: in _test_flow_standard
result = pf_client.flows.test(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_flow_operations.py:164: in test
TestSubmitter._raise_error_when_test_failed(result, show_trace=node is not None)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
test_result = LineResult(output={}, aggregation_inputs={}, run_info=FlowRunInfo(run_id='3c550a47-6449-4225-abb0-868d6ccd9517_0', sta...[], "_func": "Completions.create"}\n\n'}, system_metrics={'duration': 0.013365}, result=None, message_format='basic')})
show_trace = False
@staticmethod
def _raise_error_when_test_failed(test_result, show_trace=False):
from promptflow.executor._result import LineResult
test_status = test_result.run_info.status if isinstance(test_result, LineResult) else test_result.status
if test_status == Status.Failed:
error_dict = test_result.run_info.error if isinstance(test_result, LineResult) else test_result.error
error_response = ErrorResponse.from_error_dict(error_dict)
user_execution_error = error_response.get_user_execution_error_info()
error_message = error_response.message
stack_trace = user_execution_error.get("traceback", "")
error_type = user_execution_error.get("type", "Exception")
if show_trace:
print(stack_trace)
> raise UserErrorException(f"{error_type}: {error_message}", error=stack_trace)
E promptflow.exceptions.UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.
E values: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "\u4ec0\u4e48\u662f chat gpt"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:618: UserErrorException
During handling of the above exception, another exception occurred:
self = <sdk_cli_test.e2etests.test_cli.TestCli object at 0x7f85e49a54c0>
capsys = <_pytest.capture.CaptureFixture object at 0x7f85c8f93fa0>
def test_pf_flow_test_with_non_english_input_output(self, capsys):
# disable trace to not invoke prompt flow service, which will print unexpected content to stdout
with mock.patch("promptflow._sdk._tracing.is_trace_feature_disabled", return_value=True):
question = "什么是 chat gpt"
> run_pf_command("flow", "test", "--flow", f"{FLOWS_DIR}/chat_flow", "--inputs", f'question="{question}"')
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:372:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:61: in run_pf_command
main()
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:156: in main
entry(command_args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:137: in entry
cli_exception_and_telemetry_handler(run_command, activity_name)(args)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = (Namespace(action='flow', collection=None, config=None, debug=False, detail=None, environment_variables=None, flow='/h...ser=False, sub_action='test', ui=False, url_params=None, user_agent=None, variant=None, verbose=False, version=False),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>
@wraps(func)
def wrapper(*args, **kwargs):
try:
telemetry_logger = get_telemetry_logger()
with log_activity(
telemetry_logger,
activity_name,
activity_type=ActivityType.PUBLICAPI,
custom_dimensions=custom_dimensions,
):
return func(*args, **kwargs)
except Exception as e:
if is_format_exception():
# When the flag format_exception is set in command,
# it will write a json with exception info and command to stderr.
error_msg = ExceptionPresenter.create(e).to_dict(include_debug_info=True)
error_msg["command"] = " ".join(sys.argv)
sys.stderr.write(json.dumps(error_msg))
if isinstance(e, PromptflowException):
print_red_error(f"{activity_name} failed with {e.__class__.__name__}: {str(e)}")
> sys.exit(1)
E SystemExit: 1
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:292: SystemExit
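Note on the shared root cause: every failure in this report is the same RecordItemMissingException. In CI these tests replay OpenAI traffic from a recorded cache (node_cache.shelve under src/promptflow-recording) instead of calling the live API, and the requests issued on this branch have no recorded entry, so the replay layer raises. The sketch below shows how a shelve-backed replay lookup of this shape can miss; the key derivation and names are illustrative assumptions, not promptflow-recording's actual implementation.

import hashlib
import json
import shelve


class RecordItemMissingException(Exception):
    """Raised when a request has no recorded response in the cache."""


def replay(cache_path: str, values: dict):
    # Hypothetical key derivation: hash the canonical JSON form of the
    # request, so any change to the payload (model, messages, flags)
    # changes the key.
    key = hashlib.sha256(
        json.dumps(values, sort_keys=True).encode("utf-8")
    ).hexdigest()
    with shelve.open(cache_path) as cache:
        if key not in cache:
            # Mirrors the error text above: an unrecorded request misses.
            raise RecordItemMissingException(
                f"Record item not found in file {cache_path}.\nvalues: {json.dumps(values)}"
            )
        return cache[key]

Under this model, re-running the recording step with the new inputs (for example the "什么是 chat gpt" question above) is what would repopulate the cache.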
All 4 runs failed: test_init_chat_flow (tests.sdk_cli_test.e2etests.test_cli.TestCli)
artifacts/Test Results (Python 3.8, 3.9, 3.10, 3.11) (OS ubuntu-latest)/test-results.xml [took 0s each]
Raw output
SystemExit: 1
args = (Namespace(action='flow', collection=None, config=None, debug=False, detail=None, environment_variables=None, flow='ch...ser=False, sub_action='test', ui=False, url_params=None, user_agent=None, variant=None, verbose=False, version=False),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>
@wraps(func)
def wrapper(*args, **kwargs):
try:
telemetry_logger = get_telemetry_logger()
with log_activity(
telemetry_logger,
activity_name,
activity_type=ActivityType.PUBLICAPI,
custom_dimensions=custom_dimensions,
):
> return func(*args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:282:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:88: in run_command
raise ex
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:62: in run_command
dispatch_flow_commands(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:78: in dispatch_flow_commands
test_flow(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:464: in test_flow
_test_flow_standard(args, pf_client, inputs, environment_variables)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:546: in _test_flow_standard
result = pf_client.flows.test(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_flow_operations.py:164: in test
TestSubmitter._raise_error_when_test_failed(result, show_trace=node is not None)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
test_result = LineResult(output={}, aggregation_inputs={}, run_info=FlowRunInfo(run_id='b5302699-c6ff-4b0d-b80f-4eec552de736_0', sta...[], "_func": "Completions.create"}\n\n'}, system_metrics={'duration': 0.013767}, result=None, message_format='basic')})
show_trace = False
@staticmethod
def _raise_error_when_test_failed(test_result, show_trace=False):
from promptflow.executor._result import LineResult
test_status = test_result.run_info.status if isinstance(test_result, LineResult) else test_result.status
if test_status == Status.Failed:
error_dict = test_result.run_info.error if isinstance(test_result, LineResult) else test_result.error
error_response = ErrorResponse.from_error_dict(error_dict)
user_execution_error = error_response.get_user_execution_error_info()
error_message = error_response.message
stack_trace = user_execution_error.get("traceback", "")
error_type = user_execution_error.get("type", "Exception")
if show_trace:
print(stack_trace)
> raise UserErrorException(f"{error_type}: {error_message}", error=stack_trace)
E promptflow.exceptions.UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.
E values: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "hi"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:618: UserErrorException
During handling of the above exception, another exception occurred:
self = <sdk_cli_test.e2etests.test_cli.TestCli object at 0x7f85e499bf70>
def test_init_chat_flow(self):
temp_dir = mkdtemp()
with _change_working_dir(temp_dir):
flow_name = "chat_flow"
# Init standard flow
run_pf_command(
"flow",
"init",
"--flow",
flow_name,
"--type",
"chat",
)
ignore_file_path = Path(temp_dir) / flow_name / ".gitignore"
assert ignore_file_path.exists()
ignore_file_path.unlink()
# Only azure openai connection in test env
with open(Path(temp_dir) / flow_name / "flow.dag.yaml", "r") as f:
flow_dict = load_yaml(f)
flow_dict["nodes"][0]["provider"] = "AzureOpenAI"
flow_dict["nodes"][0]["connection"] = "azure_open_ai_connection"
with open(Path(temp_dir) / flow_name / "flow.dag.yaml", "w") as f:
dump_yaml(flow_dict, f)
> run_pf_command("flow", "test", "--flow", flow_name, "--inputs", "question=hi")
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:774:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:61: in run_pf_command
main()
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:156: in main
entry(command_args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:137: in entry
cli_exception_and_telemetry_handler(run_command, activity_name)(args)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = (Namespace(action='flow', collection=None, config=None, debug=False, detail=None, environment_variables=None, flow='ch...ser=False, sub_action='test', ui=False, url_params=None, user_agent=None, variant=None, verbose=False, version=False),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>
@wraps(func)
def wrapper(*args, **kwargs):
try:
telemetry_logger = get_telemetry_logger()
with log_activity(
telemetry_logger,
activity_name,
activity_type=ActivityType.PUBLICAPI,
custom_dimensions=custom_dimensions,
):
return func(*args, **kwargs)
except Exception as e:
if is_format_exception():
# When the flag format_exception is set in command,
# it will write a json with exception info and command to stderr.
error_msg = ExceptionPresenter.create(e).to_dict(include_debug_info=True)
error_msg["command"] = " ".join(sys.argv)
sys.stderr.write(json.dumps(error_msg))
if isinstance(e, PromptflowException):
print_red_error(f"{activity_name} failed with {e.__class__.__name__}: {str(e)}")
> sys.exit(1)
E SystemExit: 1
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:292: SystemExit
All 4 runs failed: test_flow_chat (tests.sdk_cli_test.e2etests.test_cli.TestCli)
artifacts/Test Results (Python 3.8, 3.9, 3.10, 3.11) (OS ubuntu-latest)/test-results.xml [took 0s each]
Raw output
SystemExit: 1
args = (Namespace(action='flow', collection=None, config=None, debug=False, detail=None, environment_variables=None, flow='/h...ser=False, sub_action='test', ui=False, url_params=None, user_agent=None, variant=None, verbose=False, version=False),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>
@wraps(func)
def wrapper(*args, **kwargs):
try:
telemetry_logger = get_telemetry_logger()
with log_activity(
telemetry_logger,
activity_name,
activity_type=ActivityType.PUBLICAPI,
custom_dimensions=custom_dimensions,
):
> return func(*args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:282:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:88: in run_command
raise ex
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:62: in run_command
dispatch_flow_commands(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:78: in dispatch_flow_commands
test_flow(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:462: in test_flow
_test_flow_interactive(args, pf_client, inputs, environment_variables)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:534: in _test_flow_interactive
pf_client.flows._chat(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_flow_operations.py:360: in _chat
submitter._chat_flow(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:567: in _chat_flow
self._raise_error_when_test_failed(flow_result, show_trace=True)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
test_result = LineResult(output={}, aggregation_inputs={}, run_info=FlowRunInfo(run_id='a285ba59-5c44-421f-9987-dd9e74026da5_0', sta...[], "_func": "Completions.create"}\n\n'}, system_metrics={'duration': 0.012627}, result=None, message_format='basic')})
show_trace = True
@staticmethod
def _raise_error_when_test_failed(test_result, show_trace=False):
from promptflow.executor._result import LineResult
test_status = test_result.run_info.status if isinstance(test_result, LineResult) else test_result.status
if test_status == Status.Failed:
error_dict = test_result.run_info.error if isinstance(test_result, LineResult) else test_result.error
error_response = ErrorResponse.from_error_dict(error_dict)
user_execution_error = error_response.get_user_execution_error_info()
error_message = error_response.message
stack_trace = user_execution_error.get("traceback", "")
error_type = user_execution_error.get("type", "Exception")
if show_trace:
print(stack_trace)
> raise UserErrorException(f"{error_type}: {error_message}", error=stack_trace)
E promptflow.exceptions.UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.
E values: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:618: UserErrorException
During handling of the above exception, another exception occurred:
self = <sdk_cli_test.e2etests.test_cli.TestCli object at 0x7f85e4989ee0>
monkeypatch = <_pytest.monkeypatch.MonkeyPatch object at 0x7f85c9459340>
capsys = <_pytest.capture.CaptureFixture object at 0x7f85c93930d0>
def test_flow_chat(self, monkeypatch, capsys):
chat_list = ["hi", "what is chat gpt?"]
def mock_input(*args, **kwargs):
if chat_list:
return chat_list.pop()
else:
raise KeyboardInterrupt()
monkeypatch.setattr("builtins.input", mock_input)
> run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/chat_flow",
"--interactive",
)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:980:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:61: in run_pf_command
main()
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:156: in main
entry(command_args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:137: in entry
cli_exception_and_telemetry_handler(run_command, activity_name)(args)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = (Namespace(action='flow', collection=None, config=None, debug=False, detail=None, environment_variables=None, flow='/h...ser=False, sub_action='test', ui=False, url_params=None, user_agent=None, variant=None, verbose=False, version=False),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>
@wraps(func)
def wrapper(*args, **kwargs):
try:
telemetry_logger = get_telemetry_logger()
with log_activity(
telemetry_logger,
activity_name,
activity_type=ActivityType.PUBLICAPI,
custom_dimensions=custom_dimensions,
):
return func(*args, **kwargs)
except Exception as e:
if is_format_exception():
# When the flag format_exception is set in command,
# it will write a json with exception info and command to stderr.
error_msg = ExceptionPresenter.create(e).to_dict(include_debug_info=True)
error_msg["command"] = " ".join(sys.argv)
sys.stderr.write(json.dumps(error_msg))
if isinstance(e, PromptflowException):
print_red_error(f"{activity_name} failed with {e.__class__.__name__}: {str(e)}")
> sys.exit(1)
E SystemExit: 1
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:292: SystemExit
All 4 runs failed: test_invalid_chat_flow (tests.sdk_cli_test.e2etests.test_cli.TestCli)
artifacts/Test Results (Python 3.8, 3.9, 3.10, 3.11) (OS ubuntu-latest)/test-results.xml [took 0s each]
Raw output
assert "Execution failure in 'show_answer': (Exception) mock exception" in 'Prompt flow service has started...\n===============================================\nWelcome to chat flow, chat_flow_with_exception.\nPress Enter to send your message.\nYou can quit with ctrl+C.\n===============================================\nUser: \npf.flow.test failed with UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.\nvalues: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}\n\n'
+ where 'Prompt flow service has started...\n===============================================\nWelcome to chat flow, chat_flow_with_exception.\nPress Enter to send your message.\nYou can quit with ctrl+C.\n===============================================\nUser: \npf.flow.test failed with UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.\nvalues: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}\n\n' = CaptureResult(out='Prompt flow service has started...\n===============================================\nWelcome to chat flow, chat_flow_with_exception.\nPress Enter to send your message.\nYou can quit with ctrl+C.\n===============================================\nUser: \npf.flow.test failed with UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.\nvalues: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}\n\n', err='').out
self = <sdk_cli_test.e2etests.test_cli.TestCli object at 0x7f85e4980040>
monkeypatch = <_pytest.monkeypatch.MonkeyPatch object at 0x7f85c8bff160>
capsys = <_pytest.capture.CaptureFixture object at 0x7f85c8bff1f0>
def test_invalid_chat_flow(self, monkeypatch, capsys):
def mock_input(*args, **kwargs):
if chat_list:
return chat_list.pop()
else:
raise KeyboardInterrupt()
monkeypatch.setattr("builtins.input", mock_input)
chat_list = ["hi", "what is chat gpt?"]
with pytest.raises(SystemExit):
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/chat_flow_with_exception",
"--interactive",
)
outerr = capsys.readouterr()
> assert "Execution failure in 'show_answer': (Exception) mock exception" in outerr.out
E assert "Execution failure in 'show_answer': (Exception) mock exception" in 'Prompt flow service has started...\n===============================================\nWelcome to chat flow, chat_flow_with_exception.\nPress Enter to send your message.\nYou can quit with ctrl+C.\n===============================================\nUser: \npf.flow.test failed with UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.\nvalues: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}\n\n'
E + where 'Prompt flow service has started...\n===============================================\nWelcome to chat flow, chat_flow_with_exception.\nPress Enter to send your message.\nYou can quit with ctrl+C.\n===============================================\nUser: \npf.flow.test failed with UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.\nvalues: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}\n\n' = CaptureResult(out='Prompt flow service has started...\n===============================================\nWelcome to chat flow, chat_flow_with_exception.\nPress Enter to send your message.\nYou can quit with ctrl+C.\n===============================================\nUser: \npf.flow.test failed with UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.\nvalues: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}\n\n', err='').out
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:1037: AssertionError
All 4 runs failed: test_chat_with_stream_output (tests.sdk_cli_test.e2etests.test_cli.TestCli)
artifacts/Test Results (Python 3.8, 3.9, 3.10, 3.11) (OS ubuntu-latest)/test-results.xml [took 0s each]
Raw output
SystemExit: 1
args = (Namespace(action='flow', collection=None, config=None, debug=False, detail=None, environment_variables=None, flow='/h...ser=False, sub_action='test', ui=False, url_params=None, user_agent=None, variant=None, verbose=False, version=False),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>
@wraps(func)
def wrapper(*args, **kwargs):
try:
telemetry_logger = get_telemetry_logger()
with log_activity(
telemetry_logger,
activity_name,
activity_type=ActivityType.PUBLICAPI,
custom_dimensions=custom_dimensions,
):
> return func(*args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:282:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:88: in run_command
raise ex
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:62: in run_command
dispatch_flow_commands(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:78: in dispatch_flow_commands
test_flow(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:462: in test_flow
_test_flow_interactive(args, pf_client, inputs, environment_variables)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:534: in _test_flow_interactive
pf_client.flows._chat(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_flow_operations.py:360: in _chat
submitter._chat_flow(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:567: in _chat_flow
self._raise_error_when_test_failed(flow_result, show_trace=True)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
test_result = LineResult(output={}, aggregation_inputs={}, run_info=FlowRunInfo(run_id='5cd2bd54-a524-4118-8e9e-62c03cd6804f_0', sta...[], "_func": "Completions.create"}\n\n'}, system_metrics={'duration': 0.013146}, result=None, message_format='basic')})
show_trace = True
@staticmethod
def _raise_error_when_test_failed(test_result, show_trace=False):
from promptflow.executor._result import LineResult
test_status = test_result.run_info.status if isinstance(test_result, LineResult) else test_result.status
if test_status == Status.Failed:
error_dict = test_result.run_info.error if isinstance(test_result, LineResult) else test_result.error
error_response = ErrorResponse.from_error_dict(error_dict)
user_execution_error = error_response.get_user_execution_error_info()
error_message = error_response.message
stack_trace = user_execution_error.get("traceback", "")
error_type = user_execution_error.get("type", "Exception")
if show_trace:
print(stack_trace)
> raise UserErrorException(f"{error_type}: {error_message}", error=stack_trace)
E promptflow.exceptions.UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.
E values: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": true, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:618: UserErrorException
During handling of the above exception, another exception occurred:
self = <sdk_cli_test.e2etests.test_cli.TestCli object at 0x7f85e49806d0>
monkeypatch = <_pytest.monkeypatch.MonkeyPatch object at 0x7f85c922c940>
capsys = <_pytest.capture.CaptureFixture object at 0x7f85c922cf70>
def test_chat_with_stream_output(self, monkeypatch, capsys):
chat_list = ["hi", "what is chat gpt?"]
def mock_input(*args, **kwargs):
if chat_list:
return chat_list.pop()
else:
raise KeyboardInterrupt()
monkeypatch.setattr("builtins.input", mock_input)
# Test streaming output
chat_list = ["hi", "what is chat gpt?"]
> run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/chat_flow_with_stream_output",
"--interactive",
)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:1089:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:61: in run_pf_command
main()
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:156: in main
entry(command_args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:137: in entry
cli_exception_and_telemetry_handler(run_command, activity_name)(args)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = (Namespace(action='flow', collection=None, config=None, debug=False, detail=None, environment_variables=None, flow='/h...ser=False, sub_action='test', ui=False, url_params=None, user_agent=None, variant=None, verbose=False, version=False),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>
@wraps(func)
def wrapper(*args, **kwargs):
try:
telemetry_logger = get_telemetry_logger()
with log_activity(
telemetry_logger,
activity_name,
activity_type=ActivityType.PUBLICAPI,
custom_dimensions=custom_dimensions,
):
return func(*args, **kwargs)
except Exception as e:
if is_format_exception():
# When the flag format_exception is set in command,
# it will write a json with exception info and command to stderr.
error_msg = ExceptionPresenter.create(e).to_dict(include_debug_info=True)
error_msg["command"] = " ".join(sys.argv)
sys.stderr.write(json.dumps(error_msg))
if isinstance(e, PromptflowException):
print_red_error(f"{activity_name} failed with {e.__class__.__name__}: {str(e)}")
> sys.exit(1)
E SystemExit: 1
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:292: SystemExit
All 4 runs failed: test_flow_test_with_default_chat_history (tests.sdk_cli_test.e2etests.test_cli.TestCli)
artifacts/Test Results (Python 3.8, 3.9, 3.10, 3.11) (OS ubuntu-latest)/test-results.xml [took 0s each]
Raw output
SystemExit: 1
args = (Namespace(action='flow', collection=None, config=None, debug=False, detail=None, environment_variables=None, flow='/h...ser=False, sub_action='test', ui=False, url_params=None, user_agent=None, variant=None, verbose=False, version=False),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>
@wraps(func)
def wrapper(*args, **kwargs):
try:
telemetry_logger = get_telemetry_logger()
with log_activity(
telemetry_logger,
activity_name,
activity_type=ActivityType.PUBLICAPI,
custom_dimensions=custom_dimensions,
):
> return func(*args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:282:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:88: in run_command
raise ex
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:62: in run_command
dispatch_flow_commands(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:78: in dispatch_flow_commands
test_flow(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:464: in test_flow
_test_flow_standard(args, pf_client, inputs, environment_variables)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:546: in _test_flow_standard
result = pf_client.flows.test(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_flow_operations.py:164: in test
TestSubmitter._raise_error_when_test_failed(result, show_trace=node is not None)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
test_result = LineResult(output={}, aggregation_inputs={}, run_info=FlowRunInfo(run_id='4cd43dd1-9283-4f30-a2c7-a38d0944f6c6_0', sta...[], "_func": "Completions.create"}\n\n'}, system_metrics={'duration': 0.016748}, result=None, message_format='basic')})
show_trace = False
@staticmethod
def _raise_error_when_test_failed(test_result, show_trace=False):
from promptflow.executor._result import LineResult
test_status = test_result.run_info.status if isinstance(test_result, LineResult) else test_result.status
if test_status == Status.Failed:
error_dict = test_result.run_info.error if isinstance(test_result, LineResult) else test_result.error
error_response = ErrorResponse.from_error_dict(error_dict)
user_execution_error = error_response.get_user_execution_error_info()
error_message = error_response.message
stack_trace = user_execution_error.get("traceback", "")
error_type = user_execution_error.get("type", "Exception")
if show_trace:
print(stack_trace)
> raise UserErrorException(f"{error_type}: {error_message}", error=stack_trace)
E promptflow.exceptions.UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.
E values: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "hi"}, {"role": "assistant", "content": "hi"}, {"role": "user", "content": "who are you"}, {"role": "assistant", "content": "who are you"}, {"role": "user", "content": "What is ChatGPT?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:618: UserErrorException
During handling of the above exception, another exception occurred:
self = <sdk_cli_test.e2etests.test_cli.TestCli object at 0x7f85e4980af0>
def test_flow_test_with_default_chat_history(self):
> run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/chat_flow_with_default_history",
)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:1123:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:61: in run_pf_command
main()
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:156: in main
entry(command_args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:137: in entry
cli_exception_and_telemetry_handler(run_command, activity_name)(args)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = (Namespace(action='flow', collection=None, config=None, debug=False, detail=None, environment_variables=None, flow='/h...ser=False, sub_action='test', ui=False, url_params=None, user_agent=None, variant=None, verbose=False, version=False),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>
@wraps(func)
def wrapper(*args, **kwargs):
try:
telemetry_logger = get_telemetry_logger()
with log_activity(
telemetry_logger,
activity_name,
activity_type=ActivityType.PUBLICAPI,
custom_dimensions=custom_dimensions,
):
return func(*args, **kwargs)
except Exception as e:
if is_format_exception():
# When the flag format_exception is set in command,
# it will write a json with exception info and command to stderr.
error_msg = ExceptionPresenter.create(e).to_dict(include_debug_info=True)
error_msg["command"] = " ".join(sys.argv)
sys.stderr.write(json.dumps(error_msg))
if isinstance(e, PromptflowException):
print_red_error(f"{activity_name} failed with {e.__class__.__name__}: {str(e)}")
> sys.exit(1)
E SystemExit: 1
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:292: SystemExit
All 4 runs failed: test_flow_test_with_user_defined_chat_history (tests.sdk_cli_test.e2etests.test_cli.TestCli)
artifacts/Test Results (Python 3.8, 3.9, 3.10, 3.11) (OS ubuntu-latest)/test-results.xml [took 0s each]
Raw output
SystemExit: 1
args = (Namespace(action='flow', collection=None, config=None, debug=False, detail=None, environment_variables=None, flow='/h...ser=False, sub_action='test', ui=False, url_params=None, user_agent=None, variant=None, verbose=False, version=False),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>
@wraps(func)
def wrapper(*args, **kwargs):
try:
telemetry_logger = get_telemetry_logger()
with log_activity(
telemetry_logger,
activity_name,
activity_type=ActivityType.PUBLICAPI,
custom_dimensions=custom_dimensions,
):
> return func(*args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:282:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:88: in run_command
raise ex
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:62: in run_command
dispatch_flow_commands(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:78: in dispatch_flow_commands
test_flow(args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:462: in test_flow
_test_flow_interactive(args, pf_client, inputs, environment_variables)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/_flow.py:534: in _test_flow_interactive
pf_client.flows._chat(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_flow_operations.py:360: in _chat
submitter._chat_flow(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:567: in _chat_flow
self._raise_error_when_test_failed(flow_result, show_trace=True)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
test_result = LineResult(output={}, aggregation_inputs={}, run_info=FlowRunInfo(run_id='ceb33b51-47f0-4f8e-afb7-0603e9c65394_0', sta...[], "_func": "Completions.create"}\n\n'}, system_metrics={'duration': 0.012276}, result=None, message_format='basic')})
show_trace = True
@staticmethod
def _raise_error_when_test_failed(test_result, show_trace=False):
from promptflow.executor._result import LineResult
test_status = test_result.run_info.status if isinstance(test_result, LineResult) else test_result.status
if test_status == Status.Failed:
error_dict = test_result.run_info.error if isinstance(test_result, LineResult) else test_result.error
error_response = ErrorResponse.from_error_dict(error_dict)
user_execution_error = error_response.get_user_execution_error_info()
error_message = error_response.message
stack_trace = user_execution_error.get("traceback", "")
error_type = user_execution_error.get("type", "Exception")
if show_trace:
print(stack_trace)
> raise UserErrorException(f"{error_type}: {error_message}", error=stack_trace)
E promptflow.exceptions.UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.
E values: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what is chat gpt?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": false, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:618: UserErrorException
During handling of the above exception, another exception occurred:
self = <sdk_cli_test.e2etests.test_cli.TestCli object at 0x7f85e4980e20>
monkeypatch = <_pytest.monkeypatch.MonkeyPatch object at 0x7f85c906b850>
capsys = <_pytest.capture.CaptureFixture object at 0x7f85c906b0d0>
def test_flow_test_with_user_defined_chat_history(self, monkeypatch, capsys):
chat_list = ["hi", "what is chat gpt?"]
def mock_input(*args, **kwargs):
if chat_list:
return chat_list.pop()
else:
raise KeyboardInterrupt()
monkeypatch.setattr("builtins.input", mock_input)
> run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/chat_flow_with_defined_chat_history",
"--interactive",
)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:1151:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_cli.py:61: in run_pf_command
main()
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:156: in main
entry(command_args)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_pf/entry.py:137: in entry
cli_exception_and_telemetry_handler(run_command, activity_name)(args)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = (Namespace(action='flow', collection=None, config=None, debug=False, detail=None, environment_variables=None, flow='/h...ser=False, sub_action='test', ui=False, url_params=None, user_agent=None, variant=None, verbose=False, version=False),)
kwargs = {}, telemetry_logger = <Logger promptflow._sdk._telemetry (INFO)>
@wraps(func)
def wrapper(*args, **kwargs):
try:
telemetry_logger = get_telemetry_logger()
with log_activity(
telemetry_logger,
activity_name,
activity_type=ActivityType.PUBLICAPI,
custom_dimensions=custom_dimensions,
):
return func(*args, **kwargs)
except Exception as e:
if is_format_exception():
# When the flag format_exception is set in command,
# it will write a json with exception info and command to stderr.
error_msg = ExceptionPresenter.create(e).to_dict(include_debug_info=True)
error_msg["command"] = " ".join(sys.argv)
sys.stderr.write(json.dumps(error_msg))
if isinstance(e, PromptflowException):
print_red_error(f"{activity_name} failed with {e.__class__.__name__}: {str(e)}")
> sys.exit(1)
E SystemExit: 1
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_cli/_utils.py:292: SystemExit
All 4 runs failed: test_pf_test_with_streaming_output (tests.sdk_cli_test.e2etests.test_flow_test.TestFlowTest)
artifacts/Test Results (Python 3.8, 3.9, 3.10, 3.11) (OS ubuntu-latest)/test-results.xml [took 0s each]
Raw output
promptflow.exceptions.UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.
values: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is ChatGPT?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": true, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}
self = <sdk_cli_test.e2etests.test_flow_test.TestFlowTest object at 0x7f85cb5c1220>
def test_pf_test_with_streaming_output(self):
flow_path = Path(f"{FLOWS_DIR}/chat_flow_with_stream_output")
> result = _client.test(flow=flow_path)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_test.py:115:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_pf_client.py:472: in test
return self.flows.test(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_flow_operations.py:164: in test
TestSubmitter._raise_error_when_test_failed(result, show_trace=node is not None)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
test_result = LineResult(output={}, aggregation_inputs={}, run_info=FlowRunInfo(run_id='ba1208a6-571a-43f8-8360-eb07fb739737_0', sta...[], "_func": "Completions.create"}\n\n'}, system_metrics={'duration': 0.013452}, result=None, message_format='basic')})
show_trace = False
@staticmethod
def _raise_error_when_test_failed(test_result, show_trace=False):
from promptflow.executor._result import LineResult
test_status = test_result.run_info.status if isinstance(test_result, LineResult) else test_result.status
if test_status == Status.Failed:
error_dict = test_result.run_info.error if isinstance(test_result, LineResult) else test_result.error
error_response = ErrorResponse.from_error_dict(error_dict)
user_execution_error = error_response.get_user_execution_error_info()
error_message = error_response.message
stack_trace = user_execution_error.get("traceback", "")
error_type = user_execution_error.get("type", "Exception")
if show_trace:
print(stack_trace)
> raise UserErrorException(f"{error_type}: {error_message}", error=stack_trace)
E promptflow.exceptions.UserErrorException: Exception: OpenAI API hits exception: RecordItemMissingException: Record item not found in file /home/runner/work/promptflow/promptflow/src/promptflow-recording/recordings/local/node_cache.shelve.
E values: {"model": "gpt-35-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is ChatGPT?"}], "temperature": 0.7, "top_p": 1.0, "n": 1, "stream": true, "user": "", "max_tokens": 256, "_args": [], "_func": "Completions.create"}
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orchestrator/test_submitter.py:618: UserErrorException
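Worth noting for the streaming variants: the recorded values JSON includes the "stream" flag ("stream": true here versus false in the earlier failures), so if the cache key is derived from the full payload, a streaming call and its otherwise-identical non-streaming twin need separate recordings. A tiny self-contained sketch of that assumption:

import hashlib
import json


def cache_key(values: dict) -> str:
    # Hypothetical key derivation: hash the whole request payload.
    return hashlib.sha256(json.dumps(values, sort_keys=True).encode()).hexdigest()


non_streaming = {"model": "gpt-35-turbo", "messages": [{"role": "user", "content": "What is ChatGPT?"}], "stream": False}
streaming = dict(non_streaming, stream=True)
# Identical except for "stream", yet the keys differ, so each call shape
# needs its own recorded entry.
assert cache_key(non_streaming) != cache_key(streaming)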
All 4 runs failed: test_stream_output_with_builtin_llm (tests.sdk_cli_test.e2etests.test_flow_test.TestFlowTest)
artifacts/Test Results (Python 3.8, 3.9, 3.10, 3.11) (OS ubuntu-latest)/test-results.xml [took 0s each]
Raw output
AssertionError: {'additionalInfo': [{'info': {'message': 'OpenAI API hits exception: RecordItemMissingException: Record item not found...ll last):
', 'type': 'ScriptExecutionError'}, 'innerError': {'code': 'ScriptExecutionError', 'innerError': None}, ...}
assert 'Failed' == 'Completed'
- Completed
+ Failed
self = <sdk_cli_test.e2etests.test_flow_test.TestFlowTest object at 0x7f85cb5bf3d0>
def test_stream_output_with_builtin_llm(self):
flow_path = Path(f"{EAGER_FLOWS_DIR}/builtin_llm/").absolute()
result = _client._flows._test(
flow=flow_path,
inputs={"stream": True},
environment_variables={
"OPENAI_API_KEY": "${azure_open_ai_connection.api_key}",
"AZURE_OPENAI_ENDPOINT": "${azure_open_ai_connection.api_base}",
},
)
> assert result.run_info.status.value == "Completed", result.run_info.error
E AssertionError: {'additionalInfo': [{'info': {'message': 'OpenAI API hits exception: RecordItemMissingException: Record item not found...ll last):
E ', 'type': 'ScriptExecutionError'}, 'innerError': {'code': 'ScriptExecutionError', 'innerError': None}, ...}
E assert 'Failed' == 'Completed'
E
E - Completed
E + Failed
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_test.py:435: AssertionError
All 4 runs failed: test_stream_llm_chat[text/event-stream-200-text/event-stream; charset=utf-8] (tests.sdk_cli_test.e2etests.test_flow_serve)
artifacts/Test Results (Python 3.8, 3.9, 3.10, 3.11) (OS ubuntu-latest)/test-results.xml [took 0s each]
Raw output
assert 400 == 200
+ where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
serving_client_llm_chat = <FlaskClient <PromptflowServingApp 'promptflow.core._serving.app'>>
accept = 'text/event-stream', expected_status_code = 200
expected_content_type = 'text/event-stream; charset=utf-8'
@pytest.mark.usefixtures("recording_injection", "setup_local_connection")
@pytest.mark.e2etest
@pytest.mark.parametrize(
"accept, expected_status_code, expected_content_type",
[
("text/event-stream", 200, "text/event-stream; charset=utf-8"),
("text/html", 406, "application/json"),
("application/json", 200, "application/json"),
("*/*", 200, "application/json"),
("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
("application/json, */*", 200, "application/json"),
("", 200, "application/json"),
],
)
def test_stream_llm_chat(
serving_client_llm_chat,
accept,
expected_status_code,
expected_content_type,
):
payload = {
"question": "What is the capital of France?",
"chat_history": [],
}
headers = {
"Content-Type": "application/json",
"Accept": accept,
}
response = serving_client_llm_chat.post("/score", json=payload, headers=headers)
> assert response.status_code == expected_status_code
E assert 400 == 200
E + where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve.py:259: AssertionError
All 4 runs failed: test_stream_llm_chat[text/html-406-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve)
artifacts/Test Results (Python 3.8, 3.9, 3.10, 3.11) (OS ubuntu-latest)/test-results.xml [took 0s each]
Raw output
assert 400 == 406
+ where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
serving_client_llm_chat = <FlaskClient <PromptflowServingApp 'promptflow.core._serving.app'>>
accept = 'text/html', expected_status_code = 406
expected_content_type = 'application/json'
(test source identical to the first test_stream_llm_chat listing above; only the parametrized accept value differs)
> assert response.status_code == expected_status_code
E assert 400 == 406
E + where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve.py:259: AssertionError
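The text/html case is the one variant that expects 406, which makes it a useful probe: the parametrize table encodes a small content-negotiation policy, and a 400 on every row means the request is rejected upstream of that policy. A simplified sketch of the negotiation the expectations imply (not promptflow's actual implementation):
def negotiate(accept: str) -> tuple:
    # Split a raw Accept header into bare media types (quality values ignored).
    types = [part.split(";")[0].strip() for part in accept.split(",") if part.strip()]
    if not types or "*/*" in types:
        return 200, "application/json"  # missing/wildcard Accept -> JSON
    if "text/event-stream" in types:
        return 200, "text/event-stream; charset=utf-8"  # streaming preferred
    if "application/json" in types:
        return 200, "application/json"
    return 406, "application/json"  # e.g. Accept: text/html

# Mirrors the parametrize table row by row:
assert negotiate("text/html") == (406, "application/json")
assert negotiate("text/event-stream, application/json")[1].startswith("text/event-stream")
assert negotiate("") == (200, "application/json")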
Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve
github-actions / SDK CLI Test Result [task/addSimulator](https://github.com/microsoft/promptflow/actions/workflows/promptflow-sdk-cli-test.yml?query=branch:task/addSimulator++)
All 4 runs failed: test_stream_llm_chat[application/json-200-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve)
artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
+ where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
serving_client_llm_chat = <FlaskClient <PromptflowServingApp 'promptflow.core._serving.app'>>
accept = 'application/json', expected_status_code = 200
expected_content_type = 'application/json'
(test source identical to the first test_stream_llm_chat listing above; only the parametrized accept value differs)
> assert response.status_code == expected_status_code
E assert 400 == 200
E + where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve.py:259: AssertionError
Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve
github-actions / SDK CLI Test Result [task/addSimulator](https://github.com/microsoft/promptflow/actions/workflows/promptflow-sdk-cli-test.yml?query=branch:task/addSimulator++)
All 4 runs failed: test_stream_llm_chat[*/*-200-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve)
artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
+ where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
serving_client_llm_chat = <FlaskClient <PromptflowServingApp 'promptflow.core._serving.app'>>
accept = '*/*', expected_status_code = 200
expected_content_type = 'application/json'
(test source identical to the first test_stream_llm_chat listing above; only the parametrized accept value differs)
> assert response.status_code == expected_status_code
E assert 400 == 200
E + where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve.py:259: AssertionError
Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve
github-actions / SDK CLI Test Result [task/addSimulator](https://github.com/microsoft/promptflow/actions/workflows/promptflow-sdk-cli-test.yml?query=branch:task/addSimulator++)
All 4 runs failed: test_stream_llm_chat[text/event-stream, application/json-200-text/event-stream; charset=utf-8] (tests.sdk_cli_test.e2etests.test_flow_serve)
artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
+ where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
serving_client_llm_chat = <FlaskClient <PromptflowServingApp 'promptflow.core._serving.app'>>
accept = 'text/event-stream, application/json', expected_status_code = 200
expected_content_type = 'text/event-stream; charset=utf-8'
(test source identical to the first test_stream_llm_chat listing above; only the parametrized accept value differs)
> assert response.status_code == expected_status_code
E assert 400 == 200
E + where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve.py:259: AssertionError
Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve
github-actions / SDK CLI Test Result [task/addSimulator](https://github.com/microsoft/promptflow/actions/workflows/promptflow-sdk-cli-test.yml?query=branch:task/addSimulator++)
All 4 runs failed: test_stream_llm_chat[application/json, */*-200-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve)
artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
+ where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
serving_client_llm_chat = <FlaskClient <PromptflowServingApp 'promptflow.core._serving.app'>>
accept = 'application/json, */*', expected_status_code = 200
expected_content_type = 'application/json'
(test source identical to the first test_stream_llm_chat listing above; only the parametrized accept value differs)
> assert response.status_code == expected_status_code
E assert 400 == 200
E + where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve.py:259: AssertionError
Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve
github-actions / SDK CLI Test Result [task/addSimulator](https://github.com/microsoft/promptflow/actions/workflows/promptflow-sdk-cli-test.yml?query=branch:task/addSimulator++)
All 4 runs failed: test_stream_llm_chat[-200-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve)
artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
+ where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
serving_client_llm_chat = <FlaskClient <PromptflowServingApp 'promptflow.core._serving.app'>>
accept = '', expected_status_code = 200
expected_content_type = 'application/json'
(test source identical to the first test_stream_llm_chat listing above; only the parametrized accept value differs)
> assert response.status_code == expected_status_code
E assert 400 == 200
E + where 400 = <WrapperTestResponse streamed [400 BAD REQUEST]>.status_code
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve.py:259: AssertionError
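With all seven Flask-served variants failing identically, the fastest next step is to log the 400 response body, which carries the serving error payload. A triage sketch using Flask's test client; the bare Flask() app below is a placeholder for the PromptflowServingApp the serving_client_llm_chat fixture builds:
from flask import Flask

app = Flask(__name__)  # placeholder; substitute the real serving app object

client = app.test_client()
response = client.post(
    "/score",
    json={"question": "What is the capital of France?", "chat_history": []},
    headers={"Content-Type": "application/json", "Accept": "application/json"},
)
# Against the real serving app, a 400 body explains why the request was
# rejected (bad payload, missing connection, etc.).
print(response.status_code, response.get_data(as_text=True))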
Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve_fastapi
github-actions / SDK CLI Test Result [task/addSimulator](https://github.com/microsoft/promptflow/actions/workflows/promptflow-sdk-cli-test.yml?query=branch:task/addSimulator++)
All 4 runs failed: test_stream_llm_chat[text/event-stream-200-text/event-stream; charset=utf-8] (tests.sdk_cli_test.e2etests.test_flow_serve_fastapi)
artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
+ where 400 = <Response [400 Bad Request]>.status_code
fastapi_serving_client_llm_chat = <starlette.testclient.TestClient object at 0x7f85c89ccd00>
accept = 'text/event-stream', expected_status_code = 200
expected_content_type = 'text/event-stream; charset=utf-8'
@pytest.mark.usefixtures("recording_injection", "setup_local_connection")
@pytest.mark.e2etest
@pytest.mark.parametrize(
"accept, expected_status_code, expected_content_type",
[
("text/event-stream", 200, "text/event-stream; charset=utf-8"),
("text/html", 406, "application/json"),
("application/json", 200, "application/json"),
("*/*", 200, "application/json"),
("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
("application/json, */*", 200, "application/json"),
("", 200, "application/json"),
],
)
def test_stream_llm_chat(
fastapi_serving_client_llm_chat,
accept,
expected_status_code,
expected_content_type,
):
payload = {
"question": "What is the capital of France?",
"chat_history": [],
}
headers = {
"Content-Type": "application/json",
"Accept": accept,
}
response = fastapi_serving_client_llm_chat.post("/score", json=payload, headers=headers)
res_content_type = response.headers.get("content-type")
> assert response.status_code == expected_status_code
E assert 400 == 200
E + where 400 = <Response [400 Bad Request]>.status_code
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve_fastapi.py:261: AssertionError
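The FastAPI twin of the suite fails with the same uniform 400, so the regression sits below the web-framework layer. The equivalent probe with starlette's TestClient; FastAPI() here is a stand-in for the app the fastapi_serving_client_llm_chat fixture constructs:
from fastapi import FastAPI
from starlette.testclient import TestClient

app = FastAPI()  # placeholder; substitute the real promptflow FastAPI serving app

client = TestClient(app)
response = client.post(
    "/score",
    json={"question": "What is the capital of France?", "chat_history": []},
    headers={"Content-Type": "application/json", "Accept": "text/event-stream"},
)
print(response.status_code, response.headers.get("content-type"))
print(response.text)  # the 400 body from the real app names the rejection reason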
Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve_fastapi
github-actions / SDK CLI Test Result [task/addSimulator](https://github.com/microsoft/promptflow/actions/workflows/promptflow-sdk-cli-test.yml?query=branch:task/addSimulator++)
All 4 runs failed: test_stream_llm_chat[text/html-406-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve_fastapi)
artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 406
+ where 400 = <Response [400 Bad Request]>.status_code
fastapi_serving_client_llm_chat = <starlette.testclient.TestClient object at 0x7f85c8161e20>
accept = 'text/html', expected_status_code = 406
expected_content_type = 'application/json'
(test source identical to the first test_stream_llm_chat listing in test_flow_serve_fastapi above)
> assert response.status_code == expected_status_code
E assert 400 == 406
E + where 400 = <Response [400 Bad Request]>.status_code
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve_fastapi.py:261: AssertionError
Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve_fastapi
github-actions / SDK CLI Test Result [task/addSimulator](https://github.com/microsoft/promptflow/actions/workflows/promptflow-sdk-cli-test.yml?query=branch:task/addSimulator++)
All 4 runs failed: test_stream_llm_chat[application/json-200-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve_fastapi)
artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
+ where 400 = <Response [400 Bad Request]>.status_code
fastapi_serving_client_llm_chat = <starlette.testclient.TestClient object at 0x7f85c8a1bd60>
accept = 'application/json', expected_status_code = 200
expected_content_type = 'application/json'
(test source identical to the first test_stream_llm_chat listing in test_flow_serve_fastapi above)
> assert response.status_code == expected_status_code
E assert 400 == 200
E + where 400 = <Response [400 Bad Request]>.status_code
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve_fastapi.py:261: AssertionError
Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve_fastapi
github-actions / SDK CLI Test Result [task/addSimulator](https://github.com/microsoft/promptflow/actions/workflows/promptflow-sdk-cli-test.yml?query=branch:task/addSimulator++)
All 4 runs failed: test_stream_llm_chat[*/*-200-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve_fastapi)
artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
+ where 400 = <Response [400 Bad Request]>.status_code
fastapi_serving_client_llm_chat = <starlette.testclient.TestClient object at 0x7f85c8b42d60>
accept = '*/*', expected_status_code = 200
expected_content_type = 'application/json'
(test source identical to the first test_stream_llm_chat listing in test_flow_serve_fastapi above)
> assert response.status_code == expected_status_code
E assert 400 == 200
E + where 400 = <Response [400 Bad Request]>.status_code
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve_fastapi.py:261: AssertionError
Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve_fastapi
github-actions / SDK CLI Test Result [task/addSimulator](https://github.com/microsoft/promptflow/actions/workflows/promptflow-sdk-cli-test.yml?query=branch:task/addSimulator++)
All 4 runs failed: test_stream_llm_chat[text/event-stream, application/json-200-text/event-stream; charset=utf-8] (tests.sdk_cli_test.e2etests.test_flow_serve_fastapi)
artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
+ where 400 = <Response [400 Bad Request]>.status_code
fastapi_serving_client_llm_chat = <starlette.testclient.TestClient object at 0x7f85c8bff400>
accept = 'text/event-stream, application/json', expected_status_code = 200
expected_content_type = 'text/event-stream; charset=utf-8'
(test source identical to the first test_stream_llm_chat listing in test_flow_serve_fastapi above)
> assert response.status_code == expected_status_code
E assert 400 == 200
E + where 400 = <Response [400 Bad Request]>.status_code
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve_fastapi.py:261: AssertionError
Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve_fastapi
github-actions / SDK CLI Test Result [task/addSimulator](https://github.com/microsoft/promptflow/actions/workflows/promptflow-sdk-cli-test.yml?query=branch:task/addSimulator++)
All 4 runs failed: test_stream_llm_chat[application/json, */*-200-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve_fastapi)
artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
+ where 400 = <Response [400 Bad Request]>.status_code
fastapi_serving_client_llm_chat = <starlette.testclient.TestClient object at 0x7f85c8b06df0>
accept = 'application/json, */*', expected_status_code = 200
expected_content_type = 'application/json'
(test source identical to the first test_stream_llm_chat listing in test_flow_serve_fastapi above)
> assert response.status_code == expected_status_code
E assert 400 == 200
E + where 400 = <Response [400 Bad Request]>.status_code
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve_fastapi.py:261: AssertionError
Check warning on line 0 in tests.sdk_cli_test.e2etests.test_flow_serve_fastapi
github-actions / SDK CLI Test Result [task/addSimulator](https://github.com/microsoft/promptflow/actions/workflows/promptflow-sdk-cli-test.yml?query=branch:task/addSimulator++)
All 4 runs failed: test_stream_llm_chat[-200-application/json] (tests.sdk_cli_test.e2etests.test_flow_serve_fastapi)
artifacts/Test Results (Python 3.10) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 0s]
artifacts/Test Results (Python 3.9) (OS ubuntu-latest)/test-results.xml [took 0s]
Raw output
assert 400 == 200
+ where 400 = <Response [400 Bad Request]>.status_code
fastapi_serving_client_llm_chat = <starlette.testclient.TestClient object at 0x7f85c88ce820>
accept = '', expected_status_code = 200
expected_content_type = 'application/json'
(test source identical to the first test_stream_llm_chat listing in test_flow_serve_fastapi above)
> assert response.status_code == expected_status_code
E assert 400 == 200
E + where 400 = <Response [400 Bad Request]>.status_code
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_flow_serve_fastapi.py:261: AssertionError
Check warning on line 0 in tests.sdk_cli_test.e2etests.test_trace.TestTraceEntitiesAndOperations
github-actions / SDK CLI Test Result [task/addSimulator](https://github.com/microsoft/promptflow/actions/workflows/promptflow-sdk-cli-test.yml?query=branch:task/addSimulator++)
1 out of 4 runs failed: test_delete_traces_three_tables (tests.sdk_cli_test.e2etests.test_trace.TestTraceEntitiesAndOperations)
artifacts/Test Results (Python 3.11) (OS ubuntu-latest)/test-results.xml [took 1s]
Raw output
sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) database is locked
[SQL: DELETE FROM events WHERE events.trace_id IN (?)]
[parameters: ('9f5084b2-3fa2-4401-9113-a7154c8be7c5',)]
(Background on this error at: https://sqlalche.me/e/20/e3q8)
self = <sqlalchemy.engine.base.Connection object at 0x7f2c5e160a10>
dialect = <sqlalchemy.dialects.sqlite.pysqlite.SQLiteDialect_pysqlite object at 0x7f2c5e7d1910>
context = <sqlalchemy.dialects.sqlite.base.SQLiteExecutionContext object at 0x7f2c5ee51690>
statement = <sqlalchemy.dialects.sqlite.base.SQLiteCompiler object at 0x7f2c5ee52e90>
parameters = [('9f5084b2-3fa2-4401-9113-a7154c8be7c5',)]
def _exec_single_context(
self,
dialect: Dialect,
context: ExecutionContext,
statement: Union[str, Compiled],
parameters: Optional[_AnyMultiExecuteParams],
) -> CursorResult[Any]:
"""continue the _execute_context() method for a single DBAPI
cursor.execute() or cursor.executemany() call.
"""
if dialect.bind_typing is BindTyping.SETINPUTSIZES:
generic_setinputsizes = context._prepare_set_input_sizes()
if generic_setinputsizes:
try:
dialect.do_set_input_sizes(
context.cursor, generic_setinputsizes, context
)
except BaseException as e:
self._handle_dbapi_exception(
e, str(statement), parameters, None, context
)
cursor, str_statement, parameters = (
context.cursor,
context.statement,
context.parameters,
)
effective_parameters: Optional[_AnyExecuteParams]
if not context.executemany:
effective_parameters = parameters[0]
else:
effective_parameters = parameters
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
str_statement, effective_parameters = fn(
self,
cursor,
str_statement,
effective_parameters,
context,
context.executemany,
)
if self._echo:
self._log_info(str_statement)
stats = context._get_cache_stats()
if not self.engine.hide_parameters:
self._log_info(
"[%s] %r",
stats,
sql_util._repr_params(
effective_parameters,
batches=10,
ismulti=context.executemany,
),
)
else:
self._log_info(
"[%s] [SQL parameters hidden due to hide_parameters=True]",
stats,
)
evt_handled: bool = False
try:
if context.execute_style is ExecuteStyle.EXECUTEMANY:
effective_parameters = cast(
"_CoreMultiExecuteParams", effective_parameters
)
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_executemany:
if fn(
cursor,
str_statement,
effective_parameters,
context,
):
evt_handled = True
break
if not evt_handled:
self.dialect.do_executemany(
cursor,
str_statement,
effective_parameters,
context,
)
elif not effective_parameters and context.no_parameters:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_execute_no_params:
if fn(cursor, str_statement, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_execute_no_params(
cursor, str_statement, context
)
else:
effective_parameters = cast(
"_CoreSingleExecuteParams", effective_parameters
)
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_execute:
if fn(
cursor,
str_statement,
effective_parameters,
context,
):
evt_handled = True
break
if not evt_handled:
> self.dialect.do_execute(
cursor, str_statement, effective_parameters, context
)
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/base.py:1967:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <sqlalchemy.dialects.sqlite.pysqlite.SQLiteDialect_pysqlite object at 0x7f2c5e7d1910>
cursor = <sqlite3.Cursor object at 0x7f2c2c097f40>
statement = 'DELETE FROM events WHERE events.trace_id IN (?)'
parameters = ('9f5084b2-3fa2-4401-9113-a7154c8be7c5',)
context = <sqlalchemy.dialects.sqlite.base.SQLiteExecutionContext object at 0x7f2c5ee51690>
def do_execute(self, cursor, statement, parameters, context=None):
> cursor.execute(statement, parameters)
E sqlite3.OperationalError: database is locked
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/default.py:924: OperationalError
The above exception was the direct cause of the following exception:
self = <sdk_cli_test.e2etests.test_trace.TestTraceEntitiesAndOperations object at 0x7f2c494f2550>
pf = <promptflow._sdk._pf_client.PFClient object at 0x7f2c5ea88090>
def test_delete_traces_three_tables(self, pf: PFClient) -> None:
# trace operation does not expose API for events and spans
# so directly use ORM class to list and assert events and spans existence and deletion
from promptflow._sdk._orm.trace import Event as ORMEvent
from promptflow._sdk._orm.trace import LineRun as ORMLineRun
from promptflow._sdk._orm.trace import Span as ORMSpan
mock_run = str(uuid.uuid4())
mock_span = mock_span_for_delete_tests(run=mock_run)
# assert events, span and line_run are persisted
assert len(ORMEvent.list(trace_id=mock_span.trace_id, span_id=mock_span.span_id)) == 2
assert len(ORMSpan.list(trace_ids=[mock_span.trace_id])) == 1
assert len(ORMLineRun.list(runs=[mock_run])) == 1
# delete traces and assert all traces are deleted
> pf.traces.delete(run=mock_run)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_trace.py:319:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_telemetry/activity.py:265: in wrapper
return f(self, *args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_trace_operations.py:175: in delete
return self._delete_within_transaction(
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/_orm/retry.py:50: in f_retry
return f(*args, **kwargs)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/promptflow/_sdk/operations/_trace_operations.py:240: in _delete_within_transaction
event_cnt = session.query(ORMEvent).filter(ORMEvent.trace_id.in_(trace_ids)).delete()
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/orm/query.py:3161: in delete
result: CursorResult[Any] = self.session.execute(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/orm/session.py:2351: in execute
return self._execute_internal(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/orm/session.py:2236: in _execute_internal
result: Result[Any] = compile_state_cls.orm_execute_statement(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/orm/bulk_persistence.py:1953: in orm_execute_statement
return super().orm_execute_statement(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/orm/context.py:293: in orm_execute_statement
result = conn.execute(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/base.py:1418: in execute
return meth(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/sql/elements.py:515: in _execute_on_connection
return connection._execute_clauseelement(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/base.py:1640: in _execute_clauseelement
ret = self._execute_context(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/base.py:1846: in _execute_context
return self._exec_single_context(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/base.py:1986: in _exec_single_context
self._handle_dbapi_exception(
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/base.py:2353: in _handle_dbapi_exception
raise sqlalchemy_exception.with_traceback(exc_info[2]) from e
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/base.py:1967: in _exec_single_context
self.dialect.do_execute(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <sqlalchemy.dialects.sqlite.pysqlite.SQLiteDialect_pysqlite object at 0x7f2c5e7d1910>
cursor = <sqlite3.Cursor object at 0x7f2c2c097f40>
statement = 'DELETE FROM events WHERE events.trace_id IN (?)'
parameters = ('9f5084b2-3fa2-4401-9113-a7154c8be7c5',)
context = <sqlalchemy.dialects.sqlite.base.SQLiteExecutionContext object at 0x7f2c5ee51690>
def do_execute(self, cursor, statement, parameters, context=None):
> cursor.execute(statement, parameters)
E sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) database is locked
E [SQL: DELETE FROM events WHERE events.trace_id IN (?)]
E [parameters: ('9f5084b2-3fa2-4401-9113-a7154c8be7c5',)]
E (Background on this error at: https://sqlalche.me/e/20/e3q8)
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.11/lib/python3.11/site-packages/sqlalchemy/engine/default.py:924: OperationalError
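The "database is locked" failures are classic SQLite writer contention: two connections try to write the trace store at once and the second gives up after its busy timeout. A common mitigation sketch (not necessarily how promptflow configures its engine) is to raise the busy timeout and switch to WAL journaling on every new connection:
from sqlalchemy import create_engine, event

engine = create_engine("sqlite:///traces.sqlite")  # illustrative database path

@event.listens_for(engine, "connect")
def set_sqlite_pragmas(dbapi_connection, connection_record):
    cursor = dbapi_connection.cursor()
    # Wait up to 30s for a competing writer instead of failing immediately.
    cursor.execute("PRAGMA busy_timeout = 30000")
    # WAL mode lets readers proceed while a single writer appends to the log.
    cursor.execute("PRAGMA journal_mode = WAL")
    cursor.close()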
Check warning on line 0 in tests.sdk_cli_test.e2etests.test_trace.TestTraceEntitiesAndOperations
github-actions / SDK CLI Test Result [task/addSimulator](https://github.com/microsoft/promptflow/actions/workflows/promptflow-sdk-cli-test.yml?query=branch:task/addSimulator++)
1 out of 4 runs failed: test_delete_traces_with_collection (tests.sdk_cli_test.e2etests.test_trace.TestTraceEntitiesAndOperations)
artifacts/Test Results (Python 3.8) (OS ubuntu-latest)/test-results.xml [took 1s]
Raw output
sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) database is locked
[SQL: DELETE FROM events WHERE events.trace_id IN (?)]
[parameters: ('bbd9b6e9-758e-4911-a2f1-6aa860aed1c6',)]
(Background on this error at: https://sqlalche.me/e/20/e3q8)
self = <sqlalchemy.engine.base.Connection object at 0x7f85a5fb8e80>
dialect = <sqlalchemy.dialects.sqlite.pysqlite.SQLiteDialect_pysqlite object at 0x7f85a5f91c40>
context = <sqlalchemy.dialects.sqlite.base.SQLiteExecutionContext object at 0x7f85a5d663d0>
statement = <sqlalchemy.dialects.sqlite.base.SQLiteCompiler object at 0x7f85c80c0130>
parameters = [('bbd9b6e9-758e-4911-a2f1-6aa860aed1c6',)]
(sqlalchemy _exec_single_context source identical to the listing in test_delete_traces_three_tables above; frame sqlalchemy/engine/base.py:1967, here under the Python 3.8 virtualenv)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <sqlalchemy.dialects.sqlite.pysqlite.SQLiteDialect_pysqlite object at 0x7f85a5f91c40>
cursor = <sqlite3.Cursor object at 0x7f85c80d7180>
statement = 'DELETE FROM events WHERE events.trace_id IN (?)'
parameters = ('bbd9b6e9-758e-4911-a2f1-6aa860aed1c6',)
context = <sqlalchemy.dialects.sqlite.base.SQLiteExecutionContext object at 0x7f85a5d663d0>
def do_execute(self, cursor, statement, parameters, context=None):
> cursor.execute(statement, parameters)
E sqlite3.OperationalError: database is locked
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.8/lib/python3.8/site-packages/sqlalchemy/engine/default.py:924: OperationalError
The above exception was the direct cause of the following exception:
self = <sdk_cli_test.e2etests.test_trace.TestTraceEntitiesAndOperations object at 0x7f85d07f1250>
pf = <promptflow._sdk._pf_client.PFClient object at 0x7f85c8fa46d0>
def test_delete_traces_with_collection(self, pf: PFClient) -> None:
mock_collection = str(uuid.uuid4())
mock_span_for_delete_tests(collection=mock_collection)
assert len(pf.traces.list_line_runs(collection=mock_collection)) == 1
> pf.traces.delete(collection=mock_collection)
/home/runner/work/promptflow/promptflow/src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_trace.py:335:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
(intermediate traceback frames as in test_delete_traces_three_tables above, through promptflow _sdk telemetry/trace operations and sqlalchemy engine internals, differing only in the Python 3.8 virtualenv paths)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <sqlalchemy.dialects.sqlite.pysqlite.SQLiteDialect_pysqlite object at 0x7f85a5f91c40>
cursor = <sqlite3.Cursor object at 0x7f85c80d7180>
statement = 'DELETE FROM events WHERE events.trace_id IN (?)'
parameters = ('bbd9b6e9-758e-4911-a2f1-6aa860aed1c6',)
context = <sqlalchemy.dialects.sqlite.base.SQLiteExecutionContext object at 0x7f85a5d663d0>
def do_execute(self, cursor, statement, parameters, context=None):
> cursor.execute(statement, parameters)
E sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) database is locked
E [SQL: DELETE FROM events WHERE events.trace_id IN (?)]
E [parameters: ('bbd9b6e9-758e-4911-a2f1-6aa860aed1c6',)]
E (Background on this error at: https://sqlalche.me/e/20/e3q8)
/home/runner/.cache/pypoetry/virtualenvs/promptflow-devkit-bFW165Ac-py3.8/lib/python3.8/site-packages/sqlalchemy/engine/default.py:924: OperationalError
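The traceback shows the delete already runs under promptflow's _orm/retry.py wrapper, so either the retry budget was exhausted or the lock outlived it. For reference, a generic sketch of that style of backoff retry on transient lock errors (illustrative, not the actual f_retry implementation; note SQLAlchemy surfaces the lock as sqlalchemy.exc.OperationalError wrapping the sqlite3 error):
import functools
import random
import sqlite3
import time

def retry_on_locked(tries: int = 5, base_delay: float = 0.1):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(tries):
                try:
                    return func(*args, **kwargs)
                except sqlite3.OperationalError as exc:
                    # When calling through the ORM, catch
                    # sqlalchemy.exc.OperationalError instead.
                    if "database is locked" not in str(exc) or attempt == tries - 1:
                        raise
                    # Exponential backoff with jitter gives the writer time to finish.
                    time.sleep(base_delay * (2 ** attempt) + random.uniform(0, 0.05))
        return wrapper
    return decorator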