From 08cc0d48b0409cd8ab60983d8297a1642c65d972 Mon Sep 17 00:00:00 2001
From: Zhengfei Wang <38847871+zhengfeiwang@users.noreply.github.com>
Date: Tue, 18 Jun 2024 15:42:44 +0800
Subject: [PATCH] [fundamental][bugfix] Replace retired model `"text-ada-001"`
 (#3429)

# Description

`"text-ada-001"` is retired as of 6/18 (reference: ); this PR replaces it with the suggested replacement model, `gpt-35-turbo-instruct`.

# All Promptflow Contribution checklist:
- [x] **The pull request does not introduce [breaking changes].**
- [ ] **CHANGELOG is updated for new features, bug fixes or other significant changes.**
- [x] **I have read the [contribution guidelines](../CONTRIBUTING.md).**
- [ ] **Create an issue and link to the pull request to get dedicated review from promptflow team. Learn more: [suggested workflow](../CONTRIBUTING.md#suggested-workflow).**

## General Guidelines and Best Practices
- [x] Title of the pull request is clear and informative.
- [x] There are a small number of commits, each of which has an informative message. This means that previously merged commits do not appear in the history of the PR. For more information on cleaning up the commits in your PR, [see this page](https://github.com/Azure/azure-powershell/blob/master/documentation/development-docs/cleaning-up-commits.md).

### Testing Guidelines
- [x] Pull request includes test coverage for the included changes.
---
 .../promptflow-executor-unit-test.yml         |   1 +
 .../_sdk/_utilities/tracing_utils.py          |   6 +++---
 .../local/executor_node_cache.shelve.bak      |   2 ++
 .../local/executor_node_cache.shelve.dat      | Bin 121911 -> 127380 bytes
 .../local/executor_node_cache.shelve.dir      |   2 ++
 .../openai_completion_api_flow/completion.py  |   4 ++--
 6 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/promptflow-executor-unit-test.yml b/.github/workflows/promptflow-executor-unit-test.yml
index 969fcff2757..4f8288bd6a6 100644
--- a/.github/workflows/promptflow-executor-unit-test.yml
+++ b/.github/workflows/promptflow-executor-unit-test.yml
@@ -18,6 +18,7 @@ on:
       - src/promptflow/promptflow/storage/**
       - src/promptflow/tests/*
       - src/promptflow/tests/executor/**
+      - src/promptflow/tests/test_configs/**
       - src/promptflow-tracing/promptflow/**
       - src/promptflow-core/promptflow/**
       - src/promptflow-devkit/promptflow/**
diff --git a/src/promptflow-devkit/promptflow/_sdk/_utilities/tracing_utils.py b/src/promptflow-devkit/promptflow/_sdk/_utilities/tracing_utils.py
index f01c882bfd6..604e54fbc31 100644
--- a/src/promptflow-devkit/promptflow/_sdk/_utilities/tracing_utils.py
+++ b/src/promptflow-devkit/promptflow/_sdk/_utilities/tracing_utils.py
@@ -34,7 +34,7 @@
 from promptflow._sdk._user_agent import USER_AGENT
 from promptflow._sdk.entities._trace import Span
 from promptflow._utils.logger_utils import get_cli_sdk_logger
-from promptflow._utils.user_agent_utils import ClientUserAgentUtil, setup_user_agent_to_operation_context
+from promptflow._utils.user_agent_utils import setup_user_agent_to_operation_context
 from promptflow.core._errors import MissingRequiredPackage

 from .general_utils import convert_time_unix_nano_to_timestamp, json_load
@@ -347,8 +347,8 @@ class TraceTelemetryHelper:
     CUSTOM_DIMENSIONS_TRACE_COUNT = "trace_count"

     def __init__(self):
-        setup_user_agent_to_operation_context(USER_AGENT)
-        self._user_agent = ClientUserAgentUtil.get_user_agent()
+        # `setup_user_agent_to_operation_context` sets up the user agent and returns it
+        self._user_agent = setup_user_agent_to_operation_context(USER_AGENT)
         self._telemetry_logger = get_telemetry_logger()
         self._lock = multiprocessing.Lock()
         self._summary: typing.Dict[TraceCountKey, int] = dict()
diff --git a/src/promptflow-recording/recordings/local/executor_node_cache.shelve.bak b/src/promptflow-recording/recordings/local/executor_node_cache.shelve.bak
index b1ed1b12c3f..708f20db4e2 100644
--- a/src/promptflow-recording/recordings/local/executor_node_cache.shelve.bak
+++ b/src/promptflow-recording/recordings/local/executor_node_cache.shelve.bak
@@ -35,3 +35,5 @@
 'ad585ee1806aae44c095f4b3e473e472bb8be141', (91136, 1272)
 'ea48203d881e43bd9e027a19525ba88816c9a639', (92672, 14393)
 'e53962d6670e3c446a659b93e8ff5900f82bce76', (107520, 14391)
+'b3e2c3c192f72b517f5d32e5f416b1f818922bbd', (122368, 3698)
+'17d268bf2d53b839d08502d3a92c6ce0f5e67fdd', (126464, 916)
diff --git a/src/promptflow-recording/recordings/local/executor_node_cache.shelve.dat b/src/promptflow-recording/recordings/local/executor_node_cache.shelve.dat
index 494a0a8173635366ae79026ad8425c989b784bfa..a1397df2e98c7010764c0fd3d0936bee412cc22f 100644
GIT binary patch
delta 2939
[base85-encoded binary delta payload omitted]
diff --git a/.../openai_completion_api_flow/completion.py b/.../openai_completion_api_flow/completion.py
@@ ... @@
     if IS_LEGACY_OPENAI:
         completion = openai.Completion.create(
             prompt=prompt,
-            engine="text-ada-001",
+            engine="gpt-35-turbo-instruct",
             max_tokens=256,
             temperature=0.8,
             top_p=1.0,
@@ -40,7 +40,7 @@ def completion(connection: AzureOpenAIConnection, prompt: str, stream: bool) ->
     else:
         completion = get_client(connection).completions.create(
             prompt=prompt,
-            model="text-ada-001",
+            model="gpt-35-turbo-instruct",
             max_tokens=256,
             temperature=0.8,
             top_p=1.0,
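
For reviewers who want to sanity-check the replacement outside the flow, below is a minimal sketch (not part of this patch) of the non-legacy code path after the change. It assumes an `openai>=1.0` client and an Azure OpenAI deployment named `gpt-35-turbo-instruct`; the endpoint, key, and API version are placeholders.

```python
# Minimal sketch (not part of this patch): exercising the replacement model
# through the openai>=1.0 Azure client, mirroring the non-legacy branch of
# completion.py. Endpoint, key, and API version below are placeholders.
from openai import AzureOpenAI

client = AzureOpenAI(
    azure_endpoint="https://<your-resource>.openai.azure.com/",  # placeholder
    api_key="<your-api-key>",                                    # placeholder
    api_version="2024-02-01",                                    # placeholder
)

completion = client.completions.create(
    prompt="The capital of France is",
    model="gpt-35-turbo-instruct",  # deployment name; replaces retired "text-ada-001"
    max_tokens=256,
    temperature=0.8,
    top_p=1.0,
)
print(completion.choices[0].text)
```

Note that for Azure OpenAI the `model` argument is the deployment name, so the deployment backing the test recordings must be named to match the replacement model.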