diff --git a/examples/flows/standard/basic-with-connection/flow.dag.yaml b/examples/flows/standard/basic-with-connection/flow.dag.yaml index f75def07d03..f0c275ef860 100644 --- a/examples/flows/standard/basic-with-connection/flow.dag.yaml +++ b/examples/flows/standard/basic-with-connection/flow.dag.yaml @@ -22,7 +22,7 @@ nodes: path: hello.py inputs: connection: basic_custom_connection - deployment_name: text-davinci-003 + deployment_name: gpt-35-turbo-instruct max_tokens: "120" prompt: ${hello_prompt.output} environment: diff --git a/examples/flows/standard/basic/flow.dag.yaml b/examples/flows/standard/basic/flow.dag.yaml index 31f1ea32312..f269faa7aff 100644 --- a/examples/flows/standard/basic/flow.dag.yaml +++ b/examples/flows/standard/basic/flow.dag.yaml @@ -24,5 +24,5 @@ nodes: path: hello.py inputs: prompt: ${hello_prompt.output} - deployment_name: text-davinci-003 + deployment_name: gpt-35-turbo-instruct max_tokens: "120" diff --git a/examples/tutorials/tracing/custom-otlp-collector/llm.py b/examples/tutorials/tracing/custom-otlp-collector/llm.py index cdc72f92fe8..eaa9d6f67e2 100644 --- a/examples/tutorials/tracing/custom-otlp-collector/llm.py +++ b/examples/tutorials/tracing/custom-otlp-collector/llm.py @@ -45,7 +45,7 @@ def my_llm_tool(prompt: str, deployment_name: str) -> str: if __name__ == "__main__": result = my_llm_tool( - prompt="Write a simple Hello, world! program that displays the greeting message.", - deployment_name="text-davinci-003", + prompt="Write a simple Hello, world! python program that displays the greeting message. Output code only.", + deployment_name="gpt-4o", ) print(result) diff --git a/examples/tutorials/tracing/custom-otlp-collector/otlp-trace-collector.ipynb b/examples/tutorials/tracing/custom-otlp-collector/otlp-trace-collector.ipynb index 3c1306ddb60..886fe8cf3bf 100644 --- a/examples/tutorials/tracing/custom-otlp-collector/otlp-trace-collector.ipynb +++ b/examples/tutorials/tracing/custom-otlp-collector/otlp-trace-collector.ipynb @@ -162,7 +162,7 @@ "outputs": [], "source": [ "result = my_llm_tool(\n", - " prompt=\"Write a simple Hello, world! program that displays the greeting message when executed.\",\n", + " prompt=\"Write a simple Hello, world! python program that displays the greeting message. Output code only.\",\n", " deployment_name=deployment_name,\n", ")\n", "result\n", diff --git a/examples/tutorials/tracing/langchain/requirements.txt b/examples/tutorials/tracing/langchain/requirements.txt index fab24c4abd6..ec00dd53400 100644 --- a/examples/tutorials/tracing/langchain/requirements.txt +++ b/examples/tutorials/tracing/langchain/requirements.txt @@ -1,4 +1,5 @@ promptflow langchain>=0.1.5 +langchain_community opentelemetry-instrumentation-langchain python-dotenv \ No newline at end of file