From 261a76b61b15681036c247d70cda586e132e3fa8 Mon Sep 17 00:00:00 2001
From: Didrik
Date: Tue, 17 Dec 2024 16:35:45 +0100
Subject: [PATCH 1/2] Add store flag for chat completion request

---
 async-openai/src/types/chat.rs | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/async-openai/src/types/chat.rs b/async-openai/src/types/chat.rs
index 138d6852..ce1d10b4 100644
--- a/async-openai/src/types/chat.rs
+++ b/async-openai/src/types/chat.rs
@@ -506,6 +506,12 @@ pub struct CreateChatCompletionRequest {
     /// See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
     pub model: String,
 
+    /// Whether or not to store the output of this chat completion request
+    /// for use in our [model distillation](https://platform.openai.com/docs/guides/distillation)
+    /// or [evals](https://platform.openai.com/docs/guides/evals) products.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub store: Option<bool>, // nullable: true, default: false
+
     /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
     ///
     /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/api-reference/parameter-details)

From 608a079f0857e6396b6db46df6c8f0df4fefb254 Mon Sep 17 00:00:00 2001
From: Didrik
Date: Tue, 17 Dec 2024 17:02:01 +0100
Subject: [PATCH 2/2] Add metadata and example used in evals and distillation
 guides

---
 async-openai/src/types/chat.rs  |  4 +++
 examples/chat-store/Cargo.toml  | 10 +++++++
 examples/chat-store/README.md   |  5 ++++
 examples/chat-store/src/main.rs | 49 +++++++++++++++++++++++++++++++++
 4 files changed, 68 insertions(+)
 create mode 100644 examples/chat-store/Cargo.toml
 create mode 100644 examples/chat-store/README.md
 create mode 100644 examples/chat-store/src/main.rs

diff --git a/async-openai/src/types/chat.rs b/async-openai/src/types/chat.rs
index ce1d10b4..13cad2b7 100644
--- a/async-openai/src/types/chat.rs
+++ b/async-openai/src/types/chat.rs
@@ -512,6 +512,10 @@ pub struct CreateChatCompletionRequest {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub store: Option<bool>, // nullable: true, default: false
 
+    /// Developer-defined tags and values used for filtering completions in the [dashboard](https://platform.openai.com/chat-completions).
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub metadata: Option<serde_json::Value>, // nullable: true
+
     /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
     ///
     /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/api-reference/parameter-details)

diff --git a/examples/chat-store/Cargo.toml b/examples/chat-store/Cargo.toml
new file mode 100644
index 00000000..3576f03d
--- /dev/null
+++ b/examples/chat-store/Cargo.toml
@@ -0,0 +1,10 @@
+[package]
+name = "chat-store"
+version = "0.1.0"
+edition = "2021"
+publish = false
+
+[dependencies]
+async-openai = { path = "../../async-openai" }
+serde_json = "1.0.117"
+tokio = { version = "1.38.0", features = ["full"] }

diff --git a/examples/chat-store/README.md b/examples/chat-store/README.md
new file mode 100644
index 00000000..b933bb7d
--- /dev/null
+++ b/examples/chat-store/README.md
@@ -0,0 +1,5 @@
+### Output
+
+> Response:
+>
+> 0: Role: assistant Content: To hide the dock on a Mac, you can follow these steps:\n\n1. Click on the Apple logo in the top-left corner of the screen.\n2. Select \"System Preferences\" from the drop-down menu.\n3. Click on \"Dock & Menu Bar\".\n4. Under the \"Dock\" section, you will see an option to \"Automatically hide and show the Dock\". Check the box next to this option.\n5. The dock will now be hidden until you move your cursor to the bottom of the screen, at which point it will slide back into view.\n\nYou can also change the size and position of the dock in the Dock preferences
\ No newline at end of file

diff --git a/examples/chat-store/src/main.rs b/examples/chat-store/src/main.rs
new file mode 100644
index 00000000..9c967ea7
--- /dev/null
+++ b/examples/chat-store/src/main.rs
@@ -0,0 +1,49 @@
+use std::error::Error;
+use async_openai::{
+    types::{
+        ChatCompletionRequestSystemMessageArgs, ChatCompletionRequestUserMessageArgs,
+        CreateChatCompletionRequestArgs,
+    },
+    Client,
+};
+use serde_json::json;
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn Error>> {
+    let client = Client::new();
+
+    let request = CreateChatCompletionRequestArgs::default()
+        .max_tokens(512u32)
+        .model("gpt-3.5-turbo")
+        .store(true)
+        .metadata(json!({
+            "role": "manager",
+            "department": "accounting",
+            "source": "homepage",
+        }))
+        .messages([
+            ChatCompletionRequestSystemMessageArgs::default()
+                .content("You are a corporate IT support expert.")
+                .build()?
+                .into(),
+            ChatCompletionRequestUserMessageArgs::default()
+                .content("How can I hide the dock on my Mac?")
+                .build()?
+                .into(),
+        ])
+        .build()?;
+
+    println!("{}", serde_json::to_string(&request).unwrap());
+
+    let response = client.chat().create(request).await?;
+
+    println!("\nResponse:\n");
+    for choice in response.choices {
+        println!(
+            "{}: Role: {} Content: {:?}",
+            choice.index, choice.message.role, choice.message.content
+        );
+    }
+
+    Ok(())
+}
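As a sanity check on the two `skip_serializing_if` attributes added above, here is a minimal sketch (not part of either patch) that serializes a request with and without the new fields. It assumes only the builder API already used in `examples/chat-store/src/main.rs`; the model name, message content, and metadata values are placeholders.

```rust
use async_openai::types::{ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs};
use serde_json::{json, Value};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Left unset, `store` and `metadata` stay `None`, so
    // `skip_serializing_if = "Option::is_none"` drops both keys from the body.
    let request = CreateChatCompletionRequestArgs::default()
        .model("gpt-3.5-turbo")
        .messages([ChatCompletionRequestUserMessageArgs::default()
            .content("ping")
            .build()?
            .into()])
        .build()?;
    let body: Value = serde_json::to_value(&request)?;
    assert!(body.get("store").is_none());
    assert!(body.get("metadata").is_none());

    // Once set, both serialize as top-level keys next to `model` and `messages`.
    let request = CreateChatCompletionRequestArgs::default()
        .model("gpt-3.5-turbo")
        .messages([ChatCompletionRequestUserMessageArgs::default()
            .content("ping")
            .build()?
            .into()])
        .store(true)
        .metadata(json!({ "source": "homepage" }))
        .build()?;
    let body: Value = serde_json::to_value(&request)?;
    assert_eq!(body["store"], json!(true));
    assert_eq!(body["metadata"]["source"], json!("homepage"));

    Ok(())
}
```

Omitting the keys when unset keeps the wire format unchanged for existing callers that never touch `store` or `metadata`.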