Skip to content

Commit

Permalink
com.openai.unity 3.1.1 (#43)
Browse files Browse the repository at this point in the history
- refactored model validation
- added additional default models
- deprecate `OpenAIClient.DefaultModel`
- closes #42
  • Loading branch information
StephenHodgson authored Mar 9, 2023
1 parent 18a59e9 commit 412273d
Show file tree
Hide file tree
Showing 25 changed files with 416 additions and 112 deletions.
34 changes: 33 additions & 1 deletion Documentation~/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -257,7 +257,39 @@ Debug.Log(result.FirstChoice);
##### [Chat Streaming](https://platform.openai.com/docs/api-reference/chat/create#chat/create-stream)

```csharp
TODO
var api = new OpenAIClient();
var chatPrompts = new List<ChatPrompt>
{
new ChatPrompt("system", "You are a helpful assistant."),
new ChatPrompt("user", "Who won the world series in 2020?"),
new ChatPrompt("assistant", "The Los Angeles Dodgers won the World Series in 2020."),
new ChatPrompt("user", "Where was it played?"),
};
var chatRequest = new ChatRequest(chatPrompts, Model.GPT3_5_Turbo);

await api.ChatEndpoint.StreamCompletionAsync(chatRequest, result =>
{
Debug.Log(result.FirstChoice);
});
```

Or if using [`IAsyncEnumerable{T}`](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.iasyncenumerable-1?view=net-5.0) ([C# 8.0+](https://docs.microsoft.com/en-us/archive/msdn-magazine/2019/november/csharp-iterating-with-async-enumerables-in-csharp-8))

```csharp
var api = new OpenAIClient();
var chatPrompts = new List<ChatPrompt>
{
new ChatPrompt("system", "You are a helpful assistant."),
new ChatPrompt("user", "Who won the world series in 2020?"),
new ChatPrompt("assistant", "The Los Angeles Dodgers won the World Series in 2020."),
new ChatPrompt("user", "Where was it played?"),
};
var chatRequest = new ChatRequest(chatPrompts, Model.GPT3_5_Turbo);

await foreach (var result in api.ChatEndpoint.StreamCompletionEnumerableAsync(chatRequest))
{
Debug.Log(result.FirstChoice);
}
```

### [Edits](https://beta.openai.com/docs/api-reference/edits)
Expand Down
4 changes: 2 additions & 2 deletions Runtime/Audio/AudioTranscriptionRequest.cs
Original file line number Diff line number Diff line change
Expand Up @@ -149,11 +149,11 @@ public AudioTranscriptionRequest(

AudioName = audioName;

Model = model ?? new Model("whisper-1");
Model = model ?? Models.Model.Whisper1;

if (!Model.Contains("whisper"))
{
throw new ArgumentException(nameof(model), $"{Model} is not supported.");
throw new ArgumentException($"{Model} is not supported", nameof(model));
}

Prompt = prompt;
Expand Down
4 changes: 2 additions & 2 deletions Runtime/Audio/AudioTranslationRequest.cs
Original file line number Diff line number Diff line change
Expand Up @@ -119,11 +119,11 @@ public AudioTranslationRequest(

AudioName = audioName;

Model = model ?? new Model("whisper-1");
Model = model ?? Models.Model.Whisper1;

if (!Model.Contains("whisper"))
{
throw new ArgumentException(nameof(model), $"{Model} is not supported.");
throw new ArgumentException($"{Model} is not supported", nameof(model));
}

Prompt = prompt;
Expand Down
96 changes: 91 additions & 5 deletions Runtime/Chat/ChatEndpoint.cs
Original file line number Diff line number Diff line change
@@ -1,6 +1,11 @@
// Licensed under the MIT License. See LICENSE in the project root for license information.

using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.IO;
using System.Net.Http;
using System.Runtime.CompilerServices;
using System.Threading;
using System.Threading.Tasks;

Expand All @@ -16,19 +21,100 @@ protected override string GetEndpoint()
=> $"{Api.BaseUrl}chat";

/// <summary>
/// Creates a completion for the chat message
/// Creates a completion for the chat message.
/// </summary>
/// <param name="chatRequest">The chat request which contains the message content.</param>
/// <param name="cancellationToken">Optional, <see cref="CancellationToken"/>.</param>
/// <returns><see cref="ChatResponse"/>.</returns>
/// <exception cref="HttpRequestException">Raised when the HTTP request fails</exception>
public async Task<ChatResponse> GetCompletionAsync(ChatRequest chatRequest, CancellationToken cancellationToken = default)
{
var payload = JsonConvert.SerializeObject(chatRequest, Api.JsonSerializationOptions).ToJsonStringContent();
var result = await Api.Client.PostAsync($"{GetEndpoint()}/completions", payload, cancellationToken);
var resultAsString = await result.ReadAsStringAsync();
return JsonConvert.DeserializeObject<ChatResponse>(resultAsString, Api.JsonSerializationOptions);
var response = await Api.Client.PostAsync($"{GetEndpoint()}/completions", payload, cancellationToken);
var responseAsString = await response.ReadAsStringAsync();
return response.DeserializeResponse<ChatResponse>(responseAsString, Api.JsonSerializationOptions);
}

// TODO Streaming endpoints
/// <summary>
/// Creates a completion for the chat message and streams the results to the <paramref name="resultHandler"/> as they come in.
/// </summary>
/// <param name="chatRequest">The chat request which contains the message content.</param>
/// <param name="resultHandler">An action to be called as each new result arrives.</param>
/// <param name="cancellationToken">Optional, <see cref="CancellationToken"/>.</param>
/// <returns>A <see cref="Task"/> that completes once the stream has been fully consumed.</returns>
/// <exception cref="HttpRequestException">Raised when the HTTP request fails</exception>
public async Task StreamCompletionAsync(ChatRequest chatRequest, Action<ChatResponse> resultHandler, CancellationToken cancellationToken = default)
{
chatRequest.Stream = true;
var jsonContent = JsonConvert.SerializeObject(chatRequest, Api.JsonSerializationOptions);
using var request = new HttpRequestMessage(HttpMethod.Post, $"{GetEndpoint()}/completions")
{
Content = jsonContent.ToJsonStringContent()
};
var response = await Api.Client.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, cancellationToken);
await response.CheckResponseAsync(cancellationToken);
await using var stream = await response.Content.ReadAsStreamAsync();
using var reader = new StreamReader(stream);

while (await reader.ReadLineAsync() is { } line)
{
if (line.StartsWith("data: "))
{
line = line["data: ".Length..];
}

if (line == "[DONE]")
{
return;
}

if (!string.IsNullOrWhiteSpace(line))
{
resultHandler(response.DeserializeResponse<ChatResponse>(line.Trim(), Api.JsonSerializationOptions));
}
}
}

/// <summary>
/// Creates a completion for the chat message and streams the results as they come in.<br/>
/// If you are not using C# 8 supporting IAsyncEnumerable{T} or if you are using the .NET Framework,
/// you may need to use <see cref="StreamCompletionAsync(ChatRequest, Action{ChatResponse}, CancellationToken)"/> instead.
/// </summary>
/// <param name="chatRequest">The chat request which contains the message content.</param>
/// <param name="cancellationToken">Optional, <see cref="CancellationToken"/>.</param>
/// <returns>An <see cref="IAsyncEnumerable{T}"/> of <see cref="ChatResponse"/> results.</returns>
/// <exception cref="HttpRequestException">Raised when the HTTP request fails</exception>
public async IAsyncEnumerable<ChatResponse> StreamCompletionEnumerableAsync(ChatRequest chatRequest, [EnumeratorCancellation] CancellationToken cancellationToken = default)
{
chatRequest.Stream = true;
var jsonContent = JsonConvert.SerializeObject(chatRequest, Api.JsonSerializationOptions);
using var request = new HttpRequestMessage(HttpMethod.Post, $"{GetEndpoint()}/completions")
{
Content = jsonContent.ToJsonStringContent()
};
var response = await Api.Client.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, cancellationToken);
await response.CheckResponseAsync(cancellationToken);
await using var stream = await response.Content.ReadAsStreamAsync();
using var reader = new StreamReader(stream);

while (await reader.ReadLineAsync() is { } line &&
!cancellationToken.IsCancellationRequested)
{
if (line.StartsWith("data: "))
{
line = line["data: ".Length..];
}

if (line == "[DONE]")
{
yield break;
}

if (!string.IsNullOrWhiteSpace(line))
{
yield return response.DeserializeResponse<ChatResponse>(line.Trim(), Api.JsonSerializationOptions);
}
}
}
}
}
67 changes: 62 additions & 5 deletions Runtime/Chat/ChatRequest.cs
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,62 @@ namespace OpenAI.Chat
{
public sealed class ChatRequest
{
/// <summary>
/// Constructor.
/// </summary>
/// <param name="messages"></param>
/// <param name="model">
/// ID of the model to use. Currently, only gpt-3.5-turbo and gpt-3.5-turbo-0301 are supported.
/// </param>
/// <param name="temperature">
/// What sampling temperature to use, between 0 and 2.
/// Higher values like 0.8 will make the output more random, while lower values like 0.2 will
/// make it more focused and deterministic.
/// We generally recommend altering this or top_p but not both.<br/>
/// Defaults to 1
/// </param>
/// <param name="topP">
/// An alternative to sampling with temperature, called nucleus sampling,
/// where the model considers the results of the tokens with top_p probability mass.
/// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
/// We generally recommend altering this or temperature but not both.<br/>
/// Defaults to 1
/// </param>
/// <param name="number">
/// How many chat completion choices to generate for each input message.<br/>
/// Defaults to 1
/// </param>
/// <param name="stops">
/// Up to 4 sequences where the API will stop generating further tokens.
/// </param>
/// <param name="maxTokens">
/// The maximum number of tokens allowed for the generated answer.
/// By default, the number of tokens the model can return will be (4096 - prompt tokens).
/// </param>
/// <param name="presencePenalty">
/// Number between -2.0 and 2.0.
/// Positive values penalize new tokens based on whether they appear in the text so far,
/// increasing the model's likelihood to talk about new topics.<br/>
/// Defaults to 0
/// </param>
/// <param name="frequencyPenalty">
/// Number between -2.0 and 2.0.
/// Positive values penalize new tokens based on their existing frequency in the text so far,
/// decreasing the model's likelihood to repeat the same line verbatim.<br/>
/// Defaults to 0
/// </param>
/// <param name="logitBias">
/// Modify the likelihood of specified tokens appearing in the completion.
/// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer)
/// to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits
/// generated by the model prior to sampling. The exact effect will vary per model, but values between
/// -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result
/// in a ban or exclusive selection of the relevant token.<br/>
/// Defaults to null
/// </param>
/// <param name="user">
/// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
/// </param>
public ChatRequest(
IEnumerable<ChatPrompt> messages,
Model model = null,
Expand All @@ -23,12 +79,11 @@ public ChatRequest(
Dictionary<string, double> logitBias = null,
string user = null)
{
const string defaultModel = "gpt-3.5-turbo";
Model = model ?? Models.Model.GPT3_5_Turbo;

if (!Model.Contains(defaultModel))
if (!Model.Contains("turbo"))
{
throw new ArgumentException(nameof(model), $"{Model} not supported");
throw new ArgumentException($"{Model} is not supported", nameof(model));
}

Messages = messages?.ToList();
Expand Down Expand Up @@ -127,7 +182,8 @@ public ChatRequest(
[JsonProperty("frequency_penalty")]
public double? FrequencyPenalty { get; }

/// <summary>Modify the likelihood of specified tokens appearing in the completion.
/// <summary>
/// Modify the likelihood of specified tokens appearing in the completion.
/// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer)
/// to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits
/// generated by the model prior to sampling. The exact effect will vary per model, but values between
Expand All @@ -136,14 +192,15 @@ public ChatRequest(
/// Defaults to null
/// </summary>
[JsonProperty("logit_bias")]
public IReadOnlyDictionary<string, double> LogitBias { get; set; }
public IReadOnlyDictionary<string, double> LogitBias { get; }

/// <summary>
/// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
/// </summary>
[JsonProperty("user")]
public string User { get; }

/// <inheritdoc />
public override string ToString() => JsonConvert.SerializeObject(this);
}
}
5 changes: 2 additions & 3 deletions Runtime/Chat/ChatResponse.cs
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

namespace OpenAI.Chat
{
public sealed class ChatResponse
public sealed class ChatResponse : BaseResponse
{
[JsonConstructor]
public ChatResponse(
Expand All @@ -15,8 +15,7 @@ public ChatResponse(
[JsonProperty("created")] int created,
[JsonProperty("model")] string model,
[JsonProperty("usage")] Usage usage,
[JsonProperty("choices")] List<Choice> choices
)
[JsonProperty("choices")] List<Choice> choices)
{
Id = id;
Object = @object;
Expand Down
7 changes: 6 additions & 1 deletion Runtime/Chat/Choice.cs
Original file line number Diff line number Diff line change
Expand Up @@ -9,24 +9,29 @@ public sealed class Choice
[JsonConstructor]
public Choice(
[JsonProperty("message")] Message message,
[JsonProperty("delta")] Delta delta,
[JsonProperty("finish_reason")] string finishReason,
[JsonProperty("index")] int index)
{
Message = message;
Delta = delta;
FinishReason = finishReason;
Index = index;
}

[JsonProperty("message")]
public Message Message { get; }

[JsonProperty("delta")]
public Delta Delta { get; }

[JsonProperty("finish_reason")]
public string FinishReason { get; }

[JsonProperty("index")]
public int Index { get; }

public override string ToString() => Message.ToString();
public override string ToString() => Message?.ToString() ?? Delta.Content;

public static implicit operator string(Choice choice) => choice.ToString();
}
Expand Down
24 changes: 24 additions & 0 deletions Runtime/Chat/Delta.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
// Licensed under the MIT License. See LICENSE in the project root for license information.

using Newtonsoft.Json;

namespace OpenAI.Chat
{
/// <summary>
/// The incremental message payload ("delta") carried by a streamed chat completion choice.
/// Immutable; populated by Newtonsoft.Json during deserialization of each stream chunk.
/// </summary>
public sealed class Delta
{
    /// <summary>
    /// Deserialization constructor.
    /// </summary>
    /// <param name="role">The author role reported for this chunk.</param>
    /// <param name="content">The content fragment contained in this chunk.</param>
    [JsonConstructor]
    public Delta(
        [JsonProperty("role")] string role,
        [JsonProperty("content")] string content)
    {
        Content = content;
        Role = role;
    }

    /// <summary>
    /// The author role reported for this chunk.
    /// </summary>
    [JsonProperty("role")]
    public string Role { get; }

    /// <summary>
    /// The content fragment contained in this chunk.
    /// </summary>
    [JsonProperty("content")]
    public string Content { get; }
}
}
11 changes: 11 additions & 0 deletions Runtime/Chat/Delta.cs.meta

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading

0 comments on commit 412273d

Please sign in to comment.