Commit
Merge pull request #57 from pandapknaepel/feature-interfaces
Implement interfaces for endpoint and API classes. Closes #49
OkGoDoIt authored Mar 9, 2023
2 parents 30aca80 + b70077e commit 1adcae8
Showing 10 changed files with 304 additions and 5 deletions.
2 changes: 1 addition & 1 deletion OpenAI_API/Completions/CompletionEndpoint.cs
@@ -9,7 +9,7 @@ namespace OpenAI_API.Completions
/// <summary>
/// Text generation is the core function of the API. You give the API a prompt, and it generates a completion. The way you “program” the API to do a task is by simply describing the task in plain English or providing a few written examples. This simple approach works for a wide range of use cases, including summarization, translation, grammar correction, question answering, chatbots, composing emails, and much more (see the prompt library for inspiration).
/// </summary>
-	public class CompletionEndpoint : EndpointBase
+	public class CompletionEndpoint : EndpointBase, ICompletionEndpoint
{
/// <summary>
/// This allows you to set default parameters for every request, for example to set a default temperature or max tokens. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
135 changes: 135 additions & 0 deletions OpenAI_API/Completions/ICompletionEndpoint.cs
@@ -0,0 +1,135 @@
using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using OpenAI_API.Models;

namespace OpenAI_API.Completions
{
/// <summary>
/// Text generation is the core function of the API. You give the API a prompt, and it generates a completion. The way you “program” the API to do a task is by simply describing the task in plain English or providing a few written examples. This simple approach works for a wide range of use cases, including summarization, translation, grammar correction, question answering, chatbots, composing emails, and much more (see the prompt library for inspiration).
/// </summary>
public interface ICompletionEndpoint
{
/// <summary>
/// This allows you to set default parameters for every request, for example to set a default temperature or max tokens. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
/// </summary>
CompletionRequest DefaultCompletionRequestArgs { get; set; }

/// <summary>
/// Ask the API to complete the prompt(s) using the specified request. This is non-streaming, so it will wait until the API returns the full result.
/// </summary>
/// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
/// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions.</returns>
Task<CompletionResult> CreateCompletionAsync(CompletionRequest request);

/// <summary>
/// Ask the API to complete the prompt(s) using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/> if present.
/// </summary>
/// <param name="prompt">The prompt to generate from</param>
/// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync()"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
/// <param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
/// <param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
/// <param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
/// <param name="numOutputs">How many different choices to request for each prompt.</param>
/// <param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
/// <param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
/// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Completions"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
/// <param name="echo">Echo back the prompt in addition to the completion.</param>
/// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
/// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions.</returns>
Task<CompletionResult> CreateCompletionAsync(string prompt,
Model model = null,
int? max_tokens = null,
double? temperature = null,
double? top_p = null,
int? numOutputs = null,
double? presencePenalty = null,
double? frequencyPenalty = null,
int? logProbs = null,
bool? echo = null,
params string[] stopSequences
);

/// <summary>
/// Ask the API to complete the prompt(s) using the specified prompts, with other parameters drawn from default values specified in <see cref="DefaultCompletionRequestArgs"/> if present. This is non-streaming, so it will wait until the API returns the full result.
/// </summary>
/// <param name="prompts">One or more prompts to generate from</param>
/// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions.</returns>
Task<CompletionResult> CreateCompletionAsync(params string[] prompts);

/// <summary>
/// Ask the API to complete the prompt(s) using the specified request and a requested number of outputs. This is non-streaming, so it will wait until the API returns the full result.
/// </summary>
/// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
/// <param name="numOutputs">Overrides <see cref="CompletionRequest.NumChoicesPerPrompt"/> as a convenience.</param>
/// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions, which should have a length equal to <paramref name="numOutputs"/>.</returns>
Task<CompletionResult> CreateCompletionsAsync(CompletionRequest request, int numOutputs = 5);

/// <summary>
/// Ask the API to complete the prompt(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
/// If you are using C# 8.0 or later, which supports async enumerables, you may prefer the cleaner syntax of <see cref="StreamCompletionEnumerableAsync(CompletionRequest)"/> instead.
/// </summary>
/// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
/// <param name="resultHandler">An action to be called as each new result arrives, which includes the index of the result in the overall result set.</param>
Task StreamCompletionAsync(CompletionRequest request, Action<int, CompletionResult> resultHandler);

/// <summary>
/// Ask the API to complete the prompt(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
/// If you are using C# 8.0 or later, which supports async enumerables, you may prefer the cleaner syntax of <see cref="StreamCompletionEnumerableAsync(CompletionRequest)"/> instead.
/// </summary>
/// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
/// <param name="resultHandler">An action to be called as each new result arrives.</param>
Task StreamCompletionAsync(CompletionRequest request, Action<CompletionResult> resultHandler);

/// <summary>
/// Ask the API to complete the prompt(s) using the specified request, and stream the results as they come in.
/// If you are not using C# 8.0 or later, which adds async enumerables, or if you are on the .NET Framework, you may need to use <see cref="StreamCompletionAsync(CompletionRequest, Action{CompletionResult})"/> instead.
/// </summary>
/// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
/// <returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams"/> for more details on how to consume an async enumerable.</returns>
IAsyncEnumerable<CompletionResult> StreamCompletionEnumerableAsync(CompletionRequest request);

/// <summary>
/// Ask the API to complete the prompt(s) using the specified parameters.
/// Any non-specified parameters will fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/> if present.
/// If you are not using C# 8.0 or later, which adds async enumerables, or if you are on the .NET Framework, you may need to use <see cref="StreamCompletionAsync(CompletionRequest, Action{CompletionResult})"/> instead.
/// </summary>
/// <param name="prompt">The prompt to generate from</param>
/// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync()"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
/// <param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
/// <param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
/// <param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
/// <param name="numOutputs">How many different choices to request for each prompt.</param>
/// <param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
/// <param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
/// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Completions"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
/// <param name="echo">Echo back the prompt in addition to the completion.</param>
/// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
/// <returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams">the C# docs</see> for more details on how to consume an async enumerable.</returns>
IAsyncEnumerable<CompletionResult> StreamCompletionEnumerableAsync(string prompt,
Model model = null,
int? max_tokens = null,
double? temperature = null,
double? top_p = null,
int? numOutputs = null,
double? presencePenalty = null,
double? frequencyPenalty = null,
int? logProbs = null,
bool? echo = null,
params string[] stopSequences);

/// <summary>
/// Simply returns a string of the prompt followed by the best completion
/// </summary>
/// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
/// <returns>A string of the prompt followed by the best completion</returns>
Task<string> CreateAndFormatCompletion(CompletionRequest request);

/// <summary>
/// Simply returns the best completion
/// </summary>
/// <param name="prompt">The prompt to complete</param>
/// <returns>The best completion</returns>
Task<string> GetCompletion(string prompt);
}
}
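The point of extracting ICompletionEndpoint (and the sibling interfaces below) is that consumers can depend on the abstraction and swap in a test double. A minimal sketch of that pattern using the Moq mocking library; the GreetingService class and the prompt text are illustrative, not part of this PR:

using System;
using System.Threading.Tasks;
using Moq;                          // Moq NuGet package, assumed available
using OpenAI_API.Completions;

public class GreetingService
{
    private readonly ICompletionEndpoint _completions;

    // Depending on the interface instead of CompletionEndpoint
    // lets unit tests inject a fake with no network access.
    public GreetingService(ICompletionEndpoint completions) => _completions = completions;

    public Task<string> GreetAsync(string name) =>
        _completions.GetCompletion($"Write a one-line greeting for {name}:");
}

public static class GreetingServiceTest
{
    public static async Task Main()
    {
        // Stub GetCompletion so the test never calls the real API.
        var mock = new Mock<ICompletionEndpoint>();
        mock.Setup(c => c.GetCompletion(It.IsAny<string>()))
            .ReturnsAsync("Hello there!");

        var service = new GreetingService(mock.Object);
        Console.WriteLine(await service.GreetAsync("Panda")); // prints "Hello there!"
    }
}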
2 changes: 1 addition & 1 deletion OpenAI_API/Embedding/EmbeddingEndpoint.cs
@@ -6,7 +6,7 @@ namespace OpenAI_API.Embedding
/// <summary>
/// OpenAI’s text embeddings measure the relatedness of text strings by generating an embedding, which is a vector (list) of floating point numbers. The distance between two vectors measures their relatedness. Small distances suggest high relatedness and large distances suggest low relatedness.
/// </summary>
-	public class EmbeddingEndpoint : EndpointBase
+	public class EmbeddingEndpoint : EndpointBase, IEmbeddingEndpoint
{
/// <summary>
/// This allows you to send requests to the recommended model without needing to specify one. Every request uses the <see cref="Model.AdaTextEmbedding"/> model
36 changes: 36 additions & 0 deletions OpenAI_API/Embedding/IEmbeddingEndpoint.cs
@@ -0,0 +1,36 @@
using System.Threading.Tasks;

namespace OpenAI_API.Embedding
{
/// <summary>
/// OpenAI’s text embeddings measure the relatedness of text strings by generating an embedding, which is a vector (list) of floating point numbers. The distance between two vectors measures their relatedness. Small distances suggest high relatedness and large distances suggest low relatedness.
/// </summary>
public interface IEmbeddingEndpoint
{
/// <summary>
/// This allows you to send requests to the recommended model without needing to specify one. Every request uses the <see cref="Model.AdaTextEmbedding"/> model
/// </summary>
EmbeddingRequest DefaultEmbeddingRequestArgs { get; set; }

/// <summary>
/// Ask the API to embed text using the default embedding model <see cref="Model.AdaTextEmbedding"/>
/// </summary>
/// <param name="input">Text to be embedded</param>
/// <returns>Asynchronously returns the embedding result. Look in the <see cref="Data.Embedding"/> property of each entry in <see cref="EmbeddingResult.Data"/> to find the vector of floating point numbers</returns>
Task<EmbeddingResult> CreateEmbeddingAsync(string input);

/// <summary>
/// Ask the API to embed text using a custom request
/// </summary>
/// <param name="request">Request to be send</param>
/// <returns>Asynchronously returns the embedding result. Look in its <see cref="Data.Embedding"/> property of <see cref="EmbeddingResult.Data"/> to find the vector of floating point numbers</returns>
Task<EmbeddingResult> CreateEmbeddingAsync(EmbeddingRequest request);

/// <summary>
/// Ask the API to embed text using the default embedding model <see cref="Model.AdaTextEmbedding"/>
/// </summary>
/// <param name="input">Text to be embedded</param>
/// <returns>Asynchronously returns the first embedding result as an array of floats.</returns>
Task<float[]> GetEmbeddingsAsync(string input);
}
}
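The interface summary above describes relatedness as distance between embedding vectors. A short sketch of how a caller might measure that with GetEmbeddingsAsync; the CosineSimilarity helper and the OpenAIAPI wiring are illustrative assumptions, not part of this diff:

using System;
using System.Threading.Tasks;
using OpenAI_API;
using OpenAI_API.Embedding;

public static class RelatednessDemo
{
    // Cosine similarity over the raw float vectors; helper written
    // here for illustration, not provided by the library.
    static double CosineSimilarity(float[] a, float[] b)
    {
        double dot = 0, normA = 0, normB = 0;
        for (int i = 0; i < a.Length; i++)
        {
            dot += a[i] * b[i];
            normA += a[i] * a[i];
            normB += b[i] * b[i];
        }
        return dot / (Math.Sqrt(normA) * Math.Sqrt(normB));
    }

    public static async Task Main()
    {
        // Assumes OpenAIAPI exposes this endpoint via its Embeddings property.
        IEmbeddingEndpoint embeddings = new OpenAIAPI("sk-...").Embeddings;

        float[] cat = await embeddings.GetEmbeddingsAsync("cat");
        float[] dog = await embeddings.GetEmbeddingsAsync("dog");

        // Values closer to 1 indicate more closely related strings.
        Console.WriteLine($"cat/dog similarity: {CosineSimilarity(cat, dog):F3}");
    }
}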
2 changes: 1 addition & 1 deletion OpenAI_API/Files/FilesEndpoint.cs
@@ -9,7 +9,7 @@ namespace OpenAI_API.Files
/// <summary>
/// The API endpoint for file operations: list, upload, delete, and retrieve files
/// </summary>
-	public class FilesEndpoint : EndpointBase
+	public class FilesEndpoint : EndpointBase, IFilesEndpoint
{
/// <summary>
/// Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of <see cref="OpenAIAPI"/> as <see cref="OpenAIAPI.Files"/>.
46 changes: 46 additions & 0 deletions OpenAI_API/Files/IFilesEndpoint.cs
@@ -0,0 +1,46 @@
using System.Collections.Generic;
using System.Threading.Tasks;

namespace OpenAI_API.Files
{
/// <summary>
/// The API endpoint for file operations: list, upload, delete, and retrieve files
/// </summary>
public interface IFilesEndpoint
{
/// <summary>
/// Get the list of all files
/// </summary>
/// <returns>Asynchronously returns a list of all <see cref="File"/>s in the organization</returns>
/// <exception cref="HttpRequestException">Thrown if the HTTP request to the API fails</exception>
Task<List<File>> GetFilesAsync();

/// <summary>
/// Returns information about a specific file
/// </summary>
/// <param name="fileId">The ID of the file to use for this request</param>
/// <returns>Asynchronously returns the <see cref="File"/> information for the given id</returns>
Task<File> GetFileAsync(string fileId);

/// <summary>
/// Returns the contents of the specific file as string
/// </summary>
/// <param name="fileId">The ID of the file to use for this request</param>
/// <returns>Asynchronously returns the contents of the file as a string</returns>
Task<string> GetFileContentAsStringAsync(string fileId);

/// <summary>
/// Delete a file
/// </summary>
/// <param name="fileId">The ID of the file to use for this request</param>
/// <returns>Asynchronously returns the <see cref="File"/> that was deleted</returns>
Task<File> DeleteFileAsync(string fileId);

/// <summary>
/// Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact OpenAI if you need to increase the storage limit
/// </summary>
/// <param name="filePath">The name of the file to use for this request</param>
/// <param name="purpose">The intendend purpose of the uploaded documents. Use "fine-tune" for Fine-tuning. This allows us to validate the format of the uploaded file.</param>
Task<File> UploadFileAsync(string filePath, string purpose = "fine-tune");
}
}
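A hedged usage sketch for IFilesEndpoint, assuming the library exposes it through OpenAIAPI.Files; the file path and the Id/Name property names on File are assumptions drawn from the library's model class, not guaranteed by this diff:

using System;
using System.Threading.Tasks;
using OpenAI_API;
using OpenAI_API.Files;

public static class FineTuneFilesDemo
{
    public static async Task Main()
    {
        // Assumes OpenAIAPI.Files implements IFilesEndpoint after this PR.
        IFilesEndpoint files = new OpenAIAPI("sk-...").Files;

        // "fine-tune" is the default purpose, per UploadFileAsync's signature.
        File uploaded = await files.UploadFileAsync("training_data.jsonl");
        Console.WriteLine($"Uploaded: {uploaded.Id}");   // Id assumed on the File model

        // Enumerate everything the organization has stored.
        foreach (File f in await files.GetFilesAsync())
            Console.WriteLine(f.Name);                   // Name assumed on the File model
    }
}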