Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactor response format structure to enforce type safety and add response format in Run also. #395

Open
wants to merge 10 commits into
base: main
Choose a base branch
from
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
package com.aallam.openai.client

import com.aallam.openai.api.assistant.AssistantResponseFormat
import com.aallam.openai.api.assistant.AssistantTool
import com.aallam.openai.api.assistant.assistantRequest
import com.aallam.openai.api.chat.ToolCall
import com.aallam.openai.api.core.JsonSchema
import com.aallam.openai.api.core.ResponseFormat
import com.aallam.openai.api.core.Schema
import com.aallam.openai.api.model.ModelId
import com.aallam.openai.api.run.RequiredAction
import com.aallam.openai.api.run.Run
Expand All @@ -26,7 +28,7 @@ class TestAssistants : TestOpenAI() {
name = "Math Tutor"
tools = listOf(AssistantTool.CodeInterpreter)
model = ModelId("gpt-4o")
responseFormat = AssistantResponseFormat.TEXT
responseFormat = ResponseFormat.TextResponseFormat
}
val assistant = openAI.assistant(
request = request,
Expand All @@ -46,7 +48,7 @@ class TestAssistants : TestOpenAI() {

val updated = assistantRequest {
name = "Super Math Tutor"
responseFormat = AssistantResponseFormat.AUTO
responseFormat = ResponseFormat.AutoResponseFormat
}
val updatedAssistant = openAI.assistant(
assistant.id,
Expand Down Expand Up @@ -154,20 +156,22 @@ class TestAssistants : TestOpenAI() {

@Test
fun jsonSchemaAssistant() = test {
val jsonSchema = AssistantResponseFormat.JSON_SCHEMA(
name = "TestSchema",
description = "A test schema",
schema = buildJsonObject {
put("type", "object")
put("properties", buildJsonObject {
put("name", buildJsonObject {
put("type", "string")
val jsonSchema = ResponseFormat.JsonSchemaResponseFormat(
schema = JsonSchema(
name = "TestSchema",
description = "A test schema",
schema = Schema.buildJsonObject {
put("type", "object")
put("properties", buildJsonObject {
put("name", buildJsonObject {
put("type", "string")
})
})
})
put("required", JsonArray(listOf(JsonPrimitive("name"))))
put("additionalProperties", false)
},
strict = true
put("required", JsonArray(listOf(JsonPrimitive("name"))))
put("additionalProperties", false)
},
strict = true
)
)

val request = assistantRequest {
Expand All @@ -193,7 +197,7 @@ class TestAssistants : TestOpenAI() {

val updated = assistantRequest {
name = "Updated Schema Assistant"
responseFormat = AssistantResponseFormat.AUTO
responseFormat = ResponseFormat.AutoResponseFormat
}
val updatedAssistant = openAI.assistant(
assistant.id,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package com.aallam.openai.api.assistant

import com.aallam.openai.api.BetaOpenAI
import com.aallam.openai.api.assistant.AssistantTool.*
import com.aallam.openai.api.core.ResponseFormat
import com.aallam.openai.api.model.ModelId
import kotlinx.serialization.SerialName
import kotlinx.serialization.Serializable
Expand Down Expand Up @@ -80,7 +81,7 @@ public data class Assistant(
* Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo
* models since gpt-3.5-turbo-1106.
*
* Setting to [AssistantResponseFormat.JsonObject] enables JSON mode, which guarantees the message the model
* Setting to [ResponseFormat.JsonObjectResponseFormat] enables JSON mode, which guarantees the message the model
* generates is valid JSON.
*
* important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user
Expand All @@ -89,5 +90,5 @@ public data class Assistant(
* partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or
* the conversation exceeded the max context length.
*/
@SerialName("response_format") public val responseFormat: AssistantResponseFormat? = null,
@SerialName("response_format") public val responseFormat: ResponseFormat? = null,
)
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package com.aallam.openai.api.assistant

import com.aallam.openai.api.BetaOpenAI
import com.aallam.openai.api.OpenAIDsl
import com.aallam.openai.api.core.ResponseFormat
import com.aallam.openai.api.model.ModelId
import kotlinx.serialization.SerialName
import kotlinx.serialization.Serializable
Expand Down Expand Up @@ -67,25 +68,24 @@ public data class AssistantRequest(
* Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo
* models since gpt-3.5-turbo-1106.
*
* Setting to [AssistantResponseFormat.JSON_SCHEMA] enables Structured Outputs which ensures the model will match your supplied JSON schema.
* Setting to [ResponseFormat.JsonSchemaResponseFormat] enables Structured Outputs which ensures the model will match your supplied JSON schema.
*
* Structured Outputs ([AssistantResponseFormat.JSON_SCHEMA]) are available in our latest large language models, starting with GPT-4o:
* Structured Outputs ([ResponseFormat.JsonSchemaResponseFormat]) are available in our latest large language models, starting with GPT-4o:
* 1. gpt-4o-mini-2024-07-18 and later
* 2. gpt-4o-2024-08-06 and later
*
* Older models like gpt-4-turbo and earlier may use JSON mode ([AssistantResponseFormat.JSON_OBJECT]) instead.
* Older models like gpt-4-turbo and earlier may use JSON mode ([ResponseFormat.JsonObjectResponseFormat]) instead.
*
* Setting to [AssistantResponseFormat.JSON_OBJECT] enables JSON mode, which guarantees the message the model
* Setting to [ResponseFormat.JsonObjectResponseFormat] enables JSON mode, which guarantees the message the model
* generates is valid JSON.
*
* important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user
* message. Without this, the model may generate an unending stream of whitespace until the generation reaches the
* token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be
* partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or
* the conversation exceeded the max context length.
*
*/
@SerialName("response_format") val responseFormat: AssistantResponseFormat? = null,
@SerialName("response_format") val responseFormat: ResponseFormat? = null,
)

@BetaOpenAI
Expand Down Expand Up @@ -141,10 +141,25 @@ public class AssistantRequestBuilder {
/**
* Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo
* models since gpt-3.5-turbo-1106.
*
* Setting to [ResponseFormat.JsonSchemaResponseFormat] enables Structured Outputs which ensures the model will match your supplied JSON schema.
*
* Structured Outputs ([ResponseFormat.JsonSchemaResponseFormat]) are available in our latest large language models, starting with GPT-4o:
* 1. gpt-4o-mini-2024-07-18 and later
* 2. gpt-4o-2024-08-06 and later
*
* Older models like gpt-4-turbo and earlier may use JSON mode ([ResponseFormat.JsonObjectResponseFormat]) instead.
*
* Setting to [ResponseFormat.JsonObjectResponseFormat] enables JSON mode, which guarantees the message the model
* generates is valid JSON.
*
* important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user
* message. Without this, the model may generate an unending stream of whitespace until the generation reaches the
* token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be
* partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or
* the conversation exceeded the max context length.
*/
public var responseFormat: AssistantResponseFormat? = null


public var responseFormat: ResponseFormat? = null

/**
* Create [Assistant] instance.
Expand Down

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ public data class Function(
*/
@SerialName("name") val name: String,
/**
* The description of what the function does.
* The description of what the function does. Used by the model to choose when and how to call the function.
*/
@SerialName("description") val description: String,
/**
Expand All @@ -25,6 +25,13 @@ public data class Function(
* To describe a function that accepts no parameters, provide [Parameters.Empty]`.
*/
@SerialName("parameters") val parameters: Parameters,
/**
* Whether to enable strict schema adherence when generating the function call.
* If set to true, the model will always follow the exact schema defined in the parameters field.
* Only a subset of JSON Schema is supported when strict is true.
* To learn more about Structured Outputs, see the [function calling guide](https://platform.openai.com/docs/guides/function-calling).
*/
val strict: Boolean? = null
)

/**
Expand All @@ -49,13 +56,22 @@ public class FunctionBuilder {
*/
public var parameters: Parameters? = Parameters.Empty

/**
* Whether to enable strict schema adherence when generating the function call.
* If set to true, the model will always follow the exact schema defined in the parameters field.
* Only a subset of JSON Schema is supported when strict is true.
* To learn more about Structured Outputs, see the [function calling guide](https://platform.openai.com/docs/guides/function-calling).
*/
public var strict: Boolean? = null

/**
* Create [Function] instance.
*/
public fun build(): Function = Function(
name = requireNotNull(name) { "name is required" },
description = requireNotNull(description) { "description is required" },
parameters = requireNotNull(parameters) { "parameters is required" }
parameters = requireNotNull(parameters) { "parameters is required" },
strict = strict
)
}

Expand Down
Loading
Loading