diff --git a/README.md b/README.md index 7aeefa19..de0f559c 100755 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ This is a no-nonsense async Scala client for OpenAI API supporting all the avail * **Models**: [listModels](https://platform.openai.com/docs/api-reference/models/list), and [retrieveModel](https://platform.openai.com/docs/api-reference/models/retrieve) * **Completions**: [createCompletion](https://platform.openai.com/docs/api-reference/completions/create) -* **Chat Completions**: [createChatCompletion](https://platform.openai.com/docs/api-reference/chat/create) (also with GPT vision support!), [createChatFunCompletion](https://platform.openai.com/docs/api-reference/chat/create) (deprecated), and [createChatToolCompletion](https://platform.openai.com/docs/api-reference/chat/create) +* **Chat Completions**: [createChatCompletion](https://platform.openai.com/docs/api-reference/chat/create) (also with JSON schema support 🔥), [createChatFunCompletion](https://platform.openai.com/docs/api-reference/chat/create) (deprecated), and [createChatToolCompletion](https://platform.openai.com/docs/api-reference/chat/create) * **Edits**: [createEdit](https://platform.openai.com/docs/api-reference/edits/create) (deprecated) * **Images**: [createImage](https://platform.openai.com/docs/api-reference/images/create), [createImageEdit](https://platform.openai.com/docs/api-reference/images/create-edit), and [createImageVariation](https://platform.openai.com/docs/api-reference/images/create-variation) * **Embeddings**: [createEmbeddings](https://platform.openai.com/docs/api-reference/embeddings/create) @@ -33,10 +33,11 @@ In addition to the OpenAI API, this library also supports API-compatible provide - [Azure AI](https://azure.microsoft.com/en-us/products/ai-studio) - cloud-based, offers a vast selection of open-source models - [Anthropic](https://www.anthropic.com/api) - cloud-based, a major competitor to OpenAI, features proprietary/closed-source models such as Claude3 - 
Haiku, Sonnet, and Opus - [Google Vertex AI](https://cloud.google.com/vertex-ai) (🔥 **New**) - cloud-based, features proprietary/closed-source models such as Gemini 1.5 Pro and flash -- [Groq](https://wow.groq.com/) - cloud-based provider, known for its super-fast inference with LPUs +- [Groq](https://wow.groq.com/) - cloud-based provider, known for its superfast inference with LPUs - [Fireworks AI](https://fireworks.ai/) - cloud-based provider - [OctoAI](https://octo.ai/) - cloud-based provider - [TogetherAI](https://www.together.ai/) (🔥 **New**) - cloud-based provider +- [Cerebras](https://cerebras.ai/) (🔥 **New**) - cloud-based provider, superfast (akin to Groq) - [Mistral](https://mistral.ai/) (🔥 **New**) - cloud-based, leading open-source LLM company - [Ollama](https://ollama.com/) - runs locally, serves as an umbrella for open-source LLMs including LLaMA3, dbrx, and Command-R - [FastChat](https://github.com/lm-sys/FastChat) - runs locally, serves as an umbrella for open-source LLMs such as Vicuna, Alpaca, and FastChat-T5 diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/BaseMessage.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/BaseMessage.scala index 98cef8c4..6867d32e 100644 --- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/BaseMessage.scala +++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/BaseMessage.scala @@ -124,6 +124,7 @@ final case class FunMessage( /** * Deprecation warning: Use typed Message(s), such as SystemMessage, UserMessage, instead. + * Will be dropped in the next major version. 
*/ @Deprecated final case class MessageSpec( diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/ChatCompletionProvider.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/ChatCompletionProvider.scala index 3c2d9ba3..0ce5a495 100644 --- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/ChatCompletionProvider.scala +++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/ChatCompletionProvider.scala @@ -2,128 +2,102 @@ package io.cequence.openaiscala.examples import akka.stream.Materializer import io.cequence.openaiscala.anthropic.service.AnthropicServiceFactory -import io.cequence.openaiscala.service.{ - OpenAIChatCompletionService, - OpenAIChatCompletionServiceFactory, - OpenAIChatCompletionStreamedServiceExtra, - OpenAIChatCompletionStreamedServiceFactory -} +import io.cequence.openaiscala.service.OpenAIChatCompletionServiceFactory +import io.cequence.openaiscala.service.OpenAIStreamedServiceImplicits._ import io.cequence.openaiscala.vertexai.service.VertexAIServiceFactory import io.cequence.wsclient.domain.WsRequestContext import scala.concurrent.ExecutionContext -import io.cequence.openaiscala.service.StreamedServiceTypes +import io.cequence.openaiscala.service.StreamedServiceTypes.OpenAIChatCompletionStreamedService object ChatCompletionProvider { - case class ProviderSettings( + private case class ProviderSettings( coreUrl: String, apiKeyEnvVariable: String ) - val Cerebras = ProviderSettings("https://api.cerebras.ai/v1/", "CEREBRAS_API_KEY") - val Groq = ProviderSettings("https://api.groq.com/openai/v1/", "GROQ_API_KEY") - val Fireworks = + private val Cerebras = ProviderSettings("https://api.cerebras.ai/v1/", "CEREBRAS_API_KEY") + private val Groq = ProviderSettings("https://api.groq.com/openai/v1/", "GROQ_API_KEY") + private val Fireworks = ProviderSettings("https://api.fireworks.ai/inference/v1/", "FIREWORKS_API_KEY") - val Mistral = ProviderSettings("https://api.mistral.ai/v1/", 
"MISTRAL_API_KEY") - val OctoML = ProviderSettings("https://text.octoai.run/v1/", "OCTOAI_TOKEN") - val TogetherAI = ProviderSettings("https://api.together.xyz/v1/", "TOGETHERAI_API_KEY") + private val Mistral = ProviderSettings("https://api.mistral.ai/v1/", "MISTRAL_API_KEY") + private val OctoML = ProviderSettings("https://text.octoai.run/v1/", "OCTOAI_TOKEN") + private val TogetherAI = ProviderSettings("https://api.together.xyz/v1/", "TOGETHERAI_API_KEY") + /** + * Requires `CEREBRAS_API_KEY` + */ def cerebras( implicit ec: ExecutionContext, m: Materializer - ): OpenAIChatCompletionService = provide(Cerebras) + ): OpenAIChatCompletionStreamedService = provide(Cerebras) + /** + * Requires `GROQ_API_KEY` + */ def groq( implicit ec: ExecutionContext, m: Materializer - ): OpenAIChatCompletionService = provide(Groq) + ): OpenAIChatCompletionStreamedService = provide(Groq) + /** + * Requires `FIREWORKS_API_KEY` + */ def fireworks( implicit ec: ExecutionContext, m: Materializer - ): OpenAIChatCompletionService = provide(Fireworks) + ): OpenAIChatCompletionStreamedService = provide(Fireworks) + /** + * Requires `MISTRAL_API_KEY` + */ def mistral( implicit ec: ExecutionContext, m: Materializer - ): OpenAIChatCompletionService = provide(Mistral) + ): OpenAIChatCompletionStreamedService = provide(Mistral) + /** + * Requires `OCTOAI_TOKEN` + */ def octoML( implicit ec: ExecutionContext, m: Materializer - ): OpenAIChatCompletionService = provide(OctoML) + ): OpenAIChatCompletionStreamedService = provide(OctoML) + /** + * Requires `TOGETHERAI_API_KEY` + */ def togetherAI( implicit ec: ExecutionContext, m: Materializer - ): OpenAIChatCompletionService = provide(TogetherAI) + ): OpenAIChatCompletionStreamedService = provide(TogetherAI) + /** + * Requires `VERTEXAI_API_KEY` and `VERTEXAI_LOCATION` + */ def vertexAI( - implicit ec: ExecutionContext, - m: Materializer - ): StreamedServiceTypes.OpenAIChatCompletionStreamedService = + implicit ec: ExecutionContext + ): 
OpenAIChatCompletionStreamedService = VertexAIServiceFactory.asOpenAI() + /** + * Requires `ANTHROPIC_API_KEY` + */ def anthropic( implicit ec: ExecutionContext, m: Materializer - ): StreamedServiceTypes.OpenAIChatCompletionStreamedService = + ): OpenAIChatCompletionStreamedService = AnthropicServiceFactory.asOpenAI() - object streamed { - def cerebras( - implicit ec: ExecutionContext, - m: Materializer - ): OpenAIChatCompletionStreamedServiceExtra = provideStreamed(Cerebras) - - def groq( - implicit ec: ExecutionContext, - m: Materializer - ): OpenAIChatCompletionStreamedServiceExtra = provideStreamed(Groq) - - def fireworks( - implicit ec: ExecutionContext, - m: Materializer - ): OpenAIChatCompletionStreamedServiceExtra = provideStreamed(Fireworks) - - def mistral( - implicit ec: ExecutionContext, - m: Materializer - ): OpenAIChatCompletionStreamedServiceExtra = provideStreamed(Mistral) - - def octoML( - implicit ec: ExecutionContext, - m: Materializer - ): OpenAIChatCompletionStreamedServiceExtra = provideStreamed(OctoML) - - def togetherAI( - implicit ec: ExecutionContext, - m: Materializer - ): OpenAIChatCompletionStreamedServiceExtra = provideStreamed(TogetherAI) - } - private def provide( settings: ProviderSettings )( implicit ec: ExecutionContext, m: Materializer - ): OpenAIChatCompletionService = OpenAIChatCompletionServiceFactory( - coreUrl = settings.coreUrl, - WsRequestContext(authHeaders = - Seq(("Authorization", s"Bearer ${sys.env(settings.apiKeyEnvVariable)}")) - ) - ) - - private def provideStreamed( - settings: ProviderSettings - )( - implicit ec: ExecutionContext, - m: Materializer - ): OpenAIChatCompletionStreamedServiceExtra = OpenAIChatCompletionStreamedServiceFactory( + ): OpenAIChatCompletionStreamedService = OpenAIChatCompletionServiceFactory.withStreaming( coreUrl = settings.coreUrl, WsRequestContext(authHeaders = Seq(("Authorization", s"Bearer ${sys.env(settings.apiKeyEnvVariable)}")) ) ) - } diff --git 
a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateChatCompletionStreamedWithOpenAIAdapter.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateChatCompletionStreamedWithOpenAIAdapter.scala index f06df2d1..f583a7e9 100644 --- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateChatCompletionStreamedWithOpenAIAdapter.scala +++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateChatCompletionStreamedWithOpenAIAdapter.scala @@ -18,8 +18,7 @@ object AnthropicCreateChatCompletionStreamedWithOpenAIAdapter private val logger = LoggerFactory.getLogger(this.getClass) - override val service: OpenAIChatCompletionStreamedService = - ChatCompletionProvider.anthropic + override val service: OpenAIChatCompletionStreamedService = ChatCompletionProvider.anthropic private val messages = Seq( SystemMessage("You are a helpful assistant."), diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateChatCompletionWithOpenAIAdapter.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateChatCompletionWithOpenAIAdapter.scala index d2e58dff..dc5d1504 100644 --- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateChatCompletionWithOpenAIAdapter.scala +++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/AnthropicCreateChatCompletionWithOpenAIAdapter.scala @@ -11,8 +11,7 @@ import scala.concurrent.Future object AnthropicCreateChatCompletionWithOpenAIAdapter extends ExampleBase[OpenAIChatCompletionService] { - override val service: OpenAIChatCompletionService = - ChatCompletionProvider.anthropic + override val service: OpenAIChatCompletionService = ChatCompletionProvider.anthropic private val messages = Seq( SystemMessage("You are a helpful assistant."), diff --git 
a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/CerebrasCreateChatCompletion.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/CerebrasCreateChatCompletion.scala index 712e008b..50bd4e3e 100644 --- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/CerebrasCreateChatCompletion.scala +++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/CerebrasCreateChatCompletion.scala @@ -12,8 +12,7 @@ import scala.concurrent.Future */ object CerebrasCreateChatCompletion extends ExampleBase[OpenAIChatCompletionService] { - override val service: OpenAIChatCompletionService = - ChatCompletionProvider.cerebras + override val service: OpenAIChatCompletionService = ChatCompletionProvider.cerebras private val messages = Seq( SystemMessage("You are a helpful assistant."), diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/CerebrasCreateChatCompletionStreamed.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/CerebrasCreateChatCompletionStreamed.scala index 4b03cf42..d27dbe6c 100644 --- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/CerebrasCreateChatCompletionStreamed.scala +++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/CerebrasCreateChatCompletionStreamed.scala @@ -12,8 +12,7 @@ import scala.concurrent.Future object CerebrasCreateChatCompletionStreamed extends ExampleBase[OpenAIChatCompletionStreamedServiceExtra] { - override val service: OpenAIChatCompletionStreamedServiceExtra = - ChatCompletionProvider.streamed.cerebras + override val service: OpenAIChatCompletionStreamedServiceExtra = ChatCompletionProvider.cerebras private val messages = Seq( SystemMessage("You are a helpful assistant."), diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/FireworksAICreateChatCompletion.scala 
b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/FireworksAICreateChatCompletion.scala index 1ea2d91b..8d024c0a 100644 --- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/FireworksAICreateChatCompletion.scala +++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/FireworksAICreateChatCompletion.scala @@ -16,8 +16,7 @@ import scala.concurrent.Future object FireworksAICreateChatCompletion extends ExampleBase[OpenAIChatCompletionService] { private val fireworksModelPrefix = "accounts/fireworks/models/" - override val service: OpenAIChatCompletionService = - ChatCompletionProvider.fireworks + override val service: OpenAIChatCompletionService = ChatCompletionProvider.fireworks private val messages = Seq( SystemMessage("You are a helpful assistant."), diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/FireworksAICreateChatCompletionStreamed.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/FireworksAICreateChatCompletionStreamed.scala index 7d09339f..fc1f51f8 100644 --- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/FireworksAICreateChatCompletionStreamed.scala +++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/FireworksAICreateChatCompletionStreamed.scala @@ -13,8 +13,7 @@ object FireworksAICreateChatCompletionStreamed extends ExampleBase[OpenAIChatCompletionStreamedServiceExtra] { private val fireworksModelPrefix = "accounts/fireworks/models/" - override val service: OpenAIChatCompletionStreamedServiceExtra = - ChatCompletionProvider.streamed.fireworks + override val service: OpenAIChatCompletionStreamedServiceExtra = ChatCompletionProvider.fireworks private val messages = Seq( SystemMessage("You are a helpful assistant."), diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/GroqCreateChatCompletion.scala 
b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/GroqCreateChatCompletion.scala index 3b9db127..b282697e 100644 --- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/GroqCreateChatCompletion.scala +++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/GroqCreateChatCompletion.scala @@ -12,8 +12,7 @@ import scala.concurrent.Future */ object GroqCreateChatCompletion extends ExampleBase[OpenAIChatCompletionService] { - override val service: OpenAIChatCompletionService = - ChatCompletionProvider.groq + override val service: OpenAIChatCompletionService = ChatCompletionProvider.groq private val messages = Seq( SystemMessage("You are a helpful assistant."), diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/GroqCreateChatCompletionStreamed.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/GroqCreateChatCompletionStreamed.scala index b07ef748..0df098b4 100644 --- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/GroqCreateChatCompletionStreamed.scala +++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/GroqCreateChatCompletionStreamed.scala @@ -12,8 +12,7 @@ import scala.concurrent.Future object GroqCreateChatCompletionStreamed extends ExampleBase[OpenAIChatCompletionStreamedServiceExtra] { - override val service: OpenAIChatCompletionStreamedServiceExtra = - ChatCompletionProvider.streamed.groq + override val service: OpenAIChatCompletionStreamedServiceExtra = ChatCompletionProvider.groq private val messages = Seq( SystemMessage("You are a helpful assistant."), diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/MistralCreateChatCompletion.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/MistralCreateChatCompletion.scala index d30d350b..7cde313e 100644 --- 
a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/MistralCreateChatCompletion.scala +++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/MistralCreateChatCompletion.scala @@ -10,8 +10,7 @@ import scala.concurrent.Future // requires `MISTRAL_API_KEY` environment variable to be set object MistralCreateChatCompletion extends ExampleBase[OpenAIChatCompletionService] { - override val service: OpenAIChatCompletionService = - ChatCompletionProvider.mistral + override val service: OpenAIChatCompletionService = ChatCompletionProvider.mistral private val messages = Seq( SystemMessage("You are a helpful assistant."), diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/MistralCreateChatCompletionStreamed.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/MistralCreateChatCompletionStreamed.scala index 7cf2c3f3..dd2a0a06 100644 --- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/MistralCreateChatCompletionStreamed.scala +++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/MistralCreateChatCompletionStreamed.scala @@ -12,8 +12,7 @@ import scala.concurrent.Future object MistralCreateChatCompletionStreamed extends ExampleBase[OpenAIChatCompletionStreamedServiceExtra] { - override val service: OpenAIChatCompletionStreamedServiceExtra = - ChatCompletionProvider.streamed.mistral + override val service: OpenAIChatCompletionStreamedServiceExtra = ChatCompletionProvider.mistral private val messages = Seq( SystemMessage("You are a helpful assistant."), diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/OctoMLCreateChatCompletion.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/OctoMLCreateChatCompletion.scala index 0e7d3fd4..8987f4a3 100644 --- 
a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/OctoMLCreateChatCompletion.scala +++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/OctoMLCreateChatCompletion.scala @@ -10,8 +10,7 @@ import scala.concurrent.Future // requires `OCTOAI_TOKEN` environment variable to be set object OctoMLCreateChatCompletion extends ExampleBase[OpenAIChatCompletionService] { - override val service: OpenAIChatCompletionService = - ChatCompletionProvider.octoML + override val service: OpenAIChatCompletionService = ChatCompletionProvider.octoML private val messages = Seq( SystemMessage("You are a helpful assistant."), diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/OctoMLCreateChatCompletionStreamed.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/OctoMLCreateChatCompletionStreamed.scala index b5bfc6c7..16c871f3 100644 --- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/OctoMLCreateChatCompletionStreamed.scala +++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/OctoMLCreateChatCompletionStreamed.scala @@ -12,8 +12,7 @@ import scala.concurrent.Future object OctoMLCreateChatCompletionStreamed extends ExampleBase[OpenAIChatCompletionStreamedServiceExtra] { - override val service: OpenAIChatCompletionStreamedServiceExtra = - ChatCompletionProvider.streamed.octoML + override val service: OpenAIChatCompletionStreamedServiceExtra = ChatCompletionProvider.octoML private val messages = Seq( SystemMessage("You are a helpful assistant."), diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/TogetherAICreateChatCompletion.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/TogetherAICreateChatCompletion.scala index 241153ca..74253a75 100644 --- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/TogetherAICreateChatCompletion.scala 
+++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/TogetherAICreateChatCompletion.scala @@ -12,8 +12,7 @@ import scala.concurrent.Future */ object TogetherAICreateChatCompletion extends ExampleBase[OpenAIChatCompletionService] { - override val service: OpenAIChatCompletionService = - ChatCompletionProvider.togetherAI + override val service: OpenAIChatCompletionService = ChatCompletionProvider.togetherAI private val messages = Seq( SystemMessage("You are a helpful assistant."), diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/VertexAICreateChatCompletionStreamedWithOpenAIAdapter.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/VertexAICreateChatCompletionStreamedWithOpenAIAdapter.scala index 3ac306f9..af22e912 100644 --- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/VertexAICreateChatCompletionStreamedWithOpenAIAdapter.scala +++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/VertexAICreateChatCompletionStreamedWithOpenAIAdapter.scala @@ -13,8 +13,7 @@ import scala.concurrent.Future object VertexAICreateChatCompletionStreamedWithOpenAIAdapter extends ExampleBase[OpenAIChatCompletionService] { - override val service: OpenAIChatCompletionStreamedService = - ChatCompletionProvider.vertexAI + override val service: OpenAIChatCompletionStreamedService = ChatCompletionProvider.vertexAI private val model = NonOpenAIModelId.gemini_1_5_flash_001 diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/VertexAICreateChatCompletionWithOpenAIAdapter.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/VertexAICreateChatCompletionWithOpenAIAdapter.scala index c73ae4f9..cc0ee688 100644 --- a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/VertexAICreateChatCompletionWithOpenAIAdapter.scala +++ 
b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/nonopenai/VertexAICreateChatCompletionWithOpenAIAdapter.scala @@ -11,8 +11,7 @@ import scala.concurrent.Future object VertexAICreateChatCompletionWithOpenAIAdapter extends ExampleBase[OpenAIChatCompletionService] { - override val service: OpenAIChatCompletionService = - ChatCompletionProvider.vertexAI + override val service: OpenAIChatCompletionService = ChatCompletionProvider.vertexAI private val model = NonOpenAIModelId.gemini_1_5_pro_001