From 761aea9c8df31270f02904b7662ba548a4642bfc Mon Sep 17 00:00:00 2001
From: Anindyadeep Sannigrahi
Date: Fri, 15 Mar 2024 12:30:50 +0530
Subject: [PATCH 01/30] migrating docs from premai org to personal

---
 docs/docs/integrations/chat/premai.ipynb | 271 +++++++++++++++++++++++
 1 file changed, 271 insertions(+)
 create mode 100644 docs/docs/integrations/chat/premai.ipynb

diff --git a/docs/docs/integrations/chat/premai.ipynb b/docs/docs/integrations/chat/premai.ipynb
new file mode 100644
index 0000000000000..6679dc02177ed
--- /dev/null
+++ b/docs/docs/integrations/chat/premai.ipynb
@@ -0,0 +1,271 @@
+{
+ "cells": [
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    "---\n",
+    "sidebar_label: Prem\n",
+    "---"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# ChatPremAI\n",
+    "\n",
+    ">[PremAI](https://app.premai.io) is a unified platform that lets you build powerful, production-ready GenAI applications with the least effort, so that you can focus more on user experience and overall growth. \n",
+    "\n",
+    "\n",
+    "This example goes over how to use LangChain to interact with `ChatPremAI`. "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Installation and setup\n",
+    "\n",
+    "We start by installing langchain and premai-sdk. You can run the following command to install them:\n",
+    "\n",
+    "```bash\n",
+    "pip install premai langchain\n",
+    "```\n",
+    "\n",
+    "Before proceeding further, please make sure that you have created an account on Prem and already started a project. If not, here's how you can start for free:\n",
+    "\n",
+    "1. Sign in to [PremAI](https://app.premai.io/accounts/login/) if you are coming for the first time, and create your API key [here](https://app.premai.io/api_keys/).\n",
+    "\n",
+    "2. Go to [app.premai.io](https://app.premai.io); this will take you to the project's dashboard. \n",
+    "\n",
+    "3. Create a project. This will generate a project id (written as ID), which will help you interact with your deployed application. \n",
+    "\n",
+    "4. Head over to LaunchPad (the one with the 🚀 icon) and deploy your model of choice there. Your default model will be `gpt-4`. You can also set and fix different generation parameters (like max tokens, temperature, etc.) and pre-set your system prompt. \n",
+    "\n",
+    "Congratulations on creating your first deployed application on Prem 🎉 Now we can use langchain to interact with our application. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain_core.messages import HumanMessage, SystemMessage\n",
+    "from langchain_community.chat_models import ChatPremAI"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Set up a ChatPremAI instance in LangChain \n",
+    "\n",
+    "Once we have imported our required modules, let's set up our client. For now, let's assume that our `project_id` is 8. Make sure you use your own project id, otherwise it will throw an error.\n",
+    "\n",
+    "In order to use langchain with prem, you do not need to pass any model name or set any parameters with our chat client. All of those will use the default model name and parameters of the LaunchPad model. \n",
+    "\n",
+    "`NOTE:` If you change the `model_name` or any other parameter like `temperature` while setting the client, it will override the existing default configurations. "
" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import getpass\n", + "\n", + "if os.environ.get(\"PREMAI_API_KEY\") is None:\n", + " os.environ[\"PREMAI_API_KEY\"] = getpass.getpass(\"PremAI API Key:\")\n", + "\n", + "chat = ChatPremAI(project_id=8)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Calling the Model\n", + "\n", + "Now you are all set. We can now start with interacting with our application. `ChatPrem` supports two methods `invoke` (which is same as `generate`) and `stream`. \n", + "\n", + "The first one will give us a static result. Where as the second one will stream tokens one by one. Here's how you can generate chat like completions. \n", + "\n", + "### Generation" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage(content=\"Ahoy there, matey! I be a virtual assistant here to assist ye with all yer needs. What can I be helpin' ye with today?\")" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "human_message = HumanMessage(content=\"Who are you?\")\n", + "\n", + "chat.invoke([human_message])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Above looks interesting right? I set my default lanchpad system-prompt as: `Always sound like a pirate` You can also, override the default system prompt if you need to. Here's how you can do it. " + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage(content='I am a friendly assistant here to help you with any questions or tasks you may have. How can I assist you today?')" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "system_message = SystemMessage(content=\"You are a friendly assistant.\")\n", + "human_message = HumanMessage(content=\"Who are you?\")\n", + "\n", + "chat.invoke([system_message, human_message])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can also change generation parameters while calling the model. Here's how you can do that" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage(content='I am a friendly assistant here to help you with any questions or tasks you may have. Feel free')" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "chat.invoke(\n", + " [system_message, human_message],\n", + " temperature = 0.7, max_tokens = 20, top_p = 0.95\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Important notes:\n", + "\n", + "Before proceeding further, please note that the current version of ChatPrem does not support parameters: [n](https://platform.openai.com/docs/api-reference/chat/create#chat-create-n) and [stop](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop) are not supported. \n", + "\n", + "We will provide support for those two above parameters in sooner versions. \n", + "\n", + "### Streaming\n", + "\n", + "And finally, here's how you do token streaming for dynamic chat like applications. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Ahoy there! I be doin' well, me hearty! And yerself? Arrr!" + ] + } + ], + "source": [ + "import sys\n", + "\n", + "for chunk in chat.stream(\"hello how are you\"):\n", + " sys.stdout.write(chunk.content)\n", + " sys.stdout.flush()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Similar to above, if you want to override the system-prompt and the generation parameters, here's how you can do it. " + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Hello! I'm here to help you. How can I assist you today?" + ] + } + ], + "source": [ + "import sys\n", + "\n", + "for chunk in chat.stream(\n", + " \"hello how are you\",\n", + " system_prompt = \"You are an helpful assistant\", temperature = 0.7, max_tokens = 20\n", + "):\n", + " sys.stdout.write(chunk.content)\n", + " sys.stdout.flush()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 0b94317926527779a3ff1f6ddca20865622ce41a Mon Sep 17 00:00:00 2001 From: Anindyadeep Sannigrahi Date: Fri, 15 Mar 2024 12:31:19 +0530 Subject: [PATCH 02/30] Add 'poetry add premai' changes --- poetry.lock | 27 ++++++++++++++++++++++----- pyproject.toml | 1 + 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index 014e89c8d3e9f..b452163f70984 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "aiohttp" @@ -1094,13 +1094,13 @@ trio = ["trio (>=0.22.0,<0.25.0)"] [[package]] name = "httpx" -version = "0.27.0" +version = "0.26.0" description = "The next generation HTTP client." 
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"},
-    {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"},
+    {file = "httpx-0.26.0-py3-none-any.whl", hash = "sha256:8915f5a3627c4d47b73e8202457cb28f1266982d1159bd5779d86a80c0eab1cd"},
+    {file = "httpx-0.26.0.tar.gz", hash = "sha256:451b55c30d5185ea6b23c2c793abf9bb237d2a7dfb901ced6ff69ad37ec1dfaf"},
 ]
 
 [package.dependencies]
@@ -2623,6 +2623,23 @@ files = [
 docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"]
 test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"]
 
+[[package]]
+name = "premai"
+version = "0.3.25"
+description = "A client library for accessing Prem APIs"
+optional = false
+python-versions = ">=3.8,<4.0"
+files = [
+    {file = "premai-0.3.25-py3-none-any.whl", hash = "sha256:bddace7340e1827f048b410748d365e8663e4bbeb6bf7e8b8657f3cc267f7f28"},
+    {file = "premai-0.3.25.tar.gz", hash = "sha256:c387980ecf3bdcb07886dd4f7a1c0f0701df67e772e62f444394cea97d5970a0"},
+]
+
+[package.dependencies]
+attrs = ">=21.3.0"
+httpx = ">=0.20.0,<0.27.0"
+python-dateutil = ">=2.8.0,<3.0.0"
+typing_extensions = ">=4.9.0"
+
 [[package]]
 name = "prometheus-client"
 version = "0.20.0"
@@ -4317,4 +4317,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<4.0"
-content-hash = "801d7e68178472d1086f428b5944899fb61db3ba6e415b5231413237754d7879"
+content-hash = "fc9d4dfbd769eba0ae2062ec5da366fac856a6233a7c34c48486eeef9c735b26"

diff --git a/pyproject.toml b/pyproject.toml
index 5dcdcd50f618a..91c463b7d7033 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -10,6 +10,7 @@ repository = "https://www.github.com/langchain-ai/langchain"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
+premai = "^0.3.25"
 
 [tool.poetry.group.docs.dependencies]
 langchain = { path = "libs/langchain/", develop = true }

From a48325a4f47fcb6d51741a4b5857a0fa9fa5e005 Mon Sep 17 00:00:00 2001
From: Anindyadeep Sannigrahi
Date: Fri, 15 Mar 2024 12:33:20 +0530
Subject: [PATCH 03/30] Migrate providers prem.md from prem org to personal

---
 docs/docs/integrations/providers/prem.md | 181 +++++++++++++++++++++++
 1 file changed, 181 insertions(+)
 create mode 100644 docs/docs/integrations/providers/prem.md

diff --git a/docs/docs/integrations/providers/prem.md b/docs/docs/integrations/providers/prem.md
new file mode 100644
index 0000000000000..e1d78d607c5a5
--- /dev/null
+++ b/docs/docs/integrations/providers/prem.md
@@ -0,0 +1,181 @@
+# PremAI
+
+>[PremAI](https://app.premai.io) is a unified platform that lets you build powerful, production-ready GenAI applications with the least effort, so that you can focus more on user experience and overall growth.
+
+
+## ChatPremAI
+
+This example goes over how to use LangChain to interact with different chat models with `ChatPremAI`.
+
+### Installation and setup
+
+We start by installing langchain and premai-sdk. You can run the following command to install them:
+
+```bash
+pip install premai langchain
+```
+
+Before proceeding further, please make sure that you have created an account on Prem and already started a project. If not, here's how you can start for free:
+
+1. Sign in to [PremAI](https://app.premai.io/accounts/login/) if you are coming for the first time, and create your API key [here](https://app.premai.io/api_keys/).
+
+2. Go to [app.premai.io](https://app.premai.io); this will take you to the project's dashboard.
+
+3. Create a project. This will generate a project id (written as ID), which will help you interact with your deployed application.
+
+4. Head over to LaunchPad (the one with the 🚀 icon) and deploy your model of choice there. Your default model will be `gpt-4`. You can also set and fix different generation parameters (like max tokens, temperature, etc.) and pre-set your system prompt.
+
+Congratulations on creating your first deployed application on Prem 🎉 Now we can use langchain to interact with our application.
+
+```python
+from langchain_core.messages import HumanMessage, SystemMessage
+from langchain_community.chat_models import ChatPremAI
+```
+
+### Set up a ChatPremAI instance in LangChain
+
+Once we have imported our required modules, let's set up our client. For now, let's assume that our `project_id` is 8. Make sure you use your own project id, otherwise it will throw an error.
+
+In order to use langchain with prem, you do not need to pass any model name or set any parameters with our chat client. All of those will use the default model name and parameters of the LaunchPad model.
+
+`NOTE:` If you change the `model_name` or any other parameter like `temperature` while setting the client, it will override the existing default configurations.
+
+```python
+import os
+import getpass
+
+if "PREMAI_API_KEY" not in os.environ:
+    os.environ["PREMAI_API_KEY"] = getpass.getpass("PremAI API Key:")
+
+chat = ChatPremAI(project_id=8)
+```
+
+### Calling the Model
+
+Now you are all set. We can start interacting with our application. `ChatPremAI` supports two methods: `invoke` (which is the same as `generate`) and `stream`.
+
+The first one gives us a static result, whereas the second one streams tokens one by one. Here's how you can generate chat-like completions.
+
+### Generation
+
+```python
+human_message = HumanMessage(content="Who are you?")
+
+chat.invoke([human_message])
+```
+
+Looks interesting, right? I set my default LaunchPad system prompt to: `Always sound like a pirate`. You can also override the default system prompt if you need to. Here's how you can do it.
+
+```python
+system_message = SystemMessage(content="You are a friendly assistant.")
+human_message = HumanMessage(content="Who are you?")
+
+chat.invoke([system_message, human_message])
+```
+
+You can also change generation parameters while calling the model. Here's how you can do that:
+
+```python
+chat.invoke(
+    [system_message, human_message],
+    temperature=0.7, max_tokens=20, top_p=0.95
+)
+```
+
+
+### Important notes
+
+Before proceeding further, please note that the current version of ChatPremAI does not support the parameters [n](https://platform.openai.com/docs/api-reference/chat/create#chat-create-n) and [stop](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop).
+
+We will provide support for those two parameters in upcoming versions.
+
+### Streaming
+
+And finally, here's how you do token streaming for dynamic, chat-like applications.
+
+```python
+import sys
+
+for chunk in chat.stream("hello how are you"):
+    sys.stdout.write(chunk.content)
+    sys.stdout.flush()
+```
+
+Similar to above, if you want to override the system prompt and the generation parameters, here's how you can do it.
+
+```python
+import sys
+
+for chunk in chat.stream(
+    "hello how are you",
+    system_prompt="You are a helpful assistant", temperature=0.7, max_tokens=20
+):
+    sys.stdout.write(chunk.content)
+    sys.stdout.flush()
+```
+
+## Embedding
+
+In this section we are going to discuss how we can get access to different embedding models using `PremAIEmbeddings`. Let's start by doing some imports and defining our embedding object
+
+```python
+from langchain_community.embeddings import PremAIEmbeddings
+```
+
+Once we have imported our required modules, let's set up our client. For now, let's assume that our `project_id` is 8. Make sure you use your own project id, otherwise it will throw an error.
+
+
+```python
+
+import os
+import getpass
+
+if os.environ.get("PREMAI_API_KEY") is None:
+    os.environ["PREMAI_API_KEY"] = getpass.getpass("PremAI API Key:")
+
+# Defining a model is a required parameter here, since there is no default embedding model
+
+model = "text-embedding-3-large"
+embedder = PremAIEmbeddings(project_id=8, model=model)
+```
+
+We have defined our embedding model. We support many embedding models. Here is a table of the embedding models we support.
+
+
+| Provider    | Slug                                     | Context Tokens |
+|-------------|------------------------------------------|----------------|
+| cohere      | embed-english-v3.0                       | N/A            |
+| openai      | text-embedding-3-small                   | 8191           |
+| openai      | text-embedding-3-large                   | 8191           |
+| openai      | text-embedding-ada-002                   | 8191           |
+| replicate   | replicate/all-mpnet-base-v2              | N/A            |
+| together    | togethercomputer/Llama-2-7B-32K-Instruct | N/A            |
+| mistralai   | mistral-embed                            | 4096           |
+
+To change the model, you simply need to copy the `slug` and access your embedding model. Now let's start using our embedding model, first with a single query and then with multiple inputs (a batch also called a document)
+
+```python
+query = "Hello, this is a test query"
+query_result = embedder.embed_query(query)
+
+# Let's print the first five elements of the query embedding vector
+
+print(query_result[:5])
+```
+
+Finally, let's embed some documents
+
+```python
+documents = [
+    "This is document1",
+    "This is document2",
+    "This is document3"
+]
+
+doc_result = embedder.embed_documents(documents)
+
+# Similar to the previous result, let's print the first five elements
+# of the first document vector
+
+print(doc_result[0][:5])
+```
\ No newline at end of file

From 77a1705628edcbaebbfe662c886bd7dd0a3e8ae9 Mon Sep 17 00:00:00 2001
From: Anindyadeep Sannigrahi
Date: Fri, 15 Mar 2024 12:35:53 +0530
Subject: [PATCH 04/30] Migrate docs for text embeddings for premai from prem
 org to personal

---
 .../integrations/text_embedding/premai.ipynb  | 170 ++++++++++++++++++
 1 file changed, 170 insertions(+)
 create mode 100644 docs/docs/integrations/text_embedding/premai.ipynb

diff --git a/docs/docs/integrations/text_embedding/premai.ipynb b/docs/docs/integrations/text_embedding/premai.ipynb
new file mode 100644
index 0000000000000..ca3bcb38e8869
--- /dev/null
+++ b/docs/docs/integrations/text_embedding/premai.ipynb
@@ -0,0 +1,170 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# PremAI\n",
+    "\n",
+    ">[PremAI](https://app.premai.io) is a unified platform that lets you build powerful, production-ready GenAI applications with the least effort, so that you can focus more on user experience and overall growth. In this section we are going to discuss how we can get access to different embedding models using `PremAIEmbeddings`\n",
+    "\n",
+    "## Installation and Setup\n",
+    "\n",
+    "We start by installing langchain and premai-sdk. You can run the following command to install them:\n",
+    "\n",
+    "```bash\n",
+    "pip install premai langchain\n",
+    "```\n",
+    "\n",
+    "Before proceeding further, please make sure that you have created an account on Prem and already started a project. If not, here's how you can start for free:\n",
+    "\n",
+    "1. Sign in to [PremAI](https://app.premai.io/accounts/login/) if you are coming for the first time, and create your API key [here](https://app.premai.io/api_keys/).\n",
+    "\n",
+    "2. Go to [app.premai.io](https://app.premai.io); this will take you to the project's dashboard. \n",
+    "\n",
+    "3. Create a project. This will generate a project id (written as ID), which will help you interact with your deployed application. \n",
+    "\n",
+    "Congratulations on creating your first deployed application on Prem 🎉 Now we can use langchain to interact with our application. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Let's start by doing some imports and defining our embedding object\n",
+    "\n",
+    "from langchain_community.embeddings import PremAIEmbeddings"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Once we have imported our required modules, let's set up our client. For now, let's assume that our `project_id` is 8. Make sure you use your own project id, otherwise it will throw an error.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import getpass\n",
+    "\n",
+    "if os.environ.get(\"PREMAI_API_KEY\") is None:\n",
+    "    os.environ[\"PREMAI_API_KEY\"] = getpass.getpass(\"PremAI API Key:\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model = \"text-embedding-3-large\"\n",
+    "embedder = PremAIEmbeddings(project_id=8, model=model)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We have defined our embedding model. We support many embedding models. Here is a table of the embedding models we support. \n",
+    "\n",
+    "\n",
+    "| Provider    | Slug                                     | Context Tokens |\n",
+    "|-------------|------------------------------------------|----------------|\n",
+    "| cohere      | embed-english-v3.0                       | N/A            |\n",
+    "| openai      | text-embedding-3-small                   | 8191           |\n",
+    "| openai      | text-embedding-3-large                   | 8191           |\n",
+    "| openai      | text-embedding-ada-002                   | 8191           |\n",
+    "| replicate   | replicate/all-mpnet-base-v2              | N/A            |\n",
+    "| together    | togethercomputer/Llama-2-7B-32K-Instruct | N/A            |\n",
+    "| mistralai   | mistral-embed                            | 4096           |\n",
+    "\n",
+    "To change the model, you simply need to copy the `slug` and access your embedding model. Now let's start using our embedding model, first with a single query and then with multiple inputs (a batch also called a document)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[-0.02129288576543331, 0.0008162345038726926, -0.004556538071483374, 0.02918623760342598, -0.02547479420900345]\n"
+     ]
+    }
+   ],
+   "source": [
+    "query = \"Hello, this is a test query\"\n",
+    "query_result = embedder.embed_query(query)\n",
+    "\n",
+    "# Let's print the first five elements of the query embedding vector\n",
+    "\n",
+    "print(query_result[:5])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Finally, let's embed some documents"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[-0.0030691148713231087, -0.045334383845329285, -0.0161729846149683, 0.04348714277148247, -0.0036920777056366205]\n"
+     ]
+    }
+   ],
+   "source": [
+    "documents = [\n",
+    "    \"This is document1\",\n",
+    "    \"This is document2\",\n",
+    "    \"This is document3\"\n",
+    "]\n",
+    "\n",
+    "doc_result = embedder.embed_documents(documents)\n",
+    "\n",
+    "# Similar to the previous result, let's print the first five elements\n",
+    "# of the first document vector\n",
+    "\n",
+    "print(doc_result[0][:5])"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "venv",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

From 2c267b5e4912eee0f2e5755d579844970887e404 Mon Sep 17 00:00:00 2001
From: Anindyadeep Sannigrahi
Date: Fri, 15 Mar 2024 12:43:17 +0530
Subject: [PATCH 05/30] migrate ChatPremAI changes from premai org to personal

---
 .../chat_models/__init__.py                   |   1 +
 .../langchain_community/chat_models/premai.py | 407 ++++++++++++++++++
 2 files changed, 408 insertions(+)
 create mode 100644 libs/community/langchain_community/chat_models/premai.py

diff --git a/libs/community/langchain_community/chat_models/__init__.py b/libs/community/langchain_community/chat_models/__init__.py
index 76ede64dc1a1e..18f202813b599 100644
--- a/libs/community/langchain_community/chat_models/__init__.py
+++ b/libs/community/langchain_community/chat_models/__init__.py
@@ -64,6 +64,7 @@
     "PromptLayerChatOpenAI": "langchain_community.chat_models.promptlayer_openai",
     "QianfanChatEndpoint": "langchain_community.chat_models.baidu_qianfan_endpoint",
     "VolcEngineMaasChat": "langchain_community.chat_models.volcengine_maas",
+    "ChatPremAI": "langchain_community.chat_models.premai",
 }

diff --git a/libs/community/langchain_community/chat_models/premai.py b/libs/community/langchain_community/chat_models/premai.py
new file mode 100644
index 0000000000000..adaf6155155b4
--- /dev/null
+++ b/libs/community/langchain_community/chat_models/premai.py
@@ -0,0 +1,407 @@
+"""Wrapper around Prem's Chat API."""
+
+from __future__ import annotations
+
+import logging
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    Iterator,
+    List,
+    Optional,
+    Tuple,
+    Type,
+    Union,
+)
+
+from langchain_core.callbacks import (
+    CallbackManagerForLLMRun,
+)
+from langchain_core.language_models.chat_models import BaseChatModel
+from langchain_core.language_models.llms import create_base_retry_decorator
+from langchain_core.messages import (
+    AIMessage,
+    AIMessageChunk,
+    BaseMessage,
+    BaseMessageChunk,
+    ChatMessage,
+    ChatMessageChunk,
+    HumanMessage,
+    HumanMessageChunk,
+    SystemMessage,
+    SystemMessageChunk,
+)
+from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
+from langchain_core.pydantic_v1 import BaseModel, Extra, SecretStr, root_validator
+from langchain_core.utils import get_from_dict_or_env
+
+if TYPE_CHECKING:
+    from premai.api.chat_completions.v1_chat_completions_create import (
+        ChatCompletionResponseStream,
+    )
+    from premai.models.chat_completion_response import ChatCompletionResponse
+
+logger = logging.getLogger(__name__)
+
+
+class ChatPremAPIError(Exception):
+    """Error with the `PremAI` API."""
+
+
+def _truncate_at_stop_tokens(
+    text: str,
+    stop: Optional[List[str]],
+) -> str:
+    """Truncates text at the earliest stop token found."""
+    if stop is None:
+        return text
+
+    for stop_token in stop:
+        stop_token_idx = text.find(stop_token)
+        if stop_token_idx != -1:
+            text = text[:stop_token_idx]
+    return text
+
+
+def _response_to_result(
+    response: ChatCompletionResponse,
+    stop: Optional[List[str]],
+) -> ChatResult:
+    """Converts a Prem API response into a LangChain result"""
+
+    if not response.choices:
+        raise ChatPremAPIError("ChatResponse must have at least one candidate")
+    generations: List[ChatGeneration] = []
+    for choice in response.choices:
+        role = choice.message.role
+        if role is None:
+            raise ChatPremAPIError(f"ChatResponse {choice} must have a role.")
+
+        # If content is None then it will be replaced by ""
+        content = _truncate_at_stop_tokens(text=choice.message.content or "", stop=stop)
+        if content is None:
+            raise ChatPremAPIError(f"ChatResponse must have a content: {content}")
+
+        if role == "assistant":
+            generations.append(
+                ChatGeneration(text=content, message=AIMessage(content=content))
+            )
+        elif role == "user":
+            generations.append(
+                ChatGeneration(text=content, message=HumanMessage(content=content))
+            )
+        else:
+            generations.append(
+                ChatGeneration(
+                    text=content, message=ChatMessage(role=role, content=content)
+                )
+            )
+    return ChatResult(generations=generations) 
+
+
+def _convert_delta_response_to_message_chunk(
+    response: ChatCompletionResponseStream, default_class: Type[BaseMessageChunk]
+) -> Tuple[BaseMessageChunk, Optional[str]]:
+    """Converts delta response to message chunk"""
+    _delta = response.choices[0].delta
+    role = _delta["role"]
+    content = _delta["content"]
+    additional_kwargs: Dict = {}
+
+    finish_reasons = response.choices[0].finish_reason
+
+    if role == "user" or default_class == HumanMessageChunk:
+        return HumanMessageChunk(content=content), finish_reasons
+    elif role == "assistant" or default_class == AIMessageChunk:
+        return (
+            AIMessageChunk(content=content, additional_kwargs=additional_kwargs),
+            finish_reasons,
+        )
+    elif role == "system" or default_class == SystemMessageChunk:
+        return SystemMessageChunk(content=content), finish_reasons
+    elif role or default_class == ChatMessageChunk:
+        return ChatMessageChunk(content=content, role=role), finish_reasons
+    else:
+        return default_class(content=content), finish_reasons
+
+
+def _messages_to_prompt_dict(
+    input_messages: List[BaseMessage],
+) -> Tuple[Optional[str], List[dict]]:
+    """Converts a list of LangChain Messages into a simple dict
+    which is the message structure in Prem"""
+
+    system_prompt: Optional[str] = None
+    examples_and_messages: List[Dict[str, str]] = []
+
+    for input_msg in input_messages:
+        if isinstance(input_msg, SystemMessage):
+            system_prompt = input_msg.content
+        elif isinstance(input_msg, HumanMessage):
+            examples_and_messages.append({"role": "user", "content": input_msg.content})
+        elif isinstance(input_msg, AIMessage):
+            examples_and_messages.append(
+                {"role": "assistant", "content": input_msg.content}
+            )
+        else:
+            raise ChatPremAPIError("No such role explicitly exists")
+    return system_prompt, examples_and_messages
+
+
+class ChatPremAI(BaseChatModel, BaseModel):
+    """Use any LLM provider with Prem and Langchain.
+
+    To use, you will need to have an API key. You can find your existing API Key
+    or generate a new one here: https://app.premai.io/api_keys/
+    """
+
+    # TODO: Need to add the default parameters through prem-sdk here
+
+    project_id: int
+    """The project ID in which the experiments or deployments are carried out.
+    You can find all your projects here: https://app.premai.io/projects/"""
+    premai_api_key: Optional[SecretStr] = None
+    """Prem AI API Key. Get it here: https://app.premai.io/api_keys/"""
+
+    model: Optional[str] = None
+    """Name of the model. This is an optional parameter.
+    The default model is the one deployed from Prem's LaunchPad: https://app.premai.io/projects/8/launchpad
+    If the model name is other than the default model, it will override the calls
+    from the model deployed from the launchpad."""
+
+    session_id: Optional[str] = None
+    """The ID of the session to use. It helps to track the chat history."""
+
+    temperature: Optional[float] = None
+    """Model temperature. Value should be >= 0 and <= 1.0"""
+
+    top_p: Optional[float] = None
+    """top_p adjusts the number of choices for each predicted token based on
+    cumulative probabilities. Value should range between 0.0 and 1.0.
+    """
+
+    max_tokens: Optional[int] = None
+    """The maximum number of tokens to generate"""
+
+    max_retries: int = 1
+    """Max number of retries to call the API"""
+
+    system_prompt: Optional[str] = ""
+    """Acts like a default instruction that helps the LLM act or generate
+    in a specific way. This is an optional parameter. By default the
+    system prompt is the one set for the LaunchPad model on Prem.
+    Changing the system prompt here overrides the default system prompt.
+    """
+
+    streaming: Optional[bool] = False
+    """Whether to stream the responses or not."""
+
+    tools: Optional[Dict[str, Any]] = None
+    """A list of tools the model may call. Currently, only functions are
+    supported as a tool"""
+
+    frequency_penalty: Optional[float] = None
+    """Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far."""
+
+    presence_penalty: Optional[float] = None
+    """Number between -2.0 and 2.0. Positive values penalize new tokens based
+    on whether they appear in the text so far."""
+
+    logit_bias: Optional[dict] = None
+    """JSON object that maps tokens to an associated bias value from -100 to 100."""
+
+    stop: Optional[Union[str, List[str]]] = None
+    """Up to 4 sequences where the API will stop generating further tokens."""
+
+    seed: Optional[int] = None
+    """This feature is in Beta. If specified, our system will make a best effort
+    to sample deterministically."""
+
+    client: Any
+
+    class Config:
+        """Configuration for this pydantic object."""
+
+        extra = Extra.forbid
+
+    @root_validator()
+    def validate_environments(cls, values: Dict) -> Dict:
+        """Validate that the package is installed and that the API token is valid"""
+        try:
+            from premai import Prem
+        except ImportError as error:
+            raise ImportError(
+                "Could not import Prem Python package. "
+                "Please install it with: `pip install premai`"
+            ) from error
+
+        try:
+            premai_api_key = get_from_dict_or_env(
+                values, "premai_api_key", "PREMAI_API_KEY"
+            )
+            values["client"] = Prem(api_key=premai_api_key)
+        except Exception as error:
+            raise ValueError("Your API Key is incorrect. Please try again.") from error
+        return values
+
+    @property
+    def _llm_type(self) -> str:
+        return "premai"
+
+    @property
+    def _default_params(self) -> Dict[str, Any]:
+        # FIXME: n and stop are not supported, so hardcoding to current default value
+        return {
+            "model": self.model,
+            "system_prompt": self.system_prompt,
+            "top_p": self.top_p,
+            "temperature": self.temperature,
+            "logit_bias": self.logit_bias,
+            "max_tokens": self.max_tokens,
+            "presence_penalty": self.presence_penalty,
+            "frequency_penalty": self.frequency_penalty,
+            "seed": self.seed,
+            "stop": None,
+        }
+
+    def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
+        all_kwargs = {**self._default_params, **kwargs}
+        for key in list(self._default_params.keys()):
+            if all_kwargs.get(key) is None or all_kwargs.get(key) == "":
+                all_kwargs.pop(key, None)
+        return all_kwargs
+
+    def _generate(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> ChatResult:
+        system_prompt, messages = _messages_to_prompt_dict(messages)
+
+        kwargs["stop"] = stop
+        if system_prompt is not None and system_prompt != "":
+            kwargs["system_prompt"] = system_prompt
+
+        all_kwargs = self._get_all_kwargs(**kwargs)
+        response = chat_with_retry(
+            self,
+            project_id=self.project_id,
+            messages=messages,
+            stream=False,
+            run_manager=run_manager,
+            **all_kwargs,
+        )
+
+        return _response_to_result(response=response, stop=stop)
+
+    def _stream(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> Iterator[ChatGenerationChunk]:
+        system_prompt, messages = _messages_to_prompt_dict(messages)
+
+        kwargs["stop"] = stop
+        if system_prompt is not None and system_prompt != "":
+            kwargs["system_prompt"] = system_prompt
+
+        all_kwargs = self._get_all_kwargs(**kwargs)
+
+        default_chunk_class = AIMessageChunk
+
+        for streamed_response in chat_with_retry(
+            self,
+            project_id=self.project_id,
+            messages=messages,
+            stream=True,
+            run_manager=run_manager,
+            **all_kwargs,
+        ):
+            try:
+                chunk, finish_reason = _convert_delta_response_to_message_chunk(
+                    response=streamed_response, default_class=default_chunk_class
+                )
+                generation_info = (
+                    dict(finish_reason=finish_reason)
+                    if finish_reason is not None
+                    else None
+                )
+                cg_chunk = ChatGenerationChunk(
+                    message=chunk, generation_info=generation_info
+                )
+                if run_manager:
+                    run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
+                yield cg_chunk
+            except Exception as _:
+                continue
+
+
+def create_prem_retry_decorator(
+    llm: ChatPremAI,
+    *,
+    max_retries: int = 1,
+    run_manager: Optional[CallbackManagerForLLMRun] = None,
+) -> Callable[[Any], Any]:
+    import premai.models
+
+    errors = [
+        premai.models.api_response_validation_error.APIResponseValidationError,
+        premai.models.conflict_error.ConflictError,
+        premai.models.model_not_found_error.ModelNotFoundError,
+        premai.models.permission_denied_error.PermissionDeniedError,
+        premai.models.provider_api_connection_error.ProviderAPIConnectionError,
+        premai.models.provider_api_status_error.ProviderAPIStatusError,
+        premai.models.provider_api_timeout_error.ProviderAPITimeoutError,
+        premai.models.provider_internal_server_error.ProviderInternalServerError,
+        premai.models.provider_not_found_error.ProviderNotFoundError,
+        premai.models.rate_limit_error.RateLimitError,
+        premai.models.unprocessable_entity_error.UnprocessableEntityError,
+        premai.models.validation_error.ValidationError,
+    ]
+
+    decorator = create_base_retry_decorator(
+        error_types=errors, max_retries=max_retries, run_manager=run_manager
+    )
+    return decorator
+
+
+def chat_with_retry(
+    llm: ChatPremAI,
+    project_id: int,
+    messages: List[dict],
+    stream: bool = False,
+    run_manager: Optional[CallbackManagerForLLMRun] = None,
+    **kwargs: Any,
+) -> Any:
+    """Using tenacity for retry in completion call"""
+    retry_decorator = create_prem_retry_decorator(
+        llm, max_retries=llm.max_retries, run_manager=run_manager
+    )
+
+    @retry_decorator
+    def _completion_with_retry(
+        project_id: int,
+        messages: List[dict],
+        stream: Optional[bool] = False,
+        **kwargs: Any,
+    ) -> Any:
+        response = llm.client.chat.completions.create(
+            project_id=project_id,
+            messages=messages,
+            stream=stream,
+            **kwargs,
+        )
+        return response
+
+    return _completion_with_retry(
+        project_id=project_id,
+        messages=messages,
+        stream=stream,
+        **kwargs,
+    )

From 434cdce11debc67fe4358ef37668ab134369154b Mon Sep 17 00:00:00 2001
From: Anindyadeep Sannigrahi
Date: Fri, 15 Mar 2024 12:45:21 +0530
Subject: [PATCH 06/30] migrate PremAIEmbeddings changes from premai org to personal

---
 .../embeddings/__init__.py                    |   1 +
 .../langchain_community/embeddings/premai.py  | 121 ++++++++++++++++++
 2 files changed, 122 insertions(+)
 create mode 100644 libs/community/langchain_community/embeddings/premai.py

diff --git a/libs/community/langchain_community/embeddings/__init__.py b/libs/community/langchain_community/embeddings/__init__.py
index 9cc358011c3d4..a0b575ef8bf4f 100644
--- a/libs/community/langchain_community/embeddings/__init__.py
+++ b/libs/community/langchain_community/embeddings/__init__.py
@@ -79,6 +79,7 @@
     "VolcanoEmbeddings": "langchain_community.embeddings.volcengine",
     "VoyageEmbeddings": "langchain_community.embeddings.voyageai",
     "XinferenceEmbeddings": "langchain_community.embeddings.xinference",
+    "PremAIEmbeddings": "langchain_community.embeddings.premai",
 }

diff --git a/libs/community/langchain_community/embeddings/premai.py b/libs/community/langchain_community/embeddings/premai.py
new file mode 100644
index 0000000000000..51c732a5324cd
--- /dev/null
+++ b/libs/community/langchain_community/embeddings/premai.py
@@ -0,0 +1,121 @@
+from __future__ import annotations
+
+import logging
+from typing import Any, Callable, Dict, List, Optional, Union
+
+from langchain_core.embeddings import Embeddings
+from langchain_core.language_models.llms import create_base_retry_decorator
+from langchain_core.pydantic_v1 import BaseModel, SecretStr, root_validator
+from langchain_core.utils import get_from_dict_or_env
+
+logger = logging.getLogger(__name__)
+
+
+class PremAIEmbeddings(BaseModel, Embeddings):
+    """Prem's Embedding APIs"""
+
+    project_id: int
+    """The project ID in which the experiments or deployments are carried out.
+    You can find all your projects here: https://app.premai.io/projects/"""
+
+    premai_api_key: Optional[SecretStr] = None
+    """Prem AI API Key. Get it here: https://app.premai.io/api_keys/"""
+
+    model: str
+    """The Embedding model to choose from"""
+
+    show_progress_bar: bool = False
+    """Whether to show a tqdm progress bar. Must have `tqdm` installed."""
+
+    max_retries: Optional[int] = 1
+    """Max number of retries for tenacity"""
+
+    client: Any
+
+    @root_validator()
+    def validate_environments(cls, values: Dict) -> Dict:
+        """Validate that the package is installed and that the API token is valid"""
+        try:
+            from premai import Prem
+        except ImportError as error:
+            raise ImportError(
+                "Could not import Prem Python package. "
+                "Please install it with: `pip install premai`"
+            ) from error
+
+        try:
+            premai_api_key = get_from_dict_or_env(
+                values, "premai_api_key", "PREMAI_API_KEY"
+            )
+            values["client"] = Prem(api_key=premai_api_key)
+        except Exception as error:
+            raise ValueError("Your API Key is incorrect. Please try again.") from error
+        return values
+
+    def embed_query(self, text: str) -> List[float]:
+        """Embed query text"""
+        embeddings = embed_with_retry(
+            self, model=self.model, project_id=self.project_id, input=text
+        )
+        return embeddings.data[0].embedding
+
+    def embed_documents(self, texts: List[str]) -> List[List[float]]:
+        embeddings = embed_with_retry(
+            self, model=self.model, project_id=self.project_id, input=texts
+        ).data
+
+        return [embedding.embedding for embedding in embeddings]
+
+
+def create_prem_retry_decorator(
+    embedder: PremAIEmbeddings,
+    *,
+    max_retries: int = 1,
+) -> Callable[[Any], Any]:
+    import premai.models
+
+    errors = [
+        premai.models.api_response_validation_error.APIResponseValidationError,
+        premai.models.conflict_error.ConflictError,
+        premai.models.model_not_found_error.ModelNotFoundError,
+        premai.models.permission_denied_error.PermissionDeniedError,
+        premai.models.provider_api_connection_error.ProviderAPIConnectionError,
+        premai.models.provider_api_status_error.ProviderAPIStatusError,
+        premai.models.provider_api_timeout_error.ProviderAPITimeoutError,
+        premai.models.provider_internal_server_error.ProviderInternalServerError,
+        premai.models.provider_not_found_error.ProviderNotFoundError,
+        premai.models.rate_limit_error.RateLimitError,
+        premai.models.unprocessable_entity_error.UnprocessableEntityError,
+        premai.models.validation_error.ValidationError,
+    ]
+
+    decorator = create_base_retry_decorator(
+        error_types=errors, max_retries=max_retries, run_manager=None
+    )
+    return decorator
+
+
+def embed_with_retry(
+    embedder: PremAIEmbeddings,
+    model: str,
+    project_id: int,
+    input: Union[str, List[str]],
+) -> Any:
+    """Using tenacity for retry in embedding calls"""
+    retry_decorator = create_prem_retry_decorator(
+        embedder, max_retries=embedder.max_retries
+    )
+
+    @retry_decorator
+    def _embed_with_retry(
+        embedder: PremAIEmbeddings,
+        project_id: int,
+        model: str,
+        input: Union[str, List[str]],
+    ) -> Any:
+        embedding_response = embedder.client.embeddings.create(
+            project_id=project_id, model=model, input=input
+        )
+        return embedding_response
+
+    return _embed_with_retry(embedder, project_id=project_id, model=model, input=input)

From 7e5b4d726b1c6177b1b3ce9d50f22198df342e0d Mon Sep 17 00:00:00 2001
From: Anindyadeep Sannigrahi
Date: Fri, 15 Mar 2024 12:52:30 +0530
Subject: [PATCH 07/30] migrate premai integration tests from premai org to personal

---
.../chat_models/test_premai.py | 75 +++++++++++++++++++ .../embeddings/test_premai.py | 40 ++++++++++ 2 files changed, 115 insertions(+) create mode 100644 libs/community/tests/integration_tests/chat_models/test_premai.py create mode 100644 libs/community/tests/integration_tests/embeddings/test_premai.py diff --git a/libs/community/tests/integration_tests/chat_models/test_premai.py b/libs/community/tests/integration_tests/chat_models/test_premai.py new file mode 100644 index 0000000000000..48e53b8614ff0 --- /dev/null +++ b/libs/community/tests/integration_tests/chat_models/test_premai.py @@ -0,0 +1,75 @@ +"""Test ChatPremAI from PremAI API wrapper. + +Note: This test must be run with the PREMAI_API_KEY environment variable set to a valid +API key and a valid project_id. +For this we need to have a project setup in PremAI's platform: https://app.premai.io +""" + +import pytest +from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage +from langchain_core.outputs import ChatGeneration, LLMResult + +from langchain_community.chat_models import ChatPremAI + +@pytest.fixture +def chat() -> ChatPremAI: + return ChatPremAI(project_id=8) + + +@pytest.mark.scheduled +def test_chat_premai() -> None: + """Test ChatPremAI wrapper.""" + chat = ChatPremAI(project_id=8) + message = HumanMessage(content="Hello") + response = chat([message]) + assert isinstance(response, BaseMessage) + assert isinstance(response.content, str) + + +@pytest.mark.scheduled +def test_chat_prem_system_message() -> None: + """Test ChatPremAI wrapper for system message""" + chat = ChatPremAI(project_id=8) + system_message = SystemMessage(content="You are to chat with the user.") + human_message = HumanMessage(content="Hello") + response = chat([system_message, human_message]) + assert isinstance(response, BaseMessage) + assert isinstance(response.content, str) + + +@pytest.mark.scheduled +def test_chat_prem_model() -> None: + """Test ChatPremAI wrapper handles model_name.""" + chat = ChatPremAI(model="foo", project_id=8) + assert chat.model == "foo" + + +@pytest.mark.scheduled +def test_chat_prem_generate() -> None: + """Test ChatPremAI wrapper with generate.""" + chat = ChatPremAI(project_id=8) + message = HumanMessage(content="Hello") + response = chat.generate([[message], [message]]) + assert isinstance(response, LLMResult) + assert len(response.generations) == 2 + for generations in response.generations: + for generation in generations: + assert isinstance(generation, ChatGeneration) + assert isinstance(generation.text, str) + assert generation.text == generation.message.content + + +@pytest.mark.scheduled +async def test_prem_invoke(chat: ChatPremAI) -> None: + """Tests chat completion with invoke""" + result = chat.invoke("How is the weather in New York today?") + assert isinstance(result.content, str) + + +@pytest.mark.scheduled +def test_prem_streaming() -> None: + """Test streaming tokens from Prem.""" + chat = ChatPremAI(project_id=8, streaming=True) + + for token in chat.stream("I'm Pickle Rick"): + assert isinstance(token.content, str) diff --git a/libs/community/tests/integration_tests/embeddings/test_premai.py b/libs/community/tests/integration_tests/embeddings/test_premai.py new file mode 100644 index 0000000000000..f0848760bfae9 --- /dev/null +++ b/libs/community/tests/integration_tests/embeddings/test_premai.py @@ -0,0 +1,40 @@ +"""Test PremAIEmbeddings from PremAI API wrapper. 
+
+Note: This test must be run with the PREMAI_API_KEY environment variable set to a valid
+API key and a valid project_id. This needs a project set up on PremAI's platform.
+You can check it out here: https://app.premai.io
+"""
+
+import pytest
+
+from langchain_community.embeddings.premai import PremAIEmbeddings
+
+
+@pytest.fixture
+def embedder() -> PremAIEmbeddings:
+    return PremAIEmbeddings(project_id=8, model="text-embedding-3-small")
+
+
+def test_prem_embedding_documents(embedder: PremAIEmbeddings) -> None:
+    """Test Prem embeddings."""
+    documents = ["foo bar"]
+    output = embedder.embed_documents(documents)
+    assert len(output) == 1
+    assert len(output[0]) == 1536
+
+
+def test_prem_embedding_documents_multiple(embedder: PremAIEmbeddings) -> None:
+    """Test prem embeddings for multiple queries or documents."""
+    documents = ["foo bar", "bar foo", "foo"]
+    output = embedder.embed_documents(documents)
+    assert len(output) == 3
+    assert len(output[0]) == 1536
+    assert len(output[1]) == 1536
+    assert len(output[2]) == 1536
+
+
+def test_prem_embedding_query(embedder: PremAIEmbeddings) -> None:
+    """Test Prem embeddings for a single query"""
+    document = "foo bar"
+    output = embedder.embed_query(document)
+    assert len(output) == 1536

From 8a5ea43c2b43399d3ae2a85df8cb385ab6bfeb9d Mon Sep 17 00:00:00 2001
From: Anindyadeep Sannigrahi
Date: Fri, 15 Mar 2024 12:56:14 +0530
Subject: [PATCH 08/30] migrate premai unit tests from premai org to personal

---
 .../unit_tests/chat_models/test_imports.py    |  1 +
 .../unit_tests/chat_models/test_premai.py     | 47 +++++++++++++++++++
 2 files changed, 48 insertions(+)
 create mode 100644 libs/community/tests/unit_tests/chat_models/test_premai.py

diff --git a/libs/community/tests/unit_tests/chat_models/test_imports.py b/libs/community/tests/unit_tests/chat_models/test_imports.py
index 07f64c7ad6440..cca1330eaa599 100644
--- a/libs/community/tests/unit_tests/chat_models/test_imports.py
+++ b/libs/community/tests/unit_tests/chat_models/test_imports.py
@@ -44,6 +44,7 @@
     "ChatPerplexity",
     "ChatKinetica",
     "ChatFriendli",
+    "ChatPremAI",
 ]

diff --git a/libs/community/tests/unit_tests/chat_models/test_premai.py b/libs/community/tests/unit_tests/chat_models/test_premai.py
new file mode 100644
index 0000000000000..c72d4f0ec865b
--- /dev/null
+++ b/libs/community/tests/unit_tests/chat_models/test_premai.py
@@ -0,0 +1,47 @@
+"""Test the ChatPremAI chat model"""
+
+import pytest
+from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
+from langchain_core.pydantic_v1 import SecretStr
+from pytest import CaptureFixture
+
+from langchain_community.chat_models import ChatPremAI
+from langchain_community.chat_models.premai import _messages_to_prompt_dict
+
+
+@pytest.mark.requires("premai")
+def test_api_key_is_string() -> None:
+    llm = ChatPremAI(premai_api_key="secret-api-key", project_id=8)
+    assert isinstance(llm.premai_api_key, SecretStr)
+
+
+@pytest.mark.requires("premai")
+def test_api_key_masked_when_passed_via_constructor(
+    capsys: CaptureFixture,
+) -> None:
+    llm = ChatPremAI(premai_api_key="secret-api-key", project_id=8)
+    print(llm.premai_api_key, end="")  # noqa: T201
+    captured = capsys.readouterr()
+
+    assert captured.out == "**********"
+
+
+def test_messages_to_prompt_dict_with_valid_messages() -> None:
+    system_message, result = _messages_to_prompt_dict(
+        [
+            SystemMessage(content="System Prompt"),
+            HumanMessage(content="User message #1"),
+            AIMessage(content="AI message #1"),
+            HumanMessage(content="User message #2"),
+            AIMessage(content="AI message #2"),
+        ]
+    )
+    expected = [
+        {"role": "user", "content": "User message #1"},
+        {"role": "assistant", "content": "AI message #1"},
+        {"role": "user", "content": "User message #2"},
+        {"role": "assistant", "content": "AI message #2"},
+    ]
+
+    assert system_message == "System Prompt"
+    assert result == expected

From 91bb80e8a6354460dbdf9cabb357308eed25919a Mon Sep 17 00:00:00 2001
From: Anindyadeep Sannigrahi
Date: Fri, 15 Mar 2024 12:58:57 +0530
Subject: [PATCH 09/30] migrate premai embeddings unit tests from premai org to personal

---
 .../unit_tests/embeddings/test_imports.py     |  1 +
 .../unit_tests/embeddings/test_premai.py      | 28 +++++++++++++++
 2 files changed, 29 insertions(+)
 create mode 100644 libs/community/tests/unit_tests/embeddings/test_premai.py

diff --git a/libs/community/tests/unit_tests/embeddings/test_imports.py b/libs/community/tests/unit_tests/embeddings/test_imports.py
index 5ca203d9344d8..cdbfaaf582259 100644
--- a/libs/community/tests/unit_tests/embeddings/test_imports.py
+++ b/libs/community/tests/unit_tests/embeddings/test_imports.py
@@ -64,6 +64,7 @@
     "QuantizedBiEncoderEmbeddings",
    "NeMoEmbeddings",
     "SparkLLMTextEmbeddings",
+    "PremAIEmbeddings",
 ]

diff --git a/libs/community/tests/unit_tests/embeddings/test_premai.py b/libs/community/tests/unit_tests/embeddings/test_premai.py
new file mode 100644
index 0000000000000..3b06b19026ae2
--- /dev/null
+++ b/libs/community/tests/unit_tests/embeddings/test_premai.py
@@ -0,0 +1,28 @@
+"""Test PremAIEmbeddings embeddings"""
+
+import pytest
+from langchain_core.pydantic_v1 import SecretStr
+from pytest import CaptureFixture
+
+from langchain_community.embeddings import PremAIEmbeddings
+
+
+@pytest.mark.requires("premai")
+def test_api_key_is_string() -> None:
+    llm = PremAIEmbeddings(
+        premai_api_key="secret-api-key", project_id=8, model="fake-model"
+    )
+    assert isinstance(llm.premai_api_key, SecretStr)
+
+
+@pytest.mark.requires("premai")
+def test_api_key_masked_when_passed_via_constructor(
+    capsys: CaptureFixture,
+) -> None:
+    llm = PremAIEmbeddings(
+        premai_api_key="secret-api-key", project_id=8, model="fake-model"
+    )
+    print(llm.premai_api_key, end="")  # noqa: T201
+    captured = capsys.readouterr()
+
+    assert captured.out == "**********"

From 91bb80e8a6354460dbdf9cabb357308eed25919a Mon Sep 17 00:00:00 2001
From: Bagatur
Date: Fri, 15 Mar 2024 15:16:46 -0700
Subject: [PATCH 10/30] undo

---
 poetry.lock    | 27 +++++----------------------
 pyproject.toml |  1 -
 2 files changed, 5 insertions(+), 23 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index b452163f70984..014e89c8d3e9f 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
 
 [[package]]
 name = "aiohttp"
@@ -1094,13 +1094,13 @@ trio = ["trio (>=0.22.0,<0.25.0)"]
 
 [[package]]
 name = "httpx"
-version = "0.26.0"
+version = "0.27.0"
 description = "The next generation HTTP client."
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "httpx-0.26.0-py3-none-any.whl", hash = "sha256:8915f5a3627c4d47b73e8202457cb28f1266982d1159bd5779d86a80c0eab1cd"},
-    {file = "httpx-0.26.0.tar.gz", hash = "sha256:451b55c30d5185ea6b23c2c793abf9bb237d2a7dfb901ced6ff69ad37ec1dfaf"},
+    {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"},
+    {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"},
 ]
 
 [package.dependencies]
@@ -2623,23 +2623,6 @@ files = [
 docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"]
 test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"]
 
-[[package]]
-name = "premai"
-version = "0.3.25"
-description = "A client library for accessing Prem APIs"
-optional = false
-python-versions = ">=3.8,<4.0"
-files = [
-    {file = "premai-0.3.25-py3-none-any.whl", hash = "sha256:bddace7340e1827f048b410748d365e8663e4bbeb6bf7e8b8657f3cc267f7f28"},
-    {file = "premai-0.3.25.tar.gz", hash = "sha256:c387980ecf3bdcb07886dd4f7a1c0f0701df67e772e62f444394cea97d5970a0"},
-]
-
-[package.dependencies]
-attrs = ">=21.3.0"
-httpx = ">=0.20.0,<0.27.0"
-python-dateutil = ">=2.8.0,<3.0.0"
-typing_extensions = ">=4.9.0"
-
 [[package]]
 name = "prometheus-client"
 version = "0.20.0"
@@ -4334,4 +4317,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<4.0"
-content-hash = "fc9d4dfbd769eba0ae2062ec5da366fac856a6233a7c34c48486eeef9c735b26"
+content-hash = "801d7e68178472d1086f428b5944899fb61db3ba6e415b5231413237754d7879"

diff --git a/pyproject.toml b/pyproject.toml
index 91c463b7d7033..5dcdcd50f618a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -10,7 +10,6 @@ repository = "https://www.github.com/langchain-ai/langchain"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
-premai = "^0.3.25"
 
 [tool.poetry.group.docs.dependencies]
 langchain = { path = "libs/langchain/", develop = true }

From 376eee5e24a56ec471e6eb0e3e9151f9b2f43e34 Mon Sep 17 00:00:00 2001
From: Bagatur
Date: Fri, 15 Mar 2024 15:17:17 -0700
Subject: [PATCH 11/30] fmt

---
 docs/docs/integrations/chat/premai.ipynb      | 17 ++++++++---------
 .../integrations/text_embedding/premai.ipynb  |  8 ++------
 .../chat_models/test_premai.py                |  1 +
 3 files changed, 11 insertions(+), 15 deletions(-)

diff --git a/docs/docs/integrations/chat/premai.ipynb b/docs/docs/integrations/chat/premai.ipynb
index 6679dc02177ed..9972cbc955dab 100644
--- a/docs/docs/integrations/chat/premai.ipynb
+++ b/docs/docs/integrations/chat/premai.ipynb
@@ -52,8 +52,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain_core.messages import HumanMessage, SystemMessage\n",
-    "from langchain_community.chat_models import ChatPremAI"
+    "from langchain_community.chat_models import ChatPremAI\n",
+    "from langchain_core.messages import HumanMessage, SystemMessage"
    ]
   },
@@ -75,13 +75,13 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import os\n",
-    "import getpass\n",
+    "import getpass\n",
+    "import os\n",
     "\n",
     "if os.environ.get(\"PREMAI_API_KEY\") is None:\n",
     "    os.environ[\"PREMAI_API_KEY\"] = getpass.getpass(\"PremAI API Key:\")\n",
     "\n",
-    "chat = ChatPremAI(project_id=8)\n"
+    "chat = ChatPremAI(project_id=8)"
    ]
   },
@@ -173,10 +173,7 @@
    }
   ],
    "source": [
-    "chat.invoke(\n",
-    "    [system_message, human_message],\n",
-    "    temperature = 0.7, max_tokens = 20, top_p = 0.95\n",
-    ")"
+    "chat.invoke([system_message, human_message], temperature=0.7, max_tokens=20, top_p=0.95)"
    ]
   },
@@ -237,7 +237,9 @@
     "\n",
     "for chunk in chat.stream(\n",
     "    \"hello how are you\",\n",
-    "    system_prompt = \"You are a helpful assistant\", temperature = 0.7, max_tokens = 20\n",
+    "    system_prompt=\"You are a helpful assistant\",\n",
+    "    temperature=0.7,\n",
+    "    max_tokens=20,\n",
     "):\n",
     "    sys.stdout.write(chunk.content)\n",
     "    sys.stdout.flush()"

diff --git a/docs/docs/integrations/text_embedding/premai.ipynb b/docs/docs/integrations/text_embedding/premai.ipynb
index ca3bcb38e8869..d8bf54fd43f53 100644
--- a/docs/docs/integrations/text_embedding/premai.ipynb
+++ b/docs/docs/integrations/text_embedding/premai.ipynb
@@ -51,8 +51,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import os\n",
-    "import getpass\n",
+    "import getpass\n",
+    "import os\n",
     "\n",
     "if os.environ.get(\"PREMAI_API_KEY\") is None:\n",
     "    os.environ[\"PREMAI_API_KEY\"] = getpass.getpass(\"PremAI API Key:\")"
    ]
   },
@@ -131,11 +131,7 @@
    }
   ],
    "source": [
-    "documents = [\n",
-    "    \"This is document1\",\n",
-    "    \"This is document2\",\n",
-    "    \"This is document3\"\n",
-    "]\n",
+    "documents = [\"This is document1\", \"This is document2\", \"This is document3\"]\n",
     "\n",
     "doc_result = embedder.embed_documents(documents)\n",

diff --git a/libs/community/tests/integration_tests/chat_models/test_premai.py b/libs/community/tests/integration_tests/chat_models/test_premai.py
index 48e53b8614ff0..30f4ab5723760 100644
--- a/libs/community/tests/integration_tests/chat_models/test_premai.py
+++ b/libs/community/tests/integration_tests/chat_models/test_premai.py
@@ -11,6 +11,7 @@
 from langchain_community.chat_models import ChatPremAI
 
+
 @pytest.fixture
 def chat() -> ChatPremAI:
     return ChatPremAI(project_id=8)

From 63c962f59a05ddf0637ebb3dda24ff8228a8188e Mon Sep 17 00:00:00 2001
From: Anindyadeep Sannigrahi
Date: Sat, 16 Mar 2024 09:56:47 +0530
Subject: [PATCH 12/30] Added lint checks for ChatPremAI

---
 libs/community/langchain_community/chat_models/premai.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/libs/community/langchain_community/chat_models/premai.py b/libs/community/langchain_community/chat_models/premai.py
index adaf6155155b4..e1560b44b047e 100644
--- a/libs/community/langchain_community/chat_models/premai.py
+++ b/libs/community/langchain_community/chat_models/premai.py
@@ -98,7 +98,7 @@ def _response_to_result(
                 text=content, message=ChatMessage(role=role, content=content)
             )
         )
-    return ChatResult(generations=generations) 
+    return ChatResult(generations=generations)
 
 
 def _convert_delta_response_to_message_chunk(
@@ -106,9 +106,12 @@ def _convert_delta_response_to_message_chunk(
 ) -> Tuple[BaseMessageChunk, Optional[str]]:
     """Converts delta response to message chunk"""
     _delta = response.choices[0].delta
-    role = _delta["role"]
-    content = _delta["content"]
+    role = _delta.get("role", "")
+    content = _delta.get("content", "")
     additional_kwargs: Dict = {}
+
+    if role is None or role == "":
+        raise ChatPremAPIError("Role can not be None. Please check the response")

From e93a715c16152737f1d4e96f402f8e71b4ccec61 Mon Sep 17 00:00:00 2001
From: Anindyadeep Sannigrahi
Date: Sat, 16 Mar 2024 09:57:12 +0530
Subject: [PATCH 13/30] Added lint checks for PremAIEmbeddings

---
 libs/community/langchain_community/embeddings/premai.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/community/langchain_community/embeddings/premai.py b/libs/community/langchain_community/embeddings/premai.py
index 51c732a5324cd..e811b1bae49f4 100644
--- a/libs/community/langchain_community/embeddings/premai.py
+++ b/libs/community/langchain_community/embeddings/premai.py
@@ -27,7 +27,7 @@ class PremAIEmbeddings(BaseModel, Embeddings):
     show_progress_bar: bool = False
     """Whether to show a tqdm progress bar. Must have `tqdm` installed."""
 
-    max_retries: Optional[int] = 1
+    max_retries: int = 1
     """Max number of retries for tenacity"""
 
     client: Any

From fd9fbaac9b55675a975b295c4b62965e041cb59f Mon Sep 17 00:00:00 2001
From: Anindyadeep Sannigrahi
Date: Sat, 16 Mar 2024 09:58:14 +0530
Subject: [PATCH 14/30] run make format

---
 libs/community/langchain_community/chat_models/premai.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/community/langchain_community/chat_models/premai.py b/libs/community/langchain_community/chat_models/premai.py
index e1560b44b047e..c107068eef610 100644
--- a/libs/community/langchain_community/chat_models/premai.py
+++ b/libs/community/langchain_community/chat_models/premai.py
@@ -109,7 +109,7 @@ def _convert_delta_response_to_message_chunk(
     role = _delta.get("role", "")
     content = _delta.get("content", "")
     additional_kwargs: Dict = {}
-    
+
     if role is None or role == "":
         raise ChatPremAPIError("Role can not be None. 
Please check the response") From a82f15ff92e45589fa93dbc8df4242b3b95c76fc Mon Sep 17 00:00:00 2001 From: Anindyadeep Sannigrahi Date: Sat, 16 Mar 2024 10:00:40 +0530 Subject: [PATCH 15/30] fix mypy type checks for ChatPremAI _delta --- libs/community/langchain_community/chat_models/premai.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/community/langchain_community/chat_models/premai.py b/libs/community/langchain_community/chat_models/premai.py index c107068eef610..fd49c7f19a8b4 100644 --- a/libs/community/langchain_community/chat_models/premai.py +++ b/libs/community/langchain_community/chat_models/premai.py @@ -105,7 +105,7 @@ def _convert_delta_response_to_message_chunk( response: ChatCompletionResponseStream, default_class: Type[BaseMessageChunk] ) -> BaseMessageChunk: """Converts delta response to message chunk""" - _delta = response.choices[0].delta + _delta: Dict[str, Any] = response.choices[0].delta role = _delta.get("role", "") content = _delta.get("content", "") additional_kwargs: Dict = {} From d513fec1e2df2ea6fefa6f5e93764cd8bf44670d Mon Sep 17 00:00:00 2001 From: Anindyadeep Sannigrahi Date: Sat, 16 Mar 2024 10:02:56 +0530 Subject: [PATCH 16/30] add type ignore in _delta --- libs/community/langchain_community/chat_models/premai.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/community/langchain_community/chat_models/premai.py b/libs/community/langchain_community/chat_models/premai.py index fd49c7f19a8b4..2563e4310e75b 100644 --- a/libs/community/langchain_community/chat_models/premai.py +++ b/libs/community/langchain_community/chat_models/premai.py @@ -105,7 +105,7 @@ def _convert_delta_response_to_message_chunk( response: ChatCompletionResponseStream, default_class: Type[BaseMessageChunk] ) -> BaseMessageChunk: """Converts delta response to message chunk""" - _delta: Dict[str, Any] = response.choices[0].delta + _delta = response.choices[0].delta # type: ignore role = _delta.get("role", "") content = _delta.get("content", "") additional_kwargs: Dict = {} From d268b76a2b74af388d28e0b2db38407a1726005c Mon Sep 17 00:00:00 2001 From: Anindyadeep Sannigrahi Date: Sat, 16 Mar 2024 10:03:57 +0530 Subject: [PATCH 17/30] run make format --- libs/community/langchain_community/chat_models/premai.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/community/langchain_community/chat_models/premai.py b/libs/community/langchain_community/chat_models/premai.py index 2563e4310e75b..67e325ce6ded8 100644 --- a/libs/community/langchain_community/chat_models/premai.py +++ b/libs/community/langchain_community/chat_models/premai.py @@ -105,7 +105,7 @@ def _convert_delta_response_to_message_chunk( response: ChatCompletionResponseStream, default_class: Type[BaseMessageChunk] ) -> BaseMessageChunk: """Converts delta response to message chunk""" - _delta = response.choices[0].delta # type: ignore + _delta = response.choices[0].delta # type: ignore role = _delta.get("role", "") content = _delta.get("content", "") additional_kwargs: Dict = {} From 44a0d6fa978e5a391b8798c0e0594377f520d3db Mon Sep 17 00:00:00 2001 From: Anindyadeep Sannigrahi Date: Mon, 18 Mar 2024 01:31:11 +0530 Subject: [PATCH 18/30] changed file name from prem to premai and fix code spell checks --- .../docs/integrations/providers/{prem.md => premai.md} | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) rename docs/docs/integrations/providers/{prem.md => premai.md} (92%) diff --git a/docs/docs/integrations/providers/prem.md 
b/docs/docs/integrations/providers/premai.md similarity index 92% rename from docs/docs/integrations/providers/prem.md rename to docs/docs/integrations/providers/premai.md index e1d78d607c5a5..7547f8ea0053e 100644 --- a/docs/docs/integrations/providers/prem.md +++ b/docs/docs/integrations/providers/premai.md @@ -23,20 +23,20 @@ Before proceeding further, please make sure that you have made an account on Pre 3. Create a project and this will generate a project-id (written as ID). This ID will help you to interact with your deployed application. -4. Head over to LaunchPad (the one with 🚀 icon). And there deploy your model of choice. Your default model will be `gpt-4`. You can also set and fix different generation paramters (like: max-tokens, temperature etc) and also pre-set your system prompt. +4. Head over to LaunchPad (the one with 🚀 icon). And there deploy your model of choice. Your default model will be `gpt-4`. You can also set and fix different generation parameters (like: max-tokens, temperature etc) and also pre-set your system prompt. Congratulations on creating your first deployed application on Prem 🎉 Now we can use langchain to interact with our application. ```python from langchain_core.messages import HumanMessage, SystemMessage -from langchain_community.chat_models import ChatPrem +from langchain_community.chat_models import ChatPremAI ``` ### Setup ChatPrem instance in LangChain Once we imported our required modules, let's setup our client. For now let's assume that our `project_id` is 8. But make sure you use your project-id, otherwise it will throw error. -In order to use langchain with prem, you do not need to pass any model name or set any parameters with our chat-client. All of those will use the default model name and paramters of the LaunchPad model. +In order to use langchain with prem, you do not need to pass any model name or set any parameters with our chat-client. All of those will use the default model name and parameters of the LaunchPad model. `NOTE:` If you change the `model_name` or any other parameter like `temperature` while setting the client, it will override existing default configurations. @@ -116,7 +116,7 @@ for chunk in chat.stream( ## Embedding -In this section we are going to dicuss how we can get access to different embedding model using `PremEmbeddings`. Let's start by doing some imports and define our embedding object +In this section we are going to discuss how we can get access to different embedding model using `PremEmbeddings`. 
Let's start by doing some imports and define our embedding object ```python from langchain_community.embeddings import PremEmbeddings @@ -133,7 +133,7 @@ import getpass if os.environ.get("PREMAI_API_KEY") is None: os.environ["PREMAI_API_KEY"] = getpass.getpass("PremAI API Key:") -# Define a model is a required paramter here, since there is no default embedding model +# Define a model is a required parameter here, since there is no default embedding model model = "text-embedding-3-large" embedder = PremEmbeddings(project_id=8, model=model) From 6bd818aed1db224fcfc6d3b401b9134501183c35 Mon Sep 17 00:00:00 2001 From: Anindyadeep Sannigrahi Date: Mon, 18 Mar 2024 01:37:00 +0530 Subject: [PATCH 19/30] Added premai in pyproject toml file --- libs/community/pyproject.toml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/libs/community/pyproject.toml b/libs/community/pyproject.toml index 7a80c33e5d1a5..2733f8345ada1 100644 --- a/libs/community/pyproject.toml +++ b/libs/community/pyproject.toml @@ -97,6 +97,7 @@ rdflib = {version = "7.0.0", optional = true} nvidia-riva-client = {version = "^2.14.0", optional = true} tidb-vector = {version = ">=0.0.3,<1.0.0", optional = true} friendli-client = {version = "^1.2.4", optional = true} +premai = {version = "^0.3.25", optional = true} [tool.poetry.group.test] optional = true @@ -155,6 +156,7 @@ tiktoken = ">=0.3.2,<0.6.0" anthropic = "^0.3.11" langchain-core = { path = "../core", develop = true } fireworks-ai = "^0.9.0" +premai = "^0.3.25" [tool.poetry.group.lint] optional = true @@ -267,7 +269,8 @@ extended_testing = [ "rdflib", "tidb-vector", "cloudpickle", - "friendli-client" + "friendli-client", + "premai" ] [tool.ruff] From 34f9899b46cd0dd7f1ec2cb180dbe21d32570378 Mon Sep 17 00:00:00 2001 From: Anindyadeep Sannigrahi Date: Mon, 18 Mar 2024 17:16:39 +0530 Subject: [PATCH 20/30] name changed from ChatPrem to ChatPremAI --- docs/docs/integrations/chat/premai.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/integrations/chat/premai.ipynb b/docs/docs/integrations/chat/premai.ipynb index 9972cbc955dab..c4db42cb6c2fb 100644 --- a/docs/docs/integrations/chat/premai.ipynb +++ b/docs/docs/integrations/chat/premai.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# ChatPrem\n", + "# ChatPremAI\n", "\n", ">[PremAI](https://app.premai.io) is an unified platform that let's you build powerful production-ready GenAI powered applications with least effort, so that you can focus more on user experience and overall growth. 
\n", "\n", From 2c094fe6252f5d4328e2e95c5242cf490f6c460f Mon Sep 17 00:00:00 2001 From: Anindyadeep Sannigrahi Date: Mon, 18 Mar 2024 17:33:25 +0530 Subject: [PATCH 21/30] fix: lint issues --- .../langchain_community/chat_models/premai.py | 48 +++++++++++-------- 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/libs/community/langchain_community/chat_models/premai.py b/libs/community/langchain_community/chat_models/premai.py index 67e325ce6ded8..b0e9c83cf389c 100644 --- a/libs/community/langchain_community/chat_models/premai.py +++ b/libs/community/langchain_community/chat_models/premai.py @@ -103,17 +103,20 @@ def _response_to_result( def _convert_delta_response_to_message_chunk( response: ChatCompletionResponseStream, default_class: Type[BaseMessageChunk] -) -> BaseMessageChunk: +) -> Tuple[ + Union[BaseMessageChunk, HumanMessageChunk, AIMessageChunk, SystemMessageChunk], + Optional[str], +]: """Converts delta response to message chunk""" _delta = response.choices[0].delta # type: ignore - role = _delta.get("role", "") - content = _delta.get("content", "") + role = _delta.get("role", "") # type: ignore + content = _delta.get("content", "") # type: ignore additional_kwargs: Dict = {} if role is None or role == "": raise ChatPremAPIError("Role can not be None. Please check the response") - finish_reasons = response.choices[0].finish_reason + finish_reasons: Optional[str] = response.choices[0].finish_reason if role == "user" or default_class == HumanMessageChunk: return HumanMessageChunk(content=content), finish_reasons @@ -132,21 +135,23 @@ def _convert_delta_response_to_message_chunk( def _messages_to_prompt_dict( input_messages: List[BaseMessage], -) -> Tuple[str, List[dict]]: +) -> Tuple[Optional[str], List[Dict[str, str]]]: """Converts a list of LangChain Messages into a simple dict which is the message structure in Prem""" - system_prompt: str = None + system_prompt: Optional[str] = None examples_and_messages: List[Dict[str, str]] = [] for input_msg in input_messages: if isinstance(input_msg, SystemMessage): - system_prompt = input_msg.content + system_prompt = str(input_msg.content) elif isinstance(input_msg, HumanMessage): - examples_and_messages.append({"role": "user", "content": input_msg.content}) + examples_and_messages.append( + {"role": "user", "content": str(input_msg.content)} + ) elif isinstance(input_msg, AIMessage): examples_and_messages.append( - {"role": "assistant", "content": input_msg.content} + {"role": "assistant", "content": str(input_msg.content)} ) else: raise ChatPremAPIError("No such role explicitly exists") @@ -188,7 +193,7 @@ class ChatPremAI(BaseChatModel, BaseModel): max_tokens: Optional[int] = None """The maximum number of tokens to generate""" - max_retries: Optional[int] = 1 + max_retries: int = 1 """Max number of retries to call the API""" system_prompt: Optional[str] = "" @@ -269,7 +274,7 @@ def _default_params(self) -> Dict[str, Any]: "stop": None, } - def _get_all_kwargs(self, **kwargs) -> Dict[str, Any]: + def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]: all_kwargs = {**self._default_params, **kwargs} for key in list(self._default_params.keys()): if all_kwargs.get(key) is None or all_kwargs.get(key) == "": @@ -278,12 +283,12 @@ def _get_all_kwargs(self, **kwargs) -> Dict[str, Any]: def _generate( self, - messages: List[List[BaseMessage]], + messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs, + **kwargs: Any, ) -> ChatResult: - system_prompt, 
messages = _messages_to_prompt_dict(messages) + system_prompt, messages_to_pass = _messages_to_prompt_dict(messages) # type: ignore kwargs["stop"] = stop if system_prompt is not None and system_prompt != "": @@ -293,7 +298,7 @@ def _generate( response = chat_with_retry( self, project_id=self.project_id, - messages=messages, + messages=messages_to_pass, stream=False, run_manager=run_manager, **all_kwargs, @@ -306,13 +311,14 @@ def _stream( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs, + **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: - system_prompt, messages = _messages_to_prompt_dict(messages) - + system_prompt, messages_to_pass = _messages_to_prompt_dict(messages) kwargs["stop"] = stop - if system_prompt is not None and system_prompt != "": - kwargs["system_prompt"] = system_prompt + + if "system_prompt" not in kwargs: + if system_prompt is not None and system_prompt != "": + kwargs["system_prompt"] = system_prompt all_kwargs = self._get_all_kwargs(**kwargs) @@ -321,7 +327,7 @@ def _stream( for streamed_response in chat_with_retry( self, project_id=self.project_id, - messages=messages, + messages=messages_to_pass, stream=True, run_manager=run_manager, **all_kwargs, From 74f026e1d9b63406d47d6e202587ec4205c51bf7 Mon Sep 17 00:00:00 2001 From: Anindyadeep Sannigrahi Date: Mon, 18 Mar 2024 17:34:22 +0530 Subject: [PATCH 22/30] Changed name from ChatPrem to ChatPremAI --- docs/docs/integrations/providers/premai.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/integrations/providers/premai.md b/docs/docs/integrations/providers/premai.md index 7547f8ea0053e..5827cd944a4c1 100644 --- a/docs/docs/integrations/providers/premai.md +++ b/docs/docs/integrations/providers/premai.md @@ -47,7 +47,7 @@ import getpass if "PREMAI_API_KEY" not in os.environ: os.environ["PREMAI_API_KEY"] = getpass.getpass("PremAI API Key:") -chat = ChatPrem(project_id=8) +chat = ChatPremAI(project_id=8) ``` ### Calling the Model From 6d30402b1ddf030ef2f02f6c5b9c1fd148d6077d Mon Sep 17 00:00:00 2001 From: Anindyadeep Sannigrahi Date: Mon, 18 Mar 2024 17:57:50 +0530 Subject: [PATCH 23/30] changed name from ChatPrem to ChatPremAI --- docs/docs/integrations/chat/premai.ipynb | 70 +++++++++++++--------- docs/docs/integrations/providers/premai.md | 8 +-- 2 files changed, 47 insertions(+), 31 deletions(-) diff --git a/docs/docs/integrations/chat/premai.ipynb b/docs/docs/integrations/chat/premai.ipynb index c4db42cb6c2fb..958ac091ccf1b 100644 --- a/docs/docs/integrations/chat/premai.ipynb +++ b/docs/docs/integrations/chat/premai.ipynb @@ -5,7 +5,7 @@ "metadata": {}, "source": [ "---\n", - "sidebar_label: Prem\n", + "sidebar_label: PremAI\n", "---" ] }, @@ -60,7 +60,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Setup ChatPrem instance in LangChain \n", + "## Setup ChatPremAI instance in LangChain \n", "\n", "Once we imported our required modules, let's setup our client. For now let's assume that our `project_id` is 8. But make sure you use your project-id, otherwise it will throw error.\n", "\n", @@ -78,8 +78,22 @@ "import getpass\n", "import os\n", "\n", + "# First step is to set up the env variable. \n", + "# you can also pass the API key while instantiating the model but this \n", + "# comes under a best practices to set it as env variable. 
\n", + "\n", "if os.environ.get(\"PREMAI_API_KEY\") is None:\n", - " os.environ[\"PREMAI_API_KEY\"] = getpass.getpass(\"PremAI API Key:\")\n", + " os.environ[\"PREMAI_API_KEY\"] = getpass.getpass(\"PremAI API Key:\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "# By default it will use the model which was deployed through the platform\n", + "# in my case it will is \"claude-3-haiku\"\n", "\n", "chat = ChatPremAI(project_id=8)" ] @@ -99,24 +113,22 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "AIMessage(content=\"Ahoy there, matey! I be a virtual assistant here to assist ye with all yer needs. What can I be helpin' ye with today?\")" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "I am an artificial intelligence created by Anthropic. I'm here to help with a wide variety of tasks, from research and analysis to creative projects and open-ended conversation. I have general knowledge and capabilities, but I'm not a real person - I'm an AI assistant. Please let me know if you have any other questions!\n" + ] } ], "source": [ "human_message = HumanMessage(content=\"Who are you?\")\n", "\n", - "chat.invoke([human_message])" + "response = chat.invoke([human_message])\n", + "print(response.content)" ] }, { @@ -128,16 +140,16 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='I am a friendly assistant here to help you with any questions or tasks you may have. How can I assist you today?')" + "AIMessage(content=\"I am an artificial intelligence created by Anthropic. My purpose is to assist and converse with humans in a friendly and helpful way. I have a broad knowledge base that I can use to provide information, answer questions, and engage in discussions on a wide range of topics. Please let me know if you have any other questions - I'm here to help!\")" ] }, - "execution_count": 4, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -158,22 +170,22 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='I am a friendly assistant here to help you with any questions or tasks you may have. Feel free')" + "AIMessage(content='I am an artificial intelligence created by Anthropic')" ] }, - "execution_count": 5, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "chat.invoke([system_message, human_message], temperature=0.7, max_tokens=20, top_p=0.95)" + "chat.invoke([system_message, human_message], temperature=0.7, max_tokens=10, top_p=0.95)" ] }, { @@ -193,14 +205,14 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Ahoy there! I be doin' well, me hearty! And yerself? Arrr!" + "Hello! As an AI language model, I don't have feelings or a physical state, but I'm functioning properly and ready to assist you with any questions or tasks you might have. How can I help you today?" ] } ], @@ -221,25 +233,29 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Hello! 
I'm here to help you. How can I assist you today?" + "Hello! As an AI language model, I don't have feelings or a physical form, but I'm functioning properly and ready to assist you. How can I help you today?" ] } ], "source": [ "import sys\n", "\n", + "# For some experimental reasons if you want to override the system prompt then you \n", + "# can pass that here too. However it is not recommended to override system prompt\n", + "# of an already deployed model. \n", + "\n", "for chunk in chat.stream(\n", " \"hello how are you\",\n", - " system_prompt=\"You are an helpful assistant\",\n", + " system_prompt=\"act like a dog\",\n", " temperature=0.7,\n", - " max_tokens=20,\n", + " max_tokens=200,\n", "):\n", " sys.stdout.write(chunk.content)\n", " sys.stdout.flush()" @@ -262,7 +278,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.12" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/docs/docs/integrations/providers/premai.md b/docs/docs/integrations/providers/premai.md index 5827cd944a4c1..e9e41a38b394e 100644 --- a/docs/docs/integrations/providers/premai.md +++ b/docs/docs/integrations/providers/premai.md @@ -3,9 +3,9 @@ >[PremAI](https://app.premai.io) is an unified platform that let's you build powerful production-ready GenAI powered applications with least effort, so that you can focus more on user experience and overall growth. -## ChatPrem +## ChatPremAI -This example goes over how to use LangChain to interact with different chat models with `ChatPrem` +This example goes over how to use LangChain to interact with different chat models with `ChatPremAI` ### Installation and setup @@ -15,7 +15,7 @@ We start by installing langchain and premai-sdk. You can type the following comm pip install premai langchain ``` -Before proceeding further, please make sure that you have made an account on Prem and already started a project. If not, then here's how you can start for free: +Before proceeding further, please make sure that you have made an account on PremAI and already started a project. If not, then here's how you can start for free: 1. Sign in to [PremAI](https://app.premai.io/accounts/login/), if you are coming for the first time and create your API key [here](https://app.premai.io/api_keys/). @@ -25,7 +25,7 @@ Before proceeding further, please make sure that you have made an account on Pre 4. Head over to LaunchPad (the one with 🚀 icon). And there deploy your model of choice. Your default model will be `gpt-4`. You can also set and fix different generation parameters (like: max-tokens, temperature etc) and also pre-set your system prompt. -Congratulations on creating your first deployed application on Prem 🎉 Now we can use langchain to interact with our application. +Congratulations on creating your first deployed application on PremAI 🎉 Now we can use langchain to interact with our application. 
```python from langchain_core.messages import HumanMessage, SystemMessage From f96cf148f6b2db0f0bdec852e7b12f85122e2236 Mon Sep 17 00:00:00 2001 From: Anindyadeep Sannigrahi Date: Mon, 18 Mar 2024 18:02:54 +0530 Subject: [PATCH 24/30] fix: spell checks and grammar --- docs/docs/integrations/providers/premai.md | 28 +++++++++++----------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/docs/integrations/providers/premai.md b/docs/docs/integrations/providers/premai.md index e9e41a38b394e..64f8f4621a033 100644 --- a/docs/docs/integrations/providers/premai.md +++ b/docs/docs/integrations/providers/premai.md @@ -1,6 +1,6 @@ # PremAI ->[PremAI](https://app.premai.io) is an unified platform that let's you build powerful production-ready GenAI powered applications with least effort, so that you can focus more on user experience and overall growth. +>[PremAI](https://app.premai.io) is a unified platform that lets you build powerful production-ready GenAI-powered applications with the least effort so that you can focus more on user experience and overall growth. ## ChatPremAI @@ -23,7 +23,7 @@ Before proceeding further, please make sure that you have made an account on Pre 3. Create a project and this will generate a project-id (written as ID). This ID will help you to interact with your deployed application. -4. Head over to LaunchPad (the one with 🚀 icon). And there deploy your model of choice. Your default model will be `gpt-4`. You can also set and fix different generation parameters (like: max-tokens, temperature etc) and also pre-set your system prompt. +4. Head over to LaunchPad (the one with 🚀 icon). And there deploy your model of choice. Your default model will be `gpt-4`. You can also set and fix different generation parameters (like max-tokens, temperature, etc) and also pre-set your system prompt. Congratulations on creating your first deployed application on PremAI 🎉 Now we can use langchain to interact with our application. @@ -34,9 +34,9 @@ from langchain_community.chat_models import ChatPremAI ### Setup ChatPrem instance in LangChain -Once we imported our required modules, let's setup our client. For now let's assume that our `project_id` is 8. But make sure you use your project-id, otherwise it will throw error. +Once we import our required modules, let's set up our client. For now, let's assume that our `project_id` is 8. But make sure you use your project-id, otherwise, it will throw an error. -In order to use langchain with prem, you do not need to pass any model name or set any parameters with our chat-client. All of those will use the default model name and parameters of the LaunchPad model. +To use langchain with prem, you do not need to pass any model name or set any parameters with our chat client. All of those will use the default model name and parameters of the LaunchPad model. `NOTE:` If you change the `model_name` or any other parameter like `temperature` while setting the client, it will override existing default configurations. @@ -52,9 +52,9 @@ chat = ChatPremAI(project_id=8) ### Calling the Model -Now you are all set. We can now start with interacting with our application. `ChatPrem` supports two methods `invoke` (which is same as `generate`) and `stream`. +Now you are all set. We can now start by interacting with our application. `ChatPrem` supports two methods `invoke` (which is the same as `generate`) and `stream`. -The first one will give us a static result. Where as the second one will stream tokens one by one. 
Here's how you can generate chat like completions. +The first one will give us a static result. Whereas the second one will stream tokens one by one. Here's how you can generate chat-like completions. ### Generation @@ -64,7 +64,7 @@ human_message = HumanMessage(content="Who are you?") chat.invoke([human_message]) ``` -Above looks interesting right? I set my default lanchpad system-prompt as: `Always sound like a pirate` You can also, override the default system prompt if you need to. Here's how you can do it. +The above looks interesting, right? I set my default launchpad system-prompt as: `Always sound like a pirate` You can also, override the default system prompt if you need to. Here's how you can do it. ```python system_message = SystemMessage(content="You are a friendly assistant.") @@ -87,11 +87,11 @@ chat.invoke( Before proceeding further, please note that the current version of ChatPrem does not support parameters: [n](https://platform.openai.com/docs/api-reference/chat/create#chat-create-n) and [stop](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop) are not supported. -We will provide support for those two above parameters in sooner versions. +We will provide support for those two above parameters in later versions. ### Streaming -And finally, here's how you do token streaming for dynamic chat like applications. +And finally, here's how you do token streaming for dynamic chat-like applications. ```python import sys @@ -116,13 +116,13 @@ for chunk in chat.stream( ## Embedding -In this section we are going to discuss how we can get access to different embedding model using `PremEmbeddings`. Let's start by doing some imports and define our embedding object +In this section, we are going to discuss how we can get access to different embedding models using `PremEmbeddings`. Let's start by doing some imports and defining our embedding object ```python from langchain_community.embeddings import PremEmbeddings ``` -Once we imported our required modules, let's setup our client. For now let's assume that our `project_id` is 8. But make sure you use your project-id, otherwise it will throw error. +Once we import our required modules, let's set up our client. For now, let's assume that our `project_id` is 8. But make sure you use your project-id, otherwise, it will throw an error. 
```python @@ -133,7 +133,7 @@ import getpass if os.environ.get("PREMAI_API_KEY") is None: os.environ["PREMAI_API_KEY"] = getpass.getpass("PremAI API Key:") -# Define a model is a required parameter here, since there is no default embedding model +# Define a model as a required parameter here since there is no default embedding model model = "text-embedding-3-large" embedder = PremEmbeddings(project_id=8, model=model) @@ -163,7 +163,7 @@ query_result = embedder.embed_query(query) print(query_result[:5]) ``` -Finally let's embed a document +Finally, let's embed a document ```python documents = [ @@ -174,7 +174,7 @@ documents = [ doc_result = embedder.embed_documents(documents) -# Similar to previous result, let's print the first five element +# Similar to the previous result, let's print the first five element # of the first document vector print(doc_result[0][:5]) From 477b33738130bd8ab71dc23ff4b4fa0dbe31cf8a Mon Sep 17 00:00:00 2001 From: Anindyadeep Sannigrahi Date: Mon, 18 Mar 2024 18:08:39 +0530 Subject: [PATCH 25/30] changed name from ChatPrem to ChatPremAI and more spell fixes --- docs/docs/integrations/chat/premai.ipynb | 18 +++++++++--------- docs/docs/integrations/providers/premai.md | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/docs/integrations/chat/premai.ipynb b/docs/docs/integrations/chat/premai.ipynb index 958ac091ccf1b..bd9a5482d8eb8 100644 --- a/docs/docs/integrations/chat/premai.ipynb +++ b/docs/docs/integrations/chat/premai.ipynb @@ -15,7 +15,7 @@ "source": [ "# ChatPremAI\n", "\n", - ">[PremAI](https://app.premai.io) is an unified platform that let's you build powerful production-ready GenAI powered applications with least effort, so that you can focus more on user experience and overall growth. \n", + ">[PremAI](https://app.premai.io) is a unified platform that lets you build powerful production-ready GenAI-powered applications with the least effort so that you can focus more on user experience and overall growth. \n", "\n", "\n", "This example goes over how to use LangChain to interact with `ChatPremAI`. " @@ -25,7 +25,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Installation and setup\n", + "### Installation and setup\n", "\n", "We start by installing langchain and premai-sdk. You can type the following command to install:\n", "\n", @@ -33,7 +33,7 @@ "pip install premai langchain\n", "```\n", "\n", - "Before proceeding further, please make sure that you have made an account on Prem and already started a project. If not, then here's how you can start for free:\n", + "Before proceeding further, please make sure that you have made an account on PremAI and already started a project. If not, then here's how you can start for free:\n", "\n", "1. Sign in to [PremAI](https://app.premai.io/accounts/login/), if you are coming for the first time and create your API key [here](https://app.premai.io/api_keys/).\n", "\n", @@ -41,9 +41,9 @@ "\n", "3. Create a project and this will generate a project-id (written as ID). This ID will help you to interact with your deployed application. \n", "\n", - "4. Head over to LaunchPad (the one with 🚀 icon). And there deploy your model of choice. Your default model will be `gpt-4`. You can also set and fix different generation paramters (like: max-tokens, temperature etc) and also pre-set your system prompt. \n", + "4. Head over to LaunchPad (the one with 🚀 icon). And there deploy your model of choice. Your default model will be `gpt-4`. 
You can also set and fix different generation parameters (like max-tokens, temperature, etc) and also pre-set your system prompt. \n", "\n", - "Congratulations on creating your first deployed application on Prem 🎉 Now we can use langchain to interact with our application. " + "Congratulations on creating your first deployed application on PremAI 🎉 Now we can use langchain to interact with our application. " ] }, { @@ -62,9 +62,9 @@ "source": [ "## Setup ChatPremAI instance in LangChain \n", "\n", - "Once we imported our required modules, let's setup our client. For now let's assume that our `project_id` is 8. But make sure you use your project-id, otherwise it will throw error.\n", + "Once we import our required modules, let's set up our client. For now, let's assume that our `project_id` is 8. But make sure you use your project-id, otherwise, it will throw an error.\n", "\n", - "In order to use langchain with prem, you do not need to pass any model name or set any parameters with our chat-client. All of those will use the default model name and paramters of the LaunchPad model. \n", + "To use langchain with prem, you do not need to pass any model name or set any parameters with our chat client. All of those will use the default model name and parameters of the LaunchPad model. \n", "\n", "`NOTE:` If you change the `model_name` or any other parameter like `temperature` while setting the client, it will override existing default configurations. " ] @@ -104,9 +104,9 @@ "source": [ "## Calling the Model\n", "\n", - "Now you are all set. We can now start with interacting with our application. `ChatPrem` supports two methods `invoke` (which is same as `generate`) and `stream`. \n", + "Now you are all set. We can now start by interacting with our application. `ChatPremAI` supports two methods `invoke` (which is the same as `generate`) and `stream`. \n", "\n", - "The first one will give us a static result. Where as the second one will stream tokens one by one. Here's how you can generate chat like completions. \n", + "The first one will give us a static result. Whereas the second one will stream tokens one by one. Here's how you can generate chat-like completions. \n", "\n", "### Generation" ] diff --git a/docs/docs/integrations/providers/premai.md b/docs/docs/integrations/providers/premai.md index 64f8f4621a033..7ec2ca5fa84a7 100644 --- a/docs/docs/integrations/providers/premai.md +++ b/docs/docs/integrations/providers/premai.md @@ -52,7 +52,7 @@ chat = ChatPremAI(project_id=8) ### Calling the Model -Now you are all set. We can now start by interacting with our application. `ChatPrem` supports two methods `invoke` (which is the same as `generate`) and `stream`. +Now you are all set. We can now start by interacting with our application. `ChatPremAI` supports two methods `invoke` (which is the same as `generate`) and `stream`. The first one will give us a static result. Whereas the second one will stream tokens one by one. Here's how you can generate chat-like completions. 
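[Editor's note — not part of any committed diff: by this point in the series the docs have settled on the final `ChatPremAI` API surface. As orientation for reviewers, here is a minimal sketch of the usage pattern the documentation patches above describe. It assumes the series is applied, a valid `PREMAI_API_KEY`, and the placeholder `project_id=8` used throughout these patches.]

```python
import getpass
import os

from langchain_community.chat_models import ChatPremAI
from langchain_core.messages import HumanMessage, SystemMessage

# Same env-var pattern as the notebook: prompt only if the key is unset.
if os.environ.get("PREMAI_API_KEY") is None:
    os.environ["PREMAI_API_KEY"] = getpass.getpass("PremAI API Key:")

# With no model name or parameters, the LaunchPad defaults are used.
chat = ChatPremAI(project_id=8)

# Static completion; per-call kwargs override the LaunchPad defaults.
response = chat.invoke(
    [
        SystemMessage(content="You are a friendly assistant."),
        HumanMessage(content="Who are you?"),
    ],
    temperature=0.7,
    max_tokens=20,
)
print(response.content)

# Token-by-token streaming for chat-like UIs.
for chunk in chat.stream("hello how are you", max_tokens=20):
    print(chunk.content, end="", flush=True)
```
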
From c293d0474e2d7f6a11d0706cbb9273e0819b3296 Mon Sep 17 00:00:00 2001 From: Anindyadeep Sannigrahi Date: Tue, 19 Mar 2024 09:19:38 +0530 Subject: [PATCH 26/30] run poetry run --no-update to update packages --- libs/community/poetry.lock | 51 +++++++++++++++++++++++++++++++++----- 1 file changed, 45 insertions(+), 6 deletions(-) diff --git a/libs/community/poetry.lock b/libs/community/poetry.lock index f38759ce41e97..3dda64c6b17ac 100644 --- a/libs/community/poetry.lock +++ b/libs/community/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "aenum" @@ -4251,7 +4251,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.1.31" +version = "0.1.33-rc.1" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" @@ -5485,9 +5485,9 @@ numpy = [ {version = ">=1.21.0", markers = "python_version <= \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\" and python_version >= \"3.8\""}, {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""}, {version = ">=1.17.3", markers = "(platform_system != \"Darwin\" and platform_system != \"Linux\") and python_version >= \"3.8\" and python_version < \"3.9\" or platform_system != \"Darwin\" and python_version >= \"3.8\" and python_version < \"3.9\" and platform_machine != \"aarch64\" or platform_machine != \"arm64\" and python_version >= \"3.8\" and python_version < \"3.9\" and platform_system != \"Linux\" or (platform_machine != \"arm64\" and platform_machine != \"aarch64\") and python_version >= \"3.8\" and python_version < \"3.9\""}, + {version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\" and python_version < \"3.11\""}, {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""}, - {version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, ] [[package]] @@ -5660,8 +5660,8 @@ files = [ [package.dependencies] numpy = [ {version = ">=1.20.3", markers = "python_version < \"3.10\""}, - {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, + {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, ] python-dateutil = ">=2.8.2" pytz = ">=2020.1" @@ -5965,6 +5965,23 @@ dev = ["packaging", "prawcore[lint]", "prawcore[test]"] lint = ["pre-commit", "ruff (>=0.0.291)"] test = ["betamax (>=0.8,<0.9)", "pytest (>=2.7.3)", "urllib3 (==1.26.*)"] +[[package]] +name = "premai" +version = "0.3.25" +description = "A client library for accessing Prem APIs" +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "premai-0.3.25-py3-none-any.whl", hash = "sha256:bddace7340e1827f048b410748d365e8663e4bbeb6bf7e8b8657f3cc267f7f28"}, + {file = "premai-0.3.25.tar.gz", hash = 
"sha256:c387980ecf3bdcb07886dd4f7a1c0f0701df67e772e62f444394cea97d5970a0"}, +] + +[package.dependencies] +attrs = ">=21.3.0" +httpx = ">=0.20.0,<0.27.0" +python-dateutil = ">=2.8.0,<3.0.0" +typing_extensions = ">=4.9.0" + [[package]] name = "prometheus-client" version = "0.20.0" @@ -6670,26 +6687,31 @@ python-versions = ">=3.8" files = [ {file = "PyMuPDF-1.23.26-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:645a05321aecc8c45739f71f0eb574ce33138d19189582ffa5241fea3a8e2549"}, {file = "PyMuPDF-1.23.26-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:2dfc9e010669ae92fade6fb72aaea49ebe3b8dcd7ee4dcbbe50115abcaa4d3fe"}, + {file = "PyMuPDF-1.23.26-cp310-none-manylinux2014_aarch64.whl", hash = "sha256:734ee380b3abd038602be79114194a3cb74ac102b7c943bcb333104575922c50"}, {file = "PyMuPDF-1.23.26-cp310-none-manylinux2014_x86_64.whl", hash = "sha256:b22f8d854f8196ad5b20308c1cebad3d5189ed9f0988acbafa043947ea7e6c55"}, {file = "PyMuPDF-1.23.26-cp310-none-win32.whl", hash = "sha256:cc0f794e3466bc96b5bf79d42fbc1551428751e3fef38ebc10ac70396b676144"}, {file = "PyMuPDF-1.23.26-cp310-none-win_amd64.whl", hash = "sha256:2eb701247d8e685a24e45899d1175f01a3ce5fc792a4431c91fbb68633b29298"}, {file = "PyMuPDF-1.23.26-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:e2804a64bb57da414781e312fb0561f6be67658ad57ed4a73dce008b23fc70a6"}, {file = "PyMuPDF-1.23.26-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:97b40bb22e3056874634617a90e0ed24a5172cf71791b9e25d1d91c6743bc567"}, + {file = "PyMuPDF-1.23.26-cp311-none-manylinux2014_aarch64.whl", hash = "sha256:fab8833559bc47ab26ce736f915b8fc1dd37c108049b90396f7cd5e1004d7593"}, {file = "PyMuPDF-1.23.26-cp311-none-manylinux2014_x86_64.whl", hash = "sha256:f25aafd3e7fb9d7761a22acf2b67d704f04cc36d4dc33a3773f0eb3f4ec3606f"}, {file = "PyMuPDF-1.23.26-cp311-none-win32.whl", hash = "sha256:05e672ed3e82caca7ef02a88ace30130b1dd392a1190f03b2b58ffe7aa331400"}, {file = "PyMuPDF-1.23.26-cp311-none-win_amd64.whl", hash = "sha256:92b3c4dd4d0491d495f333be2d41f4e1c155a409bc9d04b5ff29655dccbf4655"}, {file = "PyMuPDF-1.23.26-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:a217689ede18cc6991b4e6a78afee8a440b3075d53b9dec4ba5ef7487d4547e9"}, {file = "PyMuPDF-1.23.26-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:42ad2b819b90ce1947e11b90ec5085889df0a2e3aa0207bc97ecacfc6157cabc"}, + {file = "PyMuPDF-1.23.26-cp312-none-manylinux2014_aarch64.whl", hash = "sha256:99607649f89a02bba7d8ebe96e2410664316adc95e9337f7dfeff6a154f93049"}, {file = "PyMuPDF-1.23.26-cp312-none-manylinux2014_x86_64.whl", hash = "sha256:bb42d4b8407b4de7cb58c28f01449f16f32a6daed88afb41108f1aeb3552bdd4"}, {file = "PyMuPDF-1.23.26-cp312-none-win32.whl", hash = "sha256:c40d044411615e6f0baa7d3d933b3032cf97e168c7fa77d1be8a46008c109aee"}, {file = "PyMuPDF-1.23.26-cp312-none-win_amd64.whl", hash = "sha256:3f876533aa7f9a94bcd9a0225ce72571b7808260903fec1d95c120bc842fb52d"}, {file = "PyMuPDF-1.23.26-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:52df831d46beb9ff494f5fba3e5d069af6d81f49abf6b6e799ee01f4f8fa6799"}, {file = "PyMuPDF-1.23.26-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:0bbb0cf6593e53524f3fc26fb5e6ead17c02c64791caec7c4afe61b677dedf80"}, + {file = "PyMuPDF-1.23.26-cp38-none-manylinux2014_aarch64.whl", hash = "sha256:5ef4360f20015673c20cf59b7e19afc97168795188c584254ed3778cde43ce77"}, {file = "PyMuPDF-1.23.26-cp38-none-manylinux2014_x86_64.whl", hash = "sha256:d7cd88842b2e7f4c71eef4d87c98c35646b80b60e6375392d7ce40e519261f59"}, {file = "PyMuPDF-1.23.26-cp38-none-win32.whl", hash = 
"sha256:6577e2f473625e2d0df5f5a3bf1e4519e94ae749733cc9937994d1b256687bfa"}, {file = "PyMuPDF-1.23.26-cp38-none-win_amd64.whl", hash = "sha256:fbe1a3255b2cd0d769b2da2c4efdd0c0f30d4961a1aac02c0f75cf951b337aa4"}, {file = "PyMuPDF-1.23.26-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:73fce034f2afea886a59ead2d0caedf27e2b2a8558b5da16d0286882e0b1eb82"}, {file = "PyMuPDF-1.23.26-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:b3de8618b7cb5b36db611083840b3bcf09b11a893e2d8262f4e042102c7e65de"}, + {file = "PyMuPDF-1.23.26-cp39-none-manylinux2014_aarch64.whl", hash = "sha256:879e7f5ad35709d8760ab6103c3d5dac8ab8043a856ab3653fd324af7358ee87"}, {file = "PyMuPDF-1.23.26-cp39-none-manylinux2014_x86_64.whl", hash = "sha256:deee96c2fd415ded7b5070d8d5b2c60679aee6ed0e28ac0d2cb998060d835c2c"}, {file = "PyMuPDF-1.23.26-cp39-none-win32.whl", hash = "sha256:9f7f4ef99dd8ac97fb0b852efa3dcbee515798078b6c79a6a13c7b1e7c5d41a4"}, {file = "PyMuPDF-1.23.26-cp39-none-win_amd64.whl", hash = "sha256:ba9a54552c7afb9ec85432c765e2fa9a81413acfaa7d70db7c9b528297749e5b"}, @@ -9285,6 +9307,23 @@ typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} [package.extras] standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] +[[package]] +name = "vcrpy" +version = "4.3.0" +description = "Automatically mock your HTTP interactions to simplify and speed up testing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "vcrpy-4.3.0-py2.py3-none-any.whl", hash = "sha256:8fbd4be412e8a7f35f623dd61034e6380a1c8dbd0edf6e87277a3289f6e98093"}, + {file = "vcrpy-4.3.0.tar.gz", hash = "sha256:49c270ce67e826dba027d83e20d25b67a5885487697e97bca6dbdf53d750a0ac"}, +] + +[package.dependencies] +PyYAML = "*" +six = ">=1.5" +wrapt = "*" +yarl = "*" + [[package]] name = "vcrpy" version = "6.0.1" @@ -9851,9 +9890,9 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [extras] cli = ["typer"] -extended-testing = ["aiosqlite", "aleph-alpha-client", "anthropic", "arxiv", "assemblyai", "atlassian-python-api", "azure-ai-documentintelligence", "beautifulsoup4", "bibtexparser", "cassio", "chardet", "cloudpickle", "cloudpickle", "cohere", "databricks-vectorsearch", "datasets", "dgml-utils", "elasticsearch", "esprima", "faiss-cpu", "feedparser", "fireworks-ai", "friendli-client", "geopandas", "gitpython", "google-cloud-documentai", "gql", "gradientai", "hdbcli", "hologres-vector", "html2text", "httpx", "javelin-sdk", "jinja2", "jq", "jsonschema", "lxml", "markdownify", "motor", "msal", "mwparserfromhell", "mwxml", "newspaper3k", "numexpr", "nvidia-riva-client", "oci", "openai", "openapi-pydantic", "oracle-ads", "pandas", "pdfminer-six", "pgvector", "praw", "psychicapi", "py-trello", "pymupdf", "pypdf", "pypdfium2", "pyspark", "rank-bm25", "rapidfuzz", "rapidocr-onnxruntime", "rdflib", "requests-toolbelt", "rspace_client", "scikit-learn", "sqlite-vss", "streamlit", "sympy", "telethon", "tidb-vector", "timescale-vector", "tqdm", "tree-sitter", "tree-sitter-languages", "upstash-redis", "xata", "xmltodict", "zhipuai"] +extended-testing = ["aiosqlite", "aleph-alpha-client", "anthropic", "arxiv", "assemblyai", "atlassian-python-api", "azure-ai-documentintelligence", "beautifulsoup4", "bibtexparser", "cassio", "chardet", "cloudpickle", "cloudpickle", "cohere", "databricks-vectorsearch", "datasets", "dgml-utils", "elasticsearch", "esprima", "faiss-cpu", "feedparser", 
"fireworks-ai", "friendli-client", "geopandas", "gitpython", "google-cloud-documentai", "gql", "gradientai", "hdbcli", "hologres-vector", "html2text", "httpx", "javelin-sdk", "jinja2", "jq", "jsonschema", "lxml", "markdownify", "motor", "msal", "mwparserfromhell", "mwxml", "newspaper3k", "numexpr", "nvidia-riva-client", "oci", "openai", "openapi-pydantic", "oracle-ads", "pandas", "pdfminer-six", "pgvector", "praw", "premai", "psychicapi", "py-trello", "pymupdf", "pypdf", "pypdfium2", "pyspark", "rank-bm25", "rapidfuzz", "rapidocr-onnxruntime", "rdflib", "requests-toolbelt", "rspace_client", "scikit-learn", "sqlite-vss", "streamlit", "sympy", "telethon", "tidb-vector", "timescale-vector", "tqdm", "tree-sitter", "tree-sitter-languages", "upstash-redis", "xata", "xmltodict", "zhipuai"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "3bf95cf1fbf56e32eb64b0630fd6dbacad0a22274adee66b897ea30b0a6c75b1" +content-hash = "515cedebe2eeca609e6a6b3418653cccb08d0dd3e4f49e4406b0870e3e71a7ef" From 69f356900f74d62531153d4210b0e843eacf3ae2 Mon Sep 17 00:00:00 2001 From: Anindyadeep Sannigrahi Date: Tue, 19 Mar 2024 10:02:59 +0530 Subject: [PATCH 27/30] run format for docs --- docs/docs/integrations/chat/premai.ipynb | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/docs/integrations/chat/premai.ipynb b/docs/docs/integrations/chat/premai.ipynb index bd9a5482d8eb8..13a2ece273304 100644 --- a/docs/docs/integrations/chat/premai.ipynb +++ b/docs/docs/integrations/chat/premai.ipynb @@ -78,9 +78,9 @@ "import getpass\n", "import os\n", "\n", - "# First step is to set up the env variable. \n", - "# you can also pass the API key while instantiating the model but this \n", - "# comes under a best practices to set it as env variable. \n", + "# First step is to set up the env variable.\n", + "# you can also pass the API key while instantiating the model but this\n", + "# comes under a best practices to set it as env variable.\n", "\n", "if os.environ.get(\"PREMAI_API_KEY\") is None:\n", " os.environ[\"PREMAI_API_KEY\"] = getpass.getpass(\"PremAI API Key:\")" @@ -247,9 +247,9 @@ "source": [ "import sys\n", "\n", - "# For some experimental reasons if you want to override the system prompt then you \n", + "# For some experimental reasons if you want to override the system prompt then you\n", "# can pass that here too. However it is not recommended to override system prompt\n", - "# of an already deployed model. 
\n", + "# of an already deployed model.\n", "\n", "for chunk in chat.stream(\n", " \"hello how are you\",\n", From 3bc4037108c19aa0477fc9254c4522efabac075f Mon Sep 17 00:00:00 2001 From: Anindyadeep Sannigrahi Date: Mon, 25 Mar 2024 22:15:01 +0530 Subject: [PATCH 28/30] removed premai from test_integration grp --- libs/community/pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/libs/community/pyproject.toml b/libs/community/pyproject.toml index 864ffea253264..3fc06bfb4eb31 100644 --- a/libs/community/pyproject.toml +++ b/libs/community/pyproject.toml @@ -156,7 +156,6 @@ tiktoken = ">=0.3.2,<0.6.0" anthropic = "^0.3.11" langchain-core = { path = "../core", develop = true } fireworks-ai = "^0.9.0" -premai = "^0.3.25" [tool.poetry.group.lint] optional = true From 9f1ba02ce4a2838b2aba7f0d6e202c5f83774946 Mon Sep 17 00:00:00 2001 From: Anindyadeep Sannigrahi Date: Mon, 25 Mar 2024 22:15:13 +0530 Subject: [PATCH 29/30] run poetry --no-update --- libs/community/poetry.lock | 82 ++++++++++++++++---------------------- 1 file changed, 34 insertions(+), 48 deletions(-) diff --git a/libs/community/poetry.lock b/libs/community/poetry.lock index c99200c9f8ef3..afcfe6f35ae99 100644 --- a/libs/community/poetry.lock +++ b/libs/community/poetry.lock @@ -1167,10 +1167,7 @@ files = [ [package.dependencies] jmespath = ">=0.7.1,<2.0.0" python-dateutil = ">=2.1,<3.0.0" -urllib3 = [ - {version = ">=1.25.4,<1.27", markers = "python_version < \"3.10\""}, - {version = ">=1.25.4,<2.1", markers = "python_version >= \"3.10\""}, -] +urllib3 = {version = ">=1.25.4,<2.1", markers = "python_version >= \"3.10\""} [package.extras] crt = ["awscrt (==0.19.19)"] @@ -2689,6 +2686,37 @@ uvicorn = ">=0.23.2,<0.24.0" [package.extras] mllib = ["accelerate (==0.21.0)", "datasets (==2.16.0)", "einops (>=0.6.1,<0.7.0)", "h5py (>=3.9.0,<4.0.0)", "peft (==0.6.0)", "transformers (==4.36.2)"] +[[package]] +name = "friendli-client" +version = "1.3.1" +description = "Client of Friendli Suite." 
+optional = true +python-versions = "<4.0.0,>=3.8.1" +files = [ + {file = "friendli_client-1.3.1-py3-none-any.whl", hash = "sha256:1a77b046c57b0d70bac8d13ac6ecc861f8fc84d3c63e39b34543f862373a670b"}, + {file = "friendli_client-1.3.1.tar.gz", hash = "sha256:85f87976f7bb75eb424f384e3e73ac3256b7aad477361b51341e520c2aed3a0e"}, +] + +[package.dependencies] +fastapi = ">=0.104.0,<0.105.0" +gql = ">=3.4.1,<4.0.0" +httpx = ">=0.24.1,<0.25.0" +injector = ">=0.21.0,<0.22.0" +jsonschema = ">=4.17.3,<5.0.0" +pathspec = ">=0.9.0,<0.10.0" +protobuf = ">=4.24.2,<5.0.0" +pydantic = {version = ">=1.9.0,<3", extras = ["email"]} +PyYaml = ">=6.0.1,<7.0.0" +requests = ">=2,<3" +rich = ">=12.2.0,<13.0.0" +tqdm = ">=4.48.0,<5.0.0" +typer = ">=0.9.0,<0.10.0" +types-protobuf = ">=4.24.0.1,<5.0.0.0" +uvicorn = ">=0.23.2,<0.24.0" + +[package.extras] +mllib = ["accelerate (==0.21.0)", "datasets (==2.16.0)", "einops (>=0.6.1,<0.7.0)", "h5py (>=3.9.0,<4.0.0)", "peft (==0.6.0)", "transformers (==4.36.2)"] + [[package]] name = "frozenlist" version = "1.4.1" @@ -5969,7 +5997,7 @@ test = ["betamax (>=0.8,<0.9)", "pytest (>=2.7.3)", "urllib3 (==1.26.*)"] name = "premai" version = "0.3.25" description = "A client library for accessing Prem APIs" -optional = false +optional = true python-versions = ">=3.8,<4.0" files = [ {file = "premai-0.3.25-py3-none-any.whl", hash = "sha256:bddace7340e1827f048b410748d365e8663e4bbeb6bf7e8b8657f3cc267f7f28"}, @@ -9088,20 +9116,6 @@ files = [ cryptography = ">=35.0.0" types-pyOpenSSL = "*" -[[package]] -name = "types-requests" -version = "2.31.0.6" -description = "Typing stubs for requests" -optional = false -python-versions = ">=3.7" -files = [ - {file = "types-requests-2.31.0.6.tar.gz", hash = "sha256:cd74ce3b53c461f1228a9b783929ac73a666658f223e28ed29753771477b3bd0"}, - {file = "types_requests-2.31.0.6-py3-none-any.whl", hash = "sha256:a2db9cb228a81da8348b49ad6db3f5519452dd20a9c1e1a868c83c5fe88fd1a9"}, -] - -[package.dependencies] -types-urllib3 = "*" - [[package]] name = "types-requests" version = "2.31.0.20240311" @@ -9138,17 +9152,6 @@ files = [ {file = "types_toml-0.10.8.20240310-py3-none-any.whl", hash = "sha256:627b47775d25fa29977d9c70dc0cbab3f314f32c8d8d0c012f2ef5de7aaec05d"}, ] -[[package]] -name = "types-urllib3" -version = "1.26.25.14" -description = "Typing stubs for urllib3" -optional = false -python-versions = "*" -files = [ - {file = "types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f"}, - {file = "types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e"}, -] - [[package]] name = "typing" version = "3.7.4.3" @@ -9245,22 +9248,6 @@ files = [ [package.extras] dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake8-commas", "flake8-comprehensions", "flake8-continuation", "flake8-datetimez", "flake8-docstrings", "flake8-import-order", "flake8-literal", "flake8-modern-annotations", "flake8-noqa", "flake8-pyproject", "flake8-requirements", "flake8-typechecking-import", "flake8-use-fstring", "mypy", "pep8-naming", "types-PyYAML"] -[[package]] -name = "urllib3" -version = "1.26.18" -description = "HTTP library with thread-safe connection pooling, file post, and more." 
-optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" -files = [ - {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"}, - {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"}, -] - -[package.extras] -brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] -socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] - [[package]] name = "urllib3" version = "2.0.7" @@ -9336,7 +9323,6 @@ files = [ [package.dependencies] PyYAML = "*" -urllib3 = {version = "<2", markers = "platform_python_implementation == \"PyPy\" or python_version < \"3.10\""} wrapt = "*" yarl = "*" @@ -9895,4 +9881,4 @@ extended-testing = ["aiosqlite", "aleph-alpha-client", "anthropic", "arxiv", "as [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "5b2a17ed079fa4cc1776f0474a9e73a428c10bf30a22b7185d2f7a77b2d146e5" +content-hash = "dcaae2110a70843fa3cb375618bebbe16b3da9bfdbc1e471e57f144d0906f58b" From e1182a17eb6db103640b8aa6d79b9f91ef244044 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 25 Mar 2024 18:22:26 -0700 Subject: [PATCH 30/30] fmt --- .../tests/integration_tests/chat_models/test_premai.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/libs/community/tests/integration_tests/chat_models/test_premai.py b/libs/community/tests/integration_tests/chat_models/test_premai.py index 30f4ab5723760..fae9b4135fb3c 100644 --- a/libs/community/tests/integration_tests/chat_models/test_premai.py +++ b/libs/community/tests/integration_tests/chat_models/test_premai.py @@ -17,7 +17,6 @@ def chat() -> ChatPremAI: return ChatPremAI(project_id=8) -@pytest.mark.scheduled def test_chat_premai() -> None: """Test ChatPremAI wrapper.""" chat = ChatPremAI(project_id=8) @@ -27,7 +26,6 @@ def test_chat_premai() -> None: assert isinstance(response.content, str) -@pytest.mark.scheduled def test_chat_prem_system_message() -> None: """Test ChatPremAI wrapper for system message""" chat = ChatPremAI(project_id=8) @@ -38,14 +36,12 @@ def test_chat_prem_system_message() -> None: assert isinstance(response.content, str) -@pytest.mark.scheduled def test_chat_prem_model() -> None: """Test ChatPremAI wrapper handles model_name.""" chat = ChatPremAI(model="foo", project_id=8) assert chat.model == "foo" -@pytest.mark.scheduled def test_chat_prem_generate() -> None: """Test ChatPremAI wrapper with generate.""" chat = ChatPremAI(project_id=8) @@ -60,14 +56,12 @@ def test_chat_prem_generate() -> None: assert generation.text == generation.message.content -@pytest.mark.scheduled async def test_prem_invoke(chat: ChatPremAI) -> None: """Tests chat completion with invoke""" result = chat.invoke("How is the weather in New York today?") assert isinstance(result.content, str) -@pytest.mark.scheduled def test_prem_streaming() -> None: """Test streaming tokens from Prem.""" chat = ChatPremAI(project_id=8, streaming=True)
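
[Editor's note — not part of any committed diff: the integration tests trimmed in this final patch hit the live Prem service, so they need a real `PREMAI_API_KEY`, network access, and an existing LaunchPad project. A hand-run smoke check mirroring `test_prem_streaming` above might look like the following sketch, assuming the full series is applied and the `premai` extra is installed.]

```python
# Mirrors test_prem_streaming: assumes PREMAI_API_KEY is exported and
# project_id=8 (the placeholder used throughout this series) exists.
from langchain_community.chat_models import ChatPremAI

chat = ChatPremAI(project_id=8, streaming=True)

for chunk in chat.stream("Write me a short poem about the sea"):
    # Each streamed chunk should carry string content.
    assert isinstance(chunk.content, str)
    print(chunk.content, end="", flush=True)
```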