Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Fixes #149: Add Temperature and Max Tokens Configuration #176

Open
wants to merge 8 commits into
base: staging
Choose a base branch
from
Open
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
feat: add model temperature and max tokens config (#149)
ahmad2b committed Nov 4, 2024
commit e1b358b104ab56da7105092f00c1d56718f65cb1
3 changes: 3 additions & 0 deletions package.json
Original file line number Diff line number Diff line change
@@ -45,6 +45,8 @@
"@radix-ui/react-hover-card": "^1.1.2",
"@radix-ui/react-icons": "^1.3.0",
"@radix-ui/react-label": "^2.1.0",
"@radix-ui/react-navigation-menu": "^1.2.1",
"@radix-ui/react-popover": "^1.1.2",
"@radix-ui/react-progress": "^1.1.0",
"@radix-ui/react-select": "^2.1.1",
"@radix-ui/react-slider": "^1.2.1",
@@ -60,6 +62,7 @@
"@vercel/kv": "^2.0.0",
"class-variance-authority": "^0.7.0",
"clsx": "^2.1.1",
"cmdk": "1.0.0",
"date-fns": "^4.1.0",
"dotenv": "^16.4.5",
"eslint-plugin-unused-imports": "^4.1.4",
6 changes: 4 additions & 2 deletions src/agent/open-canvas/nodes/customAction.ts
Original file line number Diff line number Diff line change
@@ -39,11 +39,13 @@ export const customAction = async (
throw new Error("No custom quick action ID found.");
}

const { modelName, modelProvider } =
const { modelName, modelProvider, modelConfig } =
getModelNameAndProviderFromConfig(config);
const smallModel = await initChatModel(modelName, {
temperature: 0.5,
modelProvider,
// temperature: 0.5,
temperature: modelConfig.temperatureRange.current,
maxTokens: modelConfig.maxTokens.current,
});

const store = ensureStoreInConfig(config);
16 changes: 9 additions & 7 deletions src/agent/open-canvas/nodes/generateArtifact.ts
Original file line number Diff line number Diff line change
@@ -1,20 +1,20 @@
import { OpenCanvasGraphAnnotation, OpenCanvasGraphReturnType } from "../state";
import { NEW_ARTIFACT_PROMPT } from "../prompts";
import { LangGraphRunnableConfig } from "@langchain/langgraph";
import { initChatModel } from "langchain/chat_models/universal";
import { z } from "zod";
import {
ArtifactCodeV3,
ArtifactMarkdownV3,
ArtifactV3,
PROGRAMMING_LANGUAGES,
Reflections,
} from "../../../types";
import { z } from "zod";
import {
ensureStoreInConfig,
formatReflections,
getModelNameAndProviderFromConfig,
} from "../../utils";
import { LangGraphRunnableConfig } from "@langchain/langgraph";
import { initChatModel } from "langchain/chat_models/universal";
import { NEW_ARTIFACT_PROMPT } from "../prompts";
import { OpenCanvasGraphAnnotation, OpenCanvasGraphReturnType } from "../state";

/**
* Generate a new artifact based on the user's query.
@@ -23,11 +23,13 @@ export const generateArtifact = async (
state: typeof OpenCanvasGraphAnnotation.State,
config: LangGraphRunnableConfig
): Promise<OpenCanvasGraphReturnType> => {
const { modelName, modelProvider } =
const { modelName, modelProvider, modelConfig } =
getModelNameAndProviderFromConfig(config);
const smallModel = await initChatModel(modelName, {
temperature: 0.5,
modelProvider,
// temperature: 0.5,
temperature: modelConfig.temperatureRange.current,
maxTokens: modelConfig.maxTokens.current,
});

const store = ensureStoreInConfig(config);
6 changes: 4 additions & 2 deletions src/agent/open-canvas/nodes/replyToGeneralInput.ts
Original file line number Diff line number Diff line change
@@ -18,11 +18,13 @@ export const replyToGeneralInput = async (
state: typeof OpenCanvasGraphAnnotation.State,
config: LangGraphRunnableConfig
): Promise<OpenCanvasGraphReturnType> => {
const { modelName, modelProvider } =
const { modelName, modelProvider, modelConfig } =
getModelNameAndProviderFromConfig(config);
const smallModel = await initChatModel(modelName, {
temperature: 0.5,
modelProvider,
// temperature: 0.5,
temperature: modelConfig.temperatureRange.current,
maxTokens: modelConfig.maxTokens.current,
});

const prompt = `You are an AI assistant tasked with responding to the users question.
42 changes: 22 additions & 20 deletions src/agent/open-canvas/nodes/rewriteArtifact.ts
Original file line number Diff line number Diff line change
@@ -1,30 +1,30 @@
import { OpenCanvasGraphAnnotation, OpenCanvasGraphReturnType } from "../state";
import {
GET_TITLE_TYPE_REWRITE_ARTIFACT,
OPTIONALLY_UPDATE_META_PROMPT,
UPDATE_ENTIRE_ARTIFACT_PROMPT,
} from "../prompts";
import { LangGraphRunnableConfig } from "@langchain/langgraph";
import { initChatModel } from "langchain/chat_models/universal";
import { z } from "zod";
import { getArtifactContent } from "../../../contexts/utils";
import {
ensureStoreInConfig,
formatArtifactContent,
formatReflections,
getModelNameAndProviderFromConfig,
} from "../../utils";
isArtifactCodeContent,
isArtifactMarkdownContent,
} from "../../../lib/artifact_content_types";
import {
ArtifactCodeV3,
ArtifactMarkdownV3,
ArtifactV3,
PROGRAMMING_LANGUAGES,
Reflections,
} from "../../../types";
import { LangGraphRunnableConfig } from "@langchain/langgraph";
import { z } from "zod";
import { getArtifactContent } from "../../../contexts/utils";
import {
isArtifactCodeContent,
isArtifactMarkdownContent,
} from "../../../lib/artifact_content_types";
import { initChatModel } from "langchain/chat_models/universal";
ensureStoreInConfig,
formatArtifactContent,
formatReflections,
getModelNameAndProviderFromConfig,
} from "../../utils";
import {
GET_TITLE_TYPE_REWRITE_ARTIFACT,
OPTIONALLY_UPDATE_META_PROMPT,
UPDATE_ENTIRE_ARTIFACT_PROMPT,
} from "../prompts";
import { OpenCanvasGraphAnnotation, OpenCanvasGraphReturnType } from "../state";

export const rewriteArtifact = async (
state: typeof OpenCanvasGraphAnnotation.State,
@@ -51,12 +51,14 @@ export const rewriteArtifact = async (
"The language of the code artifact. This should be populated with the programming language if the user is requesting code to be written, or 'other', in all other cases."
),
});
const { modelName, modelProvider } =
const { modelName, modelProvider, modelConfig } =
getModelNameAndProviderFromConfig(config);
const toolCallingModel = (
await initChatModel(modelName, {
temperature: 0,
modelProvider,
temperature: 0,
// temperature: modelConfig.temperatureRange.current,
maxTokens: modelConfig.maxTokens.current,
})
)
.bindTools(
6 changes: 4 additions & 2 deletions src/agent/open-canvas/nodes/rewriteArtifactTheme.ts
Original file line number Diff line number Diff line change
@@ -21,11 +21,13 @@ export const rewriteArtifactTheme = async (
state: typeof OpenCanvasGraphAnnotation.State,
config: LangGraphRunnableConfig
): Promise<OpenCanvasGraphReturnType> => {
const { modelName, modelProvider } =
const { modelName, modelProvider, modelConfig } =
getModelNameAndProviderFromConfig(config);
const smallModel = await initChatModel(modelName, {
temperature: 0.5,
modelProvider,
// temperature: 0.5,
temperature: modelConfig.temperatureRange.current,
maxTokens: modelConfig.maxTokens.current,
});

const store = ensureStoreInConfig(config);
6 changes: 4 additions & 2 deletions src/agent/open-canvas/nodes/rewriteCodeArtifactTheme.ts
Original file line number Diff line number Diff line change
@@ -16,11 +16,13 @@ export const rewriteCodeArtifactTheme = async (
state: typeof OpenCanvasGraphAnnotation.State,
config: LangGraphRunnableConfig
): Promise<OpenCanvasGraphReturnType> => {
const { modelName, modelProvider } =
const { modelName, modelProvider, modelConfig } =
getModelNameAndProviderFromConfig(config);
const smallModel = await initChatModel(modelName, {
temperature: 0.5,
modelProvider,
// temperature: 0.5,
temperature: modelConfig.temperatureRange.current,
maxTokens: modelConfig.maxTokens.current,
});

const currentArtifactContent = state.artifact
17 changes: 12 additions & 5 deletions src/agent/open-canvas/nodes/updateArtifact.ts
Original file line number Diff line number Diff line change
@@ -1,11 +1,15 @@
import { ChatOpenAI } from "@langchain/openai";
import { OpenCanvasGraphAnnotation, OpenCanvasGraphReturnType } from "../state";
import { UPDATE_HIGHLIGHTED_ARTIFACT_PROMPT } from "../prompts";
import { ensureStoreInConfig, formatReflections } from "../../utils";
import { ArtifactCodeV3, ArtifactV3, Reflections } from "../../../types";
import { LangGraphRunnableConfig } from "@langchain/langgraph";
import { ChatOpenAI } from "@langchain/openai";
import { getArtifactContent } from "../../../contexts/utils";
import { isArtifactCodeContent } from "../../../lib/artifact_content_types";
import { ArtifactCodeV3, ArtifactV3, Reflections } from "../../../types";
import {
ensureStoreInConfig,
formatReflections,
getModelNameAndProviderFromConfig,
} from "../../utils";
import { UPDATE_HIGHLIGHTED_ARTIFACT_PROMPT } from "../prompts";
import { OpenCanvasGraphAnnotation, OpenCanvasGraphReturnType } from "../state";

/**
* Update an existing artifact based on the user's query.
@@ -14,9 +18,12 @@ export const updateArtifact = async (
state: typeof OpenCanvasGraphAnnotation.State,
config: LangGraphRunnableConfig
): Promise<OpenCanvasGraphReturnType> => {
const { modelConfig } = getModelNameAndProviderFromConfig(config);
const smallModel = new ChatOpenAI({
model: "gpt-4o",
temperature: 0,
// temperature: modelConfig.temperatureRange.current,
maxTokens: modelConfig.maxTokens.current,
});

const store = ensureStoreInConfig(config);
12 changes: 9 additions & 3 deletions src/agent/open-canvas/nodes/updateHighlightedText.ts
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
import { getModelNameAndProviderFromConfig } from "@/agent/utils";
import { LangGraphRunnableConfig } from "@langchain/langgraph";
import { ChatOpenAI } from "@langchain/openai";
import { OpenCanvasGraphAnnotation, OpenCanvasGraphReturnType } from "../state";
import { ArtifactMarkdownV3 } from "../../../types";
import { getArtifactContent } from "../../../contexts/utils";
import { isArtifactMarkdownContent } from "../../../lib/artifact_content_types";
import { ArtifactMarkdownV3 } from "../../../types";
import { OpenCanvasGraphAnnotation, OpenCanvasGraphReturnType } from "../state";

const PROMPT = `You are an expert AI writing assistant, tasked with rewriting some text a user has selected. The selected text is nested inside a larger 'block'. You should always respond with ONLY the updated text block in accordance with the user's request.
You should always respond with the full markdown text block, as it will simply replace the existing block in the artifact.
@@ -27,11 +29,15 @@ Ensure you reply with the FULL text block, including the updated selected text.
* Update an existing artifact based on the user's query.
*/
export const updateHighlightedText = async (
state: typeof OpenCanvasGraphAnnotation.State
state: typeof OpenCanvasGraphAnnotation.State,
config: LangGraphRunnableConfig
): Promise<OpenCanvasGraphReturnType> => {
const { modelConfig } = getModelNameAndProviderFromConfig(config);
const model = new ChatOpenAI({
model: "gpt-4o",
temperature: 0,
// temperature: modelConfig.temperatureRange.current,
maxTokens: modelConfig.maxTokens.current,
}).withConfig({ runName: "update_highlighted_markdown" });

const currentArtifactContent = state.artifact
17 changes: 16 additions & 1 deletion src/agent/utils.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import { isArtifactCodeContent } from "@/lib/artifact_content_types";
import { CustomModelConfig } from "@/types";
import { BaseStore, LangGraphRunnableConfig } from "@langchain/langgraph";
import { ArtifactCodeV3, ArtifactMarkdownV3, Reflections } from "../types";

@@ -114,33 +115,47 @@ export const formatArtifactContentWithTemplate = (

export const getModelNameAndProviderFromConfig = (
config: LangGraphRunnableConfig
): { modelName: string; modelProvider: string } => {
): {
modelName: string;
modelProvider: string;
modelConfig: CustomModelConfig;
} => {
const customModelName = config.configurable?.customModelName as string;
if (!customModelName) {
throw new Error("Model name is missing in config.");
}

const modelConfig = config.configurable?.modelConfig as CustomModelConfig;
if (!modelConfig) {
throw new Error("Custom Model config is missing in config.");
}

if (customModelName.includes("gpt-")) {
return {
modelName: customModelName,
modelProvider: "openai",
modelConfig,
};
}
if (customModelName.includes("claude-")) {
return {
modelName: customModelName,
modelProvider: "anthropic",
modelConfig,
};
}
if (customModelName.includes("fireworks/")) {
return {
modelName: customModelName,
modelProvider: "fireworks",
modelConfig,
};
}
if (customModelName.includes("gemini-")) {
return {
modelName: customModelName,
modelProvider: "google-genai",
modelConfig,
};
}

14 changes: 9 additions & 5 deletions src/components/canvas/canvas.tsx
Original file line number Diff line number Diff line change
@@ -1,25 +1,26 @@
"use client";

import { ArtifactRenderer } from "@/components/artifacts/ArtifactRenderer";
import { ContentComposerChatInterface } from "./content-composer";
import { ALL_MODEL_NAMES } from "@/constants";
import { useGraphContext } from "@/contexts/GraphContext";
import { useToast } from "@/hooks/use-toast";
import { getLanguageTemplate } from "@/lib/get_language_template";
import { cn } from "@/lib/utils";
import {
ArtifactCodeV3,
ArtifactMarkdownV3,
ArtifactV3,
CustomModelConfig,
ProgrammingLanguageOptions,
} from "@/types";
import { useEffect, useState } from "react";
import { useGraphContext } from "@/contexts/GraphContext";
import React from "react";
import React, { useEffect, useState } from "react";
import { ContentComposerChatInterface } from "./content-composer";

export function CanvasComponent() {
const { threadData, graphData, userData } = useGraphContext();
const { user } = userData;
const { threadId, clearThreadsWithNoValues, setModelName } = threadData;
const { threadId, clearThreadsWithNoValues, setModelName, setModelConfig } =
threadData;
const { setArtifact } = graphData;
const { toast } = useToast();
const [chatStarted, setChatStarted] = useState(false);
@@ -91,6 +92,9 @@ export function CanvasComponent() {
setModelName(
thread?.metadata?.customModelName as ALL_MODEL_NAMES
);
setModelConfig(
thread?.metadata?.modelConfig as CustomModelConfig
);
} else {
setChatStarted(false);
}
Loading