From a459a3667002f00890dd75ef5393720b647f590c Mon Sep 17 00:00:00 2001
From: colegottdank
Date: Tue, 10 Dec 2024 09:42:47 -0800
Subject: [PATCH] Update LLM Cost SEO (#3032)

---
 bifrost/app/llm-cost/CalculatorInfo.tsx       | 141 +++++++---
 bifrost/app/llm-cost/ModelPriceCalculator.tsx | 266 +++---------------
 .../[provider]/model/[model]/page.tsx         |  12 +
 3 files changed, 162 insertions(+), 257 deletions(-)

diff --git a/bifrost/app/llm-cost/CalculatorInfo.tsx b/bifrost/app/llm-cost/CalculatorInfo.tsx
index 6f6784fcad..6e386bbe97 100644
--- a/bifrost/app/llm-cost/CalculatorInfo.tsx
+++ b/bifrost/app/llm-cost/CalculatorInfo.tsx
@@ -1,9 +1,12 @@
+import { providers } from "@/packages/cost/providers/mappings";
 import {
   Accordion,
   AccordionItem,
   AccordionTrigger,
   AccordionContent,
 } from "@radix-ui/react-accordion";
+import { ChevronRight } from "lucide-react";
+import Link from "next/link";
 import React from "react";
 
 // Function to format provider names
@@ -25,6 +28,24 @@ export function formatProviderName(provider: string): string {
   return formattingMap[provider.toUpperCase()] || provider.toUpperCase();
 }
 
+// Resolve a concrete model name to its parent model family, if the
+// provider defines one in modelDetails.
+const getParentModelInfo = (provider: string, model: string) => {
+  const providerData = providers.find(
+    (p) => p.provider.toLowerCase() === provider.toLowerCase()
+  );
+  if (!providerData?.modelDetails) return null;
+
+  for (const details of Object.values(providerData.modelDetails)) {
+    if (details.matches.includes(model)) {
+      return {
+        name: details.searchTerms[0],
+        matches: details.matches,
+      };
+    }
+  }
+  return null;
+};
 
 // Reusable FAQ component
 const LLMPricingFAQ = () => {
-          onClick={() => setFaq1Open(!faq1Open)}>
+          onClick={() => setFaq1Open(!faq1Open)}
+        >
           How does LLM API pricing work?
           LLM API pricing typically works on a pay-per-use model, where
           you're charged based on the number of tokens processed. Tokens
-          are pieces of text, with prices varying for input (prompts) and output
-          (completions). Prices can differ significantly between providers and
-          models. To optimize costs, consider using Helicone's caching
-          feature, which can significantly reduce API calls and save money.
-          Learn more about caching at{" "}
+          are pieces of text, with prices varying for input (prompts) and
+          output (completions). Prices can differ significantly between
+          providers and models. To optimize costs, consider using
+          Helicone's caching feature, which can significantly reduce API
+          calls and save money. Learn more about caching at{" "}
-          onClick={() => setFaq2Open(!faq2Open)}>
-          What are the most cost-effective LLM APIs for startups?
+          onClick={() => setFaq2Open(!faq2Open)}
+        >
+          What are the most cost-effective LLM APIs for startups?
-          The most cost-effective LLM API depends on your specific use case and
-          volume. Generally, open-source and smaller parameter models like Llama
-          3 or Mistral can be more affordable for startups compared to larger
-          providers like OpenAI and Anthropic. Our calculator allows you to
-          compare prices across providers. Additionally, Helicone offers tools
-          to monitor costs and optimize usage, helping startups make informed
-          decisions and control expenses.
+          The most cost-effective LLM API depends on your specific use case
+          and volume. Generally, open-source and smaller parameter models like
+          Llama 3 or Mistral can be more affordable for startups compared to
+          larger providers like OpenAI and Anthropic. Our calculator allows
+          you to compare prices across providers. Additionally, Helicone
+          offers tools to monitor costs and optimize usage, helping startups
+          make informed decisions and control expenses.
-          onClick={() => setFaq3Open(!faq3Open)}>
+          onClick={() => setFaq3Open(!faq3Open)}
+        >
           How can I reduce my LLM API costs?
-          onClick={() => setFaq4Open(!faq4Open)}>
-          What's the difference between input and output tokens in LLM API pricing?
+          onClick={() => setFaq4Open(!faq4Open)}
+        >
+          What's the difference between input and output tokens in LLM
+          API pricing?
-          onClick={() => setFaq5Open(!faq5Open)}>
+          onClick={() => setFaq5Open(!faq5Open)}
+        >
           How accurate is this LLM API pricing calculator?
             d="m6 9 6 6 6-6"
           >
           Our LLM API pricing calculator is highly accurate and regularly
           updated with the latest pricing information from various providers.
           The same collection of LLM API pricing data is used within the
           Helicone platform, which supports thousands of companies in tracking
-          their spending for projects to ensure profitability. This requirement
-          for accuracy in a production environment ensures that our calculator
-          provides reliable estimates. For enterprise-level estimates or custom
-          pricing agreements, it's best to contact providers directly.
+          their spending for projects to ensure profitability. This
+          requirement for accuracy in a production environment ensures that
+          our calculator provides reliable estimates. For enterprise-level
+          estimates or custom pricing agreements, it's best to contact
+          providers directly.
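The pricing model these FAQ answers describe reduces to one linear formula: separate per-token rates for input and output. A minimal sketch of that arithmetic, using placeholder prices rather than any provider's real rates:

```typescript
// Hypothetical prices in USD per 1M tokens -- substitute real provider rates.
const INPUT_PRICE_PER_1M = 2.5; // input (prompt) tokens
const OUTPUT_PRICE_PER_1M = 10; // output (completion) tokens

// Cost is linear in both token counts, with distinct input/output rates.
function estimateCost(inputTokens: number, outputTokens: number): number {
  return (
    (inputTokens / 1_000_000) * INPUT_PRICE_PER_1M +
    (outputTokens / 1_000_000) * OUTPUT_PRICE_PER_1M
  );
}

// 10,000 prompt tokens and 2,000 completion tokens:
// 0.01 * 2.5 + 0.002 * 10 = $0.045
console.log(estimateCost(10_000, 2_000)); // ~0.045
```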
@@ -270,8 +318,8 @@ const ContributingSection = () => (
       Contribute to the open-source LLM API pricing database
-      As an open-source project, we welcome contributions from the community to keep the pricing data accurate and
-      up-to-date.
+      As an open-source project, we welcome contributions from the community to
+      keep the pricing data accurate and up-to-date.
       How to contribute:
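For orientation, a contributed pricing entry looks roughly like the sketch below. The shape is an assumption inferred from how this patch reads the data (`provider.costs` and `cost.model.value` in `generateStaticParams`); when contributing, mirror an existing provider file in `packages/cost` rather than this sketch:

```typescript
// Hypothetical pricing row; field names beyond model.value are assumptions.
const exampleCostRow = {
  model: {
    operator: "equals", // assumed matching strategy
    value: "gpt-4o-2024-08-06", // matched against request model names
  },
  cost: {
    prompt_token: 0.0000025, // USD per input token (placeholder rate)
    completion_token: 0.00001, // USD per output token (placeholder rate)
  },
};
```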

@@ -330,6 +378,25 @@ const CalculatorInfo: React.FC = ({ model, provider }) => {
       {model && provider ? (
         <>
+          {(() => {
+            const parentInfo = getParentModelInfo(provider, model);
+            return (
+              parentInfo &&
+              parentInfo.name !== model && (
+                  View {parentInfo.name} model family pricing
+              )
+            );
+          })()}

          What is the {formatProviderName(provider)} {model} API Pricing
@@ -690,7 +757,9 @@ const CalculatorInfo: React.FC = ({ model, provider }) => {
              />
-            Compare multiple models
+              Compare multiple models
             Use the calculator to compare costs across different models and
             providers to find the best fit for your needs.
@@ -713,7 +782,9 @@ const CalculatorInfo: React.FC = ({ model, provider }) => {
              />
-            Adjust token counts
+              Adjust token counts
             Experiment with different input and output token counts to
             estimate costs for various use cases.
@@ -736,7 +807,9 @@ const CalculatorInfo: React.FC = ({ model, provider }) => {
              />
-            Consider your usage volume
+              Consider your usage volume
             Remember to factor in your expected usage volume when comparing
             costs across different providers.

diff --git a/bifrost/app/llm-cost/ModelPriceCalculator.tsx b/bifrost/app/llm-cost/ModelPriceCalculator.tsx
index 0c8bf29a9f..c84e753ac6 100644
--- a/bifrost/app/llm-cost/ModelPriceCalculator.tsx
+++ b/bifrost/app/llm-cost/ModelPriceCalculator.tsx
@@ -40,208 +40,6 @@ type CostData = {
   totalCost: number;
 };
 
-// Custom MultiSelect component
-const MultiSelect = ({
-  label,
-  options,
-  selected,
-  onToggle,
-}: {
-  label: string;
-  options: { value: string; label: string }[];
-  selected: string[];
-  onToggle: (value: string) => void;
-}) => {
-  const [isOpen, setIsOpen] = useState(false);
-
-  return (
-    ...
-  );
-};
-
-const ProviderPill = ({
-  provider,
-  models,
-  selectedModels,
-  onRemoveProvider,
-  onAddModel,
-  onRemoveModel,
-}: {
-  provider: string;
-  models: string[];
-  selectedModels: string[];
-  onRemoveProvider: (provider: string) => void;
-  onAddModel: (model: string) => void;
-  onRemoveModel: (model: string) => void;
-}) => {
-  const [isOpen, setIsOpen] = useState(false);
-
-  return (
-    ...
-  );
-};
-
-const ProviderItem = ({
-  provider,
-  models,
-  onAddModel,
-}: {
-  provider: string;
-  models: string[];
-  onAddModel: (model: string) => void;
-}) => {
-  const [showModels, setShowModels] = useState(false);
-  const timeoutRef = useRef(null);
-  const itemRef = useRef(null);
-
-  const handleMouseEnter = () => {
-    if (timeoutRef.current) clearTimeout(timeoutRef.current);
-    timeoutRef.current = setTimeout(() => setShowModels(true), 200);
-  };
-
-  const handleMouseLeave = () => {
-    if (timeoutRef.current) clearTimeout(timeoutRef.current);
-    timeoutRef.current = setTimeout(() => setShowModels(false), 300);
-  };
-
-  const handleClick = () => {
-    setShowModels(!showModels);
-  };
-
-  useEffect(() => {
-    return () => {
-      if (timeoutRef.current) clearTimeout(timeoutRef.current);
-    };
-  }, []);
-
-  return (
-    ...
-  );
-};
-
 const FilterSection = ({
   providers,
   selectedProviders,
@@ -280,13 +78,6 @@ const FilterSection = ({
-      {/* */}
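The next hunk rewrites the URL lookup and adds a fallback: when no cost row matches the model slug exactly, the slug is treated as a parent-family name and resolved to the family's first concrete model. A standalone sketch of that control flow, under the same assumed `modelDetails` shape as above (`resolveModel` and its parameters are hypothetical names, not part of the patch):

```typescript
// Exact match first, then parent-family match via searchTerms[0],
// falling back to the family's first concrete model's cost row.
function resolveModel(
  costData: { model: string; provider: string }[],
  modelDetails: Record<string, { searchTerms: string[]; matches: string[] }>,
  slug: string
) {
  const exact = costData.find(
    (d) => d.model.toLowerCase() === slug.toLowerCase()
  );
  if (exact) return exact;

  for (const details of Object.values(modelDetails)) {
    if (details.searchTerms[0].toLowerCase() === slug.toLowerCase()) {
      const first = details.matches[0]; // use the first matching model's costs
      return costData.find((d) => d.model === first);
    }
  }
  return undefined;
}
```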
-          data.model.toLowerCase() ===
-            decodeURIComponent(urlModel).toLowerCase() &&
-          data.provider.toLowerCase() ===
-            decodeURIComponent(urlProvider).toLowerCase()
+          data.model.toLowerCase() === decodedModel.toLowerCase() &&
+          data.provider.toLowerCase() === decodedProvider.toLowerCase()
       );
+
+      // If no exact cost row matched, check whether the URL refers to a
+      // parent model family.
+      if (!selectedModel) {
+        const providerData = providers.find(
+          (p) => p.provider.toLowerCase() === decodedProvider.toLowerCase()
+        );
+        if (providerData?.modelDetails) {
+          for (const details of Object.values(providerData.modelDetails)) {
+            if (
+              details.searchTerms[0].toLowerCase() ===
+              decodedModel.toLowerCase()
+            ) {
+              // Use the first matching model's costs
+              const firstMatchModel = details.matches[0];
+              selectedModel = costData.find(
+                (data) =>
+                  data.model === firstMatchModel &&
+                  data.provider.toLowerCase() === decodedProvider.toLowerCase()
+              );
+              break;
+            }
+          }
+        }
+      }
+
       if (selectedModel) {
         setSelectedModelData(selectedModel);
       }
@@ -586,7 +406,7 @@ Optimize your AI API costs:`;
                 maxWidth: "100px",
               }}
             />
             {provider && model ? (
               <>
                 {formatProviderName(provider)}{" "}
                 {model}{" "}
               </>
             ) : (
               "LLM API "
             )}
             Pricing Calculator
             {provider && model
               ? `Calculate the cost of using ${model} with Helicone's free pricing tool.`
               : `Calculate the cost of using any LLM API with Helicone's free pricing tool.`}
@@ -834,7 +654,7 @@
@@ -844,7 +664,7 @@
@@ -854,7 +674,7 @@
@@ -864,7 +684,7 @@
@@ -874,7 +694,7 @@
@@ -884,7 +704,7 @@
@@ -894,7 +714,7 @@

diff --git a/bifrost/app/llm-cost/provider/[provider]/model/[model]/page.tsx b/bifrost/app/llm-cost/provider/[provider]/model/[model]/page.tsx
index 7db1a5ec6d..55d4f02f65 100644
--- a/bifrost/app/llm-cost/provider/[provider]/model/[model]/page.tsx
+++ b/bifrost/app/llm-cost/provider/[provider]/model/[model]/page.tsx
@@ -32,6 +32,18 @@ export async function generateStaticParams() {
         model: encodeURIComponent(cost.model.value),
       });
     }
+
+    if (provider.modelDetails) {
+      for (const parentModel in provider.modelDetails) {
+        const searchTerms = provider.modelDetails[parentModel].searchTerms;
+        if (searchTerms && searchTerms.length > 0) {
+          paths.push({
+            provider: encodeURIComponent(provider.provider.toLowerCase()),
+            model: encodeURIComponent(searchTerms[0]),
+          });
+        }
+      }
+    }
   }
 
   return paths;
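With the new branch, each provider contributes one static path per cost row plus one per parent family (via `searchTerms[0]`), so family landing pages are prerendered alongside concrete model pages. For a hypothetical provider with two cost rows and one family, the generated params would look like:

```typescript
// Illustrative generateStaticParams output; model names are examples only.
const paths = [
  { provider: "openai", model: encodeURIComponent("gpt-4o-2024-05-13") },
  { provider: "openai", model: encodeURIComponent("gpt-4o-2024-08-06") },
  // added by the new modelDetails branch -- the family landing page:
  { provider: "openai", model: encodeURIComponent("gpt-4o") },
];
```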