feat: Add LLM service to standardize LLM calls
pranavrajs committed Dec 8, 2024
1 parent d397f04 commit f183f75
Showing 11 changed files with 461 additions and 16 deletions.
1 change: 0 additions & 1 deletion .github/workflows/ci.yml
@@ -13,6 +13,5 @@ jobs:
      - uses: pnpm/action-setup@v4
      - name: Install pnpm dependencies
        run: pnpm i

      - name: Run tests
        run: pnpm test
3 changes: 3 additions & 0 deletions packages/core/package.json
@@ -7,5 +7,8 @@
  },
  "devDependencies": {
    "vitest": "^2.1.8"
  },
  "dependencies": {
    "openai": "^4.76.0"
  }
}
25 changes: 25 additions & 0 deletions packages/core/src/errors
@@ -22,3 +22,28 @@ export class ExecutionError extends AgentError {
    super(message, 'EXECUTION_ERROR');
  }
}

export class InvalidProviderError extends AgentError {
  constructor(message: string) {
    super(message, 'INVALID_PROVIDER_ERROR');
  }
}

export class ContentParsingError extends AgentError {
  constructor(message: string) {
    super(message, 'CONTENT_PARSER_ERROR');
  }
}

export class ProviderError extends AgentError {
  constructor(message: string) {
    super(message, 'PROVIDER_ERROR');
  }
}

export class LLMModelError extends AgentError {
  constructor(message: string) {
    super(message, 'LLM_MODEL_ERROR');
  }
}
61 changes: 61 additions & 0 deletions packages/core/src/llm/README.md
@@ -0,0 +1,61 @@
# LLM

## Overview
The LLM service provides a unified interface for calling Large Language Model providers (OpenAI, Anthropic, Google). Only the OpenAI provider is implemented today; support for the others is planned.

## Configuration
### LLMServiceConfig
- `provider`: Optional, defaults to `'openai'`
  - Supported: `'openai'`; planned: `'anthropic'`, `'google'`
- `apiKey`: Required string
  - Authentication key for the selected provider
- `defaultModel`: Optional string, defaults to `'gpt-4o'`
  - Model identifier to use for requests
- `logger`: Optional `Console` interface, defaults to the global `console`
  - Used for error logging and debugging
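
A minimal construction sketch (the import path is an illustrative assumption; use however the package actually exposes the `LLM` class):

```typescript
import { LLM } from './llm'; // hypothetical import path

const llm = new LLM({
  apiKey: process.env.OPENAI_API_KEY ?? '',
  // provider defaults to 'openai' and defaultModel to 'gpt-4o',
  // so apiKey is the only required field.
});
```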

### Call Method
**Input Parameters:**
- `messages`: Array of chat completion messages
- `tools`: Optional array of chat completion tools

**Returns:**
- `Promise<LLMResult>` resolving to either:
  - `{ content: string }` for standard responses
  - `{ content: string | null, toolsCall: array }` for tool-call responses
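
A call sketch (assuming the `llm` instance from the configuration example; because responses are requested as JSON objects, the prompt must ask the model for JSON):

```typescript
const result = await llm.call([
  { role: 'system', content: 'Reply with a JSON object of the form { "content": string }.' },
  { role: 'user', content: 'Summarize this repository in one sentence.' },
]);

console.log(result.content); // the `content` field parsed out of the model's JSON reply
```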

## Response Formats
### Standard Response
```typescript
{
  content: string // JSON-parsed content from LLM
}
```

### Tool Call Response
```typescript
{
  content: string | null,
  toolsCall: Array<{
    id: string,
    type: string,
    function: {
      name: string,
      arguments: string
    }
  }>
}
```
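
A dispatch sketch for tool-call responses (assuming an `llm` instance and `messages` array as above; the `get_weather` tool and its schema are hypothetical):

```typescript
const result = await llm.call(messages, [
  {
    type: 'function',
    function: {
      name: 'get_weather', // hypothetical tool
      description: 'Get the current weather for a city',
      parameters: {
        type: 'object',
        properties: { city: { type: 'string' } },
        required: ['city'],
      },
    },
  },
]);

if (result.toolsCall?.length) {
  for (const call of result.toolsCall) {
    const args = JSON.parse(call.function.arguments);
    // dispatch to your own implementation of the named tool here
    console.log(`Tool requested: ${call.function.name}`, args);
  }
} else {
  console.log(result.content);
}
```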

## Error Handling
### Error Types
- `ContentParsingError`: JSON parsing failures
- `InvalidProviderError`: Unsupported or invalid providers
- `LLMModelError`: Invalid model specifications
- `ProviderError`: API communication errors
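
Note that `call()` itself never rejects with these errors: they are caught internally, logged through the configured logger, and a fallback `LLMResult` is returned. The constructor, by contrast, does throw. A sketch (`'mistral'` is deliberately not a supported provider; import paths are illustrative):

```typescript
// Import paths mirror the service's own relative imports; adjust as needed.
import { LLM } from './llm';
import { Provider } from './types';
import { InvalidProviderError } from './errors';

try {
  const llm = new LLM({ provider: 'mistral' as Provider, apiKey: 'sk-...' });
} catch (error) {
  if (error instanceof InvalidProviderError) {
    console.error('Bad provider:', error.message);
  }
}
```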

## Notes
- Currently only OpenAI is supported
- Responses are requested with `response_format: { type: "json_object" }`, so prompts must instruct the model to reply with JSON
- TODO: streaming support
163 changes: 163 additions & 0 deletions packages/core/src/llm/index.ts
@@ -0,0 +1,163 @@
import OpenAI from 'openai';
import { LLMResult, LLMServiceConfig, Provider } from '../types';
import { ContentParsingError, InvalidProviderError, LLMModelError, ProviderError } from '../errors';
import { ensureError } from '../utils';

const DEFAULT_MODEL = 'gpt-4o' as const;

export class LLM {
  private readonly client: OpenAI;
  private readonly model: string;
  private readonly logger: Console;
  private readonly provider: Provider;

  constructor({
    provider = 'openai',
    apiKey,
    defaultModel = DEFAULT_MODEL,
    logger = console
  }: LLMServiceConfig) {
    this.provider = this.validateProvider(provider);
    this.client = this.initializeClient(apiKey);
    this.model = this.validateModel(defaultModel);
    this.logger = logger;
  }

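  // Dispatches to the provider-specific implementation. Any error thrown
  // below is routed through handleError, which logs it and resolves with a
  // fallback LLMResult instead of rejecting to the caller.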
  async call(
    messages: OpenAI.Chat.ChatCompletionMessageParam[],
    tools: OpenAI.Chat.ChatCompletionTool[] = [],
  ): Promise<LLMResult> {
    try {
      switch (this.provider) {
        case 'openai':
          // await here so a rejected provider call is caught below and normalized
          return await this.callOpenAI(messages, tools, this.model);
        case 'anthropic':
          throw new InvalidProviderError('Anthropic support not yet implemented');
        case 'google':
          throw new InvalidProviderError('Google support not yet implemented');
        default:
          throw new InvalidProviderError(`Unsupported provider: ${this.provider}`);
      }
    } catch (error) {
      return this.handleError(error);
    }
  }

  private async callOpenAI(
    messages: OpenAI.Chat.ChatCompletionMessageParam[],
    tools: OpenAI.Chat.ChatCompletionTool[],
    model: string
  ): Promise<LLMResult> {
    try {
      const response = await this.client.chat.completions.create({
        model,
        messages,
        response_format: { type: "json_object" },
        tools: tools?.length ? tools : undefined,
      });

      const message = response.choices[0].message;
      return this.prepareResult(message);
    } catch (error) {
      throw new ProviderError(
        `Failed to call OpenAI API: ${ensureError(error).message}`,
      );
    }
  }

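  // Shapes the raw OpenAI message into an LLMResult: tool calls are passed
  // through unchanged, while plain content is JSON-parsed.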
  private prepareResult(message: OpenAI.Chat.ChatCompletionMessage): LLMResult {
    if (message.tool_calls) {
      return this.prepareToolCallResult(message);
    }
    return this.prepareContentResult(message.content);
  }

  private prepareToolCallResult(message: OpenAI.Chat.ChatCompletionMessage): LLMResult {
    return {
      toolsCall: message.tool_calls,
      content: message.content
    };
  }

  private prepareContentResult(content: string | null | undefined): LLMResult {
    try {
      const trimmedContent = content?.trim() ?? "";
      const parsed = this.parseJsonContent(trimmedContent);
      return { content: parsed.content };
    } catch (error) {
      throw new ContentParsingError(
        `Failed to prepare content result: ${ensureError(error).message}`
      );
    }
  }

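  // Requests are made with response_format: { type: "json_object" }, so the
  // model is expected to reply with a JSON object carrying a `content` field.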
  private parseJsonContent(content: string): { content: string } {
    try {
      return JSON.parse(content);
    } catch (error) {
      throw new ContentParsingError(
        `Failed to parse JSON content: ${ensureError(error).message}`
      );
    }
  }

  private initializeClient(apiKey: string): OpenAI {
    try {
      switch (this.provider) {
        case 'openai':
          return new OpenAI({ apiKey });
        case 'anthropic':
          throw new Error('Anthropic support is not yet implemented');
        case 'google':
          throw new Error('Google support is not yet implemented');
        default:
          throw new Error(`Unsupported provider: ${this.provider}`);
      }
    } catch (error) {
      throw new InvalidProviderError(
        `Failed to initialize ${this.provider} client: ${ensureError(error).message}`
      );
    }
  }

  private validateProvider(provider: Provider): Provider {
    const validProviders: Provider[] = ['openai', 'anthropic', 'google'];
    if (!validProviders.includes(provider)) {
      throw new InvalidProviderError(
        `Invalid provider. Must be one of: ${validProviders.join(', ')}`
      );
    }
    return provider;
  }

  private validateModel(model: string): string {
    if (!model || typeof model !== 'string' || model.trim().length === 0) {
      throw new LLMModelError('Model name must be a non-empty string');
    }
    return model.trim();
  }

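  // Normalizes, classifies, and logs the error, then returns a generic
  // LLMResult rather than rethrowing, so call() resolves even on failure.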
  private handleError(error: unknown): LLMResult {
    const normalizedError = ensureError(error);
    let errorMessage: string;

    if (error instanceof ContentParsingError) {
      errorMessage = `Content parsing error: ${normalizedError.message}`;
    } else if (error instanceof ProviderError) {
      errorMessage = `Provider error: ${normalizedError.message}`;
    } else if (error instanceof LLMModelError) {
      errorMessage = `LLM model error: ${normalizedError.message}`;
    } else {
      errorMessage = `Unexpected error: ${normalizedError.message}`;
    }

    this.logger.error({
      message: errorMessage,
      timestamp: new Date().toISOString(),
      stack: normalizedError.stack,
    });

    return { content: `An error occurred while processing your request. ${errorMessage}` };
  }
}
File renamed without changes.
File renamed without changes.
21 changes: 21 additions & 0 deletions packages/core/src/types/index.ts
@@ -0,0 +1,21 @@
import OpenAI from "openai";

export type FunctionInput = {
  type: string;
  description: string;
  required?: boolean;
};

export type Provider = 'openai' | 'anthropic' | 'google';

export interface LLMResult {
  content?: string | null;
  toolsCall?: OpenAI.Chat.ChatCompletionMessage['tool_calls'];
}

export interface LLMServiceConfig {
  provider?: Provider;
  apiKey: string;
  defaultModel?: string;
  logger?: Console;
}
5 changes: 0 additions & 5 deletions packages/core/types/index.ts

This file was deleted.
