diff --git a/providers/src/anthropic/v00.00.00000/provider.yaml b/providers/src/anthropic/v00.00.00000/provider.yaml
new file mode 100644
index 00000000..8ee6ace3
--- /dev/null
+++ b/providers/src/anthropic/v00.00.00000/provider.yaml
@@ -0,0 +1,19 @@
+id: anthropic
+name: anthropic
+version: v00.00.00000
+providerServices:
+  messages:
+    id: 'messages:v00.00.00000'
+    name: messages
+    preferred: true
+    service:
+      $ref: anthropic/v00.00.00000/services/messages.yaml
+    title: Anthropic API - Messages
+    version: v00.00.00000
+    description: 'Given a list of messages comprising a conversation, the model will return a response.'
+config:
+  auth:
+    type: custom
+    location: header
+    name: x-api-key
+    credentialsenvvar: "ANTHROPIC_API_KEY"
\ No newline at end of file
diff --git a/providers/src/anthropic/v00.00.00000/services/messages.yaml b/providers/src/anthropic/v00.00.00000/services/messages.yaml
new file mode 100644
index 00000000..5f160d4d
--- /dev/null
+++ b/providers/src/anthropic/v00.00.00000/services/messages.yaml
@@ -0,0 +1,295 @@
+openapi: 3.0.3
+info:
+  title: Anthropic Messages API
+  description: API for creating messages with Anthropic models for single queries or multi-turn conversations.
+  version: 1.0.0
+servers:
+  - url: https://api.anthropic.com/v1
+    description: Anthropic API server
+paths:
+  /messages:
+    post:
+      summary: Create a Message
+      description: Send a structured list of input messages with text and/or image content, and the model will generate the next message in the conversation.
+      operationId: createMessage
+      tags:
+        - Messages
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              type: object
+              required:
+                - model
+                - messages
+                - max_tokens
+              properties:
+                model:
+                  type: string
+                  description: The model that will complete your prompt.
+                messages:
+                  type: array
+                  description: Input messages specifying prior conversational turns.
+                  items:
+                    type: object
+                    required:
+                      - role
+                      - content
+                    properties:
+                      role:
+                        type: string
+                        enum: [user, assistant]
+                        description: The role of the message sender.
+                      content:
+                        oneOf:
+                          - type: string
+                            description: A single string content block.
+                          - type: array
+                            items:
+                              type: object
+                              required:
+                                - type
+                              properties:
+                                type:
+                                  type: string
+                                  enum: [text, image]
+                                  description: The type of content block.
+                                text:
+                                  type: string
+                                  description: Text content for the message.
+                                source:
+                                  type: object
+                                  required:
+                                    - type
+                                    - media_type
+                                    - data
+                                  properties:
+                                    type:
+                                      type: string
+                                      enum: [base64]
+                                      description: The type of source for the image.
+                                    media_type:
+                                      type: string
+                                      enum: [image/jpeg, image/png, image/gif, image/webp]
+                                      description: The media type of the image.
+                                    data:
+                                      type: string
+                                      format: byte
+                                      description: Base64-encoded image data.
+                max_tokens:
+                  type: integer
+                  description: The maximum number of tokens to generate before stopping.
+                metadata:
+                  type: object
+                  description: Metadata about the request.
+                stop_sequences:
+                  type: array
+                  items:
+                    type: string
+                  description: Custom sequences that will cause the model to stop generating.
+                stream:
+                  type: boolean
+                  description: Whether to incrementally stream the response using server-sent events.
+                system:
+                  type: string
+                  description: A system prompt providing context and instructions to Claude.
+                temperature:
+                  type: number
+                  format: float
+                  minimum: 0.0
+                  maximum: 1.0
+                  default: 1.0
+                  description: The amount of randomness injected into the response.
+                tool_choice:
+                  type: object
+                  properties:
+                    choice:
+                      type: string
+                      enum: [auto, any, specific_tool]
+                      description: Specifies how the model should choose a tool.
+                tools:
+                  type: array
+                  items:
+                    type: object
+                    required:
+                      - name
+                      - input_schema
+                    properties:
+                      name:
+                        type: string
+                        description: The name of the tool.
+                      description:
+                        type: string
+                        description: A description of the tool.
+                      input_schema:
+                        type: object
+                        description: JSON schema for the tool input shape.
+                top_k:
+                  type: integer
+                  description: Only sample from the top K options for each subsequent token.
+                top_p:
+                  type: number
+                  format: float
+                  description: Use nucleus sampling for token generation.
+      parameters:
+        - name: anthropic-beta
+          in: header
+          required: false
+          schema:
+            type: string
+          description: Optional header to specify beta version(s) you want to use. Use a comma-separated list for multiple versions.
+        - name: anthropic-version
+          in: header
+          required: true
+          schema:
+            type: string
+          description: The version of the Anthropic API you want to use.
+        # - name: x-api-key
+        #   in: header
+        #   required: true
+        #   schema:
+        #     type: string
+        #   description: Your unique API key for authentication.
+      responses:
+        '200':
+          description: A successful response from the API
+          content:
+            application/json:
+              schema:
+                type: object
+                required:
+                  - id
+                  - type
+                  - role
+                  - content
+                  - model
+                  - stop_reason
+                  - usage
+                properties:
+                  id:
+                    type: string
+                    description: Unique object identifier.
+                  type:
+                    type: string
+                    enum: [message]
+                    default: message
+                    description: Object type, which is always "message" for Messages API.
+                  role:
+                    type: string
+                    enum: [assistant]
+                    default: assistant
+                    description: The conversational role of the generated message.
+                  content:
+                    type: array
+                    description: The generated content blocks by the model.
+                    items:
+                      type: object
+                      required:
+                        - type
+                      properties:
+                        type:
+                          type: string
+                          enum: [text, tool_use, tool_result]
+                          description: The type of content block.
+                        text:
+                          type: string
+                          description: Text content (if type is "text").
+                        tool_use_id:
+                          type: string
+                          description: The ID of the tool use (if type is "tool_result").
+                        input:
+                          type: object
+                          description: Tool input (if type is "tool_use").
+                  model:
+                    type: string
+                    description: The model that handled the request.
+                  stop_reason:
+                    type: string
+                    enum: [end_turn, max_tokens, stop_sequence, tool_use]
+                    nullable: true
+                    description: The reason the model stopped generating.
+                  stop_sequence:
+                    type: string
+                    nullable: true
+                    description: The stop sequence that caused the model to stop, if applicable.
+                  usage:
+                    type: object
+                    description: Information about token usage and rate limits.
+                    properties:
+                      input_tokens:
+                        type: integer
+                        description: Number of tokens in the input.
+                      output_tokens:
+                        type: integer
+                        description: Number of tokens in the output.
+                      total_tokens:
+                        type: integer
+                        description: Total number of tokens used.
+        '400':
+          description: Invalid request parameters.
+        '401':
+          description: Unauthorized. Invalid API key.
+        '500':
+          description: Internal server error.
+components:
+  x-stackQL-resources:
+    message:
+      id: anthropic.messages.message
+      name: message
+      title: Message
+      resTokens: []
+      methods:
+        create_message:
+          operation:
+            $ref: '#/paths/~1messages/post'
+            operationId: createMessage
+          response:
+            mediaType: application/json
+            openAPIDocKey: '200'
+      sqlVerbs:
+        select:
+          - $ref: '#/components/x-stackQL-resources/message/methods/create_message'
+        insert: []
+        update: []
+        replace: []
+        delete: []
+    claude_35_chat:
+      name: claude_35_chat
+      id: anthropic.messages.claude_35_chat
+      config:
+        views:
+          select:
+            predicate: sqlDialect == "sqlite3"
+            ddl: |-
+              SELECT
+                model as model,
+                role as role,
+                stop_reason as stop_reason,
+                stop_sequence as stop_sequence,
+                JSON_EXTRACT(usage, '$.input_tokens') as input_tokens,
+                JSON_EXTRACT(usage, '$.output_tokens') as output_tokens,
+                JSON_EXTRACT(json_each.value, '$.text') as content
+              FROM
+                anthropic.messages.message, JSON_EACH(content)
+              WHERE "anthropic-version" = '2023-06-01'
+              AND data__model = 'claude-3-5-sonnet-20240620'
+              AND data__max_tokens = 1024
+              AND data__messages = '[{"role": "user", "content": "one sentence summary of stackql"}]'
+            fallback:
+              predicate: sqlDialect == "postgres"
+              ddl: |-
+                SELECT
+                  model as model,
+                  role as role,
+                  stop_reason as stop_reason,
+                  stop_sequence as stop_sequence,
+                  json_extract_path_text(usage, 'input_tokens') as input_tokens,
+                  json_extract_path_text(usage, 'output_tokens') as output_tokens,
+                  json_extract_path_text(json_each.value, 'text') as content
+                FROM
+                  anthropic.messages.message, json_array_elements_text(content)
+                WHERE "anthropic-version" = '2023-06-01'
+                AND data__model = 'claude-3-5-sonnet-20240620'
+                AND data__max_tokens = 1024
+                AND data__messages = '[{"role": "user", "content": "one sentence summary of stackql"}]'
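
The resource and view added above can be exercised from a StackQL session once these documents are published to a registry the CLI can reach. The queries below are a hypothetical usage sketch, not part of the diff: they assume REGISTRY PULL resolves the anthropic provider from such a registry, and that the ANTHROPIC_API_KEY environment variable referenced by the provider's auth config is exported in the calling shell. The SQLite-dialect form is shown; column and predicate names mirror the view DDL above.

-- hypothetical usage sketch (assumes the provider above is available in the
-- configured StackQL registry and ANTHROPIC_API_KEY is set)
REGISTRY PULL anthropic;

-- query the claude_35_chat view; model, max_tokens and messages predicates
-- are already baked into the view's DDL
SELECT content, input_tokens, output_tokens
FROM anthropic.messages.claude_35_chat;

-- equivalent query against the underlying message resource, supplying the
-- required header ("anthropic-version") and data__-prefixed body fields
SELECT model, stop_reason,
       JSON_EXTRACT(usage, '$.output_tokens') AS output_tokens
FROM anthropic.messages.message
WHERE "anthropic-version" = '2023-06-01'
  AND data__model = 'claude-3-5-sonnet-20240620'
  AND data__max_tokens = 1024
  AND data__messages = '[{"role": "user", "content": "one sentence summary of stackql"}]';

Because the provider's auth block reads the key from ANTHROPIC_API_KEY and injects it into the x-api-key header, no credential appears in the queries themselves.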