
chore(participant) Wire up telemetry for user prompts VSCODE-606 #836

Merged 7 commits on Sep 27, 2024
49 changes: 26 additions & 23 deletions src/participant/participant.ts
@@ -39,6 +39,7 @@ import {
} from '../telemetry/telemetryService';
import { DocsChatbotAIService } from './docsChatbotAIService';
import type TelemetryService from '../telemetry/telemetryService';
+import type { PromptResult } from './prompts/promptBase';
import { processStreamWithIdentifiers } from './streamParsing';
import type { PromptIntent } from './prompts/intent';

@@ -164,10 +165,10 @@ export default class ParticipantController {
}

async _getChatResponse({
-messages,
+prompt,
token,
}: {
-messages: vscode.LanguageModelChatMessage[];
+prompt: PromptResult;
token: vscode.CancellationToken;
}): Promise<vscode.LanguageModelChatResponse> {
const model = await getCopilotModel();
@@ -176,20 +177,22 @@
throw new Error('Copilot model not found');
}

-return await model.sendRequest(messages, {}, token);
+this._telemetryService.trackCopilotParticipantPrompt(prompt.stats);

+return await model.sendRequest(prompt.messages, {}, token);
}

async streamChatResponse({
-messages,
+prompt,
stream,
token,
}: {
-messages: vscode.LanguageModelChatMessage[];
+prompt: PromptResult;
stream: vscode.ChatResponseStream;
token: vscode.CancellationToken;
}): Promise<void> {
const chatResponse = await this._getChatResponse({
-messages,
+prompt,
token,
});
for await (const fragment of chatResponse.text) {
@@ -226,16 +229,16 @@
}

async streamChatResponseContentWithCodeActions({
-messages,
+prompt,
stream,
token,
}: {
-messages: vscode.LanguageModelChatMessage[];
+prompt: PromptResult;
stream: vscode.ChatResponseStream;
token: vscode.CancellationToken;
}): Promise<void> {
const chatResponse = await this._getChatResponse({
-messages,
+prompt,
token,
});

@@ -254,15 +257,15 @@
// This will stream all of the response content and create a string from it.
// It should only be used when the entire response is needed at one time.
async getChatResponseContent({
-messages,
+prompt,
token,
}: {
-messages: vscode.LanguageModelChatMessage[];
+prompt: PromptResult;
token: vscode.CancellationToken;
}): Promise<string> {
let responseContent = '';
const chatResponse = await this._getChatResponse({
-messages,
+prompt,
token,
});
for await (const fragment of chatResponse.text) {
@@ -278,14 +281,14 @@
stream: vscode.ChatResponseStream,
token: vscode.CancellationToken
): Promise<ChatResult> {
-const messages = await Prompts.generic.buildMessages({
+const prompt = await Prompts.generic.buildMessages({
request,
context,
connectionNames: this._getConnectionNames(),
});

await this.streamChatResponseContentWithCodeActions({
-messages,
+prompt,
token,
stream,
});
@@ -334,14 +337,14 @@
request: vscode.ChatRequest;
token: vscode.CancellationToken;
}): Promise<PromptIntent> {
-const messages = await Prompts.intent.buildMessages({
+const prompt = await Prompts.intent.buildMessages({
connectionNames: this._getConnectionNames(),
request,
context,
});

const responseContent = await this.getChatResponseContent({
-messages,
+prompt,
token,
});

@@ -708,7 +711,7 @@
connectionNames: this._getConnectionNames(),
});
const responseContentWithNamespace = await this.getChatResponseContent({
-messages: messagesWithNamespace,
+prompt: messagesWithNamespace,
token,
});
const { databaseName, collectionName } =
@@ -1043,7 +1046,7 @@
return schemaRequestChatResult(context.history);
}

-const messages = await Prompts.schema.buildMessages({
+const prompt = await Prompts.schema.buildMessages({
request,
context,
databaseName,
@@ -1054,7 +1057,7 @@
...(sampleDocuments ? { sampleDocuments } : {}),
});
await this.streamChatResponse({
-messages,
+prompt,
stream,
token,
});
@@ -1147,7 +1150,7 @@
);
}

-const messages = await Prompts.query.buildMessages({
+const prompt = await Prompts.query.buildMessages({
request,
context,
databaseName,
@@ -1158,7 +1161,7 @@
});

await this.streamChatResponseContentWithCodeActions({
-messages,
+prompt,
stream,
token,
});
@@ -1230,14 +1233,14 @@
]
): Promise<void> {
const [request, context, stream, token] = args;
-const messages = await Prompts.generic.buildMessages({
+const prompt = await Prompts.generic.buildMessages({
request,
context,
connectionNames: this._getConnectionNames(),
});

await this.streamChatResponseContentWithCodeActions({
-messages,
+prompt,
stream,
token,
});
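The net effect in this file: every handler now threads the full PromptResult through to _getChatResponse, which records prompt.stats exactly once per model request. A minimal sketch of the new calling convention (the handler body here is illustrative, not part of this diff):

const prompt = await Prompts.generic.buildMessages({
  request,
  context,
  connectionNames: this._getConnectionNames(),
});

// prompt.messages feeds the model; prompt.stats is tracked inside
// _getChatResponse just before model.sendRequest is called.
await this.streamChatResponseContentWithCodeActions({ prompt, stream, token });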
46 changes: 41 additions & 5 deletions src/participant/prompts/promptBase.ts
@@ -1,5 +1,6 @@
import * as vscode from 'vscode';
import type { ChatResult, ParticipantResponseType } from '../constants';
+import type { ParticipantPromptProperties } from '../../telemetry/telemetryService';

export interface PromptArgsBase {
request: {
@@ -10,14 +11,27 @@ export interface PromptArgsBase {
connectionNames: string[];
}

+export interface UserPromptResponse {
+prompt: string;
+hasSampleDocs: boolean;
+}
+
+export interface PromptResult {
+messages: vscode.LanguageModelChatMessage[];
+stats: ParticipantPromptProperties;
+}

export abstract class PromptBase<TArgs extends PromptArgsBase> {
protected abstract getAssistantPrompt(args: TArgs): string;

-protected getUserPrompt(args: TArgs): Promise<string> {
-return Promise.resolve(args.request.prompt);
+protected getUserPrompt(args: TArgs): Promise<UserPromptResponse> {
+return Promise.resolve({
+prompt: args.request.prompt,
+hasSampleDocs: false,
+});
}

-async buildMessages(args: TArgs): Promise<vscode.LanguageModelChatMessage[]> {
+async buildMessages(args: TArgs): Promise<PromptResult> {
let historyMessages = this.getHistoryMessages(args);
// If the current user's prompt is a connection name, and the last
// message was to connect. We want to use the last
@@ -49,13 +63,35 @@ export abstract class PromptBase<TArgs extends PromptArgsBase> {
}
}

-return [
+const { prompt, hasSampleDocs } = await this.getUserPrompt(args);
+const messages = [
// eslint-disable-next-line new-cap
vscode.LanguageModelChatMessage.Assistant(this.getAssistantPrompt(args)),
...historyMessages,
// eslint-disable-next-line new-cap
-vscode.LanguageModelChatMessage.User(await this.getUserPrompt(args)),
+vscode.LanguageModelChatMessage.User(prompt),
];

+return {
+messages,
+stats: this.getStats(messages, args, hasSampleDocs),
+};
}

+protected getStats(
+messages: vscode.LanguageModelChatMessage[],
+{ request }: TArgs,
+hasSampleDocs: boolean
+): ParticipantPromptProperties {
+return {
+total_message_length: messages.reduce(
+(acc, message) => acc + message.content.length,
+0
+),
+user_input_length: request.prompt.length,
+has_sample_documents: hasSampleDocs,
+command: request.command || 'generic',
+};
+}

// When passing the history to the model we only want contextual messages
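Worth noting from this file's changes: getStats measures the fully assembled message list, so total_message_length includes the assistant preamble and any history, not just the user's text. An illustrative calculation (values made up for the example):

// eslint-disable-next-line new-cap
const assistant = vscode.LanguageModelChatMessage.Assistant(
  'You are a MongoDB expert.' // 25 characters
);
// eslint-disable-next-line new-cap
const user = vscode.LanguageModelChatMessage.User('find all users'); // 14 characters

// getStats([assistant, user], { request }, false) would report:
// total_message_length: 39  (25 + 14)
// user_input_length:    14  (request.prompt.length)
// has_sample_documents: false
// command:              request.command || 'generic'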
20 changes: 11 additions & 9 deletions src/participant/prompts/query.ts
@@ -2,8 +2,8 @@ import * as vscode from 'vscode';
import type { Document } from 'bson';

import { getStringifiedSampleDocuments } from '../sampleDocuments';
+import type { PromptArgsBase, UserPromptResponse } from './promptBase';
import { codeBlockIdentifier } from '../constants';
-import type { PromptArgsBase } from './promptBase';
import { PromptBase } from './promptBase';

interface QueryPromptArgs extends PromptArgsBase {
@@ -59,21 +59,23 @@ db.getCollection('');\n`;
request,
schema,
sampleDocuments,
-}: QueryPromptArgs): Promise<string> {
+}: QueryPromptArgs): Promise<UserPromptResponse> {
let prompt = request.prompt;
prompt += `\nDatabase name: ${databaseName}\n`;
prompt += `Collection name: ${collectionName}\n`;
if (schema) {
prompt += `Collection schema: ${schema}\n`;
}
-if (sampleDocuments) {
-prompt += await getStringifiedSampleDocuments({
-sampleDocuments,
-prompt,
-});
-}

-return prompt;
+const sampleDocumentsPrompt = await getStringifiedSampleDocuments({
+sampleDocuments,
+prompt,
+});
+
+return {
+prompt: `${prompt}${sampleDocumentsPrompt}`,
+hasSampleDocs: !!sampleDocumentsPrompt,
+};
}

get emptyRequestResponse(): string {
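Since getStringifiedSampleDocuments is now awaited unconditionally, hasSampleDocs is derived from the helper's output rather than from whether sampleDocuments was passed in. Assuming the helper resolves to an empty string when there is nothing to append (an assumption, not shown in this diff), the flag coerces cleanly:

// Illustrative outcomes of `hasSampleDocs: !!sampleDocumentsPrompt`:
const none = ''; // assumed result when no sample documents are included
const some = '\nSample documents: [{ "_id": 1 }]'; // hypothetical non-empty result
!!none; // false -> has_sample_documents: false
!!some; // true  -> has_sample_documents: true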
15 changes: 9 additions & 6 deletions src/participant/prompts/schema.ts
@@ -1,3 +1,4 @@
+import type { UserPromptResponse } from './promptBase';
import { PromptBase, type PromptArgsBase } from './promptBase';

export const DOCUMENTS_TO_SAMPLE_FOR_SCHEMA_PROMPT = 100;
@@ -11,7 +12,6 @@ export interface SchemaPromptArgs extends PromptArgsBase {
collectionName: string;
schema: string;
amountOfDocumentsSampled: number;
-connectionNames: string[];
}

export class SchemaPrompt extends PromptBase<SchemaPromptArgs> {
@@ -30,13 +30,16 @@ Amount of documents sampled: ${amountOfDocumentsSampled}.`;
collectionName,
request,
schema,
-}: SchemaPromptArgs): Promise<string> {
+}: SchemaPromptArgs): Promise<UserPromptResponse> {
const prompt = request.prompt;
-return Promise.resolve(`${
-prompt ? `The user provided additional information: "${prompt}"\n` : ''
-}Database name: ${databaseName}
+return Promise.resolve({
+prompt: `${
+prompt ? `The user provided additional information: "${prompt}"\n` : ''
+}Database name: ${databaseName}
Collection name: ${collectionName}
Schema:
-${schema}`);
+${schema}`,
+hasSampleDocs: false,
+});
}
}
12 changes: 12 additions & 0 deletions src/telemetry/telemetryService.ts
@@ -108,6 +108,13 @@ type ParticipantResponseFailedProperties = {
error_name: ParticipantErrorTypes;
};

+export type ParticipantPromptProperties = {
+command: string;
+user_input_length: number;
+total_message_length: number;
+has_sample_documents: boolean;
+};

export function chatResultFeedbackKindToTelemetryValue(
kind: vscode.ChatResultFeedbackKind
): TelemetryFeedbackKind {
@@ -160,6 +167,7 @@
PARTICIPANT_FEEDBACK = 'Participant Feedback',
PARTICIPANT_WELCOME_SHOWN = 'Participant Welcome Shown',
PARTICIPANT_RESPONSE_FAILED = 'Participant Response Failed',
+PARTICIPANT_PROMPT_SUBMITTED = 'Participant Prompt Submitted',
}

export enum ParticipantErrorTypes {
@@ -422,4 +430,8 @@
trackCopilotParticipantFeedback(props: ParticipantFeedbackProperties): void {
this.track(TelemetryEventTypes.PARTICIPANT_FEEDBACK, props);
}

+trackCopilotParticipantPrompt(stats: ParticipantPromptProperties): void {
+this.track(TelemetryEventTypes.PARTICIPANT_PROMPT_SUBMITTED, stats);
+}
}
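End to end, each submitted prompt now produces a single track call under the new event name. A hypothetical invocation with illustrative values:

telemetryService.trackCopilotParticipantPrompt({
  command: 'query',
  user_input_length: 14,
  total_message_length: 1024,
  has_sample_documents: true,
});
// -> this.track('Participant Prompt Submitted', { command: 'query', ... })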