diff --git a/js/plugins/googleai/src/context-caching/helpers.ts b/js/plugins/googleai/src/context-caching/helpers.ts
index 543c75b37..e7c4bb159 100644
--- a/js/plugins/googleai/src/context-caching/helpers.ts
+++ b/js/plugins/googleai/src/context-caching/helpers.ts
@@ -41,8 +41,6 @@ export function getContentForCache(
     throw new Error('No history provided for context caching');
   }
 
-  logger.info('model version', modelVersion);
-
   const cachedContent: CachedContent = {
     model: modelVersion,
     contents: [],
@@ -55,10 +53,7 @@ export function getContentForCache(
   // We split history into two parts: the part that should be cached and the part that should not
   const slicedHistory = chatRequest.history.slice(0, endOfCachedContents);
 
-  logger.info(
-    'last of cached contents',
-    JSON.stringify(slicedHistory.map((m) => m.role))
-  );
+
   cachedContent.contents = slicedHistory;
 
   let newHistory;
@@ -70,8 +65,6 @@ export function getContentForCache(
   }
   chatRequest.history = newHistory;
 
-  logger.info('new history', JSON.stringify(newHistory.map((m) => m.role)));
-
   if (request.config?.contextCache?.context) {
     cachedContent.systemInstruction = toGeminiSystemInstruction({
       role: 'system',
@@ -140,7 +133,7 @@ export function validateContextCacheRequest(
 ): boolean {
   // Check if contextCache is requested in the config
   if (!request.config?.contextCache) {
-    logger.info('Context caching is not requested');
+    logger.debug('Context caching is not requested');
     return false;
   }
 
@@ -183,13 +176,13 @@ export function validateContextCacheRequest(
     });
   }
 
-  logger.info('Context caching is valid for this request');
+  logger.debug('Context caching is valid for this request');
   // If all checks pass, content should be cached
   return true;
 }
 
 /**
- * Clears ALL
+ * Utility to clear ALL caches.
  * @param cacheManager
  * @param maxPages
 * @param pageSize
diff --git a/js/plugins/googleai/src/context-caching/index.ts b/js/plugins/googleai/src/context-caching/index.ts
index 4321e014a..0a68fa9fd 100644
--- a/js/plugins/googleai/src/context-caching/index.ts
+++ b/js/plugins/googleai/src/context-caching/index.ts
@@ -38,7 +38,6 @@ export async function handleContextCache(
   chatRequest: StartChatParams,
   modelVersion: string
 ): Promise<{ cache: CachedContent; newChatRequest: StartChatParams }> {
-  logger.info('Using context cache feature');
   const cacheManager = new GoogleAICacheManager(apiKey);
 
   const { cachedContent, chatRequest: newChatRequest } = getContentForCache(
@@ -46,21 +45,19 @@ export async function handleContextCache(
     chatRequest,
     modelVersion
   );
-
-  logger.info('Cached content:', cachedContent);
   cachedContent.model = modelVersion;
   const cacheKey = generateCacheKey(cachedContent);
   cachedContent.displayName = cacheKey;
-  logger.info(`Generated cache key: ${cacheKey}`);
+  logger.debug(`Generated cache key: ${cacheKey}`);
 
   let cache = await lookupContextCache(cacheManager, cacheKey);
-  logger.info(`Found cache: ${cache ? 'true' : 'false'}`);
+  logger.debug(`Cache hit: ${cache ? 'true' : 'false'}`);
 
   if (!cache) {
     try {
       logger.debug('No cache found, creating one.');
       cache = await cacheManager.create(cachedContent);
-      logger.info(`Created new cache entry with key: ${cacheKey}`);
+      logger.debug(`Created new cache entry with key: ${cacheKey}`);
     } catch (cacheError) {
       throw new GenkitError({
         status: 'INTERNAL',
diff --git a/js/plugins/googleai/src/gemini.ts b/js/plugins/googleai/src/gemini.ts
index 6a405cac0..954fc086c 100644
--- a/js/plugins/googleai/src/gemini.ts
+++ b/js/plugins/googleai/src/gemini.ts
@@ -580,8 +580,6 @@ export function googleAIModel(
       return fromGeminiCandidate(candidate, jsonMode);
     };
 
-    logger.info('request config', request.config);
-
    let chatRequest = {
       systemInstruction,
       generationConfig,
@@ -618,7 +616,7 @@ export function googleAIModel(
     const client = new GoogleGenerativeAI(apiKey!);
 
     if (cache) {
-      logger.info('Using cached content');
+      logger.debug('Using context cache');
       genModel = client.getGenerativeModelFromCachedContent(cache, options);
     } else {
       genModel = client.getGenerativeModel(
@@ -628,7 +626,6 @@ export function googleAIModel(
         options
       );
     }
-    logger.info('created generative model client');
 
     if (streamingCallback) {
       const result = await genModel
@@ -653,7 +650,6 @@ export function googleAIModel(
         custom: response,
       };
     } else {
-      logger.info(chatRequest!.history![0].role);
       const result = await genModel
         .startChat(chatRequest)
         .sendMessage(msg.parts, options);
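
For reference, the lookup-or-create flow that the `handleContextCache` hunks above adjust, reduced to a standalone sketch. `GoogleAICacheManager`, its `list`/`create` methods, and the `CachedContent` type come from `@google/generative-ai`; `findCacheByDisplayName` is a hypothetical stand-in for the plugin's `lookupContextCache` helper (the real helper pages through results), and `getOrCreateContextCache` is illustrative, not the plugin's actual code:

```ts
import type { CachedContent } from '@google/generative-ai';
import { GoogleAICacheManager } from '@google/generative-ai/server';

// Hypothetical stand-in for the plugin's lookupContextCache helper; the real
// helper pages through results, while this sketch checks only the first page.
async function findCacheByDisplayName(
  cacheManager: GoogleAICacheManager,
  displayName: string
): Promise<CachedContent | undefined> {
  const { cachedContents } = await cacheManager.list();
  return cachedContents?.find((c) => c.displayName === displayName);
}

// Illustrative lookup-or-create, mirroring handleContextCache: the generated
// cache key doubles as the displayName so later requests can find the entry.
async function getOrCreateContextCache(
  cacheManager: GoogleAICacheManager,
  cachedContent: CachedContent,
  cacheKey: string
): Promise<CachedContent> {
  cachedContent.displayName = cacheKey;
  const existing = await findCacheByDisplayName(cacheManager, cacheKey);
  if (existing) {
    return existing; // cache hit: reuse the stored contents
  }
  return cacheManager.create(cachedContent); // cache miss: create a new entry
}
```

Keying the lookup on `displayName` keeps the flow idempotent: a repeat request whose cached contents produce the same key finds the existing entry instead of creating a duplicate.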