diff --git a/CODEOWNERS b/CODEOWNERS index 6257ebd48e..d9e0907476 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -50,6 +50,7 @@ monitoring/opencensus @GoogleCloudPlatform/nodejs-samples-reviewers # Data & AI ai-platform @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers +generative-ai @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers automl @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers cloud-language @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers contact-center-insights @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers diff --git a/generative-ai/snippets/countTokens.js b/generative-ai/snippets/countTokens.js new file mode 100644 index 0000000000..a9f75dabbb --- /dev/null +++ b/generative-ai/snippets/countTokens.js @@ -0,0 +1,52 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const { VertexAI } = require('@google-cloud/vertexai'); + +async function countTokens( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL' +) { + // [START aiplatform_gemini_token_count] + + /** + * TODO(developer): Uncomment these variables before running the sample. 
+ */ + // const projectId = 'your-project-id'; + // const location = 'us-central1'; + // const model = 'gemini-pro'; + + // Initialize Vertex with your Cloud project and location + const vertex_ai = new VertexAI({ project: projectId, location: location }); + + // Instantiate the model + const generativeModel = vertex_ai.preview.getGenerativeModel({ + model: model, + }); + + const req = { + contents: [{ role: 'user', parts: [{ text: 'How are you doing today?' }] }], + }; + + const countTokensResp = await generativeModel.countTokens(req); + console.log('count tokens response: ', countTokensResp); + + // [END aiplatform_gemini_token_count] +} + +countTokens(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/index.js b/generative-ai/snippets/index.js new file mode 100644 index 0000000000..0449960d8e --- /dev/null +++ b/generative-ai/snippets/index.js @@ -0,0 +1,124 @@ +const { + VertexAI, + HarmBlockThreshold, + HarmCategory, +} = require('@google-cloud/vertexai'); + +const project = 'cloud-llm-preview1'; +const location = 'us-central1'; + +// Initialize Vertex with your Cloud project and location +const vertex_ai = new VertexAI({project: project, location: location}); + +// Instantiate the models +const generativeModel = vertex_ai.preview.getGenerativeModel({ + model: 'gemini-pro', + // The following parameters are optional + // They can also be passed to individual content generation requests + safety_settings: [ + { + category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, + threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, + }, + ], + generation_config: {max_output_tokens: 256}, +}); + +const generativeVisionModel = vertex_ai.preview.getGenerativeModel({ + model: 'gemini-pro-vision', +}); + +async function streamContentTextOnly() { + const req = { + contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}], + }; + + const streamingResp = await 
generativeModel.generateContentStream(req); + + for await (const item of streamingResp.stream) { + console.log('stream chunk:', item); + } + + console.log('aggregated response: ', await streamingResp.response); +} + +async function nonStreamingTextOnly() { + const req = { + contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}], + }; + + const nonstreamingResp = await generativeModel.generateContent(req); + console.log('non-streaming response: ', await nonstreamingResp.response); +} + +async function countTokens() { + const req = { + contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}], + }; + + const countTokensResp = await generativeModel.countTokens(req); + console.log('count tokens response: ', countTokensResp); +} + +async function nonStreamingChat() { + const chat = generativeModel.startChat({}); + const result1 = await chat.sendMessage('hello'); + console.log('send message result1: ', result1); + const resp1 = result1.response; + console.log('send message response1: ', resp1); + const result2 = await chat.sendMessage('what day is it today?'); + console.log('result2: ', result2); + const resp2 = result2.response; + console.log('send message response2: ', resp2); + const result3 = await chat.sendMessage('what day is it tomorrow?'); + console.log('result3: ', result3); + const resp3 = result3.response; + console.log('send message response3: ', resp3); +} + +async function streamingChat() { + const chat = generativeModel.startChat({}); + const streamResult1 = await chat.sendMessageStream('hello again'); + console.log('stream result1: ', streamResult1); + const streamResp1 = await streamResult1.response; + console.log('stream send message response1: ', streamResp1); + const streamResult2 = await chat.sendMessageStream('what is the date today?'); + console.log('stream result2: ', streamResult2); + const streamResp2 = await streamResult2.response; + console.log('stream send message response2: ', streamResp2); + const 
streamResult3 = await chat.sendMessageStream( + 'what is the date tomorrow?' + ); + console.log('stream result3: ', streamResult3); + const streamResp3 = await streamResult3.response; + console.log('stream send message response3: ', streamResp3); +} + +async function multiPartContent() { + const filePart = { + file_data: { + file_uri: 'gs://sararob_imagegeneration_test/kitten.jpeg', + mime_type: 'image/jpeg', + }, + }; + const textPart = {text: 'What is this a picture of?'}; + + const request = { + contents: [{role: 'user', parts: [textPart, filePart]}], + }; + + const generativeVisionModel = vertex_ai.preview.getGenerativeModel({ + model: 'gemini-pro-vision', + }); + + const resp = await generativeVisionModel.generateContentStream(request); + const contentResponse = await resp.response; + console.log(contentResponse.candidates[0].content); +} + +nonStreamingTextOnly(); +streamContentTextOnly(); +countTokens(); +nonStreamingChat(); +streamingChat(); +multiPartContent(); diff --git a/generative-ai/snippets/nonStreamingChat.js b/generative-ai/snippets/nonStreamingChat.js new file mode 100644 index 0000000000..f64503f5e8 --- /dev/null +++ b/generative-ai/snippets/nonStreamingChat.js @@ -0,0 +1,73 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +const {VertexAI} = require('@google-cloud/vertexai'); + +function wait(time) { + return new Promise(resolve => { + setTimeout(resolve, time); + }); +} + +async function createNonStreamingChat( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL' +) { + // TODO: Find better method. Setting delay to give api time to respond, otherwise it will 404 + // await wait(10); + + // [START aiplatform_gemini_multiturn_chat] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + // const projectId = 'your-project-id'; + // const location = 'us-central1'; + + // Initialize Vertex with your Cloud project and location + const vertexAI = new VertexAI({project: projectId, location: location}); + + // Instantiate the model + const generativeModel = vertexAI.preview.getGenerativeModel({ + model: model, + }); + + const chat = generativeModel.startChat({}); + + const chatInput1 = 'Hello'; + console.log(`User: ${chatInput1}`); + + const result1 = await chat.sendMessage(chatInput1); + const response1 = result1.response.candidates[0].content.parts[0].text; + console.log('Chat bot: ', response1); + + const chatInput2 = 'Can you tell me a scientific fun fact?'; + console.log(`User: ${chatInput2}`); + const result2 = await chat.sendMessage(chatInput2); + const response2 = result2.response.candidates[0].content.parts[0].text; + console.log('Chat bot: ', response2); + + const chatInput3 = 'How can I learn more about that?'; + console.log(`User: ${chatInput3}`); + const result3 = await chat.sendMessage(chatInput3); + const response3 = result3.response.candidates[0].content.parts[0].text; + console.log('Chat bot: ', response3); + + // [END aiplatform_gemini_multiturn_chat] +} + +createNonStreamingChat(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); \ No newline at end of file diff --git a/generative-ai/snippets/nonStreamingContent.js b/generative-ai/snippets/nonStreamingContent.js new file 
mode 100644 index 0000000000..c1b5ff9ea8 --- /dev/null +++ b/generative-ai/snippets/nonStreamingContent.js @@ -0,0 +1,64 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const {VertexAI} = require('@google-cloud/vertexai'); + +async function createNonStreamingContent( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL' +) { + // [START aiplatform_gemini_function_calling] + + /** + * TODO(developer): Uncomment these variables before running the sample. 
+ */ + // const projectId = 'your-project-id'; + // const location = 'us-central1'; + + // Initialize Vertex with your Cloud project and location + const vertexAI = new VertexAI({project: projectId, location: location}); + + // Instantiate the model + const generativeModel = vertexAI.preview.getGenerativeModel({ + model: model, + }); + + const request = { + contents: [{role: 'user', parts: [{text: 'What is Node.js?'}]}], + }; + + console.log('Prompt:'); + console.log(request.contents[0].parts[0].text); + console.log('Non-Streaming Response Text:'); + + // Create the response stream + const responseStream = await generativeModel.generateContentStream(request); + + // Wait for the response stream to complete + const aggregatedResponse = await responseStream.response; + + // Select the text from the response + const fullTextResponse = + aggregatedResponse.candidates[0].content.parts[0].text; + + console.log(fullTextResponse); + + // [END aiplatform_gemini_function_calling] +} + +createNonStreamingContent(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/nonStreamingMultipartContent.js b/generative-ai/snippets/nonStreamingMultipartContent.js new file mode 100644 index 0000000000..6e26c6ed7d --- /dev/null +++ b/generative-ai/snippets/nonStreamingMultipartContent.js @@ -0,0 +1,81 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +const {VertexAI} = require('@google-cloud/vertexai'); + +async function createNonStreamingMultipartContent( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL', + image = 'gs://generativeai-downloads/images/scones.jpg', + mimeType = 'image/jpeg' +) { + // [START aiplatform_gemini_get_started] + + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + // const projectId = 'your-project-id'; + // const location = 'us-central1'; + // const image = 'gs://generativeai-downloads/images/scones.jpg'; // Google Cloud Storage image + // const mimeType = 'image/jpeg'; + + // Initialize Vertex with your Cloud project and location + const vertexAI = new VertexAI({project: projectId, location: location}); + + // Instantiate the model + const generativeVisionModel = vertexAI.preview.getGenerativeModel({ + model: model, + }); + + // For images, the SDK supports both Google Cloud Storage URI and base64 strings + const filePart = { + file_data: { + file_uri: image, + mime_type: mimeType, + }, + }; + + const textPart = { + text: 'Use several paragraphs to describe what is happening in this picture.', + }; + + const request = { + contents: [{role: 'user', parts: [textPart, filePart]}], + }; + + console.log('Prompt Text:'); + console.log(request.contents[0].parts[0].text); + console.log('Non-Streaming Response Text:'); + + // Create the response stream + const responseStream = + await generativeVisionModel.generateContentStream(request); + + // Wait for the response stream to complete + const aggregatedResponse = await responseStream.response; + + // Select the text from the response + const fullTextResponse = + aggregatedResponse.candidates[0].content.parts[0].text; + + console.log(fullTextResponse); + + // [END aiplatform_gemini_get_started] +} + +createNonStreamingMultipartContent(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/package.json 
b/generative-ai/snippets/package.json index b1c9614f6c..765acfa217 100644 --- a/generative-ai/snippets/package.json +++ b/generative-ai/snippets/package.json @@ -10,16 +10,18 @@ "*.js" ], "scripts": { - "test": "c8 mocha -p -j 2 --timeout 2400000 test/*.js" + "test": "c8 mocha -p -j 2 --timeout 2400000 test/*.test.js" }, "dependencies": { - "@google-cloud/aiplatform": "^3.0.0" + "@google-cloud/aiplatform": "^3.0.0", + "@google-cloud/vertexai": "github:googleapis/nodejs-vertexai", + "supertest": "^6.3.3" }, "devDependencies": { "c8": "^8.0.0", "chai": "^4.2.0", "mocha": "^10.0.0", - "uuid": "^9.0.0", - "sinon": "^16.0.0" + "sinon": "^16.0.0", + "uuid": "^9.0.0" } } diff --git a/generative-ai/snippets/safetySettings.js b/generative-ai/snippets/safetySettings.js new file mode 100644 index 0000000000..92c6dad687 --- /dev/null +++ b/generative-ai/snippets/safetySettings.js @@ -0,0 +1,70 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const { VertexAI, HarmCategory, HarmBlockThreshold } = require('@google-cloud/vertexai'); + +async function createStreamContent( +) { + // [START aiplatform_gemini_safety_settings] + /** + * TODO(developer): Uncomment these variables before running the sample. 
+ */ + const projectId = 'cloud-llm-preview1'; + const location = 'us-central1'; + const model = 'gemini-pro'; + + // Initialize Vertex with your Cloud project and location + const vertexAI = new VertexAI({ project: projectId, location: location }); + + // Instantiate the model + const generativeModel = vertexAI.preview.getGenerativeModel({ + model: model, + // The following parameters are optional + // They can also be passed to individual content generation requests + safety_settings: [ + { + category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, + }, + ], + generation_config: {max_output_tokens: 256}, + }); + + const request = { + contents: [{ role: 'user', parts: [{ text: 'Tell me something dangerous.' }] }], + }; + + console.log('Prompt:'); + console.log(request.contents[0].parts[0].text); + console.log('Streaming Response Text:'); + + // Create the response stream + const responseStream = await generativeModel.generateContentStream(request); + + // Log the text response as it streams + for await (const item of responseStream.stream) { + if (item.candidates[0].finishReason === 'SAFETY') { + console.log('This response stream terminated due to safety concerns.'); + } else { + process.stdout.write(item.candidates[0].content.parts[0].text); + } + } + // [END aiplatform_gemini_safety_settings] +} + + +createStreamContent(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/sendMultiModalPromptWithImage.js b/generative-ai/snippets/sendMultiModalPromptWithImage.js new file mode 100644 index 0000000000..f777c68ec0 --- /dev/null +++ b/generative-ai/snippets/sendMultiModalPromptWithImage.js @@ -0,0 +1,32 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const {VertexAI} = require('@google-cloud/vertexai'); + +async function sendMultiModalPromptWithImage( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL' +) { + // [START aiplatform_gemini_single_turn_multi_image] + + + + // [END aiplatform_gemini_single_turn_multi_image] +} + +sendMultiModalPromptWithImage(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); \ No newline at end of file diff --git a/generative-ai/snippets/sendMultiModalPromptWithVideo.js b/generative-ai/snippets/sendMultiModalPromptWithVideo.js new file mode 100644 index 0000000000..391a1fea9c --- /dev/null +++ b/generative-ai/snippets/sendMultiModalPromptWithVideo.js @@ -0,0 +1,32 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +const {VertexAI} = require('@google-cloud/vertexai'); + +async function sendMultiModalPromptWithVideo( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL' +) { + // [START aiplatform_gemini_single_turn_video] + + + + // [END aiplatform_gemini_single_turn_video] +} + +sendMultiModalPromptWithVideo(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); \ No newline at end of file diff --git a/generative-ai/snippets/streamChat.js b/generative-ai/snippets/streamChat.js new file mode 100644 index 0000000000..1d02257996 --- /dev/null +++ b/generative-ai/snippets/streamChat.js @@ -0,0 +1,53 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const {VertexAI} = require('@google-cloud/vertexai'); + +async function createStreamChat( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL' +) { + // [START aiplatform_gemini_multiturn_chat] + + /** + * TODO(developer): Uncomment these variables before running the sample. 
+ */ + // const projectId = 'your-project-id'; + // const location = 'us-central1'; + + // Initialize Vertex with your Cloud project and location + const vertexAI = new VertexAI({project: projectId, location: location}); + + // Instantiate the model + const generativeModel = vertexAI.preview.getGenerativeModel({ + model: model, + }); + + const chat = generativeModel.startChat({}); + + const chatInput1 = 'How can I learn more about that?'; + console.log(`User: ${chatInput1}`); + const result1 = await chat.sendMessageStream(chatInput1); + for await (const item of result1.stream) { + console.log(item.candidates[0].content.parts[0].text); + } + + // [END aiplatform_gemini_multiturn_chat] +} + +createStreamChat(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/streamContent.js b/generative-ai/snippets/streamContent.js new file mode 100644 index 0000000000..17c71ff87b --- /dev/null +++ b/generative-ai/snippets/streamContent.js @@ -0,0 +1,60 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const {VertexAI} = require('@google-cloud/vertexai'); + +async function createStreamContent( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL' +) { + // [START aiplatform_gemini_function_calling] + + /** + * TODO(developer): Uncomment these variables before running the sample. 
+ */ + // const projectId = 'your-project-id'; + // const location = 'us-central1'; + + // Initialize Vertex with your Cloud project and location + const vertexAI = new VertexAI({project: projectId, location: location}); + + // Instantiate the model + const generativeModel = vertexAI.preview.getGenerativeModel({ + model: model, + }); + + const request = { + contents: [{role: 'user', parts: [{text: 'What is Node.js?'}]}], + }; + + console.log('Prompt:'); + console.log(request.contents[0].parts[0].text); + console.log('Streaming Response Text:'); + + // Create the response stream + const responseStream = await generativeModel.generateContentStream(request); + + // Log the text response as it streams + for await (const item of responseStream.stream) { + process.stdout.write(item.candidates[0].content.parts[0].text); + } + + // [END aiplatform_gemini_function_calling] +} + +createStreamContent(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/streamMultipartContent.js b/generative-ai/snippets/streamMultipartContent.js new file mode 100644 index 0000000000..f75f414dc5 --- /dev/null +++ b/generative-ai/snippets/streamMultipartContent.js @@ -0,0 +1,77 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +const {VertexAI} = require('@google-cloud/vertexai'); + +async function createStreamMultipartContent( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL', + image = 'gs://generativeai-downloads/images/scones.jpg', + mimeType = 'image/jpeg' +) { + // [START aiplatform_gemini_get_started] + + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + // const projectId = 'your-project-id'; + // const location = 'us-central1'; + // const image = 'gs://generativeai-downloads/images/scones.jpg'; // Google Cloud Storage image + // const mimeType = 'image/jpeg'; + + // Initialize Vertex with your Cloud project and location + const vertexAI = new VertexAI({project: projectId, location: location}); + + // Instantiate the model + const generativeVisionModel = vertexAI.preview.getGenerativeModel({ + model: model, + }); + + // For images, the SDK supports both Google Cloud Storage URI and base64 strings + const filePart = { + file_data: { + file_uri: image, + mime_type: mimeType, + }, + }; + + const textPart = { + text: 'Use several paragraphs to describe what is happening in this picture.', + }; + + const request = { + contents: [{role: 'user', parts: [textPart, filePart]}], + }; + + console.log('Prompt Text:'); + console.log(request.contents[0].parts[0].text); + console.log('Streaming Response Text:'); + + // Create the response stream + const responseStream = + await generativeVisionModel.generateContentStream(request); + + // Log the text response as it streams + for await (const item of responseStream.stream) { + process.stdout.write(item.candidates[0].content.parts[0].text); + } + + // [END aiplatform_gemini_get_started] +} + +createStreamMultipartContent(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/test/countTokens.test.js b/generative-ai/snippets/test/countTokens.test.js new file mode 100644 index 0000000000..df15c3cd3c --- 
/dev/null +++ b/generative-ai/snippets/test/countTokens.test.js @@ -0,0 +1,36 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +'use strict'; + +const {assert} = require('chai'); +const {describe, it} = require('mocha'); +const cp = require('child_process'); + +const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); + +describe('Count tokens', async () => { + const project = 'cloud-llm-preview1'; + const location = 'us-central1'; + const model = 'gemini-pro'; + + it('should count tokens', async () => { + const output = execSync( + `node ./countTokens.js ${project} ${location} ${model}` + ); + + // Expect 6 tokens + assert(output.match('totalTokens: 6')); + }); +}); diff --git a/generative-ai/snippets/test/nonStreamingChat.test.js b/generative-ai/snippets/test/nonStreamingChat.test.js new file mode 100644 index 0000000000..b127e33286 --- /dev/null +++ b/generative-ai/snippets/test/nonStreamingChat.test.js @@ -0,0 +1,45 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +'use strict'; + +const {assert} = require('chai'); +const {describe, it} = require('mocha'); +const cp = require('child_process'); + +const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); + +describe('Generative AI NonStreaming Chat', async () => { + const project = 'cloud-llm-preview1'; + const location = 'us-central1'; + const model = 'gemini-pro'; + + it('should create nonstreaming chat and begin the conversation the same in each instance', async () => { + const output = execSync( + `node ./nonStreamingChat.js ${project} ${location} ${model}` + ); + // Split up conversation output + const conversation = output.split('\n'); + + // Ensure that the beginning of the conversation is consistent + assert(conversation[0].match(/User: Hello/)); + assert(conversation[1].match(/Chat bot: Hello! How may I assist you?/)); + assert(conversation[2].match(/User: Can you tell me a scientific fun fact?/)); + assert(conversation[3].match(/Chat bot: Sure, here's a scientific fun fact for you:?/)); + assert(conversation[4] === ''); + + // Assert that user prompts are getting through + assert(output.match(/User: How can I learn more about that?/)); + }); +}); diff --git a/generative-ai/snippets/test/nonStreamingContent.test.js b/generative-ai/snippets/test/nonStreamingContent.test.js new file mode 100644 index 0000000000..2c7ead4e3a --- /dev/null +++ b/generative-ai/snippets/test/nonStreamingContent.test.js @@ -0,0 +1,42 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +'use strict'; + +const {assert} = require('chai'); +const {describe, it} = require('mocha'); +const cp = require('child_process'); + +const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); + +describe('Generative AI NonStreaming Content', () => { + const project = 'cloud-llm-preview1'; + const location = 'us-central1'; + const model = 'gemini-pro'; + + it('should create nonstreaming content and begin the conversation the same in each instance', async () => { + const output = execSync( + `node ./nonStreamingContent.js ${project} ${location} ${model}` + ); + // Split up conversation output + const conversation = output.split('\n'); + + // Ensure that the beginning of the conversation is consistent + assert(conversation[0].match(/Prompt:/)); + assert(conversation[1].match(/What is Node.js?/)); + assert(conversation[2].match(/Non-Streaming Response Text:/)); + assert(conversation[3].match(/Node.js is a JavaScript runtime built on Chrome's V8 JavaScript engine. It is designed to build scalable network applications. 
Node.js runs JavaScript code outside of a browser./)); + assert(conversation[5].match(/Here are some key features of Node.js:/)); + }); +}); diff --git a/generative-ai/snippets/test/nonStreamingMultipartContent.test.js b/generative-ai/snippets/test/nonStreamingMultipartContent.test.js new file mode 100644 index 0000000000..af83187c70 --- /dev/null +++ b/generative-ai/snippets/test/nonStreamingMultipartContent.test.js @@ -0,0 +1,42 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +'use strict'; + +const {assert} = require('chai'); +const {describe, it} = require('mocha'); +const cp = require('child_process'); + +const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); + +describe('Generative AI NonStreaming Multipart Content', () => { + const project = 'cloud-llm-preview1'; + const location = 'us-central1'; + const model = 'gemini-pro-vision'; + const image = 'gs://generativeai-downloads/images/scones.jpg'; + + it('should create nonstreaming multipart content and begin the conversation the same in each instance', async () => { + const output = execSync( + `node ./nonStreamingMultipartContent.js ${project} ${location} ${model} ${image}` + ); + // Split up conversation output + const conversation = output.split('\n'); + + // Ensure that the conversation is what we expect for this scone image + assert(conversation[0].match(/Prompt Text:/)); + assert(conversation[1].match(/Use several paragraphs to describe what is happening in this picture./)); + assert(conversation[2].match(/Non-Streaming Response Text:/)); + assert(conversation[3].match(/There are several blueberry scones on a table. They are arranged on a white surface that is covered in blue stains. There is a bowl of blueberries next to the scones. There is a cup of coffee on the table. There are pink flowers on the table. The scones are round and have a crumbly texture. They are topped with blueberries and sugar. The coffee is hot and steaming. The flowers are in bloom and have a sweet fragrance. The table is made of wood and has a rustic appearance. 
The overall effect of the image is one of beauty and tranquility./)); + }); +}); diff --git a/generative-ai/snippets/test/safetySettings.test.js b/generative-ai/snippets/test/safetySettings.test.js new file mode 100644 index 0000000000..6cf7293395 --- /dev/null +++ b/generative-ai/snippets/test/safetySettings.test.js @@ -0,0 +1,36 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +'use strict'; + +const {assert} = require('chai'); +const {describe, it} = require('mocha'); +const cp = require('child_process'); + +const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); + +describe('Safety settings', async () => { + const project = 'cloud-llm-preview1'; + const location = 'us-central1'; + const model = 'gemini-pro'; + + it('should reject a dangerous request', async () => { + const output = execSync( + `node ./safetySettings.js ${project} ${location} ${model}` + ); + + // Expect rejection due to safety concerns + assert(output.match('This response stream terminated due to safety concerns')); + }); +}); diff --git a/generative-ai/snippets/test/streamChat.test.js b/generative-ai/snippets/test/streamChat.test.js new file mode 100644 index 0000000000..26e5c27a9b --- /dev/null +++ b/generative-ai/snippets/test/streamChat.test.js @@ -0,0 +1,42 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +'use strict'; + +const {assert} = require('chai'); +const {describe, it} = require('mocha'); +const cp = require('child_process'); + +const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); + +describe('Generative AI Stream Chat', () => { + const project = 'cloud-llm-preview1'; + const location = 'us-central1'; + const model = 'gemini-pro'; + + it('should create stream chat and begin the conversation the same in each instance', async () => { + const output = execSync( + `node ./streamChat.js ${project} ${location} ${model}` + ); + + // Assert that the advice given for learning is what we expect + assert(output.match(/User: How can I learn more about that?/)); + assert(output.match(/**Read books, articles, and reports:**/)); + assert(output.match(/**Attend seminars, workshops, and conferences:**/)); + assert(output.match(/**Take online courses or tutorials:**/)); + assert(output.match(/**Watch documentaries and videos:**/)); + assert(output.match(/**Interview experts:**/)); + assert(output.match(/**Do your own research:**/)); + }); +}); diff --git a/generative-ai/snippets/test/streamContent.test.js b/generative-ai/snippets/test/streamContent.test.js new file mode 100644 index 0000000000..c9c78e50d8 --- /dev/null +++ b/generative-ai/snippets/test/streamContent.test.js @@ -0,0 +1,42 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +'use strict'; + +const {assert} = require('chai'); +const {describe, it} = require('mocha'); +const cp = require('child_process'); + +const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); + +describe('Generative AI Stream Content', () => { + const project = 'cloud-llm-preview1'; + const location = 'us-central1'; + const model = 'gemini-pro'; + + it('should create stream content', async () => { + const output = execSync( + `node ./streamContent.js ${project} ${location} ${model}` + ); + // Split up conversation output + const conversation = output.split('\n'); +; + // Ensure that the beginning of the conversation is consistent + assert(conversation[0].match(/Prompt:/)); + assert(conversation[1].match(/What is Node.js?/)); + assert(conversation[2].match(/Streaming Response Text:/)); + assert(conversation[3].match(/Node.js is a JavaScript runtime built on Chrome's V8 JavaScript engine. It is designed to build scalable network applications. Node.js runs JavaScript code outside of a browser./)); + assert(conversation[5].match(/Here are some key features of Node.js:/)); + }); +}); diff --git a/generative-ai/snippets/test/streamMultipartContent.test.js b/generative-ai/snippets/test/streamMultipartContent.test.js new file mode 100644 index 0000000000..939ae2fe8d --- /dev/null +++ b/generative-ai/snippets/test/streamMultipartContent.test.js @@ -0,0 +1,42 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

'use strict';

const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

// Integration test: shells out to the streamMultipartContent sample with a
// GCS image and checks the streamed response mentions the image content.
// FIX: literal '.' characters are escaped so the regexes match them
// literally rather than as "any character".
describe('Generative AI Stream Multipart Content', () => {
  const project = 'cloud-llm-preview1';
  const location = 'us-central1';
  const model = 'gemini-pro-vision';
  const image = 'gs://generativeai-downloads/images/scones.jpg';

  it('should create stream multipart content', async () => {
    const output = execSync(
      `node ./streamMultipartContent.js ${project} ${location} ${model} ${image}`
    );
    // Split up conversation output
    const conversation = output.split('\n');

    // Ensure that the conversation is what we expect for this scone image
    assert(conversation[0].match(/Prompt Text:/));
    assert(conversation[1].match(/Use several paragraphs to describe what is happening in this picture\./));
    assert(conversation[2].match(/Streaming Response Text:/));
    assert(conversation[3].match(/scones/));
  });
});