fix: [Many APIs] fix typings for IAM methods #4459

Merged · 5 commits · Jul 28, 2023
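The excerpt below covers only the google-ai-generativelanguage package, which was regenerated in the same pass (dependency bumps and sample fixes); the IAM typing change named in the title lands in the generated client code of the many affected packages. As a hedged sketch of the kind of call site those typings describe, using @google-cloud/kms purely as an illustration of a client that exposes the IAM mixin methods (the project, key ring, and permission strings are placeholders):

// Hedged illustration only: KeyManagementServiceClient is one of many generated
// clients that expose getIamPolicy/setIamPolicy/testIamPermissions through the
// google-gax IAM mixin; the resource path and permission below are placeholders.
import {KeyManagementServiceClient} from '@google-cloud/kms';

async function inspectIam(): Promise<void> {
  const client = new KeyManagementServiceClient();
  const resource = client.keyRingPath('my-project', 'us-east1', 'my-key-ring');

  // Read the resource's current IAM policy. The declared result shape of these
  // mixin methods is exactly what typing fixes like this one adjust, so rely on
  // the runtime value rather than the old .d.ts.
  const policy = await client.getIamPolicy({resource});
  console.log(policy);

  // Ask which of the listed permissions the caller actually holds.
  const test = await client.testIamPermissions({
    resource,
    permissions: ['cloudkms.cryptoKeys.list'], // placeholder permission
  });
  console.log(test);
}

inspectIam().catch(console.error);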
13 changes: 7 additions & 6 deletions packages/google-ai-generativelanguage/package.json
@@ -44,29 +44,30 @@
"prelint": "cd samples; npm link ../; npm i"
},
"dependencies": {
"google-gax": "^3.5.8"
"google-gax": "^4.0.3"
},
"devDependencies": {
"@types/mocha": "^9.0.0",
"@types/node": "^18.0.0",
"@types/node": "^20.4.5",
"@types/sinon": "^10.0.0",
"c8": "^7.3.5",
"gts": "^3.1.0",
"gapic-tools": "^0.1.8",
"gts": "^5.0.0",
"jsdoc": "^4.0.0",
"jsdoc-fresh": "^2.0.0",
"jsdoc-region-tag": "^2.0.0",
"long": "^5.2.3",
"linkinator": "4.1.2",
"long": "^5.2.3",
"mocha": "^9.2.2",
"null-loader": "^4.0.1",
"pack-n-play": "^1.0.0-2",
"sinon": "^15.0.0",
"ts-loader": "^9.0.0",
"typescript": "^4.6.4",
"typescript": "^5.1.6",
"webpack": "^5.9.0",
"webpack-cli": "^5.0.0"
},
"engines": {
"node": ">=12.0.0"
"node": ">=14.0.0"
}
}
319 additions: new snippet metadata file for the generated v1beta2 samples
@@ -0,0 +1,319 @@
{
"clientLibrary": {
"name": "nodejs-generativelanguage",
"version": "0.2.1",
"language": "TYPESCRIPT",
"apis": [
{
"id": "google.ai.generativelanguage.v1beta2",
"version": "v1beta2"
}
]
},
"snippets": [
{
"regionTag": "generativelanguage_v1beta2_generated_DiscussService_GenerateMessage_async",
"title": "DiscussService generateMessage Sample",
"origin": "API_DEFINITION",
"description": " Generates a response from the model given an input `MessagePrompt`.",
"canonical": true,
"file": "discuss_service.generate_message.js",
"language": "JAVASCRIPT",
"segments": [
{
"start": 25,
"end": 90,
"type": "FULL"
}
],
"clientMethod": {
"shortName": "GenerateMessage",
"fullName": "google.ai.generativelanguage.v1beta2.DiscussService.GenerateMessage",
"async": true,
"parameters": [
{
"name": "model",
"type": "TYPE_STRING"
},
{
"name": "prompt",
"type": ".google.ai.generativelanguage.v1beta2.MessagePrompt"
},
{
"name": "temperature",
"type": "TYPE_FLOAT"
},
{
"name": "candidate_count",
"type": "TYPE_INT32"
},
{
"name": "top_p",
"type": "TYPE_FLOAT"
},
{
"name": "top_k",
"type": "TYPE_INT32"
}
],
"resultType": ".google.ai.generativelanguage.v1beta2.GenerateMessageResponse",
"client": {
"shortName": "DiscussServiceClient",
"fullName": "google.ai.generativelanguage.v1beta2.DiscussServiceClient"
},
"method": {
"shortName": "GenerateMessage",
"fullName": "google.ai.generativelanguage.v1beta2.DiscussService.GenerateMessage",
"service": {
"shortName": "DiscussService",
"fullName": "google.ai.generativelanguage.v1beta2.DiscussService"
}
}
}
},
{
"regionTag": "generativelanguage_v1beta2_generated_DiscussService_CountMessageTokens_async",
"title": "DiscussService countMessageTokens Sample",
"origin": "API_DEFINITION",
"description": " Runs a model's tokenizer on a string and returns the token count.",
"canonical": true,
"file": "discuss_service.count_message_tokens.js",
"language": "JAVASCRIPT",
"segments": [
{
"start": 25,
"end": 61,
"type": "FULL"
}
],
"clientMethod": {
"shortName": "CountMessageTokens",
"fullName": "google.ai.generativelanguage.v1beta2.DiscussService.CountMessageTokens",
"async": true,
"parameters": [
{
"name": "model",
"type": "TYPE_STRING"
},
{
"name": "prompt",
"type": ".google.ai.generativelanguage.v1beta2.MessagePrompt"
}
],
"resultType": ".google.ai.generativelanguage.v1beta2.CountMessageTokensResponse",
"client": {
"shortName": "DiscussServiceClient",
"fullName": "google.ai.generativelanguage.v1beta2.DiscussServiceClient"
},
"method": {
"shortName": "CountMessageTokens",
"fullName": "google.ai.generativelanguage.v1beta2.DiscussService.CountMessageTokens",
"service": {
"shortName": "DiscussService",
"fullName": "google.ai.generativelanguage.v1beta2.DiscussService"
}
}
}
},
{
"regionTag": "generativelanguage_v1beta2_generated_ModelService_GetModel_async",
"title": "DiscussService getModel Sample",
"origin": "API_DEFINITION",
"description": " Gets information about a specific Model.",
"canonical": true,
"file": "model_service.get_model.js",
"language": "JAVASCRIPT",
"segments": [
{
"start": 25,
"end": 55,
"type": "FULL"
}
],
"clientMethod": {
"shortName": "GetModel",
"fullName": "google.ai.generativelanguage.v1beta2.ModelService.GetModel",
"async": true,
"parameters": [
{
"name": "name",
"type": "TYPE_STRING"
}
],
"resultType": ".google.ai.generativelanguage.v1beta2.Model",
"client": {
"shortName": "ModelServiceClient",
"fullName": "google.ai.generativelanguage.v1beta2.ModelServiceClient"
},
"method": {
"shortName": "GetModel",
"fullName": "google.ai.generativelanguage.v1beta2.ModelService.GetModel",
"service": {
"shortName": "ModelService",
"fullName": "google.ai.generativelanguage.v1beta2.ModelService"
}
}
}
},
{
"regionTag": "generativelanguage_v1beta2_generated_ModelService_ListModels_async",
"title": "DiscussService listModels Sample",
"origin": "API_DEFINITION",
"description": " Lists models available through the API.",
"canonical": true,
"file": "model_service.list_models.js",
"language": "JAVASCRIPT",
"segments": [
{
"start": 25,
"end": 66,
"type": "FULL"
}
],
"clientMethod": {
"shortName": "ListModels",
"fullName": "google.ai.generativelanguage.v1beta2.ModelService.ListModels",
"async": true,
"parameters": [
{
"name": "page_size",
"type": "TYPE_INT32"
},
{
"name": "page_token",
"type": "TYPE_STRING"
}
],
"resultType": ".google.ai.generativelanguage.v1beta2.ListModelsResponse",
"client": {
"shortName": "ModelServiceClient",
"fullName": "google.ai.generativelanguage.v1beta2.ModelServiceClient"
},
"method": {
"shortName": "ListModels",
"fullName": "google.ai.generativelanguage.v1beta2.ModelService.ListModels",
"service": {
"shortName": "ModelService",
"fullName": "google.ai.generativelanguage.v1beta2.ModelService"
}
}
}
},
{
"regionTag": "generativelanguage_v1beta2_generated_TextService_GenerateText_async",
"title": "DiscussService generateText Sample",
"origin": "API_DEFINITION",
"description": " Generates a response from the model given an input message.",
"canonical": true,
"file": "text_service.generate_text.js",
"language": "JAVASCRIPT",
"segments": [
{
"start": 25,
"end": 119,
"type": "FULL"
}
],
"clientMethod": {
"shortName": "GenerateText",
"fullName": "google.ai.generativelanguage.v1beta2.TextService.GenerateText",
"async": true,
"parameters": [
{
"name": "model",
"type": "TYPE_STRING"
},
{
"name": "prompt",
"type": ".google.ai.generativelanguage.v1beta2.TextPrompt"
},
{
"name": "temperature",
"type": "TYPE_FLOAT"
},
{
"name": "candidate_count",
"type": "TYPE_INT32"
},
{
"name": "max_output_tokens",
"type": "TYPE_INT32"
},
{
"name": "top_p",
"type": "TYPE_FLOAT"
},
{
"name": "top_k",
"type": "TYPE_INT32"
},
{
"name": "safety_settings",
"type": "TYPE_MESSAGE[]"
},
{
"name": "stop_sequences",
"type": "TYPE_STRING[]"
}
],
"resultType": ".google.ai.generativelanguage.v1beta2.GenerateTextResponse",
"client": {
"shortName": "TextServiceClient",
"fullName": "google.ai.generativelanguage.v1beta2.TextServiceClient"
},
"method": {
"shortName": "GenerateText",
"fullName": "google.ai.generativelanguage.v1beta2.TextService.GenerateText",
"service": {
"shortName": "TextService",
"fullName": "google.ai.generativelanguage.v1beta2.TextService"
}
}
}
},
{
"regionTag": "generativelanguage_v1beta2_generated_TextService_EmbedText_async",
"title": "DiscussService embedText Sample",
"origin": "API_DEFINITION",
"description": " Generates an embedding from the model given an input message.",
"canonical": true,
"file": "text_service.embed_text.js",
"language": "JAVASCRIPT",
"segments": [
{
"start": 25,
"end": 59,
"type": "FULL"
}
],
"clientMethod": {
"shortName": "EmbedText",
"fullName": "google.ai.generativelanguage.v1beta2.TextService.EmbedText",
"async": true,
"parameters": [
{
"name": "model",
"type": "TYPE_STRING"
},
{
"name": "text",
"type": "TYPE_STRING"
}
],
"resultType": ".google.ai.generativelanguage.v1beta2.EmbedTextResponse",
"client": {
"shortName": "TextServiceClient",
"fullName": "google.ai.generativelanguage.v1beta2.TextServiceClient"
},
"method": {
"shortName": "EmbedText",
"fullName": "google.ai.generativelanguage.v1beta2.TextService.EmbedText",
"service": {
"shortName": "TextService",
"fullName": "google.ai.generativelanguage.v1beta2.TextService"
}
}
}
}
]
}
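The metadata above only catalogs the generated samples, so a condensed sketch of the DiscussService.GenerateMessage call it documents may help; the model resource name and prompt text are placeholders, and the snake_case proto fields (candidate_count, top_p, top_k) become camelCase in the Node request object:

// Condensed from the parameter list in the snippet metadata; model name and
// prompt content are placeholders, and default credentials are assumed.
import {v1beta2} from '@google-ai/generativelanguage';

async function generateMessage(): Promise<void> {
  const client = new v1beta2.DiscussServiceClient();

  // model, prompt, temperature, candidate_count, top_p and top_k from the
  // metadata map onto this request object.
  const [response] = await client.generateMessage({
    model: 'models/chat-bison-001', // placeholder model resource name
    prompt: {messages: [{content: 'Tell me about Node.js client libraries.'}]},
    temperature: 0.5,
    candidateCount: 1,
    topP: 0.95,
    topK: 40,
  });

  console.log(response.candidates);
}

generateMessage().catch(console.error);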
text_service.generate_text.js (generated sample)
@@ -90,13 +90,13 @@ function main(model, prompt) {
    *  `SafetyCategory` provided in the list, the API will use the default safety
    *  setting for that category.
    */
-  // const safetySettings = 1234
+  // const safetySettings = [1,2,3,4]
   /**
    *  The set of character sequences (up to 5) that will stop output generation.
    *  If specified, the API will stop at the first appearance of a stop
    *  sequence. The stop sequence will not be included as part of the response.
    */
-  // const stopSequences = 'abc123'
+  // const stopSequences = ['abc','def']
 
   // Imports the Generativelanguage library
   const {TextServiceClient} = require('@google-ai/generativelanguage').v1beta2;
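The sample fix above replaces scalar placeholders with arrays, matching the proto shapes: safety_settings is a repeated SafetySetting message and stop_sequences a repeated string. A minimal sketch of a generateText call using the corrected shapes (the model name, category/threshold values, and stop strings are illustrative only):

// Minimal sketch of TextService.generateText with array-valued safetySettings
// and stopSequences; model name, enum choices and stop strings are placeholders.
import {v1beta2} from '@google-ai/generativelanguage';

async function generateText(): Promise<void> {
  const client = new v1beta2.TextServiceClient();

  const [response] = await client.generateText({
    model: 'models/text-bison-001', // placeholder model resource name
    prompt: {text: 'Write one sentence about unified diffs.'},
    // Repeated SafetySetting messages, not a scalar.
    safetySettings: [
      {category: 'HARM_CATEGORY_TOXICITY', threshold: 'BLOCK_LOW_AND_ABOVE'},
    ],
    // Repeated strings, not a single string.
    stopSequences: ['\n\n', 'END'],
  });

  console.log(response.candidates);
}

generateText().catch(console.error);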
4 changes: 2 additions & 2 deletions packages/google-ai-generativelanguage/samples/package.json
@@ -4,7 +4,7 @@
"license": "Apache-2.0",
"author": "Google LLC",
"engines": {
"node": ">=12.0.0"
"node": ">=14.0.0"
},
"files": [
"*.js"
@@ -21,4 +21,4 @@
"chai": "^4.2.0",
"mocha": "^8.0.0"
}
}
}