diff --git a/speech/.eslintrc.yml b/speech/.eslintrc.yml
index 282535f55f..98634adbef 100644
--- a/speech/.eslintrc.yml
+++ b/speech/.eslintrc.yml
@@ -1,3 +1,4 @@
 ---
 rules:
   no-console: off
+  node/no-unsupported-features/node-builtins: off
diff --git a/speech/infiniteStreaming.js b/speech/infiniteStreaming.js
index a4b646b2c4..3acaf8aaa9 100644
--- a/speech/infiniteStreaming.js
+++ b/speech/infiniteStreaming.js
@@ -31,6 +31,11 @@
 
 'use strict';
 
+// sample-metadata:
+//   title: Infinite Streaming
+//   description: Performs infinite streaming using the streamingRecognize operation with the Cloud Speech API.
+//   usage: node infiniteStreaming.js
+
 /**
  * Note: Correct microphone settings required: check enclosed link, and make
  * sure the following conditions are met:
@@ -45,11 +50,11 @@
  * Maximum streaming limit should be 1/2 of SpeechAPI Streaming Limit.
  */
 
-function infiniteStream(
-  encoding,
-  sampleRateHertz,
-  languageCode,
-  streamingLimit
+function main(
+  encoding = 'LINEAR16',
+  sampleRateHertz = 16000,
+  languageCode = 'en-US',
+  streamingLimit = 290000
 ) {
 
   // [START speech_transcribe_infinite_streaming]
@@ -60,8 +65,6 @@ function infiniteStream(
 
   const chalk = require('chalk');
   const {Writable} = require('stream');
-
-  // Node-Record-lpcm16
   const recorder = require('node-record-lpcm16');
 
   // Imports the Google Cloud client library
@@ -240,53 +243,9 @@ function infiniteStream(
   // [END speech_transcribe_infinite_streaming]
 }
 
-require('yargs')
-  .demand(1)
-  .command(
-    'infiniteStream',
-    'infinitely streams audio input from microphone to speech API',
-    {},
-    opts =>
-      infiniteStream(
-        opts.encoding,
-        opts.sampleRateHertz,
-        opts.languageCode,
-        opts.streamingLimit
-      )
-  )
-  .options({
-    encoding: {
-      alias: 'e',
-      default: 'LINEAR16',
-      global: true,
-      requiresArg: true,
-      type: 'string',
-    },
-    sampleRateHertz: {
-      alias: 'r',
-      default: 16000,
-      global: true,
-      requiresArg: true,
-      type: 'number',
-    },
-    languageCode: {
-      alias: 'l',
-      default: 'en-US',
-      global: true,
-      requiresArg: true,
-      type: 'string',
-    },
-    streamingLimit: {
-      alias: 's',
-      default: 290000,
-      global: true,
-      requiresArg: true,
-      type: 'number',
-    },
-  })
-  .example('node $0 infiniteStream')
-  .wrap(120)
-  .recommendCommands()
-  .epilogue('For more information, see https://cloud.google.com/speech/docs')
-  .help()
-  .strict().argv;
+process.on('unhandledRejection', err => {
+  console.error(err.message);
+  process.exitCode = 1;
+});
+
+main(...process.argv.slice(2));
diff --git a/speech/package.json b/speech/package.json
index a918915b1d..2aedbad1fb 100644
--- a/speech/package.json
+++ b/speech/package.json
@@ -2,11 +2,15 @@
   "name": "nodejs-docs-samples-speech",
   "private": true,
   "license": "Apache-2.0",
-  "author": "Google Inc.",
+  "author": "Google LLC",
   "repository": "googleapis/nodejs-speech",
   "engines": {
-    "node": ">=8"
+    "node": ">=10.17.0"
   },
+  "files": [
+    "*.js",
+    "resources/"
+  ],
   "scripts": {
     "test": "c8 mocha system-test --timeout 600000"
   },
diff --git a/speech/quickstart.js b/speech/quickstart.js
index 85fcd4be5f..9bb1a940c4 100644
--- a/speech/quickstart.js
+++ b/speech/quickstart.js
@@ -14,42 +14,51 @@
 
 'use strict';
 
-// [START speech_quickstart]
-async function main() {
+function main() {
+  // [START speech_quickstart]
   // Imports the Google Cloud client library
   const speech = require('@google-cloud/speech');
-  const fs = require('fs');
+  const fs = require('fs').promises;
 
   // Creates a client
   const client = new speech.SpeechClient();
 
-  // The name of the audio file to transcribe
-  const fileName = './resources/audio.raw';
-
-  // Reads a local audio file and converts it to base64
-  const file = fs.readFileSync(fileName);
-  const audioBytes = file.toString('base64');
-
-  // The audio file's encoding, sample rate in hertz, and BCP-47 language code
-  const audio = {
-    content: audioBytes,
-  };
-  const config = {
-    encoding: 'LINEAR16',
-    sampleRateHertz: 16000,
-    languageCode: 'en-US',
-  };
-  const request = {
-    audio: audio,
-    config: config,
-  };
-
-  // Detects speech in the audio file
-  const [response] = await client.recognize(request);
-  const transcription = response.results
-    .map(result => result.alternatives[0].transcript)
-    .join('\n');
-  console.log(`Transcription: ${transcription}`);
+  async function quickstart() {
+    // The name of the audio file to transcribe
+    const fileName = './resources/audio.raw';
+
+    // Reads a local audio file and converts it to base64
+    const file = await fs.readFile(fileName);
+    const audioBytes = file.toString('base64');
+
+    // The audio file's encoding, sample rate in hertz, and BCP-47 language code
+    const audio = {
+      content: audioBytes,
+    };
+    const config = {
+      encoding: 'LINEAR16',
+      sampleRateHertz: 16000,
+      languageCode: 'en-US',
+    };
+    const request = {
+      audio: audio,
+      config: config,
+    };
+
+    // Detects speech in the audio file
+    const [response] = await client.recognize(request);
+    const transcription = response.results
+      .map(result => result.alternatives[0].transcript)
+      .join('\n');
+    console.log(`Transcription: ${transcription}`);
+  }
+  quickstart();
+  // [END speech_quickstart]
 }
-main().catch(console.error);
-// [END speech_quickstart]
+
+process.on('unhandledRejection', err => {
+  console.error(err.message);
+  process.exitCode = 1;
+});
+
+main(...process.argv.slice(2));
diff --git a/speech/recognize.v1p1beta1.js b/speech/recognize.v1p1beta1.js
index b7c9c04638..77bbf3d2ab 100644
--- a/speech/recognize.v1p1beta1.js
+++ b/speech/recognize.v1p1beta1.js
@@ -22,11 +22,16 @@
 
 'use strict';
 
-async function syncRecognizeWithMetaData(
+// sample-metadata:
+//   title: Recognize speech with metadata
+//   description: Analyzes an audio stream, and detects speech along with metadata.
+//   usage: node recognize.v1p1beta1.js ./resources/commercial_mono.wav
+
+function main(
   filename,
-  encoding,
-  sampleRateHertz,
-  languageCode
+  encoding = 'LINEAR16',
+  sampleRateHertz = 16000,
+  languageCode = 'en-US'
 ) {
   // [START speech_transcribe_recognition_metadata_beta]
   // Imports the Google Cloud client library for Beta API
@@ -40,87 +45,53 @@ async function syncRecognizeWithMetaData(
   // Creates a client
   const client = new speech.SpeechClient();
 
-  /**
-   * TODO(developer): Uncomment the following lines before running the sample.
-   */
-  // const filename = 'Local path to audio file, e.g. /path/to/audio.raw';
-  // const encoding = 'Encoding of the audio file, e.g. LINEAR16';
-  // const sampleRateHertz = 16000;
-  // const languageCode = 'BCP-47 language code, e.g. en-US';
+  async function syncRecognizeWithMetaData() {
+    /**
+     * TODO(developer): Uncomment the following lines before running the sample.
+     */
+    // const filename = 'Local path to audio file, e.g. /path/to/audio.raw';
+    // const encoding = 'Encoding of the audio file, e.g. LINEAR16';
+    // const sampleRateHertz = 16000;
+    // const languageCode = 'BCP-47 language code, e.g. en-US';
 
-  const recognitionMetadata = {
-    interactionType: 'DISCUSSION',
-    microphoneDistance: 'NEARFIELD',
-    recordingDeviceType: 'SMARTPHONE',
-    recordingDeviceName: 'Pixel 2 XL',
-    industryNaicsCodeOfAudio: 519190,
-  };
+    const recognitionMetadata = {
+      interactionType: 'DISCUSSION',
+      microphoneDistance: 'NEARFIELD',
+      recordingDeviceType: 'SMARTPHONE',
+      recordingDeviceName: 'Pixel 2 XL',
+      industryNaicsCodeOfAudio: 519190,
+    };
 
-  const config = {
-    encoding: encoding,
-    sampleRateHertz: sampleRateHertz,
-    languageCode: languageCode,
-    metadata: recognitionMetadata,
-  };
+    const config = {
+      encoding: encoding,
+      sampleRateHertz: sampleRateHertz,
+      languageCode: languageCode,
+      metadata: recognitionMetadata,
+    };
 
-  const audio = {
-    content: fs.readFileSync(filename).toString('base64'),
-  };
+    const audio = {
+      content: fs.readFileSync(filename).toString('base64'),
+    };
 
-  const request = {
-    config: config,
-    audio: audio,
-  };
+    const request = {
+      config: config,
+      audio: audio,
+    };
 
-  // Detects speech in the audio file
-  const [response] = await client.recognize(request);
-  response.results.forEach(result => {
-    const alternative = result.alternatives[0];
-    console.log(alternative.transcript);
-  });
-  // [END speech_transcribe_recognition_metadata_beta]
+    // Detects speech in the audio file
+    const [response] = await client.recognize(request);
+    response.results.forEach(result => {
+      const alternative = result.alternatives[0];
+      console.log(alternative.transcript);
+    });
+    // [END speech_transcribe_recognition_metadata_beta]
+  }
+  syncRecognizeWithMetaData();
 }
 
-require('yargs')
-  .demand(1)
-  .command(
-    'sync-metadata <filename>',
-    'Detects speech in a local audio file with metadata.',
-    {},
-    opts =>
-      syncRecognizeWithMetaData(
-        opts.filename,
-        opts.encoding,
-        opts.sampleRateHertz,
-        opts.languageCode
-      )
-  )
-  .options({
-    encoding: {
-      alias: 'e',
-      default: 'LINEAR16',
-      global: true,
-      requiresArg: true,
-      type: 'string',
-    },
-    sampleRateHertz: {
-      alias: 'r',
-      default: 16000,
-      global: true,
-      requiresArg: true,
-      type: 'number',
-    },
-    languageCode: {
-      alias: 'l',
-      default: 'en-US',
-      global: true,
-      requiresArg: true,
-      type: 'string',
-    },
-  })
-  .example('node $0 sync-metadata ./resources/commercial_mono.wav')
-  .wrap(120)
-  .recommendCommands()
-  .epilogue('For more information, see https://cloud.google.com/speech/docs')
-  .help()
-  .strict().argv;
+process.on('unhandledRejection', err => {
+  console.error(err.message);
+  process.exitCode = 1;
+});
+
+main(...process.argv.slice(2));
diff --git a/speech/system-test/.eslintrc.yml b/speech/system-test/.eslintrc.yml
deleted file mode 100644
index c0289282a6..0000000000
--- a/speech/system-test/.eslintrc.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-rules:
-  node/no-unpublished-require: off
-  node/no-unsupported-features: off
-  no-empty: off
diff --git a/speech/system-test/MicrophoneStream.test.js b/speech/system-test/MicrophoneStream.test.js
deleted file mode 100644
index 0fd1ef3717..0000000000
--- a/speech/system-test/MicrophoneStream.test.js
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2016 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-'use strict';
-
-const path = require('path');
-const {assert} = require('chai');
-const {describe, it} = require('mocha');
-const cp = require('child_process');
-
-const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
-
-const cmd = 'node MicrophoneStream.js';
-const cwd = path.join(__dirname, '..');
-
-describe('MicrophoneStream', () => {
-  it('should load and display Yaaaarghs(!) correctly', async () => {
-    const stdout = execSync(`${cmd} --help`, {cwd});
-    assert.match(
-      stdout,
-      /Streams audio input from microphone, translates to text/
-    );
-  });
-});
diff --git a/speech/system-test/recognize.v1p1beta1.test.js b/speech/system-test/recognize.v1p1beta1.test.js
index 1a877f4f80..aadbe26545 100644
--- a/speech/system-test/recognize.v1p1beta1.test.js
+++ b/speech/system-test/recognize.v1p1beta1.test.js
@@ -25,7 +25,7 @@ const filepath = path.join(__dirname, '..', 'resources', 'audio.raw');
 
 describe('Recognize v1p1beta1', () => {
   it('should run sync recognize with metadata', async () => {
-    const output = execSync(`${cmd} sync-metadata ${filepath}`);
+    const output = execSync(`${cmd} ${filepath}`);
     assert.match(output, /how old is the Brooklyn Bridge/);
   });
 });
diff --git a/speech/transcribeContextClasses.js b/speech/transcribeContextClasses.js
index 9ad10aa075..4b4061996c 100644
--- a/speech/transcribeContextClasses.js
+++ b/speech/transcribeContextClasses.js
@@ -63,8 +63,13 @@ function main(storageUri) {
     });
   }
 
-  transcribeContextClasses().catch(console.error);
+  transcribeContextClasses();
   // [END speech_transcribe_sync]
 }
 
+process.on('unhandledRejection', err => {
+  console.error(err.message);
+  process.exitCode = 1;
+});
+
 main(...process.argv.slice(2));
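
Note for reviewers: every converted sample above lands on the same runner skeleton, so the pattern only needs to be verified once. The sketch below is illustrative only and not part of the diff; the sample() worker is a placeholder for the real Speech API call.

'use strict';

// main() takes positional CLI arguments with defaults, replacing yargs.
function main(languageCode = 'en-US') {
  // The API-calling logic lives in an inner async function so the
  // region tag can wrap it without capturing the CLI plumbing.
  async function sample() {
    // Placeholder: real samples build a request and call the Speech API here.
    console.log(`would transcribe with language code ${languageCode}`);
  }
  sample();
}

// A rejected promise surfaces as a message plus a nonzero exit code
// instead of an unhandled-rejection warning.
process.on('unhandledRejection', err => {
  console.error(err.message);
  process.exitCode = 1;
});

// CLI arguments map positionally onto main's parameters,
// e.g. `node sample.js fr-FR`.
main(...process.argv.slice(2));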