@mastra/voice-google 0.0.0-vector-query-sources-20250516172905 → 0.0.0-vector-query-tool-provider-options-20250828222356
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.md +11 -42
- package/dist/index.cjs +10 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.ts +78 -2
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +10 -0
- package/dist/index.js.map +1 -0
- package/package.json +17 -13
- package/dist/_tsup-dts-rollup.d.cts +0 -73
- package/dist/_tsup-dts-rollup.d.ts +0 -73
- package/dist/index.d.cts +0 -2
package/LICENSE.md
CHANGED
@@ -1,46 +1,15 @@
-#
+# Apache License 2.0

-Copyright (c) 2025
+Copyright (c) 2025 Kepler Software, Inc.

-
-
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at

-
-The licensor grants you a non-exclusive, royalty-free, worldwide, non-sublicensable, non-transferable license to use, copy, distribute, make available, and prepare derivative works of the software, in each case subject to the limitations and conditions below
+    http://www.apache.org/licenses/LICENSE-2.0

-
-
-
-
-
-You may not alter, remove, or obscure any licensing, copyright, or other notices of the licensor in the software. Any use of the licensor’s trademarks is subject to applicable law.
-
-**Patents**
-The licensor grants you a license, under any patent claims the licensor can license, or becomes able to license, to make, have made, use, sell, offer for sale, import and have imported the software, in each case subject to the limitations and conditions in this license. This license does not cover any patent claims that you cause to be infringed by modifications or additions to the software. If you or your company make any written claim that the software infringes or contributes to infringement of any patent, your patent license for the software granted under these terms ends immediately. If your company makes such a claim, your patent license ends immediately for work on behalf of your company.
-
-**Notices**
-You must ensure that anyone who gets a copy of any part of the software from you also gets a copy of these terms.
-
-If you modify the software, you must include in any modified copies of the software prominent notices stating that you have modified the software.
-
-**No Other Rights**
-These terms do not imply any licenses other than those expressly granted in these terms.
-
-**Termination**
-If you use the software in violation of these terms, such use is not licensed, and your licenses will automatically terminate. If the licensor provides you with a notice of your violation, and you cease all violation of this license no later than 30 days after you receive that notice, your licenses will be reinstated retroactively. However, if you violate these terms after such reinstatement, any additional violation of these terms will cause your licenses to terminate automatically and permanently.
-
-**No Liability**
-As far as the law allows, the software comes as is, without any warranty or condition, and the licensor will not be liable to you for any damages arising out of these terms or the use or nature of the software, under any kind of legal claim.
-
-**Definitions**
-The _licensor_ is the entity offering these terms, and the _software_ is the software the licensor makes available under these terms, including any portion of it.
-
-_you_ refers to the individual or entity agreeing to these terms.
-
-_your company_ is any legal entity, sole proprietorship, or other kind of organization that you work for, plus all organizations that have control over, are under the control of, or are under common control with that organization. _control_ means ownership of substantially all the assets of an entity, or the power to direct its management and policies by vote, contract, or otherwise. Control can be direct or indirect.
-
-_your licenses_ are all the licenses granted to you for the software under these terms.
-
-_use_ means anything you do with the software requiring one of your licenses.
-
-_trademark_ means trademarks, service marks, and similar rights.
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

package/dist/index.cjs
CHANGED
@@ -105,6 +105,14 @@ var GoogleVoice = class extends voice.MastraVoice {
       return stream$1;
     }, "voice.google.speak")();
   }
+  /**
+   * Checks if listening capabilities are enabled.
+   *
+   * @returns {Promise<{ enabled: boolean }>}
+   */
+  async getListener() {
+    return { enabled: true };
+  }
   /**
    * Converts speech to text
    * @param {NodeJS.ReadableStream} audioStream - Audio stream to transcribe. Default encoding is LINEAR16.
@@ -154,3 +162,5 @@ var GoogleVoice = class extends voice.MastraVoice {
 };
 
 exports.GoogleVoice = GoogleVoice;
+//# sourceMappingURL=index.cjs.map
+//# sourceMappingURL=index.cjs.map

package/dist/index.cjs.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/index.ts"],"names":["MastraVoice","TextToSpeechClient","SpeechClient","stream","PassThrough"],"mappings":";;;;;;;;AAiBA,IAAM,aAAA,GAAgB,gBAAA;AAOf,IAAM,WAAA,GAAN,cAA0BA,iBAAA,CAAY;AAAA,EACnC,SAAA;AAAA,EACA,YAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUR,WAAA,CAAY;AAAA,IACV,cAAA;AAAA,IACA,WAAA;AAAA,IACA;AAAA,GACF,GAII,EAAC,EAAG;AACN,IAAA,MAAM,aAAA,GAAgB,QAAQ,GAAA,CAAI,cAAA;AAClC,IAAA,MAAM,cAAA,GAAiB,aAAA;AAEvB,IAAA,KAAA,CAAM;AAAA,MACJ,WAAA,EAAa;AAAA,QACX,IAAA,EAAM,EAAA;AAAA,QACN,MAAA,EAAQ,aAAa,MAAA,IAAU;AAAA,OACjC;AAAA,MACA,cAAA,EAAgB;AAAA,QACd,IAAA,EAAM,EAAA;AAAA,QACN,MAAA,EAAQ,gBAAgB,MAAA,IAAU;AAAA,OACpC;AAAA,MACA,SAAS,OAAA,IAAW;AAAA,KACrB,CAAA;AAED,IAAA,MAAM,MAAA,GAAS,aAAA,IAAiB,WAAA,EAAa,MAAA,IAAU,cAAA,EAAgB,MAAA;AACvE,IAAA,IAAI,CAAC,MAAA,EAAQ;AACX,MAAA,MAAM,IAAI,KAAA;AAAA,QACR;AAAA,OACF;AAAA,IACF;AAEA,IAAA,IAAA,CAAK,SAAA,GAAY,IAAIC,+BAAA,CAAmB;AAAA,MACtC,MAAA,EAAQ,IAAA,CAAK,WAAA,EAAa,MAAA,IAAU;AAAA,KACrC,CAAA;AAED,IAAA,IAAA,CAAK,YAAA,GAAe,IAAIC,mBAAA,CAAa;AAAA,MACnC,MAAA,EAAQ,IAAA,CAAK,cAAA,EAAgB,MAAA,IAAU;AAAA,KACxC,CAAA;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,WAAA,CAAY,EAAE,eAAe,OAAA,EAAQ,GAA+B,EAAC,EAAG;AAC5E,IAAA,OAAO,IAAA,CAAK,OAAO,YAAY;AAC7B,MAAA,MAAM,CAAC,QAAQ,CAAA,GAAI,MAAM,KAAK,SAAA,CAAU,UAAA,CAAW,EAAE,YAAA,EAA4B,CAAA;AACjF,MAAA,OAAA,CAAQ,QAAA,EAAU,MAAA,IAAU,EAAC,EAC1B,MAAA,CAAO,CAAA,KAAA,KAAS,KAAA,CAAM,IAAA,IAAQ,KAAA,CAAM,aAAa,CAAA,CACjD,GAAA,CAAI,CAAA,KAAA,MAAU;AAAA,QACb,SAAS,KAAA,CAAM,IAAA;AAAA,QACf,eAAe,KAAA,CAAM;AAAA,OACvB,CAAE,CAAA;AAAA,IACN,CAAA,EAAG,0BAA0B,CAAA,EAAE;AAAA,EACjC;AAAA,EAEA,MAAc,eAAe,MAAA,EAAgD;AAC3E,IAAA,MAAM,SAAmB,EAAC;AAC1B,IAAA,WAAA,MAAiB,SAAS,MAAA,EAAQ;AAChC,MAAA,IAAI,OAAO,UAAU,QAAA,EAAU;AAC7B,QAAA,MAAA,CAAO,IAAA,CAAK,MAAA,CAAO,IAAA,CAAK,KAAK,CAAC,CAAA;AAAA,MAChC,CAAA,MAAO;AACL,QAAA,MAAA,CAAO,KAAK,KAAK,CAAA;AAAA,MACnB;AAAA,IACF;AACA,IAAA,OAAO,MAAA,CAAO,MAAA,CAAO,MAAM,CAAA,CAAE,SAAS,OAAO,CAAA;AAAA,EAC/C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,KAAA,CACJ,KAAA,EACA,OAAA,EAKgC;AAChC,IAAA,OAAO,IAAA,CAAK,OAAO,YAAY;AAC7B,MAAA,MAAM,IAAA,GAAO,OAAO,KAAA,KAAU,QAAA,GAAW,QAAQ,MAAM,IAAA,CAAK,eAAe,KAAK,CAAA;AAEhF,MAAA,MAAM,OAAA,GAA4E;AAAA,QAChF,KAAA,EAAO,EAAE,IAAA,EAAK;AAAA,QACd,KAAA,EAAO;AAAA,UACL,IAAA,EAAM,OAAA,EAAS,OAAA,IAAW,IAAA,CAAK,OAAA;AAAA,UAC/B,YAAA,EAAc,OAAA,EAAS,YAAA,IAAgB,OAAA,EAAS,SAAS,KAAA,CAAM,GAAG,CAAA,CAAE,KAAA,CAAM,CAAA,EAAG,CAAC,CAAA,CAAE,IAAA,CAAK,GAAG,CAAA,IAAK;AAAA,SAC/F;AAAA,QACA,WAAA,EAAa,OAAA,EAAS,WAAA,IAAe,EAAE,eAAe,UAAA;AAAW,OACnE;AAEA,MAAA,MAAM,CAAC,QAAQ,CAAA,GAAI,MAAM,IAAA,CAAK,SAAA,CAAU,iBAAiB,OAAO,CAAA;AAEhE,MAAA,IAAI,CAAC,SAAS,YAAA,EAAc;AAC1B,QAAA,MAAM,IAAI,MAAM,4BAA4B,CAAA;AAAA,MAC9C;AAEA,MAAA,IAAI,OAAO,QAAA,CAAS,YAAA,KAAiB,QAAA,EAAU;AAC7C,QAAA,MAAM,IAAI,MAAM,4BAA4B,CAAA;AAAA,MAC9C;AAEA,MAAA,MAAMC,QAAA,GAAS,IAAIC,kBAAA,EAAY;AAC/B,MAAAD,QAAA,CAAO,GAAA,CAAI,MAAA,CAAO,IAAA,CAAK,QAAA,CAAS,YAAY,CAAC,CAAA;AAC7C,MAAA,OAAOA,QAAA;AAAA,IACT,CAAA,EAAG,oBAAoB,CAAA,EAAE;AAAA,EAC3B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,WAAA,GAAc;AAClB,IAAA,OAAO,EAAE,SAAS,IAAA,EAAK;AAAA,EACzB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,MAAM,MAAA,CACJ,WAAA,EACA,OAAA,EACiB;AACjB,IAAA,OAAO,IAAA,CAAK,OAAO,YAAY;AAC7B,MAAA,MAAM,SAAmB,EAAC;AAC1B,MAAA,WAAA,MAAiB,SAAS,WAAA,EAAa;AACrC,QAAA,IAAI,OAAO,UAAU,QAAA,EAAU;AAC7B,UAAA,MAAA,CAAO,IAAA,CAAK,MAAA,CAAO,IAAA,CAAK,KAAK,CAAC,CAAA;AAAA,QAChC,CAAA,MAAO;AACL,UAAA,MAAA,CAAO,KAAK,KAAK,CAAA;AAAA,QACnB;AAAA,MACF;AACA,MAAA,MAAM,MAAA,GAAS,MAAA,CAAO,MAAA,CAAO,MAAM,CAAA;AAEnC,MAAA,IAAI,OAAA,GAAU;AAAA,QACZ,MAAA,EAAQ;AAAA,UACN,QAAA,EAAU,UAAA;AAAA,UACV,YAAA,EAAc,OAAA;AAAA,UAC
d,GAAG,OAAA,EAAS;AAAA,SACd;AAAA,QACA,KAAA,EAAO;AAAA,UACL,OAAA,EAAS,MAAA,CAAO,QAAA,CAAS,QAAQ;AAAA;AACnC,OACF;AACA,MAAA,OAAA,CAAQ,IAAI,CAAA,cAAA,CAAgB,CAAA;AAC5B,MAAA,MAAM,CAAC,QAAQ,CAAA,GAAI,MAAM,IAAA,CAAK,YAAA,CAAa,UAAU,OAAwD,CAAA;AAC7G,MAAA,OAAA,CAAQ,IAAI,CAAA,aAAA,CAAe,CAAA;AAE3B,MAAA,IAAI,CAAC,QAAA,CAAS,OAAA,IAAW,QAAA,CAAS,OAAA,CAAQ,WAAW,CAAA,EAAG;AACtD,QAAA,MAAM,IAAI,MAAM,mCAAmC,CAAA;AAAA,MACrD;AAEA,MAAA,MAAM,aAAA,GAAgB,QAAA,CAAS,OAAA,CAC5B,GAAA,CAAI,CAAC,MAAA,KAAgB;AACpB,QAAA,IAAI,CAAC,MAAA,CAAO,YAAA,IAAgB,MAAA,CAAO,YAAA,CAAa,WAAW,CAAA,EAAG;AAC5D,UAAA,OAAO,EAAA;AAAA,QACT;AACA,QAAA,OAAO,MAAA,CAAO,YAAA,CAAa,CAAC,CAAA,CAAE,UAAA,IAAc,EAAA;AAAA,MAC9C,CAAC,CAAA,CACA,MAAA,CAAO,CAAC,IAAA,KAAiB,KAAK,MAAA,GAAS,CAAC,CAAA,CACxC,IAAA,CAAK,GAAG,CAAA;AAEX,MAAA,IAAI,CAAC,aAAA,EAAe;AAClB,QAAA,MAAM,IAAI,MAAM,yCAAyC,CAAA;AAAA,MAC3D;AAEA,MAAA,OAAO,aAAA;AAAA,IACT,CAAA,EAAG,qBAAqB,CAAA,EAAE;AAAA,EAC5B;AACF","file":"index.cjs","sourcesContent":["import { PassThrough } from 'stream';\n\nimport { SpeechClient } from '@google-cloud/speech';\nimport type { google as SpeechTypes } from '@google-cloud/speech/build/protos/protos';\nimport { TextToSpeechClient } from '@google-cloud/text-to-speech';\nimport type { google as TextToSpeechTypes } from '@google-cloud/text-to-speech/build/protos/protos';\nimport { MastraVoice } from '@mastra/core/voice';\n\n/**\n * Configuration for Google Cloud Voice models\n * @interface GoogleModelConfig\n * @property {string} [apiKey] - Optional Google Cloud API key. If not provided, will use GOOGLE_API_KEY environment variable\n */\nexport interface GoogleModelConfig {\n apiKey?: string;\n}\n\nconst DEFAULT_VOICE = 'en-US-Casual-K';\n\n/**\n * GoogleVoice class provides Text-to-Speech and Speech-to-Text capabilities using Google Cloud services\n * @class GoogleVoice\n * @extends MastraVoice\n */\nexport class GoogleVoice extends MastraVoice {\n private ttsClient: TextToSpeechClient;\n private speechClient: SpeechClient;\n\n /**\n * Creates an instance of GoogleVoice\n * @param {Object} config - Configuration options\n * @param {GoogleModelConfig} [config.speechModel] - Configuration for speech synthesis\n * @param {GoogleModelConfig} [config.listeningModel] - Configuration for speech recognition\n * @param {string} [config.speaker] - Default voice ID to use for speech synthesis\n * @throws {Error} If no API key is provided via config or environment variable\n */\n constructor({\n listeningModel,\n speechModel,\n speaker,\n }: {\n listeningModel?: GoogleModelConfig;\n speechModel?: GoogleModelConfig;\n speaker?: string;\n } = {}) {\n const defaultApiKey = process.env.GOOGLE_API_KEY;\n const defaultSpeaker = DEFAULT_VOICE;\n\n super({\n speechModel: {\n name: '',\n apiKey: speechModel?.apiKey ?? defaultApiKey,\n },\n listeningModel: {\n name: '',\n apiKey: listeningModel?.apiKey ?? defaultApiKey,\n },\n speaker: speaker ?? defaultSpeaker,\n });\n\n const apiKey = defaultApiKey || speechModel?.apiKey || listeningModel?.apiKey;\n if (!apiKey) {\n throw new Error(\n 'Google API key is not set, set GOOGLE_API_KEY environment variable or pass apiKey to constructor',\n );\n }\n\n this.ttsClient = new TextToSpeechClient({\n apiKey: this.speechModel?.apiKey || defaultApiKey,\n });\n\n this.speechClient = new SpeechClient({\n apiKey: this.listeningModel?.apiKey || defaultApiKey,\n });\n }\n\n /**\n * Gets a list of available voices\n * @returns {Promise<Array<{voiceId: string, languageCodes: string[]}>>} List of available voices and their supported languages. 
Default language is en-US.\n */\n async getSpeakers({ languageCode = 'en-US' }: { languageCode?: string } = {}) {\n return this.traced(async () => {\n const [response] = await this.ttsClient.listVoices({ languageCode: languageCode });\n return (response?.voices || [])\n .filter(voice => voice.name && voice.languageCodes)\n .map(voice => ({\n voiceId: voice.name!,\n languageCodes: voice.languageCodes!,\n }));\n }, 'voice.google.getSpeakers')();\n }\n\n private async streamToString(stream: NodeJS.ReadableStream): Promise<string> {\n const chunks: Buffer[] = [];\n for await (const chunk of stream) {\n if (typeof chunk === 'string') {\n chunks.push(Buffer.from(chunk));\n } else {\n chunks.push(chunk);\n }\n }\n return Buffer.concat(chunks).toString('utf-8');\n }\n\n /**\n * Converts text to speech\n * @param {string | NodeJS.ReadableStream} input - Text or stream to convert to speech\n * @param {Object} [options] - Speech synthesis options\n * @param {string} [options.speaker] - Voice ID to use\n * @param {string} [options.languageCode] - Language code for the voice\n * @param {TextToSpeechTypes.cloud.texttospeech.v1.ISynthesizeSpeechRequest['audioConfig']} [options.audioConfig] - Audio configuration options\n * @returns {Promise<NodeJS.ReadableStream>} Stream of synthesized audio. Default encoding is LINEAR16.\n */\n async speak(\n input: string | NodeJS.ReadableStream,\n options?: {\n speaker?: string;\n languageCode?: string;\n audioConfig?: TextToSpeechTypes.cloud.texttospeech.v1.ISynthesizeSpeechRequest['audioConfig'];\n },\n ): Promise<NodeJS.ReadableStream> {\n return this.traced(async () => {\n const text = typeof input === 'string' ? input : await this.streamToString(input);\n\n const request: TextToSpeechTypes.cloud.texttospeech.v1.ISynthesizeSpeechRequest = {\n input: { text },\n voice: {\n name: options?.speaker || this.speaker,\n languageCode: options?.languageCode || options?.speaker?.split('-').slice(0, 2).join('-') || 'en-US',\n },\n audioConfig: options?.audioConfig || { audioEncoding: 'LINEAR16' },\n };\n\n const [response] = await this.ttsClient.synthesizeSpeech(request);\n\n if (!response.audioContent) {\n throw new Error('No audio content returned.');\n }\n\n if (typeof response.audioContent === 'string') {\n throw new Error('Audio content is a string.');\n }\n\n const stream = new PassThrough();\n stream.end(Buffer.from(response.audioContent));\n return stream;\n }, 'voice.google.speak')();\n }\n\n /**\n * Checks if listening capabilities are enabled.\n *\n * @returns {Promise<{ enabled: boolean }>}\n */\n async getListener() {\n return { enabled: true };\n }\n\n /**\n * Converts speech to text\n * @param {NodeJS.ReadableStream} audioStream - Audio stream to transcribe. 
Default encoding is LINEAR16.\n * @param {Object} [options] - Recognition options\n * @param {SpeechTypes.cloud.speech.v1.IRecognitionConfig} [options.config] - Recognition configuration\n * @returns {Promise<string>} Transcribed text\n */\n async listen(\n audioStream: NodeJS.ReadableStream,\n options?: { stream?: boolean; config?: SpeechTypes.cloud.speech.v1.IRecognitionConfig },\n ): Promise<string> {\n return this.traced(async () => {\n const chunks: Buffer[] = [];\n for await (const chunk of audioStream) {\n if (typeof chunk === 'string') {\n chunks.push(Buffer.from(chunk));\n } else {\n chunks.push(chunk);\n }\n }\n const buffer = Buffer.concat(chunks);\n\n let request = {\n config: {\n encoding: 'LINEAR16',\n languageCode: 'en-US',\n ...options?.config,\n },\n audio: {\n content: buffer.toString('base64'),\n },\n };\n console.log(`BEFORE REQUEST`);\n const [response] = await this.speechClient.recognize(request as SpeechTypes.cloud.speech.v1.IRecognizeRequest);\n console.log(`AFTER REQUEST`);\n\n if (!response.results || response.results.length === 0) {\n throw new Error('No transcription results returned');\n }\n\n const transcription = response.results\n .map((result: any) => {\n if (!result.alternatives || result.alternatives.length === 0) {\n return '';\n }\n return result.alternatives[0].transcript || '';\n })\n .filter((text: string) => text.length > 0)\n .join(' ');\n\n if (!transcription) {\n throw new Error('No valid transcription found in results');\n }\n\n return transcription;\n }, 'voice.google.listen')();\n }\n}\n"]}
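The only behavioral addition in the compiled output above is the new `getListener()` method, which reports whether speech-to-text is available. A minimal consumer-side sketch of the new call, assuming `GOOGLE_API_KEY` is set in the environment; the `input.wav` path is hypothetical:

```ts
import { createReadStream } from 'fs';

import { GoogleVoice } from '@mastra/voice-google';

// Sketch only: with no arguments the constructor falls back to the
// GOOGLE_API_KEY environment variable, as the compiled source above shows.
const voice = new GoogleVoice();

// New in this release: getListener() reports whether speech-to-text is available.
const { enabled } = await voice.getListener();

if (enabled) {
  // 'input.wav' is a hypothetical LINEAR16 recording; listen() resolves to the transcript.
  const transcript = await voice.listen(createReadStream('input.wav'));
  console.log(transcript);
}
```
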
package/dist/index.d.ts
CHANGED
@@ -1,2 +1,78 @@
-
-
+import type { google as SpeechTypes } from '@google-cloud/speech/build/protos/protos';
+import type { google as TextToSpeechTypes } from '@google-cloud/text-to-speech/build/protos/protos';
+import { MastraVoice } from '@mastra/core/voice';
+/**
+ * Configuration for Google Cloud Voice models
+ * @interface GoogleModelConfig
+ * @property {string} [apiKey] - Optional Google Cloud API key. If not provided, will use GOOGLE_API_KEY environment variable
+ */
+export interface GoogleModelConfig {
+    apiKey?: string;
+}
+/**
+ * GoogleVoice class provides Text-to-Speech and Speech-to-Text capabilities using Google Cloud services
+ * @class GoogleVoice
+ * @extends MastraVoice
+ */
+export declare class GoogleVoice extends MastraVoice {
+    private ttsClient;
+    private speechClient;
+    /**
+     * Creates an instance of GoogleVoice
+     * @param {Object} config - Configuration options
+     * @param {GoogleModelConfig} [config.speechModel] - Configuration for speech synthesis
+     * @param {GoogleModelConfig} [config.listeningModel] - Configuration for speech recognition
+     * @param {string} [config.speaker] - Default voice ID to use for speech synthesis
+     * @throws {Error} If no API key is provided via config or environment variable
+     */
+    constructor({ listeningModel, speechModel, speaker, }?: {
+        listeningModel?: GoogleModelConfig;
+        speechModel?: GoogleModelConfig;
+        speaker?: string;
+    });
+    /**
+     * Gets a list of available voices
+     * @returns {Promise<Array<{voiceId: string, languageCodes: string[]}>>} List of available voices and their supported languages. Default language is en-US.
+     */
+    getSpeakers({ languageCode }?: {
+        languageCode?: string;
+    }): Promise<{
+        voiceId: string;
+        languageCodes: string[];
+    }[]>;
+    private streamToString;
+    /**
+     * Converts text to speech
+     * @param {string | NodeJS.ReadableStream} input - Text or stream to convert to speech
+     * @param {Object} [options] - Speech synthesis options
+     * @param {string} [options.speaker] - Voice ID to use
+     * @param {string} [options.languageCode] - Language code for the voice
+     * @param {TextToSpeechTypes.cloud.texttospeech.v1.ISynthesizeSpeechRequest['audioConfig']} [options.audioConfig] - Audio configuration options
+     * @returns {Promise<NodeJS.ReadableStream>} Stream of synthesized audio. Default encoding is LINEAR16.
+     */
+    speak(input: string | NodeJS.ReadableStream, options?: {
+        speaker?: string;
+        languageCode?: string;
+        audioConfig?: TextToSpeechTypes.cloud.texttospeech.v1.ISynthesizeSpeechRequest['audioConfig'];
+    }): Promise<NodeJS.ReadableStream>;
+    /**
+     * Checks if listening capabilities are enabled.
+     *
+     * @returns {Promise<{ enabled: boolean }>}
+     */
+    getListener(): Promise<{
+        enabled: boolean;
+    }>;
+    /**
+     * Converts speech to text
+     * @param {NodeJS.ReadableStream} audioStream - Audio stream to transcribe. Default encoding is LINEAR16.
+     * @param {Object} [options] - Recognition options
+     * @param {SpeechTypes.cloud.speech.v1.IRecognitionConfig} [options.config] - Recognition configuration
+     * @returns {Promise<string>} Transcribed text
+     */
+    listen(audioStream: NodeJS.ReadableStream, options?: {
+        stream?: boolean;
+        config?: SpeechTypes.cloud.speech.v1.IRecognitionConfig;
+    }): Promise<string>;
+}
+//# sourceMappingURL=index.d.ts.map

package/dist/index.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAGA,OAAO,KAAK,EAAE,MAAM,IAAI,WAAW,EAAE,MAAM,0CAA0C,CAAC;AAEtF,OAAO,KAAK,EAAE,MAAM,IAAI,iBAAiB,EAAE,MAAM,kDAAkD,CAAC;AACpG,OAAO,EAAE,WAAW,EAAE,MAAM,oBAAoB,CAAC;AAEjD;;;;GAIG;AACH,MAAM,WAAW,iBAAiB;IAChC,MAAM,CAAC,EAAE,MAAM,CAAC;CACjB;AAID;;;;GAIG;AACH,qBAAa,WAAY,SAAQ,WAAW;IAC1C,OAAO,CAAC,SAAS,CAAqB;IACtC,OAAO,CAAC,YAAY,CAAe;IAEnC;;;;;;;OAOG;gBACS,EACV,cAAc,EACd,WAAW,EACX,OAAO,GACR,GAAE;QACD,cAAc,CAAC,EAAE,iBAAiB,CAAC;QACnC,WAAW,CAAC,EAAE,iBAAiB,CAAC;QAChC,OAAO,CAAC,EAAE,MAAM,CAAC;KACb;IAgCN;;;OAGG;IACG,WAAW,CAAC,EAAE,YAAsB,EAAE,GAAE;QAAE,YAAY,CAAC,EAAE,MAAM,CAAA;KAAO;;;;YAY9D,cAAc;IAY5B;;;;;;;;OAQG;IACG,KAAK,CACT,KAAK,EAAE,MAAM,GAAG,MAAM,CAAC,cAAc,EACrC,OAAO,CAAC,EAAE;QACR,OAAO,CAAC,EAAE,MAAM,CAAC;QACjB,YAAY,CAAC,EAAE,MAAM,CAAC;QACtB,WAAW,CAAC,EAAE,iBAAiB,CAAC,KAAK,CAAC,YAAY,CAAC,EAAE,CAAC,wBAAwB,CAAC,aAAa,CAAC,CAAC;KAC/F,GACA,OAAO,CAAC,MAAM,CAAC,cAAc,CAAC;IA6BjC;;;;OAIG;IACG,WAAW;;;IAIjB;;;;;;OAMG;IACG,MAAM,CACV,WAAW,EAAE,MAAM,CAAC,cAAc,EAClC,OAAO,CAAC,EAAE;QAAE,MAAM,CAAC,EAAE,OAAO,CAAC;QAAC,MAAM,CAAC,EAAE,WAAW,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC,kBAAkB,CAAA;KAAE,GACtF,OAAO,CAAC,MAAM,CAAC;CA+CnB"}
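The rewritten `index.d.ts` above now carries the full public API that was previously re-exported from the deleted `_tsup-dts-rollup` files. A brief usage sketch against that surface; it assumes `GOOGLE_API_KEY` is set, and the output path is illustrative:

```ts
import { createWriteStream } from 'fs';

import { GoogleVoice } from '@mastra/voice-google';

// Assumes GOOGLE_API_KEY is set; 'en-US-Casual-K' is the default voice named in the source.
const voice = new GoogleVoice({ speaker: 'en-US-Casual-K' });

// List voices for a language, then synthesize a phrase with the first one returned.
const speakers = await voice.getSpeakers({ languageCode: 'en-US' });
const audio = await voice.speak('Hello from @mastra/voice-google', {
  speaker: speakers[0]?.voiceId,
  audioConfig: { audioEncoding: 'LINEAR16' },
});

// 'hello.wav' is an illustrative output path for the synthesized LINEAR16 stream.
audio.pipe(createWriteStream('hello.wav'));
```
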
package/dist/index.js
CHANGED
@@ -103,6 +103,14 @@ var GoogleVoice = class extends MastraVoice {
       return stream;
     }, "voice.google.speak")();
   }
+  /**
+   * Checks if listening capabilities are enabled.
+   *
+   * @returns {Promise<{ enabled: boolean }>}
+   */
+  async getListener() {
+    return { enabled: true };
+  }
   /**
    * Converts speech to text
    * @param {NodeJS.ReadableStream} audioStream - Audio stream to transcribe. Default encoding is LINEAR16.
@@ -152,3 +160,5 @@ var GoogleVoice = class extends MastraVoice {
 };
 
 export { GoogleVoice };
+//# sourceMappingURL=index.js.map
+//# sourceMappingURL=index.js.map

package/dist/index.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/index.ts"],"names":[],"mappings":";;;;;;AAiBA,IAAM,aAAA,GAAgB,gBAAA;AAOf,IAAM,WAAA,GAAN,cAA0B,WAAA,CAAY;AAAA,EACnC,SAAA;AAAA,EACA,YAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUR,WAAA,CAAY;AAAA,IACV,cAAA;AAAA,IACA,WAAA;AAAA,IACA;AAAA,GACF,GAII,EAAC,EAAG;AACN,IAAA,MAAM,aAAA,GAAgB,QAAQ,GAAA,CAAI,cAAA;AAClC,IAAA,MAAM,cAAA,GAAiB,aAAA;AAEvB,IAAA,KAAA,CAAM;AAAA,MACJ,WAAA,EAAa;AAAA,QACX,IAAA,EAAM,EAAA;AAAA,QACN,MAAA,EAAQ,aAAa,MAAA,IAAU;AAAA,OACjC;AAAA,MACA,cAAA,EAAgB;AAAA,QACd,IAAA,EAAM,EAAA;AAAA,QACN,MAAA,EAAQ,gBAAgB,MAAA,IAAU;AAAA,OACpC;AAAA,MACA,SAAS,OAAA,IAAW;AAAA,KACrB,CAAA;AAED,IAAA,MAAM,MAAA,GAAS,aAAA,IAAiB,WAAA,EAAa,MAAA,IAAU,cAAA,EAAgB,MAAA;AACvE,IAAA,IAAI,CAAC,MAAA,EAAQ;AACX,MAAA,MAAM,IAAI,KAAA;AAAA,QACR;AAAA,OACF;AAAA,IACF;AAEA,IAAA,IAAA,CAAK,SAAA,GAAY,IAAI,kBAAA,CAAmB;AAAA,MACtC,MAAA,EAAQ,IAAA,CAAK,WAAA,EAAa,MAAA,IAAU;AAAA,KACrC,CAAA;AAED,IAAA,IAAA,CAAK,YAAA,GAAe,IAAI,YAAA,CAAa;AAAA,MACnC,MAAA,EAAQ,IAAA,CAAK,cAAA,EAAgB,MAAA,IAAU;AAAA,KACxC,CAAA;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,WAAA,CAAY,EAAE,eAAe,OAAA,EAAQ,GAA+B,EAAC,EAAG;AAC5E,IAAA,OAAO,IAAA,CAAK,OAAO,YAAY;AAC7B,MAAA,MAAM,CAAC,QAAQ,CAAA,GAAI,MAAM,KAAK,SAAA,CAAU,UAAA,CAAW,EAAE,YAAA,EAA4B,CAAA;AACjF,MAAA,OAAA,CAAQ,QAAA,EAAU,MAAA,IAAU,EAAC,EAC1B,MAAA,CAAO,CAAA,KAAA,KAAS,KAAA,CAAM,IAAA,IAAQ,KAAA,CAAM,aAAa,CAAA,CACjD,GAAA,CAAI,CAAA,KAAA,MAAU;AAAA,QACb,SAAS,KAAA,CAAM,IAAA;AAAA,QACf,eAAe,KAAA,CAAM;AAAA,OACvB,CAAE,CAAA;AAAA,IACN,CAAA,EAAG,0BAA0B,CAAA,EAAE;AAAA,EACjC;AAAA,EAEA,MAAc,eAAe,MAAA,EAAgD;AAC3E,IAAA,MAAM,SAAmB,EAAC;AAC1B,IAAA,WAAA,MAAiB,SAAS,MAAA,EAAQ;AAChC,MAAA,IAAI,OAAO,UAAU,QAAA,EAAU;AAC7B,QAAA,MAAA,CAAO,IAAA,CAAK,MAAA,CAAO,IAAA,CAAK,KAAK,CAAC,CAAA;AAAA,MAChC,CAAA,MAAO;AACL,QAAA,MAAA,CAAO,KAAK,KAAK,CAAA;AAAA,MACnB;AAAA,IACF;AACA,IAAA,OAAO,MAAA,CAAO,MAAA,CAAO,MAAM,CAAA,CAAE,SAAS,OAAO,CAAA;AAAA,EAC/C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,KAAA,CACJ,KAAA,EACA,OAAA,EAKgC;AAChC,IAAA,OAAO,IAAA,CAAK,OAAO,YAAY;AAC7B,MAAA,MAAM,IAAA,GAAO,OAAO,KAAA,KAAU,QAAA,GAAW,QAAQ,MAAM,IAAA,CAAK,eAAe,KAAK,CAAA;AAEhF,MAAA,MAAM,OAAA,GAA4E;AAAA,QAChF,KAAA,EAAO,EAAE,IAAA,EAAK;AAAA,QACd,KAAA,EAAO;AAAA,UACL,IAAA,EAAM,OAAA,EAAS,OAAA,IAAW,IAAA,CAAK,OAAA;AAAA,UAC/B,YAAA,EAAc,OAAA,EAAS,YAAA,IAAgB,OAAA,EAAS,SAAS,KAAA,CAAM,GAAG,CAAA,CAAE,KAAA,CAAM,CAAA,EAAG,CAAC,CAAA,CAAE,IAAA,CAAK,GAAG,CAAA,IAAK;AAAA,SAC/F;AAAA,QACA,WAAA,EAAa,OAAA,EAAS,WAAA,IAAe,EAAE,eAAe,UAAA;AAAW,OACnE;AAEA,MAAA,MAAM,CAAC,QAAQ,CAAA,GAAI,MAAM,IAAA,CAAK,SAAA,CAAU,iBAAiB,OAAO,CAAA;AAEhE,MAAA,IAAI,CAAC,SAAS,YAAA,EAAc;AAC1B,QAAA,MAAM,IAAI,MAAM,4BAA4B,CAAA;AAAA,MAC9C;AAEA,MAAA,IAAI,OAAO,QAAA,CAAS,YAAA,KAAiB,QAAA,EAAU;AAC7C,QAAA,MAAM,IAAI,MAAM,4BAA4B,CAAA;AAAA,MAC9C;AAEA,MAAA,MAAM,MAAA,GAAS,IAAI,WAAA,EAAY;AAC/B,MAAA,MAAA,CAAO,GAAA,CAAI,MAAA,CAAO,IAAA,CAAK,QAAA,CAAS,YAAY,CAAC,CAAA;AAC7C,MAAA,OAAO,MAAA;AAAA,IACT,CAAA,EAAG,oBAAoB,CAAA,EAAE;AAAA,EAC3B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,WAAA,GAAc;AAClB,IAAA,OAAO,EAAE,SAAS,IAAA,EAAK;AAAA,EACzB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,MAAM,MAAA,CACJ,WAAA,EACA,OAAA,EACiB;AACjB,IAAA,OAAO,IAAA,CAAK,OAAO,YAAY;AAC7B,MAAA,MAAM,SAAmB,EAAC;AAC1B,MAAA,WAAA,MAAiB,SAAS,WAAA,EAAa;AACrC,QAAA,IAAI,OAAO,UAAU,QAAA,EAAU;AAC7B,UAAA,MAAA,CAAO,IAAA,CAAK,MAAA,CAAO,IAAA,CAAK,KAAK,CAAC,CAAA;AAAA,QAChC,CAAA,MAAO;AACL,UAAA,MAAA,CAAO,KAAK,KAAK,CAAA;AAAA,QACnB;AAAA,MACF;AACA,MAAA,MAAM,MAAA,GAAS,MAAA,CAAO,MAAA,CAAO,MAAM,CAAA;AAEnC,MAAA,IAAI,OAAA,GAAU;AAAA,QACZ,MAAA,EAAQ;AAAA,UACN,QAAA,EAAU,UAAA;AAAA,UACV,YAAA,EAAc,OAAA;AAAA,UACd,GAAG,OAAA,EAAS;AAAA,SACd;AAAA,QACA,KAAA,EAAO;AAAA,UACL,OAAA,EAAS,MAAA,CAAO,QAAA,CA
AS,QAAQ;AAAA;AACnC,OACF;AACA,MAAA,OAAA,CAAQ,IAAI,CAAA,cAAA,CAAgB,CAAA;AAC5B,MAAA,MAAM,CAAC,QAAQ,CAAA,GAAI,MAAM,IAAA,CAAK,YAAA,CAAa,UAAU,OAAwD,CAAA;AAC7G,MAAA,OAAA,CAAQ,IAAI,CAAA,aAAA,CAAe,CAAA;AAE3B,MAAA,IAAI,CAAC,QAAA,CAAS,OAAA,IAAW,QAAA,CAAS,OAAA,CAAQ,WAAW,CAAA,EAAG;AACtD,QAAA,MAAM,IAAI,MAAM,mCAAmC,CAAA;AAAA,MACrD;AAEA,MAAA,MAAM,aAAA,GAAgB,QAAA,CAAS,OAAA,CAC5B,GAAA,CAAI,CAAC,MAAA,KAAgB;AACpB,QAAA,IAAI,CAAC,MAAA,CAAO,YAAA,IAAgB,MAAA,CAAO,YAAA,CAAa,WAAW,CAAA,EAAG;AAC5D,UAAA,OAAO,EAAA;AAAA,QACT;AACA,QAAA,OAAO,MAAA,CAAO,YAAA,CAAa,CAAC,CAAA,CAAE,UAAA,IAAc,EAAA;AAAA,MAC9C,CAAC,CAAA,CACA,MAAA,CAAO,CAAC,IAAA,KAAiB,KAAK,MAAA,GAAS,CAAC,CAAA,CACxC,IAAA,CAAK,GAAG,CAAA;AAEX,MAAA,IAAI,CAAC,aAAA,EAAe;AAClB,QAAA,MAAM,IAAI,MAAM,yCAAyC,CAAA;AAAA,MAC3D;AAEA,MAAA,OAAO,aAAA;AAAA,IACT,CAAA,EAAG,qBAAqB,CAAA,EAAE;AAAA,EAC5B;AACF","file":"index.js","sourcesContent":["import { PassThrough } from 'stream';\n\nimport { SpeechClient } from '@google-cloud/speech';\nimport type { google as SpeechTypes } from '@google-cloud/speech/build/protos/protos';\nimport { TextToSpeechClient } from '@google-cloud/text-to-speech';\nimport type { google as TextToSpeechTypes } from '@google-cloud/text-to-speech/build/protos/protos';\nimport { MastraVoice } from '@mastra/core/voice';\n\n/**\n * Configuration for Google Cloud Voice models\n * @interface GoogleModelConfig\n * @property {string} [apiKey] - Optional Google Cloud API key. If not provided, will use GOOGLE_API_KEY environment variable\n */\nexport interface GoogleModelConfig {\n apiKey?: string;\n}\n\nconst DEFAULT_VOICE = 'en-US-Casual-K';\n\n/**\n * GoogleVoice class provides Text-to-Speech and Speech-to-Text capabilities using Google Cloud services\n * @class GoogleVoice\n * @extends MastraVoice\n */\nexport class GoogleVoice extends MastraVoice {\n private ttsClient: TextToSpeechClient;\n private speechClient: SpeechClient;\n\n /**\n * Creates an instance of GoogleVoice\n * @param {Object} config - Configuration options\n * @param {GoogleModelConfig} [config.speechModel] - Configuration for speech synthesis\n * @param {GoogleModelConfig} [config.listeningModel] - Configuration for speech recognition\n * @param {string} [config.speaker] - Default voice ID to use for speech synthesis\n * @throws {Error} If no API key is provided via config or environment variable\n */\n constructor({\n listeningModel,\n speechModel,\n speaker,\n }: {\n listeningModel?: GoogleModelConfig;\n speechModel?: GoogleModelConfig;\n speaker?: string;\n } = {}) {\n const defaultApiKey = process.env.GOOGLE_API_KEY;\n const defaultSpeaker = DEFAULT_VOICE;\n\n super({\n speechModel: {\n name: '',\n apiKey: speechModel?.apiKey ?? defaultApiKey,\n },\n listeningModel: {\n name: '',\n apiKey: listeningModel?.apiKey ?? defaultApiKey,\n },\n speaker: speaker ?? defaultSpeaker,\n });\n\n const apiKey = defaultApiKey || speechModel?.apiKey || listeningModel?.apiKey;\n if (!apiKey) {\n throw new Error(\n 'Google API key is not set, set GOOGLE_API_KEY environment variable or pass apiKey to constructor',\n );\n }\n\n this.ttsClient = new TextToSpeechClient({\n apiKey: this.speechModel?.apiKey || defaultApiKey,\n });\n\n this.speechClient = new SpeechClient({\n apiKey: this.listeningModel?.apiKey || defaultApiKey,\n });\n }\n\n /**\n * Gets a list of available voices\n * @returns {Promise<Array<{voiceId: string, languageCodes: string[]}>>} List of available voices and their supported languages. 
Default language is en-US.\n */\n async getSpeakers({ languageCode = 'en-US' }: { languageCode?: string } = {}) {\n return this.traced(async () => {\n const [response] = await this.ttsClient.listVoices({ languageCode: languageCode });\n return (response?.voices || [])\n .filter(voice => voice.name && voice.languageCodes)\n .map(voice => ({\n voiceId: voice.name!,\n languageCodes: voice.languageCodes!,\n }));\n }, 'voice.google.getSpeakers')();\n }\n\n private async streamToString(stream: NodeJS.ReadableStream): Promise<string> {\n const chunks: Buffer[] = [];\n for await (const chunk of stream) {\n if (typeof chunk === 'string') {\n chunks.push(Buffer.from(chunk));\n } else {\n chunks.push(chunk);\n }\n }\n return Buffer.concat(chunks).toString('utf-8');\n }\n\n /**\n * Converts text to speech\n * @param {string | NodeJS.ReadableStream} input - Text or stream to convert to speech\n * @param {Object} [options] - Speech synthesis options\n * @param {string} [options.speaker] - Voice ID to use\n * @param {string} [options.languageCode] - Language code for the voice\n * @param {TextToSpeechTypes.cloud.texttospeech.v1.ISynthesizeSpeechRequest['audioConfig']} [options.audioConfig] - Audio configuration options\n * @returns {Promise<NodeJS.ReadableStream>} Stream of synthesized audio. Default encoding is LINEAR16.\n */\n async speak(\n input: string | NodeJS.ReadableStream,\n options?: {\n speaker?: string;\n languageCode?: string;\n audioConfig?: TextToSpeechTypes.cloud.texttospeech.v1.ISynthesizeSpeechRequest['audioConfig'];\n },\n ): Promise<NodeJS.ReadableStream> {\n return this.traced(async () => {\n const text = typeof input === 'string' ? input : await this.streamToString(input);\n\n const request: TextToSpeechTypes.cloud.texttospeech.v1.ISynthesizeSpeechRequest = {\n input: { text },\n voice: {\n name: options?.speaker || this.speaker,\n languageCode: options?.languageCode || options?.speaker?.split('-').slice(0, 2).join('-') || 'en-US',\n },\n audioConfig: options?.audioConfig || { audioEncoding: 'LINEAR16' },\n };\n\n const [response] = await this.ttsClient.synthesizeSpeech(request);\n\n if (!response.audioContent) {\n throw new Error('No audio content returned.');\n }\n\n if (typeof response.audioContent === 'string') {\n throw new Error('Audio content is a string.');\n }\n\n const stream = new PassThrough();\n stream.end(Buffer.from(response.audioContent));\n return stream;\n }, 'voice.google.speak')();\n }\n\n /**\n * Checks if listening capabilities are enabled.\n *\n * @returns {Promise<{ enabled: boolean }>}\n */\n async getListener() {\n return { enabled: true };\n }\n\n /**\n * Converts speech to text\n * @param {NodeJS.ReadableStream} audioStream - Audio stream to transcribe. 
Default encoding is LINEAR16.\n * @param {Object} [options] - Recognition options\n * @param {SpeechTypes.cloud.speech.v1.IRecognitionConfig} [options.config] - Recognition configuration\n * @returns {Promise<string>} Transcribed text\n */\n async listen(\n audioStream: NodeJS.ReadableStream,\n options?: { stream?: boolean; config?: SpeechTypes.cloud.speech.v1.IRecognitionConfig },\n ): Promise<string> {\n return this.traced(async () => {\n const chunks: Buffer[] = [];\n for await (const chunk of audioStream) {\n if (typeof chunk === 'string') {\n chunks.push(Buffer.from(chunk));\n } else {\n chunks.push(chunk);\n }\n }\n const buffer = Buffer.concat(chunks);\n\n let request = {\n config: {\n encoding: 'LINEAR16',\n languageCode: 'en-US',\n ...options?.config,\n },\n audio: {\n content: buffer.toString('base64'),\n },\n };\n console.log(`BEFORE REQUEST`);\n const [response] = await this.speechClient.recognize(request as SpeechTypes.cloud.speech.v1.IRecognizeRequest);\n console.log(`AFTER REQUEST`);\n\n if (!response.results || response.results.length === 0) {\n throw new Error('No transcription results returned');\n }\n\n const transcription = response.results\n .map((result: any) => {\n if (!result.alternatives || result.alternatives.length === 0) {\n return '';\n }\n return result.alternatives[0].transcript || '';\n })\n .filter((text: string) => text.length > 0)\n .join(' ');\n\n if (!transcription) {\n throw new Error('No valid transcription found in results');\n }\n\n return transcription;\n }, 'voice.google.listen')();\n }\n}\n"]}
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@mastra/voice-google",
-  "version": "0.0.0-vector-query-sources-20250516172905",
+  "version": "0.0.0-vector-query-tool-provider-options-20250828222356",
   "description": "Mastra Google voice integration",
   "type": "module",
   "files": [
@@ -15,30 +15,34 @@
         "default": "./dist/index.js"
       },
       "require": {
-        "types": "./dist/index.d.
+        "types": "./dist/index.d.ts",
         "default": "./dist/index.cjs"
       }
     },
     "./package.json": "./package.json"
   },
-  "license": "
+  "license": "Apache-2.0",
   "dependencies": {
     "@google-cloud/speech": "^6.7.1",
-    "@google-cloud/text-to-speech": "^6.0
-    "zod": "^3.24.3",
-    "@mastra/core": "0.0.0-vector-query-sources-20250516172905"
+    "@google-cloud/text-to-speech": "^6.2.0"
   },
   "devDependencies": {
-    "@types/node": "^20.
-    "eslint": "^9.
-    "tsup": "^8.
+    "@types/node": "^20.19.0",
+    "eslint": "^9.30.1",
+    "tsup": "^8.5.0",
     "typescript": "^5.8.3",
-    "vitest": "^2.
-    "@internal/lint": "0.0.0-vector-query-
+    "vitest": "^3.2.4",
+    "@internal/lint": "0.0.0-vector-query-tool-provider-options-20250828222356",
+    "@mastra/core": "0.0.0-vector-query-tool-provider-options-20250828222356",
+    "@internal/types-builder": "0.0.0-vector-query-tool-provider-options-20250828222356"
+  },
+  "peerDependencies": {
+    "zod": "^3.25.0 || ^4.0.0",
+    "@mastra/core": "0.0.0-vector-query-tool-provider-options-20250828222356"
   },
   "scripts": {
-    "build": "tsup
-    "build:watch": "
+    "build": "tsup --silent --config tsup.config.ts",
+    "build:watch": "tsup --watch --silent --config tsup.config.ts",
     "test": "vitest run",
     "lint": "eslint ."
   }

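The `package.json` changes above move `zod` and `@mastra/core` out of `dependencies`: `@mastra/core` becomes a dev and peer dependency and `zod` a peer dependency, so both must now be installed by the consuming project. A hedged sketch of a consumer module under that assumption (the schema is illustrative only):

```ts
// With this release @mastra/core and zod are peer dependencies, so they resolve
// from the host project's node_modules rather than being installed transitively.
import { MastraVoice } from '@mastra/core/voice';
import { z } from 'zod';

import { GoogleVoice } from '@mastra/voice-google';

// Illustrative only: the peer ranges in the diff are "zod": "^3.25.0 || ^4.0.0"
// and the matching @mastra/core snapshot version.
const voice: MastraVoice = new GoogleVoice();
const transcriptSchema = z.object({ text: z.string() });

export { voice, transcriptSchema };
```
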
package/dist/_tsup-dts-rollup.d.cts
DELETED
@@ -1,73 +0,0 @@
-import type { google } from '@google-cloud/text-to-speech/build/protos/protos';
-import type { google as google_2 } from '@google-cloud/speech/build/protos/protos';
-import { MastraVoice } from '@mastra/core/voice';
-
-/**
- * Configuration for Google Cloud Voice models
- * @interface GoogleModelConfig
- * @property {string} [apiKey] - Optional Google Cloud API key. If not provided, will use GOOGLE_API_KEY environment variable
- */
-export declare interface GoogleModelConfig {
-    apiKey?: string;
-}
-
-/**
- * GoogleVoice class provides Text-to-Speech and Speech-to-Text capabilities using Google Cloud services
- * @class GoogleVoice
- * @extends MastraVoice
- */
-export declare class GoogleVoice extends MastraVoice {
-    private ttsClient;
-    private speechClient;
-    /**
-     * Creates an instance of GoogleVoice
-     * @param {Object} config - Configuration options
-     * @param {GoogleModelConfig} [config.speechModel] - Configuration for speech synthesis
-     * @param {GoogleModelConfig} [config.listeningModel] - Configuration for speech recognition
-     * @param {string} [config.speaker] - Default voice ID to use for speech synthesis
-     * @throws {Error} If no API key is provided via config or environment variable
-     */
-    constructor({ listeningModel, speechModel, speaker, }?: {
-        listeningModel?: GoogleModelConfig;
-        speechModel?: GoogleModelConfig;
-        speaker?: string;
-    });
-    /**
-     * Gets a list of available voices
-     * @returns {Promise<Array<{voiceId: string, languageCodes: string[]}>>} List of available voices and their supported languages. Default language is en-US.
-     */
-    getSpeakers({ languageCode }?: {
-        languageCode?: string;
-    }): Promise<{
-        voiceId: string;
-        languageCodes: string[];
-    }[]>;
-    private streamToString;
-    /**
-     * Converts text to speech
-     * @param {string | NodeJS.ReadableStream} input - Text or stream to convert to speech
-     * @param {Object} [options] - Speech synthesis options
-     * @param {string} [options.speaker] - Voice ID to use
-     * @param {string} [options.languageCode] - Language code for the voice
-     * @param {TextToSpeechTypes.cloud.texttospeech.v1.ISynthesizeSpeechRequest['audioConfig']} [options.audioConfig] - Audio configuration options
-     * @returns {Promise<NodeJS.ReadableStream>} Stream of synthesized audio. Default encoding is LINEAR16.
-     */
-    speak(input: string | NodeJS.ReadableStream, options?: {
-        speaker?: string;
-        languageCode?: string;
-        audioConfig?: google.cloud.texttospeech.v1.ISynthesizeSpeechRequest['audioConfig'];
-    }): Promise<NodeJS.ReadableStream>;
-    /**
-     * Converts speech to text
-     * @param {NodeJS.ReadableStream} audioStream - Audio stream to transcribe. Default encoding is LINEAR16.
-     * @param {Object} [options] - Recognition options
-     * @param {SpeechTypes.cloud.speech.v1.IRecognitionConfig} [options.config] - Recognition configuration
-     * @returns {Promise<string>} Transcribed text
-     */
-    listen(audioStream: NodeJS.ReadableStream, options?: {
-        stream?: boolean;
-        config?: google_2.cloud.speech.v1.IRecognitionConfig;
-    }): Promise<string>;
-}
-
-export { }

package/dist/_tsup-dts-rollup.d.ts
DELETED
@@ -1,73 +0,0 @@
-import type { google } from '@google-cloud/text-to-speech/build/protos/protos';
-import type { google as google_2 } from '@google-cloud/speech/build/protos/protos';
-import { MastraVoice } from '@mastra/core/voice';
-
-/**
- * Configuration for Google Cloud Voice models
- * @interface GoogleModelConfig
- * @property {string} [apiKey] - Optional Google Cloud API key. If not provided, will use GOOGLE_API_KEY environment variable
- */
-export declare interface GoogleModelConfig {
-    apiKey?: string;
-}
-
-/**
- * GoogleVoice class provides Text-to-Speech and Speech-to-Text capabilities using Google Cloud services
- * @class GoogleVoice
- * @extends MastraVoice
- */
-export declare class GoogleVoice extends MastraVoice {
-    private ttsClient;
-    private speechClient;
-    /**
-     * Creates an instance of GoogleVoice
-     * @param {Object} config - Configuration options
-     * @param {GoogleModelConfig} [config.speechModel] - Configuration for speech synthesis
-     * @param {GoogleModelConfig} [config.listeningModel] - Configuration for speech recognition
-     * @param {string} [config.speaker] - Default voice ID to use for speech synthesis
-     * @throws {Error} If no API key is provided via config or environment variable
-     */
-    constructor({ listeningModel, speechModel, speaker, }?: {
-        listeningModel?: GoogleModelConfig;
-        speechModel?: GoogleModelConfig;
-        speaker?: string;
-    });
-    /**
-     * Gets a list of available voices
-     * @returns {Promise<Array<{voiceId: string, languageCodes: string[]}>>} List of available voices and their supported languages. Default language is en-US.
-     */
-    getSpeakers({ languageCode }?: {
-        languageCode?: string;
-    }): Promise<{
-        voiceId: string;
-        languageCodes: string[];
-    }[]>;
-    private streamToString;
-    /**
-     * Converts text to speech
-     * @param {string | NodeJS.ReadableStream} input - Text or stream to convert to speech
-     * @param {Object} [options] - Speech synthesis options
-     * @param {string} [options.speaker] - Voice ID to use
-     * @param {string} [options.languageCode] - Language code for the voice
-     * @param {TextToSpeechTypes.cloud.texttospeech.v1.ISynthesizeSpeechRequest['audioConfig']} [options.audioConfig] - Audio configuration options
-     * @returns {Promise<NodeJS.ReadableStream>} Stream of synthesized audio. Default encoding is LINEAR16.
-     */
-    speak(input: string | NodeJS.ReadableStream, options?: {
-        speaker?: string;
-        languageCode?: string;
-        audioConfig?: google.cloud.texttospeech.v1.ISynthesizeSpeechRequest['audioConfig'];
-    }): Promise<NodeJS.ReadableStream>;
-    /**
-     * Converts speech to text
-     * @param {NodeJS.ReadableStream} audioStream - Audio stream to transcribe. Default encoding is LINEAR16.
-     * @param {Object} [options] - Recognition options
-     * @param {SpeechTypes.cloud.speech.v1.IRecognitionConfig} [options.config] - Recognition configuration
-     * @returns {Promise<string>} Transcribed text
-     */
-    listen(audioStream: NodeJS.ReadableStream, options?: {
-        stream?: boolean;
-        config?: google_2.cloud.speech.v1.IRecognitionConfig;
-    }): Promise<string>;
-}
-
-export { }

package/dist/index.d.cts
DELETED