@mastra/voice-google 0.11.6 → 0.11.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +22 -0
- package/README.md +6 -3
- package/dist/index.cjs +42 -15
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.ts +8 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +42 -15
- package/dist/index.js.map +1 -1
- package/package.json +5 -5
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,27 @@
# @mastra/voice-google

## 0.11.7

### Patch Changes

- Update peer deps ([#8154](https://github.com/mastra-ai/mastra/pull/8154))

- feat(voice-google): support additional authentication ([#8086](https://github.com/mastra-ai/mastra/pull/8086))

- Updated dependencies [[`dc099b4`](https://github.com/mastra-ai/mastra/commit/dc099b40fb31147ba3f362f98d991892033c4c67), [`504438b`](https://github.com/mastra-ai/mastra/commit/504438b961bde211071186bba63a842c4e3db879), [`b342a68`](https://github.com/mastra-ai/mastra/commit/b342a68e1399cf1ece9ba11bda112db89d21118c), [`a7243e2`](https://github.com/mastra-ai/mastra/commit/a7243e2e58762667a6e3921e755e89d6bb0a3282), [`7fceb0a`](https://github.com/mastra-ai/mastra/commit/7fceb0a327d678e812f90f5387c5bc4f38bd039e), [`303a9c0`](https://github.com/mastra-ai/mastra/commit/303a9c0d7dd58795915979f06a0512359e4532fb), [`df64f9e`](https://github.com/mastra-ai/mastra/commit/df64f9ef814916fff9baedd861c988084e7c41de), [`370f8a6`](https://github.com/mastra-ai/mastra/commit/370f8a6480faec70fef18d72e5f7538f27004301), [`809eea0`](https://github.com/mastra-ai/mastra/commit/809eea092fa80c3f69b9eaf078d843b57fd2a88e), [`683e5a1`](https://github.com/mastra-ai/mastra/commit/683e5a1466e48b686825b2c11f84680f296138e4), [`3679378`](https://github.com/mastra-ai/mastra/commit/3679378673350aa314741dc826f837b1984149bc), [`7775bc2`](https://github.com/mastra-ai/mastra/commit/7775bc20bb1ad1ab24797fb420e4f96c65b0d8ec), [`623ffaf`](https://github.com/mastra-ai/mastra/commit/623ffaf2d969e11e99a0224633cf7b5a0815c857), [`9fc1613`](https://github.com/mastra-ai/mastra/commit/9fc16136400186648880fd990119ac15f7c02ee4), [`61f62aa`](https://github.com/mastra-ai/mastra/commit/61f62aa31bc88fe4ddf8da6240dbcfbeb07358bd), [`db1891a`](https://github.com/mastra-ai/mastra/commit/db1891a4707443720b7cd8a260dc7e1d49b3609c), [`e8f379d`](https://github.com/mastra-ai/mastra/commit/e8f379d390efa264c4e0874f9ac0cf8839b07777), [`652066b`](https://github.com/mastra-ai/mastra/commit/652066bd1efc6bb6813ba950ed1d7573e8b7d9d4), [`3e292ba`](https://github.com/mastra-ai/mastra/commit/3e292ba00837886d5d68a34cbc0d9b703c991883), [`418c136`](https://github.com/mastra-ai/mastra/commit/418c1366843d88e491bca3f87763899ce855ca29), [`ea8d386`](https://github.com/mastra-ai/mastra/commit/ea8d386cd8c5593664515fd5770c06bf2aa980ef), [`67b0f00`](https://github.com/mastra-ai/mastra/commit/67b0f005b520335c71fb85cbaa25df4ce8484a81), [`c2a4919`](https://github.com/mastra-ai/mastra/commit/c2a4919ba6797d8bdb1509e02287496eef69303e), [`c84b7d0`](https://github.com/mastra-ai/mastra/commit/c84b7d093c4657772140cbfd2b15ef72f3315ed5), [`0130986`](https://github.com/mastra-ai/mastra/commit/0130986fc62d0edcc626dd593282661dbb9af141)]:
  - @mastra/core@0.19.0

## 0.11.7-alpha.0

### Patch Changes

- Update peer deps ([#8154](https://github.com/mastra-ai/mastra/pull/8154))

- feat(voice-google): support additional authentication ([#8086](https://github.com/mastra-ai/mastra/pull/8086))

- Updated dependencies [[`504438b`](https://github.com/mastra-ai/mastra/commit/504438b961bde211071186bba63a842c4e3db879), [`a7243e2`](https://github.com/mastra-ai/mastra/commit/a7243e2e58762667a6e3921e755e89d6bb0a3282), [`7fceb0a`](https://github.com/mastra-ai/mastra/commit/7fceb0a327d678e812f90f5387c5bc4f38bd039e), [`df64f9e`](https://github.com/mastra-ai/mastra/commit/df64f9ef814916fff9baedd861c988084e7c41de), [`809eea0`](https://github.com/mastra-ai/mastra/commit/809eea092fa80c3f69b9eaf078d843b57fd2a88e), [`683e5a1`](https://github.com/mastra-ai/mastra/commit/683e5a1466e48b686825b2c11f84680f296138e4), [`3679378`](https://github.com/mastra-ai/mastra/commit/3679378673350aa314741dc826f837b1984149bc), [`7775bc2`](https://github.com/mastra-ai/mastra/commit/7775bc20bb1ad1ab24797fb420e4f96c65b0d8ec), [`db1891a`](https://github.com/mastra-ai/mastra/commit/db1891a4707443720b7cd8a260dc7e1d49b3609c), [`e8f379d`](https://github.com/mastra-ai/mastra/commit/e8f379d390efa264c4e0874f9ac0cf8839b07777), [`652066b`](https://github.com/mastra-ai/mastra/commit/652066bd1efc6bb6813ba950ed1d7573e8b7d9d4), [`ea8d386`](https://github.com/mastra-ai/mastra/commit/ea8d386cd8c5593664515fd5770c06bf2aa980ef), [`c2a4919`](https://github.com/mastra-ai/mastra/commit/c2a4919ba6797d8bdb1509e02287496eef69303e), [`0130986`](https://github.com/mastra-ai/mastra/commit/0130986fc62d0edcc626dd593282661dbb9af141)]:
  - @mastra/core@0.19.0-alpha.1

## 0.11.6

### Patch Changes

package/README.md
CHANGED
@@ -12,10 +12,12 @@ npm install @mastra/voice-google

## Configuration

-
+You can authenticate in one of the following ways:

```bash
GOOGLE_API_KEY=your_api_key
+# or provide a service account key picked up by ADC
+GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account.json
```

## Usage
@@ -26,10 +28,11 @@ import { GoogleVoice } from '@mastra/voice-google';
// Initialize with configuration
const voice = new GoogleVoice({
  speechModel: {
-    apiKey: 'your-api-key', // Optional, can
+    apiKey: 'your-api-key', // Optional, can rely on GOOGLE_API_KEY or ADC
+    keyFilename: '/path/to/service-account.json', // Optional, can rely on GOOGLE_APPLICATION_CREDENTIALS
  },
  listeningModel: {
-
+    keyFilename: '/path/to/service-account.json', // Optional, can rely on ADC
  },
  speaker: 'en-US-Standard-F', // Default voice
});

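The README snippet above only shows the `apiKey` and `keyFilename` options. The widened `GoogleModelConfig` type shipped in this release (see `package/dist/index.d.ts` below) also accepts in-memory service-account credentials. A minimal sketch of that third option; the service-account email and the `GCP_PRIVATE_KEY` environment variable are placeholders, not values defined by the package:

```typescript
import { GoogleVoice } from '@mastra/voice-google';

// Sketch: authenticate with an in-memory service account instead of an API key
// or a key file. The client_email/private_key values are placeholders.
const credentials = {
  client_email: 'voice-bot@my-project.iam.gserviceaccount.com',
  private_key: process.env.GCP_PRIVATE_KEY, // e.g. injected from a secret manager
};

const voice = new GoogleVoice({
  speechModel: { credentials },
  listeningModel: { credentials },
  speaker: 'en-US-Standard-F',
});

// With no auth options at all, the clients fall back to GOOGLE_API_KEY,
// GOOGLE_APPLICATION_CREDENTIALS, or Application Default Credentials.
```
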
package/dist/index.cjs
CHANGED
@@ -6,6 +6,34 @@ var textToSpeech = require('@google-cloud/text-to-speech');
var voice = require('@mastra/core/voice');

// src/index.ts
+var resolveAuthConfig = (modelConfig, fallback) => {
+  const resolved = {};
+  const apiKey = modelConfig?.apiKey ?? fallback.apiKey;
+  if (apiKey) {
+    resolved.apiKey = apiKey;
+  }
+  const keyFilename = modelConfig?.keyFilename ?? fallback.keyFilename;
+  if (keyFilename) {
+    resolved.keyFilename = keyFilename;
+  }
+  const credentials = modelConfig?.credentials ?? fallback.credentials;
+  if (credentials) {
+    resolved.credentials = credentials;
+  }
+  return resolved;
+};
+var buildAuthOptions = (config) => {
+  if (config.credentials) {
+    return { credentials: config.credentials };
+  }
+  if (config.keyFilename) {
+    return { keyFilename: config.keyFilename };
+  }
+  if (config.apiKey) {
+    return { apiKey: config.apiKey };
+  }
+  return {};
+};
var DEFAULT_VOICE = "en-US-Casual-K";
var GoogleVoice = class extends voice.MastraVoice {
  ttsClient;
@@ -16,7 +44,6 @@ var GoogleVoice = class extends voice.MastraVoice {
   * @param {GoogleModelConfig} [config.speechModel] - Configuration for speech synthesis
   * @param {GoogleModelConfig} [config.listeningModel] - Configuration for speech recognition
   * @param {string} [config.speaker] - Default voice ID to use for speech synthesis
-   * @throws {Error} If no API key is provided via config or environment variable
   */
  constructor({
    listeningModel,
@@ -24,30 +51,30 @@ var GoogleVoice = class extends voice.MastraVoice {
    speaker
  } = {}) {
    const defaultApiKey = process.env.GOOGLE_API_KEY;
+    const defaultKeyFilename = process.env.GOOGLE_APPLICATION_CREDENTIALS;
    const defaultSpeaker = DEFAULT_VOICE;
+    const sharedFallback = {
+      apiKey: defaultApiKey ?? speechModel?.apiKey ?? listeningModel?.apiKey,
+      keyFilename: defaultKeyFilename ?? speechModel?.keyFilename ?? listeningModel?.keyFilename,
+      credentials: speechModel?.credentials ?? listeningModel?.credentials
+    };
+    const speechAuthConfig = resolveAuthConfig(speechModel, sharedFallback);
+    const listeningAuthConfig = resolveAuthConfig(listeningModel, sharedFallback);
    super({
      speechModel: {
        name: "",
-        apiKey: speechModel?.apiKey ?? defaultApiKey
+        apiKey: speechAuthConfig.apiKey ?? defaultApiKey
      },
      listeningModel: {
        name: "",
-        apiKey: listeningModel?.apiKey ?? defaultApiKey
+        apiKey: listeningAuthConfig.apiKey ?? defaultApiKey
      },
      speaker: speaker ?? defaultSpeaker
    });
-    const apiKey = defaultApiKey || speechModel?.apiKey || listeningModel?.apiKey;
-    if (!apiKey) {
-      throw new Error(
-        "Google API key is not set, set GOOGLE_API_KEY environment variable or pass apiKey to constructor"
-      );
-    }
-    this.ttsClient = new textToSpeech.TextToSpeechClient({
-      apiKey: this.speechModel?.apiKey || defaultApiKey
-    });
-    this.speechClient = new speech.SpeechClient({
-      apiKey: this.listeningModel?.apiKey || defaultApiKey
-    });
+    const ttsOptions = buildAuthOptions(speechAuthConfig);
+    const speechOptions = buildAuthOptions(listeningAuthConfig);
+    this.ttsClient = new textToSpeech.TextToSpeechClient(ttsOptions);
+    this.speechClient = new speech.SpeechClient(speechOptions);
  }
  /**
   * Gets a list of available voices

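Reading the compiled constructor above: per-model settings are merged with a shared fallback built from GOOGLE_API_KEY, GOOGLE_APPLICATION_CREDENTIALS and whichever model supplied credentials, and `buildAuthOptions` then hands exactly one mechanism to each Google client, preferring credentials over keyFilename over apiKey and deferring to Application Default Credentials when nothing is set. A standalone TypeScript sketch of that precedence, written as an illustrative re-statement rather than the package source:

```typescript
// Illustrative re-statement of the precedence in the compiled helpers above:
// credentials > keyFilename > apiKey > {} (Application Default Credentials).
interface AuthConfig {
  apiKey?: string;
  keyFilename?: string;
  credentials?: Record<string, unknown>;
}

function buildAuthOptions(config: AuthConfig): AuthConfig {
  if (config.credentials) return { credentials: config.credentials };
  if (config.keyFilename) return { keyFilename: config.keyFilename };
  if (config.apiKey) return { apiKey: config.apiKey };
  return {}; // empty options: the Google client resolves ADC on its own
}

// A config carrying several fields still yields a single auth mechanism:
console.log(buildAuthOptions({ apiKey: 'abc', keyFilename: '/tmp/sa.json' }));
// -> { keyFilename: '/tmp/sa.json' }
console.log(buildAuthOptions({}));
// -> {} (defer to ADC)
```
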
package/dist/index.cjs.map
CHANGED
@@ -1 +1 @@
(regenerated single-line source map: minified mappings plus an embedded copy of src/index.ts; not reproduced here)

package/dist/index.d.ts
CHANGED
@@ -5,9 +5,17 @@ import { MastraVoice } from '@mastra/core/voice';
 * Configuration for Google Cloud Voice models
 * @interface GoogleModelConfig
 * @property {string} [apiKey] - Optional Google Cloud API key. If not provided, will use GOOGLE_API_KEY environment variable
+ * @property {string} [keyFilename] - Optional path to a service account key file. If not provided, will use GOOGLE_APPLICATION_CREDENTIALS environment variable
+ * @property {{ client_email?: string; private_key?: string }} [credentials] - Optional in-memory service account credentials
 */
export interface GoogleModelConfig {
    apiKey?: string;
+    keyFilename?: string;
+    credentials?: {
+        client_email?: string;
+        private_key?: string;
+        [key: string]: unknown;
+    };
}
/**
 * GoogleVoice class provides Text-to-Speech and Speech-to-Text capabilities using Google Cloud services
@@ -23,7 +31,6 @@ export declare class GoogleVoice extends MastraVoice {
     * @param {GoogleModelConfig} [config.speechModel] - Configuration for speech synthesis
     * @param {GoogleModelConfig} [config.listeningModel] - Configuration for speech recognition
     * @param {string} [config.speaker] - Default voice ID to use for speech synthesis
-     * @throws {Error} If no API key is provided via config or environment variable
     */
    constructor({ listeningModel, speechModel, speaker, }?: {
        listeningModel?: GoogleModelConfig;

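For reference, these are the three shapes the widened `GoogleModelConfig` now type-checks, sketched against the declaration above; the concrete values are placeholders:

```typescript
import type { GoogleModelConfig } from '@mastra/voice-google';

// The values below are placeholders; only the shapes matter.
const withApiKey: GoogleModelConfig = { apiKey: process.env.GOOGLE_API_KEY };

const withKeyFile: GoogleModelConfig = { keyFilename: '/path/to/service-account.json' };

const withInlineCredentials: GoogleModelConfig = {
  credentials: {
    client_email: 'voice-bot@my-project.iam.gserviceaccount.com',
    private_key: '-----BEGIN PRIVATE KEY-----\n...',
    project_id: 'my-project', // extra key-file fields are allowed by the index signature
  },
};
```
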
package/dist/index.d.ts.map
CHANGED
@@ -1 +1 @@
(regenerated single-line declaration source map; minified mappings not reproduced here)

package/dist/index.js
CHANGED
@@ -4,6 +4,34 @@ import { TextToSpeechClient } from '@google-cloud/text-to-speech';
import { MastraVoice } from '@mastra/core/voice';

// src/index.ts
+var resolveAuthConfig = (modelConfig, fallback) => {
+  const resolved = {};
+  const apiKey = modelConfig?.apiKey ?? fallback.apiKey;
+  if (apiKey) {
+    resolved.apiKey = apiKey;
+  }
+  const keyFilename = modelConfig?.keyFilename ?? fallback.keyFilename;
+  if (keyFilename) {
+    resolved.keyFilename = keyFilename;
+  }
+  const credentials = modelConfig?.credentials ?? fallback.credentials;
+  if (credentials) {
+    resolved.credentials = credentials;
+  }
+  return resolved;
+};
+var buildAuthOptions = (config) => {
+  if (config.credentials) {
+    return { credentials: config.credentials };
+  }
+  if (config.keyFilename) {
+    return { keyFilename: config.keyFilename };
+  }
+  if (config.apiKey) {
+    return { apiKey: config.apiKey };
+  }
+  return {};
+};
var DEFAULT_VOICE = "en-US-Casual-K";
var GoogleVoice = class extends MastraVoice {
  ttsClient;
@@ -14,7 +42,6 @@ var GoogleVoice = class extends MastraVoice {
   * @param {GoogleModelConfig} [config.speechModel] - Configuration for speech synthesis
   * @param {GoogleModelConfig} [config.listeningModel] - Configuration for speech recognition
   * @param {string} [config.speaker] - Default voice ID to use for speech synthesis
-   * @throws {Error} If no API key is provided via config or environment variable
   */
  constructor({
    listeningModel,
@@ -22,30 +49,30 @@ var GoogleVoice = class extends MastraVoice {
    speaker
  } = {}) {
    const defaultApiKey = process.env.GOOGLE_API_KEY;
+    const defaultKeyFilename = process.env.GOOGLE_APPLICATION_CREDENTIALS;
    const defaultSpeaker = DEFAULT_VOICE;
+    const sharedFallback = {
+      apiKey: defaultApiKey ?? speechModel?.apiKey ?? listeningModel?.apiKey,
+      keyFilename: defaultKeyFilename ?? speechModel?.keyFilename ?? listeningModel?.keyFilename,
+      credentials: speechModel?.credentials ?? listeningModel?.credentials
+    };
+    const speechAuthConfig = resolveAuthConfig(speechModel, sharedFallback);
+    const listeningAuthConfig = resolveAuthConfig(listeningModel, sharedFallback);
    super({
      speechModel: {
        name: "",
-        apiKey: speechModel?.apiKey ?? defaultApiKey
+        apiKey: speechAuthConfig.apiKey ?? defaultApiKey
      },
      listeningModel: {
        name: "",
-        apiKey: listeningModel?.apiKey ?? defaultApiKey
+        apiKey: listeningAuthConfig.apiKey ?? defaultApiKey
      },
      speaker: speaker ?? defaultSpeaker
    });
-    const apiKey = defaultApiKey || speechModel?.apiKey || listeningModel?.apiKey;
-    if (!apiKey) {
-      throw new Error(
-        "Google API key is not set, set GOOGLE_API_KEY environment variable or pass apiKey to constructor"
-      );
-    }
-    this.ttsClient = new TextToSpeechClient({
-      apiKey: this.speechModel?.apiKey || defaultApiKey
-    });
-    this.speechClient = new SpeechClient({
-      apiKey: this.listeningModel?.apiKey || defaultApiKey
-    });
+    const ttsOptions = buildAuthOptions(speechAuthConfig);
+    const speechOptions = buildAuthOptions(listeningAuthConfig);
+    this.ttsClient = new TextToSpeechClient(ttsOptions);
+    this.speechClient = new SpeechClient(speechOptions);
  }
  /**
   * Gets a list of available voices

package/dist/index.js.map
CHANGED
@@ -1 +1 @@
(regenerated single-line source map: minified mappings plus an embedded copy of src/index.ts; not reproduced here)
Default language is en-US.\n */\n async getSpeakers({ languageCode = 'en-US' }: { languageCode?: string } = {}) {\n return this.traced(async () => {\n const [response] = await this.ttsClient.listVoices({ languageCode: languageCode });\n return (response?.voices || [])\n .filter(voice => voice.name && voice.languageCodes)\n .map(voice => ({\n voiceId: voice.name!,\n languageCodes: voice.languageCodes!,\n }));\n }, 'voice.google.getSpeakers')();\n }\n\n private async streamToString(stream: NodeJS.ReadableStream): Promise<string> {\n const chunks: Buffer[] = [];\n for await (const chunk of stream) {\n if (typeof chunk === 'string') {\n chunks.push(Buffer.from(chunk));\n } else {\n chunks.push(chunk);\n }\n }\n return Buffer.concat(chunks).toString('utf-8');\n }\n\n /**\n * Converts text to speech\n * @param {string | NodeJS.ReadableStream} input - Text or stream to convert to speech\n * @param {Object} [options] - Speech synthesis options\n * @param {string} [options.speaker] - Voice ID to use\n * @param {string} [options.languageCode] - Language code for the voice\n * @param {TextToSpeechTypes.cloud.texttospeech.v1.ISynthesizeSpeechRequest['audioConfig']} [options.audioConfig] - Audio configuration options\n * @returns {Promise<NodeJS.ReadableStream>} Stream of synthesized audio. Default encoding is LINEAR16.\n */\n async speak(\n input: string | NodeJS.ReadableStream,\n options?: {\n speaker?: string;\n languageCode?: string;\n audioConfig?: TextToSpeechTypes.cloud.texttospeech.v1.ISynthesizeSpeechRequest['audioConfig'];\n },\n ): Promise<NodeJS.ReadableStream> {\n return this.traced(async () => {\n const text = typeof input === 'string' ? input : await this.streamToString(input);\n\n const request: TextToSpeechTypes.cloud.texttospeech.v1.ISynthesizeSpeechRequest = {\n input: { text },\n voice: {\n name: options?.speaker || this.speaker,\n languageCode: options?.languageCode || options?.speaker?.split('-').slice(0, 2).join('-') || 'en-US',\n },\n audioConfig: options?.audioConfig || { audioEncoding: 'LINEAR16' },\n };\n\n const [response] = await this.ttsClient.synthesizeSpeech(request);\n\n if (!response.audioContent) {\n throw new Error('No audio content returned.');\n }\n\n if (typeof response.audioContent === 'string') {\n throw new Error('Audio content is a string.');\n }\n\n const stream = new PassThrough();\n stream.end(Buffer.from(response.audioContent));\n return stream;\n }, 'voice.google.speak')();\n }\n\n /**\n * Checks if listening capabilities are enabled.\n *\n * @returns {Promise<{ enabled: boolean }>}\n */\n async getListener() {\n return { enabled: true };\n }\n\n /**\n * Converts speech to text\n * @param {NodeJS.ReadableStream} audioStream - Audio stream to transcribe. 
Default encoding is LINEAR16.\n * @param {Object} [options] - Recognition options\n * @param {SpeechTypes.cloud.speech.v1.IRecognitionConfig} [options.config] - Recognition configuration\n * @returns {Promise<string>} Transcribed text\n */\n async listen(\n audioStream: NodeJS.ReadableStream,\n options?: { stream?: boolean; config?: SpeechTypes.cloud.speech.v1.IRecognitionConfig },\n ): Promise<string> {\n return this.traced(async () => {\n const chunks: Buffer[] = [];\n for await (const chunk of audioStream) {\n if (typeof chunk === 'string') {\n chunks.push(Buffer.from(chunk));\n } else {\n chunks.push(chunk);\n }\n }\n const buffer = Buffer.concat(chunks);\n\n let request = {\n config: {\n encoding: 'LINEAR16',\n languageCode: 'en-US',\n ...options?.config,\n },\n audio: {\n content: buffer.toString('base64'),\n },\n };\n const [response] = await this.speechClient.recognize(request as SpeechTypes.cloud.speech.v1.IRecognizeRequest);\n\n if (!response.results || response.results.length === 0) {\n throw new Error('No transcription results returned');\n }\n\n const transcription = response.results\n .map((result: any) => {\n if (!result.alternatives || result.alternatives.length === 0) {\n return '';\n }\n return result.alternatives[0].transcript || '';\n })\n .filter((text: string) => text.length > 0)\n .join(' ');\n\n if (!transcription) {\n throw new Error('No valid transcription found in results');\n }\n\n return transcription;\n }, 'voice.google.listen')();\n }\n}\n"]}
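
The bundled source above shows the authentication options this release adds to `GoogleModelConfig`: besides `GOOGLE_API_KEY` / `GOOGLE_APPLICATION_CREDENTIALS`, each model can now be given an `apiKey`, a `keyFilename`, or in-memory `credentials`. A minimal usage sketch, assuming the package entry point exports `GoogleVoice` as in the source above; the service-account email, private key, and file path below are placeholders:

```ts
import { GoogleVoice } from '@mastra/voice-google';

// Per-model auth config (GoogleModelConfig): credentials take precedence over
// keyFilename, which takes precedence over apiKey; unset fields fall back to
// GOOGLE_API_KEY / GOOGLE_APPLICATION_CREDENTIALS.
const voice = new GoogleVoice({
  speechModel: {
    credentials: {
      client_email: 'tts-sa@example-project.iam.gserviceaccount.com', // placeholder
      private_key: process.env.GOOGLE_PRIVATE_KEY,                    // placeholder
    },
  },
  listeningModel: {
    keyFilename: '/path/to/service-account.json', // placeholder path
  },
  speaker: 'en-US-Casual-K', // the package's default voice
});

// speak() resolves to a NodeJS.ReadableStream of LINEAR16 audio by default.
const audio = await voice.speak('Hello from Mastra!');

// listen() consumes an audio stream and resolves to the transcript text.
const transcript = await voice.listen(audio, { config: { languageCode: 'en-US' } });
console.log(transcript);
```
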
package/package.json
CHANGED

@@ -1,6 +1,6 @@
   {
     "name": "@mastra/voice-google",
-    "version": "0.11.6",
+    "version": "0.11.7",
     "description": "Mastra Google voice integration",
     "type": "module",
     "files": [
@@ -33,12 +33,12 @@
     "tsup": "^8.5.0",
     "typescript": "^5.8.3",
     "vitest": "^3.2.4",
-    "@internal/lint": "0.0.
-    "@
-    "@
+    "@internal/lint": "0.0.43",
+    "@mastra/core": "0.19.0",
+    "@internal/types-builder": "0.0.18"
   },
   "peerDependencies": {
-    "@mastra/core": ">=0.
+    "@mastra/core": ">=0.18.1-0 <0.20.0-0",
     "zod": "^3.25.0 || ^4.0.0"
   },
   "homepage": "https://mastra.ai",