@mastra/voice-deepgram 0.0.0-share-agent-metadata-with-cloud-20250718123411 → 0.0.0-sidebar-window-undefined-fix-20251029233656

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -65,11 +65,9 @@ var DeepgramVoice = class extends voice.MastraVoice {
     this.speaker = speaker || "asteria-en";
   }
   async getSpeakers() {
-    return this.traced(async () => {
-      return DEEPGRAM_VOICES.map((voice) => ({
-        voiceId: voice
-      }));
-    }, "voice.deepgram.getSpeakers")();
+    return DEEPGRAM_VOICES.map((voice) => ({
+      voiceId: voice
+    }));
   }
   async speak(input, options) {
     if (!this.speechClient) {
@@ -92,48 +90,46 @@ var DeepgramVoice = class extends voice.MastraVoice {
     if (text.trim().length === 0) {
       throw new Error("Input text is empty");
     }
-    return this.traced(async () => {
-      if (!this.speechClient) {
-        throw new Error("No speech client configured");
-      }
-      let model;
-      if (options?.speaker) {
-        model = this.speechModel?.name + "-" + options.speaker;
-      } else if (this.speaker) {
-        model = this.speechModel?.name + "-" + this.speaker;
-      }
-      const speakClient = this.speechClient.speak;
-      const response = await speakClient.request(
-        { text },
-        {
-          model,
-          ...options
-        }
-      );
-      const webStream = await response.getStream();
-      if (!webStream) {
-        throw new Error("No stream returned from Deepgram");
+    if (!this.speechClient) {
+      throw new Error("No speech client configured");
+    }
+    let model;
+    if (options?.speaker) {
+      model = this.speechModel?.name + "-" + options.speaker;
+    } else if (this.speaker) {
+      model = this.speechModel?.name + "-" + this.speaker;
+    }
+    const speakClient = this.speechClient.speak;
+    const response = await speakClient.request(
+      { text },
+      {
+        model,
+        ...options
       }
-      const reader = webStream.getReader();
-      const nodeStream = new stream.PassThrough();
-      (async () => {
-        try {
-          while (true) {
-            const { done, value } = await reader.read();
-            if (done) {
-              nodeStream.end();
-              break;
-            }
-            nodeStream.write(value);
+    );
+    const webStream = await response.getStream();
+    if (!webStream) {
+      throw new Error("No stream returned from Deepgram");
+    }
+    const reader = webStream.getReader();
+    const nodeStream = new stream.PassThrough();
+    (async () => {
+      try {
+        while (true) {
+          const { done, value } = await reader.read();
+          if (done) {
+            nodeStream.end();
+            break;
           }
-        } catch (error) {
-          nodeStream.destroy(error);
+          nodeStream.write(value);
         }
-      })().catch((error) => {
+      } catch (error) {
         nodeStream.destroy(error);
-      });
-      return nodeStream;
-    }, "voice.deepgram.speak")();
+      }
+    })().catch((error) => {
+      nodeStream.destroy(error);
+    });
+    return nodeStream;
   }
   /**
    * Checks if listening capabilities are enabled.
@@ -156,24 +152,24 @@ var DeepgramVoice = class extends voice.MastraVoice {
       }
     }
     const buffer = Buffer.concat(chunks);
-    return this.traced(async () => {
-      if (!this.listeningClient) {
-        throw new Error("No listening client configured");
-      }
-      const { result, error } = await this.listeningClient.listen.prerecorded.transcribeFile(buffer, {
-        model: this.listeningModel?.name,
-        ...options
-      });
-      if (error) {
-        throw error;
-      }
-      const transcript = result.results?.channels?.[0]?.alternatives?.[0]?.transcript;
-      if (!transcript) {
-        throw new Error("No transcript found in Deepgram response");
-      }
-      return transcript;
-    }, "voice.deepgram.listen")();
+    if (!this.listeningClient) {
+      throw new Error("No listening client configured");
+    }
+    const { result, error } = await this.listeningClient.listen.prerecorded.transcribeFile(buffer, {
+      model: this.listeningModel?.name,
+      ...options
+    });
+    if (error) {
+      throw error;
+    }
+    const transcript = result.results?.channels?.[0]?.alternatives?.[0]?.transcript;
+    if (!transcript) {
+      throw new Error("No transcript found in Deepgram response");
+    }
+    return transcript;
   }
 };
 
 exports.DeepgramVoice = DeepgramVoice;
+//# sourceMappingURL=index.cjs.map
+//# sourceMappingURL=index.cjs.map
package/dist/index.cjs.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/voices.ts","../src/index.ts"],"names":["MastraVoice","createClient","PassThrough"],"mappings":";;;;;;;;;AAKO,IAAM,eAAA,GAAkB;AAAA,EAC7B,YAAA;AAAA,EACA,SAAA;AAAA,EACA,WAAA;AAAA,EACA,WAAA;AAAA,EACA,SAAA;AAAA,EACA,UAAA;AAAA,EACA,UAAA;AAAA,EACA,YAAA;AAAA,EACA,UAAA;AAAA,EACA,YAAA;AAAA,EACA,WAAA;AAAA,EACA;AACF,CAAA;;;ACHO,IAAM,aAAA,GAAN,cAA4BA,iBAAA,CAAY;AAAA,EACrC,YAAA;AAAA,EACA,eAAA;AAAA,EAER,WAAA,CAAY;AAAA,IACV,WAAA;AAAA,IACA,cAAA;AAAA,IACA;AAAA,GACF,GAA4G,EAAC,EAAG;AAC9G,IAAA,MAAM,aAAA,GAAgB,QAAQ,GAAA,CAAI,gBAAA;AAElC,IAAA,MAAM,kBAAA,GAAqB;AAAA,MACzB,IAAA,EAAM,MAAA;AAAA,MACN,MAAA,EAAQ;AAAA,KACV;AAEA,IAAA,MAAM,qBAAA,GAAwB;AAAA,MAC5B,IAAA,EAAM,MAAA;AAAA,MACN,MAAA,EAAQ;AAAA,KACV;AAEA,IAAA,KAAA,CAAM;AAAA,MACJ,WAAA,EAAa;AAAA,QACX,IAAA,EAAM,WAAA,EAAa,IAAA,IAAQ,kBAAA,CAAmB,IAAA;AAAA,QAC9C,MAAA,EAAQ,WAAA,EAAa,MAAA,IAAU,kBAAA,CAAmB;AAAA,OACpD;AAAA,MACA,cAAA,EAAgB;AAAA,QACd,IAAA,EAAM,cAAA,EAAgB,IAAA,IAAQ,qBAAA,CAAsB,IAAA;AAAA,QACpD,MAAA,EAAQ,cAAA,EAAgB,MAAA,IAAU,qBAAA,CAAsB;AAAA,OAC1D;AAAA,MACA;AAAA,KACD,CAAA;AAED,IAAA,MAAM,YAAA,GAAe,aAAa,MAAA,IAAU,aAAA;AAC5C,IAAA,MAAM,eAAA,GAAkB,gBAAgB,MAAA,IAAU,aAAA;AAElD,IAAA,IAAI,CAAC,YAAA,IAAgB,CAAC,eAAA,EAAiB;AACrC,MAAA,MAAM,IAAI,MAAM,4FAA4F,CAAA;AAAA,IAC9G;AAEA,IAAA,IAAI,YAAA,EAAc;AAChB,MAAA,IAAA,CAAK,YAAA,GAAeC,iBAAa,YAAY,CAAA;AAAA,IAC/C;AACA,IAAA,IAAI,eAAA,EAAiB;AACnB,MAAA,IAAA,CAAK,eAAA,GAAkBA,iBAAa,eAAe,CAAA;AAAA,IACrD;AAEA,IAAA,IAAA,CAAK,UAAU,OAAA,IAAW,YAAA;AAAA,EAC5B;AAAA,EAEA,MAAM,WAAA,GAAc;AAClB,IAAA,OAAO,eAAA,CAAgB,IAAI,CAAA,KAAA,MAAU;AAAA,MACnC,OAAA,EAAS;AAAA,KACX,CAAE,CAAA;AAAA,EACJ;AAAA,EAEA,MAAM,KAAA,CACJ,KAAA,EACA,OAAA,EAIgC;AAChC,IAAA,IAAI,CAAC,KAAK,YAAA,EAAc;AACtB,MAAA,MAAM,IAAI,MAAM,uCAAuC,CAAA;AAAA,IACzD;AAEA,IAAA,IAAI,IAAA;AACJ,IAAA,IAAI,OAAO,UAAU,QAAA,EAAU;AAC7B,MAAA,MAAM,SAAmB,EAAC;AAC1B,MAAA,WAAA,MAAiB,SAAS,KAAA,EAAO;AAC/B,QAAA,IAAI,OAAO,UAAU,QAAA,EAAU;AAC7B,UAAA,MAAA,CAAO,IAAA,CAAK,MAAA,CAAO,IAAA,CAAK,KAAK,CAAC,CAAA;AAAA,QAChC,CAAA,MAAO;AACL,UAAA,MAAA,CAAO,KAAK,KAAK,CAAA;AAAA,QACnB;AAAA,MACF;AACA,MAAA,IAAA,GAAO,MAAA,CAAO,MAAA,CAAO,MAAM,CAAA,CAAE,SAAS,OAAO,CAAA;AAAA,IAC/C,CAAA,MAAO;AACL,MAAA,IAAA,GAAO,KAAA;AAAA,IACT;AAEA,IAAA,IAAI,IAAA,CAAK,IAAA,EAAK,CAAE,MAAA,KAAW,CAAA,EAAG;AAC5B,MAAA,MAAM,IAAI,MAAM,qBAAqB,CAAA;AAAA,IACvC;AAEA,IAAA,IAAI,CAAC,KAAK,YAAA,EAAc;AACtB,MAAA,MAAM,IAAI,MAAM,6BAA6B,CAAA;AAAA,IAC/C;AAEA,IAAA,IAAI,KAAA;AACJ,IAAA,IAAI,SAAS,OAAA,EAAS;AACpB,MAAA,KAAA,GAAQ,IAAA,CAAK,WAAA,EAAa,IAAA,GAAO,GAAA,GAAM,OAAA,CAAQ,OAAA;AAAA,IACjD,CAAA,MAAA,IAAW,KAAK,OAAA,EAAS;AACvB,MAAA,KAAA,GAAQ,IAAA,CAAK,WAAA,EAAa,IAAA,GAAO,GAAA,GAAM,IAAA,CAAK,OAAA;AAAA,IAC9C;AAEA,IAAA,MAAM,WAAA,GAAc,KAAK,YAAA,CAAa,KAAA;AACtC,IAAA,MAAM,QAAA,GAAW,MAAM,WAAA,CAAY,OAAA;AAAA,MACjC,EAAE,IAAA,EAAK;AAAA,MACP;AAAA,QACE,KAAA;AAAA,QACA,GAAG;AAAA;AACL,KACF;AAEA,IAAA,MAAM,SAAA,GAAY,MAAM,QAAA,CAAS,SAAA,EAAU;AAC3C,IAAA,IAAI,CAAC,SAAA,EAAW;AACd,MAAA,MAAM,IAAI,MAAM,kCAAkC,CAAA;AAAA,IACpD;AAEA,IAAA,MAAM,MAAA,GAAS,UAAU,SAAA,EAAU;AACnC,IAAA,MAAM,UAAA,GAAa,IAAIC,kBAAA,EAAY;AAGnC,IAAA,CAAC,YAAY;AACX,MAAA,IAAI;AACF,QAAA,OAAO,IAAA,EAAM;AACX,UAAA,MAAM,EAAE,IAAA,EAAM,KAAA,EAAM,GAAI,MAAM,OAAO,IAAA,EAAK;AAC1C,UAAA,IAAI,IAAA,EAAM;AACR,YAAA,UAAA,CAAW,GAAA,EAAI;AACf,YAAA;AAAA,UACF;AACA,UAAA,UAAA,CAAW,MAAM,KAAK,CAAA;AAAA,QACxB;AAAA,MACF,SAAS,KAAA,EAAO;AACd,QAAA,UAAA,CAAW,QAAQ,KAAc,CAAA;AAAA,MACnC;AAAA,IACF,CAAA,GAAG,CAAE,KAAA,CAAM,CAAA,KAAA,KAAS;AAClB,MAAA,UAAA,CAAW,QAAQ,KAAc,CAAA;AAAA,IACnC,CAAC,CAAA;AAED,IAAA,OAAO,UAAA;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,WAAA,GAAc;AAClB,IAAA,OAAO,EAAE,SAAS,IAAA,EAAK;AAAA,EACzB;AAAA,EAEA,MAAM,MAAA,CACJ,WAAA,EACA
,OAAA,EAGiB;AACjB,IAAA,IAAI,CAAC,KAAK,eAAA,EAAiB;AACzB,MAAA,MAAM,IAAI,MAAM,0CAA0C,CAAA;AAAA,IAC5D;AAEA,IAAA,MAAM,SAAmB,EAAC;AAC1B,IAAA,WAAA,MAAiB,SAAS,WAAA,EAAa;AACrC,MAAA,IAAI,OAAO,UAAU,QAAA,EAAU;AAC7B,QAAA,MAAA,CAAO,IAAA,CAAK,MAAA,CAAO,IAAA,CAAK,KAAK,CAAC,CAAA;AAAA,MAChC,CAAA,MAAO;AACL,QAAA,MAAA,CAAO,KAAK,KAAK,CAAA;AAAA,MACnB;AAAA,IACF;AACA,IAAA,MAAM,MAAA,GAAS,MAAA,CAAO,MAAA,CAAO,MAAM,CAAA;AAEnC,IAAA,IAAI,CAAC,KAAK,eAAA,EAAiB;AACzB,MAAA,MAAM,IAAI,MAAM,gCAAgC,CAAA;AAAA,IAClD;AACA,IAAA,MAAM,EAAE,MAAA,EAAQ,KAAA,EAAM,GAAI,MAAM,KAAK,eAAA,CAAgB,MAAA,CAAO,WAAA,CAAY,cAAA,CAAe,MAAA,EAAQ;AAAA,MAC7F,KAAA,EAAO,KAAK,cAAA,EAAgB,IAAA;AAAA,MAC5B,GAAG;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,KAAA,EAAO;AACT,MAAA,MAAM,KAAA;AAAA,IACR;AAEA,IAAA,MAAM,UAAA,GAAa,OAAO,OAAA,EAAS,QAAA,GAAW,CAAC,CAAA,EAAG,YAAA,GAAe,CAAC,CAAA,EAAG,UAAA;AACrE,IAAA,IAAI,CAAC,UAAA,EAAY;AACf,MAAA,MAAM,IAAI,MAAM,0CAA0C,CAAA;AAAA,IAC5D;AAEA,IAAA,OAAO,UAAA;AAAA,EACT;AACF","file":"index.cjs","sourcesContent":["/**\n * List of available Deepgram voice models for text-to-speech\n * Each voice is designed for specific use cases and languages\n * Format: {name}-{language} (e.g. asteria-en)\n */\nexport const DEEPGRAM_VOICES = [\n 'asteria-en',\n 'luna-en',\n 'stella-en',\n 'athena-en',\n 'hera-en',\n 'orion-en',\n 'arcas-en',\n 'perseus-en',\n 'angus-en',\n 'orpheus-en',\n 'helios-en',\n 'zeus-en',\n] as const;\n\nexport type DeepgramVoiceId = (typeof DEEPGRAM_VOICES)[number];\n\n/**\n * List of available Deepgram models for text-to-speech and speech-to-text\n */\nexport const DEEPGRAM_MODELS = ['aura', 'whisper', 'base', 'enhanced', 'nova', 'nova-2', 'nova-3'] as const;\n\nexport type DeepgramModel = (typeof DEEPGRAM_MODELS)[number];\n","import { PassThrough } from 'stream';\n\nimport { createClient } from '@deepgram/sdk';\nimport { MastraVoice } from '@mastra/core/voice';\n\nimport { DEEPGRAM_VOICES } from './voices';\nimport type { DeepgramVoiceId, DeepgramModel } from './voices';\n\ninterface DeepgramVoiceConfig {\n name?: DeepgramModel;\n apiKey?: string;\n properties?: Record<string, any>;\n language?: string;\n}\n\nexport class DeepgramVoice extends MastraVoice {\n private speechClient?: ReturnType<typeof createClient>;\n private listeningClient?: ReturnType<typeof createClient>;\n\n constructor({\n speechModel,\n listeningModel,\n speaker,\n }: { speechModel?: DeepgramVoiceConfig; listeningModel?: DeepgramVoiceConfig; speaker?: DeepgramVoiceId } = {}) {\n const defaultApiKey = process.env.DEEPGRAM_API_KEY;\n\n const defaultSpeechModel = {\n name: 'aura',\n apiKey: defaultApiKey,\n };\n\n const defaultListeningModel = {\n name: 'nova',\n apiKey: defaultApiKey,\n };\n\n super({\n speechModel: {\n name: speechModel?.name ?? defaultSpeechModel.name,\n apiKey: speechModel?.apiKey ?? defaultSpeechModel.apiKey,\n },\n listeningModel: {\n name: listeningModel?.name ?? defaultListeningModel.name,\n apiKey: listeningModel?.apiKey ?? 
defaultListeningModel.apiKey,\n },\n speaker,\n });\n\n const speechApiKey = speechModel?.apiKey || defaultApiKey;\n const listeningApiKey = listeningModel?.apiKey || defaultApiKey;\n\n if (!speechApiKey && !listeningApiKey) {\n throw new Error('At least one of DEEPGRAM_API_KEY, speechModel.apiKey, or listeningModel.apiKey must be set');\n }\n\n if (speechApiKey) {\n this.speechClient = createClient(speechApiKey);\n }\n if (listeningApiKey) {\n this.listeningClient = createClient(listeningApiKey);\n }\n\n this.speaker = speaker || 'asteria-en';\n }\n\n async getSpeakers() {\n return DEEPGRAM_VOICES.map(voice => ({\n voiceId: voice,\n }));\n }\n\n async speak(\n input: string | NodeJS.ReadableStream,\n options?: {\n speaker?: string;\n [key: string]: any;\n },\n ): Promise<NodeJS.ReadableStream> {\n if (!this.speechClient) {\n throw new Error('Deepgram speech client not configured');\n }\n\n let text: string;\n if (typeof input !== 'string') {\n const chunks: Buffer[] = [];\n for await (const chunk of input) {\n if (typeof chunk === 'string') {\n chunks.push(Buffer.from(chunk));\n } else {\n chunks.push(chunk);\n }\n }\n text = Buffer.concat(chunks).toString('utf-8');\n } else {\n text = input;\n }\n\n if (text.trim().length === 0) {\n throw new Error('Input text is empty');\n }\n\n if (!this.speechClient) {\n throw new Error('No speech client configured');\n }\n\n let model;\n if (options?.speaker) {\n model = this.speechModel?.name + '-' + options.speaker;\n } else if (this.speaker) {\n model = this.speechModel?.name + '-' + this.speaker;\n }\n\n const speakClient = this.speechClient.speak;\n const response = await speakClient.request(\n { text },\n {\n model,\n ...options,\n },\n );\n\n const webStream = await response.getStream();\n if (!webStream) {\n throw new Error('No stream returned from Deepgram');\n }\n\n const reader = webStream.getReader();\n const nodeStream = new PassThrough();\n\n // Add error handling for the stream processing\n (async () => {\n try {\n while (true) {\n const { done, value } = await reader.read();\n if (done) {\n nodeStream.end();\n break;\n }\n nodeStream.write(value);\n }\n } catch (error) {\n nodeStream.destroy(error as Error);\n }\n })().catch(error => {\n nodeStream.destroy(error as Error);\n });\n\n return nodeStream;\n }\n\n /**\n * Checks if listening capabilities are enabled.\n *\n * @returns {Promise<{ enabled: boolean }>}\n */\n async getListener() {\n return { enabled: true };\n }\n\n async listen(\n audioStream: NodeJS.ReadableStream,\n options?: {\n [key: string]: any;\n },\n ): Promise<string> {\n if (!this.listeningClient) {\n throw new Error('Deepgram listening client not configured');\n }\n\n const chunks: Buffer[] = [];\n for await (const chunk of audioStream) {\n if (typeof chunk === 'string') {\n chunks.push(Buffer.from(chunk));\n } else {\n chunks.push(chunk);\n }\n }\n const buffer = Buffer.concat(chunks);\n\n if (!this.listeningClient) {\n throw new Error('No listening client configured');\n }\n const { result, error } = await this.listeningClient.listen.prerecorded.transcribeFile(buffer, {\n model: this.listeningModel?.name,\n ...options,\n });\n\n if (error) {\n throw error;\n }\n\n const transcript = result.results?.channels?.[0]?.alternatives?.[0]?.transcript;\n if (!transcript) {\n throw new Error('No transcript found in Deepgram response');\n }\n\n return transcript;\n }\n}\n\nexport type { DeepgramVoiceConfig, DeepgramVoiceId, DeepgramModel };\n"]}
package/dist/index.d.ts CHANGED
@@ -1,4 +1,37 @@
-export { DeepgramVoice } from './_tsup-dts-rollup.js';
-export { DeepgramVoiceConfig } from './_tsup-dts-rollup.js';
-export { DeepgramVoiceId } from './_tsup-dts-rollup.js';
-export { DeepgramModel } from './_tsup-dts-rollup.js';
+import { MastraVoice } from '@mastra/core/voice';
+import type { DeepgramVoiceId, DeepgramModel } from './voices.js';
+interface DeepgramVoiceConfig {
+    name?: DeepgramModel;
+    apiKey?: string;
+    properties?: Record<string, any>;
+    language?: string;
+}
+export declare class DeepgramVoice extends MastraVoice {
+    private speechClient?;
+    private listeningClient?;
+    constructor({ speechModel, listeningModel, speaker, }?: {
+        speechModel?: DeepgramVoiceConfig;
+        listeningModel?: DeepgramVoiceConfig;
+        speaker?: DeepgramVoiceId;
+    });
+    getSpeakers(): Promise<{
+        voiceId: "asteria-en" | "luna-en" | "stella-en" | "athena-en" | "hera-en" | "orion-en" | "arcas-en" | "perseus-en" | "angus-en" | "orpheus-en" | "helios-en" | "zeus-en";
+    }[]>;
+    speak(input: string | NodeJS.ReadableStream, options?: {
+        speaker?: string;
+        [key: string]: any;
+    }): Promise<NodeJS.ReadableStream>;
+    /**
+     * Checks if listening capabilities are enabled.
+     *
+     * @returns {Promise<{ enabled: boolean }>}
+     */
+    getListener(): Promise<{
+        enabled: boolean;
+    }>;
+    listen(audioStream: NodeJS.ReadableStream, options?: {
+        [key: string]: any;
+    }): Promise<string>;
+}
+export type { DeepgramVoiceConfig, DeepgramVoiceId, DeepgramModel };
+//# sourceMappingURL=index.d.ts.map
package/dist/index.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAGA,OAAO,EAAE,WAAW,EAAE,MAAM,oBAAoB,CAAC;AAGjD,OAAO,KAAK,EAAE,eAAe,EAAE,aAAa,EAAE,MAAM,UAAU,CAAC;AAE/D,UAAU,mBAAmB;IAC3B,IAAI,CAAC,EAAE,aAAa,CAAC;IACrB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;IACjC,QAAQ,CAAC,EAAE,MAAM,CAAC;CACnB;AAED,qBAAa,aAAc,SAAQ,WAAW;IAC5C,OAAO,CAAC,YAAY,CAAC,CAAkC;IACvD,OAAO,CAAC,eAAe,CAAC,CAAkC;gBAE9C,EACV,WAAW,EACX,cAAc,EACd,OAAO,GACR,GAAE;QAAE,WAAW,CAAC,EAAE,mBAAmB,CAAC;QAAC,cAAc,CAAC,EAAE,mBAAmB,CAAC;QAAC,OAAO,CAAC,EAAE,eAAe,CAAA;KAAO;IA0CxG,WAAW;;;IAMX,KAAK,CACT,KAAK,EAAE,MAAM,GAAG,MAAM,CAAC,cAAc,EACrC,OAAO,CAAC,EAAE;QACR,OAAO,CAAC,EAAE,MAAM,CAAC;QACjB,CAAC,GAAG,EAAE,MAAM,GAAG,GAAG,CAAC;KACpB,GACA,OAAO,CAAC,MAAM,CAAC,cAAc,CAAC;IAyEjC;;;;OAIG;IACG,WAAW;;;IAIX,MAAM,CACV,WAAW,EAAE,MAAM,CAAC,cAAc,EAClC,OAAO,CAAC,EAAE;QACR,CAAC,GAAG,EAAE,MAAM,GAAG,GAAG,CAAC;KACpB,GACA,OAAO,CAAC,MAAM,CAAC;CAkCnB;AAED,YAAY,EAAE,mBAAmB,EAAE,eAAe,EAAE,aAAa,EAAE,CAAC"}
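For orientation, here is a minimal usage sketch against the public API declared in the new index.d.ts above. It is not part of the package; the file paths, output format, and top-level await are illustrative assumptions, and it presumes DEEPGRAM_API_KEY is set in the environment (per the bundled source, the defaults are the "aura" speech model, the "nova" listening model, and the "asteria-en" speaker).

```ts
import { createReadStream, createWriteStream } from 'node:fs';
import { DeepgramVoice } from '@mastra/voice-deepgram';

// Relies on DEEPGRAM_API_KEY plus the package defaults described above.
const voice = new DeepgramVoice();

// getSpeakers() resolves to the typed voice list, e.g. [{ voiceId: 'asteria-en' }, ...]
const speakers = await voice.getSpeakers();
console.log(speakers.map(s => s.voiceId));

// speak() resolves to a Node readable stream of synthesized audio.
const audio = await voice.speak('Hello from Mastra', { speaker: 'luna-en' });
audio.pipe(createWriteStream('hello-luna.mp3')); // output path/format is hypothetical

// listen() buffers an audio stream and resolves to the transcript text.
const transcript = await voice.listen(createReadStream('hello-luna.mp3'));
console.log(transcript);
```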
package/dist/index.js CHANGED
@@ -63,11 +63,9 @@ var DeepgramVoice = class extends MastraVoice {
     this.speaker = speaker || "asteria-en";
   }
   async getSpeakers() {
-    return this.traced(async () => {
-      return DEEPGRAM_VOICES.map((voice) => ({
-        voiceId: voice
-      }));
-    }, "voice.deepgram.getSpeakers")();
+    return DEEPGRAM_VOICES.map((voice) => ({
+      voiceId: voice
+    }));
   }
   async speak(input, options) {
     if (!this.speechClient) {
@@ -90,48 +88,46 @@ var DeepgramVoice = class extends MastraVoice {
     if (text.trim().length === 0) {
       throw new Error("Input text is empty");
     }
-    return this.traced(async () => {
-      if (!this.speechClient) {
-        throw new Error("No speech client configured");
-      }
-      let model;
-      if (options?.speaker) {
-        model = this.speechModel?.name + "-" + options.speaker;
-      } else if (this.speaker) {
-        model = this.speechModel?.name + "-" + this.speaker;
-      }
-      const speakClient = this.speechClient.speak;
-      const response = await speakClient.request(
-        { text },
-        {
-          model,
-          ...options
-        }
-      );
-      const webStream = await response.getStream();
-      if (!webStream) {
-        throw new Error("No stream returned from Deepgram");
+    if (!this.speechClient) {
+      throw new Error("No speech client configured");
+    }
+    let model;
+    if (options?.speaker) {
+      model = this.speechModel?.name + "-" + options.speaker;
+    } else if (this.speaker) {
+      model = this.speechModel?.name + "-" + this.speaker;
+    }
+    const speakClient = this.speechClient.speak;
+    const response = await speakClient.request(
+      { text },
+      {
+        model,
+        ...options
       }
-      const reader = webStream.getReader();
-      const nodeStream = new PassThrough();
-      (async () => {
-        try {
-          while (true) {
-            const { done, value } = await reader.read();
-            if (done) {
-              nodeStream.end();
-              break;
-            }
-            nodeStream.write(value);
+    );
+    const webStream = await response.getStream();
+    if (!webStream) {
+      throw new Error("No stream returned from Deepgram");
+    }
+    const reader = webStream.getReader();
+    const nodeStream = new PassThrough();
+    (async () => {
+      try {
+        while (true) {
+          const { done, value } = await reader.read();
+          if (done) {
+            nodeStream.end();
+            break;
          }
-        } catch (error) {
-          nodeStream.destroy(error);
+          nodeStream.write(value);
         }
-      })().catch((error) => {
+      } catch (error) {
         nodeStream.destroy(error);
-      });
-      return nodeStream;
-    }, "voice.deepgram.speak")();
+      }
+    })().catch((error) => {
+      nodeStream.destroy(error);
+    });
+    return nodeStream;
   }
   /**
    * Checks if listening capabilities are enabled.
@@ -154,24 +150,24 @@ var DeepgramVoice = class extends MastraVoice {
       }
     }
     const buffer = Buffer.concat(chunks);
-    return this.traced(async () => {
-      if (!this.listeningClient) {
-        throw new Error("No listening client configured");
-      }
-      const { result, error } = await this.listeningClient.listen.prerecorded.transcribeFile(buffer, {
-        model: this.listeningModel?.name,
-        ...options
-      });
-      if (error) {
-        throw error;
-      }
-      const transcript = result.results?.channels?.[0]?.alternatives?.[0]?.transcript;
-      if (!transcript) {
-        throw new Error("No transcript found in Deepgram response");
-      }
-      return transcript;
-    }, "voice.deepgram.listen")();
+    if (!this.listeningClient) {
+      throw new Error("No listening client configured");
+    }
+    const { result, error } = await this.listeningClient.listen.prerecorded.transcribeFile(buffer, {
+      model: this.listeningModel?.name,
+      ...options
+    });
+    if (error) {
+      throw error;
+    }
+    const transcript = result.results?.channels?.[0]?.alternatives?.[0]?.transcript;
+    if (!transcript) {
+      throw new Error("No transcript found in Deepgram response");
+    }
+    return transcript;
   }
 };
 
 export { DeepgramVoice };
+//# sourceMappingURL=index.js.map
+//# sourceMappingURL=index.js.map
package/dist/index.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/voices.ts","../src/index.ts"],"names":[],"mappings":";;;;;;;AAKO,IAAM,eAAA,GAAkB;AAAA,EAC7B,YAAA;AAAA,EACA,SAAA;AAAA,EACA,WAAA;AAAA,EACA,WAAA;AAAA,EACA,SAAA;AAAA,EACA,UAAA;AAAA,EACA,UAAA;AAAA,EACA,YAAA;AAAA,EACA,UAAA;AAAA,EACA,YAAA;AAAA,EACA,WAAA;AAAA,EACA;AACF,CAAA;;;ACHO,IAAM,aAAA,GAAN,cAA4B,WAAA,CAAY;AAAA,EACrC,YAAA;AAAA,EACA,eAAA;AAAA,EAER,WAAA,CAAY;AAAA,IACV,WAAA;AAAA,IACA,cAAA;AAAA,IACA;AAAA,GACF,GAA4G,EAAC,EAAG;AAC9G,IAAA,MAAM,aAAA,GAAgB,QAAQ,GAAA,CAAI,gBAAA;AAElC,IAAA,MAAM,kBAAA,GAAqB;AAAA,MACzB,IAAA,EAAM,MAAA;AAAA,MACN,MAAA,EAAQ;AAAA,KACV;AAEA,IAAA,MAAM,qBAAA,GAAwB;AAAA,MAC5B,IAAA,EAAM,MAAA;AAAA,MACN,MAAA,EAAQ;AAAA,KACV;AAEA,IAAA,KAAA,CAAM;AAAA,MACJ,WAAA,EAAa;AAAA,QACX,IAAA,EAAM,WAAA,EAAa,IAAA,IAAQ,kBAAA,CAAmB,IAAA;AAAA,QAC9C,MAAA,EAAQ,WAAA,EAAa,MAAA,IAAU,kBAAA,CAAmB;AAAA,OACpD;AAAA,MACA,cAAA,EAAgB;AAAA,QACd,IAAA,EAAM,cAAA,EAAgB,IAAA,IAAQ,qBAAA,CAAsB,IAAA;AAAA,QACpD,MAAA,EAAQ,cAAA,EAAgB,MAAA,IAAU,qBAAA,CAAsB;AAAA,OAC1D;AAAA,MACA;AAAA,KACD,CAAA;AAED,IAAA,MAAM,YAAA,GAAe,aAAa,MAAA,IAAU,aAAA;AAC5C,IAAA,MAAM,eAAA,GAAkB,gBAAgB,MAAA,IAAU,aAAA;AAElD,IAAA,IAAI,CAAC,YAAA,IAAgB,CAAC,eAAA,EAAiB;AACrC,MAAA,MAAM,IAAI,MAAM,4FAA4F,CAAA;AAAA,IAC9G;AAEA,IAAA,IAAI,YAAA,EAAc;AAChB,MAAA,IAAA,CAAK,YAAA,GAAe,aAAa,YAAY,CAAA;AAAA,IAC/C;AACA,IAAA,IAAI,eAAA,EAAiB;AACnB,MAAA,IAAA,CAAK,eAAA,GAAkB,aAAa,eAAe,CAAA;AAAA,IACrD;AAEA,IAAA,IAAA,CAAK,UAAU,OAAA,IAAW,YAAA;AAAA,EAC5B;AAAA,EAEA,MAAM,WAAA,GAAc;AAClB,IAAA,OAAO,eAAA,CAAgB,IAAI,CAAA,KAAA,MAAU;AAAA,MACnC,OAAA,EAAS;AAAA,KACX,CAAE,CAAA;AAAA,EACJ;AAAA,EAEA,MAAM,KAAA,CACJ,KAAA,EACA,OAAA,EAIgC;AAChC,IAAA,IAAI,CAAC,KAAK,YAAA,EAAc;AACtB,MAAA,MAAM,IAAI,MAAM,uCAAuC,CAAA;AAAA,IACzD;AAEA,IAAA,IAAI,IAAA;AACJ,IAAA,IAAI,OAAO,UAAU,QAAA,EAAU;AAC7B,MAAA,MAAM,SAAmB,EAAC;AAC1B,MAAA,WAAA,MAAiB,SAAS,KAAA,EAAO;AAC/B,QAAA,IAAI,OAAO,UAAU,QAAA,EAAU;AAC7B,UAAA,MAAA,CAAO,IAAA,CAAK,MAAA,CAAO,IAAA,CAAK,KAAK,CAAC,CAAA;AAAA,QAChC,CAAA,MAAO;AACL,UAAA,MAAA,CAAO,KAAK,KAAK,CAAA;AAAA,QACnB;AAAA,MACF;AACA,MAAA,IAAA,GAAO,MAAA,CAAO,MAAA,CAAO,MAAM,CAAA,CAAE,SAAS,OAAO,CAAA;AAAA,IAC/C,CAAA,MAAO;AACL,MAAA,IAAA,GAAO,KAAA;AAAA,IACT;AAEA,IAAA,IAAI,IAAA,CAAK,IAAA,EAAK,CAAE,MAAA,KAAW,CAAA,EAAG;AAC5B,MAAA,MAAM,IAAI,MAAM,qBAAqB,CAAA;AAAA,IACvC;AAEA,IAAA,IAAI,CAAC,KAAK,YAAA,EAAc;AACtB,MAAA,MAAM,IAAI,MAAM,6BAA6B,CAAA;AAAA,IAC/C;AAEA,IAAA,IAAI,KAAA;AACJ,IAAA,IAAI,SAAS,OAAA,EAAS;AACpB,MAAA,KAAA,GAAQ,IAAA,CAAK,WAAA,EAAa,IAAA,GAAO,GAAA,GAAM,OAAA,CAAQ,OAAA;AAAA,IACjD,CAAA,MAAA,IAAW,KAAK,OAAA,EAAS;AACvB,MAAA,KAAA,GAAQ,IAAA,CAAK,WAAA,EAAa,IAAA,GAAO,GAAA,GAAM,IAAA,CAAK,OAAA;AAAA,IAC9C;AAEA,IAAA,MAAM,WAAA,GAAc,KAAK,YAAA,CAAa,KAAA;AACtC,IAAA,MAAM,QAAA,GAAW,MAAM,WAAA,CAAY,OAAA;AAAA,MACjC,EAAE,IAAA,EAAK;AAAA,MACP;AAAA,QACE,KAAA;AAAA,QACA,GAAG;AAAA;AACL,KACF;AAEA,IAAA,MAAM,SAAA,GAAY,MAAM,QAAA,CAAS,SAAA,EAAU;AAC3C,IAAA,IAAI,CAAC,SAAA,EAAW;AACd,MAAA,MAAM,IAAI,MAAM,kCAAkC,CAAA;AAAA,IACpD;AAEA,IAAA,MAAM,MAAA,GAAS,UAAU,SAAA,EAAU;AACnC,IAAA,MAAM,UAAA,GAAa,IAAI,WAAA,EAAY;AAGnC,IAAA,CAAC,YAAY;AACX,MAAA,IAAI;AACF,QAAA,OAAO,IAAA,EAAM;AACX,UAAA,MAAM,EAAE,IAAA,EAAM,KAAA,EAAM,GAAI,MAAM,OAAO,IAAA,EAAK;AAC1C,UAAA,IAAI,IAAA,EAAM;AACR,YAAA,UAAA,CAAW,GAAA,EAAI;AACf,YAAA;AAAA,UACF;AACA,UAAA,UAAA,CAAW,MAAM,KAAK,CAAA;AAAA,QACxB;AAAA,MACF,SAAS,KAAA,EAAO;AACd,QAAA,UAAA,CAAW,QAAQ,KAAc,CAAA;AAAA,MACnC;AAAA,IACF,CAAA,GAAG,CAAE,KAAA,CAAM,CAAA,KAAA,KAAS;AAClB,MAAA,UAAA,CAAW,QAAQ,KAAc,CAAA;AAAA,IACnC,CAAC,CAAA;AAED,IAAA,OAAO,UAAA;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,WAAA,GAAc;AAClB,IAAA,OAAO,EAAE,SAAS,IAAA,EAAK;AAAA,EACzB;AAAA,EAEA,MAAM,MAAA,CACJ,WAAA,EACA,OAAA,EAGiB;AACjB,IAAA,IAAI,CAAC,KAAK,eAAA,EAAiB;AAC
zB,MAAA,MAAM,IAAI,MAAM,0CAA0C,CAAA;AAAA,IAC5D;AAEA,IAAA,MAAM,SAAmB,EAAC;AAC1B,IAAA,WAAA,MAAiB,SAAS,WAAA,EAAa;AACrC,MAAA,IAAI,OAAO,UAAU,QAAA,EAAU;AAC7B,QAAA,MAAA,CAAO,IAAA,CAAK,MAAA,CAAO,IAAA,CAAK,KAAK,CAAC,CAAA;AAAA,MAChC,CAAA,MAAO;AACL,QAAA,MAAA,CAAO,KAAK,KAAK,CAAA;AAAA,MACnB;AAAA,IACF;AACA,IAAA,MAAM,MAAA,GAAS,MAAA,CAAO,MAAA,CAAO,MAAM,CAAA;AAEnC,IAAA,IAAI,CAAC,KAAK,eAAA,EAAiB;AACzB,MAAA,MAAM,IAAI,MAAM,gCAAgC,CAAA;AAAA,IAClD;AACA,IAAA,MAAM,EAAE,MAAA,EAAQ,KAAA,EAAM,GAAI,MAAM,KAAK,eAAA,CAAgB,MAAA,CAAO,WAAA,CAAY,cAAA,CAAe,MAAA,EAAQ;AAAA,MAC7F,KAAA,EAAO,KAAK,cAAA,EAAgB,IAAA;AAAA,MAC5B,GAAG;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,KAAA,EAAO;AACT,MAAA,MAAM,KAAA;AAAA,IACR;AAEA,IAAA,MAAM,UAAA,GAAa,OAAO,OAAA,EAAS,QAAA,GAAW,CAAC,CAAA,EAAG,YAAA,GAAe,CAAC,CAAA,EAAG,UAAA;AACrE,IAAA,IAAI,CAAC,UAAA,EAAY;AACf,MAAA,MAAM,IAAI,MAAM,0CAA0C,CAAA;AAAA,IAC5D;AAEA,IAAA,OAAO,UAAA;AAAA,EACT;AACF","file":"index.js","sourcesContent":["/**\n * List of available Deepgram voice models for text-to-speech\n * Each voice is designed for specific use cases and languages\n * Format: {name}-{language} (e.g. asteria-en)\n */\nexport const DEEPGRAM_VOICES = [\n 'asteria-en',\n 'luna-en',\n 'stella-en',\n 'athena-en',\n 'hera-en',\n 'orion-en',\n 'arcas-en',\n 'perseus-en',\n 'angus-en',\n 'orpheus-en',\n 'helios-en',\n 'zeus-en',\n] as const;\n\nexport type DeepgramVoiceId = (typeof DEEPGRAM_VOICES)[number];\n\n/**\n * List of available Deepgram models for text-to-speech and speech-to-text\n */\nexport const DEEPGRAM_MODELS = ['aura', 'whisper', 'base', 'enhanced', 'nova', 'nova-2', 'nova-3'] as const;\n\nexport type DeepgramModel = (typeof DEEPGRAM_MODELS)[number];\n","import { PassThrough } from 'stream';\n\nimport { createClient } from '@deepgram/sdk';\nimport { MastraVoice } from '@mastra/core/voice';\n\nimport { DEEPGRAM_VOICES } from './voices';\nimport type { DeepgramVoiceId, DeepgramModel } from './voices';\n\ninterface DeepgramVoiceConfig {\n name?: DeepgramModel;\n apiKey?: string;\n properties?: Record<string, any>;\n language?: string;\n}\n\nexport class DeepgramVoice extends MastraVoice {\n private speechClient?: ReturnType<typeof createClient>;\n private listeningClient?: ReturnType<typeof createClient>;\n\n constructor({\n speechModel,\n listeningModel,\n speaker,\n }: { speechModel?: DeepgramVoiceConfig; listeningModel?: DeepgramVoiceConfig; speaker?: DeepgramVoiceId } = {}) {\n const defaultApiKey = process.env.DEEPGRAM_API_KEY;\n\n const defaultSpeechModel = {\n name: 'aura',\n apiKey: defaultApiKey,\n };\n\n const defaultListeningModel = {\n name: 'nova',\n apiKey: defaultApiKey,\n };\n\n super({\n speechModel: {\n name: speechModel?.name ?? defaultSpeechModel.name,\n apiKey: speechModel?.apiKey ?? defaultSpeechModel.apiKey,\n },\n listeningModel: {\n name: listeningModel?.name ?? defaultListeningModel.name,\n apiKey: listeningModel?.apiKey ?? 
defaultListeningModel.apiKey,\n },\n speaker,\n });\n\n const speechApiKey = speechModel?.apiKey || defaultApiKey;\n const listeningApiKey = listeningModel?.apiKey || defaultApiKey;\n\n if (!speechApiKey && !listeningApiKey) {\n throw new Error('At least one of DEEPGRAM_API_KEY, speechModel.apiKey, or listeningModel.apiKey must be set');\n }\n\n if (speechApiKey) {\n this.speechClient = createClient(speechApiKey);\n }\n if (listeningApiKey) {\n this.listeningClient = createClient(listeningApiKey);\n }\n\n this.speaker = speaker || 'asteria-en';\n }\n\n async getSpeakers() {\n return DEEPGRAM_VOICES.map(voice => ({\n voiceId: voice,\n }));\n }\n\n async speak(\n input: string | NodeJS.ReadableStream,\n options?: {\n speaker?: string;\n [key: string]: any;\n },\n ): Promise<NodeJS.ReadableStream> {\n if (!this.speechClient) {\n throw new Error('Deepgram speech client not configured');\n }\n\n let text: string;\n if (typeof input !== 'string') {\n const chunks: Buffer[] = [];\n for await (const chunk of input) {\n if (typeof chunk === 'string') {\n chunks.push(Buffer.from(chunk));\n } else {\n chunks.push(chunk);\n }\n }\n text = Buffer.concat(chunks).toString('utf-8');\n } else {\n text = input;\n }\n\n if (text.trim().length === 0) {\n throw new Error('Input text is empty');\n }\n\n if (!this.speechClient) {\n throw new Error('No speech client configured');\n }\n\n let model;\n if (options?.speaker) {\n model = this.speechModel?.name + '-' + options.speaker;\n } else if (this.speaker) {\n model = this.speechModel?.name + '-' + this.speaker;\n }\n\n const speakClient = this.speechClient.speak;\n const response = await speakClient.request(\n { text },\n {\n model,\n ...options,\n },\n );\n\n const webStream = await response.getStream();\n if (!webStream) {\n throw new Error('No stream returned from Deepgram');\n }\n\n const reader = webStream.getReader();\n const nodeStream = new PassThrough();\n\n // Add error handling for the stream processing\n (async () => {\n try {\n while (true) {\n const { done, value } = await reader.read();\n if (done) {\n nodeStream.end();\n break;\n }\n nodeStream.write(value);\n }\n } catch (error) {\n nodeStream.destroy(error as Error);\n }\n })().catch(error => {\n nodeStream.destroy(error as Error);\n });\n\n return nodeStream;\n }\n\n /**\n * Checks if listening capabilities are enabled.\n *\n * @returns {Promise<{ enabled: boolean }>}\n */\n async getListener() {\n return { enabled: true };\n }\n\n async listen(\n audioStream: NodeJS.ReadableStream,\n options?: {\n [key: string]: any;\n },\n ): Promise<string> {\n if (!this.listeningClient) {\n throw new Error('Deepgram listening client not configured');\n }\n\n const chunks: Buffer[] = [];\n for await (const chunk of audioStream) {\n if (typeof chunk === 'string') {\n chunks.push(Buffer.from(chunk));\n } else {\n chunks.push(chunk);\n }\n }\n const buffer = Buffer.concat(chunks);\n\n if (!this.listeningClient) {\n throw new Error('No listening client configured');\n }\n const { result, error } = await this.listeningClient.listen.prerecorded.transcribeFile(buffer, {\n model: this.listeningModel?.name,\n ...options,\n });\n\n if (error) {\n throw error;\n }\n\n const transcript = result.results?.channels?.[0]?.alternatives?.[0]?.transcript;\n if (!transcript) {\n throw new Error('No transcript found in Deepgram response');\n }\n\n return transcript;\n }\n}\n\nexport type { DeepgramVoiceConfig, DeepgramVoiceId, DeepgramModel };\n"]}
package/dist/voices.d.ts ADDED
@@ -0,0 +1,13 @@
+/**
+ * List of available Deepgram voice models for text-to-speech
+ * Each voice is designed for specific use cases and languages
+ * Format: {name}-{language} (e.g. asteria-en)
+ */
+export declare const DEEPGRAM_VOICES: readonly ["asteria-en", "luna-en", "stella-en", "athena-en", "hera-en", "orion-en", "arcas-en", "perseus-en", "angus-en", "orpheus-en", "helios-en", "zeus-en"];
+export type DeepgramVoiceId = (typeof DEEPGRAM_VOICES)[number];
+/**
+ * List of available Deepgram models for text-to-speech and speech-to-text
+ */
+export declare const DEEPGRAM_MODELS: readonly ["aura", "whisper", "base", "enhanced", "nova", "nova-2", "nova-3"];
+export type DeepgramModel = (typeof DEEPGRAM_MODELS)[number];
+//# sourceMappingURL=voices.d.ts.map
package/dist/voices.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"voices.d.ts","sourceRoot":"","sources":["../src/voices.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH,eAAO,MAAM,eAAe,iKAalB,CAAC;AAEX,MAAM,MAAM,eAAe,GAAG,CAAC,OAAO,eAAe,CAAC,CAAC,MAAM,CAAC,CAAC;AAE/D;;GAEG;AACH,eAAO,MAAM,eAAe,8EAA+E,CAAC;AAE5G,MAAM,MAAM,aAAa,GAAG,CAAC,OAAO,eAAe,CAAC,CAAC,MAAM,CAAC,CAAC"}
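Because `DeepgramVoiceId` and `DeepgramModel` are re-exported from the package root (see the new index.d.ts above), consumers can keep configuration values inside the literal unions declared in voices.d.ts. A small hedged sketch, with illustrative values:

```ts
import type { DeepgramVoiceId, DeepgramModel } from '@mastra/voice-deepgram';
import { DeepgramVoice } from '@mastra/voice-deepgram';

// Values are constrained to the literal unions from voices.d.ts;
// a typo such as 'luna-fr' or 'nova-4' would fail to compile.
const speaker: DeepgramVoiceId = 'luna-en';
const listeningModel: DeepgramModel = 'nova-3';

const voice = new DeepgramVoice({
  listeningModel: { name: listeningModel },
  speaker,
});
```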
package/package.json CHANGED
@@ -1,10 +1,11 @@
 {
   "name": "@mastra/voice-deepgram",
-  "version": "0.0.0-share-agent-metadata-with-cloud-20250718123411",
+  "version": "0.0.0-sidebar-window-undefined-fix-20251029233656",
   "description": "Mastra Deepgram voice integration",
   "type": "module",
   "files": [
-    "dist"
+    "dist",
+    "CHANGELOG.md"
   ],
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -15,7 +16,7 @@
         "default": "./dist/index.js"
       },
       "require": {
-        "types": "./dist/index.d.cts",
+        "types": "./dist/index.d.ts",
         "default": "./dist/index.cjs"
       }
     },
@@ -23,25 +24,35 @@
   },
   "license": "Apache-2.0",
   "dependencies": {
-    "@deepgram/sdk": "^3.13.0",
-    "zod": "^3.25.67"
+    "@deepgram/sdk": "^3.13.0"
   },
   "devDependencies": {
     "@microsoft/api-extractor": "^7.52.8",
     "@types/node": "^20.19.0",
-    "eslint": "^9.30.1",
+    "eslint": "^9.37.0",
     "tsup": "^8.5.0",
     "typescript": "^5.8.3",
     "vitest": "^3.2.4",
-    "@internal/lint": "0.0.0-share-agent-metadata-with-cloud-20250718123411",
-    "@mastra/core": "0.0.0-share-agent-metadata-with-cloud-20250718123411"
+    "@mastra/core": "0.0.0-sidebar-window-undefined-fix-20251029233656",
+    "@internal/types-builder": "0.0.0-sidebar-window-undefined-fix-20251029233656",
+    "@internal/lint": "0.0.0-sidebar-window-undefined-fix-20251029233656"
   },
   "peerDependencies": {
-    "@mastra/core": "0.0.0-share-agent-metadata-with-cloud-20250718123411"
+    "zod": "^3.25.0 || ^4.0.0",
+    "@mastra/core": "0.0.0-sidebar-window-undefined-fix-20251029233656"
+  },
+  "homepage": "https://mastra.ai",
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/mastra-ai/mastra.git",
+    "directory": "voice/deepgram"
+  },
+  "bugs": {
+    "url": "https://github.com/mastra-ai/mastra/issues"
   },
   "scripts": {
-    "build": "tsup src/index.ts --format esm,cjs --experimental-dts --clean --treeshake=smallest --splitting",
-    "build:watch": "pnpm build --watch",
+    "build": "tsup --silent --config tsup.config.ts",
+    "build:watch": "tsup --watch --silent --config tsup.config.ts",
     "test": "vitest run",
     "lint": "eslint ."
   }
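For context, a hedged sketch of how the updated exports map resolves for consumers: ESM imports load ./dist/index.js, while the require condition loads ./dist/index.cjs and now serves types from ./dist/index.d.ts (index.d.cts is removed, see the deleted files below). The createRequire call is only there to show both paths from one module; the variable names are illustrative.

```ts
import { createRequire } from 'node:module';

// ESM: exports["."].import → ./dist/index.js (types: ./dist/index.d.ts)
import { DeepgramVoice } from '@mastra/voice-deepgram';

// CJS: exports["."].require → ./dist/index.cjs; its "types" entry now also
// points at ./dist/index.d.ts instead of the removed ./dist/index.d.cts.
const require = createRequire(import.meta.url);
const { DeepgramVoice: DeepgramVoiceCjs } = require('@mastra/voice-deepgram');

console.log(DeepgramVoice === DeepgramVoiceCjs); // false: two separate builds
```

Note also that zod moved from dependencies to peerDependencies (^3.25.0 || ^4.0.0), so consuming projects are expected to provide it themselves.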
@@ -1,58 +0,0 @@
-import { MastraVoice } from '@mastra/core/voice';
-
-/**
- * List of available Deepgram models for text-to-speech and speech-to-text
- */
-export declare const DEEPGRAM_MODELS: readonly ["aura", "whisper", "base", "enhanced", "nova", "nova-2", "nova-3"];
-
-/**
- * List of available Deepgram voice models for text-to-speech
- * Each voice is designed for specific use cases and languages
- * Format: {name}-{language} (e.g. asteria-en)
- */
-export declare const DEEPGRAM_VOICES: readonly ["asteria-en", "luna-en", "stella-en", "athena-en", "hera-en", "orion-en", "arcas-en", "perseus-en", "angus-en", "orpheus-en", "helios-en", "zeus-en"];
-
-declare type DeepgramModel = (typeof DEEPGRAM_MODELS)[number];
-export { DeepgramModel }
-export { DeepgramModel as DeepgramModel_alias_1 }
-
-export declare class DeepgramVoice extends MastraVoice {
-    private speechClient?;
-    private listeningClient?;
-    constructor({ speechModel, listeningModel, speaker, }?: {
-        speechModel?: DeepgramVoiceConfig;
-        listeningModel?: DeepgramVoiceConfig;
-        speaker?: DeepgramVoiceId;
-    });
-    getSpeakers(): Promise<{
-        voiceId: "asteria-en" | "luna-en" | "stella-en" | "athena-en" | "hera-en" | "orion-en" | "arcas-en" | "perseus-en" | "angus-en" | "orpheus-en" | "helios-en" | "zeus-en";
-    }[]>;
-    speak(input: string | NodeJS.ReadableStream, options?: {
-        speaker?: string;
-        [key: string]: any;
-    }): Promise<NodeJS.ReadableStream>;
-    /**
-     * Checks if listening capabilities are enabled.
-     *
-     * @returns {Promise<{ enabled: boolean }>}
-     */
-    getListener(): Promise<{
-        enabled: boolean;
-    }>;
-    listen(audioStream: NodeJS.ReadableStream, options?: {
-        [key: string]: any;
-    }): Promise<string>;
-}
-
-export declare interface DeepgramVoiceConfig {
-    name?: DeepgramModel;
-    apiKey?: string;
-    properties?: Record<string, any>;
-    language?: string;
-}
-
-declare type DeepgramVoiceId = (typeof DEEPGRAM_VOICES)[number];
-export { DeepgramVoiceId }
-export { DeepgramVoiceId as DeepgramVoiceId_alias_1 }
-
-export { }
@@ -1,58 +0,0 @@
-import { MastraVoice } from '@mastra/core/voice';
-
-/**
- * List of available Deepgram models for text-to-speech and speech-to-text
- */
-export declare const DEEPGRAM_MODELS: readonly ["aura", "whisper", "base", "enhanced", "nova", "nova-2", "nova-3"];
-
-/**
- * List of available Deepgram voice models for text-to-speech
- * Each voice is designed for specific use cases and languages
- * Format: {name}-{language} (e.g. asteria-en)
- */
-export declare const DEEPGRAM_VOICES: readonly ["asteria-en", "luna-en", "stella-en", "athena-en", "hera-en", "orion-en", "arcas-en", "perseus-en", "angus-en", "orpheus-en", "helios-en", "zeus-en"];
-
-declare type DeepgramModel = (typeof DEEPGRAM_MODELS)[number];
-export { DeepgramModel }
-export { DeepgramModel as DeepgramModel_alias_1 }
-
-export declare class DeepgramVoice extends MastraVoice {
-    private speechClient?;
-    private listeningClient?;
-    constructor({ speechModel, listeningModel, speaker, }?: {
-        speechModel?: DeepgramVoiceConfig;
-        listeningModel?: DeepgramVoiceConfig;
-        speaker?: DeepgramVoiceId;
-    });
-    getSpeakers(): Promise<{
-        voiceId: "asteria-en" | "luna-en" | "stella-en" | "athena-en" | "hera-en" | "orion-en" | "arcas-en" | "perseus-en" | "angus-en" | "orpheus-en" | "helios-en" | "zeus-en";
-    }[]>;
-    speak(input: string | NodeJS.ReadableStream, options?: {
-        speaker?: string;
-        [key: string]: any;
-    }): Promise<NodeJS.ReadableStream>;
-    /**
-     * Checks if listening capabilities are enabled.
-     *
-     * @returns {Promise<{ enabled: boolean }>}
-     */
-    getListener(): Promise<{
-        enabled: boolean;
-    }>;
-    listen(audioStream: NodeJS.ReadableStream, options?: {
-        [key: string]: any;
-    }): Promise<string>;
-}
-
-export declare interface DeepgramVoiceConfig {
-    name?: DeepgramModel;
-    apiKey?: string;
-    properties?: Record<string, any>;
-    language?: string;
-}
-
-declare type DeepgramVoiceId = (typeof DEEPGRAM_VOICES)[number];
-export { DeepgramVoiceId }
-export { DeepgramVoiceId as DeepgramVoiceId_alias_1 }
-
-export { }
package/dist/index.d.cts DELETED
@@ -1,4 +0,0 @@
1
- export { DeepgramVoice } from './_tsup-dts-rollup.cjs';
2
- export { DeepgramVoiceConfig } from './_tsup-dts-rollup.cjs';
3
- export { DeepgramVoiceId } from './_tsup-dts-rollup.cjs';
4
- export { DeepgramModel } from './_tsup-dts-rollup.cjs';