@mastra/voice-cloudflare 0.11.12 → 0.12.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,72 @@
+ # Voice API Reference
+
+ > API reference for voice - 1 entry
+
+ ---
+
+ ## Reference: Cloudflare
+
+ > Documentation for the CloudflareVoice class, providing text-to-speech capabilities using Cloudflare Workers AI.
+
+ The CloudflareVoice class in Mastra provides text-to-speech capabilities using Cloudflare Workers AI. This provider specializes in efficient, low-latency speech synthesis suited to edge computing environments.
+
+ ## Usage Example
+
+ ```typescript
+ import { CloudflareVoice } from "@mastra/voice-cloudflare";
+
+ // Initialize with configuration
+ const voice = new CloudflareVoice({
+   speechModel: {
+     name: "@cf/meta/m2m100-1.2b",
+     apiKey: "your-cloudflare-api-token",
+     accountId: "your-cloudflare-account-id",
+   },
+   speaker: "en-US-1", // Default voice
+ });
+
+ // Convert text to speech
+ const audioStream = await voice.speak("Hello, how can I help you?", {
+   speaker: "en-US-2", // Override default voice
+ });
+
+ // Get available voices
+ const speakers = await voice.getSpeakers();
+ console.log(speakers);
+ ```
+
+ ## Configuration
+
+ ### Constructor Options
+
+ ### CloudflareSpeechConfig
+
+ ## Methods
+
+ ### speak()
+
+ Converts text to speech using Cloudflare's text-to-speech service.
+
+ Returns: `Promise<NodeJS.ReadableStream>`
+
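+ The returned stream can be piped anywhere a Node readable is accepted, such as a file. A minimal sketch, assuming `voice` is configured as in the usage example above and that `speak()` is functional in your build (the compiled sources later in this diff still throw "This feature is not yet implemented." for `speak()`); the output path is hypothetical:
+
+ ```typescript
+ import { createWriteStream } from "node:fs";
+ import { pipeline } from "node:stream/promises";
+
+ // Hypothetical output file; `voice` comes from the usage example above.
+ const audio = await voice.speak("Welcome to the edge!");
+ await pipeline(audio, createWriteStream("welcome.mp3"));
+ ```
+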
+ ### getSpeakers()
+
+ Returns an array of available voice options, where each entry contains a `voiceId` string plus provider-specific metadata.
+
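+ A minimal sketch of selecting a voice by id, assuming the `Array<{ voiceId: string; [key: string]: any }>` shape from the package's type declarations; the id checked for is hypothetical:
+
+ ```typescript
+ const speakers = await voice.getSpeakers();
+ const preferred = speakers.find((s) => s.voiceId === "en-US-1");
+ console.log(preferred?.voiceId ?? "voice not found");
+ ```
+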
+ ## Notes
+
+ - API tokens can be provided via constructor options or environment variables (CLOUDFLARE_API_TOKEN and CLOUDFLARE_ACCOUNT_ID); see the sketch after this list
+ - Cloudflare Workers AI is optimized for edge computing with low latency
+ - This provider only supports text-to-speech (TTS) functionality, not speech-to-text (STT)
+ - The service integrates well with other Cloudflare Workers products
+ - For production use, ensure your Cloudflare account has the appropriate Workers AI subscription
+ - Voice options are more limited than with some other providers, but performance at the edge is excellent
+
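+ A minimal sketch of environment-based configuration. Note that the compiled sources later in this diff fall back to `CLOUDFLARE_AI_API_KEY` and `CLOUDFLARE_ACCOUNT_ID`, so the exact token variable name may differ from the note above:
+
+ ```typescript
+ import { CloudflareVoice } from "@mastra/voice-cloudflare";
+
+ // With CLOUDFLARE_AI_API_KEY and CLOUDFLARE_ACCOUNT_ID set in the
+ // environment, no explicit credentials are needed; the constructor
+ // falls back to the environment variables.
+ const voice = new CloudflareVoice();
+ ```
+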
+ ## Related Providers
+
+ If you need speech-to-text capabilities in addition to text-to-speech, consider using one of these providers:
+
+ - [OpenAI](./openai) - Provides both TTS and STT
+ - [Google](./google) - Provides both TTS and STT
+ - [Azure](./azure) - Provides both TTS and STT
package/dist/index.cjs CHANGED
@@ -45,31 +45,29 @@ var CloudflareVoice = class extends voice.MastraVoice {
     return { enabled: true };
   }
   async listen(audioStream, options) {
-    return this.traced(async () => {
-      const chunks = [];
-      for await (const chunk of audioStream) {
-        if (typeof chunk === "string") {
-          chunks.push(Buffer.from(chunk));
-        } else {
-          chunks.push(chunk);
-        }
-      }
-      const audioBuffer = Buffer.concat(chunks);
-      const base64Audio = audioBuffer.toString("base64");
-      const model = options?.model || defaultListeningModel.model;
-      if (this.binding) {
-        const response = await this.binding.run(model, {
-          audio: base64Audio
-        });
-        return response.text;
-      } else if (this.client) {
-        const payload = { audio: base64Audio, account_id: options?.account_id || defaultListeningModel.account_id };
-        const response = await this.client.ai.run(model, payload);
-        return response.text;
-      } else {
-        throw new Error("Neither binding nor REST client is configured");
-      }
-    }, "voice.cloudflare.listen")();
+    const chunks = [];
+    for await (const chunk of audioStream) {
+      if (typeof chunk === "string") {
+        chunks.push(Buffer.from(chunk));
+      } else {
+        chunks.push(chunk);
+      }
+    }
+    const audioBuffer = Buffer.concat(chunks);
+    const base64Audio = audioBuffer.toString("base64");
+    const model = options?.model || defaultListeningModel.model;
+    if (this.binding) {
+      const response = await this.binding.run(model, {
+        audio: base64Audio
+      });
+      return response.text;
+    } else if (this.client) {
+      const payload = { audio: base64Audio, account_id: options?.account_id || defaultListeningModel.account_id };
+      const response = await this.client.ai.run(model, payload);
+      return response.text;
+    } else {
+      throw new Error("Neither binding nor REST client is configured");
+    }
   }
   async speak() {
     throw new Error("This feature is not yet implemented.");
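
The substantive change in both compiled entry points is that `listen()` no longer wraps its body in the `traced(...)` telemetry helper; the transcription logic itself is unchanged, so calling code is unaffected. A minimal sketch of using `listen()` against this build, based on the `CloudflareListenOptions` type embedded in the package's declarations; the file path and credentials are hypothetical:

```typescript
import { createReadStream } from "node:fs";
import { CloudflareVoice } from "@mastra/voice-cloudflare";

// Hypothetical credentials; CLOUDFLARE_AI_API_KEY and CLOUDFLARE_ACCOUNT_ID
// can be set in the environment instead.
const voice = new CloudflareVoice({
  listeningModel: {
    apiKey: "your-cloudflare-api-token",
    account_id: "your-cloudflare-account-id",
  },
});

// Transcribe a local audio file with the default Whisper model.
const transcript = await voice.listen(createReadStream("recording.wav"), {
  model: "@cf/openai/whisper-large-v3-turbo",
});
console.log(transcript);
```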
package/dist/index.js CHANGED
@@ -39,31 +39,29 @@ var CloudflareVoice = class extends MastraVoice {
     return { enabled: true };
   }
   async listen(audioStream, options) {
-    return this.traced(async () => {
-      const chunks = [];
-      for await (const chunk of audioStream) {
-        if (typeof chunk === "string") {
-          chunks.push(Buffer.from(chunk));
-        } else {
-          chunks.push(chunk);
-        }
-      }
-      const audioBuffer = Buffer.concat(chunks);
-      const base64Audio = audioBuffer.toString("base64");
-      const model = options?.model || defaultListeningModel.model;
-      if (this.binding) {
-        const response = await this.binding.run(model, {
-          audio: base64Audio
-        });
-        return response.text;
-      } else if (this.client) {
-        const payload = { audio: base64Audio, account_id: options?.account_id || defaultListeningModel.account_id };
-        const response = await this.client.ai.run(model, payload);
-        return response.text;
-      } else {
-        throw new Error("Neither binding nor REST client is configured");
-      }
-    }, "voice.cloudflare.listen")();
+    const chunks = [];
+    for await (const chunk of audioStream) {
+      if (typeof chunk === "string") {
+        chunks.push(Buffer.from(chunk));
+      } else {
+        chunks.push(chunk);
+      }
+    }
+    const audioBuffer = Buffer.concat(chunks);
+    const base64Audio = audioBuffer.toString("base64");
+    const model = options?.model || defaultListeningModel.model;
+    if (this.binding) {
+      const response = await this.binding.run(model, {
+        audio: base64Audio
+      });
+      return response.text;
+    } else if (this.client) {
+      const payload = { audio: base64Audio, account_id: options?.account_id || defaultListeningModel.account_id };
+      const response = await this.client.ai.run(model, payload);
+      return response.text;
+    } else {
+      throw new Error("Neither binding nor REST client is configured");
+    }
  }
  async speak() {
    throw new Error("This feature is not yet implemented.");
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@mastra/voice-cloudflare",
-  "version": "0.11.12",
+  "version": "0.12.0-beta.1",
   "description": "Mastra Cloudflare AI voice integration",
   "type": "module",
   "files": [
@@ -27,17 +27,18 @@
     "cloudflare": "^4.5.0"
   },
   "devDependencies": {
-    "@cloudflare/workers-types": "^4.20251008.0",
-    "@microsoft/api-extractor": "^7.52.8",
-    "@types/node": "^20.19.0",
+    "@cloudflare/workers-types": "^4.20251111.0",
+    "@types/node": "22.13.17",
+    "@vitest/coverage-v8": "4.0.12",
+    "@vitest/ui": "4.0.12",
     "eslint": "^9.37.0",
     "tsup": "^8.5.0",
-    "typescript": "^5.8.3",
-    "vitest": "^3.2.4",
+    "typescript": "^5.9.3",
+    "vitest": "4.0.16",
     "zod": "^3.25.76",
-    "@mastra/core": "0.24.0",
-    "@internal/types-builder": "0.0.33",
-    "@internal/lint": "0.0.58"
+    "@internal/types-builder": "0.0.28",
+    "@mastra/core": "1.0.0-beta.20",
+    "@internal/lint": "0.0.53"
   },
   "keywords": [
     "mastra",
@@ -50,7 +51,7 @@
     "speech-recognition"
   ],
   "peerDependencies": {
-    "@mastra/core": ">=0.18.1-0 <0.25.0-0",
+    "@mastra/core": ">=1.0.0-0 <2.0.0-0",
     "zod": "^3.25.0 || ^4.0.0"
   },
   "homepage": "https://mastra.ai",
@@ -62,15 +63,12 @@
   "bugs": {
     "url": "https://github.com/mastra-ai/mastra/issues"
   },
-  "publishConfig": {
-    "access": "public",
-    "publish-branch": [
-      "main",
-      "0.x"
-    ]
+  "engines": {
+    "node": ">=22.13.0"
   },
   "scripts": {
     "build": "tsup --silent --config tsup.config.ts",
+    "postbuild": "pnpx tsx ../../scripts/generate-package-docs.ts voice/cloudflare",
     "build:watch": "tsup --watch --silent --config tsup.config.ts",
     "test": "vitest run",
     "test:watch": "vitest watch",