@mastra/voice-elevenlabs 0.1.1-alpha.0 → 0.1.1-alpha.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,18 +1,23 @@
 
- > @mastra/voice-elevenlabs@0.1.1-alpha.0 build /home/runner/work/mastra/mastra/voice/elevenlabs
- > tsup src/index.ts --format esm --experimental-dts --clean --treeshake
+ > @mastra/voice-elevenlabs@0.1.1-alpha.3 build /home/runner/work/mastra/mastra/voice/elevenlabs
+ > tsup src/index.ts --format esm,cjs --experimental-dts --clean --treeshake
 
  CLI Building entry: src/index.ts
  CLI Using tsconfig: tsconfig.json
  CLI tsup v8.3.6
  TSC Build start
- TSC ⚡️ Build success in 7640ms
+ TSC ⚡️ Build success in 7812ms
  DTS Build start
  CLI Target: es2022
  Analysis will use the bundled TypeScript version 5.7.3
  Writing package typings: /home/runner/work/mastra/mastra/voice/elevenlabs/dist/_tsup-dts-rollup.d.ts
- DTS ⚡️ Build success in 6363ms
+ Analysis will use the bundled TypeScript version 5.7.3
+ Writing package typings: /home/runner/work/mastra/mastra/voice/elevenlabs/dist/_tsup-dts-rollup.d.cts
+ DTS ⚡️ Build success in 8313ms
  CLI Cleaning output folder
  ESM Build start
- ESM dist/index.js 3.36 KB
- ESM ⚡️ Build success in 447ms
+ CJS Build start
+ ESM dist/index.js 5.00 KB
+ ESM ⚡️ Build success in 582ms
+ CJS dist/index.cjs 5.04 KB
+ CJS ⚡️ Build success in 582ms
package/CHANGELOG.md CHANGED
@@ -1,5 +1,39 @@
  # @mastra/voice-elevenlabs
 
+ ## 0.1.1-alpha.3
+
+ ### Patch Changes
+
+ - bb4f447: Add support for commonjs
+ - Updated dependencies [0fd78ac]
+ - Updated dependencies [0d25b75]
+ - Updated dependencies [fd14a3f]
+ - Updated dependencies [3f369a2]
+ - Updated dependencies [4d4e1e1]
+ - Updated dependencies [bb4f447]
+   - @mastra/core@0.4.3-alpha.3
+
+ ## 0.1.1-alpha.2
+
+ ### Patch Changes
+
+ - Updated dependencies [2512a93]
+ - Updated dependencies [e62de74]
+   - @mastra/core@0.4.3-alpha.2
+
+ ## 0.1.1-alpha.1
+
+ ### Patch Changes
+
+ - 705d69b: Add STT for ElevenlabsVoice
+ - Updated dependencies [0d185b1]
+ - Updated dependencies [ed55f1d]
+ - Updated dependencies [8d13b14]
+ - Updated dependencies [3ee4831]
+ - Updated dependencies [108793c]
+ - Updated dependencies [5f28f44]
+   - @mastra/core@0.4.3-alpha.1
+
  ## 0.1.1-alpha.0
 
  ### Patch Changes
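
The alpha.1 entry adds speech-to-text (`listen`) and alpha.3 adds a CommonJS build. A minimal usage sketch of the new `listen` API, based on the typings added later in this diff; the audio file path and option values are illustrative, not taken from the package:

```ts
// Sketch only: assumes ELEVENLABS_API_KEY is set and a local recording.mp3 exists (hypothetical file).
import { createReadStream } from 'node:fs';
import { ElevenLabsVoice } from '@mastra/voice-elevenlabs';

const voice = new ElevenLabsVoice(); // listening model defaults to 'scribe_v1'

async function main() {
  const audio = createReadStream('recording.mp3');
  const text = await voice.listen(audio, { filetype: 'mp3', language_code: 'en' });
  console.log(text);
}

main().catch(console.error);
```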
@@ -0,0 +1,90 @@
+ import { MastraVoice } from '@mastra/core/voice';
+
+ declare type ElevenLabsListenOptions = SpeechToTextOptions & RequestOptions;
+
+ declare type ElevenLabsModel = 'eleven_multilingual_v2' | 'eleven_flash_v2_5' | 'eleven_flash_v2' | 'eleven_multilingual_sts_v2' | 'eleven_english_sts_v2' | 'scribe_v1';
+
+ export declare class ElevenLabsVoice extends MastraVoice {
+     private client;
+     /**
+      * Creates an instance of the ElevenLabsVoice class.
+      *
+      * @param {Object} options - The options for the voice configuration.
+      * @param {ElevenLabsVoiceConfig} [options.speechModel] - The configuration for the speech model, including the model name and API key.
+      * @param {string} [options.speaker] - The ID of the speaker to use. If not provided, a default speaker will be used.
+      *
+      * @throws {Error} If the ELEVENLABS_API_KEY is not set in the environment variables.
+      */
+     constructor({ speechModel, listeningModel, speaker, }?: {
+         speechModel?: ElevenLabsVoiceConfig;
+         listeningModel?: ElevenLabsVoiceConfig;
+         speaker?: string;
+     });
+     /**
+      * Retrieves a list of available speakers from the Eleven Labs API.
+      * Each speaker includes their ID, name, language, and gender.
+      *
+      * @returns {Promise<Array<{ voiceId: string, name: string, language: string, gender: string }>>}
+      * A promise that resolves to an array of speaker objects.
+      */
+     getSpeakers(): Promise<{
+         voiceId: string;
+         name: string | undefined;
+         language: string;
+         gender: string;
+     }[]>;
+     private streamToString;
+     /**
+      * Converts text or audio input into speech using the Eleven Labs API.
+      *
+      * @param {string | NodeJS.ReadableStream} input - The text to be converted to speech or a stream containing audio data.
+      * @param {Object} [options] - Optional parameters for the speech generation.
+      * @param {string} [options.speaker] - The ID of the speaker to use for the speech. If not provided, the default speaker will be used.
+      *
+      * @returns {Promise<NodeJS.ReadableStream>} A promise that resolves to a readable stream of the generated speech.
+      *
+      * @throws {Error} If no speaker is specified or if no speech model is set.
+      */
+     speak(input: string | NodeJS.ReadableStream, options?: {
+         speaker?: string;
+     }): Promise<NodeJS.ReadableStream>;
+     /**
+      * Converts audio input to text using ElevenLabs Speech-to-Text API.
+      *
+      * @param input - A readable stream containing the audio data to transcribe
+      * @param options - Configuration options for the transcription
+      * @param options.language_code - ISO language code (e.g., 'en', 'fr', 'es')
+      * @param options.tag_audio_events - Whether to tag audio events like [MUSIC], [LAUGHTER], etc.
+      * @param options.num_speakers - Number of speakers to detect in the audio
+      * @param options.filetype - Audio file format (e.g., 'mp3', 'wav', 'ogg')
+      * @param options.timeoutInSeconds - Request timeout in seconds
+      * @param options.maxRetries - Maximum number of retry attempts
+      * @param options.abortSignal - Signal to abort the request
+      *
+      * @returns A Promise that resolves to the transcribed text
+      *
+      */
+     listen(input: NodeJS.ReadableStream, options?: ElevenLabsListenOptions): Promise<string>;
+ }
+
+ declare interface ElevenLabsVoiceConfig {
+     name?: ElevenLabsModel;
+     apiKey?: string;
+ }
+
+ declare interface RequestOptions {
+     timeoutInSeconds?: number;
+     maxRetries?: number;
+     abortSignal?: AbortSignal;
+     apiKey?: string | undefined;
+     headers?: Record<string, string>;
+ }
+
+ declare interface SpeechToTextOptions {
+     language_code?: string;
+     tag_audio_events?: boolean;
+     num_speakers?: number;
+     filetype?: string;
+ }
+
+ export { }
@@ -1,6 +1,8 @@
  import { MastraVoice } from '@mastra/core/voice';
 
- declare type ElevenLabsModel = 'eleven_multilingual_v2' | 'eleven_flash_v2_5' | 'eleven_flash_v2' | 'eleven_multilingual_sts_v2' | 'eleven_english_sts_v2';
+ declare type ElevenLabsListenOptions = SpeechToTextOptions & RequestOptions;
+
+ declare type ElevenLabsModel = 'eleven_multilingual_v2' | 'eleven_flash_v2_5' | 'eleven_flash_v2' | 'eleven_multilingual_sts_v2' | 'eleven_english_sts_v2' | 'scribe_v1';
 
  export declare class ElevenLabsVoice extends MastraVoice {
      private client;
@@ -13,8 +15,9 @@ export declare class ElevenLabsVoice extends MastraVoice {
       *
       * @throws {Error} If the ELEVENLABS_API_KEY is not set in the environment variables.
       */
-     constructor({ speechModel, speaker }?: {
+     constructor({ speechModel, listeningModel, speaker, }?: {
          speechModel?: ElevenLabsVoiceConfig;
+         listeningModel?: ElevenLabsVoiceConfig;
          speaker?: string;
      });
      /**
@@ -45,7 +48,23 @@ export declare class ElevenLabsVoice extends MastraVoice {
      speak(input: string | NodeJS.ReadableStream, options?: {
          speaker?: string;
      }): Promise<NodeJS.ReadableStream>;
-     listen(_input: NodeJS.ReadableStream | Buffer, _options?: Record<string, unknown>): Promise<string>;
+     /**
+      * Converts audio input to text using ElevenLabs Speech-to-Text API.
+      *
+      * @param input - A readable stream containing the audio data to transcribe
+      * @param options - Configuration options for the transcription
+      * @param options.language_code - ISO language code (e.g., 'en', 'fr', 'es')
+      * @param options.tag_audio_events - Whether to tag audio events like [MUSIC], [LAUGHTER], etc.
+      * @param options.num_speakers - Number of speakers to detect in the audio
+      * @param options.filetype - Audio file format (e.g., 'mp3', 'wav', 'ogg')
+      * @param options.timeoutInSeconds - Request timeout in seconds
+      * @param options.maxRetries - Maximum number of retry attempts
+      * @param options.abortSignal - Signal to abort the request
+      *
+      * @returns A Promise that resolves to the transcribed text
+      *
+      */
+     listen(input: NodeJS.ReadableStream, options?: ElevenLabsListenOptions): Promise<string>;
  }
 
  declare interface ElevenLabsVoiceConfig {
@@ -53,4 +72,19 @@ declare interface ElevenLabsVoiceConfig {
      apiKey?: string;
  }
 
+ declare interface RequestOptions {
+     timeoutInSeconds?: number;
+     maxRetries?: number;
+     abortSignal?: AbortSignal;
+     apiKey?: string | undefined;
+     headers?: Record<string, string>;
+ }
+
+ declare interface SpeechToTextOptions {
+     language_code?: string;
+     tag_audio_events?: boolean;
+     num_speakers?: number;
+     filetype?: string;
+ }
+
  export { }
package/dist/index.cjs ADDED
@@ -0,0 +1,141 @@
+ 'use strict';
+
+ var buffer = require('buffer');
+ var voice = require('@mastra/core/voice');
+ var elevenlabs = require('elevenlabs');
+
+ // src/index.ts
+ var ElevenLabsVoice = class extends voice.MastraVoice {
+   client;
+   /**
+    * Creates an instance of the ElevenLabsVoice class.
+    *
+    * @param {Object} options - The options for the voice configuration.
+    * @param {ElevenLabsVoiceConfig} [options.speechModel] - The configuration for the speech model, including the model name and API key.
+    * @param {string} [options.speaker] - The ID of the speaker to use. If not provided, a default speaker will be used.
+    *
+    * @throws {Error} If the ELEVENLABS_API_KEY is not set in the environment variables.
+    */
+   constructor({
+     speechModel,
+     listeningModel,
+     speaker
+   } = {}) {
+     const apiKey = speechModel?.apiKey ?? process.env.ELEVENLABS_API_KEY;
+     super({
+       speechModel: {
+         name: speechModel?.name ?? "eleven_multilingual_v2",
+         apiKey: speechModel?.apiKey
+       },
+       listeningModel: {
+         name: listeningModel?.name ?? "scribe_v1",
+         apiKey: listeningModel?.apiKey
+       },
+       speaker
+     });
+     if (!apiKey) {
+       throw new Error("ELEVENLABS_API_KEY is not set");
+     }
+     this.client = new elevenlabs.ElevenLabsClient({
+       apiKey
+     });
+     this.speaker = speaker || "9BWtsMINqrJLrRacOk9x";
+   }
+   /**
+    * Retrieves a list of available speakers from the Eleven Labs API.
+    * Each speaker includes their ID, name, language, and gender.
+    *
+    * @returns {Promise<Array<{ voiceId: string, name: string, language: string, gender: string }>>}
+    * A promise that resolves to an array of speaker objects.
+    */
+   async getSpeakers() {
+     const res = await this.traced(async () => {
+       const voices = await this.client.voices.getAll();
+       return voices?.voices?.map((voice) => ({
+         voiceId: voice.voice_id,
+         name: voice.name,
+         language: voice.labels?.language || "en",
+         gender: voice.labels?.gender || "neutral"
+       })) ?? [];
+     }, "voice.elevenlabs.voices")();
+     return res;
+   }
+   async streamToString(stream) {
+     const chunks = [];
+     for await (const chunk of stream) {
+       chunks.push(Buffer.from(chunk));
+     }
+     return Buffer.concat(chunks).toString("utf-8");
+   }
+   /**
+    * Converts text or audio input into speech using the Eleven Labs API.
+    *
+    * @param {string | NodeJS.ReadableStream} input - The text to be converted to speech or a stream containing audio data.
+    * @param {Object} [options] - Optional parameters for the speech generation.
+    * @param {string} [options.speaker] - The ID of the speaker to use for the speech. If not provided, the default speaker will be used.
+    *
+    * @returns {Promise<NodeJS.ReadableStream>} A promise that resolves to a readable stream of the generated speech.
+    *
+    * @throws {Error} If no speaker is specified or if no speech model is set.
+    */
+   async speak(input, options) {
+     const speaker = options?.speaker || this.speaker;
+     if (!speaker) {
+       throw new Error("No speaker specified");
+     }
+     if (!this.speechModel?.name) {
+       throw new Error("No speech model specified");
+     }
+     const text = typeof input === "string" ? input : await this.streamToString(input);
+     const res = await this.traced(async () => {
+       return await this.client.generate({
+         text,
+         voice: speaker,
+         model_id: this.speechModel?.name,
+         stream: true
+       });
+     }, "voice.elevenlabs.speak")();
+     return res;
+   }
+   /**
+    * Converts audio input to text using ElevenLabs Speech-to-Text API.
+    *
+    * @param input - A readable stream containing the audio data to transcribe
+    * @param options - Configuration options for the transcription
+    * @param options.language_code - ISO language code (e.g., 'en', 'fr', 'es')
+    * @param options.tag_audio_events - Whether to tag audio events like [MUSIC], [LAUGHTER], etc.
+    * @param options.num_speakers - Number of speakers to detect in the audio
+    * @param options.filetype - Audio file format (e.g., 'mp3', 'wav', 'ogg')
+    * @param options.timeoutInSeconds - Request timeout in seconds
+    * @param options.maxRetries - Maximum number of retry attempts
+    * @param options.abortSignal - Signal to abort the request
+    *
+    * @returns A Promise that resolves to the transcribed text
+    *
+    */
+   async listen(input, options) {
+     const res = await this.traced(async () => {
+       const chunks = [];
+       for await (const chunk of input) {
+         chunks.push(Buffer.from(chunk));
+       }
+       const buffer$1 = Buffer.concat(chunks);
+       const { language_code, tag_audio_events, num_speakers, filetype, ...requestOptions } = options || {};
+       const file = new buffer.File([buffer$1], `audio.${filetype || "mp3"}`);
+       const transcription = await this.client.speechToText.convert(
+         {
+           file,
+           model_id: this.listeningModel?.name,
+           language_code,
+           tag_audio_events,
+           num_speakers
+         },
+         requestOptions
+       );
+       return transcription.text;
+     }, "voice.elevenlabs.listen")();
+     return res;
+   }
+ };
+
+ exports.ElevenLabsVoice = ElevenLabsVoice;
@@ -0,0 +1 @@
+ export { ElevenLabsVoice } from './_tsup-dts-rollup.cjs';
package/dist/index.js CHANGED
@@ -1,3 +1,4 @@
+ import { File } from 'node:buffer';
  import { MastraVoice } from '@mastra/core/voice';
  import { ElevenLabsClient } from 'elevenlabs';
 
@@ -13,13 +14,21 @@ var ElevenLabsVoice = class extends MastraVoice {
     *
     * @throws {Error} If the ELEVENLABS_API_KEY is not set in the environment variables.
     */
-   constructor({ speechModel, speaker } = {}) {
+   constructor({
+     speechModel,
+     listeningModel,
+     speaker
+   } = {}) {
      const apiKey = speechModel?.apiKey ?? process.env.ELEVENLABS_API_KEY;
      super({
        speechModel: {
          name: speechModel?.name ?? "eleven_multilingual_v2",
          apiKey: speechModel?.apiKey
        },
+       listeningModel: {
+         name: listeningModel?.name ?? "scribe_v1",
+         apiKey: listeningModel?.apiKey
+       },
        speaker
      });
      if (!apiKey) {
@@ -86,8 +95,44 @@ var ElevenLabsVoice = class extends MastraVoice {
      }, "voice.elevenlabs.speak")();
      return res;
    }
-   async listen(_input, _options) {
-     throw new Error("ElevenLabs does not support transcription");
+   /**
+    * Converts audio input to text using ElevenLabs Speech-to-Text API.
+    *
+    * @param input - A readable stream containing the audio data to transcribe
+    * @param options - Configuration options for the transcription
+    * @param options.language_code - ISO language code (e.g., 'en', 'fr', 'es')
+    * @param options.tag_audio_events - Whether to tag audio events like [MUSIC], [LAUGHTER], etc.
+    * @param options.num_speakers - Number of speakers to detect in the audio
+    * @param options.filetype - Audio file format (e.g., 'mp3', 'wav', 'ogg')
+    * @param options.timeoutInSeconds - Request timeout in seconds
+    * @param options.maxRetries - Maximum number of retry attempts
+    * @param options.abortSignal - Signal to abort the request
+    *
+    * @returns A Promise that resolves to the transcribed text
+    *
+    */
+   async listen(input, options) {
+     const res = await this.traced(async () => {
+       const chunks = [];
+       for await (const chunk of input) {
+         chunks.push(Buffer.from(chunk));
+       }
+       const buffer = Buffer.concat(chunks);
+       const { language_code, tag_audio_events, num_speakers, filetype, ...requestOptions } = options || {};
+       const file = new File([buffer], `audio.${filetype || "mp3"}`);
+       const transcription = await this.client.speechToText.convert(
+         {
+           file,
+           model_id: this.listeningModel?.name,
+           language_code,
+           tag_audio_events,
+           num_speakers
+         },
+         requestOptions
+       );
+       return transcription.text;
+     }, "voice.elevenlabs.listen")();
+     return res;
    }
  };
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@mastra/voice-elevenlabs",
-   "version": "0.1.1-alpha.0",
+   "version": "0.1.1-alpha.3",
    "description": "Mastra ElevenLabs voice integration",
    "type": "module",
    "main": "dist/index.js",
@@ -10,6 +10,10 @@
        "import": {
          "types": "./dist/index.d.ts",
          "default": "./dist/index.js"
+       },
+       "require": {
+         "types": "./dist/index.d.cts",
+         "default": "./dist/index.cjs"
        }
      },
      "./package.json": "./package.json"
@@ -17,7 +21,7 @@
    "dependencies": {
      "elevenlabs": "^1.50.2",
      "zod": "^3.24.1",
-     "@mastra/core": "^0.4.3-alpha.0"
+     "@mastra/core": "^0.4.3-alpha.3"
    },
    "devDependencies": {
      "@microsoft/api-extractor": "^7.49.2",
@@ -29,7 +33,7 @@
      "@internal/lint": "0.0.0"
    },
    "scripts": {
-     "build": "tsup src/index.ts --format esm --experimental-dts --clean --treeshake",
+     "build": "tsup src/index.ts --format esm,cjs --experimental-dts --clean --treeshake",
      "build:watch": "tsup build --watch",
      "test": "vitest run",
      "lint": "eslint ."
package/src/index.test.ts CHANGED
@@ -1,6 +1,5 @@
- import { createWriteStream, writeFileSync, mkdirSync } from 'fs';
+ import { createWriteStream, writeFileSync, mkdirSync, createReadStream } from 'fs';
  import path from 'path';
- import { Readable } from 'stream';
  import { describe, expect, it, beforeAll } from 'vitest';
 
  import { ElevenLabsVoice } from './index.js';
@@ -10,7 +9,6 @@ describe('ElevenLabsVoice Integration Tests', () => {
    const outputDir = path.join(process.cwd(), 'test-outputs');
 
    beforeAll(() => {
-     // Create output directory if it doesn't exist
      try {
        mkdirSync(outputDir, { recursive: true });
      } catch (err) {
@@ -100,14 +98,44 @@ describe('ElevenLabsVoice Integration Tests', () => {
    });
 
    describe('listen', () => {
-     it('should throw error as transcription is not supported', async () => {
-       const dummyStream = new Readable({
-         read() {
-           this.push(null);
+     it('should convert audio to text', async () => {
+       const outputPath = path.join(outputDir, 'elevenlabs-speech-test-params.mp3');
+       const audio = createReadStream(outputPath);
+       const result = await voice.listen(audio);
+
+       if (typeof result !== 'string') {
+         return expect(result).toBeInstanceOf(String);
+       }
+
+       expect(typeof result).toBe('string');
+       expect(result.length).toBeGreaterThan(0);
+     });
+
+     it('should handle API errors gracefully', async () => {
+       // Create a voice instance with an invalid API key to force an error
+       const invalidVoice = new ElevenLabsVoice({
+         listeningModel: {
+           name: 'eleven_multilingual_v2',
+           apiKey: 'invalid-api-key',
          },
        });
 
-       await expect(voice.listen(dummyStream)).rejects.toThrow('ElevenLabs does not support transcription');
+       const outputPath = path.join(outputDir, 'elevenlabs-speech-test-params.mp3');
+       const audio = createReadStream(outputPath);
+
+       // The API call should fail with an authentication error
+       await expect(invalidVoice.listen(audio)).rejects.toThrow();
+     });
+
+     it('should handle invalid audio input', async () => {
+       // Create a path to a non-existent file
+       const nonExistentPath = path.join(outputDir, 'non-existent-file.mp3');
+
+       // Attempting to create a read stream from a non-existent file should throw
+       await expect(async () => {
+         const audio = createReadStream(nonExistentPath);
+         await voice.listen(audio);
+       }).rejects.toThrow();
      });
    });
  });
package/src/index.ts CHANGED
@@ -1,3 +1,4 @@
+ import { File } from 'node:buffer';
  import { MastraVoice } from '@mastra/core/voice';
  import { ElevenLabsClient } from 'elevenlabs';
 
@@ -6,13 +7,32 @@ type ElevenLabsModel =
    | 'eleven_flash_v2_5'
    | 'eleven_flash_v2'
    | 'eleven_multilingual_sts_v2'
-   | 'eleven_english_sts_v2';
+   | 'eleven_english_sts_v2'
+   | 'scribe_v1';
 
  interface ElevenLabsVoiceConfig {
    name?: ElevenLabsModel;
    apiKey?: string;
  }
 
+ interface SpeechToTextOptions {
+   language_code?: string;
+   tag_audio_events?: boolean;
+   num_speakers?: number;
+   filetype?: string;
+ }
+
+ interface RequestOptions {
+   timeoutInSeconds?: number;
+   maxRetries?: number;
+   abortSignal?: AbortSignal;
+   apiKey?: string | undefined;
+   headers?: Record<string, string>;
+ }
+
+ // Combined options type
+ type ElevenLabsListenOptions = SpeechToTextOptions & RequestOptions;
+
  export class ElevenLabsVoice extends MastraVoice {
    private client: ElevenLabsClient;
 
@@ -25,13 +45,21 @@ export class ElevenLabsVoice extends MastraVoice {
     *
     * @throws {Error} If the ELEVENLABS_API_KEY is not set in the environment variables.
     */
-   constructor({ speechModel, speaker }: { speechModel?: ElevenLabsVoiceConfig; speaker?: string } = {}) {
+   constructor({
+     speechModel,
+     listeningModel,
+     speaker,
+   }: { speechModel?: ElevenLabsVoiceConfig; listeningModel?: ElevenLabsVoiceConfig; speaker?: string } = {}) {
      const apiKey = speechModel?.apiKey ?? process.env.ELEVENLABS_API_KEY;
      super({
        speechModel: {
          name: speechModel?.name ?? 'eleven_multilingual_v2',
          apiKey: speechModel?.apiKey,
        },
+       listeningModel: {
+         name: listeningModel?.name ?? 'scribe_v1',
+         apiKey: listeningModel?.apiKey,
+       },
        speaker,
      });
 
@@ -110,7 +138,48 @@ export class ElevenLabsVoice extends MastraVoice {
      return res;
    }
 
-   async listen(_input: NodeJS.ReadableStream | Buffer, _options?: Record<string, unknown>): Promise<string> {
-     throw new Error('ElevenLabs does not support transcription');
+   /**
+    * Converts audio input to text using ElevenLabs Speech-to-Text API.
+    *
+    * @param input - A readable stream containing the audio data to transcribe
+    * @param options - Configuration options for the transcription
+    * @param options.language_code - ISO language code (e.g., 'en', 'fr', 'es')
+    * @param options.tag_audio_events - Whether to tag audio events like [MUSIC], [LAUGHTER], etc.
+    * @param options.num_speakers - Number of speakers to detect in the audio
+    * @param options.filetype - Audio file format (e.g., 'mp3', 'wav', 'ogg')
+    * @param options.timeoutInSeconds - Request timeout in seconds
+    * @param options.maxRetries - Maximum number of retry attempts
+    * @param options.abortSignal - Signal to abort the request
+    *
+    * @returns A Promise that resolves to the transcribed text
+    *
+    */
+   async listen(input: NodeJS.ReadableStream, options?: ElevenLabsListenOptions): Promise<string> {
+     const res = await this.traced(async () => {
+       const chunks: Buffer[] = [];
+       for await (const chunk of input) {
+         chunks.push(Buffer.from(chunk));
+       }
+       const buffer = Buffer.concat(chunks);
+
+       const { language_code, tag_audio_events, num_speakers, filetype, ...requestOptions } = options || {};
+
+       const file = new File([buffer], `audio.${filetype || 'mp3'}`);
+
+       const transcription = await this.client.speechToText.convert(
+         {
+           file: file,
+           model_id: this.listeningModel?.name as ElevenLabsModel,
+           language_code,
+           tag_audio_events,
+           num_speakers,
+         },
+         requestOptions,
+       );
+
+       return transcription.text;
+     }, 'voice.elevenlabs.listen')();
+
+     return res;
    }
  }
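
Because `ElevenLabsListenOptions` is the intersection of `SpeechToTextOptions` and `RequestOptions`, transcription fields and per-request transport settings can be passed in one object: `listen` destructures the four STT fields and forwards the remaining properties to `speechToText.convert` as its second argument. A sketch of such a call; the file name and option values are illustrative, not taken from this diff:

```ts
// Illustrative only: assumes ELEVENLABS_API_KEY is set and meeting.wav is a real local file.
import { createReadStream } from 'node:fs';
import { ElevenLabsVoice } from '@mastra/voice-elevenlabs';

async function transcribeMeeting(): Promise<string> {
  const voice = new ElevenLabsVoice({ listeningModel: { name: 'scribe_v1' } });
  return voice.listen(createReadStream('meeting.wav'), {
    // SpeechToTextOptions: sent in the request body
    filetype: 'wav',
    language_code: 'en',
    num_speakers: 2,
    tag_audio_events: true,
    // RequestOptions: forwarded as per-request SDK settings
    timeoutInSeconds: 60,
    maxRetries: 2,
  });
}
```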