@mastra/voice-openai 0.1.4-alpha.9 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -100,7 +100,11 @@ var OpenAIVoice = class extends voice.MastraVoice {
     if (typeof input !== "string") {
       const chunks = [];
       for await (const chunk of input) {
-        chunks.push(Buffer.from(chunk));
+        if (typeof chunk === "string") {
+          chunks.push(Buffer.from(chunk));
+        } else {
+          chunks.push(chunk);
+        }
       }
       input = Buffer.concat(chunks).toString("utf-8");
     }
@@ -137,7 +141,11 @@ var OpenAIVoice = class extends voice.MastraVoice {
     }
     const chunks = [];
     for await (const chunk of audioStream) {
-      chunks.push(Buffer.from(chunk));
+      if (typeof chunk === "string") {
+        chunks.push(Buffer.from(chunk));
+      } else {
+        chunks.push(chunk);
+      }
     }
     const audioBuffer = Buffer.concat(chunks);
     const text = await this.traced(async () => {
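Both hunks above make the same fix in the compiled `speak` and `listen` paths: chunks read from a Node stream can arrive as strings (for example, after `setEncoding()` has been called) or as `Buffer`s, and the old code called `Buffer.from(chunk)` unconditionally, which also copies chunks that are already binary. A minimal standalone sketch of the pattern follows; the `collectChunks` helper and the demo values are illustrative, not part of the package:

```ts
import { Readable } from 'stream';

// Collect a stream into one Buffer, converting only string chunks.
async function collectChunks(stream: NodeJS.ReadableStream): Promise<Buffer> {
  const chunks: Buffer[] = [];
  for await (const chunk of stream) {
    if (typeof chunk === 'string') {
      // String chunks must be converted before Buffer.concat.
      chunks.push(Buffer.from(chunk));
    } else {
      // Buffer chunks are pushed as-is; Buffer.from(buffer) would copy them.
      chunks.push(chunk);
    }
  }
  return Buffer.concat(chunks);
}

async function demo() {
  // Readable.from yields these string chunks unchanged.
  const buf = await collectChunks(Readable.from(['hello ', 'world']));
  console.log(buf.toString('utf-8')); // "hello world"
}

void demo();
```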
package/dist/index.js CHANGED
@@ -94,7 +94,11 @@ var OpenAIVoice = class extends MastraVoice {
     if (typeof input !== "string") {
       const chunks = [];
       for await (const chunk of input) {
-        chunks.push(Buffer.from(chunk));
+        if (typeof chunk === "string") {
+          chunks.push(Buffer.from(chunk));
+        } else {
+          chunks.push(chunk);
+        }
       }
       input = Buffer.concat(chunks).toString("utf-8");
     }
@@ -131,7 +135,11 @@ var OpenAIVoice = class extends MastraVoice {
     }
     const chunks = [];
     for await (const chunk of audioStream) {
-      chunks.push(Buffer.from(chunk));
+      if (typeof chunk === "string") {
+        chunks.push(Buffer.from(chunk));
+      } else {
+        chunks.push(chunk);
+      }
     }
     const audioBuffer = Buffer.concat(chunks);
     const text = await this.traced(async () => {
package/package.json CHANGED
@@ -1,8 +1,11 @@
 {
   "name": "@mastra/voice-openai",
-  "version": "0.1.4-alpha.9",
+  "version": "0.1.4",
   "description": "Mastra OpenAI speech integration",
   "type": "module",
+  "files": [
+    "dist"
+  ],
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
   "exports": {
@@ -19,21 +22,21 @@
     "./package.json": "./package.json"
   },
   "dependencies": {
-    "openai": "^4.28.0",
-    "zod": "^3.24.1",
-    "@mastra/core": "^0.5.0-alpha.9"
+    "openai": "^4.86.2",
+    "zod": "^3.24.2",
+    "@mastra/core": "^0.5.0"
   },
   "devDependencies": {
-    "@microsoft/api-extractor": "^7.49.2",
-    "@types/node": "^22.13.1",
-    "tsup": "^8.3.6",
-    "typescript": "^5.7.3",
-    "vitest": "^2.1.8",
-    "eslint": "^9.20.1",
-    "@internal/lint": "0.0.0"
+    "@microsoft/api-extractor": "^7.52.1",
+    "@types/node": "^22.13.10",
+    "eslint": "^9.22.0",
+    "tsup": "^8.4.0",
+    "typescript": "^5.8.2",
+    "vitest": "^2.1.9",
+    "@internal/lint": "0.0.1"
   },
   "scripts": {
-    "build": "tsup src/index.ts --format esm,cjs --experimental-dts --clean --treeshake",
+    "build": "tsup src/index.ts --format esm,cjs --experimental-dts --clean --treeshake=smallest --splitting",
     "build:watch": "pnpm build --watch",
     "test": "vitest run",
     "lint": "eslint ."
@@ -1,23 +0,0 @@
-
-> @mastra/voice-openai@0.1.4-alpha.9 build /home/runner/work/mastra/mastra/voice/openai
-> tsup src/index.ts --format esm,cjs --experimental-dts --clean --treeshake
-
-CLI Building entry: src/index.ts
-CLI Using tsconfig: tsconfig.json
-CLI tsup v8.3.6
-TSC Build start
-TSC ⚡️ Build success in 7687ms
-DTS Build start
-CLI Target: es2022
-Analysis will use the bundled TypeScript version 5.7.3
-Writing package typings: /home/runner/work/mastra/mastra/voice/openai/dist/_tsup-dts-rollup.d.ts
-Analysis will use the bundled TypeScript version 5.7.3
-Writing package typings: /home/runner/work/mastra/mastra/voice/openai/dist/_tsup-dts-rollup.d.cts
-DTS ⚡️ Build success in 9276ms
-CLI Cleaning output folder
-ESM Build start
-CJS Build start
-CJS dist/index.cjs 6.00 KB
-CJS ⚡️ Build success in 588ms
-ESM dist/index.js 5.80 KB
-ESM ⚡️ Build success in 589ms
package/CHANGELOG.md DELETED
@@ -1,230 +0,0 @@
-# @mastra/voice-openai
-
-## 0.1.4-alpha.9
-
-### Patch Changes
-
-- Updated dependencies [e9fbac5]
-- Updated dependencies [1e8bcbc]
-- Updated dependencies [aeb5e36]
-- Updated dependencies [f2301de]
-  - @mastra/core@0.5.0-alpha.9
-
-## 0.1.4-alpha.8
-
-### Patch Changes
-
-- Updated dependencies [506f1d5]
-  - @mastra/core@0.5.0-alpha.8
-
-## 0.1.4-alpha.7
-
-### Patch Changes
-
-- Updated dependencies [ee667a2]
-  - @mastra/core@0.5.0-alpha.7
-
-## 0.1.4-alpha.6
-
-### Patch Changes
-
-- Updated dependencies [f6678e4]
-  - @mastra/core@0.5.0-alpha.6
-
-## 0.1.4-alpha.5
-
-### Patch Changes
-
-- Updated dependencies [22643eb]
-- Updated dependencies [6feb23f]
-- Updated dependencies [f2d6727]
-- Updated dependencies [301e4ee]
-- Updated dependencies [dfbe4e9]
-- Updated dependencies [9e81f35]
-- Updated dependencies [caefaa2]
-- Updated dependencies [c151ae6]
-- Updated dependencies [52e0418]
-- Updated dependencies [03236ec]
-- Updated dependencies [3764e71]
-- Updated dependencies [df982db]
-- Updated dependencies [0461849]
-- Updated dependencies [2259379]
-- Updated dependencies [358f069]
-  - @mastra/core@0.5.0-alpha.5
-
-## 0.1.4-alpha.4
-
-### Patch Changes
-
-- Updated dependencies [d79aedf]
-  - @mastra/core@0.5.0-alpha.4
-
-## 0.1.4-alpha.3
-
-### Patch Changes
-
-- Updated dependencies [3d0e290]
-  - @mastra/core@0.5.0-alpha.3
-
-## 0.1.4-alpha.2
-
-### Patch Changes
-
-- Updated dependencies [02ffb7b]
-  - @mastra/core@0.5.0-alpha.2
-
-## 0.1.4-alpha.1
-
-### Patch Changes
-
-- Updated dependencies [dab255b]
-  - @mastra/core@0.5.0-alpha.1
-
-## 0.1.4-alpha.0
-
-### Patch Changes
-
-- Updated dependencies [59df7b6]
-- Updated dependencies [29f3a82]
-- Updated dependencies [59df7b6]
-- Updated dependencies [c139344]
-  - @mastra/core@0.5.0-alpha.0
-
-## 0.1.3
-
-### Patch Changes
-
-- Updated dependencies [1da20e7]
-  - @mastra/core@0.4.4
-
-## 0.1.3-alpha.0
-
-### Patch Changes
-
-- Updated dependencies [1da20e7]
-  - @mastra/core@0.4.4-alpha.0
-
-## 0.1.2
-
-### Patch Changes
-
-- bb4f447: Add support for commonjs
-- Updated dependencies [0d185b1]
-- Updated dependencies [ed55f1d]
-- Updated dependencies [06aa827]
-- Updated dependencies [0fd78ac]
-- Updated dependencies [2512a93]
-- Updated dependencies [e62de74]
-- Updated dependencies [0d25b75]
-- Updated dependencies [fd14a3f]
-- Updated dependencies [8d13b14]
-- Updated dependencies [3f369a2]
-- Updated dependencies [3ee4831]
-- Updated dependencies [4d4e1e1]
-- Updated dependencies [bb4f447]
-- Updated dependencies [108793c]
-- Updated dependencies [5f28f44]
-- Updated dependencies [dabecf4]
-  - @mastra/core@0.4.3
-
-## 0.1.2-alpha.4
-
-### Patch Changes
-
-- Updated dependencies [dabecf4]
-  - @mastra/core@0.4.3-alpha.4
-
-## 0.1.2-alpha.3
-
-### Patch Changes
-
-- bb4f447: Add support for commonjs
-- Updated dependencies [0fd78ac]
-- Updated dependencies [0d25b75]
-- Updated dependencies [fd14a3f]
-- Updated dependencies [3f369a2]
-- Updated dependencies [4d4e1e1]
-- Updated dependencies [bb4f447]
-  - @mastra/core@0.4.3-alpha.3
-
-## 0.1.2-alpha.2
-
-### Patch Changes
-
-- Updated dependencies [2512a93]
-- Updated dependencies [e62de74]
-  - @mastra/core@0.4.3-alpha.2
-
-## 0.1.2-alpha.1
-
-### Patch Changes
-
-- Updated dependencies [0d185b1]
-- Updated dependencies [ed55f1d]
-- Updated dependencies [8d13b14]
-- Updated dependencies [3ee4831]
-- Updated dependencies [108793c]
-- Updated dependencies [5f28f44]
-  - @mastra/core@0.4.3-alpha.1
-
-## 0.1.2-alpha.0
-
-### Patch Changes
-
-- Updated dependencies [06aa827]
-  - @mastra/core@0.4.3-alpha.0
-
-## 0.1.1
-
-### Patch Changes
-
-- Updated dependencies [7fceae1]
-- Updated dependencies [8d94c3e]
-- Updated dependencies [99dcdb5]
-- Updated dependencies [6cb63e0]
-- Updated dependencies [f626fbb]
-- Updated dependencies [e752340]
-- Updated dependencies [eb91535]
-  - @mastra/core@0.4.2
-
-## 0.1.1-alpha.2
-
-### Patch Changes
-
-- Updated dependencies [8d94c3e]
-- Updated dependencies [99dcdb5]
-- Updated dependencies [e752340]
-- Updated dependencies [eb91535]
-  - @mastra/core@0.4.2-alpha.2
-
-## 0.1.1-alpha.1
-
-### Patch Changes
-
-- Updated dependencies [6cb63e0]
-  - @mastra/core@0.4.2-alpha.1
-
-## 0.1.1-alpha.0
-
-### Patch Changes
-
-- Updated dependencies [7fceae1]
-- Updated dependencies [f626fbb]
-  - @mastra/core@0.4.2-alpha.0
-
-## 0.1.0
-
-### Patch Changes
-
-- 0821d6b: Deprecate @mastra/speech-openai for @mastra/voice-openai
-- Updated dependencies [ce44b9b]
-- Updated dependencies [967da43]
-- Updated dependencies [b405f08]
-  - @mastra/core@0.4.1
-
-## 0.1.0
-
-### Changes
-
-- `@mastra/speech-openai` is now deprecated. Please use `@mastra/voice-openai` instead.
-- This package provides both Text-to-Speech (TTS) and Speech-to-Text (STT) capabilities through OpenAI's API.
Binary file
package/eslint.config.js DELETED
@@ -1,6 +0,0 @@
-import { createConfig } from '@internal/lint/eslint';
-
-const config = await createConfig();
-
-/** @type {import("eslint").Linter.Config[]} */
-export default [...config];
package/src/index.test.ts DELETED
@@ -1,191 +0,0 @@
-import { writeFileSync, mkdirSync, createReadStream } from 'fs';
-import path from 'path';
-import { PassThrough } from 'stream';
-import { describe, expect, it, beforeAll } from 'vitest';
-
-import { OpenAIVoice } from './index.js';
-
-describe('OpenAIVoice Integration Tests', () => {
-  let voice: OpenAIVoice;
-  const outputDir = path.join(process.cwd(), 'test-outputs');
-
-  beforeAll(() => {
-    try {
-      mkdirSync(outputDir, { recursive: true });
-    } catch (err) {
-      // Ignore if directory already exists
-      console.log('Directory already exists: ', err);
-    }
-
-    voice = new OpenAIVoice({
-      speechModel: {
-        name: 'tts-1',
-      },
-      listeningModel: {
-        name: 'whisper-1',
-      },
-    });
-  });
-
-  describe('getSpeakers', () => {
-    it('should list available voices', async () => {
-      const speakers = await voice.getSpeakers();
-      expect(speakers).toContainEqual({ voiceId: 'alloy' });
-      expect(speakers).toContainEqual({ voiceId: 'nova' });
-    });
-  });
-
-  it('should initialize with default parameters', async () => {
-    const defaultVoice = new OpenAIVoice();
-    const speakers = await defaultVoice.getSpeakers();
-    expect(speakers).toBeInstanceOf(Array);
-    expect(speakers.length).toBeGreaterThan(0);
-  });
-
-  describe('speak', () => {
-    it('should speak with default parameters', async () => {
-      const defaultVoice = new OpenAIVoice();
-      const audioStream = await defaultVoice.speak('Hello with defaults');
-
-      const chunks: Buffer[] = [];
-      for await (const chunk of audioStream) {
-        chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
-      }
-      const audioBuffer = Buffer.concat(chunks);
-
-      expect(audioBuffer.length).toBeGreaterThan(0);
-    });
-
-    it('should generate audio stream from text', async () => {
-      const audioStream = await voice.speak('Hello World', {
-        speaker: 'alloy',
-      });
-
-      const chunks: Buffer[] = [];
-      for await (const chunk of audioStream) {
-        chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
-      }
-      const audioBuffer = Buffer.concat(chunks);
-
-      expect(audioBuffer.length).toBeGreaterThan(0);
-
-      const outputPath = path.join(outputDir, 'speech-test.mp3');
-      writeFileSync(outputPath, audioBuffer);
-    }, 10000);
-
-    it('should work with different parameters', async () => {
-      const audioStream = await voice.speak('Test with parameters', {
-        speaker: 'nova',
-        speed: 0.5,
-      });
-
-      const chunks: Buffer[] = [];
-      for await (const chunk of audioStream) {
-        chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
-      }
-      const audioBuffer = Buffer.concat(chunks);
-
-      expect(audioBuffer.length).toBeGreaterThan(0);
-
-      const outputPath = path.join(outputDir, 'speech-test-params.mp3');
-      writeFileSync(outputPath, audioBuffer);
-    }, 10000);
-
-    it('should accept text stream as input', async () => {
-      const inputStream = new PassThrough();
-      inputStream.end('Hello from stream');
-
-      const audioStream = await voice.speak(inputStream, {
-        speaker: 'alloy',
-      });
-
-      const chunks: Buffer[] = [];
-      for await (const chunk of audioStream) {
-        chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
-      }
-      const audioBuffer = Buffer.concat(chunks);
-
-      expect(audioBuffer.length).toBeGreaterThan(0);
-
-      const outputPath = path.join(outputDir, 'speech-stream-input.mp3');
-      writeFileSync(outputPath, audioBuffer);
-    }, 10000);
-  });
-
-  describe('listen', () => {
-    it('should listen with default parameters', async () => {
-      const defaultVoice = new OpenAIVoice();
-      const audioStream = await defaultVoice.speak('Listening test with defaults');
-
-      const text = await defaultVoice.listen(audioStream);
-
-      expect(text).toBeTruthy();
-      expect(typeof text).toBe('string');
-      expect(text.toLowerCase()).toContain('listening test');
-    });
-
-    it('should transcribe audio from fixture file', async () => {
-      const fixturePath = path.join(process.cwd(), '__fixtures__', 'voice-test.m4a');
-      const audioStream = createReadStream(fixturePath);
-
-      const text = await voice.listen(audioStream, {
-        filetype: 'm4a',
-      });
-
-      expect(text).toBeTruthy();
-      console.log(text);
-      expect(typeof text).toBe('string');
-      expect(text.length).toBeGreaterThan(0);
-    }, 15000);
-
-    it('should transcribe audio stream', async () => {
-      // First generate some test audio
-      const audioStream = await voice.speak('This is a test for transcription', {
-        speaker: 'alloy',
-      });
-
-      // Then transcribe it
-      const text = await voice.listen(audioStream, {
-        filetype: 'm4a',
-      });
-
-      expect(text).toBeTruthy();
-      expect(typeof text).toBe('string');
-      expect(text.toLowerCase()).toContain('test');
-    }, 15000);
-
-    it('should accept options', async () => {
-      const audioStream = await voice.speak('Test with language option', {
-        speaker: 'nova',
-      });
-
-      const text = await voice.listen(audioStream, {
-        language: 'en',
-        filetype: 'm4a',
-      });
-
-      expect(text).toBeTruthy();
-      expect(typeof text).toBe('string');
-      expect(text.toLowerCase()).toContain('test');
-    }, 15000);
-  });
-
-  // Error cases
-  describe('error handling', () => {
-    it('should handle invalid speaker names', async () => {
-      await expect(
-        voice.speak('Test', {
-          speaker: 'invalid_voice',
-        }),
-      ).rejects.toThrow();
-    });
-
-    it('should handle empty text', async () => {
-      await expect(
-        voice.speak('', {
-          speaker: 'alloy',
-        }),
-      ).rejects.toThrow();
-    });
-  });
-});
package/src/index.ts DELETED
@@ -1,205 +0,0 @@
-import { PassThrough } from 'stream';
-
-import { MastraVoice } from '@mastra/core/voice';
-import OpenAI from 'openai';
-
-type OpenAIVoiceId = 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer' | 'ash' | 'coral' | 'sage';
-type OpenAIModel = 'tts-1' | 'tts-1-hd' | 'whisper-1';
-
-export interface OpenAIConfig {
-  name?: OpenAIModel;
-  apiKey?: string;
-}
-
-export interface OpenAIVoiceConfig {
-  speech?: {
-    model: 'tts-1' | 'tts-1-hd';
-    apiKey?: string;
-    speaker?: OpenAIVoiceId;
-  };
-  listening?: {
-    model: 'whisper-1';
-    apiKey?: string;
-  };
-}
-
-export class OpenAIVoice extends MastraVoice {
-  speechClient?: OpenAI;
-  listeningClient?: OpenAI;
-
-  /**
-   * Constructs an instance of OpenAIVoice with optional configurations for speech and listening models.
-   *
-   * @param {Object} [config] - Configuration options for the OpenAIVoice instance.
-   * @param {OpenAIConfig} [config.listeningModel] - Configuration for the listening model, including model name and API key.
-   * @param {OpenAIConfig} [config.speechModel] - Configuration for the speech model, including model name and API key.
-   * @param {string} [config.speaker] - The default speaker's voice to use for speech synthesis.
-   * @throws {Error} - Throws an error if no API key is provided for either the speech or listening model.
-   */
-  constructor({
-    listeningModel,
-    speechModel,
-    speaker,
-  }: {
-    listeningModel?: OpenAIConfig;
-    speechModel?: OpenAIConfig;
-    speaker?: string;
-  } = {}) {
-    const defaultApiKey = process.env.OPENAI_API_KEY;
-    const defaultSpeechModel = {
-      name: 'tts-1',
-      apiKey: defaultApiKey,
-    };
-    const defaultListeningModel = {
-      name: 'whisper-1',
-      apiKey: defaultApiKey,
-    };
-
-    super({
-      speechModel: {
-        name: speechModel?.name ?? defaultSpeechModel.name,
-        apiKey: speechModel?.apiKey ?? defaultSpeechModel.apiKey,
-      },
-      listeningModel: {
-        name: listeningModel?.name ?? defaultListeningModel.name,
-        apiKey: listeningModel?.apiKey ?? defaultListeningModel.apiKey,
-      },
-      speaker: speaker ?? 'alloy',
-    });
-
-    const speechApiKey = speechModel?.apiKey || defaultApiKey;
-    if (!speechApiKey) {
-      throw new Error('No API key provided for speech model');
-    }
-    this.speechClient = new OpenAI({ apiKey: speechApiKey });
-
-    const listeningApiKey = listeningModel?.apiKey || defaultApiKey;
-    if (!listeningApiKey) {
-      throw new Error('No API key provided for listening model');
-    }
-    this.listeningClient = new OpenAI({ apiKey: listeningApiKey });
-
-    if (!this.speechClient && !this.listeningClient) {
-      throw new Error('At least one of OPENAI_API_KEY, speechModel.apiKey, or listeningModel.apiKey must be set');
-    }
-  }
-
-  /**
-   * Retrieves a list of available speakers for the speech model.
-   *
-   * @returns {Promise<Array<{ voiceId: OpenAIVoiceId }>>} - A promise that resolves to an array of objects,
-   * each containing a `voiceId` representing an available speaker.
-   * @throws {Error} - Throws an error if the speech model is not configured.
-   */
-  async getSpeakers(): Promise<Array<{ voiceId: OpenAIVoiceId }>> {
-    if (!this.speechModel) {
-      throw new Error('Speech model not configured');
-    }
-
-    return [
-      { voiceId: 'alloy' },
-      { voiceId: 'echo' },
-      { voiceId: 'fable' },
-      { voiceId: 'onyx' },
-      { voiceId: 'nova' },
-      { voiceId: 'shimmer' },
-      { voiceId: 'ash' },
-      { voiceId: 'coral' },
-      { voiceId: 'sage' },
-    ];
-  }
-
-  /**
-   * Converts text or audio input into speech using the configured speech model.
-   *
-   * @param {string | NodeJS.ReadableStream} input - The text or audio stream to be converted into speech.
-   * @param {Object} [options] - Optional parameters for the speech synthesis.
-   * @param {string} [options.speaker] - The speaker's voice to use for the speech synthesis.
-   * @param {number} [options.speed] - The speed at which the speech should be synthesized.
-   * @returns {Promise<NodeJS.ReadableStream>} - A promise that resolves to a readable stream of the synthesized audio.
-   * @throws {Error} - Throws an error if the speech model is not configured or if the input text is empty.
-   */
-  async speak(
-    input: string | NodeJS.ReadableStream,
-    options?: {
-      speaker?: string;
-      speed?: number;
-      [key: string]: any;
-    },
-  ): Promise<NodeJS.ReadableStream> {
-    if (!this.speechClient) {
-      throw new Error('Speech model not configured');
-    }
-
-    if (typeof input !== 'string') {
-      const chunks: Buffer[] = [];
-      for await (const chunk of input) {
-        chunks.push(Buffer.from(chunk));
-      }
-      input = Buffer.concat(chunks).toString('utf-8');
-    }
-
-    if (input.trim().length === 0) {
-      throw new Error('Input text is empty');
-    }
-
-    const audio = await this.traced(async () => {
-      const response = await this.speechClient!.audio.speech.create({
-        model: this.speechModel?.name ?? 'tts-1',
-        voice: (options?.speaker ?? this.speaker) as OpenAIVoiceId,
-        input,
-        speed: options?.speed || 1.0,
-      });
-
-      const passThrough = new PassThrough();
-      const buffer = Buffer.from(await response.arrayBuffer());
-      passThrough.end(buffer);
-      return passThrough;
-    }, 'voice.openai.speak')();
-
-    return audio;
-  }
-
-  /**
-   * Transcribes audio from a given stream using the configured listening model.
-   *
-   * @param {NodeJS.ReadableStream} audioStream - The audio stream to be transcribed.
-   * @param {Object} [options] - Optional parameters for the transcription.
-   * @param {string} [options.filetype] - The file type of the audio stream.
-   * Supported types include 'mp3', 'mp4', 'mpeg', 'mpga', 'm4a', 'wav', 'webm'.
-   * @returns {Promise<string>} - A promise that resolves to the transcribed text.
-   * @throws {Error} - Throws an error if the listening model is not configured.
-   */
-  async listen(
-    audioStream: NodeJS.ReadableStream,
-    options?: {
-      filetype?: 'mp3' | 'mp4' | 'mpeg' | 'mpga' | 'm4a' | 'wav' | 'webm';
-      [key: string]: any;
-    },
-  ): Promise<string> {
-    if (!this.listeningClient) {
-      throw new Error('Listening model not configured');
-    }
-
-    const chunks: Buffer[] = [];
-    for await (const chunk of audioStream) {
-      chunks.push(Buffer.from(chunk));
-    }
-    const audioBuffer = Buffer.concat(chunks);
-
-    const text = await this.traced(async () => {
-      const { filetype, ...otherOptions } = options || {};
-      const file = new File([audioBuffer], `audio.${filetype || 'mp3'}`);
-
-      const response = await this.listeningClient!.audio.transcriptions.create({
-        model: this.listeningModel?.name || 'whisper-1',
-        file: file as any,
-        ...otherOptions,
-      });
-
-      return response.text;
-    }, 'voice.openai.listen')();
-
-    return text;
-  }
-}
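The `"files": ["dist"]` addition in package.json means these source, test, and config files are dropped from the published tarball rather than removed from the repository; the public API is unchanged. For reference, a minimal usage sketch based on the deleted `src/index.ts` above (it assumes `OPENAI_API_KEY` is set in the environment; the text and option values are illustrative):

```ts
import { OpenAIVoice } from '@mastra/voice-openai';

async function main() {
  // Models and speaker shown here match the defaults in the deleted source.
  const voice = new OpenAIVoice({
    speechModel: { name: 'tts-1' },
    listeningModel: { name: 'whisper-1' },
    speaker: 'alloy',
  });

  // Text-to-speech: resolves to a readable stream of MP3 audio.
  const audioStream = await voice.speak('Hello from Mastra', { speed: 1.0 });

  // Speech-to-text: round-trip the synthesized audio through Whisper.
  const transcript = await voice.listen(audioStream, { filetype: 'mp3' });
  console.log(transcript);
}

void main();
```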
Binary file
Binary file
package/tsconfig.json DELETED
@@ -1,5 +0,0 @@
-{
-  "extends": "../../tsconfig.node.json",
-  "include": ["src/**/*"],
-  "exclude": ["node_modules", "**/*.test.ts"]
-}
package/vitest.config.ts DELETED
@@ -1,8 +0,0 @@
-import { defineConfig } from 'vitest/config';
-
-export default defineConfig({
-  test: {
-    globals: true,
-    include: ['src/**/*.test.ts'],
-  },
-});