@mastra/voice-openai 0.1.4-alpha.8 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -100,7 +100,11 @@ var OpenAIVoice = class extends voice.MastraVoice {
      if (typeof input !== "string") {
        const chunks = [];
        for await (const chunk of input) {
-         chunks.push(Buffer.from(chunk));
+         if (typeof chunk === "string") {
+           chunks.push(Buffer.from(chunk));
+         } else {
+           chunks.push(chunk);
+         }
        }
        input = Buffer.concat(chunks).toString("utf-8");
      }
@@ -137,7 +141,11 @@ var OpenAIVoice = class extends voice.MastraVoice {
      }
      const chunks = [];
      for await (const chunk of audioStream) {
-       chunks.push(Buffer.from(chunk));
+       if (typeof chunk === "string") {
+         chunks.push(Buffer.from(chunk));
+       } else {
+         chunks.push(chunk);
+       }
      }
      const audioBuffer = Buffer.concat(chunks);
      const text = await this.traced(async () => {
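Both hunks apply the same fix, and the ESM bundle below receives the identical patch: when speak() drains a text stream or listen() drains an audio stream, the chunks yielded by the async iterator may be strings or already-binary Buffers, and the new code only converts the string case. A minimal sketch of the normalization pattern; `collectChunks` is an illustrative name, not an export of @mastra/voice-openai:

```ts
// Sketch of the chunk normalization added in 0.1.4.
async function collectChunks(stream: AsyncIterable<string | Buffer>): Promise<Buffer> {
  const chunks: Buffer[] = [];
  for await (const chunk of stream) {
    if (typeof chunk === 'string') {
      // String chunks (e.g. from a utf-8 encoded stream) must be converted
      // before Buffer.concat will accept them.
      chunks.push(Buffer.from(chunk));
    } else {
      // Binary chunks are already Buffers; pushing them as-is avoids the
      // extra copy the old unconditional Buffer.from(chunk) made.
      chunks.push(chunk);
    }
  }
  return Buffer.concat(chunks);
}
```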
package/dist/index.js CHANGED
@@ -94,7 +94,11 @@ var OpenAIVoice = class extends MastraVoice {
      if (typeof input !== "string") {
        const chunks = [];
        for await (const chunk of input) {
-         chunks.push(Buffer.from(chunk));
+         if (typeof chunk === "string") {
+           chunks.push(Buffer.from(chunk));
+         } else {
+           chunks.push(chunk);
+         }
        }
        input = Buffer.concat(chunks).toString("utf-8");
      }
@@ -131,7 +135,11 @@ var OpenAIVoice = class extends MastraVoice {
      }
      const chunks = [];
      for await (const chunk of audioStream) {
-       chunks.push(Buffer.from(chunk));
+       if (typeof chunk === "string") {
+         chunks.push(Buffer.from(chunk));
+       } else {
+         chunks.push(chunk);
+       }
      }
      const audioBuffer = Buffer.concat(chunks);
      const text = await this.traced(async () => {
package/package.json CHANGED
@@ -1,8 +1,11 @@
  {
    "name": "@mastra/voice-openai",
-   "version": "0.1.4-alpha.8",
+   "version": "0.1.4",
    "description": "Mastra OpenAI speech integration",
    "type": "module",
+   "files": [
+     "dist"
+   ],
    "main": "dist/index.js",
    "types": "dist/index.d.ts",
    "exports": {
@@ -19,21 +22,21 @@
      "./package.json": "./package.json"
    },
    "dependencies": {
-     "openai": "^4.28.0",
-     "zod": "^3.24.1",
-     "@mastra/core": "^0.5.0-alpha.8"
+     "openai": "^4.86.2",
+     "zod": "^3.24.2",
+     "@mastra/core": "^0.5.0"
    },
    "devDependencies": {
-     "@microsoft/api-extractor": "^7.49.2",
-     "@types/node": "^22.13.1",
-     "tsup": "^8.3.6",
-     "typescript": "^5.7.3",
-     "vitest": "^2.1.8",
-     "eslint": "^9.20.1",
-     "@internal/lint": "0.0.0"
+     "@microsoft/api-extractor": "^7.52.1",
+     "@types/node": "^22.13.10",
+     "eslint": "^9.22.0",
+     "tsup": "^8.4.0",
+     "typescript": "^5.8.2",
+     "vitest": "^2.1.9",
+     "@internal/lint": "0.0.1"
    },
    "scripts": {
-     "build": "tsup src/index.ts --format esm,cjs --experimental-dts --clean --treeshake",
+     "build": "tsup src/index.ts --format esm,cjs --experimental-dts --clean --treeshake=smallest --splitting",
      "build:watch": "pnpm build --watch",
      "test": "vitest run",
      "lint": "eslint ."
@@ -1,23 +0,0 @@
-
- > @mastra/voice-openai@0.1.4-alpha.8 build /home/runner/work/mastra/mastra/voice/openai
- > tsup src/index.ts --format esm,cjs --experimental-dts --clean --treeshake
-
- CLI Building entry: src/index.ts
- CLI Using tsconfig: tsconfig.json
- CLI tsup v8.3.6
- TSC Build start
- TSC ⚡️ Build success in 6607ms
- DTS Build start
- CLI Target: es2022
- CLI Cleaning output folder
- ESM Build start
- CJS Build start
- Analysis will use the bundled TypeScript version 5.7.3
- Writing package typings: /home/runner/work/mastra/mastra/voice/openai/dist/_tsup-dts-rollup.d.ts
- Analysis will use the bundled TypeScript version 5.7.3
- Writing package typings: /home/runner/work/mastra/mastra/voice/openai/dist/_tsup-dts-rollup.d.cts
- DTS ⚡️ Build success in 9623ms
- ESM dist/index.js 5.80 KB
- ESM ⚡️ Build success in 10045ms
- CJS dist/index.cjs 6.00 KB
- CJS ⚡️ Build success in 10045ms
package/CHANGELOG.md DELETED
@@ -1,220 +0,0 @@
- # @mastra/voice-openai
-
- ## 0.1.4-alpha.8
-
- ### Patch Changes
-
- - Updated dependencies [506f1d5]
-   - @mastra/core@0.5.0-alpha.8
-
- ## 0.1.4-alpha.7
-
- ### Patch Changes
-
- - Updated dependencies [ee667a2]
-   - @mastra/core@0.5.0-alpha.7
-
- ## 0.1.4-alpha.6
-
- ### Patch Changes
-
- - Updated dependencies [f6678e4]
-   - @mastra/core@0.5.0-alpha.6
-
- ## 0.1.4-alpha.5
-
- ### Patch Changes
-
- - Updated dependencies [22643eb]
- - Updated dependencies [6feb23f]
- - Updated dependencies [f2d6727]
- - Updated dependencies [301e4ee]
- - Updated dependencies [dfbe4e9]
- - Updated dependencies [9e81f35]
- - Updated dependencies [caefaa2]
- - Updated dependencies [c151ae6]
- - Updated dependencies [52e0418]
- - Updated dependencies [03236ec]
- - Updated dependencies [3764e71]
- - Updated dependencies [df982db]
- - Updated dependencies [0461849]
- - Updated dependencies [2259379]
- - Updated dependencies [358f069]
-   - @mastra/core@0.5.0-alpha.5
-
- ## 0.1.4-alpha.4
-
- ### Patch Changes
-
- - Updated dependencies [d79aedf]
-   - @mastra/core@0.5.0-alpha.4
-
- ## 0.1.4-alpha.3
-
- ### Patch Changes
-
- - Updated dependencies [3d0e290]
-   - @mastra/core@0.5.0-alpha.3
-
- ## 0.1.4-alpha.2
-
- ### Patch Changes
-
- - Updated dependencies [02ffb7b]
-   - @mastra/core@0.5.0-alpha.2
-
- ## 0.1.4-alpha.1
-
- ### Patch Changes
-
- - Updated dependencies [dab255b]
-   - @mastra/core@0.5.0-alpha.1
-
- ## 0.1.4-alpha.0
-
- ### Patch Changes
-
- - Updated dependencies [59df7b6]
- - Updated dependencies [29f3a82]
- - Updated dependencies [59df7b6]
- - Updated dependencies [c139344]
-   - @mastra/core@0.5.0-alpha.0
-
- ## 0.1.3
-
- ### Patch Changes
-
- - Updated dependencies [1da20e7]
-   - @mastra/core@0.4.4
-
- ## 0.1.3-alpha.0
-
- ### Patch Changes
-
- - Updated dependencies [1da20e7]
-   - @mastra/core@0.4.4-alpha.0
-
- ## 0.1.2
-
- ### Patch Changes
-
- - bb4f447: Add support for commonjs
- - Updated dependencies [0d185b1]
- - Updated dependencies [ed55f1d]
- - Updated dependencies [06aa827]
- - Updated dependencies [0fd78ac]
- - Updated dependencies [2512a93]
- - Updated dependencies [e62de74]
- - Updated dependencies [0d25b75]
- - Updated dependencies [fd14a3f]
- - Updated dependencies [8d13b14]
- - Updated dependencies [3f369a2]
- - Updated dependencies [3ee4831]
- - Updated dependencies [4d4e1e1]
- - Updated dependencies [bb4f447]
- - Updated dependencies [108793c]
- - Updated dependencies [5f28f44]
- - Updated dependencies [dabecf4]
-   - @mastra/core@0.4.3
-
- ## 0.1.2-alpha.4
-
- ### Patch Changes
-
- - Updated dependencies [dabecf4]
-   - @mastra/core@0.4.3-alpha.4
-
- ## 0.1.2-alpha.3
-
- ### Patch Changes
-
- - bb4f447: Add support for commonjs
- - Updated dependencies [0fd78ac]
- - Updated dependencies [0d25b75]
- - Updated dependencies [fd14a3f]
- - Updated dependencies [3f369a2]
- - Updated dependencies [4d4e1e1]
- - Updated dependencies [bb4f447]
-   - @mastra/core@0.4.3-alpha.3
-
- ## 0.1.2-alpha.2
-
- ### Patch Changes
-
- - Updated dependencies [2512a93]
- - Updated dependencies [e62de74]
-   - @mastra/core@0.4.3-alpha.2
-
- ## 0.1.2-alpha.1
-
- ### Patch Changes
-
- - Updated dependencies [0d185b1]
- - Updated dependencies [ed55f1d]
- - Updated dependencies [8d13b14]
- - Updated dependencies [3ee4831]
- - Updated dependencies [108793c]
- - Updated dependencies [5f28f44]
-   - @mastra/core@0.4.3-alpha.1
-
- ## 0.1.2-alpha.0
-
- ### Patch Changes
-
- - Updated dependencies [06aa827]
-   - @mastra/core@0.4.3-alpha.0
-
- ## 0.1.1
-
- ### Patch Changes
-
- - Updated dependencies [7fceae1]
- - Updated dependencies [8d94c3e]
- - Updated dependencies [99dcdb5]
- - Updated dependencies [6cb63e0]
- - Updated dependencies [f626fbb]
- - Updated dependencies [e752340]
- - Updated dependencies [eb91535]
-   - @mastra/core@0.4.2
-
- ## 0.1.1-alpha.2
-
- ### Patch Changes
-
- - Updated dependencies [8d94c3e]
- - Updated dependencies [99dcdb5]
- - Updated dependencies [e752340]
- - Updated dependencies [eb91535]
-   - @mastra/core@0.4.2-alpha.2
-
- ## 0.1.1-alpha.1
-
- ### Patch Changes
-
- - Updated dependencies [6cb63e0]
-   - @mastra/core@0.4.2-alpha.1
-
- ## 0.1.1-alpha.0
-
- ### Patch Changes
-
- - Updated dependencies [7fceae1]
- - Updated dependencies [f626fbb]
-   - @mastra/core@0.4.2-alpha.0
-
- ## 0.1.0
-
- ### Patch Changes
-
- - 0821d6b: Deprecate @mastra/speech-openai for @mastra/voice-openai
- - Updated dependencies [ce44b9b]
- - Updated dependencies [967da43]
- - Updated dependencies [b405f08]
-   - @mastra/core@0.4.1
-
- ## 0.1.0
-
- ### Changes
-
- - `@mastra/speech-openai` is now deprecated. Please use `@mastra/voice-openai` instead.
- - This package provides both Text-to-Speech (TTS) and Speech-to-Text (STT) capabilities through OpenAI's API.
Binary file
package/eslint.config.js DELETED
@@ -1,6 +0,0 @@
- import { createConfig } from '@internal/lint/eslint';
-
- const config = await createConfig();
-
- /** @type {import("eslint").Linter.Config[]} */
- export default [...config];
package/src/index.test.ts DELETED
@@ -1,191 +0,0 @@
- import { writeFileSync, mkdirSync, createReadStream } from 'fs';
- import path from 'path';
- import { PassThrough } from 'stream';
- import { describe, expect, it, beforeAll } from 'vitest';
-
- import { OpenAIVoice } from './index.js';
-
- describe('OpenAIVoice Integration Tests', () => {
-   let voice: OpenAIVoice;
-   const outputDir = path.join(process.cwd(), 'test-outputs');
-
-   beforeAll(() => {
-     try {
-       mkdirSync(outputDir, { recursive: true });
-     } catch (err) {
-       // Ignore if directory already exists
-       console.log('Directory already exists: ', err);
-     }
-
-     voice = new OpenAIVoice({
-       speechModel: {
-         name: 'tts-1',
-       },
-       listeningModel: {
-         name: 'whisper-1',
-       },
-     });
-   });
-
-   describe('getSpeakers', () => {
-     it('should list available voices', async () => {
-       const speakers = await voice.getSpeakers();
-       expect(speakers).toContainEqual({ voiceId: 'alloy' });
-       expect(speakers).toContainEqual({ voiceId: 'nova' });
-     });
-   });
-
-   it('should initialize with default parameters', async () => {
-     const defaultVoice = new OpenAIVoice();
-     const speakers = await defaultVoice.getSpeakers();
-     expect(speakers).toBeInstanceOf(Array);
-     expect(speakers.length).toBeGreaterThan(0);
-   });
-
-   describe('speak', () => {
-     it('should speak with default parameters', async () => {
-       const defaultVoice = new OpenAIVoice();
-       const audioStream = await defaultVoice.speak('Hello with defaults');
-
-       const chunks: Buffer[] = [];
-       for await (const chunk of audioStream) {
-         chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
-       }
-       const audioBuffer = Buffer.concat(chunks);
-
-       expect(audioBuffer.length).toBeGreaterThan(0);
-     });
-
-     it('should generate audio stream from text', async () => {
-       const audioStream = await voice.speak('Hello World', {
-         speaker: 'alloy',
-       });
-
-       const chunks: Buffer[] = [];
-       for await (const chunk of audioStream) {
-         chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
-       }
-       const audioBuffer = Buffer.concat(chunks);
-
-       expect(audioBuffer.length).toBeGreaterThan(0);
-
-       const outputPath = path.join(outputDir, 'speech-test.mp3');
-       writeFileSync(outputPath, audioBuffer);
-     }, 10000);
-
-     it('should work with different parameters', async () => {
-       const audioStream = await voice.speak('Test with parameters', {
-         speaker: 'nova',
-         speed: 0.5,
-       });
-
-       const chunks: Buffer[] = [];
-       for await (const chunk of audioStream) {
-         chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
-       }
-       const audioBuffer = Buffer.concat(chunks);
-
-       expect(audioBuffer.length).toBeGreaterThan(0);
-
-       const outputPath = path.join(outputDir, 'speech-test-params.mp3');
-       writeFileSync(outputPath, audioBuffer);
-     }, 10000);
-
-     it('should accept text stream as input', async () => {
-       const inputStream = new PassThrough();
-       inputStream.end('Hello from stream');
-
-       const audioStream = await voice.speak(inputStream, {
-         speaker: 'alloy',
-       });
-
-       const chunks: Buffer[] = [];
-       for await (const chunk of audioStream) {
-         chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
-       }
-       const audioBuffer = Buffer.concat(chunks);
-
-       expect(audioBuffer.length).toBeGreaterThan(0);
-
-       const outputPath = path.join(outputDir, 'speech-stream-input.mp3');
-       writeFileSync(outputPath, audioBuffer);
-     }, 10000);
-   });
-
-   describe('listen', () => {
-     it('should listen with default parameters', async () => {
-       const defaultVoice = new OpenAIVoice();
-       const audioStream = await defaultVoice.speak('Listening test with defaults');
-
-       const text = await defaultVoice.listen(audioStream);
-
-       expect(text).toBeTruthy();
-       expect(typeof text).toBe('string');
-       expect(text.toLowerCase()).toContain('listening test');
-     });
-
-     it('should transcribe audio from fixture file', async () => {
-       const fixturePath = path.join(process.cwd(), '__fixtures__', 'voice-test.m4a');
-       const audioStream = createReadStream(fixturePath);
-
-       const text = await voice.listen(audioStream, {
-         filetype: 'm4a',
-       });
-
-       expect(text).toBeTruthy();
-       console.log(text);
-       expect(typeof text).toBe('string');
-       expect(text.length).toBeGreaterThan(0);
-     }, 15000);
-
-     it('should transcribe audio stream', async () => {
-       // First generate some test audio
-       const audioStream = await voice.speak('This is a test for transcription', {
-         speaker: 'alloy',
-       });
-
-       // Then transcribe it
-       const text = await voice.listen(audioStream, {
-         filetype: 'm4a',
-       });
-
-       expect(text).toBeTruthy();
-       expect(typeof text).toBe('string');
-       expect(text.toLowerCase()).toContain('test');
-     }, 15000);
-
-     it('should accept options', async () => {
-       const audioStream = await voice.speak('Test with language option', {
-         speaker: 'nova',
-       });
-
-       const text = await voice.listen(audioStream, {
-         language: 'en',
-         filetype: 'm4a',
-       });
-
-       expect(text).toBeTruthy();
-       expect(typeof text).toBe('string');
-       expect(text.toLowerCase()).toContain('test');
-     }, 15000);
-   });
-
-   // Error cases
-   describe('error handling', () => {
-     it('should handle invalid speaker names', async () => {
-       await expect(
-         voice.speak('Test', {
-           speaker: 'invalid_voice',
-         }),
-       ).rejects.toThrow();
-     });
-
-     it('should handle empty text', async () => {
-       await expect(
-         voice.speak('', {
-           speaker: 'alloy',
-         }),
-       ).rejects.toThrow();
-     });
-   });
- });
package/src/index.ts DELETED
@@ -1,205 +0,0 @@
- import { PassThrough } from 'stream';
-
- import { MastraVoice } from '@mastra/core/voice';
- import OpenAI from 'openai';
-
- type OpenAIVoiceId = 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer' | 'ash' | 'coral' | 'sage';
- type OpenAIModel = 'tts-1' | 'tts-1-hd' | 'whisper-1';
-
- export interface OpenAIConfig {
-   name?: OpenAIModel;
-   apiKey?: string;
- }
-
- export interface OpenAIVoiceConfig {
-   speech?: {
-     model: 'tts-1' | 'tts-1-hd';
-     apiKey?: string;
-     speaker?: OpenAIVoiceId;
-   };
-   listening?: {
-     model: 'whisper-1';
-     apiKey?: string;
-   };
- }
-
- export class OpenAIVoice extends MastraVoice {
-   speechClient?: OpenAI;
-   listeningClient?: OpenAI;
-
-   /**
-    * Constructs an instance of OpenAIVoice with optional configurations for speech and listening models.
-    *
-    * @param {Object} [config] - Configuration options for the OpenAIVoice instance.
-    * @param {OpenAIConfig} [config.listeningModel] - Configuration for the listening model, including model name and API key.
-    * @param {OpenAIConfig} [config.speechModel] - Configuration for the speech model, including model name and API key.
-    * @param {string} [config.speaker] - The default speaker's voice to use for speech synthesis.
-    * @throws {Error} - Throws an error if no API key is provided for either the speech or listening model.
-    */
-   constructor({
-     listeningModel,
-     speechModel,
-     speaker,
-   }: {
-     listeningModel?: OpenAIConfig;
-     speechModel?: OpenAIConfig;
-     speaker?: string;
-   } = {}) {
-     const defaultApiKey = process.env.OPENAI_API_KEY;
-     const defaultSpeechModel = {
-       name: 'tts-1',
-       apiKey: defaultApiKey,
-     };
-     const defaultListeningModel = {
-       name: 'whisper-1',
-       apiKey: defaultApiKey,
-     };
-
-     super({
-       speechModel: {
-         name: speechModel?.name ?? defaultSpeechModel.name,
-         apiKey: speechModel?.apiKey ?? defaultSpeechModel.apiKey,
-       },
-       listeningModel: {
-         name: listeningModel?.name ?? defaultListeningModel.name,
-         apiKey: listeningModel?.apiKey ?? defaultListeningModel.apiKey,
-       },
-       speaker: speaker ?? 'alloy',
-     });
-
-     const speechApiKey = speechModel?.apiKey || defaultApiKey;
-     if (!speechApiKey) {
-       throw new Error('No API key provided for speech model');
-     }
-     this.speechClient = new OpenAI({ apiKey: speechApiKey });
-
-     const listeningApiKey = listeningModel?.apiKey || defaultApiKey;
-     if (!listeningApiKey) {
-       throw new Error('No API key provided for listening model');
-     }
-     this.listeningClient = new OpenAI({ apiKey: listeningApiKey });
-
-     if (!this.speechClient && !this.listeningClient) {
-       throw new Error('At least one of OPENAI_API_KEY, speechModel.apiKey, or listeningModel.apiKey must be set');
-     }
-   }
-
-   /**
-    * Retrieves a list of available speakers for the speech model.
-    *
-    * @returns {Promise<Array<{ voiceId: OpenAIVoiceId }>>} - A promise that resolves to an array of objects,
-    * each containing a `voiceId` representing an available speaker.
-    * @throws {Error} - Throws an error if the speech model is not configured.
-    */
-   async getSpeakers(): Promise<Array<{ voiceId: OpenAIVoiceId }>> {
-     if (!this.speechModel) {
-       throw new Error('Speech model not configured');
-     }
-
-     return [
-       { voiceId: 'alloy' },
-       { voiceId: 'echo' },
-       { voiceId: 'fable' },
-       { voiceId: 'onyx' },
-       { voiceId: 'nova' },
-       { voiceId: 'shimmer' },
-       { voiceId: 'ash' },
-       { voiceId: 'coral' },
-       { voiceId: 'sage' },
-     ];
-   }
-
-   /**
-    * Converts text or audio input into speech using the configured speech model.
-    *
-    * @param {string | NodeJS.ReadableStream} input - The text or audio stream to be converted into speech.
-    * @param {Object} [options] - Optional parameters for the speech synthesis.
-    * @param {string} [options.speaker] - The speaker's voice to use for the speech synthesis.
-    * @param {number} [options.speed] - The speed at which the speech should be synthesized.
-    * @returns {Promise<NodeJS.ReadableStream>} - A promise that resolves to a readable stream of the synthesized audio.
-    * @throws {Error} - Throws an error if the speech model is not configured or if the input text is empty.
-    */
-   async speak(
-     input: string | NodeJS.ReadableStream,
-     options?: {
-       speaker?: string;
-       speed?: number;
-       [key: string]: any;
-     },
-   ): Promise<NodeJS.ReadableStream> {
-     if (!this.speechClient) {
-       throw new Error('Speech model not configured');
-     }
-
-     if (typeof input !== 'string') {
-       const chunks: Buffer[] = [];
-       for await (const chunk of input) {
-         chunks.push(Buffer.from(chunk));
-       }
-       input = Buffer.concat(chunks).toString('utf-8');
-     }
-
-     if (input.trim().length === 0) {
-       throw new Error('Input text is empty');
-     }
-
-     const audio = await this.traced(async () => {
-       const response = await this.speechClient!.audio.speech.create({
-         model: this.speechModel?.name ?? 'tts-1',
-         voice: (options?.speaker ?? this.speaker) as OpenAIVoiceId,
-         input,
-         speed: options?.speed || 1.0,
-       });
-
-       const passThrough = new PassThrough();
-       const buffer = Buffer.from(await response.arrayBuffer());
-       passThrough.end(buffer);
-       return passThrough;
-     }, 'voice.openai.speak')();
-
-     return audio;
-   }
-
-   /**
-    * Transcribes audio from a given stream using the configured listening model.
-    *
-    * @param {NodeJS.ReadableStream} audioStream - The audio stream to be transcribed.
-    * @param {Object} [options] - Optional parameters for the transcription.
-    * @param {string} [options.filetype] - The file type of the audio stream.
-    * Supported types include 'mp3', 'mp4', 'mpeg', 'mpga', 'm4a', 'wav', 'webm'.
-    * @returns {Promise<string>} - A promise that resolves to the transcribed text.
-    * @throws {Error} - Throws an error if the listening model is not configured.
-    */
-   async listen(
-     audioStream: NodeJS.ReadableStream,
-     options?: {
-       filetype?: 'mp3' | 'mp4' | 'mpeg' | 'mpga' | 'm4a' | 'wav' | 'webm';
-       [key: string]: any;
-     },
-   ): Promise<string> {
-     if (!this.listeningClient) {
-       throw new Error('Listening model not configured');
-     }
-
-     const chunks: Buffer[] = [];
-     for await (const chunk of audioStream) {
-       chunks.push(Buffer.from(chunk));
-     }
-     const audioBuffer = Buffer.concat(chunks);
-
-     const text = await this.traced(async () => {
-       const { filetype, ...otherOptions } = options || {};
-       const file = new File([audioBuffer], `audio.${filetype || 'mp3'}`);
-
-       const response = await this.listeningClient!.audio.transcriptions.create({
-         model: this.listeningModel?.name || 'whisper-1',
-         file: file as any,
-         ...otherOptions,
-       });
-
-       return response.text;
-     }, 'voice.openai.listen')();
-
-     return text;
-   }
- }
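The deleted source above still documents the published API; it now ships only as compiled output under dist/. A minimal usage sketch derived from it, assuming the OPENAI_API_KEY environment variable is set:

```ts
// Usage sketch based on the deleted src/index.ts above.
import { createWriteStream } from 'fs';
import { OpenAIVoice } from '@mastra/voice-openai';

const voice = new OpenAIVoice({
  speechModel: { name: 'tts-1' },
  listeningModel: { name: 'whisper-1' },
  speaker: 'alloy',
});

// Text-to-speech: speak() resolves to a readable stream of synthesized audio.
const audio = await voice.speak('Hello from Mastra', { speaker: 'nova' });
audio.pipe(createWriteStream('hello.mp3'));

// Speech-to-text: listen() drains an audio stream and returns the transcript.
const transcript = await voice.listen(await voice.speak('transcribe me'), { filetype: 'mp3' });
console.log(transcript);
```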
Binary file
Binary file
package/tsconfig.json DELETED
@@ -1,5 +0,0 @@
- {
-   "extends": "../../tsconfig.node.json",
-   "include": ["src/**/*"],
-   "exclude": ["node_modules", "**/*.test.ts"]
- }
package/vitest.config.ts DELETED
@@ -1,8 +0,0 @@
- import { defineConfig } from 'vitest/config';
-
- export default defineConfig({
-   test: {
-     globals: true,
-     include: ['src/**/*.test.ts'],
-   },
- });