@juspay/neurolink 8.15.0 → 8.17.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/adapters/tts/googleTTSHandler.d.ts +20 -2
- package/dist/adapters/tts/googleTTSHandler.js +103 -3
- package/dist/lib/adapters/tts/googleTTSHandler.d.ts +20 -2
- package/dist/lib/adapters/tts/googleTTSHandler.js +103 -3
- package/dist/lib/types/generateTypes.d.ts +63 -0
- package/dist/lib/types/streamTypes.d.ts +95 -0
- package/dist/lib/types/ttsTypes.d.ts +37 -3
- package/dist/types/generateTypes.d.ts +63 -0
- package/dist/types/streamTypes.d.ts +95 -0
- package/dist/types/ttsTypes.d.ts +37 -3
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,3 +1,15 @@
+## [8.17.0](https://github.com/juspay/neurolink/compare/v8.16.0...v8.17.0) (2025-12-16)
+
+### Features
+
+- **(tts):** Add TTS type integration to GenerateOptions, GenerateResult, and StreamChunk ([e290330](https://github.com/juspay/neurolink/commit/e290330e8fe22a4cd0427185cbddbb8856fbd5ca))
+
+## [8.16.0](https://github.com/juspay/neurolink/compare/v8.15.0...v8.16.0) (2025-12-16)
+
+### Features
+
+- **(tts):** Implement GoogleTTSHandler.getVoices() API ([15d39f7](https://github.com/juspay/neurolink/commit/15d39f7e6bfe093971bc822e8f4251b7e8711bb9))
+
 ## [8.15.0](https://github.com/juspay/neurolink/compare/v8.14.0...v8.15.0) (2025-12-14)

 ### Features
package/dist/adapters/tts/googleTTSHandler.d.ts
CHANGED
@@ -2,6 +2,8 @@ import type { TTSHandler } from "../../utils/ttsProcessor.js";
 import type { TTSOptions, TTSResult, TTSVoice } from "../../types/ttsTypes.js";
 export declare class GoogleTTSHandler implements TTSHandler {
     private client;
+    private voicesCache;
+    private static readonly CACHE_TTL_MS;
     /**
      * Google Cloud TTS maximum input size.
      * ~5000 bytes INCLUDING SSML tags.
@@ -35,12 +37,11 @@ export declare class GoogleTTSHandler implements TTSHandler {
      *
      * Note: This method is optional in the TTSHandler interface, but Google Cloud TTS
      * fully implements it to provide comprehensive voice discovery capabilities.
-     * Will be Implemented in ISSUE - TTS-014
      *
      * @param languageCode - Optional language filter (e.g., "en-US")
      * @returns List of available voices
      */
-    getVoices(
+    getVoices(languageCode?: string): Promise<TTSVoice[]>;
     /**
      * Generate audio from text using provider-specific TTS API
      *
@@ -67,4 +68,21 @@ export declare class GoogleTTSHandler implements TTSHandler {
      * @throws Error if format is unsupported
      */
    private mapFormat;
+    /**
+     * Detect the voice type from a Google Cloud TTS voice name
+     *
+     * Parses the voice name to identify the underlying voice technology/model type.
+     * Google Cloud TTS offers different voice types with varying quality and pricing.
+     *
+     * @param name - The full Google Cloud voice name (e.g., "en-US-Neural2-C")
+     * @returns The detected voice type
+     *
+     * @example
+     * detectVoiceType("en-US-Neural2-C") // returns "neural"
+     * detectVoiceType("en-US-Wavenet-A") // returns "wavenet"
+     * detectVoiceType("en-US-Standard-B") // returns "standard"
+     * detectVoiceType("en-US-Chirp-A") // returns "chirp"
+     * detectVoiceType("en-US-Journey-D") // returns "unknown" (unrecognized type)
+     */
+    private detectVoiceType;
 }
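These declaration changes make voice discovery part of the typed public surface: getVoices now accepts an optional language filter and resolves to Promise<TTSVoice[]>. A minimal consumption sketch follows; the deep import path and the zero-argument construction are assumptions for illustration, not something this diff confirms:

```typescript
// Sketch only: the import path and zero-argument construction are assumed,
// not confirmed by this diff.
import { GoogleTTSHandler } from "@juspay/neurolink/dist/adapters/tts/googleTTSHandler.js";

async function listUsEnglishVoices(): Promise<void> {
  // Assumes credentials come from GOOGLE_APPLICATION_CREDENTIALS, matching
  // the error message added in the implementation later in this diff.
  const handler = new GoogleTTSHandler();
  const voices = await handler.getVoices("en-US"); // optional language filter
  for (const v of voices) {
    console.log(`${v.id} gender=${v.gender} type=${v.type ?? "unknown"}`);
  }
}
```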
package/dist/adapters/tts/googleTTSHandler.js
CHANGED
@@ -9,8 +9,11 @@
 import { TextToSpeechClient } from "@google-cloud/text-to-speech";
 import { TTSError, TTS_ERROR_CODES } from "../../utils/ttsProcessor.js";
 import { ErrorCategory, ErrorSeverity } from "../../constants/enums.js";
+import { logger } from "../../utils/logger.js";
 export class GoogleTTSHandler {
     client = null;
+    voicesCache = null;
+    static CACHE_TTL_MS = 5 * 60 * 1000; // 5 minutes
     /**
      * Google Cloud TTS maximum input size.
      * ~5000 bytes INCLUDING SSML tags.
@@ -51,13 +54,78 @@ export class GoogleTTSHandler {
      *
      * Note: This method is optional in the TTSHandler interface, but Google Cloud TTS
      * fully implements it to provide comprehensive voice discovery capabilities.
-     * Will be Implemented in ISSUE - TTS-014
      *
      * @param languageCode - Optional language filter (e.g., "en-US")
      * @returns List of available voices
      */
-    async getVoices(
-
+    async getVoices(languageCode) {
+        if (!this.client) {
+            throw new TTSError({
+                code: TTS_ERROR_CODES.PROVIDER_NOT_CONFIGURED,
+                message: "Google Cloud TTS client not initialized. Set GOOGLE_APPLICATION_CREDENTIALS or pass credentials path.",
+                category: ErrorCategory.CONFIGURATION,
+                severity: ErrorSeverity.HIGH,
+                retriable: false,
+            });
+        }
+        try {
+            // Return cached voices if available, valid, and no language filter is specified
+            if (this.voicesCache &&
+                Date.now() - this.voicesCache.timestamp <
+                    GoogleTTSHandler.CACHE_TTL_MS &&
+                !languageCode) {
+                return this.voicesCache.voices;
+            }
+            // Call Google Cloud listVoices API
+            const [response] = await this.client.listVoices(languageCode ? { languageCode } : {});
+            if (!response.voices || response.voices.length === 0) {
+                logger.warn("Google Cloud TTS returned no voices");
+                return [];
+            }
+            const voices = [];
+            for (const voice of response.voices ?? []) {
+                // Validate required fields
+                if (!voice.name ||
+                    !Array.isArray(voice.languageCodes) ||
+                    voice.languageCodes.length === 0) {
+                    logger.warn("Skipping voice with missing required fields", {
+                        name: voice.name,
+                        languageCodesCount: voice.languageCodes?.length,
+                    });
+                    continue;
+                }
+                const voiceName = voice.name;
+                const languageCodes = voice.languageCodes;
+                const primaryLanguageCode = languageCodes[0];
+                const voiceType = this.detectVoiceType(voiceName);
+                // Map Google's ssmlGender → internal Gender
+                const gender = voice.ssmlGender === "MALE"
+                    ? "male"
+                    : voice.ssmlGender === "FEMALE"
+                        ? "female"
+                        : "neutral";
+                voices.push({
+                    id: voiceName,
+                    name: voiceName,
+                    languageCode: primaryLanguageCode,
+                    languageCodes,
+                    gender,
+                    type: voiceType,
+                    naturalSampleRateHertz: voice.naturalSampleRateHertz ?? undefined,
+                });
+            }
+            // Cache the result with timestamp if no language filter
+            if (!languageCode) {
+                this.voicesCache = { voices, timestamp: Date.now() };
+            }
+            return voices;
+        }
+        catch (err) {
+            // Log error but return empty array for graceful degradation
+            const message = err instanceof Error ? err.message : "Unknown error";
+            logger.error(`Failed to fetch Google TTS voices: ${message}`);
+            return [];
+        }
     }
     /**
      * Generate audio from text using provider-specific TTS API
@@ -216,4 +284,36 @@ export class GoogleTTSHandler {
             });
         }
     }
+    /**
+     * Detect the voice type from a Google Cloud TTS voice name
+     *
+     * Parses the voice name to identify the underlying voice technology/model type.
+     * Google Cloud TTS offers different voice types with varying quality and pricing.
+     *
+     * @param name - The full Google Cloud voice name (e.g., "en-US-Neural2-C")
+     * @returns The detected voice type
+     *
+     * @example
+     * detectVoiceType("en-US-Neural2-C") // returns "neural"
+     * detectVoiceType("en-US-Wavenet-A") // returns "wavenet"
+     * detectVoiceType("en-US-Standard-B") // returns "standard"
+     * detectVoiceType("en-US-Chirp-A") // returns "chirp"
+     * detectVoiceType("en-US-Journey-D") // returns "unknown" (unrecognized type)
+     */
+    detectVoiceType(name) {
+        const tokens = name.toLowerCase().split("-");
+        if (tokens.some((t) => t.startsWith("chirp"))) {
+            return "chirp";
+        }
+        if (tokens.includes("neural2")) {
+            return "neural";
+        }
+        if (tokens.includes("wavenet")) {
+            return "wavenet";
+        }
+        if (tokens.includes("standard")) {
+            return "standard";
+        }
+        return "unknown";
+    }
 }
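Two details of the implementation above are easy to miss: the voice cache is consulted and refilled only when no language filter is passed, so filtered calls such as getVoices("en-US") always hit the listVoices API; and voice-type detection is a pure string check on the hyphen-separated name. The following standalone TypeScript transcription of that detection logic (the VoiceType union is declared in ttsTypes.d.ts later in this diff; the sample voice names are illustrative) makes the matching rules testable in isolation:

```typescript
type VoiceType = "standard" | "wavenet" | "neural" | "chirp" | "unknown";

// Standalone transcription of detectVoiceType from the compiled handler above.
function detectVoiceType(name: string): VoiceType {
  const tokens = name.toLowerCase().split("-");
  // "chirp" is matched by prefix, so tokens like "chirp3" also qualify.
  if (tokens.some((t) => t.startsWith("chirp"))) {
    return "chirp";
  }
  if (tokens.includes("neural2")) {
    return "neural";
  }
  if (tokens.includes("wavenet")) {
    return "wavenet";
  }
  if (tokens.includes("standard")) {
    return "standard";
  }
  // Anything else (e.g., Journey voices) falls through to "unknown".
  return "unknown";
}

console.assert(detectVoiceType("en-US-Chirp3-HD-Achernar") === "chirp");
console.assert(detectVoiceType("en-US-Neural2-C") === "neural");
console.assert(detectVoiceType("en-US-Wavenet-A") === "wavenet");
console.assert(detectVoiceType("en-US-Journey-D") === "unknown");
```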
package/dist/lib/adapters/tts/googleTTSHandler.d.ts
CHANGED
Identical changes to package/dist/adapters/tts/googleTTSHandler.d.ts above (duplicate build output).
package/dist/lib/adapters/tts/googleTTSHandler.js
CHANGED
Identical changes to package/dist/adapters/tts/googleTTSHandler.js above (duplicate build output).
package/dist/lib/types/generateTypes.d.ts
CHANGED
@@ -7,6 +7,7 @@ import type { ChatMessage, ConversationMemoryConfig } from "./conversation.js";
 import type { MiddlewareFactoryOptions } from "./middlewareTypes.js";
 import type { JsonValue } from "./common.js";
 import type { Content, ImageWithAltText } from "./content.js";
+import type { TTSOptions, TTSResult } from "./ttsTypes.js";
 /**
  * Generate function options type - Primary method for content generation
  * Supports multimodal content while maintaining backward compatibility
@@ -52,6 +53,39 @@ export type GenerateOptions = {
         format?: "jpeg" | "png";
         transcribeAudio?: boolean;
     };
+    /**
+     * Text-to-Speech (TTS) configuration
+     *
+     * Enable audio generation from the text response. The generated audio will be
+     * returned in the result's `audio` field as a TTSResult object.
+     *
+     * @example Basic TTS
+     * ```typescript
+     * const result = await neurolink.generate({
+     *   input: { text: "Tell me a story" },
+     *   provider: "google-ai",
+     *   tts: { enabled: true, voice: "en-US-Neural2-C" }
+     * });
+     * console.log(result.audio?.buffer); // Audio Buffer
+     * ```
+     *
+     * @example Advanced TTS with options
+     * ```typescript
+     * const result = await neurolink.generate({
+     *   input: { text: "Speak slowly and clearly" },
+     *   provider: "google-ai",
+     *   tts: {
+     *     enabled: true,
+     *     voice: "en-US-Neural2-D",
+     *     speed: 0.8,
+     *     pitch: 2.0,
+     *     format: "mp3",
+     *     quality: "standard"
+     *   }
+     * });
+     * ```
+     */
+    tts?: TTSOptions;
     provider?: AIProviderName | string;
     model?: string;
     region?: string;
@@ -144,6 +178,35 @@ export type GenerateResult = {
     outputs?: {
         text: string;
     };
+    /**
+     * Text-to-Speech audio result
+     *
+     * Contains the generated audio buffer and metadata when TTS is enabled.
+     * Generated by TTSProcessor.synthesize() using the specified provider.
+     *
+     * @example Accessing TTS audio
+     * ```typescript
+     * const result = await neurolink.generate({
+     *   input: { text: "Hello world" },
+     *   provider: "google-ai",
+     *   tts: { enabled: true, voice: "en-US-Neural2-C" }
+     * });
+     *
+     * if (result.audio) {
+     *   console.log(`Audio size: ${result.audio.size} bytes`);
+     *   console.log(`Format: ${result.audio.format}`);
+     *   if (result.audio.duration) {
+     *     console.log(`Duration: ${result.audio.duration}s`);
+     *   }
+     *   if (result.audio.voice) {
+     *     console.log(`Voice: ${result.audio.voice}`);
+     *   }
+     *   // Save or play the audio buffer
+     *   fs.writeFileSync('output.mp3', result.audio.buffer);
+     * }
+     * ```
+     */
+    audio?: TTSResult;
     provider?: string;
     model?: string;
     usage?: TokenUsage;
package/dist/lib/types/streamTypes.d.ts
CHANGED
@@ -9,6 +9,7 @@ import type { EvaluationData } from "../index.js";
 import type { UnknownRecord, JsonValue } from "./common.js";
 import type { MiddlewareFactoryOptions } from "../types/middlewareTypes.js";
 import type { ChatMessage } from "./conversation.js";
+import type { TTSOptions, TTSChunk } from "./ttsTypes.js";
 /**
  * Progress tracking and metadata for streaming operations
  */
@@ -121,6 +122,60 @@ export type AudioChunk = {
     channels: number;
     encoding: PCMEncoding;
 };
+/**
+ * Stream chunk type using discriminated union for type safety
+ *
+ * Used in streaming responses to deliver either text or TTS audio chunks.
+ * The discriminated union ensures type safety - only one variant can exist at a time.
+ *
+ * @example Processing text chunks
+ * ```typescript
+ * for await (const chunk of result.stream) {
+ *   if (chunk.type === "text") {
+ *     console.log(chunk.content); // TypeScript knows 'content' exists
+ *   }
+ * }
+ * ```
+ *
+ * @example Processing audio chunks
+ * ```typescript
+ * const audioBuffer: Buffer[] = [];
+ * for await (const chunk of result.stream) {
+ *   if (chunk.type === "audio") {
+ *     audioBuffer.push(chunk.audioChunk.data); // TypeScript knows 'audioChunk' exists
+ *     if (chunk.audioChunk.isFinal) {
+ *       const fullAudio = Buffer.concat(audioBuffer);
+ *       fs.writeFileSync('output.mp3', fullAudio);
+ *     }
+ *   }
+ * }
+ * ```
+ *
+ * @example Processing both text and audio
+ * ```typescript
+ * for await (const chunk of result.stream) {
+ *   switch (chunk.type) {
+ *     case "text":
+ *       process.stdout.write(chunk.content);
+ *       break;
+ *     case "audio":
+ *       playAudioChunk(chunk.audioChunk.data);
+ *       break;
+ *   }
+ * }
+ * ```
+ */
+export type StreamChunk = {
+    /** Discriminator for text chunks */
+    type: "text";
+    /** Text content chunk */
+    content: string;
+} | {
+    /** Discriminator for audio chunks */
+    type: "audio";
+    /** TTS audio chunk data */
+    audioChunk: TTSChunk;
+};
 export type StreamOptions = {
     input: {
         text: string;
@@ -168,6 +223,46 @@ export type StreamOptions = {
         format?: "jpeg" | "png";
         transcribeAudio?: boolean;
     };
+    /**
+     * Text-to-Speech (TTS) configuration for streaming
+     *
+     * Enable audio generation from the streamed text response. Audio chunks will be
+     * delivered through the stream alongside text chunks as TTSChunk objects.
+     *
+     * @example Basic streaming TTS
+     * ```typescript
+     * const result = await neurolink.stream({
+     *   input: { text: "Tell me a story" },
+     *   provider: "google-ai",
+     *   tts: { enabled: true, voice: "en-US-Neural2-C" }
+     * });
+     *
+     * for await (const chunk of result.stream) {
+     *   if (chunk.type === "text") {
+     *     process.stdout.write(chunk.content);
+     *   } else if (chunk.type === "audio") {
+     *     // Handle audio chunk
+     *     playAudioChunk(chunk.audioChunk.data);
+     *   }
+     * }
+     * ```
+     *
+     * @example Advanced streaming TTS with audio buffer
+     * ```typescript
+     * const result = await neurolink.stream({
+     *   input: { text: "Speak slowly" },
+     *   provider: "google-ai",
+     *   tts: {
+     *     enabled: true,
+     *     voice: "en-US-Neural2-D",
+     *     speed: 0.8,
+     *     format: "mp3",
+     *     quality: "hd"
+     *   }
+     * });
+     * ```
+     */
+    tts?: TTSOptions;
     provider?: AIProviderName | string;
     model?: string;
     region?: string;
package/dist/lib/types/ttsTypes.d.ts
CHANGED
@@ -75,6 +75,10 @@ export type AudioSaveResult = {
     /** Error message if failed */
     error?: string;
 };
+/** Allowed TTS voice types */
+export type VoiceType = "standard" | "wavenet" | "neural" | "chirp" | "unknown";
+/** Allowed genders for TTS voices */
+export type Gender = "male" | "female" | "neutral";
 /**
  * TTS voice information
  */
@@ -83,12 +87,18 @@ export type TTSVoice = {
     id: string;
     /** Display name */
     name: string;
-    /**
+    /** Primary language code (e.g., "en-US") */
     languageCode: string;
+    /** All supported language codes */
+    languageCodes: string[];
     /** Gender */
-    gender:
+    gender: Gender;
     /** Voice type */
-    type
+    type?: VoiceType;
+    /** Voice description (optional) */
+    description?: string;
+    /** Natural sample rate in Hz (optional) */
+    naturalSampleRateHertz?: number;
 };
 /** Valid audio formats as an array for runtime validation */
 export declare const VALID_AUDIO_FORMATS: readonly AudioFormat[];
@@ -104,3 +114,27 @@ export declare function isTTSResult(value: unknown): value is TTSResult;
  * Type guard to check if TTSOptions are valid
  */
 export declare function isValidTTSOptions(options: unknown): options is TTSOptions;
+/**
+ * TTS audio chunk for streaming Text-to-Speech output
+ *
+ * Represents a chunk of audio data generated during streaming TTS.
+ * Used in StreamChunk type to deliver audio alongside text content.
+ */
+export type TTSChunk = {
+    /** Audio data chunk as Buffer */
+    data: Buffer;
+    /** Audio format of this chunk */
+    format: AudioFormat;
+    /** Chunk sequence number (0-indexed) */
+    index: number;
+    /** Whether this is the final audio chunk */
+    isFinal: boolean;
+    /** Cumulative audio size in bytes so far */
+    cumulativeSize?: number;
+    /** Estimated total duration in seconds (if available) */
+    estimatedDuration?: number;
+    /** Voice used for generation */
+    voice?: string;
+    /** Sample rate in Hz */
+    sampleRate?: number;
+};
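To make the reshaped types concrete, here are illustrative literals that should type-check against the updated declarations. The import path is an assumption and every field value is invented; "mp3" is used because it appears as a format in the JSDoc examples earlier in this diff:

```typescript
import type { TTSVoice, TTSChunk } from "@juspay/neurolink/dist/lib/types/ttsTypes.js";

// Illustrative literals only; values are invented and the import path is assumed.
const voice: TTSVoice = {
  id: "en-US-Neural2-C",
  name: "en-US-Neural2-C",
  languageCode: "en-US",          // primary language code
  languageCodes: ["en-US"],       // required field added in this diff
  gender: "female",               // now constrained to the Gender union
  type: "neural",                 // optional VoiceType added in this diff
  naturalSampleRateHertz: 24000,  // optional field added in this diff
};

const finalChunk: TTSChunk = {
  data: Buffer.from([]),
  format: "mp3",
  index: 0,
  isFinal: true,
  voice: voice.id,
};
```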
package/dist/types/generateTypes.d.ts
CHANGED
Identical changes to package/dist/lib/types/generateTypes.d.ts above (duplicate build output).
package/dist/types/streamTypes.d.ts
CHANGED
Identical changes to package/dist/lib/types/streamTypes.d.ts above (duplicate build output).
package/dist/types/ttsTypes.d.ts
CHANGED
Identical changes to package/dist/lib/types/ttsTypes.d.ts above (duplicate build output).
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@juspay/neurolink",
-  "version": "8.15.0",
+  "version": "8.17.0",
   "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
   "author": {
     "name": "Juspay Technologies",