@contractspec/lib.voice 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/audio/audio-concatenator.d.ts +15 -0
- package/dist/audio/audio-concatenator.js +57 -0
- package/dist/audio/duration-estimator.d.ts +31 -0
- package/dist/audio/duration-estimator.js +22 -0
- package/dist/audio/format-converter.d.ts +17 -0
- package/dist/audio/format-converter.js +28 -0
- package/dist/audio/index.d.ts +4 -0
- package/dist/audio/index.js +121 -0
- package/dist/audio/silence-generator.d.ts +16 -0
- package/dist/audio/silence-generator.js +20 -0
- package/dist/browser/audio/audio-concatenator.js +56 -0
- package/dist/browser/audio/duration-estimator.js +21 -0
- package/dist/browser/audio/format-converter.js +27 -0
- package/dist/browser/audio/index.js +120 -0
- package/dist/browser/audio/silence-generator.js +19 -0
- package/dist/browser/conversational/index.js +241 -0
- package/dist/browser/conversational/response-orchestrator.js +62 -0
- package/dist/browser/conversational/transcript-builder.js +63 -0
- package/dist/browser/conversational/turn-detector.js +43 -0
- package/dist/browser/conversational/types.js +0 -0
- package/dist/browser/conversational/voice-session-manager.js +137 -0
- package/dist/browser/docs/conversational.docblock.js +5 -0
- package/dist/browser/docs/stt.docblock.js +5 -0
- package/dist/browser/docs/sync.docblock.js +5 -0
- package/dist/browser/docs/tts.docblock.js +5 -0
- package/dist/browser/docs/voice.docblock.js +5 -0
- package/dist/browser/i18n/catalogs/en.js +91 -0
- package/dist/browser/i18n/catalogs/es.js +91 -0
- package/dist/browser/i18n/catalogs/fr.js +91 -0
- package/dist/browser/i18n/catalogs/index.js +271 -0
- package/dist/browser/i18n/index.js +335 -0
- package/dist/browser/i18n/keys.js +38 -0
- package/dist/browser/i18n/locale.js +13 -0
- package/dist/browser/i18n/messages.js +283 -0
- package/dist/browser/index.js +1070 -0
- package/dist/browser/stt/diarization-mapper.js +42 -0
- package/dist/browser/stt/index.js +222 -0
- package/dist/browser/stt/segment-splitter.js +36 -0
- package/dist/browser/stt/subtitle-formatter.js +51 -0
- package/dist/browser/stt/transcriber.js +219 -0
- package/dist/browser/stt/types.js +0 -0
- package/dist/browser/sync/duration-negotiator.js +69 -0
- package/dist/browser/sync/index.js +165 -0
- package/dist/browser/sync/scene-adapter.js +52 -0
- package/dist/browser/sync/timing-calculator.js +46 -0
- package/dist/browser/tts/audio-assembler.js +120 -0
- package/dist/browser/tts/emphasis-planner.js +134 -0
- package/dist/browser/tts/index.js +439 -0
- package/dist/browser/tts/pace-analyzer.js +67 -0
- package/dist/browser/tts/segment-synthesizer.js +36 -0
- package/dist/browser/tts/types.js +0 -0
- package/dist/browser/tts/voice-synthesizer.js +435 -0
- package/dist/browser/types.js +0 -0
- package/dist/conversational/index.d.ts +5 -0
- package/dist/conversational/index.js +242 -0
- package/dist/conversational/response-orchestrator.d.ts +26 -0
- package/dist/conversational/response-orchestrator.js +63 -0
- package/dist/conversational/transcript-builder.d.ts +25 -0
- package/dist/conversational/transcript-builder.js +64 -0
- package/dist/conversational/turn-detector.d.ts +31 -0
- package/dist/conversational/turn-detector.js +44 -0
- package/dist/conversational/types.d.ts +55 -0
- package/dist/conversational/types.js +1 -0
- package/dist/conversational/voice-session-manager.d.ts +17 -0
- package/dist/conversational/voice-session-manager.js +138 -0
- package/dist/docs/conversational.docblock.d.ts +14 -0
- package/dist/docs/conversational.docblock.js +6 -0
- package/dist/docs/stt.docblock.d.ts +12 -0
- package/dist/docs/stt.docblock.js +6 -0
- package/dist/docs/sync.docblock.d.ts +12 -0
- package/dist/docs/sync.docblock.js +6 -0
- package/dist/docs/tts.docblock.d.ts +12 -0
- package/dist/docs/tts.docblock.js +6 -0
- package/dist/docs/voice.docblock.d.ts +22 -0
- package/dist/docs/voice.docblock.js +6 -0
- package/dist/i18n/catalogs/en.d.ts +6 -0
- package/dist/i18n/catalogs/en.js +92 -0
- package/dist/i18n/catalogs/es.d.ts +4 -0
- package/dist/i18n/catalogs/es.js +92 -0
- package/dist/i18n/catalogs/fr.d.ts +4 -0
- package/dist/i18n/catalogs/fr.js +92 -0
- package/dist/i18n/catalogs/index.d.ts +3 -0
- package/dist/i18n/catalogs/index.js +272 -0
- package/dist/i18n/index.d.ts +20 -0
- package/dist/i18n/index.js +336 -0
- package/dist/i18n/keys.d.ts +50 -0
- package/dist/i18n/keys.js +39 -0
- package/dist/i18n/locale.d.ts +6 -0
- package/dist/i18n/locale.js +14 -0
- package/dist/i18n/messages.d.ts +13 -0
- package/dist/i18n/messages.js +284 -0
- package/dist/index.d.ts +6 -0
- package/dist/index.js +1071 -0
- package/dist/node/audio/audio-concatenator.js +56 -0
- package/dist/node/audio/duration-estimator.js +21 -0
- package/dist/node/audio/format-converter.js +27 -0
- package/dist/node/audio/index.js +120 -0
- package/dist/node/audio/silence-generator.js +19 -0
- package/dist/node/conversational/index.js +241 -0
- package/dist/node/conversational/response-orchestrator.js +62 -0
- package/dist/node/conversational/transcript-builder.js +63 -0
- package/dist/node/conversational/turn-detector.js +43 -0
- package/dist/node/conversational/types.js +0 -0
- package/dist/node/conversational/voice-session-manager.js +137 -0
- package/dist/node/docs/conversational.docblock.js +5 -0
- package/dist/node/docs/stt.docblock.js +5 -0
- package/dist/node/docs/sync.docblock.js +5 -0
- package/dist/node/docs/tts.docblock.js +5 -0
- package/dist/node/docs/voice.docblock.js +5 -0
- package/dist/node/i18n/catalogs/en.js +91 -0
- package/dist/node/i18n/catalogs/es.js +91 -0
- package/dist/node/i18n/catalogs/fr.js +91 -0
- package/dist/node/i18n/catalogs/index.js +271 -0
- package/dist/node/i18n/index.js +335 -0
- package/dist/node/i18n/keys.js +38 -0
- package/dist/node/i18n/locale.js +13 -0
- package/dist/node/i18n/messages.js +283 -0
- package/dist/node/index.js +1070 -0
- package/dist/node/stt/diarization-mapper.js +42 -0
- package/dist/node/stt/index.js +222 -0
- package/dist/node/stt/segment-splitter.js +36 -0
- package/dist/node/stt/subtitle-formatter.js +51 -0
- package/dist/node/stt/transcriber.js +219 -0
- package/dist/node/stt/types.js +0 -0
- package/dist/node/sync/duration-negotiator.js +69 -0
- package/dist/node/sync/index.js +165 -0
- package/dist/node/sync/scene-adapter.js +52 -0
- package/dist/node/sync/timing-calculator.js +46 -0
- package/dist/node/tts/audio-assembler.js +120 -0
- package/dist/node/tts/emphasis-planner.js +134 -0
- package/dist/node/tts/index.js +439 -0
- package/dist/node/tts/pace-analyzer.js +67 -0
- package/dist/node/tts/segment-synthesizer.js +36 -0
- package/dist/node/tts/types.js +0 -0
- package/dist/node/tts/voice-synthesizer.js +435 -0
- package/dist/node/types.js +0 -0
- package/dist/stt/diarization-mapper.d.ts +19 -0
- package/dist/stt/diarization-mapper.js +43 -0
- package/dist/stt/index.d.ts +5 -0
- package/dist/stt/index.js +223 -0
- package/dist/stt/segment-splitter.d.ts +19 -0
- package/dist/stt/segment-splitter.js +37 -0
- package/dist/stt/subtitle-formatter.d.ts +19 -0
- package/dist/stt/subtitle-formatter.js +52 -0
- package/dist/stt/transcriber.d.ts +21 -0
- package/dist/stt/transcriber.js +220 -0
- package/dist/stt/types.d.ts +44 -0
- package/dist/stt/types.js +1 -0
- package/dist/sync/duration-negotiator.d.ts +37 -0
- package/dist/sync/duration-negotiator.js +70 -0
- package/dist/sync/index.d.ts +3 -0
- package/dist/sync/index.js +166 -0
- package/dist/sync/scene-adapter.d.ts +29 -0
- package/dist/sync/scene-adapter.js +53 -0
- package/dist/sync/timing-calculator.d.ts +21 -0
- package/dist/sync/timing-calculator.js +47 -0
- package/dist/tts/audio-assembler.d.ts +19 -0
- package/dist/tts/audio-assembler.js +121 -0
- package/dist/tts/emphasis-planner.d.ts +24 -0
- package/dist/tts/emphasis-planner.js +135 -0
- package/dist/tts/index.d.ts +6 -0
- package/dist/tts/index.js +440 -0
- package/dist/tts/pace-analyzer.d.ts +30 -0
- package/dist/tts/pace-analyzer.js +68 -0
- package/dist/tts/segment-synthesizer.d.ts +21 -0
- package/dist/tts/segment-synthesizer.js +37 -0
- package/dist/tts/types.d.ts +76 -0
- package/dist/tts/types.js +1 -0
- package/dist/tts/voice-synthesizer.d.ts +28 -0
- package/dist/tts/voice-synthesizer.js +436 -0
- package/dist/types.d.ts +12 -0
- package/dist/types.js +1 -0
- package/package.json +760 -0
|
@@ -0,0 +1,1070 @@
|
|
|
1
|
+
// src/audio/audio-concatenator.ts
/**
 * Concatenates audio segments (byte buffers + metadata) into a single clip.
 * All segments must agree on format, sample rate, and channel count.
 */
class AudioConcatenator {
  /** Canonical empty clip returned when there is nothing to concatenate. */
  static #emptyResult() {
    return {
      data: new Uint8Array(0),
      format: "wav",
      sampleRateHz: 44100,
      durationMs: 0,
      channels: 1
    };
  }

  /**
   * Joins `segments` end to end.
   *
   * @param segments array of clips ({ data, format, sampleRateHz, durationMs, channels })
   * @returns a single clip whose data is the byte-wise concatenation and whose
   *          durationMs is the sum of segment durations (missing durations count as 0)
   * @throws Error when a segment's format, sample rate, or channel count
   *         differs from the first segment's
   */
  concatenate(segments) {
    const [firstSegment] = segments;
    if (!firstSegment) {
      return AudioConcatenator.#emptyResult();
    }
    if (segments.length === 1) {
      // Shallow copy so callers cannot mutate the input segment's metadata.
      return { ...firstSegment };
    }
    const referenceFormat = firstSegment.format;
    const referenceSampleRate = firstSegment.sampleRateHz;
    const referenceChannels = firstSegment.channels ?? 1;
    for (const seg of segments) {
      if (seg.format !== referenceFormat) {
        throw new Error(`Format mismatch: expected ${referenceFormat}, got ${seg.format}`);
      }
      if (seg.sampleRateHz !== referenceSampleRate) {
        throw new Error(`Sample rate mismatch: expected ${referenceSampleRate}, got ${seg.sampleRateHz}`);
      }
      // Fix: channel count was previously unchecked, so segments with a
      // different channel layout were concatenated into corrupt audio.
      if ((seg.channels ?? 1) !== referenceChannels) {
        throw new Error(`Channel mismatch: expected ${referenceChannels}, got ${seg.channels ?? 1}`);
      }
    }
    const totalBytes = segments.reduce((sum, s) => sum + s.data.length, 0);
    const combined = new Uint8Array(totalBytes);
    let offset = 0;
    for (const seg of segments) {
      combined.set(seg.data, offset);
      offset += seg.data.length;
    }
    const totalDurationMs = segments.reduce((sum, s) => sum + (s.durationMs ?? 0), 0);
    return {
      data: combined,
      format: referenceFormat,
      sampleRateHz: referenceSampleRate,
      durationMs: totalDurationMs,
      channels: referenceChannels
    };
  }
}
|
|
54
|
+
|
|
55
|
+
// src/audio/duration-estimator.ts
/** Estimates narration durations from word counts at a given speaking rate. */
class DurationEstimator {
  /** Default speaking rate in words per minute. */
  static DEFAULT_WPM = 150;

  /** Counts whitespace-separated words, ignoring empty tokens. */
  #countWords(text) {
    return text.split(/\s+/).filter(Boolean).length;
  }

  /**
   * Estimated speech duration in whole seconds (rounded up).
   *
   * @param text narration text
   * @param wordsPerMinute speaking rate; defaults to DEFAULT_WPM
   */
  estimateSeconds(text, wordsPerMinute) {
    const rate = wordsPerMinute ?? DurationEstimator.DEFAULT_WPM;
    return Math.ceil(this.#countWords(text) / rate * 60);
  }

  /**
   * Estimated speech duration in whole milliseconds (rounded up).
   *
   * @param text narration text
   * @param wordsPerMinute speaking rate; defaults to DEFAULT_WPM
   */
  estimateMs(text, wordsPerMinute) {
    const rate = wordsPerMinute ?? DurationEstimator.DEFAULT_WPM;
    return Math.ceil(this.#countWords(text) / rate * 60 * 1000);
  }

  /**
   * Approximate number of words that fit in the given duration (rounded
   * to nearest).
   *
   * @param durationSeconds available speaking time in seconds
   * @param wordsPerMinute speaking rate; defaults to DEFAULT_WPM
   */
  estimateWordCount(durationSeconds, wordsPerMinute) {
    const rate = wordsPerMinute ?? DurationEstimator.DEFAULT_WPM;
    return Math.round(durationSeconds / 60 * rate);
  }
}
|
|
73
|
+
|
|
74
|
+
// src/audio/format-converter.ts
/**
 * Relabels audio clips between container formats.
 * NOTE(review): convert only rewrites the `format` field; the sample data is
 * not transcoded here — confirm that downstream consumers expect this.
 */
class FormatConverter {
  /** Conversion paths this converter claims support for (source → targets). */
  static #SUPPORTED_PATHS = {
    wav: ["mp3", "ogg", "pcm", "opus"],
    mp3: ["wav"],
    ogg: ["wav"],
    pcm: ["wav"],
    opus: ["wav"]
  };

  /**
   * Returns the clip unchanged when already in `targetFormat`, otherwise a
   * shallow copy with the format field replaced.
   */
  convert(audio, targetFormat) {
    return audio.format === targetFormat ? audio : { ...audio, format: targetFormat };
  }

  /** True when conversion from `from` to `to` is supported (identity always is). */
  isSupported(from, to) {
    if (from === to) {
      return true;
    }
    return FormatConverter.#SUPPORTED_PATHS[from]?.includes(to) ?? false;
  }
}
|
|
98
|
+
|
|
99
|
+
// src/audio/silence-generator.ts
/** Produces zero-filled (digital silence) clips of a requested length. */
class SilenceGenerator {
  /**
   * Generates `durationMs` of silence. Assumes 16-bit samples (2 bytes per
   * sample per channel); the sample count is rounded up to cover the full
   * duration.
   *
   * @param durationMs length of silence in milliseconds
   * @param format audio format label (default "wav")
   * @param sampleRateHz sample rate (default 44100)
   * @param channels channel count (default 1)
   * @returns a clip object whose data buffer is all zeros
   */
  generate(durationMs, format = "wav", sampleRateHz = 44100, channels = 1) {
    const BYTES_PER_SAMPLE = 2; // 16-bit PCM
    const sampleCount = Math.ceil(sampleRateHz * durationMs / 1000);
    const data = new Uint8Array(sampleCount * BYTES_PER_SAMPLE * channels);
    return { data, format, sampleRateHz, durationMs, channels };
  }
}
|
|
115
|
+
// src/conversational/transcript-builder.ts
/** Accumulates completed conversation turns from a stream of session events. */
class TranscriptBuilder {
  turns = [];
  currentTurn = null;
  sessionStartMs = Date.now();

  /** Snapshot copy of all completed turns. */
  getTranscript() {
    return this.turns.slice();
  }

  /** Plain-text rendering: one "[role] text" line per completed turn. */
  toText() {
    const lines = [];
    for (const turn of this.turns) {
      lines.push(`[${turn.role}] ${turn.text}`);
    }
    return lines.join("\n");
  }

  /** Number of completed turns so far. */
  getTurnCount() {
    return this.turns.length;
  }

  /**
   * Folds one session event into builder state. A turn is opened on a
   * *_speech_started event and committed on the matching *_speech_ended
   * event; unmatched end events are ignored. Timestamps are milliseconds
   * relative to session start.
   */
  processEvent(event) {
    const elapsed = () => Date.now() - this.sessionStartMs;
    if (event.type === "session_started") {
      this.sessionStartMs = Date.now();
    } else if (event.type === "user_speech_started") {
      // User text arrives later, with the end event.
      this.currentTurn = { role: "user", startMs: elapsed() };
    } else if (event.type === "user_speech_ended") {
      if (this.currentTurn?.role === "user") {
        this.currentTurn.text = event.transcript;
        this.currentTurn.endMs = elapsed();
        this.turns.push(this.currentTurn);
        this.currentTurn = null;
      }
    } else if (event.type === "agent_speech_started") {
      // Agent text is known up front.
      this.currentTurn = { role: "agent", text: event.text, startMs: elapsed() };
    } else if (event.type === "agent_speech_ended") {
      if (this.currentTurn?.role === "agent") {
        this.currentTurn.endMs = elapsed();
        this.turns.push(this.currentTurn);
        this.currentTurn = null;
      }
    }
    // "transcript" and unrecognized event types are intentionally ignored.
  }

  /** Clears all state for a fresh session. */
  reset() {
    this.turns.length = 0;
    this.currentTurn = null;
    this.sessionStartMs = Date.now();
  }
}
|
|
175
|
+
|
|
176
|
+
// src/conversational/voice-session-manager.ts
/**
 * Wraps a conversational provider session, mirroring its event stream into
 * a mutable state object and a running transcript (via TranscriptBuilder).
 */
class VoiceSessionManager {
  provider;

  constructor(options) {
    this.provider = options.conversational;
  }

  /**
   * Opens a provider session and returns a handle whose `state` is updated
   * as `events` is consumed. The session starts as "connecting" with an
   * empty sessionId until a session_started event arrives.
   */
  async startSession(config) {
    const transcriptBuilder = new TranscriptBuilder;
    const session = await this.provider.startSession({
      voiceId: config.voiceId,
      language: config.language,
      systemPrompt: config.systemPrompt,
      llmModel: config.llmModel,
      inputFormat: config.inputFormat,
      outputFormat: config.outputFormat,
      turnDetection: config.turnDetection,
      silenceThresholdMs: config.silenceThresholdMs,
      maxDurationSeconds: config.maxDurationSeconds
    });
    const state = {
      sessionId: "",
      status: "connecting",
      currentTurn: "idle",
      turnCount: 0,
      durationMs: 0,
      transcript: []
    };
    return {
      state,
      sendAudio: (chunk) => session.sendAudio(chunk),
      sendText: (text) => session.sendText(text),
      interrupt: () => session.interrupt(),
      close: async () => {
        const summary = await session.close();
        state.status = "ended";
        return summary;
      },
      events: this.wrapEvents(session.events, state, transcriptBuilder)
    };
  }

  /**
   * Re-yields provider events, updating `state` and refreshing the
   * transcript snapshot before each event reaches the consumer.
   */
  async* wrapEvents(events, state, transcriptBuilder) {
    for await (const event of events) {
      transcriptBuilder.processEvent(event);
      this.#applyEvent(state, event);
      state.transcript = transcriptBuilder.getTranscript();
      yield event;
    }
  }

  /** Mutates session state to reflect a single provider event. */
  #applyEvent(state, event) {
    if (event.type === "session_started") {
      state.sessionId = event.sessionId;
      state.status = "active";
    } else if (event.type === "user_speech_started") {
      state.currentTurn = "user";
    } else if (event.type === "user_speech_ended") {
      state.currentTurn = "idle";
      state.turnCount += 1;
    } else if (event.type === "agent_speech_started") {
      state.currentTurn = "agent";
    } else if (event.type === "agent_speech_ended") {
      state.currentTurn = "idle";
      state.turnCount += 1;
    } else if (event.type === "session_ended") {
      state.status = "ended";
      state.durationMs = event.durationMs;
    }
  }
}
|
|
249
|
+
|
|
250
|
+
// src/conversational/turn-detector.ts
/**
 * Energy-based end-of-turn detector for 16-bit little-endian PCM audio.
 * A turn is considered finished once energy stays below the threshold for
 * the configured silence window.
 */
class TurnDetector {
  silenceThresholdMs;
  energyThreshold;
  silenceStartMs = null;

  /**
   * @param silenceThresholdMs silence window in ms before a turn ends (default 800)
   * @param energyThreshold normalized RMS above which a chunk counts as speech (default 0.01)
   */
  constructor(silenceThresholdMs = 800, energyThreshold = 0.01) {
    this.silenceThresholdMs = silenceThresholdMs;
    this.energyThreshold = energyThreshold;
  }

  /**
   * Feeds one audio chunk; returns true when accumulated silence has
   * reached the threshold (i.e. the speaker appears to be done).
   *
   * @param chunk Uint8Array of 16-bit LE PCM bytes
   * @param timestampMs caller-supplied timestamp of this chunk
   */
  processChunk(chunk, timestampMs) {
    if (this.calculateEnergy(chunk) > this.energyThreshold) {
      // Speech detected: restart the silence window.
      this.silenceStartMs = null;
      return false;
    }
    this.silenceStartMs ??= timestampMs;
    return timestampMs - this.silenceStartMs >= this.silenceThresholdMs;
  }

  /** Forgets any accumulated silence. */
  reset() {
    this.silenceStartMs = null;
  }

  /**
   * Root-mean-square energy of a chunk of 16-bit LE PCM, normalized to
   * roughly [0, 1]. Returns 0 for chunks too short to contain one sample.
   */
  calculateEnergy(chunk) {
    if (chunk.length < 2) {
      return 0;
    }
    const sampleCount = Math.floor(chunk.length / 2);
    let sumOfSquares = 0;
    for (let i = 0; i + 1 < chunk.length; i += 2) {
      const low = chunk[i] ?? 0;
      const high = chunk[i + 1] ?? 0;
      const raw = (high << 8) | low;
      // Sign-extend the 16-bit value: 0x8000..0xFFFF map to negatives.
      const sample = raw >= 0x8000 ? raw - 0x10000 : raw;
      const normalized = sample / 32768;
      sumOfSquares += normalized * normalized;
    }
    return Math.sqrt(sumOfSquares / sampleCount);
  }
}
|
|
290
|
+
|
|
291
|
+
// src/conversational/response-orchestrator.ts
/**
 * Drives one user→agent exchange: transcribe user audio (STT), generate a
 * reply from the running conversation (LLM), synthesize it (TTS), and emit
 * the session events in order.
 */
class ResponseOrchestrator {
  stt;
  llm;
  tts;
  conversationHistory = [];

  constructor(stt, llm, tts) {
    this.stt = stt;
    this.llm = llm;
    this.tts = tts;
  }

  /**
   * Processes a single user turn. Yields, in order:
   * user_speech_ended → transcript(user) → agent_speech_started →
   * agent_audio → agent_speech_ended → transcript(agent).
   * Both sides of the exchange are appended to conversationHistory.
   */
  async* processUserTurn(userAudio, config) {
    const userText = await this.#transcribe(userAudio, config);
    yield { type: "user_speech_ended", transcript: userText };
    yield {
      type: "transcript",
      role: "user",
      text: userText,
      timestamp: Date.now()
    };
    this.conversationHistory.push({ role: "user", content: userText });

    const agentText = await this.#generateReply(config);
    this.conversationHistory.push({ role: "assistant", content: agentText });
    yield { type: "agent_speech_started", text: agentText };

    const synthesis = await this.tts.synthesize({
      text: agentText,
      voiceId: config.voiceId,
      language: config.language,
      format: config.outputFormat
    });
    yield { type: "agent_audio", audio: synthesis.audio.data };
    yield { type: "agent_speech_ended" };
    yield {
      type: "transcript",
      role: "agent",
      text: agentText,
      timestamp: Date.now()
    };
  }

  /** Transcribes user audio to text via the STT provider. */
  async #transcribe(userAudio, config) {
    const transcription = await this.stt.transcribe({
      audio: userAudio,
      language: config.language,
      wordTimestamps: false
    });
    return transcription.text;
  }

  /**
   * Asks the LLM for the next agent reply given the system prompt and the
   * running history; falls back to a fixed apology when no text part is
   * present in the response.
   */
  async #generateReply(config) {
    const messages = [
      {
        role: "system",
        content: [{ type: "text", text: config.systemPrompt }]
      },
      ...this.conversationHistory.map(({ role, content }) => ({
        role,
        content: [{ type: "text", text: content }]
      }))
    ];
    const response = await this.llm.chat(messages, { model: config.llmModel });
    const part = response.message.content.find((p) => p.type === "text");
    return part && part.type === "text" ? part.text : "I apologize, I could not generate a response.";
  }

  /** Clears the running conversation history. */
  reset() {
    this.conversationHistory.length = 0;
  }
}
|
|
350
|
+
// src/tts/pace-analyzer.ts
/**
 * Pacing presets keyed by narrative content type. `rate` is a multiplier of
 * normal speaking speed; the silence fields pad segment boundaries in ms.
 */
const CONTENT_TYPE_PACING = {
  intro: {
    rate: 0.95,
    emphasis: "normal",
    tone: "authoritative",
    leadingSilenceMs: 0,
    trailingSilenceMs: 500
  },
  problem: {
    rate: 0.9,
    emphasis: "strong",
    tone: "urgent",
    leadingSilenceMs: 300,
    trailingSilenceMs: 500
  },
  solution: {
    rate: 1,
    emphasis: "normal",
    tone: "calm",
    leadingSilenceMs: 300,
    trailingSilenceMs: 500
  },
  metric: {
    rate: 0.85,
    emphasis: "strong",
    tone: "excited",
    leadingSilenceMs: 300,
    trailingSilenceMs: 600
  },
  cta: {
    rate: 0.9,
    emphasis: "strong",
    tone: "authoritative",
    leadingSilenceMs: 400,
    trailingSilenceMs: 0
  },
  transition: {
    rate: 1.1,
    emphasis: "reduced",
    tone: "neutral",
    leadingSilenceMs: 200,
    trailingSilenceMs: 300
  }
};

/** Maps script segments to pacing directives using the presets above. */
class PaceAnalyzer {
  /**
   * Builds one pacing directive per segment from its content-type preset;
   * `baseRate` scales the preset rate multiplicatively.
   */
  analyze(segments, baseRate = 1) {
    return segments.map(({ sceneId, contentType }) => {
      const preset = CONTENT_TYPE_PACING[contentType];
      return {
        sceneId,
        rate: preset.rate * baseRate,
        emphasis: preset.emphasis,
        tone: preset.tone,
        leadingSilenceMs: preset.leadingSilenceMs,
        trailingSilenceMs: preset.trailingSilenceMs
      };
    });
  }

  /** Fresh copy of the preset for a content type. */
  getDefaults(contentType) {
    return { ...CONTENT_TYPE_PACING[contentType] };
  }
}
|
|
414
|
+
|
|
415
|
+
// src/tts/emphasis-planner.ts
/**
 * Plans per-segment pacing directives. Uses an LLM "voice director" when
 * one is configured, falling back to rule-based PaceAnalyzer presets when
 * no LLM is available or the LLM path fails.
 */
class EmphasisPlanner {
  llm;
  model;
  paceAnalyzer;

  constructor(options) {
    this.llm = options?.llm;
    this.model = options?.model;
    this.paceAnalyzer = new PaceAnalyzer;
  }

  /**
   * Returns pacing directives for all segments. Any error in the LLM path
   * (network, malformed JSON, etc.) silently falls back to the presets,
   * which are always valid.
   */
  async plan(segments, baseRate = 1) {
    if (!this.llm) {
      return this.paceAnalyzer.analyze(segments, baseRate);
    }
    try {
      return await this.planWithLlm(segments, baseRate);
    } catch {
      // Deliberate best-effort: presets are a safe substitute.
      return this.paceAnalyzer.analyze(segments, baseRate);
    }
  }

  /**
   * Asks the LLM for a JSON array of directives and scales each rate by
   * `baseRate`. Throws when the reply is not parseable JSON (caught by
   * plan()); falls back to presets when no text part is returned.
   */
  async planWithLlm(segments, baseRate) {
    if (!this.llm) {
      return this.paceAnalyzer.analyze(segments, baseRate);
    }
    const systemPrompt = [
      "You are a voice director planning emphasis and pacing for TTS narration.",
      "For each segment, return a JSON array of directives.",
      "Each directive has: sceneId, rate (0.7-1.3), emphasis (reduced|normal|strong),",
      "tone (neutral|urgent|excited|calm|authoritative), leadingSilenceMs, trailingSilenceMs.",
      "Return ONLY a JSON array, no other text."
    ].join("\n");
    const payload = JSON.stringify(segments.map((s) => ({
      sceneId: s.sceneId,
      text: s.text,
      contentType: s.contentType
    })));
    const response = await this.llm.chat([
      {
        role: "system",
        content: [{ type: "text", text: systemPrompt }]
      },
      {
        role: "user",
        content: [{ type: "text", text: payload }]
      }
    ], { model: this.model, temperature: 0.3, responseFormat: "json" });
    const textPart = response.message.content.find((p) => p.type === "text");
    if (!textPart || textPart.type !== "text") {
      return this.paceAnalyzer.analyze(segments, baseRate);
    }
    const directives = JSON.parse(textPart.text);
    return directives.map((directive) => ({
      ...directive,
      rate: directive.rate * baseRate
    }));
  }
}
|
|
481
|
+
|
|
482
|
+
// src/tts/segment-synthesizer.ts
/** Renders script segments to audio through a TTS provider. */
class SegmentSynthesizer {
  tts;

  constructor(tts) {
    this.tts = tts;
  }

  /**
   * Synthesizes every segment concurrently, applying the pacing directive
   * whose sceneId matches the segment (if any). Results come back in the
   * same order as `segments`.
   */
  async synthesizeAll(segments, voice, directives) {
    const bySceneId = new Map();
    for (const directive of directives) {
      bySceneId.set(directive.sceneId, directive);
    }
    return Promise.all(
      segments.map((segment) => this.synthesizeOne(segment, voice, bySceneId.get(segment.sceneId)))
    );
  }

  /**
   * Synthesizes a single segment. The directive (when present) supplies
   * rate and emphasis; voice settings supply the rest. Word timings are
   * normalized to { word, startMs, endMs } and omitted if the provider
   * returns none.
   */
  async synthesizeOne(segment, voice, directive) {
    const { audio, wordTimings } = await this.tts.synthesize({
      text: segment.text,
      voiceId: voice.voiceId,
      language: voice.language,
      style: voice.style,
      stability: voice.stability,
      rate: directive?.rate,
      emphasis: directive?.emphasis
    });
    return {
      sceneId: segment.sceneId,
      audio,
      durationMs: audio.durationMs ?? 0,
      wordTimings: wordTimings?.map(({ word, startMs, endMs }) => ({ word, startMs, endMs }))
    };
  }
}
|
|
515
|
+
|
|
516
|
+
// src/tts/audio-assembler.ts
/**
 * Stitches synthesized segments into one clip, inserting leading/trailing
 * silence around each segment according to its pacing directive.
 */
class AudioAssembler {
  concatenator = new AudioConcatenator;
  silenceGenerator = new SilenceGenerator;

  /**
   * Assembles segments in playback order. Silence is generated in the
   * format/sample-rate/channels of the first segment's audio.
   *
   * @param segments synthesized segments ({ sceneId, audio, ... })
   * @param directives pacing directives matched to segments by sceneId
   * @param defaultPauseMs pause inserted after a segment with no explicit
   *        trailingSilenceMs (none after the last segment)
   */
  assemble(segments, directives, defaultPauseMs = 500) {
    const [first] = segments;
    if (!first) {
      // Nothing to assemble: empty silent WAV clip.
      return {
        data: new Uint8Array(0),
        format: "wav",
        sampleRateHz: 44100,
        durationMs: 0,
        channels: 1
      };
    }
    const directiveMap = new Map(directives.map((d) => [d.sceneId, d]));
    const reference = first.audio;
    const channels = reference.channels ?? 1;
    const parts = [];
    segments.forEach((segment, index) => {
      if (!segment) {
        return;
      }
      const directive = directiveMap.get(segment.sceneId);
      const lead = directive?.leadingSilenceMs ?? 0;
      if (lead > 0) {
        parts.push(this.silenceGenerator.generate(lead, reference.format, reference.sampleRateHz, channels));
      }
      parts.push(segment.audio);
      const isLast = index === segments.length - 1;
      const trail = directive?.trailingSilenceMs ?? (isLast ? 0 : defaultPauseMs);
      if (trail > 0) {
        parts.push(this.silenceGenerator.generate(trail, reference.format, reference.sampleRateHz, channels));
      }
    });
    return this.concatenator.concatenate(parts);
  }
}
|
|
562
|
+
|
|
563
|
+
// src/tts/voice-synthesizer.ts
|
|
564
|
+
class VoiceSynthesizer {
|
|
565
|
+
segmentSynthesizer;
|
|
566
|
+
emphasisPlanner;
|
|
567
|
+
audioAssembler = new AudioAssembler;
|
|
568
|
+
durationEstimator = new DurationEstimator;
|
|
569
|
+
paceAnalyzer = new PaceAnalyzer;
|
|
570
|
+
options;
|
|
571
|
+
constructor(options) {
|
|
572
|
+
this.options = options;
|
|
573
|
+
this.segmentSynthesizer = new SegmentSynthesizer(options.tts);
|
|
574
|
+
this.emphasisPlanner = new EmphasisPlanner({
|
|
575
|
+
llm: options.llm,
|
|
576
|
+
model: options.model
|
|
577
|
+
});
|
|
578
|
+
}
|
|
579
|
+
async synthesize(brief) {
|
|
580
|
+
const script = this.buildScript(brief);
|
|
581
|
+
return this.executePipeline(script, brief.voice, brief.pacing);
|
|
582
|
+
}
|
|
583
|
+
async synthesizeForVideo(brief) {
|
|
584
|
+
const script = this.buildScriptFromScenePlan(brief);
|
|
585
|
+
return this.executePipeline(script, brief.voice, brief.pacing, brief.fps);
|
|
586
|
+
}
|
|
587
|
+
async executePipeline(script, voice, pacing, fps) {
|
|
588
|
+
const projectId = generateProjectId();
|
|
589
|
+
const baseRate = pacing?.baseRate ?? 1;
|
|
590
|
+
const pacingDirectives = await this.emphasisPlanner.plan(script.segments, baseRate);
|
|
591
|
+
const synthesized = await this.segmentSynthesizer.synthesizeAll(script.segments, voice, pacingDirectives);
|
|
592
|
+
const pauseMs = pacing?.segmentPauseMs ?? 500;
|
|
593
|
+
const assembledAudio = this.audioAssembler.assemble(synthesized, pacingDirectives, pauseMs);
|
|
594
|
+
const effectiveFps = fps ?? this.options.fps ?? 30;
|
|
595
|
+
const breathingRoomFactor = pacing?.breathingRoomFactor ?? 1.15;
|
|
596
|
+
const timingMap = this.buildTimingMap(synthesized, effectiveFps, breathingRoomFactor);
|
|
597
|
+
return {
|
|
598
|
+
id: projectId,
|
|
599
|
+
script,
|
|
600
|
+
pacingDirectives,
|
|
601
|
+
segments: synthesized,
|
|
602
|
+
assembledAudio,
|
|
603
|
+
timingMap
|
|
604
|
+
};
|
|
605
|
+
}
|
|
606
|
+
buildScript(brief) {
|
|
607
|
+
const segments = [];
|
|
608
|
+
const introText = `${brief.content.title}. ${brief.content.summary}`;
|
|
609
|
+
segments.push({
|
|
610
|
+
sceneId: "intro",
|
|
611
|
+
text: introText,
|
|
612
|
+
estimatedDurationSeconds: this.durationEstimator.estimateSeconds(introText),
|
|
613
|
+
contentType: "intro"
|
|
614
|
+
});
|
|
615
|
+
if (brief.content.problems.length > 0) {
|
|
616
|
+
const text = brief.content.problems.join(". ");
|
|
617
|
+
segments.push({
|
|
618
|
+
sceneId: "problems",
|
|
619
|
+
text,
|
|
620
|
+
estimatedDurationSeconds: this.durationEstimator.estimateSeconds(text),
|
|
621
|
+
contentType: "problem"
|
|
622
|
+
});
|
|
623
|
+
}
|
|
624
|
+
if (brief.content.solutions.length > 0) {
|
|
625
|
+
const text = brief.content.solutions.join(". ");
|
|
626
|
+
segments.push({
|
|
627
|
+
sceneId: "solutions",
|
|
628
|
+
text,
|
|
629
|
+
estimatedDurationSeconds: this.durationEstimator.estimateSeconds(text),
|
|
630
|
+
contentType: "solution"
|
|
631
|
+
});
|
|
632
|
+
}
|
|
633
|
+
if (brief.content.metrics && brief.content.metrics.length > 0) {
|
|
634
|
+
const text = brief.content.metrics.join(". ");
|
|
635
|
+
segments.push({
|
|
636
|
+
sceneId: "metrics",
|
|
637
|
+
text,
|
|
638
|
+
estimatedDurationSeconds: this.durationEstimator.estimateSeconds(text),
|
|
639
|
+
contentType: "metric"
|
|
640
|
+
});
|
|
641
|
+
}
|
|
642
|
+
if (brief.content.callToAction) {
|
|
643
|
+
segments.push({
|
|
644
|
+
sceneId: "cta",
|
|
645
|
+
text: brief.content.callToAction,
|
|
646
|
+
estimatedDurationSeconds: this.durationEstimator.estimateSeconds(brief.content.callToAction),
|
|
647
|
+
contentType: "cta"
|
|
648
|
+
});
|
|
649
|
+
}
|
|
650
|
+
const fullText = segments.map((s) => s.text).join(" ");
|
|
651
|
+
const estimatedDurationSeconds = segments.reduce((sum, s) => sum + s.estimatedDurationSeconds, 0);
|
|
652
|
+
return { fullText, segments, estimatedDurationSeconds };
|
|
653
|
+
}
|
|
654
|
+
buildScriptFromScenePlan(brief) {
|
|
655
|
+
const segments = brief.scenePlan.scenes.filter((scene) => scene.narrationText).map((scene) => {
|
|
656
|
+
const text = scene.narrationText ?? "";
|
|
657
|
+
return {
|
|
658
|
+
sceneId: scene.id,
|
|
659
|
+
text,
|
|
660
|
+
estimatedDurationSeconds: this.durationEstimator.estimateSeconds(text),
|
|
661
|
+
contentType: "intro"
|
|
662
|
+
};
|
|
663
|
+
});
|
|
664
|
+
const fullText = segments.map((s) => s.text).join(" ");
|
|
665
|
+
const estimatedDurationSeconds = segments.reduce((sum, s) => sum + s.estimatedDurationSeconds, 0);
|
|
666
|
+
return { fullText, segments, estimatedDurationSeconds };
|
|
667
|
+
}
|
|
668
|
+
buildTimingMap(segments, fps, breathingRoomFactor) {
|
|
669
|
+
const timingSegments = segments.map((seg) => {
|
|
670
|
+
const durationInFrames = Math.ceil(seg.durationMs / 1000 * fps);
|
|
671
|
+
return {
|
|
672
|
+
sceneId: seg.sceneId,
|
|
673
|
+
durationMs: seg.durationMs,
|
|
674
|
+
durationInFrames,
|
|
675
|
+
recommendedSceneDurationInFrames: Math.ceil(durationInFrames * breathingRoomFactor),
|
|
676
|
+
wordTimings: seg.wordTimings?.map((wt) => ({
|
|
677
|
+
word: wt.word,
|
|
678
|
+
startMs: wt.startMs,
|
|
679
|
+
endMs: wt.endMs
|
|
680
|
+
}))
|
|
681
|
+
};
|
|
682
|
+
});
|
|
683
|
+
const totalDurationMs = segments.reduce((sum, s) => sum + s.durationMs, 0);
|
|
684
|
+
return { totalDurationMs, segments: timingSegments, fps };
|
|
685
|
+
}
|
|
686
|
+
}
|
|
687
|
+
function generateProjectId() {
  // Short, mostly-unique id: "tts_<base36 timestamp>_<base36 random>".
  // Math.random is not cryptographically secure — fine for correlating
  // artifacts, never for secrets.
  const stamp = Date.now().toString(36);
  const noise = Math.random().toString(36).slice(2, 8);
  return ["tts", stamp, noise].join("_");
}
|
|
692
|
+
// src/stt/segment-splitter.ts
class SegmentSplitter {
  // Default ceiling per chunk: five minutes of audio.
  static DEFAULT_MAX_CHUNK_MS = 5 * 60 * 1000;
  /**
   * Split an audio payload into chunks no longer than `maxChunkMs`.
   * Byte boundaries are computed proportionally to duration, which is
   * only sample-accurate for uncompressed formats. The input is returned
   * untouched (as a single-element array) when it already fits.
   */
  split(audio, maxChunkMs = SegmentSplitter.DEFAULT_MAX_CHUNK_MS) {
    const totalDurationMs = audio.durationMs ?? this.estimateDurationMs(audio);
    if (totalDurationMs <= maxChunkMs) {
      return [audio];
    }
    // Guard against division by zero for degenerate zero-length audio.
    const bytesPerMs = audio.data.length / Math.max(totalDurationMs, 1);
    const chunks = [];
    for (let offsetMs = 0; offsetMs < totalDurationMs; ) {
      const chunkDurationMs = Math.min(maxChunkMs, totalDurationMs - offsetMs);
      const startByte = Math.floor(offsetMs * bytesPerMs);
      const endByte = Math.floor((offsetMs + chunkDurationMs) * bytesPerMs);
      chunks.push({
        data: audio.data.slice(startByte, endByte),
        format: audio.format,
        sampleRateHz: audio.sampleRateHz,
        durationMs: chunkDurationMs,
        channels: audio.channels
      });
      offsetMs += chunkDurationMs;
    }
    return chunks;
  }
  /**
   * Estimate duration from raw byte length.
   * NOTE(review): assumes 16-bit samples (2 bytes/sample) — only valid
   * for uncompressed PCM audio; confirm what callers pass in.
   */
  estimateDurationMs(audio) {
    const bytesPerSample = 2;
    const channels = audio.channels ?? 1;
    const totalSamples = audio.data.length / (bytesPerSample * channels);
    return Math.ceil(totalSamples / audio.sampleRateHz * 1000);
  }
}
|
|
725
|
+
|
|
726
|
+
// src/stt/diarization-mapper.ts
class DiarizationMapper {
  /**
   * Assign human-readable labels ("Speaker 1", "Speaker 2", …) to
   * diarized segments, numbering speakers in order of first appearance,
   * and aggregate per-speaker segment counts and speaking time.
   * Segments without a speakerId are passed through untouched.
   */
  map(segments, labelPrefix = "Speaker") {
    // First pass: record each distinct speaker in appearance order.
    const speakerOrder = [];
    const speakerStats = new Map();
    for (const segment of segments) {
      if (segment.speakerId && !speakerOrder.includes(segment.speakerId)) {
        speakerOrder.push(segment.speakerId);
        speakerStats.set(segment.speakerId, { segmentCount: 0, totalSpeakingMs: 0 });
      }
    }
    // Second pass: label segments and accumulate stats as we go.
    const labeledSegments = segments.map((segment) => {
      if (!segment.speakerId) {
        return segment;
      }
      const label = `${labelPrefix} ${speakerOrder.indexOf(segment.speakerId) + 1}`;
      const stats = speakerStats.get(segment.speakerId);
      if (stats) {
        stats.segmentCount += 1;
        stats.totalSpeakingMs += segment.endMs - segment.startMs;
      }
      return { ...segment, speakerLabel: label };
    });
    const speakers = speakerOrder.map((id, index) => ({
      id,
      label: `${labelPrefix} ${index + 1}`,
      segmentCount: speakerStats.get(id)?.segmentCount ?? 0,
      totalSpeakingMs: speakerStats.get(id)?.totalSpeakingMs ?? 0
    }));
    return { segments: labeledSegments, speakers };
  }
}
|
|
765
|
+
|
|
766
|
+
// src/stt/subtitle-formatter.ts
class SubtitleFormatter {
  /**
   * Render transcript segments as an SRT document.
   * Each cue: index line, "HH:MM:SS,mmm --> HH:MM:SS,mmm", then the text,
   * prefixed with "[Speaker N] " when a speaker label is present.
   */
  toSRT(segments) {
    return segments
      .map((seg, i) => {
        const start = this.formatTimeSRT(seg.startMs);
        const end = this.formatTimeSRT(seg.endMs);
        const label = seg.speakerLabel ? `[${seg.speakerLabel}] ` : "";
        return `${i + 1}\n${start} --> ${end}\n${label}${seg.text}`;
      })
      .join("\n\n");
  }
  /**
   * Render transcript segments as a WebVTT document.
   * Speaker labels use the VTT voice tag: "<v Speaker N>".
   */
  toVTT(segments) {
    const cues = segments
      .map((seg, i) => {
        const start = this.formatTimeVTT(seg.startMs);
        const end = this.formatTimeVTT(seg.endMs);
        const label = seg.speakerLabel ? `<v ${seg.speakerLabel}>` : "";
        return `${i + 1}\n${start} --> ${end}\n${label}${seg.text}`;
      })
      .join("\n\n");
    return `WEBVTT\n\n${cues}`;
  }
  // SRT uses a comma before the milliseconds field.
  formatTimeSRT(ms) {
    return this.formatTime(ms, ",");
  }
  // VTT uses a period before the milliseconds field.
  formatTimeVTT(ms) {
    return this.formatTime(ms, ".");
  }
  /**
   * Shared HH:MM:SS<sep>mmm formatter (SRT and VTT differ only in the
   * millisecond separator). Math.floor on the millisecond remainder
   * guards against fractional inputs producing malformed timestamps
   * like "00:00:01,234.5".
   */
  formatTime(ms, millisSeparator) {
    const hours = Math.floor(ms / 3600000);
    const minutes = Math.floor(ms % 3600000 / 60000);
    const seconds = Math.floor(ms % 60000 / 1000);
    const millis = Math.floor(ms % 1000);
    return `${this.pad(hours, 2)}:${this.pad(minutes, 2)}:${this.pad(seconds, 2)}${millisSeparator}${this.pad(millis, 3)}`;
  }
  // Left-pad a number with zeros to the given width.
  pad(value, length) {
    return value.toString().padStart(length, "0");
  }
}
|
|
814
|
+
|
|
815
|
+
// src/stt/transcriber.ts
/**
 * High-level transcription pipeline: splits long audio into chunks,
 * runs the injected STT provider on each, stitches the results back
 * together with corrected timestamps, optionally maps diarized speakers
 * to readable labels and renders subtitles.
 */
class Transcriber {
  // Injected STT provider; must expose transcribe(), optionally transcribeStream().
  stt;
  segmentSplitter = new SegmentSplitter;
  diarizationMapper = new DiarizationMapper;
  subtitleFormatter = new SubtitleFormatter;
  constructor(options) {
    this.stt = options.stt;
  }
  /**
   * Transcribe the brief's audio end to end.
   * Returns { id, transcript, subtitles, speakers }; `subtitles` and
   * `speakers` are undefined unless a subtitle format / diarization was
   * requested on the brief.
   */
  async transcribe(brief) {
    const projectId = generateProjectId2();
    // Long audio is chunked so each provider call stays within limits.
    const chunks = this.segmentSplitter.split(brief.audio);
    const allSegments = [];
    let fullText = "";
    let totalDurationMs = 0;
    // Running offset: shifts per-chunk timestamps into whole-file time.
    let offsetMs = 0;
    for (const chunk of chunks) {
      const result = await this.stt.transcribe({
        audio: chunk,
        language: brief.language,
        diarize: brief.diarize,
        speakerCount: brief.speakerCount,
        // Word timestamps are always requested; NOTE(review): they are
        // not propagated into the stitched segments below — confirm
        // whether that is intentional.
        wordTimestamps: true,
        vocabularyHints: brief.vocabularyHints
      });
      // Rebase each segment's timestamps from chunk-local to file-global.
      const offsetSegments = result.segments.map((seg) => ({
        text: seg.text,
        startMs: seg.startMs + offsetMs,
        endMs: seg.endMs + offsetMs,
        speakerId: seg.speakerId,
        speakerName: seg.speakerName,
        confidence: seg.confidence
      }));
      allSegments.push(...offsetSegments);
      // Join chunk texts with a single space, avoiding a leading space.
      fullText += (fullText ? " " : "") + result.text;
      totalDurationMs += result.durationMs;
      // Advance by the chunk's nominal duration (always set by the
      // splitter for multi-chunk input).
      offsetMs += chunk.durationMs ?? 0;
    }
    let mappedSegments = allSegments;
    let speakers;
    if (brief.diarize) {
      // Replace raw provider speaker ids with "Speaker N" labels.
      const mapping = this.diarizationMapper.map(allSegments);
      mappedSegments = mapping.segments;
      speakers = mapping.speakers;
    }
    const transcript = {
      text: fullText,
      segments: mappedSegments,
      language: brief.language ?? "en",
      durationMs: totalDurationMs
    };
    let subtitles;
    const format = brief.subtitleFormat ?? "none";
    if (format === "srt") {
      subtitles = this.subtitleFormatter.toSRT(mappedSegments);
    } else if (format === "vtt") {
      subtitles = this.subtitleFormatter.toVTT(mappedSegments);
    }
    return {
      id: projectId,
      transcript,
      subtitles,
      speakers
    };
  }
  /**
   * Stream transcription segments as the provider produces them.
   * Timestamps are passed through as-is (no chunk-offset rebasing here).
   * @throws Error when the provider has no transcribeStream support.
   */
  async* transcribeStream(audio, options) {
    if (!this.stt.transcribeStream) {
      throw new Error("Streaming transcription not supported by the current STT provider");
    }
    const stream = this.stt.transcribeStream(audio, {
      language: options?.language,
      diarize: options?.diarize,
      speakerCount: options?.speakerCount,
      wordTimestamps: true,
      vocabularyHints: options?.vocabularyHints
    });
    for await (const segment of stream) {
      // Normalize provider field names (speakerName -> speakerLabel).
      yield {
        text: segment.text,
        startMs: segment.startMs,
        endMs: segment.endMs,
        speakerId: segment.speakerId,
        speakerLabel: segment.speakerName,
        confidence: segment.confidence
      };
    }
  }
}
|
|
903
|
+
function generateProjectId2() {
  // Short, mostly-unique id: "stt_<base36 timestamp>_<base36 random>".
  // Math.random is not cryptographically secure — fine for correlating
  // artifacts, never for secrets.
  const stamp = Date.now().toString(36);
  const noise = Math.random().toString(36).slice(2, 8);
  return ["stt", stamp, noise].join("_");
}
|
|
908
|
+
// src/sync/timing-calculator.ts
class TimingCalculator {
  /**
   * Build a timing map (frame counts per scene) from synthesized
   * segments. `breathingRoomFactor` pads each recommended scene length
   * beyond the bare voice-over duration; frame counts round up.
   */
  calculate(segments, fps, breathingRoomFactor = 1.15) {
    const msToFrames = (durationMs) => Math.ceil(durationMs / 1000 * fps);
    const timingSegments = segments.map((segment) => {
      const durationInFrames = msToFrames(segment.durationMs);
      return {
        sceneId: segment.sceneId,
        durationMs: segment.durationMs,
        durationInFrames,
        recommendedSceneDurationInFrames: Math.ceil(durationInFrames * breathingRoomFactor),
        // Copy only the public word-timing fields.
        wordTimings: segment.wordTimings?.map((timing) => ({
          word: timing.word,
          startMs: timing.startMs,
          endMs: timing.endMs
        }))
      };
    });
    let totalDurationMs = 0;
    for (const segment of segments) {
      totalDurationMs += segment.durationMs;
    }
    return { totalDurationMs, segments: timingSegments, fps };
  }
  /**
   * Re-derive frame counts for a different frame rate, preserving each
   * segment's existing breathing-room ratio.
   */
  recalculateForFps(timingMap, newFps) {
    const segments = timingMap.segments.map((segment) => {
      const durationInFrames = Math.ceil(segment.durationMs / 1000 * newFps);
      // Math.max guards against division by zero on zero-frame segments.
      const breathingRatio = segment.recommendedSceneDurationInFrames / Math.max(segment.durationInFrames, 1);
      return {
        ...segment,
        durationInFrames,
        recommendedSceneDurationInFrames: Math.ceil(durationInFrames * breathingRatio)
      };
    });
    return { ...timingMap, segments, fps: newFps };
  }
}
|
|
951
|
+
|
|
952
|
+
// src/sync/scene-adapter.ts
class SceneAdapter {
  durationEstimator = new DurationEstimator;
  /**
   * Convert a scene plan into a voice script: one segment per scene
   * that has non-blank narration text.
   */
  adapt(scenePlan) {
    const narrated = scenePlan.scenes.filter(
      (scene) => scene.narrationText && scene.narrationText.trim().length > 0
    );
    const segments = narrated.map((scene, index) => {
      const text = scene.narrationText ?? "";
      return {
        sceneId: scene.id,
        text,
        estimatedDurationSeconds: this.durationEstimator.estimateSeconds(text),
        contentType: this.inferContentType(index, narrated.length)
      };
    });
    const fullText = segments.map((segment) => segment.text).join(" ");
    let estimatedDurationSeconds = 0;
    for (const segment of segments) {
      estimatedDurationSeconds += segment.estimatedDurationSeconds;
    }
    return { fullText, segments, estimatedDurationSeconds };
  }
  /**
   * Heuristic content-type assignment by position: first scene is the
   * intro, last the CTA; with at least four scenes the second is a
   * problem and the second-to-last a metric; everything else a solution.
   */
  inferContentType(index, total) {
    if (index === 0) {
      return "intro";
    }
    if (index === total - 1) {
      return "cta";
    }
    if (index === 1 && total > 3) {
      return "problem";
    }
    if (index === total - 2 && total > 3) {
      return "metric";
    }
    return "solution";
  }
}
|
|
982
|
+
|
|
983
|
+
// src/sync/duration-negotiator.ts
/**
 * Reconciles voice-over duration with pre-existing scene durations,
 * producing per-scene adjustment recommendations alongside an updated
 * timing map.
 */
class DurationNegotiator {
  // Voice may exceed the scene by up to 10% before action is suggested.
  static UPPER_THRESHOLD = 1.1;
  // Voice shorter than 70% of the scene triggers silence padding.
  static LOWER_THRESHOLD = 0.7;
  // Bounds for suggested narration playback-rate changes.
  static MAX_RATE = 1.3;
  static MIN_RATE = 0.8;
  /**
   * Compare each timing segment against `sceneDurations` (sceneId ->
   * frames) and emit one adjustment record per segment. Only the
   * pad-silence case mutates the returned timing map (extending the
   * recommended scene duration to the original scene length).
   */
  negotiate(timingMap, sceneDurations) {
    const adjustments = [];
    const updatedSegments = timingMap.segments.map((segment) => {
      const sceneFrames = sceneDurations.get(segment.sceneId);
      // Scene unknown to the caller: nothing to negotiate against.
      if (sceneFrames === undefined) {
        adjustments.push({
          sceneId: segment.sceneId,
          originalSceneDurationInFrames: segment.recommendedSceneDurationInFrames,
          voiceDurationInFrames: segment.durationInFrames,
          action: "no_change",
          finalSceneDurationInFrames: segment.recommendedSceneDurationInFrames
        });
        return segment;
      }
      const ratio = segment.durationInFrames / sceneFrames;
      if (ratio > DurationNegotiator.UPPER_THRESHOLD) {
        // Voice-over overflows the scene: suggest speeding up narration
        // (capped at MAX_RATE) or, beyond that cap, extending the scene.
        adjustments.push({
          sceneId: segment.sceneId,
          originalSceneDurationInFrames: sceneFrames,
          voiceDurationInFrames: segment.durationInFrames,
          action: ratio > DurationNegotiator.MAX_RATE ? "extend_scene" : "suggest_rate_change",
          suggestedRate: Math.min(ratio, DurationNegotiator.MAX_RATE),
          finalSceneDurationInFrames: segment.recommendedSceneDurationInFrames
        });
        return segment;
      }
      if (ratio < DurationNegotiator.LOWER_THRESHOLD) {
        // Voice-over is much shorter: keep the original scene length and
        // pad the remainder with silence.
        adjustments.push({
          sceneId: segment.sceneId,
          originalSceneDurationInFrames: sceneFrames,
          voiceDurationInFrames: segment.durationInFrames,
          action: "pad_silence",
          suggestedRate: Math.max(ratio, DurationNegotiator.MIN_RATE),
          finalSceneDurationInFrames: sceneFrames
        });
        return {
          ...segment,
          recommendedSceneDurationInFrames: sceneFrames
        };
      }
      // Within tolerance: record the comparison but change nothing.
      adjustments.push({
        sceneId: segment.sceneId,
        originalSceneDurationInFrames: sceneFrames,
        voiceDurationInFrames: segment.durationInFrames,
        action: "no_change",
        finalSceneDurationInFrames: segment.recommendedSceneDurationInFrames
      });
      return segment;
    });
    return {
      timingMap: {
        ...timingMap,
        segments: updatedSegments
      },
      adjustments
    };
  }
}
|
|
1049
|
+
// Public API of the voice library bundle: TTS synthesis and pacing,
// conversational session management, STT transcription with diarization
// and subtitles, audio utilities, and scene/timing synchronization.
export {
  VoiceSynthesizer,
  VoiceSessionManager,
  TurnDetector,
  TranscriptBuilder,
  Transcriber,
  TimingCalculator,
  SubtitleFormatter,
  SilenceGenerator,
  SegmentSynthesizer,
  SegmentSplitter,
  SceneAdapter,
  ResponseOrchestrator,
  PaceAnalyzer,
  FormatConverter,
  EmphasisPlanner,
  DurationNegotiator,
  DurationEstimator,
  DiarizationMapper,
  AudioConcatenator,
  AudioAssembler
};
|