@meframe/core 0.1.9 → 0.2.1
This diff shows the contents of publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between the two released versions.
- package/dist/Meframe.d.ts.map +1 -1
- package/dist/Meframe.js +7 -0
- package/dist/Meframe.js.map +1 -1
- package/dist/cache/AudioMixBlockCache.d.ts +18 -0
- package/dist/cache/AudioMixBlockCache.d.ts.map +1 -0
- package/dist/cache/AudioMixBlockCache.js +57 -0
- package/dist/cache/AudioMixBlockCache.js.map +1 -0
- package/dist/cache/CacheManager.js +0 -1
- package/dist/cache/CacheManager.js.map +1 -1
- package/dist/cache/l1/AudioL1Cache.d.ts +0 -7
- package/dist/cache/l1/AudioL1Cache.d.ts.map +1 -1
- package/dist/cache/l1/AudioL1Cache.js +41 -40
- package/dist/cache/l1/AudioL1Cache.js.map +1 -1
- package/dist/controllers/PlaybackController.d.ts +0 -1
- package/dist/controllers/PlaybackController.d.ts.map +1 -1
- package/dist/controllers/PlaybackController.js +15 -26
- package/dist/controllers/PlaybackController.js.map +1 -1
- package/dist/controllers/PlaybackStateMachine.d.ts.map +1 -1
- package/dist/controllers/PlaybackStateMachine.js +2 -0
- package/dist/controllers/PlaybackStateMachine.js.map +1 -1
- package/dist/orchestrator/AudioExportSession.d.ts +28 -0
- package/dist/orchestrator/AudioExportSession.d.ts.map +1 -0
- package/dist/orchestrator/AudioExportSession.js +95 -0
- package/dist/orchestrator/AudioExportSession.js.map +1 -0
- package/dist/orchestrator/AudioPreviewSession.d.ts +61 -0
- package/dist/orchestrator/AudioPreviewSession.d.ts.map +1 -0
- package/dist/orchestrator/AudioPreviewSession.js +340 -0
- package/dist/orchestrator/AudioPreviewSession.js.map +1 -0
- package/dist/orchestrator/AudioWindowPreparer.d.ts +62 -0
- package/dist/orchestrator/AudioWindowPreparer.d.ts.map +1 -0
- package/dist/orchestrator/AudioWindowPreparer.js +259 -0
- package/dist/orchestrator/AudioWindowPreparer.js.map +1 -0
- package/dist/orchestrator/ExportScheduler.d.ts +2 -2
- package/dist/orchestrator/ExportScheduler.js.map +1 -1
- package/dist/orchestrator/Orchestrator.d.ts +8 -2
- package/dist/orchestrator/Orchestrator.d.ts.map +1 -1
- package/dist/orchestrator/Orchestrator.js +22 -16
- package/dist/orchestrator/Orchestrator.js.map +1 -1
- package/dist/stages/compose/OfflineAudioMixer.d.ts.map +1 -1
- package/dist/stages/compose/OfflineAudioMixer.js +4 -1
- package/dist/stages/compose/OfflineAudioMixer.js.map +1 -1
- package/dist/stages/mux/MP4Muxer.js.map +1 -1
- package/dist/stages/mux/MuxManager.d.ts +1 -4
- package/dist/stages/mux/MuxManager.d.ts.map +1 -1
- package/dist/stages/mux/MuxManager.js +1 -1
- package/dist/stages/mux/MuxManager.js.map +1 -1
- package/package.json +1 -1
- package/dist/orchestrator/GlobalAudioSession.d.ts +0 -119
- package/dist/orchestrator/GlobalAudioSession.d.ts.map +0 -1
- package/dist/orchestrator/GlobalAudioSession.js +0 -493
- package/dist/orchestrator/GlobalAudioSession.js.map +0 -1
package/dist/orchestrator/GlobalAudioSession.js
@@ -1,493 +0,0 @@
-import { OfflineAudioMixer } from "../stages/compose/OfflineAudioMixer.js";
-import { MeframeEvent } from "../event/events.js";
-import { AudioChunkEncoder } from "../stages/encode/AudioChunkEncoder.js";
-import { AudioChunkDecoder } from "../stages/decode/AudioChunkDecoder.js";
-import { hasResourceId, isAudioClip } from "../model/types.js";
-class GlobalAudioSession {
-  mixer;
-  activeClips = /* @__PURE__ */ new Set();
-  deps;
-  model = null;
-  audioContext = null;
-  volume = 1;
-  playbackRate = 1;
-  isPlaying = false;
-  // Lookahead scheduling state
-  nextScheduleTime = 0;
-  // Next AudioContext time to schedule
-  nextContentTimeUs = 0;
-  // Next timeline position (Us)
-  scheduledSources = /* @__PURE__ */ new Set();
-  LOOKAHEAD_TIME = 0.2;
-  // 200ms lookahead
-  CHUNK_DURATION = 0.1;
-  // 100ms chunks
-  constructor(deps) {
-    this.deps = deps;
-    this.mixer = new OfflineAudioMixer(deps.cacheManager, () => this.model);
-  }
-  setModel(model) {
-    this.model = model;
-  }
-  onAudioData(message) {
-    const { sessionId, audioData, clipStartUs, clipDurationUs } = message;
-    const globalTimeUs = clipStartUs + (audioData.timestamp ?? 0);
-    this.deps.cacheManager.putClipAudioData(sessionId, audioData, clipDurationUs, globalTimeUs);
-  }
-  async ensureAudioForTime(timeUs, options) {
-    if (!this.model) return;
-    const mode = options?.mode ?? "blocking";
-    const WINDOW_DURATION = 3e6;
-    const windowEndUs = Math.min(this.model.durationUs, timeUs + WINDOW_DURATION);
-    if (mode === "probe") {
-      void this.ensureAudioForTimeRange(timeUs, windowEndUs, { mode, loadResource: true });
-      return;
-    }
-    await this.ensureAudioForTimeRange(timeUs, windowEndUs, { mode, loadResource: true });
-  }
-  /**
-   * Fast readiness probe for preview playback.
-   *
-   * This is intentionally synchronous and lightweight:
-   * - Only checks resource-level readiness (download + MP4 index parsing).
-   * - If any relevant resource isn't ready yet, return false.
-   * - Does NOT require audio samples / PCM window coverage (probe is resource-level only).
-   *
-   * Note: This probe does NOT gate on PCM coverage to avoid frequent buffering oscillation.
-   * PCM is prepared incrementally by scheduleAudio() / ensureAudioForTimeRange().
-   */
-  isAudioResourceWindowReady(startUs, endUs) {
-    const model = this.model;
-    if (!model) return true;
-    const activeClips = model.getActiveClips(startUs, endUs);
-    for (const clip of activeClips) {
-      if (clip.trackKind !== "audio" && clip.trackKind !== "video") continue;
-      if (!hasResourceId(clip)) continue;
-      const resource = model.getResource(clip.resourceId);
-      if (resource?.state !== "ready") {
-        return false;
-      }
-    }
-    return true;
-  }
-  async activateAllAudioClips() {
-    const model = this.model;
-    if (!model) {
-      return;
-    }
-    const audioTracks = model.tracks.filter((track) => track.kind === "audio");
-    if (audioTracks.length === 0) return;
-    const maxClipCount = Math.max(...audioTracks.map((track) => track.clips.length));
-    for (let clipIndex = 0; clipIndex < maxClipCount; clipIndex++) {
-      for (const track of audioTracks) {
-        const clip = track.clips[clipIndex];
-        if (!clip || this.activeClips.has(clip.id)) continue;
-        if (!isAudioClip(clip)) {
-          throw new Error(`Clip ${clip.id} in audio track is not an audio clip`);
-        }
-        if (this.deps.cacheManager.audioSampleCache.has(clip.resourceId)) {
-          this.activeClips.add(clip.id);
-          this.deps.eventBus.emit(MeframeEvent.ClipActivated, { clipId: clip.id });
-          continue;
-        }
-        await this.deps.resourceLoader.load(clip.resourceId, {
-          isPreload: false,
-          clipId: clip.id,
-          trackId: track.id
-        });
-        this.activeClips.add(clip.id);
-        this.deps.eventBus.emit(MeframeEvent.ClipActivated, { clipId: clip.id });
-      }
-    }
-  }
-  async deactivateClip(clipId) {
-    if (!this.activeClips.has(clipId)) {
-      return;
-    }
-    this.activeClips.delete(clipId);
-    this.deps.cacheManager.clearClipAudioData(clipId);
-  }
-  async startPlayback(timeUs, audioContext) {
-    this.audioContext = audioContext;
-    if (audioContext.state === "suspended") {
-      await audioContext.resume();
-    }
-    await this.ensureAudioForTime(timeUs, { mode: "blocking" });
-    this.isPlaying = true;
-    this.resetPlaybackStates();
-    await this.scheduleAudio(timeUs, audioContext);
-  }
-  stopPlayback() {
-    this.isPlaying = false;
-    this.stopAllAudioSources();
-  }
-  updateTime(_timeUs) {
-  }
-  /**
-   * Schedule audio chunks ahead of playback cursor
-   * Uses OfflineAudioMixer for proper mixing, then plays the result
-   */
-  async scheduleAudio(currentTimelineUs, audioContext) {
-    if (!this.isPlaying || !this.model || !this.audioContext) {
-      return;
-    }
-    const lookaheadTime = audioContext.currentTime + this.LOOKAHEAD_TIME;
-    if (this.nextScheduleTime === 0) {
-      this.nextScheduleTime = audioContext.currentTime + 0.01;
-      this.nextContentTimeUs = currentTimelineUs;
-    }
-    while (this.nextScheduleTime < lookaheadTime) {
-      if (this.nextScheduleTime < audioContext.currentTime) {
-        const timeDrift = audioContext.currentTime - this.nextScheduleTime;
-        if (timeDrift > 0.02) {
-          this.nextScheduleTime = audioContext.currentTime + 0.02;
-          const skippedUs = Math.round(timeDrift * 1e6);
-          this.nextContentTimeUs += skippedUs;
-        } else {
-          this.nextScheduleTime = audioContext.currentTime + 0.01;
-        }
-      }
-      const chunkDurationUs = Math.round(this.CHUNK_DURATION * 1e6);
-      const startUs = this.nextContentTimeUs;
-      const endUs = startUs + chunkDurationUs;
-      if (endUs > this.model.durationUs) {
-        break;
-      }
-      try {
-        await this.ensureAudioForTimeRange(startUs, endUs, {
-          mode: "blocking",
-          loadResource: true
-        });
-        const mixedBuffer = await this.mixer.mix(startUs, endUs);
-        if (this.nextScheduleTime < audioContext.currentTime) {
-          const timeDrift = audioContext.currentTime - this.nextScheduleTime;
-          if (timeDrift > 0.02) {
-            console.warn(
-              `[Audio] Skip chunk due to time drift: ${(timeDrift * 1e3).toFixed(1)}ms`
-            );
-            this.nextScheduleTime = audioContext.currentTime + 0.02;
-            this.nextContentTimeUs += chunkDurationUs;
-            continue;
-          }
-          this.nextScheduleTime = audioContext.currentTime + 0.01;
-        }
-        const source = audioContext.createBufferSource();
-        source.buffer = mixedBuffer;
-        source.playbackRate.value = this.playbackRate;
-        const gainNode = audioContext.createGain();
-        gainNode.gain.value = this.volume;
-        source.connect(gainNode);
-        gainNode.connect(audioContext.destination);
-        source.start(this.nextScheduleTime);
-        this.scheduledSources.add(source);
-        source.onended = () => {
-          source.disconnect();
-          gainNode.disconnect();
-          this.scheduledSources.delete(source);
-        };
-        const actualDuration = mixedBuffer.duration;
-        this.nextScheduleTime += actualDuration;
-        this.nextContentTimeUs += chunkDurationUs;
-      } catch (error) {
-        console.warn("[GlobalAudioSession] Mix error, skipping chunk:", error);
-        this.nextScheduleTime += this.CHUNK_DURATION;
-        this.nextContentTimeUs += chunkDurationUs;
-      }
-    }
-  }
-  /**
-   * Reset playback states (called on seek)
-   */
-  resetPlaybackStates() {
-    this.stopAllAudioSources();
-    this.nextScheduleTime = 0;
-    this.nextContentTimeUs = 0;
-  }
-  setVolume(volume) {
-    this.volume = volume;
-  }
-  setPlaybackRate(rate) {
-    this.playbackRate = rate;
-    this.resetPlaybackStates();
-  }
-  reset() {
-    this.stopAllAudioSources();
-    this.deps.cacheManager.clearAudioCache();
-    this.activeClips.clear();
-  }
-  /**
-   * Mix and encode audio for a specific segment (used by ExportScheduler)
-   */
-  async mixAndEncodeSegment(startUs, endUs, onChunk) {
-    await this.ensureAudioForSegment(startUs, endUs);
-    const mixedBuffer = await this.mixer.mix(startUs, endUs);
-    const audioData = this.audioBufferToAudioData(mixedBuffer, startUs);
-    if (!audioData) return;
-    if (!this.exportEncoder) {
-      this.exportEncoder = new AudioChunkEncoder();
-      await this.exportEncoder.initialize();
-      this.exportEncoderStream = this.exportEncoder.createStream();
-      this.exportEncoderWriter = this.exportEncoderStream.writable.getWriter();
-      void this.startExportEncoderReader(this.exportEncoderStream.readable, onChunk);
-      await new Promise((resolve) => setTimeout(resolve, 10));
-    }
-    await this.exportEncoderWriter?.write(audioData);
-  }
-  /**
-   * Ensure audio clips in time range are decoded (for export)
-   * Decodes from AudioSampleCache (replaces Worker pipeline)
-   */
-  async ensureAudioForSegment(startUs, endUs) {
-    await this.ensureAudioForTimeRange(startUs, endUs, {
-      mode: "blocking",
-      loadResource: false,
-      strictMode: true
-    });
-  }
-  exportEncoder = null;
-  exportEncoderStream = null;
-  exportEncoderWriter = null;
-  async startExportEncoderReader(stream, onChunk) {
-    const reader = stream.getReader();
-    try {
-      while (true) {
-        const { done, value } = await reader.read();
-        if (done) break;
-        if (value) {
-          onChunk(value.chunk, value.metadata);
-        }
-      }
-    } catch (e) {
-      console.error("Export encoder reader error", e);
-    }
-  }
-  async finalizeExportAudio() {
-    if (this.exportEncoderWriter) {
-      await this.exportEncoderWriter.close();
-      this.exportEncoderWriter = null;
-    }
-    this.exportEncoder = null;
-    this.exportEncoderStream = null;
-  }
-  stopAllAudioSources() {
-    for (const source of this.scheduledSources) {
-      try {
-        source.disconnect();
-        source.stop(0);
-      } catch {
-      }
-    }
-    this.scheduledSources.clear();
-  }
-  /**
-   * Core method to ensure audio for all clips in a time range
-   * Unified implementation used by ensureAudioForTime, scheduleAudio, and export
-   */
-  async ensureAudioForTimeRange(startUs, endUs, options) {
-    const model = this.model;
-    if (!model) return;
-    const { mode = "blocking", loadResource = true, strictMode = false } = options;
-    const activeClips = model.getActiveClips(startUs, endUs);
-    const ensurePromises = activeClips.map(async (clip) => {
-      if (clip.trackKind !== "audio" && clip.trackKind !== "video") return;
-      if (!hasResourceId(clip)) return;
-      const resource = model.getResource(clip.resourceId);
-      if (resource?.state === "ready" && !this.deps.cacheManager.audioSampleCache.has(clip.resourceId)) {
-        return;
-      }
-      if (!this.deps.cacheManager.audioSampleCache.has(clip.resourceId)) {
-        if (!loadResource) {
-          return;
-        }
-        const resource2 = model.getResource(clip.resourceId);
-        if (resource2?.state !== "ready") {
-          await this.deps.resourceLoader.load(clip.resourceId, {
-            isPreload: false,
-            clipId: clip.id,
-            trackId: clip.trackId
-          });
-        }
-      }
-      const clipRelativeStartUs = Math.max(0, startUs - clip.startUs);
-      const clipRelativeEndUs = Math.min(clip.durationUs, endUs - clip.startUs);
-      const trimStartUs = clip.trimStartUs ?? 0;
-      const resourceStartUs = clipRelativeStartUs + trimStartUs;
-      const resourceEndUs = clipRelativeEndUs + trimStartUs;
-      await this.ensureAudioWindow(clip.id, resourceStartUs, resourceEndUs, strictMode);
-    });
-    if (mode === "probe") {
-      void Promise.all(ensurePromises);
-      return;
-    }
-    await Promise.all(ensurePromises);
-  }
-  /**
-   * Ensure audio window for a clip (aligned with video architecture)
-   *
-   * Note: Unlike video getFrame(), this method doesn't need a 'preheat' parameter
-   * Why: Audio cache check is window-level (range query) via hasWindowPCM()
-   * It verifies the entire window has ≥95% data (preview) or ≥99% (export)
-   * This naturally prevents premature return during preheating
-   */
-  async ensureAudioWindow(clipId, startUs, endUs, strictMode = false) {
-    if (this.deps.cacheManager.hasWindowPCM(clipId, startUs, endUs, strictMode)) {
-      return;
-    }
-    await this.decodeAudioWindow(clipId, startUs, endUs);
-  }
-  /**
-   * Decode audio window for a clip (aligned with video architecture)
-   * Incremental decoding strategy with smart fallback:
-   * - High coverage (≥80%): Skip decoding
-   * - Low coverage (<30%): Full decode (avoid fragmentation)
-   * - Medium coverage (30%-80%): Incremental decode
-   */
-  async decodeAudioWindow(clipId, startUs, endUs) {
-    const clip = this.model?.findClip(clipId);
-    if (!clip || !hasResourceId(clip)) {
-      return;
-    }
-    const audioRecord = this.deps.cacheManager.audioSampleCache.get(clip.resourceId);
-    if (!audioRecord) {
-      return;
-    }
-    const windowChunks = audioRecord.samples.filter((s) => {
-      const sampleEndUs = s.timestamp + (s.duration ?? 0);
-      return s.timestamp < endUs && sampleEndUs > startUs;
-    });
-    if (windowChunks.length === 0) {
-      return;
-    }
-    const INCREMENTAL_THRESHOLD = 0.95;
-    const FULL_FALLBACK_THRESHOLD = 0.3;
-    const coverage = this.deps.cacheManager.getAudioRangeCoverage(
-      clipId,
-      startUs,
-      endUs,
-      INCREMENTAL_THRESHOLD
-    );
-    if (coverage.covered) {
-      return;
-    }
-    if (coverage.coverageRatio < FULL_FALLBACK_THRESHOLD) {
-      await this.decodeAudioSamples(
-        clipId,
-        windowChunks,
-        audioRecord.metadata,
-        clip.durationUs,
-        clip.startUs
-      );
-      return;
-    }
-    const chunksToDecode = windowChunks.filter((chunk) => {
-      const chunkEndUs = chunk.timestamp + (chunk.duration ?? 0);
-      const chunkCoverage = this.deps.cacheManager.getAudioRangeCoverage(
-        clipId,
-        chunk.timestamp,
-        chunkEndUs,
-        0.95
-        // Stricter threshold for individual chunks
-      );
-      return !chunkCoverage.covered;
-    });
-    if (chunksToDecode.length === 0) {
-      return;
-    }
-    await this.decodeAudioSamples(
-      clipId,
-      chunksToDecode,
-      audioRecord.metadata,
-      clip.durationUs,
-      clip.startUs
-    );
-  }
-  /**
-   * Decode audio samples to PCM and cache
-   * Uses AudioChunkDecoder for consistency with project architecture
-   * Resamples to AudioContext sample rate if needed for better quality
-   */
-  async decodeAudioSamples(clipId, samples, config, clipDurationUs, clipStartUs) {
-    let description;
-    if (config.description) {
-      if (config.description instanceof ArrayBuffer) {
-        description = config.description;
-      } else if (ArrayBuffer.isView(config.description)) {
-        const view = config.description;
-        const newBuffer = new ArrayBuffer(view.byteLength);
-        new Uint8Array(newBuffer).set(
-          new Uint8Array(view.buffer, view.byteOffset, view.byteLength)
-        );
-        description = newBuffer;
-      }
-    }
-    const decoderConfig = {
-      codec: config.codec,
-      sampleRate: config.sampleRate,
-      numberOfChannels: config.numberOfChannels,
-      description
-    };
-    const decoder = new AudioChunkDecoder(`audio-${clipId}`, decoderConfig);
-    try {
-      const chunkStream = new ReadableStream({
-        start(controller) {
-          for (const sample of samples) {
-            controller.enqueue(sample);
-          }
-          controller.close();
-        }
-      });
-      const audioDataStream = chunkStream.pipeThrough(decoder.createStream());
-      const reader = audioDataStream.getReader();
-      try {
-        while (true) {
-          const { done, value } = await reader.read();
-          if (done) break;
-          if (value) {
-            const globalTimeUs = clipStartUs + (value.timestamp ?? 0);
-            this.deps.cacheManager.putClipAudioData(clipId, value, clipDurationUs, globalTimeUs);
-          }
-        }
-      } finally {
-        reader.releaseLock();
-      }
-    } catch (error) {
-      console.error(`[GlobalAudioSession] Decoder error for clip ${clipId}:`, error);
-      throw error;
-    } finally {
-      await decoder.close();
-    }
-  }
-  audioBufferToAudioData(buffer, timestampUs) {
-    const sampleRate = buffer.sampleRate;
-    const numberOfChannels = buffer.numberOfChannels;
-    const numberOfFrames = buffer.length;
-    const planes = [];
-    for (let channel = 0; channel < numberOfChannels; channel++) {
-      planes.push(buffer.getChannelData(channel));
-    }
-    return new AudioData({
-      format: "f32-planar",
-      sampleRate,
-      numberOfFrames,
-      numberOfChannels,
-      timestamp: timestampUs,
-      data: this.packPlanarF32Data(planes)
-    });
-  }
-  packPlanarF32Data(planes) {
-    const numberOfChannels = planes.length;
-    const numberOfFrames = planes[0]?.length ?? 0;
-    const totalSamples = numberOfChannels * numberOfFrames;
-    const packed = new Float32Array(totalSamples);
-    for (let channel = 0; channel < numberOfChannels; channel++) {
-      const plane = planes[channel];
-      if (!plane) continue;
-      packed.set(plane, channel * numberOfFrames);
-    }
-    return packed.buffer;
-  }
-}
-export {
-  GlobalAudioSession
-};
-//# sourceMappingURL=GlobalAudioSession.js.map
package/dist/orchestrator/GlobalAudioSession.js.map
@@ -1 +0,0 @@
-{"version":3,"file":"GlobalAudioSession.js","sources":["../../src/orchestrator/GlobalAudioSession.ts"],"sourcesContent":[…],"names":["resource"],"mappings":"…"}