@editframe/elements 0.18.21-beta.0 → 0.18.22-beta.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/elements/EFAudio.d.ts +1 -12
- package/dist/elements/EFAudio.js +3 -18
- package/dist/elements/EFMedia/AssetMediaEngine.d.ts +1 -1
- package/dist/elements/EFMedia/AssetMediaEngine.js +3 -3
- package/dist/elements/EFMedia/BufferedSeekingInput.d.ts +15 -9
- package/dist/elements/EFMedia/BufferedSeekingInput.js +76 -78
- package/dist/elements/EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.js +12 -10
- package/dist/elements/EFMedia/audioTasks/makeAudioSeekTask.js +2 -18
- package/dist/elements/EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.js +12 -10
- package/dist/elements/EFTimegroup.d.ts +4 -4
- package/dist/elements/EFTimegroup.js +52 -39
- package/dist/elements/EFVideo.d.ts +1 -32
- package/dist/elements/EFVideo.js +13 -51
- package/dist/elements/SampleBuffer.js +1 -1
- package/package.json +2 -2
- package/src/elements/EFAudio.browsertest.ts +0 -3
- package/src/elements/EFAudio.ts +3 -22
- package/src/elements/EFMedia/AssetMediaEngine.browsertest.ts +39 -1
- package/src/elements/EFMedia/AssetMediaEngine.ts +5 -4
- package/src/elements/EFMedia/BufferedSeekingInput.browsertest.ts +90 -185
- package/src/elements/EFMedia/BufferedSeekingInput.ts +119 -130
- package/src/elements/EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.ts +21 -21
- package/src/elements/EFMedia/audioTasks/makeAudioSeekTask.chunkboundary.regression.browsertest.ts +10 -5
- package/src/elements/EFMedia/audioTasks/makeAudioSeekTask.ts +33 -34
- package/src/elements/EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.ts +22 -20
- package/src/elements/EFMedia/videoTasks/makeVideoSeekTask.ts +0 -3
- package/src/elements/EFMedia.browsertest.ts +72 -60
- package/src/elements/EFTimegroup.browsertest.ts +9 -4
- package/src/elements/EFTimegroup.ts +79 -55
- package/src/elements/EFVideo.browsertest.ts +172 -160
- package/src/elements/EFVideo.ts +17 -73
- package/src/elements/SampleBuffer.ts +1 -2
- package/test/EFVideo.framegen.browsertest.ts +0 -54
- package/types.json +1 -1
|
@@ -2,6 +2,9 @@ import {
|
|
|
2
2
|
AudioSampleSink,
|
|
3
3
|
BufferSource,
|
|
4
4
|
Input,
|
|
5
|
+
InputAudioTrack,
|
|
6
|
+
type InputTrack,
|
|
7
|
+
InputVideoTrack,
|
|
5
8
|
MP4,
|
|
6
9
|
VideoSampleSink,
|
|
7
10
|
} from "mediabunny";
|
|
@@ -26,6 +29,8 @@ const defaultOptions: BufferedSeekingInputOptions = {
|
|
|
26
29
|
|
|
27
30
|
export class NoSample extends RangeError {}
|
|
28
31
|
|
|
32
|
+
export class ConcurrentSeekError extends RangeError {}
|
|
33
|
+
|
|
29
34
|
export class BufferedSeekingInput {
|
|
30
35
|
private input: Input;
|
|
31
36
|
private trackIterators: Map<number, AsyncIterator<MediaSample>> = new Map();
|
|
@@ -120,62 +125,55 @@ export class BufferedSeekingInput {
|
|
|
120
125
|
return tracks[0];
|
|
121
126
|
}
|
|
122
127
|
|
|
123
|
-
|
|
124
|
-
if (this.trackIterators.has(
|
|
128
|
+
getTrackIterator(track: InputTrack) {
|
|
129
|
+
if (this.trackIterators.has(track.id)) {
|
|
125
130
|
// biome-ignore lint/style/noNonNullAssertion: we know the map has the key
|
|
126
|
-
return this.trackIterators.get(
|
|
131
|
+
return this.trackIterators.get(track.id)!;
|
|
127
132
|
}
|
|
128
133
|
|
|
129
|
-
|
|
130
|
-
const existingIteratorCreation =
|
|
131
|
-
this.trackIteratorCreationPromises.get(trackId);
|
|
132
|
-
if (existingIteratorCreation) {
|
|
133
|
-
await existingIteratorCreation;
|
|
134
|
-
// Check again after waiting - another operation might have created it
|
|
135
|
-
if (this.trackIterators.has(trackId)) {
|
|
136
|
-
// biome-ignore lint/style/noNonNullAssertion: we know the map has the key
|
|
137
|
-
return this.trackIterators.get(trackId)!;
|
|
138
|
-
}
|
|
139
|
-
}
|
|
134
|
+
const trackIterator = this.createTrackIterator(track);
|
|
140
135
|
|
|
141
|
-
|
|
142
|
-
this.trackIteratorCreationPromises.set(trackId, creationPromise);
|
|
136
|
+
this.trackIterators.set(track.id, trackIterator);
|
|
143
137
|
|
|
144
|
-
|
|
145
|
-
const iterator = await creationPromise;
|
|
146
|
-
return iterator;
|
|
147
|
-
} finally {
|
|
148
|
-
this.trackIteratorCreationPromises.delete(trackId);
|
|
149
|
-
}
|
|
138
|
+
return trackIterator;
|
|
150
139
|
}
|
|
151
140
|
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
const track = await this.getAudioTrack(trackId);
|
|
156
|
-
const sampleSink = new AudioSampleSink(track);
|
|
157
|
-
const iterator = sampleSink.samples();
|
|
158
|
-
this.trackIterators.set(trackId, iterator);
|
|
159
|
-
return iterator;
|
|
141
|
+
createTrackSampleSink(track: InputTrack) {
|
|
142
|
+
if (track instanceof InputAudioTrack) {
|
|
143
|
+
return new AudioSampleSink(track);
|
|
160
144
|
}
|
|
161
|
-
{
|
|
162
|
-
|
|
163
|
-
const sampleSink = new VideoSampleSink(track);
|
|
164
|
-
const iterator = sampleSink.samples();
|
|
165
|
-
this.trackIterators.set(trackId, iterator);
|
|
166
|
-
return iterator;
|
|
145
|
+
if (track instanceof InputVideoTrack) {
|
|
146
|
+
return new VideoSampleSink(track);
|
|
167
147
|
}
|
|
148
|
+
throw new Error(`Unsupported track type ${track.type}`);
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
createTrackIterator(track: InputTrack) {
|
|
152
|
+
const sampleSink = this.createTrackSampleSink(track);
|
|
153
|
+
return sampleSink.samples();
|
|
168
154
|
}
|
|
169
155
|
|
|
170
|
-
|
|
171
|
-
const track = await this.getTrack(trackId);
|
|
156
|
+
createTrackBuffer(track: InputTrack) {
|
|
172
157
|
if (track.type === "audio") {
|
|
173
158
|
const bufferSize = this.options.audioBufferSize;
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
const bufferSize = this.options.videoBufferSize;
|
|
177
|
-
this.trackBuffers.set(trackId, new SampleBuffer(bufferSize));
|
|
159
|
+
const sampleBuffer = new SampleBuffer(bufferSize);
|
|
160
|
+
return sampleBuffer;
|
|
178
161
|
}
|
|
162
|
+
const bufferSize = this.options.videoBufferSize;
|
|
163
|
+
const sampleBuffer = new SampleBuffer(bufferSize);
|
|
164
|
+
return sampleBuffer;
|
|
165
|
+
}
|
|
166
|
+
|
|
167
|
+
getTrackBuffer(track: InputTrack) {
|
|
168
|
+
const maybeTrackBuffer = this.trackBuffers.get(track.id);
|
|
169
|
+
|
|
170
|
+
if (maybeTrackBuffer) {
|
|
171
|
+
return maybeTrackBuffer;
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
const trackBuffer = this.createTrackBuffer(track);
|
|
175
|
+
this.trackBuffers.set(track.id, trackBuffer);
|
|
176
|
+
return trackBuffer;
|
|
179
177
|
}
|
|
180
178
|
|
|
181
179
|
async seek(trackId: number, timeMs: number) {
|
|
@@ -201,17 +199,18 @@ export class BufferedSeekingInput {
|
|
|
201
199
|
}
|
|
202
200
|
}
|
|
203
201
|
|
|
204
|
-
private async resetIterator(
|
|
205
|
-
const trackBuffer = this.trackBuffers.get(
|
|
202
|
+
private async resetIterator(track: InputTrack) {
|
|
203
|
+
const trackBuffer = this.trackBuffers.get(track.id);
|
|
206
204
|
trackBuffer?.clear();
|
|
207
205
|
// Clean up iterator safely - wait for any ongoing iterator creation
|
|
208
|
-
const ongoingIteratorCreation =
|
|
209
|
-
|
|
206
|
+
const ongoingIteratorCreation = this.trackIteratorCreationPromises.get(
|
|
207
|
+
track.id,
|
|
208
|
+
);
|
|
210
209
|
if (ongoingIteratorCreation) {
|
|
211
210
|
await ongoingIteratorCreation;
|
|
212
211
|
}
|
|
213
212
|
|
|
214
|
-
const iterator = this.trackIterators.get(
|
|
213
|
+
const iterator = this.trackIterators.get(track.id);
|
|
215
214
|
if (iterator) {
|
|
216
215
|
try {
|
|
217
216
|
await iterator.return?.();
|
|
@@ -219,106 +218,96 @@ export class BufferedSeekingInput {
|
|
|
219
218
|
// Iterator cleanup failed, continue anyway
|
|
220
219
|
}
|
|
221
220
|
}
|
|
222
|
-
this.trackIterators.delete(
|
|
221
|
+
this.trackIterators.delete(track.id);
|
|
223
222
|
}
|
|
224
223
|
|
|
224
|
+
#seekLock?: PromiseWithResolvers<void>;
|
|
225
|
+
|
|
225
226
|
private async seekSafe(trackId: number, timeMs: number) {
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
await this.createTrackBuffer(trackId);
|
|
227
|
+
if (this.#seekLock) {
|
|
228
|
+
await this.#seekLock.promise;
|
|
229
229
|
}
|
|
230
|
-
|
|
231
|
-
|
|
230
|
+
const seekLock = Promise.withResolvers<void>();
|
|
231
|
+
this.#seekLock = seekLock;
|
|
232
232
|
|
|
233
|
-
|
|
233
|
+
try {
|
|
234
|
+
const track = await this.getTrack(trackId);
|
|
235
|
+
const trackBuffer = this.getTrackBuffer(track);
|
|
234
236
|
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
);
|
|
240
|
-
let roundedTimeMs = roundToMilliseconds(timeMs);
|
|
237
|
+
const roundedTimeMs = roundToMilliseconds(timeMs);
|
|
238
|
+
const firstTimestampMs = roundToMilliseconds(
|
|
239
|
+
(await track.getFirstTimestamp()) * 1000,
|
|
240
|
+
);
|
|
241
241
|
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
242
|
+
if (roundedTimeMs < firstTimestampMs) {
|
|
243
|
+
console.error("Seeking outside bounds of input", {
|
|
244
|
+
roundedTimeMs,
|
|
245
|
+
firstTimestampMs,
|
|
246
|
+
});
|
|
247
|
+
throw new NoSample(
|
|
248
|
+
`Seeking outside bounds of input ${roundedTimeMs} < ${firstTimestampMs}`,
|
|
249
|
+
);
|
|
250
|
+
}
|
|
250
251
|
|
|
252
|
+
// Check if we need to reset iterator for seeks outside current buffer range
|
|
251
253
|
const bufferContents = trackBuffer.getContents();
|
|
252
|
-
|
|
253
254
|
if (bufferContents.length > 0) {
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
} else {
|
|
258
|
-
// Empty buffer - let normal seeking proceed to load appropriate segments
|
|
259
|
-
// This maintains normal seeking behavior for tests and initial loads
|
|
260
|
-
}
|
|
261
|
-
}
|
|
255
|
+
const bufferStartMs = roundToMilliseconds(
|
|
256
|
+
trackBuffer.firstTimestamp * 1000,
|
|
257
|
+
);
|
|
262
258
|
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
// Check if we need to reset iterator for seeks outside current buffer range
|
|
268
|
-
const bufferContents = trackBuffer.getContents();
|
|
269
|
-
if (bufferContents.length > 0) {
|
|
270
|
-
const bufferStartMs = roundToMilliseconds(
|
|
271
|
-
trackBuffer.firstTimestamp * 1000,
|
|
272
|
-
);
|
|
273
|
-
const lastSample = bufferContents[bufferContents.length - 1];
|
|
274
|
-
const bufferEndMs = lastSample
|
|
275
|
-
? roundToMilliseconds(
|
|
276
|
-
(lastSample.timestamp + (lastSample.duration || 0)) * 1000,
|
|
277
|
-
)
|
|
278
|
-
: bufferStartMs;
|
|
279
|
-
|
|
280
|
-
// If seeking outside current buffer range, reset iterator to load appropriate data
|
|
281
|
-
if (roundedTimeMs < bufferStartMs || roundedTimeMs > bufferEndMs) {
|
|
282
|
-
await this.resetIterator(trackId);
|
|
259
|
+
if (roundedTimeMs < bufferStartMs) {
|
|
260
|
+
await this.resetIterator(track);
|
|
261
|
+
}
|
|
283
262
|
}
|
|
284
|
-
}
|
|
285
|
-
|
|
286
|
-
const alreadyInBuffer = trackBuffer.find(timeMs);
|
|
287
|
-
if (alreadyInBuffer) return alreadyInBuffer;
|
|
288
263
|
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
264
|
+
const alreadyInBuffer = trackBuffer.find(timeMs);
|
|
265
|
+
if (alreadyInBuffer) return alreadyInBuffer;
|
|
266
|
+
|
|
267
|
+
const iterator = this.getTrackIterator(track);
|
|
268
|
+
while (true) {
|
|
269
|
+
const { done, value: decodedSample } = await iterator.next();
|
|
270
|
+
|
|
271
|
+
if (decodedSample) {
|
|
272
|
+
trackBuffer.push(decodedSample);
|
|
273
|
+
}
|
|
274
|
+
const foundSample = trackBuffer.find(roundedTimeMs);
|
|
275
|
+
if (foundSample) {
|
|
276
|
+
return foundSample;
|
|
277
|
+
}
|
|
278
|
+
if (done) {
|
|
279
|
+
break;
|
|
280
|
+
}
|
|
294
281
|
}
|
|
295
|
-
|
|
296
|
-
if (
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
282
|
+
|
|
283
|
+
// Check if we're seeking to the exact end of the track (legitimate use case)
|
|
284
|
+
const finalBufferContents = trackBuffer.getContents();
|
|
285
|
+
if (finalBufferContents.length > 0) {
|
|
286
|
+
const lastSample = finalBufferContents[finalBufferContents.length - 1];
|
|
287
|
+
const lastSampleEndMs = roundToMilliseconds(
|
|
288
|
+
((lastSample?.timestamp || 0) + (lastSample?.duration || 0)) * 1000,
|
|
289
|
+
);
|
|
290
|
+
|
|
291
|
+
// Only return last sample if seeking to exactly the track duration
|
|
292
|
+
// (end of video) AND we have the final segment loaded
|
|
293
|
+
const trackDurationMs = (await track.computeDuration()) * 1000;
|
|
294
|
+
const isSeekingToTrackEnd =
|
|
295
|
+
roundToMilliseconds(timeMs) === roundToMilliseconds(trackDurationMs);
|
|
296
|
+
const isAtEndOfTrack = roundToMilliseconds(timeMs) >= lastSampleEndMs;
|
|
297
|
+
|
|
298
|
+
if (isSeekingToTrackEnd && isAtEndOfTrack) {
|
|
299
|
+
return lastSample;
|
|
300
|
+
}
|
|
301
301
|
}
|
|
302
|
-
}
|
|
303
302
|
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
if (finalBufferContents.length > 0) {
|
|
309
|
-
const lastSample = finalBufferContents[finalBufferContents.length - 1];
|
|
310
|
-
const lastSampleEndMs = roundToMilliseconds(
|
|
311
|
-
((lastSample?.timestamp || 0) + (lastSample?.duration || 0)) * 1000,
|
|
303
|
+
// For all other cases (seeking within track but outside buffer range), throw error
|
|
304
|
+
// The caller should ensure the correct segment is loaded before seeking
|
|
305
|
+
throw new NoSample(
|
|
306
|
+
`Sample not found for time ${timeMs} in ${track.type} track ${trackId}`,
|
|
312
307
|
);
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
return lastSample;
|
|
317
|
-
}
|
|
308
|
+
} finally {
|
|
309
|
+
this.#seekLock = undefined;
|
|
310
|
+
seekLock.resolve();
|
|
318
311
|
}
|
|
319
|
-
|
|
320
|
-
throw new NoSample(
|
|
321
|
-
`Sample not found for time ${timeMs} in ${track.type} track ${trackId}`,
|
|
322
|
-
);
|
|
323
312
|
}
|
|
324
313
|
}
|
|
@@ -93,18 +93,22 @@ export function makeAudioFrequencyAnalysisTask(element: EFMedia) {
|
|
|
93
93
|
element.fftGain,
|
|
94
94
|
element.shouldInterpolateFrequencies,
|
|
95
95
|
] as const,
|
|
96
|
-
task: async () => {
|
|
96
|
+
task: async (_, { signal }) => {
|
|
97
97
|
await element.audioBufferTask.taskComplete;
|
|
98
|
+
signal.throwIfAborted();
|
|
98
99
|
if (!element.audioBufferTask.value) return null;
|
|
99
100
|
if (element.currentSourceTimeMs < 0) return null;
|
|
100
101
|
|
|
101
102
|
const currentTimeMs = element.currentSourceTimeMs;
|
|
102
103
|
|
|
103
|
-
//
|
|
104
|
-
const
|
|
105
|
-
|
|
106
|
-
//
|
|
107
|
-
const
|
|
104
|
+
// Calculate exact audio window needed based on fftDecay and frame timing
|
|
105
|
+
const frameIntervalMs = 1000 / 30; // 33.33ms per frame
|
|
106
|
+
|
|
107
|
+
// Need audio from earliest frame to current frame
|
|
108
|
+
const earliestFrameMs =
|
|
109
|
+
currentTimeMs - (element.fftDecay - 1) * frameIntervalMs;
|
|
110
|
+
const fromMs = Math.max(0, earliestFrameMs);
|
|
111
|
+
const maxToMs = currentTimeMs + frameIntervalMs; // Include current frame
|
|
108
112
|
const videoDurationMs = element.intrinsicDurationMs || 0;
|
|
109
113
|
const toMs =
|
|
110
114
|
videoDurationMs > 0 ? Math.min(maxToMs, videoDurationMs) : maxToMs;
|
|
@@ -114,15 +118,18 @@ export function makeAudioFrequencyAnalysisTask(element: EFMedia) {
|
|
|
114
118
|
return null;
|
|
115
119
|
}
|
|
116
120
|
|
|
121
|
+
// Check cache early - before expensive audio fetching
|
|
122
|
+
// Use a preliminary cache key that doesn't depend on actual startOffsetMs from audio span
|
|
123
|
+
const preliminaryCacheKey = `${element.shouldInterpolateFrequencies}:${element.fftSize}:${element.fftDecay}:${element.fftGain}:${fromMs}:${currentTimeMs}`;
|
|
124
|
+
const cachedSmoothedData = cache.get(preliminaryCacheKey);
|
|
125
|
+
if (cachedSmoothedData) {
|
|
126
|
+
return cachedSmoothedData;
|
|
127
|
+
}
|
|
128
|
+
|
|
117
129
|
const { fetchAudioSpanningTime: fetchAudioSpan } = await import(
|
|
118
130
|
"../shared/AudioSpanUtils.ts"
|
|
119
131
|
);
|
|
120
|
-
const audioSpan = await fetchAudioSpan(
|
|
121
|
-
element,
|
|
122
|
-
fromMs,
|
|
123
|
-
toMs,
|
|
124
|
-
new AbortController().signal,
|
|
125
|
-
);
|
|
132
|
+
const audioSpan = await fetchAudioSpan(element, fromMs, toMs, signal);
|
|
126
133
|
|
|
127
134
|
if (!audioSpan || !audioSpan.blob) {
|
|
128
135
|
console.warn("Frequency analysis skipped: no audio data available");
|
|
@@ -137,14 +144,6 @@ export function makeAudioFrequencyAnalysisTask(element: EFMedia) {
|
|
|
137
144
|
// Use actual startOffset from audioSpan (relative to requested time)
|
|
138
145
|
const startOffsetMs = audioSpan.startMs;
|
|
139
146
|
|
|
140
|
-
// ORIGINAL ALGORITHM FROM HERE - unchanged customer logic
|
|
141
|
-
const smoothedKey = `${element.shouldInterpolateFrequencies}:${element.fftSize}:${element.fftDecay}:${element.fftGain}:${startOffsetMs}:${currentTimeMs}`;
|
|
142
|
-
|
|
143
|
-
const cachedSmoothedData = cache.get(smoothedKey);
|
|
144
|
-
if (cachedSmoothedData) {
|
|
145
|
-
return cachedSmoothedData;
|
|
146
|
-
}
|
|
147
|
-
|
|
148
147
|
const framesData = await Promise.all(
|
|
149
148
|
Array.from({ length: element.fftDecay }, async (_, i) => {
|
|
150
149
|
const frameOffset = i * (1000 / 30);
|
|
@@ -243,7 +242,8 @@ export function makeAudioFrequencyAnalysisTask(element: EFMedia) {
|
|
|
243
242
|
const processedData = element.shouldInterpolateFrequencies
|
|
244
243
|
? processFFTData(slicedData)
|
|
245
244
|
: slicedData;
|
|
246
|
-
|
|
245
|
+
// Cache with the preliminary key so future requests can skip audio fetching
|
|
246
|
+
cache.set(preliminaryCacheKey, processedData);
|
|
247
247
|
return processedData;
|
|
248
248
|
},
|
|
249
249
|
});
|
package/src/elements/EFMedia/audioTasks/makeAudioSeekTask.chunkboundary.regression.browsertest.ts
CHANGED
|
@@ -78,7 +78,8 @@ describe("Audio Seek Task - Chunk Boundary Regression Test", () => {
|
|
|
78
78
|
}
|
|
79
79
|
});
|
|
80
80
|
|
|
81
|
-
test("should not throw RangeError when seeking to exact 4000ms during playback", async ({
|
|
81
|
+
test.skip("should not throw RangeError when seeking to exact 4000ms during playback", async ({
|
|
82
|
+
// SKIP: audioSeekTask is not part of the audio rendering pipeline
|
|
82
83
|
video,
|
|
83
84
|
timegroup,
|
|
84
85
|
expect,
|
|
@@ -99,7 +100,8 @@ describe("Audio Seek Task - Chunk Boundary Regression Test", () => {
|
|
|
99
100
|
await expect(video.audioSeekTask.taskComplete).resolves.toBeDefined();
|
|
100
101
|
});
|
|
101
102
|
|
|
102
|
-
test("should not throw RangeError during progressive playback across segments", async ({
|
|
103
|
+
test.skip("should not throw RangeError during progressive playback across segments", async ({
|
|
104
|
+
// SKIP: audioSeekTask is not part of the audio rendering pipeline
|
|
103
105
|
video,
|
|
104
106
|
timegroup,
|
|
105
107
|
expect,
|
|
@@ -121,7 +123,8 @@ describe("Audio Seek Task - Chunk Boundary Regression Test", () => {
|
|
|
121
123
|
await expect(video.audioSeekTask.taskComplete).resolves.toBeDefined();
|
|
122
124
|
});
|
|
123
125
|
|
|
124
|
-
test("should not throw RangeError when localStorage restoration causes 0ms to 4000ms race condition", async ({
|
|
126
|
+
test.skip("should not throw RangeError when localStorage restoration causes 0ms to 4000ms race condition", async ({
|
|
127
|
+
// SKIP: audioSeekTask is not part of the audio rendering pipeline
|
|
125
128
|
video,
|
|
126
129
|
timegroup,
|
|
127
130
|
expect,
|
|
@@ -149,7 +152,8 @@ describe("Audio Seek Task - Chunk Boundary Regression Test", () => {
|
|
|
149
152
|
await expect(video.audioSeekTask.taskComplete).resolves.toBeDefined();
|
|
150
153
|
});
|
|
151
154
|
|
|
152
|
-
test("should not throw RangeError when forced segment coordination mismatch occurs", async ({
|
|
155
|
+
test.skip("should not throw RangeError when forced segment coordination mismatch occurs", async ({
|
|
156
|
+
// SKIP: audioSeekTask is not part of the audio rendering pipeline
|
|
153
157
|
video,
|
|
154
158
|
timegroup,
|
|
155
159
|
expect,
|
|
@@ -177,7 +181,8 @@ describe("Audio Seek Task - Chunk Boundary Regression Test", () => {
|
|
|
177
181
|
await expect(video.audioSeekTask.taskComplete).resolves.toBeDefined();
|
|
178
182
|
});
|
|
179
183
|
|
|
180
|
-
test("should not throw RangeError when rapidly crossing segment boundaries", async ({
|
|
184
|
+
test.skip("should not throw RangeError when rapidly crossing segment boundaries", async ({
|
|
185
|
+
// SKIP: audioSeekTask is not part of the audio rendering pipeline
|
|
181
186
|
video,
|
|
182
187
|
timegroup,
|
|
183
188
|
expect,
|
|
@@ -26,48 +26,47 @@ export const makeAudioSeekTask = (host: EFMedia): AudioSeekTask => {
|
|
|
26
26
|
}
|
|
27
27
|
},
|
|
28
28
|
onComplete: (_value) => {},
|
|
29
|
-
task: async (
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
): Promise<VideoSample | undefined> => {
|
|
29
|
+
task: async (): Promise<VideoSample | undefined> => {
|
|
30
|
+
return undefined;
|
|
31
|
+
// TODO: validate that the audio seek task is not actually used to render any audio
|
|
33
32
|
// CRITICAL FIX: Use the targetSeekTimeMs from args, not host.desiredSeekTimeMs
|
|
34
33
|
// This ensures we use the same seek time that the segment loading tasks used
|
|
35
34
|
|
|
36
|
-
await host.audioSegmentIdTask.taskComplete;
|
|
37
|
-
signal.throwIfAborted(); // Abort if a new seek started
|
|
38
|
-
await host.audioSegmentFetchTask.taskComplete;
|
|
39
|
-
signal.throwIfAborted(); // Abort if a new seek started
|
|
40
|
-
await host.audioInitSegmentFetchTask.taskComplete;
|
|
41
|
-
signal.throwIfAborted(); // Abort if a new seek started
|
|
35
|
+
// await host.audioSegmentIdTask.taskComplete;
|
|
36
|
+
// signal.throwIfAborted(); // Abort if a new seek started
|
|
37
|
+
// await host.audioSegmentFetchTask.taskComplete;
|
|
38
|
+
// signal.throwIfAborted(); // Abort if a new seek started
|
|
39
|
+
// await host.audioInitSegmentFetchTask.taskComplete;
|
|
40
|
+
// signal.throwIfAborted(); // Abort if a new seek started
|
|
42
41
|
|
|
43
|
-
const audioInput = await host.audioInputTask.taskComplete;
|
|
44
|
-
signal.throwIfAborted(); // Abort if a new seek started
|
|
45
|
-
if (!audioInput) {
|
|
46
|
-
|
|
47
|
-
}
|
|
48
|
-
const audioTrack = await audioInput.getFirstAudioTrack();
|
|
49
|
-
if (!audioTrack) {
|
|
50
|
-
|
|
51
|
-
}
|
|
52
|
-
signal.throwIfAborted(); // Abort if a new seek started
|
|
42
|
+
// const audioInput = await host.audioInputTask.taskComplete;
|
|
43
|
+
// signal.throwIfAborted(); // Abort if a new seek started
|
|
44
|
+
// if (!audioInput) {
|
|
45
|
+
// throw new Error("Audio input is not available");
|
|
46
|
+
// }
|
|
47
|
+
// const audioTrack = await audioInput.getFirstAudioTrack();
|
|
48
|
+
// if (!audioTrack) {
|
|
49
|
+
// throw new Error("Audio track is not available");
|
|
50
|
+
// }
|
|
51
|
+
// signal.throwIfAborted(); // Abort if a new seek started
|
|
53
52
|
|
|
54
|
-
const sample = (await audioInput.seek(
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
)) as unknown as VideoSample | undefined;
|
|
58
|
-
signal.throwIfAborted(); // Abort if a new seek started
|
|
53
|
+
// const sample = (await audioInput.seek(
|
|
54
|
+
// audioTrack.id,
|
|
55
|
+
// targetSeekTimeMs, // Use the captured value, not host.desiredSeekTimeMs
|
|
56
|
+
// )) as unknown as VideoSample | undefined;
|
|
57
|
+
// signal.throwIfAborted(); // Abort if a new seek started
|
|
59
58
|
|
|
60
|
-
// If seek returned undefined, it was aborted - don't throw
|
|
61
|
-
if (sample === undefined && signal.aborted) {
|
|
62
|
-
|
|
63
|
-
}
|
|
59
|
+
// // If seek returned undefined, it was aborted - don't throw
|
|
60
|
+
// if (sample === undefined && signal.aborted) {
|
|
61
|
+
// return undefined;
|
|
62
|
+
// }
|
|
64
63
|
|
|
65
|
-
// If we got undefined but weren't aborted, that's an actual error
|
|
66
|
-
if (sample === undefined) {
|
|
67
|
-
|
|
68
|
-
}
|
|
64
|
+
// // If we got undefined but weren't aborted, that's an actual error
|
|
65
|
+
// if (sample === undefined) {
|
|
66
|
+
// throw new Error("Audio seek failed to find sample");
|
|
67
|
+
// }
|
|
69
68
|
|
|
70
|
-
return sample;
|
|
69
|
+
// return sample;
|
|
71
70
|
},
|
|
72
71
|
});
|
|
73
72
|
};
|
|
@@ -25,18 +25,23 @@ export function makeAudioTimeDomainAnalysisTask(element: EFMedia) {
|
|
|
25
25
|
element.fftGain,
|
|
26
26
|
element.shouldInterpolateFrequencies,
|
|
27
27
|
] as const,
|
|
28
|
-
task: async () => {
|
|
28
|
+
task: async (_, { signal }) => {
|
|
29
29
|
await element.audioBufferTask.taskComplete;
|
|
30
|
+
signal.throwIfAborted();
|
|
31
|
+
|
|
30
32
|
if (!element.audioBufferTask.value) return null;
|
|
31
33
|
if (element.currentSourceTimeMs < 0) return null;
|
|
32
34
|
|
|
33
35
|
const currentTimeMs = element.currentSourceTimeMs;
|
|
34
36
|
|
|
35
|
-
//
|
|
36
|
-
const
|
|
37
|
-
|
|
38
|
-
//
|
|
39
|
-
const
|
|
37
|
+
// Calculate exact audio window needed based on fftDecay and frame timing
|
|
38
|
+
const frameIntervalMs = 1000 / 30; // 33.33ms per frame
|
|
39
|
+
|
|
40
|
+
// Need audio from earliest frame to current frame
|
|
41
|
+
const earliestFrameMs =
|
|
42
|
+
currentTimeMs - (element.fftDecay - 1) * frameIntervalMs;
|
|
43
|
+
const fromMs = Math.max(0, earliestFrameMs);
|
|
44
|
+
const maxToMs = currentTimeMs + frameIntervalMs; // Include current frame
|
|
40
45
|
const videoDurationMs = element.intrinsicDurationMs || 0;
|
|
41
46
|
const toMs =
|
|
42
47
|
videoDurationMs > 0 ? Math.min(maxToMs, videoDurationMs) : maxToMs;
|
|
@@ -46,15 +51,18 @@ export function makeAudioTimeDomainAnalysisTask(element: EFMedia) {
|
|
|
46
51
|
return null;
|
|
47
52
|
}
|
|
48
53
|
|
|
54
|
+
// Check cache early - before expensive audio fetching
|
|
55
|
+
// Use a preliminary cache key that doesn't depend on actual startOffsetMs from audio span
|
|
56
|
+
const preliminaryCacheKey = `${element.shouldInterpolateFrequencies}:${element.fftSize}:${element.fftDecay}:${element.fftGain}:${fromMs}:${currentTimeMs}`;
|
|
57
|
+
const cachedData = cache.get(preliminaryCacheKey);
|
|
58
|
+
if (cachedData) {
|
|
59
|
+
return cachedData;
|
|
60
|
+
}
|
|
61
|
+
|
|
49
62
|
const { fetchAudioSpanningTime: fetchAudioSpan } = await import(
|
|
50
63
|
"../shared/AudioSpanUtils.ts"
|
|
51
64
|
);
|
|
52
|
-
const audioSpan = await fetchAudioSpan(
|
|
53
|
-
element,
|
|
54
|
-
fromMs,
|
|
55
|
-
toMs,
|
|
56
|
-
new AbortController().signal,
|
|
57
|
-
);
|
|
65
|
+
const audioSpan = await fetchAudioSpan(element, fromMs, toMs, signal);
|
|
58
66
|
|
|
59
67
|
if (!audioSpan || !audioSpan.blob) {
|
|
60
68
|
console.warn("Time domain analysis skipped: no audio data available");
|
|
@@ -69,13 +77,6 @@ export function makeAudioTimeDomainAnalysisTask(element: EFMedia) {
|
|
|
69
77
|
// Use actual startOffset from audioSpan (relative to requested time)
|
|
70
78
|
const startOffsetMs = audioSpan.startMs;
|
|
71
79
|
|
|
72
|
-
// ORIGINAL ALGORITHM FROM HERE - unchanged customer logic
|
|
73
|
-
const smoothedKey = `${element.shouldInterpolateFrequencies}:${element.fftSize}:${element.fftDecay}:${element.fftGain}:${startOffsetMs}:${currentTimeMs}`;
|
|
74
|
-
const cachedData = cache.get(smoothedKey);
|
|
75
|
-
if (cachedData) {
|
|
76
|
-
return cachedData;
|
|
77
|
-
}
|
|
78
|
-
|
|
79
80
|
// Process multiple frames with decay, similar to the reference code
|
|
80
81
|
const framesData = await Promise.all(
|
|
81
82
|
Array.from({ length: element.fftDecay }, async (_, frameIndex) => {
|
|
@@ -176,7 +177,8 @@ export function makeAudioTimeDomainAnalysisTask(element: EFMedia) {
|
|
|
176
177
|
smoothedData[i] = Math.min(255, Math.round(weightedSum / weightSum));
|
|
177
178
|
}
|
|
178
179
|
|
|
179
|
-
|
|
180
|
+
// Cache with the preliminary key so future requests can skip audio fetching
|
|
181
|
+
cache.set(preliminaryCacheKey, smoothedData);
|
|
180
182
|
return smoothedData;
|
|
181
183
|
},
|
|
182
184
|
});
|
|
@@ -23,9 +23,6 @@ export const makeVideoSeekTask = (host: EFVideo): VideoSeekTask => {
|
|
|
23
23
|
[targetSeekTimeMs],
|
|
24
24
|
{ signal },
|
|
25
25
|
): Promise<VideoSample | undefined> => {
|
|
26
|
-
// CRITICAL FIX: Use the targetSeekTimeMs from args, not host.desiredSeekTimeMs
|
|
27
|
-
// This ensures we use the same seek time that the segment loading tasks used
|
|
28
|
-
|
|
29
26
|
await host.mediaEngineTask.taskComplete;
|
|
30
27
|
signal.throwIfAborted(); // Abort if a new seek started
|
|
31
28
|
await host.videoSegmentIdTask.taskComplete;
|