@editframe/elements 0.18.3-beta.0 → 0.18.8-beta.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/elements/EFAudio.d.ts +1 -2
- package/dist/elements/EFAudio.js +6 -9
- package/dist/elements/EFMedia/AssetMediaEngine.browsertest.d.ts +0 -0
- package/dist/elements/EFMedia/AssetMediaEngine.d.ts +2 -4
- package/dist/elements/EFMedia/AssetMediaEngine.js +34 -5
- package/dist/elements/EFMedia/BaseMediaEngine.js +20 -1
- package/dist/elements/EFMedia/BufferedSeekingInput.d.ts +5 -5
- package/dist/elements/EFMedia/BufferedSeekingInput.js +27 -7
- package/dist/elements/EFMedia/JitMediaEngine.d.ts +1 -1
- package/dist/elements/EFMedia/JitMediaEngine.js +22 -3
- package/dist/elements/EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.js +4 -1
- package/dist/elements/EFMedia/audioTasks/makeAudioInputTask.js +11 -3
- package/dist/elements/EFMedia/audioTasks/makeAudioSeekTask.chunkboundary.regression.browsertest.d.ts +0 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioSeekTask.js +17 -4
- package/dist/elements/EFMedia/audioTasks/makeAudioSegmentFetchTask.js +11 -1
- package/dist/elements/EFMedia/audioTasks/makeAudioSegmentIdTask.js +3 -2
- package/dist/elements/EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.js +4 -1
- package/dist/elements/EFMedia/shared/PrecisionUtils.d.ts +28 -0
- package/dist/elements/EFMedia/shared/PrecisionUtils.js +29 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoSeekTask.js +11 -2
- package/dist/elements/EFMedia/videoTasks/makeVideoSegmentFetchTask.js +11 -1
- package/dist/elements/EFMedia/videoTasks/makeVideoSegmentIdTask.js +3 -2
- package/dist/elements/EFMedia.d.ts +0 -12
- package/dist/elements/EFMedia.js +4 -30
- package/dist/elements/EFTimegroup.js +12 -17
- package/dist/elements/EFVideo.d.ts +0 -9
- package/dist/elements/EFVideo.js +0 -7
- package/dist/elements/SampleBuffer.js +6 -6
- package/dist/getRenderInfo.d.ts +2 -2
- package/dist/gui/ContextMixin.js +71 -17
- package/dist/gui/TWMixin.js +1 -1
- package/dist/style.css +1 -1
- package/dist/transcoding/types/index.d.ts +9 -9
- package/package.json +2 -3
- package/src/elements/EFAudio.browsertest.ts +7 -7
- package/src/elements/EFAudio.ts +7 -20
- package/src/elements/EFMedia/AssetMediaEngine.browsertest.ts +100 -0
- package/src/elements/EFMedia/AssetMediaEngine.ts +72 -7
- package/src/elements/EFMedia/BaseMediaEngine.ts +50 -1
- package/src/elements/EFMedia/BufferedSeekingInput.browsertest.ts +135 -54
- package/src/elements/EFMedia/BufferedSeekingInput.ts +74 -17
- package/src/elements/EFMedia/JitMediaEngine.ts +58 -2
- package/src/elements/EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.ts +10 -1
- package/src/elements/EFMedia/audioTasks/makeAudioInputTask.ts +16 -8
- package/src/elements/EFMedia/audioTasks/makeAudioSeekTask.chunkboundary.regression.browsertest.ts +199 -0
- package/src/elements/EFMedia/audioTasks/makeAudioSeekTask.ts +35 -4
- package/src/elements/EFMedia/audioTasks/makeAudioSegmentFetchTask.ts +12 -1
- package/src/elements/EFMedia/audioTasks/makeAudioSegmentIdTask.ts +3 -2
- package/src/elements/EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.ts +10 -1
- package/src/elements/EFMedia/shared/PrecisionUtils.ts +46 -0
- package/src/elements/EFMedia/videoTasks/makeVideoSeekTask.ts +27 -3
- package/src/elements/EFMedia/videoTasks/makeVideoSegmentFetchTask.ts +12 -1
- package/src/elements/EFMedia/videoTasks/makeVideoSegmentIdTask.ts +3 -2
- package/src/elements/EFMedia.browsertest.ts +73 -33
- package/src/elements/EFMedia.ts +11 -54
- package/src/elements/EFTimegroup.ts +21 -26
- package/src/elements/EFVideo.browsertest.ts +895 -162
- package/src/elements/EFVideo.ts +0 -16
- package/src/elements/SampleBuffer.ts +8 -10
- package/src/gui/ContextMixin.ts +104 -26
- package/src/transcoding/types/index.ts +10 -6
- package/test/EFVideo.framegen.browsertest.ts +1 -1
- package/test/__cache__/GET__api_v1_transcode_audio_1_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__32da3954ba60c96ad732020c65a08ebc/metadata.json +3 -3
- package/test/__cache__/GET__api_v1_transcode_audio_1_mp4_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4_bytes_0__9ed2d25c675aa6bb6ff5b3ae23887c71/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_audio_1_mp4_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4_bytes_0__9ed2d25c675aa6bb6ff5b3ae23887c71/metadata.json +22 -0
- package/test/__cache__/GET__api_v1_transcode_audio_2_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__b0b2b07efcf607de8ee0f650328c32f7/metadata.json +3 -3
- package/test/__cache__/GET__api_v1_transcode_audio_2_mp4_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4_bytes_0__d5a3309a2bf756dd6e304807eb402f56/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_audio_2_mp4_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4_bytes_0__d5a3309a2bf756dd6e304807eb402f56/metadata.json +22 -0
- package/test/__cache__/GET__api_v1_transcode_audio_3_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a75c2252b542e0c152c780e9a8d7b154/metadata.json +3 -3
- package/test/__cache__/GET__api_v1_transcode_audio_3_mp4_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4_bytes_0__773254bb671e3466fca8677139fb239e/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_audio_3_mp4_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4_bytes_0__773254bb671e3466fca8677139fb239e/metadata.json +22 -0
- package/test/__cache__/GET__api_v1_transcode_audio_4_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a64ff1cfb1b52cae14df4b5dfa1e222b/metadata.json +3 -3
- package/test/__cache__/GET__api_v1_transcode_audio_init_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__e66d2c831d951e74ad0aeaa6489795d0/metadata.json +3 -3
- package/test/__cache__/GET__api_v1_transcode_high_1_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__26197f6f7c46cacb0a71134131c3f775/metadata.json +3 -3
- package/test/__cache__/GET__api_v1_transcode_high_2_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__4cb6774cd3650ccf59c8f8dc6678c0b9/metadata.json +3 -3
- package/test/__cache__/GET__api_v1_transcode_high_4_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a6fb05a22b18d850f7f2950bbcdbdeed/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_high_4_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a6fb05a22b18d850f7f2950bbcdbdeed/metadata.json +21 -0
- package/test/__cache__/GET__api_v1_transcode_high_5_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a50058c7c3602e90879fe3428ed891f4/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_high_5_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a50058c7c3602e90879fe3428ed891f4/metadata.json +21 -0
- package/test/__cache__/GET__api_v1_transcode_high_init_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__0798c479b44aaeef850609a430f6e613/metadata.json +3 -3
- package/test/__cache__/GET__api_v1_transcode_manifest_json_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__3be92a0437de726b431ed5af2369158a/data.bin +1 -1
- package/test/__cache__/GET__api_v1_transcode_manifest_json_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__3be92a0437de726b431ed5af2369158a/metadata.json +4 -4
- package/test/recordReplayProxyPlugin.js +50 -0
- package/types.json +1 -1
- package/dist/DecoderResetFrequency.test.d.ts +0 -1
- package/dist/DecoderResetRecovery.test.d.ts +0 -1
- package/dist/ScrubTrackManager.d.ts +0 -96
- package/dist/elements/EFMedia/services/AudioElementFactory.browsertest.d.ts +0 -1
- package/dist/elements/EFMedia/services/AudioElementFactory.d.ts +0 -22
- package/dist/elements/EFMedia/services/AudioElementFactory.js +0 -72
- package/dist/elements/EFMedia/services/MediaSourceService.browsertest.d.ts +0 -1
- package/dist/elements/EFMedia/services/MediaSourceService.d.ts +0 -47
- package/dist/elements/EFMedia/services/MediaSourceService.js +0 -73
- package/dist/gui/services/ElementConnectionManager.browsertest.d.ts +0 -1
- package/dist/gui/services/ElementConnectionManager.d.ts +0 -59
- package/dist/gui/services/ElementConnectionManager.js +0 -128
- package/dist/gui/services/PlaybackController.browsertest.d.ts +0 -1
- package/dist/gui/services/PlaybackController.d.ts +0 -103
- package/dist/gui/services/PlaybackController.js +0 -290
- package/dist/services/MediaSourceManager.d.ts +0 -62
- package/dist/services/MediaSourceManager.js +0 -211
- package/src/elements/EFMedia/services/AudioElementFactory.browsertest.ts +0 -325
- package/src/elements/EFMedia/services/AudioElementFactory.ts +0 -119
- package/src/elements/EFMedia/services/MediaSourceService.browsertest.ts +0 -257
- package/src/elements/EFMedia/services/MediaSourceService.ts +0 -102
- package/src/gui/services/ElementConnectionManager.browsertest.ts +0 -263
- package/src/gui/services/ElementConnectionManager.ts +0 -224
- package/src/gui/services/PlaybackController.browsertest.ts +0 -437
- package/src/gui/services/PlaybackController.ts +0 -521
- package/src/services/MediaSourceManager.ts +0 -333
package/src/elements/EFMedia/BufferedSeekingInput.ts

@@ -6,13 +6,14 @@ import {
   VideoSampleSink,
 } from "mediabunny";
 import { type MediaSample, SampleBuffer } from "../SampleBuffer";
+import { roundToMilliseconds } from "./shared/PrecisionUtils";

 interface BufferedSeekingInputOptions {
   videoBufferSize?: number;
   audioBufferSize?: number;
   /**
-   *
-   * Applied during seeking to
+   * Timeline offset in milliseconds to map user timeline to media timeline.
+   * Applied during seeking to handle media that doesn't start at 0ms.
    */
   startTimeOffsetMs?: number;
 }
@@ -35,8 +36,8 @@ export class BufferedSeekingInput {
   private trackSeekPromises: Map<number, Promise<any>> = new Map();

   /**
-   *
-   * Applied during seeking to
+   * Timeline offset in milliseconds to map user timeline to media timeline.
+   * Applied during seeking to handle media that doesn't start at 0ms.
    */
   private readonly startTimeOffsetMs: number;

@@ -178,8 +179,11 @@ export class BufferedSeekingInput {
   }

   async seek(trackId: number, timeMs: number) {
-    // Apply
-    const
+    // Apply timeline offset to map user timeline to media timeline
+    const mediaTimeMs = timeMs + this.startTimeOffsetMs;
+
+    // Round using consistent precision handling
+    const roundedMediaTimeMs = roundToMilliseconds(mediaTimeMs);

     // Serialize seek operations per track (but don't block iterator creation)
     const existingSeek = this.trackSeekPromises.get(trackId);
@@ -187,7 +191,7 @@ export class BufferedSeekingInput {
       await existingSeek;
     }

-    const seekPromise = this.seekSafe(trackId,
+    const seekPromise = this.seekSafe(trackId, roundedMediaTimeMs);
     this.trackSeekPromises.set(trackId, seekPromise);

     try {
@@ -226,23 +230,60 @@ export class BufferedSeekingInput {
     // biome-ignore lint/style/noNonNullAssertion: we know the map has the key
     const trackBuffer = this.trackBuffers.get(trackId)!;

-    if (timeMs < trackBuffer.firstTimestamp * 1000) {
-      await this.resetIterator(trackId);
-    }
-
-    const alreadyInBuffer = trackBuffer.find(timeMs);
     const track = await this.getTrack(trackId);

     // Early validation: check if seek time is outside track bounds
-
-    const
+    // Use consistent precision handling throughout
+    const firstTimestampMs = roundToMilliseconds(
+      (await track.getFirstTimestamp()) * 1000,
+    );
+    let roundedTimeMs = roundToMilliseconds(timeMs);
+
+    // During rapid scrubbing, track.computeDuration() may only return the duration
+    // of currently loaded segments. Only validate against the start time, as the
+    // end time may not be accurate until all segments are loaded.
+    if (roundedTimeMs < firstTimestampMs) {
+      // GRACEFUL HANDLING: During rapid seeking, tasks can complete out of order, causing
+      // the audio buffer to contain segments for a different time range than the seek target.
+      // Only apply graceful adjustment if we have buffer contents that suggest a race condition.
+      // For empty buffers, allow normal seeking to proceed which may load the appropriate segments.
+
+      const bufferContents = trackBuffer.getContents();
+
+      if (bufferContents.length > 0) {
+        // We have loaded segments but they're for a different time range - adjust gracefully
+        timeMs = firstTimestampMs;
+        roundedTimeMs = roundToMilliseconds(timeMs);
+      } else {
+        // Empty buffer - let normal seeking proceed to load appropriate segments
+        // This maintains normal seeking behavior for tests and initial loads
+      }
+    }
+
+    // Note: If seeking beyond currently loaded segments, allow it to proceed
+    // The segment loading logic will handle fetching the needed segments
+    // No logging needed as this is a normal part of seeking behavior

-    if
-
-
+    // Check if we need to reset iterator for seeks outside current buffer range
+    const bufferContents = trackBuffer.getContents();
+    if (bufferContents.length > 0) {
+      const bufferStartMs = roundToMilliseconds(
+        trackBuffer.firstTimestamp * 1000,
       );
+      const lastSample = bufferContents[bufferContents.length - 1];
+      const bufferEndMs = lastSample
+        ? roundToMilliseconds(
+            (lastSample.timestamp + (lastSample.duration || 0)) * 1000,
+          )
+        : bufferStartMs;
+
+      // If seeking outside current buffer range, reset iterator to load appropriate data
+      if (roundedTimeMs < bufferStartMs || roundedTimeMs > bufferEndMs) {
+        await this.resetIterator(trackId);
+      }
     }

+    const alreadyInBuffer = trackBuffer.find(timeMs);
     if (alreadyInBuffer) return alreadyInBuffer;

     const iterator = await this.getTrackIterator(trackId);
@@ -260,6 +301,22 @@ export class BufferedSeekingInput {
      }
    }

+    // If no exact sample found and we've reached the end of the track,
+    // check if the seek time is beyond the actual track duration.
+    // If so, return the last available sample instead of throwing an error.
+    const finalBufferContents = trackBuffer.getContents();
+    if (finalBufferContents.length > 0) {
+      const lastSample = finalBufferContents[finalBufferContents.length - 1];
+      const lastSampleEndMs = roundToMilliseconds(
+        ((lastSample?.timestamp || 0) + (lastSample?.duration || 0)) * 1000,
+      );
+
+      // If seeking past the last sample, return the last sample silently
+      if (roundToMilliseconds(timeMs) >= lastSampleEndMs) {
+        return lastSample;
+      }
+    }
+
    throw new NoSample(
      `Sample not found for time ${timeMs} in ${track.type} track ${trackId}`,
    );
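
The reworked seek() path above maps the caller's timeline onto the media timeline before any range check runs. A minimal standalone sketch of that arithmetic, using hypothetical numbers (the 32ms offset and the seek time are illustrative, not values taken from this package's fixtures):

// Illustrative sketch only; startTimeOffsetMs and the seek time are made up.
import { roundToMilliseconds } from "./shared/PrecisionUtils";

const startTimeOffsetMs = 32; // hypothetical: media starts 32ms into the user timeline
const userSeekMs = 4000.000000000001; // the kind of float artifact the regression test exercises

const mediaTimeMs = userSeekMs + startTimeOffsetMs; // 4032.000000000001
const roundedMediaTimeMs = roundToMilliseconds(mediaTimeMs); // 4032

// seekSafe() is then called with 4032ms, which lines up with the first audio
// sample of the segment instead of falling just short of it.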
package/src/elements/EFMedia/JitMediaEngine.ts

@@ -41,6 +41,7 @@ export class JitMediaEngine extends BaseMediaEngine implements MediaEngine {
       trackId: undefined,
       src: this.data.sourceUrl,
       segmentDurationMs: rendition.segmentDurationMs,
+      segmentDurationsMs: rendition.segmentDurationsMs,
     };
   }

@@ -53,6 +54,7 @@ export class JitMediaEngine extends BaseMediaEngine implements MediaEngine {
       trackId: undefined,
       src: this.data.sourceUrl,
       segmentDurationMs: rendition.segmentDurationMs,
+      segmentDurationsMs: rendition.segmentDurationsMs,
     };
   }

@@ -96,15 +98,69 @@ export class JitMediaEngine extends BaseMediaEngine implements MediaEngine {
     desiredSeekTimeMs: number,
     rendition: VideoRendition | AudioRendition,
   ) {
+    // Don't request segments beyond the actual file duration
+    // Note: seeking to exactly durationMs should be allowed (it's the last moment of the file)
+    if (desiredSeekTimeMs > this.durationMs) {
+      return undefined;
+    }
+
+    // Use actual segment durations if available (more accurate)
+    if (
+      rendition.segmentDurationsMs &&
+      rendition.segmentDurationsMs.length > 0
+    ) {
+      let cumulativeTime = 0;
+
+      for (let i = 0; i < rendition.segmentDurationsMs.length; i++) {
+        const segmentDuration = rendition.segmentDurationsMs[i];
+        if (segmentDuration === undefined) {
+          throw new Error("Segment duration is required for JIT metadata");
+        }
+        const segmentStartMs = cumulativeTime;
+        const segmentEndMs = cumulativeTime + segmentDuration;
+
+        // Check if the desired seek time falls within this segment
+        // Special case: for the last segment, include the exact end time
+        const isLastSegment = i === rendition.segmentDurationsMs.length - 1;
+        const includesEndTime =
+          isLastSegment && desiredSeekTimeMs === this.durationMs;
+
+        if (
+          desiredSeekTimeMs >= segmentStartMs &&
+          (desiredSeekTimeMs < segmentEndMs || includesEndTime)
+        ) {
+          return i + 1; // Convert 0-based to 1-based segment ID
+        }
+
+        cumulativeTime += segmentDuration;
+
+        // If we've reached or exceeded file duration, stop
+        if (cumulativeTime >= this.durationMs) {
+          break;
+        }
+      }
+
+      // If we didn't find a segment, return undefined
+      return undefined;
+    }
+
+    // Fall back to fixed duration calculation for backward compatibility
     if (!rendition.segmentDurationMs) {
       throw new Error("Segment duration is required for JIT metadata");
     }
+
     const segmentIndex = Math.floor(
       desiredSeekTimeMs / rendition.segmentDurationMs,
     );
-
-
+
+    // Calculate the actual segment start time
+    const segmentStartMs = segmentIndex * rendition.segmentDurationMs;
+
+    // If this segment would start at or beyond file duration, it doesn't exist
+    if (segmentStartMs >= this.durationMs) {
+      return undefined;
     }
+
     return segmentIndex + 1; // Convert 0-based to 1-based
   }
 }
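
The variable-duration branch above walks cumulative segment durations to map a seek time to a 1-based segment ID. A standalone sketch of that lookup, using a hypothetical manifest (the durations and total duration are illustrative, not values from a real rendition):

// Illustrative sketch; the durations and total duration are hypothetical.
const segmentDurationsMs = [2000, 2000, 2000, 1500];
const durationMs = 7500;

function computeSegmentIdSketch(desiredSeekTimeMs: number): number | undefined {
  if (desiredSeekTimeMs > durationMs) return undefined;
  let cumulativeTime = 0;
  for (let i = 0; i < segmentDurationsMs.length; i++) {
    const segmentDuration = segmentDurationsMs[i];
    if (segmentDuration === undefined) continue;
    const segmentEndMs = cumulativeTime + segmentDuration;
    const isLastSegment = i === segmentDurationsMs.length - 1;
    const includesEndTime = isLastSegment && desiredSeekTimeMs === durationMs;
    if (
      desiredSeekTimeMs >= cumulativeTime &&
      (desiredSeekTimeMs < segmentEndMs || includesEndTime)
    ) {
      return i + 1; // 1-based segment ID
    }
    cumulativeTime = segmentEndMs;
  }
  return undefined;
}

computeSegmentIdSketch(0); // 1
computeSegmentIdSketch(4000); // 3 -- 4000ms falls in [4000ms, 6000ms)
computeSegmentIdSketch(7500); // 4 -- exactly durationMs maps to the last segment
computeSegmentIdSketch(9000); // undefined -- beyond the file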
package/src/elements/EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.ts

@@ -103,7 +103,16 @@ export function makeAudioFrequencyAnalysisTask(element: EFMedia) {
     // ONLY CHANGE: Get real audio data for analysis (same technique as playback)
     const analysisWindowMs = 5000; // Get 5 seconds for better analysis
     const fromMs = Math.max(0, currentTimeMs);
-
+    // Clamp toMs to video duration to prevent requesting segments beyond available content
+    const maxToMs = fromMs + analysisWindowMs;
+    const videoDurationMs = element.intrinsicDurationMs || 0;
+    const toMs =
+      videoDurationMs > 0 ? Math.min(maxToMs, videoDurationMs) : maxToMs;
+
+    // If the clamping results in an invalid range (seeking beyond the end), skip analysis silently
+    if (fromMs >= toMs) {
+      return null;
+    }

     const { fetchAudioSpanningTime: fetchAudioSpan } = await import(
       "../shared/AudioSpanUtils.ts"
package/src/elements/EFMedia/audioTasks/makeAudioInputTask.ts

@@ -17,19 +17,27 @@ export const makeAudioInputTask = (host: EFMedia): InputTask => {
       console.error("audioInputTask error", error);
     },
     onComplete: (_value) => {},
-    task: async () => {
+    task: async (_, { signal }) => {
       const initSegment = await host.audioInitSegmentFetchTask.taskComplete;
+      signal.throwIfAborted(); // Abort if a new seek started
       const segment = await host.audioSegmentFetchTask.taskComplete;
+      signal.throwIfAborted(); // Abort if a new seek started
       if (!initSegment || !segment) {
         throw new Error("Init segment or segment is not available");
       }
-
-
-
-
-
-
-      );
+
+      // Get startTimeOffsetMs from the audio rendition if available
+      const mediaEngine = await host.mediaEngineTask.taskComplete;
+      const audioRendition = mediaEngine?.audioRendition;
+      const startTimeOffsetMs = audioRendition?.startTimeOffsetMs;
+
+      const arrayBuffer = await new Blob([initSegment, segment]).arrayBuffer();
+      signal.throwIfAborted(); // Abort if a new seek started
+      return new BufferedSeekingInput(arrayBuffer, {
+        videoBufferSize: EFMedia.VIDEO_SAMPLE_BUFFER_SIZE,
+        audioBufferSize: EFMedia.AUDIO_SAMPLE_BUFFER_SIZE,
+        startTimeOffsetMs,
+      });
     },
   });
 };
package/src/elements/EFMedia/audioTasks/makeAudioSeekTask.chunkboundary.regression.browsertest.ts
ADDED

@@ -0,0 +1,199 @@
+import { afterEach, beforeEach, describe } from "vitest";
+import { test as baseTest } from "../../../../test/useMSW.js";
+import type { EFConfiguration } from "../../../gui/EFConfiguration.js";
+import "../../../gui/EFPreview.js";
+import "../../EFTimegroup.js";
+import type { EFTimegroup } from "../../EFTimegroup.js";
+import "../../EFVideo.js";
+import type { EFVideo } from "../../EFVideo.js";
+
+const test = baseTest.extend<{
+  timegroup: EFTimegroup;
+  video: EFVideo;
+  configuration: EFConfiguration;
+}>({
+  timegroup: async ({}, use) => {
+    const timegroup = document.createElement("ef-timegroup");
+    timegroup.setAttribute("mode", "sequence");
+    timegroup.setAttribute("id", "test-timegroup"); // Required for localStorage key
+    timegroup.style.cssText =
+      "position: relative; height: 500px; width: 1000px; overflow: hidden; background-color: rgb(100 116 139);";
+    await use(timegroup);
+  },
+  configuration: async ({ expect }, use) => {
+    const configuration = document.createElement("ef-configuration");
+    configuration.innerHTML = `<h1 style="font: 10px monospace">${expect.getState().currentTestName}</h1>`;
+    // Use integrated proxy server (same host/port as test runner)
+    const apiHost = `${window.location.protocol}//${window.location.host}`;
+    configuration.setAttribute("api-host", apiHost);
+    configuration.apiHost = apiHost;
+    document.body.appendChild(configuration);
+    await use(configuration);
+  },
+  video: async ({ configuration, timegroup }, use) => {
+    const video = document.createElement("ef-video");
+    video.id = "bars-n-tone2";
+    video.src = "http://web:3000/head-moov-480p.mp4"; // Real video from working simple-demo
+    video.style.cssText =
+      "width: 100%; height: 100%; object-fit: cover; position: absolute; top: 0; left: 0;";
+
+    // Create the exact structure from simple-demo.html
+    const innerTimegroup = document.createElement("ef-timegroup");
+    innerTimegroup.mode = "contain";
+    innerTimegroup.style.cssText =
+      "position: absolute; width: 100%; height: 100%;";
+    innerTimegroup.append(video);
+    timegroup.append(innerTimegroup);
+    configuration.append(timegroup);
+
+    await use(video);
+  },
+});
+
+/**
+ * Regression test for chunk boundary seeking issue
+ *
+ * Root cause: 32ms coordination gap between PlaybackController and audio track boundaries
+ * - PlaybackController seeks to chunk boundary: 4000ms
+ * - Audio track actually starts at: 4032ms
+ * - Error: "Seek time 4000ms is outside track range [4032ms, 6016ms]"
+ *
+ * This occurs during active playbook and browser reloads at 4s mark.
+ * Fix: Coordinate chunk boundaries or add tolerance for small gaps.
+ */
+describe("Audio Seek Task - Chunk Boundary Regression Test", () => {
+  beforeEach(() => {
+    // Clean up DOM and localStorage
+    while (document.body.children.length) {
+      document.body.children[0]?.remove();
+    }
+    localStorage.clear();
+  });
+
+  afterEach(async () => {
+    // Clean up any remaining elements
+    const videos = document.querySelectorAll("ef-video");
+    for (const video of videos) {
+      video.remove();
+    }
+  });
+
+  test("should not throw RangeError when seeking to exact 4000ms during playback", async ({
+    video,
+    timegroup,
+    expect,
+  }) => {
+    await video.mediaEngineTask.taskComplete;
+    await video.audioInputTask.taskComplete;
+
+    // Simulate active playback - start playing from beginning
+    timegroup.currentTimeMs = 0;
+    await video.audioSeekTask.taskComplete;
+
+    // Now seek to the exact problematic time that causes:
+    // "Seek time 4000ms is outside track range [4032ms, 6016ms]"
+    const exactChunkBoundary = 4000;
+    timegroup.currentTimeMs = exactChunkBoundary;
+
+    // Should not throw RangeError due to track range mismatch
+    await expect(video.audioSeekTask.taskComplete).resolves.toBeDefined();
+  });
+
+  test("should not throw RangeError during progressive playback across segments", async ({
+    video,
+    timegroup,
+    expect,
+  }) => {
+    await video.mediaEngineTask.taskComplete;
+    await video.audioInputTask.taskComplete;
+
+    // Simulate progressive playback that loads segments on demand
+    // Start at 3500ms to be just before the 4-second boundary
+    timegroup.currentTimeMs = 3500;
+    await video.audioSeekTask.taskComplete;
+
+    // Now cross the 4-second chunk boundary where track range issues occur
+    // This should trigger the state where track range is [4032ms, 6016ms]
+    // but we're seeking to 4000ms
+    timegroup.currentTimeMs = 4000.000000000001; // The exact error from logs
+
+    // Should not throw "Seek time 4000.000000000001ms is outside track range [4032ms, 6016ms]"
+    await expect(video.audioSeekTask.taskComplete).resolves.toBeDefined();
+  });
+
+  test("should not throw RangeError when localStorage restoration causes 0ms to 4000ms race condition", async ({
+    video,
+    timegroup,
+    expect,
+  }) => {
+    // REPRODUCE THE RACE CONDITION: Simulate localStorage having "4.0"
+    // This mimics the exact simple-demo.html scenario where:
+    // 1. Media loads with assumption of currentTimeMs = 0
+    // 2. localStorage restores currentTime to 4.0 seconds
+    // 3. Seeking 4000ms in segments loaded for 0ms range triggers RangeError
+
+    // Set localStorage BEFORE media finishes initializing
+    if (timegroup.id) {
+      localStorage.setItem(`ef-timegroup-${timegroup.id}`, "4.0");
+    }
+
+    // Wait for media engine but NOT for full initialization
+    await video.mediaEngineTask.taskComplete;
+
+    // Now trigger the localStorage restoration that happens in waitForMediaDurations().then()
+    // This will load currentTime = 4.0 from localStorage, jumping from 0ms to 4000ms
+    timegroup.currentTime = timegroup.maybeLoadTimeFromLocalStorage();
+
+    // This should trigger: "Seek time 4000ms is outside track range [Yms, Zms]"
+    // because segments were loaded for 0ms but we're now seeking 4000ms
+    await expect(video.audioSeekTask.taskComplete).resolves.toBeDefined();
+  });
+
+  test("should not throw RangeError when forced segment coordination mismatch occurs", async ({
+    video,
+    timegroup,
+    expect,
+  }) => {
+    await video.mediaEngineTask.taskComplete;
+
+    // FORCE SPECIFIC SEGMENT LOADING: Load a segment for 8000ms (segment 5)
+    timegroup.currentTimeMs = 8000;
+    await video.audioSegmentIdTask.taskComplete;
+    await video.audioSegmentFetchTask.taskComplete;
+    await video.audioInputTask.taskComplete;
+
+    // Verify we have segment 5 loaded (8000ms / 15000ms = segment 1, but 1-based = segment 1...
+    // Actually 8000ms maps to segment 5 based on the actual segment calculation)
+    const segmentId = video.audioSegmentIdTask.value;
+    expect(segmentId).toBe(4);
+
+    // Now seek to a time in a different segment to test coordination
+    timegroup.currentTimeMs = 4000;
+
+    // This tests the fundamental segment coordination issue:
+    // - We loaded segment 5 for 8000ms
+    // - Now seeking to 4000ms which should be in a different segment
+    // - Tests that seek doesn't fail due to segment boundary coordination
+    await expect(video.audioSeekTask.taskComplete).resolves.toBeDefined();
+  });
+
+  test("should not throw RangeError when rapidly crossing segment boundaries", async ({
+    video,
+    timegroup,
+    expect,
+  }) => {
+    await video.mediaEngineTask.taskComplete;
+
+    // RAPID BOUNDARY CROSSING: This tests timing-sensitive segment coordination
+    const boundaries = [1000, 4000, 8000, 3000, 7000]; // Jump around within segment 1
+
+    for (const timeMs of boundaries) {
+      timegroup.currentTimeMs = timeMs;
+      // Don't await - test rapid succession to trigger coordination issues
+    }
+
+    // Final seek - this should not throw even after rapid boundary crossing
+    timegroup.currentTimeMs = 4000;
+    await expect(video.audioSeekTask.taskComplete).resolves.toBeDefined();
+  });
+});
package/src/elements/EFMedia/audioTasks/makeAudioSeekTask.ts

@@ -13,16 +13,35 @@ export const makeAudioSeekTask = (host: EFMedia): AudioSeekTask => {
     onError: (error) => {
       if (error instanceof IgnorableError) {
         console.info("audioSeekTask aborted");
+        return;
+      }
+      if (error instanceof DOMException) {
+        console.error(
+          `audioSeekTask error: ${error.message} ${error.name} ${error.code}`,
+        );
+      } else if (error instanceof Error) {
+        console.error(`audioSeekTask error ${error.name}: ${error.message}`);
+      } else {
+        console.error("audioSeekTask unknown error", error);
       }
-      console.error("audioSeekTask error", error);
     },
     onComplete: (_value) => {},
-    task: async (
+    task: async (
+      [targetSeekTimeMs],
+      { signal },
+    ): Promise<VideoSample | undefined> => {
+      // CRITICAL FIX: Use the targetSeekTimeMs from args, not host.desiredSeekTimeMs
+      // This ensures we use the same seek time that the segment loading tasks used
+
       await host.audioSegmentIdTask.taskComplete;
+      signal.throwIfAborted(); // Abort if a new seek started
       await host.audioSegmentFetchTask.taskComplete;
+      signal.throwIfAborted(); // Abort if a new seek started
       await host.audioInitSegmentFetchTask.taskComplete;
+      signal.throwIfAborted(); // Abort if a new seek started

       const audioInput = await host.audioInputTask.taskComplete;
+      signal.throwIfAborted(); // Abort if a new seek started
       if (!audioInput) {
         throw new Error("Audio input is not available");
       }
@@ -30,11 +49,23 @@ export const makeAudioSeekTask = (host: EFMedia): AudioSeekTask => {
       if (!audioTrack) {
         throw new Error("Audio track is not available");
       }
+      signal.throwIfAborted(); // Abort if a new seek started

       const sample = (await audioInput.seek(
         audioTrack.id,
-        host.desiredSeekTimeMs
-      )) as unknown as VideoSample;
+        targetSeekTimeMs, // Use the captured value, not host.desiredSeekTimeMs
+      )) as unknown as VideoSample | undefined;
+      signal.throwIfAborted(); // Abort if a new seek started
+
+      // If seek returned undefined, it was aborted - don't throw
+      if (sample === undefined && signal.aborted) {
+        return undefined;
+      }
+
+      // If we got undefined but weren't aborted, that's an actual error
+      if (sample === undefined) {
+        throw new Error("Audio seek failed to find sample");
+      }

       return sample;
     },
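
The repeated signal.throwIfAborted() calls above implement cooperative cancellation: each await is a point where a newer seek may have superseded this one, so the task bails out rather than applying a stale result. A minimal sketch of the pattern in isolation (the awaited promises are stand-ins, not APIs from this package):

// Illustrative sketch of cooperative cancellation with AbortSignal.
async function seekWithAbort(signal: AbortSignal): Promise<string | undefined> {
  const segmentId = await Promise.resolve("segment-1");
  signal.throwIfAborted(); // a newer seek has started; stop here
  const segment = await Promise.resolve(`fetched:${segmentId}`);
  signal.throwIfAborted();
  return segment;
}

const controller = new AbortController();
const pending = seekWithAbort(controller.signal).catch((err) => {
  // throwIfAborted() rejects with the signal's reason (an AbortError by default)
  if (err instanceof DOMException && err.name === "AbortError") return undefined;
  throw err;
});
controller.abort(); // e.g. the user scrubbed again before this seek finished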
package/src/elements/EFMedia/audioTasks/makeAudioSegmentFetchTask.ts

@@ -20,7 +20,18 @@ export const makeAudioSegmentFetchTask = (
       const mediaEngine = await getLatestMediaEngine(host, signal);
       const segmentId = await host.audioSegmentIdTask.taskComplete;
       if (segmentId === undefined) {
-
+        // Provide more context in the error to help with debugging
+        const rendition = mediaEngine.audioRendition;
+        const debugInfo = {
+          hasRendition: !!rendition,
+          segmentDurationMs: rendition?.segmentDurationMs,
+          segmentDurationsMs: rendition?.segmentDurationsMs?.length || 0,
+          desiredSeekTimeMs: host.desiredSeekTimeMs,
+          intrinsicDurationMs: host.intrinsicDurationMs,
+        };
+        throw new Error(
+          `Segment ID is not available for audio. Debug info: ${JSON.stringify(debugInfo)}`,
+        );
       }

       // SIMPLIFIED: Direct call to mediaEngine - deduplication is built-in
package/src/elements/EFMedia/audioTasks/makeAudioSegmentIdTask.ts

@@ -12,10 +12,11 @@ export const makeAudioSegmentIdTask = (
       console.error("audioSegmentIdTask error", error);
     },
     onComplete: (_value) => {},
-    task: async (
+    task: async ([, targetSeekTimeMs], { signal }) => {
       const mediaEngine = await getLatestMediaEngine(host, signal);
+      signal.throwIfAborted(); // Abort if a new seek started
       return mediaEngine.computeSegmentId(
-        host.desiredSeekTimeMs
+        targetSeekTimeMs, // Use captured value, not host.desiredSeekTimeMs
         mediaEngine.getAudioRendition(),
       );
     },
package/src/elements/EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.ts

@@ -35,7 +35,16 @@ export function makeAudioTimeDomainAnalysisTask(element: EFMedia) {
     // ONLY CHANGE: Get real audio data for analysis (same technique as playback)
     const analysisWindowMs = 5000; // Get 5 seconds for better analysis
     const fromMs = Math.max(0, currentTimeMs);
-
+    // Clamp toMs to video duration to prevent requesting segments beyond available content
+    const maxToMs = fromMs + analysisWindowMs;
+    const videoDurationMs = element.intrinsicDurationMs || 0;
+    const toMs =
+      videoDurationMs > 0 ? Math.min(maxToMs, videoDurationMs) : maxToMs;
+
+    // If the clamping results in an invalid range (seeking beyond the end), skip analysis silently
+    if (fromMs >= toMs) {
+      return null;
+    }

     const { fetchAudioSpanningTime: fetchAudioSpan } = await import(
       "../shared/AudioSpanUtils.ts"
package/src/elements/EFMedia/shared/PrecisionUtils.ts
ADDED

@@ -0,0 +1,46 @@
+/**
+ * Centralized precision utilities for consistent timing calculations across the media pipeline.
+ *
+ * The key insight is that floating-point precision errors can cause inconsistencies between:
+ * 1. Segment selection logic (in AssetMediaEngine.computeSegmentId)
+ * 2. Sample finding logic (in SampleBuffer.find)
+ * 3. Timeline mapping (in BufferedSeekingInput.seek)
+ *
+ * All timing calculations must use the same rounding strategy to ensure consistency.
+ */
+
+/**
+ * Round time to millisecond precision to handle floating-point precision issues.
+ * Uses Math.round for consistent behavior across the entire pipeline.
+ *
+ * This function should be used for ALL time-related calculations that need to be
+ * compared between different parts of the system.
+ */
+export const roundToMilliseconds = (timeMs: number): number => {
+  // Round to 3 decimal places (microsecond precision)
+  return Math.round(timeMs * 1000) / 1000;
+};
+
+/**
+ * Convert media time (in seconds) to scaled time units using consistent rounding.
+ * This is used in segment selection to convert from milliseconds to timescale units.
+ */
+export const convertToScaledTime = (
+  timeMs: number,
+  timescale: number,
+): number => {
+  const scaledTime = (timeMs / 1000) * timescale;
+  return Math.round(scaledTime);
+};
+
+/**
+ * Convert scaled time units back to media time (in milliseconds) using consistent rounding.
+ * This is the inverse of convertToScaledTime.
+ */
+export const convertFromScaledTime = (
+  scaledTime: number,
+  timescale: number,
+): number => {
+  const timeMs = (scaledTime / timescale) * 1000;
+  return roundToMilliseconds(timeMs);
+};