@editframe/elements 0.17.6-beta.0 → 0.18.7-beta.0
This diff compares the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
- package/dist/EF_FRAMEGEN.js +1 -1
- package/dist/elements/EFAudio.d.ts +21 -2
- package/dist/elements/EFAudio.js +41 -11
- package/dist/elements/EFImage.d.ts +1 -0
- package/dist/elements/EFImage.js +11 -3
- package/dist/elements/EFMedia/AssetIdMediaEngine.d.ts +18 -0
- package/dist/elements/EFMedia/AssetIdMediaEngine.js +41 -0
- package/dist/elements/EFMedia/AssetMediaEngine.browsertest.d.ts +0 -0
- package/dist/elements/EFMedia/AssetMediaEngine.d.ts +45 -0
- package/dist/elements/EFMedia/AssetMediaEngine.js +135 -0
- package/dist/elements/EFMedia/BaseMediaEngine.d.ts +55 -0
- package/dist/elements/EFMedia/BaseMediaEngine.js +115 -0
- package/dist/elements/EFMedia/BufferedSeekingInput.d.ts +43 -0
- package/dist/elements/EFMedia/BufferedSeekingInput.js +179 -0
- package/dist/elements/EFMedia/JitMediaEngine.browsertest.d.ts +0 -0
- package/dist/elements/EFMedia/JitMediaEngine.d.ts +31 -0
- package/dist/elements/EFMedia/JitMediaEngine.js +81 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioBufferTask.browsertest.d.ts +9 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioBufferTask.d.ts +16 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioBufferTask.js +48 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.d.ts +3 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.js +141 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioInitSegmentFetchTask.browsertest.d.ts +9 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioInitSegmentFetchTask.d.ts +4 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioInitSegmentFetchTask.js +16 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioInputTask.browsertest.d.ts +9 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioInputTask.d.ts +3 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioInputTask.js +30 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioSeekTask.chunkboundary.regression.browsertest.d.ts +0 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioSeekTask.d.ts +7 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioSeekTask.js +32 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioSegmentFetchTask.d.ts +4 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioSegmentFetchTask.js +28 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioSegmentIdTask.d.ts +4 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioSegmentIdTask.js +17 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.d.ts +3 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.js +107 -0
- package/dist/elements/EFMedia/shared/AudioSpanUtils.d.ts +7 -0
- package/dist/elements/EFMedia/shared/AudioSpanUtils.js +54 -0
- package/dist/elements/EFMedia/shared/BufferUtils.d.ts +70 -0
- package/dist/elements/EFMedia/shared/BufferUtils.js +89 -0
- package/dist/elements/EFMedia/shared/MediaTaskUtils.d.ts +23 -0
- package/dist/elements/EFMedia/shared/PrecisionUtils.d.ts +28 -0
- package/dist/elements/EFMedia/shared/PrecisionUtils.js +29 -0
- package/dist/elements/EFMedia/shared/RenditionHelpers.d.ts +19 -0
- package/dist/elements/EFMedia/tasks/makeMediaEngineTask.d.ts +18 -0
- package/dist/elements/EFMedia/tasks/makeMediaEngineTask.js +60 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoBufferTask.browsertest.d.ts +9 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoBufferTask.d.ts +16 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoBufferTask.js +46 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoInitSegmentFetchTask.browsertest.d.ts +9 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoInitSegmentFetchTask.d.ts +4 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoInitSegmentFetchTask.js +16 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoInputTask.browsertest.d.ts +9 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoInputTask.d.ts +3 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoInputTask.js +27 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoSeekTask.d.ts +7 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoSeekTask.js +34 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoSegmentFetchTask.browsertest.d.ts +9 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoSegmentFetchTask.d.ts +4 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoSegmentFetchTask.js +28 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoSegmentIdTask.browsertest.d.ts +9 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoSegmentIdTask.d.ts +4 -0
- package/dist/elements/EFMedia/videoTasks/makeVideoSegmentIdTask.js +17 -0
- package/dist/elements/EFMedia.browsertest.d.ts +1 -0
- package/dist/elements/EFMedia.d.ts +63 -111
- package/dist/elements/EFMedia.js +117 -1113
- package/dist/elements/EFTemporal.d.ts +1 -1
- package/dist/elements/EFTemporal.js +1 -1
- package/dist/elements/EFTimegroup.d.ts +11 -0
- package/dist/elements/EFTimegroup.js +83 -13
- package/dist/elements/EFVideo.d.ts +54 -32
- package/dist/elements/EFVideo.js +100 -207
- package/dist/elements/EFWaveform.js +2 -2
- package/dist/elements/SampleBuffer.d.ts +14 -0
- package/dist/elements/SampleBuffer.js +52 -0
- package/dist/getRenderInfo.js +2 -1
- package/dist/gui/ContextMixin.js +3 -2
- package/dist/gui/EFFilmstrip.d.ts +3 -3
- package/dist/gui/EFFilmstrip.js +1 -1
- package/dist/gui/EFFitScale.d.ts +2 -2
- package/dist/gui/TWMixin.js +1 -1
- package/dist/style.css +1 -1
- package/dist/transcoding/cache/CacheManager.d.ts +73 -0
- package/dist/transcoding/cache/RequestDeduplicator.d.ts +29 -0
- package/dist/transcoding/cache/RequestDeduplicator.js +53 -0
- package/dist/transcoding/cache/RequestDeduplicator.test.d.ts +1 -0
- package/dist/transcoding/types/index.d.ts +242 -0
- package/dist/transcoding/utils/MediaUtils.d.ts +9 -0
- package/dist/transcoding/utils/UrlGenerator.d.ts +26 -0
- package/dist/transcoding/utils/UrlGenerator.js +45 -0
- package/dist/transcoding/utils/constants.d.ts +27 -0
- package/dist/utils/LRUCache.d.ts +34 -0
- package/dist/utils/LRUCache.js +115 -0
- package/package.json +3 -3
- package/src/elements/EFAudio.browsertest.ts +189 -49
- package/src/elements/EFAudio.ts +59 -13
- package/src/elements/EFImage.browsertest.ts +42 -0
- package/src/elements/EFImage.ts +23 -3
- package/src/elements/EFMedia/AssetIdMediaEngine.test.ts +222 -0
- package/src/elements/EFMedia/AssetIdMediaEngine.ts +70 -0
- package/src/elements/EFMedia/AssetMediaEngine.browsertest.ts +100 -0
- package/src/elements/EFMedia/AssetMediaEngine.ts +255 -0
- package/src/elements/EFMedia/BaseMediaEngine.test.ts +164 -0
- package/src/elements/EFMedia/BaseMediaEngine.ts +219 -0
- package/src/elements/EFMedia/BufferedSeekingInput.browsertest.ts +481 -0
- package/src/elements/EFMedia/BufferedSeekingInput.ts +324 -0
- package/src/elements/EFMedia/JitMediaEngine.browsertest.ts +165 -0
- package/src/elements/EFMedia/JitMediaEngine.ts +166 -0
- package/src/elements/EFMedia/audioTasks/makeAudioBufferTask.browsertest.ts +554 -0
- package/src/elements/EFMedia/audioTasks/makeAudioBufferTask.ts +81 -0
- package/src/elements/EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.ts +250 -0
- package/src/elements/EFMedia/audioTasks/makeAudioInitSegmentFetchTask.browsertest.ts +59 -0
- package/src/elements/EFMedia/audioTasks/makeAudioInitSegmentFetchTask.ts +23 -0
- package/src/elements/EFMedia/audioTasks/makeAudioInputTask.browsertest.ts +55 -0
- package/src/elements/EFMedia/audioTasks/makeAudioInputTask.ts +43 -0
- package/src/elements/EFMedia/audioTasks/makeAudioSeekTask.chunkboundary.regression.browsertest.ts +199 -0
- package/src/elements/EFMedia/audioTasks/makeAudioSeekTask.ts +64 -0
- package/src/elements/EFMedia/audioTasks/makeAudioSegmentFetchTask.ts +45 -0
- package/src/elements/EFMedia/audioTasks/makeAudioSegmentIdTask.ts +24 -0
- package/src/elements/EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.ts +183 -0
- package/src/elements/EFMedia/shared/AudioSpanUtils.ts +128 -0
- package/src/elements/EFMedia/shared/BufferUtils.ts +310 -0
- package/src/elements/EFMedia/shared/MediaTaskUtils.ts +44 -0
- package/src/elements/EFMedia/shared/PrecisionUtils.ts +46 -0
- package/src/elements/EFMedia/shared/RenditionHelpers.browsertest.ts +247 -0
- package/src/elements/EFMedia/shared/RenditionHelpers.ts +79 -0
- package/src/elements/EFMedia/tasks/makeMediaEngineTask.browsertest.ts +128 -0
- package/src/elements/EFMedia/tasks/makeMediaEngineTask.test.ts +233 -0
- package/src/elements/EFMedia/tasks/makeMediaEngineTask.ts +89 -0
- package/src/elements/EFMedia/videoTasks/makeVideoBufferTask.browsertest.ts +555 -0
- package/src/elements/EFMedia/videoTasks/makeVideoBufferTask.ts +79 -0
- package/src/elements/EFMedia/videoTasks/makeVideoInitSegmentFetchTask.browsertest.ts +59 -0
- package/src/elements/EFMedia/videoTasks/makeVideoInitSegmentFetchTask.ts +23 -0
- package/src/elements/EFMedia/videoTasks/makeVideoInputTask.browsertest.ts +55 -0
- package/src/elements/EFMedia/videoTasks/makeVideoInputTask.ts +45 -0
- package/src/elements/EFMedia/videoTasks/makeVideoSeekTask.ts +68 -0
- package/src/elements/EFMedia/videoTasks/makeVideoSegmentFetchTask.browsertest.ts +57 -0
- package/src/elements/EFMedia/videoTasks/makeVideoSegmentFetchTask.ts +43 -0
- package/src/elements/EFMedia/videoTasks/makeVideoSegmentIdTask.browsertest.ts +56 -0
- package/src/elements/EFMedia/videoTasks/makeVideoSegmentIdTask.ts +24 -0
- package/src/elements/EFMedia.browsertest.ts +706 -273
- package/src/elements/EFMedia.ts +136 -1769
- package/src/elements/EFTemporal.ts +3 -4
- package/src/elements/EFTimegroup.browsertest.ts +6 -3
- package/src/elements/EFTimegroup.ts +147 -21
- package/src/elements/EFVideo.browsertest.ts +980 -169
- package/src/elements/EFVideo.ts +113 -458
- package/src/elements/EFWaveform.ts +1 -1
- package/src/elements/MediaController.ts +2 -12
- package/src/elements/SampleBuffer.ts +95 -0
- package/src/gui/ContextMixin.ts +3 -6
- package/src/transcoding/cache/CacheManager.ts +208 -0
- package/src/transcoding/cache/RequestDeduplicator.test.ts +170 -0
- package/src/transcoding/cache/RequestDeduplicator.ts +65 -0
- package/src/transcoding/types/index.ts +269 -0
- package/src/transcoding/utils/MediaUtils.ts +63 -0
- package/src/transcoding/utils/UrlGenerator.ts +68 -0
- package/src/transcoding/utils/constants.ts +36 -0
- package/src/utils/LRUCache.ts +153 -0
- package/test/EFVideo.framegen.browsertest.ts +39 -30
- package/test/__cache__/GET__api_v1_transcode_audio_1_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__32da3954ba60c96ad732020c65a08ebc/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_audio_1_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__32da3954ba60c96ad732020c65a08ebc/metadata.json +21 -0
- package/test/__cache__/GET__api_v1_transcode_audio_1_mp4_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4_bytes_0__9ed2d25c675aa6bb6ff5b3ae23887c71/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_audio_1_mp4_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4_bytes_0__9ed2d25c675aa6bb6ff5b3ae23887c71/metadata.json +22 -0
- package/test/__cache__/GET__api_v1_transcode_audio_2_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__b0b2b07efcf607de8ee0f650328c32f7/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_audio_2_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__b0b2b07efcf607de8ee0f650328c32f7/metadata.json +21 -0
- package/test/__cache__/GET__api_v1_transcode_audio_2_mp4_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4_bytes_0__d5a3309a2bf756dd6e304807eb402f56/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_audio_2_mp4_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4_bytes_0__d5a3309a2bf756dd6e304807eb402f56/metadata.json +22 -0
- package/test/__cache__/GET__api_v1_transcode_audio_3_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a75c2252b542e0c152c780e9a8d7b154/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_audio_3_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a75c2252b542e0c152c780e9a8d7b154/metadata.json +21 -0
- package/test/__cache__/GET__api_v1_transcode_audio_3_mp4_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4_bytes_0__773254bb671e3466fca8677139fb239e/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_audio_3_mp4_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4_bytes_0__773254bb671e3466fca8677139fb239e/metadata.json +22 -0
- package/test/__cache__/GET__api_v1_transcode_audio_4_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a64ff1cfb1b52cae14df4b5dfa1e222b/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_audio_4_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a64ff1cfb1b52cae14df4b5dfa1e222b/metadata.json +21 -0
- package/test/__cache__/GET__api_v1_transcode_audio_5_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__91e8a522f950809b9f09f4173113b4b0/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_audio_5_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__91e8a522f950809b9f09f4173113b4b0/metadata.json +21 -0
- package/test/__cache__/GET__api_v1_transcode_audio_init_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__e66d2c831d951e74ad0aeaa6489795d0/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_audio_init_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__e66d2c831d951e74ad0aeaa6489795d0/metadata.json +21 -0
- package/test/__cache__/GET__api_v1_transcode_high_1_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__26197f6f7c46cacb0a71134131c3f775/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_high_1_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__26197f6f7c46cacb0a71134131c3f775/metadata.json +21 -0
- package/test/__cache__/GET__api_v1_transcode_high_2_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__4cb6774cd3650ccf59c8f8dc6678c0b9/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_high_2_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__4cb6774cd3650ccf59c8f8dc6678c0b9/metadata.json +21 -0
- package/test/__cache__/GET__api_v1_transcode_high_3_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__0b3b2b1c8933f7fcf8a9ecaa88d58b41/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_high_3_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__0b3b2b1c8933f7fcf8a9ecaa88d58b41/metadata.json +21 -0
- package/test/__cache__/GET__api_v1_transcode_high_4_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a6fb05a22b18d850f7f2950bbcdbdeed/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_high_4_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a6fb05a22b18d850f7f2950bbcdbdeed/metadata.json +21 -0
- package/test/__cache__/GET__api_v1_transcode_high_5_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a50058c7c3602e90879fe3428ed891f4/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_high_5_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a50058c7c3602e90879fe3428ed891f4/metadata.json +21 -0
- package/test/__cache__/GET__api_v1_transcode_high_init_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__0798c479b44aaeef850609a430f6e613/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_high_init_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__0798c479b44aaeef850609a430f6e613/metadata.json +21 -0
- package/test/__cache__/GET__api_v1_transcode_manifest_json_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__3be92a0437de726b431ed5af2369158a/data.bin +1 -0
- package/test/__cache__/GET__api_v1_transcode_manifest_json_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__3be92a0437de726b431ed5af2369158a/metadata.json +19 -0
- package/test/createJitTestClips.ts +320 -188
- package/test/recordReplayProxyPlugin.js +352 -0
- package/test/useAssetMSW.ts +1 -1
- package/test/useMSW.ts +35 -22
- package/types.json +1 -1
- package/dist/JitTranscodingClient.d.ts +0 -167
- package/dist/JitTranscodingClient.js +0 -373
- package/dist/ScrubTrackManager.d.ts +0 -96
- package/dist/ScrubTrackManager.js +0 -216
- package/dist/elements/printTaskStatus.js +0 -11
- package/src/elements/__screenshots__/EFMedia.browsertest.ts/EFMedia-JIT-audio-playback-audioBufferTask-should-work-in-JIT-mode-without-URL-errors-1.png +0 -0
- package/test/EFVideo.frame-tasks.browsertest.ts +0 -524
- /package/dist/{DecoderResetFrequency.test.d.ts → elements/EFMedia/AssetIdMediaEngine.test.d.ts} +0 -0
- /package/dist/{DecoderResetRecovery.test.d.ts → elements/EFMedia/BaseMediaEngine.test.d.ts} +0 -0
- /package/dist/{JitTranscodingClient.browsertest.d.ts → elements/EFMedia/BufferedSeekingInput.browsertest.d.ts} +0 -0
- /package/dist/{JitTranscodingClient.test.d.ts → elements/EFMedia/shared/RenditionHelpers.browsertest.d.ts} +0 -0
- /package/dist/{ScrubTrackIntegration.test.d.ts → elements/EFMedia/tasks/makeMediaEngineTask.browsertest.d.ts} +0 -0
- /package/dist/{SegmentSwitchLoading.test.d.ts → elements/EFMedia/tasks/makeMediaEngineTask.test.d.ts} +0 -0
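The EFMedia.ts diff below shows the shape of this release: the inline JIT-transcoding machinery (JitTranscodingClient, mode auto-detection, inline audio-analysis tasks) is deleted from the element and replaced by imports of task factories under src/elements/EFMedia/ and the new transcoding/ utilities, while ad-hoc attribute getters and setters become declarative Lit reactive properties. As a minimal usage sketch of the new attribute surface, inferred from the observedAttributes list and @property declarations in that diff — the "ef-video" tag name and the type cast are assumptions, not confirmed by the diff:

// Hypothetical sketch: driving the new EFMedia properties from TypeScript.
// Property names, attribute names, and defaults come from the diff below;
// the "ef-video" tag name is an assumption.
const media = document.createElement("ef-video") as HTMLElement & {
  mute: boolean;                   // attribute "mute", reflected
  fftSize: number;                 // attribute "fft-size", default 128
  fftDecay: number;                // attribute "fft-decay", default 8
  fftGain: number;                 // attribute "fft-gain", default 3.0
  interpolateFrequencies: boolean; // attribute "interpolate-frequencies"
  audioBufferDurationMs: number;   // attribute "audio-buffer-duration", default 30000
  maxAudioBufferFetches: number;   // attribute "max-audio-buffer-fetches", default 2
  enableAudioBuffering: boolean;   // attribute "enable-audio-buffering", default true
};

media.setAttribute("asset-id", "some-asset-id"); // asset-backed playback
media.audioBufferDurationMs = 30_000;            // buffer 30 s ahead of the playhead
media.mute = true;                               // reflects back to the "mute" attribute
document.body.append(media);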
package/src/elements/EFMedia.ts
CHANGED

@@ -1,14 +1,18 @@
-import type { TrackFragmentIndex, TrackSegment } from "@editframe/assets";
-import { VideoAsset } from "@editframe/assets/EncodedAsset.js";
-import { MP4File } from "@editframe/assets/MP4File.js";
-import { Task } from "@lit/task";
-import { deepArrayEquals } from "@lit/task/deep-equals.js";
-import debug from "debug";
 import { css, LitElement, type PropertyValueMap } from "lit";
 import { property, state } from "lit/decorators.js";
-
-import {
-import {
+
+import type { AudioSpan } from "../transcoding/types/index.ts";
+import { UrlGenerator } from "../transcoding/utils/UrlGenerator.ts";
+import { makeAudioBufferTask } from "./EFMedia/audioTasks/makeAudioBufferTask.ts";
+import { makeAudioFrequencyAnalysisTask } from "./EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.ts";
+import { makeAudioInitSegmentFetchTask } from "./EFMedia/audioTasks/makeAudioInitSegmentFetchTask.ts";
+import { makeAudioInputTask } from "./EFMedia/audioTasks/makeAudioInputTask.ts";
+import { makeAudioSeekTask } from "./EFMedia/audioTasks/makeAudioSeekTask.ts";
+import { makeAudioSegmentFetchTask } from "./EFMedia/audioTasks/makeAudioSegmentFetchTask.ts";
+import { makeAudioSegmentIdTask } from "./EFMedia/audioTasks/makeAudioSegmentIdTask.ts";
+import { makeAudioTimeDomainAnalysisTask } from "./EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.ts";
+import { fetchAudioSpanningTime } from "./EFMedia/shared/AudioSpanUtils.ts";
+import { makeMediaEngineTask } from "./EFMedia/tasks/makeMediaEngineTask.ts";
 import { EFSourceMixin } from "./EFSourceMixin.js";
 import { EFTemporal } from "./EFTemporal.js";
 import { FetchMixin } from "./FetchMixin.js";
@@ -20,41 +24,9 @@ declare global {
   var EF_FRAMEGEN: import("../EF_FRAMEGEN.js").EFFramegen;
 }
 
-const log = debug("ef:elements:EFMedia");
-
 const freqWeightsCache = new Map<number, Float32Array>();
 
-class
-  private cache = new Map<K, V>();
-  private readonly maxSize: number;
-
-  constructor(maxSize: number) {
-    this.maxSize = maxSize;
-  }
-
-  get(key: K): V | undefined {
-    const value = this.cache.get(key);
-    if (value) {
-      // Refresh position by removing and re-adding
-      this.cache.delete(key);
-      this.cache.set(key, value);
-    }
-    return value;
-  }
-
-  set(key: K, value: V): void {
-    if (this.cache.has(key)) {
-      this.cache.delete(key);
-    } else if (this.cache.size >= this.maxSize) {
-      // Remove oldest entry (first item in map)
-      const firstKey = this.cache.keys().next().value;
-      if (firstKey) {
-        this.cache.delete(firstKey);
-      }
-    }
-    this.cache.set(key, value);
-  }
-}
+export class IgnorableError extends Error {}
 
 export const deepGetMediaElements = (
   element: Element,
@@ -75,6 +47,27 @@ export class EFMedia extends EFTargetable(
     assetType: "isobmff_files",
   }),
 ) {
+  // Sample buffer size configuration
+  static readonly VIDEO_SAMPLE_BUFFER_SIZE = 30;
+  static readonly AUDIO_SAMPLE_BUFFER_SIZE = 120;
+
+  static get observedAttributes() {
+    // biome-ignore lint/complexity/noThisInStatic: We need to access super
+    const parentAttributes = super.observedAttributes || [];
+    return [
+      ...parentAttributes,
+      "mute",
+      "fft-size",
+      "fft-decay",
+      "fft-gain",
+      "interpolate-frequencies",
+      "asset-id",
+      "audio-buffer-duration",
+      "max-audio-buffer-fetches",
+      "enable-audio-buffering",
+    ];
+  }
+
   static styles = [
     css`
       :host {
@@ -89,722 +82,68 @@ export class EFMedia extends EFTargetable(
   currentTimeMs = 0;
 
   /**
-   *
-   *
-   * - "jit-transcode": Use JIT transcoding for remote URLs
-   * - "auto": Automatically detect based on URL patterns (default)
+   * Duration in milliseconds for audio buffering ahead of current time
+   * @domAttribute "audio-buffer-duration"
    */
-
+  @property({ type: Number, attribute: "audio-buffer-duration" })
+  audioBufferDurationMs = 30000; // 30 seconds
 
   /**
-   *
+   * Maximum number of concurrent audio segment fetches for buffering
+   * @domAttribute "max-audio-buffer-fetches"
    */
-
-
-      | ("asset" | "jit-transcode" | "auto")
-      | null;
-    return attr || this._mode || "auto";
-  }
-
-  set mode(value: "asset" | "jit-transcode" | "auto") {
-    const oldValue = this.mode;
-    this._mode = value;
-    this.setAttribute("mode", value);
-    this.requestUpdate("mode", oldValue);
-  }
-
-  connectedCallback(): void {
-    super.connectedCallback();
-
-    // Initialize mode from attribute if present
-    const modeAttr = this.getAttribute("mode") as
-      | ("asset" | "jit-transcode" | "auto")
-      | null;
-    if (modeAttr && modeAttr !== this._mode) {
-      this._mode = modeAttr;
-      this.requestUpdate("mode");
-    }
-
-    // Manually sync attributes to properties for better control
-    const prefetchSegmentsAttr = this.getAttribute("prefetch-segments");
-    if (prefetchSegmentsAttr !== null) {
-      this.prefetchSegments = Number.parseInt(prefetchSegmentsAttr, 10) || 3;
-    }
-
-    const cacheSizeAttr = this.getAttribute("cache-size");
-    if (cacheSizeAttr !== null) {
-      this.cacheSize = Number.parseInt(cacheSizeAttr, 10) || 20;
-    }
-
-    const enablePrefetchAttr = this.getAttribute("enable-prefetch");
-    if (enablePrefetchAttr !== null) {
-      this.enablePrefetch = enablePrefetchAttr === "true";
-    }
-  }
+  @property({ type: Number, attribute: "max-audio-buffer-fetches" })
+  maxAudioBufferFetches = 2;
 
   /**
-   *
+   * Enable/disable audio buffering system
+   * @domAttribute "enable-audio-buffering"
    */
-  @property({ type:
-
-
-  @property({ type: Number, attribute: "cache-size" })
-  cacheSize = 20;
-
-  @property({ type: Boolean, attribute: "enable-prefetch" })
-  enablePrefetch = true;
+  @property({ type: Boolean, attribute: "enable-audio-buffering" })
+  enableAudioBuffering = true;
 
   /**
-   *
+   * Mute/unmute the media element
+   * @domAttribute "mute"
    */
-  @
-
-
-
-
-
-  @state()
-  jitCacheStats: { size: number; hitRate: number; efficiency: number } | null =
-    null;
+  @property({
+    type: Boolean,
+    attribute: "mute",
+    reflect: true,
+  })
+  mute = false;
 
   /**
-   *
+   * FFT size for frequency analysis
+   * @domAttribute "fft-size"
    */
-
-
-    const actualMode = this.mode;
-
-    if (actualMode === "asset" || actualMode === "jit-transcode") {
-      return actualMode;
-    }
-
-    // Auto-detection logic only runs when mode is "auto" or not set
-    if (this.assetId) {
-      return "asset"; // Always use asset mode if assetId is specified
-    }
-
-    if (!this.src) {
-      return "asset"; // Default to asset mode if no src
-    }
-
-    if (JitTranscodingClient.isJitTranscodeEligible(this.src)) {
-      return "jit-transcode";
-    }
-
-    return "asset"; // Default to asset mode for everything else
-  }
-
-  jitClientTask = new Task(this, {
-    autoRun: EF_INTERACTIVE,
-    onError: (error) => {
-      console.error("jitClientTask error", error);
-    },
-    args: () =>
-      [
-        this.apiHost,
-        this.cacheSize,
-        this.enablePrefetch,
-        this.prefetchSegments,
-      ] as const,
-    task: ([apiHost, cacheSize, enablePrefetch, prefetchSegments]) => {
-      const baseUrl =
-        apiHost && apiHost !== "https://editframe.dev"
-          ? apiHost
-          : "http://localhost:3000";
-
-      return new JitTranscodingClient({
-        baseUrl,
-        segmentCacheSize: cacheSize,
-        enableNetworkAdaptation: enablePrefetch,
-        enablePrefetch: enablePrefetch,
-        prefetchSegments: prefetchSegments,
-      });
-    },
-  });
+  @property({ type: Number, attribute: "fft-size", reflect: true })
+  fftSize = 128;
 
   /**
-   *
-   *
+   * FFT decay rate for frequency analysis
+   * @domAttribute "fft-decay"
    */
-
-
-    onError: (error) => {
-      console.error("jitMetadataLoader error", error);
-    },
-    args: () => [this.src, this.jitClientTask.value] as const,
-    task: async ([src, _jitClient], { signal: _signal }) => {
-      if (this.effectiveMode !== "jit-transcode") {
-        return null;
-      }
-      await this.jitClientTask.taskComplete;
-      const jitClient = this.jitClientTask.value;
-      if (!src || !jitClient) {
-        return null;
-      }
-
-      try {
-        this.jitLoadingState = "metadata";
-        this.jitErrorMessage = null;
-
-        const metadata = await jitClient.loadVideoMetadata(src);
-
-        this.jitLoadingState = "idle";
-        return metadata;
-      } catch (error) {
-        this.jitLoadingState = "error";
-        this.jitErrorMessage =
-          error instanceof Error
-            ? error.message
-            : "Failed to load video metadata";
-        log("Failed to load JIT metadata:", error);
-        return null;
-      }
-    },
-    onComplete: () => {
-      if (this.jitLoadingState === "metadata") {
-        this.jitLoadingState = "idle";
-      }
-      this.requestUpdate("intrinsicDurationMs");
-      this.requestUpdate("ownCurrentTimeMs");
-      this.rootTimegroup?.requestUpdate("ownCurrentTimeMs");
-      this.rootTimegroup?.requestUpdate("durationMs");
-    },
-  });
-
-  #assetId: string | null = null;
+  @property({ type: Number, attribute: "fft-decay", reflect: true })
+  fftDecay = 8;
 
   /**
-   *
-   *
-   * @domAttribute "asset-id"
+   * FFT gain for frequency analysis
+   * @domAttribute "fft-gain"
    */
-  @property({ type:
-    this.#assetId = value;
-  }
-
-  get assetId() {
-    return this.#assetId || this.getAttribute("asset-id");
-  }
-
-  fragmentIndexPath() {
-    if (this.assetId) {
-      return `${this.apiHost}/api/v1/isobmff_files/${this.assetId}/index`;
-    }
-    const src = this.src ?? "";
-    if (!src) {
-      // Return a safe path that will fail gracefully in tests - allows tasks to run without null errors
-      return "/@ef-track-fragment-index/no-src-available";
-    }
-    // Normalize path to avoid double slashes and handle @ef- prefixed paths
-    const normalizedSrc = src.startsWith("/") ? src.slice(1) : src;
-    // If src is an @ef- style path, it's likely already a path fragment, not a full URL
-    if (normalizedSrc.startsWith("@ef-")) {
-      // For @ef- paths, we may need different handling - they might be asset IDs
-      return `/@ef-track-fragment-index/${normalizedSrc}`;
-    }
-    return `/@ef-track-fragment-index/${normalizedSrc}`;
-  }
-
-  fragmentTrackPath(trackId: string) {
-    if (this.assetId) {
-      return `${this.apiHost}/api/v1/isobmff_tracks/${this.assetId}/${trackId}`;
-    }
-    // trackId is only specified as a query in the @ef-track url shape
-    // this is because that system doesn't have a full url matching system.
-    // This is an annoying incosistency that should be fixed.
-    const src = this.src ?? "";
-    if (!src) {
-      // Return a safe path that will fail gracefully in tests - allows tasks to run without null errors
-      return `/@ef-track/no-src-available?trackId=${trackId}`;
-    }
-    // Normalize path to avoid double slashes and handle @ef- prefixed paths
-    const normalizedSrc = src.startsWith("/") ? src.slice(1) : src;
-    // If src is an @ef- style path, it's likely already a path fragment, not a full URL
-    if (normalizedSrc.startsWith("@ef-")) {
-      return `/@ef-track/${normalizedSrc}?trackId=${trackId}`;
-    }
-    return `/@ef-track/${normalizedSrc}?trackId=${trackId}`;
-  }
-
-  get mediaDurationTask() {
-    return this.fragmentIndexTask;
-  }
-
-  get defaultVideoTrackId() {
-    const fragmentIndex = this.fragmentIndexTask.value as Record<
-      number,
-      TrackFragmentIndex
-    > | null;
-    return Object.values(fragmentIndex ?? {}).find(
-      (track) => track.type === "video",
-    )?.track;
-  }
-
-  get defaultAudioTrackId() {
-    const fragmentIndex = this.fragmentIndexTask.value as Record<
-      number,
-      TrackFragmentIndex
-    > | null;
-    return Object.values(fragmentIndex ?? {}).find(
-      (track) => track.type === "audio",
-    )?.track;
-  }
-
-  get intrinsicDurationMs() {
-    const fragmentIndex = this.fragmentIndexTask.value as Record<
-      number,
-      TrackFragmentIndex
-    > | null;
-    if (!fragmentIndex) return 0;
-
-    const durations = Object.values(fragmentIndex).map(
-      (track) => (track.duration / track.timescale) * 1000,
-    );
-    if (durations.length === 0) return 0;
-    return Math.max(...durations);
-  }
-
-  #audioContext = (() => {
-    try {
-      return new OfflineAudioContext(2, 48000 / 30, 48000);
-    } catch (error) {
-      throw new Error(
-        `[EFMedia.audioBufferTask] Failed to create OfflineAudioContext(2, ${48000 / 30}, 48000): ${error instanceof Error ? error.message : String(error)}. This is the class field audioContext for audio buffer task processing.`,
-      );
-    }
-  })();
-
-  audioBufferTask = new Task(this, {
-    autoRun: EF_INTERACTIVE,
-    onError: (error) => {
-      console.error("audioBufferTask error", error);
-    },
-    args: () => [this.mediaSegmentsTask.value, this.seekTask.value] as const,
-    task: async ([files, segments], { signal: _signal }) => {
-      if (!files || !segments) return;
-
-      if (!this.defaultAudioTrackId) return;
-
-      const segment = segments[this.defaultAudioTrackId];
-      if (!segment) return;
-
-      const audioFile = files[this.defaultAudioTrackId];
-      if (!audioFile) return;
-
-      return {
-        buffer: await this.#audioContext.decodeAudioData(
-          await audioFile.arrayBuffer(),
-        ),
-        startOffsetMs: (segment.segment.cts / segment.track.timescale) * 1000,
-      };
-    },
-  });
-
-  async fetchAudioSpanningTime(fromMs: number, toMs: number) {
-    // Clamp toMs to the duration of the media
-    toMs = Math.min(toMs, this.durationMs);
-    // Adjust range for track's own time
-    if (this.sourceInMs) {
-      fromMs -=
-        this.startTimeMs - (this.trimStartMs ?? 0) - (this.sourceInMs ?? 0);
-    }
-    if (this.sourceOutMs) {
-      toMs -=
-        this.startTimeMs - (this.trimStartMs ?? 0) - (this.sourceOutMs ?? 0);
-    }
-    fromMs -= this.startTimeMs - (this.trimStartMs ?? 0);
-    toMs -= this.startTimeMs - (this.trimStartMs ?? 0);
-
-    await this.fragmentIndexTask.taskComplete;
-
-    const fragmentIndex = this.fragmentIndexTask.value as Record<
-      number,
-      TrackFragmentIndex
-    > | null;
-    const audioTrackId = this.defaultAudioTrackId;
-    if (!audioTrackId) {
-      return undefined;
-    }
-
-    const audioTrackIndex = fragmentIndex?.[audioTrackId];
-    if (!audioTrackIndex) {
-      return undefined;
-    }
-
-    // Branch based on effective mode: JIT vs Asset
-    if (this.effectiveMode === "jit-transcode" && this.src) {
-      // JIT mode: fetch segments and extract audio directly
-      const jitClient = this.jitClientTask.value;
-      if (!jitClient) {
-        return undefined;
-      }
-
-      try {
-        // Calculate which JIT segments we need
-        const segmentDuration = 2000; // 2s segments
-        const startSegmentIndex = Math.floor(fromMs / segmentDuration);
-        // Clamp to the last segment index, otherwise this will fetch audio past the end of the media, which is a 500 error in our server
-        const maxSegmentIndex =
-          Math.floor(this.durationMs / segmentDuration) - 1;
-        const endSegmentIndex = Math.min(
-          Math.floor(toMs / segmentDuration),
-          maxSegmentIndex,
-        );
-
-        // Fetch all needed JIT segments (they contain both video and audio)
-        const quality = await jitClient.getAdaptiveQuality();
-        const segmentPromises: Promise<{
-          buffer: ArrayBuffer;
-          startMs: number;
-          endMs: number;
-        }>[] = [];
-
-        for (let i = startSegmentIndex; i <= endSegmentIndex; i++) {
-          const segmentStartMs = i * segmentDuration;
-          const segmentEndMs = (i + 1) * segmentDuration;
-
-          segmentPromises.push(
-            jitClient
-              .fetchSegment(this.src, segmentStartMs, quality)
-              .then((buffer) => ({
-                buffer,
-                startMs: segmentStartMs,
-                endMs: segmentEndMs,
-              })),
-          );
-        }
-
-        const segments = await Promise.all(segmentPromises);
-
-        // Decode each segment individually to extract audio
-        const audioBuffers: {
-          buffer: AudioBuffer;
-          startMs: number;
-          endMs: number;
-        }[] = [];
-
-        for (const segment of segments) {
-          try {
-            // Use a temporary audio context to decode audio from the video file
-            let tempContext: OfflineAudioContext;
-            try {
-              tempContext = new OfflineAudioContext(2, 48000, 48000);
-            } catch (error) {
-              throw new Error(
-                `[EFMedia.fetchAudioSpanningTime JIT] Failed to create temp OfflineAudioContext(2, 48000, 48000) for segment ${segment.startMs}-${segment.endMs}ms: ${error instanceof Error ? error.message : String(error)}. This is for decoding audio from JIT video segments.`,
-              );
-            }
-            // Clone the ArrayBuffer to avoid detaching issues when reusing cached segments
-            const clonedBuffer = segment.buffer.slice(0);
-            const audioBuffer = await tempContext.decodeAudioData(clonedBuffer);
-            audioBuffers.push({
-              buffer: audioBuffer,
-              startMs: segment.startMs,
-              endMs: segment.endMs,
-            });
-          } catch (error) {
-            log(
-              `Failed to decode audio from segment ${segment.startMs}-${segment.endMs}ms:`,
-              error,
-            );
-            throw error;
-          }
-        }
-
-        if (audioBuffers.length === 0) {
-          return undefined;
-        }
-
-        // Calculate total duration and samples needed
-        const firstAudioBuffer = audioBuffers[0];
-        const lastAudioBuffer = audioBuffers[audioBuffers.length - 1];
-
-        if (!firstAudioBuffer || !lastAudioBuffer) {
-          return undefined;
-        }
-
-        const sampleRate = firstAudioBuffer.buffer.sampleRate;
-        const numberOfChannels = firstAudioBuffer.buffer.numberOfChannels;
-
-        // Calculate the exact time range we need
-        const actualStartMs = Math.max(fromMs, firstAudioBuffer.startMs);
-        const actualEndMs = Math.min(toMs, lastAudioBuffer.endMs);
-        const totalDurationMs = actualEndMs - actualStartMs;
-        const totalSamples = Math.floor((totalDurationMs / 1000) * sampleRate);
-        if (totalSamples <= 0) {
-          return undefined;
-        }
-
-        // Create a new audio context for the final buffer
-        let finalContext: OfflineAudioContext;
-        try {
-          finalContext = new OfflineAudioContext(
-            numberOfChannels,
-            totalSamples,
-            sampleRate,
-          );
-        } catch (error) {
-          throw new Error(
-            `[EFMedia.fetchAudioSpanningTime final] Failed to create final OfflineAudioContext(${numberOfChannels}, ${totalSamples}, ${sampleRate}) for time range ${actualStartMs}-${actualEndMs}ms: ${error instanceof Error ? error.message : String(error)}. This is for creating the final concatenated audio buffer.`,
-          );
-        }
-        const finalBuffer = finalContext.createBuffer(
-          numberOfChannels,
-          totalSamples,
-          sampleRate,
-        );
-
-        // Copy audio data from each decoded segment to the final buffer
-        let outputOffset = 0;
-
-        for (const {
-          buffer: audioBuffer,
-          startMs: segmentStartMs,
-          endMs: segmentEndMs,
-        } of audioBuffers) {
-          // Calculate which part of this segment we need
-          const segmentNeedStart = Math.max(actualStartMs, segmentStartMs);
-          const segmentNeedEnd = Math.min(actualEndMs, segmentEndMs);
-
-          if (segmentNeedStart >= segmentNeedEnd) {
-            continue; // Skip segments outside our range
-          }
-
-          // Calculate sample offsets within this segment
-          const segmentStartSample = Math.floor(
-            ((segmentNeedStart - segmentStartMs) / 1000) * sampleRate,
-          );
-          const segmentDurationSamples = Math.floor(
-            ((segmentNeedEnd - segmentNeedStart) / 1000) * sampleRate,
-          );
-
-          // Ensure we don't exceed buffer boundaries
-          const actualSamples = Math.min(
-            segmentDurationSamples,
-            audioBuffer.length - segmentStartSample,
-            totalSamples - outputOffset,
-          );
-
-          if (actualSamples <= 0) {
-            continue;
-          }
-
-          // Copy each channel
-          for (let channel = 0; channel < numberOfChannels; channel++) {
-            const sourceData = audioBuffer.getChannelData(channel);
-            const targetData = finalBuffer.getChannelData(channel);
-
-            for (let i = 0; i < actualSamples; i++) {
-              const sourceIndex = segmentStartSample + i;
-              const targetIndex = outputOffset + i;
-
-              if (
-                sourceIndex < sourceData.length &&
-                targetIndex < targetData.length
-              ) {
-                const sample = sourceData[sourceIndex];
-                if (sample !== undefined) {
-                  targetData[targetIndex] = sample;
-                }
-              }
-            }
-          }
-
-          outputOffset += actualSamples;
-        }
-
-        // Encode the final buffer back to a blob
-        // We'll create a simple WAV file since that's more reliable than trying to create MP4
-        const wavBlob = this.encodeWAVBuffer(finalBuffer);
-
-        const result = {
-          blob: wavBlob,
-          startMs: actualStartMs - (this.trimStartMs ?? 0),
-          endMs: actualEndMs - (this.trimEndMs ?? 0),
-        };
-
-        return result;
-      } catch (error) {
-        log(
-          "Failed to extract and concatenate audio from JIT video segments:",
-          error,
-        );
-        return undefined;
-      }
-    }
-
-    // Asset mode: use original fragmented MP4 approach
-    const start = audioTrackIndex.initSegment.offset;
-    const end =
-      audioTrackIndex.initSegment.offset + audioTrackIndex.initSegment.size;
-    const audioInitFragmentRequest = this.fetch(
-      this.fragmentTrackPath(String(audioTrackId)),
-      {
-        headers: { Range: `bytes=${start}-${end - 1}` },
-      },
-    );
-
-    const fragments = Object.values(
-      audioTrackIndex.segments as TrackSegment[],
-    ).filter((segment: TrackSegment) => {
-      const segmentStartsBeforeEnd =
-        segment.dts <= (toMs * audioTrackIndex.timescale) / 1000;
-      const segmentEndsAfterStart =
-        segment.dts + segment.duration >=
-        (fromMs * audioTrackIndex.timescale) / 1000;
-      return segmentStartsBeforeEnd && segmentEndsAfterStart;
-    });
-
-    const firstFragment = fragments[0];
-    if (!firstFragment) {
-      return undefined;
-    }
-    const lastFragment = fragments[fragments.length - 1];
-    if (!lastFragment) {
-      return undefined;
-    }
-    const fragmentStart = firstFragment.offset;
-    const fragmentEnd = lastFragment.offset + lastFragment.size;
-
-    const audioFragmentRequest = this.fetch(
-      this.fragmentTrackPath(String(audioTrackId)),
-      {
-        headers: { Range: `bytes=${fragmentStart}-${fragmentEnd - 1}` },
-      },
-    );
-
-    const initResponse = await audioInitFragmentRequest;
-    const dataResponse = await audioFragmentRequest;
-
-    const initBuffer = await initResponse.arrayBuffer();
-    const dataBuffer = await dataResponse.arrayBuffer();
-
-    const audioBlob = new Blob([initBuffer, dataBuffer], {
-      type: "audio/mp4",
-    });
-
-    return {
-      blob: audioBlob,
-      startMs:
-        (firstFragment.dts / audioTrackIndex.timescale) * 1000 -
-        (this.trimStartMs ?? 0),
-      endMs:
-        (lastFragment.dts / audioTrackIndex.timescale) * 1000 +
-        (lastFragment.duration / audioTrackIndex.timescale) * 1000 -
-        (this.trimEndMs ?? 0),
-    };
-  }
+  @property({ type: Number, attribute: "fft-gain", reflect: true })
+  fftGain = 3.0;
 
   /**
-   *
+   * Enable/disable frequency interpolation
+   * @domAttribute "interpolate-frequencies"
    */
-
-
-
-
-
-
-    const bytesPerSample = 2; // 16-bit
-    const blockAlign = numberOfChannels * bytesPerSample;
-    const byteRate = sampleRate * blockAlign;
-    const dataSize = length * blockAlign;
-    const fileSize = 36 + dataSize;
-
-    // Create WAV file buffer
-    const buffer = new ArrayBuffer(44 + dataSize);
-    const view = new DataView(buffer);
-
-    // Write WAV header
-    let offset = 0;
-
-    // RIFF chunk descriptor
-    view.setUint32(offset, 0x52494646, false); // "RIFF"
-    offset += 4;
-    view.setUint32(offset, fileSize, true); // File size
-    offset += 4;
-    view.setUint32(offset, 0x57415645, false); // "WAVE"
-    offset += 4;
-
-    // fmt sub-chunk
-    view.setUint32(offset, 0x666d7420, false); // "fmt "
-    offset += 4;
-    view.setUint32(offset, 16, true); // Subchunk1Size (16 for PCM)
-    offset += 4;
-    view.setUint16(offset, 1, true); // AudioFormat (1 for PCM)
-    offset += 2;
-    view.setUint16(offset, numberOfChannels, true); // NumChannels
-    offset += 2;
-    view.setUint32(offset, sampleRate, true); // SampleRate
-    offset += 4;
-    view.setUint32(offset, byteRate, true); // ByteRate
-    offset += 4;
-    view.setUint16(offset, blockAlign, true); // BlockAlign
-    offset += 2;
-    view.setUint16(offset, 16, true); // BitsPerSample
-    offset += 2;
-
-    // data sub-chunk
-    view.setUint32(offset, 0x64617461, false); // "data"
-    offset += 4;
-    view.setUint32(offset, dataSize, true); // Subchunk2Size
-    offset += 4;
-
-    // Write audio data
-    for (let i = 0; i < length; i++) {
-      for (let channel = 0; channel < numberOfChannels; channel++) {
-        const sample = audioBuffer.getChannelData(channel)[i] || 0;
-        // Convert float (-1 to 1) to 16-bit PCM
-        const pcmSample = Math.max(
-          -32768,
-          Math.min(32767, Math.floor(sample * 32767)),
-        );
-        view.setInt16(offset, pcmSample, true);
-        offset += 2;
-      }
-    }
-
-    return new Blob([buffer], { type: "audio/wav" });
-  }
-
-  set fftSize(value: number) {
-    const oldValue = this.fftSize;
-    this.setAttribute("fft-size", String(value));
-    this.requestUpdate("fft-size", oldValue);
-  }
-
-  set fftDecay(value: number) {
-    const oldValue = this.fftDecay;
-    this.setAttribute("fft-decay", String(value));
-    this.requestUpdate("fft-decay", oldValue);
-  }
-
-  get fftSize() {
-    return Number.parseInt(this.getAttribute("fft-size") ?? "128", 10);
-  }
-
-  get fftDecay() {
-    return Number.parseInt(this.getAttribute("fft-decay") ?? "8", 10);
-  }
-
-  set interpolateFrequencies(value: boolean) {
-    const oldValue = this.interpolateFrequencies;
-    this.setAttribute("interpolate-frequencies", String(value));
-    this.requestUpdate("interpolate-frequencies", oldValue);
-  }
-
-  get interpolateFrequencies() {
-    return this.getAttribute("interpolate-frequencies") !== "false";
-  }
-
-  get shouldInterpolateFrequencies() {
-    if (this.hasAttribute("interpolate-frequencies")) {
-      return this.getAttribute("interpolate-frequencies") !== "false";
-    }
-    return false;
-  }
-
-  private static readonly DECAY_WEIGHT = 0.7;
+  @property({
+    type: Boolean,
+    attribute: "interpolate-frequencies",
+    reflect: true,
+  })
+  interpolateFrequencies = false;
 
   // Update FREQ_WEIGHTS to use the instance fftSize instead of a static value
   get FREQ_WEIGHTS() {
@@ -828,509 +167,52 @@ export class EFMedia extends EFTargetable(
     return weights;
   }
 
-
-
-
-    autoRun: EF_INTERACTIVE,
-    onError: (error) => {
-      console.error("byteTimeDomainTask error", error);
-    },
-    args: () =>
-      [
-        this.audioBufferTask.status,
-        this.currentSourceTimeMs,
-        this.fftSize,
-        this.fftDecay,
-        this.fftGain,
-        this.shouldInterpolateFrequencies,
-      ] as const,
-    task: async () => {
-      await this.audioBufferTask.taskComplete;
-      if (!this.audioBufferTask.value) return null;
-      if (this.currentSourceTimeMs < 0) return null;
-
-      const currentTimeMs = this.currentSourceTimeMs;
-      const startOffsetMs = this.audioBufferTask.value.startOffsetMs;
-      const audioBuffer = this.audioBufferTask.value.buffer;
-
-      const smoothedKey = `${this.shouldInterpolateFrequencies}:${this.fftSize}:${this.fftDecay}:${this.fftGain}:${startOffsetMs}:${currentTimeMs}`;
-      const cachedData = this.#byteTimeDomainCache.get(smoothedKey);
-      if (cachedData) return cachedData;
-
-      // Process multiple frames with decay, similar to the reference code
-      const framesData = await Promise.all(
-        Array.from({ length: this.fftDecay }, async (_, frameIndex) => {
-          const frameOffset = frameIndex * (1000 / 30);
-          const startTime = Math.max(
-            0,
-            (currentTimeMs - frameOffset - startOffsetMs) / 1000,
-          );
-
-          const cacheKey = `${this.shouldInterpolateFrequencies}:${this.fftSize}:${this.fftGain}:${startOffsetMs}:${startTime}`;
-          const cachedFrame = this.#byteTimeDomainCache.get(cacheKey);
-          if (cachedFrame) return cachedFrame;
-
-          let audioContext: OfflineAudioContext;
-          try {
-            audioContext = new OfflineAudioContext(2, 48000 * (1 / 30), 48000);
-          } catch (error) {
-            throw new Error(
-              `[EFMedia.byteTimeDomainTask] Failed to create OfflineAudioContext(2, ${48000 * (1 / 30)}, 48000) for frame ${frameIndex} at time ${startTime}s: ${error instanceof Error ? error.message : String(error)}. This is for audio time domain analysis.`,
-            );
-          }
-
-          const source = audioContext.createBufferSource();
-          source.buffer = audioBuffer;
-
-          // Create analyzer for PCM data
-          const analyser = audioContext.createAnalyser();
-          analyser.fftSize = this.fftSize; // Ensure enough samples
-          analyser.minDecibels = -90;
-          analyser.maxDecibels = -20;
-
-          const gainNode = audioContext.createGain();
-          gainNode.gain.value = this.fftGain; // Amplify the signal
-
-          source.connect(gainNode);
-          gainNode.connect(analyser);
-          analyser.connect(audioContext.destination);
-
-          source.start(0, startTime, 1 / 30);
-
-          const dataLength = analyser.fftSize / 2;
-          try {
-            await audioContext.startRendering();
-            const frameData = new Uint8Array(dataLength);
-            analyser.getByteTimeDomainData(frameData);
-
-            // const points = frameData;
-            // Calculate RMS and midpoint values
-            const points = new Uint8Array(dataLength);
-            for (let i = 0; i < dataLength; i++) {
-              const pointSamples = frameData.slice(
-                i * (frameData.length / dataLength),
-                (i + 1) * (frameData.length / dataLength),
-              );
-
-              // Calculate RMS while preserving sign
-              const rms = Math.sqrt(
-                pointSamples.reduce((sum, sample) => {
-                  const normalized = (sample - 128) / 128;
-                  return sum + normalized * normalized;
-                }, 0) / pointSamples.length,
-              );
-
-              // Get average sign of the samples to determine direction
-              const avgSign = Math.sign(
-                pointSamples.reduce((sum, sample) => sum + (sample - 128), 0),
-              );
-
-              // Convert RMS back to byte range, preserving direction
-              points[i] = Math.min(255, Math.round(128 + avgSign * rms * 128));
-            }
-
-            this.#byteTimeDomainCache.set(cacheKey, points);
-            return points;
-          } finally {
-            source.disconnect();
-            analyser.disconnect();
-          }
-        }),
-      );
-
-      // Combine frames with decay weighting
-      const frameLength = framesData[0]?.length ?? 0;
-      const smoothedData = new Uint8Array(frameLength);
-
-      for (let i = 0; i < frameLength; i++) {
-        let weightedSum = 0;
-        let weightSum = 0;
-
-        framesData.forEach((frame, frameIndex) => {
-          const decayWeight = EFMedia.DECAY_WEIGHT ** frameIndex;
-          weightedSum += (frame[i] ?? 0) * decayWeight;
-          weightSum += decayWeight;
-        });
-
-        smoothedData[i] = Math.min(255, Math.round(weightedSum / weightSum));
-      }
-
-      this.#byteTimeDomainCache.set(smoothedKey, smoothedData);
-      return smoothedData;
-    },
-  });
-
-  #frequencyDataCache = new LRUCache<string, Uint8Array>(100);
-
-  frequencyDataTask = new Task(this, {
-    autoRun: EF_INTERACTIVE,
-    onError: (error) => {
-      console.error("frequencyDataTask error", error);
-    },
-    args: () =>
-      [
-        this.audioBufferTask.status,
-        this.currentSourceTimeMs,
-        this.fftSize,
-        this.fftDecay,
-        this.fftGain,
-        this.shouldInterpolateFrequencies,
-      ] as const,
-    task: async () => {
-      await this.audioBufferTask.taskComplete;
-      if (!this.audioBufferTask.value) return null;
-      if (this.currentSourceTimeMs < 0) return null;
-
-      const currentTimeMs = this.currentSourceTimeMs;
-      const startOffsetMs = this.audioBufferTask.value.startOffsetMs;
-      const audioBuffer = this.audioBufferTask.value.buffer;
-      const smoothedKey = `${this.shouldInterpolateFrequencies}:${this.fftSize}:${this.fftDecay}:${this.fftGain}:${startOffsetMs}:${currentTimeMs}`;
-
-      const cachedSmoothedData = this.#frequencyDataCache.get(smoothedKey);
-      if (cachedSmoothedData) {
-        return cachedSmoothedData;
-      }
-
-      const framesData = await Promise.all(
-        Array.from({ length: this.fftDecay }, async (_, i) => {
-          const frameOffset = i * (1000 / 30);
-          const startTime = Math.max(
-            0,
-            (currentTimeMs - frameOffset - startOffsetMs) / 1000,
-          );
-
-          // Cache key for this specific frame
-          const cacheKey = `${this.shouldInterpolateFrequencies}:${this.fftSize}:${this.fftGain}:${startOffsetMs}:${startTime}`;
-
-          // Check cache for this specific frame
-          const cachedFrame = this.#frequencyDataCache.get(cacheKey);
-          if (cachedFrame) {
-            return cachedFrame;
-          }
-
-          // Running 48000 * (1 / 30) = 1600 broke something terrible, it came out as 0,
-          // I'm assuming weird floating point nonsense to do with running on rosetta
-          const SIZE = 48000 / 30;
-          let audioContext: OfflineAudioContext;
-          try {
-            audioContext = new OfflineAudioContext(2, SIZE, 48000);
-          } catch (error) {
-            throw new Error(
-              `[EFMedia.frequencyDataTask] Failed to create OfflineAudioContext(2, ${SIZE}, 48000) for frame ${i} at time ${startTime}s: ${error instanceof Error ? error.message : String(error)}. This is for audio frequency analysis.`,
-            );
-          }
-          const analyser = audioContext.createAnalyser();
-          analyser.fftSize = this.fftSize;
-          analyser.minDecibels = -90;
-          analyser.maxDecibels = -10;
-
-          const gainNode = audioContext.createGain();
-          gainNode.gain.value = this.fftGain;
-
-          const filter = audioContext.createBiquadFilter();
-          filter.type = "bandpass";
-          filter.frequency.value = 15000;
-          filter.Q.value = 0.05;
-
-          const audioBufferSource = audioContext.createBufferSource();
-          audioBufferSource.buffer = audioBuffer;
-
-          audioBufferSource.connect(filter);
-          filter.connect(gainNode);
-          gainNode.connect(analyser);
-          analyser.connect(audioContext.destination);
-
-          audioBufferSource.start(0, startTime, 1 / 30);
-
-          try {
-            await audioContext.startRendering();
-            const frameData = new Uint8Array(this.fftSize / 2);
-            analyser.getByteFrequencyData(frameData);
-
-            // Cache this frame's analysis
-            this.#frequencyDataCache.set(cacheKey, frameData);
-            return frameData;
-          } finally {
-            audioBufferSource.disconnect();
-            analyser.disconnect();
-          }
-        }),
-      );
-
-      const frameLength = framesData[0]?.length ?? 0;
-
-      // Combine frames with decay
-      const smoothedData = new Uint8Array(frameLength);
-      for (let i = 0; i < frameLength; i++) {
for (let i = 0; i < frameLength; i++) {
|
|
1065
|
-
let weightedSum = 0;
|
|
1066
|
-
let weightSum = 0;
|
|
1067
|
-
|
|
1068
|
-
framesData.forEach((frame, frameIndex) => {
|
|
1069
|
-
const decayWeight = EFMedia.DECAY_WEIGHT ** frameIndex;
|
|
1070
|
-
weightedSum += (frame[i] ?? 0) * decayWeight;
|
|
1071
|
-
weightSum += decayWeight;
|
|
1072
|
-
});
|
|
1073
|
-
|
|
1074
|
-
smoothedData[i] = Math.min(255, Math.round(weightedSum / weightSum));
|
|
1075
|
-
}
|
|
1076
|
-
|
|
1077
|
-
// Apply frequency weights using instance FREQ_WEIGHTS
|
|
1078
|
-
smoothedData.forEach((value, i) => {
|
|
1079
|
-
const freqWeight = this.FREQ_WEIGHTS[i] ?? 0;
|
|
1080
|
-
smoothedData[i] = Math.min(255, Math.round(value * freqWeight));
|
|
1081
|
-
});
|
|
1082
|
-
|
|
1083
|
-
// Only return the lower half of the frequency data
|
|
1084
|
-
// The top half is zeroed out, which makes for aesthetically unpleasing waveforms
|
|
1085
|
-
const slicedData = smoothedData.slice(
|
|
1086
|
-
0,
|
|
1087
|
-
Math.floor(smoothedData.length / 2),
|
|
1088
|
-
);
|
|
1089
|
-
const processedData = this.shouldInterpolateFrequencies
|
|
1090
|
-
? processFFTData(slicedData)
|
|
1091
|
-
: slicedData;
|
|
1092
|
-
this.#frequencyDataCache.set(smoothedKey, processedData);
|
|
1093
|
-
return processedData;
|
|
1094
|
-
},
|
|
1095
|
-
});
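The removed task rendered a 1/30 s slice of the decoded buffer through an `OfflineAudioContext` and read the analyser's byte frequency data; the same approach now lives behind `makeAudioFrequencyAnalysisTask`. A minimal self-contained sketch of that render-and-analyse step (the gain and bandpass stages from the removed code are omitted here for brevity):

```typescript
// Hypothetical sketch: capture one FFT frame from an AudioBuffer at a
// given offset using an OfflineAudioContext. Runs in a browser context.
async function captureFrequencyFrame(
  buffer: AudioBuffer,
  startTimeSec: number,
  fftSize = 2048,
): Promise<Uint8Array> {
  const frameLength = 48000 / 30; // one 30 fps frame of samples at 48 kHz
  const ctx = new OfflineAudioContext(2, frameLength, 48000);

  const analyser = ctx.createAnalyser();
  analyser.fftSize = fftSize;

  const source = ctx.createBufferSource();
  source.buffer = buffer;
  source.connect(analyser);
  analyser.connect(ctx.destination);
  source.start(0, startTimeSec, 1 / 30);

  await ctx.startRendering();
  const data = new Uint8Array(analyser.frequencyBinCount);
  analyser.getByteFrequencyData(data);
  return data;
}
```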
-
-  set fftGain(value: number) {
-    const oldValue = this.fftGain;
-    this.setAttribute("fft-gain", String(value));
-    this.requestUpdate("fft-gain", oldValue);
-  }
-
-  get fftGain() {
-    return Number.parseFloat(this.getAttribute("fft-gain") ?? "3.0");
-  }
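This removed accessor pair backed `fftGain` directly by the `fft-gain` attribute with a default of 3.0. A short sketch of the same pattern in isolation (element and attribute names here are illustrative, and the Lit `requestUpdate` call is dropped to keep it framework-free):

```typescript
// Hypothetical sketch: a numeric property backed by a DOM attribute,
// following the removed fftGain accessor pattern.
class DemoElement extends HTMLElement {
  set gain(value: number) {
    this.setAttribute("gain", String(value)); // attribute is the source of truth
  }

  get gain(): number {
    return Number.parseFloat(this.getAttribute("gain") ?? "3.0"); // default 3.0
  }
}
customElements.define("demo-element", DemoElement);
```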
-
-  // Add helper methods for the new architecture
-  private synthesizeFragmentIndex(
-    jitMetadata: any,
-  ): Record<number, TrackFragmentIndex> {
-    const segmentDuration = jitMetadata.segmentDuration || 2000;
-    const numSegments = Math.ceil(jitMetadata.durationMs / segmentDuration);
-    const fragmentIndex: Record<number, TrackFragmentIndex> = {};
-
-    // Create video track fragment index
-    const videoStream = jitMetadata.streams.find(
-      (s: any) => s.type === "video",
-    );
-    if (videoStream) {
-      const segments: TrackSegment[] = [];
-      for (let i = 0; i < numSegments; i++) {
-        const startMs = i * segmentDuration;
-        const endMs = Math.min(
-          startMs + segmentDuration,
-          jitMetadata.durationMs,
-        );
-        segments.push({
-          dts: Math.floor(startMs * 90), // Convert to video timescale
-          cts: Math.floor(startMs * 90),
-          duration: Math.floor((endMs - startMs) * 90),
-          offset: 0, // Not used for JIT segments
-          size: 0, // Not used for JIT segments
-        });
-      }
-
-      fragmentIndex[videoStream.index] = {
-        track: videoStream.index,
-        type: "video",
-        timescale: 90000, // Standard video timescale
-        duration: Math.floor(jitMetadata.durationMs * 90),
-        width: videoStream.width || 1920,
-        height: videoStream.height || 1080,
-        sample_count: numSegments * 50, // Estimate ~50 frames per 2s segment
-        codec: videoStream.codecName || "h264",
-        segments,
-        initSegment: { offset: 0, size: 0 }, // Not used for JIT
-      };
-    }
-
-    // Create audio track fragment index
-    const audioStream = jitMetadata.streams.find(
-      (s: any) => s.type === "audio",
-    );
-    if (audioStream) {
-      const segments: TrackSegment[] = [];
-      const audioTimescale = audioStream.sampleRate || 48000;
-      for (let i = 0; i < numSegments; i++) {
-        const startMs = i * segmentDuration;
-        const endMs = Math.min(
-          startMs + segmentDuration,
-          jitMetadata.durationMs,
-        );
-        segments.push({
-          dts: Math.floor((startMs * audioTimescale) / 1000),
-          cts: Math.floor((startMs * audioTimescale) / 1000),
-          duration: Math.floor(((endMs - startMs) * audioTimescale) / 1000),
-          offset: 0, // Not used for JIT segments
-          size: 0, // Not used for JIT segments
-        });
-      }
-
-      fragmentIndex[audioStream.index] = {
-        track: audioStream.index,
-        type: "audio",
-        timescale: audioTimescale,
-        duration: Math.floor((jitMetadata.durationMs * audioTimescale) / 1000),
-        channel_count: audioStream.channels || 2,
-        sample_rate: audioStream.sampleRate || 48000,
-        sample_size: 16, // Standard sample size
-        sample_count: Math.floor(
-          (jitMetadata.durationMs * (audioStream.sampleRate || 48000)) / 1000,
-        ),
-        codec: audioStream.codecName || "aac",
-        segments,
-        initSegment: { offset: 0, size: 0 }, // Not used for JIT
-      };
-    }
-
-    return fragmentIndex;
-  }
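Note the unit trick in the removed code: with a 90 kHz video timescale, one millisecond is exactly 90 ticks, so `startMs * 90` converts milliseconds straight to timescale units. A tiny sketch of the general conversion (helper names are illustrative):

```typescript
// Hypothetical helpers: milliseconds <-> MP4 timescale ticks.
function msToTicks(ms: number, timescale: number): number {
  return Math.floor((ms * timescale) / 1000); // e.g. 2000 ms @ 90000 -> 180000
}

function ticksToMs(ticks: number, timescale: number): number {
  return (ticks / timescale) * 1000; // e.g. 180000 @ 90000 -> 2000 ms
}
```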
-
-  private calculateAssetSegmentKeys(
-    fragmentIndex: Record<number, TrackFragmentIndex>,
-    seekMs: number,
-  ) {
-    const segmentKeys: Record<
-      string,
-      { startTimeMs: number; trackId: string }
-    > = {};
-
-    for (const [trackId, index] of Object.entries(fragmentIndex)) {
-      const segment = index.segments.toReversed().find((segment) => {
-        const segmentStartMs = (segment.dts / index.timescale) * 1000;
-        return segmentStartMs <= seekMs;
-      });
-
-      if (segment) {
-        const startTimeMs = (segment.dts / index.timescale) * 1000;
-        segmentKeys[trackId] = { startTimeMs, trackId };
-      }
-    }
-
-    return segmentKeys;
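The removed lookup scans the segment list in reverse to find the last segment whose start is at or before the seek point. The same idea in isolation, on plain arrays instead of the MP4 types:

```typescript
// Hypothetical sketch: find the last segment starting at or before seekMs.
interface SimpleSegment {
  startMs: number;
}

function findActiveSegment(
  segments: SimpleSegment[],
  seekMs: number,
): SimpleSegment | undefined {
  // toReversed() (ES2023) keeps the original array intact.
  return segments.toReversed().find((s) => s.startMs <= seekMs);
}

// findActiveSegment([{ startMs: 0 }, { startMs: 2000 }], 2500) -> { startMs: 2000 }
```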
+  // Helper getter for backwards compatibility
+  get shouldInterpolateFrequencies() {
+    return this.interpolateFrequencies;
   }

-
-
-      string,
-      { startTimeMs: number; trackId: string }
-    > = {};
-    const segmentDuration = metadata.segmentDuration || 2000;
-
-    for (const stream of metadata.streams) {
-      const segmentIndex = Math.floor(seekMs / segmentDuration);
-      const startTimeMs = segmentIndex * segmentDuration;
-      segmentKeys[stream.index] = {
-        startTimeMs,
-        trackId: String(stream.index),
-      };
-    }
-
-    return segmentKeys;
+  get urlGenerator() {
+    return new UrlGenerator(() => this.apiHost ?? "");
   }
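The removed JIT variant above needs no reverse scan: with fixed-length segments the active segment is pure arithmetic. A sketch (the 2000 ms default mirrors the removed code):

```typescript
// Hypothetical sketch: for fixed-duration segments, the segment containing
// seekMs is a single division, no search required.
function jitSegmentStartMs(seekMs: number, segmentDurationMs = 2000): number {
  const segmentIndex = Math.floor(seekMs / segmentDurationMs);
  return segmentIndex * segmentDurationMs; // e.g. 4500 ms -> 4000 ms
}
```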

-
-    fragmentIndex: Record<number, TrackFragmentIndex>,
-    initSegments: any[],
-    seekMs: number,
-  ) {
-    const result: Record<
-      string,
-      {
-        segment: TrackSegment;
-        track: MP4Box.TrackInfo;
-        nextSegment?: TrackSegment;
-      }
-    > = {};
-
-    for (const index of Object.values(fragmentIndex)) {
-      const initTrack = initSegments
-        .find((segment) => segment.trackId === String(index.track))
-        ?.mp4File.getInfo().tracks[0];
-
-      if (!initTrack) continue;
+  mediaEngineTask = makeMediaEngineTask(this);

-
-
-
-
+  audioSegmentIdTask = makeAudioSegmentIdTask(this);
+  audioInitSegmentFetchTask = makeAudioInitSegmentFetchTask(this);
+  audioSegmentFetchTask = makeAudioSegmentFetchTask(this);
+  audioInputTask = makeAudioInputTask(this);
+  audioSeekTask = makeAudioSeekTask(this);

-
-        return (segment.dts / initTrack.timescale) * 1000 > seekMs;
-      });
+  audioBufferTask = makeAudioBufferTask(this);

-
-
-
-  }
+  // Audio analysis tasks for frequency and time domain analysis
+  byteTimeDomainTask = makeAudioTimeDomainAnalysisTask(this);
+  frequencyDataTask = makeAudioFrequencyAnalysisTask(this);
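The 0.18.x refactor replaces large inline `Task` definitions with `make*Task` factory functions (see the new `audioTasks/` files in the file list above). A minimal sketch of the shape such a factory likely takes, assuming `@lit/task`-style tasks bound to a host element; the exact factory signatures are not shown in this diff and the names below are illustrative:

```typescript
import { Task } from "@lit/task";
import type { ReactiveControllerHost } from "lit";

// Hypothetical sketch: a factory that builds a Task bound to its host,
// so the task definition can live in its own testable module.
interface AudioHost extends ReactiveControllerHost {
  currentSourceTimeMs: number;
}

export function makeExampleSeekTask(host: AudioHost) {
  return new Task(host, {
    args: () => [host.currentSourceTimeMs] as const,
    task: async ([timeMs], { signal }) => {
      // ... resolve the segment containing timeMs, honoring `signal` ...
      if (signal.aborted) return null;
      return { seekMs: timeMs };
    },
  });
}
```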

-
-
-
-
-
-
-
-    const result: Record<
-      string,
-      {
-        segment: TrackSegment;
-        track: MP4Box.TrackInfo;
-        nextSegment?: TrackSegment;
-      }
-    > = {};
-
-    for (const index of Object.values(fragmentIndex)) {
-      const track = this.createTrackInfo(index);
-
-      const segment = index.segments.toReversed().find((segment) => {
-        const segmentStartMs = (segment.dts / track.timescale) * 1000;
-        return segmentStartMs <= seekMs;
-      });
-
-      const nextSegment = index.segments.find((segment) => {
-        return (segment.dts / track.timescale) * 1000 > seekMs;
-      });
-
-      if (segment) {
-        result[index.track] = { segment, track, nextSegment };
-      }
-    }
-
-    return result;
-  }
+  /**
+   * The unique identifier for the media asset.
+   * This property can be set programmatically or via the "asset-id" attribute.
+   * @domAttribute "asset-id"
+   */
+  @property({ type: String, attribute: "asset-id", reflect: true })
+  assetId: string | null = null;
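The new reflected `asset-id` attribute means the asset can be selected from markup or from script. A usage sketch:

```typescript
// Hypothetical usage; the "ef-video" tag name is an assumption based on the
// package's element naming. With reflect: true, property writes mirror back
// to the "asset-id" attribute.
const el = document.querySelector<HTMLElement & { assetId: string | null }>(
  "ef-video",
);
if (el) {
  el.assetId = "asset_123";
  // el.getAttribute("asset-id") === "asset_123" after Lit's update cycle
}
```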

-
-    return {
-      id: index.track,
-      name: index.type,
-      type: index.type,
-      timescale: index.timescale,
-      duration: index.duration,
-      bitrate: index.type === "video" ? 1000000 : 128000,
-      created: new Date(),
-      modified: new Date(),
-      movie_duration: index.duration,
-      movie_timescale: index.timescale,
-      layer: 0,
-      alternate_group: 0,
-      volume: index.type === "audio" ? 1.0 : 0,
-      track_width: index.type === "video" ? (index as any).width || 0 : 0,
-      track_height: index.type === "video" ? (index as any).height || 0 : 0,
-      samples_duration: index.duration,
-      codec: (index as any).codec || "unknown",
-      language: "und",
-      nb_samples: (index as any).sample_count || 0,
-    } as MP4Box.TrackInfo;
+  get intrinsicDurationMs() {
+    return this.mediaEngineTask.value?.durationMs ?? 0;
   }

   protected updated(
     changedProperties: PropertyValueMap<any> | Map<PropertyKey, unknown>,
   ): void {
     super.updated(changedProperties);
+
+    // Check if our timeline position has actually changed, even if ownCurrentTimeMs isn't tracked as a property
+    const newCurrentSourceTimeMs = this.currentSourceTimeMs;
+    if (newCurrentSourceTimeMs !== this.desiredSeekTimeMs) {
+      this.executeSeek(newCurrentSourceTimeMs);
+    }
+
     if (changedProperties.has("ownCurrentTimeMs")) {
       this.executeSeek(this.currentSourceTimeMs);
     }
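The added check makes `updated()` self-correcting: it compares the derived timeline position against the last requested seek instead of relying solely on `ownCurrentTimeMs` change tracking, and the equality guard prevents seek loops, since the reactive update triggered by the seek itself will find the two values equal. The invariant, condensed:

```typescript
// Hypothetical sketch of the guard's invariant: only issue a seek when the
// derived position disagrees with the last requested one.
function maybeSeek(
  currentSourceTimeMs: number,
  desiredSeekTimeMs: number,
  executeSeek: (ms: number) => void,
): void {
  if (currentSourceTimeMs !== desiredSeekTimeMs) {
    executeSeek(currentSourceTimeMs); // afterwards the two values match
  }
}
```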
@@ -1346,59 +228,8 @@ export class EFMedia extends EFTargetable(
     return true;
   }

-  // Update videoAssetTask to use new convergent tasks
-  videoAssetTask = new Task(this, {
-    autoRun: EF_INTERACTIVE,
-    onError: (error) => {
-      console.error("videoAssetTask error", error);
-    },
-    args: () => [this.effectiveMode, this.mediaSegmentsTask.value] as const,
-    task: async ([mode, files], { signal: _signal }) => {
-      if (!files) return;
-
-      const fragmentIndex = this.fragmentIndexTask.value as Record<
-        number,
-        TrackFragmentIndex
-      > | null;
-      const computedVideoTrackId = Object.values(fragmentIndex ?? {}).find(
-        (track) => track.type === "video",
-      )?.track;
-
-      if (computedVideoTrackId === undefined) return;
-
-      const videoFile = files[computedVideoTrackId];
-      if (!videoFile) return;
-
-      // Cleanup existing asset
-      const existingAsset = this.videoAssetTask.value;
-      if (existingAsset) {
-        for (const frame of existingAsset?.decodedFrames || []) {
-          frame.close();
-        }
-        const maybeDecoder = existingAsset?.videoDecoder;
-        if (maybeDecoder?.state !== "closed") {
-          maybeDecoder.close();
-        }
-      }
-
-      // Single branching point for creation method
-      if (mode === "jit-transcode") {
-        return await VideoAsset.createFromCompleteMP4(
-          `jit-segment-${computedVideoTrackId}`,
-          videoFile,
-        );
-      }
-
-      return await VideoAsset.createFromReadableStream(
-        "video.mp4",
-        videoFile.stream(),
-        videoFile,
-      );
-    },
-  });
-
   @state()
-  private _desiredSeekTimeMs =
+  private _desiredSeekTimeMs = 0; // Initialize to 0 for proper segment loading

   get desiredSeekTimeMs() {
     return this._desiredSeekTimeMs;
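The removed cleanup block in `videoAssetTask` remains a useful reference for WebCodecs hygiene: decoded `VideoFrame`s hold resources until `close()` is called, and a `VideoDecoder` may only be closed when it is not already in the `"closed"` state. That teardown as a standalone sketch (the asset shape is illustrative):

```typescript
// Hypothetical sketch: release WebCodecs resources held by a decoded asset.
interface DecodedAsset {
  decodedFrames?: VideoFrame[];
  videoDecoder?: VideoDecoder;
}

function disposeAsset(asset: DecodedAsset): void {
  for (const frame of asset.decodedFrames ?? []) {
    frame.close(); // each VideoFrame pins memory until closed
  }
  if (asset.videoDecoder && asset.videoDecoder.state !== "closed") {
    asset.videoDecoder.close(); // closing twice throws, so check state first
  }
}
```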
@@ -1411,506 +242,42 @@ export class EFMedia extends EFTargetable(
   }

   protected async executeSeek(seekToMs: number) {
+    // The seekToMs parameter should be the timeline-relative media time
+    // calculated from currentSourceTimeMs which includes timeline positioning
     this.desiredSeekTimeMs = seekToMs;
   }

-
-
-
-
-
-
-
-
-
-
-      this.effectiveMode === "asset" ? this.fragmentIndexPath() : null,
-      this.fetch,
-    ] as const,
-    task: async ([path, fetch], { signal }) => {
-      if (!path) return null;
-      try {
-        const response = await fetch(path, { signal });
-        return (await response.json()) as Record<number, TrackFragmentIndex>;
-      } catch (error) {
-        console.error("Failed to load asset fragment index", error);
-        return null;
-      }
-    },
-    onComplete: () => {
-      this.requestUpdate("intrinsicDurationMs");
-      this.requestUpdate("ownCurrentTimeMs");
-      this.rootTimegroup?.requestUpdate("ownCurrentTimeMs");
-      this.rootTimegroup?.requestUpdate("durationMs");
-    },
-  });
-
-  // Asset segment keys calculation - separate from loading
-  assetSegmentKeysTask = new Task(this, {
-    autoRun: EF_INTERACTIVE, // Always run since this is critical for frame rendering
-    onError: (error) => {
-      console.error("assetSegmentKeysTask error", error);
-    },
-    args: () =>
-      [
-        this.effectiveMode === "asset" ? this.assetIndexLoader.value : null,
-        this.desiredSeekTimeMs,
-      ] as const,
-    task: async ([fragmentIndex, seekMs]) => {
-      if (this.effectiveMode === "asset") {
-        await this.assetIndexLoader.taskComplete;
-        fragmentIndex = this.assetIndexLoader.value;
-      }
-      if (!fragmentIndex || seekMs == null) return null;
-      return this.calculateAssetSegmentKeys(fragmentIndex, seekMs);
-    },
-  });
-
-  // Asset init segments loader - separate from media segments
-  assetInitSegmentsTask = new Task(this, {
-    autoRun: EF_INTERACTIVE, // Always run since this is critical for frame rendering
-    onError: (error) => {
-      console.error("assetInitSegmentsTask error", error);
-    },
-    args: () =>
-      [
-        this.effectiveMode === "asset" ? this.assetIndexLoader.value : null,
-        this.fetch,
-      ] as const,
-    task: async ([fragmentIndex, fetch], { signal }) => {
-      if (this.effectiveMode === "asset") {
-        await this.assetIndexLoader.taskComplete;
-        fragmentIndex = this.assetIndexLoader.value;
-      }
-      if (!fragmentIndex) return null;
-
-      return await Promise.all(
-        Object.entries(fragmentIndex).map(async ([trackId, track]) => {
-          const start = track.initSegment.offset;
-          const end = track.initSegment.offset + track.initSegment.size;
-          const response = await fetch(this.fragmentTrackPath(trackId), {
-            signal,
-            headers: { Range: `bytes=${start}-${end - 1}` },
-          });
-          const buffer =
-            (await response.arrayBuffer()) as MP4Box.MP4ArrayBuffer;
-          buffer.fileStart = 0;
-          const mp4File = new MP4File();
-          mp4File.appendBuffer(buffer, true);
-          mp4File.flush();
-          await mp4File.readyPromise;
-          return { trackId, buffer, mp4File };
-        }),
-      );
-    },
-  });
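The init-segment fetch above shows the byte-range convention used throughout these loaders: HTTP `Range` headers are inclusive on both ends, so a slice of `size` bytes starting at `offset` is requested as `bytes=offset-(offset+size-1)`. A minimal sketch:

```typescript
// Hypothetical sketch: fetch an exact byte slice of a remote MP4 using an
// inclusive HTTP Range header.
async function fetchByteRange(
  url: string,
  offset: number,
  size: number,
  signal?: AbortSignal,
): Promise<ArrayBuffer> {
  const response = await fetch(url, {
    signal,
    headers: { Range: `bytes=${offset}-${offset + size - 1}` }, // inclusive end
  });
  if (response.status !== 206 && !response.ok) {
    throw new Error(`Range request failed: ${response.status}`);
  }
  return response.arrayBuffer();
}
```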
-
-  // Asset media segments loader - now focused only on media segments
-  assetSegmentLoader = new Task(this, {
-    autoRun: EF_INTERACTIVE, // Always run since this is critical for frame rendering
-    onError: (error) => {
-      console.error("assetSegmentLoader error", error);
-    },
-    argsEqual: deepArrayEquals,
-    args: () =>
-      [
-        this.assetIndexLoader.value,
-        this.assetSegmentKeysTask.value,
-        this.assetInitSegmentsTask.value,
-        this.fetch,
-      ] as const,
-    task: async (
-      [fragmentIndex, segmentKeys, initSegments, fetch],
-      { signal },
-    ) => {
-      if (this.effectiveMode === "asset") {
-        await this.assetIndexLoader.taskComplete;
-        fragmentIndex = this.assetIndexLoader.value;
-        await this.assetSegmentKeysTask.taskComplete;
-        segmentKeys = this.assetSegmentKeysTask.value;
-        await this.assetInitSegmentsTask.taskComplete;
-        initSegments = this.assetInitSegmentsTask.value;
-      }
-
-      if (!fragmentIndex || !segmentKeys || !initSegments) return null;
-
-      // Access current seek time directly for calculations that need it
-      const seekMs = this.desiredSeekTimeMs;
-      if (seekMs == null) return null;
-
-      const files: Record<string, File> = {};
-
-      // Calculate and fetch media segments
-      const seekResult = this.calculateAssetSeekResult(
-        fragmentIndex,
-        initSegments,
-        seekMs,
-      );
-      if (!seekResult) return null;
-
-      for (const [trackId, { segment, track, nextSegment }] of Object.entries(
-        seekResult,
-      )) {
-        const start = segment.offset;
-        const end = segment.offset + segment.size;
-
-        const response = await fetch(this.fragmentTrackPath(trackId), {
-          signal,
-          headers: { Range: `bytes=${start}-${end - 1}` },
-        });
-
-        // Prefetch next segment
-        if (nextSegment) {
-          const nextStart = nextSegment.offset;
-          const nextEnd = nextSegment.offset + nextSegment.size;
-          fetch(this.fragmentTrackPath(trackId), {
-            signal,
-            headers: { Range: `bytes=${nextStart}-${nextEnd - 1}` },
-          }).catch(() => {}); // Fire and forget
-        }
-
-        const initSegment = initSegments.find(
-          (seg) => seg.trackId === String(track.id),
-        );
-        if (!initSegment) continue;
-
-        const mediaBuffer = await response.arrayBuffer();
-        files[trackId] = new File(
-          [initSegment.buffer, mediaBuffer],
-          "video.mp4",
-          {
-            type: "video/mp4",
-          },
-        );
-      }
-
-      return files;
-    },
-  });
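Both segment loaders prefetch the next segment with an unawaited `fetch` whose rejection is swallowed, relying on the HTTP cache to make the follow-up request cheap. The pattern in isolation:

```typescript
// Hypothetical sketch: warm the HTTP cache for the next segment without
// blocking or surfacing errors; the real request happens later.
function prefetch(url: string, range: string, signal?: AbortSignal): void {
  fetch(url, { signal, headers: { Range: range } }).catch(() => {
    // Fire and forget: a failed prefetch just means a cold fetch later.
  });
}
```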
-
-  // JIT segment keys calculation - separate from loading
-  jitSegmentKeysTask = new Task(this, {
-    autoRun: EF_INTERACTIVE,
-    onError: (error) => {
-      console.error("jitSegmentKeysTask error", error);
-    },
-    args: () =>
-      [
-        this.effectiveMode === "jit-transcode"
-          ? this.jitMetadataLoader.value
-          : null,
-        this.desiredSeekTimeMs,
-      ] as const,
-    task: ([metadata, seekMs]) => {
-      if (!metadata || seekMs == null) return null;
-      return this.calculateJitSegmentKeys(metadata, seekMs);
-    },
-  });
-
-  // JIT segments loader - now focused only on segment loading
-  jitSegmentLoader = new Task(this, {
-    autoRun: EF_INTERACTIVE,
-    onError: (error) => {
-      console.error("jitSegmentLoader error", error);
-    },
-    argsEqual: deepArrayEquals,
-    args: () =>
-      [
-        this.src,
-        this.jitSegmentKeysTask.value,
-        this.jitMetadataLoader.value,
-      ] as const,
-    task: async ([src, segmentKeys, metadata], { signal: _signal }) => {
-      await this.jitSegmentKeysTask.taskComplete;
-      await this.jitMetadataLoader.taskComplete;
-
-      if (!src || !segmentKeys || !metadata || !this.jitClientTask.value)
-        return null;
-
-      // Access current seek time directly for calculations that need it
-      const seekMs = this.desiredSeekTimeMs;
-      if (seekMs == null) return null;
-
-      try {
-        this.jitLoadingState = "segments";
-        this.jitErrorMessage = null;
-
-        const files: Record<string, File> = {};
-        const quality = await this.jitClientTask.value.getAdaptiveQuality();
-
-        // Calculate which segments we need based on synthetic fragment index
-        const fragmentIndex = this.synthesizeFragmentIndex(metadata);
-        const seekResult = this.calculateJitSeekResult(fragmentIndex, seekMs);
-
-        for (const [trackId, { segment, track, nextSegment }] of Object.entries(
-          seekResult,
-        )) {
-          const startTimeMs = (segment.dts / track.timescale) * 1000;
-
-          // Fetch current segment
-          const segmentBuffer = await this.jitClientTask.value.fetchSegment(
-            src,
-            startTimeMs,
-            quality,
-          );
-          files[trackId] = new File([segmentBuffer], "segment.mp4", {
-            type: "video/mp4",
-          });
-
-          // Prefetch next segment
-          if (nextSegment && this.enablePrefetch) {
-            const nextStartTimeMs = (nextSegment.dts / track.timescale) * 1000;
-            this.jitClientTask.value
-              .fetchSegment(src, nextStartTimeMs, quality)
-              .catch(() => {}); // Fire and forget
-          }
-        }
-
-        this.jitCacheStats = this.jitClientTask.value.getCacheStats();
-        this.jitLoadingState = "idle";
-        return files;
-      } catch (error) {
-        this.jitLoadingState = "error";
-        this.jitErrorMessage =
-          error instanceof Error
-            ? error.message
-            : "Failed to load video segments";
-        throw error;
-      }
-    },
-  });
-
-  // CONVERGENT TASKS - Mode-Agnostic
-
-  // Convergent fragment index from either asset or JIT metadata
-  fragmentIndexTask = new Task(this, {
-    autoRun: EF_INTERACTIVE,
-    onError: (error) => {
-      console.error("fragmentIndexTask error", error);
-    },
-    args: () =>
-      [this.assetIndexLoader.value, this.jitMetadataLoader.value] as const,
-    task: async ([assetIndex, jitMetadata]) => {
-      await this.assetIndexLoader.taskComplete;
-      await this.jitMetadataLoader.taskComplete;
-      if (assetIndex) return assetIndex;
-      if (jitMetadata) return this.synthesizeFragmentIndex(jitMetadata);
-      return null;
-    },
-  });
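This removed "convergent" task is the pattern the new `BaseMediaEngine`/`AssetMediaEngine`/`JitMediaEngine` hierarchy appears to formalize: prefer the real asset index, otherwise synthesize one from JIT metadata. Reduced to its decision rule:

```typescript
// Hypothetical sketch of the convergence rule: a real index wins, a synthetic
// index is the fallback, and null means neither source is ready yet.
function convergeFragmentIndex<T>(
  assetIndex: T | null,
  jitMetadata: unknown | null,
  synthesize: (metadata: unknown) => T,
): T | null {
  if (assetIndex) return assetIndex;
  if (jitMetadata) return synthesize(jitMetadata);
  return null;
}
```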
-
-  // Convergent media segments from either asset or JIT loaders
-  mediaSegmentsTask = new Task(this, {
-    autoRun: EF_INTERACTIVE,
-    onError: (error) => {
-      console.error("mediaSegmentsTask error", error);
-    },
-    args: () =>
-      [this.assetSegmentLoader.value, this.jitSegmentLoader.value] as const,
-    task: async ([_assetFiles, _jitFiles], { signal }) => {
-      log("🔍 SIGNAL: mediaSegmentsTask starting", {
-        signalAborted: signal.aborted,
-      });
-
-      await this.assetSegmentLoader.taskComplete;
-      if (signal.aborted) {
-        log(
-          "🔍 SIGNAL: mediaSegmentsTask aborted after assetSegmentLoader.taskComplete",
-        );
-        return null;
-      }
-
-      await this.jitSegmentLoader.taskComplete;
-      if (signal.aborted) {
-        log(
-          "🔍 SIGNAL: mediaSegmentsTask aborted after jitSegmentLoader.taskComplete",
-        );
-        return null;
-      }
-
-      // Get fresh values
-      const assetFiles = this.assetSegmentLoader.value;
-      const jitFiles = this.jitSegmentLoader.value;
-
-      log("🔍 SIGNAL: mediaSegmentsTask using fresh values", {
-        hasAssetFiles: !!assetFiles,
-        hasJitFiles: !!jitFiles,
-        signalAborted: signal.aborted,
-      });
-
-      const result = assetFiles || jitFiles || null;
-      log("🔍 SIGNAL: mediaSegmentsTask resolved", {
-        hasResult: !!result,
-        signalAborted: signal.aborted,
-      });
-      return result;
-    },
-  });
-
-  // Replace seekTask with unified task
-  seekTask = new Task(this, {
-    autoRun: EF_INTERACTIVE, // Always run since this is critical for frame rendering
-    onError: (error) => {
-      console.error("seekTask error", error);
-    },
-    args: () =>
-      [
-        this.fragmentIndexTask.value,
-        this.mediaSegmentsTask.value,
-        this.desiredSeekTimeMs,
-      ] as const,
-    task: async ([_fragmentIndex, _files, seekMs], { signal }) => {
-      log("🔍 SIGNAL: seekTask starting", {
-        seekMs,
-        signalAborted: signal.aborted,
-      });
-
-      await this.fragmentIndexTask.taskComplete;
-      if (signal.aborted) {
-        log("🔍 SIGNAL: seekTask aborted after fragmentIndexTask.taskComplete");
-        return null;
-      }
-
-      await this.mediaSegmentsTask.taskComplete;
-      if (signal.aborted) {
-        log("🔍 SIGNAL: seekTask aborted after mediaSegmentsTask.taskComplete");
-        return null;
-      }
-
-      // Get fresh values after awaiting
-      const fragmentIndex = this.fragmentIndexTask.value;
-      const files = this.mediaSegmentsTask.value;
-
-      log("🔍 SIGNAL: seekTask using fresh values", {
-        hasFragmentIndex: !!fragmentIndex,
-        hasFiles: !!files,
-        seekMs,
-        signalAborted: signal.aborted,
-      });
-
-      const typedFragmentIndex = fragmentIndex as Record<
-        number,
-        TrackFragmentIndex
-      > | null;
-      if (!typedFragmentIndex || !files) {
-        log("🔍 SIGNAL: seekTask calculation aborted - missing required data");
-        return null;
-      }
-
-      // Calculate seek metadata that downstream tasks need
-      const result: Record<
-        string,
-        {
-          segment: TrackSegment;
-          track: MP4Box.TrackInfo;
-          nextSegment?: TrackSegment;
-        }
-      > = {};
-
-      for (const index of Object.values(typedFragmentIndex)) {
-        // Create track info (synthetic for JIT, real for asset)
-        const track = this.createTrackInfo(index);
-        log("trace: processing track", {
-          trackId: index.track,
-          type: index.type,
-        });
-
-        const segment = index.segments
-          .toReversed()
-          .find((segment: TrackSegment) => {
-            const segmentStartMs = (segment.dts / track.timescale) * 1000;
-            return segmentStartMs <= seekMs;
-          });
-
-        const nextSegment = index.segments.find((segment: TrackSegment) => {
-          const segmentStartMs = (segment.dts / track.timescale) * 1000;
-          return segmentStartMs > seekMs;
-        });
-
-        if (segment) {
-          result[index.track] = { segment, track, nextSegment };
-          log("trace: found segment for track", {
-            trackId: index.track,
-            segmentDts: segment.dts,
-            hasNextSegment: !!nextSegment,
-          });
-        }
-      }
-
-      log("🔍 SIGNAL: seekTask calculation complete", {
-        trackCount: Object.keys(result).length,
-        signalAborted: signal.aborted,
-      });
-      return result;
-    },
-  });
-}
-
-function processFFTData(
-  fftData: Uint8Array,
-  zeroThresholdPercent = 0.1,
-): Uint8Array {
-  // Step 1: Determine the threshold for zeros
-  const totalBins = fftData.length;
-  const zeroThresholdCount = Math.floor(totalBins * zeroThresholdPercent);
-
-  // Step 2: Interrogate the FFT output to find the cutoff point
-  let zeroCount = 0;
-  let cutoffIndex = totalBins; // Default to the end of the array
-
-  for (let i = totalBins - 1; i >= 0; i--) {
-    if (fftData[i] ?? 0 < 10) {
-      zeroCount++;
-    } else {
-      // If we encounter a non-zero value, we can stop
-      if (zeroCount >= zeroThresholdCount) {
-        cutoffIndex = i + 1; // Include this index
-        break;
-      }
-    }
-  }
-
-  if (cutoffIndex < zeroThresholdCount) {
-    return fftData;
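Worth noting while this function is being deleted: the removed condition `fftData[i] ?? 0 < 10` almost certainly didn't do what its comment implies. Relational operators bind tighter than `??`, so it parses as `fftData[i] ?? (0 < 10)`, i.e. "the bin's truthiness, or `true` when the bin is missing", never a near-zero test. The intended comparison would have been:

```typescript
// Hypothetical corrected form of the removed near-zero test: parenthesize
// the nullish-coalescing expression so the < applies to the bin value.
if ((fftData[i] ?? 0) < 10) {
  // bin is (near) zero
}
```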
+  /**
+   * Main integration method for EFTimegroup audio playback
+   * Now powered by clean, testable utility functions
+   */
+  async fetchAudioSpanningTime(
+    fromMs: number,
+    toMs: number,
+    signal: AbortSignal = new AbortController().signal,
+  ): Promise<AudioSpan> {
+    return fetchAudioSpanningTime(this, fromMs, toMs, signal);
   }
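A usage sketch for the new method (the `AudioSpan` result shape beyond what this diff shows is not specified here, so it is left opaque):

```typescript
// Hypothetical usage: fetch two seconds of audio and allow the request to
// be cancelled via an AbortController.
const controller = new AbortController();

async function loadSpan(media: {
  fetchAudioSpanningTime(
    fromMs: number,
    toMs: number,
    signal?: AbortSignal,
  ): Promise<unknown>;
}) {
  const span = await media.fetchAudioSpanningTime(0, 2000, controller.signal);
  return span;
}

// controller.abort() cancels any in-flight segment fetches.
```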

-
-
-
-
-
-
-  for (let i = attenuationStartIndex; i < totalBins; i++) {
-    // Calculate attenuation factor that goes from 1 to 0 over the top 10%
-    const attenuationProgress =
-      (i - attenuationStartIndex) / (totalBins - attenuationStartIndex) + 0.2;
-    const attenuationFactor = Math.max(0, 1 - attenuationProgress);
-    resampledData[i] = Math.floor((resampledData[i] ?? 0) * attenuationFactor);
+  /**
+   * Check if an audio segment is cached in the unified buffer system
+   * Now uses the same caching approach as video for consistency
+   */
+  getCachedAudioSegment(segmentId: number): boolean {
+    return this.audioBufferTask.value?.cachedSegments.has(segmentId) ?? false;
   }

-
-
-
-
-
-
-
-
-  // Calculate the corresponding index in the original data
-  const ratio = (i / (targetSize - 1)) * (dataLength - 1);
-  const index = Math.floor(ratio);
-  const fraction = ratio - index;
-
-  // Handle edge cases
-  if (index >= dataLength - 1) {
-    resampled[i] = data[dataLength - 1] ?? 0; // Last value
-  } else {
-    // Linear interpolation
-    resampled[i] = Math.round(
-      (data[index] ?? 0) * (1 - fraction) + (data[index + 1] ?? 0) * fraction,
-    );
+  /**
+   * Get cached audio segments from the unified buffer system
+   * Now uses the same caching approach as video for consistency
+   */
+  getCachedAudioSegments(segmentIds: number[]): Set<number> {
+    const bufferState = this.audioBufferTask.value;
+    if (!bufferState) {
+      return new Set();
   }
+    return new Set(
+      segmentIds.filter((id) => bufferState.cachedSegments.has(id)),
+    );
   }
-
-  return resampled;
 }