@editframe/elements 0.19.4-beta.0 → 0.20.1-beta.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/elements/ContextProxiesController.d.ts +40 -0
- package/dist/elements/ContextProxiesController.js +69 -0
- package/dist/elements/EFCaptions.d.ts +45 -6
- package/dist/elements/EFCaptions.js +220 -26
- package/dist/elements/EFImage.js +4 -1
- package/dist/elements/EFMedia/AssetIdMediaEngine.d.ts +2 -1
- package/dist/elements/EFMedia/AssetIdMediaEngine.js +9 -0
- package/dist/elements/EFMedia/AssetMediaEngine.d.ts +1 -0
- package/dist/elements/EFMedia/AssetMediaEngine.js +11 -0
- package/dist/elements/EFMedia/BaseMediaEngine.d.ts +13 -1
- package/dist/elements/EFMedia/BaseMediaEngine.js +9 -0
- package/dist/elements/EFMedia/JitMediaEngine.d.ts +7 -1
- package/dist/elements/EFMedia/JitMediaEngine.js +15 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioBufferTask.js +2 -1
- package/dist/elements/EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.js +2 -0
- package/dist/elements/EFMedia/audioTasks/makeAudioInitSegmentFetchTask.d.ts +1 -1
- package/dist/elements/EFMedia/audioTasks/makeAudioInitSegmentFetchTask.js +3 -1
- package/dist/elements/EFMedia/audioTasks/makeAudioInputTask.js +1 -1
- package/dist/elements/EFMedia/audioTasks/makeAudioSegmentFetchTask.d.ts +1 -1
- package/dist/elements/EFMedia/audioTasks/makeAudioSegmentFetchTask.js +6 -5
- package/dist/elements/EFMedia/audioTasks/makeAudioSegmentIdTask.js +3 -1
- package/dist/elements/EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.js +2 -0
- package/dist/elements/EFMedia/shared/AudioSpanUtils.js +2 -2
- package/dist/elements/EFMedia/shared/GlobalInputCache.d.ts +39 -0
- package/dist/elements/EFMedia/shared/GlobalInputCache.js +57 -0
- package/dist/elements/EFMedia/shared/ThumbnailExtractor.d.ts +27 -0
- package/dist/elements/EFMedia/shared/ThumbnailExtractor.js +106 -0
- package/dist/elements/EFMedia/tasks/makeMediaEngineTask.js +1 -1
- package/dist/elements/EFMedia.d.ts +2 -2
- package/dist/elements/EFMedia.js +25 -1
- package/dist/elements/EFSurface.browsertest.d.ts +0 -0
- package/dist/elements/EFSurface.d.ts +30 -0
- package/dist/elements/EFSurface.js +96 -0
- package/dist/elements/EFTemporal.js +7 -6
- package/dist/elements/EFThumbnailStrip.browsertest.d.ts +0 -0
- package/dist/elements/EFThumbnailStrip.d.ts +86 -0
- package/dist/elements/EFThumbnailStrip.js +490 -0
- package/dist/elements/EFThumbnailStrip.media-engine.browsertest.d.ts +0 -0
- package/dist/elements/EFTimegroup.d.ts +6 -1
- package/dist/elements/EFTimegroup.js +53 -11
- package/dist/elements/updateAnimations.browsertest.d.ts +13 -0
- package/dist/elements/updateAnimations.d.ts +5 -0
- package/dist/elements/updateAnimations.js +37 -13
- package/dist/getRenderInfo.js +1 -1
- package/dist/gui/ContextMixin.js +27 -14
- package/dist/gui/EFControls.browsertest.d.ts +0 -0
- package/dist/gui/EFControls.d.ts +38 -0
- package/dist/gui/EFControls.js +51 -0
- package/dist/gui/EFFilmstrip.d.ts +40 -1
- package/dist/gui/EFFilmstrip.js +240 -3
- package/dist/gui/EFPreview.js +2 -1
- package/dist/gui/EFScrubber.d.ts +6 -5
- package/dist/gui/EFScrubber.js +31 -21
- package/dist/gui/EFTimeDisplay.browsertest.d.ts +0 -0
- package/dist/gui/EFTimeDisplay.d.ts +2 -6
- package/dist/gui/EFTimeDisplay.js +13 -23
- package/dist/gui/TWMixin.js +1 -1
- package/dist/gui/currentTimeContext.d.ts +3 -0
- package/dist/gui/currentTimeContext.js +3 -0
- package/dist/gui/durationContext.d.ts +3 -0
- package/dist/gui/durationContext.js +3 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +4 -1
- package/dist/style.css +1 -1
- package/dist/transcoding/types/index.d.ts +11 -0
- package/dist/utils/LRUCache.d.ts +46 -0
- package/dist/utils/LRUCache.js +382 -1
- package/dist/utils/LRUCache.test.d.ts +1 -0
- package/package.json +2 -2
- package/src/elements/ContextProxiesController.ts +124 -0
- package/src/elements/EFCaptions.browsertest.ts +1820 -0
- package/src/elements/EFCaptions.ts +373 -36
- package/src/elements/EFImage.ts +4 -1
- package/src/elements/EFMedia/AssetIdMediaEngine.ts +30 -1
- package/src/elements/EFMedia/AssetMediaEngine.ts +33 -0
- package/src/elements/EFMedia/BaseMediaEngine.browsertest.ts +3 -8
- package/src/elements/EFMedia/BaseMediaEngine.ts +35 -0
- package/src/elements/EFMedia/JitMediaEngine.ts +34 -0
- package/src/elements/EFMedia/audioTasks/makeAudioBufferTask.ts +6 -5
- package/src/elements/EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.ts +5 -0
- package/src/elements/EFMedia/audioTasks/makeAudioInitSegmentFetchTask.ts +8 -5
- package/src/elements/EFMedia/audioTasks/makeAudioInputTask.ts +5 -5
- package/src/elements/EFMedia/audioTasks/makeAudioSegmentFetchTask.ts +11 -12
- package/src/elements/EFMedia/audioTasks/makeAudioSegmentIdTask.ts +7 -4
- package/src/elements/EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.ts +5 -0
- package/src/elements/EFMedia/shared/AudioSpanUtils.ts +2 -2
- package/src/elements/EFMedia/shared/GlobalInputCache.ts +77 -0
- package/src/elements/EFMedia/shared/RenditionHelpers.browsertest.ts +2 -2
- package/src/elements/EFMedia/shared/RenditionHelpers.ts +2 -2
- package/src/elements/EFMedia/shared/ThumbnailExtractor.ts +227 -0
- package/src/elements/EFMedia/tasks/makeMediaEngineTask.ts +1 -1
- package/src/elements/EFMedia.ts +38 -1
- package/src/elements/EFSurface.browsertest.ts +155 -0
- package/src/elements/EFSurface.ts +141 -0
- package/src/elements/EFTemporal.ts +14 -8
- package/src/elements/EFThumbnailStrip.browsertest.ts +591 -0
- package/src/elements/EFThumbnailStrip.media-engine.browsertest.ts +713 -0
- package/src/elements/EFThumbnailStrip.ts +905 -0
- package/src/elements/EFTimegroup.browsertest.ts +56 -7
- package/src/elements/EFTimegroup.ts +88 -16
- package/src/elements/updateAnimations.browsertest.ts +333 -11
- package/src/elements/updateAnimations.ts +68 -19
- package/src/gui/ContextMixin.browsertest.ts +0 -25
- package/src/gui/ContextMixin.ts +44 -20
- package/src/gui/EFControls.browsertest.ts +175 -0
- package/src/gui/EFControls.ts +84 -0
- package/src/gui/EFFilmstrip.ts +323 -4
- package/src/gui/EFPreview.ts +2 -1
- package/src/gui/EFScrubber.ts +29 -25
- package/src/gui/EFTimeDisplay.browsertest.ts +237 -0
- package/src/gui/EFTimeDisplay.ts +12 -40
- package/src/gui/currentTimeContext.ts +5 -0
- package/src/gui/durationContext.ts +3 -0
- package/src/transcoding/types/index.ts +13 -0
- package/src/utils/LRUCache.test.ts +272 -0
- package/src/utils/LRUCache.ts +543 -0
- package/test/__cache__/GET__api_v1_transcode_high_1_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__26197f6f7c46cacb0a71134131c3f775/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_high_1_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__26197f6f7c46cacb0a71134131c3f775/metadata.json +1 -1
- package/test/__cache__/GET__api_v1_transcode_high_2_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__4cb6774cd3650ccf59c8f8dc6678c0b9/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_high_2_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__4cb6774cd3650ccf59c8f8dc6678c0b9/metadata.json +1 -1
- package/test/__cache__/GET__api_v1_transcode_high_3_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__0b3b2b1c8933f7fcf8a9ecaa88d58b41/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_high_3_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__0b3b2b1c8933f7fcf8a9ecaa88d58b41/metadata.json +1 -1
- package/test/__cache__/GET__api_v1_transcode_high_4_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a6fb05a22b18d850f7f2950bbcdbdeed/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_high_4_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a6fb05a22b18d850f7f2950bbcdbdeed/metadata.json +1 -1
- package/test/__cache__/GET__api_v1_transcode_high_5_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a50058c7c3602e90879fe3428ed891f4/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_high_5_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__a50058c7c3602e90879fe3428ed891f4/metadata.json +1 -1
- package/test/__cache__/GET__api_v1_transcode_high_init_m4s_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__0798c479b44aaeef850609a430f6e613/data.bin +0 -0
- package/test/__cache__/GET__api_v1_transcode_manifest_json_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__3be92a0437de726b431ed5af2369158a/data.bin +1 -1
- package/test/__cache__/GET__api_v1_transcode_manifest_json_url_http_3A_2F_2Fweb_3A3000_2Fhead_moov_480p_mp4__3be92a0437de726b431ed5af2369158a/metadata.json +1 -1
- package/types.json +1 -1
- package/dist/transcoding/cache/CacheManager.d.ts +0 -73
- package/src/transcoding/cache/CacheManager.ts +0 -208
package/dist/elements/EFMedia/AssetMediaEngine.js
@@ -157,5 +157,16 @@ var AssetMediaEngine = class AssetMediaEngine extends BaseMediaEngine {
       maxAudioBufferFetches: 1
     };
   }
+  convertToSegmentRelativeTimestamps(globalTimestamps, segmentId, rendition) {
+    {
+      if (!rendition.trackId) throw new Error("Track ID is required for asset metadata");
+      const trackData = this.data[rendition.trackId];
+      if (!trackData) throw new Error("Track not found");
+      const segment = trackData.segments?.[segmentId];
+      if (!segment) throw new Error("Segment not found");
+      const segmentStartMs = segment.cts / trackData.timescale * 1e3;
+      return globalTimestamps.map((globalMs) => (globalMs - segmentStartMs) / 1e3);
+    }
+  }
 };
 export { AssetMediaEngine };
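The new AssetMediaEngine override maps global timeline milliseconds onto the seconds-within-segment values that mediabunny expects, using the segment's cts and the track timescale. A short worked example with made-up numbers (not taken from the package):

```ts
// Hedged worked example: with a track timescale of 90000 and a segment cts of
// 2_700_000, the segment starts at 2_700_000 / 90000 * 1e3 = 30000 ms on the
// global timeline. A global timestamp of 31500 ms therefore becomes
// (31500 - 30000) / 1e3 = 1.5 seconds into that segment.
const timescale = 90_000;
const cts = 2_700_000;
const segmentStartMs = (cts / timescale) * 1e3;                 // 30000
const segmentRelativeSeconds = (31_500 - segmentStartMs) / 1e3; // 1.5
console.log(segmentStartMs, segmentRelativeSeconds);
```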
package/dist/elements/EFMedia/BaseMediaEngine.d.ts
@@ -1,7 +1,8 @@
 import { RequestDeduplicator } from '../../transcoding/cache/RequestDeduplicator.js';
-import { AudioRendition, SegmentTimeRange, VideoRendition } from '../../transcoding/types';
+import { AudioRendition, SegmentTimeRange, ThumbnailResult, VideoRendition } from '../../transcoding/types';
 import { SizeAwareLRUCache } from '../../utils/LRUCache.js';
 import { EFMedia } from '../EFMedia.js';
+import { MediaRendition } from './shared/MediaTaskUtils.js';
 export declare const mediaCache: SizeAwareLRUCache<string>;
 export declare const globalRequestDeduplicator: RequestDeduplicator;
 export declare abstract class BaseMediaEngine {
@@ -43,6 +44,11 @@ export declare abstract class BaseMediaEngine {
     trackId: number | undefined;
     src: string;
   }): Promise<ArrayBuffer>;
+  abstract fetchInitSegment(rendition: {
+    trackId: number | undefined;
+    src: string;
+  }, signal: AbortSignal): Promise<ArrayBuffer>;
+  abstract computeSegmentId(desiredSeekTimeMs: number, rendition: MediaRendition): number | undefined;
   /**
    * Fetch media segment with built-in deduplication
    * Now uses global deduplication for all requests
@@ -80,4 +86,10 @@ export declare abstract class BaseMediaEngine {
    * Get cached segment IDs from a list for a given rendition
    */
   getCachedSegments(segmentIds: number[], rendition: AudioRendition | VideoRendition): Set<number>;
+  /**
+   * Extract thumbnail canvases at multiple timestamps efficiently
+   * Default implementation provides helpful error information
+   */
+  extractThumbnails(timestamps: number[]): Promise<(ThumbnailResult | null)[]>;
+  abstract convertToSegmentRelativeTimestamps(globalTimestamps: number[], segmentId: number, rendition: VideoRendition): number[];
 }
package/dist/elements/EFMedia/BaseMediaEngine.js
@@ -190,5 +190,14 @@ var BaseMediaEngine = class {
   getCachedSegments(segmentIds, rendition) {
     return new Set(segmentIds.filter((id) => this.isSegmentCached(id, rendition)));
   }
+  /**
+   * Extract thumbnail canvases at multiple timestamps efficiently
+   * Default implementation provides helpful error information
+   */
+  async extractThumbnails(timestamps) {
+    const engineName = this.constructor.name;
+    console.warn(`${engineName}: extractThumbnails not properly implemented. This MediaEngine type does not support thumbnail generation. Supported engines: JitMediaEngine. Requested ${timestamps.length} thumbnail${timestamps.length === 1 ? "" : "s"}.`);
+    return timestamps.map(() => null);
+  }
 };
 export { BaseMediaEngine };
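Taken together, the declaration and the default implementation mean every engine now exposes `extractThumbnails`, but only engines that override it (JitMediaEngine, per the warning text) return real canvases; other engines resolve every slot to `null`. A hedged consumer sketch, with the engine shape structurally typed rather than imported:

```ts
// Minimal sketch of consuming the new API; `engine` is a structural stand-in
// for a concrete MediaEngine, and ThumbnailResult is assumed to be
// { timestamp, thumbnail } as used elsewhere in this diff.
type ThumbnailResult = { timestamp: number; thumbnail: HTMLCanvasElement | OffscreenCanvas };

async function paintThumbnails(
  engine: { extractThumbnails(timestamps: number[]): Promise<(ThumbnailResult | null)[]> },
  timestampsMs: number[],
  targets: HTMLCanvasElement[],
): Promise<void> {
  const results = await engine.extractThumbnails(timestampsMs);
  results.forEach((result, i) => {
    const target = targets[i];
    if (!result || !target) return; // unsupported engines resolve every slot to null
    target.getContext("2d")?.drawImage(result.thumbnail, 0, 0, target.width, target.height);
  });
}
```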
package/dist/elements/EFMedia/JitMediaEngine.d.ts
@@ -1,10 +1,11 @@
-import { AudioRendition, MediaEngine, RenditionId, VideoRendition } from '../../transcoding/types';
+import { AudioRendition, MediaEngine, RenditionId, ThumbnailResult, VideoRendition } from '../../transcoding/types';
 import { UrlGenerator } from '../../transcoding/utils/UrlGenerator';
 import { EFMedia } from '../EFMedia.js';
 import { BaseMediaEngine } from './BaseMediaEngine';
 export declare class JitMediaEngine extends BaseMediaEngine implements MediaEngine {
   private urlGenerator;
   private data;
+  private thumbnailExtractor;
   static fetch(host: EFMedia, urlGenerator: UrlGenerator, url: string): Promise<JitMediaEngine>;
   constructor(host: EFMedia, urlGenerator: UrlGenerator);
   get durationMs(): number;
@@ -37,4 +38,9 @@ export declare class JitMediaEngine extends BaseMediaEngine implements MediaEngi
     maxVideoBufferFetches: number;
     maxAudioBufferFetches: number;
   };
+  /**
+   * Extract thumbnail canvases using same rendition priority as video playback for frame alignment
+   */
+  extractThumbnails(timestamps: number[]): Promise<(ThumbnailResult | null)[]>;
+  convertToSegmentRelativeTimestamps(globalTimestamps: number[], _segmentId: number, _rendition: VideoRendition): number[];
 }
package/dist/elements/EFMedia/JitMediaEngine.js
@@ -1,4 +1,5 @@
 import { BaseMediaEngine } from "./BaseMediaEngine.js";
+import { ThumbnailExtractor } from "./shared/ThumbnailExtractor.js";
 var JitMediaEngine = class JitMediaEngine extends BaseMediaEngine {
   static async fetch(host, urlGenerator, url) {
     const engine = new JitMediaEngine(host, urlGenerator);
@@ -10,6 +11,7 @@ var JitMediaEngine = class JitMediaEngine extends BaseMediaEngine {
     super(host);
     this.data = {};
     this.urlGenerator = urlGenerator;
+    this.thumbnailExtractor = new ThumbnailExtractor(this);
   }
   get durationMs() {
     return this.data.durationMs;
@@ -101,5 +103,18 @@ var JitMediaEngine = class JitMediaEngine extends BaseMediaEngine {
       maxAudioBufferFetches: 3
     };
   }
+  /**
+   * Extract thumbnail canvases using same rendition priority as video playback for frame alignment
+   */
+  async extractThumbnails(timestamps) {
+    const mainRendition = this.videoRendition;
+    const scrubRendition = this.getScrubVideoRendition();
+    const rendition = mainRendition || scrubRendition;
+    if (!rendition) return timestamps.map(() => null);
+    return this.thumbnailExtractor.extractThumbnails(timestamps, rendition, this.durationMs);
+  }
+  convertToSegmentRelativeTimestamps(globalTimestamps, _segmentId, _rendition) {
+    return globalTimestamps.map((timestamp) => timestamp / 1e3);
+  }
 };
 export { JitMediaEngine };
package/dist/elements/EFMedia/audioTasks/makeAudioBufferTask.js
@@ -22,6 +22,7 @@ const makeAudioBufferTask = (host) => {
     task: async ([seekTimeMs], { signal }) => {
       if (EF_RENDERING()) return currentState;
       const mediaEngine = await getLatestMediaEngine(host, signal);
+      if (!mediaEngine.audioRendition) return currentState;
       const engineConfig = mediaEngine.getBufferConfig();
       const bufferDurationMs = engineConfig.audioBufferDurationMs;
       const maxParallelFetches = engineConfig.maxAudioBufferFetches;
@@ -47,7 +48,7 @@ const makeAudioBufferTask = (host) => {
       getRendition: async () => {
         const mediaEngine$1 = await getLatestMediaEngine(host, signal);
         const audioRendition = mediaEngine$1.audioRendition;
-        if (!audioRendition) throw new Error("
+        if (!audioRendition) throw new Error("No audio track available in source");
         return audioRendition;
       },
       logError: console.error
package/dist/elements/EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.js
@@ -51,6 +51,8 @@ function makeAudioFrequencyAnalysisTask(element) {
     ],
     task: async (_, { signal }) => {
       if (element.currentSourceTimeMs < 0) return null;
+      const mediaEngine = element.mediaEngineTask.value;
+      if (!mediaEngine?.audioRendition) return null;
       const currentTimeMs = element.currentSourceTimeMs;
       const frameIntervalMs = 1e3 / 30;
       const earliestFrameMs = currentTimeMs - (element.fftDecay - 1) * frameIntervalMs;
package/dist/elements/EFMedia/audioTasks/makeAudioInitSegmentFetchTask.d.ts
@@ -1,4 +1,4 @@
 import { Task } from '@lit/task';
 import { MediaEngine } from '../../../transcoding/types';
 import { EFMedia } from '../../EFMedia';
-export declare const makeAudioInitSegmentFetchTask: (host: EFMedia) => Task<readonly [MediaEngine | undefined], ArrayBuffer>;
+export declare const makeAudioInitSegmentFetchTask: (host: EFMedia) => Task<readonly [MediaEngine | undefined], ArrayBuffer | undefined>;
package/dist/elements/EFMedia/audioTasks/makeAudioInitSegmentFetchTask.js
@@ -9,7 +9,9 @@ const makeAudioInitSegmentFetchTask = (host) => {
     onComplete: (_value) => {},
     task: async ([_mediaEngine], { signal }) => {
       const mediaEngine = await getLatestMediaEngine(host, signal);
-
+      const audioRendition = mediaEngine.audioRendition;
+      if (!audioRendition) return void 0;
+      return mediaEngine.fetchInitSegment(audioRendition, signal);
     }
   });
 };
package/dist/elements/EFMedia/audioTasks/makeAudioInputTask.js
@@ -13,7 +13,7 @@ const makeAudioInputTask = (host) => {
       signal.throwIfAborted();
       const segment = await host.audioSegmentFetchTask.taskComplete;
       signal.throwIfAborted();
-      if (!initSegment || !segment) throw new Error("
+      if (!initSegment || !segment) throw new Error("No audio track available in source");
       const mediaEngine = await host.mediaEngineTask.taskComplete;
       const audioRendition = mediaEngine?.audioRendition;
       const startTimeOffsetMs = audioRendition?.startTimeOffsetMs;
package/dist/elements/EFMedia/audioTasks/makeAudioSegmentFetchTask.d.ts
@@ -1,4 +1,4 @@
 import { Task } from '@lit/task';
 import { MediaEngine } from '../../../transcoding/types';
 import { EFMedia } from '../../EFMedia';
-export declare const makeAudioSegmentFetchTask: (host: EFMedia) => Task<readonly [MediaEngine | undefined, number | undefined], ArrayBuffer>;
+export declare const makeAudioSegmentFetchTask: (host: EFMedia) => Task<readonly [MediaEngine | undefined, number | undefined], ArrayBuffer | undefined>;
package/dist/elements/EFMedia/audioTasks/makeAudioSegmentFetchTask.js
@@ -9,19 +9,20 @@ const makeAudioSegmentFetchTask = (host) => {
     onComplete: (_value) => {},
     task: async (_, { signal }) => {
       const mediaEngine = await getLatestMediaEngine(host, signal);
+      const audioRendition = mediaEngine.audioRendition;
+      if (!audioRendition) return void 0;
       const segmentId = await host.audioSegmentIdTask.taskComplete;
       if (segmentId === void 0) {
-        const rendition = mediaEngine.audioRendition;
         const debugInfo = {
-          hasRendition:
-          segmentDurationMs:
-          segmentDurationsMs:
+          hasRendition: true,
+          segmentDurationMs: audioRendition.segmentDurationMs,
+          segmentDurationsMs: audioRendition.segmentDurationsMs?.length || 0,
           desiredSeekTimeMs: host.desiredSeekTimeMs,
           intrinsicDurationMs: host.intrinsicDurationMs
         };
         throw new Error(`Segment ID is not available for audio. Debug info: ${JSON.stringify(debugInfo)}`);
       }
-      return mediaEngine.fetchMediaSegment(segmentId,
+      return mediaEngine.fetchMediaSegment(segmentId, audioRendition, signal);
     }
   });
 };
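The same pattern repeats across the audio task factories in this release: instead of throwing a (previously empty-message) error when `mediaEngine.audioRendition` is missing, the tasks now resolve to `undefined`, and the `.d.ts` signatures above widen accordingly. A hedged sketch of what that means for downstream code that awaits these tasks:

```ts
// Sketch only: callers that previously assumed ArrayBuffer must now handle
// ArrayBuffer | undefined and treat a missing audio track as "no audio" rather
// than an error. The host shape is narrowed to just the two tasks used here.
async function collectAudioBuffers(host: {
  audioInitSegmentFetchTask: { taskComplete: Promise<ArrayBuffer | undefined> };
  audioSegmentFetchTask: { taskComplete: Promise<ArrayBuffer | undefined> };
}): Promise<Blob | undefined> {
  const initSegment = await host.audioInitSegmentFetchTask.taskComplete;
  const segment = await host.audioSegmentFetchTask.taskComplete;
  if (!initSegment || !segment) return undefined; // video-only source: skip audio decoding
  return new Blob([initSegment, segment]);        // init + media segment, ready for demuxing
}
```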
package/dist/elements/EFMedia/audioTasks/makeAudioSegmentIdTask.js
@@ -10,7 +10,9 @@ const makeAudioSegmentIdTask = (host) => {
     task: async ([, targetSeekTimeMs], { signal }) => {
       const mediaEngine = await getLatestMediaEngine(host, signal);
       signal.throwIfAborted();
-
+      const audioRendition = mediaEngine.audioRendition;
+      if (!audioRendition) return void 0;
+      return mediaEngine.computeSegmentId(targetSeekTimeMs, audioRendition);
     }
   });
 };
package/dist/elements/EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.js
@@ -18,6 +18,8 @@ function makeAudioTimeDomainAnalysisTask(element) {
     ],
     task: async (_, { signal }) => {
       if (element.currentSourceTimeMs < 0) return null;
+      const mediaEngine = element.mediaEngineTask.value;
+      if (!mediaEngine?.audioRendition) return null;
       const currentTimeMs = element.currentSourceTimeMs;
       const frameIntervalMs = 1e3 / 30;
       const earliestFrameMs = currentTimeMs - (element.fftDecay - 1) * frameIntervalMs;
package/dist/elements/EFMedia/shared/AudioSpanUtils.js
@@ -4,7 +4,7 @@
  */
 const fetchAudioSegmentData = async (segmentIds, mediaEngine, signal) => {
   const audioRendition = mediaEngine.audioRendition;
-  if (!audioRendition) throw new Error("
+  if (!audioRendition) throw new Error("No audio track available in source");
   const segmentData = /* @__PURE__ */ new Map();
   const fetchPromises = segmentIds.map(async (segmentId) => {
     const arrayBuffer = await mediaEngine.fetchMediaSegment(segmentId, audioRendition, signal);
@@ -31,7 +31,7 @@ const fetchAudioSpanningTime = async (host, fromMs, toMs, signal) => {
   if (fromMs >= toMs || fromMs < 0) throw new Error(`Invalid time range: fromMs=${fromMs}, toMs=${toMs}`);
   const mediaEngine = await host.mediaEngineTask.taskComplete;
   const initSegment = await host.audioInitSegmentFetchTask.taskComplete;
-  if (!mediaEngine?.audioRendition) throw new Error("
+  if (!mediaEngine?.audioRendition) throw new Error("No audio track available in source");
   if (!initSegment) throw new Error("Audio init segment is not available");
   const segmentRanges = mediaEngine.calculateAudioSegmentRange(fromMs, toMs, mediaEngine.audioRendition, host.intrinsicDurationMs || 1e4);
   if (segmentRanges.length === 0) throw new Error(`No segments found for time range ${fromMs}-${toMs}ms`);
package/dist/elements/EFMedia/shared/GlobalInputCache.d.ts
@@ -0,0 +1,39 @@
+import { Input } from 'mediabunny';
+/**
+ * Global cache for MediaBunny Input instances
+ * Shared across all MediaEngine instances to prevent duplicate decoding
+ * of the same segment data
+ */
+declare class GlobalInputCache {
+  private cache;
+  /**
+   * Generate standardized cache key for Input objects
+   * Format: "input:{src}:{segmentId}:{renditionId}"
+   */
+  private generateKey;
+  /**
+   * Get cached Input object
+   */
+  get(src: string, segmentId: number, renditionId?: string): Input | undefined;
+  /**
+   * Cache Input object
+   */
+  set(src: string, segmentId: number, input: Input, renditionId?: string): void;
+  /**
+   * Check if Input is cached
+   */
+  has(src: string, segmentId: number, renditionId?: string): boolean;
+  /**
+   * Clear all cached Input objects
+   */
+  clear(): void;
+  /**
+   * Get cache statistics for debugging
+   */
+  getStats(): {
+    size: number;
+    cachedKeys: unknown[];
+  };
+}
+export declare const globalInputCache: GlobalInputCache;
+export {};
package/dist/elements/EFMedia/shared/GlobalInputCache.js
@@ -0,0 +1,57 @@
+import { LRUCache } from "../../../utils/LRUCache.js";
+/**
+ * Global cache for MediaBunny Input instances
+ * Shared across all MediaEngine instances to prevent duplicate decoding
+ * of the same segment data
+ */
+var GlobalInputCache = class {
+  constructor() {
+    this.cache = new LRUCache(50);
+  }
+  /**
+   * Generate standardized cache key for Input objects
+   * Format: "input:{src}:{segmentId}:{renditionId}"
+   */
+  generateKey(src, segmentId, renditionId) {
+    return `input:${src}:${segmentId}:${renditionId || "default"}`;
+  }
+  /**
+   * Get cached Input object
+   */
+  get(src, segmentId, renditionId) {
+    const key = this.generateKey(src, segmentId, renditionId);
+    return this.cache.get(key);
+  }
+  /**
+   * Cache Input object
+   */
+  set(src, segmentId, input, renditionId) {
+    const key = this.generateKey(src, segmentId, renditionId);
+    this.cache.set(key, input);
+  }
+  /**
+   * Check if Input is cached
+   */
+  has(src, segmentId, renditionId) {
+    const key = this.generateKey(src, segmentId, renditionId);
+    return this.cache.has(key);
+  }
+  /**
+   * Clear all cached Input objects
+   */
+  clear() {
+    this.cache.clear();
+  }
+  /**
+   * Get cache statistics for debugging
+   */
+  getStats() {
+    return {
+      size: this.cache.size,
+      cachedKeys: Array.from(this.cache.cache.keys())
+    };
+  }
+};
+const globalInputCache = new GlobalInputCache();
+globalThis.debugInputCache = globalInputCache;
+export { globalInputCache };
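A hedged usage sketch of the new cache: the mediabunny calls mirror the ones ThumbnailExtractor.js makes, while the deep import path for `globalInputCache` is an assumption (the package's export map is not shown in this diff):

```ts
import { ALL_FORMATS, BlobSource, Input } from "mediabunny";
// Assumed import path; adjust to however the package actually exposes this module.
import { globalInputCache } from "@editframe/elements/dist/elements/EFMedia/shared/GlobalInputCache.js";

// Reuse one parsed Input per src + segmentId + renditionId instead of
// re-demuxing the same init+media blob for every thumbnail request.
function getOrCreateInput(src: string, segmentId: number, segmentBlob: Blob, renditionId?: string): Input {
  const cached = globalInputCache.get(src, segmentId, renditionId);
  if (cached) return cached;
  const input = new Input({ formats: ALL_FORMATS, source: new BlobSource(segmentBlob) });
  globalInputCache.set(src, segmentId, input, renditionId);
  return input;
}
```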
package/dist/elements/EFMedia/shared/ThumbnailExtractor.d.ts
@@ -0,0 +1,27 @@
+import { ThumbnailResult, VideoRendition } from '../../../transcoding/types/index.js';
+import { BaseMediaEngine } from '../BaseMediaEngine.js';
+/**
+ * Shared thumbnail extraction logic for all MediaEngine implementations
+ * Eliminates code duplication and provides consistent behavior
+ */
+export declare class ThumbnailExtractor {
+  private mediaEngine;
+  constructor(mediaEngine: BaseMediaEngine);
+  /**
+   * Extract thumbnails at multiple timestamps efficiently using segment batching
+   */
+  extractThumbnails(timestamps: number[], rendition: VideoRendition, durationMs: number): Promise<(ThumbnailResult | null)[]>;
+  /**
+   * Group timestamps by segment ID for efficient batch processing
+   */
+  private groupTimestampsBySegment;
+  /**
+   * Extract thumbnails for a specific segment using CanvasSink
+   */
+  private extractSegmentThumbnails;
+  /**
+   * Convert global timestamps to segment-relative timestamps for mediabunny
+   * This is where the main difference between JIT and Asset engines lies
+   */
+  private convertToSegmentRelativeTimestamps;
+}
package/dist/elements/EFMedia/shared/ThumbnailExtractor.js
@@ -0,0 +1,106 @@
+import { globalInputCache } from "./GlobalInputCache.js";
+import { ALL_FORMATS, BlobSource, CanvasSink, Input } from "mediabunny";
+/**
+ * Shared thumbnail extraction logic for all MediaEngine implementations
+ * Eliminates code duplication and provides consistent behavior
+ */
+var ThumbnailExtractor = class {
+  constructor(mediaEngine) {
+    this.mediaEngine = mediaEngine;
+  }
+  /**
+   * Extract thumbnails at multiple timestamps efficiently using segment batching
+   */
+  async extractThumbnails(timestamps, rendition, durationMs) {
+    if (timestamps.length === 0) return [];
+    const validTimestamps = timestamps.filter((timeMs) => timeMs >= 0 && timeMs <= durationMs);
+    if (validTimestamps.length === 0) {
+      console.warn(`ThumbnailExtractor: All timestamps out of bounds (0-${durationMs}ms)`);
+      return timestamps.map(() => null);
+    }
+    const segmentGroups = this.groupTimestampsBySegment(validTimestamps, rendition);
+    const results = /* @__PURE__ */ new Map();
+    for (const [segmentId, segmentTimestamps] of segmentGroups) try {
+      const segmentResults = await this.extractSegmentThumbnails(segmentId, segmentTimestamps, rendition);
+      for (const [timestamp, thumbnail] of segmentResults) results.set(timestamp, thumbnail);
+    } catch (error) {
+      console.warn(`ThumbnailExtractor: Failed to extract thumbnails for segment ${segmentId}:`, error);
+      for (const timestamp of segmentTimestamps) results.set(timestamp, null);
+    }
+    return timestamps.map((t) => {
+      if (t < 0 || t > durationMs) return null;
+      return results.get(t) || null;
+    });
+  }
+  /**
+   * Group timestamps by segment ID for efficient batch processing
+   */
+  groupTimestampsBySegment(timestamps, rendition) {
+    const segmentGroups = /* @__PURE__ */ new Map();
+    for (const timeMs of timestamps) try {
+      const segmentId = this.mediaEngine.computeSegmentId(timeMs, rendition);
+      if (segmentId !== void 0) {
+        if (!segmentGroups.has(segmentId)) segmentGroups.set(segmentId, []);
+        const segmentGroup = segmentGroups.get(segmentId) ?? [];
+        if (!segmentGroup) segmentGroups.set(segmentId, []);
+        segmentGroup.push(timeMs);
+      }
+    } catch (error) {
+      console.warn(`ThumbnailExtractor: Could not compute segment for timestamp ${timeMs}:`, error);
+    }
+    return segmentGroups;
+  }
+  /**
+   * Extract thumbnails for a specific segment using CanvasSink
+   */
+  async extractSegmentThumbnails(segmentId, timestamps, rendition) {
+    const results = /* @__PURE__ */ new Map();
+    try {
+      const abortController = new AbortController();
+      const [initSegment, mediaSegment] = await Promise.all([this.mediaEngine.fetchInitSegment(rendition, abortController.signal), this.mediaEngine.fetchMediaSegment(segmentId, rendition)]);
+      const segmentBlob = new Blob([initSegment, mediaSegment]);
+      let input = globalInputCache.get(rendition.src, segmentId, rendition.id);
+      if (!input) {
+        input = new Input({
+          formats: ALL_FORMATS,
+          source: new BlobSource(segmentBlob)
+        });
+        globalInputCache.set(rendition.src, segmentId, input, rendition.id);
+      }
+      const videoTrack = await input.getPrimaryVideoTrack();
+      if (!videoTrack) {
+        for (const timestamp of timestamps) results.set(timestamp, null);
+        return results;
+      }
+      const sink = new CanvasSink(videoTrack);
+      const relativeTimestamps = this.convertToSegmentRelativeTimestamps(timestamps, segmentId, rendition);
+      const timestampResults = [];
+      for await (const result of sink.canvasesAtTimestamps(relativeTimestamps)) timestampResults.push(result);
+      for (let i = 0; i < timestamps.length; i++) {
+        const globalTimestamp = timestamps[i];
+        if (globalTimestamp === void 0) continue;
+        const result = timestampResults[i];
+        if (result?.canvas) {
+          const canvas = result.canvas;
+          if (canvas instanceof HTMLCanvasElement || canvas instanceof OffscreenCanvas) results.set(globalTimestamp, {
+            timestamp: globalTimestamp,
+            thumbnail: canvas
+          });
+          else results.set(globalTimestamp, null);
+        } else results.set(globalTimestamp, null);
+      }
+    } catch (error) {
+      console.error(`ThumbnailExtractor: Failed to extract thumbnails for segment ${segmentId}:`, error);
+      for (const timestamp of timestamps) results.set(timestamp, null);
+    }
+    return results;
+  }
+  /**
+   * Convert global timestamps to segment-relative timestamps for mediabunny
+   * This is where the main difference between JIT and Asset engines lies
+   */
+  convertToSegmentRelativeTimestamps(globalTimestamps, segmentId, rendition) {
+    return this.mediaEngine.convertToSegmentRelativeTimestamps(globalTimestamps, segmentId, rendition);
+  }
+};
+export { ThumbnailExtractor };
package/dist/elements/EFMedia/tasks/makeMediaEngineTask.js
@@ -11,7 +11,7 @@ const getLatestMediaEngine = async (host, signal) => {
 };
 const getVideoRendition = (mediaEngine) => {
   const videoRendition = mediaEngine.videoRendition;
-  if (!videoRendition) throw new Error("
+  if (!videoRendition) throw new Error("No video track available in source");
   return videoRendition;
 };
 /**
package/dist/elements/EFMedia.d.ts
@@ -58,8 +58,8 @@ export declare class EFMedia extends EFMedia_base {
   get urlGenerator(): UrlGenerator;
   mediaEngineTask: import('@lit/task').Task<readonly [string, string | null], import('../transcoding/types/index.ts').MediaEngine>;
   audioSegmentIdTask: import('@lit/task').Task<readonly [import('../transcoding/types/index.ts').MediaEngine | undefined, number], number | undefined>;
-  audioInitSegmentFetchTask: import('@lit/task').Task<readonly [import('../transcoding/types/index.ts').MediaEngine | undefined], ArrayBuffer>;
-  audioSegmentFetchTask: import('@lit/task').Task<readonly [import('../transcoding/types/index.ts').MediaEngine | undefined, number | undefined], ArrayBuffer>;
+  audioInitSegmentFetchTask: import('@lit/task').Task<readonly [import('../transcoding/types/index.ts').MediaEngine | undefined], ArrayBuffer | undefined>;
+  audioSegmentFetchTask: import('@lit/task').Task<readonly [import('../transcoding/types/index.ts').MediaEngine | undefined, number | undefined], ArrayBuffer | undefined>;
   audioInputTask: import('./EFMedia/shared/MediaTaskUtils.ts').InputTask;
   audioSeekTask: import('@lit/task').Task<readonly [number, import('./EFMedia/BufferedSeekingInput.ts').BufferedSeekingInput | undefined], import('mediabunny').VideoSample | undefined>;
   audioBufferTask: import('@lit/task').Task<readonly [number], import('./EFMedia/audioTasks/makeAudioBufferTask.ts').AudioBufferState>;
|
package/dist/elements/EFMedia.js
CHANGED
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
import { isContextMixin } from "../gui/ContextMixin.js";
|
|
1
2
|
import { UrlGenerator } from "../transcoding/utils/UrlGenerator.js";
|
|
2
3
|
import { makeMediaEngineTask } from "./EFMedia/tasks/makeMediaEngineTask.js";
|
|
3
4
|
import { makeAudioBufferTask } from "./EFMedia/audioTasks/makeAudioBufferTask.js";
|
|
@@ -64,7 +65,9 @@ var EFMedia = class extends EFTargetable(EFSourceMixin(EFTemporal(FetchMixin(Lit
|
|
|
64
65
|
"asset-id",
|
|
65
66
|
"audio-buffer-duration",
|
|
66
67
|
"max-audio-buffer-fetches",
|
|
67
|
-
"enable-audio-buffering"
|
|
68
|
+
"enable-audio-buffering",
|
|
69
|
+
"sourcein",
|
|
70
|
+
"sourceout"
|
|
68
71
|
];
|
|
69
72
|
}
|
|
70
73
|
static {
|
|
@@ -105,6 +108,27 @@ var EFMedia = class extends EFTargetable(EFSourceMixin(EFTemporal(FetchMixin(Lit
|
|
|
105
108
|
const newCurrentSourceTimeMs = this.currentSourceTimeMs;
|
|
106
109
|
if (newCurrentSourceTimeMs !== this.desiredSeekTimeMs) this.executeSeek(newCurrentSourceTimeMs);
|
|
107
110
|
if (changedProperties.has("ownCurrentTimeMs")) this.executeSeek(this.currentSourceTimeMs);
|
|
111
|
+
const durationAffectingProps = [
|
|
112
|
+
"_trimStartMs",
|
|
113
|
+
"_trimEndMs",
|
|
114
|
+
"_sourceInMs",
|
|
115
|
+
"_sourceOutMs"
|
|
116
|
+
];
|
|
117
|
+
const hasDurationChange = durationAffectingProps.some((prop) => changedProperties.has(prop));
|
|
118
|
+
if (hasDurationChange) {
|
|
119
|
+
if (this.parentTimegroup) {
|
|
120
|
+
this.parentTimegroup.requestUpdate("durationMs");
|
|
121
|
+
this.parentTimegroup.requestUpdate("currentTime");
|
|
122
|
+
let parent = this.parentNode;
|
|
123
|
+
while (parent) {
|
|
124
|
+
if (isContextMixin(parent)) {
|
|
125
|
+
parent.dispatchEvent(new CustomEvent("child-duration-changed", { detail: { source: this } }));
|
|
126
|
+
break;
|
|
127
|
+
}
|
|
128
|
+
parent = parent.parentNode;
|
|
129
|
+
}
|
|
130
|
+
}
|
|
131
|
+
}
|
|
108
132
|
}
|
|
109
133
|
get hasOwnDuration() {
|
|
110
134
|
return true;
|
|
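When trim or source-in/out properties change, EFMedia now walks up the DOM and dispatches a `child-duration-changed` CustomEvent directly on the nearest ContextMixin ancestor rather than bubbling it. A hedged sketch of listening for it, assuming `ef-timegroup` is such an ancestor:

```ts
// Sketch only: the event is dispatched on the ContextMixin ancestor itself, so
// the listener goes on that element; it carries the originating EFMedia in
// event.detail.source. Using "ef-timegroup" as the ancestor tag is an assumption.
const group = document.querySelector("ef-timegroup");
group?.addEventListener("child-duration-changed", (event) => {
  const { source } = (event as CustomEvent<{ source: HTMLElement }>).detail;
  console.log("duration-affecting change from", source.tagName.toLowerCase());
  // e.g. recompute a timeline layout that depends on child durations
});
```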
package/dist/elements/EFSurface.browsertest.d.ts
File without changes
package/dist/elements/EFSurface.d.ts
@@ -0,0 +1,30 @@
+import { Task } from '@lit/task';
+import { LitElement } from 'lit';
+import { ContextMixinInterface } from '../gui/ContextMixin.ts';
+export declare class EFSurface extends LitElement {
+  #private;
+  static styles: import('lit').CSSResult[];
+  canvasRef: import('lit-html/directives/ref').Ref<HTMLCanvasElement>;
+  targetElement: ContextMixinInterface | null;
+  target: string;
+  render(): import('lit-html').TemplateResult<1>;
+  get rootTimegroup(): any;
+  get currentTimeMs(): number;
+  get durationMs(): number;
+  get startTimeMs(): number;
+  get endTimeMs(): number;
+  /**
+   * Minimal integration with EFTimegroup's frame scheduling:
+   * - Waits for the target video element's frameTask to complete (ensuring it painted)
+   * - Copies the target's canvas into this element's canvas
+   */
+  frameTask: Task<readonly [ContextMixinInterface | null], void>;
+  protected updated(): void;
+  private getSourceCanvas;
+  private copyFromTarget;
+}
+declare global {
+  interface HTMLElementTagNameMap {
+    "ef-surface": EFSurface;
+  }
+}