@editframe/elements 0.18.21-beta.0 → 0.18.23-beta.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/elements/EFAudio.d.ts +1 -12
- package/dist/elements/EFAudio.js +3 -18
- package/dist/elements/EFMedia/AssetMediaEngine.d.ts +1 -1
- package/dist/elements/EFMedia/AssetMediaEngine.js +3 -3
- package/dist/elements/EFMedia/BufferedSeekingInput.d.ts +15 -9
- package/dist/elements/EFMedia/BufferedSeekingInput.js +76 -78
- package/dist/elements/EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.js +12 -10
- package/dist/elements/EFMedia/audioTasks/makeAudioSeekTask.js +2 -18
- package/dist/elements/EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.js +12 -10
- package/dist/elements/EFTimegroup.d.ts +4 -4
- package/dist/elements/EFTimegroup.js +52 -39
- package/dist/elements/EFVideo.d.ts +1 -32
- package/dist/elements/EFVideo.js +13 -51
- package/dist/elements/SampleBuffer.js +1 -1
- package/package.json +2 -2
- package/src/elements/EFAudio.browsertest.ts +0 -3
- package/src/elements/EFAudio.ts +3 -22
- package/src/elements/EFMedia/AssetMediaEngine.browsertest.ts +39 -1
- package/src/elements/EFMedia/AssetMediaEngine.ts +5 -4
- package/src/elements/EFMedia/BufferedSeekingInput.browsertest.ts +90 -185
- package/src/elements/EFMedia/BufferedSeekingInput.ts +119 -130
- package/src/elements/EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.ts +21 -21
- package/src/elements/EFMedia/audioTasks/makeAudioSeekTask.chunkboundary.regression.browsertest.ts +10 -5
- package/src/elements/EFMedia/audioTasks/makeAudioSeekTask.ts +33 -34
- package/src/elements/EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.ts +22 -20
- package/src/elements/EFMedia/videoTasks/makeVideoSeekTask.ts +0 -3
- package/src/elements/EFMedia.browsertest.ts +72 -60
- package/src/elements/EFTimegroup.browsertest.ts +9 -4
- package/src/elements/EFTimegroup.ts +79 -55
- package/src/elements/EFVideo.browsertest.ts +172 -160
- package/src/elements/EFVideo.ts +17 -73
- package/src/elements/SampleBuffer.ts +1 -2
- package/test/EFVideo.framegen.browsertest.ts +0 -54
- package/types.json +1 -1
package/dist/elements/EFAudio.d.ts
CHANGED
@@ -8,20 +8,9 @@ export declare class EFAudio extends EFAudio_base {
     frameTask: Task<readonly [import('@lit/task').TaskStatus, import('@lit/task').TaskStatus, import('@lit/task').TaskStatus, import('@lit/task').TaskStatus], void>;
     /**
      * Legacy getter for fragment index task (maps to audioSegmentIdTask)
+     * Still used by EFCaptions
      */
     get fragmentIndexTask(): Task<readonly [import('../transcoding/types/index.js').MediaEngine | undefined, number], number | undefined>;
-    /**
-     * Legacy getter for media segments task (maps to audioSegmentFetchTask)
-     */
-    get mediaSegmentsTask(): Task<readonly [import('../transcoding/types/index.js').MediaEngine | undefined, number | undefined], ArrayBuffer>;
-    /**
-     * Legacy getter for seek task (maps to audioSeekTask)
-     */
-    get seekTask(): Task<readonly [number, import('./EFMedia/BufferedSeekingInput.js').BufferedSeekingInput | undefined], import('mediabunny').VideoSample | undefined>;
-    /**
-     * Legacy getter for audio asset task (maps to audioBufferTask)
-     */
-    get videoAssetTask(): Task<readonly [number], import('./EFMedia/audioTasks/makeAudioBufferTask.js').AudioBufferState>;
 }
 declare global {
     interface HTMLElementTagNameMap {
package/dist/elements/EFAudio.js
CHANGED
@@ -1,3 +1,4 @@
+import { EF_INTERACTIVE } from "../EF_INTERACTIVE.js";
 import { EFMedia } from "./EFMedia.js";
 import { TWMixin } from "../gui/TWMixin2.js";
 import { Task } from "@lit/task";
@@ -11,6 +12,7 @@ let EFAudio = class EFAudio$1 extends TWMixin(EFMedia) {
     this._propertyHack = false;
     this.audioElementRef = createRef();
     this.frameTask = new Task(this, {
+      autoRun: EF_INTERACTIVE,
       args: () => [
         this.audioBufferTask.status,
         this.audioSeekTask.status,
@@ -31,28 +33,11 @@
   }
   /**
    * Legacy getter for fragment index task (maps to audioSegmentIdTask)
+   * Still used by EFCaptions
    */
   get fragmentIndexTask() {
     return this.audioSegmentIdTask;
   }
-  /**
-   * Legacy getter for media segments task (maps to audioSegmentFetchTask)
-   */
-  get mediaSegmentsTask() {
-    return this.audioSegmentFetchTask;
-  }
-  /**
-   * Legacy getter for seek task (maps to audioSeekTask)
-   */
-  get seekTask() {
-    return this.audioSeekTask;
-  }
-  /**
-   * Legacy getter for audio asset task (maps to audioBufferTask)
-   */
-  get videoAssetTask() {
-    return this.audioBufferTask;
-  }
 };
 _decorate([property({
   type: Boolean,
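The `autoRun: EF_INTERACTIVE` option added to `frameTask` gates automatic re-runs behind the interactive build flag: in non-interactive (render) builds the task only runs when driven explicitly. A minimal sketch of this gating pattern with `@lit/task`, assuming `EF_INTERACTIVE` is a build-time boolean; the element below is a simplified stand-in, not the real EFAudio:

// Sketch of the autoRun gating pattern from the diff above.
// EF_INTERACTIVE is assumed to be injected at build time.
import { LitElement } from "lit";
import { Task } from "@lit/task";

const EF_INTERACTIVE = true; // assumption: build-time flag

class DemoElement extends LitElement {
  currentTimeMs = 0;

  frameTask = new Task(this, {
    // When EF_INTERACTIVE is false, the task never re-runs on its own;
    // a renderer drives it explicitly via frameTask.run().
    autoRun: EF_INTERACTIVE,
    args: () => [this.currentTimeMs] as const,
    task: async ([timeMs]) => {
      // ...produce the frame for timeMs...
    },
  });
}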
package/dist/elements/EFMedia/AssetMediaEngine.d.ts
CHANGED
@@ -40,5 +40,5 @@ export declare class AssetMediaEngine extends BaseMediaEngine implements MediaEn
     * Calculate audio segments for variable-duration segments using track fragment index
     */
    calculateAudioSegmentRange(fromMs: number, toMs: number, rendition: AudioRendition, _durationMs: number): SegmentTimeRange[];
-   computeSegmentId(
+   computeSegmentId(seekTimeMs: number, rendition: MediaRendition): number;
 }
package/dist/elements/EFMedia/AssetMediaEngine.js
CHANGED
@@ -112,14 +112,14 @@ var AssetMediaEngine = class AssetMediaEngine extends BaseMediaEngine {
     })}`);
     return segmentRanges;
   }
-  computeSegmentId(
+  computeSegmentId(seekTimeMs, rendition) {
    if (!rendition.trackId) throw new Error("Track ID is required for asset metadata");
    const track = this.data[rendition.trackId];
    if (!track) throw new Error("Track not found");
    const { timescale, segments } = track;
    const startTimeOffsetMs = "startTimeOffsetMs" in rendition && rendition.startTimeOffsetMs || 0;
-    const
-    const scaledSeekTime = convertToScaledTime(
+    const offsetSeekTimeMs = roundToMilliseconds(seekTimeMs + startTimeOffsetMs);
+    const scaledSeekTime = convertToScaledTime(offsetSeekTimeMs, timescale);
    for (let i = segments.length - 1; i >= 0; i--) {
      const segment = segments[i];
      const segmentEndTime = segment.cts + segment.duration;
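The rewritten `computeSegmentId` offsets the seek time by the rendition's start offset, converts it into the track's timescale, then walks the segment list backwards. A standalone sketch of that lookup; the `Segment` shape (`cts`/`duration` in timescale units) is inferred from the fields the hunk reads, and the helpers and the exact containment test are assumptions, since the comparison itself falls outside the visible hunk:

// Sketch of the segment lookup in computeSegmentId (assumptions noted).
interface Segment { cts: number; duration: number }

const roundToMilliseconds = (ms: number) => Math.round(ms); // assumed behavior
const convertToScaledTime = (ms: number, timescale: number) =>
  Math.round((ms / 1e3) * timescale); // ms -> track timescale units

function findSegmentId(
  seekTimeMs: number,
  startTimeOffsetMs: number,
  timescale: number,
  segments: Segment[],
): number {
  const offsetSeekTimeMs = roundToMilliseconds(seekTimeMs + startTimeOffsetMs);
  const scaledSeekTime = convertToScaledTime(offsetSeekTimeMs, timescale);
  // Walk backwards, as the hunk does, so the latest segment containing
  // the seek time wins for variable-duration segments.
  for (let i = segments.length - 1; i >= 0; i--) {
    const segment = segments[i];
    const segmentEndTime = segment.cts + segment.duration;
    if (segment.cts <= scaledSeekTime && scaledSeekTime < segmentEndTime) return i;
  }
  throw new RangeError(`No segment contains scaled time ${scaledSeekTime}`);
}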
package/dist/elements/EFMedia/BufferedSeekingInput.d.ts
CHANGED
@@ -1,4 +1,5 @@
-import {
+import { AudioSampleSink, InputAudioTrack, InputTrack, InputVideoTrack, VideoSampleSink } from 'mediabunny';
+import { MediaSample, SampleBuffer } from '../SampleBuffer';
 interface BufferedSeekingInputOptions {
     videoBufferSize?: number;
     audioBufferSize?: number;
@@ -10,7 +11,10 @@ interface BufferedSeekingInputOptions {
 }
 export declare class NoSample extends RangeError {
 }
+export declare class ConcurrentSeekError extends RangeError {
+}
 export declare class BufferedSeekingInput {
+    #private;
     private input;
     private trackIterators;
     private trackBuffers;
@@ -28,14 +32,16 @@ export declare class BufferedSeekingInput {
     getBufferTimestamps(trackId: number): number[];
     clearBuffer(trackId: number): void;
     computeDuration(): Promise<number>;
-    getTrack(trackId: number): Promise<
-    getAudioTrack(trackId: number): Promise<
-    getVideoTrack(trackId: number): Promise<
-    getFirstVideoTrack(): Promise<
-    getFirstAudioTrack(): Promise<
-    getTrackIterator(
-
-
+    getTrack(trackId: number): Promise<InputTrack>;
+    getAudioTrack(trackId: number): Promise<InputAudioTrack>;
+    getVideoTrack(trackId: number): Promise<InputVideoTrack>;
+    getFirstVideoTrack(): Promise<InputVideoTrack | undefined>;
+    getFirstAudioTrack(): Promise<InputAudioTrack | undefined>;
+    getTrackIterator(track: InputTrack): AsyncIterator<MediaSample, any, undefined>;
+    createTrackSampleSink(track: InputTrack): AudioSampleSink | VideoSampleSink;
+    createTrackIterator(track: InputTrack): AsyncGenerator<import('mediabunny').VideoSample, void, unknown> | AsyncGenerator<import('mediabunny').AudioSample, void, unknown>;
+    createTrackBuffer(track: InputTrack): SampleBuffer;
+    getTrackBuffer(track: InputTrack): SampleBuffer;
     seek(trackId: number, timeMs: number): Promise<MediaSample | undefined>;
     private resetIterator;
     private seekSafe;
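Per the updated declarations, callers now get typed mediabunny tracks back, and `seek` resolves to a `MediaSample` or throws `NoSample` when the time is out of bounds. A hedged usage sketch against this surface; only the typed API from the .d.ts is used, and how the input was constructed is out of scope here:

// Usage sketch against the declarations above (construction elided).
import { BufferedSeekingInput, NoSample } from "./BufferedSeekingInput";

async function sampleAt(input: BufferedSeekingInput, timeMs: number) {
  const audioTrack = await input.getFirstAudioTrack();
  if (!audioTrack) return undefined; // no audio in this input
  try {
    // Resolves once the buffered iterator has decoded up to timeMs.
    return await input.seek(audioTrack.id, timeMs);
  } catch (error) {
    if (error instanceof NoSample) return undefined; // outside track bounds
    throw error;
  }
}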
package/dist/elements/EFMedia/BufferedSeekingInput.js
CHANGED
@@ -1,6 +1,6 @@
 import { roundToMilliseconds } from "./shared/PrecisionUtils.js";
 import { SampleBuffer } from "../SampleBuffer.js";
-import { AudioSampleSink, BufferSource, Input, MP4, VideoSampleSink } from "mediabunny";
+import { AudioSampleSink, BufferSource, Input, InputAudioTrack, InputVideoTrack, MP4, VideoSampleSink } from "mediabunny";
 const defaultOptions = {
   videoBufferSize: 30,
   audioBufferSize: 100,
@@ -70,48 +70,37 @@ var BufferedSeekingInput = class {
     const tracks = await this.input.getAudioTracks();
     return tracks[0];
   }
-
-  if (this.trackIterators.has(
-  const
-
-
-    if (this.trackIterators.has(trackId)) return this.trackIterators.get(trackId);
-  }
-  const creationPromise = this.createIteratorSafe(trackId);
-  this.trackIteratorCreationPromises.set(trackId, creationPromise);
-  try {
-    const iterator = await creationPromise;
-    return iterator;
-  } finally {
-    this.trackIteratorCreationPromises.delete(trackId);
-  }
+  getTrackIterator(track) {
+    if (this.trackIterators.has(track.id)) return this.trackIterators.get(track.id);
+    const trackIterator = this.createTrackIterator(track);
+    this.trackIterators.set(track.id, trackIterator);
+    return trackIterator;
   }
-
-
-  if (track
-
-  const sampleSink = new AudioSampleSink(track$1);
-  const iterator = sampleSink.samples();
-  this.trackIterators.set(trackId, iterator);
-  return iterator;
-  }
-  {
-    const track$1 = await this.getVideoTrack(trackId);
-    const sampleSink = new VideoSampleSink(track$1);
-    const iterator = sampleSink.samples();
-    this.trackIterators.set(trackId, iterator);
-    return iterator;
-  }
+  createTrackSampleSink(track) {
+    if (track instanceof InputAudioTrack) return new AudioSampleSink(track);
+    if (track instanceof InputVideoTrack) return new VideoSampleSink(track);
+    throw new Error(`Unsupported track type ${track.type}`);
   }
-
-  const
+  createTrackIterator(track) {
+    const sampleSink = this.createTrackSampleSink(track);
+    return sampleSink.samples();
+  }
+  createTrackBuffer(track) {
     if (track.type === "audio") {
-      const bufferSize = this.options.audioBufferSize;
-
-
-      const bufferSize = this.options.videoBufferSize;
-      this.trackBuffers.set(trackId, new SampleBuffer(bufferSize));
+      const bufferSize$1 = this.options.audioBufferSize;
+      const sampleBuffer$1 = new SampleBuffer(bufferSize$1);
+      return sampleBuffer$1;
     }
+    const bufferSize = this.options.videoBufferSize;
+    const sampleBuffer = new SampleBuffer(bufferSize);
+    return sampleBuffer;
+  }
+  getTrackBuffer(track) {
+    const maybeTrackBuffer = this.trackBuffers.get(track.id);
+    if (maybeTrackBuffer) return maybeTrackBuffer;
+    const trackBuffer = this.createTrackBuffer(track);
+    this.trackBuffers.set(track.id, trackBuffer);
+    return trackBuffer;
   }
   async seek(trackId, timeMs) {
     const mediaTimeMs = timeMs + this.startTimeOffsetMs;
@@ -126,54 +115,63 @@ var BufferedSeekingInput = class {
       this.trackSeekPromises.delete(trackId);
     }
   }
-  async resetIterator(
-    const trackBuffer = this.trackBuffers.get(
+  async resetIterator(track) {
+    const trackBuffer = this.trackBuffers.get(track.id);
     trackBuffer?.clear();
-    const ongoingIteratorCreation = this.trackIteratorCreationPromises.get(
+    const ongoingIteratorCreation = this.trackIteratorCreationPromises.get(track.id);
     if (ongoingIteratorCreation) await ongoingIteratorCreation;
-    const iterator = this.trackIterators.get(
+    const iterator = this.trackIterators.get(track.id);
     if (iterator) try {
       await iterator.return?.();
    } catch (_error) {}
-    this.trackIterators.delete(
+    this.trackIterators.delete(track.id);
   }
+  #seekLock;
   async seekSafe(trackId, timeMs) {
-    if (
-    const
-
-
-
-    const
-
-
+    if (this.#seekLock) await this.#seekLock.promise;
+    const seekLock = Promise.withResolvers();
+    this.#seekLock = seekLock;
+    try {
+      const track = await this.getTrack(trackId);
+      const trackBuffer = this.getTrackBuffer(track);
+      const roundedTimeMs = roundToMilliseconds(timeMs);
+      const firstTimestampMs = roundToMilliseconds(await track.getFirstTimestamp() * 1e3);
+      if (roundedTimeMs < firstTimestampMs) {
+        console.error("Seeking outside bounds of input", {
+          roundedTimeMs,
+          firstTimestampMs
+        });
+        throw new NoSample(`Seeking outside bounds of input ${roundedTimeMs} < ${firstTimestampMs}`);
     }
+      const bufferContents = trackBuffer.getContents();
+      if (bufferContents.length > 0) {
+        const bufferStartMs = roundToMilliseconds(trackBuffer.firstTimestamp * 1e3);
+        if (roundedTimeMs < bufferStartMs) await this.resetIterator(track);
+      }
+      const alreadyInBuffer = trackBuffer.find(timeMs);
+      if (alreadyInBuffer) return alreadyInBuffer;
+      const iterator = this.getTrackIterator(track);
+      while (true) {
+        const { done, value: decodedSample } = await iterator.next();
+        if (decodedSample) trackBuffer.push(decodedSample);
+        const foundSample = trackBuffer.find(roundedTimeMs);
+        if (foundSample) return foundSample;
+        if (done) break;
+      }
+      const finalBufferContents = trackBuffer.getContents();
+      if (finalBufferContents.length > 0) {
+        const lastSample = finalBufferContents[finalBufferContents.length - 1];
+        const lastSampleEndMs = roundToMilliseconds(((lastSample?.timestamp || 0) + (lastSample?.duration || 0)) * 1e3);
+        const trackDurationMs = await track.computeDuration() * 1e3;
+        const isSeekingToTrackEnd = roundToMilliseconds(timeMs) === roundToMilliseconds(trackDurationMs);
+        const isAtEndOfTrack = roundToMilliseconds(timeMs) >= lastSampleEndMs;
+        if (isSeekingToTrackEnd && isAtEndOfTrack) return lastSample;
+      }
+      throw new NoSample(`Sample not found for time ${timeMs} in ${track.type} track ${trackId}`);
+    } finally {
+      this.#seekLock = void 0;
+      seekLock.resolve();
     }
-    const bufferContents = trackBuffer.getContents();
-    if (bufferContents.length > 0) {
-      const bufferStartMs = roundToMilliseconds(trackBuffer.firstTimestamp * 1e3);
-      const lastSample = bufferContents[bufferContents.length - 1];
-      const bufferEndMs = lastSample ? roundToMilliseconds((lastSample.timestamp + (lastSample.duration || 0)) * 1e3) : bufferStartMs;
-      if (roundedTimeMs < bufferStartMs || roundedTimeMs > bufferEndMs) await this.resetIterator(trackId);
-    }
-    const alreadyInBuffer = trackBuffer.find(timeMs);
-    if (alreadyInBuffer) return alreadyInBuffer;
-    const iterator = await this.getTrackIterator(trackId);
-    while (true) {
-      const { done, value: decodedSample } = await iterator.next();
-      if (decodedSample) trackBuffer.push(decodedSample);
-      const foundSample = trackBuffer.find(timeMs);
-      if (foundSample) return foundSample;
-      if (done) break;
-    }
-    const finalBufferContents = trackBuffer.getContents();
-    if (finalBufferContents.length > 0) {
-      const lastSample = finalBufferContents[finalBufferContents.length - 1];
-      const lastSampleEndMs = roundToMilliseconds(((lastSample?.timestamp || 0) + (lastSample?.duration || 0)) * 1e3);
-      if (roundToMilliseconds(timeMs) >= lastSampleEndMs) return lastSample;
-    }
-    throw new NoSample(`Sample not found for time ${timeMs} in ${track.type} track ${trackId}`);
   }
 };
 export { BufferedSeekingInput };
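The rewritten `seekSafe` serializes overlapping seeks with a private `#seekLock` built on `Promise.withResolvers()` (ES2024): each call awaits any in-flight lock, installs its own, and releases it in `finally`. A distilled sketch of just that locking shape, with the buffer/iterator work reduced to a placeholder:

// Distilled sketch of the #seekLock pattern in seekSafe above.
// Requires Promise.withResolvers (ES2024 / TypeScript lib es2024).
class SerializedSeeker {
  #seekLock?: PromiseWithResolvers<void>;

  async seekSafe<T>(work: () => Promise<T>): Promise<T> {
    if (this.#seekLock) await this.#seekLock.promise; // wait for in-flight seek
    const seekLock = Promise.withResolvers<void>();
    this.#seekLock = seekLock;
    try {
      return await work(); // placeholder for the buffer/iterator logic
    } finally {
      this.#seekLock = undefined; // clear before waking waiters
      seekLock.resolve();
    }
  }
}

Note that, as in the diff, two callers parked on the same lock wake together once it resolves; strict one-at-a-time ordering would need a queue.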
package/dist/elements/EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.js
CHANGED
@@ -50,19 +50,24 @@ function makeAudioFrequencyAnalysisTask(element) {
       element.fftGain,
       element.shouldInterpolateFrequencies
     ],
-    task: async () => {
+    task: async (_, { signal }) => {
      await element.audioBufferTask.taskComplete;
+      signal.throwIfAborted();
      if (!element.audioBufferTask.value) return null;
      if (element.currentSourceTimeMs < 0) return null;
      const currentTimeMs = element.currentSourceTimeMs;
-      const
-      const
-      const
+      const frameIntervalMs = 1e3 / 30;
+      const earliestFrameMs = currentTimeMs - (element.fftDecay - 1) * frameIntervalMs;
+      const fromMs = Math.max(0, earliestFrameMs);
+      const maxToMs = currentTimeMs + frameIntervalMs;
      const videoDurationMs = element.intrinsicDurationMs || 0;
      const toMs = videoDurationMs > 0 ? Math.min(maxToMs, videoDurationMs) : maxToMs;
      if (fromMs >= toMs) return null;
+      const preliminaryCacheKey = `${element.shouldInterpolateFrequencies}:${element.fftSize}:${element.fftDecay}:${element.fftGain}:${fromMs}:${currentTimeMs}`;
+      const cachedSmoothedData = cache.get(preliminaryCacheKey);
+      if (cachedSmoothedData) return cachedSmoothedData;
      const { fetchAudioSpanningTime: fetchAudioSpan } = await import("../shared/AudioSpanUtils.js");
-      const audioSpan = await fetchAudioSpan(element, fromMs, toMs,
+      const audioSpan = await fetchAudioSpan(element, fromMs, toMs, signal);
      if (!audioSpan || !audioSpan.blob) {
        console.warn("Frequency analysis skipped: no audio data available");
        return null;
@@ -71,10 +76,7 @@ function makeAudioFrequencyAnalysisTask(element) {
      const arrayBuffer = await audioSpan.blob.arrayBuffer();
      const audioBuffer = await tempAudioContext.decodeAudioData(arrayBuffer);
      const startOffsetMs = audioSpan.startMs;
-      const
-      const cachedSmoothedData = cache.get(smoothedKey);
-      if (cachedSmoothedData) return cachedSmoothedData;
-      const framesData = await Promise.all(Array.from({ length: element.fftDecay }, async (_, i) => {
+      const framesData = await Promise.all(Array.from({ length: element.fftDecay }, async (_$1, i) => {
        const frameOffset = i * (1e3 / 30);
        const startTime = Math.max(0, (currentTimeMs - frameOffset - startOffsetMs) / 1e3);
        const cacheKey = `${element.shouldInterpolateFrequencies}:${element.fftSize}:${element.fftGain}:${startOffsetMs}:${startTime}`;
@@ -133,7 +135,7 @@ function makeAudioFrequencyAnalysisTask(element) {
      });
      const slicedData = smoothedData.slice(0, Math.floor(smoothedData.length / 2));
      const processedData = element.shouldInterpolateFrequencies ? processFFTData(slicedData) : slicedData;
-      cache.set(
+      cache.set(preliminaryCacheKey, processedData);
      return processedData;
    }
  });
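Both analysis tasks now build a `preliminaryCacheKey` from the task inputs alone and probe the cache before any fetch or `decodeAudioData` call, where the old code only checked after decoding. A small sketch of that hoisted-cache-check shape; the cache and the expensive pipeline are stand-ins for the module-level cache and the fetch/decode path in the diff:

// Sketch of the hoisted cache check: derive the key from the task
// inputs alone so a hit skips fetch + decodeAudioData entirely.
const cache = new Map<string, Uint8Array>();

async function analyse(
  params: { fftSize: number; fftDecay: number; fftGain: number },
  fromMs: number,
  currentTimeMs: number,
  expensive: () => Promise<Uint8Array>, // stand-in for fetch + decode + FFT
): Promise<Uint8Array> {
  const key = `${params.fftSize}:${params.fftDecay}:${params.fftGain}:${fromMs}:${currentTimeMs}`;
  const hit = cache.get(key);
  if (hit) return hit; // previously computed: no fetch, no decode
  const data = await expensive();
  cache.set(key, data);
  return data;
}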
package/dist/elements/EFMedia/audioTasks/makeAudioSeekTask.js
CHANGED
@@ -13,24 +13,8 @@ const makeAudioSeekTask = (host) => {
      else console.error("audioSeekTask unknown error", error);
    },
    onComplete: (_value) => {},
-    task: async (
-
-      signal.throwIfAborted();
-      await host.audioSegmentFetchTask.taskComplete;
-      signal.throwIfAborted();
-      await host.audioInitSegmentFetchTask.taskComplete;
-      signal.throwIfAborted();
-      const audioInput = await host.audioInputTask.taskComplete;
-      signal.throwIfAborted();
-      if (!audioInput) throw new Error("Audio input is not available");
-      const audioTrack = await audioInput.getFirstAudioTrack();
-      if (!audioTrack) throw new Error("Audio track is not available");
-      signal.throwIfAborted();
-      const sample = await audioInput.seek(audioTrack.id, targetSeekTimeMs);
-      signal.throwIfAborted();
-      if (sample === void 0 && signal.aborted) return void 0;
-      if (sample === void 0) throw new Error("Audio seek failed to find sample");
-      return sample;
+    task: async () => {
+      return void 0;
    }
  });
};
package/dist/elements/EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.js
CHANGED
@@ -17,19 +17,24 @@ function makeAudioTimeDomainAnalysisTask(element) {
      element.fftGain,
      element.shouldInterpolateFrequencies
    ],
-    task: async () => {
+    task: async (_, { signal }) => {
      await element.audioBufferTask.taskComplete;
+      signal.throwIfAborted();
      if (!element.audioBufferTask.value) return null;
      if (element.currentSourceTimeMs < 0) return null;
      const currentTimeMs = element.currentSourceTimeMs;
-      const
-      const
-      const
+      const frameIntervalMs = 1e3 / 30;
+      const earliestFrameMs = currentTimeMs - (element.fftDecay - 1) * frameIntervalMs;
+      const fromMs = Math.max(0, earliestFrameMs);
+      const maxToMs = currentTimeMs + frameIntervalMs;
      const videoDurationMs = element.intrinsicDurationMs || 0;
      const toMs = videoDurationMs > 0 ? Math.min(maxToMs, videoDurationMs) : maxToMs;
      if (fromMs >= toMs) return null;
+      const preliminaryCacheKey = `${element.shouldInterpolateFrequencies}:${element.fftSize}:${element.fftDecay}:${element.fftGain}:${fromMs}:${currentTimeMs}`;
+      const cachedData = cache.get(preliminaryCacheKey);
+      if (cachedData) return cachedData;
      const { fetchAudioSpanningTime: fetchAudioSpan } = await import("../shared/AudioSpanUtils.js");
-      const audioSpan = await fetchAudioSpan(element, fromMs, toMs,
+      const audioSpan = await fetchAudioSpan(element, fromMs, toMs, signal);
      if (!audioSpan || !audioSpan.blob) {
        console.warn("Time domain analysis skipped: no audio data available");
        return null;
@@ -38,10 +43,7 @@ function makeAudioTimeDomainAnalysisTask(element) {
      const arrayBuffer = await audioSpan.blob.arrayBuffer();
      const audioBuffer = await tempAudioContext.decodeAudioData(arrayBuffer);
      const startOffsetMs = audioSpan.startMs;
-      const
-      const cachedData = cache.get(smoothedKey);
-      if (cachedData) return cachedData;
-      const framesData = await Promise.all(Array.from({ length: element.fftDecay }, async (_, frameIndex) => {
+      const framesData = await Promise.all(Array.from({ length: element.fftDecay }, async (_$1, frameIndex) => {
        const frameOffset = frameIndex * (1e3 / 30);
        const startTime = Math.max(0, (currentTimeMs - frameOffset - startOffsetMs) / 1e3);
        const cacheKey = `${element.shouldInterpolateFrequencies}:${element.fftSize}:${element.fftGain}:${startOffsetMs}:${startTime}`;
@@ -99,7 +101,7 @@ function makeAudioTimeDomainAnalysisTask(element) {
      });
      smoothedData[i] = Math.min(255, Math.round(weightedSum / weightSum));
    }
-    cache.set(
+    cache.set(preliminaryCacheKey, smoothedData);
    return smoothedData;
    }
  });
package/dist/elements/EFTimegroup.d.ts
CHANGED
@@ -6,8 +6,6 @@ export declare class EFTimegroup extends EFTimegroup_base {
     #private;
     static styles: import('lit').CSSResult;
     _timeGroupContext: this;
-    private isFrameUpdateInProgress;
-    private queuedTimeUpdate;
     mode: "fit" | "fixed" | "sequence" | "contain";
     overlapMs: number;
     fit: "none" | "contain" | "cover";
@@ -27,8 +25,9 @@ export declare class EFTimegroup extends EFTimegroup_base {
     get intrinsicDurationMs(): number | undefined;
     get hasOwnDuration(): boolean;
     get durationMs(): number;
-    getPendingFrameTasks(): Promise<Task<readonly unknown[], unknown>[]>;
-
+    getPendingFrameTasks(signal?: AbortSignal): Promise<Task<readonly unknown[], unknown>[]>;
+    waitForNestedUpdates(signal?: AbortSignal): Promise<void>;
+    waitForFrameTasks(signal?: AbortSignal): Promise<void>;
     /**
      * Wait for all media elements to load their initial segments.
      * Ideally we would only need the extracted index json data, but
@@ -60,6 +59,7 @@ export declare class EFTimegroup extends EFTimegroup_base {
     testPlayAudio(fromMs: number, toMs: number): Promise<void>;
     loadMd5Sums(): Promise<void>;
     frameTask: Task<readonly [number, number], void>;
+    seekTask: Task<readonly [number], void>;
 }
 declare global {
     interface HTMLElementTagNameMap {
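The new `waitForFrameTasks` declared above (with `waitForNestedUpdates` factored out) is, per the implementation diff below, a bounded fixpoint loop: collect still-pending frame tasks, await them, and repeat until a pass finds none, checking the optional `AbortSignal` after each await so a superseded Lit Task run unwinds early. A sketch of that loop shape, with `getPendingFrameTasks` reduced to a callback:

// Sketch of the bounded fixpoint loop behind waitForFrameTasks.
// getPending is a stand-in for getPendingFrameTasks(signal).
async function waitUntilNonePending(
  getPending: () => Promise<Array<{ taskComplete: Promise<unknown> }>>,
  signal?: AbortSignal,
  limit = 10, // bail out rather than loop forever on churning tasks
): Promise<void> {
  for (let step = 0; step < limit; step++) {
    const pending = await getPending();
    signal?.throwIfAborted(); // no-op when no signal is provided
    if (pending.length === 0) return; // fixpoint reached
    await Promise.all(pending.map((t) => t.taskComplete));
    signal?.throwIfAborted();
  }
}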
package/dist/elements/EFTimegroup.js
CHANGED
@@ -25,17 +25,28 @@ let EFTimegroup = class EFTimegroup$1 extends EFTemporal(LitElement) {
   constructor(..._args) {
     super(..._args);
     this._timeGroupContext = this;
-    this.isFrameUpdateInProgress = false;
-    this.queuedTimeUpdate = null;
     this.mode = "contain";
     this.overlapMs = 0;
     this.fit = "none";
     this.frameTask = new Task(this, {
       autoRun: EF_INTERACTIVE,
       args: () => [this.ownCurrentTimeMs, this.currentTimeMs],
-      task: async ([], { signal
-
-
+      task: async ([], { signal }) => {
+        if (this.isRootTimegroup) await this.waitForFrameTasks(signal);
+      }
+    });
+    this.seekTask = new Task(this, {
+      args: () => [this.#pendingSeekTime ?? this.#currentTime],
+      task: async ([targetTime], { signal }) => {
+        const newTime = Math.max(0, Math.min(targetTime, this.durationMs / 1e3));
+        this.#currentTime = newTime;
+        this.requestUpdate("currentTime");
+        await this.updateComplete;
+        signal.throwIfAborted();
+        const videoElements = this.querySelectorAll("ef-video");
+        for (const video of videoElements) if (video.videoSeekTask) video.videoSeekTask.run();
+        await this.frameTask.run();
+        this.#saveTimeToLocalStorage(newTime);
       }
     });
   }
@@ -53,17 +64,23 @@ let EFTimegroup = class EFTimegroup$1 extends EFTemporal(LitElement) {
   }
   #currentTime = 0;
   #resizeObserver;
+  #seekInProgress = false;
+  #pendingSeekTime;
   set currentTime(time) {
-
-
-    this.queuedTimeUpdate = newTime;
+    if (this.#seekInProgress) {
+      this.#pendingSeekTime = time;
      return;
    }
-
-
-
-    this.#
-
+    this.#seekInProgress = true;
+    this.#pendingSeekTime = time;
+    this.seekTask.run().finally(() => {
+      this.#seekInProgress = false;
+      if (this.#pendingSeekTime !== void 0 && this.#pendingSeekTime !== time) {
+        const pendingTime = this.#pendingSeekTime;
+        this.#pendingSeekTime = void 0;
+        this.currentTime = pendingTime;
+      } else this.#pendingSeekTime = void 0;
+    });
   }
   get currentTime() {
     return this.#currentTime;
@@ -81,26 +98,6 @@ let EFTimegroup = class EFTimegroup$1 extends EFTemporal(LitElement) {
     return this.closest("ef-timegroup") === this;
   }
   /**
-   * Executes time update with frame locking for root timegroups
-   */
-  async #executeTimeUpdate(time) {
-    this.isFrameUpdateInProgress = true;
-    this.#currentTime = time;
-    try {
-      this.#saveTimeToLocalStorage(time);
-      await this.waitForFrameTasks();
-    } catch (error) {
-      console.error("⚠️ [TIME_UPDATE_ERROR] Error during frame update:", error);
-    } finally {
-      this.isFrameUpdateInProgress = false;
-      if (this.queuedTimeUpdate !== null && this.queuedTimeUpdate !== time) {
-        const nextTime = this.queuedTimeUpdate;
-        this.queuedTimeUpdate = null;
-        setTimeout(() => this.#executeTimeUpdate(nextTime), 0);
-      }
-    }
-  }
-  /**
    * Saves time to localStorage (extracted for reuse)
    */
   #saveTimeToLocalStorage(time) {
@@ -175,21 +172,37 @@ let EFTimegroup = class EFTimegroup$1 extends EFTemporal(LitElement) {
       default: throw new Error(`Invalid time mode: ${this.mode}`);
     }
   }
-  async getPendingFrameTasks() {
-    await this.
+  async getPendingFrameTasks(signal) {
+    await this.waitForNestedUpdates(signal);
+    signal?.throwIfAborted();
     const temporals = deepGetElementsWithFrameTasks(this);
     return temporals.map((temporal) => temporal.frameTask).filter((task) => task.status < TaskStatus.COMPLETE);
   }
-  async
+  async waitForNestedUpdates(signal) {
+    const limit = 10;
+    let steps = 0;
+    let isComplete = true;
+    while (true) {
+      steps++;
+      if (steps > limit) throw new Error("Reached update depth limit.");
+      isComplete = await this.updateComplete;
+      signal?.throwIfAborted();
+      if (isComplete) break;
+    }
+  }
+  async waitForFrameTasks(signal) {
    const limit = 10;
    let step = 0;
-    await this.
+    await this.waitForNestedUpdates(signal);
    while (step < limit) {
      step++;
-      let pendingTasks = await this.getPendingFrameTasks();
+      let pendingTasks = await this.getPendingFrameTasks(signal);
+      signal?.throwIfAborted();
      await Promise.all(pendingTasks.map((task) => task.taskComplete));
+      signal?.throwIfAborted();
      await this.updateComplete;
-
+      signal?.throwIfAborted();
+      pendingTasks = await this.getPendingFrameTasks(signal);
      if (pendingTasks.length === 0) break;
    }
  }