@editframe/elements 0.18.20-beta.0 → 0.18.22-beta.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/dist/elements/EFAudio.d.ts +1 -12
  2. package/dist/elements/EFAudio.js +3 -18
  3. package/dist/elements/EFMedia/AssetMediaEngine.d.ts +1 -1
  4. package/dist/elements/EFMedia/AssetMediaEngine.js +3 -3
  5. package/dist/elements/EFMedia/BufferedSeekingInput.d.ts +15 -9
  6. package/dist/elements/EFMedia/BufferedSeekingInput.js +76 -78
  7. package/dist/elements/EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.js +12 -10
  8. package/dist/elements/EFMedia/audioTasks/makeAudioSeekTask.js +2 -18
  9. package/dist/elements/EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.js +12 -10
  10. package/dist/elements/EFTemporal.d.ts +0 -1
  11. package/dist/elements/EFTemporal.js +4 -8
  12. package/dist/elements/EFTimegroup.d.ts +4 -4
  13. package/dist/elements/EFTimegroup.js +52 -60
  14. package/dist/elements/EFVideo.d.ts +1 -32
  15. package/dist/elements/EFVideo.js +13 -51
  16. package/dist/elements/SampleBuffer.js +1 -1
  17. package/dist/gui/ContextMixin.browsertest.d.ts +1 -1
  18. package/dist/gui/ContextMixin.js +1 -1
  19. package/package.json +2 -2
  20. package/src/elements/EFAudio.browsertest.ts +0 -3
  21. package/src/elements/EFAudio.ts +3 -22
  22. package/src/elements/EFMedia/AssetMediaEngine.browsertest.ts +39 -1
  23. package/src/elements/EFMedia/AssetMediaEngine.ts +5 -4
  24. package/src/elements/EFMedia/BufferedSeekingInput.browsertest.ts +90 -185
  25. package/src/elements/EFMedia/BufferedSeekingInput.ts +119 -130
  26. package/src/elements/EFMedia/audioTasks/makeAudioFrequencyAnalysisTask.ts +21 -21
  27. package/src/elements/EFMedia/audioTasks/makeAudioSeekTask.chunkboundary.regression.browsertest.ts +10 -5
  28. package/src/elements/EFMedia/audioTasks/makeAudioSeekTask.ts +33 -34
  29. package/src/elements/EFMedia/audioTasks/makeAudioTimeDomainAnalysisTask.ts +22 -20
  30. package/src/elements/EFMedia/videoTasks/makeVideoSeekTask.ts +0 -3
  31. package/src/elements/EFMedia.browsertest.ts +72 -60
  32. package/src/elements/EFTemporal.ts +5 -15
  33. package/src/elements/EFTimegroup.browsertest.ts +9 -4
  34. package/src/elements/EFTimegroup.ts +79 -95
  35. package/src/elements/EFVideo.browsertest.ts +172 -160
  36. package/src/elements/EFVideo.ts +17 -73
  37. package/src/elements/SampleBuffer.ts +1 -2
  38. package/src/gui/ContextMixin.browsertest.ts +5 -2
  39. package/src/gui/ContextMixin.ts +7 -0
  40. package/test/EFVideo.framegen.browsertest.ts +0 -54
  41. package/types.json +1 -1
@@ -8,20 +8,9 @@ export declare class EFAudio extends EFAudio_base {
8
8
  frameTask: Task<readonly [import('@lit/task').TaskStatus, import('@lit/task').TaskStatus, import('@lit/task').TaskStatus, import('@lit/task').TaskStatus], void>;
9
9
  /**
10
10
  * Legacy getter for fragment index task (maps to audioSegmentIdTask)
11
+ * Still used by EFCaptions
11
12
  */
12
13
  get fragmentIndexTask(): Task<readonly [import('../transcoding/types/index.js').MediaEngine | undefined, number], number | undefined>;
13
- /**
14
- * Legacy getter for media segments task (maps to audioSegmentFetchTask)
15
- */
16
- get mediaSegmentsTask(): Task<readonly [import('../transcoding/types/index.js').MediaEngine | undefined, number | undefined], ArrayBuffer>;
17
- /**
18
- * Legacy getter for seek task (maps to audioSeekTask)
19
- */
20
- get seekTask(): Task<readonly [number, import('./EFMedia/BufferedSeekingInput.js').BufferedSeekingInput | undefined], import('mediabunny').VideoSample | undefined>;
21
- /**
22
- * Legacy getter for audio asset task (maps to audioBufferTask)
23
- */
24
- get videoAssetTask(): Task<readonly [number], import('./EFMedia/audioTasks/makeAudioBufferTask.js').AudioBufferState>;
25
14
  }
26
15
  declare global {
27
16
  interface HTMLElementTagNameMap {
@@ -1,3 +1,4 @@
1
+ import { EF_INTERACTIVE } from "../EF_INTERACTIVE.js";
1
2
  import { EFMedia } from "./EFMedia.js";
2
3
  import { TWMixin } from "../gui/TWMixin2.js";
3
4
  import { Task } from "@lit/task";
@@ -11,6 +12,7 @@ let EFAudio = class EFAudio$1 extends TWMixin(EFMedia) {
11
12
  this._propertyHack = false;
12
13
  this.audioElementRef = createRef();
13
14
  this.frameTask = new Task(this, {
15
+ autoRun: EF_INTERACTIVE,
14
16
  args: () => [
15
17
  this.audioBufferTask.status,
16
18
  this.audioSeekTask.status,
@@ -31,28 +33,11 @@ let EFAudio = class EFAudio$1 extends TWMixin(EFMedia) {
31
33
  }
32
34
  /**
33
35
  * Legacy getter for fragment index task (maps to audioSegmentIdTask)
36
+ * Still used by EFCaptions
34
37
  */
35
38
  get fragmentIndexTask() {
36
39
  return this.audioSegmentIdTask;
37
40
  }
38
- /**
39
- * Legacy getter for media segments task (maps to audioSegmentFetchTask)
40
- */
41
- get mediaSegmentsTask() {
42
- return this.audioSegmentFetchTask;
43
- }
44
- /**
45
- * Legacy getter for seek task (maps to audioSeekTask)
46
- */
47
- get seekTask() {
48
- return this.audioSeekTask;
49
- }
50
- /**
51
- * Legacy getter for audio asset task (maps to audioBufferTask)
52
- */
53
- get videoAssetTask() {
54
- return this.audioBufferTask;
55
- }
56
41
  };
57
42
  _decorate([property({
58
43
  type: Boolean,
@@ -40,5 +40,5 @@ export declare class AssetMediaEngine extends BaseMediaEngine implements MediaEn
40
40
  * Calculate audio segments for variable-duration segments using track fragment index
41
41
  */
42
42
  calculateAudioSegmentRange(fromMs: number, toMs: number, rendition: AudioRendition, _durationMs: number): SegmentTimeRange[];
43
- computeSegmentId(desiredSeekTimeMs: number, rendition: MediaRendition): number;
43
+ computeSegmentId(seekTimeMs: number, rendition: MediaRendition): number;
44
44
  }
@@ -112,14 +112,14 @@ var AssetMediaEngine = class AssetMediaEngine extends BaseMediaEngine {
112
112
  })}`);
113
113
  return segmentRanges;
114
114
  }
115
- computeSegmentId(desiredSeekTimeMs, rendition) {
115
+ computeSegmentId(seekTimeMs, rendition) {
116
116
  if (!rendition.trackId) throw new Error("Track ID is required for asset metadata");
117
117
  const track = this.data[rendition.trackId];
118
118
  if (!track) throw new Error("Track not found");
119
119
  const { timescale, segments } = track;
120
120
  const startTimeOffsetMs = "startTimeOffsetMs" in rendition && rendition.startTimeOffsetMs || 0;
121
- const mediaTimeMs = roundToMilliseconds(desiredSeekTimeMs + startTimeOffsetMs);
122
- const scaledSeekTime = convertToScaledTime(mediaTimeMs, timescale);
121
+ const offsetSeekTimeMs = roundToMilliseconds(seekTimeMs + startTimeOffsetMs);
122
+ const scaledSeekTime = convertToScaledTime(offsetSeekTimeMs, timescale);
123
123
  for (let i = segments.length - 1; i >= 0; i--) {
124
124
  const segment = segments[i];
125
125
  const segmentEndTime = segment.cts + segment.duration;
@@ -1,4 +1,5 @@
1
- import { MediaSample } from '../SampleBuffer';
1
+ import { AudioSampleSink, InputAudioTrack, InputTrack, InputVideoTrack, VideoSampleSink } from 'mediabunny';
2
+ import { MediaSample, SampleBuffer } from '../SampleBuffer';
2
3
  interface BufferedSeekingInputOptions {
3
4
  videoBufferSize?: number;
4
5
  audioBufferSize?: number;
@@ -10,7 +11,10 @@ interface BufferedSeekingInputOptions {
10
11
  }
11
12
  export declare class NoSample extends RangeError {
12
13
  }
14
+ export declare class ConcurrentSeekError extends RangeError {
15
+ }
13
16
  export declare class BufferedSeekingInput {
17
+ #private;
14
18
  private input;
15
19
  private trackIterators;
16
20
  private trackBuffers;
@@ -28,14 +32,16 @@ export declare class BufferedSeekingInput {
28
32
  getBufferTimestamps(trackId: number): number[];
29
33
  clearBuffer(trackId: number): void;
30
34
  computeDuration(): Promise<number>;
31
- getTrack(trackId: number): Promise<import('mediabunny').InputTrack>;
32
- getAudioTrack(trackId: number): Promise<import('mediabunny').InputAudioTrack>;
33
- getVideoTrack(trackId: number): Promise<import('mediabunny').InputVideoTrack>;
34
- getFirstVideoTrack(): Promise<import('mediabunny').InputVideoTrack | undefined>;
35
- getFirstAudioTrack(): Promise<import('mediabunny').InputAudioTrack | undefined>;
36
- getTrackIterator(trackId: number): Promise<AsyncIterator<MediaSample, any, undefined>>;
37
- private createIteratorSafe;
38
- createTrackBuffer(trackId: number): Promise<void>;
35
+ getTrack(trackId: number): Promise<InputTrack>;
36
+ getAudioTrack(trackId: number): Promise<InputAudioTrack>;
37
+ getVideoTrack(trackId: number): Promise<InputVideoTrack>;
38
+ getFirstVideoTrack(): Promise<InputVideoTrack | undefined>;
39
+ getFirstAudioTrack(): Promise<InputAudioTrack | undefined>;
40
+ getTrackIterator(track: InputTrack): AsyncIterator<MediaSample, any, undefined>;
41
+ createTrackSampleSink(track: InputTrack): AudioSampleSink | VideoSampleSink;
42
+ createTrackIterator(track: InputTrack): AsyncGenerator<import('mediabunny').VideoSample, void, unknown> | AsyncGenerator<import('mediabunny').AudioSample, void, unknown>;
43
+ createTrackBuffer(track: InputTrack): SampleBuffer;
44
+ getTrackBuffer(track: InputTrack): SampleBuffer;
39
45
  seek(trackId: number, timeMs: number): Promise<MediaSample | undefined>;
40
46
  private resetIterator;
41
47
  private seekSafe;
@@ -1,6 +1,6 @@
1
1
  import { roundToMilliseconds } from "./shared/PrecisionUtils.js";
2
2
  import { SampleBuffer } from "../SampleBuffer.js";
3
- import { AudioSampleSink, BufferSource, Input, MP4, VideoSampleSink } from "mediabunny";
3
+ import { AudioSampleSink, BufferSource, Input, InputAudioTrack, InputVideoTrack, MP4, VideoSampleSink } from "mediabunny";
4
4
  const defaultOptions = {
5
5
  videoBufferSize: 30,
6
6
  audioBufferSize: 100,
@@ -70,48 +70,37 @@ var BufferedSeekingInput = class {
70
70
  const tracks = await this.input.getAudioTracks();
71
71
  return tracks[0];
72
72
  }
73
- async getTrackIterator(trackId) {
74
- if (this.trackIterators.has(trackId)) return this.trackIterators.get(trackId);
75
- const existingIteratorCreation = this.trackIteratorCreationPromises.get(trackId);
76
- if (existingIteratorCreation) {
77
- await existingIteratorCreation;
78
- if (this.trackIterators.has(trackId)) return this.trackIterators.get(trackId);
79
- }
80
- const creationPromise = this.createIteratorSafe(trackId);
81
- this.trackIteratorCreationPromises.set(trackId, creationPromise);
82
- try {
83
- const iterator = await creationPromise;
84
- return iterator;
85
- } finally {
86
- this.trackIteratorCreationPromises.delete(trackId);
87
- }
73
+ getTrackIterator(track) {
74
+ if (this.trackIterators.has(track.id)) return this.trackIterators.get(track.id);
75
+ const trackIterator = this.createTrackIterator(track);
76
+ this.trackIterators.set(track.id, trackIterator);
77
+ return trackIterator;
88
78
  }
89
- async createIteratorSafe(trackId) {
90
- const track = await this.getTrack(trackId);
91
- if (track.type === "audio") {
92
- const track$1 = await this.getAudioTrack(trackId);
93
- const sampleSink = new AudioSampleSink(track$1);
94
- const iterator = sampleSink.samples();
95
- this.trackIterators.set(trackId, iterator);
96
- return iterator;
97
- }
98
- {
99
- const track$1 = await this.getVideoTrack(trackId);
100
- const sampleSink = new VideoSampleSink(track$1);
101
- const iterator = sampleSink.samples();
102
- this.trackIterators.set(trackId, iterator);
103
- return iterator;
104
- }
79
+ createTrackSampleSink(track) {
80
+ if (track instanceof InputAudioTrack) return new AudioSampleSink(track);
81
+ if (track instanceof InputVideoTrack) return new VideoSampleSink(track);
82
+ throw new Error(`Unsupported track type ${track.type}`);
105
83
  }
106
- async createTrackBuffer(trackId) {
107
- const track = await this.getTrack(trackId);
84
+ createTrackIterator(track) {
85
+ const sampleSink = this.createTrackSampleSink(track);
86
+ return sampleSink.samples();
87
+ }
88
+ createTrackBuffer(track) {
108
89
  if (track.type === "audio") {
109
- const bufferSize = this.options.audioBufferSize;
110
- this.trackBuffers.set(trackId, new SampleBuffer(bufferSize));
111
- } else {
112
- const bufferSize = this.options.videoBufferSize;
113
- this.trackBuffers.set(trackId, new SampleBuffer(bufferSize));
90
+ const bufferSize$1 = this.options.audioBufferSize;
91
+ const sampleBuffer$1 = new SampleBuffer(bufferSize$1);
92
+ return sampleBuffer$1;
114
93
  }
94
+ const bufferSize = this.options.videoBufferSize;
95
+ const sampleBuffer = new SampleBuffer(bufferSize);
96
+ return sampleBuffer;
97
+ }
98
+ getTrackBuffer(track) {
99
+ const maybeTrackBuffer = this.trackBuffers.get(track.id);
100
+ if (maybeTrackBuffer) return maybeTrackBuffer;
101
+ const trackBuffer = this.createTrackBuffer(track);
102
+ this.trackBuffers.set(track.id, trackBuffer);
103
+ return trackBuffer;
115
104
  }
116
105
  async seek(trackId, timeMs) {
117
106
  const mediaTimeMs = timeMs + this.startTimeOffsetMs;
@@ -126,54 +115,63 @@ var BufferedSeekingInput = class {
126
115
  this.trackSeekPromises.delete(trackId);
127
116
  }
128
117
  }
129
- async resetIterator(trackId) {
130
- const trackBuffer = this.trackBuffers.get(trackId);
118
+ async resetIterator(track) {
119
+ const trackBuffer = this.trackBuffers.get(track.id);
131
120
  trackBuffer?.clear();
132
- const ongoingIteratorCreation = this.trackIteratorCreationPromises.get(trackId);
121
+ const ongoingIteratorCreation = this.trackIteratorCreationPromises.get(track.id);
133
122
  if (ongoingIteratorCreation) await ongoingIteratorCreation;
134
- const iterator = this.trackIterators.get(trackId);
123
+ const iterator = this.trackIterators.get(track.id);
135
124
  if (iterator) try {
136
125
  await iterator.return?.();
137
126
  } catch (_error) {}
138
- this.trackIterators.delete(trackId);
127
+ this.trackIterators.delete(track.id);
139
128
  }
129
+ #seekLock;
140
130
  async seekSafe(trackId, timeMs) {
141
- if (!this.trackBuffers.has(trackId)) await this.createTrackBuffer(trackId);
142
- const trackBuffer = this.trackBuffers.get(trackId);
143
- const track = await this.getTrack(trackId);
144
- const firstTimestampMs = roundToMilliseconds(await track.getFirstTimestamp() * 1e3);
145
- let roundedTimeMs = roundToMilliseconds(timeMs);
146
- if (roundedTimeMs < firstTimestampMs) {
147
- const bufferContents$1 = trackBuffer.getContents();
148
- if (bufferContents$1.length > 0) {
149
- timeMs = firstTimestampMs;
150
- roundedTimeMs = roundToMilliseconds(timeMs);
131
+ if (this.#seekLock) await this.#seekLock.promise;
132
+ const seekLock = Promise.withResolvers();
133
+ this.#seekLock = seekLock;
134
+ try {
135
+ const track = await this.getTrack(trackId);
136
+ const trackBuffer = this.getTrackBuffer(track);
137
+ const roundedTimeMs = roundToMilliseconds(timeMs);
138
+ const firstTimestampMs = roundToMilliseconds(await track.getFirstTimestamp() * 1e3);
139
+ if (roundedTimeMs < firstTimestampMs) {
140
+ console.error("Seeking outside bounds of input", {
141
+ roundedTimeMs,
142
+ firstTimestampMs
143
+ });
144
+ throw new NoSample(`Seeking outside bounds of input ${roundedTimeMs} < ${firstTimestampMs}`);
151
145
  }
146
+ const bufferContents = trackBuffer.getContents();
147
+ if (bufferContents.length > 0) {
148
+ const bufferStartMs = roundToMilliseconds(trackBuffer.firstTimestamp * 1e3);
149
+ if (roundedTimeMs < bufferStartMs) await this.resetIterator(track);
150
+ }
151
+ const alreadyInBuffer = trackBuffer.find(timeMs);
152
+ if (alreadyInBuffer) return alreadyInBuffer;
153
+ const iterator = this.getTrackIterator(track);
154
+ while (true) {
155
+ const { done, value: decodedSample } = await iterator.next();
156
+ if (decodedSample) trackBuffer.push(decodedSample);
157
+ const foundSample = trackBuffer.find(roundedTimeMs);
158
+ if (foundSample) return foundSample;
159
+ if (done) break;
160
+ }
161
+ const finalBufferContents = trackBuffer.getContents();
162
+ if (finalBufferContents.length > 0) {
163
+ const lastSample = finalBufferContents[finalBufferContents.length - 1];
164
+ const lastSampleEndMs = roundToMilliseconds(((lastSample?.timestamp || 0) + (lastSample?.duration || 0)) * 1e3);
165
+ const trackDurationMs = await track.computeDuration() * 1e3;
166
+ const isSeekingToTrackEnd = roundToMilliseconds(timeMs) === roundToMilliseconds(trackDurationMs);
167
+ const isAtEndOfTrack = roundToMilliseconds(timeMs) >= lastSampleEndMs;
168
+ if (isSeekingToTrackEnd && isAtEndOfTrack) return lastSample;
169
+ }
170
+ throw new NoSample(`Sample not found for time ${timeMs} in ${track.type} track ${trackId}`);
171
+ } finally {
172
+ this.#seekLock = void 0;
173
+ seekLock.resolve();
152
174
  }
153
- const bufferContents = trackBuffer.getContents();
154
- if (bufferContents.length > 0) {
155
- const bufferStartMs = roundToMilliseconds(trackBuffer.firstTimestamp * 1e3);
156
- const lastSample = bufferContents[bufferContents.length - 1];
157
- const bufferEndMs = lastSample ? roundToMilliseconds((lastSample.timestamp + (lastSample.duration || 0)) * 1e3) : bufferStartMs;
158
- if (roundedTimeMs < bufferStartMs || roundedTimeMs > bufferEndMs) await this.resetIterator(trackId);
159
- }
160
- const alreadyInBuffer = trackBuffer.find(timeMs);
161
- if (alreadyInBuffer) return alreadyInBuffer;
162
- const iterator = await this.getTrackIterator(trackId);
163
- while (true) {
164
- const { done, value: decodedSample } = await iterator.next();
165
- if (decodedSample) trackBuffer.push(decodedSample);
166
- const foundSample = trackBuffer.find(timeMs);
167
- if (foundSample) return foundSample;
168
- if (done) break;
169
- }
170
- const finalBufferContents = trackBuffer.getContents();
171
- if (finalBufferContents.length > 0) {
172
- const lastSample = finalBufferContents[finalBufferContents.length - 1];
173
- const lastSampleEndMs = roundToMilliseconds(((lastSample?.timestamp || 0) + (lastSample?.duration || 0)) * 1e3);
174
- if (roundToMilliseconds(timeMs) >= lastSampleEndMs) return lastSample;
175
- }
176
- throw new NoSample(`Sample not found for time ${timeMs} in ${track.type} track ${trackId}`);
177
175
  }
178
176
  };
179
177
  export { BufferedSeekingInput };
@@ -50,19 +50,24 @@ function makeAudioFrequencyAnalysisTask(element) {
50
50
  element.fftGain,
51
51
  element.shouldInterpolateFrequencies
52
52
  ],
53
- task: async () => {
53
+ task: async (_, { signal }) => {
54
54
  await element.audioBufferTask.taskComplete;
55
+ signal.throwIfAborted();
55
56
  if (!element.audioBufferTask.value) return null;
56
57
  if (element.currentSourceTimeMs < 0) return null;
57
58
  const currentTimeMs = element.currentSourceTimeMs;
58
- const analysisWindowMs = 5e3;
59
- const fromMs = Math.max(0, currentTimeMs);
60
- const maxToMs = fromMs + analysisWindowMs;
59
+ const frameIntervalMs = 1e3 / 30;
60
+ const earliestFrameMs = currentTimeMs - (element.fftDecay - 1) * frameIntervalMs;
61
+ const fromMs = Math.max(0, earliestFrameMs);
62
+ const maxToMs = currentTimeMs + frameIntervalMs;
61
63
  const videoDurationMs = element.intrinsicDurationMs || 0;
62
64
  const toMs = videoDurationMs > 0 ? Math.min(maxToMs, videoDurationMs) : maxToMs;
63
65
  if (fromMs >= toMs) return null;
66
+ const preliminaryCacheKey = `${element.shouldInterpolateFrequencies}:${element.fftSize}:${element.fftDecay}:${element.fftGain}:${fromMs}:${currentTimeMs}`;
67
+ const cachedSmoothedData = cache.get(preliminaryCacheKey);
68
+ if (cachedSmoothedData) return cachedSmoothedData;
64
69
  const { fetchAudioSpanningTime: fetchAudioSpan } = await import("../shared/AudioSpanUtils.js");
65
- const audioSpan = await fetchAudioSpan(element, fromMs, toMs, new AbortController().signal);
70
+ const audioSpan = await fetchAudioSpan(element, fromMs, toMs, signal);
66
71
  if (!audioSpan || !audioSpan.blob) {
67
72
  console.warn("Frequency analysis skipped: no audio data available");
68
73
  return null;
@@ -71,10 +76,7 @@ function makeAudioFrequencyAnalysisTask(element) {
71
76
  const arrayBuffer = await audioSpan.blob.arrayBuffer();
72
77
  const audioBuffer = await tempAudioContext.decodeAudioData(arrayBuffer);
73
78
  const startOffsetMs = audioSpan.startMs;
74
- const smoothedKey = `${element.shouldInterpolateFrequencies}:${element.fftSize}:${element.fftDecay}:${element.fftGain}:${startOffsetMs}:${currentTimeMs}`;
75
- const cachedSmoothedData = cache.get(smoothedKey);
76
- if (cachedSmoothedData) return cachedSmoothedData;
77
- const framesData = await Promise.all(Array.from({ length: element.fftDecay }, async (_, i) => {
79
+ const framesData = await Promise.all(Array.from({ length: element.fftDecay }, async (_$1, i) => {
78
80
  const frameOffset = i * (1e3 / 30);
79
81
  const startTime = Math.max(0, (currentTimeMs - frameOffset - startOffsetMs) / 1e3);
80
82
  const cacheKey = `${element.shouldInterpolateFrequencies}:${element.fftSize}:${element.fftGain}:${startOffsetMs}:${startTime}`;
@@ -133,7 +135,7 @@ function makeAudioFrequencyAnalysisTask(element) {
133
135
  });
134
136
  const slicedData = smoothedData.slice(0, Math.floor(smoothedData.length / 2));
135
137
  const processedData = element.shouldInterpolateFrequencies ? processFFTData(slicedData) : slicedData;
136
- cache.set(smoothedKey, processedData);
138
+ cache.set(preliminaryCacheKey, processedData);
137
139
  return processedData;
138
140
  }
139
141
  });
@@ -13,24 +13,8 @@ const makeAudioSeekTask = (host) => {
13
13
  else console.error("audioSeekTask unknown error", error);
14
14
  },
15
15
  onComplete: (_value) => {},
16
- task: async ([targetSeekTimeMs], { signal }) => {
17
- await host.audioSegmentIdTask.taskComplete;
18
- signal.throwIfAborted();
19
- await host.audioSegmentFetchTask.taskComplete;
20
- signal.throwIfAborted();
21
- await host.audioInitSegmentFetchTask.taskComplete;
22
- signal.throwIfAborted();
23
- const audioInput = await host.audioInputTask.taskComplete;
24
- signal.throwIfAborted();
25
- if (!audioInput) throw new Error("Audio input is not available");
26
- const audioTrack = await audioInput.getFirstAudioTrack();
27
- if (!audioTrack) throw new Error("Audio track is not available");
28
- signal.throwIfAborted();
29
- const sample = await audioInput.seek(audioTrack.id, targetSeekTimeMs);
30
- signal.throwIfAborted();
31
- if (sample === void 0 && signal.aborted) return void 0;
32
- if (sample === void 0) throw new Error("Audio seek failed to find sample");
33
- return sample;
16
+ task: async () => {
17
+ return void 0;
34
18
  }
35
19
  });
36
20
  };
@@ -17,19 +17,24 @@ function makeAudioTimeDomainAnalysisTask(element) {
17
17
  element.fftGain,
18
18
  element.shouldInterpolateFrequencies
19
19
  ],
20
- task: async () => {
20
+ task: async (_, { signal }) => {
21
21
  await element.audioBufferTask.taskComplete;
22
+ signal.throwIfAborted();
22
23
  if (!element.audioBufferTask.value) return null;
23
24
  if (element.currentSourceTimeMs < 0) return null;
24
25
  const currentTimeMs = element.currentSourceTimeMs;
25
- const analysisWindowMs = 5e3;
26
- const fromMs = Math.max(0, currentTimeMs);
27
- const maxToMs = fromMs + analysisWindowMs;
26
+ const frameIntervalMs = 1e3 / 30;
27
+ const earliestFrameMs = currentTimeMs - (element.fftDecay - 1) * frameIntervalMs;
28
+ const fromMs = Math.max(0, earliestFrameMs);
29
+ const maxToMs = currentTimeMs + frameIntervalMs;
28
30
  const videoDurationMs = element.intrinsicDurationMs || 0;
29
31
  const toMs = videoDurationMs > 0 ? Math.min(maxToMs, videoDurationMs) : maxToMs;
30
32
  if (fromMs >= toMs) return null;
33
+ const preliminaryCacheKey = `${element.shouldInterpolateFrequencies}:${element.fftSize}:${element.fftDecay}:${element.fftGain}:${fromMs}:${currentTimeMs}`;
34
+ const cachedData = cache.get(preliminaryCacheKey);
35
+ if (cachedData) return cachedData;
31
36
  const { fetchAudioSpanningTime: fetchAudioSpan } = await import("../shared/AudioSpanUtils.js");
32
- const audioSpan = await fetchAudioSpan(element, fromMs, toMs, new AbortController().signal);
37
+ const audioSpan = await fetchAudioSpan(element, fromMs, toMs, signal);
33
38
  if (!audioSpan || !audioSpan.blob) {
34
39
  console.warn("Time domain analysis skipped: no audio data available");
35
40
  return null;
@@ -38,10 +43,7 @@ function makeAudioTimeDomainAnalysisTask(element) {
38
43
  const arrayBuffer = await audioSpan.blob.arrayBuffer();
39
44
  const audioBuffer = await tempAudioContext.decodeAudioData(arrayBuffer);
40
45
  const startOffsetMs = audioSpan.startMs;
41
- const smoothedKey = `${element.shouldInterpolateFrequencies}:${element.fftSize}:${element.fftDecay}:${element.fftGain}:${startOffsetMs}:${currentTimeMs}`;
42
- const cachedData = cache.get(smoothedKey);
43
- if (cachedData) return cachedData;
44
- const framesData = await Promise.all(Array.from({ length: element.fftDecay }, async (_, frameIndex) => {
46
+ const framesData = await Promise.all(Array.from({ length: element.fftDecay }, async (_$1, frameIndex) => {
45
47
  const frameOffset = frameIndex * (1e3 / 30);
46
48
  const startTime = Math.max(0, (currentTimeMs - frameOffset - startOffsetMs) / 1e3);
47
49
  const cacheKey = `${element.shouldInterpolateFrequencies}:${element.fftSize}:${element.fftGain}:${startOffsetMs}:${startTime}`;
@@ -99,7 +101,7 @@ function makeAudioTimeDomainAnalysisTask(element) {
99
101
  });
100
102
  smoothedData[i] = Math.min(255, Math.round(weightedSum / weightSum));
101
103
  }
102
- cache.set(smoothedKey, smoothedData);
104
+ cache.set(preliminaryCacheKey, smoothedData);
103
105
  return smoothedData;
104
106
  }
105
107
  });
@@ -187,7 +187,6 @@ export declare const deepGetElementsWithFrameTasks: (element: Element, elements?
187
187
  }>) => (HTMLElement & {
188
188
  frameTask: Task;
189
189
  })[];
190
- export declare const clearTemporalCacheForElement: (element: Element) => void;
191
190
  export declare const shallowGetTemporalElements: (element: Element, temporals?: TemporalMixinInterface[]) => TemporalMixinInterface[];
192
191
  export declare class OwnCurrentTimeController implements ReactiveController {
193
192
  private host;
@@ -15,21 +15,17 @@ const deepGetElementsWithFrameTasks = (element, elements = []) => {
15
15
  return elements;
16
16
  };
17
17
  let temporalCache;
18
- let modifiedElements = /* @__PURE__ */ new WeakSet();
19
18
  const resetTemporalCache = () => {
20
19
  temporalCache = /* @__PURE__ */ new Map();
21
- modifiedElements = /* @__PURE__ */ new WeakSet();
22
20
  if (typeof requestAnimationFrame !== "undefined") requestAnimationFrame(resetTemporalCache);
23
21
  };
24
22
  resetTemporalCache();
25
- const clearTemporalCacheForElement = (element) => {
26
- temporalCache.delete(element);
27
- modifiedElements.add(element);
28
- };
29
23
  const shallowGetTemporalElements = (element, temporals = []) => {
30
- temporals.length = 0;
24
+ const cachedResult = temporalCache.get(element);
25
+ if (cachedResult) return cachedResult;
31
26
  for (const child of element.children) if (isEFTemporal(child)) temporals.push(child);
32
27
  else shallowGetTemporalElements(child, temporals);
28
+ temporalCache.set(element, temporals);
33
29
  return temporals;
34
30
  };
35
31
  var OwnCurrentTimeController = class {
@@ -285,4 +281,4 @@ const EFTemporal = (superClass) => {
285
281
  Object.defineProperty(TemporalMixinClass.prototype, EF_TEMPORAL, { value: true });
286
282
  return TemporalMixinClass;
287
283
  };
288
- export { EFTemporal, clearTemporalCacheForElement, deepGetElementsWithFrameTasks, flushStartTimeMsCache, isEFTemporal, shallowGetTemporalElements, timegroupContext };
284
+ export { EFTemporal, deepGetElementsWithFrameTasks, flushStartTimeMsCache, isEFTemporal, shallowGetTemporalElements, timegroupContext };
@@ -6,8 +6,6 @@ export declare class EFTimegroup extends EFTimegroup_base {
6
6
  #private;
7
7
  static styles: import('lit').CSSResult;
8
8
  _timeGroupContext: this;
9
- private isFrameUpdateInProgress;
10
- private queuedTimeUpdate;
11
9
  mode: "fit" | "fixed" | "sequence" | "contain";
12
10
  overlapMs: number;
13
11
  fit: "none" | "contain" | "cover";
@@ -27,8 +25,9 @@ export declare class EFTimegroup extends EFTimegroup_base {
27
25
  get intrinsicDurationMs(): number | undefined;
28
26
  get hasOwnDuration(): boolean;
29
27
  get durationMs(): number;
30
- getPendingFrameTasks(): Promise<Task<readonly unknown[], unknown>[]>;
31
- waitForFrameTasks(): Promise<void>;
28
+ getPendingFrameTasks(signal?: AbortSignal): Promise<Task<readonly unknown[], unknown>[]>;
29
+ waitForNestedUpdates(signal?: AbortSignal): Promise<void>;
30
+ waitForFrameTasks(signal?: AbortSignal): Promise<void>;
32
31
  /**
33
32
  * Wait for all media elements to load their initial segments.
34
33
  * Ideally we would only need the extracted index json data, but
@@ -60,6 +59,7 @@ export declare class EFTimegroup extends EFTimegroup_base {
60
59
  testPlayAudio(fromMs: number, toMs: number): Promise<void>;
61
60
  loadMd5Sums(): Promise<void>;
62
61
  frameTask: Task<readonly [number, number], void>;
62
+ seekTask: Task<readonly [number], void>;
63
63
  }
64
64
  declare global {
65
65
  interface HTMLElementTagNameMap {