@editframe/elements 0.11.0-beta.14 → 0.11.0-beta.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16,7 +16,7 @@ export declare class EFMedia extends EFMedia_base {
  fragmentIndexPath(): string;
  fragmentTrackPath(trackId: string): string;
  trackFragmentIndexLoader: Task<readonly [string, typeof fetch], Record<number, TrackFragmentIndex>>;
- protected initSegmentsLoader: Task<readonly [Record<number, TrackFragmentIndex> | undefined, string, typeof fetch], {
+ initSegmentsLoader: Task<readonly [Record<number, TrackFragmentIndex> | undefined, string, typeof fetch], {
  trackId: string;
  buffer: MP4Box.MP4ArrayBuffer;
  mp4File: MP4File;
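
With the `protected` modifier gone, `initSegmentsLoader` is part of the element's public API, which is what lets `EFTimegroup.waitForMediaDurations()` (below) await it from outside the class. A minimal consumer sketch, assuming standard `@lit/task` behavior where `taskComplete` resolves with the task's value; the `ef-media` tag name, the import, and the variable names are illustrative assumptions, not taken from this diff:

import type { EFMedia } from '@editframe/elements';

const media = document.querySelector('ef-media') as EFMedia;

// taskComplete is @lit/task's promise for the current task run; here it
// resolves with the loaded init segments (or undefined if none loaded).
const initSegments = await media.initSegmentsLoader.taskComplete;

for (const segment of initSegments ?? []) {
  console.log(segment.trackId, segment.buffer.byteLength);
}
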
@@ -17,7 +17,17 @@ export declare class EFTimegroup extends EFTimegroup_base {
  connectedCallback(): void;
  get storageKey(): string;
  get durationMs(): number;
- waitForMediaDurations(): Promise<Record<number, import('../../../assets/src/index.ts').TrackFragmentIndex>[]>;
+ /**
+ * Wait for all media elements to load their initial segments.
+ * Ideally we would only need the extracted index json data, but
+ * that caused issues with constructing audio data. We had negative durations
+ * in calculations and it was not clear why.
+ */
+ waitForMediaDurations(): Promise<({
+ trackId: string;
+ buffer: import('mp4box').MP4ArrayBuffer;
+ mp4File: import('../../../assets/src/MP4File.ts').MP4File;
+ }[] | undefined)[]>;
  get childTemporals(): import('./EFTemporal.ts').TemporalMixinInterface[];
  protected updated(changedProperties: PropertyValueMap<any> | Map<PropertyKey, unknown>): void;
  get contextProvider(): (ParentNode & import('../gui/ContextMixin.ts').ContextMixinInterface) | null;
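
The return type change mirrors the loader swap: the promise used to resolve with raw fragment-index records and now resolves with one entry per media element, each either that element's loaded init segments or `undefined`. A sketch of consuming the new shape (the `timegroup` variable is assumed, not from this diff):

const perMedia = await timegroup.waitForMediaDurations();

// Each entry is `initSegments[] | undefined`, so filter before flattening.
const segments = perMedia
  .filter((entry): entry is Exclude<typeof entry, undefined> => entry !== undefined)
  .flat();

console.log(`${segments.length} init segments across all media elements`);
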
@@ -139,10 +139,16 @@ let EFTimegroup = class extends EFTemporal(LitElement) {
  throw new Error(`Invalid time mode: ${this.mode}`);
  }
  }
+ /**
+ * Wait for all media elements to load their initial segments.
+ * Ideally we would only need the extracted index json data, but
+ * that caused issues with constructing audio data. We had negative durations
+ * in calculations and it was not clear why.
+ */
  async waitForMediaDurations() {
  return await Promise.all(
  deepGetMediaElements(this).map(
- (media) => media.trackFragmentIndexLoader.taskComplete
+ (media) => media.initSegmentsLoader.taskComplete
  )
  );
  }
@@ -291,10 +297,8 @@ _currentTime = /* @__PURE__ */ new WeakMap();
  _EFTimegroup_instances = /* @__PURE__ */ new WeakSet();
  addAudioToContext_fn = async function(audioContext, fromMs, toMs) {
  await this.waitForMediaDurations();
- const durationMs = toMs - fromMs;
  await Promise.all(
  deepGetMediaElements(this).map(async (mediaElement) => {
- await mediaElement.trackFragmentIndexLoader.taskComplete;
  const mediaStartsBeforeEnd = mediaElement.startTimeMs <= toMs;
  const mediaEndsAfterStart = mediaElement.endTimeMs >= fromMs;
  const mediaOverlaps = mediaStartsBeforeEnd && mediaEndsAfterStart;
@@ -305,15 +309,15 @@ addAudioToContext_fn = async function(audioContext, fromMs, toMs) {
  if (!audio) {
  throw new Error("Failed to fetch audio");
  }
- const ctxStartMs = Math.max(0, mediaElement.startTimeMs - fromMs);
- const ctxEndMs = Math.min(durationMs, mediaElement.endTimeMs - fromMs);
- const ctxDurationMs = ctxEndMs - ctxStartMs;
- const offset = Math.max(0, fromMs - mediaElement.startTimeMs) - audio.startMs;
  const bufferSource = audioContext.createBufferSource();
  bufferSource.buffer = await audioContext.decodeAudioData(
  await audio.blob.arrayBuffer()
  );
  bufferSource.connect(audioContext.destination);
+ const ctxStartMs = Math.max(0, mediaElement.startTimeMs - fromMs);
+ const ctxEndMs = mediaElement.endTimeMs - fromMs;
+ const ctxDurationMs = ctxEndMs - ctxStartMs;
+ const offset = Math.max(0, fromMs - mediaElement.startTimeMs) - audio.startMs;
  bufferSource.start(
  ctxStartMs / 1e3,
  offset / 1e3,
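
In both the compiled and source versions of this change, these values feed `AudioBufferSourceNode.start(when, offset, duration)`, whose parameters are in seconds, hence the `/ 1e3` conversions; note that `ctxEndMs` is no longer clamped to the render window via `Math.min(durationMs, …)`. The hunk is cut off before `start`'s third argument, so treating `ctxDurationMs / 1e3` as that argument is an assumption. A worked example with invented numbers (none of these values appear in the diff):

const fromMs = 1000;      // render window start
const startTimeMs = 2000; // media element's timeline start
const endTimeMs = 7000;   // media element's timeline end
const audioStartMs = 0;   // offset of the fetched audio within the media

const ctxStartMs = Math.max(0, startTimeMs - fromMs);            // 1000
const ctxEndMs = endTimeMs - fromMs;                             // 6000 (unclamped)
const ctxDurationMs = ctxEndMs - ctxStartMs;                     // 5000
const offset = Math.max(0, fromMs - startTimeMs) - audioStartMs; // 0

// start(when, offset, duration), all in seconds: begin playback 1 s into
// the context timeline, reading from the start of the decoded buffer.
const audioContext = new AudioContext();
const bufferSource = audioContext.createBufferSource();
bufferSource.start(ctxStartMs / 1e3, offset / 1e3, ctxDurationMs / 1e3);
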
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@editframe/elements",
- "version": "0.11.0-beta.14",
+ "version": "0.11.0-beta.15",
  "description": "",
  "exports": {
  ".": {
@@ -20,7 +20,7 @@
  "author": "",
  "license": "UNLICENSED",
  "dependencies": {
- "@editframe/assets": "0.11.0-beta.14",
+ "@editframe/assets": "0.11.0-beta.15",
  "@lit/context": "^1.1.2",
  "@lit/task": "^1.0.1",
  "d3": "^7.9.0",
@@ -104,7 +104,7 @@ export class EFMedia extends EFSourceMixin(EFTemporal(FetchMixin(LitElement)), {
  },
  });

- protected initSegmentsLoader = new Task(this, {
+ public initSegmentsLoader = new Task(this, {
  autoRun: EF_INTERACTIVE,
  args: () =>
  [this.trackFragmentIndexLoader.value, this.src, this.fetch] as const,
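
For readers unfamiliar with `@lit/task`: a `Task` re-runs its `task` function when the tuple returned by `args` changes on a host update (when `autoRun` is enabled, here gated on `EF_INTERACTIVE`), and exposes `taskComplete` as a promise for the current run, which is what the `waitForMediaDurations` hunks below await. A self-contained sketch of the pattern; the task body and names are placeholders, not Editframe's real implementation:

import { LitElement } from 'lit';
import { Task } from '@lit/task';

class DemoElement extends LitElement {
  src = '/media.mp4';

  // Re-runs when args() yields a new tuple on a host update; with
  // autoRun: false it would only run via loader.run().
  loader = new Task(this, {
    autoRun: true,
    args: () => [this.src, fetch] as const,
    task: async ([src, fetchFn]) => {
      const res = await fetchFn(src);
      return res.arrayBuffer();
    },
  });
}

// Elsewhere: await the current run's result.
// const buffer = await demoElement.loader.taskComplete;
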
@@ -152,10 +152,16 @@ export class EFTimegroup extends EFTemporal(LitElement) {
  }
  }

+ /**
+ * Wait for all media elements to load their initial segments.
+ * Ideally we would only need the extracted index json data, but
+ * that caused issues with constructing audio data. We had negative durations
+ * in calculations and it was not clear why.
+ */
  async waitForMediaDurations() {
  return await Promise.all(
  deepGetMediaElements(this).map(
- (media) => media.trackFragmentIndexLoader.taskComplete,
+ (media) => media.initSegmentsLoader.taskComplete,
  ),
  );
  }
@@ -309,12 +315,8 @@ export class EFTimegroup extends EFTemporal(LitElement) {
  ) {
  await this.waitForMediaDurations();

- const durationMs = toMs - fromMs;
-
  await Promise.all(
  deepGetMediaElements(this).map(async (mediaElement) => {
- await mediaElement.trackFragmentIndexLoader.taskComplete;
-
  const mediaStartsBeforeEnd = mediaElement.startTimeMs <= toMs;
  const mediaEndsAfterStart = mediaElement.endTimeMs >= fromMs;
  const mediaOverlaps = mediaStartsBeforeEnd && mediaEndsAfterStart;
@@ -327,19 +329,19 @@ export class EFTimegroup extends EFTemporal(LitElement) {
  throw new Error("Failed to fetch audio");
  }

- const ctxStartMs = Math.max(0, mediaElement.startTimeMs - fromMs);
- const ctxEndMs = Math.min(durationMs, mediaElement.endTimeMs - fromMs);
- const ctxDurationMs = ctxEndMs - ctxStartMs;
-
- const offset =
-   Math.max(0, fromMs - mediaElement.startTimeMs) - audio.startMs;
-
  const bufferSource = audioContext.createBufferSource();
  bufferSource.buffer = await audioContext.decodeAudioData(
  await audio.blob.arrayBuffer(),
  );
  bufferSource.connect(audioContext.destination);

+ const ctxStartMs = Math.max(0, mediaElement.startTimeMs - fromMs);
+ const ctxEndMs = mediaElement.endTimeMs - fromMs;
+ const ctxDurationMs = ctxEndMs - ctxStartMs;
+
+ const offset =
+   Math.max(0, fromMs - mediaElement.startTimeMs) - audio.startMs;
+
  bufferSource.start(
  ctxStartMs / 1000,
  offset / 1000,