@editframe/elements 0.11.0-beta.10 → 0.11.0-beta.15

@@ -16,7 +16,7 @@ export declare class EFMedia extends EFMedia_base {
  fragmentIndexPath(): string;
  fragmentTrackPath(trackId: string): string;
  trackFragmentIndexLoader: Task<readonly [string, typeof fetch], Record<number, TrackFragmentIndex>>;
- protected initSegmentsLoader: Task<readonly [Record<number, TrackFragmentIndex> | undefined, string, typeof fetch], {
+ initSegmentsLoader: Task<readonly [Record<number, TrackFragmentIndex> | undefined, string, typeof fetch], {
  trackId: string;
  buffer: MP4Box.MP4ArrayBuffer;
  mp4File: MP4File;
@@ -17,7 +17,17 @@ export declare class EFTimegroup extends EFTimegroup_base {
  connectedCallback(): void;
  get storageKey(): string;
  get durationMs(): number;
- waitForMediaDurations(): Promise<Record<number, import('../../../assets/src/index.ts').TrackFragmentIndex>[]>;
+ /**
+ * Wait for all media elements to load their initial segments.
+ * Ideally we would only need the extracted index json data, but
+ * that caused issues with constructing audio data. We had negative durations
+ * in calculations and it was not clear why.
+ */
+ waitForMediaDurations(): Promise<({
+ trackId: string;
+ buffer: import('mp4box').MP4ArrayBuffer;
+ mp4File: import('../../../assets/src/MP4File.ts').MP4File;
+ }[] | undefined)[]>;
  get childTemporals(): import('./EFTemporal.ts').TemporalMixinInterface[];
  protected updated(changedProperties: PropertyValueMap<any> | Map<PropertyKey, unknown>): void;
  get contextProvider(): (ParentNode & import('../gui/ContextMixin.ts').ContextMixinInterface) | null;
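The declaration change above means `waitForMediaDurations()` now resolves with each media element's loaded init segments rather than its fragment-index records. A hedged consumer sketch follows; the `querySelector` call and the `any` cast are illustrative, not part of the package API:

```ts
// Hypothetical caller, assuming an <ef-timegroup> element is on the page.
const timegroup = document.querySelector("ef-timegroup") as any;
if (timegroup) {
  // One entry per media element; an entry is undefined when that element's
  // initSegmentsLoader has not produced a value.
  const perMedia = await timegroup.waitForMediaDurations();
  for (const segments of perMedia) {
    if (!segments) continue;
    for (const { trackId, buffer } of segments) {
      console.log(`track ${trackId}: ${buffer.byteLength} byte init segment`);
    }
  }
}
```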
@@ -18,15 +18,6 @@ const timegroupContext = createContext(
  );
  const isEFTemporal = (obj) => obj[EF_TEMPORAL];
  const EF_TEMPORAL = Symbol("EF_TEMPORAL");
- const deepGetTemporalElements = (element, temporals = []) => {
- for (const child of element.children) {
- if (isEFTemporal(child)) {
- temporals.push(child);
- }
- deepGetTemporalElements(child, temporals);
- }
- return temporals;
- };
  const deepGetElementsWithFrameTasks = (element, elements = []) => {
  for (const child of element.children) {
  if ("frameTask" in child && child.frameTask instanceof Task) {
@@ -126,6 +117,9 @@ const EFTemporal = (superClass) => {
126
117
  return this._trimStartMs;
127
118
  }
128
119
  set trimStartMs(value) {
120
+ if (this._trimStartMs === value) {
121
+ return;
122
+ }
129
123
  this._trimStartMs = value;
130
124
  this.setAttribute(
131
125
  "trimstart",
@@ -143,6 +137,9 @@ const EFTemporal = (superClass) => {
  return this._trimEndMs;
  }
  set trimEndMs(value) {
+ if (this._trimEndMs === value) {
+ return;
+ }
  this._trimEndMs = value;
  this.setAttribute("trimend", durationConverter.toAttribute(value / 1e3));
  }
@@ -373,7 +370,6 @@ export {
  EFTemporal,
  OwnCurrentTimeController,
  deepGetElementsWithFrameTasks,
- deepGetTemporalElements,
  isEFTemporal,
  shallowGetTemporalElements,
  timegroupContext
@@ -139,10 +139,16 @@ let EFTimegroup = class extends EFTemporal(LitElement) {
  throw new Error(`Invalid time mode: ${this.mode}`);
  }
  }
+ /**
+ * Wait for all media elements to load their initial segments.
+ * Ideally we would only need the extracted index json data, but
+ * that caused issues with constructing audio data. We had negative durations
+ * in calculations and it was not clear why.
+ */
  async waitForMediaDurations() {
  return await Promise.all(
  deepGetMediaElements(this).map(
- (media) => media.trackFragmentIndexLoader.taskComplete
+ (media) => media.initSegmentsLoader.taskComplete
  )
  );
  }
@@ -234,7 +240,7 @@ let EFTimegroup = class extends EFTemporal(LitElement) {
  *
  */
  shouldWrapWithWorkbench() {
- return EF_INTERACTIVE && this.closest("ef-timegroup") === this && this.contextProvider === null;
+ return EF_INTERACTIVE && this.closest("ef-timegroup") === this && this.closest("ef-preview") === null && this.closest("ef-workbench") === null && this.closest("test-context") === null;
  }
  wrapWithWorkbench() {
  const workbench = document.createElement("ef-workbench");
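The auto-wrap guard above now checks for specific ancestor tags instead of `contextProvider === null`. Under that reading, a top-level timegroup still wraps itself in an `ef-workbench`, while one already inside an `ef-preview`, `ef-workbench`, or `test-context` is left alone. A small illustrative sketch; the element construction is hypothetical:

```ts
// Standalone timegroup: no ef-preview/ef-workbench/test-context ancestor,
// so shouldWrapWithWorkbench() would return true in an interactive build.
const standalone = document.createElement("ef-timegroup");
document.body.append(standalone);

// Nested timegroup: closest("ef-preview") finds an ancestor, so it is skipped.
const preview = document.createElement("ef-preview");
preview.append(document.createElement("ef-timegroup"));
document.body.append(preview);
```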
@@ -291,10 +297,8 @@ _currentTime = /* @__PURE__ */ new WeakMap();
  _EFTimegroup_instances = /* @__PURE__ */ new WeakSet();
  addAudioToContext_fn = async function(audioContext, fromMs, toMs) {
  await this.waitForMediaDurations();
- const durationMs = toMs - fromMs;
  await Promise.all(
  deepGetMediaElements(this).map(async (mediaElement) => {
- await mediaElement.trackFragmentIndexLoader.taskComplete;
  const mediaStartsBeforeEnd = mediaElement.startTimeMs <= toMs;
  const mediaEndsAfterStart = mediaElement.endTimeMs >= fromMs;
  const mediaOverlaps = mediaStartsBeforeEnd && mediaEndsAfterStart;
@@ -305,15 +309,15 @@ addAudioToContext_fn = async function(audioContext, fromMs, toMs) {
  if (!audio) {
  throw new Error("Failed to fetch audio");
  }
- const ctxStartMs = Math.max(0, mediaElement.startTimeMs - fromMs);
- const ctxEndMs = Math.min(durationMs, mediaElement.endTimeMs - fromMs);
- const ctxDurationMs = ctxEndMs - ctxStartMs;
- const offset = Math.max(0, fromMs - mediaElement.startTimeMs) - audio.startMs;
  const bufferSource = audioContext.createBufferSource();
  bufferSource.buffer = await audioContext.decodeAudioData(
  await audio.blob.arrayBuffer()
  );
  bufferSource.connect(audioContext.destination);
+ const ctxStartMs = Math.max(0, mediaElement.startTimeMs - fromMs);
+ const ctxEndMs = mediaElement.endTimeMs - fromMs;
+ const ctxDurationMs = ctxEndMs - ctxStartMs;
+ const offset = Math.max(0, fromMs - mediaElement.startTimeMs) - audio.startMs;
  bufferSource.start(
  ctxStartMs / 1e3,
  offset / 1e3,
@@ -18,14 +18,13 @@ var __decorateClass = (decorators, target, key, kind) => {
  };
  const contextMixinSymbol = Symbol("contextMixin");
  function isContextMixin(value) {
- return typeof value === "object" && value !== null && contextMixinSymbol in value;
+ return typeof value === "object" && value !== null && contextMixinSymbol in value.constructor;
  }
  function ContextMixin(superClass) {
  var _a, _b;
  class ContextElement extends (_b = superClass, _a = contextMixinSymbol, _b) {
  constructor() {
  super(...arguments);
- this[_a] = true;
  this.focusContext = this;
  this.efContext = this;
  this.fetch = async (url, init = {}) => {
@@ -101,6 +100,9 @@ function ContextMixin(superClass) {
  this.#playbackAnimationFrameRequest = null;
  this.#AUDIO_PLAYBACK_SLICE_MS = 1e3;
  }
+ static {
+ this[_a] = true;
+ }
  #URLTokens;
  connectedCallback() {
  super.connectedCallback();
@@ -1,11 +1,8 @@
  import { html, css, LitElement } from "lit";
- import { TaskStatus } from "@lit/task";
  import { eventOptions, customElement } from "lit/decorators.js";
  import { createRef, ref } from "lit/directives/ref.js";
- import { deepGetTemporalElements } from "../elements/EFTemporal.js";
- import { TWMixin } from "./TWMixin.js";
- import { shallowGetTimegroups } from "../elements/EFTimegroup.js";
  import { ContextMixin } from "./ContextMixin.js";
+ import { TWMixin } from "./TWMixin.js";
  var __defProp = Object.defineProperty;
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __decorateClass = (decorators, target, key, kind) => {
@@ -87,26 +84,6 @@ let EFWorkbench = class extends ContextMixin(TWMixin(LitElement)) {
  </div>
  `;
  }
- async stepThrough() {
- const stepDurationMs = 1e3 / 30;
- const timegroups = shallowGetTimegroups(this);
- const firstGroup = timegroups[0];
- if (!firstGroup) {
- throw new Error("No temporal elements found");
- }
- firstGroup.currentTimeMs = 0;
- const temporals = deepGetTemporalElements(this);
- const frameCount = Math.ceil(firstGroup.durationMs / stepDurationMs);
- const busyTasks = temporals.filter((temporal) => temporal.frameTask.status < TaskStatus.COMPLETE).map((temporal) => temporal.frameTask);
- await Promise.all(busyTasks.map((task) => task.taskComplete));
- for (let i = 0; i < frameCount; i++) {
- firstGroup.currentTimeMs = i * stepDurationMs;
- await new Promise(queueMicrotask);
- const busyTasks2 = temporals.filter((temporal) => temporal.frameTask.status < TaskStatus.COMPLETE).map((temporal) => temporal.frameTask);
- await Promise.all(busyTasks2.map((task) => task.taskComplete));
- await new Promise((resolve) => requestAnimationFrame(resolve));
- }
- }
  };
  EFWorkbench.styles = [
  css`
@@ -8,7 +8,6 @@ export declare class EFWorkbench extends EFWorkbench_base {
  update(changedProperties: PropertyValueMap<any> | Map<PropertyKey, unknown>): void;
  drawOverlays: () => void;
  render(): import('lit-html').TemplateResult<1>;
- stepThrough(): Promise<void>;
  }
  declare global {
  interface HTMLElementTagNameMap {
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@editframe/elements",
- "version": "0.11.0-beta.10",
+ "version": "0.11.0-beta.15",
  "description": "",
  "exports": {
  ".": {
@@ -20,7 +20,7 @@
  "author": "",
  "license": "UNLICENSED",
  "dependencies": {
- "@editframe/assets": "0.11.0-beta.10",
+ "@editframe/assets": "0.11.0-beta.15",
  "@lit/context": "^1.1.2",
  "@lit/task": "^1.0.1",
  "d3": "^7.9.0",
@@ -104,7 +104,7 @@ export class EFMedia extends EFSourceMixin(EFTemporal(FetchMixin(LitElement)), {
  },
  });

- protected initSegmentsLoader = new Task(this, {
+ public initSegmentsLoader = new Task(this, {
  autoRun: EF_INTERACTIVE,
  args: () =>
  [this.trackFragmentIndexLoader.value, this.src, this.fetch] as const,
@@ -187,6 +187,9 @@ export const EFTemporal = <T extends Constructor<LitElement>>(
  return this._trimStartMs;
  }
  public set trimStartMs(value: number) {
+ if (this._trimStartMs === value) {
+ return;
+ }
  this._trimStartMs = value;
  this.setAttribute(
  "trimstart",
@@ -211,6 +214,9 @@ export const EFTemporal = <T extends Constructor<LitElement>>(
  return this._trimEndMs;
  }
  public set trimEndMs(value: number) {
+ if (this._trimEndMs === value) {
+ return;
+ }
  this._trimEndMs = value;
  this.setAttribute("trimend", durationConverter.toAttribute(value / 1000));
  }
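Both trim setters (and their compiled counterparts earlier in the diff) now bail out when the assigned value is unchanged. The diff does not state why, but a plausible reading is that it avoids rewriting the reflected attribute on no-op assignments. A small sketch of that effect, with a hypothetical `ef-video` tag standing in for any EFMedia-derived element:

```ts
// Hypothetical element; the tag name and the observer are illustrative only.
const el = document.querySelector("ef-video") as any;
if (el) {
  new MutationObserver((records) => {
    console.log(`trimstart attribute written ${records.length} time(s)`);
  }).observe(el, { attributeFilter: ["trimstart"] });

  el.trimStartMs = 500; // value changes: the setter reflects it to the attribute
  el.trimStartMs = 500; // same value: the new guard returns early, no rewrite
}
```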
@@ -152,10 +152,16 @@ export class EFTimegroup extends EFTemporal(LitElement) {
  }
  }

+ /**
+ * Wait for all media elements to load their initial segments.
+ * Ideally we would only need the extracted index json data, but
+ * that caused issues with constructing audio data. We had negative durations
+ * in calculations and it was not clear why.
+ */
  async waitForMediaDurations() {
  return await Promise.all(
  deepGetMediaElements(this).map(
- (media) => media.trackFragmentIndexLoader.taskComplete,
+ (media) => media.initSegmentsLoader.taskComplete,
  ),
  );
  }
@@ -269,7 +275,9 @@ export class EFTimegroup extends EFTemporal(LitElement) {
  return (
  EF_INTERACTIVE &&
  this.closest("ef-timegroup") === this &&
- this.contextProvider === null
+ this.closest("ef-preview") === null &&
+ this.closest("ef-workbench") === null &&
+ this.closest("test-context") === null
  );
  }

@@ -307,12 +315,8 @@ export class EFTimegroup extends EFTemporal(LitElement) {
  ) {
  await this.waitForMediaDurations();

- const durationMs = toMs - fromMs;
-
  await Promise.all(
  deepGetMediaElements(this).map(async (mediaElement) => {
- await mediaElement.trackFragmentIndexLoader.taskComplete;
-
  const mediaStartsBeforeEnd = mediaElement.startTimeMs <= toMs;
  const mediaEndsAfterStart = mediaElement.endTimeMs >= fromMs;
  const mediaOverlaps = mediaStartsBeforeEnd && mediaEndsAfterStart;
@@ -325,19 +329,19 @@ export class EFTimegroup extends EFTemporal(LitElement) {
  throw new Error("Failed to fetch audio");
  }

- const ctxStartMs = Math.max(0, mediaElement.startTimeMs - fromMs);
- const ctxEndMs = Math.min(durationMs, mediaElement.endTimeMs - fromMs);
- const ctxDurationMs = ctxEndMs - ctxStartMs;
-
- const offset =
- Math.max(0, fromMs - mediaElement.startTimeMs) - audio.startMs;
-
  const bufferSource = audioContext.createBufferSource();
  bufferSource.buffer = await audioContext.decodeAudioData(
  await audio.blob.arrayBuffer(),
  );
  bufferSource.connect(audioContext.destination);

+ const ctxStartMs = Math.max(0, mediaElement.startTimeMs - fromMs);
+ const ctxEndMs = mediaElement.endTimeMs - fromMs;
+ const ctxDurationMs = ctxEndMs - ctxStartMs;
+
+ const offset =
+ Math.max(0, fromMs - mediaElement.startTimeMs) - audio.startMs;
+
  bufferSource.start(
  ctxStartMs / 1000,
  offset / 1000,
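In the hunk above, the scheduling math now runs after the buffer source is built, and `ctxEndMs` is no longer clamped to `toMs - fromMs` (the removed `durationMs`). A hedged worked example of how these values feed `AudioBufferSourceNode.start(when, offset, duration)`, with made-up numbers and assuming the elided third argument carries `ctxDurationMs / 1000`:

```ts
// Illustrative values only: slice the audio context window from 2s to 7s for a
// media element that plays from 1s to 6s on the timeline, with audio.startMs = 0.
const fromMs = 2000;
const mediaStartTimeMs = 1000;
const mediaEndTimeMs = 6000;
const audioStartMs = 0;

const ctxStartMs = Math.max(0, mediaStartTimeMs - fromMs); // 0: already playing at the window start
const ctxEndMs = mediaEndTimeMs - fromMs; // 4000: no longer clamped to the window length
const ctxDurationMs = ctxEndMs - ctxStartMs; // 4000
const offset = Math.max(0, fromMs - mediaStartTimeMs) - audioStartMs; // 1000: skip into the decoded buffer

// start(when, offset, duration) takes seconds, so this schedules start(0, 1, 4):
// begin immediately, skip the first second of audio, and play four seconds.
console.log(ctxStartMs / 1000, offset / 1000, ctxDurationMs / 1000);
```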
@@ -30,14 +30,16 @@ const contextMixinSymbol = Symbol("contextMixin");

  export function isContextMixin(value: any): value is ContextMixinInterface {
  return (
- typeof value === "object" && value !== null && contextMixinSymbol in value
+ typeof value === "object" &&
+ value !== null &&
+ contextMixinSymbol in value.constructor
  );
  }

  type Constructor<T = {}> = new (...args: any[]) => T;
  export function ContextMixin<T extends Constructor<LitElement>>(superClass: T) {
  class ContextElement extends superClass {
- [contextMixinSymbol] = true;
+ static [contextMixinSymbol] = true;

  @provide({ context: focusContext })
  focusContext = this as FocusContext;
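The brand symbol moved from an instance field to a static class field (the compiled output earlier in the diff initializes it in a `static` block), so `isContextMixin` now looks for it on `value.constructor`. A minimal standalone sketch of why that check works; the names here are illustrative, not the package's:

```ts
// The symbol lives on the class object itself; instances reach it via .constructor.
const brand = Symbol("contextMixin");

class WithBrand {
  static [brand] = true;
}

function isBranded(value: any): boolean {
  return typeof value === "object" && value !== null && brand in value.constructor;
}

console.log(isBranded(new WithBrand())); // true: WithBrand carries the static symbol
console.log(isBranded({}));              // false: Object has no such static field
```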
@@ -1,12 +1,9 @@
- import { LitElement, html, css, type PropertyValueMap } from "lit";
- import { TaskStatus } from "@lit/task";
+ import { LitElement, type PropertyValueMap, css, html } from "lit";
  import { customElement, eventOptions } from "lit/decorators.js";
- import { ref, createRef } from "lit/directives/ref.js";
+ import { createRef, ref } from "lit/directives/ref.js";

- import { deepGetTemporalElements } from "../elements/EFTemporal.ts";
- import { TWMixin } from "./TWMixin.ts";
- import { shallowGetTimegroups } from "../elements/EFTimegroup.ts";
  import { ContextMixin } from "./ContextMixin.ts";
+ import { TWMixin } from "./TWMixin.ts";

  @customElement("ef-workbench")
  export class EFWorkbench extends ContextMixin(TWMixin(LitElement)) {
@@ -96,36 +93,6 @@ export class EFWorkbench extends ContextMixin(TWMixin(LitElement)) {
  </div>
  `;
  }
-
- async stepThrough() {
- const stepDurationMs = 1000 / 30;
- const timegroups = shallowGetTimegroups(this);
- const firstGroup = timegroups[0];
- if (!firstGroup) {
- throw new Error("No temporal elements found");
- }
- firstGroup.currentTimeMs = 0;
-
- const temporals = deepGetTemporalElements(this);
- const frameCount = Math.ceil(firstGroup.durationMs / stepDurationMs);
-
- const busyTasks = temporals
- .filter((temporal) => temporal.frameTask.status < TaskStatus.COMPLETE)
- .map((temporal) => temporal.frameTask);
-
- await Promise.all(busyTasks.map((task) => task.taskComplete));
-
- for (let i = 0; i < frameCount; i++) {
- firstGroup.currentTimeMs = i * stepDurationMs;
- await new Promise<void>(queueMicrotask);
- const busyTasks = temporals
- .filter((temporal) => temporal.frameTask.status < TaskStatus.COMPLETE)
- .map((temporal) => temporal.frameTask);
-
- await Promise.all(busyTasks.map((task) => task.taskComplete));
- await new Promise((resolve) => requestAnimationFrame(resolve));
- }
- }
  }

  declare global {