@videojs/html 10.0.0-beta.1 → 10.0.0-beta.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. package/cdn/audio-minimal.css +1 -0
  2. package/cdn/audio-minimal.dev.js +5360 -0
  3. package/cdn/audio-minimal.dev.js.map +1 -0
  4. package/cdn/audio-minimal.js +25 -0
  5. package/cdn/audio-minimal.js.map +1 -0
  6. package/cdn/audio.css +1 -0
  7. package/cdn/audio.dev.js +5351 -0
  8. package/cdn/audio.dev.js.map +1 -0
  9. package/cdn/audio.js +25 -0
  10. package/cdn/audio.js.map +1 -0
  11. package/cdn/background.css +1 -0
  12. package/cdn/background.dev.js +2057 -0
  13. package/cdn/background.dev.js.map +1 -0
  14. package/cdn/background.js +19 -0
  15. package/cdn/background.js.map +1 -0
  16. package/cdn/media/hls-video.dev.js +28728 -0
  17. package/cdn/media/hls-video.dev.js.map +1 -0
  18. package/cdn/media/hls-video.js +83 -0
  19. package/cdn/media/hls-video.js.map +1 -0
  20. package/cdn/media/simple-hls-video.dev.js +3796 -0
  21. package/cdn/media/simple-hls-video.dev.js.map +1 -0
  22. package/cdn/media/simple-hls-video.js +44 -0
  23. package/cdn/media/simple-hls-video.js.map +1 -0
  24. package/cdn/video-minimal.css +1 -0
  25. package/cdn/video-minimal.dev.js +5714 -0
  26. package/cdn/video-minimal.dev.js.map +1 -0
  27. package/cdn/video-minimal.js +25 -0
  28. package/cdn/video-minimal.js.map +1 -0
  29. package/cdn/video.css +1 -0
  30. package/cdn/video.dev.js +5782 -0
  31. package/cdn/video.dev.js.map +1 -0
  32. package/cdn/video.js +25 -0
  33. package/cdn/video.js.map +1 -0
  34. package/dist/default/_virtual/inline-css_src/define/audio/minimal-skin.js +1 -1
  35. package/dist/default/_virtual/inline-css_src/define/audio/minimal-skin.js.map +1 -1
  36. package/dist/default/_virtual/inline-css_src/define/audio/skin.js +1 -1
  37. package/dist/default/_virtual/inline-css_src/define/audio/skin.js.map +1 -1
  38. package/dist/default/_virtual/inline-css_src/define/video/minimal-skin.js +1 -1
  39. package/dist/default/_virtual/inline-css_src/define/video/minimal-skin.js.map +1 -1
  40. package/dist/default/_virtual/inline-css_src/define/video/skin.js +1 -1
  41. package/dist/default/_virtual/inline-css_src/define/video/skin.js.map +1 -1
  42. package/dist/default/define/audio/minimal-skin.js +1 -79
  43. package/dist/default/define/audio/minimal-skin.js.map +1 -1
  44. package/dist/default/define/audio/minimal-skin.tailwind.js +1 -81
  45. package/dist/default/define/audio/minimal-skin.tailwind.js.map +1 -1
  46. package/dist/default/define/audio/skin.js +1 -70
  47. package/dist/default/define/audio/skin.js.map +1 -1
  48. package/dist/default/define/audio/skin.tailwind.js +1 -72
  49. package/dist/default/define/audio/skin.tailwind.js.map +1 -1
  50. package/dist/default/define/background/skin.js +1 -5
  51. package/dist/default/define/background/skin.js.map +1 -1
  52. package/dist/default/define/skin-mixin.js +1 -15
  53. package/dist/default/define/skin-mixin.js.map +1 -1
  54. package/dist/default/define/video/minimal-skin.js +1 -121
  55. package/dist/default/define/video/minimal-skin.js.map +1 -1
  56. package/dist/default/define/video/minimal-skin.tailwind.js +1 -131
  57. package/dist/default/define/video/minimal-skin.tailwind.js.map +1 -1
  58. package/dist/default/define/video/skin.js +1 -116
  59. package/dist/default/define/video/skin.js.map +1 -1
  60. package/dist/default/define/video/skin.tailwind.js +1 -124
  61. package/dist/default/define/video/skin.tailwind.js.map +1 -1
  62. package/dist/default/media/background-video/index.js +1 -18
  63. package/dist/default/media/background-video/index.js.map +1 -1
  64. package/package.json +12 -10
@@ -0,0 +1,3796 @@
1
+ //#region ../core/dist/dev/core/utils/define-class-prop-hooks.js
2
/**
 * Mirrors the property surface of `BaseClassProto` onto `Class.prototype`,
 * replacing each method/accessor with a thin hook that forwards to the
 * instance's own `call`/`get`/`set` methods (when they exist).
 */
function defineClassPropHooks(Class, BaseClassProto) {
	for (const name of Object.getOwnPropertyNames(BaseClassProto)) {
		// Never shadow members the class (or its prototype chain) already has.
		if (name in Class.prototype) continue;
		const source = Object.getOwnPropertyDescriptor(BaseClassProto, name);
		if (!source) continue;
		const hook = {};
		if (typeof source.value === "function") {
			hook.value = function (...args) {
				return this.call?.(name, ...args);
			};
		} else if (source.get) {
			hook.get = function () {
				return this.get?.(name);
			};
			if (source.set) {
				hook.set = function (val) {
					this.set?.(name, val);
				};
			}
		}
		Object.defineProperty(Class.prototype, name, hook);
	}
}
22
+
23
+ //#endregion
24
+ //#region ../core/dist/dev/core/media/delegate.js
25
/**
 * Mixin that routes `get`, `set`, and `call` through an instance of
 * `DelegateClass` first, falling back to the base class implementation
 * when the delegate does not expose the requested member.
 *
 * Compatible with both `CustomMediaMixin` and `MediaProxyMixin`.
 */
function MediaDelegateMixin(BaseClass, DelegateClass) {
	class DelegateMedia extends BaseClass {
		#delegate = new DelegateClass();
		get(prop) {
			return prop in this.#delegate ? this.#delegate[prop] : super.get?.(prop);
		}
		set(prop, val) {
			if (!(prop in this.#delegate)) {
				super.set?.(prop, val);
				return;
			}
			this.#delegate[prop] = val;
		}
		call(prop, ...args) {
			return prop in this.#delegate
				? this.#delegate[prop](...args)
				: super.call?.(prop, ...args);
		}
		attach(target) {
			super.attach?.(target);
			this.#delegate.attach?.(target);
		}
		detach() {
			// Tear down in reverse attach order: delegate first, then base.
			this.#delegate.detach?.();
			super.detach?.();
		}
	}
	// Expose every prototype member of the delegate (walking its chain up to,
	// but not including, Object.prototype) as hooked props on the mixed class.
	let proto = DelegateClass.prototype;
	while (proto && proto !== Object.prototype) {
		defineClassPropHooks(DelegateMedia, proto);
		proto = Object.getPrototypeOf(proto);
	}
	return DelegateMedia;
}
62
+
63
+ //#endregion
64
+ //#region ../core/dist/dev/dom/media/custom-media-element/index.js
65
+ /**
66
+ * Custom Media Element
67
+ * Based on https://github.com/muxinc/custom-video-element - Mux - MIT License
68
+ *
69
+ * The goal is to create an element that works just like the video element
70
+ * but can be extended/sub-classed, because native elements cannot be
71
+ * extended today across browsers.
72
+ */
73
// Media events re-dispatched from the native element onto the custom element
// host (captured in the shadow root, see CustomMediaMixin#init/handleEvent).
// Covers the standard HTMLMediaElement events plus resize,
// picture-in-picture, and the WebKit-prefixed fullscreen/presentation events.
const Events = [
	"abort",
	"canplay",
	"canplaythrough",
	"durationchange",
	"emptied",
	"encrypted",
	"ended",
	"error",
	"loadeddata",
	"loadedmetadata",
	"loadstart",
	"pause",
	"play",
	"playing",
	"progress",
	"ratechange",
	"seeked",
	"seeking",
	"stalled",
	"suspend",
	"timeupdate",
	"volumechange",
	"waiting",
	"waitingforkey",
	"resize",
	"enterpictureinpicture",
	"leavepictureinpicture",
	"webkitbeginfullscreen",
	"webkitendfullscreen",
	"webkitpresentationmodechanged"
];
105
// Content attributes mirrored between the custom element and the native
// media element (also the allowlist used by serializeAttributes). Note
// "muted" is special-cased elsewhere: the attribute backs defaultMuted,
// not the live muted property.
const Attributes = [
	"autopictureinpicture",
	"disablepictureinpicture",
	"disableremoteplayback",
	"autoplay",
	"controls",
	"controlslist",
	"crossorigin",
	"loop",
	"muted",
	"playsinline",
	"poster",
	"preload",
	"src"
];
120
/**
 * Helper function to generate the HTML template for audio elements.
 *
 * Renders a named "media" slot whose fallback is an <audio> element carrying
 * the allowlisted attributes of the host, plus a default slot for additional
 * children such as <source> and <track>.
 */
function getAudioTemplateHTML(attrs) {
	return `
		<style>
			:host {
				display: inline-flex;
				line-height: 0;
				flex-direction: column;
				justify-content: end;
			}

			audio {
				width: 100%;
			}
		</style>
		<slot name="media">
			<audio${serializeAttributes(attrs)}></audio>
		</slot>
		<slot></slot>
	`;
}
143
/**
 * Helper function to generate the HTML template for video elements.
 *
 * Renders a named "media" slot whose fallback is a <video> element carrying
 * the allowlisted attributes of the host, plus a default slot for additional
 * children such as <source> and <track>. Sizing and text-track styling are
 * customizable via the --media-* CSS custom properties.
 */
function getVideoTemplateHTML(attrs) {
	return `
		<style>
			:host {
				display: inline-block;
				line-height: 0;
			}

			video {
				max-width: 100%;
				max-height: 100%;
				min-width: 100%;
				min-height: 100%;
				object-fit: var(--media-object-fit, contain);
				object-position: var(--media-object-position, 50% 50%);
			}

			video::-webkit-media-text-track-container {
				transform: var(--media-webkit-text-track-transform);
				transition: var(--media-webkit-text-track-transition);
			}
		</style>
		<slot name="media">
			<video${serializeAttributes(attrs)}></video>
		</slot>
		<slot></slot>
	`;
}
174
/**
 * Mixin that turns `superclass` (normally HTMLElement) into a custom element
 * behaving like the native media element created by `tag`/`is`.
 *
 * Native element properties are mirrored lazily onto the prototype the first
 * time `observedAttributes` is read; property reads, writes, and method calls
 * are forwarded to the slotted/rendered native element via `get`/`set`/`call`.
 */
function CustomMediaMixin(superclass, { tag, is }) {
	// Probe element used to discover the native property surface. May be
	// undefined outside a DOM environment, in which case no props are mirrored.
	const nativeElTest = globalThis.document?.createElement?.(tag, { is });
	const nativeElProps = nativeElTest ? getNativeElProps(nativeElTest) : [];
	return class CustomMedia extends superclass {
		static getTemplateHTML = tag.endsWith("audio") ? getAudioTemplateHTML : getVideoTemplateHTML;
		static shadowRootOptions = { mode: "open" };
		static Events = Events;
		static #isDefined = false;
		// Attributes that back property access (lowercased); "muted" excluded.
		static #propsToAttrs;
		static get observedAttributes() {
			// Reading observedAttributes is the earliest custom-element hook, so
			// prototype mirroring is performed here exactly once.
			CustomMedia.#define();
			return [...nativeElTest?.constructor?.observedAttributes ?? [], ...Attributes];
		}
		static #define() {
			if (CustomMedia.#isDefined) return;
			CustomMedia.#isDefined = true;
			CustomMedia.#propsToAttrs = new Set(CustomMedia.observedAttributes);
			// "muted" the attribute maps to defaultMuted, not the muted property.
			CustomMedia.#propsToAttrs.delete("muted");
			for (const prop of nativeElProps) {
				if (prop in CustomMedia.prototype) continue;
				if (typeof nativeElTest[prop] === "function") CustomMedia.prototype[prop] = function(...args) {
					this.#init();
					return this.call(prop, ...args);
				};
				else {
					const config = { get() {
						this.#init();
						return this.get(prop);
					} };
					// ALL-CAPS property names get no setter (treated as constants).
					if (prop !== prop.toUpperCase()) config.set = function(val) {
						this.#init();
						this.set(prop, val);
					};
					Object.defineProperty(CustomMedia.prototype, prop, config);
				}
			}
		}
		#isInit = false;
		#target = null;
		// Maps light-DOM <track>/<source> children to their clones in the target.
		#childMap = /* @__PURE__ */ new Map();
		#childObserver;
		get(prop) {
			const attr = prop.toLowerCase();
			if (CustomMedia.#propsToAttrs.has(attr)) {
				// Attribute-backed props: absent -> false, empty -> true, else the string.
				const val = this.getAttribute(attr);
				return val === null ? false : val === "" ? true : val;
			}
			return this.target?.[prop];
		}
		set(prop, val) {
			const attr = prop.toLowerCase();
			if (CustomMedia.#propsToAttrs.has(attr)) {
				if (val === true || val === false || val == null) this.toggleAttribute(attr, Boolean(val));
				else this.setAttribute(attr, val);
				return;
			}
			if (this.target) this.target[prop] = val;
		}
		call(prop, ...args) {
			return (this.target?.[prop])?.apply(this.target, args);
		}
		get target() {
			// Prefer an explicitly assigned target, then a slotted media element,
			// then a light-DOM match, then the shadow-DOM fallback element.
			this.#init();
			return this.#target ?? this.querySelector(":scope > [slot=media]") ?? this.querySelector(tag) ?? this.shadowRoot?.querySelector(tag) ?? null;
		}
		set target(val) {
			this.#target = val;
		}
		get defaultMuted() {
			this.#init();
			return this.get("muted");
		}
		set defaultMuted(val) {
			this.#init();
			this.set("muted", val);
		}
		#init() {
			// Idempotent guard around init().
			if (this.#isInit) return;
			this.#isInit = true;
			this.init();
		}
		init() {
			if (!this.shadowRoot) {
				this.attachShadow({ mode: "open" });
				const attrs = namedNodeMapToObject(this.attributes);
				if (is) attrs.is = is;
				if (tag) attrs.part = tag;
				this.shadowRoot.innerHTML = this.constructor.getTemplateHTML(attrs);
			}
			this.target.muted = this.hasAttribute("muted");
			// Replay any property sets that happened before element upgrade.
			for (const prop of nativeElProps) this.#upgradeProperty(prop);
			this.#childObserver = new MutationObserver(this.#syncMediaChildAttribute.bind(this));
			this.shadowRoot.addEventListener("slotchange", () => this.#syncMediaChildren());
			this.#syncMediaChildren();
			// Capture native media events in the shadow root and re-dispatch them.
			for (const type of this.constructor.Events) this.shadowRoot.addEventListener(type, this, true);
		}
		handleEvent(event) {
			if (event.target === this.target) this.dispatchEvent(new CustomEvent(event.type, { detail: event.detail }));
		}
		#syncMediaChildren() {
			// Clone slotted <track>/<source> children into the target element and
			// remove clones whose originals are gone.
			const removeNativeChildren = new Map(this.#childMap);
			((this.shadowRoot?.querySelector("slot:not([name])"))?.assignedElements({ flatten: true }).filter((el) => ["track", "source"].includes(el.localName))).forEach((el) => {
				removeNativeChildren.delete(el);
				let clone = this.#childMap.get(el);
				if (!clone) {
					clone = el.cloneNode();
					this.#childMap.set(el, clone);
					this.#childObserver?.observe(el, { attributes: true });
				}
				this.target?.append(clone);
				this.#enableDefaultTrack(clone);
			});
			removeNativeChildren.forEach((clone, el) => {
				clone.remove();
				this.#childMap.delete(el);
			});
		}
		#syncMediaChildAttribute(mutations) {
			// Keep clone attributes in sync with their light-DOM originals.
			for (const mutation of mutations) if (mutation.type === "attributes") {
				const { target, attributeName } = mutation;
				const clone = this.#childMap.get(target);
				if (clone && attributeName) {
					clone.setAttribute(attributeName, target.getAttribute(attributeName) ?? "");
					this.#enableDefaultTrack(clone);
				}
			}
		}
		#enableDefaultTrack(trackEl) {
			// Default chapters/metadata tracks have no native UI, so switch them
			// from "disabled" to "hidden" as soon as they appear.
			if (trackEl && trackEl.localName === "track" && trackEl.default && (trackEl.kind === "chapters" || trackEl.kind === "metadata") && trackEl.track.mode === "disabled") trackEl.track.mode = "hidden";
		}
		#upgradeProperty(prop) {
			// Standard custom-element upgrade dance: re-route own data properties
			// (set before the class was defined) through the prototype accessor.
			if (Object.hasOwn(this, prop)) {
				const value = this[prop];
				delete this[prop];
				this[prop] = value;
			}
		}
		attributeChangedCallback(attrName, oldValue, newValue) {
			this.#init();
			this.#forwardAttribute(attrName, oldValue, newValue);
		}
		#forwardAttribute(attrName, _oldValue, newValue) {
			// id/class stay on the host; attributes observed only by a subclass
			// (not by this base) are assumed to be handled there.
			if (["id", "class"].includes(attrName)) return;
			if (!CustomMedia.observedAttributes.includes(attrName) && this.constructor.observedAttributes.includes(attrName)) return;
			if (newValue === null) this.target?.removeAttribute(attrName);
			else if (this.target?.getAttribute(attrName) !== newValue) this.target?.setAttribute(attrName, newValue);
		}
		connectedCallback() {
			this.#init();
			this.setAttribute("data-media-element", "");
		}
	};
}
327
/**
 * Collect every own property name along the prototype chain of a native
 * media element, stopping before HTMLElement.prototype.
 */
function getNativeElProps(nativeElTest) {
	const props = [];
	let proto = Object.getPrototypeOf(nativeElTest);
	while (proto && proto !== HTMLElement.prototype) {
		props.push(...Object.getOwnPropertyNames(proto));
		proto = Object.getPrototypeOf(proto);
	}
	return props;
}
338
/**
 * Serialize an attrs object into an HTML attribute string, keeping only
 * names present in the `Attributes` allowlist. Empty-string values render
 * as boolean attributes (name only).
 */
function serializeAttributes(attrs) {
	let out = "";
	for (const name in attrs) {
		if (!Attributes.includes(name)) continue;
		const value = attrs[name];
		out += value === "" ? ` ${name}` : ` ${name}="${value}"`;
	}
	return out;
}
351
/**
 * Convert a NamedNodeMap (or any iterable of { name, value } entries)
 * into a plain object keyed by attribute name.
 */
function namedNodeMapToObject(namedNodeMap) {
	const result = {};
	for (const { name, value } of namedNodeMap) {
		result[name] = value;
	}
	return result;
}
359
// Base custom media element classes. In non-DOM environments a bare class
// stands in for HTMLElement so the module can still be imported.
const CustomVideoElement = CustomMediaMixin(globalThis.HTMLElement ?? class {}, { tag: "video" });
const CustomAudioElement = CustomMediaMixin(globalThis.HTMLElement ?? class {}, { tag: "audio" });
361
+
362
+ //#endregion
363
+ //#region ../utils/dist/dom/listen.js
364
/**
 * Attach an event listener and return a disposer that detaches it with the
 * same type/listener/options triple.
 */
function listen(target, type, listener, options) {
	target.addEventListener(type, listener, options);
	const dispose = () => {
		target.removeEventListener(type, listener, options);
	};
	return dispose;
}
368
+
369
+ //#endregion
370
+ //#region ../utils/dist/predicate/predicate.js
371
/** True only for `undefined` (not for `null` or other falsy values). */
function isUndefined(value) {
	return value === void 0;
}
374
/** True for `null` or `undefined`; false for every other value. */
function isNil(value) {
	return value === null || value === void 0;
}
377
+
378
+ //#endregion
379
+ //#region ../spf/dist/adapter-CMw-rvbk.js
380
+ /**
381
+ * Reactive state container with selectors, custom equality, and batched updates.
382
+ *
383
+ * Manages both immutable state values and mutable object references (e.g., HTMLMediaElement).
384
+ */
385
// Brand symbol used to tag state containers created by this module.
const STATE_SYMBOL = Symbol("@videojs/spf/state");
/**
 * Default equality function using Object.is.
 *
 * Unlike `===`, Object.is distinguishes +0/-0 and treats NaN as equal
 * to itself, which keeps NaN-valued state from re-notifying forever.
 */
function defaultEquality(a, b) {
	return Object.is(a, b);
}
392
/**
 * State container implementation.
 *
 * Holds a current snapshot plus an optional pending snapshot. `patch()`
 * accumulates changes into the pending snapshot and schedules a microtask
 * flush, so multiple patches within one tick produce a single notification.
 */
var StateContainer = class {
	[STATE_SYMBOL] = true;
	#current;
	// Next snapshot awaiting flush; null when no update is pending.
	#pending = null;
	#pendingFlush = false;
	#equalityFn;
	// Whole-state listeners.
	#listeners = /* @__PURE__ */ new Set();
	// { selector, listener, options } entries notified when the selected slice changes.
	#selectorListeners = /* @__PURE__ */ new Set();
	constructor(initial, config) {
		// Shallow-copy object state so callers cannot mutate it from outside.
		this.#current = typeof initial === "object" && initial !== null ? { ...initial } : initial;
		this.#equalityFn = config?.equalityFn ?? defaultEquality;
	}
	get current() {
		// Reads observe pending (not-yet-flushed) updates immediately.
		return this.#pending ?? this.#current;
	}
	patch(partial) {
		const base = this.#pending ?? this.#current;
		if (typeof base !== "object" || base === null) {
			// Primitive state: `partial` is the full replacement value.
			const value = partial;
			if (!Object.is(base, value)) {
				this.#pending = value;
				this.#scheduleFlush();
			}
			return;
		}
		// Object state: merge only own keys whose values actually changed.
		const next = { ...base };
		let changed = false;
		for (const key in partial) {
			if (!Object.hasOwn(partial, key)) continue;
			const value = partial[key];
			if (!Object.is(base[key], value)) {
				next[key] = value;
				changed = true;
			}
		}
		if (changed) {
			this.#pending = next;
			this.#scheduleFlush();
		}
	}
	subscribe(selectorOrListener, maybeListener, options) {
		// subscribe(listener) form: invoked immediately and on every flush.
		if (maybeListener === void 0) {
			const listener$1 = selectorOrListener;
			this.#listeners.add(listener$1);
			listener$1(this.current);
			return () => {
				this.#listeners.delete(listener$1);
			};
		}
		// subscribe(selector, listener, options?) form: invoked immediately with
		// the selected slice, then only when that slice changes.
		const selector = selectorOrListener;
		const listener = maybeListener;
		const entry = {
			selector,
			listener,
			options: options ?? {}
		};
		this.#selectorListeners.add(entry);
		listener(selector(this.current));
		return () => {
			this.#selectorListeners.delete(entry);
		};
	}
	flush() {
		if (this.#pending === null) return;
		const prev = this.#current;
		const next = this.#pending;
		// Clear pending state before notifying so listeners may patch again.
		this.#pending = null;
		this.#pendingFlush = false;
		if (this.#equalityFn(prev, next)) return;
		this.#current = next;
		for (const listener of this.#listeners) listener(this.#current);
		for (const entry of this.#selectorListeners) {
			const prevSelected = entry.selector(prev);
			const nextSelected = entry.selector(this.#current);
			// Per-subscription slice equality (defaults to Object.is).
			if (!(entry.options.equalityFn ?? Object.is)(prevSelected, nextSelected)) entry.listener(nextSelected);
		}
	}
	#scheduleFlush() {
		// Coalesce: at most one queued flush at a time.
		if (this.#pendingFlush) return;
		this.#pendingFlush = true;
		queueMicrotask(() => this.flush());
	}
};
478
+ /**
479
+ * Create a reactive state container.
480
+ *
481
+ * @example
482
+ * ```typescript
483
+ * const state = createState({ count: 0 });
484
+ *
485
+ * // Subscribe to changes
486
+ * state.subscribe((current, prev) => {
487
+ * console.log('Changed:', prev, '->', current);
488
+ * });
489
+ *
490
+ * // Updates are batched
491
+ * state.patch({ count: 1 });
492
+ * state.patch({ count: 2 });
493
+ * // Only one notification fires (with count: 2)
494
+ * ```
495
+ *
496
+ * @example Selector subscriptions
497
+ * ```typescript
498
+ * const state = createState({ count: 0, name: 'test' });
499
+ *
500
+ * // Only notified when count changes
501
+ * state.subscribe(
502
+ * s => s.count,
503
+ * (current, prev) => console.log(current, prev)
504
+ * );
505
+ * ```
506
+ *
507
+ * @example Custom equality
508
+ * ```typescript
509
+ * const state = createState(
510
+ * { count: 0, name: 'test' },
511
+ * { equalityFn: (a, b) => a.count === b.count }
512
+ * );
513
+ * ```
514
+ */
515
function createState(initial, config) {
	// Thin factory over StateContainer; see the JSDoc above for usage examples.
	return new StateContainer(initial, config);
}
518
/**
 * Parse an HLS attribute list from a tag line into a Map.
 * Handles both quoted (`KEY="a,b"`) and unquoted (`KEY=value`) values.
 */
function parseAttributeList(line) {
	const attributes = new Map();
	const matches = line.matchAll(/([A-Z0-9-]+)=(?:"([^"]*)"|([^,]*))/g);
	for (const [, key, quoted, bare] of matches) {
		if (!key) continue;
		attributes.set(key, quoted ?? bare ?? "");
	}
	return attributes;
}
531
/**
 * Parse a RESOLUTION attribute value of the form "WIDTHxHEIGHT".
 * Returns null when the value does not match that shape.
 */
function parseResolution(value) {
	const parts = /^(\d+)x(\d+)$/.exec(value);
	if (parts === null) return null;
	const [, width, height] = parts;
	return {
		width: Number.parseInt(width, 10),
		height: Number.parseInt(height, 10)
	};
}
542
/**
 * Parse a FRAME-RATE attribute into a rational frame rate.
 *
 * Recognizes the common NTSC fractional rates (23.976, 29.97, 59.94) and
 * maps them to their exact numerator/denominator pairs; any other positive
 * value is rounded to the nearest whole-number numerator.
 *
 * @param value - Raw FRAME-RATE attribute string.
 * @returns Rational frame-rate object, or undefined for NaN/non-positive input.
 */
function parseFrameRate(value) {
	const fps = Number.parseFloat(value);
	if (Number.isNaN(fps) || fps <= 0) return void 0;
	if (Math.abs(fps - 23.976) < .01) return {
		frameRateNumerator: 24e3,
		frameRateDenominator: 1001
	};
	if (Math.abs(fps - 29.97) < .01) return {
		frameRateNumerator: 3e4,
		frameRateDenominator: 1001
	};
	if (Math.abs(fps - 59.94) < .01) return {
		frameRateNumerator: 6e4,
		frameRateDenominator: 1001
	};
	// Previously an `fps % 1 === 0` check guarded two identical returns; the
	// branch was dead code and is collapsed into a single return (no behavior change).
	return { frameRateNumerator: Math.round(fps) };
}
563
/**
 * Split a CODECS attribute into separate video and audio codec strings.
 * When several codecs of the same kind appear, the last one wins.
 */
function parseCodecs(codecs) {
	const result = {};
	const videoPrefixes = ["avc1.", "hvc1.", "hev1."];
	for (const raw of codecs.split(",")) {
		const codec = raw.trim();
		if (videoPrefixes.some((prefix) => codec.startsWith(prefix))) result.video = codec;
		else if (codec.startsWith("mp4a.")) result.audio = codec;
	}
	return result;
}
573
/**
 * Parse the duration portion of an #EXTINF value ("duration[,title]").
 * Returns 0 when the duration is not a parsable number.
 */
function parseExtInfDuration(value) {
	const [durationPart = value] = value.split(",");
	const duration = Number.parseFloat(durationPart);
	return Number.isNaN(duration) ? 0 : duration;
}
581
/**
 * Parse a BYTERANGE value of the form "length[@offset]".
 * When the offset is omitted, the range continues from `previousEnd`.
 * Returns null if the value is malformed or no start offset can be determined.
 */
function parseByteRange(value, previousEnd) {
	const parsed = /^(\d+)(?:@(\d+))?$/.exec(value);
	if (!parsed) return null;
	const [, lengthStr, offsetStr] = parsed;
	const length = Number.parseInt(lengthStr, 10);
	if (Number.isNaN(length)) return null;
	let start;
	if (offsetStr !== void 0) {
		start = Number.parseInt(offsetStr, 10);
		if (Number.isNaN(start)) return null;
	} else {
		if (previousEnd === void 0) return null;
		start = previousEnd;
	}
	return {
		start,
		end: start + length - 1
	};
}
602
/**
 * Wrap a raw HLS attribute string in a typed accessor object (AttributeList)
 * offering string/int/float/bool/resolution/frame-rate views of each key.
 */
function createAttributeList(line) {
	const attrs = parseAttributeList(line);
	// Shared numeric-lookup helper: missing or unparsable values fall back.
	const numeric = (key, parse, fallback) => {
		const raw = attrs.get(key);
		if (raw === void 0) return fallback;
		const parsed = parse(raw);
		return Number.isNaN(parsed) ? fallback : parsed;
	};
	return {
		get(key) {
			return attrs.get(key);
		},
		getInt(key, defaultValue) {
			return numeric(key, (raw) => Number.parseInt(raw, 10), defaultValue);
		},
		getFloat(key, defaultValue) {
			return numeric(key, Number.parseFloat, defaultValue);
		},
		getBool(key) {
			return attrs.get(key) === "YES";
		},
		getResolution(key) {
			const raw = attrs.get(key);
			if (!raw) return void 0;
			return parseResolution(raw) ?? void 0;
		},
		getFrameRate(key) {
			const raw = attrs.get(key);
			return raw ? parseFrameRate(raw) : void 0;
		}
	};
}
638
/**
 * Match a `#TAG:` line and return its parsed attribute list,
 * or null when the line belongs to a different tag.
 */
function matchTag(line, tag) {
	const prefix = `#${tag}:`;
	return line.startsWith(prefix) ? createAttributeList(line.slice(prefix.length)) : null;
}
647
/**
 * Resolve a (possibly relative) URL against a base URL using the native
 * WHATWG URL API; absolute inputs pass through unchanged.
 */
function resolveUrl(url, baseUrl) {
	const resolved = new URL(url, baseUrl);
	return resolved.href;
}
653
/**
 * Parse HLS media playlist and resolve track with segments.
 *
 * Takes an unresolved track (from multivariant playlist) and media playlist text,
 * returns a HAM-compliant resolved track with segments.
 *
 * @param text - Media playlist text content
 * @param unresolved - Unresolved track from parseMultivariantPlaylist
 * @returns Resolved track with segments (type inferred from input)
 */
function parseMediaPlaylist(text, unresolved) {
	const lines = text.split(/\r?\n/);
	const baseUrl = unresolved.url;
	const segments = [];
	let initSegmentUrl;
	let initSegmentByteRange;
	// Parser state carried between lines: #EXTINF / #EXT-X-BYTERANGE values
	// apply to the next URI line encountered.
	let currentDuration = 0;
	let currentByteRange;
	let currentTime = 0;
	let segmentIndex = 0;
	let previousByteRangeEnd;
	for (const line of lines) {
		const trimmed = line.trim();
		// Skip blanks and comments that are not #EXT tags.
		if (!trimmed || trimmed.startsWith("#") && !trimmed.startsWith("#EXT")) continue;
		// Tags that have no effect on segment resolution.
		if (trimmed === "#EXTM3U" || trimmed.startsWith("#EXT-X-VERSION:") || trimmed.startsWith("#EXT-X-TARGETDURATION:") || trimmed.startsWith("#EXT-X-PLAYLIST-TYPE:") || trimmed.startsWith("#EXT-X-INDEPENDENT-SEGMENTS")) continue;
		// #EXT-X-MAP declares the init segment (URI plus optional byte range).
		const mapAttrs = matchTag(trimmed, "EXT-X-MAP");
		if (mapAttrs) {
			const uri = mapAttrs.get("URI");
			if (uri) {
				initSegmentUrl = resolveUrl(uri, baseUrl);
				const byteRangeStr = mapAttrs.get("BYTERANGE");
				if (byteRangeStr) initSegmentByteRange = parseByteRange(byteRangeStr, 0) ?? void 0;
			}
			continue;
		}
		if (trimmed.startsWith("#EXTINF:")) {
			currentDuration = parseExtInfDuration(trimmed.slice(8));
			continue;
		}
		if (trimmed.startsWith("#EXT-X-BYTERANGE:")) {
			// Offset-less byte ranges continue from the previous segment's end.
			currentByteRange = parseByteRange(trimmed.slice(17), previousByteRangeEnd) ?? void 0;
			continue;
		}
		if (trimmed === "#EXT-X-ENDLIST") continue;
		// A non-tag line following an #EXTINF is a segment URI.
		if (!trimmed.startsWith("#") && currentDuration > 0) {
			const segment = {
				id: `segment-${segmentIndex}`,
				url: resolveUrl(trimmed, baseUrl),
				duration: currentDuration,
				startTime: currentTime
			};
			if (currentByteRange) {
				segment.byteRange = currentByteRange;
				previousByteRangeEnd = currentByteRange.end + 1;
			} else previousByteRangeEnd = void 0;
			segments.push(segment);
			currentTime += currentDuration;
			segmentIndex++;
			// Reset per-segment state for the next #EXTINF.
			currentDuration = 0;
			currentByteRange = void 0;
		}
	}
	const totalDuration = currentTime;
	// Text tracks without an explicit init segment get no initialization entry;
	// other track types fall back to an empty-URL initialization.
	const initialization = unresolved.type === "text" && !initSegmentUrl ? void 0 : initSegmentUrl ? {
		url: initSegmentUrl,
		...initSegmentByteRange ? { byteRange: initSegmentByteRange } : {}
	} : { url: "" };
	return {
		...unresolved,
		startTime: 0,
		duration: totalDuration,
		segments,
		initialization
	};
}
728
/**
 * Generate unique ID for HAM objects.
 *
 * Combines the current timestamp with a random integer, which is unique
 * enough for in-memory object identity. IDs are strings without decimals.
 *
 * @returns Unique string ID in format: timestamp-random
 *
 * @example
 * ```ts
 * const id = generateId(); // "1738423156789-542891"
 * ```
 */
function generateId() {
	const timestamp = Date.now();
	const random = Math.floor(Math.random() * 1e6);
	return `${timestamp}-${random}`;
}
744
+ /**
745
+ * Parse HLS multivariant playlist into a Presentation.
746
+ *
747
+ * Returns Presentation with partially resolved tracks (no segment information).
748
+ * Tracks contain metadata from multivariant playlist (bandwidth, resolution, codecs)
749
+ * but segment information is added when media playlists are fetched.
750
+ *
751
+ * @param text - Raw playlist text content
752
+ * @param unresolved - Unresolved presentation (contains URL for base URL resolution)
753
+ * @returns Presentation with partially resolved tracks (duration is undefined)
754
+ */
755
+ function parseMultivariantPlaylist(text, unresolved) {
756
+ const baseUrl = unresolved.url;
757
+ const lines = text.split(/\r?\n/);
758
+ const streams = [];
759
+ const audioRenditions = [];
760
+ const subtitleRenditions = [];
761
+ let pendingStreamInfo = null;
762
+ for (const line of lines) {
763
+ const trimmed = line.trim();
764
+ if (!trimmed || trimmed.startsWith("#") && !trimmed.startsWith("#EXT")) continue;
765
+ if (trimmed === "#EXTM3U" || trimmed.startsWith("#EXT-X-VERSION:") || trimmed.startsWith("#EXT-X-INDEPENDENT-SEGMENTS")) continue;
766
+ const mediaAttrs = matchTag(trimmed, "EXT-X-MEDIA");
767
+ if (mediaAttrs) {
768
+ const type = mediaAttrs.get("TYPE");
769
+ const groupId = mediaAttrs.get("GROUP-ID");
770
+ const name = mediaAttrs.get("NAME");
771
+ if (type === "AUDIO" && groupId && name) {
772
+ const uri = mediaAttrs.get("URI");
773
+ audioRenditions.push({
774
+ groupId,
775
+ name,
776
+ language: mediaAttrs.get("LANGUAGE"),
777
+ uri: uri ? resolveUrl(uri, baseUrl) : void 0,
778
+ default: mediaAttrs.getBool("DEFAULT"),
779
+ autoselect: mediaAttrs.getBool("AUTOSELECT")
780
+ });
781
+ }
782
+ if (type === "SUBTITLES" && groupId && name) {
783
+ const uri = mediaAttrs.get("URI");
784
+ if (uri) subtitleRenditions.push({
785
+ groupId,
786
+ name,
787
+ language: mediaAttrs.get("LANGUAGE"),
788
+ uri: resolveUrl(uri, baseUrl),
789
+ default: mediaAttrs.getBool("DEFAULT"),
790
+ autoselect: mediaAttrs.getBool("AUTOSELECT"),
791
+ forced: mediaAttrs.getBool("FORCED")
792
+ });
793
+ }
794
+ continue;
795
+ }
796
+ const streamInfAttrs = matchTag(trimmed, "EXT-X-STREAM-INF");
797
+ if (streamInfAttrs) {
798
+ pendingStreamInfo = {
799
+ bandwidth: streamInfAttrs.getInt("BANDWIDTH", 0),
800
+ resolution: streamInfAttrs.getResolution("RESOLUTION"),
801
+ codecs: streamInfAttrs.get("CODECS"),
802
+ frameRate: streamInfAttrs.getFrameRate("FRAME-RATE"),
803
+ audioGroupId: streamInfAttrs.get("AUDIO")
804
+ };
805
+ continue;
806
+ }
807
+ if (!trimmed.startsWith("#") && pendingStreamInfo) {
808
+ streams.push({
809
+ ...pendingStreamInfo,
810
+ uri: resolveUrl(trimmed, baseUrl)
811
+ });
812
+ pendingStreamInfo = null;
813
+ }
814
+ }
815
+ const videoStreams = [];
816
+ const audioOnlyStreams = [];
817
+ for (const stream of streams) {
818
+ if (!stream.codecs) {
819
+ videoStreams.push(stream);
820
+ continue;
821
+ }
822
+ const parsedCodecs = parseCodecs(stream.codecs);
823
+ if (stream.codecs.split(",").length === 1) if (parsedCodecs.audio && !parsedCodecs.video) audioOnlyStreams.push(stream);
824
+ else videoStreams.push(stream);
825
+ else videoStreams.push(stream);
826
+ }
827
+ const videoTracks = videoStreams.map((stream) => {
828
+ const codecs = stream.codecs ? parseCodecs(stream.codecs) : void 0;
829
+ const track = {
830
+ type: "video",
831
+ id: generateId(),
832
+ url: stream.uri,
833
+ bandwidth: stream.bandwidth,
834
+ mimeType: "video/mp4",
835
+ codecs: []
836
+ };
837
+ if (stream.resolution?.width !== void 0) track.width = stream.resolution.width;
838
+ if (stream.resolution?.height !== void 0) track.height = stream.resolution.height;
839
+ if (codecs?.video) track.codecs = [codecs.video];
840
+ if (stream.frameRate) track.frameRate = stream.frameRate;
841
+ if (stream.audioGroupId) track.audioGroupId = stream.audioGroupId;
842
+ return track;
843
+ });
844
+ const audioOnlyTracks = audioOnlyStreams.map((stream) => {
845
+ const codecs = stream.codecs ? parseCodecs(stream.codecs) : void 0;
846
+ return {
847
+ type: "audio",
848
+ id: generateId(),
849
+ url: stream.uri,
850
+ bandwidth: stream.bandwidth,
851
+ mimeType: "audio/mp4",
852
+ codecs: codecs?.audio ? [codecs.audio] : [],
853
+ groupId: stream.audioGroupId || "default",
854
+ name: "Default",
855
+ sampleRate: 48e3,
856
+ channels: 2
857
+ };
858
+ });
859
+ const audioTracks = [...audioRenditions.map((rendition) => {
860
+ let audioCodecs;
861
+ for (const stream of streams) if (stream.audioGroupId === rendition.groupId && stream.codecs) {
862
+ const codecs = parseCodecs(stream.codecs);
863
+ if (codecs.audio) {
864
+ audioCodecs = [codecs.audio];
865
+ break;
866
+ }
867
+ }
868
+ const track = {
869
+ type: "audio",
870
+ id: generateId(),
871
+ url: rendition.uri ?? "",
872
+ groupId: rendition.groupId,
873
+ name: rendition.name,
874
+ mimeType: "audio/mp4",
875
+ bandwidth: 0,
876
+ sampleRate: 48e3,
877
+ channels: 2,
878
+ codecs: []
879
+ };
880
+ if (rendition.language) track.language = rendition.language;
881
+ if (audioCodecs) track.codecs = audioCodecs;
882
+ if (rendition.default) track.default = rendition.default;
883
+ if (rendition.autoselect) track.autoselect = rendition.autoselect;
884
+ return track;
885
+ }), ...audioOnlyTracks];
886
+ const textTracks = subtitleRenditions.map((rendition) => {
887
+ const track = {
888
+ type: "text",
889
+ id: generateId(),
890
+ url: rendition.uri,
891
+ groupId: rendition.groupId,
892
+ label: rendition.name,
893
+ kind: "subtitles",
894
+ mimeType: "text/vtt",
895
+ bandwidth: 0
896
+ };
897
+ if (rendition.language) track.language = rendition.language;
898
+ if (rendition.default && rendition.autoselect) track.default = true;
899
+ if (rendition.autoselect) track.autoselect = rendition.autoselect;
900
+ if (rendition.forced) track.forced = rendition.forced;
901
+ return track;
902
+ });
903
+ const selectionSets = [];
904
+ if (videoTracks.length > 0) {
905
+ const videoSwitchingSet = {
906
+ id: generateId(),
907
+ type: "video",
908
+ tracks: videoTracks
909
+ };
910
+ const videoSelectionSet = {
911
+ id: generateId(),
912
+ type: "video",
913
+ switchingSets: [videoSwitchingSet]
914
+ };
915
+ selectionSets.push(videoSelectionSet);
916
+ }
917
+ if (audioTracks.length > 0) {
918
+ const audioSwitchingSet = {
919
+ id: generateId(),
920
+ type: "audio",
921
+ tracks: audioTracks
922
+ };
923
+ const audioSelectionSet = {
924
+ id: generateId(),
925
+ type: "audio",
926
+ switchingSets: [audioSwitchingSet]
927
+ };
928
+ selectionSets.push(audioSelectionSet);
929
+ }
930
+ if (textTracks.length > 0) {
931
+ const textSwitchingSet = {
932
+ id: generateId(),
933
+ type: "text",
934
+ tracks: textTracks
935
+ };
936
+ const textSelectionSet = {
937
+ id: generateId(),
938
+ type: "text",
939
+ switchingSets: [textSwitchingSet]
940
+ };
941
+ selectionSets.push(textSelectionSet);
942
+ }
943
+ return {
944
+ id: generateId(),
945
+ url: unresolved.url,
946
+ startTime: 0,
947
+ selectionSets
948
+ };
949
+ }
950
+ /**
951
+ * Exponentially Weighted Moving Average (EWMA)
952
+ *
953
+ * Pure functional implementation of EWMA calculations.
954
+ * Based on Shaka Player's EWMA algorithm.
955
+ */
956
/**
 * Derive the EWMA decay factor (alpha) from a half-life.
 *
 * An alpha near 1 means old samples decay slowly (long memory); an alpha
 * near 0 means they decay quickly (short memory).
 *
 * @param halfLife - Quantity of prior samples (by weight) making up half of
 *   a new estimate. Must be positive.
 * @returns Alpha in the open interval (0, 1)
 *
 * @example
 * const alpha = calculateAlpha(2); // ≈ 0.7071 for a 2-second half-life
 */
function calculateAlpha(halfLife) {
  // exp(ln(0.5) / halfLife) === 0.5^(1/halfLife)
  const decayExponent = Math.log(.5) / halfLife;
  return Math.exp(decayExponent);
}
973
/**
 * Fold one sample into an exponentially weighted moving average.
 *
 * The previous estimate is blended with the new value; the sample weight
 * (typically its duration in seconds) raises the decay factor so longer
 * samples carry more influence.
 *
 * @param prevEstimate - Previous EWMA estimate
 * @param value - New sample value
 * @param weight - Sample weight (typically duration in seconds)
 * @param halfLife - Decay half-life (typically 2-5 seconds)
 * @returns Updated EWMA estimate
 *
 * @example
 * let estimate = 0;
 * estimate = calculateEwma(estimate, 1_000_000, 1, 2);
 * estimate = calculateEwma(estimate, 2_000_000, 1, 2);
 */
function calculateEwma(prevEstimate, value, weight, halfLife) {
  // Weight-adjusted retention of the old estimate.
  const retention = Math.pow(calculateAlpha(halfLife), weight);
  return value * (1 - retention) + retention * prevEstimate;
}
994
/**
 * Correct an EWMA estimate for the bias introduced by starting at zero.
 *
 * Without this correction early estimates are artificially low. As the
 * accumulated weight grows, the correction factor approaches 1 and the raw
 * estimate needs less adjustment.
 *
 * @param estimate - Raw (uncorrected) EWMA estimate
 * @param totalWeight - Accumulated weight of all samples
 * @param halfLife - Half-life used in the EWMA calculation
 * @returns Corrected estimate, or 0 when totalWeight is 0
 *
 * @example
 * const raw = calculateEwma(0, 1_000_000, 1, 2);
 * const corrected = applyZeroFactor(raw, 1, 2); // ≈ 1_000_000
 */
function applyZeroFactor(estimate, totalWeight, halfLife) {
  if (totalWeight === 0) {
    return 0;
  }
  const zeroFactor = 1 - calculateAlpha(halfLife) ** totalWeight;
  return estimate / zeroFactor;
}
1016
/**
 * Default bandwidth estimator configuration.
 *
 * Values match Shaka Player defaults based on experimentation.
 */
const DEFAULT_BANDWIDTH_CONFIG = {
  // Half-life (seconds) of the fast, quickly-reacting EWMA.
  fastHalfLife: 2,
  // Half-life (seconds) of the slow, stable EWMA.
  slowHalfLife: 5,
  // Total bytes that must be observed before estimates are trusted;
  // getBandwidthEstimate returns its default below this threshold.
  minTotalBytes: 128e3,
  // Samples smaller than this many bytes are ignored (TTFB-dominated).
  minBytes: 16e3,
  // Samples shorter than this are ignored; compared against durationMs
  // (milliseconds) in sampleBandwidth — filters cached responses.
  minDuration: 5
};
1028
/**
 * Incorporate one segment-download sample into the bandwidth estimator.
 *
 * Samples are filtered before use:
 * - fewer than `config.minBytes` bytes (TTFB-dominated small segments)
 * - shorter than `config.minDuration` ms (likely cached responses)
 *
 * Filtered samples still count toward `bytesSampled`; valid samples also
 * update both the fast and slow EWMA estimates.
 *
 * @param state - Current estimator state (never mutated)
 * @param durationMs - Download duration in milliseconds
 * @param numBytes - Number of bytes downloaded
 * @param config - Optional estimator configuration (defaults used otherwise)
 * @returns New estimator state (unchanged estimates if the sample was filtered)
 *
 * @example
 * let state = { fastEstimate: 0, fastTotalWeight: 0, ... };
 * state = sampleBandwidth(state, 1000, 1_000_000); // 1MB in 1 second
 */
function sampleBandwidth(state, durationMs, numBytes, config = DEFAULT_BANDWIDTH_CONFIG) {
  const bytesSampled = state.bytesSampled + numBytes;
  const tooSmall = numBytes < config.minBytes;
  const tooShort = durationMs < config.minDuration;
  if (tooSmall || tooShort) {
    // Rejected sample: only the byte counter advances.
    return {
      ...state,
      bytesSampled
    };
  }
  // bits per second: (bytes * 8) / (ms / 1000)
  const bitsPerSecond = 8e3 * numBytes / durationMs;
  const sampleWeight = durationMs / 1e3;
  return {
    fastEstimate: calculateEwma(state.fastEstimate, bitsPerSecond, sampleWeight, config.fastHalfLife),
    fastTotalWeight: state.fastTotalWeight + sampleWeight,
    slowEstimate: calculateEwma(state.slowEstimate, bitsPerSecond, sampleWeight, config.slowHalfLife),
    slowTotalWeight: state.slowTotalWeight + sampleWeight,
    bytesSampled
  };
}
1068
/**
 * Current bandwidth estimate in bits per second.
 *
 * Returns the **minimum** of the fast and slow EWMA estimates, which gives
 * asymmetric adaptation:
 * - on a bandwidth drop the fast EWMA reacts first and dominates
 * - on a rise the slow EWMA lags behind and dominates
 *
 * Until `config.minTotalBytes` have been sampled, the provided default is
 * returned instead.
 *
 * @param state - Current estimator state
 * @param defaultEstimate - Fallback estimate before sufficient samples (bps)
 * @param config - Optional estimator configuration (defaults used otherwise)
 * @returns Bandwidth estimate in bits per second
 *
 * @example
 * const estimate = getBandwidthEstimate(state, 5_000_000); // 5 Mbps default
 */
function getBandwidthEstimate(state, defaultEstimate, config = DEFAULT_BANDWIDTH_CONFIG) {
  if (state.bytesSampled < config.minTotalBytes) {
    // Not enough data yet to trust the EWMAs.
    return defaultEstimate;
  }
  const fast = applyZeroFactor(state.fastEstimate, state.fastTotalWeight, config.fastHalfLife);
  const slow = applyZeroFactor(state.slowEstimate, state.slowTotalWeight, config.slowHalfLife);
  return Math.min(fast, slow);
}
1092
/**
 * Default quality selection configuration.
 * Values match Shaka Player upgrade threshold (0.85 = 15% headroom).
 */
// safetyMargin: a track is eligible when bandwidth >= track.bandwidth / safetyMargin.
const DEFAULT_QUALITY_CONFIG = { safetyMargin: .85 };
1097
/**
 * Pick the best video track for the current bandwidth estimate.
 *
 * A track is eligible when `currentBandwidth >= track.bandwidth / safetyMargin`
 * (the default 0.85 margin leaves 15% headroom). Among eligible tracks the
 * highest bandwidth wins; on a bandwidth tie the higher resolution wins.
 * When nothing is eligible, the lowest-bandwidth track is returned.
 *
 * @param tracks - Available video tracks (order does not matter)
 * @param currentBandwidth - Bandwidth estimate in bits per second
 * @param config - Optional quality selection configuration
 * @returns Selected track, or undefined when `tracks` is empty
 *
 * @example
 * // With 2.5 Mbps, selects 720p (1080p needs 4M / 0.85 ≈ 4.7 Mbps)
 * const selected = selectQuality(tracks, 2_500_000);
 */
function selectQuality(tracks, currentBandwidth, config = DEFAULT_QUALITY_CONFIG) {
  if (tracks.length === 0) return;
  const byBandwidth = [...tracks].sort((left, right) => left.bandwidth - right.bandwidth);
  let best;
  for (const candidate of byBandwidth) {
    if (currentBandwidth < candidate.bandwidth / config.safetyMargin) continue;
    const improves =
      !best ||
      candidate.bandwidth > best.bandwidth ||
      (candidate.bandwidth === best.bandwidth && hasHigherResolution(candidate, best));
    if (improves) best = candidate;
  }
  // Fall back to the lowest quality when bandwidth suffices for none.
  return best ?? byBandwidth[0];
}
1129
/**
 * Whether track A has strictly more pixels (width × height) than track B.
 * Missing dimensions are treated as 0.
 *
 * @param trackA - First track to compare
 * @param trackB - Second track to compare
 * @returns True when trackA's pixel count exceeds trackB's
 */
function hasHigherResolution(trackA, trackB) {
  const pixelCount = (track) => (track.width ?? 0) * (track.height ?? 0);
  return pixelCount(trackA) > pixelCount(trackB);
}
1140
/**
 * Default back buffer configuration.
 */
// keepSegments: number of already-played segments retained behind the playhead.
const DEFAULT_BACK_BUFFER_CONFIG = { keepSegments: 2 };
1144
/**
 * Where to flush old segments from the back buffer.
 *
 * Keeps `config.keepSegments` segments behind the playhead and returns the
 * start time of the next-older segment — everything in [0, flushEnd) can be
 * removed. Returns 0 when nothing needs flushing.
 *
 * @param segments - Available segments (should be sorted by startTime)
 * @param currentTime - Current playback position in seconds
 * @param config - Optional back buffer configuration
 * @returns Time in seconds to flush up to (flush range: [0, flushEnd))
 *
 * @example
 * // segments start at 0, 6, 12, 18 (6s each); playing at 18s, keep 2
 * calculateBackBufferFlushPoint(segments, 18); // 6 — flush [0, 6)
 */
function calculateBackBufferFlushPoint(segments, currentTime, config = DEFAULT_BACK_BUFFER_CONFIG) {
  const behind = segments.filter((segment) => segment.startTime < currentTime);
  const flushCount = behind.length - config.keepSegments;
  if (behind.length === 0 || flushCount <= 0) {
    return 0;
  }
  // If every past segment is flushable, flush right up to the playhead.
  return flushCount >= behind.length ? currentTime : behind[flushCount].startTime;
}
1181
/**
 * Default forward buffer configuration.
 */
// bufferDuration: seconds of content to keep buffered ahead of the playhead.
const DEFAULT_FORWARD_BUFFER_CONFIG = { bufferDuration: 30 };
1185
+ /**
1186
+ * Get segments that need to be loaded for forward buffer.
1187
+ *
1188
+ * Determines which segments to load to maintain target buffer duration.
1189
+ * Handles discontiguous buffering (gaps after seeks).
1190
+ *
1191
+ * Algorithm:
1192
+ * 1. Calculate target time: currentTime + bufferDuration
1193
+ * 2. Find all segments in range [currentTime, targetTime)
1194
+ * 3. Filter out segments already buffered at that time position
1195
+ * 4. Return segments to load (fills gaps + extends to target)
1196
+ *
1197
+ * @param segments - All available segments from playlist
1198
+ * @param bufferedSegments - Segments already buffered (ordered by startTime)
1199
+ * @param currentTime - Current playback position in seconds
1200
+ * @param config - Optional forward buffer configuration
1201
+ * @returns Array of segments to load (empty if buffer is sufficient)
1202
+ *
1203
+ * @example
1204
+ * // After seek: buffered [0-12, 18-30], playing at 7s
1205
+ * const toLoad = getSegmentsToLoad(segments, buffered, 7, { bufferDuration: 24 });
1206
+ * // Returns [seg-12, seg-30] (fills gap, extends to target 31s)
1207
+ */
1208
/**
 * Start time from which to flush forward-buffer content.
 *
 * Content starting at or beyond `currentTime + bufferDuration` is no longer
 * needed and should be removed from the SourceBuffer; this prevents unbounded
 * accumulation of scattered buffered ranges after seeks, which can cause
 * QuotaExceededError on long-form content.
 *
 * @param bufferedSegments - Segments currently tracked in the buffer model
 * @param currentTime - Current playback position in seconds
 * @param config - Optional forward buffer configuration
 * @returns Start time to flush from (flush range: [flushStart, Infinity)),
 *   or Infinity when no buffered segment lies beyond the threshold
 *
 * @example
 * // Playing at 0s, buffered [0,6,12,18,24,30,36], bufferDuration=30
 * calculateForwardFlushPoint(segments, 0); // 30 — flush [30, Infinity)
 */
function calculateForwardFlushPoint(bufferedSegments, currentTime, config = DEFAULT_FORWARD_BUFFER_CONFIG) {
  const keepUntil = currentTime + config.bufferDuration;
  // Earliest start time at or beyond the keep window, if any.
  let flushStart = Infinity;
  for (const segment of bufferedSegments) {
    if (segment.startTime >= keepUntil && segment.startTime < flushStart) {
      flushStart = segment.startTime;
    }
  }
  return flushStart;
}
1238
/**
 * Segments that must be fetched to maintain the forward buffer.
 *
 * Returns every playlist segment overlapping [currentTime, currentTime +
 * bufferDuration) whose startTime is not already present in
 * `bufferedSegments` — this fills gaps left by seeks and extends the buffer
 * toward the target.
 *
 * @param segments - All available segments from the playlist
 * @param bufferedSegments - Segments already buffered
 * @param currentTime - Current playback position in seconds
 * @param config - Optional forward buffer configuration
 * @returns Segments to load (empty when the buffer is sufficient)
 */
function getSegmentsToLoad(segments, bufferedSegments, currentTime, config = DEFAULT_FORWARD_BUFFER_CONFIG) {
  if (segments.length === 0) return [];
  const windowEnd = currentTime + config.bufferDuration;
  // Buffered segments are identified by their start time.
  const bufferedStarts = new Set();
  for (const segment of bufferedSegments) {
    bufferedStarts.add(segment.startTime);
  }
  const needed = [];
  for (const segment of segments) {
    const overlapsWindow =
      segment.startTime < windowEnd && segment.startTime + segment.duration > currentTime;
    if (overlapsWindow && !bufferedStarts.has(segment.startTime)) {
      needed.push(segment);
    }
  }
  return needed;
}
1249
/**
 * Whether a track has been resolved (its playlist fetched), indicated by the
 * presence of a `segments` property.
 */
function isResolvedTrack(track) {
  // Reflect.has is equivalent to the `in` operator (prototype chain included).
  return Reflect.has(track, "segments");
}
1252
/**
 * Check if a presentation has duration (at least one track resolved).
 * Narrows type to include required duration.
 */
function hasPresentationDuration(presentation) {
  const { duration } = presentation;
  return duration !== void 0;
}
1259
+ /**
1260
+ * MediaSource Setup
1261
+ *
1262
+ * Utilities for creating and configuring MediaSource/ManagedMediaSource
1263
+ * for MSE (Media Source Extensions) playback.
1264
+ *
1265
+ * Global ManagedMediaSource types are defined in ./mediasource.d.ts
1266
+ */
1267
/**
 * Whether the MediaSource API is available in this environment.
 */
function supportsMediaSource() {
  const hasApi = typeof MediaSource !== "undefined";
  return hasApi;
}
1273
/**
 * Whether the ManagedMediaSource API is available.
 * ManagedMediaSource is a newer Safari API with better lifecycle management.
 */
function supportsManagedMediaSource() {
  const hasApi = typeof ManagedMediaSource !== "undefined";
  return hasApi;
}
1280
/**
 * Create a MediaSource or ManagedMediaSource instance.
 *
 * @param options - Creation options
 * @param options.preferManaged - Prefer ManagedMediaSource when available
 * @returns A MediaSource or ManagedMediaSource instance
 * @throws Error when no MediaSource API is available
 *
 * @example
 * const mediaSource = createMediaSource();
 * attachMediaSource(mediaSource, document.querySelector('video'));
 */
function createMediaSource(options = {}) {
  const preferManaged = options.preferManaged ?? false;
  if (preferManaged && supportsManagedMediaSource()) {
    return new ManagedMediaSource();
  }
  if (supportsMediaSource()) {
    return new MediaSource();
  }
  throw new Error("MediaSource API is not supported");
}
1298
/**
 * Attach a MediaSource to an HTMLMediaElement.
 *
 * ManagedMediaSource (Safari) attaches via `srcObject` with remote playback
 * disabled; a regular MediaSource attaches via `URL.createObjectURL`.
 *
 * @param mediaSource - The MediaSource to attach
 * @param mediaElement - The media element to attach to
 * @returns `{ url, detach }` — `url` is '' for the managed path; `detach`
 *   releases the attachment (and revokes the object URL when one was created)
 *
 * @example
 * const { detach } = attachMediaSource(createMediaSource(), videoElement);
 * await waitForSourceOpen(mediaSource);
 * // ... later:
 * detach();
 */
function attachMediaSource(mediaSource, mediaElement) {
  const isManaged = supportsManagedMediaSource() && mediaSource instanceof ManagedMediaSource;
  if (isManaged) {
    // ManagedMediaSource requires remote playback to be disabled.
    mediaElement.disableRemotePlayback = true;
    mediaElement.srcObject = mediaSource;
    return {
      url: "",
      detach: () => {
        mediaElement.srcObject = null;
        mediaElement.load();
      }
    };
  }
  const objectUrl = URL.createObjectURL(mediaSource);
  mediaElement.src = objectUrl;
  return {
    url: objectUrl,
    detach: () => {
      mediaElement.removeAttribute("src");
      mediaElement.load();
      URL.revokeObjectURL(objectUrl);
    }
  };
}
1340
/**
 * Wait for a MediaSource to reach the 'open' state.
 *
 * Resolves immediately when already open; rejects immediately with an
 * AbortError when the signal is already aborted. Otherwise listens for
 * 'sourceopen' (and 'abort' on the signal), tearing down both listeners via
 * an internal AbortController as soon as either fires.
 *
 * @param mediaSource - The MediaSource to wait for
 * @param signal - Optional AbortSignal for cancellation
 * @returns Promise that resolves once the MediaSource is open
 *
 * @example
 * attachMediaSource(mediaSource, videoElement);
 * await waitForSourceOpen(mediaSource);
 * // MediaSource is now ready for SourceBuffer creation
 */
function waitForSourceOpen(mediaSource, signal) {
  return new Promise((resolve, reject) => {
    if (mediaSource.readyState === "open") {
      return resolve();
    }
    if (signal?.aborted) {
      return reject(new DOMException("Aborted", "AbortError"));
    }
    // One controller removes both listeners, whichever fires first.
    const teardown = new AbortController();
    const listenerOptions = { signal: teardown.signal };
    const onSourceOpen = () => {
      teardown.abort();
      resolve();
    };
    const onAbort = () => {
      teardown.abort();
      reject(new DOMException("Aborted", "AbortError"));
    };
    mediaSource.addEventListener("sourceopen", onSourceOpen, listenerOptions);
    signal?.addEventListener("abort", onAbort, listenerOptions);
  });
}
1376
/**
 * Create a SourceBuffer on a MediaSource.
 *
 * @param mediaSource - The MediaSource (must be in 'open' state)
 * @param mimeCodec - MIME type with codecs (e.g. 'video/mp4; codecs="avc1.42E01E"')
 * @returns The created SourceBuffer
 * @throws Error when the MediaSource is not open or the codec is unsupported
 *
 * @example
 * await waitForSourceOpen(mediaSource);
 * const buffer = createSourceBuffer(mediaSource, 'video/mp4; codecs="avc1.42E01E"');
 */
function createSourceBuffer(mediaSource, mimeCodec) {
  if (mediaSource.readyState !== "open") {
    throw new Error("MediaSource is not open");
  }
  if (!isCodecSupported(mimeCodec)) {
    throw new Error(`Codec not supported: ${mimeCodec}`);
  }
  return mediaSource.addSourceBuffer(mimeCodec);
}
1393
/**
 * Whether a MIME type + codecs string is playable via MediaSource.
 *
 * @param mimeCodec - MIME type with codecs string
 * @returns True when MediaSource exists and reports the type supported
 *
 * @example
 * if (isCodecSupported('video/mp4; codecs="avc1.42E01E"')) {
 *   // Create source buffer
 * }
 */
function isCodecSupported(mimeCodec) {
  return supportsMediaSource() ? MediaSource.isTypeSupported(mimeCodec) : false;
}
1408
/**
 * Fetch a resolvable AddressableObject.
 *
 * Adds a Range header when `byteRange` is present. Returns the native fetch
 * Response for composability (text, streaming, arrayBuffer, ...).
 *
 * @param addressable - Resource to fetch (url + optional byteRange)
 * @param options - Optional RequestInit (headers are merged; method is
 *   always GET and the Range header always wins)
 * @returns Promise resolving to Response
 *
 * @example
 * const response = await fetchResolvable({ url: 'https://example.com/segment.m4s' });
 * const text = await getResponseText(response);
 *
 * @example
 * // With byte range
 * const response = await fetchResolvable({
 *   url: 'https://example.com/file.mp4',
 *   byteRange: { start: 1000, end: 1999 }
 * });
 */
async function fetchResolvable(addressable, options) {
  // Separate `headers` from the rest of the init. Previously the whole
  // options object was spread *after* `headers` in the Request init, so
  // `options.headers` silently replaced the merged Headers object and
  // dropped the Range header for byte-range requests (and `options.method`
  // could override the documented GET).
  const { headers: baseHeaders, ...init } = options ?? {};
  const headers = new Headers(baseHeaders);
  if (addressable.byteRange) {
    const { start, end } = addressable.byteRange;
    headers.set("Range", `bytes=${start}-${end}`);
  }
  const request = new Request(addressable.url, {
    ...init,
    method: "GET",
    headers
  });
  return fetch(request);
}
1441
/**
 * Fetch a resolvable and return its body as an ArrayBuffer.
 *
 * Convenience wrapper around fetchResolvable for raw-byte consumers
 * (e.g. segment appends). Use fetchResolvable directly for text or
 * streaming consumption.
 */
async function fetchResolvableBytes(addressable, options) {
  const response = await fetchResolvable(addressable, options);
  return response.arrayBuffer();
}
1451
/**
 * Extract the body text from a Response-like object.
 *
 * Accepts any object exposing a `text()` method and returns the promise it
 * produces.
 *
 * @param response - Response-like object with a text() method
 * @returns Promise resolving to the text content
 *
 * @example
 * const response = await fetchResolvable(addressable);
 * const text = await getResponseText(response);
 */
function getResponseText(response) {
  const textPromise = response.text();
  return textPromise;
}
1467
/**
 * Minimal event stream with Observable-like shape.
 *
 * Simple Subject/Observable-like implementation for dispatching discrete events.
 * Events are dispatched synchronously to all subscribers.
 */
// Brand symbol set on objects returned by createEventStream.
const EVENT_STREAM_SYMBOL = Symbol("@videojs/event-stream");
1474
/**
 * Creates a minimal event stream for dispatching discrete events.
 *
 * Events are dispatched synchronously to every subscriber, and the stream
 * conforms to an Observable-like shape for future compatibility. Events must
 * carry a 'type' property for discriminated-union narrowing.
 *
 * @example
 * ```ts
 * type Action = { type: 'PLAY' } | { type: 'PAUSE' };
 * const events = createEventStream<Action>();
 *
 * events.subscribe((action) => {
 *   if (action.type === 'PLAY') {
 *     // Type narrowed to { type: 'PLAY' }
 *   }
 * });
 *
 * events.dispatch({ type: 'PLAY' });
 * ```
 */
function createEventStream() {
  const listeners = /* @__PURE__ */ new Set();
  const dispatch = (event) => {
    // Snapshot so subscribe/unsubscribe during dispatch doesn't affect
    // this emission.
    for (const listener of [...listeners]) {
      listener(event);
    }
  };
  const subscribe = (listener) => {
    listeners.add(listener);
    return () => listeners.delete(listener);
  };
  return {
    [EVENT_STREAM_SYMBOL]: true,
    dispatch,
    subscribe
  };
}
1510
/**
 * Combines multiple Observable sources into a single Observable.
 *
 * Emits an array of latest values whenever any source emits.
 * Only emits after all sources have emitted at least once.
 *
 * Supports selector-based subscriptions (fires only when the selected value
 * changes, per the optional equalityFn) mirroring the createState API.
 *
 * @param sources - Array of Observable sources
 * @returns Combined Observable
 *
 * @example
 * ```ts
 * const state = createState({ count: 0 });
 * const events = createEventStream<Action>();
 *
 * combineLatest([state, events]).subscribe(([state, event]) => {
 *   if (event.type === 'PLAY' && state.count > 0) {
 *     // React to event + state condition
 *   }
 * });
 * ```
 *
 * @example Selector subscription
 * ```ts
 * combineLatest([state, owners]).subscribe(
 *   ([s, o]) => deriveKey(s, o),
 *   (key) => { ... },
 *   { equalityFn: keyEq }
 * );
 * ```
 */
function combineLatest(sources) {
  // Shared subscription plumbing used by both subscribe() overloads.
  const subscribeToSources = (listener) => {
    // Latest value per source, plus a flag marking which have emitted.
    const latest = new Array(sources.length);
    const hasValue = new Array(sources.length).fill(false);
    const unsubscribers = [];
    for (let i = 0; i < sources.length; i++) {
      const unsubscribe = sources[i].subscribe((value) => {
        latest[i] = value;
        hasValue[i] = true;
        // Emit a fresh copy only once every source has produced a value.
        if (hasValue.every((has) => has)) listener([...latest]);
      });
      unsubscribers.push(unsubscribe);
    }
    // Teardown: unsubscribe from every source.
    return () => {
      for (const unsubscribe of unsubscribers) unsubscribe();
    };
  };
  return { subscribe(listenerOrSelector, maybeListener, options) {
    // Overload 1: subscribe(listener) — plain combined emission.
    if (maybeListener === void 0) return subscribeToSources(listenerOrSelector);
    // Overload 2: subscribe(selector, listener, options?) — fire only when
    // the selected value changes (per equalityFn, default Object.is).
    const selector = listenerOrSelector;
    const listener = maybeListener;
    const equalityFn = options?.equalityFn ?? Object.is;
    let prevSelected;
    let initialized = false;
    return subscribeToSources((values) => {
      const nextSelected = selector(values);
      // Always fire on the first emission, then only on change.
      if (!initialized || !equalityFn(prevSelected, nextSelected)) {
        prevSelected = nextSelected;
        initialized = true;
        listener(nextSelected);
      }
    });
  } };
}
1577
/**
 * Type guard: a presentation is "unresolved" when it is a bare `{ url }`
 * reference — it has a `url` but no `id` yet.
 */
function isUnresolved(presentation) {
  if (presentation === void 0) {
    return false;
  }
  return "url" in presentation && !("id" in presentation);
}
1583
// A state can be resolved only while its presentation is still unresolved.
function canResolve$1(state) {
  const { presentation } = state;
  return isUnresolved(presentation);
}
1586
/**
 * Determines if resolution conditions are met based on preload policy and event.
 *
 * Resolution conditions:
 * - State-driven: preload is 'auto' or 'metadata'
 * - Event-driven: play event
 *
 * @param state - Current presentation state
 * @param event - Current action/event
 * @returns true when resolution should proceed
 */
function shouldResolve$1(state, event) {
  if (event.type === "play") {
    return true;
  }
  const { preload } = state;
  return preload === "auto" || preload === "metadata";
}
1601
/**
 * Syncs the preload attribute from the mediaElement into immutable state.
 *
 * Subscribes to the owners container and, on every change, patches the state
 * with the element's preload value (empty/missing becomes undefined).
 *
 * @param state - Immutable state container
 * @param owners - Mutable platform objects container
 * @returns Cleanup function to stop syncing
 */
function syncPreloadAttribute(state, owners) {
  const onOwnersChange = (current) => {
    // Falsy preload ('' or missing element) is normalized to undefined.
    const preload = current.mediaElement?.preload || void 0;
    state.patch({ preload });
  };
  return owners.subscribe(onOwnersChange);
}
1617
/**
 * Resolves unresolved presentations using reactive composition.
 *
 * Uses combineLatest to compose state + events, enabling both state-driven
 * and event-driven resolution triggers.
 *
 * Triggers resolution when:
 * - State-driven: Unresolved presentation + preload allows (auto/metadata)
 * - Event-driven: PLAY event when preload="none"
 *
 * @example
 * ```ts
 * const state = createState({ presentation: undefined, preload: 'auto' });
 * const events = createEventStream<PresentationAction>();
 *
 * const cleanup = resolvePresentation({ state, events });
 *
 * // State-driven: resolves immediately when preload allows
 * state.patch({ presentation: { url: 'http://example.com/playlist.m3u8' } });
 *
 * // Event-driven: resolves on PLAY when preload="none"
 * state.patch({ preload: 'none', presentation: { url: '...' } });
 * events.dispatch({ type: 'PLAY' });
 * ```
 */
function resolvePresentation({ state, events }) {
  // Re-entrancy guard: prevents overlapping fetches while one is in flight.
  let resolving = false;
  let abortController = null;
  const cleanup = combineLatest([state, events]).subscribe(async ([currentState, event]) => {
    // Skip unless: presentation is still a bare { url }, the preload policy
    // (or a play event) allows resolution, and no fetch is already running.
    if (!canResolve$1(currentState) || !shouldResolve$1(currentState, event) || resolving) return;
    try {
      resolving = true;
      abortController = new AbortController();
      const { presentation } = currentState;
      // Fetch the multivariant playlist text and parse it into a resolved
      // presentation, then commit it back to state.
      const parsed = parseMultivariantPlaylist(await getResponseText(await fetchResolvable(presentation, { signal: abortController.signal })), presentation);
      state.patch({ presentation: parsed });
    } catch (error) {
      // Abort during cleanup is expected; swallow it silently.
      if (error instanceof Error && error.name === "AbortError") return;
      // NOTE(review): rethrowing inside this async subscriber surfaces as an
      // unhandled promise rejection — confirm an upstream handler exists.
      throw error;
    } finally {
      resolving = false;
      abortController = null;
    }
  });
  return () => {
    // Cancel any in-flight fetch, then stop listening to state/events.
    abortController?.abort();
    cleanup();
  };
}
1666
/**
 * Default quality switching configuration.
 *
 * - `safetyMargin`: multiplier passed as an option to `selectQuality`
 *   (presumably the fraction of the bandwidth estimate a rendition may
 *   consume — confirm against selectQuality).
 * - `minUpgradeInterval`: minimum milliseconds between quality upgrades
 *   (compared against Date.now() deltas in switchQuality; downgrades
 *   bypass this gate).
 * - `defaultBandwidth`: fallback estimate handed to `getBandwidthEstimate`
 *   before any samples exist (presumably bits/s — confirm).
 */
const DEFAULT_SWITCHING_CONFIG = {
	safetyMargin: .85,
	minUpgradeInterval: 8e3,
	defaultBandwidth: 5e6
};
1674
/**
 * Collect the video tracks from a presentation's first video switching set.
 *
 * Unresolved presentations (no `selectionSets` yet), presentations without a
 * video selection set, and empty switching-set lists all yield `[]`.
 *
 * @param presentation - Presentation, possibly unresolved
 * @returns Array of video tracks (empty when unavailable)
 */
function getVideoTracks(presentation) {
	const videoSet = presentation.selectionSets?.find(({ type }) => type === "video");
	const firstSwitchingSet = videoSet?.switchingSets[0];
	return firstSwitchingSet?.tracks ?? [];
}
1681
/**
 * Quality switching orchestration (F9).
 *
 * Reacts to bandwidth estimate changes and updates `selectedVideoTrackId`
 * when a different quality is optimal:
 *
 * - **Downgrades** happen immediately to avoid buffering stalls.
 * - **Upgrades** are gated by `minUpgradeInterval` to prevent oscillation.
 * - The first switch (from any track, or no track) is always immediate.
 *
 * Smooth switching is handled downstream: when `selectedVideoTrackId` changes,
 * `resolveTrack` fetches the new playlist and `loadSegments` reloads the init
 * segment, then appends media segments from the current position in the new
 * quality. The browser's SourceBuffer replaces the overlapping buffered range.
 *
 * @param state - Immutable state container (reads bandwidth/presentation,
 *   patches selectedVideoTrackId)
 * @param config - Optional overrides for DEFAULT_SWITCHING_CONFIG
 * @returns Cleanup function to stop the subscription
 *
 * @example
 * const cleanup = switchQuality({ state });
 * // Later, when done:
 * cleanup();
 */
function switchQuality({ state }, config = {}) {
	const safetyMargin = config.safetyMargin ?? DEFAULT_SWITCHING_CONFIG.safetyMargin;
	const minUpgradeInterval = config.minUpgradeInterval ?? DEFAULT_SWITCHING_CONFIG.minUpgradeInterval;
	const defaultBandwidth = config.defaultBandwidth ?? DEFAULT_SWITCHING_CONFIG.defaultBandwidth;
	// Initialized at creation time, so upgrades within the first
	// minUpgradeInterval are only allowed via the isFirst bypass below.
	let lastUpgradeTime = Date.now();
	let firstMeaningfulFire = true;
	return state.subscribe((currentState) => {
		const { presentation, bandwidthState, selectedVideoTrackId, abrDisabled } = currentState;
		if (abrDisabled === true) return;
		if (!presentation || !bandwidthState) return;
		const videoTracks = getVideoTracks(presentation);
		if (videoTracks.length === 0) return;
		// "Meaningful" = presentation, bandwidth, and tracks are all present.
		// Note: the flag is consumed here even if no switch happens below
		// (e.g. optimal already selected), so the bypass is single-use.
		const isFirst = firstMeaningfulFire;
		firstMeaningfulFire = false;
		const optimal = selectQuality(videoTracks, getBandwidthEstimate(bandwidthState, defaultBandwidth), { safetyMargin });
		if (!optimal || optimal.id === selectedVideoTrackId) return;
		const currentTrack = videoTracks.find((t) => t.id === selectedVideoTrackId);
		// Upgrade path (or no current track): rate-limit unless this is the
		// first meaningful fire. Downgrades fall through unconditionally.
		if (!currentTrack || optimal.bandwidth > currentTrack.bandwidth) {
			const now = Date.now();
			if (!isFirst && now - lastUpgradeTime < minUpgradeInterval) return;
			lastUpgradeTime = now;
		}
		state.patch({ selectedVideoTrackId: optimal.id });
	});
}
1726
/**
 * Map track type to selected track ID property key in state
 * (e.g. `state[SelectedTrackIdKeyByType.video]` reads
 * `state.selectedVideoTrackId`).
 */
const SelectedTrackIdKeyByType = {
	video: "selectedVideoTrackId",
	audio: "selectedAudioTrackId",
	text: "selectedTextTrackId"
};
1734
/**
 * Map track type to buffer owner property key.
 * Used for SourceBuffer references in owners. Text tracks have no
 * SourceBuffer, so only video and audio appear here.
 */
const BufferKeyByType = {
	video: "videoBuffer",
	audio: "audioBuffer"
};
1742
/**
 * Look up the currently selected track of a given type from state.
 *
 * Reads the selected track id via SelectedTrackIdKeyByType, then searches the
 * matching selection set's first switching set. Returns the track (partially
 * or fully resolved) or undefined when the presentation is missing/unresolved
 * or no track matches.
 *
 * @example
 * const videoTrack = getSelectedTrack(state, 'video');
 * if (videoTrack && isResolvedTrack(videoTrack)) {
 *   // videoTrack is VideoTrack
 * }
 */
function getSelectedTrack(state, type) {
	const { presentation } = state;
	/** @TODO Consider moving and reusing isUnresolved(presentation) (CJP) */
	// An unresolved presentation has no `id` yet — nothing to select from.
	if (!presentation || !("id" in presentation)) return void 0;
	const selectedId = state[SelectedTrackIdKeyByType[type]];
	const selectionSet = presentation.selectionSets.find((set) => set.type === type);
	return selectionSet?.switchingSets[0]?.tracks.find((track) => track.id === selectedId);
}
1760
/**
 * Creates a SegmentLoaderActor for one track type (video or audio).
 *
 * Receives load assignments via `send()` and owns all execution: planning,
 * removes, fetches, and appends. Coordinates with the SourceBufferActor for
 * all physical SourceBuffer operations.
 *
 * Planning (Cases 1–3) happens in `send()` on every incoming message, producing
 * an ordered LoadTask list. The runner drains that list sequentially. When a new
 * message arrives mid-run, send() replans and either continues the in-flight
 * operation (if still needed) or preempts it.
 *
 * @param sourceBufferActor - Shared SourceBufferActor reference (not owned)
 * @param fetchBytes - Tracked fetch closure (owns throughput sampling)
 * @returns `{ send, destroy }` actor interface
 */
function createSegmentLoaderActor(sourceBufferActor, fetchBytes) {
	// Replacement plan produced by send() while a run is in progress; picked
	// up by runScheduled after the current task settles.
	let pendingTasks = null;
	// In-flight markers let send() decide continue-vs-preempt mid-task.
	let inFlightInitTrackId = null;
	let inFlightSegmentId = null;
	let abortController = null;
	let running = false;
	let destroyed = false;
	// Intersect the track's segment list with what the SourceBufferActor
	// reports as committed (matched by segment id).
	const getBufferedSegments = (allSegments) => {
		const bufferedIds = new Set(sourceBufferActor.snapshot.context.segments.map((s) => s.id));
		return allSegments.filter((s) => bufferedIds.has(s.id));
	};
	/**
	 * Translate a load message into an ordered LoadTask list based on committed
	 * actor state. In-flight awareness is handled separately in send().
	 *
	 * @todo Rename alongside LoadTask (e.g. planOps).
	 *
	 * Case 1 — Removes: forward and back buffer flush points, segment-aligned.
	 * No flush on track switch: appending new content overwrites existing buffer
	 * ranges, and the actor's time-aligned deduplication keeps the segment model
	 * accurate as new segments arrive.
	 *
	 * Case 2 — Init: schedule if not yet committed for this track.
	 *
	 * Case 3 — Segments: all segments in the load window not yet committed.
	 */
	const planTasks = (message) => {
		const { track, range } = message;
		const actorCtx = sourceBufferActor.snapshot.context;
		const bufferedSegments = getBufferedSegments(track.segments);
		const currentTime = range?.start ?? 0;
		const tasks = [];
		if (range) {
			// Case 1: removes (only meaningful when a playback range is known).
			const forwardFlushStart = calculateForwardFlushPoint(bufferedSegments, currentTime);
			if (forwardFlushStart < Infinity) tasks.push({
				type: "remove",
				start: forwardFlushStart,
				end: Infinity
			});
			const backFlushEnd = calculateBackBufferFlushPoint(bufferedSegments, currentTime);
			if (backFlushEnd > 0) tasks.push({
				type: "remove",
				start: 0,
				end: backFlushEnd
			});
		}
		// Case 2: init segment, only when the actor's committed init differs.
		if (actorCtx.initTrackId !== track.id) tasks.push({
			type: "append-init",
			meta: { trackId: track.id },
			url: track.initialization.url
		});
		if (range) {
			// Case 3: media segments. A time-aligned committed segment is only
			// re-fetched when the new track has strictly higher bandwidth
			// (i.e. a quality upgrade overwrite), using an epsilon match on
			// startTime to tolerate float drift.
			const EPSILON = 1e-4;
			const segmentsToLoad = getSegmentsToLoad(track.segments, bufferedSegments, currentTime).filter((seg) => {
				const existing = actorCtx.segments.find((s) => Math.abs(s.startTime - seg.startTime) < EPSILON);
				if (!existing?.trackBandwidth || !track.bandwidth) return true;
				return track.bandwidth > existing.trackBandwidth;
			});
			for (const segment of segmentsToLoad) tasks.push({
				type: "append-segment",
				meta: {
					id: segment.id,
					startTime: segment.startTime,
					duration: segment.duration,
					trackId: track.id,
					trackBandwidth: track.bandwidth
				},
				url: segment.url
			});
		}
		return tasks;
	};
	/**
	 * Execute a single LoadTask: fetch (if needed) then forward to SourceBufferActor.
	 * Sets/clears in-flight tracking around async operations so send() can make
	 * accurate continue/preempt decisions at any point during execution.
	 *
	 * @todo Rename alongside LoadTask (e.g. executeOp).
	 */
	const executeLoadTask = async (task) => {
		const signal = abortController.signal;
		try {
			if (task.type === "remove") {
				await sourceBufferActor.send(task, signal);
				return;
			}
			if (task.type === "append-init") {
				inFlightInitTrackId = task.meta.trackId;
				if (!signal.aborted) {
					const data = await fetchBytes(task, { signal });
					// If a replan scheduled a DIFFERENT track's init, this fetch
					// was superseded; otherwise the already-fetched init is still
					// appended even after an abort (with a fresh, non-aborted
					// signal) so the work isn't wasted.
					const isTrackSwitch = pendingTasks?.some((t) => t.type === "append-init" && t.meta.trackId !== task.meta.trackId);
					if (!signal.aborted || !isTrackSwitch) {
						const appendSignal = signal.aborted ? new AbortController().signal : signal;
						await sourceBufferActor.send({
							type: "append-init",
							data,
							meta: task.meta
						}, appendSignal);
					}
				}
				return;
			}
			// append-segment: fetch then append, re-checking abort between steps.
			inFlightSegmentId = task.meta.id;
			if (!signal.aborted) {
				const data = await fetchBytes(task, { signal });
				if (!signal.aborted) await sourceBufferActor.send({
					type: "append-segment",
					data,
					meta: task.meta
				}, signal);
			}
		} finally {
			inFlightInitTrackId = null;
			inFlightSegmentId = null;
		}
	};
	/**
	 * Drain the scheduled task list sequentially.
	 * After each task completes, checks for a pending replacement plan from send().
	 * If the signal was aborted and no new plan arrived, stops immediately.
	 */
	const runScheduled = async (initialTasks) => {
		running = true;
		abortController = new AbortController();
		let scheduled = initialTasks;
		while (scheduled.length > 0 && !destroyed) {
			const task = scheduled[0];
			scheduled = scheduled.slice(1);
			try {
				await executeLoadTask(task);
			} catch (error) {
				// Aborts are routine (preemption); anything else drops the rest
				// of the plan after logging.
				if (error instanceof Error && error.name === "AbortError") {} else {
					console.error("Unexpected error in segment loader:", error);
					scheduled = [];
				}
			}
			// Swap in a replacement plan from send(), with a fresh controller
			// so the new plan isn't poisoned by a prior abort.
			if (pendingTasks !== null) {
				scheduled = pendingTasks;
				pendingTasks = null;
				abortController = new AbortController();
			} else if (abortController.signal.aborted) break;
		}
		abortController = null;
		running = false;
	};
	return {
		// Plan and either start a run, queue a continuation, or preempt.
		send(message) {
			if (destroyed) return;
			const allTasks = planTasks(message);
			if (!running) {
				if (allTasks.length === 0) return;
				// Fire-and-forget: the runner manages its own lifecycle/errors.
				runScheduled(allTasks);
				return;
			}
			// If the in-flight op is still wanted by the new plan, let it finish
			// and queue the remainder; otherwise preempt via abort.
			if (inFlightSegmentId !== null && allTasks.some((t) => t.type === "append-segment" && t.meta.id === inFlightSegmentId) || inFlightInitTrackId !== null && allTasks.some((t) => t.type === "append-init" && t.meta.trackId === inFlightInitTrackId)) pendingTasks = allTasks.filter((t) => !(t.type === "append-segment" && t.meta.id === inFlightSegmentId) && !(t.type === "append-init" && t.meta.trackId === inFlightInitTrackId));
			else {
				pendingTasks = allTasks;
				abortController?.abort();
			}
		},
		// Permanently stop: no further sends are honored.
		destroy() {
			destroyed = true;
			abortController?.abort();
		}
	};
}
1941
/**
 * Map track type to the SourceBufferActor property key in owners
 * (read by loadSegments via `owners[actorKey]`).
 */
const ActorKeyByType$1 = {
	video: "videoBufferActor",
	audio: "audioBufferActor"
};
1945
/**
 * Creates a fetch function that transparently samples bandwidth after each
 * completed request. Callers receive bytes; throughput tracking is invisible.
 *
 * `onSample` is an optional callback invoked after each sample is recorded,
 * used for bridging throughput state outward (e.g. migration bridge to global
 * state). A callback is used rather than a subscription so that no immediate
 * fire occurs at setup time — subscriptions fire on registration and would
 * trigger spurious state changes before any work has started.
 *
 * @param throughput - State container holding the bandwidth model
 * @param onSample - Optional callback receiving each new sample
 * @returns Async fetch closure with the fetchResolvableBytes signature
 */
function createTrackedFetch(throughput, onSample) {
	return async (addressable, options) => {
		const startedAt = performance.now();
		const bytes = await fetchResolvableBytes(addressable, options);
		const elapsedMs = performance.now() - startedAt;
		const sampled = sampleBandwidth(throughput.current, elapsedMs, bytes.byteLength);
		throughput.patch(sampled);
		throughput.flush();
		onSample?.(sampled);
		return bytes;
	};
}
1967
/**
 * Project the combined [segmentsCanLoad, state] tuple down to the inputs the
 * segment-loading equality function cares about (see loadingInputsEq).
 */
function selectLoadingInputs([segmentsCanLoad, state], type) {
	const { playbackInitiated, preload, currentTime } = state;
	const track = getSelectedTrack(state, type);
	return {
		playbackInitiated,
		preload,
		currentTime,
		track,
		segmentsCanLoad
	};
}
1977
+ /**
1978
+ * Equality function encoding the condition hierarchy for relevant changes.
1979
+ *
1980
+ * Pre-play (!playbackInitiated):
1981
+ * Only preload changes matter. currentTime and resolvedTrackId are ignored
1982
+ * (track changes not supported pre-play; currentTime value is used at
1983
+ * trigger time but changes don't re-trigger).
1984
+ *
1985
+ * playbackInitiated transition:
1986
+ * Always fires (handled in the subscriber; preload='auto' suppression
1987
+ * applied there since equality functions have no memory of prior values).
1988
+ *
1989
+ * Post-play (playbackInitiated):
1990
+ * resolvedTrackId changes (track switch or previously-unresolved track
1991
+ * resolving) and currentTime changes both trigger. preload is irrelevant.
1992
+ */
1993
/**
 * Map a playhead time to the start time of the segment containing it.
 * The final segment also matches any time at/after its start, so times past
 * the last boundary still resolve. Returns undefined for a null/undefined
 * time, a missing track, or no matching segment.
 */
const segmentStartFor = (currentTime, track) => {
	if (currentTime == null) return void 0;
	const matched = track?.segments.find((segment, index, all) => {
		const within = currentTime >= segment.startTime && currentTime < segment.startTime + segment.duration;
		return within || (currentTime >= segment.startTime && index === all.length - 1);
	});
	return matched?.startTime;
};
1997
/**
 * Returns true when the inputs are equal (no meaningful change — don't fire).
 * Returns false when the inputs differ in a way that requires a new message.
 *
 * This IS the shouldLoadSegments logic, expressed as an equality function
 * (see the condition-hierarchy comment above selectLoadingInputs).
 */
function loadingInputsEq(prev, cur) {
	// Loading disabled entirely → never fire.
	if (!cur.segmentsCanLoad) return true;
	// Pre-play: 'none' stays dormant; otherwise only preload transitions fire.
	if (!cur.playbackInitiated) {
		return cur.preload === "none" || cur.preload === prev.preload;
	}
	// Transition into playback fires unless we were already in full-range
	// mode (preload === 'auto').
	if (!prev.playbackInitiated && cur.playbackInitiated && prev.preload !== "auto") {
		return false;
	}
	// Post-play with no usable track → nothing to do yet.
	if (!cur.track || !isResolvedTrack(cur.track)) return true;
	// Track switch (or previously-unresolved track resolving) must fire.
	if (prev.track?.id !== cur.track.id && isResolvedTrack(cur.track)) return false;
	// Otherwise only a segment-boundary crossing of currentTime matters.
	return segmentStartFor(prev.currentTime, cur.track) === segmentStartFor(cur.currentTime, cur.track);
}
2016
/**
 * Load segments orchestration — Reactor layer.
 *
 * Sends typed load messages to a SegmentLoaderActor when relevant conditions
 * change. Uses targeted subscriptions rather than broad combineLatest so only
 * meaningful state changes trigger evaluation.
 *
 * Condition hierarchy (see SegmentLoadingKey for detail):
 *
 *   !playbackInitiated
 *     preload==='none' (or unset)  → dormant; no trigger
 *     preload==='metadata'         → trigger on transition to 'metadata'
 *     preload==='auto'             → trigger on transition to 'auto'
 *
 *   !playbackInitiated → playbackInitiated
 *     preload !== 'auto'           → trigger (message shape changes)
 *     preload === 'auto'           → suppressed (was already full-range mode;
 *                                     let segmentStart take over post-play)
 *                                     KNOWN LIMITATION: seek-before-play with
 *                                     preload='auto' is not supported — if the
 *                                     user seeks before pressing play, the
 *                                     first re-send is delayed until the next
 *                                     segment boundary crossing post-play.
 *
 *   playbackInitiated
 *     resolvedTrackId changes          → trigger
 *     segmentStart(currentTime) changes → trigger (segment boundary only)
 *
 * @param state - Immutable state container
 * @param owners - Mutable platform objects (watched for the buffer actor)
 * @param config - `{ type }`: which track type ('video' | 'audio') to load
 * @returns Cleanup function that tears down all three subscriptions
 *
 * @example
 * const cleanup = loadSegments({ state, owners }, { type: 'video' });
 */
function loadSegments({ state, owners }, config) {
	const { type } = config;
	const actorKey = ActorKeyByType$1[type];
	// Seed the local throughput model from any pre-existing global estimate.
	const initialBandwidth = state.current.bandwidthState;
	const throughput = createState(initialBandwidth ?? {
		fastEstimate: 0,
		fastTotalWeight: 0,
		slowEstimate: 0,
		slowTotalWeight: 0,
		bytesSampled: 0
	});
	// Only video fetches sample bandwidth; the outward bridge to global state
	// is wired only when a global bandwidthState already existed.
	const fetchBytes = type === "video" ? createTrackedFetch(throughput, initialBandwidth !== void 0 ? (next) => {
		state.patch({ bandwidthState: next });
		state.flush();
	} : void 0) : fetchResolvableBytes;
	// The loader actor is itself reactive state: created/destroyed as the
	// underlying SourceBufferActor appears/disappears in owners.
	const segmentLoader = createState(void 0);
	const unsubActorLifecycle = owners.subscribe((o) => o[actorKey], (actor) => {
		if (actor) segmentLoader.patch(createSegmentLoaderActor(actor, fetchBytes));
		else if (!actor && segmentLoader.current) {
			segmentLoader.current.destroy();
			segmentLoader.patch(void 0);
		}
		return () => {
			segmentLoader.current?.destroy();
			segmentLoader.patch(void 0);
		};
	});
	// Gate: loading is possible only with a resolved track AND a live actor.
	const segmentsCanLoad = createState(false);
	const unsubscribeCanLoadSegments = combineLatest([state, segmentLoader]).subscribe(([currentState, currentSegmentLoader]) => {
		const track = getSelectedTrack(currentState, type);
		const trackResolved = !!track && isResolvedTrack(track);
		const segmentLoaderActorExists = !!currentSegmentLoader;
		segmentsCanLoad.patch(trackResolved && segmentLoaderActorExists);
	});
	// Trigger: selector + equality function implement the hierarchy above.
	// Pre-play with preload!=='auto' sends a range-less message (metadata-only
	// load); otherwise the message carries a forward-buffer range.
	const unsubscribeShouldLoadSegments = combineLatest([segmentsCanLoad, state]).subscribe(([segmentsCanLoad$1, state$1]) => selectLoadingInputs([segmentsCanLoad$1, state$1], type), ({ preload, playbackInitiated, currentTime, track }) => {
		if (!(preload === "auto" || !!playbackInitiated))
			/** @ts-expect-error */
			segmentLoader.current?.send({
				type: "load",
				track
			});
		else segmentLoader.current?.send({
			type: "load",
			track,
			range: {
				start: currentTime,
				end: currentTime + DEFAULT_FORWARD_BUFFER_CONFIG.bufferDuration
			}
		});
	}, { equalityFn: loadingInputsEq });
	return () => {
		unsubscribeCanLoadSegments();
		unsubscribeShouldLoadSegments();
		unsubActorLifecycle();
	};
}
2103
/**
 * Parse a VTT segment using browser's native parser.
 *
 * Creates a dummy video element with a track element to leverage
 * the browser's optimized VTT parsing. Returns parsed VTTCue objects.
 */
let dummyVideo = null;
/**
 * Lazily create (and cache) the hidden <video> element that hosts <track>
 * elements for native VTT parsing.
 */
function ensureDummyVideo() {
	if (dummyVideo) return dummyVideo;
	const video = document.createElement("video");
	video.muted = true;
	video.preload = "none";
	video.style.display = "none";
	video.crossOrigin = "anonymous";
	dummyVideo = video;
	return dummyVideo;
}
2120
/**
 * Load one VTT segment URL through a temporary <track> element and resolve
 * with the cues the browser parsed out of it.
 *
 * @param url - VTT segment URL (loaded via the hidden video's <track>)
 * @returns Promise resolving to an array of parsed cues, rejecting on a
 *   track `error` event
 */
function parseVttSegment(url) {
	const hostVideo = ensureDummyVideo();
	const trackElement = document.createElement("track");
	trackElement.kind = "subtitles";
	trackElement.default = true;
	return new Promise((resolve, reject) => {
		// Detach listeners and remove the temporary element in both outcomes.
		const teardown = () => {
			trackElement.removeEventListener("load", handleLoad);
			trackElement.removeEventListener("error", handleError);
			hostVideo.removeChild(trackElement);
		};
		const handleLoad = () => {
			const collected = [];
			const { cues } = trackElement.track;
			if (cues) {
				for (let index = 0; index < cues.length; index++) {
					const cue = cues[index];
					if (cue) collected.push(cue);
				}
			}
			teardown();
			resolve(collected);
		};
		const handleError = () => {
			teardown();
			reject(new Error(`Failed to load VTT segment: ${url}`));
		};
		trackElement.addEventListener("load", handleLoad);
		trackElement.addEventListener("error", handleError);
		hostVideo.appendChild(trackElement);
		trackElement.src = url;
	});
}
2151
/**
 * Drop the cached dummy <video> element used for VTT parsing so it can be
 * garbage-collected; a later parseVttSegment() recreates it lazily.
 */
function destroyVttParser() {
	dummyVideo = null;
}
2154
/**
 * Check whether a cue with identical start time, end time, and text already
 * exists on the given TextTrack (avoids re-adding cues when VTT segments
 * overlap). Returns false when the track exposes no cue list.
 */
function isDuplicateCue(cue, textTrack) {
	const { cues } = textTrack;
	if (!cues) return false;
	// TextTrackCueList is array-like (length + index access), not an Array.
	return Array.prototype.some.call(cues, (existing) =>
		existing.startTime === cue.startTime &&
		existing.endTime === cue.endTime &&
		existing.text === cue.text);
}
2163
/**
 * Fetch and parse one VTT segment, then add every non-duplicate cue to the
 * provided TextTrack.
 */
const loadVttSegmentTask = async ({ segment }, context) => {
	const parsedCues = await parseVttSegment(segment.url);
	for (const parsedCue of parsedCues) {
		if (isDuplicateCue(parsedCue, context.textTrack)) continue;
		context.textTrack.addCue(parsedCue);
	}
};
2167
/**
 * Load text track cues task (composite - orchestrates VTT segment subtasks).
 *
 * Determines which VTT segments in the load window are not yet recorded in
 * `textBufferState`, loads them one at a time (respecting the abort signal),
 * and records each success back into state so later runs skip it. Individual
 * segment failures are logged and skipped; aborts stop the loop.
 */
const loadTextTrackCuesTask = async ({ currentState }, context) => {
	const track = findSelectedTextTrack(currentState);
	if (!track || !isResolvedTrack(track)) return;
	const { segments } = track;
	if (segments.length === 0) return;
	const trackId = track.id;
	// Segments already recorded as loaded for this track in state.
	const loadedIds = new Set((currentState.textBufferState?.[trackId]?.segments ?? []).map((s) => s.id));
	const segmentsToLoad = getSegmentsToLoad(segments, segments.filter((s) => loadedIds.has(s.id)), currentState.currentTime ?? 0).filter((s) => !loadedIds.has(s.id));
	if (segmentsToLoad.length === 0) return;
	for (const segment of segmentsToLoad) {
		if (context.signal.aborted) break;
		try {
			await loadVttSegmentTask({ segment }, { textTrack: context.textTrack });
			// Re-read the latest state (not currentState) so concurrent patches
			// from other tracks aren't clobbered by this merge.
			const latest = context.state.current.textBufferState ?? {};
			const trackState = latest[trackId] ?? { segments: [] };
			context.state.patch({ textBufferState: {
				...latest,
				[trackId]: { segments: [...trackState.segments, { id: segment.id }] }
			} });
		} catch (error) {
			if (error instanceof Error && error.name === "AbortError") break;
			// Best-effort: keep going so partial subtitles are still shown.
			console.error("Failed to load VTT segment:", error);
		}
	}
	// NOTE(review): re-adding already-present cues on a "showing" track looks
	// like a display-refresh workaround (addCue on an existing cue) — confirm
	// the intent; isDuplicateCue elsewhere suggests re-adds are normally avoided.
	if (context.textTrack.mode === "showing" && context.textTrack.cues) Array.from(context.textTrack.cues).forEach((cue) => {
		context.textTrack.addCue(cue);
	});
	// Yield a frame so the renderer can pick up the new cues.
	await new Promise((resolve) => requestAnimationFrame(resolve));
};
2199
/**
 * Find the selected text track's metadata in the presentation.
 * Returns undefined when nothing is selected, the presentation is missing,
 * or no matching track exists in the first text switching set.
 */
function findSelectedTextTrack(state) {
	const { presentation, selectedTextTrackId } = state;
	if (!presentation || !selectedTextTrackId) return;
	const textSet = presentation.selectionSets.find(({ type }) => type === "text");
	const tracks = textSet?.switchingSets?.[0]?.tracks;
	if (!tracks) return;
	return tracks.find((candidate) => candidate.id === selectedTextTrackId);
}
2208
/**
 * Get the browser's TextTrack object for the selected text track.
 *
 * Retrieves the live TextTrack interface from the track element in owners,
 * which is used for adding cues, checking mode, and managing track state.
 *
 * Note: Returns the DOM TextTrack interface (HTMLTrackElement.track),
 * not the presentation Track metadata type.
 *
 * @param state - Current playback state (track selection)
 * @param owners - DOM owners containing track elements map
 * @returns DOM TextTrack interface or undefined if not found
 */
function getSelectedTextTrackFromOwners(state, owners) {
	const { selectedTextTrackId } = state;
	if (!selectedTextTrackId) return;
	const { textTracks } = owners;
	if (!textTracks) return;
	return textTracks.get(selectedTextTrackId)?.track;
}
2226
/**
 * Check if we can load text track cues.
 *
 * Requires:
 * - Selected text track ID exists
 * - Track elements map exists
 * - Track element exists for selected track
 */
function canLoadTextTrackCues(state, owners) {
	const { selectedTextTrackId } = state;
	const { textTracks } = owners;
	return Boolean(selectedTextTrackId && textTracks?.has(selectedTextTrackId));
}
2237
/**
 * Check if we should load text track cues.
 *
 * Only load if:
 * - canLoadTextTrackCues passes (selection + element map)
 * - Track is resolved and has at least one segment
 * - The DOM TextTrack is retrievable from owners
 */
function shouldLoadTextTrackCues(state, owners) {
	if (!canLoadTextTrackCues(state, owners)) return false;
	const selected = findSelectedTextTrack(state);
	const hasSegments = !!selected && isResolvedTrack(selected) && selected.segments.length > 0;
	if (!hasSegments) return false;
	return !!getSelectedTextTrackFromOwners(state, owners);
}
2252
/**
 * Load text track cues orchestration.
 *
 * Triggers when:
 * - Text track is selected
 * - Track is resolved (has segments)
 * - Track element exists
 *
 * Fetches and parses VTT segments within the forward buffer window, then adds
 * cues to the track incrementally. Continues on segment errors to provide
 * partial subtitles.
 *
 * @param state - Immutable state container
 * @param owners - DOM owners (track elements map)
 * @returns Cleanup function that aborts any in-flight task and unsubscribes
 *
 * @example
 * const cleanup = loadTextTrackCues({ state, owners });
 */
function loadTextTrackCues({ state, owners }) {
	// Single-flight task handle; while set, new emissions are ignored.
	let currentTask = null;
	let abortController = null;
	let lastTrackId;
	const cleanup = combineLatest([state, owners]).subscribe(async ([currentState, currentOwners]) => {
		// Track selection changed: cancel the running task so the new track's
		// cues load immediately.
		if (currentState.selectedTextTrackId !== lastTrackId) {
			lastTrackId = currentState.selectedTextTrackId;
			abortController?.abort();
			currentTask = null;
		}
		if (currentTask) return;
		if (!shouldLoadTextTrackCues(currentState, currentOwners)) return;
		const textTrack = getSelectedTextTrackFromOwners(currentState, currentOwners);
		if (!textTrack) return;
		abortController = new AbortController();
		// `currentTask` is assigned synchronously (before any await inside the
		// task runs), so overlapping emissions see it and bail out above.
		currentTask = loadTextTrackCuesTask({ currentState }, {
			signal: abortController.signal,
			textTrack,
			state
		}).finally(() => {
			currentTask = null;
		});
	});
	return () => {
		abortController?.abort();
		cleanup();
	};
}
2295
/**
 * Track current playback position from the media element.
 *
 * Mirrors `mediaElement.currentTime` into reactive state on:
 * - `timeupdate` — fires during playback (~4 Hz)
 * - `seeking` — fires when a seek begins; per spec, `currentTime` is
 *   already at the new position when this event dispatches, so buffer
 *   management can react immediately rather than waiting for `timeupdate`,
 *   which does not fire while paused.
 *
 * Also syncs immediately when a media element becomes available.
 *
 * @example
 * const cleanup = trackCurrentTime({ state, owners });
 */
function trackCurrentTime({ state, owners }) {
	let attachedElement;
	let detach = null;
	const unsubscribe = owners.subscribe(({ mediaElement }) => {
		// Only react when the element reference actually changes.
		if (mediaElement === attachedElement) return;
		detach?.();
		detach = null;
		attachedElement = mediaElement;
		if (!mediaElement) return;
		// Sync immediately so state reflects the position before any event fires.
		state.patch({ currentTime: mediaElement.currentTime });
		const mirror = () => state.patch({ currentTime: mediaElement.currentTime });
		const offTimeupdate = listen(mediaElement, "timeupdate", mirror);
		const offSeeking = listen(mediaElement, "seeking", mirror);
		detach = () => {
			offTimeupdate();
			offSeeking();
		};
	});
	return () => {
		detach?.();
		unsubscribe();
	};
}
2334
/**
 * Track whether playback has been initiated by the user.
 *
 * Sets `state.playbackInitiated = true` when the media element fires a `play`
 * event (via `element.play()`, native controls, or autoplay) and simultaneously
 * dispatches `{ type: 'play' }` to the event stream so `resolvePresentation`
 * can react.
 *
 * Resets `state.playbackInitiated = false` when `presentation.url` changes,
 * so a new source with `preload="none"` won't load segments until play is
 * triggered again.
 *
 * This flag is used by `shouldLoadSegments` to allow segment loading after
 * play is initiated regardless of the initial `preload` setting — `preload`
 * is a startup hint, not a runtime gate.
 *
 * @example
 * const cleanup = trackPlaybackInitiated({ state, owners, events });
 */
function trackPlaybackInitiated({ state, owners, events }) {
	let attachedElement;
	let detachPlay = null;
	let knownPresentationUrl;
	const unsubscribeState = state.subscribe((currentState) => {
		const nextUrl = currentState.presentation?.url;
		if (nextUrl === knownPresentationUrl) return;
		// Skip the reset on the very first observation — there was no prior
		// source whose playback needs re-gating.
		if (knownPresentationUrl !== void 0) state.patch({ playbackInitiated: false });
		knownPresentationUrl = nextUrl;
	});
	const unsubscribeOwners = owners.subscribe(({ mediaElement }) => {
		if (mediaElement === attachedElement) return;
		detachPlay?.();
		detachPlay = null;
		attachedElement = mediaElement;
		if (!mediaElement) return;
		detachPlay = listen(mediaElement, "play", () => {
			state.patch({ playbackInitiated: true });
			events.dispatch({ type: "play" });
		});
	});
	return () => {
		detachPlay?.();
		unsubscribeState();
		unsubscribeOwners();
	};
}
2382
/**
 * Segment appender helper (P11)
 *
 * Appends media segments (ArrayBuffer) to SourceBuffer.
 */
/**
 * Append a media segment to a SourceBuffer.
 *
 * Waits for the SourceBuffer to be ready (not updating), then appends
 * the segment data. Returns a promise that resolves when append completes.
 *
 * @param sourceBuffer - The SourceBuffer to append to
 * @param segmentData - The segment data as ArrayBuffer
 * @returns Promise that resolves when append completes
 *
 * @example
 * const data = await fetch(segmentUrl).then(r => r.arrayBuffer());
 * await appendSegment(videoSourceBuffer, data);
 */
async function appendSegment(sourceBuffer, segmentData) {
	// If another update is in flight, wait for its `updateend` first —
	// calling appendBuffer while updating throws InvalidStateError.
	if (sourceBuffer.updating) {
		await new Promise((resolve) => {
			const settleWait = () => {
				sourceBuffer.removeEventListener("updateend", settleWait);
				resolve();
			};
			sourceBuffer.addEventListener("updateend", settleWait);
		});
	}
	return new Promise((resolve, reject) => {
		const detach = () => {
			sourceBuffer.removeEventListener("updateend", handleUpdateEnd);
			sourceBuffer.removeEventListener("error", handleError);
		};
		const handleUpdateEnd = () => {
			detach();
			resolve();
		};
		const handleError = (event) => {
			detach();
			reject(new Error(`SourceBuffer append error: ${event.type}`));
		};
		sourceBuffer.addEventListener("updateend", handleUpdateEnd);
		sourceBuffer.addEventListener("error", handleError);
		try {
			sourceBuffer.appendBuffer(segmentData);
		} catch (error) {
			// Synchronous appendBuffer failures (e.g. QuotaExceededError).
			detach();
			reject(error);
		}
	});
}
2432
/**
 * Buffer flusher helper (P12)
 *
 * Removes a time range from a SourceBuffer to manage memory.
 */
/**
 * Remove a time range from a SourceBuffer.
 *
 * If the buffer is mid-update, first waits for a single `updateend` event,
 * then issues the remove and resolves once its `updateend` fires.
 *
 * @param sourceBuffer - The SourceBuffer to remove data from
 * @param start - Start of the time range to remove (seconds)
 * @param end - End of the time range to remove (seconds)
 * @returns Promise that resolves when removal completes
 *
 * @example
 * await flushBuffer(videoSourceBuffer, 0, 30);
 */
async function flushBuffer(sourceBuffer, start, end) {
  if (sourceBuffer.updating) {
    // Let the in-flight operation drain before issuing the remove.
    await new Promise((resolve) => {
      const handleBusyEnd = () => {
        sourceBuffer.removeEventListener("updateend", handleBusyEnd);
        resolve();
      };
      sourceBuffer.addEventListener("updateend", handleBusyEnd);
    });
  }
  return new Promise((resolve, reject) => {
    // Single cleanup path shared by success, error event, and sync throw.
    const detach = () => {
      sourceBuffer.removeEventListener("updateend", handleDone);
      sourceBuffer.removeEventListener("error", handleFail);
    };
    const handleDone = () => {
      detach();
      resolve();
    };
    const handleFail = (event) => {
      detach();
      reject(new Error(`SourceBuffer remove error: ${event.type}`));
    };
    sourceBuffer.addEventListener("updateend", handleDone);
    sourceBuffer.addEventListener("error", handleFail);
    try {
      sourceBuffer.remove(start, end);
    } catch (error) {
      // remove() throws synchronously for invalid ranges or states.
      detach();
      reject(error);
    }
  });
}
2482
/**
 * Check if we can calculate presentation duration (have required data):
 * a presentation plus at least one selected video or audio track.
 */
function canCalculateDuration(state) {
  const { presentation, selectedVideoTrackId, selectedAudioTrackId } = state;
  if (!presentation) return false;
  return Boolean(selectedVideoTrackId || selectedAudioTrackId);
}
2489
/**
 * Check if we should calculate presentation duration (conditions met):
 * prerequisites satisfied, duration not already set, and at least one
 * selected track is resolved.
 */
function shouldCalculateDuration(state) {
  if (!canCalculateDuration(state)) return false;
  // Never overwrite a duration that has already been computed.
  if (state.presentation.duration !== void 0) return false;
  const video = state.selectedVideoTrackId ? getSelectedTrack(state, "video") : void 0;
  const audio = state.selectedAudioTrackId ? getSelectedTrack(state, "audio") : void 0;
  const videoReady = Boolean(video && isResolvedTrack(video));
  const audioReady = Boolean(audio && isResolvedTrack(audio));
  return videoReady || audioReady;
}
2500
/**
 * Get duration from the first resolved track (prefer video, fallback to audio).
 * Returns undefined when neither selected track is resolved.
 */
function getDurationFromResolvedTracks(state) {
  // Video is checked first so it wins when both tracks are resolved.
  for (const type of ["video", "audio"]) {
    const selectedId = type === "video" ? state.selectedVideoTrackId : state.selectedAudioTrackId;
    if (!selectedId) continue;
    const track = getSelectedTrack(state, type);
    if (track && isResolvedTrack(track)) return track.duration;
  }
}
2509
/**
 * Calculate and set presentation duration from resolved tracks.
 * Subscribes to state and patches `presentation.duration` once a finite
 * duration can be derived; returns the unsubscribe function.
 */
function calculatePresentationDuration({ state }) {
  return combineLatest([state]).subscribe(([snapshot]) => {
    if (!shouldCalculateDuration(snapshot)) return;
    const duration = getDurationFromResolvedTracks(snapshot);
    // Guard against missing or non-finite (e.g. live/Infinity) durations.
    if (duration === void 0 || !Number.isFinite(duration)) return;
    state.patch({
      presentation: {
        ...snapshot.presentation,
        duration
      }
    });
  });
}
2524
/**
 * Generic reusable task that wraps an async run function.
 *
 * Owns its own AbortController so it can always be aborted independently.
 * Optionally composes an external AbortSignal so that a parent's cancellation
 * propagates into the task's work without requiring the caller to track the
 * task separately.
 *
 * Ordering guarantee: `value` is written before `status` transitions to 'done';
 * `error` is written before `status` transitions to 'error'. Any reader
 * observing `status === 'done'` is guaranteed `value` is already present.
 */
var Task = class {
  id;
  #run;
  #controller = new AbortController();
  #signal;
  #status = "pending";
  #value = void 0;
  #error = void 0;
  constructor(runFn, config) {
    this.#run = runFn;
    // `id` may be a literal, a factory function, or omitted (auto-generated).
    const idOption = config?.id;
    this.id = typeof idOption === "function" ? idOption() : idOption ?? generateId();
    // Compose the external signal (if any) with our own controller's signal.
    this.#signal = config?.signal
      ? AbortSignal.any([this.#controller.signal, config.signal])
      : this.#controller.signal;
  }
  get status() {
    return this.#status;
  }
  get value() {
    return this.#value;
  }
  get error() {
    return this.#error;
  }
  async run() {
    this.#status = "running";
    try {
      const outcome = await this.#run(this.#signal);
      this.#value = outcome; // write value BEFORE flipping status to 'done'
      this.#status = "done";
      return outcome;
    } catch (failure) {
      this.#error = failure; // write error BEFORE flipping status to 'error'
      this.#status = "error";
      throw failure;
    }
  }
  abort() {
    this.#controller.abort();
  }
};
2576
/**
 * Runs tasks concurrently, deduplicated by task id.
 *
 * If a task with a given id is already in flight, subsequent schedule() calls
 * for that id are silently ignored until the first completes. Tasks are stored
 * so abortAll() can cancel any in-flight work (e.g. on engine cleanup).
 */
var ConcurrentRunner = class {
  #inFlight = /* @__PURE__ */ new Map();
  schedule(task) {
    // Dedup: an id already running means this request is dropped.
    if (this.#inFlight.has(task.id)) return;
    this.#inFlight.set(task.id, task);
    task
      .run()
      .catch((error) => {
        // AbortError is expected cancellation; anything else is re-surfaced.
        const isAbort = error instanceof Error && error.name === "AbortError";
        if (!isAbort) throw error;
      })
      .finally(() => {
        this.#inFlight.delete(task.id);
      });
  }
  abortAll() {
    for (const task of this.#inFlight.values()) task.abort();
    this.#inFlight.clear();
  }
};
2599
/**
 * Runs tasks one at a time in submission order.
 *
 * Each schedule() call returns a Promise that resolves or rejects with the
 * task's result when it is eventually executed. Tasks wait in queue until the
 * prior task completes.
 *
 * Serialization is achieved by chaining each task's run() onto the tail of a
 * shared promise chain — no explicit queue or drain loop needed.
 *
 * abortAll() aborts all pending (not yet started) tasks and the currently
 * in-flight task. Pending tasks still run briefly but receive an aborted
 * signal and are expected to exit early.
 */
var SerialRunner = class {
  #tail = Promise.resolve();
  #queued = /* @__PURE__ */ new Set();
  #active = null;
  schedule(task) {
    this.#queued.add(task);
    const outcome = this.#tail
      .then(() => {
        // Task is now starting: move it from queued to active.
        this.#queued.delete(task);
        this.#active = task;
        return task.run();
      })
      .finally(() => {
        this.#active = null;
      });
    // Swallow settlement so one failure never poisons the chain for later tasks.
    this.#tail = outcome.then(() => {}, () => {});
    return outcome;
  }
  abortAll() {
    for (const task of this.#queued) task.abort();
    this.#queued.clear();
    this.#active?.abort();
  }
  destroy() {
    this.abortAll();
  }
};
2639
/**
 * True when the selected track of `config.type` exists but has not yet been
 * resolved (i.e. its media playlist still needs fetching/parsing).
 */
function canResolve(state, config) {
  const selected = getSelectedTrack(state, config.type);
  return selected ? !isResolvedTrack(selected) : false;
}
2644
/**
 * Determines if track resolution conditions are met.
 *
 * Currently always returns true - conditions are checked by canResolveTrack()
 * and resolving flag. Kept as placeholder for future conditional logic.
 *
 * @param state - Current track resolution state
 * @param event - Current action/event
 * @returns true (conditions checked elsewhere)
 */
function shouldResolve(_state, _event) {
  // Intentionally unconditional; both parameters are reserved for future use.
  return true;
}
2657
/**
 * Updates a track within a presentation (immutably).
 * Generic - works for video, audio, or text tracks. Every selection set,
 * switching set, and track array is shallow-copied; only the track whose id
 * matches `resolvedTrack.id` is replaced.
 */
function updateTrackInPresentation(presentation, resolvedTrack) {
  const replaceTrack = (track) => (track.id === resolvedTrack.id ? resolvedTrack : track);
  const selectionSets = presentation.selectionSets.map((selectionSet) => {
    const switchingSets = selectionSet.switchingSets.map((switchingSet) => ({
      ...switchingSet,
      tracks: switchingSet.tracks.map(replaceTrack)
    }));
    return {
      ...selectionSet,
      switchingSets
    };
  });
  return {
    ...presentation,
    selectionSets
  };
}
2674
/**
 * Resolves unresolved tracks using reactive composition.
 *
 * The subscribe closure is pure scheduling logic: it checks conditions and
 * creates a Task for the selected track when appropriate. The ConcurrentRunner
 * handles all concurrency concerns — deduplication, parallel execution, and
 * cleanup.
 *
 * Generic version that works for video, audio, or text tracks based on config.
 * Type parameter T is inferred from config.type (use 'as const' for inference).
 */
function resolveTrack({ state, events }, config) {
  const runner = new ConcurrentRunner();
  const unsubscribe = combineLatest([state, events]).subscribe(([currentState, event]) => {
    if (!canResolve(currentState, config)) return;
    if (!shouldResolve(currentState, event)) return;
    const pendingTrack = getSelectedTrack(currentState, config.type);
    if (!pendingTrack) return;
    // Task id = track id, so the runner dedupes repeat requests per track.
    runner.schedule(new Task(async (signal) => {
      const response = await fetchResolvable(pendingTrack, { signal });
      const playlistText = await getResponseText(response);
      const mediaTrack = parseMediaPlaylist(playlistText, pendingTrack);
      // Re-read state.current: the snapshot may be stale after the awaits.
      const updatedPresentation = updateTrackInPresentation(state.current.presentation, mediaTrack);
      state.patch({ presentation: updatedPresentation });
    }, { id: pendingTrack.id }));
  });
  return () => {
    runner.abortAll();
    unsubscribe();
  };
}
2704
/**
 * Pick text track to activate.
 *
 * Selection priority (if enabled):
 * 1. User preference (preferredSubtitleLanguage)
 * 2. DEFAULT track (if enableDefaultTrack is true and track has default=true)
 * 3. No auto-selection (user opt-in)
 *
 * By default, FORCED tracks are excluded per Apple's HLS spec.
 *
 * @param presentation - Presentation with text tracks
 * @param config - Selection configuration
 * @returns Track ID or undefined (no auto-selection)
 */
function pickTextTrack(presentation, config) {
  const textSet = presentation.selectionSets.find(({ type }) => type === "text");
  if (!textSet?.switchingSets?.[0]?.tracks.length) return void 0;
  const allTracks = textSet.switchingSets[0].tracks;
  // Forced tracks are only eligible when the caller explicitly opts in.
  const candidates = config.includeForcedTracks ? allTracks : allTracks.filter((track) => !track.forced);
  if (!candidates.length) return void 0;
  const { preferredSubtitleLanguage, enableDefaultTrack = false } = config;
  if (preferredSubtitleLanguage) {
    const byLanguage = candidates.find((track) => track.language === preferredSubtitleLanguage);
    if (byLanguage) return byLanguage.id;
  }
  if (enableDefaultTrack) {
    const byDefault = candidates.find((track) => track.default === true);
    if (byDefault) return byDefault.id;
  }
}
2734
/**
 * Check if we can select a track of the given type.
 *
 * Returns true when:
 * - Presentation exists
 * - Has tracks of the specified type
 *
 * Generic over track type - works for video, audio, or text.
 */
function canSelectTrack(state, config) {
  const matchingSet = state?.presentation?.selectionSets?.find(({ type }) => type === config.type);
  const trackCount = matchingSet?.switchingSets?.[0]?.tracks.length;
  return Boolean(trackCount);
}
2746
/**
 * Check if we should select a track of the given type.
 *
 * Returns true when:
 * - Track of this type is not already selected
 *
 * Generic over track type - works for video, audio, or text.
 *
 * @TODO figure out reactive model for ABR cases - right now we're only selecting
 * if we have nothing selected (CJP)
 */
function shouldSelectTrack(state, config) {
  const selectedIdKey = SelectedTrackIdKeyByType[config.type];
  return !state[selectedIdKey];
}
2760
/**
 * Select video track orchestration.
 *
 * Selects a video track when:
 * - Presentation exists
 * - No video track is selected yet
 *
 * Currently picks the first track of the configured type; a bandwidth-based
 * quality selection algorithm is a TODO.
 *
 * @example
 * const cleanup = selectVideoTrack(
 *   { state, owners, events },
 *   { initialBandwidth: 2_000_000 }
 * );
 */
function selectVideoTrack({ state }, config = { type: "video" }) {
  // Re-entrancy guard: skip notifications that arrive mid-selection.
  let inProgress = false;
  return state.subscribe(async (currentState) => {
    if (inProgress) return;
    if (!canSelectTrack(currentState, config) || !shouldSelectTrack(currentState, config)) return;
    inProgress = true;
    try {
      const firstTrackId = currentState.presentation?.selectionSets
        .find(({ type }) => type === config.type)
        ?.switchingSets[0]?.tracks[0]?.id;
      if (firstTrackId) {
        state.patch({ [SelectedTrackIdKeyByType[config.type]]: firstTrackId });
      }
    } finally {
      inProgress = false;
    }
  });
}
2791
/**
 * Select audio track orchestration.
 *
 * Selects the first available audio track when:
 * - Presentation exists with audio tracks
 * - No audio track is selected yet
 *
 * Consistency fix: the track type and the state key are now derived from
 * `config` (matching selectVideoTrack) instead of hard-coding "audio" and
 * `selectedAudioTrackId`. Behavior is unchanged for the default config,
 * since SelectedTrackIdKeyByType maps 'audio' to 'selectedAudioTrackId'.
 *
 * @example
 * const cleanup = selectAudioTrack(
 *   { state, owners, events },
 *   { preferredAudioLanguage: 'en' }
 * );
 */
function selectAudioTrack({ state }, config = { type: "audio" }) {
  // Re-entrancy guard: skip notifications that arrive mid-selection.
  let selecting = false;
  return state.subscribe(async (currentState) => {
    if (!canSelectTrack(currentState, config) || !shouldSelectTrack(currentState, config) || selecting) return;
    try {
      selecting = true;
      const selectedTrackId = currentState.presentation?.selectionSets.find(({ type }) => type === config.type)?.switchingSets[0]?.tracks[0]?.id;
      if (selectedTrackId) {
        const selectedTrackKey = SelectedTrackIdKeyByType[config.type];
        state.patch({ [selectedTrackKey]: selectedTrackId });
      }
    } finally {
      selecting = false;
    }
  });
}
2819
/**
 * Select text track orchestration.
 *
 * Selects text track when:
 * - Presentation exists
 * - No text track is selected yet
 *
 * Note: Currently does not auto-select (user opt-in) unless pickTextTrack
 * finds a preferred-language or default match.
 *
 * @example
 * const cleanup = selectTextTrack({ state, owners, events }, {});
 */
function selectTextTrack({ state }, config = { type: "text" }) {
  // Re-entrancy guard: skip notifications that arrive mid-selection.
  let busy = false;
  return state.subscribe(async (currentState) => {
    if (busy) return;
    if (!canSelectTrack(currentState, config) || !shouldSelectTrack(currentState, config)) return;
    busy = true;
    try {
      const selectedTextTrackId = pickTextTrack(currentState.presentation, config);
      if (selectedTextTrackId) state.patch({ selectedTextTrackId });
    } finally {
      busy = false;
    }
  });
}
2844
/**
 * Check if the last segment of a track has been appended to a SourceBuffer.
 *
 * Checks by segment ID rather than a pipeline flag, so it is robust across
 * quality switches (different tracks have different segment IDs) and
 * back-buffer flushes (flushed segment IDs are removed from the model).
 */
function isLastSegmentAppended(segments, actor) {
  // An empty playlist has nothing left to append.
  if (!segments.length) return true;
  const finalSegment = segments.at(-1);
  if (!finalSegment) return false;
  const appendedSegments = actor?.snapshot.context.segments;
  return appendedSegments?.some(({ id }) => id === finalSegment.id) ?? false;
}
2857
/**
 * Check if the last segment has been appended for each selected track.
 *
 * Handles video-only, audio-only, and video+audio scenarios.
 * A selected track that is not yet resolved is considered not ready.
 */
function hasLastSegmentLoaded(state, owners) {
  const checks = [
    ["video", state.selectedVideoTrackId, owners.videoBufferActor],
    ["audio", state.selectedAudioTrackId, owners.audioBufferActor]
  ];
  for (const [type, selectedId, actor] of checks) {
    if (!selectedId) continue;
    const track = getSelectedTrack(state, type);
    // No track object for the selected id -> nothing to verify for this type.
    if (!track) continue;
    if (!isResolvedTrack(track)) return false;
    if (!isLastSegmentAppended(track.segments, actor)) return false;
  }
  return true;
}
2876
/**
 * Check if we can call endOfStream: requires both a MediaSource owner and a
 * presentation in state.
 */
function canEndStream(state, owners) {
  return Boolean(owners.mediaSource) && Boolean(state.presentation);
}
2882
/**
 * Check if we should call endOfStream.
 *
 * Guard chain (order preserved deliberately — cheap checks first):
 * prerequisites, MediaSource open, element has metadata, per-track buffers
 * exist, no buffer actor mid-update, all last segments appended, and playback
 * has reached the final segment.
 */
function shouldEndStream(state, owners) {
  if (!canEndStream(state, owners)) return false;
  const { mediaSource, mediaElement } = owners;
  if (mediaSource.readyState !== "open") return false;
  if (mediaElement && mediaElement.readyState < HTMLMediaElement.HAVE_METADATA) return false;
  const wantsVideo = !!state.selectedVideoTrackId;
  const wantsAudio = !!state.selectedAudioTrackId;
  if (wantsVideo && !owners.videoBuffer) return false;
  if (wantsAudio && !owners.audioBuffer) return false;
  if (owners.videoBufferActor?.snapshot.status === "updating") return false;
  if (owners.audioBufferActor?.snapshot.status === "updating") return false;
  if (!hasLastSegmentLoaded(state, owners)) return false;
  if (mediaElement) {
    const videoTrack = wantsVideo ? getSelectedTrack(state, "video") : void 0;
    const audioTrack = wantsAudio ? getSelectedTrack(state, "audio") : void 0;
    // Prefer video as the timing reference; fall back to audio.
    let referenceTrack;
    if (videoTrack && isResolvedTrack(videoTrack)) referenceTrack = videoTrack;
    else if (audioTrack && isResolvedTrack(audioTrack)) referenceTrack = audioTrack;
    if (referenceTrack && referenceTrack.segments.length > 0) {
      const finalSegment = referenceTrack.segments[referenceTrack.segments.length - 1];
      // Don't end the stream until playback has entered the last segment.
      if (mediaElement.currentTime < finalSegment.startTime) return false;
    }
  }
  return true;
}
2908
/**
 * Wait for all currently-updating SourceBufferActors to finish.
 * Uses actor status rather than raw SourceBuffer.updating so the wait is
 * aligned with the same abstraction that owns all buffer operations.
 */
function waitForSourceBuffersReady$1(owners) {
  const busyActors = [owners.videoBufferActor, owners.audioBufferActor]
    .filter((actor) => actor !== void 0 && actor.snapshot.status === "updating");
  if (!busyActors.length) return Promise.resolve();
  const waits = busyActors.map((actor) => new Promise((resolve) => {
    const unsubscribe = actor.subscribe((snapshot) => {
      // Keep listening until the actor leaves 'updating'.
      if (snapshot.status === "updating") return;
      unsubscribe();
      resolve();
    });
  }));
  return Promise.all(waits).then(() => void 0);
}
2925
/**
 * Get the highest buffered end time across all active SourceBuffers.
 * Used to set the final duration from actual container timestamps rather
 * than playlist metadata, which handles both shorter and longer cases.
 */
function getMaxBufferedEnd$1(owners) {
  let maxEnd = 0;
  for (const sourceBuffer of [owners.videoBuffer, owners.audioBuffer]) {
    if (!sourceBuffer || sourceBuffer.buffered.length === 0) continue;
    // Only the final range's end matters for the overall duration.
    const lastEnd = sourceBuffer.buffered.end(sourceBuffer.buffered.length - 1);
    maxEnd = Math.max(maxEnd, lastEnd);
  }
  return maxEnd;
}
2938
/**
 * End of stream task (module-level, pure).
 * Sets the final duration from actual buffered end time, then calls
 * endOfStream(). Bails out if the MediaSource has already ended or is no
 * longer open after the buffers drain.
 */
const endOfStreamTask = async ({ currentOwners }, _context) => {
  const { mediaSource } = currentOwners;
  if (mediaSource.readyState === "ended") return;
  await waitForSourceBuffersReady$1(currentOwners);
  // State may have changed while waiting (e.g. detach/teardown).
  if (mediaSource.readyState !== "open") return;
  const finalDuration = getMaxBufferedEnd$1(currentOwners);
  if (finalDuration > 0) mediaSource.duration = finalDuration;
  mediaSource.endOfStream();
  // Give the browser a frame to propagate the 'ended' readyState.
  await new Promise((resolve) => requestAnimationFrame(resolve));
};
2952
/**
 * Call endOfStream when the last segment has been appended.
 * This signals to the browser that the stream is complete.
 *
 * Per the MSE spec, appendBuffer() remains valid after endOfStream() —
 * seeks that require re-appending earlier segments will still work.
 * What becomes blocked is calling endOfStream() again, addSourceBuffer(),
 * and MediaSource.duration updates.
 */
function endOfStream({ state, owners }) {
  // True once endOfStream() has been called; reset below when the MediaSource
  // re-opens (e.g. a seek triggers new appends), allowing it to end again.
  let hasEnded = false;
  let destroyed = false;
  // Unsubscribe functions for the per-actor subscriptions created on each
  // owners change; rebuilt whenever the buffer actors are replaced.
  const activeActorUnsubs = [];
  const runEvaluate = async () => {
    if (destroyed) return;
    const currentState = state.current;
    const currentOwners = owners.current;
    if (hasEnded) {
      // Only clear the ended latch once the MediaSource is open again;
      // otherwise stay latched so we don't call endOfStream() twice.
      if (currentOwners.mediaSource?.readyState !== "open") return;
      hasEnded = false;
    }
    if (!shouldEndStream(currentState, currentOwners)) return;
    // Latch BEFORE the async task so overlapping evaluations don't re-enter.
    hasEnded = true;
    try {
      await endOfStreamTask({ currentOwners }, {});
    } catch (error) {
      console.error("Failed to call endOfStream:", error);
    }
  };
  const cleanupOwners = owners.subscribe((currentOwners) => {
    // Drop subscriptions to stale actors before subscribing to current ones.
    activeActorUnsubs.forEach((u) => u());
    activeActorUnsubs.length = 0;
    for (const actor of [currentOwners.videoBufferActor, currentOwners.audioBufferActor]) {
      if (!actor) continue;
      // Skip the immediate replay emission; only react to real changes.
      let isFirst = true;
      activeActorUnsubs.push(actor.subscribe(() => {
        if (isFirst) {
          isFirst = false;
          return;
        }
        runEvaluate();
      }));
    }
  });
  const cleanupCombineLatest = combineLatest([state, owners]).subscribe(async () => runEvaluate());
  return () => {
    destroyed = true;
    activeActorUnsubs.forEach((u) => u());
    cleanupOwners();
    cleanupCombineLatest();
  };
}
3004
/**
 * Check if we have the minimum requirements to create MediaSource:
 * a media element owner and a presentation URL in state.
 */
function canSetup(state, owners) {
  const hasElement = !isNil(owners.mediaElement);
  const hasUrl = !isNil(state.presentation?.url);
  return hasElement && hasUrl;
}
3010
/**
 * Check if we should proceed with MediaSource creation — only when no
 * MediaSource has been created yet. Placeholder for future conditions.
 */
function shouldSetup(_state, owners) {
  const { mediaSource } = owners;
  return isNil(mediaSource);
}
3017
/**
 * Setup MediaSource orchestration.
 *
 * Creates and attaches MediaSource when:
 * - mediaElement exists in owners
 * - presentation.url exists in state
 *
 * Patches owners.mediaSource only after the source has opened, so downstream
 * consumers never see a not-yet-open MediaSource.
 */
function setupMediaSource({ state, owners }) {
  // Re-entrancy guard for notifications arriving during async setup.
  let inProgress = false;
  let abortController = null;
  const unsubscribe = combineLatest([state, owners]).subscribe(async ([currentState, currentOwners]) => {
    if (inProgress) return;
    if (!canSetup(currentState, currentOwners) || !shouldSetup(currentState, currentOwners)) return;
    inProgress = true;
    abortController = new AbortController();
    try {
      const mediaSource = createMediaSource({ preferManaged: true });
      attachMediaSource(mediaSource, currentOwners.mediaElement);
      await waitForSourceOpen(mediaSource, abortController.signal);
      owners.patch({ mediaSource });
    } catch (error) {
      // Cleanup-triggered aborts are expected; anything else propagates.
      if (error instanceof DOMException && error.name === "AbortError") return;
      throw error;
    } finally {
      inProgress = false;
    }
  });
  return () => {
    abortController?.abort();
    unsubscribe();
  };
}
3050
/**
 * Thrown when a message is sent to the actor in a state that does not
 * accept messages (currently: 'updating').
 */
var SourceBufferActorError = class extends Error {
  /** @param message - Description of the rejected operation and actor state. */
  constructor(message) {
    super(message);
    // Named explicitly so callers can branch on error.name.
    this.name = "SourceBufferActorError";
  }
};
3060
/**
 * Copy a TimeRanges-like object into a plain array of { start, end } records
 * so the snapshot survives after the live ranges mutate.
 */
function snapshotBuffered(buffered) {
  return Array.from({ length: buffered.length }, (_, index) => ({
    start: buffered.start(index),
    end: buffered.end(index)
  }));
}
3068
/**
 * Build a Task that appends an init segment and records which track's init
 * data is now active in the context.
 */
function appendInitTask(message, { signal, getCtx, sourceBuffer }) {
  return new Task(async (taskSignal) => {
    const context = getCtx();
    // Aborted before starting: return context unchanged.
    if (taskSignal.aborted) return context;
    await appendSegment(sourceBuffer, message.data);
    return {
      ...context,
      initTrackId: message.meta.trackId
    };
  }, { signal });
}
3079
/**
 * Build a Task that appends a media segment, replaces any tracked segment at
 * (approximately) the same start time, and refreshes the buffered snapshot.
 */
function appendSegmentTask(message, { signal, getCtx, sourceBuffer }) {
  return new Task(async (taskSignal) => {
    const context = getCtx();
    if (taskSignal.aborted) return context;
    await appendSegment(sourceBuffer, message.data);
    const { meta } = message;
    // Tolerance for float start-time comparison when deduping overlaps.
    const EPSILON = 1e-4;
    const withoutOverlap = context.segments.filter(
      (segment) => Math.abs(segment.startTime - meta.startTime) >= EPSILON
    );
    const record = {
      id: meta.id,
      startTime: meta.startTime,
      duration: meta.duration,
      trackId: meta.trackId
    };
    if (meta.trackBandwidth !== void 0) record.trackBandwidth = meta.trackBandwidth;
    return {
      ...context,
      segments: [...withoutOverlap, record],
      bufferedRanges: snapshotBuffered(sourceBuffer.buffered)
    };
  }, { signal });
}
3100
/**
 * Build a Task that flushes a time range from the SourceBuffer and drops any
 * tracked segments whose midpoint no longer falls inside a buffered range.
 */
function removeTask(message, { signal, getCtx, sourceBuffer }) {
  return new Task(async (taskSignal) => {
    const context = getCtx();
    if (taskSignal.aborted) return context;
    await flushBuffer(sourceBuffer, message.start, message.end);
    const bufferedRanges = snapshotBuffered(sourceBuffer.buffered);
    // A segment survives only if its midpoint is still buffered.
    const surviving = context.segments.filter((segment) => {
      const midpoint = segment.startTime + segment.duration / 2;
      return bufferedRanges.some((range) => midpoint >= range.start && midpoint < range.end);
    });
    return {
      ...context,
      segments: surviving,
      bufferedRanges
    };
  }, { signal });
}
3117
/** Registry mapping actor message types to their Task factory functions. */
const messageTaskFactories = {
  "append-init": appendInitTask,
  "append-segment": appendSegmentTask,
  remove: removeTask
};
/**
 * Convert an actor message into a runnable Task via the factory registry.
 * An unknown message.type yields an undefined factory, so the call throws a
 * TypeError.
 */
function messageToTask(message, options) {
  const factory = messageTaskFactories[message.type];
  return factory(message, options);
}
3126
/**
 * Create an actor that serializes all operations against one SourceBuffer.
 *
 * State shape: { status: 'idle' | 'updating' | 'destroyed', context } where
 * context tracks appended segments, a buffered-ranges snapshot, and the
 * current init-segment track id. All mutations flow through a SerialRunner,
 * so operations execute strictly in submission order.
 */
function createSourceBufferActor(sourceBuffer, initialContext) {
  const state = createState({
    status: "idle",
    context: {
      segments: [],
      bufferedRanges: [],
      initTrackId: void 0,
      ...initialContext
    }
  });
  const runner = new SerialRunner();
  // Commit a task's resulting context; a concurrent destroy() wins the status.
  function applyResult(newContext) {
    const status = state.current.status === "destroyed" ? "destroyed" : "idle";
    state.patch({
      status,
      context: newContext
    });
    state.flush();
  }
  // Return to idle (or stay destroyed) on failure, then rethrow to the caller.
  function handleError(e) {
    const status = state.current.status === "destroyed" ? "destroyed" : "idle";
    state.patch({ status });
    state.flush();
    throw e;
  }
  return {
    get snapshot() {
      return state.current;
    },
    subscribe(listener) {
      return state.subscribe(listener);
    },
    // Run a single message; rejects immediately unless the actor is idle.
    send(message, signal) {
      if (state.current.status !== "idle") return Promise.reject(new SourceBufferActorError(`send() called while actor is ${state.current.status}`));
      state.patch({ status: "updating" });
      const task = messageToTask(message, {
        signal,
        getCtx: () => state.current.context,
        sourceBuffer
      });
      return runner.schedule(task).then(applyResult).catch(handleError);
    },
    // Run several messages back-to-back; context is threaded through
    // `workingCtx` and only the LAST task's result is committed to state.
    batch(messages, signal) {
      if (state.current.status !== "idle") return Promise.reject(new SourceBufferActorError(`batch() called while actor is ${state.current.status}`));
      if (messages.length === 0) return Promise.resolve();
      state.patch({ status: "updating" });
      let workingCtx = state.current.context;
      for (const message of messages.slice(0, -1)) {
        const task = messageToTask(message, {
          signal,
          getCtx: () => workingCtx,
          sourceBuffer
        });
        // NOTE(review): no .catch here — if an intermediate task rejects,
        // its rejection is unhandled and workingCtx is never advanced while
        // the final task still runs. Confirm this is acceptable.
        runner.schedule(task).then((newCtx) => {
          workingCtx = newCtx;
        });
      }
      const lastTask = messageToTask(messages[messages.length - 1], {
        signal,
        getCtx: () => workingCtx,
        sourceBuffer
      });
      return runner.schedule(lastTask).then(applyResult).catch(handleError);
    },
    destroy() {
      state.patch({ status: "destroyed" });
      state.flush();
      runner.destroy();
    }
  };
}
3197
/** Map track type to SourceBufferActor owner property key. */
const ActorKeyByType = {
  // Parallels BufferKeyByType: track type -> owners key holding the actor.
  video: "videoBufferActor",
  audio: "audioBufferActor"
};
3202
/**
 * Setup SourceBuffer task (module-level, pure).
 * Creates a SourceBuffer + actor for the resolved track of context.config.type,
 * patches them into owners, and waits a frame before completing. No-op when
 * the track is missing, unresolved, or has no codecs.
 */
const setupSourceBufferTask = async ({ currentState, currentOwners }, context) => {
  const { type } = context.config;
  const track = getSelectedTrack(currentState, type);
  if (!track || !isResolvedTrack(track)) return;
  // Without codec info we cannot build a valid MIME string.
  if (!track.codecs?.length) return;
  const mimeCodec = buildMimeCodec(track);
  const sourceBuffer = createSourceBuffer(currentOwners.mediaSource, mimeCodec);
  const actor = createSourceBufferActor(sourceBuffer);
  context.owners.patch({
    [BufferKeyByType[type]]: sourceBuffer,
    [ActorKeyByType[type]]: actor
  });
  // Give the browser a frame to settle before dependents observe the patch.
  await new Promise((resolve) => requestAnimationFrame(resolve));
};
3221
/**
 * Build MIME codec string from track metadata.
 *
 * @param track - Resolved track with mimeType and codecs
 * @returns MIME codec string (e.g., 'video/mp4; codecs="avc1.42E01E,mp4a.40.2"')
 *
 * @example
 * buildMimeCodec({ mimeType: 'video/mp4', codecs: ['avc1.42E01E'] })
 * // => 'video/mp4; codecs="avc1.42E01E"'
 */
function buildMimeCodec(track) {
  // Missing codecs list degrades to codecs="" rather than throwing.
  const codecList = (track.codecs ?? []).join(",");
  return `${track.mimeType}; codecs="${codecList}"`;
}
3235
/**
 * Check if we can setup SourceBuffer for track type.
 *
 * Requires:
 * - MediaSource exists in owners
 * - Track is selected
 *
 * Note: We don't check mediaSource.readyState because owners holds references
 * to mutable objects. Changes to properties on those objects won't trigger
 * observations. Instead, setupMediaSource only patches owners.mediaSource after
 * it's already open, so if it exists in owners, it's ready to use.
 *
 * Note: Track does not need to be resolved yet. The orchestration will wait
 * for the track to be resolved (via resolveTrack) before creating the SourceBuffer.
 */
function canSetupBuffer(state, owners, type) {
  return Boolean(owners.mediaSource) && Boolean(getSelectedTrack(state, type));
}
3255
/**
 * Check if we should create SourceBuffer (i.e., none exists yet for this type).
 */
function shouldSetupBuffer(owners, type) {
  return isUndefined(owners[BufferKeyByType[type]]);
}
3262
/**
 * Setup SourceBuffer orchestration.
 *
 * Triggers when:
 * - MediaSource exists and is in 'open' state
 * - Track is selected (same condition as resolveTrack)
 *
 * Creates the SourceBuffer once the track becomes resolved with codecs,
 * letting this orchestration run in parallel with resolveTrack.
 *
 * Note: Text tracks don't use SourceBuffers and should be handled separately.
 *
 * Generic over track type - create one orchestration per track type:
 * @example
 * const videoCleanup = setupSourceBuffer({ state, owners }, { type: 'video' });
 * const audioCleanup = setupSourceBuffer({ state, owners }, { type: 'audio' });
 */
function setupSourceBuffer({ state, owners }, config) {
  let pendingTask = null;
  const onUpdate = async ([currentState, currentOwners]) => {
    // Single-flight guard: never start a second setup while one is in flight.
    if (pendingTask) return;
    if (!canSetupBuffer(currentState, currentOwners, config.type)) return;
    if (!shouldSetupBuffer(currentOwners, config.type)) return;
    pendingTask = setupSourceBufferTask(
      { currentState, currentOwners },
      { owners, config }
    );
    try {
      await pendingTask;
    } finally {
      pendingTask = null;
    }
  };
  return combineLatest([state, owners]).subscribe(onUpdate);
}
3298
/**
 * Get all text tracks from a presentation.
 * Returns the tracks of the first switching set of the 'text' selection set,
 * or an empty array when any link in that chain is missing.
 */
function getTextTracks(presentation) {
  const textSelectionSet = presentation?.selectionSets?.find(
    (selectionSet) => selectionSet.type === "text"
  );
  const tracks = textSelectionSet?.switchingSets?.[0]?.tracks;
  return tracks || [];
}
3307
/**
 * Check if we can setup text tracks.
 *
 * Requires:
 * - mediaElement exists
 * - presentation has at least one text track to set up
 */
function canSetupTextTracks(state, owners) {
  if (!owners.mediaElement) return false;
  return getTextTracks(state.presentation).length > 0;
}
3317
/**
 * Check if we should setup text tracks (i.e., no track-element map exists yet).
 */
function shouldSetupTextTracks(owners) {
  const { textTracks } = owners;
  return !textTracks;
}
3323
/**
 * Create a DOM <track> element for a text track.
 *
 * Note: We use DOM <track> elements instead of the TextTrack JS API
 * because there's no way to remove TextTracks added via addTextTrack().
 */
function createTrackElement(track) {
  const element = document.createElement("track");
  element.id = track.id;
  element.kind = track.kind;
  element.label = track.label;
  // srclang and default are only set when present on the track metadata.
  if (track.language) element.srclang = track.language;
  if (track.default) element.default = true;
  element.src = track.url;
  return element;
}
3339
/**
 * Setup text tracks orchestration.
 *
 * Triggers when:
 * - mediaElement exists
 * - presentation is resolved (has text tracks)
 *
 * Creates <track> elements for every text track and appends them to the media
 * element so the browser's native text track rendering can take over. Runs at
 * most once per orchestration instance.
 *
 * Note: Uses DOM track elements instead of the TextTrack API because tracks
 * added via addTextTrack() cannot be removed.
 *
 * @example
 * const cleanup = setupTextTracks({ state, owners });
 */
function setupTextTracks({ state, owners }) {
  let done = false;
  let appendedElements = [];
  const unsubscribe = combineLatest([state, owners]).subscribe(([currentState, currentOwners]) => {
    if (done) return;
    if (!canSetupTextTracks(currentState, currentOwners)) return;
    if (!shouldSetupTextTracks(currentOwners)) return;
    done = true;
    const tracks = getTextTracks(currentState.presentation);
    if (!tracks.length) return;
    const trackElementsById = new Map();
    for (const textTrack of tracks) {
      const element = createTrackElement(textTrack);
      currentOwners.mediaElement.appendChild(element);
      trackElementsById.set(textTrack.id, element);
      appendedElements.push(element);
    }
    owners.patch({ textTracks: trackElementsById });
  });
  return () => {
    // Remove every element we appended, then stop observing.
    for (const element of appendedElements) element.remove();
    appendedElements = [];
    unsubscribe();
  };
}
3379
/**
 * Sync selectedTextTrackId from DOM text track mode changes.
 *
 * Listens to the `change` event on `media.textTracks` and updates
 * `selectedTextTrackId` when external code (e.g. the captions button via
 * `toggleSubtitles()`) changes a subtitle/caption track mode to 'showing'.
 *
 * This bridges the core store's `toggleSubtitles()` with SPF's reactive text
 * track pipeline (`syncTextTrackModes`, `loadTextTrackCues`). Without this
 * bridge, direct DOM mode changes would be immediately overridden by
 * `syncTextTrackModes` on the next SPF state update.
 *
 * When a subtitle/caption track's mode is 'showing', its DOM `id` — which
 * matches the SPF track ID set by `setupTextTracks` — is written to
 * `selectedTextTrackId`. When no subtitle/caption track is 'showing',
 * `selectedTextTrackId` is cleared along with the deselected track's
 * `textBufferState` entry — setting mode to 'disabled' clears native cues from
 * the track element, so the buffer must be reset to re-fetch cues on re-enable.
 *
 * @example
 * const cleanup = syncSelectedTextTrackFromDom({ state, owners });
 */
function syncSelectedTextTrackFromDom({ state, owners }) {
  let attachedMediaElement;
  let detachListener = null;
  const unsubscribe = owners.subscribe((currentOwners) => {
    const { mediaElement } = currentOwners;
    // Only re-wire when the media element reference actually changes.
    if (mediaElement === attachedMediaElement) return;
    detachListener?.();
    detachListener = null;
    attachedMediaElement = mediaElement;
    if (!mediaElement) return;
    const syncFromDom = () => {
      const showing = Array.from(mediaElement.textTracks).find(
        (t) => t.mode === "showing" && (t.kind === "subtitles" || t.kind === "captions")
      );
      const newId = showing?.id || void 0;
      const current = state.current;
      if (current.selectedTextTrackId === newId) return;
      if (newId) {
        state.patch({ selectedTextTrackId: newId });
        return;
      }
      // Deselection: also drop the previous track's buffer entry so cues are
      // re-fetched if the track is re-enabled later.
      const prevId = current.selectedTextTrackId;
      if (prevId && current.textBufferState?.[prevId]) {
        const nextBufferState = { ...current.textBufferState };
        delete nextBufferState[prevId];
        state.patch({
          selectedTextTrackId: void 0,
          textBufferState: nextBufferState
        });
      } else {
        state.patch({ selectedTextTrackId: void 0 });
      }
    };
    detachListener = listen(mediaElement.textTracks, "change", syncFromDom);
  });
  return () => {
    detachListener?.();
    unsubscribe();
  };
}
3435
/**
 * Check if we can sync text track modes.
 *
 * Requires:
 * - textTracks map exists and is non-empty (track elements created)
 */
function canSyncTextTrackModes(owners) {
  const { textTracks } = owners;
  if (!textTracks) return false;
  return textTracks.size > 0;
}
3444
/**
 * Sync text track modes orchestration.
 *
 * Manages track element modes based on selectedTextTrackId:
 * - Selected track: mode = "showing"
 * - Other tracks: mode = "hidden"
 * - No selection: all tracks mode = "hidden"
 *
 * Note: Uses "hidden" instead of "disabled" for non-selected tracks
 * so they remain available in the browser's track menu.
 *
 * @example
 * const cleanup = syncTextTrackModes({ state, owners });
 */
function syncTextTrackModes({ state, owners }) {
  return combineLatest([state, owners]).subscribe(([currentState, currentOwners]) => {
    if (!canSyncTextTrackModes(currentOwners)) return;
    const { selectedTextTrackId } = currentState;
    for (const [trackId, trackElement] of currentOwners.textTracks) {
      trackElement.track.mode = trackId === selectedTextTrackId ? "showing" : "hidden";
    }
  });
}
3466
/**
 * Check if we can update MediaSource duration (have required data).
 */
function canUpdateDuration(state, owners) {
  if (!owners.mediaSource) return false;
  if (!state.presentation) return false;
  return Boolean(hasPresentationDuration(state.presentation));
}
3472
/**
 * Get the maximum buffered end time across all SourceBuffers.
 * Returns 0 when no buffer exists or nothing is buffered.
 */
function getMaxBufferedEnd(owners) {
  let maxEnd = 0;
  for (const sourceBuffer of [owners.videoSourceBuffer, owners.audioSourceBuffer]) {
    if (sourceBuffer === void 0) continue;
    const { buffered } = sourceBuffer;
    if (buffered.length === 0) continue;
    // Only the last range's end matters for the maximum.
    const end = buffered.end(buffered.length - 1);
    maxEnd = Math.max(maxEnd, end);
  }
  return maxEnd;
}
3487
/**
 * Check if we should update MediaSource duration (conditions met).
 *
 * True only when:
 * - required data exists (see canUpdateDuration)
 * - the MediaSource is in the 'open' state
 * - the presentation duration is a finite, positive number
 * - the MediaSource duration has not been set yet (it is NaN until assigned)
 */
function shouldUpdateDuration(state, owners) {
  if (!canUpdateDuration(state, owners)) return false;
  const { mediaSource } = owners;
  const { presentation } = state;
  if (mediaSource.readyState !== "open") return false;
  const duration = presentation.duration;
  // Number.isFinite already rejects NaN, so no separate NaN check is needed.
  if (!Number.isFinite(duration) || duration <= 0) return false;
  return Number.isNaN(mediaSource.duration);
}
3499
/**
 * Wait for all currently-updating SourceBuffers to finish.
 *
 * The MSE spec forbids setting MediaSource.duration while any attached
 * SourceBuffer has updating === true. This defers until all are idle.
 */
function waitForSourceBuffersReady(owners) {
  const busy = [owners.videoSourceBuffer, owners.audioSourceBuffer].filter(
    (sourceBuffer) => sourceBuffer?.updating === true
  );
  if (!busy.length) return Promise.resolve();
  const waits = busy.map(
    (sourceBuffer) => new Promise((resolve) => {
      sourceBuffer.addEventListener("updateend", () => resolve(), { once: true });
    })
  );
  return Promise.all(waits).then(() => void 0);
}
3510
/**
 * Update MediaSource duration when the presentation duration becomes available.
 * The duration is raised to the max buffered end if the buffer extends past it.
 */
function updateDuration({ state, owners }) {
  let destroyed = false;
  const unsubscribe = combineLatest([state, owners]).subscribe(async ([currentState, currentOwners]) => {
    if (!shouldUpdateDuration(currentState, currentOwners)) return;
    const { mediaSource } = currentOwners;
    await waitForSourceBuffersReady(currentOwners);
    // Re-check after the await: teardown or a readyState change may have occurred.
    if (destroyed || mediaSource.readyState !== "open") return;
    const presentationDuration = currentState.presentation.duration;
    const bufferedEnd = getMaxBufferedEnd(currentOwners);
    mediaSource.duration = Math.max(presentationDuration, bufferedEnd);
  });
  return () => {
    destroyed = true;
    unsubscribe();
  };
}
3530
/**
 * Create a POC playback engine.
 *
 * Wires together all orchestrations to create a reactive playback pipeline:
 * 1. Resolve presentation (multivariant playlist)
 * 2. Select initial video, audio, and text tracks
 * 3. Resolve selected tracks (media playlists)
 * 4. Setup MediaSource
 * 5. Setup SourceBuffers for video and audio
 *
 * Note: This is a POC - does not yet load/append segments.
 *
 * @param config - Playback engine configuration
 * @returns Playback engine instance with state, owners, events, and destroy
 *
 * @example
 * const engine = createPlaybackEngine({
 *   initialBandwidth: 2_000_000,
 *   preferredAudioLanguage: 'en',
 * });
 *
 * // Initialize by patching state and owners
 * engine.owners.patch({ mediaElement: document.querySelector('video') });
 * engine.state.patch({
 *   presentation: { url: 'https://example.com/playlist.m3u8' },
 *   preload: 'auto',
 * });
 *
 * // Inspect state
 * console.log(engine.state.current);
 *
 * // Cleanup
 * engine.destroy();
 */
function createPlaybackEngine(config = {}) {
  const state = createState({
    bandwidthState: {
      fastEstimate: 0,
      fastTotalWeight: 0,
      slowEstimate: 0,
      slowTotalWeight: 0,
      bytesSampled: 0
    }
  });
  const owners = createState({});
  const events = createEventStream();
  // Shared dependency bundle for orchestrations that take all three streams.
  const deps = { state, owners, events };
  const cleanups = [
    syncPreloadAttribute(state, owners),
    trackPlaybackInitiated(deps),
    resolvePresentation({ state, events }),
    selectVideoTrack(deps, {
      type: "video",
      // Only forward config options the caller explicitly provided.
      ...config.initialBandwidth !== void 0 && { initialBandwidth: config.initialBandwidth }
    }),
    selectAudioTrack(deps, {
      type: "audio",
      ...config.preferredAudioLanguage !== void 0 && { preferredAudioLanguage: config.preferredAudioLanguage }
    }),
    selectTextTrack(deps, {
      type: "text",
      ...config.preferredSubtitleLanguage !== void 0 && { preferredSubtitleLanguage: config.preferredSubtitleLanguage },
      ...config.includeForcedTracks !== void 0 && { includeForcedTracks: config.includeForcedTracks },
      ...config.enableDefaultTrack !== void 0 && { enableDefaultTrack: config.enableDefaultTrack }
    }),
    resolveTrack({ state, events }, { type: "video" }),
    resolveTrack({ state, events }, { type: "audio" }),
    resolveTrack({ state, events }, { type: "text" }),
    calculatePresentationDuration({ state }),
    setupMediaSource({ state, owners }),
    updateDuration({ state, owners }),
    setupSourceBuffer({ state, owners }, { type: "video" }),
    setupSourceBuffer({ state, owners }, { type: "audio" }),
    trackCurrentTime({ state, owners }),
    switchQuality({ state }),
    loadSegments({ state, owners }, { type: "video" }),
    loadSegments({ state, owners }, { type: "audio" }),
    endOfStream({ state, owners }),
    setupTextTracks({ state, owners }),
    syncTextTrackModes({ state, owners }),
    syncSelectedTextTrackFromDom({ state, owners }),
    loadTextTrackCues({ state, owners })
  ];
  events.dispatch({ type: "@@INITIALIZE@@" });
  return {
    state,
    owners,
    events,
    destroy: () => {
      for (const cleanup of cleanups) cleanup();
      destroyVttParser();
    }
  };
}
3685
/**
 * HTMLMediaElement-compatible adapter for the SPF playback engine.
 *
 * Implements the src/play() contract per the WHATWG HTML spec so that SPF can
 * be used anywhere a media element API is expected.
 *
 * A new engine is created on every src assignment — this fully tears down all
 * state, SourceBuffers, and in-flight requests from the previous source before
 * the next one begins. The media element reference is preserved across src
 * changes and re-applied to the new engine automatically.
 *
 * @example
 * const media = new SpfMedia({ preferredAudioLanguage: 'en' });
 * media.attach(document.querySelector('video'));
 * media.src = 'https://stream.mux.com/abc123.m3u8';
 *
 * // Change source — old engine is destroyed, new one starts clean:
 * media.src = 'https://stream.mux.com/xyz456.m3u8';
 *
 * // Explicit teardown:
 * media.destroy();
 */
var SpfMedia = class {
  #engine;
  #config;
  /** Pending loadstart listener from a deferred play() retry, if any. */
  #pendingLoadstart = null;
  constructor(config = {}) {
    this.#config = config;
    this.#engine = createPlaybackEngine(config);
  }
  get engine() {
    return this.#engine;
  }
  attach(mediaElement) {
    this.#engine.owners.patch({ mediaElement });
  }
  detach() {
    this.#cancelPendingPlay();
    this.#engine.owners.patch({ mediaElement: void 0 });
  }
  destroy() {
    this.#cancelPendingPlay();
    this.#engine.destroy();
  }
  get src() {
    return this.#engine.state.current.presentation?.url ?? "";
  }
  set src(value) {
    // Keep the media element across the engine swap.
    const { mediaElement } = this.#engine.owners.current;
    this.#cancelPendingPlay();
    this.#engine.destroy();
    this.#engine = createPlaybackEngine(this.#config);
    if (mediaElement) this.#engine.owners.patch({ mediaElement });
    if (value) this.#engine.state.patch({ presentation: { url: value } });
  }
  play() {
    const { mediaElement } = this.#engine.owners.current;
    if (!mediaElement) return Promise.reject(new Error("SpfMedia: no media element attached"));
    this.#engine.state.patch({ playbackInitiated: true });
    return mediaElement.play().catch((err) => {
      if (!this.src) throw err;
      // A source is set, so retry once the element signals a new load cycle.
      return new Promise((resolve, reject) => {
        const retry = () => {
          this.#pendingLoadstart = null;
          mediaElement.play().then(resolve, reject);
        };
        this.#pendingLoadstart = retry;
        mediaElement.addEventListener("loadstart", retry, { once: true });
      });
    });
  }
  #cancelPendingPlay() {
    const listener = this.#pendingLoadstart;
    if (!listener) return;
    this.#engine.owners.current.mediaElement?.removeEventListener("loadstart", listener);
    this.#pendingLoadstart = null;
  }
};
3764

//#endregion
//#region ../core/dist/dev/dom/media/simple-hls/index.js
/**
 * Custom media element backed by SpfMedia: composes CustomMediaMixin (tagged
 * as "video") with MediaDelegateMixin over the SpfMedia adapter.
 * Falls back to an empty base class when globalThis.HTMLElement is not
 * defined (presumably non-DOM environments — verify against build targets).
 */
var SimpleHlsCustomMedia = class extends MediaDelegateMixin(CustomMediaMixin(globalThis.HTMLElement ?? class {}, { tag: "video" }), SpfMedia) {};
3768
+
3769
+ //#endregion
3770
+ //#region src/media/simple-hls-video/index.ts
3771
var SimpleHlsVideo = class extends SimpleHlsCustomMedia {
  /**
   * Remove `src` from the template attributes before delegating to the base
   * template; `src` is instead routed through the engine via
   * attributeChangedCallback.
   */
  static getTemplateHTML(attrs) {
    const { src: _src, ...templateAttrs } = attrs;
    return super.getTemplateHTML(templateAttrs);
  }
  constructor() {
    super();
    // Hand the rendered target element to the SPF adapter.
    this.attach(this.target);
  }
  attributeChangedCallback(attrName, oldValue, newValue) {
    if (attrName === "src") {
      // `src` bypasses the base handler and is assigned directly on changes.
      if (oldValue !== newValue) this.src = newValue ?? "";
    } else {
      super.attributeChangedCallback(attrName, oldValue, newValue);
    }
  }
};
3785
+
3786
+ //#endregion
3787
//#region src/define/media/simple-hls-video.ts
/**
 * Concrete element class registered under the <simple-hls-video> tag.
 */
var SimpleHlsVideoElement = class extends SimpleHlsVideo {
  static {
    // Static initializer block assigns the tag name used for registration below.
    this.tagName = "simple-hls-video";
  }
};
customElements.define(SimpleHlsVideoElement.tagName, SimpleHlsVideoElement);
3794
+
3795
+ //#endregion
3796
+ //# sourceMappingURL=simple-hls-video.dev.js.map