@videojs/html 10.0.0-beta.1 → 10.0.0-beta.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (410)
  1. package/cdn/audio-minimal.dev.d.ts +1 -0
  2. package/cdn/audio-minimal.dev.js +112 -0
  3. package/cdn/audio-minimal.dev.js.map +1 -0
  4. package/cdn/audio-minimal.js +2 -0
  5. package/cdn/audio-minimal.js.map +1 -0
  6. package/cdn/audio.dev.d.ts +1 -0
  7. package/cdn/audio.dev.js +103 -0
  8. package/cdn/audio.dev.js.map +1 -0
  9. package/cdn/audio.js +2 -0
  10. package/cdn/audio.js.map +1 -0
  11. package/cdn/background.dev.d.ts +1 -0
  12. package/cdn/background.dev.js +159 -0
  13. package/cdn/background.dev.js.map +1 -0
  14. package/cdn/background.js +2 -0
  15. package/cdn/background.js.map +1 -0
  16. package/cdn/context-C_e06fGU.js +13 -0
  17. package/cdn/context-C_e06fGU.js.map +1 -0
  18. package/cdn/context-DTY0nOpS.js +98 -0
  19. package/cdn/context-DTY0nOpS.js.map +1 -0
  20. package/cdn/create-player-BTIU8EwT.js +7 -0
  21. package/cdn/create-player-BTIU8EwT.js.map +1 -0
  22. package/cdn/create-player-Cwxvswyv.js +3218 -0
  23. package/cdn/create-player-Cwxvswyv.js.map +1 -0
  24. package/cdn/default-GgKND7a8.js +2 -0
  25. package/cdn/default-GgKND7a8.js.map +1 -0
  26. package/cdn/default-cLso8BHO.js +28 -0
  27. package/cdn/default-cLso8BHO.js.map +1 -0
  28. package/cdn/listen-BXAYCbZA.js +9 -0
  29. package/cdn/listen-BXAYCbZA.js.map +1 -0
  30. package/cdn/listen-DX5vU4s4.js +2 -0
  31. package/cdn/listen-DX5vU4s4.js.map +1 -0
  32. package/cdn/media/dash-video.dev.d.ts +1 -0
  33. package/cdn/media/dash-video.dev.js +39165 -0
  34. package/cdn/media/dash-video.dev.js.map +1 -0
  35. package/cdn/media/dash-video.js +21 -0
  36. package/cdn/media/dash-video.js.map +1 -0
  37. package/cdn/media/hls-video.dev.d.ts +1 -0
  38. package/cdn/media/hls-video.dev.js +28357 -0
  39. package/cdn/media/hls-video.dev.js.map +1 -0
  40. package/cdn/media/hls-video.js +41 -0
  41. package/cdn/media/hls-video.js.map +1 -0
  42. package/cdn/media/simple-hls-video.dev.d.ts +1 -0
  43. package/cdn/media/simple-hls-video.dev.js +3465 -0
  44. package/cdn/media/simple-hls-video.dev.js.map +1 -0
  45. package/cdn/media/simple-hls-video.js +2 -0
  46. package/cdn/media/simple-hls-video.js.map +1 -0
  47. package/cdn/media-attach-mixin-ChyNp2eK.js +44 -0
  48. package/cdn/media-attach-mixin-ChyNp2eK.js.map +1 -0
  49. package/cdn/media-attach-mixin-tFNcHnvo.js +2 -0
  50. package/cdn/media-attach-mixin-tFNcHnvo.js.map +1 -0
  51. package/cdn/minimal-BJfleQcQ.js +2 -0
  52. package/cdn/minimal-BJfleQcQ.js.map +1 -0
  53. package/cdn/minimal-DBMdC_0I.js +28 -0
  54. package/cdn/minimal-DBMdC_0I.js.map +1 -0
  55. package/cdn/player-BHhLXO-R.js +2 -0
  56. package/cdn/player-BHhLXO-R.js.map +1 -0
  57. package/cdn/player-DEfj0RU6.js +15 -0
  58. package/cdn/player-DEfj0RU6.js.map +1 -0
  59. package/cdn/poster-Dd0F1rRd.js +195 -0
  60. package/cdn/poster-Dd0F1rRd.js.map +1 -0
  61. package/cdn/poster-DwQ3RAch.js +2 -0
  62. package/cdn/poster-DwQ3RAch.js.map +1 -0
  63. package/cdn/predicate-BG-dj_kF.js +26 -0
  64. package/cdn/predicate-BG-dj_kF.js.map +1 -0
  65. package/cdn/predicate-Y9jDHLpX.js +2 -0
  66. package/cdn/predicate-Y9jDHLpX.js.map +1 -0
  67. package/cdn/proxy-2oO2ph3m.js +47 -0
  68. package/cdn/proxy-2oO2ph3m.js.map +1 -0
  69. package/cdn/proxy-6KS6wy69.js +2 -0
  70. package/cdn/proxy-6KS6wy69.js.map +1 -0
  71. package/cdn/proxy-XzDf9gyk.js +66 -0
  72. package/cdn/proxy-XzDf9gyk.js.map +1 -0
  73. package/cdn/proxy-dR7IDk37.js +349 -0
  74. package/cdn/proxy-dR7IDk37.js.map +1 -0
  75. package/cdn/safe-define-B8lHgj_K.js +9 -0
  76. package/cdn/safe-define-B8lHgj_K.js.map +1 -0
  77. package/cdn/safe-define-GrHW3P9e.js +2 -0
  78. package/cdn/safe-define-GrHW3P9e.js.map +1 -0
  79. package/cdn/video-minimal.dev.d.ts +1 -0
  80. package/cdn/video-minimal.dev.js +155 -0
  81. package/cdn/video-minimal.dev.js.map +1 -0
  82. package/cdn/video-minimal.js +2 -0
  83. package/cdn/video-minimal.js.map +1 -0
  84. package/cdn/video.dev.d.ts +1 -0
  85. package/cdn/video.dev.js +170 -0
  86. package/cdn/video.dev.js.map +1 -0
  87. package/cdn/video.js +2 -0
  88. package/cdn/video.js.map +1 -0
  89. package/cdn/volume-slider-DgJ0rAfC.js +2459 -0
  90. package/cdn/volume-slider-DgJ0rAfC.js.map +1 -0
  91. package/cdn/volume-slider-Pd0AMTCH.js +8 -0
  92. package/cdn/volume-slider-Pd0AMTCH.js.map +1 -0
  93. package/dist/default/_virtual/inline-css_src/define/audio/minimal-skin.js +1 -1
  94. package/dist/default/_virtual/inline-css_src/define/audio/minimal-skin.js.map +1 -1
  95. package/dist/default/_virtual/inline-css_src/define/audio/skin.js +1 -1
  96. package/dist/default/_virtual/inline-css_src/define/audio/skin.js.map +1 -1
  97. package/dist/default/_virtual/inline-css_src/define/background/skin.js +6 -0
  98. package/dist/default/_virtual/inline-css_src/define/background/skin.js.map +1 -0
  99. package/dist/default/_virtual/inline-css_src/define/base.js +6 -0
  100. package/dist/default/_virtual/inline-css_src/define/base.js.map +1 -0
  101. package/dist/default/_virtual/inline-css_src/define/shared.js +6 -0
  102. package/dist/default/_virtual/inline-css_src/define/shared.js.map +1 -0
  103. package/dist/default/_virtual/inline-css_src/define/video/minimal-skin.js +1 -1
  104. package/dist/default/_virtual/inline-css_src/define/video/minimal-skin.js.map +1 -1
  105. package/dist/default/_virtual/inline-css_src/define/video/skin.js +1 -1
  106. package/dist/default/_virtual/inline-css_src/define/video/skin.js.map +1 -1
  107. package/dist/default/define/audio/minimal-skin.css +237 -157
  108. package/dist/default/define/audio/minimal-skin.js +2 -79
  109. package/dist/default/define/audio/minimal-skin.js.map +1 -1
  110. package/dist/default/define/audio/minimal-skin.tailwind.js +5 -82
  111. package/dist/default/define/audio/minimal-skin.tailwind.js.map +1 -1
  112. package/dist/default/define/audio/skin.css +234 -153
  113. package/dist/default/define/audio/skin.js +2 -70
  114. package/dist/default/define/audio/skin.js.map +1 -1
  115. package/dist/default/define/audio/skin.tailwind.js +6 -75
  116. package/dist/default/define/audio/skin.tailwind.js.map +1 -1
  117. package/dist/default/define/background/skin.css +1 -1
  118. package/dist/default/define/background/skin.js +11 -5
  119. package/dist/default/define/background/skin.js.map +1 -1
  120. package/dist/default/define/base.css +23 -0
  121. package/dist/default/define/media/dash-video.js +14 -0
  122. package/dist/default/define/media/dash-video.js.map +1 -0
  123. package/dist/default/define/shared.css +13 -0
  124. package/dist/default/define/skin-mixin.js +16 -33
  125. package/dist/default/define/skin-mixin.js.map +1 -1
  126. package/dist/default/define/video/minimal-skin.css +486 -301
  127. package/dist/default/define/video/minimal-skin.js +3 -121
  128. package/dist/default/define/video/minimal-skin.js.map +1 -1
  129. package/dist/default/define/video/minimal-skin.tailwind.js +8 -136
  130. package/dist/default/define/video/minimal-skin.tailwind.js.map +1 -1
  131. package/dist/default/define/video/skin.css +484 -289
  132. package/dist/default/define/video/skin.js +3 -116
  133. package/dist/default/define/video/skin.js.map +1 -1
  134. package/dist/default/define/video/skin.tailwind.js +8 -128
  135. package/dist/default/define/video/skin.tailwind.js.map +1 -1
  136. package/dist/default/icons/dist/render/default/index.js +2 -1
  137. package/dist/default/icons/dist/render/default/index.js.map +1 -1
  138. package/dist/default/icons/dist/render/minimal/index.js +2 -1
  139. package/dist/default/icons/dist/render/minimal/index.js.map +1 -1
  140. package/dist/default/index.js +3 -2
  141. package/dist/default/media/background-video/index.js +6 -19
  142. package/dist/default/media/background-video/index.js.map +1 -1
  143. package/dist/default/media/container-element.js +5 -2
  144. package/dist/default/media/container-element.js.map +1 -1
  145. package/dist/default/media/dash-video/index.js +26 -0
  146. package/dist/default/media/dash-video/index.js.map +1 -0
  147. package/dist/default/media/hls-video/index.js +2 -1
  148. package/dist/default/media/hls-video/index.js.map +1 -1
  149. package/dist/default/media/simple-hls-video/index.js +3 -1
  150. package/dist/default/media/simple-hls-video/index.js.map +1 -1
  151. package/dist/default/player/context.js +6 -2
  152. package/dist/default/player/context.js.map +1 -1
  153. package/dist/default/player/create-player.js +11 -3
  154. package/dist/default/player/create-player.js.map +1 -1
  155. package/dist/default/skins/dist/default/default/tailwind/audio.tailwind.js +10 -26
  156. package/dist/default/skins/dist/default/default/tailwind/audio.tailwind.js.map +1 -1
  157. package/dist/default/skins/dist/default/default/tailwind/components/button.js +4 -3
  158. package/dist/default/skins/dist/default/default/tailwind/components/button.js.map +1 -1
  159. package/dist/default/skins/dist/default/default/tailwind/components/controls.js +1 -1
  160. package/dist/default/skins/dist/default/default/tailwind/components/controls.js.map +1 -1
  161. package/dist/default/skins/dist/default/default/tailwind/components/error.js +3 -3
  162. package/dist/default/skins/dist/default/default/tailwind/components/error.js.map +1 -1
  163. package/dist/default/skins/dist/default/default/tailwind/components/overlay.js +1 -1
  164. package/dist/default/skins/dist/default/default/tailwind/components/overlay.js.map +1 -1
  165. package/dist/default/skins/dist/default/default/tailwind/components/popup.js +3 -3
  166. package/dist/default/skins/dist/default/default/tailwind/components/popup.js.map +1 -1
  167. package/dist/default/skins/dist/default/default/tailwind/components/poster.js +16 -0
  168. package/dist/default/skins/dist/default/default/tailwind/components/poster.js.map +1 -0
  169. package/dist/default/skins/dist/default/default/tailwind/components/preview.js +13 -0
  170. package/dist/default/skins/dist/default/default/tailwind/components/preview.js.map +1 -0
  171. package/dist/default/skins/dist/default/default/tailwind/components/root.js +1 -1
  172. package/dist/default/skins/dist/default/default/tailwind/components/root.js.map +1 -1
  173. package/dist/default/skins/dist/default/default/tailwind/components/seek.js +1 -1
  174. package/dist/default/skins/dist/default/default/tailwind/components/seek.js.map +1 -1
  175. package/dist/default/skins/dist/default/default/tailwind/components/slider.js +1 -1
  176. package/dist/default/skins/dist/default/default/tailwind/components/slider.js.map +1 -1
  177. package/dist/default/skins/dist/default/default/tailwind/components/surface.js +1 -1
  178. package/dist/default/skins/dist/default/default/tailwind/components/surface.js.map +1 -1
  179. package/dist/default/skins/dist/default/default/tailwind/video.tailwind.js +27 -16
  180. package/dist/default/skins/dist/default/default/tailwind/video.tailwind.js.map +1 -1
  181. package/dist/default/skins/dist/default/minimal/tailwind/audio.tailwind.js +11 -24
  182. package/dist/default/skins/dist/default/minimal/tailwind/audio.tailwind.js.map +1 -1
  183. package/dist/default/skins/dist/default/minimal/tailwind/components/button.js +4 -3
  184. package/dist/default/skins/dist/default/minimal/tailwind/components/button.js.map +1 -1
  185. package/dist/default/skins/dist/default/minimal/tailwind/components/controls.js +1 -1
  186. package/dist/default/skins/dist/default/minimal/tailwind/components/controls.js.map +1 -1
  187. package/dist/default/skins/dist/default/minimal/tailwind/components/error.js +3 -3
  188. package/dist/default/skins/dist/default/minimal/tailwind/components/error.js.map +1 -1
  189. package/dist/default/skins/dist/default/minimal/tailwind/components/overlay.js +1 -1
  190. package/dist/default/skins/dist/default/minimal/tailwind/components/overlay.js.map +1 -1
  191. package/dist/default/skins/dist/default/minimal/tailwind/components/playback-rate.js +6 -0
  192. package/dist/default/skins/dist/default/minimal/tailwind/components/playback-rate.js.map +1 -0
  193. package/dist/default/skins/dist/default/minimal/tailwind/components/popup.js +4 -3
  194. package/dist/default/skins/dist/default/minimal/tailwind/components/popup.js.map +1 -1
  195. package/dist/default/skins/dist/default/minimal/tailwind/components/poster.js +16 -0
  196. package/dist/default/skins/dist/default/minimal/tailwind/components/poster.js.map +1 -0
  197. package/dist/default/skins/dist/default/minimal/tailwind/components/preview.js +14 -0
  198. package/dist/default/skins/dist/default/minimal/tailwind/components/preview.js.map +1 -0
  199. package/dist/default/skins/dist/default/minimal/tailwind/components/root.js +1 -1
  200. package/dist/default/skins/dist/default/minimal/tailwind/components/root.js.map +1 -1
  201. package/dist/default/skins/dist/default/minimal/tailwind/components/seek.js +1 -1
  202. package/dist/default/skins/dist/default/minimal/tailwind/components/seek.js.map +1 -1
  203. package/dist/default/skins/dist/default/minimal/tailwind/components/slider.js +1 -1
  204. package/dist/default/skins/dist/default/minimal/tailwind/components/slider.js.map +1 -1
  205. package/dist/default/skins/dist/default/minimal/tailwind/components/time.js +1 -1
  206. package/dist/default/skins/dist/default/minimal/tailwind/components/time.js.map +1 -1
  207. package/dist/default/skins/dist/default/minimal/tailwind/video.tailwind.js +33 -16
  208. package/dist/default/skins/dist/default/minimal/tailwind/video.tailwind.js.map +1 -1
  209. package/dist/default/skins/dist/default/{default/tailwind/components → shared/tailwind}/icon-state.js +6 -1
  210. package/dist/default/skins/dist/default/shared/tailwind/icon-state.js.map +1 -0
  211. package/dist/{dev/skins/dist/default/default/tailwind/components → default/skins/dist/default/shared/tailwind}/tooltip-state.js +1 -1
  212. package/dist/default/skins/dist/default/shared/tailwind/tooltip-state.js.map +1 -0
  213. package/dist/default/store/container-mixin.js +16 -60
  214. package/dist/default/store/container-mixin.js.map +1 -1
  215. package/dist/default/store/media-attach-mixin.js +45 -0
  216. package/dist/default/store/media-attach-mixin.js.map +1 -0
  217. package/dist/default/store/provider-mixin.js +99 -9
  218. package/dist/default/store/provider-mixin.js.map +1 -1
  219. package/dist/default/ui/popover/popover-element.js +54 -3
  220. package/dist/default/ui/popover/popover-element.js.map +1 -1
  221. package/dist/default/ui/time-slider/time-slider-element.js +1 -1
  222. package/dist/default/ui/time-slider/time-slider-element.js.map +1 -1
  223. package/dist/default/ui/tooltip/tooltip-element.js +53 -3
  224. package/dist/default/ui/tooltip/tooltip-element.js.map +1 -1
  225. package/dist/default/ui/tooltip/tooltip-group-element.js +4 -1
  226. package/dist/default/ui/tooltip/tooltip-group-element.js.map +1 -1
  227. package/dist/default/ui/volume-slider/volume-slider-element.js +3 -3
  228. package/dist/default/ui/volume-slider/volume-slider-element.js.map +1 -1
  229. package/dist/dev/_virtual/inline-css_src/define/audio/minimal-skin.js +1 -1
  230. package/dist/dev/_virtual/inline-css_src/define/audio/minimal-skin.js.map +1 -1
  231. package/dist/dev/_virtual/inline-css_src/define/audio/skin.js +1 -1
  232. package/dist/dev/_virtual/inline-css_src/define/audio/skin.js.map +1 -1
  233. package/dist/dev/_virtual/inline-css_src/define/background/skin.js +6 -0
  234. package/dist/dev/_virtual/inline-css_src/define/background/skin.js.map +1 -0
  235. package/dist/dev/_virtual/inline-css_src/define/base.js +6 -0
  236. package/dist/dev/_virtual/inline-css_src/define/base.js.map +1 -0
  237. package/dist/dev/_virtual/inline-css_src/define/shared.js +6 -0
  238. package/dist/dev/_virtual/inline-css_src/define/shared.js.map +1 -0
  239. package/dist/dev/_virtual/inline-css_src/define/video/minimal-skin.js +1 -1
  240. package/dist/dev/_virtual/inline-css_src/define/video/minimal-skin.js.map +1 -1
  241. package/dist/dev/_virtual/inline-css_src/define/video/skin.js +1 -1
  242. package/dist/dev/_virtual/inline-css_src/define/video/skin.js.map +1 -1
  243. package/dist/dev/define/audio/minimal-skin.css +237 -157
  244. package/dist/dev/define/audio/minimal-skin.d.ts.map +1 -1
  245. package/dist/dev/define/audio/minimal-skin.js +69 -64
  246. package/dist/dev/define/audio/minimal-skin.js.map +1 -1
  247. package/dist/dev/define/audio/minimal-skin.tailwind.d.ts.map +1 -1
  248. package/dist/dev/define/audio/minimal-skin.tailwind.js +73 -66
  249. package/dist/dev/define/audio/minimal-skin.tailwind.js.map +1 -1
  250. package/dist/dev/define/audio/skin.css +234 -153
  251. package/dist/dev/define/audio/skin.d.ts.map +1 -1
  252. package/dist/dev/define/audio/skin.js +61 -56
  253. package/dist/dev/define/audio/skin.js.map +1 -1
  254. package/dist/dev/define/audio/skin.tailwind.d.ts.map +1 -1
  255. package/dist/dev/define/audio/skin.tailwind.js +67 -61
  256. package/dist/dev/define/audio/skin.tailwind.js.map +1 -1
  257. package/dist/dev/define/background/skin.css +1 -1
  258. package/dist/dev/define/background/skin.d.ts.map +1 -1
  259. package/dist/dev/define/background/skin.js +13 -1
  260. package/dist/dev/define/background/skin.js.map +1 -1
  261. package/dist/dev/define/base.css +23 -0
  262. package/dist/dev/define/media/dash-video.d.ts +14 -0
  263. package/dist/dev/define/media/dash-video.d.ts.map +1 -0
  264. package/dist/dev/define/media/dash-video.js +14 -0
  265. package/dist/dev/define/media/dash-video.js.map +1 -0
  266. package/dist/dev/define/shared.css +13 -0
  267. package/dist/dev/define/skin-mixin.d.ts +2 -2
  268. package/dist/dev/define/skin-mixin.d.ts.map +1 -1
  269. package/dist/dev/define/skin-mixin.js +16 -33
  270. package/dist/dev/define/skin-mixin.js.map +1 -1
  271. package/dist/dev/define/video/minimal-skin.css +486 -301
  272. package/dist/dev/define/video/minimal-skin.d.ts.map +1 -1
  273. package/dist/dev/define/video/minimal-skin.js +110 -103
  274. package/dist/dev/define/video/minimal-skin.js.map +1 -1
  275. package/dist/dev/define/video/minimal-skin.tailwind.d.ts.map +1 -1
  276. package/dist/dev/define/video/minimal-skin.tailwind.js +123 -116
  277. package/dist/dev/define/video/minimal-skin.tailwind.js.map +1 -1
  278. package/dist/dev/define/video/skin.css +484 -289
  279. package/dist/dev/define/video/skin.d.ts.map +1 -1
  280. package/dist/dev/define/video/skin.js +105 -100
  281. package/dist/dev/define/video/skin.js.map +1 -1
  282. package/dist/dev/define/video/skin.tailwind.d.ts.map +1 -1
  283. package/dist/dev/define/video/skin.tailwind.js +114 -108
  284. package/dist/dev/define/video/skin.tailwind.js.map +1 -1
  285. package/dist/dev/icons/dist/render/default/index.js +2 -1
  286. package/dist/dev/icons/dist/render/default/index.js.map +1 -1
  287. package/dist/dev/icons/dist/render/minimal/index.js +2 -1
  288. package/dist/dev/icons/dist/render/minimal/index.js.map +1 -1
  289. package/dist/dev/index.d.ts +5 -4
  290. package/dist/dev/index.js +3 -2
  291. package/dist/dev/media/background-video/index.d.ts +8 -1
  292. package/dist/dev/media/background-video/index.d.ts.map +1 -1
  293. package/dist/dev/media/background-video/index.js +5 -1
  294. package/dist/dev/media/background-video/index.js.map +1 -1
  295. package/dist/dev/media/container-element.js +5 -2
  296. package/dist/dev/media/container-element.js.map +1 -1
  297. package/dist/dev/media/dash-video/index.d.ts +13 -0
  298. package/dist/dev/media/dash-video/index.d.ts.map +1 -0
  299. package/dist/dev/media/dash-video/index.js +26 -0
  300. package/dist/dev/media/dash-video/index.js.map +1 -0
  301. package/dist/dev/media/hls-video/index.d.ts +2 -1
  302. package/dist/dev/media/hls-video/index.d.ts.map +1 -1
  303. package/dist/dev/media/hls-video/index.js +2 -1
  304. package/dist/dev/media/hls-video/index.js.map +1 -1
  305. package/dist/dev/media/simple-hls-video/index.d.ts +2 -1
  306. package/dist/dev/media/simple-hls-video/index.d.ts.map +1 -1
  307. package/dist/dev/media/simple-hls-video/index.js +3 -1
  308. package/dist/dev/media/simple-hls-video/index.js.map +1 -1
  309. package/dist/dev/player/context.d.ts +16 -2
  310. package/dist/dev/player/context.d.ts.map +1 -1
  311. package/dist/dev/player/context.js +6 -2
  312. package/dist/dev/player/context.js.map +1 -1
  313. package/dist/dev/player/create-player.d.ts +1 -1
  314. package/dist/dev/player/create-player.js +11 -3
  315. package/dist/dev/player/create-player.js.map +1 -1
  316. package/dist/dev/skins/dist/default/default/tailwind/audio.tailwind.js +10 -26
  317. package/dist/dev/skins/dist/default/default/tailwind/audio.tailwind.js.map +1 -1
  318. package/dist/dev/skins/dist/default/default/tailwind/components/button.js +4 -3
  319. package/dist/dev/skins/dist/default/default/tailwind/components/button.js.map +1 -1
  320. package/dist/dev/skins/dist/default/default/tailwind/components/controls.js +1 -1
  321. package/dist/dev/skins/dist/default/default/tailwind/components/controls.js.map +1 -1
  322. package/dist/dev/skins/dist/default/default/tailwind/components/error.js +3 -3
  323. package/dist/dev/skins/dist/default/default/tailwind/components/error.js.map +1 -1
  324. package/dist/dev/skins/dist/default/default/tailwind/components/overlay.js +1 -1
  325. package/dist/dev/skins/dist/default/default/tailwind/components/overlay.js.map +1 -1
  326. package/dist/dev/skins/dist/default/default/tailwind/components/popup.js +3 -3
  327. package/dist/dev/skins/dist/default/default/tailwind/components/popup.js.map +1 -1
  328. package/dist/dev/skins/dist/default/default/tailwind/components/poster.js +16 -0
  329. package/dist/dev/skins/dist/default/default/tailwind/components/poster.js.map +1 -0
  330. package/dist/dev/skins/dist/default/default/tailwind/components/preview.js +13 -0
  331. package/dist/dev/skins/dist/default/default/tailwind/components/preview.js.map +1 -0
  332. package/dist/dev/skins/dist/default/default/tailwind/components/root.js +1 -1
  333. package/dist/dev/skins/dist/default/default/tailwind/components/root.js.map +1 -1
  334. package/dist/dev/skins/dist/default/default/tailwind/components/seek.js +1 -1
  335. package/dist/dev/skins/dist/default/default/tailwind/components/seek.js.map +1 -1
  336. package/dist/dev/skins/dist/default/default/tailwind/components/slider.js +1 -1
  337. package/dist/dev/skins/dist/default/default/tailwind/components/slider.js.map +1 -1
  338. package/dist/dev/skins/dist/default/default/tailwind/components/surface.js +1 -1
  339. package/dist/dev/skins/dist/default/default/tailwind/components/surface.js.map +1 -1
  340. package/dist/dev/skins/dist/default/default/tailwind/video.tailwind.js +27 -16
  341. package/dist/dev/skins/dist/default/default/tailwind/video.tailwind.js.map +1 -1
  342. package/dist/dev/skins/dist/default/minimal/tailwind/audio.tailwind.js +11 -24
  343. package/dist/dev/skins/dist/default/minimal/tailwind/audio.tailwind.js.map +1 -1
  344. package/dist/dev/skins/dist/default/minimal/tailwind/components/button.js +4 -3
  345. package/dist/dev/skins/dist/default/minimal/tailwind/components/button.js.map +1 -1
  346. package/dist/dev/skins/dist/default/minimal/tailwind/components/controls.js +1 -1
  347. package/dist/dev/skins/dist/default/minimal/tailwind/components/controls.js.map +1 -1
  348. package/dist/dev/skins/dist/default/minimal/tailwind/components/error.js +3 -3
  349. package/dist/dev/skins/dist/default/minimal/tailwind/components/error.js.map +1 -1
  350. package/dist/dev/skins/dist/default/minimal/tailwind/components/overlay.js +1 -1
  351. package/dist/dev/skins/dist/default/minimal/tailwind/components/overlay.js.map +1 -1
  352. package/dist/dev/skins/dist/default/minimal/tailwind/components/playback-rate.js +6 -0
  353. package/dist/dev/skins/dist/default/minimal/tailwind/components/playback-rate.js.map +1 -0
  354. package/dist/dev/skins/dist/default/minimal/tailwind/components/popup.js +4 -3
  355. package/dist/dev/skins/dist/default/minimal/tailwind/components/popup.js.map +1 -1
  356. package/dist/dev/skins/dist/default/minimal/tailwind/components/poster.js +16 -0
  357. package/dist/dev/skins/dist/default/minimal/tailwind/components/poster.js.map +1 -0
  358. package/dist/dev/skins/dist/default/minimal/tailwind/components/preview.js +14 -0
  359. package/dist/dev/skins/dist/default/minimal/tailwind/components/preview.js.map +1 -0
  360. package/dist/dev/skins/dist/default/minimal/tailwind/components/root.js +1 -1
  361. package/dist/dev/skins/dist/default/minimal/tailwind/components/root.js.map +1 -1
  362. package/dist/dev/skins/dist/default/minimal/tailwind/components/seek.js +1 -1
  363. package/dist/dev/skins/dist/default/minimal/tailwind/components/seek.js.map +1 -1
  364. package/dist/dev/skins/dist/default/minimal/tailwind/components/slider.js +1 -1
  365. package/dist/dev/skins/dist/default/minimal/tailwind/components/slider.js.map +1 -1
  366. package/dist/dev/skins/dist/default/minimal/tailwind/components/time.js +1 -1
  367. package/dist/dev/skins/dist/default/minimal/tailwind/components/time.js.map +1 -1
  368. package/dist/dev/skins/dist/default/minimal/tailwind/video.tailwind.js +33 -16
  369. package/dist/dev/skins/dist/default/minimal/tailwind/video.tailwind.js.map +1 -1
  370. package/dist/{default/skins/dist/default/minimal/tailwind/components → dev/skins/dist/default/shared/tailwind}/icon-state.js +6 -1
  371. package/dist/dev/skins/dist/default/shared/tailwind/icon-state.js.map +1 -0
  372. package/dist/dev/skins/dist/default/{minimal/tailwind/components → shared/tailwind}/tooltip-state.js +1 -1
  373. package/dist/dev/skins/dist/default/shared/tailwind/tooltip-state.js.map +1 -0
  374. package/dist/dev/store/container-mixin.d.ts +10 -5
  375. package/dist/dev/store/container-mixin.d.ts.map +1 -1
  376. package/dist/dev/store/container-mixin.js +16 -60
  377. package/dist/dev/store/container-mixin.js.map +1 -1
  378. package/dist/dev/store/media-attach-mixin.d.ts +19 -0
  379. package/dist/dev/store/media-attach-mixin.d.ts.map +1 -0
  380. package/dist/dev/store/media-attach-mixin.js +45 -0
  381. package/dist/dev/store/media-attach-mixin.js.map +1 -0
  382. package/dist/dev/store/provider-mixin.d.ts +19 -6
  383. package/dist/dev/store/provider-mixin.d.ts.map +1 -1
  384. package/dist/dev/store/provider-mixin.js +99 -9
  385. package/dist/dev/store/provider-mixin.js.map +1 -1
  386. package/dist/dev/ui/popover/popover-element.d.ts.map +1 -1
  387. package/dist/dev/ui/popover/popover-element.js +54 -3
  388. package/dist/dev/ui/popover/popover-element.js.map +1 -1
  389. package/dist/dev/ui/time-slider/time-slider-element.js +1 -1
  390. package/dist/dev/ui/time-slider/time-slider-element.js.map +1 -1
  391. package/dist/dev/ui/tooltip/tooltip-element.d.ts.map +1 -1
  392. package/dist/dev/ui/tooltip/tooltip-element.js +53 -3
  393. package/dist/dev/ui/tooltip/tooltip-element.js.map +1 -1
  394. package/dist/dev/ui/tooltip/tooltip-group-element.js +4 -1
  395. package/dist/dev/ui/tooltip/tooltip-group-element.js.map +1 -1
  396. package/dist/dev/ui/volume-slider/volume-slider-element.js +3 -3
  397. package/dist/dev/ui/volume-slider/volume-slider-element.js.map +1 -1
  398. package/package.json +24 -11
  399. package/dist/default/skins/dist/default/default/tailwind/components/icon-state.js.map +0 -1
  400. package/dist/default/skins/dist/default/default/tailwind/components/tooltip-state.js +0 -28
  401. package/dist/default/skins/dist/default/default/tailwind/components/tooltip-state.js.map +0 -1
  402. package/dist/default/skins/dist/default/minimal/tailwind/components/icon-state.js.map +0 -1
  403. package/dist/default/skins/dist/default/minimal/tailwind/components/tooltip-state.js +0 -28
  404. package/dist/default/skins/dist/default/minimal/tailwind/components/tooltip-state.js.map +0 -1
  405. package/dist/dev/skins/dist/default/default/tailwind/components/icon-state.js +0 -29
  406. package/dist/dev/skins/dist/default/default/tailwind/components/icon-state.js.map +0 -1
  407. package/dist/dev/skins/dist/default/default/tailwind/components/tooltip-state.js.map +0 -1
  408. package/dist/dev/skins/dist/default/minimal/tailwind/components/icon-state.js +0 -29
  409. package/dist/dev/skins/dist/default/minimal/tailwind/components/icon-state.js.map +0 -1
  410. package/dist/dev/skins/dist/default/minimal/tailwind/components/tooltip-state.js.map +0 -1
@@ -0,0 +1,3465 @@
1
+ import { n as isNil } from "../predicate-BG-dj_kF.js";
2
+ import { t as listen } from "../listen-BXAYCbZA.js";
3
+ import { t as MediaAttachMixin } from "../media-attach-mixin-ChyNp2eK.js";
4
+ import { n as DelegateMixin, t as CustomMediaMixin } from "../proxy-dR7IDk37.js";
5
+
6
//#region ../spf/dist/adapter-CflgYzjF.js
/**
 * Reactive state container with selectors, custom equality, and batched updates.
 *
 * Manages both immutable state values and mutable object references (e.g., HTMLMediaElement).
 */
// Brand symbol placed on every container instance so other modules can
// recognize state objects produced by this factory.
const STATE_SYMBOL = Symbol("@videojs/spf/state");
13
/**
 * Default equality check using SameValue semantics (Object.is).
 *
 * @param a - First value to compare
 * @param b - Second value to compare
 * @returns True when both values are identical per Object.is
 */
function defaultEquality(a, b) {
  const identical = Object.is(a, b);
  return identical;
}
19
/**
 * State container implementation.
 *
 * Holds a committed state snapshot, accumulates `patch()` calls into a single
 * pending snapshot, and flushes listeners once per microtask.
 */
var StateContainer = class {
  // Brand marker so external code can detect containers from this module.
  [STATE_SYMBOL] = true;
  // Last committed (flushed) state snapshot.
  #current;
  // Pending (patched but not yet flushed) snapshot; null when nothing is pending.
  #pending = null;
  // True while a microtask flush is scheduled but has not run yet.
  #pendingFlush = false;
  // Container-level equality: can veto an entire notification cycle in flush().
  #equalityFn;
  // Whole-state listeners, each called with the full snapshot on commit.
  #listeners = /* @__PURE__ */ new Set();
  // Selector subscriptions: { selector, listener, options } entries.
  #selectorListeners = /* @__PURE__ */ new Set();
  constructor(initial, config) {
    // Shallow-copy object initial state so patches never mutate the caller's object.
    this.#current = typeof initial === "object" && initial !== null ? { ...initial } : initial;
    this.#equalityFn = config?.equalityFn ?? defaultEquality;
  }
  get current() {
    // Reads observe pending (unflushed) changes immediately.
    return this.#pending ?? this.#current;
  }
  patch(partial) {
    const base = this.#pending ?? this.#current;
    // Primitive state: `partial` is the full replacement value.
    if (typeof base !== "object" || base === null) {
      const value = partial;
      if (!Object.is(base, value)) {
        this.#pending = value;
        this.#scheduleFlush();
      }
      return;
    }
    // Object state: shallow-merge changed own keys into a fresh snapshot.
    const next = { ...base };
    let changed = false;
    for (const key in partial) {
      if (!Object.hasOwn(partial, key)) continue;
      const value = partial[key];
      if (!Object.is(base[key], value)) {
        next[key] = value;
        changed = true;
      }
    }
    // Only schedule a flush when at least one key actually changed.
    if (changed) {
      this.#pending = next;
      this.#scheduleFlush();
    }
  }
  subscribe(selectorOrListener, maybeListener, options) {
    // Overload 1: subscribe(listener) — whole-state subscription.
    if (maybeListener === void 0) {
      const listener$1 = selectorOrListener;
      this.#listeners.add(listener$1);
      // Invoke immediately with the current (possibly pending) snapshot.
      listener$1(this.current);
      return () => {
        this.#listeners.delete(listener$1);
      };
    }
    // Overload 2: subscribe(selector, listener, options?) — slice subscription.
    const selector = selectorOrListener;
    const listener = maybeListener;
    const entry = {
      selector,
      listener,
      options: options ?? {}
    };
    this.#selectorListeners.add(entry);
    // Invoke immediately with the selected slice.
    listener(selector(this.current));
    return () => {
      this.#selectorListeners.delete(entry);
    };
  }
  flush() {
    if (this.#pending === null) return;
    const prev = this.#current;
    const next = this.#pending;
    this.#pending = null;
    this.#pendingFlush = false;
    // Container-level equality can suppress the whole notification cycle.
    if (this.#equalityFn(prev, next)) return;
    this.#current = next;
    for (const listener of this.#listeners) listener(this.#current);
    for (const entry of this.#selectorListeners) {
      const prevSelected = entry.selector(prev);
      const nextSelected = entry.selector(this.#current);
      // Per-entry equality (default Object.is) gates slice notifications.
      if (!(entry.options.equalityFn ?? Object.is)(prevSelected, nextSelected)) entry.listener(nextSelected);
    }
  }
  #scheduleFlush() {
    // Coalesce multiple patches in the same task into a single flush.
    if (this.#pendingFlush) return;
    this.#pendingFlush = true;
    queueMicrotask(() => this.flush());
  }
};
105
/**
 * Create a reactive state container.
 *
 * Patches are batched on a microtask boundary, so several `patch()` calls in
 * the same task produce a single listener notification.
 *
 * @example
 * ```typescript
 * const state = createState({ count: 0 });
 * state.subscribe((current) => console.log(current));
 * state.patch({ count: 1 });
 * state.patch({ count: 2 });
 * // Only one notification fires (with count: 2)
 * ```
 *
 * @example Selector subscriptions
 * ```typescript
 * const state = createState({ count: 0, name: 'test' });
 * // Only notified when count changes
 * state.subscribe(s => s.count, (count) => console.log(count));
 * ```
 *
 * @example Custom equality
 * ```typescript
 * const state = createState(
 *   { count: 0, name: 'test' },
 *   { equalityFn: (a, b) => a.count === b.count }
 * );
 * ```
 *
 * @param initial - Initial state value (objects are shallow-copied)
 * @param config - Optional configuration with a custom equality function
 * @returns A new state container
 */
function createState(initial, config) {
  const container = new StateContainer(initial, config);
  return container;
}
145
/**
 * Parse an HLS attribute list (`KEY=value` pairs) from a tag line.
 * Quoted values keep their inner text; unquoted values run up to the next comma.
 *
 * @param line - Raw attribute portion of a tag line
 * @returns Map of attribute name to raw string value
 */
function parseAttributeList(line) {
  const attributes = new Map();
  const pattern = /([A-Z0-9-]+)=(?:"([^"]*)"|([^,]*))/g;
  let match;
  while ((match = pattern.exec(line)) !== null) {
    const name = match[1];
    const quoted = match[2];
    const bare = match[3];
    if (name) attributes.set(name, quoted ?? bare ?? "");
  }
  return attributes;
}
158
/**
 * Parse a RESOLUTION attribute value of the form `WIDTHxHEIGHT`.
 *
 * @param value - Raw attribute value
 * @returns Parsed `{ width, height }`, or null when malformed
 */
function parseResolution(value) {
  const parsed = /^(\d+)x(\d+)$/.exec(value);
  if (parsed === null) return null;
  const [, widthText, heightText] = parsed;
  return {
    width: Number.parseInt(widthText, 10),
    height: Number.parseInt(heightText, 10)
  };
}
169
/**
 * Parse a FRAME-RATE attribute value into a rational frame rate.
 *
 * Well-known NTSC rates (23.976 / 29.97 / 59.94, within a 0.01 tolerance)
 * are snapped to their exact rational forms; every other positive rate is
 * rounded to an integer numerator.
 *
 * @param value - Raw FRAME-RATE attribute value
 * @returns Rational frame rate, or undefined when not a positive number
 */
function parseFrameRate(value) {
  const fps = Number.parseFloat(value);
  if (Number.isNaN(fps) || fps <= 0) return void 0;
  // NTSC family: snap near-matches to exact rationals.
  if (Math.abs(fps - 23.976) < .01) return {
    frameRateNumerator: 24e3,
    frameRateDenominator: 1001
  };
  if (Math.abs(fps - 29.97) < .01) return {
    frameRateNumerator: 3e4,
    frameRateDenominator: 1001
  };
  if (Math.abs(fps - 59.94) < .01) return {
    frameRateNumerator: 6e4,
    frameRateDenominator: 1001
  };
  // The original code branched on `fps % 1 === 0` but both branches returned
  // the identical rounded value — a single return is equivalent.
  return { frameRateNumerator: Math.round(fps) };
}
190
/**
 * Split a CODECS attribute into separate video and audio codec strings.
 * Recognizes AVC/HEVC prefixes for video and AAC (`mp4a.`) for audio.
 *
 * @param codecs - Comma-separated CODECS attribute value
 * @returns Object with optional `video` and `audio` codec strings
 */
function parseCodecs(codecs) {
  const result = {};
  const videoPrefixes = ["avc1.", "hvc1.", "hev1."];
  for (const raw of codecs.split(",")) {
    const codec = raw.trim();
    if (videoPrefixes.some((prefix) => codec.startsWith(prefix))) {
      result.video = codec;
    } else if (codec.startsWith("mp4a.")) {
      result.audio = codec;
    }
  }
  return result;
}
200
/**
 * Parse the duration from an #EXTINF value (`duration[,title]`).
 *
 * @param value - Text following the `#EXTINF:` prefix
 * @returns Duration in seconds, or 0 when unparseable
 */
function parseExtInfDuration(value) {
  const [durationText = value] = value.split(",");
  const parsed = Number.parseFloat(durationText);
  if (Number.isNaN(parsed)) return 0;
  return parsed;
}
208
/**
 * Parse a BYTERANGE value of the form `length[@offset]`.
 * When the offset is omitted, the range continues from `previousEnd`.
 *
 * @param value - Raw byte-range string
 * @param previousEnd - Start position when no explicit offset is present
 * @returns Inclusive `{ start, end }` range, or null when invalid/unanchored
 */
function parseByteRange(value, previousEnd) {
  const parsed = /^(\d+)(?:@(\d+))?$/.exec(value);
  if (!parsed) return null;
  const [, lengthText, offsetText] = parsed;
  const length = Number.parseInt(lengthText, 10);
  if (Number.isNaN(length)) return null;
  let start;
  if (offsetText !== void 0) {
    start = Number.parseInt(offsetText, 10);
    if (Number.isNaN(start)) return null;
  } else {
    // No explicit offset: continue from the previous range, if known.
    if (previousEnd === void 0) return null;
    start = previousEnd;
  }
  return {
    start,
    end: start + length - 1
  };
}
229
/**
 * Wrap a raw attribute string in a typed accessor object.
 *
 * @param line - Raw attribute portion of a tag line
 * @returns Accessors for string/int/float/bool/resolution/frame-rate reads
 */
function createAttributeList(line) {
  const attributes = parseAttributeList(line);
  const read = (key) => attributes.get(key);
  return {
    get(key) {
      return read(key);
    },
    getInt(key, defaultValue) {
      const raw = read(key);
      if (raw === void 0) return defaultValue;
      const parsed = Number.parseInt(raw, 10);
      if (Number.isNaN(parsed)) return defaultValue;
      return parsed;
    },
    getFloat(key, defaultValue) {
      const raw = read(key);
      if (raw === void 0) return defaultValue;
      const parsed = Number.parseFloat(raw);
      if (Number.isNaN(parsed)) return defaultValue;
      return parsed;
    },
    getBool(key) {
      // HLS booleans are the literal string "YES".
      return read(key) === "YES";
    },
    getResolution(key) {
      const raw = read(key);
      if (!raw) return void 0;
      return parseResolution(raw) ?? void 0;
    },
    getFrameRate(key) {
      const raw = read(key);
      if (!raw) return void 0;
      return parseFrameRate(raw);
    }
  };
}
265
/**
 * Match a line against `#TAG:` and return its parsed attributes.
 *
 * @param line - Trimmed playlist line
 * @param tag - Tag name without the leading `#` or trailing `:`
 * @returns Attribute accessor, or null when the line is a different tag
 */
function matchTag(line, tag) {
  const prefix = `#${tag}:`;
  if (!line.startsWith(prefix)) return null;
  const attributeText = line.slice(prefix.length);
  return createAttributeList(attributeText);
}
274
/**
 * Resolve a possibly-relative URL against a base URL via the native URL API.
 *
 * @param url - Absolute or relative URL
 * @param baseUrl - Base URL used for relative resolution
 * @returns Fully resolved absolute URL string
 */
function resolveUrl(url, baseUrl) {
  const resolved = new URL(url, baseUrl);
  return resolved.href;
}
280
/**
 * Parse HLS media playlist and resolve track with segments.
 *
 * Takes an unresolved track (from multivariant playlist) and media playlist text,
 * returns a HAM-compliant resolved track with segments.
 *
 * @param text - Media playlist text content
 * @param unresolved - Unresolved track from parseMultivariantPlaylist
 * @returns Resolved track with segments (type inferred from input)
 */
function parseMediaPlaylist(text, unresolved) {
	const lines = text.split(/\r?\n/);
	const baseUrl = unresolved.url;
	const segments = [];
	// Init-segment info from #EXT-X-MAP (applies to all segments).
	let initSegmentUrl;
	let initSegmentByteRange;
	// Pending per-segment state set by #EXTINF / #EXT-X-BYTERANGE, consumed
	// when the following URI line is seen.
	let currentDuration = 0;
	let currentByteRange;
	// Running timeline position and index used for segment ids.
	let currentTime = 0;
	let segmentIndex = 0;
	// End of the last byte range, for offset-less #EXT-X-BYTERANGE values.
	let previousByteRangeEnd;
	for (const line of lines) {
		const trimmed = line.trim();
		// Skip blanks and plain comments (non-#EXT hash lines).
		if (!trimmed || trimmed.startsWith("#") && !trimmed.startsWith("#EXT")) continue;
		// Tags with no segment-level effect are ignored.
		if (trimmed === "#EXTM3U" || trimmed.startsWith("#EXT-X-VERSION:") || trimmed.startsWith("#EXT-X-TARGETDURATION:") || trimmed.startsWith("#EXT-X-PLAYLIST-TYPE:") || trimmed.startsWith("#EXT-X-INDEPENDENT-SEGMENTS")) continue;
		const mapAttrs = matchTag(trimmed, "EXT-X-MAP");
		if (mapAttrs) {
			const uri = mapAttrs.get("URI");
			if (uri) {
				initSegmentUrl = resolveUrl(uri, baseUrl);
				const byteRangeStr = mapAttrs.get("BYTERANGE");
				// MAP byte ranges are always explicit; 0 is a dummy previousEnd.
				if (byteRangeStr) initSegmentByteRange = parseByteRange(byteRangeStr, 0) ?? void 0;
			}
			continue;
		}
		if (trimmed.startsWith("#EXTINF:")) {
			currentDuration = parseExtInfDuration(trimmed.slice(8));
			continue;
		}
		if (trimmed.startsWith("#EXT-X-BYTERANGE:")) {
			currentByteRange = parseByteRange(trimmed.slice(17), previousByteRangeEnd) ?? void 0;
			continue;
		}
		if (trimmed === "#EXT-X-ENDLIST") continue;
		// A non-tag line after #EXTINF is the segment URI.
		if (!trimmed.startsWith("#") && currentDuration > 0) {
			const segment = {
				id: `segment-${segmentIndex}`,
				url: resolveUrl(trimmed, baseUrl),
				duration: currentDuration,
				startTime: currentTime
			};
			if (currentByteRange) {
				segment.byteRange = currentByteRange;
				// Next offset-less BYTERANGE continues right after this one.
				previousByteRangeEnd = currentByteRange.end + 1;
			} else previousByteRangeEnd = void 0;
			segments.push(segment);
			currentTime += currentDuration;
			segmentIndex++;
			// Reset pending per-segment state.
			currentDuration = 0;
			currentByteRange = void 0;
		}
	}
	// Sum of all segment durations.
	const totalDuration = currentTime;
	// Text tracks without an init segment get no initialization; media tracks
	// without #EXT-X-MAP get an empty-URL placeholder.
	const initialization = unresolved.type === "text" && !initSegmentUrl ? void 0 : initSegmentUrl ? {
		url: initSegmentUrl,
		...initSegmentByteRange ? { byteRange: initSegmentByteRange } : {}
	} : { url: "" };
	return {
		...unresolved,
		startTime: 0,
		duration: totalDuration,
		segments,
		initialization
	};
}
355
/**
 * Generate a unique string ID for HAM objects.
 *
 * Combines a millisecond timestamp with a random suffix; the result has the
 * shape `timestamp-random` and contains no decimals.
 *
 * @returns Unique string ID, e.g. "1738423156789-542891"
 */
function generateId() {
  const timestamp = Date.now();
  const suffix = Math.floor(Math.random() * 1e6);
  return `${timestamp}-${suffix}`;
}
371
/**
 * Parse HLS multivariant playlist into a Presentation.
 *
 * Returns Presentation with partially resolved tracks (no segment information).
 * Tracks contain metadata from multivariant playlist (bandwidth, resolution, codecs)
 * but segment information is added when media playlists are fetched.
 *
 * @param text - Raw playlist text content
 * @param unresolved - Unresolved presentation (contains URL for base URL resolution)
 * @returns Presentation with partially resolved tracks (duration is undefined)
 */
function parseMultivariantPlaylist(text, unresolved) {
	const baseUrl = unresolved.url;
	const lines = text.split(/\r?\n/);
	// Variant streams (#EXT-X-STREAM-INF + URI) and renditions (#EXT-X-MEDIA).
	const streams = [];
	const audioRenditions = [];
	const subtitleRenditions = [];
	// Attributes of the most recent #EXT-X-STREAM-INF awaiting its URI line.
	let pendingStreamInfo = null;
	for (const line of lines) {
		const trimmed = line.trim();
		// Skip blanks and plain comments (non-#EXT hash lines).
		if (!trimmed || trimmed.startsWith("#") && !trimmed.startsWith("#EXT")) continue;
		if (trimmed === "#EXTM3U" || trimmed.startsWith("#EXT-X-VERSION:") || trimmed.startsWith("#EXT-X-INDEPENDENT-SEGMENTS")) continue;
		const mediaAttrs = matchTag(trimmed, "EXT-X-MEDIA");
		if (mediaAttrs) {
			const type = mediaAttrs.get("TYPE");
			const groupId = mediaAttrs.get("GROUP-ID");
			const name = mediaAttrs.get("NAME");
			// Audio renditions may omit URI (audio muxed into the variant).
			if (type === "AUDIO" && groupId && name) {
				const uri = mediaAttrs.get("URI");
				audioRenditions.push({
					groupId,
					name,
					language: mediaAttrs.get("LANGUAGE"),
					uri: uri ? resolveUrl(uri, baseUrl) : void 0,
					default: mediaAttrs.getBool("DEFAULT"),
					autoselect: mediaAttrs.getBool("AUTOSELECT")
				});
			}
			// Subtitle renditions require a URI to be usable.
			if (type === "SUBTITLES" && groupId && name) {
				const uri = mediaAttrs.get("URI");
				if (uri) subtitleRenditions.push({
					groupId,
					name,
					language: mediaAttrs.get("LANGUAGE"),
					uri: resolveUrl(uri, baseUrl),
					default: mediaAttrs.getBool("DEFAULT"),
					autoselect: mediaAttrs.getBool("AUTOSELECT"),
					forced: mediaAttrs.getBool("FORCED")
				});
			}
			continue;
		}
		const streamInfAttrs = matchTag(trimmed, "EXT-X-STREAM-INF");
		if (streamInfAttrs) {
			pendingStreamInfo = {
				bandwidth: streamInfAttrs.getInt("BANDWIDTH", 0),
				resolution: streamInfAttrs.getResolution("RESOLUTION"),
				codecs: streamInfAttrs.get("CODECS"),
				frameRate: streamInfAttrs.getFrameRate("FRAME-RATE"),
				audioGroupId: streamInfAttrs.get("AUDIO")
			};
			continue;
		}
		// A non-tag line after #EXT-X-STREAM-INF is the variant's URI.
		if (!trimmed.startsWith("#") && pendingStreamInfo) {
			streams.push({
				...pendingStreamInfo,
				uri: resolveUrl(trimmed, baseUrl)
			});
			pendingStreamInfo = null;
		}
	}
	// Split variants into video streams and audio-only streams: a single-codec
	// variant whose codec is audio (and not video) is treated as audio-only.
	const videoStreams = [];
	const audioOnlyStreams = [];
	for (const stream of streams) {
		if (!stream.codecs) {
			videoStreams.push(stream);
			continue;
		}
		const parsedCodecs = parseCodecs(stream.codecs);
		if (stream.codecs.split(",").length === 1) if (parsedCodecs.audio && !parsedCodecs.video) audioOnlyStreams.push(stream);
		else videoStreams.push(stream);
		else videoStreams.push(stream);
	}
	const videoTracks = videoStreams.map((stream) => {
		const codecs = stream.codecs ? parseCodecs(stream.codecs) : void 0;
		const track = {
			type: "video",
			id: generateId(),
			url: stream.uri,
			bandwidth: stream.bandwidth,
			mimeType: "video/mp4",
			codecs: []
		};
		// Optional fields are only set when present in the playlist.
		if (stream.resolution?.width !== void 0) track.width = stream.resolution.width;
		if (stream.resolution?.height !== void 0) track.height = stream.resolution.height;
		if (codecs?.video) track.codecs = [codecs.video];
		if (stream.frameRate) track.frameRate = stream.frameRate;
		if (stream.audioGroupId) track.audioGroupId = stream.audioGroupId;
		return track;
	});
	// Audio-only variants become synthetic audio tracks with fixed defaults
	// (sampleRate/channels are assumed defaults, not parsed from the playlist).
	const audioOnlyTracks = audioOnlyStreams.map((stream) => {
		const codecs = stream.codecs ? parseCodecs(stream.codecs) : void 0;
		return {
			type: "audio",
			id: generateId(),
			url: stream.uri,
			bandwidth: stream.bandwidth,
			mimeType: "audio/mp4",
			codecs: codecs?.audio ? [codecs.audio] : [],
			groupId: stream.audioGroupId || "default",
			name: "Default",
			sampleRate: 48e3,
			channels: 2
		};
	});
	const audioTracks = [...audioRenditions.map((rendition) => {
		// Borrow the audio codec from the first variant referencing this group.
		let audioCodecs;
		for (const stream of streams) if (stream.audioGroupId === rendition.groupId && stream.codecs) {
			const codecs = parseCodecs(stream.codecs);
			if (codecs.audio) {
				audioCodecs = [codecs.audio];
				break;
			}
		}
		const track = {
			type: "audio",
			id: generateId(),
			url: rendition.uri ?? "",
			groupId: rendition.groupId,
			name: rendition.name,
			mimeType: "audio/mp4",
			bandwidth: 0,
			sampleRate: 48e3,
			channels: 2,
			codecs: []
		};
		if (rendition.language) track.language = rendition.language;
		if (audioCodecs) track.codecs = audioCodecs;
		if (rendition.default) track.default = rendition.default;
		if (rendition.autoselect) track.autoselect = rendition.autoselect;
		return track;
	}), ...audioOnlyTracks];
	const textTracks = subtitleRenditions.map((rendition) => {
		const track = {
			type: "text",
			id: generateId(),
			url: rendition.uri,
			groupId: rendition.groupId,
			label: rendition.name,
			kind: "subtitles",
			mimeType: "text/vtt",
			bandwidth: 0
		};
		if (rendition.language) track.language = rendition.language;
		// Only DEFAULT=YES + AUTOSELECT=YES marks a text track as default.
		if (rendition.default && rendition.autoselect) track.default = true;
		if (rendition.autoselect) track.autoselect = rendition.autoselect;
		if (rendition.forced) track.forced = rendition.forced;
		return track;
	});
	// One selection set per media type, each wrapping a single switching set.
	const selectionSets = [];
	if (videoTracks.length > 0) {
		const videoSwitchingSet = {
			id: generateId(),
			type: "video",
			tracks: videoTracks
		};
		const videoSelectionSet = {
			id: generateId(),
			type: "video",
			switchingSets: [videoSwitchingSet]
		};
		selectionSets.push(videoSelectionSet);
	}
	if (audioTracks.length > 0) {
		const audioSwitchingSet = {
			id: generateId(),
			type: "audio",
			tracks: audioTracks
		};
		const audioSelectionSet = {
			id: generateId(),
			type: "audio",
			switchingSets: [audioSwitchingSet]
		};
		selectionSets.push(audioSelectionSet);
	}
	if (textTracks.length > 0) {
		const textSwitchingSet = {
			id: generateId(),
			type: "text",
			tracks: textTracks
		};
		const textSelectionSet = {
			id: generateId(),
			type: "text",
			switchingSets: [textSwitchingSet]
		};
		selectionSets.push(textSelectionSet);
	}
	return {
		id: generateId(),
		url: unresolved.url,
		startTime: 0,
		selectionSets
	};
}
577
/**
 * Exponentially Weighted Moving Average (EWMA)
 *
 * Pure functional implementation of EWMA calculations.
 * Based on Shaka Player's EWMA algorithm.
 */
/**
 * Calculate the decay factor (alpha) from a half-life.
 *
 * Alpha controls how quickly old samples lose influence:
 * - near 1: slow decay (long memory)
 * - near 0: fast decay (short memory)
 *
 * @param halfLife - Weighted quantity of prior samples making up half of a
 *   new estimate; must be positive
 * @returns Alpha value in (0, 1)
 *
 * @example
 * const alpha = calculateAlpha(2); // ≈ 0.7071 for 2-second half-life
 */
function calculateAlpha(halfLife) {
  const lnHalf = Math.log(.5);
  return Math.exp(lnHalf / halfLife);
}
600
/**
 * Calculate an exponentially weighted moving average step.
 *
 * Blends a new sample into the previous estimate; heavier (longer) samples
 * pull the estimate further toward the new value.
 *
 * @param prevEstimate - Previous EWMA estimate
 * @param value - New sample value to incorporate
 * @param weight - Sample weight (typically duration in seconds)
 * @param halfLife - Half-life for decay (typically 2-5 seconds)
 * @returns Updated EWMA estimate
 *
 * @example
 * let estimate = 0;
 * estimate = calculateEwma(estimate, 1_000_000, 1, 2); // first sample
 */
function calculateEwma(prevEstimate, value, weight, halfLife) {
  // Retention of the old estimate shrinks exponentially with sample weight.
  const retention = calculateAlpha(halfLife) ** weight;
  const contribution = value * (1 - retention);
  return contribution + retention * prevEstimate;
}
621
/**
 * Apply zero-factor correction to a raw EWMA estimate.
 *
 * Compensates for the downward bias of estimates that started from zero.
 * As totalWeight grows the correction factor approaches 1 and the raw
 * estimate needs less adjustment.
 *
 * @param estimate - Raw (uncorrected) EWMA estimate
 * @param totalWeight - Accumulated weight from all samples
 * @param halfLife - Half-life used in the EWMA calculation
 * @returns Corrected estimate, or 0 when totalWeight is 0
 *
 * @example
 * const raw = calculateEwma(0, 1_000_000, 1, 2);
 * const corrected = applyZeroFactor(raw, 1, 2); // ≈ 1_000_000
 */
function applyZeroFactor(estimate, totalWeight, halfLife) {
  if (totalWeight === 0) return 0;
  const remaining = calculateAlpha(halfLife) ** totalWeight;
  return estimate / (1 - remaining);
}
643
/**
 * Default bandwidth estimator configuration.
 *
 * Values match Shaka Player defaults based on experimentation.
 */
const DEFAULT_BANDWIDTH_CONFIG = {
	// Half-life of the fast EWMA (in sample weight; sampleBandwidth uses seconds).
	fastHalfLife: 2,
	// Half-life of the slow EWMA.
	slowHalfLife: 5,
	// Below this many total sampled bytes, getBandwidthEstimate returns the caller's default.
	minTotalBytes: 128e3,
	// Samples smaller than this many bytes are excluded from the EWMAs.
	minBytes: 16e3,
	// Samples shorter than this are excluded; compared against durationMs, so
	// milliseconds — presumably filtering near-instant (cached) responses.
	minDuration: 5
};
655
/**
 * Incorporate one segment download into the bandwidth estimator state.
 *
 * Samples are filtered before touching the EWMAs:
 * - too few bytes (TTFB-dominated small segments)
 * - too short a duration (likely cached responses)
 * Filtered samples still count toward `bytesSampled` so the estimator can
 * eventually leave its default-estimate phase.
 *
 * @param state - Current estimator state
 * @param durationMs - Download duration in milliseconds
 * @param numBytes - Number of bytes downloaded
 * @param config - Optional estimator configuration (defaults used otherwise)
 * @returns New estimator state (unchanged EWMAs when the sample is filtered)
 */
function sampleBandwidth(state, durationMs, numBytes, config = DEFAULT_BANDWIDTH_CONFIG) {
  const bytesSampled = state.bytesSampled + numBytes;
  const tooSmall = numBytes < config.minBytes;
  const tooShort = durationMs < config.minDuration;
  if (tooSmall || tooShort) return {
    ...state,
    bytesSampled
  };
  // bits per second: (bytes * 8) / (ms / 1000) == 8000 * bytes / ms.
  const bandwidthBps = 8e3 * numBytes / durationMs;
  const weight = durationMs / 1e3;
  return {
    fastEstimate: calculateEwma(state.fastEstimate, bandwidthBps, weight, config.fastHalfLife),
    fastTotalWeight: state.fastTotalWeight + weight,
    slowEstimate: calculateEwma(state.slowEstimate, bandwidthBps, weight, config.slowHalfLife),
    slowTotalWeight: state.slowTotalWeight + weight,
    bytesSampled
  };
}
695
/**
 * Get the current bandwidth estimate in bits per second.
 *
 * Returns the minimum of the corrected fast and slow EWMA estimates, which
 * yields asymmetric adaptation: drops are tracked quickly (fast EWMA wins),
 * rises are trusted slowly (slow EWMA wins).
 *
 * Until enough bytes have been sampled, the caller-provided default is used.
 *
 * @param state - Current estimator state
 * @param defaultEstimate - Fallback estimate before sufficient samples (bps)
 * @param config - Optional estimator configuration (defaults used otherwise)
 * @returns Bandwidth estimate in bits per second
 */
function getBandwidthEstimate(state, defaultEstimate, config = DEFAULT_BANDWIDTH_CONFIG) {
  // Not enough data yet — trust the caller's default.
  if (state.bytesSampled < config.minTotalBytes) return defaultEstimate;
  const fast = applyZeroFactor(state.fastEstimate, state.fastTotalWeight, config.fastHalfLife);
  const slow = applyZeroFactor(state.slowEstimate, state.slowTotalWeight, config.slowHalfLife);
  return Math.min(fast, slow);
}
719
/**
 * Default quality selection configuration.
 * Values match Shaka Player upgrade threshold (0.85 = 15% headroom).
 */
// safetyMargin: a track is selectable when bandwidth >= track.bandwidth / safetyMargin.
const DEFAULT_QUALITY_CONFIG = { safetyMargin: .85 };
724
/**
 * Select the best video track for the current bandwidth estimate.
 *
 * Picks the highest-bandwidth track that fits with safety headroom
 * (`currentBandwidth >= track.bandwidth / safetyMargin`); ties on bandwidth
 * are broken by resolution. When no track fits, the lowest-bandwidth track
 * is returned as a floor.
 *
 * @param tracks - Available video tracks (order does not matter)
 * @param currentBandwidth - Current bandwidth estimate in bits per second
 * @param config - Optional quality selection configuration
 * @returns Selected track, or undefined when `tracks` is empty
 *
 * @example
 * // With 2.5 Mbps, selects 720p (1080p needs 4M/0.85 ≈ 4.7 Mbps)
 * const selected = selectQuality(tracks, 2_500_000);
 */
function selectQuality(tracks, currentBandwidth, config = DEFAULT_QUALITY_CONFIG) {
  if (tracks.length === 0) return;
  const byBandwidth = [...tracks].sort((a, b) => a.bandwidth - b.bandwidth);
  let best;
  for (const candidate of byBandwidth) {
    // Require headroom: the track may use at most `safetyMargin` of the estimate.
    if (currentBandwidth < candidate.bandwidth / config.safetyMargin) continue;
    if (!best) {
      best = candidate;
    } else if (candidate.bandwidth > best.bandwidth) {
      best = candidate;
    } else if (candidate.bandwidth === best.bandwidth && hasHigherResolution(candidate, best)) {
      best = candidate;
    }
  }
  // Nothing affordable: fall back to the lowest-bandwidth track.
  return best ?? byBandwidth[0];
}
756
/**
 * Compare two tracks by total pixel count (width × height).
 * Missing dimensions are treated as 0.
 *
 * @param trackA - First track to compare
 * @param trackB - Second track to compare
 * @returns True when trackA covers more pixels than trackB
 */
function hasHigherResolution(trackA, trackB) {
  const pixelsA = (trackA.width ?? 0) * (trackA.height ?? 0);
  const pixelsB = (trackB.width ?? 0) * (trackB.height ?? 0);
  return pixelsA > pixelsB;
}
767
/**
 * Default back buffer configuration.
 */
// keepSegments: number of already-played segments retained behind the playhead.
const DEFAULT_BACK_BUFFER_CONFIG = { keepSegments: 2 };
771
/**
 * Calculate the back-buffer flush point.
 *
 * Keeps a fixed number of segments behind the playhead and returns the start
 * time of the oldest segment to retain; everything in [0, flushEnd) may be
 * flushed from the SourceBuffer.
 *
 * @param segments - Available segments (sorted by startTime)
 * @param currentTime - Current playback position in seconds
 * @param config - Optional back buffer configuration
 * @returns Time in seconds to flush up to (flush range: [0, flushEnd))
 *
 * @example
 * // Playing at 18s with 6s segments and keepSegments=2 → returns 6
 * // (flush [0, 6), keep [6, 18))
 */
function calculateBackBufferFlushPoint(segments, currentTime, config = DEFAULT_BACK_BUFFER_CONFIG) {
  if (segments.length === 0) return 0;
  const played = segments.filter((segment) => segment.startTime < currentTime);
  if (played.length === 0) return 0;
  const flushCount = played.length - config.keepSegments;
  if (flushCount <= 0) return 0;
  // Defensive: flushing every played segment means flushing up to the playhead.
  if (flushCount >= played.length) return currentTime;
  return played[flushCount].startTime;
}
808
/**
 * Default forward buffer configuration.
 */
// bufferDuration: seconds of media to keep buffered ahead of the playhead.
const DEFAULT_FORWARD_BUFFER_CONFIG = { bufferDuration: 30 };
812
/**
 * NOTE(build artifact): the bundler hoisted the JSDoc for getSegmentsToLoad
 * (defined below) above this function. In short, getSegmentsToLoad determines
 * which segments to load to maintain the target forward-buffer duration —
 * it fills post-seek gaps and extends the buffer toward
 * currentTime + bufferDuration, skipping segments already buffered.
 */
/**
 * Calculate the start time from which to flush forward-buffer content.
 *
 * Content starting at or beyond `currentTime + bufferDuration` is not needed
 * for the current position; flushing it prevents unbounded accumulation of
 * scattered SourceBuffer content after seeks, which can cause
 * QuotaExceededError on long-form content.
 *
 * @param bufferedSegments - Segments currently tracked in the buffer model
 * @param currentTime - Current playback position in seconds
 * @param config - Optional forward buffer configuration
 * @returns Start time to flush from (flush range: [flushStart, Infinity)),
 *   or Infinity when nothing lies beyond the threshold
 *
 * @example
 * // Playing at 0s, buffered [0,6,12,18,24,30,36], bufferDuration=30
 * // → returns 30: flush [30, Infinity), keep [0, 30)
 */
function calculateForwardFlushPoint(bufferedSegments, currentTime, config = DEFAULT_FORWARD_BUFFER_CONFIG) {
  if (bufferedSegments.length === 0) return Infinity;
  const keepUntil = currentTime + config.bufferDuration;
  // Earliest start time at or beyond the keep window, if any.
  let flushStart = Infinity;
  for (const segment of bufferedSegments) {
    if (segment.startTime >= keepUntil && segment.startTime < flushStart) {
      flushStart = segment.startTime;
    }
  }
  return flushStart;
}
865
/**
 * Determine which segments must be fetched to fill the forward buffer window
 * [currentTime, currentTime + bufferDuration): any segment overlapping that
 * window whose startTime is not already buffered.
 *
 * @param segments - All available segments from the playlist
 * @param bufferedSegments - Segments already buffered (matched by startTime)
 * @param currentTime - Current playback position in seconds
 * @param config - Optional forward buffer configuration
 * @returns Segments to load, in playlist order (empty if buffer is sufficient)
 */
function getSegmentsToLoad(segments, bufferedSegments, currentTime, config = DEFAULT_FORWARD_BUFFER_CONFIG) {
  if (segments.length === 0) return [];
  const windowEnd = currentTime + config.bufferDuration;
  // Dedupe by startTime: a segment counts as buffered when a buffered entry
  // shares its exact start time.
  const bufferedStarts = new Set();
  for (const { startTime } of bufferedSegments) bufferedStarts.add(startTime);
  const overlapsWindow = ({ startTime, duration }) =>
    startTime < windowEnd && startTime + duration > currentTime;
  return segments.filter((seg) => overlapsWindow(seg) && !bufferedStarts.has(seg.startTime));
}
876
/**
 * Type guard: a track is resolved once its media playlist has been fetched,
 * i.e. it carries a `segments` property.
 */
function isResolvedTrack(track) {
  const resolved = "segments" in track;
  return resolved;
}
879
/**
 * Check whether a presentation has a known duration (set once at least one
 * track has resolved). Narrows the type to include a required duration.
 */
function hasPresentationDuration(presentation) {
  const { duration } = presentation;
  return duration !== void 0;
}
886
+ /**
887
+ * MediaSource Setup
888
+ *
889
+ * Utilities for creating and configuring MediaSource/ManagedMediaSource
890
+ * for MSE (Media Source Extensions) playback.
891
+ *
892
+ * Global ManagedMediaSource types are defined in ./mediasource.d.ts
893
+ */
894
/**
 * Feature-detect the MediaSource API in the current environment.
 *
 * @returns True when a global MediaSource constructor exists
 */
function supportsMediaSource() {
  const available = typeof MediaSource !== "undefined";
  return available;
}
900
/**
 * Feature-detect the ManagedMediaSource API (newer Safari API with better
 * lifecycle management).
 *
 * @returns True when a global ManagedMediaSource constructor exists
 */
function supportsManagedMediaSource() {
  const available = typeof ManagedMediaSource !== "undefined";
  return available;
}
907
/**
 * Create a MediaSource or ManagedMediaSource instance.
 *
 * Prefers ManagedMediaSource only when explicitly requested AND supported;
 * otherwise falls back to the standard MediaSource.
 *
 * @param options - Creation options ({ preferManaged })
 * @returns A MediaSource or ManagedMediaSource instance
 * @throws Error if no MediaSource API is available
 *
 * @example
 * const mediaSource = createMediaSource();
 * attachMediaSource(mediaSource, document.querySelector('video'));
 */
function createMediaSource(options = {}) {
  const preferManaged = options.preferManaged ?? false;
  if (preferManaged && supportsManagedMediaSource()) {
    return new ManagedMediaSource();
  }
  if (supportsMediaSource()) {
    return new MediaSource();
  }
  throw new Error("MediaSource API is not supported");
}
925
/**
 * Attach a MediaSource to an HTMLMediaElement.
 *
 * ManagedMediaSource (Safari) is attached via `srcObject`; a regular
 * MediaSource goes through `URL.createObjectURL` + `src`.
 *
 * @param mediaSource - The MediaSource to attach
 * @param mediaElement - The media element to attach to
 * @returns Object with the object URL ('' for ManagedMediaSource) and a
 * detach() function that unhooks the source and resets the element
 *
 * @example
 * const { detach } = attachMediaSource(createMediaSource(), videoElement);
 * // ... later:
 * detach();
 */
function attachMediaSource(mediaSource, mediaElement) {
  const isManaged = supportsManagedMediaSource() && mediaSource instanceof ManagedMediaSource;
  if (isManaged) {
    // ManagedMediaSource requires remote playback to be disabled.
    mediaElement.disableRemotePlayback = true;
    mediaElement.srcObject = mediaSource;
    return {
      url: "",
      detach: () => {
        mediaElement.srcObject = null;
        mediaElement.load();
      },
    };
  }
  const objectUrl = URL.createObjectURL(mediaSource);
  mediaElement.src = objectUrl;
  return {
    url: objectUrl,
    detach: () => {
      mediaElement.removeAttribute("src");
      mediaElement.load();
      // Revoke last so the element has already let go of the URL.
      URL.revokeObjectURL(objectUrl);
    },
  };
}
967
/**
 * Wait for a MediaSource to reach the 'open' state.
 * Resolves immediately if already open; rejects with an AbortError when the
 * optional signal aborts first. Listener cleanup is handled via an internal
 * AbortController so whichever outcome wins removes both listeners.
 *
 * @param mediaSource - The MediaSource to wait for
 * @param signal - Optional AbortSignal for cancellation
 * @returns Promise that resolves once the MediaSource is open
 */
function waitForSourceOpen(mediaSource, signal) {
  return new Promise((resolve, reject) => {
    if (mediaSource.readyState === "open") {
      resolve();
      return;
    }
    if (signal?.aborted) {
      reject(new DOMException("Aborted", "AbortError"));
      return;
    }
    // One internal controller tears down both listeners regardless of which
    // event fires first.
    const internal = new AbortController();
    const listenerOptions = { signal: internal.signal };
    mediaSource.addEventListener(
      "sourceopen",
      () => {
        internal.abort();
        resolve();
      },
      listenerOptions
    );
    signal?.addEventListener(
      "abort",
      () => {
        internal.abort();
        reject(new DOMException("Aborted", "AbortError"));
      },
      listenerOptions
    );
  });
}
1003
/**
 * Create a SourceBuffer on an open MediaSource.
 *
 * @param mediaSource - The MediaSource (must be in 'open' state)
 * @param mimeCodec - MIME type with codecs (e.g. 'video/mp4; codecs="avc1.42E01E"')
 * @returns The created SourceBuffer
 * @throws Error when the MediaSource is not open or the codec is unsupported
 */
function createSourceBuffer(mediaSource, mimeCodec) {
  const { readyState } = mediaSource;
  if (readyState !== "open") {
    throw new Error("MediaSource is not open");
  }
  const supported = isCodecSupported(mimeCodec);
  if (!supported) {
    throw new Error(`Codec not supported: ${mimeCodec}`);
  }
  return mediaSource.addSourceBuffer(mimeCodec);
}
1020
/**
 * Check whether a MIME-type-with-codecs string is playable via MSE.
 * Returns false outright when the MediaSource API itself is unavailable.
 *
 * @param mimeCodec - MIME type with codecs string
 * @returns True if the codec is supported
 */
function isCodecSupported(mimeCodec) {
  return supportsMediaSource() ? MediaSource.isTypeSupported(mimeCodec) : false;
}
1035
// 128 KiB: default lower bound on yielded chunk sizes.
const DEFAULT_MIN_CHUNK_SIZE = 2 ** 17;
/**
 * Adapts a `ReadableStream<Uint8Array>` (e.g. `response.body`) into an
 * `AsyncIterable<Uint8Array>` whose chunks are at least `minChunkSize` bytes.
 * Small network chunks accumulate until the threshold is met; any remainder
 * is flushed when the stream ends.
 *
 * Stream errors propagate to the consumer; the reader lock is always released
 * via `finally`.
 */
var ChunkedStreamIterable = class {
  minChunkSize;
  #readableStream;
  constructor(readableStream, { minChunkSize = DEFAULT_MIN_CHUNK_SIZE } = {}) {
    this.#readableStream = readableStream;
    this.minChunkSize = minChunkSize;
  }
  async *[Symbol.asyncIterator]() {
    const reader = this.#readableStream.getReader();
    let accumulated;
    try {
      for (;;) {
        const result = await reader.read();
        if (result.done) break;
        accumulated = accumulated ? concat(accumulated, result.value) : result.value;
        if (accumulated.length >= this.minChunkSize) {
          yield accumulated;
          accumulated = void 0;
        }
      }
      // Flush whatever is left below the threshold.
      if (accumulated) yield accumulated;
    } finally {
      reader.releaseLock();
    }
  }
};
/** Concatenate two byte arrays into a freshly allocated Uint8Array. */
function concat(a, b) {
  const joined = new Uint8Array(a.length + b.length);
  joined.set(a, 0);
  joined.set(b, a.length);
  return joined;
}
1079
/**
 * Fetch resolvable from AddressableObject.
 *
 * Handles byte range requests if byteRange is present.
 * Returns native fetch Response for composability (can extract text, stream, etc.).
 *
 * @param addressable - Resource to fetch (url + optional byteRange)
 * @param options - Optional fetch options; `options.headers` is merged into
 * the request headers rather than replacing them
 * @returns Promise resolving to Response
 *
 * @example
 * const response = await fetchResolvable({ url: 'https://example.com/segment.m4s' });
 * const text = await getResponseText(response);
 *
 * @example
 * // With byte range
 * const response = await fetchResolvable({
 *   url: 'https://example.com/file.mp4',
 *   byteRange: { start: 1000, end: 1999 }
 * });
 */
async function fetchResolvable(addressable, options) {
  // BUG FIX: previously `...options` was spread AFTER `headers` in the Request
  // init, so a caller-supplied `options.headers` replaced the Headers object
  // and silently dropped the Range header for byte-range requests. Split the
  // caller headers out, merge them, and spread the remaining init first so the
  // merged headers always win.
  const { headers: callerHeaders, ...init } = options ?? {};
  const headers = new Headers(callerHeaders);
  if (addressable.byteRange) {
    const { start, end } = addressable.byteRange;
    headers.set("Range", `bytes=${start}-${end}`);
  }
  const request = new Request(addressable.url, {
    method: "GET",
    ...init,
    headers,
  });
  return fetch(request);
}
1112
/**
 * Extract the body text from a Response-like object.
 *
 * Only requires a `text()` method, so fakes and minimal shims work too.
 *
 * @param response - Response-like object with a text() method
 * @returns Promise resolving to the text content
 */
function getResponseText(response) {
  const pendingText = response.text();
  return pendingText;
}
1128
/**
 * Minimal event stream with an Observable-like shape.
 *
 * A tiny Subject: events are dispatched synchronously to all subscribers.
 */
const EVENT_STREAM_SYMBOL = Symbol("@videojs/event-stream");
/**
 * Creates a minimal event stream for dispatching discrete events.
 *
 * Events must carry a `type` property so discriminated-union narrowing works
 * in subscribers. Dispatch snapshots the subscriber set first, so listeners
 * added or removed during a dispatch do not affect that dispatch.
 *
 * @example
 * ```ts
 * type Action = { type: 'PLAY' } | { type: 'PAUSE' };
 * const events = createEventStream<Action>();
 * events.subscribe((action) => { ... });
 * events.dispatch({ type: 'PLAY' });
 * ```
 */
function createEventStream() {
  const listeners = /* @__PURE__ */ new Set();
  const dispatch = (event) => {
    // Snapshot so mutation during dispatch can't affect this round.
    const snapshot = [...listeners];
    for (const notify of snapshot) notify(event);
  };
  const subscribe = (listener) => {
    listeners.add(listener);
    return () => listeners.delete(listener);
  };
  return {
    [EVENT_STREAM_SYMBOL]: true,
    dispatch,
    subscribe,
  };
}
1171
/**
 * Combines multiple Observable sources into a single Observable.
 *
 * Emits an array of latest values whenever any source emits — but only once
 * every source has emitted at least once.
 *
 * Also supports selector-based subscriptions (fires only when the selected
 * value changes, per the optional equalityFn), mirroring the createState API.
 *
 * @param sources - Array of Observable sources
 * @returns Combined Observable
 *
 * @example
 * ```ts
 * combineLatest([state, events]).subscribe(([state, event]) => { ... });
 * ```
 *
 * @example Selector subscription
 * ```ts
 * combineLatest([state, owners]).subscribe(
 *   ([s, o]) => deriveKey(s, o),
 *   (key) => { ... },
 *   { equalityFn: keyEq }
 * );
 * ```
 */
function combineLatest(sources) {
  const subscribeAll = (notify) => {
    const values = new Array(sources.length);
    const seeded = new Array(sources.length).fill(false);
    const teardowns = sources.map((source, index) =>
      source.subscribe((value) => {
        values[index] = value;
        seeded[index] = true;
        // Gate emission until every source has produced a value.
        if (seeded.every(Boolean)) notify([...values]);
      })
    );
    return () => {
      for (const teardown of teardowns) teardown();
    };
  };
  return {
    subscribe(listenerOrSelector, maybeListener, options) {
      if (maybeListener === void 0) {
        // Plain listener form: forward combined tuples directly.
        return subscribeAll(listenerOrSelector);
      }
      // Selector form: only notify when the derived value changes.
      const selector = listenerOrSelector;
      const equals = options?.equalityFn ?? Object.is;
      let hasPrevious = false;
      let previous;
      return subscribeAll((combined) => {
        const next = selector(combined);
        if (hasPrevious && equals(previous, next)) return;
        previous = next;
        hasPrevious = true;
        maybeListener(next);
      });
    },
  };
}
1238
/**
 * Type guard: an unresolved presentation has only a `url` (no `id` yet,
 * which appears once the multivariant playlist is parsed).
 */
function isUnresolved(presentation) {
  if (presentation === void 0) return false;
  return "url" in presentation && !("id" in presentation);
}
1244
/**
 * True when the state holds an unresolved (URL-only) presentation that can
 * still be fetched and parsed.
 */
function canResolve$1(state) {
  const { presentation } = state;
  return isUnresolved(presentation);
}
1247
/**
 * Determines if resolution conditions are met based on preload policy and event.
 *
 * Resolution fires when either:
 * - State-driven: preload is 'auto' or 'metadata'
 * - Event-driven: a play event arrives (covers preload 'none')
 *
 * @param state - Current presentation state
 * @param event - Current action/event
 * @returns true if resolution conditions are met
 */
function shouldResolve$1(state, event) {
  if (event.type === "play") return true;
  const { preload } = state;
  return preload === "auto" || preload === "metadata";
}
1262
/**
 * Syncs the preload attribute from the mediaElement into immutable state.
 *
 * Watches the owners container for mediaElement changes and copies the
 * element's preload attribute into state — but only while state.preload is
 * still unset, so an explicit state value is never clobbered.
 *
 * @param state - Immutable state container
 * @param owners - Mutable platform objects container
 * @returns Cleanup function to stop syncing
 */
function syncPreloadAttribute(state, owners) {
  return owners.subscribe((ownersState) => {
    // Respect an already-set preload value in state.
    if (state.current.preload !== void 0) return;
    // An empty attribute string is normalized to undefined.
    const preload = ownersState.mediaElement?.preload || void 0;
    state.patch({ preload });
  });
}
1279
/**
 * Resolves unresolved presentations using reactive composition.
 *
 * Uses combineLatest to compose state + events, enabling both state-driven
 * and event-driven resolution triggers.
 *
 * Triggers resolution when:
 * - State-driven: Unresolved presentation + preload allows (auto/metadata)
 * - Event-driven: PLAY event when preload="none"
 *
 * @example
 * ```ts
 * const state = createState({ presentation: undefined, preload: 'auto' });
 * const events = createEventStream<PresentationAction>();
 *
 * const cleanup = resolvePresentation({ state, events });
 *
 * // State-driven: resolves immediately when preload allows
 * state.patch({ presentation: { url: 'http://example.com/playlist.m3u8' } });
 *
 * // Event-driven: resolves on PLAY when preload="none"
 * state.patch({ preload: 'none', presentation: { url: '...' } });
 * events.dispatch({ type: 'PLAY' });
 * ```
 */
function resolvePresentation({ state, events }) {
	// Re-entrancy guard: combineLatest can fire again while the async fetch is
	// in flight; only one resolution runs at a time.
	let resolving = false;
	let abortController = null;
	const cleanup = combineLatest([state, events]).subscribe(async ([currentState, event]) => {
		if (!canResolve$1(currentState) || !shouldResolve$1(currentState, event) || resolving) return;
		try {
			resolving = true;
			abortController = new AbortController();
			const { presentation } = currentState;
			// Fetch the multivariant playlist text and parse it; the parsed
			// presentation replaces the URL-only one in state.
			const parsed = parseMultivariantPlaylist(await getResponseText(await fetchResolvable(presentation, { signal: abortController.signal })), presentation);
			state.patch({ presentation: parsed });
		} catch (error) {
			// Abort during teardown is expected and swallowed.
			if (error instanceof Error && error.name === "AbortError") return;
			// NOTE(review): rethrown inside an async subscriber, so this surfaces
			// as an unhandled promise rejection rather than reaching a caller —
			// confirm this is the intended error path.
			throw error;
		} finally {
			resolving = false;
			abortController = null;
		}
	});
	// Cleanup aborts any in-flight fetch before unsubscribing.
	return () => {
		abortController?.abort();
		cleanup();
	};
}
1328
/**
 * Default quality switching configuration.
 */
const DEFAULT_SWITCHING_CONFIG = {
	// Fraction of the bandwidth estimate considered usable when selecting a
	// rendition (passed to selectQuality as safetyMargin).
	safetyMargin: .85,
	// Minimum time between quality upgrades, in ms (compared against
	// Date.now() deltas in switchQuality) — damps oscillation.
	minUpgradeInterval: 8e3,
	// Fallback bandwidth estimate before any samples exist — presumably
	// bits/sec; confirm against getBandwidthEstimate's units.
	defaultBandwidth: 5e6
};
1336
/**
 * All video tracks from the first switching set of the 'video' selection set.
 * Returns [] while the presentation is unresolved (no selectionSets yet) or
 * when no video selection set exists.
 */
function getVideoTracks(presentation) {
  const videoSet = presentation.selectionSets?.find(({ type }) => type === "video");
  return videoSet?.switchingSets[0]?.tracks ?? [];
}
1343
/**
 * Quality switching orchestration (F9).
 *
 * Reacts to bandwidth estimate changes and updates `selectedVideoTrackId`
 * when a different quality is optimal:
 *
 * - **Downgrades** happen immediately to avoid buffering stalls.
 * - **Upgrades** are gated by `minUpgradeInterval` to prevent oscillation.
 * - The first switch (from any track, or no track) is always immediate.
 *
 * Smooth switching is handled downstream: when `selectedVideoTrackId` changes,
 * `resolveTrack` fetches the new playlist and `loadSegments` reloads the init
 * segment, then appends media segments from the current position in the new
 * quality. The browser's SourceBuffer replaces the overlapping buffered range.
 *
 * @example
 * const cleanup = switchQuality({ state });
 * // Later, when done:
 * cleanup();
 */
function switchQuality({ state }, config = {}) {
	const safetyMargin = config.safetyMargin ?? DEFAULT_SWITCHING_CONFIG.safetyMargin;
	const minUpgradeInterval = config.minUpgradeInterval ?? DEFAULT_SWITCHING_CONFIG.minUpgradeInterval;
	const defaultBandwidth = config.defaultBandwidth ?? DEFAULT_SWITCHING_CONFIG.defaultBandwidth;
	let lastUpgradeTime = Date.now();
	let firstMeaningfulFire = true;
	return state.subscribe((currentState) => {
		const { presentation, bandwidthState, selectedVideoTrackId, abrDisabled } = currentState;
		// Manual quality selection: ABR fully disabled.
		if (abrDisabled === true) return;
		// Need both a presentation and bandwidth data to make a decision.
		if (!presentation || !bandwidthState) return;
		const videoTracks = getVideoTracks(presentation);
		if (videoTracks.length === 0) return;
		// Latch the "first meaningful fire" flag: the first evaluation with
		// usable inputs bypasses the upgrade interval gate below.
		const isFirst = firstMeaningfulFire;
		firstMeaningfulFire = false;
		const optimal = selectQuality(videoTracks, getBandwidthEstimate(bandwidthState, defaultBandwidth), { safetyMargin });
		if (!optimal || optimal.id === selectedVideoTrackId) return;
		const currentTrack = videoTracks.find((t) => t.id === selectedVideoTrackId);
		// Upgrade path (or no current track): rate-limit to one upgrade per
		// minUpgradeInterval. Downgrades fall through immediately.
		if (!currentTrack || optimal.bandwidth > currentTrack.bandwidth) {
			const now = Date.now();
			if (!isFirst && now - lastUpgradeTime < minUpgradeInterval) return;
			lastUpgradeTime = now;
		}
		state.patch({ selectedVideoTrackId: optimal.id });
	});
}
1388
/**
 * Map track type to selected track ID property key in state.
 */
const SelectedTrackIdKeyByType = {
  video: "selectedVideoTrackId",
  audio: "selectedAudioTrackId",
  text: "selectedTextTrackId"
};
/**
 * Get the selected track of a given type from state.
 *
 * Returns the track (partially or fully resolved) whose id matches the
 * selected-track-id entry in state, or undefined when the presentation is
 * still unresolved or no match exists.
 *
 * @example
 * const videoTrack = getSelectedTrack(state, 'video');
 * if (videoTrack && isResolvedTrack(videoTrack)) {
 *   // videoTrack is VideoTrack
 * }
 */
function getSelectedTrack(state, type) {
  const { presentation } = state;
  // Unresolved presentations (URL-only, no id) have no tracks yet.
  if (!presentation || !("id" in presentation)) return void 0;
  const selectedId = state[SelectedTrackIdKeyByType[type]];
  const matchingSet = presentation.selectionSets.find((set) => set.type === type);
  return matchingSet?.switchingSets[0]?.tracks.find((candidate) => candidate.id === selectedId);
}
1414
/**
 * Creates a SegmentLoaderActor for one track type (video or audio).
 *
 * Receives load assignments via `send()` and owns all execution: planning,
 * removes, fetches, and appends. Coordinates with the SourceBufferActor for
 * all physical SourceBuffer operations.
 *
 * Planning (Cases 1–3) happens in `send()` on every incoming message, producing
 * an ordered LoadTask list. The runner drains that list sequentially. When a new
 * message arrives mid-run, send() replans and either continues the in-flight
 * operation (if still needed) or preempts it.
 *
 * @param sourceBufferActor - Shared SourceBufferActor reference (not owned)
 * @param fetchBytes - Tracked fetch closure (owns throughput sampling for segments).
 * Accepts an optional `minChunkSize` in options; init segments pass `Infinity`
 * so the entire body accumulates as one chunk before appending.
 */
function createSegmentLoaderActor(sourceBufferActor, fetchBytes) {
	// Replacement plan produced by send() while a run is active; picked up by
	// the runner after the current task finishes.
	let pendingTasks = null;
	// In-flight markers so send() can tell whether the current operation is
	// still wanted by a new plan.
	let inFlightInitTrackId = null;
	let inFlightSegmentId = null;
	let abortController = null;
	let running = false;
	let destroyed = false;
	// Segments from `allSegments` that the SourceBufferActor has fully
	// committed (partial appends are excluded), matched by id.
	const getBufferedSegments = (allSegments) => {
		const bufferedIds = new Set(sourceBufferActor.snapshot.context.segments.filter((s) => !s.partial).map((s) => s.id));
		return allSegments.filter((s) => bufferedIds.has(s.id));
	};
	/**
	 * Translate a load message into an ordered LoadTask list based on committed
	 * actor state. In-flight awareness is handled separately in send().
	 *
	 * @todo Rename alongside LoadTask (e.g. planOps).
	 *
	 * Case 1 — Removes: forward and back buffer flush points, segment-aligned.
	 * No flush on track switch: appending new content overwrites existing buffer
	 * ranges, and the actor's time-aligned deduplication keeps the segment model
	 * accurate as new segments arrive.
	 *
	 * Case 2 — Init: schedule if not yet committed for this track.
	 *
	 * Case 3 — Segments: all segments in the load window not yet committed.
	 */
	const planTasks = (message) => {
		const { track, range } = message;
		const actorCtx = sourceBufferActor.snapshot.context;
		const bufferedSegments = getBufferedSegments(track.segments);
		// No range means a metadata-only load; currentTime defaults to 0.
		const currentTime = range?.start ?? 0;
		const tasks = [];
		if (range) {
			// Case 1: flush content too far ahead of, then too far behind, the
			// playhead. Removes are scheduled before any appends.
			const forwardFlushStart = calculateForwardFlushPoint(bufferedSegments, currentTime);
			if (forwardFlushStart < Infinity) tasks.push({
				type: "remove",
				start: forwardFlushStart,
				end: Infinity
			});
			const backFlushEnd = calculateBackBufferFlushPoint(bufferedSegments, currentTime);
			if (backFlushEnd > 0) tasks.push({
				type: "remove",
				start: 0,
				end: backFlushEnd
			});
		}
		// Case 2: (re)append the init segment when the committed init belongs
		// to a different track (covers first load and track switches).
		if (actorCtx.initTrackId !== track.id) tasks.push({
			type: "append-init",
			meta: { trackId: track.id },
			url: track.initialization.url,
			...track.initialization.byteRange !== void 0 && { byteRange: track.initialization.byteRange }
		});
		if (range) {
			// Case 3: segments in the load window. The extra filter re-checks
			// against the actor's committed segment model (time-aligned within
			// EPSILON) so an already-buffered time range is only refetched when
			// the prior append was partial or this track is higher quality.
			const EPSILON = 1e-4;
			const segmentsToLoad = getSegmentsToLoad(track.segments, bufferedSegments, currentTime).filter((seg) => {
				const existing = actorCtx.segments.find((s) => Math.abs(s.startTime - seg.startTime) < EPSILON);
				if (existing?.partial) return true;
				if (!existing?.trackBandwidth || !track.bandwidth) return true;
				return track.bandwidth > existing.trackBandwidth;
			});
			for (const segment of segmentsToLoad) tasks.push({
				type: "append-segment",
				meta: {
					id: segment.id,
					startTime: segment.startTime,
					duration: segment.duration,
					trackId: track.id,
					trackBandwidth: track.bandwidth
				},
				url: segment.url,
				...segment.byteRange !== void 0 && { byteRange: segment.byteRange }
			});
		}
		return tasks;
	};
	/**
	 * Execute a single LoadTask: fetch (if needed) then forward to SourceBufferActor.
	 * Sets/clears in-flight tracking around async operations so send() can make
	 * accurate continue/preempt decisions at any point during execution.
	 *
	 * @todo Rename alongside LoadTask (e.g. executeOp).
	 */
	const executeLoadTask = async (task) => {
		const signal = abortController.signal;
		try {
			if (task.type === "remove") {
				await sourceBufferActor.send(task, signal);
				return;
			}
			if (task.type === "append-init") {
				inFlightInitTrackId = task.meta.trackId;
				if (!signal.aborted) {
					// Infinity accumulates the whole init body as one chunk.
					const data = await fetchBytes(task, {
						signal,
						minChunkSize: Infinity
					});
					// If the preempting plan re-inits a DIFFERENT track, this
					// fetched init is obsolete and must not be appended.
					const isTrackSwitch = pendingTasks?.some((t) => t.type === "append-init" && t.meta.trackId !== task.meta.trackId);
					if (!signal.aborted || !isTrackSwitch) {
						// NOTE(review): when aborted but not a track switch, the
						// already-fetched init is still appended under a fresh
						// (non-aborted) signal — presumably to avoid wasting the
						// download; confirm intended.
						const appendSignal = signal.aborted ? new AbortController().signal : signal;
						await sourceBufferActor.send({
							type: "append-init",
							data,
							meta: task.meta
						}, appendSignal);
					}
				}
				return;
			}
			// Media segment: stream chunks straight through to the actor.
			inFlightSegmentId = task.meta.id;
			if (!signal.aborted) {
				const stream = await fetchBytes(task, { signal });
				if (!signal.aborted) await sourceBufferActor.send({
					type: "append-segment",
					data: stream,
					meta: task.meta
				}, signal);
			}
		} finally {
			// Always clear in-flight markers, even on abort/error.
			inFlightInitTrackId = null;
			inFlightSegmentId = null;
		}
	};
	/**
	 * Drain the scheduled task list sequentially.
	 * After each task completes, checks for a pending replacement plan from send().
	 * If the signal was aborted and no new plan arrived, stops immediately.
	 */
	const runScheduled = async (initialTasks) => {
		running = true;
		abortController = new AbortController();
		let scheduled = initialTasks;
		while (scheduled.length > 0 && !destroyed) {
			const task = scheduled[0];
			scheduled = scheduled.slice(1);
			try {
				await executeLoadTask(task);
			} catch (error) {
				// AbortError is expected on preemption; anything else logs and
				// drops the remainder of this plan.
				if (error instanceof Error && error.name === "AbortError") {} else {
					console.error("Unexpected error in segment loader:", error);
					scheduled = [];
				}
			}
			// Swap in a replacement plan if send() produced one mid-task; each
			// new plan gets a fresh AbortController.
			if (pendingTasks !== null) {
				scheduled = pendingTasks;
				pendingTasks = null;
				abortController = new AbortController();
			} else if (abortController.signal.aborted) break;
		}
		abortController = null;
		running = false;
	};
	return {
		send(message) {
			if (destroyed) return;
			const allTasks = planTasks(message);
			if (!running) {
				if (allTasks.length === 0) return;
				// Fire-and-forget: runScheduled manages its own lifecycle.
				runScheduled(allTasks);
				return;
			}
			// The new plan still wants the in-flight operation: let it finish
			// and queue the rest (minus the in-flight task) as the replacement.
			if (inFlightSegmentId !== null && allTasks.some((t) => t.type === "append-segment" && t.meta.id === inFlightSegmentId) || inFlightInitTrackId !== null && allTasks.some((t) => t.type === "append-init" && t.meta.trackId === inFlightInitTrackId)) pendingTasks = allTasks.filter((t) => !(t.type === "append-segment" && t.meta.id === inFlightSegmentId) && !(t.type === "append-init" && t.meta.trackId === inFlightInitTrackId));
			else {
				// Otherwise preempt: abort the in-flight operation and replace
				// the whole plan.
				pendingTasks = allTasks;
				abortController?.abort();
			}
		},
		destroy() {
			destroyed = true;
			abortController?.abort();
		}
	};
}
1603
/**
 * Map track type to the state key holding that track type's buffer actor.
 * NOTE(review): no 'text' entry — text tracks presumably don't go through a
 * SourceBuffer actor; confirm against the loading orchestration.
 */
const ActorKeyByType = {
	video: "videoBufferActor",
	audio: "audioBufferActor"
};
1607
/**
 * Build a tracked fetch closure that samples download throughput.
 *
 * The returned function fetches an addressable resource and yields its body
 * as chunked Uint8Arrays. Each yielded chunk contributes a bandwidth sample
 * (bytes over wall-clock ms between yields) to the `throughput` state, and
 * optionally notifies `onSample`.
 *
 * @param throughput - State container with `current`, `patch`, `flush`
 * @param onSample - Optional callback invoked with each new bandwidth sample
 * @returns async (addressable, options) => AsyncIterable<Uint8Array>;
 * `options.minChunkSize` controls chunk accumulation (Infinity = whole body)
 */
function createTrackedFetch(throughput, onSample) {
	return async (addressable, options) => {
		// minChunkSize is consumed here; the rest is forwarded to fetch.
		const { minChunkSize, ...fetchOptions } = options ?? {};
		const response = await fetchResolvable(addressable, fetchOptions);
		if (!response.body) throw new Error("Response has no body");
		const body = response.body;
		// Lazy iterable: the body is only consumed when iterated.
		return { [Symbol.asyncIterator]: async function* () {
			let chunkStart = performance.now();
			for await (const chunk of new ChunkedStreamIterable(body, ...minChunkSize !== void 0 ? [{ minChunkSize }] : [])) {
				// Elapsed time covers the read of this chunk; NOTE(review) it also
				// includes any time the consumer spent between yields — confirm
				// that's acceptable for the bandwidth estimator.
				const elapsed = performance.now() - chunkStart;
				const next = sampleBandwidth(throughput.current, elapsed, chunk.byteLength);
				throughput.patch(next);
				throughput.flush();
				onSample?.(next);
				yield chunk;
				chunkStart = performance.now();
			}
		} };
	};
}
1627
/**
 * Non-tracking fetch: eagerly starts the request and returns the response
 * body as a lazy chunk iterable. Used for audio tracks, which don't sample
 * bandwidth. Pass `minChunkSize: Infinity` to accumulate the full body as a
 * single chunk (equivalent to arrayBuffer() via the same streaming path).
 *
 * @param addressable - Resource to fetch (url + optional byteRange)
 * @param options - Fetch options plus optional minChunkSize
 * @returns AsyncIterable of Uint8Array chunks
 * @throws Error when the response has no body
 */
async function fetchStream(addressable, options) {
  const { minChunkSize, ...fetchOptions } = options ?? {};
  const response = await fetchResolvable(addressable, fetchOptions);
  if (!response.body) throw new Error("Response has no body");
  const chunkOptions = minChunkSize !== void 0 ? [{ minChunkSize }] : [];
  return new ChunkedStreamIterable(response.body, ...chunkOptions);
}
1639
/**
 * Project the combined [segmentsCanLoad, state] tuple into the flat input
 * record consumed by the segment-loading equality check and subscriber.
 */
function selectLoadingInputs([segmentsCanLoad, state], type) {
  const { playbackInitiated, preload, currentTime } = state;
  const track = getSelectedTrack(state, type);
  return { playbackInitiated, preload, currentTime, track, segmentsCanLoad };
}
1649
+ /**
1650
+ * Equality function encoding the condition hierarchy for relevant changes.
1651
+ *
1652
+ * Pre-play (!playbackInitiated):
1653
+ * Only preload changes matter. currentTime and resolvedTrackId are ignored
1654
+ * (track changes not supported pre-play; currentTime value is used at
1655
+ * trigger time but changes don't re-trigger).
1656
+ *
1657
+ * playbackInitiated transition:
1658
+ * Always fires (handled in the subscriber; preload='auto' suppression
1659
+ * applied there since equality functions have no memory of prior values).
1660
+ *
1661
+ * Post-play (playbackInitiated):
1662
+ * resolvedTrackId changes (track switch or previously-unresolved track
1663
+ * resolving) and currentTime changes both trigger. preload is irrelevant.
1664
+ */
1665
// Start time of the segment containing currentTime; the final segment acts as
// a catch-all for times at/past its start. Returns undefined when currentTime
// is nullish, the track is missing, or no segment matches.
const segmentStartFor = (currentTime, track) => {
  if (currentTime == null) return void 0;
  const segments = track?.segments;
  if (segments === void 0) return void 0;
  const hit = segments.find(({ startTime, duration }, index) => {
    if (currentTime < startTime) return false;
    return currentTime < startTime + duration || index === segments.length - 1;
  });
  return hit?.startTime;
};
1669
/**
 * Returns true when the inputs are equal (no meaningful change — don't fire).
 * Returns false when the inputs differ in a way that requires a new message.
 *
 * This IS the shouldLoadSegments logic, expressed as an equality function:
 * pre-play only preload transitions matter; the transition into playback
 * fires unless preload was already 'auto'; post-play, track changes and
 * segment-boundary crossings of currentTime fire.
 */
function loadingInputsEq(prevState, curState) {
  // Loading gate closed: nothing can fire.
  if (!curState.segmentsCanLoad) return true;
  if (!curState.playbackInitiated) {
    // Pre-play: 'none' stays dormant; otherwise fire only on preload change.
    return curState.preload === "none" || curState.preload === prevState.preload;
  }
  // Transition into playback: fire unless we were already in 'auto' mode.
  if (!prevState.playbackInitiated && prevState.preload !== "auto") return false;
  const { track } = curState;
  // No resolved track yet — nothing to load.
  if (!track || !isResolvedTrack(track)) return true;
  // Track switch (or a previously-unresolved track resolving).
  if (prevState.track?.id !== track.id) return false;
  // Otherwise fire only when currentTime crosses a segment boundary.
  const prevStart = segmentStartFor(prevState.currentTime, track);
  const curStart = segmentStartFor(curState.currentTime, track);
  return prevStart === curStart;
}
1688
+ /**
1689
+ * Load segments orchestration — Reactor layer.
1690
+ *
1691
+ * Sends typed load messages to a SegmentLoaderActor when relevant conditions
1692
+ * change. Uses targeted subscriptions rather than broad combineLatest so only
1693
+ * meaningful state changes trigger evaluation.
1694
+ *
1695
+ * Condition hierarchy (see SegmentLoadingKey for detail):
1696
+ *
1697
+ * !playbackInitiated
1698
+ * preload==='none' (or unset) → dormant; no trigger
1699
+ * preload==='metadata' → trigger on transition to 'metadata'
1700
+ * preload==='auto' → trigger on transition to 'auto'
1701
+ *
1702
+ * !playbackInitiated → playbackInitiated
1703
+ * preload !== 'auto' → trigger (message shape changes)
1704
+ * preload === 'auto' → suppressed (was already full-range mode;
1705
+ * let segmentStart take over post-play)
1706
+ * KNOWN LIMITATION: seek-before-play with
1707
+ * preload='auto' is not supported — if the
1708
+ * user seeks before pressing play, the
1709
+ * first re-send is delayed until the next
1710
+ * segment boundary crossing post-play.
1711
+ *
1712
+ * playbackInitiated
1713
+ * resolvedTrackId changes → trigger
1714
+ * segmentStart(currentTime) changes → trigger (segment boundary only)
1715
+ *
1716
+ * @example
1717
+ * const cleanup = loadSegments({ state, owners }, { type: 'video' });
1718
+ */
1719
function loadSegments({ state, owners }, config) {
  const { type } = config;
  // Owner key under which the actor for this media type is published.
  const actorKey = ActorKeyByType[type];
  const initialBandwidth = state.current.bandwidthState;
  // Reactive bandwidth-estimator state; seeded from persisted estimates when
  // available so estimation does not start cold.
  const throughput = createState(initialBandwidth ?? {
    fastEstimate: 0,
    fastTotalWeight: 0,
    slowEstimate: 0,
    slowTotalWeight: 0,
    bytesSampled: 0
  });
  // Video segments go through the bandwidth-tracking fetch; other types use
  // the plain streaming fetch. The write-back callback (patch + flush of
  // bandwidthState) is only wired when prior bandwidth state existed.
  const fetchBytes = type === "video" ? createTrackedFetch(throughput, initialBandwidth !== void 0 ? (next) => {
    state.patch({ bandwidthState: next });
    state.flush();
  } : void 0) : fetchStream;
  const segmentLoader = createState(void 0);
  // Create/destroy the segment-loader actor in lockstep with the owner actor.
  const unsubActorLifecycle = owners.subscribe((o) => o[actorKey], (actor) => {
    if (actor) segmentLoader.patch(createSegmentLoaderActor(actor, fetchBytes));
    else if (!actor && segmentLoader.current) {
      segmentLoader.current.destroy();
      segmentLoader.patch(void 0);
    }
    // Teardown returned to the subscription: drop any loader still alive.
    return () => {
      segmentLoader.current?.destroy();
      segmentLoader.patch(void 0);
    };
  });
  // Gate: loading is possible only when the selected track is resolved AND a
  // segment-loader actor exists.
  const segmentsCanLoad = createState(false);
  const unsubscribeCanLoadSegments = combineLatest([state, segmentLoader]).subscribe(([currentState, currentSegmentLoader]) => {
    const track = getSelectedTrack(currentState, type);
    const trackResolved = !!track && isResolvedTrack(track);
    const segmentLoaderActorExists = !!currentSegmentLoader;
    segmentsCanLoad.patch(trackResolved && segmentLoaderActorExists);
  });
  // Fire a typed 'load' message whenever loadingInputsEq deems the change
  // meaningful (see the condition hierarchy documented above this function).
  const unsubscribeShouldLoadSegments = combineLatest([segmentsCanLoad, state]).subscribe(([segmentsCanLoad$1, state$1]) => selectLoadingInputs([segmentsCanLoad$1, state$1], type), ({ preload, playbackInitiated, currentTime, track }) => {
    // Pre-play with preload !== 'auto': send a range-less (metadata) load.
    if (!(preload === "auto" || !!playbackInitiated))
      /** @ts-expect-error */
      segmentLoader.current?.send({
        type: "load",
        track
      });
    // Otherwise request the forward-buffer window starting at currentTime.
    else segmentLoader.current?.send({
      type: "load",
      track,
      range: {
        start: currentTime,
        end: currentTime + DEFAULT_FORWARD_BUFFER_CONFIG.bufferDuration
      }
    });
  }, { equalityFn: loadingInputsEq });
  // Cleanup: unwind all three subscriptions (actor lifecycle last).
  return () => {
    unsubscribeCanLoadSegments();
    unsubscribeShouldLoadSegments();
    unsubActorLifecycle();
  };
}
1775
+ /**
1776
+ * Parse a VTT segment using browser's native parser.
1777
+ *
1778
+ * Creates a dummy video element with a track element to leverage
1779
+ * the browser's optimized VTT parsing. Returns parsed VTTCue objects.
1780
+ */
1781
// Memoized hidden <video> host for native VTT parsing via <track> elements.
let dummyVideo = null;
/**
 * Lazily create (and cache) the hidden, muted <video> element used as a
 * host for temporary <track> elements during VTT parsing.
 */
function ensureDummyVideo() {
  if (dummyVideo) return dummyVideo;
  const host = document.createElement("video");
  host.muted = true;
  host.preload = "none";
  host.style.display = "none";
  host.crossOrigin = "anonymous";
  dummyVideo = host;
  return dummyVideo;
}
1792
/**
 * Parse a VTT segment via the browser's native <track> parser.
 *
 * Attaches a temporary subtitles <track> element to the shared hidden
 * <video> host, points it at `url`, and collects the parsed cues once the
 * browser reports the track as loaded. The element and listeners are
 * detached on both success and failure.
 *
 * @param url - URL of the VTT segment
 * @returns Promise resolving to the parsed cue objects
 */
function parseVttSegment(url) {
  const video = ensureDummyVideo();
  const trackEl = document.createElement("track");
  trackEl.kind = "subtitles";
  trackEl.default = true;
  return new Promise((resolve, reject) => {
    const detach = () => {
      trackEl.removeEventListener("load", onLoad);
      trackEl.removeEventListener("error", onError);
      video.removeChild(trackEl);
    };
    const onLoad = () => {
      const parsed = [];
      const { cues } = trackEl.track;
      if (cues) {
        for (let i = 0; i < cues.length; i++) {
          const cue = cues[i];
          if (cue) parsed.push(cue);
        }
      }
      detach();
      resolve(parsed);
    };
    const onError = () => {
      detach();
      reject(new Error(`Failed to load VTT segment: ${url}`));
    };
    trackEl.addEventListener("load", onLoad);
    trackEl.addEventListener("error", onError);
    video.appendChild(trackEl);
    trackEl.src = url;
  });
}
1823
// Drop the memoized dummy <video> so the next parse creates a fresh host.
// NOTE(review): this only clears the reference — it does not detach the old
// element or its <track> children from anything; presumably the host was
// never inserted into the document, so GC reclaims it. Confirm.
function destroyVttParser() {
  dummyVideo = null;
}
1826
/**
 * Whether `textTrack` already contains a cue matching `cue` on start time,
 * end time, and text — used to avoid double-adding cues when segments
 * overlap or are re-parsed.
 */
function isDuplicateCue(cue, textTrack) {
  const { cues } = textTrack;
  if (!cues) return false;
  const { startTime, endTime, text } = cue;
  // Indexed loop: cue lists expose length + indexed access, not iteration.
  for (let index = 0; index < cues.length; index++) {
    const candidate = cues[index];
    const matches =
      candidate.startTime === startTime &&
      candidate.endTime === endTime &&
      candidate.text === text;
    if (matches) return true;
  }
  return false;
}
1835
/**
 * Task: fetch and parse one VTT segment, then add its cues to the target
 * TextTrack, skipping any cue the track already contains.
 */
const loadVttSegmentTask = async ({ segment }, context) => {
  const { textTrack } = context;
  const cues = await parseVttSegment(segment.url);
  for (const cue of cues) {
    if (isDuplicateCue(cue, textTrack)) continue;
    textTrack.addCue(cue);
  }
};
1839
+ /**
1840
+ * Load text track cues task (composite - orchestrates VTT segment subtasks).
1841
+ */
1842
const loadTextTrackCuesTask = async ({ currentState }, context) => {
  const track = findSelectedTextTrack(currentState);
  // Nothing to do until the selected text track has resolved segment data.
  if (!track || !isResolvedTrack(track)) return;
  const { segments } = track;
  if (segments.length === 0) return;
  const trackId = track.id;
  // Segment ids already recorded as appended for this track.
  const loadedIds = new Set((currentState.textBufferState?.[trackId]?.segments ?? []).map((s) => s.id));
  // Candidates from the load window, minus already-loaded segments.
  const segmentsToLoad = getSegmentsToLoad(segments, segments.filter((s) => loadedIds.has(s.id)), currentState.currentTime ?? 0).filter((s) => !loadedIds.has(s.id));
  if (segmentsToLoad.length === 0) return;
  for (const segment of segmentsToLoad) {
    if (context.signal.aborted) break;
    try {
      await loadVttSegmentTask({ segment }, { textTrack: context.textTrack });
      // Record progress against the *latest* state snapshot so concurrent
      // patches to other tracks' buffer state are not clobbered.
      const latest = context.state.current.textBufferState ?? {};
      const trackState = latest[trackId] ?? { segments: [] };
      context.state.patch({ textBufferState: {
        ...latest,
        [trackId]: { segments: [...trackState.segments, { id: segment.id }] }
      } });
    } catch (error) {
      if (error instanceof Error && error.name === "AbortError") break;
      // Best-effort: keep going so later segments still yield partial subtitles.
      console.error("Failed to load VTT segment:", error);
    }
  }
  // NOTE(review): re-adding cues that are already on the track looks like a
  // render-refresh workaround for mode === 'showing' — confirm it is needed;
  // addCue() of an already-present cue is normally redundant.
  if (context.textTrack.mode === "showing" && context.textTrack.cues) Array.from(context.textTrack.cues).forEach((cue) => {
    context.textTrack.addCue(cue);
  });
  // Yield one frame so newly added cues can paint before the task resolves.
  await new Promise((resolve) => requestAnimationFrame(resolve));
};
1871
+ /**
1872
+ * Find the selected text track in the presentation.
1873
+ */
1874
/**
 * Look up the selected text track's metadata inside the presentation's
 * text selection set (first switching set only).
 *
 * @returns the track metadata object, or undefined when there is no
 *   presentation, no selection, or no matching track
 */
function findSelectedTextTrack(state) {
  const { presentation, selectedTextTrackId } = state;
  if (!presentation || !selectedTextTrackId) return;
  const textSet = presentation.selectionSets.find(({ type }) => type === "text");
  const tracks = textSet?.switchingSets?.[0]?.tracks;
  if (!tracks) return;
  return tracks.find((candidate) => candidate.id === selectedTextTrackId);
}
1880
+ /**
1881
+ * Get the browser's TextTrack object for the selected text track.
1882
+ *
1883
+ * Retrieves the live TextTrack interface from the track element in owners,
1884
+ * which is used for adding cues, checking mode, and managing track state.
1885
+ *
1886
+ * Note: Returns the DOM TextTrack interface (HTMLTrackElement.track),
1887
+ * not the presentation Track metadata type.
1888
+ *
1889
+ * @param state - Current playback state (track selection)
1890
+ * @param owners - DOM owners containing track elements map
1891
+ * @returns DOM TextTrack interface or undefined if not found
1892
+ */
1893
/**
 * Resolve the live DOM TextTrack (HTMLTrackElement.track) for the selected
 * text track from the owners' track-elements map. This is the interface
 * used for adding cues and checking mode — not the presentation metadata.
 *
 * @param state - Current playback state (track selection)
 * @param owners - DOM owners containing the track-elements map
 * @returns DOM TextTrack interface, or undefined if not found
 */
function getSelectedTextTrackFromOwners(state, owners) {
  const { selectedTextTrackId } = state;
  const { textTracks } = owners;
  if (!selectedTextTrackId || !textTracks) return;
  return textTracks.get(selectedTextTrackId)?.track;
}
1898
+ /**
1899
+ * Check if we can load text track cues.
1900
+ *
1901
+ * Requires:
1902
+ * - Selected text track ID exists
1903
+ * - Track elements map exists
1904
+ * - Track element exists for selected track
1905
+ */
1906
/**
 * Whether cue loading is possible: a text track is selected, the track
 * elements map exists, and it contains an element for that selection.
 */
function canLoadTextTrackCues(state, owners) {
  const { selectedTextTrackId } = state;
  if (!selectedTextTrackId) return false;
  const { textTracks } = owners;
  return !!textTracks && textTracks.has(selectedTextTrackId);
}
1909
+ /**
1910
+ * Check if we should load text track cues.
1911
+ *
1912
+ * Only load if:
1913
+ * - Track is resolved (has segments)
1914
+ * - Track has at least one segment
1915
+ * - Track element exists
1916
+ */
1917
/**
 * Whether cue loading should actually run: loading is possible, the
 * selected track is resolved with at least one segment, and the live DOM
 * TextTrack can be obtained from owners.
 */
function shouldLoadTextTrackCues(state, owners) {
  if (!canLoadTextTrackCues(state, owners)) return false;
  const track = findSelectedTextTrack(state);
  const hasSegments = !!track && isResolvedTrack(track) && track.segments.length > 0;
  if (!hasSegments) return false;
  return !!getSelectedTextTrackFromOwners(state, owners);
}
1924
+ /**
1925
+ * Load text track cues orchestration.
1926
+ *
1927
+ * Triggers when:
1928
+ * - Text track is selected
1929
+ * - Track is resolved (has segments)
1930
+ * - Track element exists
1931
+ *
1932
+ * Fetches and parses VTT segments within the forward buffer window, then adds
1933
+ * cues to the track incrementally. Continues on segment errors to provide
1934
+ * partial subtitles.
1935
+ *
1936
+ * @example
1937
+ * const cleanup = loadTextTrackCues({ state, owners });
1938
+ */
1939
/**
 * Load text track cues orchestration.
 *
 * Triggers when a text track is selected, resolved (has segments), and its
 * track element exists. Fetches and parses VTT segments, adding cues
 * incrementally; continues on segment errors to provide partial subtitles.
 *
 * Fix over the original: the in-flight gate (`currentTask`) is now only
 * cleared by the task that owns it. Previously a superseded task's
 * unconditional `.finally(() => currentTask = null)` could settle late and
 * clobber the gate for its replacement, letting duplicate tasks start.
 *
 * @example
 * const cleanup = loadTextTrackCues({ state, owners });
 */
function loadTextTrackCues({ state, owners }) {
  let currentTask = null;
  let abortController = null;
  let lastTrackId;
  const cleanup = combineLatest([state, owners]).subscribe(async ([currentState, currentOwners]) => {
    // Track switch: cancel the old task and clear the gate so the new
    // selection can start loading immediately.
    if (currentState.selectedTextTrackId !== lastTrackId) {
      lastTrackId = currentState.selectedTextTrackId;
      abortController?.abort();
      currentTask = null;
    }
    if (currentTask) return;
    if (!shouldLoadTextTrackCues(currentState, currentOwners)) return;
    const textTrack = getSelectedTextTrackFromOwners(currentState, currentOwners);
    if (!textTrack) return;
    abortController = new AbortController();
    const task = loadTextTrackCuesTask({ currentState }, {
      signal: abortController.signal,
      textTrack,
      state
    }).finally(() => {
      // Only clear the gate if this task is still the active one; a
      // superseded (aborted) task settling late must not clobber the gate
      // for the task that replaced it.
      if (currentTask === task) currentTask = null;
    });
    currentTask = task;
  });
  return () => {
    abortController?.abort();
    cleanup();
  };
}
1967
+ /**
1968
+ * Track current playback position from the media element.
1969
+ *
1970
+ * Mirrors `mediaElement.currentTime` into reactive state on:
1971
+ * - `timeupdate` — fires during playback (~4 Hz)
1972
+ * - `seeking` — fires when a seek begins; per spec, `currentTime` is
1973
+ * already at the new position when this event dispatches, so buffer
1974
+ * management can react immediately rather than waiting for `timeupdate`,
1975
+ * which does not fire while paused.
1976
+ *
1977
+ * Also syncs immediately when a media element becomes available.
1978
+ *
1979
+ * @example
1980
+ * const cleanup = trackCurrentTime({ state, owners });
1981
+ */
1982
/**
 * Mirror `mediaElement.currentTime` into reactive state.
 *
 * Syncs on `timeupdate` (fires during playback) and `seeking` (fires when a
 * seek begins — `currentTime` is already at the new position then, so buffer
 * management can react without waiting for `timeupdate`, which does not fire
 * while paused). Also syncs immediately when an element becomes available.
 *
 * @example
 * const cleanup = trackCurrentTime({ state, owners });
 */
function trackCurrentTime({ state, owners }) {
  let lastMediaElement;
  let removeListeners = null;
  const unsubscribe = owners.subscribe((currentOwners) => {
    const { mediaElement } = currentOwners;
    if (mediaElement === lastMediaElement) return;
    // Tear down listeners bound to the previous element (if any).
    removeListeners?.();
    removeListeners = null;
    lastMediaElement = mediaElement;
    if (!mediaElement) return;
    const sync = () => state.patch({ currentTime: mediaElement.currentTime });
    // Seed state immediately so consumers don't wait for the first event.
    sync();
    const teardowns = [
      listen(mediaElement, "timeupdate", sync),
      listen(mediaElement, "seeking", sync)
    ];
    removeListeners = () => teardowns.forEach((off) => off());
  });
  return () => {
    removeListeners?.();
    unsubscribe();
  };
}
2006
+ /**
2007
+ * Track whether playback has been initiated by the user.
2008
+ *
2009
+ * Sets `state.playbackInitiated = true` when the media element fires a `play`
2010
+ * event (via `element.play()`, native controls, or autoplay) and simultaneously
2011
+ * dispatches `{ type: 'play' }` to the event stream so `resolvePresentation`
2012
+ * can react.
2013
+ *
2014
+ * Resets `state.playbackInitiated = false` when `presentation.url` changes,
2015
+ * so a new source with `preload="none"` won't load segments until play is
2016
+ * triggered again.
2017
+ *
2018
+ * This flag is used by `shouldLoadSegments` to allow segment loading after
2019
+ * play is initiated regardless of the initial `preload` setting — `preload`
2020
+ * is a startup hint, not a runtime gate.
2021
+ *
2022
+ * @example
2023
+ * const cleanup = trackPlaybackInitiated({ state, owners, events });
2024
+ */
2025
/**
 * Track whether playback has been initiated.
 *
 * Sets `playbackInitiated = true` on the media element's `play` event and
 * dispatches `{ type: 'play' }` to the event stream. Resets the flag to
 * false whenever `presentation.url` changes, so a new source with
 * `preload="none"` stays dormant until play is triggered again.
 *
 * @example
 * const cleanup = trackPlaybackInitiated({ state, owners, events });
 */
function trackPlaybackInitiated({ state, owners, events }) {
  let lastMediaElement;
  let removeListener = null;
  let lastPresentationUrl;
  const unsubscribeState = state.subscribe((currentState) => {
    const url = currentState.presentation?.url;
    if (url === lastPresentationUrl) return;
    // Skip the very first observation: only genuine source *changes* reset.
    if (lastPresentationUrl !== void 0) state.patch({ playbackInitiated: false });
    lastPresentationUrl = url;
  });
  const unsubscribeOwners = owners.subscribe((currentOwners) => {
    const { mediaElement } = currentOwners;
    if (mediaElement === lastMediaElement) return;
    removeListener?.();
    removeListener = null;
    lastMediaElement = mediaElement;
    if (!mediaElement) return;
    removeListener = listen(mediaElement, "play", () => {
      state.patch({ playbackInitiated: true });
      events.dispatch({ type: "play" });
    });
  });
  return () => {
    removeListener?.();
    unsubscribeState();
    unsubscribeOwners();
  };
}
2054
+ /**
2055
+ * Append media data to a SourceBuffer.
2056
+ *
2057
+ * Accepts either a full ArrayBuffer (single append) or an AsyncIterable of
2058
+ * Uint8Array chunks (one append per chunk, in order). Waits for `updateend`
2059
+ * between each call so appends are serialized correctly.
2060
+ *
2061
+ * Errors from the SourceBuffer (`error` event) or from the iterable are
2062
+ * propagated as rejections.
2063
+ */
2064
/**
 * Append media data to a SourceBuffer.
 *
 * Accepts either a full ArrayBuffer (single append) or an AsyncIterable of
 * chunks (one serialized append per chunk, in order). On an AbortError,
 * attempts `sourceBuffer.abort()` (best-effort) before rethrowing; all
 * other errors from the buffer or the iterable propagate as rejections.
 */
async function appendSegment(sourceBuffer, data, signal) {
  if (data instanceof ArrayBuffer) {
    await appendChunk(sourceBuffer, data);
    return;
  }
  try {
    for await (const chunk of data) {
      if (signal?.aborted) {
        throw signal.reason ?? new DOMException("Aborted", "AbortError");
      }
      await appendChunk(sourceBuffer, chunk);
    }
  } catch (e) {
    const isAbort = e instanceof DOMException && e.name === "AbortError";
    if (isAbort && !sourceBuffer.updating) {
      try {
        sourceBuffer.abort();
      } catch {}
    }
    throw e;
  }
}
2078
/**
 * Append a single chunk to a SourceBuffer, serialized against any update
 * already in flight: waits for `updateend` before appending, then resolves
 * on the append's own `updateend` (or rejects on `error` / a synchronous
 * appendBuffer throw such as QuotaExceededError).
 */
async function appendChunk(sourceBuffer, data) {
  // Wait out any in-progress update before issuing our own append.
  if (sourceBuffer.updating) {
    await new Promise((resolve) => {
      const onUpdateEnd = () => {
        sourceBuffer.removeEventListener("updateend", onUpdateEnd);
        resolve();
      };
      sourceBuffer.addEventListener("updateend", onUpdateEnd);
    });
  }
  return new Promise((resolve, reject) => {
    const detach = () => {
      sourceBuffer.removeEventListener("updateend", onUpdateEnd);
      sourceBuffer.removeEventListener("error", onError);
    };
    const onUpdateEnd = () => {
      detach();
      resolve();
    };
    const onError = (event) => {
      detach();
      reject(new Error(`SourceBuffer append error: ${event.type}`));
    };
    sourceBuffer.addEventListener("updateend", onUpdateEnd);
    sourceBuffer.addEventListener("error", onError);
    try {
      sourceBuffer.appendBuffer(data);
    } catch (error) {
      detach();
      reject(error);
    }
  });
}
2109
+ /**
2110
+ * Buffer flusher helper (P12)
2111
+ *
2112
+ * Removes a time range from a SourceBuffer to manage memory.
2113
+ */
2114
+ /**
2115
+ * Remove a time range from a SourceBuffer.
2116
+ *
2117
+ * Waits for the SourceBuffer to be ready (not updating), then removes
2118
+ * the specified range. Returns a promise that resolves when removal completes.
2119
+ *
2120
+ * @param sourceBuffer - The SourceBuffer to remove data from
2121
+ * @param start - Start of the time range to remove (seconds)
2122
+ * @param end - End of the time range to remove (seconds)
2123
+ * @returns Promise that resolves when removal completes
2124
+ *
2125
+ * @example
2126
+ * await flushBuffer(videoSourceBuffer, 0, 30);
2127
+ */
2128
/**
 * Remove a time range from a SourceBuffer (memory management).
 *
 * Waits for any in-flight update to finish, then issues the removal and
 * resolves on its `updateend` (rejecting on `error` or a synchronous
 * `remove()` throw).
 *
 * @param sourceBuffer - The SourceBuffer to remove data from
 * @param start - Start of the time range to remove (seconds)
 * @param end - End of the time range to remove (seconds)
 * @returns Promise that resolves when removal completes
 *
 * @example
 * await flushBuffer(videoSourceBuffer, 0, 30);
 */
async function flushBuffer(sourceBuffer, start, end) {
  if (sourceBuffer.updating) {
    await new Promise((resolve) => {
      const onUpdateEnd = () => {
        sourceBuffer.removeEventListener("updateend", onUpdateEnd);
        resolve();
      };
      sourceBuffer.addEventListener("updateend", onUpdateEnd);
    });
  }
  return new Promise((resolve, reject) => {
    const detach = () => {
      sourceBuffer.removeEventListener("updateend", onUpdateEnd);
      sourceBuffer.removeEventListener("error", onError);
    };
    const onUpdateEnd = () => {
      detach();
      resolve();
    };
    const onError = (event) => {
      detach();
      reject(new Error(`SourceBuffer remove error: ${event.type}`));
    };
    sourceBuffer.addEventListener("updateend", onUpdateEnd);
    sourceBuffer.addEventListener("error", onError);
    try {
      sourceBuffer.remove(start, end);
    } catch (error) {
      detach();
      reject(error);
    }
  });
}
2159
+ /**
2160
+ * Check if we can calculate presentation duration (have required data).
2161
+ */
2162
/**
 * Whether duration calculation is possible: a presentation exists and at
 * least one of video/audio track selection has been made.
 */
function canCalculateDuration(state) {
  if (!state.presentation) return false;
  return Boolean(state.selectedVideoTrackId || state.selectedAudioTrackId);
}
2166
+ /**
2167
+ * Check if we should calculate presentation duration (conditions met).
2168
+ */
2169
/**
 * Whether duration should be (re)calculated: calculation is possible, the
 * presentation has no duration yet, and at least one selected video/audio
 * track has resolved.
 */
function shouldCalculateDuration(state) {
  if (!canCalculateDuration(state)) return false;
  // Never overwrite a duration the presentation already declares.
  if (state.presentation.duration !== void 0) return false;
  return ["video", "audio"].some((kind) => {
    const id = kind === "video" ? state.selectedVideoTrackId : state.selectedAudioTrackId;
    if (!id) return false;
    const track = getSelectedTrack(state, kind);
    return !!track && isResolvedTrack(track);
  });
}
2177
+ /**
2178
+ * Get duration from the first resolved track (prefer video, fallback to audio).
2179
+ */
2180
/**
 * Duration from the first resolved selected track — video preferred,
 * audio as fallback; undefined when neither has resolved.
 */
function getDurationFromResolvedTracks(state) {
  if (state.selectedVideoTrackId) {
    const videoTrack = getSelectedTrack(state, "video");
    if (videoTrack && isResolvedTrack(videoTrack)) return videoTrack.duration;
  }
  if (state.selectedAudioTrackId) {
    const audioTrack = getSelectedTrack(state, "audio");
    if (audioTrack && isResolvedTrack(audioTrack)) return audioTrack.duration;
  }
}
2186
+ /**
2187
+ * Calculate and set presentation duration from resolved tracks.
2188
+ */
2189
/**
 * Calculate and patch the presentation duration from resolved tracks once
 * the conditions in shouldCalculateDuration() are met.
 */
function calculatePresentationDuration({ state }) {
  return combineLatest([state]).subscribe(([currentState]) => {
    if (!shouldCalculateDuration(currentState)) return;
    const duration = getDurationFromResolvedTracks(currentState);
    // Guard against absent or non-finite (e.g. live/Infinity) durations.
    if (duration === void 0 || !Number.isFinite(duration)) return;
    state.patch({ presentation: { ...currentState.presentation, duration } });
  });
}
2201
+ /**
2202
+ * Generic reusable task that wraps an async run function.
2203
+ *
2204
+ * Owns its own AbortController so it can always be aborted independently.
2205
+ * Optionally composes an external AbortSignal so that a parent's cancellation
2206
+ * propagates into the task's work without requiring the caller to track the
2207
+ * task separately.
2208
+ *
2209
+ * Ordering guarantee: `value` is written before `status` transitions to `'done'`;
2210
+ * `error` is written before `status` transitions to `'error'`. Any reader
2211
+ * observing `status === 'done'` is guaranteed `value` is already present.
2212
+ */
2213
/**
 * Generic reusable task wrapping an async run function.
 *
 * Owns its own AbortController so it can always be aborted independently;
 * optionally composes an external AbortSignal so a parent's cancellation
 * propagates into the task's work.
 *
 * Ordering guarantee: `value` is written before `status` becomes 'done';
 * `error` is written before `status` becomes 'error'. A reader observing
 * `status === 'done'` is guaranteed `value` is already present.
 */
var Task = class {
  id;
  #runFn;
  #abortController = new AbortController();
  #signal;
  #status = "pending";
  #value = void 0;
  #error = void 0;
  constructor(runFn, config) {
    this.#runFn = runFn;
    // id may be a literal, a factory function, or omitted (generated).
    const rawId = config?.id;
    this.id = typeof rawId === "function" ? rawId() : rawId ?? generateId();
    const external = config?.signal;
    this.#signal = external
      ? AbortSignal.any([this.#abortController.signal, external])
      : this.#abortController.signal;
  }
  get status() {
    return this.#status;
  }
  get value() {
    return this.#value;
  }
  get error() {
    return this.#error;
  }
  /** Execute the run function once, recording value/error and status. */
  async run() {
    this.#status = "running";
    try {
      const output = await this.#runFn(this.#signal);
      this.#value = output;
      this.#status = "done";
      return output;
    } catch (failure) {
      this.#error = failure;
      this.#status = "error";
      throw failure;
    }
  }
  /** Signal cancellation to the run function via its AbortSignal. */
  abort() {
    this.#abortController.abort();
  }
};
2253
+ /**
2254
+ * Runs tasks concurrently, deduplicated by task id.
2255
+ *
2256
+ * If a task with a given id is already in flight, subsequent schedule() calls
2257
+ * for that id are silently ignored until the first completes. Tasks are stored
2258
+ * so abortAll() can cancel any in-flight work (e.g. on engine cleanup).
2259
+ */
2260
/**
 * Runs tasks concurrently, deduplicated by task id.
 *
 * While a task with a given id is in flight, further schedule() calls for
 * that id are silently ignored. In-flight tasks are tracked so abortAll()
 * can cancel them (e.g. on engine cleanup). AbortErrors are swallowed;
 * other failures are rethrown to surface as unhandled rejections.
 */
var ConcurrentRunner = class {
  #pending = new Map();
  schedule(task) {
    if (this.#pending.has(task.id)) return;
    this.#pending.set(task.id, task);
    task
      .run()
      .catch((error) => {
        const isAbort = error instanceof Error && error.name === "AbortError";
        if (!isAbort) throw error;
      })
      .finally(() => {
        this.#pending.delete(task.id);
      });
  }
  abortAll() {
    for (const task of this.#pending.values()) task.abort();
    this.#pending.clear();
  }
};
2276
+ /**
2277
+ * Runs tasks one at a time in submission order.
2278
+ *
2279
+ * Each schedule() call returns a Promise that resolves or rejects with the
2280
+ * task's result when it is eventually executed. Tasks wait in queue until the
2281
+ * prior task completes.
2282
+ *
2283
+ * Serialization is achieved by chaining each task's run() onto the tail of a
2284
+ * shared promise chain — no explicit queue or drain loop needed.
2285
+ *
2286
+ * abortAll() aborts all pending (not yet started) tasks and the currently
2287
+ * in-flight task. Pending tasks still run briefly but receive an aborted
2288
+ * signal and are expected to exit early.
2289
+ */
2290
/**
 * Runs tasks one at a time in submission order.
 *
 * schedule() returns a Promise settling with the task's eventual result.
 * Serialization is achieved by chaining each run() onto the tail of a
 * shared promise chain — no explicit queue or drain loop.
 *
 * abortAll() aborts queued (not yet started) tasks and the in-flight one;
 * queued tasks still run briefly but receive an aborted signal and are
 * expected to exit early.
 */
var SerialRunner = class {
  #chain = Promise.resolve();
  #pending = new Set();
  #current = null;
  schedule(task) {
    this.#pending.add(task);
    const result = this.#chain
      .then(() => {
        this.#pending.delete(task);
        this.#current = task;
        return task.run();
      })
      .finally(() => {
        this.#current = null;
      });
    // Swallow rejections on the internal chain only; callers still observe
    // them through `result`.
    this.#chain = result.then(() => {}, () => {});
    return result;
  }
  abortAll() {
    for (const task of this.#pending) task.abort();
    this.#pending.clear();
    this.#current?.abort();
  }
  destroy() {
    this.abortAll();
  }
};
2316
/**
 * Whether the selected track of config.type exists and still needs
 * resolution (i.e. its media playlist has not been fetched yet).
 */
function canResolve(state, config) {
  const track = getSelectedTrack(state, config.type);
  return !!track && !isResolvedTrack(track);
}
2321
+ /**
2322
+ * Determines if track resolution conditions are met.
2323
+ *
2324
+ * Currently always returns true - conditions are checked by canResolveTrack()
2325
+ * and resolving flag. Kept as placeholder for future conditional logic.
2326
+ *
2327
+ * @param state - Current track resolution state
2328
+ * @param event - Current action/event
2329
+ * @returns true (conditions checked elsewhere)
2330
+ */
2331
/**
 * Placeholder gate for track resolution — conditions are enforced by
 * canResolve() and the runner's in-flight dedup. Kept so future
 * conditional logic has an obvious home.
 *
 * @returns {boolean} always true
 */
function shouldResolve(_state, _event) {
  return true;
}
2334
+ /**
2335
+ * Updates a track within a presentation (immutably).
2336
+ * Generic - works for video, audio, or text tracks.
2337
+ */
2338
/**
 * Immutably replace a track (matched by id) within a presentation.
 * Generic — works for video, audio, or text tracks. The input
 * presentation is never mutated; every container on the path to the
 * replaced track is copied.
 */
function updateTrackInPresentation(presentation, resolvedTrack) {
  const swapTrack = (track) => (track.id === resolvedTrack.id ? resolvedTrack : track);
  const updateSwitchingSet = (switchingSet) => ({
    ...switchingSet,
    tracks: switchingSet.tracks.map(swapTrack)
  });
  const updateSelectionSet = (selectionSet) => ({
    ...selectionSet,
    switchingSets: selectionSet.switchingSets.map(updateSwitchingSet)
  });
  return {
    ...presentation,
    selectionSets: presentation.selectionSets.map(updateSelectionSet)
  };
}
2351
+ /**
2352
+ * Resolves unresolved tracks using reactive composition.
2353
+ *
2354
+ * The subscribe closure is pure scheduling logic: it checks conditions and
2355
+ * creates a Task for the selected track when appropriate. The ConcurrentRunner
2356
+ * handles all concurrency concerns — deduplication, parallel execution, and
2357
+ * cleanup.
2358
+ *
2359
+ * Generic version that works for video, audio, or text tracks based on config.
2360
+ * Type parameter T is inferred from config.type (use 'as const' for inference).
2361
+ */
2362
/**
 * Resolve unresolved tracks via reactive composition.
 *
 * The subscriber is pure scheduling: it checks conditions and creates a
 * Task for the selected track; the ConcurrentRunner handles deduplication
 * (by track id), parallelism, and cleanup. Generic over track type via
 * config.type.
 */
function resolveTrack({ state, events }, config) {
  const runner = new ConcurrentRunner();
  const cleanup = combineLatest([state, events]).subscribe(([currentState, event]) => {
    if (!canResolve(currentState, config)) return;
    if (!shouldResolve(currentState, event)) return;
    const track = getSelectedTrack(currentState, config.type);
    if (!track) return;
    const resolvedTrack = track;
    // The runner dedupes by id, so repeated emissions while a fetch is in
    // flight are ignored.
    runner.schedule(new Task(async (signal) => {
      const response = await fetchResolvable(resolvedTrack, { signal });
      const mediaTrack = parseMediaPlaylist(await getResponseText(response), resolvedTrack);
      // Re-read the latest presentation at completion time so we patch on
      // top of any concurrent updates.
      const latestPresentation = state.current.presentation;
      state.patch({ presentation: updateTrackInPresentation(latestPresentation, mediaTrack) });
    }, { id: track.id }));
  });
  return () => {
    runner.abortAll();
    cleanup();
  };
}
2381
+ /**
2382
+ * Pick text track to activate.
2383
+ *
2384
+ * Selection priority (if enabled):
2385
+ * 1. User preference (preferredSubtitleLanguage)
2386
+ * 2. DEFAULT track (if enableDefaultTrack is true and track has DEFAULT=YES + AUTOSELECT=YES)
2387
+ * 3. No auto-selection (user opt-in)
2388
+ *
2389
+ * By default, FORCED tracks are excluded per Apple's HLS spec.
2390
+ *
2391
+ * @param presentation - Presentation with text tracks
2392
+ * @param config - Selection configuration
2393
+ * @returns Track ID or undefined (no auto-selection)
2394
+ */
2395
/**
 * Pick the text track to activate.
 *
 * Priority: (1) user preference (preferredSubtitleLanguage), (2) DEFAULT
 * track when enableDefaultTrack is set, (3) no auto-selection (user
 * opt-in). FORCED tracks are excluded unless includeForcedTracks is set.
 *
 * @param presentation - Presentation with text tracks
 * @param config - Selection configuration
 * @returns Track ID or undefined (no auto-selection)
 */
function pickTextTrack(presentation, config) {
  const textSet = presentation.selectionSets.find((set) => set.type === "text");
  if (!textSet?.switchingSets?.[0]?.tracks.length) return void 0;
  const tracks = textSet.switchingSets[0].tracks;
  // FORCED tracks excluded by default, per Apple's HLS guidance.
  const availableTracks = config.includeForcedTracks ? tracks : tracks.filter((track) => !track.forced);
  if (availableTracks.length === 0) return void 0;
  const { preferredSubtitleLanguage, enableDefaultTrack = false } = config;
  if (preferredSubtitleLanguage) {
    const byLanguage = availableTracks.find((track) => track.language === preferredSubtitleLanguage);
    if (byLanguage) return byLanguage.id;
  }
  if (!enableDefaultTrack) return;
  return availableTracks.find((track) => track.default === true)?.id;
}
2411
+ /**
2412
+ * Check if we can select a track of the given type.
2413
+ *
2414
+ * Returns true when:
2415
+ * - Presentation exists
2416
+ * - Has tracks of the specified type
2417
+ *
2418
+ * Generic over track type - works for video, audio, or text.
2419
+ */
2420
/**
 * Whether a track of config.type can be selected: a presentation exists
 * and its matching selection set's first switching set has tracks.
 * Generic over track type — video, audio, or text.
 */
function canSelectTrack(state, config) {
  const wantedType = config.type;
  const matchingSet = state?.presentation?.selectionSets?.find((set) => set.type === wantedType);
  return !!matchingSet?.switchingSets?.[0]?.tracks.length;
}
2423
+ /**
2424
+ * Check if we should select a track of the given type.
2425
+ *
2426
+ * Returns true when:
2427
+ * - Track of this type is not already selected
2428
+ *
2429
+ * Generic over track type - works for video, audio, or text.
2430
+ *
2431
+ * @TODO figure out reactive model for ABR cases - right now we're only selecting
2432
+ * if we have nothing selected (CJP)
2433
+ */
2434
/**
 * Whether a track of config.type should be selected: true only while no
 * selection of that type exists yet. Generic over track type.
 *
 * @TODO figure out reactive model for ABR cases - right now we're only
 * selecting if we have nothing selected (CJP)
 */
function shouldSelectTrack(state, config) {
  const selectedIdKey = SelectedTrackIdKeyByType[config.type];
  return !state[selectedIdKey];
}
2437
+ /**
2438
+ * Select video track orchestration.
2439
+ *
2440
+ * Selects video track when:
2441
+ * - Presentation exists
2442
+ * - No video track is selected yet
2443
+ *
2444
+ * Uses bandwidth-based quality selection algorithm.
2445
+ *
2446
+ * @example
2447
+ * const cleanup = selectVideoTrack(
2448
+ * { state, owners, events },
2449
+ * { initialBandwidth: 2_000_000 }
2450
+ * );
2451
+ */
2452
/**
 * Select video track orchestration: once a presentation has video tracks
 * and none is selected, pick the first track of the configured type and
 * patch its id into state. The `selecting` flag guards against re-entrant
 * subscription callbacks while a patch is in flight.
 *
 * @example
 * const cleanup = selectVideoTrack({ state, owners, events });
 */
function selectVideoTrack({ state }, config = { type: "video" }) {
  let selecting = false;
  return state.subscribe(async (currentState) => {
    if (selecting) return;
    if (!canSelectTrack(currentState, config) || !shouldSelectTrack(currentState, config)) return;
    selecting = true;
    try {
      const candidates = currentState.presentation?.selectionSets.find(({ type }) => type === config.type)?.switchingSets[0]?.tracks;
      const selectedTrackId = candidates?.[0]?.id;
      if (selectedTrackId) {
        state.patch({ [SelectedTrackIdKeyByType[config.type]]: selectedTrackId });
      }
    } finally {
      selecting = false;
    }
  });
}
2468
/**
 * Select audio track orchestration.
 *
 * Subscribes to state and, once a presentation with audio tracks exists and
 * no audio track is selected yet, patches the first available audio track id
 * into `selectedAudioTrackId`.
 *
 * @example
 * const cleanup = selectAudioTrack(
 *   { state, owners, events },
 *   { preferredAudioLanguage: 'en' }
 * );
 */
function selectAudioTrack({ state }, config = { type: "audio" }) {
  let inProgress = false;
  return state.subscribe(async (snapshot) => {
    if (inProgress || !canSelectTrack(snapshot, config) || !shouldSelectTrack(snapshot, config)) return;
    try {
      inProgress = true;
      const audioSet = snapshot.presentation?.selectionSets.find(({ type }) => type === "audio");
      const trackId = audioSet?.switchingSets[0]?.tracks[0]?.id;
      if (trackId) state.patch({ selectedAudioTrackId: trackId });
    } finally {
      inProgress = false;
    }
  });
}
2496
/**
 * Select text track orchestration.
 *
 * Subscribes to state and, once a presentation with text tracks exists and
 * no text track is selected yet, delegates the choice to `pickTextTrack`
 * and patches the result into `selectedTextTrackId`.
 *
 * Note: Currently does not auto-select (user opt-in).
 *
 * @example
 * const cleanup = selectTextTrack({ state, owners, events }, {});
 */
function selectTextTrack({ state }, config = { type: "text" }) {
  let inProgress = false;
  return state.subscribe(async (snapshot) => {
    if (inProgress || !canSelectTrack(snapshot, config) || !shouldSelectTrack(snapshot, config)) return;
    try {
      inProgress = true;
      const trackId = pickTextTrack(snapshot.presentation, config);
      if (trackId) state.patch({ selectedTextTrackId: trackId });
    } finally {
      inProgress = false;
    }
  });
}
2521
/**
 * Check if the last segment of a track has been appended to a SourceBuffer.
 *
 * Checks by segment ID rather than a pipeline flag, so it is robust across
 * quality switches (different tracks have different segment IDs) and
 * back-buffer flushes (flushed segment IDs are removed from the model).
 *
 * An empty segment list counts as fully appended; a missing actor counts
 * as not appended.
 */
function isLastSegmentAppended(segments, actor) {
  if (!segments.length) return true;
  const last = segments.at(-1);
  if (!last) return false;
  const appended = actor?.snapshot.context.segments.some(
    (seg) => seg.id === last.id && !seg.partial
  );
  return appended ?? false;
}
2534
/**
 * Check if the last segment has been appended for each selected track.
 *
 * Handles video-only, audio-only, and video+audio scenarios.
 * A selected but unresolved track is considered not ready.
 */
function hasLastSegmentLoaded(state, owners) {
  const videoTrack = state.selectedVideoTrackId ? getSelectedTrack(state, "video") : void 0;
  const audioTrack = state.selectedAudioTrackId ? getSelectedTrack(state, "audio") : void 0;
  // Any selected-but-unresolved track blocks readiness outright.
  if ((videoTrack && !isResolvedTrack(videoTrack)) || (audioTrack && !isResolvedTrack(audioTrack))) {
    return false;
  }
  const pairs = [
    [videoTrack, owners.videoBufferActor],
    [audioTrack, owners.audioBufferActor],
  ];
  for (const [track, actor] of pairs) {
    if (!track) continue;
    if (!isLastSegmentAppended(track.segments, actor)) return false;
  }
  return true;
}
2553
/**
 * Check if we can call endOfStream: requires both an attached MediaSource
 * and a resolved presentation.
 */
function canEndStream(state, owners) {
  const hasMediaSource = Boolean(owners.mediaSource);
  const hasPresentation = Boolean(state.presentation);
  return hasMediaSource && hasPresentation;
}
2559
/**
 * Check if we should call endOfStream.
 *
 * Evaluates a chain of gates, all of which must pass:
 * - canEndStream: MediaSource attached and presentation resolved
 * - MediaSource is 'open' (MSE only allows endOfStream() in this state)
 * - media element (when present) has at least HAVE_METADATA
 * - every selected track has its SourceBuffer created
 * - no SourceBufferActor is mid-operation
 * - the final segment of every selected track has been appended
 * - playback has reached at least the start of the final segment
 *   (uses whichever resolved track is available as the reference)
 *
 * @param state - engine state (selected track ids, presentation)
 * @param owners - engine owners (mediaSource, mediaElement, buffers, actors)
 * @returns true when it is both legal and desirable to call endOfStream()
 */
function shouldEndStream(state, owners) {
  if (!canEndStream(state, owners)) return false;
  const { mediaSource, mediaElement } = owners;
  if (mediaSource.readyState !== "open") return false;
  if (mediaElement && mediaElement.readyState < HTMLMediaElement.HAVE_METADATA) return false;
  const hasVideoTrack = !!state.selectedVideoTrackId;
  const hasAudioTrack = !!state.selectedAudioTrackId;
  // A selected track whose SourceBuffer has not been created yet means
  // setup is still in progress — too early to end the stream.
  if (hasVideoTrack && !owners.videoBuffer) return false;
  if (hasAudioTrack && !owners.audioBuffer) return false;
  // Never end while an append/remove operation is in flight.
  if (owners.videoBufferActor?.snapshot.status === "updating") return false;
  if (owners.audioBufferActor?.snapshot.status === "updating") return false;
  if (!hasLastSegmentLoaded(state, owners)) return false;
  if (mediaElement) {
    const videoTrack = hasVideoTrack ? getSelectedTrack(state, "video") : void 0;
    const audioTrack = hasAudioTrack ? getSelectedTrack(state, "audio") : void 0;
    // Prefer video as the timing reference; fall back to audio.
    const refTrack = videoTrack && isResolvedTrack(videoTrack) ? videoTrack : audioTrack && isResolvedTrack(audioTrack) ? audioTrack : void 0;
    if (refTrack && refTrack.segments.length > 0) {
      const lastSeg = refTrack.segments[refTrack.segments.length - 1];
      // Defer ending until the playhead has reached the final segment.
      if (mediaElement.currentTime < lastSeg.startTime) return false;
    }
  }
  return true;
}
2585
/**
 * Wait for all currently-updating SourceBufferActors to finish.
 * Uses actor status rather than raw SourceBuffer.updating so the wait is
 * aligned with the same abstraction that owns all buffer operations.
 *
 * Resolves immediately when no actor is updating.
 */
function waitForSourceBuffersReady$1(owners) {
  const updating = [owners.videoBufferActor, owners.audioBufferActor].filter(
    (actor) => actor !== void 0 && actor.snapshot.status === "updating"
  );
  if (!updating.length) return Promise.resolve();
  const waits = updating.map(
    (actor) =>
      new Promise((resolve) => {
        const unsubscribe = actor.subscribe((snap) => {
          if (snap.status === "updating") return;
          unsubscribe();
          resolve();
        });
      })
  );
  return Promise.all(waits).then(() => void 0);
}
2602
/**
 * Get the highest buffered end time across all active SourceBuffers.
 * Used to set the final duration from actual container timestamps rather
 * than playlist metadata, which handles both shorter and longer cases.
 *
 * Returns 0 when no buffer has any buffered range.
 */
function getMaxBufferedEnd$1(owners) {
  const ends = [owners.videoBuffer, owners.audioBuffer]
    .filter((buf) => buf && buf.buffered.length > 0)
    .map((buf) => buf.buffered.end(buf.buffered.length - 1));
  return ends.reduce((max, end) => (end > max ? end : max), 0);
}
2615
/**
 * End of stream task (module-level, pure).
 * Sets the final duration from actual buffered end time, then calls endOfStream().
 *
 * No-ops when the MediaSource has already ended, and bails if the
 * MediaSource leaves the 'open' state while waiting for buffers.
 */
const endOfStreamTask = async ({ currentOwners }, _context) => {
  const { mediaSource } = currentOwners;
  if (mediaSource.readyState === "ended") return;
  // MSE forbids duration changes / endOfStream while buffers are updating.
  await waitForSourceBuffersReady$1(currentOwners);
  if (mediaSource.readyState !== "open") return;
  // Prefer the real buffered end over playlist metadata for the final duration.
  const bufferedEnd = getMaxBufferedEnd$1(currentOwners);
  if (bufferedEnd > 0) mediaSource.duration = bufferedEnd;
  mediaSource.endOfStream();
  // Yield a frame so the browser can process the state change before callers proceed.
  await new Promise((resolve) => requestAnimationFrame(resolve));
};
2629
/**
 * Call endOfStream when the last segment has been appended.
 * This signals to the browser that the stream is complete.
 *
 * Per the MSE spec, appendBuffer() remains valid after endOfStream() —
 * seeks that require re-appending earlier segments will still work.
 * What becomes blocked is calling endOfStream() again, addSourceBuffer(),
 * and MediaSource.duration updates.
 *
 * @returns cleanup function that tears down all subscriptions
 */
function endOfStream({ state, owners }) {
  // Latches after endOfStreamTask runs; re-armed when the MediaSource is
  // observed 'open' again (e.g. after a seek re-append re-opens it).
  let hasEnded = false;
  let destroyed = false;
  // Unsubscribers for the per-actor subscriptions created in cleanupOwners.
  const activeActorUnsubs = [];
  const runEvaluate = async () => {
    if (destroyed) return;
    const currentState = state.current;
    const currentOwners = owners.current;
    if (hasEnded) {
      // Only re-arm once the MediaSource has re-opened.
      if (currentOwners.mediaSource?.readyState !== "open") return;
      hasEnded = false;
    }
    if (!shouldEndStream(currentState, currentOwners)) return;
    // Set before the (async) task so concurrent evaluations don't double-end.
    hasEnded = true;
    try {
      await endOfStreamTask({ currentOwners }, {});
    } catch (error) {
      console.error("Failed to call endOfStream:", error);
    }
  };
  // Whenever owners changes, rebuild subscriptions against the current actors.
  const cleanupOwners = owners.subscribe((currentOwners) => {
    activeActorUnsubs.forEach((u) => u());
    activeActorUnsubs.length = 0;
    for (const actor of [currentOwners.videoBufferActor, currentOwners.audioBufferActor]) {
      if (!actor) continue;
      // NOTE(review): the first emission is skipped — presumably subscribe
      // replays the current snapshot immediately; confirm actor semantics.
      let isFirst = true;
      activeActorUnsubs.push(actor.subscribe(() => {
        if (isFirst) {
          isFirst = false;
          return;
        }
        runEvaluate();
      }));
    }
  });
  // Also re-evaluate on any state/owners change.
  const cleanupCombineLatest = combineLatest([state, owners]).subscribe(async () => runEvaluate());
  return () => {
    destroyed = true;
    activeActorUnsubs.forEach((u) => u());
    cleanupOwners();
    cleanupCombineLatest();
  };
}
2681
/**
 * Check if we have the minimum requirements to create MediaSource:
 * a media element to attach to and a presentation URL to load.
 */
function canSetup(state, owners) {
  const hasElement = !isNil(owners.mediaElement);
  const hasUrl = !isNil(state.presentation?.url);
  return hasElement && hasUrl;
}
2687
/**
 * Check if we should proceed with MediaSource creation.
 * True only while no MediaSource exists yet.
 * Placeholder for future conditions.
 */
function shouldSetup(_state, owners) {
  const { mediaSource } = owners;
  return isNil(mediaSource);
}
2694
/**
 * Setup MediaSource orchestration.
 *
 * Creates and attaches MediaSource when:
 * - mediaElement exists in owners
 * - presentation.url exists in state
 *
 * Updates owners.mediaSource after successful setup.
 *
 * @returns cleanup function that aborts any in-flight setup and unsubscribes
 */
function setupMediaSource({ state, owners }) {
  // Guards against re-entrant setup while the async attach is in flight.
  let settingUp = false;
  let abortController = null;
  const unsubscribe = combineLatest([state, owners]).subscribe(async ([currentState, currentOwners]) => {
    if (!canSetup(currentState, currentOwners) || !shouldSetup(currentState, currentOwners) || settingUp) return;
    try {
      settingUp = true;
      abortController = new AbortController();
      const mediaSource = createMediaSource({ preferManaged: true });
      attachMediaSource(mediaSource, currentOwners.mediaElement);
      // sourceopen must fire before SourceBuffers can be added downstream.
      await waitForSourceOpen(mediaSource, abortController.signal);
      owners.patch({ mediaSource });
    } catch (error) {
      // Teardown during setup aborts the wait — expected, not an error.
      if (error instanceof DOMException && error.name === "AbortError") return;
      throw error;
    } finally {
      settingUp = false;
    }
  });
  return () => {
    abortController?.abort();
    unsubscribe();
  };
}
2727
/**
 * Thrown when a message is sent to the actor in a state that does not
 * accept messages (currently: 'updating').
 */
var SourceBufferActorError = class extends Error {
  // Class-field form: set the name as an own property on each instance.
  name = "SourceBufferActorError";
  /** @param {string} message - description of the rejected operation */
  constructor(message) {
    super(message);
  }
};
2737
/**
 * Snapshot a TimeRanges-like object into a plain array of
 * `{ start, end }` records so it can live in immutable state.
 */
function snapshotBuffered(buffered) {
  return Array.from({ length: buffered.length }, (_, index) => ({
    start: buffered.start(index),
    end: buffered.end(index),
  }));
}
2745
/**
 * Build a Task that appends an init segment and records which track the
 * init segment belongs to in the actor context.
 */
function appendInitTask(message, { signal, getCtx, sourceBuffer }) {
  return new Task(async (taskSignal) => {
    const context = getCtx();
    if (taskSignal.aborted) return context;
    await appendSegment(sourceBuffer, message.data);
    const initTrackId = message.meta.trackId;
    return { ...context, initTrackId };
  }, { signal });
}
2756
/**
 * Build a Task that appends a media segment and updates the actor context's
 * segment list and buffered-range snapshot.
 *
 * Streaming appends (non-ArrayBuffer data) publish the segment with
 * `partial: true` via onPartialContext before the append completes, then
 * finalize it (without the flag) once the append resolves.
 */
function appendSegmentTask(message, { signal, getCtx, sourceBuffer, onPartialContext }) {
  return new Task(async (taskSignal) => {
    const ctx = getCtx();
    if (taskSignal.aborted) return ctx;
    const { meta } = message;
    // Tolerance for float start-time comparison: segments at (effectively)
    // the same start time replace prior entries (e.g. quality switches).
    const EPSILON = 1e-4;
    const filtered = ctx.segments.filter((s) => Math.abs(s.startTime - meta.startTime) >= EPSILON);
    // Non-ArrayBuffer data is treated as a streaming append — publish the
    // in-flight segment as partial so observers see it early.
    if (!(message.data instanceof ArrayBuffer)) onPartialContext({
      ...ctx,
      segments: [...filtered, {
        id: meta.id,
        startTime: meta.startTime,
        duration: meta.duration,
        trackId: meta.trackId,
        ...meta.trackBandwidth !== void 0 && { trackBandwidth: meta.trackBandwidth },
        partial: true
      }],
      bufferedRanges: ctx.bufferedRanges
    });
    await appendSegment(sourceBuffer, message.data, taskSignal);
    // Finalized entry omits `partial`; buffered ranges re-snapshotted after append.
    return {
      ...ctx,
      segments: [...filtered, {
        id: meta.id,
        startTime: meta.startTime,
        duration: meta.duration,
        trackId: meta.trackId,
        ...meta.trackBandwidth !== void 0 && { trackBandwidth: meta.trackBandwidth }
      }],
      bufferedRanges: snapshotBuffered(sourceBuffer.buffered)
    };
  }, { signal });
}
2789
/**
 * Build a Task that flushes a time range from the SourceBuffer, then prunes
 * the context's segment model down to segments whose midpoint still lies
 * inside a buffered range.
 */
function removeTask(message, { signal, getCtx, sourceBuffer }) {
  return new Task(async (taskSignal) => {
    const context = getCtx();
    if (taskSignal.aborted) return context;
    await flushBuffer(sourceBuffer, message.start, message.end);
    const bufferedRanges = snapshotBuffered(sourceBuffer.buffered);
    // A segment survives only if its midpoint is still buffered.
    const stillBuffered = (seg) => {
      const midpoint = seg.startTime + seg.duration / 2;
      return bufferedRanges.some((range) => midpoint >= range.start && midpoint < range.end);
    };
    return {
      ...context,
      segments: context.segments.filter(stillBuffered),
      bufferedRanges,
    };
  }, { signal });
}
2806
/** Dispatch table: actor message type → Task factory. */
const messageTaskFactories = {
  "append-init": appendInitTask,
  "append-segment": appendSegmentTask,
  remove: removeTask,
};
/**
 * Convert an actor message into its corresponding Task via the dispatch table.
 */
function messageToTask(message, options) {
  return messageTaskFactories[message.type](message, options);
}
2815
/**
 * Create an actor that serializes all operations against one SourceBuffer.
 *
 * Exposes a tiny state machine with statuses 'idle' | 'updating' |
 * 'destroyed', a context holding the appended-segment model and buffered
 * ranges, and three operations: send (one message), batch (several messages
 * applied with one final state commit), destroy.
 *
 * @param sourceBuffer - the SourceBuffer all tasks operate on
 * @param initialContext - optional overrides merged into the default context
 */
function createSourceBufferActor(sourceBuffer, initialContext) {
  const state = createState({
    status: "idle",
    context: {
      segments: [],
      bufferedRanges: [],
      initTrackId: void 0,
      ...initialContext
    }
  });
  // All tasks run strictly one at a time through this runner.
  const runner = new SerialRunner();
  // Commit a finished task's context; never resurrect a destroyed actor.
  function applyResult(newContext) {
    const status = state.current.status === "destroyed" ? "destroyed" : "idle";
    state.patch({
      status,
      context: newContext
    });
    state.flush();
  }
  // On failure: return to idle (or stay destroyed) and rethrow to the caller.
  function handleError(e) {
    const status = state.current.status === "destroyed" ? "destroyed" : "idle";
    state.patch({ status });
    state.flush();
    throw e;
  }
  return {
    get snapshot() {
      return state.current;
    },
    subscribe(listener) {
      return state.subscribe(listener);
    },
    // Run a single message; rejects if the actor is not idle.
    send(message, signal) {
      if (state.current.status !== "idle") return Promise.reject(new SourceBufferActorError(`send() called while actor is ${state.current.status}`));
      state.patch({ status: "updating" });
      const onPartialContext = (ctx) => {
        state.patch({
          status: "updating",
          context: ctx
        });
        state.flush();
      };
      const task = messageToTask(message, {
        signal,
        getCtx: () => state.current.context,
        sourceBuffer,
        onPartialContext
      });
      return runner.schedule(task).then(applyResult).catch(handleError);
    },
    // Run several messages back-to-back; intermediate results are threaded
    // through workingCtx and only the final result is committed to state.
    batch(messages, signal) {
      if (state.current.status !== "idle") return Promise.reject(new SourceBufferActorError(`batch() called while actor is ${state.current.status}`));
      if (messages.length === 0) return Promise.resolve();
      state.patch({ status: "updating" });
      let workingCtx = state.current.context;
      const onPartialContext = (ctx) => {
        state.patch({
          status: "updating",
          context: ctx
        });
        state.flush();
      };
      for (const message of messages.slice(0, -1)) {
        const task = messageToTask(message, {
          signal,
          getCtx: () => workingCtx,
          sourceBuffer,
          onPartialContext
        });
        // NOTE(review): these intermediate promises have no rejection
        // handler — if an intermediate task rejects, this surfaces as an
        // unhandled rejection and workingCtx stays stale. Confirm
        // SerialRunner's failure semantics (e.g. whether a failed task
        // also fails subsequently scheduled tasks).
        runner.schedule(task).then((newCtx) => {
          workingCtx = newCtx;
        });
      }
      const lastTask = messageToTask(messages[messages.length - 1], {
        signal,
        getCtx: () => workingCtx,
        sourceBuffer,
        onPartialContext
      });
      return runner.schedule(lastTask).then(applyResult).catch(handleError);
    },
    destroy() {
      state.patch({ status: "destroyed" });
      state.flush();
      runner.destroy();
    }
  };
}
2903
/**
 * Build MIME codec string from track metadata.
 *
 * @param track - Resolved track with mimeType and codecs
 * @returns MIME codec string (e.g., 'video/mp4; codecs="avc1.42E01E,mp4a.40.2"')
 *
 * @example
 * buildMimeCodec({ mimeType: 'video/mp4', codecs: ['avc1.42E01E'] })
 * // => 'video/mp4; codecs="avc1.42E01E"'
 */
function buildMimeCodec(track) {
  const codecList = track.codecs ?? [];
  return `${track.mimeType}; codecs="${codecList.join(",")}"`;
}
2917
/**
 * Setup all needed SourceBuffers as a single coordinated operation.
 *
 * Waits until ALL selected tracks (video and/or audio) are resolved with
 * codecs, then creates every SourceBuffer in one synchronous block before
 * patching owners. This guarantees that downstream consumers (e.g.
 * loadSegments) never see a partial set of SourceBuffers — preventing the
 * Firefox bug where appending to a video SourceBuffer before the audio
 * SourceBuffer exists causes mozHasAudio to be permanently false.
 *
 * Handles video-only, audio-only, and combined presentations correctly:
 * only the tracks that are actually selected are waited on and created.
 *
 * @example
 * const cleanup = setupSourceBuffers({ state, owners });
 */
function setupSourceBuffers({ state, owners }) {
  // One-shot: SourceBuffers are created at most once per engine lifetime.
  let setupDone = false;
  return combineLatest([state, owners]).subscribe(async ([currentState, currentOwners]) => {
    if (setupDone) return;
    if (!currentOwners.mediaSource) return;
    const videoSelected = !!currentState.selectedVideoTrackId;
    const audioSelected = !!currentState.selectedAudioTrackId;
    if (!videoSelected && !audioSelected) return;
    const videoTrack = videoSelected ? getSelectedTrack(currentState, "video") : null;
    const audioTrack = audioSelected ? getSelectedTrack(currentState, "audio") : null;
    // Wait until every selected track is resolved with codecs so all
    // buffers can be created together in the same synchronous block.
    if (videoSelected && (!videoTrack || !isResolvedTrack(videoTrack) || !videoTrack.codecs?.length)) return;
    if (audioSelected && (!audioTrack || !isResolvedTrack(audioTrack) || !audioTrack.codecs?.length)) return;
    setupDone = true;
    const patch = {};
    if (videoSelected && videoTrack && isResolvedTrack(videoTrack)) {
      const buffer = createSourceBuffer(currentOwners.mediaSource, buildMimeCodec(videoTrack));
      patch.videoBuffer = buffer;
      patch.videoBufferActor = createSourceBufferActor(buffer);
    }
    if (audioSelected && audioTrack && isResolvedTrack(audioTrack)) {
      const buffer = createSourceBuffer(currentOwners.mediaSource, buildMimeCodec(audioTrack));
      patch.audioBuffer = buffer;
      patch.audioBufferActor = createSourceBufferActor(buffer);
    }
    // Single patch so consumers never observe a partial buffer set.
    owners.patch(patch);
    await new Promise((resolve) => requestAnimationFrame(resolve));
  });
}
2961
/**
 * Get all text tracks from presentation (first text switching set),
 * or an empty array when none exist.
 */
function getTextTracks(presentation) {
  const tracks = presentation?.selectionSets
    ?.find((set) => set.type === "text")
    ?.switchingSets?.[0]?.tracks;
  return tracks ?? [];
}
2970
/**
 * Check if we can setup text tracks.
 *
 * Requires:
 * - mediaElement exists
 * - presentation has text tracks to setup
 */
function canSetupTextTracks(state, owners) {
  if (!owners.mediaElement) return false;
  return getTextTracks(state.presentation).length > 0;
}
2980
/**
 * Check if we should setup text tracks — true while no track map exists yet.
 */
function shouldSetupTextTracks(owners) {
  const { textTracks } = owners;
  return !textTracks;
}
2986
/**
 * Create a <track> element for a text track.
 *
 * Note: We use DOM <track> elements instead of the TextTrack JS API
 * because there's no way to remove TextTracks added via addTextTrack().
 */
function createTrackElement(track) {
  const element = document.createElement("track");
  element.id = track.id;
  element.kind = track.kind;
  element.label = track.label;
  if (track.language) element.srclang = track.language;
  if (track.default) element.default = true;
  // Assign src last, after all metadata attributes are in place.
  element.src = track.url;
  return element;
}
3002
/**
 * Setup text tracks orchestration.
 *
 * Triggers when:
 * - mediaElement exists
 * - presentation is resolved (has text tracks)
 *
 * Creates <track> elements for all text tracks and adds them as children
 * to the media element. This allows the browser's native text track rendering.
 *
 * Note: Uses DOM track elements instead of TextTrack API because tracks
 * added via addTextTrack() cannot be removed.
 *
 * @returns cleanup function that removes all created <track> elements
 *
 * @example
 * const cleanup = setupTextTracks({ state, owners });
 */
function setupTextTracks({ state, owners }) {
  // One-shot: track elements are created at most once per engine lifetime.
  let hasSetup = false;
  let createdTracks = [];
  const unsubscribe = combineLatest([state, owners]).subscribe(([s, o]) => {
    if (hasSetup) return;
    if (!canSetupTextTracks(s, o) || !shouldSetupTextTracks(o)) return;
    hasSetup = true;
    const textTracks = getTextTracks(s.presentation);
    if (textTracks.length === 0) return;
    // Map of SPF track id → its <track> element, published via owners.
    const trackMap = /* @__PURE__ */ new Map();
    for (const track of textTracks) {
      const trackElement = createTrackElement(track);
      o.mediaElement.appendChild(trackElement);
      trackMap.set(track.id, trackElement);
      createdTracks.push(trackElement);
    }
    owners.patch({ textTracks: trackMap });
  });
  return () => {
    for (const trackElement of createdTracks) trackElement.remove();
    createdTracks = [];
    unsubscribe();
  };
}
3042
/**
 * Sync selectedTextTrackId from DOM text track mode changes.
 *
 * Listens to the `change` event on `media.textTracks` and updates
 * `selectedTextTrackId` when external code (e.g. the captions button via
 * `toggleSubtitles()`) changes a subtitle/caption track mode to 'showing'.
 *
 * This bridges the core store's `toggleSubtitles()` with SPF's reactive text
 * track pipeline (`syncTextTrackModes`, `loadTextTrackCues`). Without this
 * bridge, direct DOM mode changes would be immediately overridden by
 * `syncTextTrackModes` on the next SPF state update.
 *
 * When a subtitle/caption track's mode is 'showing', its DOM `id` — which
 * matches the SPF track ID set by `setupTextTracks` — is written to
 * `selectedTextTrackId`. When no subtitle/caption track is 'showing',
 * `selectedTextTrackId` is cleared along with the deselected track's
 * `textBufferState` entry — setting mode to 'disabled' clears native cues from
 * the track element, so the buffer must be reset to re-fetch cues on re-enable.
 *
 * @example
 * const cleanup = syncSelectedTextTrackFromDom({ state, owners });
 */
function syncSelectedTextTrackFromDom({ state, owners }) {
  // Track the media element the listener is bound to so we only
  // rebind when the element itself changes.
  let lastMediaElement;
  let removeListener = null;
  const unsubscribe = owners.subscribe((currentOwners) => {
    const { mediaElement } = currentOwners;
    if (mediaElement === lastMediaElement) return;
    removeListener?.();
    removeListener = null;
    lastMediaElement = mediaElement;
    if (!mediaElement) return;
    const sync = () => {
      // Only subtitle/caption kinds participate; other kinds (metadata,
      // chapters) never drive selectedTextTrackId.
      const newId = Array.from(mediaElement.textTracks).find((t) => t.mode === "showing" && (t.kind === "subtitles" || t.kind === "captions"))?.id || void 0;
      const current = state.current;
      if (current.selectedTextTrackId === newId) return;
      if (newId) state.patch({ selectedTextTrackId: newId });
      else {
        const prevId = current.selectedTextTrackId;
        // Drop the deselected track's buffer entry so cues are re-fetched
        // if the track is re-enabled later.
        if (prevId && current.textBufferState?.[prevId]) {
          const next = { ...current.textBufferState };
          delete next[prevId];
          state.patch({
            selectedTextTrackId: void 0,
            textBufferState: next
          });
        } else state.patch({ selectedTextTrackId: void 0 });
      }
    };
    removeListener = listen(mediaElement.textTracks, "change", sync);
  });
  return () => {
    removeListener?.();
    unsubscribe();
  };
}
3098
/**
 * Check if we can sync text track modes.
 *
 * Requires the textTracks map to exist and contain at least one entry.
 */
function canSyncTextTrackModes(owners) {
  const { textTracks } = owners;
  return textTracks != null && textTracks.size > 0;
}
3107
/**
 * Sync text track modes orchestration.
 *
 * Manages track element modes based on selectedTextTrackId:
 * - Selected track: mode = "showing"
 * - Other tracks: mode = "hidden"
 * - No selection: all tracks mode = "hidden"
 *
 * Note: Uses "hidden" instead of "disabled" for non-selected tracks
 * so they remain available in the browser's track menu.
 *
 * @example
 * const cleanup = syncTextTrackModes({ state, owners });
 */
function syncTextTrackModes({ state, owners }) {
  return combineLatest([state, owners]).subscribe(([currentState, currentOwners]) => {
    if (!canSyncTextTrackModes(currentOwners)) return;
    const selectedId = currentState.selectedTextTrackId;
    for (const [trackId, trackElement] of currentOwners.textTracks) {
      trackElement.track.mode = trackId === selectedId ? "showing" : "hidden";
    }
  });
}
3129
/**
 * Check if we can update MediaSource duration: requires an attached
 * MediaSource and a presentation with a known duration.
 */
function canUpdateDuration(state, owners) {
  const { mediaSource } = owners;
  const { presentation } = state;
  return Boolean(mediaSource && presentation && hasPresentationDuration(presentation));
}
3135
/**
 * Get the maximum buffered end time across all SourceBuffers.
 *
 * BUG FIX: this read `owners.videoSourceBuffer`/`owners.audioSourceBuffer`,
 * but the owners store is patched with `videoBuffer`/`audioBuffer`
 * (see setupSourceBuffers), so the function always computed 0. It now reads
 * the canonical keys and falls back to the legacy names for compatibility.
 *
 * @returns highest buffered end in seconds, or 0 when nothing is buffered
 */
function getMaxBufferedEnd(owners) {
  const buffers = [
    owners.videoBuffer ?? owners.videoSourceBuffer,
    owners.audioBuffer ?? owners.audioSourceBuffer,
  ].filter((buf) => buf != null);
  let maxEnd = 0;
  for (const buffer of buffers) {
    const { buffered } = buffer;
    if (buffered.length > 0) {
      const end = buffered.end(buffered.length - 1);
      if (end > maxEnd) maxEnd = end;
    }
  }
  return maxEnd;
}
3150
/**
 * Check if we should update MediaSource duration (conditions met):
 * MediaSource is open, the presentation duration is a positive finite
 * number, and the MediaSource duration has not been set yet (NaN).
 */
function shouldUpdateDuration(state, owners) {
  if (!canUpdateDuration(state, owners)) return false;
  const { mediaSource } = owners;
  if (mediaSource.readyState !== "open") return false;
  const { duration } = state.presentation;
  const isValidDuration = Number.isFinite(duration) && duration > 0;
  if (!isValidDuration) return false;
  // Only set the duration once — an unset MediaSource duration is NaN.
  return Number.isNaN(mediaSource.duration);
}
3162
/**
 * Wait for all currently-updating SourceBuffers to finish.
 *
 * The MSE spec forbids setting MediaSource.duration while any attached
 * SourceBuffer has updating === true. This defers until all are idle.
 *
 * BUG FIX: this read `owners.videoSourceBuffer`/`owners.audioSourceBuffer`,
 * but the owners store is patched with `videoBuffer`/`audioBuffer`
 * (see setupSourceBuffers), so it resolved immediately even while buffers
 * were updating. It now reads the canonical keys with a legacy fallback.
 */
function waitForSourceBuffersReady(owners) {
  const updating = [
    owners.videoBuffer ?? owners.videoSourceBuffer,
    owners.audioBuffer ?? owners.audioSourceBuffer,
  ].filter((buf) => buf?.updating === true);
  if (updating.length === 0) return Promise.resolve();
  return Promise.all(updating.map((buf) => new Promise((resolve) => buf.addEventListener("updateend", () => resolve(), { once: true })))).then(() => void 0);
}
3173
/**
 * Update MediaSource duration when presentation duration becomes available.
 *
 * Waits for SourceBuffers to go idle (the MSE spec forbids duration changes
 * while any SourceBuffer is updating), then sets the duration to the larger
 * of the presentation duration and the actual buffered end.
 *
 * @returns cleanup function; in-flight async work bails after teardown
 */
function updateDuration({ state, owners }) {
  let destroyed = false;
  const unsubscribe = combineLatest([state, owners]).subscribe(async ([currentState, currentOwners]) => {
    if (!shouldUpdateDuration(currentState, currentOwners)) return;
    const { mediaSource } = currentOwners;
    await waitForSourceBuffersReady(currentOwners);
    // Conditions may have changed while waiting — re-check before writing.
    if (destroyed || mediaSource.readyState !== "open") return;
    let duration = currentState.presentation.duration;
    // Never set a duration shorter than what is actually buffered.
    const maxBufferedEnd = getMaxBufferedEnd(currentOwners);
    if (maxBufferedEnd > duration) duration = maxBufferedEnd;
    mediaSource.duration = duration;
  });
  return () => {
    destroyed = true;
    unsubscribe();
  };
}
3193
/**
 * Create a POC playback engine.
 *
 * Wires together all orchestrations to create a reactive playback pipeline:
 * 1. Resolve presentation (multivariant playlist)
 * 2. Select initial video, audio, and text tracks
 * 3. Resolve selected tracks (media playlists)
 * 4. Setup MediaSource and duration
 * 5. Setup SourceBuffers, load segments, end the stream
 * 6. Setup and sync text tracks / cues
 *
 * Note: This is a POC - does not yet load/append segments.
 *
 * @param config - Playback engine configuration
 * @returns Playback engine instance with state, owners, and destroy function
 *
 * @example
 * const engine = createPlaybackEngine({
 *   initialBandwidth: 2_000_000,
 *   preferredAudioLanguage: 'en',
 * });
 *
 * // Initialize by patching state and owners
 * engine.owners.patch({ mediaElement: document.querySelector('video') });
 * engine.state.patch({
 *   presentation: { url: 'https://example.com/playlist.m3u8' },
 *   preload: 'auto',
 * });
 *
 * // Inspect state
 * console.log(engine.state.current);
 *
 * // Cleanup
 * engine.destroy();
 */
function createPlaybackEngine(config = {}) {
  // Reactive engine state; bandwidth estimator starts zeroed.
  const state = createState({ bandwidthState: {
    fastEstimate: 0,
    fastTotalWeight: 0,
    slowEstimate: 0,
    slowTotalWeight: 0,
    bytesSampled: 0
  } });
  // Owners hold non-serializable handles (mediaElement, MediaSource, buffers).
  const owners = createState({});
  const events = createEventStream();
  // Each orchestration returns its own cleanup; collected for destroy().
  const cleanups = [
    syncPreloadAttribute(state, owners),
    trackPlaybackInitiated({
      state,
      owners,
      events
    }),
    resolvePresentation({
      state,
      events
    }),
    // Optional config values are only forwarded when explicitly provided.
    selectVideoTrack({
      state,
      owners,
      events
    }, {
      type: "video",
      ...config.initialBandwidth !== void 0 && { initialBandwidth: config.initialBandwidth }
    }),
    selectAudioTrack({
      state,
      owners,
      events
    }, {
      type: "audio",
      ...config.preferredAudioLanguage !== void 0 && { preferredAudioLanguage: config.preferredAudioLanguage }
    }),
    selectTextTrack({
      state,
      owners,
      events
    }, {
      type: "text",
      ...config.preferredSubtitleLanguage !== void 0 && { preferredSubtitleLanguage: config.preferredSubtitleLanguage },
      ...config.includeForcedTracks !== void 0 && { includeForcedTracks: config.includeForcedTracks },
      ...config.enableDefaultTrack !== void 0 && { enableDefaultTrack: config.enableDefaultTrack }
    }),
    resolveTrack({
      state,
      events
    }, { type: "video" }),
    resolveTrack({
      state,
      events
    }, { type: "audio" }),
    resolveTrack({
      state,
      events
    }, { type: "text" }),
    calculatePresentationDuration({ state }),
    setupMediaSource({
      state,
      owners
    }),
    updateDuration({
      state,
      owners
    }),
    setupSourceBuffers({
      state,
      owners
    }),
    trackCurrentTime({
      state,
      owners
    }),
    switchQuality({ state }),
    loadSegments({
      state,
      owners
    }, { type: "video" }),
    loadSegments({
      state,
      owners
    }, { type: "audio" }),
    endOfStream({
      state,
      owners
    }),
    setupTextTracks({
      state,
      owners
    }),
    syncTextTrackModes({
      state,
      owners
    }),
    syncSelectedTextTrackFromDom({
      state,
      owners
    }),
    loadTextTrackCues({
      state,
      owners
    })
  ];
  // Kick the event stream so subscribers see an initial emission.
  events.dispatch({ type: "@@INITIALIZE@@" });
  return {
    state,
    owners,
    events,
    destroy: () => {
      cleanups.forEach((cleanup) => cleanup());
      destroyVttParser();
    }
  };
}
3344
/**
 * HTMLMediaElement-compatible adapter for the SPF playback engine.
 *
 * Mirrors the WHATWG src/play() contract so SPF can slot in wherever a media
 * element API is expected.
 *
 * Assigning `src` always builds a brand-new engine — all state, SourceBuffers,
 * and in-flight requests of the previous source are fully torn down before the
 * next source begins. The attached media element survives the swap and is
 * handed to the replacement engine automatically.
 *
 * @example
 * const media = new SpfMedia({ preferredAudioLanguage: 'en' });
 * media.attach(document.querySelector('video'));
 * media.src = 'https://stream.mux.com/abc123.m3u8';
 *
 * // Change source — old engine is destroyed, new one starts clean:
 * media.src = 'https://stream.mux.com/xyz456.m3u8';
 *
 * // Explicit teardown:
 * media.destroy();
 */
var SpfMedia = class {
	#currentEngine;
	#engineConfig;
	#preloadHint = "";
	/** "loadstart" listener for a deferred play() retry; null when none is pending. */
	#pendingLoadstart = null;
	constructor(config = {}) {
		this.#engineConfig = config;
		this.#currentEngine = createPlaybackEngine(config);
	}
	get engine() {
		return this.#currentEngine;
	}
	attach(mediaElement) {
		this.#currentEngine.owners.patch({ mediaElement });
	}
	detach() {
		this.#dropDeferredPlay();
		this.#currentEngine.owners.patch({ mediaElement: void 0 });
	}
	destroy() {
		this.#dropDeferredPlay();
		this.#currentEngine.destroy();
	}
	get preload() {
		return this.#preloadHint;
	}
	set preload(value) {
		this.#preloadHint = value;
		// An empty hint is remembered locally but never pushed into engine state.
		if (value) this.#currentEngine.state.patch({ preload: value });
	}
	get src() {
		return this.#currentEngine.state.current.presentation?.url ?? "";
	}
	set src(value) {
		// Capture the element before teardown so it carries over to the new engine.
		const attachedElement = this.#currentEngine.owners.current.mediaElement;
		this.#dropDeferredPlay();
		this.#currentEngine.destroy();
		this.#currentEngine = createPlaybackEngine(this.#engineConfig);
		if (this.#preloadHint) this.#currentEngine.state.patch({ preload: this.#preloadHint });
		if (attachedElement) this.#currentEngine.owners.patch({ mediaElement: attachedElement });
		if (value) this.#currentEngine.state.patch({ presentation: { url: value } });
	}
	play() {
		const { mediaElement } = this.#currentEngine.owners.current;
		if (!mediaElement) return Promise.reject(/* @__PURE__ */ new Error("SpfMedia: no media element attached"));
		this.#currentEngine.state.patch({ playbackInitiated: true });
		return mediaElement.play().catch((err) => {
			// With a source set, the element may simply not have loaded yet:
			// retry once it signals "loadstart". Without one, surface the failure.
			if (this.src) return new Promise((resolve, reject) => {
				const retry = () => {
					this.#pendingLoadstart = null;
					mediaElement.play().then(resolve, reject);
				};
				this.#pendingLoadstart = retry;
				mediaElement.addEventListener("loadstart", retry, { once: true });
			});
			throw err;
		});
	}
	/** Unhooks any deferred play() retry; its promise is abandoned, not rejected. */
	#dropDeferredPlay() {
		if (!this.#pendingLoadstart) return;
		const { mediaElement } = this.#currentEngine.owners.current;
		mediaElement?.removeEventListener("loadstart", this.#pendingLoadstart);
		this.#pendingLoadstart = null;
	}
};
3432
+
3433
+ //#endregion
3434
+ //#region ../core/dist/dev/dom/media/simple-hls/index.js
3435
// Bridge SpfMedia into the custom-element stack: a <video>-tagged custom media
// host whose media API calls are delegated to an SpfMedia instance. Falls back
// to an empty base class when HTMLElement is unavailable (non-DOM environments).
var SimpleHlsCustomMedia = class extends DelegateMixin(
	CustomMediaMixin(globalThis.HTMLElement ?? class {}, { tag: "video" }),
	SpfMedia
) {};
3436
+
3437
+ //#endregion
3438
+ //#region src/media/simple-hls-video/index.ts
3439
var SimpleHlsVideo = class extends MediaAttachMixin(SimpleHlsCustomMedia) {
	/**
	 * Renders the template with the `src` attribute stripped so the native
	 * element never loads the URL directly — the engine drives playback.
	 */
	static getTemplateHTML(attrs) {
		const { src: _omitted, ...withoutSrc } = attrs;
		return super.getTemplateHTML(withoutSrc);
	}
	constructor() {
		super();
		// Hand the engine the internal media element as soon as we're constructed.
		this.attach(this.target);
	}
	attributeChangedCallback(attrName, oldValue, newValue) {
		const changed = oldValue !== newValue;
		// `src` is handled entirely here (never forwarded to the base class,
		// mirroring its removal from the template); everything else is forwarded.
		if (attrName === "src") {
			if (changed) this.src = newValue ?? "";
			return;
		}
		super.attributeChangedCallback(attrName, oldValue, newValue);
		if (attrName === "preload" && changed) this.preload = newValue ?? "";
	}
};
3454
+
3455
+ //#endregion
3456
+ //#region src/define/media/simple-hls-video.ts
3457
// Concrete element subclass carrying the tag name, registered globally.
// NOTE(review): define() throws if the tag is already registered — presumably
// single registration per page is the intended contract here.
var SimpleHlsVideoElement = class extends SimpleHlsVideo {
	static tagName = "simple-hls-video";
};
customElements.define(SimpleHlsVideoElement.tagName, SimpleHlsVideoElement);
3463
+
3464
+ //#endregion
3465
+ //# sourceMappingURL=simple-hls-video.dev.js.map