@kenzuya/mediabunny 1.26.0 → 1.28.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (238) hide show
  1. package/README.md +1 -1
  2. package/dist/bundles/{mediabunny.mjs → mediabunny.js} +21963 -21390
  3. package/dist/bundles/mediabunny.min.js +490 -0
  4. package/dist/modules/shared/mp3-misc.d.ts.map +1 -1
  5. package/dist/modules/src/adts/adts-demuxer.d.ts +6 -6
  6. package/dist/modules/src/adts/adts-demuxer.d.ts.map +1 -1
  7. package/dist/modules/src/adts/adts-muxer.d.ts +4 -4
  8. package/dist/modules/src/adts/adts-muxer.d.ts.map +1 -1
  9. package/dist/modules/src/adts/adts-reader.d.ts +1 -1
  10. package/dist/modules/src/adts/adts-reader.d.ts.map +1 -1
  11. package/dist/modules/src/avi/avi-demuxer.d.ts +44 -0
  12. package/dist/modules/src/avi/avi-demuxer.d.ts.map +1 -0
  13. package/dist/modules/src/avi/avi-misc.d.ts +88 -0
  14. package/dist/modules/src/avi/avi-misc.d.ts.map +1 -0
  15. package/dist/modules/src/avi/avi-muxer.d.ts +45 -0
  16. package/dist/modules/src/avi/avi-muxer.d.ts.map +1 -0
  17. package/dist/modules/src/avi/riff-writer.d.ts +26 -0
  18. package/dist/modules/src/avi/riff-writer.d.ts.map +1 -0
  19. package/dist/modules/src/codec-data.d.ts +8 -3
  20. package/dist/modules/src/codec-data.d.ts.map +1 -1
  21. package/dist/modules/src/codec.d.ts +10 -10
  22. package/dist/modules/src/codec.d.ts.map +1 -1
  23. package/dist/modules/src/conversion.d.ts +33 -16
  24. package/dist/modules/src/conversion.d.ts.map +1 -1
  25. package/dist/modules/src/custom-coder.d.ts +8 -8
  26. package/dist/modules/src/custom-coder.d.ts.map +1 -1
  27. package/dist/modules/src/demuxer.d.ts +3 -3
  28. package/dist/modules/src/demuxer.d.ts.map +1 -1
  29. package/dist/modules/src/encode.d.ts +8 -8
  30. package/dist/modules/src/encode.d.ts.map +1 -1
  31. package/dist/modules/src/flac/flac-demuxer.d.ts +7 -7
  32. package/dist/modules/src/flac/flac-demuxer.d.ts.map +1 -1
  33. package/dist/modules/src/flac/flac-misc.d.ts +3 -3
  34. package/dist/modules/src/flac/flac-misc.d.ts.map +1 -1
  35. package/dist/modules/src/flac/flac-muxer.d.ts +5 -5
  36. package/dist/modules/src/flac/flac-muxer.d.ts.map +1 -1
  37. package/dist/modules/src/id3.d.ts +3 -3
  38. package/dist/modules/src/id3.d.ts.map +1 -1
  39. package/dist/modules/src/index.d.ts +20 -20
  40. package/dist/modules/src/index.d.ts.map +1 -1
  41. package/dist/modules/src/input-format.d.ts +22 -0
  42. package/dist/modules/src/input-format.d.ts.map +1 -1
  43. package/dist/modules/src/input-track.d.ts +8 -8
  44. package/dist/modules/src/input-track.d.ts.map +1 -1
  45. package/dist/modules/src/input.d.ts +12 -12
  46. package/dist/modules/src/isobmff/isobmff-boxes.d.ts +2 -2
  47. package/dist/modules/src/isobmff/isobmff-boxes.d.ts.map +1 -1
  48. package/dist/modules/src/isobmff/isobmff-demuxer.d.ts +12 -12
  49. package/dist/modules/src/isobmff/isobmff-demuxer.d.ts.map +1 -1
  50. package/dist/modules/src/isobmff/isobmff-misc.d.ts.map +1 -1
  51. package/dist/modules/src/isobmff/isobmff-muxer.d.ts +11 -11
  52. package/dist/modules/src/isobmff/isobmff-muxer.d.ts.map +1 -1
  53. package/dist/modules/src/isobmff/isobmff-reader.d.ts +2 -2
  54. package/dist/modules/src/isobmff/isobmff-reader.d.ts.map +1 -1
  55. package/dist/modules/src/matroska/ebml.d.ts +3 -3
  56. package/dist/modules/src/matroska/ebml.d.ts.map +1 -1
  57. package/dist/modules/src/matroska/matroska-demuxer.d.ts +13 -13
  58. package/dist/modules/src/matroska/matroska-demuxer.d.ts.map +1 -1
  59. package/dist/modules/src/matroska/matroska-input.d.ts +33 -0
  60. package/dist/modules/src/matroska/matroska-input.d.ts.map +1 -0
  61. package/dist/modules/src/matroska/matroska-misc.d.ts.map +1 -1
  62. package/dist/modules/src/matroska/matroska-muxer.d.ts +5 -5
  63. package/dist/modules/src/matroska/matroska-muxer.d.ts.map +1 -1
  64. package/dist/modules/src/media-sink.d.ts +5 -5
  65. package/dist/modules/src/media-sink.d.ts.map +1 -1
  66. package/dist/modules/src/media-source.d.ts +22 -4
  67. package/dist/modules/src/media-source.d.ts.map +1 -1
  68. package/dist/modules/src/metadata.d.ts +2 -2
  69. package/dist/modules/src/metadata.d.ts.map +1 -1
  70. package/dist/modules/src/misc.d.ts +5 -4
  71. package/dist/modules/src/misc.d.ts.map +1 -1
  72. package/dist/modules/src/mp3/mp3-demuxer.d.ts +7 -7
  73. package/dist/modules/src/mp3/mp3-demuxer.d.ts.map +1 -1
  74. package/dist/modules/src/mp3/mp3-muxer.d.ts +4 -4
  75. package/dist/modules/src/mp3/mp3-muxer.d.ts.map +1 -1
  76. package/dist/modules/src/mp3/mp3-reader.d.ts +2 -2
  77. package/dist/modules/src/mp3/mp3-reader.d.ts.map +1 -1
  78. package/dist/modules/src/mp3/mp3-writer.d.ts +1 -1
  79. package/dist/modules/src/mp3/mp3-writer.d.ts.map +1 -1
  80. package/dist/modules/src/muxer.d.ts +4 -4
  81. package/dist/modules/src/muxer.d.ts.map +1 -1
  82. package/dist/modules/src/ogg/ogg-demuxer.d.ts +7 -7
  83. package/dist/modules/src/ogg/ogg-demuxer.d.ts.map +1 -1
  84. package/dist/modules/src/ogg/ogg-misc.d.ts +1 -1
  85. package/dist/modules/src/ogg/ogg-misc.d.ts.map +1 -1
  86. package/dist/modules/src/ogg/ogg-muxer.d.ts +5 -5
  87. package/dist/modules/src/ogg/ogg-muxer.d.ts.map +1 -1
  88. package/dist/modules/src/ogg/ogg-reader.d.ts +1 -1
  89. package/dist/modules/src/ogg/ogg-reader.d.ts.map +1 -1
  90. package/dist/modules/src/output-format.d.ts +51 -6
  91. package/dist/modules/src/output-format.d.ts.map +1 -1
  92. package/dist/modules/src/output.d.ts +13 -13
  93. package/dist/modules/src/output.d.ts.map +1 -1
  94. package/dist/modules/src/packet.d.ts +1 -1
  95. package/dist/modules/src/packet.d.ts.map +1 -1
  96. package/dist/modules/src/pcm.d.ts.map +1 -1
  97. package/dist/modules/src/reader.d.ts +2 -2
  98. package/dist/modules/src/reader.d.ts.map +1 -1
  99. package/dist/modules/src/sample.d.ts +57 -15
  100. package/dist/modules/src/sample.d.ts.map +1 -1
  101. package/dist/modules/src/source.d.ts +3 -3
  102. package/dist/modules/src/source.d.ts.map +1 -1
  103. package/dist/modules/src/subtitles.d.ts +1 -1
  104. package/dist/modules/src/subtitles.d.ts.map +1 -1
  105. package/dist/modules/src/target.d.ts +2 -2
  106. package/dist/modules/src/target.d.ts.map +1 -1
  107. package/dist/modules/src/tsconfig.tsbuildinfo +1 -1
  108. package/dist/modules/src/wave/riff-writer.d.ts +1 -1
  109. package/dist/modules/src/wave/riff-writer.d.ts.map +1 -1
  110. package/dist/modules/src/wave/wave-demuxer.d.ts +6 -6
  111. package/dist/modules/src/wave/wave-demuxer.d.ts.map +1 -1
  112. package/dist/modules/src/wave/wave-muxer.d.ts +4 -4
  113. package/dist/modules/src/wave/wave-muxer.d.ts.map +1 -1
  114. package/dist/modules/src/writer.d.ts +1 -1
  115. package/dist/modules/src/writer.d.ts.map +1 -1
  116. package/dist/packages/eac3/eac3.wasm +0 -0
  117. package/dist/packages/eac3/mediabunny-eac3.js +1058 -0
  118. package/dist/packages/eac3/mediabunny-eac3.min.js +44 -0
  119. package/dist/packages/mp3-encoder/mediabunny-mp3-encoder.js +694 -0
  120. package/dist/packages/mp3-encoder/mediabunny-mp3-encoder.min.js +58 -0
  121. package/dist/packages/mpeg4/mediabunny-mpeg4.js +1198 -0
  122. package/dist/packages/mpeg4/mediabunny-mpeg4.min.js +44 -0
  123. package/dist/packages/mpeg4/xvid.wasm +0 -0
  124. package/package.json +18 -57
  125. package/dist/bundles/mediabunny.cjs +0 -26140
  126. package/dist/bundles/mediabunny.min.cjs +0 -147
  127. package/dist/bundles/mediabunny.min.mjs +0 -146
  128. package/dist/mediabunny.d.ts +0 -3319
  129. package/dist/modules/shared/mp3-misc.js +0 -147
  130. package/dist/modules/src/adts/adts-demuxer.js +0 -239
  131. package/dist/modules/src/adts/adts-muxer.js +0 -80
  132. package/dist/modules/src/adts/adts-reader.js +0 -63
  133. package/dist/modules/src/codec-data.js +0 -1730
  134. package/dist/modules/src/codec.js +0 -869
  135. package/dist/modules/src/conversion.js +0 -1459
  136. package/dist/modules/src/custom-coder.js +0 -117
  137. package/dist/modules/src/demuxer.js +0 -12
  138. package/dist/modules/src/encode.js +0 -442
  139. package/dist/modules/src/flac/flac-demuxer.js +0 -504
  140. package/dist/modules/src/flac/flac-misc.js +0 -135
  141. package/dist/modules/src/flac/flac-muxer.js +0 -222
  142. package/dist/modules/src/id3.js +0 -848
  143. package/dist/modules/src/index.js +0 -28
  144. package/dist/modules/src/input-format.js +0 -480
  145. package/dist/modules/src/input-track.js +0 -372
  146. package/dist/modules/src/input.js +0 -188
  147. package/dist/modules/src/isobmff/isobmff-boxes.js +0 -1480
  148. package/dist/modules/src/isobmff/isobmff-demuxer.js +0 -2618
  149. package/dist/modules/src/isobmff/isobmff-misc.js +0 -20
  150. package/dist/modules/src/isobmff/isobmff-muxer.js +0 -966
  151. package/dist/modules/src/isobmff/isobmff-reader.js +0 -72
  152. package/dist/modules/src/matroska/ebml.js +0 -653
  153. package/dist/modules/src/matroska/matroska-demuxer.js +0 -2133
  154. package/dist/modules/src/matroska/matroska-misc.js +0 -20
  155. package/dist/modules/src/matroska/matroska-muxer.js +0 -1017
  156. package/dist/modules/src/media-sink.js +0 -1736
  157. package/dist/modules/src/media-source.js +0 -1825
  158. package/dist/modules/src/metadata.js +0 -193
  159. package/dist/modules/src/misc.js +0 -623
  160. package/dist/modules/src/mp3/mp3-demuxer.js +0 -285
  161. package/dist/modules/src/mp3/mp3-muxer.js +0 -123
  162. package/dist/modules/src/mp3/mp3-reader.js +0 -26
  163. package/dist/modules/src/mp3/mp3-writer.js +0 -78
  164. package/dist/modules/src/muxer.js +0 -50
  165. package/dist/modules/src/node.d.ts +0 -9
  166. package/dist/modules/src/node.d.ts.map +0 -1
  167. package/dist/modules/src/node.js +0 -9
  168. package/dist/modules/src/ogg/ogg-demuxer.js +0 -763
  169. package/dist/modules/src/ogg/ogg-misc.js +0 -78
  170. package/dist/modules/src/ogg/ogg-muxer.js +0 -353
  171. package/dist/modules/src/ogg/ogg-reader.js +0 -65
  172. package/dist/modules/src/output-format.js +0 -527
  173. package/dist/modules/src/output.js +0 -300
  174. package/dist/modules/src/packet.js +0 -182
  175. package/dist/modules/src/pcm.js +0 -85
  176. package/dist/modules/src/reader.js +0 -236
  177. package/dist/modules/src/sample.js +0 -1056
  178. package/dist/modules/src/source.js +0 -1182
  179. package/dist/modules/src/subtitles.js +0 -575
  180. package/dist/modules/src/target.js +0 -140
  181. package/dist/modules/src/wave/riff-writer.js +0 -30
  182. package/dist/modules/src/wave/wave-demuxer.js +0 -447
  183. package/dist/modules/src/wave/wave-muxer.js +0 -318
  184. package/dist/modules/src/writer.js +0 -370
  185. package/src/adts/adts-demuxer.ts +0 -331
  186. package/src/adts/adts-muxer.ts +0 -111
  187. package/src/adts/adts-reader.ts +0 -85
  188. package/src/codec-data.ts +0 -2078
  189. package/src/codec.ts +0 -1092
  190. package/src/conversion.ts +0 -2112
  191. package/src/custom-coder.ts +0 -197
  192. package/src/demuxer.ts +0 -24
  193. package/src/encode.ts +0 -739
  194. package/src/flac/flac-demuxer.ts +0 -730
  195. package/src/flac/flac-misc.ts +0 -164
  196. package/src/flac/flac-muxer.ts +0 -320
  197. package/src/id3.ts +0 -925
  198. package/src/index.ts +0 -221
  199. package/src/input-format.ts +0 -541
  200. package/src/input-track.ts +0 -529
  201. package/src/input.ts +0 -235
  202. package/src/isobmff/isobmff-boxes.ts +0 -1719
  203. package/src/isobmff/isobmff-demuxer.ts +0 -3190
  204. package/src/isobmff/isobmff-misc.ts +0 -29
  205. package/src/isobmff/isobmff-muxer.ts +0 -1348
  206. package/src/isobmff/isobmff-reader.ts +0 -91
  207. package/src/matroska/ebml.ts +0 -730
  208. package/src/matroska/matroska-demuxer.ts +0 -2481
  209. package/src/matroska/matroska-misc.ts +0 -29
  210. package/src/matroska/matroska-muxer.ts +0 -1276
  211. package/src/media-sink.ts +0 -2179
  212. package/src/media-source.ts +0 -2243
  213. package/src/metadata.ts +0 -320
  214. package/src/misc.ts +0 -798
  215. package/src/mp3/mp3-demuxer.ts +0 -383
  216. package/src/mp3/mp3-muxer.ts +0 -166
  217. package/src/mp3/mp3-reader.ts +0 -34
  218. package/src/mp3/mp3-writer.ts +0 -120
  219. package/src/muxer.ts +0 -88
  220. package/src/node.ts +0 -11
  221. package/src/ogg/ogg-demuxer.ts +0 -1053
  222. package/src/ogg/ogg-misc.ts +0 -116
  223. package/src/ogg/ogg-muxer.ts +0 -497
  224. package/src/ogg/ogg-reader.ts +0 -93
  225. package/src/output-format.ts +0 -945
  226. package/src/output.ts +0 -488
  227. package/src/packet.ts +0 -263
  228. package/src/pcm.ts +0 -112
  229. package/src/reader.ts +0 -323
  230. package/src/sample.ts +0 -1461
  231. package/src/source.ts +0 -1688
  232. package/src/subtitles.ts +0 -711
  233. package/src/target.ts +0 -204
  234. package/src/tsconfig.json +0 -16
  235. package/src/wave/riff-writer.ts +0 -36
  236. package/src/wave/wave-demuxer.ts +0 -529
  237. package/src/wave/wave-muxer.ts +0 -371
  238. package/src/writer.ts +0 -490
@@ -1,1182 +0,0 @@
1
- /*!
2
- * Copyright (c) 2025-present, Vanilagy and contributors
3
- *
4
- * This Source Code Form is subject to the terms of the Mozilla Public
5
- * License, v. 2.0. If a copy of the MPL was not distributed with this
6
- * file, You can obtain one at https://mozilla.org/MPL/2.0/.
7
- */
8
- import { assert, binarySearchLessOrEqual, closedIntervalsOverlap, isNumber, isWebKit, mergeRequestInit, promiseWithResolvers, retriedFetch, toDataView, toUint8Array, } from './misc.js';
9
- import * as nodeAlias from './node.js';
10
- import { InputDisposedError } from './input.js';
11
- const node = typeof nodeAlias !== 'undefined'
12
- ? nodeAlias // Aliasing it prevents some bundler warnings
13
- : undefined;
14
- /**
15
- * The source base class, representing a resource from which bytes can be read.
16
- * @group Input sources
17
- * @public
18
- */
19
- export class Source {
20
- constructor() {
21
- /** @internal */
22
- this._disposed = false;
23
- /** @internal */
24
- this._sizePromise = null;
25
- /** Called each time data is retrieved from the source. Will be called with the retrieved range (end exclusive). */
26
- this.onread = null;
27
- }
28
- /**
29
- * Resolves with the total size of the file in bytes. This function is memoized, meaning only the first call
30
- * will retrieve the size.
31
- *
32
- * Returns null if the source is unsized.
33
- */
34
- async getSizeOrNull() {
35
- if (this._disposed) {
36
- throw new InputDisposedError();
37
- }
38
- return this._sizePromise ??= Promise.resolve(this._retrieveSize());
39
- }
40
- /**
41
- * Resolves with the total size of the file in bytes. This function is memoized, meaning only the first call
42
- * will retrieve the size.
43
- *
44
- * Throws an error if the source is unsized.
45
- */
46
- async getSize() {
47
- if (this._disposed) {
48
- throw new InputDisposedError();
49
- }
50
- const result = await this.getSizeOrNull();
51
- if (result === null) {
52
- throw new Error('Cannot determine the size of an unsized source.');
53
- }
54
- return result;
55
- }
56
- }
57
- /**
58
- * A source backed by an ArrayBuffer or ArrayBufferView, with the entire file held in memory.
59
- * @group Input sources
60
- * @public
61
- */
62
- export class BufferSource extends Source {
63
- /** Creates a new {@link BufferSource} backed the specified `ArrayBuffer` or `ArrayBufferView`. */
64
- constructor(buffer) {
65
- if (!(buffer instanceof ArrayBuffer) && !ArrayBuffer.isView(buffer)) {
66
- throw new TypeError('buffer must be an ArrayBuffer or ArrayBufferView.');
67
- }
68
- super();
69
- /** @internal */
70
- this._onreadCalled = false;
71
- this._bytes = toUint8Array(buffer);
72
- this._view = toDataView(buffer);
73
- }
74
- /** @internal */
75
- _retrieveSize() {
76
- return this._bytes.byteLength;
77
- }
78
- /** @internal */
79
- _read() {
80
- if (!this._onreadCalled) {
81
- // We just say the first read retrives all bytes from the source (which, I mean, it does)
82
- this.onread?.(0, this._bytes.byteLength);
83
- this._onreadCalled = true;
84
- }
85
- return {
86
- bytes: this._bytes,
87
- view: this._view,
88
- offset: 0,
89
- };
90
- }
91
- /** @internal */
92
- _dispose() { }
93
- }
94
- /**
95
- * A source backed by a [`Blob`](https://developer.mozilla.org/en-US/docs/Web/API/Blob). Since a
96
- * [`File`](https://developer.mozilla.org/en-US/docs/Web/API/File) is also a `Blob`, this is the source to use when
97
- * reading files off the disk.
98
- * @group Input sources
99
- * @public
100
- */
101
- export class BlobSource extends Source {
102
- /**
103
- * Creates a new {@link BlobSource} backed by the specified
104
- * [`Blob`](https://developer.mozilla.org/en-US/docs/Web/API/Blob).
105
- */
106
- constructor(blob, options = {}) {
107
- if (!(blob instanceof Blob)) {
108
- throw new TypeError('blob must be a Blob.');
109
- }
110
- if (!options || typeof options !== 'object') {
111
- throw new TypeError('options must be an object.');
112
- }
113
- if (options.maxCacheSize !== undefined
114
- && (!isNumber(options.maxCacheSize) || options.maxCacheSize < 0)) {
115
- throw new TypeError('options.maxCacheSize, when provided, must be a non-negative number.');
116
- }
117
- super();
118
- /** @internal */
119
- this._readers = new WeakMap();
120
- this._blob = blob;
121
- this._orchestrator = new ReadOrchestrator({
122
- maxCacheSize: options.maxCacheSize ?? (8 * 2 ** 20 /* 8 MiB */),
123
- maxWorkerCount: 4,
124
- runWorker: this._runWorker.bind(this),
125
- prefetchProfile: PREFETCH_PROFILES.fileSystem,
126
- });
127
- }
128
- /** @internal */
129
- _retrieveSize() {
130
- const size = this._blob.size;
131
- this._orchestrator.fileSize = size;
132
- return size;
133
- }
134
- /** @internal */
135
- _read(start, end) {
136
- return this._orchestrator.read(start, end);
137
- }
138
- /** @internal */
139
- async _runWorker(worker) {
140
- let reader = this._readers.get(worker);
141
- if (reader === undefined) {
142
- // https://github.com/Vanilagy/mediabunny/issues/184
143
- // WebKit has critical bugs with blob.stream():
144
- // - WebKitBlobResource error 1 when streaming large files
145
- // - Memory buildup and reload loops on iOS (network process crashes)
146
- // - ReadableStream stalls under backpressure (especially video)
147
- // Affects Safari and all iOS browsers (Chrome, Firefox, etc.).
148
- // Use arrayBuffer() fallback for WebKit browsers.
149
- if ('stream' in this._blob && !isWebKit()) {
150
- // Get a reader of the blob starting at the required offset, and then keep it around
151
- const slice = this._blob.slice(worker.currentPos);
152
- reader = slice.stream().getReader();
153
- }
154
- else {
155
- // We'll need to use more primitive ways
156
- reader = null;
157
- }
158
- this._readers.set(worker, reader);
159
- }
160
- while (worker.currentPos < worker.targetPos && !worker.aborted) {
161
- if (reader) {
162
- const { done, value } = await reader.read();
163
- if (done) {
164
- this._orchestrator.forgetWorker(worker);
165
- if (worker.currentPos < worker.targetPos) { // I think this `if` should always hit?
166
- throw new Error('Blob reader stopped unexpectedly before all requested data was read.');
167
- }
168
- break;
169
- }
170
- if (worker.aborted) {
171
- break;
172
- }
173
- this.onread?.(worker.currentPos, worker.currentPos + value.length);
174
- this._orchestrator.supplyWorkerData(worker, value);
175
- }
176
- else {
177
- const data = await this._blob.slice(worker.currentPos, worker.targetPos).arrayBuffer();
178
- if (worker.aborted) {
179
- break;
180
- }
181
- this.onread?.(worker.currentPos, worker.currentPos + data.byteLength);
182
- this._orchestrator.supplyWorkerData(worker, new Uint8Array(data));
183
- }
184
- }
185
- worker.running = false;
186
- }
187
- /** @internal */
188
- _dispose() {
189
- this._orchestrator.dispose();
190
- }
191
- }
192
- const URL_SOURCE_MIN_LOAD_AMOUNT = /* #__PURE__ */ 0.5 * 2 ** 20; // 0.5 MiB
193
- const DEFAULT_RETRY_DELAY = ((previousAttempts, error, src) => {
194
- // Check if this could be a CORS error. If so, we cannot recover from it and
195
- // should not attempt to retry.
196
- // CORS errors are intentionally not opaque, so we need to rely on heuristics.
197
- const couldBeCorsError = error instanceof Error && (error.message.includes('Failed to fetch') // Chrome
198
- || error.message.includes('Load failed') // Safari
199
- || error.message.includes('NetworkError when attempting to fetch resource') // Firefox
200
- );
201
- if (couldBeCorsError) {
202
- let originOfSrc = null;
203
- // Checking if the origin is different, because only then a CORS error could originate
204
- try {
205
- if (typeof window !== 'undefined' && typeof window.location !== 'undefined') {
206
- originOfSrc = new URL(src instanceof Request ? src.url : src, window.location.href).origin;
207
- }
208
- }
209
- catch {
210
- // URL parse failed
211
- }
212
- // If user is offline, it is probably not a CORS error.
213
- const isOnline = typeof navigator !== 'undefined' && typeof navigator.onLine === 'boolean' ? navigator.onLine : true;
214
- if (isOnline && originOfSrc !== null && originOfSrc !== window.location.origin) {
215
- return null;
216
- }
217
- }
218
- return Math.min(2 ** (previousAttempts - 2), 16);
219
- });
220
- /**
221
- * A source backed by a URL. This is useful for reading data from the network. Requests will be made using an optimized
222
- * reading and prefetching pattern to minimize request count and latency.
223
- * @group Input sources
224
- * @public
225
- */
226
- export class UrlSource extends Source {
227
- /** Creates a new {@link UrlSource} backed by the resource at the specified URL. */
228
- constructor(url, options = {}) {
229
- if (typeof url !== 'string'
230
- && !(url instanceof URL)
231
- && !(typeof Request !== 'undefined' && url instanceof Request)) {
232
- throw new TypeError('url must be a string, URL or Request.');
233
- }
234
- if (!options || typeof options !== 'object') {
235
- throw new TypeError('options must be an object.');
236
- }
237
- if (options.requestInit !== undefined && (!options.requestInit || typeof options.requestInit !== 'object')) {
238
- throw new TypeError('options.requestInit, when provided, must be an object.');
239
- }
240
- if (options.getRetryDelay !== undefined && typeof options.getRetryDelay !== 'function') {
241
- throw new TypeError('options.getRetryDelay, when provided, must be a function.');
242
- }
243
- if (options.maxCacheSize !== undefined
244
- && (!isNumber(options.maxCacheSize) || options.maxCacheSize < 0)) {
245
- throw new TypeError('options.maxCacheSize, when provided, must be a non-negative number.');
246
- }
247
- if (options.fetchFn !== undefined && typeof options.fetchFn !== 'function') {
248
- throw new TypeError('options.fetchFn, when provided, must be a function.');
249
- // Won't bother validating this function beyond this
250
- }
251
- super();
252
- /** @internal */
253
- this._existingResponses = new WeakMap();
254
- this._url = url;
255
- this._options = options;
256
- this._getRetryDelay = options.getRetryDelay ?? DEFAULT_RETRY_DELAY;
257
- this._orchestrator = new ReadOrchestrator({
258
- maxCacheSize: options.maxCacheSize ?? (64 * 2 ** 20 /* 64 MiB */),
259
- // Most files in the real-world have a single sequential access pattern, but having two in parallel can
260
- // also happen
261
- maxWorkerCount: 2,
262
- runWorker: this._runWorker.bind(this),
263
- prefetchProfile: PREFETCH_PROFILES.network,
264
- });
265
- }
266
- /** @internal */
267
- async _retrieveSize() {
268
- // Retrieving the resource size for UrlSource is optimized: Almost always (= always), the first bytes we have to
269
- // read are the start of the file. This means it's smart to combine size fetching with fetching the start of the
270
- // file. We additionally use this step to probe if the server supports range requests, killing three birds with
271
- // one stone.
272
- const abortController = new AbortController();
273
- const response = await retriedFetch(this._options.fetchFn ?? fetch, this._url, mergeRequestInit(this._options.requestInit ?? {}, {
274
- headers: {
275
- // We could also send a non-range request to request the same bytes (all of them), but doing it like
276
- // this is an easy way to check if the server supports range requests in the first place
277
- Range: 'bytes=0-',
278
- },
279
- signal: abortController.signal,
280
- }), this._getRetryDelay);
281
- if (!response.ok) {
282
- // eslint-disable-next-line @typescript-eslint/no-base-to-string
283
- throw new Error(`Error fetching ${String(this._url)}: ${response.status} ${response.statusText}`);
284
- }
285
- let worker;
286
- let fileSize;
287
- if (response.status === 206) {
288
- fileSize = this._getPartialLengthFromRangeResponse(response);
289
- worker = this._orchestrator.createWorker(0, Math.min(fileSize, URL_SOURCE_MIN_LOAD_AMOUNT));
290
- }
291
- else {
292
- // Server probably returned a 200.
293
- const contentLength = response.headers.get('Content-Length');
294
- if (contentLength) {
295
- fileSize = Number(contentLength);
296
- worker = this._orchestrator.createWorker(0, fileSize);
297
- this._orchestrator.options.maxCacheSize = Infinity; // 🤷
298
- console.warn('HTTP server did not respond with 206 Partial Content, meaning the entire remote resource now has'
299
- + ' to be downloaded. For efficient media file streaming across a network, please make sure your'
300
- + ' server supports range requests.');
301
- }
302
- else {
303
- throw new Error(`HTTP response (status ${response.status}) must surface Content-Length header.`);
304
- }
305
- }
306
- this._orchestrator.fileSize = fileSize;
307
- this._existingResponses.set(worker, { response, abortController });
308
- this._orchestrator.runWorker(worker);
309
- return fileSize;
310
- }
311
- /** @internal */
312
- _read(start, end) {
313
- return this._orchestrator.read(start, end);
314
- }
315
- /** @internal */
316
- async _runWorker(worker) {
317
- // The outer loop is for resuming a request if it dies mid-response
318
- while (true) {
319
- const existing = this._existingResponses.get(worker);
320
- this._existingResponses.delete(worker);
321
- let abortController = existing?.abortController;
322
- let response = existing?.response;
323
- if (!abortController) {
324
- abortController = new AbortController();
325
- response = await retriedFetch(this._options.fetchFn ?? fetch, this._url, mergeRequestInit(this._options.requestInit ?? {}, {
326
- headers: {
327
- Range: `bytes=${worker.currentPos}-`,
328
- },
329
- signal: abortController.signal,
330
- }), this._getRetryDelay);
331
- }
332
- assert(response);
333
- if (!response.ok) {
334
- // eslint-disable-next-line @typescript-eslint/no-base-to-string
335
- throw new Error(`Error fetching ${String(this._url)}: ${response.status} ${response.statusText}`);
336
- }
337
- if (worker.currentPos > 0 && response.status !== 206) {
338
- throw new Error('HTTP server did not respond with 206 Partial Content to a range request. To enable efficient media'
339
- + ' file streaming across a network, please make sure your server supports range requests.');
340
- }
341
- const length = this._getPartialLengthFromRangeResponse(response);
342
- const required = worker.targetPos - worker.currentPos;
343
- if (length < required) {
344
- throw new Error(`HTTP response unexpectedly too short: Needed at least ${required} bytes, got only ${length}.`);
345
- }
346
- if (!response.body) {
347
- throw new Error('Missing HTTP response body stream. The used fetch function must provide the response body as a'
348
- + ' ReadableStream.');
349
- }
350
- const reader = response.body.getReader();
351
- while (true) {
352
- if (worker.currentPos >= worker.targetPos || worker.aborted) {
353
- abortController.abort();
354
- worker.running = false;
355
- return;
356
- }
357
- let readResult;
358
- try {
359
- readResult = await reader.read();
360
- }
361
- catch (error) {
362
- const retryDelayInSeconds = this._getRetryDelay(1, error, this._url);
363
- if (retryDelayInSeconds !== null) {
364
- console.error('Error while reading response stream. Attempting to resume.', error);
365
- await new Promise(resolve => setTimeout(resolve, 1000 * retryDelayInSeconds));
366
- break;
367
- }
368
- else {
369
- throw error;
370
- }
371
- }
372
- if (worker.aborted) {
373
- break;
374
- }
375
- const { done, value } = readResult;
376
- if (done) {
377
- this._orchestrator.forgetWorker(worker);
378
- if (worker.currentPos < worker.targetPos) {
379
- throw new Error('Response stream reader stopped unexpectedly before all requested data was read.');
380
- }
381
- worker.running = false;
382
- return;
383
- }
384
- this.onread?.(worker.currentPos, worker.currentPos + value.length);
385
- this._orchestrator.supplyWorkerData(worker, value);
386
- }
387
- if (worker.aborted) {
388
- break;
389
- }
390
- }
391
- worker.running = false;
392
- // The previous UrlSource had logic for circumventing https://issues.chromium.org/issues/436025873; I haven't
393
- // been able to observe this bug with the new UrlSource (maybe because we're using response streaming), so the
394
- // logic for that has vanished for now. Leaving a comment here if this becomes relevant again.
395
- }
396
- /** @internal */
397
- _getPartialLengthFromRangeResponse(response) {
398
- const contentRange = response.headers.get('Content-Range');
399
- if (contentRange) {
400
- const match = /\/(\d+)/.exec(contentRange);
401
- if (match) {
402
- return Number(match[1]);
403
- }
404
- else {
405
- throw new Error(`Invalid Content-Range header: ${contentRange}`);
406
- }
407
- }
408
- else {
409
- const contentLength = response.headers.get('Content-Length');
410
- if (contentLength) {
411
- return Number(contentLength);
412
- }
413
- else {
414
- throw new Error('Partial HTTP response (status 206) must surface either Content-Range or'
415
- + ' Content-Length header.');
416
- }
417
- }
418
- }
419
- /** @internal */
420
- _dispose() {
421
- this._orchestrator.dispose();
422
- }
423
- }
424
/**
 * A source backed by a path to a file. Intended for server-side usage in Node, Bun, or Deno.
 *
 * Make sure to call `.dispose()` on the corresponding {@link Input} when done to explicitly free the internal file
 * handle acquired by this source.
 * @group Input sources
 * @public
 */
export class FilePathSource extends Source {
    /**
     * Creates a new {@link FilePathSource} backed by the file at the specified file path.
     *
     * @param filePath - Path of the file to read from.
     * @param options - Optional settings; `maxCacheSize` caps the in-memory read cache, in bytes.
     * @throws {TypeError} If `filePath` is not a string or `options` is malformed.
     */
    constructor(filePath, options = {}) {
        if (typeof filePath !== 'string') {
            throw new TypeError('filePath must be a string.');
        }
        if (!options || typeof options !== 'object') {
            throw new TypeError('options must be an object.');
        }
        if (options.maxCacheSize !== undefined
            && (!isNumber(options.maxCacheSize) || options.maxCacheSize < 0)) {
            throw new TypeError('options.maxCacheSize, when provided, must be a non-negative number.');
        }
        super();
        /** @internal */
        this._fileHandle = null;
        // Let's back this source with a StreamSource, makes the implementation very simple
        this._streamSource = new StreamSource({
            getSize: async () => {
                // The handle is opened lazily on the first size query and kept open until disposal
                this._fileHandle = await node.fs.open(filePath, 'r');
                const stats = await this._fileHandle.stat();
                return stats.size;
            },
            read: async (start, end) => {
                assert(this._fileHandle);
                const buffer = new Uint8Array(end - start);
                // FileHandle.read is allowed to perform a short read (return fewer bytes than
                // requested), while StreamSource strictly requires exactly `end - start` bytes.
                // Therefore, keep reading until the buffer is completely filled.
                let filled = 0;
                while (filled < buffer.length) {
                    const { bytesRead } = await this._fileHandle.read(buffer, filled, buffer.length - filled, start + filled);
                    if (bytesRead === 0) {
                        // Hit EOF before the requested range was fully read
                        throw new Error(`Unexpected end of file: Requested bytes ${start}-${end}, but the file`
                            + ` ended after ${start + filled}.`);
                    }
                    filled += bytesRead;
                }
                return buffer;
            },
            maxCacheSize: options.maxCacheSize,
            prefetchProfile: 'fileSystem',
        });
    }
    /** @internal */
    _read(start, end) {
        // Delegate to the backing StreamSource
        return this._streamSource._read(start, end);
    }
    /** @internal */
    _retrieveSize() {
        return this._streamSource._retrieveSize();
    }
    /** @internal */
    _dispose() {
        this._streamSource._dispose();
        // Fire-and-forget close; errors while closing are intentionally ignored
        void this._fileHandle?.close();
        this._fileHandle = null;
    }
}
480
/**
 * A general-purpose, callback-driven source that can get its data from anywhere.
 * @group Input sources
 * @public
 */
export class StreamSource extends Source {
    /**
     * Creates a new {@link StreamSource} whose behavior is specified by `options`.
     *
     * Required callbacks: `getSize` (returns/resolves the total byte length) and `read`
     * (returns/resolves a Uint8Array or a ReadableStream for a byte range). Optional:
     * `dispose`, `maxCacheSize` (bytes, defaults to 8 MiB), and `prefetchProfile`
     * ('none' | 'fileSystem' | 'network', defaults to 'none').
     *
     * @throws {TypeError} If any option has the wrong type or an unknown profile name is given.
     */
    constructor(options) {
        if (!options || typeof options !== 'object') {
            throw new TypeError('options must be an object.');
        }
        if (typeof options.getSize !== 'function') {
            throw new TypeError('options.getSize must be a function.');
        }
        if (typeof options.read !== 'function') {
            throw new TypeError('options.read must be a function.');
        }
        if (options.dispose !== undefined && typeof options.dispose !== 'function') {
            throw new TypeError('options.dispose, when provided, must be a function.');
        }
        if (options.maxCacheSize !== undefined
            && (!isNumber(options.maxCacheSize) || options.maxCacheSize < 0)) {
            throw new TypeError('options.maxCacheSize, when provided, must be a non-negative number.');
        }
        if (options.prefetchProfile && !['none', 'fileSystem', 'network'].includes(options.prefetchProfile)) {
            throw new TypeError('options.prefetchProfile, when provided, must be one of \'none\', \'fileSystem\' or \'network\'.');
        }
        super();
        this._options = options;
        // All caching/prefetching/parallelism is delegated to the orchestrator; this class only
        // supplies the actual byte-reading worker via _runWorker.
        this._orchestrator = new ReadOrchestrator({
            maxCacheSize: options.maxCacheSize ?? (8 * 2 ** 20 /* 8 MiB */),
            maxWorkerCount: 2, // Fixed for now, *should* be fine
            prefetchProfile: PREFETCH_PROFILES[options.prefetchProfile ?? 'none'],
            runWorker: this._runWorker.bind(this),
        });
    }
    /**
     * Queries the user-supplied `getSize` callback, validates its result, and records the size
     * on the orchestrator. Handles both sync and async callbacks without forcing a Promise.
     * @internal
     */
    _retrieveSize() {
        const result = this._options.getSize();
        if (result instanceof Promise) {
            return result.then((size) => {
                if (!Number.isInteger(size) || size < 0) {
                    throw new TypeError('options.getSize must return or resolve to a non-negative integer.');
                }
                this._orchestrator.fileSize = size;
                return size;
            });
        }
        else {
            if (!Number.isInteger(result) || result < 0) {
                throw new TypeError('options.getSize must return or resolve to a non-negative integer.');
            }
            this._orchestrator.fileSize = result;
            return result;
        }
    }
    /** @internal */
    _read(start, end) {
        return this._orchestrator.read(start, end);
    }
    /**
     * Worker loop driven by the orchestrator: repeatedly calls the user's `read` callback to
     * advance the worker from currentPos toward targetPos, feeding every chunk back into the
     * orchestrator. Supports both Uint8Array results (one shot) and ReadableStream results
     * (chunked). Abort flags are re-checked after every await.
     * @internal
     */
    async _runWorker(worker) {
        while (worker.currentPos < worker.targetPos && !worker.aborted) {
            // Snapshot positions: targetPos may grow while we await, but this read call only
            // covers the range that was requested at the time it was issued
            const originalCurrentPos = worker.currentPos;
            const originalTargetPos = worker.targetPos;
            let data = this._options.read(worker.currentPos, originalTargetPos);
            if (data instanceof Promise)
                data = await data;
            if (worker.aborted) {
                break;
            }
            if (data instanceof Uint8Array) {
                data = toUint8Array(data); // Normalize things like Node.js Buffer to Uint8Array
                if (data.length !== originalTargetPos - worker.currentPos) {
                    // Yes, we're that strict
                    throw new Error(`options.read returned a Uint8Array with unexpected length: Requested ${originalTargetPos - worker.currentPos} bytes, but got ${data.length}.`);
                }
                this.onread?.(worker.currentPos, worker.currentPos + data.length);
                this._orchestrator.supplyWorkerData(worker, data);
            }
            else if (data instanceof ReadableStream) {
                const reader = data.getReader();
                // Drain the stream chunk by chunk; supplyWorkerData advances worker.currentPos
                while (worker.currentPos < originalTargetPos && !worker.aborted) {
                    const { done, value } = await reader.read();
                    if (done) {
                        if (worker.currentPos < originalTargetPos) {
                            // Yes, we're *that* strict
                            throw new Error(`ReadableStream returned by options.read ended before supplying enough data.`
                                + ` Requested ${originalTargetPos - originalCurrentPos} bytes, but got ${worker.currentPos - originalCurrentPos}`);
                        }
                        break;
                    }
                    if (!(value instanceof Uint8Array)) {
                        throw new TypeError('ReadableStream returned by options.read must yield Uint8Array chunks.');
                    }
                    if (worker.aborted) {
                        break;
                    }
                    // NOTE: intentionally shadows the outer `data` binding; only this chunk is used here
                    const data = toUint8Array(value); // Normalize things like Node.js Buffer to Uint8Array
                    this.onread?.(worker.currentPos, worker.currentPos + data.length);
                    this._orchestrator.supplyWorkerData(worker, data);
                }
            }
            else {
                throw new TypeError('options.read must return or resolve to a Uint8Array or a ReadableStream.');
            }
        }
        worker.running = false;
    }
    /** @internal */
    _dispose() {
        this._orchestrator.dispose();
        this._options.dispose?.();
    }
}
595
/**
 * A source backed by a [`ReadableStream`](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream) of
 * `Uint8Array`, representing an append-only byte stream of unknown length. This is the source to use for incrementally
 * streaming in input files that are still being constructed and whose size we don't yet know, like for example the
 * output chunks of [MediaRecorder](https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder).
 *
 * This source is *unsized*, meaning calls to `.getSize()` will throw and readers are more limited due to the
 * lack of random file access. You should only use this source with sequential access patterns, such as reading all
 * packets from start to end. This source does not work well with random access patterns unless you increase its
 * max cache size.
 *
 * @group Input sources
 * @public
 */
export class ReadableStreamSource extends Source {
    /**
     * Creates a new {@link ReadableStreamSource} backed by the specified `ReadableStream<Uint8Array>`.
     *
     * @param stream - The byte stream to pull from; it is read lazily, on the first cache miss.
     * @param options - Optional settings; `maxCacheSize` (bytes, default 16 MiB) bounds how far
     *     behind the newest requested position cached chunks are retained.
     * @throws {TypeError} If `stream` is not a ReadableStream or `options` is malformed.
     */
    constructor(stream, options = {}) {
        if (!(stream instanceof ReadableStream)) {
            throw new TypeError('stream must be a ReadableStream.');
        }
        if (!options || typeof options !== 'object') {
            throw new TypeError('options must be an object.');
        }
        if (options.maxCacheSize !== undefined
            && (!isNumber(options.maxCacheSize) || options.maxCacheSize < 0)) {
            throw new TypeError('options.maxCacheSize, when provided, must be a non-negative number.');
        }
        super();
        /** @internal */
        // Lazily-acquired reader of `stream`; created on first pull
        this._reader = null;
        /** @internal */
        // Sorted list of cached chunks ({ start, end, bytes, view, age })
        this._cache = [];
        /** @internal */
        // Read requests awaiting data past the current stream position
        this._pendingSlices = [];
        /** @internal */
        // Byte offset the stream has been consumed up to
        this._currentIndex = 0;
        /** @internal */
        // Byte offset we want to pull the stream up to
        this._targetIndex = 0;
        /** @internal */
        // Highest end offset any caller has requested; drives cache eviction
        this._maxRequestedIndex = 0;
        /** @internal */
        // Total stream length, known only once the stream ends
        this._endIndex = null;
        /** @internal */
        // Whether the _pull loop is currently running (at most one at a time)
        this._pulling = false;
        this._stream = stream;
        this._maxCacheSize = options.maxCacheSize ?? (16 * 2 ** 20 /* 16 MiB */);
    }
    /** @internal */
    _retrieveSize() {
        return this._endIndex; // Starts out as null, meaning this source is unsized
    }
    /**
     * Serves [start, end) from the cache when possible; otherwise registers a pending slice and
     * pulls the stream forward. Returns null for reads past the (now-known) end of the stream,
     * a slice object synchronously on a cache hit, or a Promise otherwise. Throws if the
     * requested range lies before the cached region (data already evicted).
     * @internal
     */
    _read(start, end) {
        if (this._endIndex !== null && end > this._endIndex) {
            return null;
        }
        this._maxRequestedIndex = Math.max(this._maxRequestedIndex, end);
        const cacheStartIndex = binarySearchLessOrEqual(this._cache, start, x => x.start);
        const cacheStartEntry = cacheStartIndex !== -1 ? this._cache[cacheStartIndex] : null;
        if (cacheStartEntry && cacheStartEntry.start <= start && end <= cacheStartEntry.end) {
            // The request can be satisfied with a single cache entry
            return {
                bytes: cacheStartEntry.bytes,
                view: cacheStartEntry.view,
                offset: cacheStartEntry.start,
            };
        }
        let lastEnd = start;
        const bytes = new Uint8Array(end - start);
        if (cacheStartIndex !== -1) {
            // Walk over the cache to see if we can satisfy the request using multiple cache entries
            for (let i = cacheStartIndex; i < this._cache.length; i++) {
                const cacheEntry = this._cache[i];
                if (cacheEntry.start >= end) {
                    break;
                }
                const cappedStart = Math.max(start, cacheEntry.start);
                if (cappedStart > lastEnd) {
                    // We're too far behind
                    this._throwDueToCacheMiss();
                }
                const cappedEnd = Math.min(end, cacheEntry.end);
                if (cappedStart < cappedEnd) {
                    bytes.set(cacheEntry.bytes.subarray(cappedStart - cacheEntry.start, cappedEnd - cacheEntry.start), cappedStart - start);
                    lastEnd = cappedEnd;
                }
            }
        }
        if (lastEnd === end) {
            // Fully covered by (multiple) cache entries
            return {
                bytes,
                view: toDataView(bytes),
                offset: start,
            };
        }
        // We need to pull more data
        if (this._currentIndex > lastEnd) {
            // We're too far behind
            this._throwDueToCacheMiss();
        }
        const { promise, resolve, reject } = promiseWithResolvers();
        this._pendingSlices.push({
            start,
            end,
            bytes,
            resolve,
            reject,
        });
        this._targetIndex = Math.max(this._targetIndex, end);
        // Start pulling from the stream if we're not already doing it
        if (!this._pulling) {
            this._pulling = true;
            void this._pull()
                .catch((error) => {
                this._pulling = false;
                if (this._pendingSlices.length > 0) {
                    this._pendingSlices.forEach(x => x.reject(error)); // Make sure to propagate any errors
                    this._pendingSlices.length = 0;
                }
                else {
                    throw error; // So it doesn't get swallowed
                }
            });
        }
        return promise;
    }
    /** @internal */
    _throwDueToCacheMiss() {
        throw new Error('Read is before the cached region. With ReadableStreamSource, you must access the data more'
            + ' sequentially or increase the size of its cache.');
    }
    /**
     * Pull loop: consumes the stream until `_targetIndex` is reached (or the stream/source
     * ends), filling pending slices, appending chunks to the cache, and evicting chunks that
     * fall too far behind the newest requested position.
     * @internal
     */
    async _pull() {
        this._reader ??= this._stream.getReader();
        // This is the loop that keeps pulling data from the stream until a target index is reached, filling requests
        // in the process
        while (this._currentIndex < this._targetIndex && !this._disposed) {
            const { done, value } = await this._reader.read();
            if (done) {
                // Stream is exhausted: unsatisfiable slices resolve to null (range past EOF)
                for (const pendingSlice of this._pendingSlices) {
                    pendingSlice.resolve(null);
                }
                this._pendingSlices.length = 0;
                this._endIndex = this._currentIndex; // We know how long the file is now!
                break;
            }
            const startIndex = this._currentIndex;
            const endIndex = this._currentIndex + value.byteLength;
            // Fill the pending slices with the data
            for (let i = 0; i < this._pendingSlices.length; i++) {
                const pendingSlice = this._pendingSlices[i];
                const cappedStart = Math.max(startIndex, pendingSlice.start);
                const cappedEnd = Math.min(endIndex, pendingSlice.end);
                if (cappedStart < cappedEnd) {
                    pendingSlice.bytes.set(value.subarray(cappedStart - startIndex, cappedEnd - startIndex), cappedStart - pendingSlice.start);
                    if (cappedEnd === pendingSlice.end) {
                        // Pending slice fully filled
                        pendingSlice.resolve({
                            bytes: pendingSlice.bytes,
                            view: toDataView(pendingSlice.bytes),
                            offset: pendingSlice.start,
                        });
                        this._pendingSlices.splice(i, 1);
                        i--;
                    }
                }
            }
            // Chunks arrive in order, so pushing keeps the cache sorted by start offset
            this._cache.push({
                start: startIndex,
                end: endIndex,
                bytes: value,
                view: toDataView(value),
                age: 0, // Unused
            });
            // Do cache eviction, based on the distance from the last-requested index. It's important that we do it like
            // this and not based on where the reader is at, because if the reader is fast, we'll unnecessarily evict
            // data that we still might need.
            while (this._cache.length > 0) {
                const firstEntry = this._cache[0];
                const distance = this._maxRequestedIndex - firstEntry.end;
                if (distance <= this._maxCacheSize) {
                    break;
                }
                this._cache.shift();
            }
            this._currentIndex += value.byteLength;
        }
        this._pulling = false;
    }
    /** @internal */
    _dispose() {
        // NOTE(review): pending slices are dropped without rejection and the stream reader is
        // not released/cancelled here — presumably callers never await past disposal; verify.
        this._pendingSlices.length = 0;
        this._cache.length = 0;
    }
}
790
/**
 * Maps each prefetch profile name to a function that expands a requested byte range
 * [start, end) into the (possibly larger) range that should actually be loaded. The `network`
 * profile additionally inspects the current workers to grow the range on sequential access.
 * Results may exceed file bounds; the caller clamps them.
 */
const PREFETCH_PROFILES = {
    // No prefetching: load exactly what was requested
    none: (start, end) => ({ start, end }),
    fileSystem: (start, end) => {
        // Round the range outward to 64 KiB boundaries, with one block of padding on each side
        const padding = 2 ** 16;
        start = Math.floor((start - padding) / padding) * padding;
        end = Math.ceil((end + padding) / padding) * padding;
        return { start, end };
    },
    network: (start, end, workers) => {
        // Add a slight bit of start padding because backwards reading is painful
        const paddingStart = 2 ** 16;
        start = Math.max(0, Math.floor((start - paddingStart) / paddingStart) * paddingStart);
        // Remote resources have extreme latency (relatively speaking), so the benefit from intelligent
        // prefetching is great. The network prefetch strategy is as follows: When we notice
        // successive reads to a worker's read region, we prefetch more data at the end of that region,
        // growing exponentially (up to a cap). This performs well for real-world use cases: Either we read a
        // small part of the file once and then never need it again, in which case the requested amount of data
        // is small. Or, we're repeatedly doing a sequential access pattern (common in media files), in which
        // case we can become more and more confident to prefetch more and more data.
        for (const worker of workers) {
            const maxExtensionAmount = 8 * 2 ** 20; // 8 MiB
            // When the read region crosses the threshold point, we trigger a prefetch. This point is typically
            // in the middle of the worker's read region, or a fixed offset from the end if the region has grown
            // really large.
            const thresholdPoint = Math.max((worker.startPos + worker.targetPos) / 2, worker.targetPos - maxExtensionAmount);
            if (closedIntervalsOverlap(start, end, thresholdPoint, worker.targetPos)) {
                const size = worker.targetPos - worker.startPos;
                // If we extend by maxExtensionAmount
                const a = Math.ceil((size + 1) / maxExtensionAmount) * maxExtensionAmount;
                // If we extend to the next power of 2
                const b = 2 ** Math.ceil(Math.log2(size + 1));
                // Take the smaller of the two: exponential growth, capped at linear 8 MiB steps
                const extent = Math.min(b, a);
                end = Math.max(end, worker.startPos + extent);
            }
        }
        // Always request at least a minimum amount per network round trip
        end = Math.max(end, start + URL_SOURCE_MIN_LOAD_AMOUNT);
        return {
            start,
            end,
        };
    },
};
832
/**
 * Godclass for orchestrating complex, cached read operations. The reading model is as follows: Any reading task is
 * delegated to a *worker*, which is a sequential reader positioned somewhere along the file. All workers run in
 * parallel and can be stopped and resumed in their forward movement. When read requests come in, this orchestrator will
 * first try to satisfy the request with only the cached data. If this isn't possible, workers are spun up for all
 * missing parts (or existing workers are repurposed), and these workers will then fill the holes in the data as they
 * march along the file.
 */
class ReadOrchestrator {
    /**
     * @param options - Holds `maxCacheSize` (bytes), `maxWorkerCount`, a `prefetchProfile`
     *     range-expansion function, and the async `runWorker` callback that does actual I/O.
     */
    constructor(options) {
        this.options = options;
        // Total file size in bytes; must be set by the owner before read() is called
        this.fileSize = null;
        this.nextAge = 0; // Used for LRU eviction of both cache entries and workers
        // Active (or idle-but-kept) workers
        this.workers = [];
        // Cache entries ({ start, end, bytes, view, age }), kept sorted by start and non-overlapping
        this.cache = [];
        this.currentCacheSize = 0;
        this.disposed = false;
    }
    /**
     * Reads bytes [innerStart, innerEnd). The range is first expanded by the prefetch profile
     * to an "outer" range; cache holes within the outer range are assigned to workers. Returns
     * a slice object synchronously when the inner range is fully cached, otherwise a Promise
     * that resolves once all inner holes have been filled.
     */
    read(innerStart, innerEnd) {
        assert(this.fileSize !== null);
        const prefetchRange = this.options.prefetchProfile(innerStart, innerEnd, this.workers);
        const outerStart = Math.max(prefetchRange.start, 0);
        const outerEnd = Math.min(prefetchRange.end, this.fileSize);
        assert(outerStart <= innerStart && innerEnd <= outerEnd);
        let result = null;
        const innerCacheStartIndex = binarySearchLessOrEqual(this.cache, innerStart, x => x.start);
        const innerStartEntry = innerCacheStartIndex !== -1 ? this.cache[innerCacheStartIndex] : null;
        // See if the read request can be satisfied by a single cache entry
        if (innerStartEntry && innerStartEntry.start <= innerStart && innerEnd <= innerStartEntry.end) {
            innerStartEntry.age = this.nextAge++;
            result = {
                bytes: innerStartEntry.bytes,
                view: innerStartEntry.view,
                offset: innerStartEntry.start,
            };
            // Can't return yet though, still need to check if the prefetch range might lie outside the cached area
        }
        const outerCacheStartIndex = binarySearchLessOrEqual(this.cache, outerStart, x => x.start);
        // `bytes` is the output buffer for the inner range; null if the cache already satisfied it
        const bytes = result ? null : new Uint8Array(innerEnd - innerStart);
        let contiguousBytesWriteEnd = 0; // Used to track if the cache is able to completely cover the bytes
        let lastEnd = outerStart;
        // The "holes" in the cache (the parts we need to load)
        const outerHoles = [];
        // Loop over the cache and build up the list of holes
        if (outerCacheStartIndex !== -1) {
            for (let i = outerCacheStartIndex; i < this.cache.length; i++) {
                const entry = this.cache[i];
                if (entry.start >= outerEnd) {
                    break;
                }
                if (entry.end <= outerStart) {
                    continue;
                }
                const cappedOuterStart = Math.max(outerStart, entry.start);
                const cappedOuterEnd = Math.min(outerEnd, entry.end);
                assert(cappedOuterStart <= cappedOuterEnd);
                if (lastEnd < cappedOuterStart) {
                    outerHoles.push({ start: lastEnd, end: cappedOuterStart });
                }
                lastEnd = cappedOuterEnd;
                if (bytes) {
                    const cappedInnerStart = Math.max(innerStart, entry.start);
                    const cappedInnerEnd = Math.min(innerEnd, entry.end);
                    if (cappedInnerStart < cappedInnerEnd) {
                        const relativeOffset = cappedInnerStart - innerStart;
                        // Fill the relevant section of the bytes with the cached data
                        bytes.set(entry.bytes.subarray(cappedInnerStart - entry.start, cappedInnerEnd - entry.start), relativeOffset);
                        if (relativeOffset === contiguousBytesWriteEnd) {
                            contiguousBytesWriteEnd = cappedInnerEnd - innerStart;
                        }
                    }
                }
                entry.age = this.nextAge++;
            }
            if (lastEnd < outerEnd) {
                outerHoles.push({ start: lastEnd, end: outerEnd });
            }
        }
        else {
            // Nothing cached before outerStart: the entire outer range is one big hole
            outerHoles.push({ start: outerStart, end: outerEnd });
        }
        if (bytes && contiguousBytesWriteEnd >= bytes.length) {
            // Multiple cache entries were able to completely cover the requested bytes!
            result = {
                bytes,
                view: toDataView(bytes),
                offset: innerStart,
            };
        }
        if (outerHoles.length === 0) {
            assert(result);
            return result;
        }
        // We need to read more data, so now we're in async land
        const { promise, resolve, reject } = promiseWithResolvers();
        // Restrict the holes to the inner range; these are what must be filled before resolving
        const innerHoles = [];
        for (const outerHole of outerHoles) {
            const cappedStart = Math.max(innerStart, outerHole.start);
            const cappedEnd = Math.min(innerEnd, outerHole.end);
            if (cappedStart === outerHole.start && cappedEnd === outerHole.end) {
                innerHoles.push(outerHole); // Can reuse without allocating a new object
            }
            else if (cappedStart < cappedEnd) {
                innerHoles.push({ start: cappedStart, end: cappedEnd });
            }
        }
        // Fire off workers to take care of patching the holes
        for (const outerHole of outerHoles) {
            // All holes share the same pending slice (same output buffer, same promise)
            const pendingSlice = bytes && {
                start: innerStart,
                bytes,
                holes: innerHoles,
                resolve,
                reject,
            };
            let workerFound = false;
            for (const worker of this.workers) {
                // A small tolerance in the case that the requested region is *just* after the target position of an
                // existing worker. In that case, it's probably more efficient to repurpose that worker than to spawn
                // another one so close to it
                const gapTolerance = 2 ** 17;
                // This check also implies worker.currentPos <= outerHole.start, a critical condition
                if (closedIntervalsOverlap(outerHole.start - gapTolerance, outerHole.start, worker.currentPos, worker.targetPos)) {
                    worker.targetPos = Math.max(worker.targetPos, outerHole.end); // Update the worker's target position
                    workerFound = true;
                    if (pendingSlice && !worker.pendingSlices.includes(pendingSlice)) {
                        worker.pendingSlices.push(pendingSlice);
                    }
                    if (!worker.running) {
                        // Kick it off if it's idle
                        this.runWorker(worker);
                    }
                    break;
                }
            }
            if (!workerFound) {
                // We need to spawn a new worker
                const newWorker = this.createWorker(outerHole.start, outerHole.end);
                if (pendingSlice) {
                    newWorker.pendingSlices = [pendingSlice];
                }
                this.runWorker(newWorker);
            }
        }
        if (!result) {
            assert(bytes);
            result = promise.then(bytes => ({
                bytes,
                view: toDataView(bytes),
                offset: innerStart,
            }));
        }
        else {
            // The requested region was satisfied by the cache, but the entire prefetch region was not
        }
        return result;
    }
    /**
     * Creates a new worker for [startPos, targetPos), registers it, and evicts the
     * least-recently-used surplus workers (unless they are busy with pending slices).
     */
    createWorker(startPos, targetPos) {
        const worker = {
            startPos,
            currentPos: startPos,
            targetPos,
            running: false,
            // Due to async shenanigans, it can happen that workers are started after disposal. In this case, instead of
            // simply not creating the worker, we allow it to run but immediately label it as aborted, so it can then
            // shut itself down.
            aborted: this.disposed,
            pendingSlices: [],
            age: this.nextAge++,
        };
        this.workers.push(worker);
        // LRU eviction of the other workers
        while (this.workers.length > this.options.maxWorkerCount) {
            let oldestIndex = 0;
            let oldestWorker = this.workers[0];
            for (let i = 1; i < this.workers.length; i++) {
                const worker = this.workers[i];
                if (worker.age < oldestWorker.age) {
                    oldestIndex = i;
                    oldestWorker = worker;
                }
            }
            if (oldestWorker.running && oldestWorker.pendingSlices.length > 0) {
                // Never abort a worker that unresolved reads still depend on
                break;
            }
            oldestWorker.aborted = true;
            this.workers.splice(oldestIndex, 1);
        }
        return worker;
    }
    /**
     * Starts an idle worker via the owner-supplied callback. Errors are routed to the worker's
     * pending slices; if there are none, the error is rethrown so it surfaces.
     */
    runWorker(worker) {
        assert(!worker.running);
        assert(worker.currentPos < worker.targetPos);
        worker.running = true;
        worker.age = this.nextAge++;
        void this.options.runWorker(worker)
            .catch((error) => {
            worker.running = false;
            if (worker.pendingSlices.length > 0) {
                worker.pendingSlices.forEach(x => x.reject(error)); // Make sure to propagate any errors
                worker.pendingSlices.length = 0;
            }
            else {
                throw error; // So it doesn't get swallowed
            }
        });
    }
    /** Called by a worker when it has read some data. */
    supplyWorkerData(worker, bytes) {
        assert(!worker.aborted);
        const start = worker.currentPos;
        const end = start + bytes.length;
        this.insertIntoCache({
            start,
            end,
            bytes,
            view: toDataView(bytes),
            age: this.nextAge++,
        });
        worker.currentPos += bytes.length;
        worker.targetPos = Math.max(worker.targetPos, worker.currentPos); // In case it overshoots
        // Now, let's see if we can use the read bytes to fill any pending slice
        for (let i = 0; i < worker.pendingSlices.length; i++) {
            const pendingSlice = worker.pendingSlices[i];
            const clampedStart = Math.max(start, pendingSlice.start);
            const clampedEnd = Math.min(end, pendingSlice.start + pendingSlice.bytes.length);
            if (clampedStart < clampedEnd) {
                pendingSlice.bytes.set(bytes.subarray(clampedStart - start, clampedEnd - start), clampedStart - pendingSlice.start);
            }
            for (let j = 0; j < pendingSlice.holes.length; j++) {
                // The hole is intentionally not modified here if the read section starts somewhere in the middle of
                // the hole. We don't need to do "hole splitting", since the workers are spawned *by* the holes,
                // meaning there's always a worker which will consume the hole left to right.
                const hole = pendingSlice.holes[j];
                if (start <= hole.start && end > hole.start) {
                    hole.start = end;
                }
                if (hole.end <= hole.start) {
                    pendingSlice.holes.splice(j, 1);
                    j--;
                }
            }
            if (pendingSlice.holes.length === 0) {
                // The slice has been fulfilled, everything has been read. Let's resolve the promise
                pendingSlice.resolve(pendingSlice.bytes);
                worker.pendingSlices.splice(i, 1);
                i--;
            }
        }
        // Remove other idle workers if we "ate" into their territory
        for (let i = 0; i < this.workers.length; i++) {
            const otherWorker = this.workers[i];
            if (worker === otherWorker || otherWorker.running) {
                continue;
            }
            if (closedIntervalsOverlap(start, end, otherWorker.currentPos, otherWorker.targetPos)) {
                this.workers.splice(i, 1);
                i--;
            }
        }
    }
    /** Unregisters a worker that is known to be in the worker list. */
    forgetWorker(worker) {
        const index = this.workers.indexOf(worker);
        assert(index !== -1);
        this.workers.splice(index, 1);
    }
    /**
     * Inserts a chunk into the sorted, non-overlapping cache, merging it with overlapping
     * neighbors, then performs LRU eviction against `maxCacheSize`.
     */
    insertIntoCache(entry) {
        if (this.options.maxCacheSize === 0) {
            return; // No caching
        }
        let insertionIndex = binarySearchLessOrEqual(this.cache, entry.start, x => x.start) + 1;
        if (insertionIndex > 0) {
            const previous = this.cache[insertionIndex - 1];
            if (previous.end >= entry.end) {
                // Previous entry swallows the one to be inserted; we don't need to do anything
                return;
            }
            if (previous.end > entry.start) {
                // Partial overlap with the previous entry, let's join
                const joined = new Uint8Array(entry.end - previous.start);
                joined.set(previous.bytes, 0);
                joined.set(entry.bytes, entry.start - previous.start);
                this.currentCacheSize += entry.end - previous.end;
                previous.bytes = joined;
                previous.view = toDataView(joined);
                previous.end = entry.end;
                // Do the rest of the logic with the previous entry instead
                insertionIndex--;
                entry = previous;
            }
            else {
                this.cache.splice(insertionIndex, 0, entry);
                this.currentCacheSize += entry.bytes.length;
            }
        }
        else {
            this.cache.splice(insertionIndex, 0, entry);
            this.currentCacheSize += entry.bytes.length;
        }
        // Resolve overlaps with the entries that follow the (possibly merged) inserted entry
        for (let i = insertionIndex + 1; i < this.cache.length; i++) {
            const next = this.cache[i];
            if (entry.end <= next.start) {
                // Even if they touch, we don't wanna merge them, no need
                break;
            }
            if (entry.end >= next.end) {
                // The inserted entry completely swallows the next entry
                this.cache.splice(i, 1);
                this.currentCacheSize -= next.bytes.length;
                i--;
                continue;
            }
            // Partial overlap, let's join
            const joined = new Uint8Array(next.end - entry.start);
            joined.set(entry.bytes, 0);
            joined.set(next.bytes, next.start - entry.start);
            this.currentCacheSize -= entry.end - next.start; // Subtract the overlap
            entry.bytes = joined;
            entry.view = toDataView(joined);
            entry.end = next.end;
            this.cache.splice(i, 1);
            break; // After the join case, we're done: the next entry cannot possibly overlap with the inserted one.
        }
        // LRU eviction of cache entries
        while (this.currentCacheSize > this.options.maxCacheSize) {
            let oldestIndex = 0;
            let oldestEntry = this.cache[0];
            for (let i = 1; i < this.cache.length; i++) {
                const entry = this.cache[i];
                if (entry.age < oldestEntry.age) {
                    oldestIndex = i;
                    oldestEntry = entry;
                }
            }
            if (this.currentCacheSize - oldestEntry.bytes.length <= this.options.maxCacheSize) {
                // Don't evict if it would shrink the cache below the max size
                break;
            }
            this.cache.splice(oldestIndex, 1);
            this.currentCacheSize -= oldestEntry.bytes.length;
        }
    }
    /** Aborts all workers and drops all state. Safe to call more than once. */
    dispose() {
        for (const worker of this.workers) {
            worker.aborted = true;
        }
        this.workers.length = 0;
        this.cache.length = 0;
        this.disposed = true;
    }
}