@kenzuya/mediabunny 1.26.0 → 1.28.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (237)
  1. package/README.md +1 -1
  2. package/dist/bundles/{mediabunny.mjs → mediabunny.js} +21963 -21388
  3. package/dist/bundles/mediabunny.min.js +490 -0
  4. package/dist/modules/shared/mp3-misc.d.ts.map +1 -1
  5. package/dist/modules/src/adts/adts-demuxer.d.ts +6 -6
  6. package/dist/modules/src/adts/adts-demuxer.d.ts.map +1 -1
  7. package/dist/modules/src/adts/adts-muxer.d.ts +4 -4
  8. package/dist/modules/src/adts/adts-muxer.d.ts.map +1 -1
  9. package/dist/modules/src/adts/adts-reader.d.ts +1 -1
  10. package/dist/modules/src/adts/adts-reader.d.ts.map +1 -1
  11. package/dist/modules/src/avi/avi-demuxer.d.ts +44 -0
  12. package/dist/modules/src/avi/avi-demuxer.d.ts.map +1 -0
  13. package/dist/modules/src/avi/avi-misc.d.ts +88 -0
  14. package/dist/modules/src/avi/avi-misc.d.ts.map +1 -0
  15. package/dist/modules/src/avi/avi-muxer.d.ts +45 -0
  16. package/dist/modules/src/avi/avi-muxer.d.ts.map +1 -0
  17. package/dist/modules/src/avi/riff-writer.d.ts +26 -0
  18. package/dist/modules/src/avi/riff-writer.d.ts.map +1 -0
  19. package/dist/modules/src/codec-data.d.ts +8 -3
  20. package/dist/modules/src/codec-data.d.ts.map +1 -1
  21. package/dist/modules/src/codec.d.ts +10 -10
  22. package/dist/modules/src/codec.d.ts.map +1 -1
  23. package/dist/modules/src/conversion.d.ts +33 -16
  24. package/dist/modules/src/conversion.d.ts.map +1 -1
  25. package/dist/modules/src/custom-coder.d.ts +8 -8
  26. package/dist/modules/src/custom-coder.d.ts.map +1 -1
  27. package/dist/modules/src/demuxer.d.ts +3 -3
  28. package/dist/modules/src/demuxer.d.ts.map +1 -1
  29. package/dist/modules/src/encode.d.ts +8 -8
  30. package/dist/modules/src/encode.d.ts.map +1 -1
  31. package/dist/modules/src/flac/flac-demuxer.d.ts +7 -7
  32. package/dist/modules/src/flac/flac-demuxer.d.ts.map +1 -1
  33. package/dist/modules/src/flac/flac-misc.d.ts +3 -3
  34. package/dist/modules/src/flac/flac-misc.d.ts.map +1 -1
  35. package/dist/modules/src/flac/flac-muxer.d.ts +5 -5
  36. package/dist/modules/src/flac/flac-muxer.d.ts.map +1 -1
  37. package/dist/modules/src/id3.d.ts +3 -3
  38. package/dist/modules/src/id3.d.ts.map +1 -1
  39. package/dist/modules/src/index.d.ts +20 -20
  40. package/dist/modules/src/index.d.ts.map +1 -1
  41. package/dist/modules/src/input-format.d.ts +22 -0
  42. package/dist/modules/src/input-format.d.ts.map +1 -1
  43. package/dist/modules/src/input-track.d.ts +8 -8
  44. package/dist/modules/src/input-track.d.ts.map +1 -1
  45. package/dist/modules/src/input.d.ts +12 -12
  46. package/dist/modules/src/isobmff/isobmff-boxes.d.ts +2 -2
  47. package/dist/modules/src/isobmff/isobmff-boxes.d.ts.map +1 -1
  48. package/dist/modules/src/isobmff/isobmff-demuxer.d.ts +12 -12
  49. package/dist/modules/src/isobmff/isobmff-demuxer.d.ts.map +1 -1
  50. package/dist/modules/src/isobmff/isobmff-misc.d.ts.map +1 -1
  51. package/dist/modules/src/isobmff/isobmff-muxer.d.ts +11 -11
  52. package/dist/modules/src/isobmff/isobmff-muxer.d.ts.map +1 -1
  53. package/dist/modules/src/isobmff/isobmff-reader.d.ts +2 -2
  54. package/dist/modules/src/isobmff/isobmff-reader.d.ts.map +1 -1
  55. package/dist/modules/src/matroska/ebml.d.ts +3 -3
  56. package/dist/modules/src/matroska/ebml.d.ts.map +1 -1
  57. package/dist/modules/src/matroska/matroska-demuxer.d.ts +13 -13
  58. package/dist/modules/src/matroska/matroska-demuxer.d.ts.map +1 -1
  59. package/dist/modules/src/matroska/matroska-input.d.ts +33 -0
  60. package/dist/modules/src/matroska/matroska-input.d.ts.map +1 -0
  61. package/dist/modules/src/matroska/matroska-misc.d.ts.map +1 -1
  62. package/dist/modules/src/matroska/matroska-muxer.d.ts +5 -5
  63. package/dist/modules/src/matroska/matroska-muxer.d.ts.map +1 -1
  64. package/dist/modules/src/media-sink.d.ts +5 -5
  65. package/dist/modules/src/media-sink.d.ts.map +1 -1
  66. package/dist/modules/src/media-source.d.ts +22 -4
  67. package/dist/modules/src/media-source.d.ts.map +1 -1
  68. package/dist/modules/src/metadata.d.ts +2 -2
  69. package/dist/modules/src/metadata.d.ts.map +1 -1
  70. package/dist/modules/src/misc.d.ts +5 -4
  71. package/dist/modules/src/misc.d.ts.map +1 -1
  72. package/dist/modules/src/mp3/mp3-demuxer.d.ts +7 -7
  73. package/dist/modules/src/mp3/mp3-demuxer.d.ts.map +1 -1
  74. package/dist/modules/src/mp3/mp3-muxer.d.ts +4 -4
  75. package/dist/modules/src/mp3/mp3-muxer.d.ts.map +1 -1
  76. package/dist/modules/src/mp3/mp3-reader.d.ts +2 -2
  77. package/dist/modules/src/mp3/mp3-reader.d.ts.map +1 -1
  78. package/dist/modules/src/mp3/mp3-writer.d.ts +1 -1
  79. package/dist/modules/src/mp3/mp3-writer.d.ts.map +1 -1
  80. package/dist/modules/src/muxer.d.ts +4 -4
  81. package/dist/modules/src/muxer.d.ts.map +1 -1
  82. package/dist/modules/src/node.d.ts +1 -1
  83. package/dist/modules/src/ogg/ogg-demuxer.d.ts +7 -7
  84. package/dist/modules/src/ogg/ogg-demuxer.d.ts.map +1 -1
  85. package/dist/modules/src/ogg/ogg-misc.d.ts +1 -1
  86. package/dist/modules/src/ogg/ogg-misc.d.ts.map +1 -1
  87. package/dist/modules/src/ogg/ogg-muxer.d.ts +5 -5
  88. package/dist/modules/src/ogg/ogg-muxer.d.ts.map +1 -1
  89. package/dist/modules/src/ogg/ogg-reader.d.ts +1 -1
  90. package/dist/modules/src/ogg/ogg-reader.d.ts.map +1 -1
  91. package/dist/modules/src/output-format.d.ts +51 -6
  92. package/dist/modules/src/output-format.d.ts.map +1 -1
  93. package/dist/modules/src/output.d.ts +13 -13
  94. package/dist/modules/src/output.d.ts.map +1 -1
  95. package/dist/modules/src/packet.d.ts +1 -1
  96. package/dist/modules/src/packet.d.ts.map +1 -1
  97. package/dist/modules/src/pcm.d.ts.map +1 -1
  98. package/dist/modules/src/reader.d.ts +2 -2
  99. package/dist/modules/src/reader.d.ts.map +1 -1
  100. package/dist/modules/src/sample.d.ts +57 -15
  101. package/dist/modules/src/sample.d.ts.map +1 -1
  102. package/dist/modules/src/source.d.ts +3 -3
  103. package/dist/modules/src/source.d.ts.map +1 -1
  104. package/dist/modules/src/subtitles.d.ts +1 -1
  105. package/dist/modules/src/subtitles.d.ts.map +1 -1
  106. package/dist/modules/src/target.d.ts +2 -2
  107. package/dist/modules/src/target.d.ts.map +1 -1
  108. package/dist/modules/src/tsconfig.tsbuildinfo +1 -1
  109. package/dist/modules/src/wave/riff-writer.d.ts +1 -1
  110. package/dist/modules/src/wave/riff-writer.d.ts.map +1 -1
  111. package/dist/modules/src/wave/wave-demuxer.d.ts +6 -6
  112. package/dist/modules/src/wave/wave-demuxer.d.ts.map +1 -1
  113. package/dist/modules/src/wave/wave-muxer.d.ts +4 -4
  114. package/dist/modules/src/wave/wave-muxer.d.ts.map +1 -1
  115. package/dist/modules/src/writer.d.ts +1 -1
  116. package/dist/modules/src/writer.d.ts.map +1 -1
  117. package/dist/packages/eac3/eac3.wasm +0 -0
  118. package/dist/packages/eac3/mediabunny-eac3.js +1058 -0
  119. package/dist/packages/eac3/mediabunny-eac3.min.js +44 -0
  120. package/dist/packages/mp3-encoder/mediabunny-mp3-encoder.js +694 -0
  121. package/dist/packages/mp3-encoder/mediabunny-mp3-encoder.min.js +58 -0
  122. package/dist/packages/mpeg4/mediabunny-mpeg4.js +1198 -0
  123. package/dist/packages/mpeg4/mediabunny-mpeg4.min.js +44 -0
  124. package/dist/packages/mpeg4/xvid.wasm +0 -0
  125. package/package.json +18 -57
  126. package/dist/bundles/mediabunny.cjs +0 -26140
  127. package/dist/bundles/mediabunny.min.cjs +0 -147
  128. package/dist/bundles/mediabunny.min.mjs +0 -146
  129. package/dist/mediabunny.d.ts +0 -3319
  130. package/dist/modules/shared/mp3-misc.js +0 -147
  131. package/dist/modules/src/adts/adts-demuxer.js +0 -239
  132. package/dist/modules/src/adts/adts-muxer.js +0 -80
  133. package/dist/modules/src/adts/adts-reader.js +0 -63
  134. package/dist/modules/src/codec-data.js +0 -1730
  135. package/dist/modules/src/codec.js +0 -869
  136. package/dist/modules/src/conversion.js +0 -1459
  137. package/dist/modules/src/custom-coder.js +0 -117
  138. package/dist/modules/src/demuxer.js +0 -12
  139. package/dist/modules/src/encode.js +0 -442
  140. package/dist/modules/src/flac/flac-demuxer.js +0 -504
  141. package/dist/modules/src/flac/flac-misc.js +0 -135
  142. package/dist/modules/src/flac/flac-muxer.js +0 -222
  143. package/dist/modules/src/id3.js +0 -848
  144. package/dist/modules/src/index.js +0 -28
  145. package/dist/modules/src/input-format.js +0 -480
  146. package/dist/modules/src/input-track.js +0 -372
  147. package/dist/modules/src/input.js +0 -188
  148. package/dist/modules/src/isobmff/isobmff-boxes.js +0 -1480
  149. package/dist/modules/src/isobmff/isobmff-demuxer.js +0 -2618
  150. package/dist/modules/src/isobmff/isobmff-misc.js +0 -20
  151. package/dist/modules/src/isobmff/isobmff-muxer.js +0 -966
  152. package/dist/modules/src/isobmff/isobmff-reader.js +0 -72
  153. package/dist/modules/src/matroska/ebml.js +0 -653
  154. package/dist/modules/src/matroska/matroska-demuxer.js +0 -2133
  155. package/dist/modules/src/matroska/matroska-misc.js +0 -20
  156. package/dist/modules/src/matroska/matroska-muxer.js +0 -1017
  157. package/dist/modules/src/media-sink.js +0 -1736
  158. package/dist/modules/src/media-source.js +0 -1825
  159. package/dist/modules/src/metadata.js +0 -193
  160. package/dist/modules/src/misc.js +0 -623
  161. package/dist/modules/src/mp3/mp3-demuxer.js +0 -285
  162. package/dist/modules/src/mp3/mp3-muxer.js +0 -123
  163. package/dist/modules/src/mp3/mp3-reader.js +0 -26
  164. package/dist/modules/src/mp3/mp3-writer.js +0 -78
  165. package/dist/modules/src/muxer.js +0 -50
  166. package/dist/modules/src/node.js +0 -9
  167. package/dist/modules/src/ogg/ogg-demuxer.js +0 -763
  168. package/dist/modules/src/ogg/ogg-misc.js +0 -78
  169. package/dist/modules/src/ogg/ogg-muxer.js +0 -353
  170. package/dist/modules/src/ogg/ogg-reader.js +0 -65
  171. package/dist/modules/src/output-format.js +0 -527
  172. package/dist/modules/src/output.js +0 -300
  173. package/dist/modules/src/packet.js +0 -182
  174. package/dist/modules/src/pcm.js +0 -85
  175. package/dist/modules/src/reader.js +0 -236
  176. package/dist/modules/src/sample.js +0 -1056
  177. package/dist/modules/src/source.js +0 -1182
  178. package/dist/modules/src/subtitles.js +0 -575
  179. package/dist/modules/src/target.js +0 -140
  180. package/dist/modules/src/wave/riff-writer.js +0 -30
  181. package/dist/modules/src/wave/wave-demuxer.js +0 -447
  182. package/dist/modules/src/wave/wave-muxer.js +0 -318
  183. package/dist/modules/src/writer.js +0 -370
  184. package/src/adts/adts-demuxer.ts +0 -331
  185. package/src/adts/adts-muxer.ts +0 -111
  186. package/src/adts/adts-reader.ts +0 -85
  187. package/src/codec-data.ts +0 -2078
  188. package/src/codec.ts +0 -1092
  189. package/src/conversion.ts +0 -2112
  190. package/src/custom-coder.ts +0 -197
  191. package/src/demuxer.ts +0 -24
  192. package/src/encode.ts +0 -739
  193. package/src/flac/flac-demuxer.ts +0 -730
  194. package/src/flac/flac-misc.ts +0 -164
  195. package/src/flac/flac-muxer.ts +0 -320
  196. package/src/id3.ts +0 -925
  197. package/src/index.ts +0 -221
  198. package/src/input-format.ts +0 -541
  199. package/src/input-track.ts +0 -529
  200. package/src/input.ts +0 -235
  201. package/src/isobmff/isobmff-boxes.ts +0 -1719
  202. package/src/isobmff/isobmff-demuxer.ts +0 -3190
  203. package/src/isobmff/isobmff-misc.ts +0 -29
  204. package/src/isobmff/isobmff-muxer.ts +0 -1348
  205. package/src/isobmff/isobmff-reader.ts +0 -91
  206. package/src/matroska/ebml.ts +0 -730
  207. package/src/matroska/matroska-demuxer.ts +0 -2481
  208. package/src/matroska/matroska-misc.ts +0 -29
  209. package/src/matroska/matroska-muxer.ts +0 -1276
  210. package/src/media-sink.ts +0 -2179
  211. package/src/media-source.ts +0 -2243
  212. package/src/metadata.ts +0 -320
  213. package/src/misc.ts +0 -798
  214. package/src/mp3/mp3-demuxer.ts +0 -383
  215. package/src/mp3/mp3-muxer.ts +0 -166
  216. package/src/mp3/mp3-reader.ts +0 -34
  217. package/src/mp3/mp3-writer.ts +0 -120
  218. package/src/muxer.ts +0 -88
  219. package/src/node.ts +0 -11
  220. package/src/ogg/ogg-demuxer.ts +0 -1053
  221. package/src/ogg/ogg-misc.ts +0 -116
  222. package/src/ogg/ogg-muxer.ts +0 -497
  223. package/src/ogg/ogg-reader.ts +0 -93
  224. package/src/output-format.ts +0 -945
  225. package/src/output.ts +0 -488
  226. package/src/packet.ts +0 -263
  227. package/src/pcm.ts +0 -112
  228. package/src/reader.ts +0 -323
  229. package/src/sample.ts +0 -1461
  230. package/src/source.ts +0 -1688
  231. package/src/subtitles.ts +0 -711
  232. package/src/target.ts +0 -204
  233. package/src/tsconfig.json +0 -16
  234. package/src/wave/riff-writer.ts +0 -36
  235. package/src/wave/wave-demuxer.ts +0 -529
  236. package/src/wave/wave-muxer.ts +0 -371
  237. package/src/writer.ts +0 -490
package/src/source.ts DELETED
@@ -1,1688 +0,0 @@
- /*!
-  * Copyright (c) 2025-present, Vanilagy and contributors
-  *
-  * This Source Code Form is subject to the terms of the Mozilla Public
-  * License, v. 2.0. If a copy of the MPL was not distributed with this
-  * file, You can obtain one at https://mozilla.org/MPL/2.0/.
-  */
-
- import type { FileHandle } from 'node:fs/promises';
- import {
- 	assert,
- 	binarySearchLessOrEqual,
- 	closedIntervalsOverlap,
- 	isNumber,
- 	isWebKit,
- 	MaybePromise,
- 	mergeRequestInit,
- 	promiseWithResolvers,
- 	retriedFetch,
- 	toDataView,
- 	toUint8Array,
- } from './misc';
- import * as nodeAlias from './node';
- import { InputDisposedError } from './input';
-
- const node = typeof nodeAlias !== 'undefined'
- 	? nodeAlias // Aliasing it prevents some bundler warnings
- 	: undefined!;
-
- export type ReadResult = {
- 	bytes: Uint8Array;
- 	view: DataView;
- 	/** The offset of the bytes in the file. */
- 	offset: number;
- };
-
- /**
-  * The source base class, representing a resource from which bytes can be read.
-  * @group Input sources
-  * @public
-  */
- export abstract class Source {
- 	/** @internal */
- 	abstract _retrieveSize(): MaybePromise<number | null>;
- 	/** @internal */
- 	abstract _read(start: number, end: number): MaybePromise<ReadResult | null>;
- 	/** @internal */
- 	abstract _dispose(): void;
- 	/** @internal */
- 	_disposed = false;
-
- 	/** @internal */
- 	private _sizePromise: Promise<number | null> | null = null;
-
- 	/**
- 	 * Resolves with the total size of the file in bytes. This function is memoized, meaning only the first call
- 	 * will retrieve the size.
- 	 *
- 	 * Returns null if the source is unsized.
- 	 */
- 	async getSizeOrNull() {
- 		if (this._disposed) {
- 			throw new InputDisposedError();
- 		}
-
- 		return this._sizePromise ??= Promise.resolve(this._retrieveSize());
- 	}
-
- 	/**
- 	 * Resolves with the total size of the file in bytes. This function is memoized, meaning only the first call
- 	 * will retrieve the size.
- 	 *
- 	 * Throws an error if the source is unsized.
- 	 */
- 	async getSize() {
- 		if (this._disposed) {
- 			throw new InputDisposedError();
- 		}
-
- 		const result = await this.getSizeOrNull();
- 		if (result === null) {
- 			throw new Error('Cannot determine the size of an unsized source.');
- 		}
-
- 		return result;
- 	}
-
- 	/** Called each time data is retrieved from the source. Will be called with the retrieved range (end exclusive). */
- 	onread: ((start: number, end: number) => unknown) | null = null;
- }
-
- /**
-  * A source backed by an ArrayBuffer or ArrayBufferView, with the entire file held in memory.
-  * @group Input sources
-  * @public
-  */
- export class BufferSource extends Source {
- 	/** @internal */
- 	_bytes: Uint8Array;
- 	/** @internal */
- 	_view: DataView;
- 	/** @internal */
- 	_onreadCalled = false;
-
- 	/** Creates a new {@link BufferSource} backed by the specified `ArrayBuffer` or `ArrayBufferView`. */
- 	constructor(buffer: ArrayBuffer | ArrayBufferView) {
- 		if (!(buffer instanceof ArrayBuffer) && !ArrayBuffer.isView(buffer)) {
- 			throw new TypeError('buffer must be an ArrayBuffer or ArrayBufferView.');
- 		}
-
- 		super();
-
- 		this._bytes = toUint8Array(buffer);
- 		this._view = toDataView(buffer);
- 	}
-
- 	/** @internal */
- 	_retrieveSize(): number {
- 		return this._bytes.byteLength;
- 	}
-
- 	/** @internal */
- 	_read(): ReadResult {
- 		if (!this._onreadCalled) {
- 			// We just say the first read retrieves all bytes from the source (which, I mean, it does)
- 			this.onread?.(0, this._bytes.byteLength);
- 			this._onreadCalled = true;
- 		}
-
- 		return {
- 			bytes: this._bytes,
- 			view: this._view,
- 			offset: 0,
- 		};
- 	}
-
- 	/** @internal */
- 	_dispose() {}
- }
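For orientation, a minimal, hypothetical usage sketch of the deleted `BufferSource` (not part of the diff), based only on the constructor and the `Source` API above; the `'mediabunny'` entry point and an ESM context with top-level await are assumptions:

```ts
import { BufferSource } from 'mediabunny'; // assumed root export

const bytes = new Uint8Array([0x00, 0x00, 0x00, 0x18, 0x66, 0x74, 0x79, 0x70]);
const source = new BufferSource(bytes);

// Fires on the first read with the full range, since all bytes are already in memory
source.onread = (start, end) => console.log(`read [${start}, ${end})`);

console.log(await source.getSize()); // 8
```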
-
- /**
-  * Options for {@link BlobSource}.
-  * @group Input sources
-  * @public
-  */
- export type BlobSourceOptions = {
- 	/** The maximum number of bytes the cache is allowed to hold in memory. Defaults to 8 MiB. */
- 	maxCacheSize?: number;
- };
-
- /**
-  * A source backed by a [`Blob`](https://developer.mozilla.org/en-US/docs/Web/API/Blob). Since a
-  * [`File`](https://developer.mozilla.org/en-US/docs/Web/API/File) is also a `Blob`, this is the source to use when
-  * reading files off the disk.
-  * @group Input sources
-  * @public
-  */
- export class BlobSource extends Source {
- 	/** @internal */
- 	_blob: Blob;
- 	/** @internal */
- 	_orchestrator: ReadOrchestrator;
-
- 	/**
- 	 * Creates a new {@link BlobSource} backed by the specified
- 	 * [`Blob`](https://developer.mozilla.org/en-US/docs/Web/API/Blob).
- 	 */
- 	constructor(blob: Blob, options: BlobSourceOptions = {}) {
- 		if (!(blob instanceof Blob)) {
- 			throw new TypeError('blob must be a Blob.');
- 		}
- 		if (!options || typeof options !== 'object') {
- 			throw new TypeError('options must be an object.');
- 		}
- 		if (
- 			options.maxCacheSize !== undefined
- 			&& (!isNumber(options.maxCacheSize) || options.maxCacheSize < 0)
- 		) {
- 			throw new TypeError('options.maxCacheSize, when provided, must be a non-negative number.');
- 		}
-
- 		super();
-
- 		this._blob = blob;
- 		this._orchestrator = new ReadOrchestrator({
- 			maxCacheSize: options.maxCacheSize ?? (8 * 2 ** 20 /* 8 MiB */),
- 			maxWorkerCount: 4,
- 			runWorker: this._runWorker.bind(this),
- 			prefetchProfile: PREFETCH_PROFILES.fileSystem,
- 		});
- 	}
-
- 	/** @internal */
- 	_retrieveSize(): number {
- 		const size = this._blob.size;
- 		this._orchestrator.fileSize = size;
-
- 		return size;
- 	}
-
- 	/** @internal */
- 	_read(start: number, end: number): MaybePromise<ReadResult> {
- 		return this._orchestrator.read(start, end);
- 	}
-
- 	/** @internal */
- 	_readers = new WeakMap<ReadWorker, ReadableStreamDefaultReader<Uint8Array> | null>();
-
- 	/** @internal */
- 	private async _runWorker(worker: ReadWorker) {
- 		let reader = this._readers.get(worker);
- 		if (reader === undefined) {
- 			// https://github.com/Vanilagy/mediabunny/issues/184
- 			// WebKit has critical bugs with blob.stream():
- 			// - WebKitBlobResource error 1 when streaming large files
- 			// - Memory buildup and reload loops on iOS (network process crashes)
- 			// - ReadableStream stalls under backpressure (especially video)
- 			// Affects Safari and all iOS browsers (Chrome, Firefox, etc.).
- 			// Use arrayBuffer() fallback for WebKit browsers.
- 			if ('stream' in this._blob && !isWebKit()) {
- 				// Get a reader of the blob starting at the required offset, and then keep it around
- 				const slice = this._blob.slice(worker.currentPos);
- 				reader = slice.stream().getReader();
- 			} else {
- 				// We'll need to use more primitive ways
- 				reader = null;
- 			}
-
- 			this._readers.set(worker, reader);
- 		}
-
- 		while (worker.currentPos < worker.targetPos && !worker.aborted) {
- 			if (reader) {
- 				const { done, value } = await reader.read();
- 				if (done) {
- 					this._orchestrator.forgetWorker(worker);
-
- 					if (worker.currentPos < worker.targetPos) { // I think this `if` should always hit?
- 						throw new Error('Blob reader stopped unexpectedly before all requested data was read.');
- 					}
-
- 					break;
- 				}
-
- 				if (worker.aborted) {
- 					break;
- 				}
-
- 				this.onread?.(worker.currentPos, worker.currentPos + value.length);
- 				this._orchestrator.supplyWorkerData(worker, value);
- 			} else {
- 				const data = await this._blob.slice(worker.currentPos, worker.targetPos).arrayBuffer();
-
- 				if (worker.aborted) {
- 					break;
- 				}
-
- 				this.onread?.(worker.currentPos, worker.currentPos + data.byteLength);
- 				this._orchestrator.supplyWorkerData(worker, new Uint8Array(data));
- 			}
- 		}
-
- 		worker.running = false;
- 	}
-
- 	/** @internal */
- 	_dispose() {
- 		this._orchestrator.dispose();
- 	}
- }
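Likewise, a hedged sketch of how `BlobSource` would wrap a user-selected file in a browser; the `#file` input element is hypothetical:

```ts
import { BlobSource } from 'mediabunny'; // assumed root export

const fileInput = document.querySelector<HTMLInputElement>('#file')!;
fileInput.addEventListener('change', async () => {
	const file = fileInput.files![0]!; // a File is also a Blob
	const source = new BlobSource(file, { maxCacheSize: 16 * 2 ** 20 }); // 16 MiB cache

	console.log(await source.getSize()); // equals file.size
});
```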
-
- const URL_SOURCE_MIN_LOAD_AMOUNT = /* #__PURE__ */ 0.5 * 2 ** 20; // 0.5 MiB
- const DEFAULT_RETRY_DELAY
- 	= ((previousAttempts, error, src) => {
- 		// Check if this could be a CORS error. If so, we cannot recover from it and
- 		// should not attempt to retry.
- 		// CORS errors are intentionally opaque, so we need to rely on heuristics.
- 		const couldBeCorsError = error instanceof Error && (
- 			error.message.includes('Failed to fetch') // Chrome
- 			|| error.message.includes('Load failed') // Safari
- 			|| error.message.includes('NetworkError when attempting to fetch resource') // Firefox
- 		);
-
- 		if (couldBeCorsError) {
- 			let originOfSrc: string | null = null;
- 			// Checking if the origin is different, because only then could a CORS error originate
- 			try {
- 				if (typeof window !== 'undefined' && typeof window.location !== 'undefined') {
- 					originOfSrc = new URL(src instanceof Request ? src.url : src, window.location.href).origin;
- 				}
- 			} catch {
- 				// URL parse failed
- 			}
-
- 			// If user is offline, it is probably not a CORS error.
- 			const isOnline
- 				= typeof navigator !== 'undefined' && typeof navigator.onLine === 'boolean' ? navigator.onLine : true;
-
- 			if (isOnline && originOfSrc !== null && originOfSrc !== window.location.origin) {
- 				return null;
- 			}
- 		}
-
- 		return Math.min(2 ** (previousAttempts - 2), 16);
- 	}) satisfies UrlSourceOptions['getRetryDelay'];
-
- /**
-  * Options for {@link UrlSource}.
-  * @group Input sources
-  * @public
-  */
- export type UrlSourceOptions = {
- 	/**
- 	 * The [`RequestInit`](https://developer.mozilla.org/en-US/docs/Web/API/RequestInit) used by the Fetch API. Can be
- 	 * used to further control the requests, such as setting custom headers.
- 	 */
- 	requestInit?: RequestInit;
-
- 	/**
- 	 * A function that returns the delay (in seconds) before retrying a failed request. The function is called
- 	 * with the number of previous, unsuccessful attempts, as well as with the error with which the previous request
- 	 * failed. If the function returns `null`, no more retries will be made.
- 	 *
- 	 * By default, it uses an exponential backoff algorithm that never gives up unless
- 	 * a CORS error is suspected (`fetch()` did reject, `navigator.onLine` is true and origin is different).
- 	 */
- 	getRetryDelay?: (previousAttempts: number, error: unknown, url: string | URL | Request) => number | null;
-
- 	/** The maximum number of bytes the cache is allowed to hold in memory. Defaults to 64 MiB. */
- 	maxCacheSize?: number;
-
- 	/**
- 	 * A WHATWG-compatible fetch function. You can use this field to polyfill the `fetch` function, add missing
- 	 * features, or use a custom implementation.
- 	 */
- 	fetchFn?: typeof fetch;
- };
-
- /**
-  * A source backed by a URL. This is useful for reading data from the network. Requests will be made using an optimized
-  * reading and prefetching pattern to minimize request count and latency.
-  * @group Input sources
-  * @public
-  */
- export class UrlSource extends Source {
- 	/** @internal */
- 	_url: string | URL | Request;
- 	/** @internal */
- 	_getRetryDelay: (previousAttempts: number, error: unknown, url: string | URL | Request) => number | null;
- 	/** @internal */
- 	_options: UrlSourceOptions;
- 	/** @internal */
- 	_orchestrator: ReadOrchestrator;
- 	/** @internal */
- 	_existingResponses = new WeakMap<ReadWorker, {
- 		response: Response;
- 		abortController: AbortController;
- 	}>();
-
- 	/** Creates a new {@link UrlSource} backed by the resource at the specified URL. */
- 	constructor(
- 		url: string | URL | Request,
- 		options: UrlSourceOptions = {},
- 	) {
- 		if (
- 			typeof url !== 'string'
- 			&& !(url instanceof URL)
- 			&& !(typeof Request !== 'undefined' && url instanceof Request)
- 		) {
- 			throw new TypeError('url must be a string, URL or Request.');
- 		}
- 		if (!options || typeof options !== 'object') {
- 			throw new TypeError('options must be an object.');
- 		}
- 		if (options.requestInit !== undefined && (!options.requestInit || typeof options.requestInit !== 'object')) {
- 			throw new TypeError('options.requestInit, when provided, must be an object.');
- 		}
- 		if (options.getRetryDelay !== undefined && typeof options.getRetryDelay !== 'function') {
- 			throw new TypeError('options.getRetryDelay, when provided, must be a function.');
- 		}
- 		if (
- 			options.maxCacheSize !== undefined
- 			&& (!isNumber(options.maxCacheSize) || options.maxCacheSize < 0)
- 		) {
- 			throw new TypeError('options.maxCacheSize, when provided, must be a non-negative number.');
- 		}
- 		if (options.fetchFn !== undefined && typeof options.fetchFn !== 'function') {
- 			throw new TypeError('options.fetchFn, when provided, must be a function.');
- 			// Won't bother validating this function beyond this
- 		}
-
- 		super();
-
- 		this._url = url;
- 		this._options = options;
- 		this._getRetryDelay = options.getRetryDelay ?? DEFAULT_RETRY_DELAY;
-
- 		this._orchestrator = new ReadOrchestrator({
- 			maxCacheSize: options.maxCacheSize ?? (64 * 2 ** 20 /* 64 MiB */),
- 			// Most files in the real world have a single sequential access pattern, but having two in parallel can
- 			// also happen
- 			maxWorkerCount: 2,
- 			runWorker: this._runWorker.bind(this),
- 			prefetchProfile: PREFETCH_PROFILES.network,
- 		});
- 	}
-
- 	/** @internal */
- 	async _retrieveSize(): Promise<number> {
- 		// Retrieving the resource size for UrlSource is optimized: Almost always (= always), the first bytes we have to
- 		// read are the start of the file. This means it's smart to combine size fetching with fetching the start of the
- 		// file. We additionally use this step to probe if the server supports range requests, killing three birds with
- 		// one stone.
-
- 		const abortController = new AbortController();
- 		const response = await retriedFetch(
- 			this._options.fetchFn ?? fetch,
- 			this._url,
- 			mergeRequestInit(this._options.requestInit ?? {}, {
- 				headers: {
- 					// We could also send a non-range request to request the same bytes (all of them), but doing it like
- 					// this is an easy way to check if the server supports range requests in the first place
- 					Range: 'bytes=0-',
- 				},
- 				signal: abortController.signal,
- 			}),
- 			this._getRetryDelay,
- 		);
-
- 		if (!response.ok) {
- 			// eslint-disable-next-line @typescript-eslint/no-base-to-string
- 			throw new Error(`Error fetching ${String(this._url)}: ${response.status} ${response.statusText}`);
- 		}
-
- 		let worker: ReadWorker;
- 		let fileSize: number;
-
- 		if (response.status === 206) {
- 			fileSize = this._getPartialLengthFromRangeResponse(response);
- 			worker = this._orchestrator.createWorker(0, Math.min(fileSize, URL_SOURCE_MIN_LOAD_AMOUNT));
- 		} else {
- 			// Server probably returned a 200.
-
- 			const contentLength = response.headers.get('Content-Length');
- 			if (contentLength) {
- 				fileSize = Number(contentLength);
- 				worker = this._orchestrator.createWorker(0, fileSize);
- 				this._orchestrator.options.maxCacheSize = Infinity; // 🤷
-
- 				console.warn(
- 					'HTTP server did not respond with 206 Partial Content, meaning the entire remote resource now has'
- 					+ ' to be downloaded. For efficient media file streaming across a network, please make sure your'
- 					+ ' server supports range requests.',
- 				);
- 			} else {
- 				throw new Error(`HTTP response (status ${response.status}) must surface Content-Length header.`);
- 			}
- 		}
-
- 		this._orchestrator.fileSize = fileSize;
-
- 		this._existingResponses.set(worker, { response, abortController });
- 		this._orchestrator.runWorker(worker);
-
- 		return fileSize;
- 	}
-
- 	/** @internal */
- 	_read(start: number, end: number): MaybePromise<ReadResult> {
- 		return this._orchestrator.read(start, end);
- 	}
-
- 	/** @internal */
- 	private async _runWorker(worker: ReadWorker) {
- 		// The outer loop is for resuming a request if it dies mid-response
- 		while (true) {
- 			const existing = this._existingResponses.get(worker);
- 			this._existingResponses.delete(worker);
-
- 			let abortController = existing?.abortController;
- 			let response = existing?.response;
-
- 			if (!abortController) {
- 				abortController = new AbortController();
- 				response = await retriedFetch(
- 					this._options.fetchFn ?? fetch,
- 					this._url,
- 					mergeRequestInit(this._options.requestInit ?? {}, {
- 						headers: {
- 							Range: `bytes=${worker.currentPos}-`,
- 						},
- 						signal: abortController.signal,
- 					}),
- 					this._getRetryDelay,
- 				);
- 			}
-
- 			assert(response);
-
- 			if (!response.ok) {
- 				// eslint-disable-next-line @typescript-eslint/no-base-to-string
- 				throw new Error(`Error fetching ${String(this._url)}: ${response.status} ${response.statusText}`);
- 			}
-
- 			if (worker.currentPos > 0 && response.status !== 206) {
- 				throw new Error(
- 					'HTTP server did not respond with 206 Partial Content to a range request. To enable efficient media'
- 					+ ' file streaming across a network, please make sure your server supports range requests.',
- 				);
- 			}
-
- 			const length = this._getPartialLengthFromRangeResponse(response);
- 			const required = worker.targetPos - worker.currentPos;
- 			if (length < required) {
- 				throw new Error(
- 					`HTTP response unexpectedly too short: Needed at least ${required} bytes, got only ${length}.`,
- 				);
- 			}
-
- 			if (!response.body) {
- 				throw new Error(
- 					'Missing HTTP response body stream. The used fetch function must provide the response body as a'
- 					+ ' ReadableStream.',
- 				);
- 			}
-
- 			const reader = response.body.getReader();
-
- 			while (true) {
- 				if (worker.currentPos >= worker.targetPos || worker.aborted) {
- 					abortController.abort();
- 					worker.running = false;
-
- 					return;
- 				}
-
- 				let readResult: ReadableStreamReadResult<Uint8Array>;
-
- 				try {
- 					readResult = await reader.read();
- 				} catch (error) {
- 					const retryDelayInSeconds = this._getRetryDelay(1, error, this._url);
- 					if (retryDelayInSeconds !== null) {
- 						console.error('Error while reading response stream. Attempting to resume.', error);
- 						await new Promise(resolve => setTimeout(resolve, 1000 * retryDelayInSeconds));
-
- 						break;
- 					} else {
- 						throw error;
- 					}
- 				}
-
- 				if (worker.aborted) {
- 					break;
- 				}
-
- 				const { done, value } = readResult;
-
- 				if (done) {
- 					this._orchestrator.forgetWorker(worker);
-
- 					if (worker.currentPos < worker.targetPos) {
- 						throw new Error(
- 							'Response stream reader stopped unexpectedly before all requested data was read.',
- 						);
- 					}
-
- 					worker.running = false;
- 					return;
- 				}
-
- 				this.onread?.(worker.currentPos, worker.currentPos + value.length);
- 				this._orchestrator.supplyWorkerData(worker, value);
- 			}
-
- 			if (worker.aborted) {
- 				break;
- 			}
- 		}
-
- 		worker.running = false;
-
- 		// The previous UrlSource had logic for circumventing https://issues.chromium.org/issues/436025873; I haven't
- 		// been able to observe this bug with the new UrlSource (maybe because we're using response streaming), so the
- 		// logic for that has vanished for now. Leaving a comment here if this becomes relevant again.
- 	}
-
- 	/** @internal */
- 	private _getPartialLengthFromRangeResponse(response: Response) {
- 		const contentRange = response.headers.get('Content-Range');
- 		if (contentRange) {
- 			const match = /\/(\d+)/.exec(contentRange);
- 			if (match) {
- 				return Number(match[1]);
- 			} else {
- 				throw new Error(`Invalid Content-Range header: ${contentRange}`);
- 			}
- 		} else {
- 			const contentLength = response.headers.get('Content-Length');
- 			if (contentLength) {
- 				return Number(contentLength);
- 			} else {
- 				throw new Error(
- 					'Partial HTTP response (status 206) must surface either Content-Range or'
- 					+ ' Content-Length header.',
- 				);
- 			}
- 		}
- 	}
-
- 	/** @internal */
- 	_dispose() {
- 		this._orchestrator.dispose();
- 	}
- }
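A sketch of `UrlSource` with the options documented above; the URL, the header value, and the retry policy are placeholders:

```ts
import { UrlSource } from 'mediabunny'; // assumed root export

const source = new UrlSource('https://example.com/video.mp4', {
	requestInit: { headers: { Authorization: 'Bearer <token>' } },
	// Retry after 1 s, then 2 s, then give up (the default never gives up unless a CORS error is suspected)
	getRetryDelay: previousAttempts => (previousAttempts < 3 ? previousAttempts : null),
	maxCacheSize: 32 * 2 ** 20, // 32 MiB
});

// The first size retrieval also fetches the start of the file and probes range-request support
console.log(await source.getSize());
```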
-
- /**
-  * Options for {@link FilePathSource}.
-  * @group Input sources
-  * @public
-  */
- export type FilePathSourceOptions = {
- 	/** The maximum number of bytes the cache is allowed to hold in memory. Defaults to 8 MiB. */
- 	maxCacheSize?: number;
- };
-
- /**
-  * A source backed by a path to a file. Intended for server-side usage in Node, Bun, or Deno.
-  *
-  * Make sure to call `.dispose()` on the corresponding {@link Input} when done to explicitly free the internal file
-  * handle acquired by this source.
-  * @group Input sources
-  * @public
-  */
- export class FilePathSource extends Source {
- 	/** @internal */
- 	_streamSource: StreamSource;
- 	/** @internal */
- 	_fileHandle: FileHandle | null = null;
-
- 	/** Creates a new {@link FilePathSource} backed by the file at the specified file path. */
- 	constructor(filePath: string, options: FilePathSourceOptions = {}) {
- 		if (typeof filePath !== 'string') {
- 			throw new TypeError('filePath must be a string.');
- 		}
- 		if (!options || typeof options !== 'object') {
- 			throw new TypeError('options must be an object.');
- 		}
- 		if (
- 			options.maxCacheSize !== undefined
- 			&& (!isNumber(options.maxCacheSize) || options.maxCacheSize < 0)
- 		) {
- 			throw new TypeError('options.maxCacheSize, when provided, must be a non-negative number.');
- 		}
-
- 		super();
-
- 		// Let's back this source with a StreamSource; it makes the implementation very simple
- 		this._streamSource = new StreamSource({
- 			getSize: async () => {
- 				this._fileHandle = await node.fs.open(filePath, 'r');
-
- 				const stats = await this._fileHandle.stat();
- 				return stats.size;
- 			},
- 			read: async (start, end) => {
- 				assert(this._fileHandle);
-
- 				const buffer = new Uint8Array(end - start);
- 				await this._fileHandle.read(buffer, 0, end - start, start);
-
- 				return buffer;
- 			},
- 			maxCacheSize: options.maxCacheSize,
- 			prefetchProfile: 'fileSystem',
- 		});
- 	}
-
- 	/** @internal */
- 	_read(start: number, end: number): MaybePromise<ReadResult> {
- 		return this._streamSource._read(start, end);
- 	}
-
- 	/** @internal */
- 	_retrieveSize(): MaybePromise<number> {
- 		return this._streamSource._retrieveSize();
- 	}
-
- 	/** @internal */
- 	_dispose() {
- 		this._streamSource._dispose();
- 		void this._fileHandle?.close();
- 		this._fileHandle = null;
- 	}
- }
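A server-side sketch for `FilePathSource`; the path is hypothetical, and per the doc comment above the file handle is only freed once the owning `Input` is disposed:

```ts
import { FilePathSource } from 'mediabunny'; // assumed root export

const source = new FilePathSource('/tmp/video.mp4', { maxCacheSize: 8 * 2 ** 20 });

// getSize() lazily opens the file handle and stats it
console.log(await source.getSize());
```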
-
- /**
-  * Options for defining a {@link StreamSource}.
-  * @group Input sources
-  * @public
-  */
- export type StreamSourceOptions = {
- 	/**
- 	 * Called when the size of the entire file is requested. Must return or resolve to the size in bytes. This function
- 	 * is guaranteed to be called before `read`.
- 	 */
- 	getSize: () => MaybePromise<number>;
-
- 	/**
- 	 * Called when data is requested. Must return or resolve to the bytes from the specified byte range, or a stream
- 	 * that yields these bytes.
- 	 */
- 	read: (start: number, end: number) => MaybePromise<Uint8Array | ReadableStream<Uint8Array>>;
-
- 	/**
- 	 * Called when the {@link Input} driven by this source is disposed.
- 	 */
- 	dispose?: () => unknown;
-
- 	/** The maximum number of bytes the cache is allowed to hold in memory. Defaults to 8 MiB. */
- 	maxCacheSize?: number;
-
- 	/**
- 	 * Specifies the prefetch profile that the reader should use with this source. A prefetch profile specifies the
- 	 * pattern with which bytes outside of the requested range are preloaded to reduce latency for future reads.
- 	 *
- 	 * - `'none'` (default): No prefetching; only the data needed in the moment is requested.
- 	 * - `'fileSystem'`: File system-optimized prefetching: a small amount of data is prefetched bidirectionally,
- 	 * aligned with page boundaries.
- 	 * - `'network'`: Network-optimized prefetching, or more generally, prefetching optimized for any high-latency
- 	 * environment: tries to minimize the number of read calls and aggressively prefetches data when sequential access
- 	 * patterns are detected.
- 	 */
- 	prefetchProfile?: 'none' | 'fileSystem' | 'network';
- };
-
- /**
-  * A general-purpose, callback-driven source that can get its data from anywhere.
-  * @group Input sources
-  * @public
-  */
- export class StreamSource extends Source {
- 	/** @internal */
- 	_options: StreamSourceOptions;
- 	/** @internal */
- 	_orchestrator: ReadOrchestrator;
-
- 	/** Creates a new {@link StreamSource} whose behavior is specified by `options`. */
- 	constructor(options: StreamSourceOptions) {
- 		if (!options || typeof options !== 'object') {
- 			throw new TypeError('options must be an object.');
- 		}
- 		if (typeof options.getSize !== 'function') {
- 			throw new TypeError('options.getSize must be a function.');
- 		}
- 		if (typeof options.read !== 'function') {
- 			throw new TypeError('options.read must be a function.');
- 		}
- 		if (options.dispose !== undefined && typeof options.dispose !== 'function') {
- 			throw new TypeError('options.dispose, when provided, must be a function.');
- 		}
- 		if (
- 			options.maxCacheSize !== undefined
- 			&& (!isNumber(options.maxCacheSize) || options.maxCacheSize < 0)
- 		) {
- 			throw new TypeError('options.maxCacheSize, when provided, must be a non-negative number.');
- 		}
- 		if (options.prefetchProfile && !['none', 'fileSystem', 'network'].includes(options.prefetchProfile)) {
- 			throw new TypeError(
- 				'options.prefetchProfile, when provided, must be one of \'none\', \'fileSystem\' or \'network\'.',
- 			);
- 		}
-
- 		super();
-
- 		this._options = options;
-
- 		this._orchestrator = new ReadOrchestrator({
- 			maxCacheSize: options.maxCacheSize ?? (8 * 2 ** 20 /* 8 MiB */),
- 			maxWorkerCount: 2, // Fixed for now, *should* be fine
- 			prefetchProfile: PREFETCH_PROFILES[options.prefetchProfile ?? 'none'],
- 			runWorker: this._runWorker.bind(this),
- 		});
- 	}
-
- 	/** @internal */
- 	_retrieveSize(): MaybePromise<number> {
- 		const result = this._options.getSize();
-
- 		if (result instanceof Promise) {
- 			return result.then((size) => {
- 				if (!Number.isInteger(size) || size < 0) {
- 					throw new TypeError('options.getSize must return or resolve to a non-negative integer.');
- 				}
-
- 				this._orchestrator.fileSize = size;
- 				return size;
- 			});
- 		} else {
- 			if (!Number.isInteger(result) || result < 0) {
- 				throw new TypeError('options.getSize must return or resolve to a non-negative integer.');
- 			}
-
- 			this._orchestrator.fileSize = result;
- 			return result;
- 		}
- 	}
-
- 	/** @internal */
- 	_read(start: number, end: number): MaybePromise<ReadResult> {
- 		return this._orchestrator.read(start, end);
- 	}
-
- 	/** @internal */
- 	private async _runWorker(worker: ReadWorker) {
- 		while (worker.currentPos < worker.targetPos && !worker.aborted) {
- 			const originalCurrentPos = worker.currentPos;
- 			const originalTargetPos = worker.targetPos;
-
- 			let data = this._options.read(worker.currentPos, originalTargetPos);
- 			if (data instanceof Promise) data = await data;
-
- 			if (worker.aborted) {
- 				break;
- 			}
-
- 			if (data instanceof Uint8Array) {
- 				data = toUint8Array(data); // Normalize things like Node.js Buffer to Uint8Array
-
- 				if (data.length !== originalTargetPos - worker.currentPos) {
- 					// Yes, we're that strict
- 					throw new Error(
- 						`options.read returned a Uint8Array with unexpected length: Requested ${
- 							originalTargetPos - worker.currentPos
- 						} bytes, but got ${data.length}.`,
- 					);
- 				}
-
- 				this.onread?.(worker.currentPos, worker.currentPos + data.length);
- 				this._orchestrator.supplyWorkerData(worker, data);
- 			} else if (data instanceof ReadableStream) {
- 				const reader = data.getReader();
-
- 				while (worker.currentPos < originalTargetPos && !worker.aborted) {
- 					const { done, value } = await reader.read();
- 					if (done) {
- 						if (worker.currentPos < originalTargetPos) {
- 							// Yes, we're *that* strict
- 							throw new Error(
- 								`ReadableStream returned by options.read ended before supplying enough data.`
- 								+ ` Requested ${originalTargetPos - originalCurrentPos} bytes, but got ${
- 									worker.currentPos - originalCurrentPos
- 								}`,
- 							);
- 						}
-
- 						break;
- 					}
-
- 					if (!(value instanceof Uint8Array)) {
- 						throw new TypeError('ReadableStream returned by options.read must yield Uint8Array chunks.');
- 					}
-
- 					if (worker.aborted) {
- 						break;
- 					}
-
- 					const data = toUint8Array(value); // Normalize things like Node.js Buffer to Uint8Array
-
- 					this.onread?.(worker.currentPos, worker.currentPos + data.length);
- 					this._orchestrator.supplyWorkerData(worker, data);
- 				}
- 			} else {
- 				throw new TypeError('options.read must return or resolve to a Uint8Array or a ReadableStream.');
- 			}
- 		}
-
- 		worker.running = false;
- 	}
-
- 	/** @internal */
- 	_dispose() {
- 		this._orchestrator.dispose();
- 		this._options.dispose?.();
- 	}
- }
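A sketch of a custom `StreamSource` over an in-memory byte store (a hypothetical stand-in for any backend); note the strictness documented above: `read` must supply exactly the requested range:

```ts
import { StreamSource } from 'mediabunny'; // assumed root export

const backing = new Uint8Array(2 ** 20); // stand-in for any byte store

const source = new StreamSource({
	getSize: () => backing.byteLength,
	read: (start, end) => backing.subarray(start, end), // exactly end - start bytes
	dispose: () => console.log('released'),
	prefetchProfile: 'none',
});

console.log(await source.getSize()); // 1048576
```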
-
- type ReadableStreamSourcePendingSlice = {
- 	start: number;
- 	end: number;
- 	bytes: Uint8Array;
- 	resolve: (bytes: ReadResult | null) => void;
- 	reject: (error: unknown) => void;
- };
-
- /**
-  * Options for {@link ReadableStreamSource}.
-  * @group Input sources
-  * @public
-  */
- export type ReadableStreamSourceOptions = {
- 	/** The maximum number of bytes the cache is allowed to hold in memory. Defaults to 16 MiB. */
- 	maxCacheSize?: number;
- };
-
- /**
-  * A source backed by a [`ReadableStream`](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream) of
-  * `Uint8Array`, representing an append-only byte stream of unknown length. This is the source to use for incrementally
-  * streaming in input files that are still being constructed and whose size we don't yet know, like for example the
-  * output chunks of [MediaRecorder](https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder).
-  *
-  * This source is *unsized*, meaning calls to `.getSize()` will throw and readers are more limited due to the
-  * lack of random file access. You should only use this source with sequential access patterns, such as reading all
-  * packets from start to end. This source does not work well with random access patterns unless you increase its
-  * max cache size.
-  *
-  * @group Input sources
-  * @public
-  */
- export class ReadableStreamSource extends Source {
- 	/** @internal */
- 	_stream: ReadableStream<Uint8Array>;
- 	/** @internal */
- 	_reader: ReadableStreamDefaultReader<Uint8Array> | null = null;
- 	/** @internal */
- 	_cache: CacheEntry[] = [];
- 	/** @internal */
- 	_maxCacheSize: number;
- 	/** @internal */
- 	_pendingSlices: ReadableStreamSourcePendingSlice[] = [];
- 	/** @internal */
- 	_currentIndex = 0;
- 	/** @internal */
- 	_targetIndex = 0;
- 	/** @internal */
- 	_maxRequestedIndex = 0;
- 	/** @internal */
- 	_endIndex: number | null = null;
- 	/** @internal */
- 	_pulling = false;
-
- 	/** Creates a new {@link ReadableStreamSource} backed by the specified `ReadableStream<Uint8Array>`. */
- 	constructor(stream: ReadableStream<Uint8Array>, options: ReadableStreamSourceOptions = {}) {
- 		if (!(stream instanceof ReadableStream)) {
- 			throw new TypeError('stream must be a ReadableStream.');
- 		}
- 		if (!options || typeof options !== 'object') {
- 			throw new TypeError('options must be an object.');
- 		}
- 		if (
- 			options.maxCacheSize !== undefined
- 			&& (!isNumber(options.maxCacheSize) || options.maxCacheSize < 0)
- 		) {
- 			throw new TypeError('options.maxCacheSize, when provided, must be a non-negative number.');
- 		}
-
- 		super();
-
- 		this._stream = stream;
- 		this._maxCacheSize = options.maxCacheSize ?? (16 * 2 ** 20 /* 16 MiB */);
- 	}
-
- 	/** @internal */
- 	_retrieveSize() {
- 		return this._endIndex; // Starts out as null, meaning this source is unsized
- 	}
-
- 	/** @internal */
- 	_read(start: number, end: number): MaybePromise<ReadResult | null> {
- 		if (this._endIndex !== null && end > this._endIndex) {
- 			return null;
- 		}
-
- 		this._maxRequestedIndex = Math.max(this._maxRequestedIndex, end);
-
- 		const cacheStartIndex = binarySearchLessOrEqual(this._cache, start, x => x.start);
- 		const cacheStartEntry = cacheStartIndex !== -1 ? this._cache[cacheStartIndex]! : null;
-
- 		if (cacheStartEntry && cacheStartEntry.start <= start && end <= cacheStartEntry.end) {
- 			// The request can be satisfied with a single cache entry
- 			return {
- 				bytes: cacheStartEntry.bytes,
- 				view: cacheStartEntry.view,
- 				offset: cacheStartEntry.start,
- 			};
- 		}
-
- 		let lastEnd = start;
- 		const bytes = new Uint8Array(end - start);
-
- 		if (cacheStartIndex !== -1) {
- 			// Walk over the cache to see if we can satisfy the request using multiple cache entries
- 			for (let i = cacheStartIndex; i < this._cache.length; i++) {
- 				const cacheEntry = this._cache[i]!;
- 				if (cacheEntry.start >= end) {
- 					break;
- 				}
-
- 				const cappedStart = Math.max(start, cacheEntry.start);
- 				if (cappedStart > lastEnd) {
- 					// We're too far behind
- 					this._throwDueToCacheMiss();
- 				}
-
- 				const cappedEnd = Math.min(end, cacheEntry.end);
-
- 				if (cappedStart < cappedEnd) {
- 					bytes.set(
- 						cacheEntry.bytes.subarray(cappedStart - cacheEntry.start, cappedEnd - cacheEntry.start),
- 						cappedStart - start,
- 					);
-
- 					lastEnd = cappedEnd;
- 				}
- 			}
- 		}
-
- 		if (lastEnd === end) {
- 			return {
- 				bytes,
- 				view: toDataView(bytes),
- 				offset: start,
- 			};
- 		}
-
- 		// We need to pull more data
-
- 		if (this._currentIndex > lastEnd) {
- 			// We're too far behind
- 			this._throwDueToCacheMiss();
- 		}
-
- 		const { promise, resolve, reject } = promiseWithResolvers<ReadResult | null>();
-
- 		this._pendingSlices.push({
- 			start,
- 			end,
- 			bytes,
- 			resolve,
- 			reject,
- 		});
-
- 		this._targetIndex = Math.max(this._targetIndex, end);
-
- 		// Start pulling from the stream if we're not already doing it
- 		if (!this._pulling) {
- 			this._pulling = true;
- 			void this._pull()
- 				.catch((error) => {
- 					this._pulling = false;
-
- 					if (this._pendingSlices.length > 0) {
- 						this._pendingSlices.forEach(x => x.reject(error)); // Make sure to propagate any errors
- 						this._pendingSlices.length = 0;
- 					} else {
- 						throw error; // So it doesn't get swallowed
- 					}
- 				});
- 		}
-
- 		return promise;
- 	}
-
- 	/** @internal */
- 	_throwDueToCacheMiss() {
- 		throw new Error(
- 			'Read is before the cached region. With ReadableStreamSource, you must access the data more'
- 			+ ' sequentially or increase the size of its cache.',
- 		);
- 	}
-
- 	/** @internal */
- 	async _pull() {
- 		this._reader ??= this._stream.getReader();
-
- 		// This is the loop that keeps pulling data from the stream until a target index is reached, filling requests
- 		// in the process
- 		while (this._currentIndex < this._targetIndex && !this._disposed) {
- 			const { done, value } = await this._reader.read();
- 			if (done) {
- 				for (const pendingSlice of this._pendingSlices) {
- 					pendingSlice.resolve(null);
- 				}
- 				this._pendingSlices.length = 0;
- 				this._endIndex = this._currentIndex; // We know how long the file is now!
-
- 				break;
- 			}
-
- 			const startIndex = this._currentIndex;
- 			const endIndex = this._currentIndex + value.byteLength;
-
- 			// Fill the pending slices with the data
- 			for (let i = 0; i < this._pendingSlices.length; i++) {
- 				const pendingSlice = this._pendingSlices[i]!;
-
- 				const cappedStart = Math.max(startIndex, pendingSlice.start);
- 				const cappedEnd = Math.min(endIndex, pendingSlice.end);
-
- 				if (cappedStart < cappedEnd) {
- 					pendingSlice.bytes.set(
- 						value.subarray(cappedStart - startIndex, cappedEnd - startIndex),
- 						cappedStart - pendingSlice.start,
- 					);
- 					if (cappedEnd === pendingSlice.end) {
- 						// Pending slice fully filled
- 						pendingSlice.resolve({
- 							bytes: pendingSlice.bytes,
- 							view: toDataView(pendingSlice.bytes),
- 							offset: pendingSlice.start,
- 						});
- 						this._pendingSlices.splice(i, 1);
- 						i--;
- 					}
- 				}
- 			}
-
- 			this._cache.push({
- 				start: startIndex,
- 				end: endIndex,
- 				bytes: value,
- 				view: toDataView(value),
- 				age: 0, // Unused
- 			});
-
- 			// Do cache eviction, based on the distance from the last-requested index. It's important that we do it like
- 			// this and not based on where the reader is at, because if the reader is fast, we'll unnecessarily evict
- 			// data that we still might need.
- 			while (this._cache.length > 0) {
- 				const firstEntry = this._cache[0]!;
- 				const distance = this._maxRequestedIndex - firstEntry.end;
-
- 				if (distance <= this._maxCacheSize) {
- 					break;
- 				}
-
- 				this._cache.shift();
- 			}
-
- 			this._currentIndex += value.byteLength;
- 		}
-
- 		this._pulling = false;
- 	}
-
- 	/** @internal */
- 	_dispose() {
- 		this._pendingSlices.length = 0;
- 		this._cache.length = 0;
- 	}
- }
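A sketch of `ReadableStreamSource` over a short, self-closing stream; with a real recorder, chunks would arrive incrementally instead:

```ts
import { ReadableStreamSource } from 'mediabunny'; // assumed root export

const stream = new ReadableStream<Uint8Array>({
	start(controller) {
		controller.enqueue(new Uint8Array([1, 2, 3]));
		controller.enqueue(new Uint8Array([4, 5, 6]));
		controller.close();
	},
});

const source = new ReadableStreamSource(stream, { maxCacheSize: 32 * 2 ** 20 });

// The source is unsized: getSize() would throw here, while getSizeOrNull() resolves to null
console.log(await source.getSizeOrNull()); // null
```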
-
- type PrefetchProfile = (start: number, end: number, workers: ReadWorker[]) => {
- 	start: number;
- 	end: number;
- };
-
- const PREFETCH_PROFILES = {
- 	none: (start, end) => ({ start, end }),
- 	fileSystem: (start, end) => {
- 		const padding = 2 ** 16;
-
- 		start = Math.floor((start - padding) / padding) * padding;
- 		end = Math.ceil((end + padding) / padding) * padding;
-
- 		return { start, end };
- 	},
- 	network: (start, end, workers) => {
- 		// Add a slight bit of start padding because backwards reading is painful
- 		const paddingStart = 2 ** 16;
- 		start = Math.max(0, Math.floor((start - paddingStart) / paddingStart) * paddingStart);
-
- 		// Remote resources have extreme latency (relatively speaking), so the benefit from intelligent
- 		// prefetching is great. The network prefetch strategy is as follows: When we notice
- 		// successive reads to a worker's read region, we prefetch more data at the end of that region,
- 		// growing exponentially (up to a cap). This performs well for real-world use cases: Either we read a
- 		// small part of the file once and then never need it again, in which case the requested amount of data
- 		// is small. Or, we're repeatedly doing a sequential access pattern (common in media files), in which
- 		// case we can become more and more confident to prefetch more and more data.
- 		for (const worker of workers) {
- 			const maxExtensionAmount = 8 * 2 ** 20; // 8 MiB
-
- 			// When the read region crosses the threshold point, we trigger a prefetch. This point is typically
- 			// in the middle of the worker's read region, or a fixed offset from the end if the region has grown
- 			// really large.
- 			const thresholdPoint = Math.max(
- 				(worker.startPos + worker.targetPos) / 2,
- 				worker.targetPos - maxExtensionAmount,
- 			);
-
- 			if (closedIntervalsOverlap(
- 				start, end,
- 				thresholdPoint, worker.targetPos,
- 			)) {
- 				const size = worker.targetPos - worker.startPos;
-
- 				// If we extend by maxExtensionAmount
- 				const a = Math.ceil((size + 1) / maxExtensionAmount) * maxExtensionAmount;
- 				// If we extend to the next power of 2
- 				const b = 2 ** Math.ceil(Math.log2(size + 1));
-
- 				const extent = Math.min(b, a);
- 				end = Math.max(end, worker.startPos + extent);
- 			}
- 		}
-
- 		end = Math.max(end, start + URL_SOURCE_MIN_LOAD_AMOUNT);
-
- 		return {
- 			start,
- 			end,
- 		};
- 	},
- } satisfies Record<string, PrefetchProfile>;
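To make the `network` profile's growth rule above concrete, a small worked example (not from the source) for a worker whose read region has grown to 3 MiB from a `startPos` of 0:

```ts
const maxExtensionAmount = 8 * 2 ** 20; // 8 MiB, as in the profile above
const size = 3 * 2 ** 20; // current region size: 3 MiB

const a = Math.ceil((size + 1) / maxExtensionAmount) * maxExtensionAmount; // 8 MiB step
const b = 2 ** Math.ceil(Math.log2(size + 1)); // next power of two: 4 MiB

console.log(Math.min(b, a) / 2 ** 20); // 4: the region doubles until the 8 MiB steps take over
```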
1215
-
1216
- type PendingSlice = {
1217
- start: number;
1218
- bytes: Uint8Array;
1219
- holes: {
1220
- start: number;
1221
- end: number;
1222
- }[];
1223
- resolve: (bytes: Uint8Array) => void;
1224
- reject: (error: unknown) => void;
1225
- };
1226
-
1227
- type CacheEntry = {
1228
- start: number;
1229
- end: number;
1230
- bytes: Uint8Array;
1231
- view: DataView;
1232
- age: number;
1233
- };
1234
-
1235
- type ReadWorker = {
1236
- startPos: number;
1237
- currentPos: number;
1238
- targetPos: number;
1239
- running: boolean;
1240
- aborted: boolean;
1241
- pendingSlices: PendingSlice[];
1242
- age: number;
1243
- };
-
- /**
-  * Godclass for orchestrating complex, cached read operations. The reading model is as follows: Any reading task is
-  * delegated to a *worker*, which is a sequential reader positioned somewhere along the file. All workers run in
-  * parallel and can be stopped and resumed in their forward movement. When read requests come in, this orchestrator will
-  * first try to satisfy the request with only the cached data. If this isn't possible, workers are spun up for all
-  * missing parts (or existing workers are repurposed), and these workers will then fill the holes in the data as they
-  * march along the file.
-  */
- class ReadOrchestrator {
- 	fileSize: number | null = null;
- 	nextAge = 0; // Used for LRU eviction of both cache entries and workers
- 	workers: ReadWorker[] = [];
- 	cache: CacheEntry[] = [];
- 	currentCacheSize = 0;
- 	disposed = false;
-
- 	constructor(public options: {
- 		maxCacheSize: number;
- 		runWorker: (worker: ReadWorker) => Promise<void>;
- 		prefetchProfile: PrefetchProfile;
- 		maxWorkerCount: number;
- 	}) {}
-
- 	read(innerStart: number, innerEnd: number): MaybePromise<ReadResult> {
- 		assert(this.fileSize !== null);
-
- 		const prefetchRange = this.options.prefetchProfile(innerStart, innerEnd, this.workers);
- 		const outerStart = Math.max(prefetchRange.start, 0);
- 		const outerEnd = Math.min(prefetchRange.end, this.fileSize);
- 		assert(outerStart <= innerStart && innerEnd <= outerEnd);
-
- 		let result: MaybePromise<{
- 			bytes: Uint8Array;
- 			view: DataView;
- 			offset: number;
- 		}> | null = null;
-
- 		const innerCacheStartIndex = binarySearchLessOrEqual(this.cache, innerStart, x => x.start);
- 		const innerStartEntry = innerCacheStartIndex !== -1 ? this.cache[innerCacheStartIndex] : null;
-
- 		// See if the read request can be satisfied by a single cache entry
- 		if (innerStartEntry && innerStartEntry.start <= innerStart && innerEnd <= innerStartEntry.end) {
- 			innerStartEntry.age = this.nextAge++;
-
- 			result = {
- 				bytes: innerStartEntry.bytes,
- 				view: innerStartEntry.view,
- 				offset: innerStartEntry.start,
- 			};
- 			// Can't return yet though, still need to check if the prefetch range might lie outside the cached area
- 		}
-
- 		const outerCacheStartIndex = binarySearchLessOrEqual(this.cache, outerStart, x => x.start);
-
- 		const bytes = result ? null : new Uint8Array(innerEnd - innerStart);
- 		let contiguousBytesWriteEnd = 0; // Used to track if the cache is able to completely cover the bytes
-
- 		let lastEnd = outerStart;
- 		// The "holes" in the cache (the parts we need to load)
- 		const outerHoles: {
- 			start: number;
- 			end: number;
- 		}[] = [];
-
- 		// Loop over the cache and build up the list of holes
- 		if (outerCacheStartIndex !== -1) {
- 			for (let i = outerCacheStartIndex; i < this.cache.length; i++) {
- 				const entry = this.cache[i]!;
- 				if (entry.start >= outerEnd) {
- 					break;
- 				}
- 				if (entry.end <= outerStart) {
- 					continue;
- 				}
-
- 				const cappedOuterStart = Math.max(outerStart, entry.start);
- 				const cappedOuterEnd = Math.min(outerEnd, entry.end);
- 				assert(cappedOuterStart <= cappedOuterEnd);
-
- 				if (lastEnd < cappedOuterStart) {
- 					outerHoles.push({ start: lastEnd, end: cappedOuterStart });
- 				}
- 				lastEnd = cappedOuterEnd;
-
- 				if (bytes) {
- 					const cappedInnerStart = Math.max(innerStart, entry.start);
- 					const cappedInnerEnd = Math.min(innerEnd, entry.end);
-
- 					if (cappedInnerStart < cappedInnerEnd) {
- 						const relativeOffset = cappedInnerStart - innerStart;
-
- 						// Fill the relevant section of the bytes with the cached data
- 						bytes.set(
- 							entry.bytes.subarray(cappedInnerStart - entry.start, cappedInnerEnd - entry.start),
- 							relativeOffset,
- 						);
-
- 						if (relativeOffset === contiguousBytesWriteEnd) {
- 							contiguousBytesWriteEnd = cappedInnerEnd - innerStart;
- 						}
- 					}
- 				}
- 				entry.age = this.nextAge++;
- 			}
-
- 			if (lastEnd < outerEnd) {
- 				outerHoles.push({ start: lastEnd, end: outerEnd });
- 			}
- 		} else {
- 			outerHoles.push({ start: outerStart, end: outerEnd });
- 		}
-
- 		if (bytes && contiguousBytesWriteEnd >= bytes.length) {
- 			// Multiple cache entries were able to completely cover the requested bytes!
- 			result = {
- 				bytes,
- 				view: toDataView(bytes),
- 				offset: innerStart,
- 			};
- 		}
-
- 		if (outerHoles.length === 0) {
- 			assert(result);
- 			return result;
- 		}
-
- 		// We need to read more data, so now we're in async land
- 		const { promise, resolve, reject } = promiseWithResolvers<Uint8Array>();
-
- 		const innerHoles: typeof outerHoles = [];
- 		for (const outerHole of outerHoles) {
- 			const cappedStart = Math.max(innerStart, outerHole.start);
- 			const cappedEnd = Math.min(innerEnd, outerHole.end);
-
- 			if (cappedStart === outerHole.start && cappedEnd === outerHole.end) {
- 				innerHoles.push(outerHole); // Can reuse without allocating a new object
- 			} else if (cappedStart < cappedEnd) {
- 				innerHoles.push({ start: cappedStart, end: cappedEnd });
- 			}
- 		}
-
- 		// Fire off workers to take care of patching the holes
- 		for (const outerHole of outerHoles) {
- 			const pendingSlice: PendingSlice | null = bytes && {
- 				start: innerStart,
- 				bytes,
- 				holes: innerHoles,
- 				resolve,
- 				reject,
- 			};
-
- 			let workerFound = false;
- 			for (const worker of this.workers) {
- 				// A small tolerance in the case that the requested region is *just* after the target position of an
- 				// existing worker. In that case, it's probably more efficient to repurpose that worker than to spawn
- 				// another one so close to it
- 				const gapTolerance = 2 ** 17;
-
- 				// This check also implies worker.currentPos <= outerHole.start, a critical condition
- 				if (closedIntervalsOverlap(
- 					outerHole.start - gapTolerance, outerHole.start,
- 					worker.currentPos, worker.targetPos,
- 				)) {
- 					worker.targetPos = Math.max(worker.targetPos, outerHole.end); // Update the worker's target position
- 					workerFound = true;
-
- 					if (pendingSlice && !worker.pendingSlices.includes(pendingSlice)) {
- 						worker.pendingSlices.push(pendingSlice);
- 					}
-
- 					if (!worker.running) {
- 						// Kick it off if it's idle
- 						this.runWorker(worker);
- 					}
-
- 					break;
- 				}
- 			}
-
- 			if (!workerFound) {
- 				// We need to spawn a new worker
- 				const newWorker = this.createWorker(outerHole.start, outerHole.end);
- 				if (pendingSlice) {
- 					newWorker.pendingSlices = [pendingSlice];
- 				}
-
- 				this.runWorker(newWorker);
- 			}
- 		}
-
- 		if (!result) {
- 			assert(bytes);
- 			result = promise.then(bytes => ({
- 				bytes,
- 				view: toDataView(bytes),
- 				offset: innerStart,
- 			}));
- 		} else {
- 			// The requested region was satisfied by the cache, but the entire prefetch region was not
- 		}
-
- 		return result;
- 	}
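The hole-building pass above walks the cache (kept sorted by start and non-overlapping) and collects the gaps that still need fetching. A condensed, standalone restatement of that scan with illustrative names (computeHoles is not part of the package):

type Interval = { start: number; end: number };

// Given sorted, non-overlapping cached intervals, return the gaps ("holes")
// within [start, end) that are not covered by the cache.
const computeHoles = (cached: Interval[], start: number, end: number): Interval[] => {
	const holes: Interval[] = [];
	let lastEnd = start;
	for (const entry of cached) {
		if (entry.start >= end) break; // Past the requested range
		if (entry.end <= start) continue; // Before the requested range
		const cappedStart = Math.max(start, entry.start);
		if (lastEnd < cappedStart) holes.push({ start: lastEnd, end: cappedStart });
		lastEnd = Math.min(end, entry.end);
	}
	if (lastEnd < end) holes.push({ start: lastEnd, end });
	return holes;
};

// e.g. cache covering [0, 10) and [20, 30), request [5, 25) -> one hole: [10, 20)
console.log(computeHoles([{ start: 0, end: 10 }, { start: 20, end: 30 }], 5, 25));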
-
- 	createWorker(startPos: number, targetPos: number) {
- 		const worker: ReadWorker = {
- 			startPos,
- 			currentPos: startPos,
- 			targetPos,
- 			running: false,
- 			// Due to async shenanigans, it can happen that workers are started after disposal. In this case, instead of
- 			// simply not creating the worker, we allow it to run but immediately label it as aborted, so it can then
- 			// shut itself down.
- 			aborted: this.disposed,
- 			pendingSlices: [],
- 			age: this.nextAge++,
- 		};
- 		this.workers.push(worker);
-
- 		// LRU eviction of the other workers
- 		while (this.workers.length > this.options.maxWorkerCount) {
- 			let oldestIndex = 0;
- 			let oldestWorker = this.workers[0]!;
-
- 			for (let i = 1; i < this.workers.length; i++) {
- 				const worker = this.workers[i]!;
-
- 				if (worker.age < oldestWorker.age) {
- 					oldestIndex = i;
- 					oldestWorker = worker;
- 				}
- 			}
-
- 			if (oldestWorker.running && oldestWorker.pendingSlices.length > 0) {
- 				break;
- 			}
-
- 			oldestWorker.aborted = true;
- 			this.workers.splice(oldestIndex, 1);
- 		}
-
- 		return worker;
- 	}
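Worker eviction here and cache eviction further down share the same linear-scan LRU idea: every touch stamps a monotonically increasing age, and the item with the smallest stamp is the least recently used. A generic sketch of that selection (findOldestIndex is an illustrative name):

const findOldestIndex = <T extends { age: number }>(items: T[]): number => {
	// Assumes items is non-empty; the smallest age stamp marks the LRU item.
	let oldestIndex = 0;
	for (let i = 1; i < items.length; i++) {
		if (items[i]!.age < items[oldestIndex]!.age) {
			oldestIndex = i;
		}
	}
	return oldestIndex;
};

A linear scan presumably suffices because maxWorkerCount and the number of cache entries stay small; a priority queue would only pay off at much larger counts.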
-
- 	runWorker(worker: ReadWorker) {
- 		assert(!worker.running);
- 		assert(worker.currentPos < worker.targetPos);
-
- 		worker.running = true;
- 		worker.age = this.nextAge++;
-
- 		void this.options.runWorker(worker)
- 			.catch((error) => {
- 				worker.running = false;
-
- 				if (worker.pendingSlices.length > 0) {
- 					worker.pendingSlices.forEach(x => x.reject(error)); // Make sure to propagate any errors
- 					worker.pendingSlices.length = 0;
- 				} else {
- 					throw error; // So it doesn't get swallowed
- 				}
- 			});
- 	}
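Note that the catch handler rejects pending consumers when there are any, and otherwise rethrows so the failure surfaces as an unhandled rejection rather than vanishing silently. The promise that read() hands those consumers comes from a promiseWithResolvers utility defined elsewhere in the file; presumably it matches the shape of the now-standard Promise.withResolvers(). A minimal sketch under that assumption:

const promiseWithResolvers = <T>() => {
	let resolve!: (value: T) => void;
	let reject!: (reason?: unknown) => void;
	const promise = new Promise<T>((res, rej) => {
		resolve = res;
		reject = rej;
	});
	return { promise, resolve, reject };
};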
-
- 	/** Called by a worker when it has read some data. */
- 	supplyWorkerData(worker: ReadWorker, bytes: Uint8Array) {
- 		assert(!worker.aborted);
-
- 		const start = worker.currentPos;
- 		const end = start + bytes.length;
-
- 		this.insertIntoCache({
- 			start,
- 			end,
- 			bytes,
- 			view: toDataView(bytes),
- 			age: this.nextAge++,
- 		});
- 		worker.currentPos += bytes.length;
- 		worker.targetPos = Math.max(worker.targetPos, worker.currentPos); // In case it overshoots
-
- 		// Now, let's see if we can use the read bytes to fill any pending slice
- 		for (let i = 0; i < worker.pendingSlices.length; i++) {
- 			const pendingSlice = worker.pendingSlices[i]!;
-
- 			const clampedStart = Math.max(start, pendingSlice.start);
- 			const clampedEnd = Math.min(end, pendingSlice.start + pendingSlice.bytes.length);
-
- 			if (clampedStart < clampedEnd) {
- 				pendingSlice.bytes.set(
- 					bytes.subarray(clampedStart - start, clampedEnd - start),
- 					clampedStart - pendingSlice.start,
- 				);
- 			}
-
- 			for (let j = 0; j < pendingSlice.holes.length; j++) {
- 				// The hole is intentionally not modified here if the read section starts somewhere in the middle of
- 				// the hole. We don't need to do "hole splitting", since the workers are spawned *by* the holes,
- 				// meaning there's always a worker which will consume the hole left to right.
- 				const hole = pendingSlice.holes[j]!;
- 				if (start <= hole.start && end > hole.start) {
- 					hole.start = end;
- 				}
-
- 				if (hole.end <= hole.start) {
- 					pendingSlice.holes.splice(j, 1);
- 					j--;
- 				}
- 			}
-
- 			if (pendingSlice.holes.length === 0) {
- 				// The slice has been fulfilled, everything has been read. Let's resolve the promise
- 				pendingSlice.resolve(pendingSlice.bytes);
- 				worker.pendingSlices.splice(i, 1);
- 				i--;
- 			}
- 		}
-
- 		// Remove other idle workers if we "ate" into their territory
- 		for (let i = 0; i < this.workers.length; i++) {
- 			const otherWorker = this.workers[i]!;
- 			if (worker === otherWorker || otherWorker.running) {
- 				continue;
- 			}
-
- 			if (closedIntervalsOverlap(
- 				start, end,
- 				otherWorker.currentPos, otherWorker.targetPos, // These should typically be equal when the worker's idle
- 			)) {
- 				this.workers.splice(i, 1);
- 				i--;
- 			}
- 		}
- 	}
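Because each hole is consumed left to right by the worker that was spawned (or retargeted) for it, advancing hole.start is all the bookkeeping needed; no hole ever has to be split. A toy trace of that logic with hypothetical values:

// Hypothetical pending slice with one remaining hole over [120, 180).
const hole = { start: 120, end: 180 };

// Worker delivers [120, 150): the hole shrinks from the left.
let readStart = 120;
let readEnd = 150;
if (readStart <= hole.start && readEnd > hole.start) hole.start = readEnd; // hole is now [150, 180)

// A read landing mid-hole (e.g. [160, 170)) would leave the hole untouched;
// the worker that owns it still sweeps it left to right eventually.

// Worker delivers [150, 180): the hole collapses and the slice can resolve.
readStart = 150;
readEnd = 180;
if (readStart <= hole.start && readEnd > hole.start) hole.start = readEnd;
console.log(hole.end <= hole.start); // true -> hole gets spliced out, promise resolves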
-
- 	forgetWorker(worker: ReadWorker) {
- 		const index = this.workers.indexOf(worker);
- 		assert(index !== -1);
-
- 		this.workers.splice(index, 1);
- 	}
-
- 	insertIntoCache(entry: CacheEntry) {
- 		if (this.options.maxCacheSize === 0) {
- 			return; // No caching
- 		}
-
- 		let insertionIndex = binarySearchLessOrEqual(this.cache, entry.start, x => x.start) + 1;
-
- 		if (insertionIndex > 0) {
- 			const previous = this.cache[insertionIndex - 1]!;
- 			if (previous.end >= entry.end) {
- 				// Previous entry swallows the one to be inserted; we don't need to do anything
- 				return;
- 			}
-
- 			if (previous.end > entry.start) {
- 				// Partial overlap with the previous entry, let's join
- 				const joined = new Uint8Array(entry.end - previous.start);
- 				joined.set(previous.bytes, 0);
- 				joined.set(entry.bytes, entry.start - previous.start);
-
- 				this.currentCacheSize += entry.end - previous.end;
-
- 				previous.bytes = joined;
- 				previous.view = toDataView(joined);
- 				previous.end = entry.end;
-
- 				// Do the rest of the logic with the previous entry instead
- 				insertionIndex--;
- 				entry = previous;
- 			} else {
- 				this.cache.splice(insertionIndex, 0, entry);
- 				this.currentCacheSize += entry.bytes.length;
- 			}
- 		} else {
- 			this.cache.splice(insertionIndex, 0, entry);
- 			this.currentCacheSize += entry.bytes.length;
- 		}
-
- 		for (let i = insertionIndex + 1; i < this.cache.length; i++) {
- 			const next = this.cache[i]!;
- 			if (entry.end <= next.start) {
- 				// Even if they touch, we don't wanna merge them, no need
- 				break;
- 			}
-
- 			if (entry.end >= next.end) {
- 				// The inserted entry completely swallows the next entry
- 				this.cache.splice(i, 1);
- 				this.currentCacheSize -= next.bytes.length;
- 				i--;
- 				continue;
- 			}
-
- 			// Partial overlap, let's join
- 			const joined = new Uint8Array(next.end - entry.start);
- 			joined.set(entry.bytes, 0);
- 			joined.set(next.bytes, next.start - entry.start);
-
- 			this.currentCacheSize -= entry.end - next.start; // Subtract the overlap
-
- 			entry.bytes = joined;
- 			entry.view = toDataView(joined);
- 			entry.end = next.end;
- 			this.cache.splice(i, 1);
-
- 			break; // After the join case, we're done: the next entry cannot possibly overlap with the inserted one.
- 		}
-
- 		// LRU eviction of cache entries
- 		while (this.currentCacheSize > this.options.maxCacheSize) {
- 			let oldestIndex = 0;
- 			let oldestEntry = this.cache[0]!;
-
- 			for (let i = 1; i < this.cache.length; i++) {
- 				const entry = this.cache[i]!;
-
- 				if (entry.age < oldestEntry.age) {
- 					oldestIndex = i;
- 					oldestEntry = entry;
- 				}
- 			}
-
- 			if (this.currentCacheSize - oldestEntry.bytes.length <= this.options.maxCacheSize) {
- 				// Don't evict if it would shrink the cache to or below the max size
- 				break;
- 			}
-
- 			this.cache.splice(oldestIndex, 1);
- 			this.currentCacheSize -= oldestEntry.bytes.length;
- 		}
- 	}
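The merge logic above keeps the cache a sorted list of disjoint byte ranges while tracking currentCacheSize exactly: overlap with the previous entry grows that entry in place, and following entries are either swallowed whole or joined at the first partial overlap. A simplified functional restatement of the invariant it maintains (Range and insertRange are illustrative names; this sketch ignores the byte copying and LRU accounting):

type Range = { start: number; end: number };

// Insert [start, end) into a sorted list of disjoint ranges, merging overlaps.
// Like the real code, ranges that merely touch are deliberately kept separate.
// Returns the new total number of cached bytes.
const insertRange = (ranges: Range[], start: number, end: number): number => {
	const kept: Range[] = [];
	for (const r of ranges) {
		if (r.end <= start || r.start >= end) {
			kept.push(r); // Disjoint from the inserted range
		} else {
			start = Math.min(start, r.start); // Overlap: fold into the inserted range
			end = Math.max(end, r.end);
		}
	}
	kept.push({ start, end });
	kept.sort((a, b) => a.start - b.start);
	ranges.length = 0;
	ranges.push(...kept);
	return ranges.reduce((sum, r) => sum + (r.end - r.start), 0);
};

const cache: Range[] = [{ start: 0, end: 10 }, { start: 20, end: 30 }];
console.log(insertRange(cache, 5, 25)); // 30 -> cache is now the single range [0, 30)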
-
- 	dispose() {
- 		for (const worker of this.workers) {
- 			worker.aborted = true;
- 		}
-
- 		this.workers.length = 0;
- 		this.cache.length = 0;
- 		this.disposed = true;
- 	}
- }
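For context, a heavily simplified sketch of how such an orchestrator might be wired to HTTP range requests. The wiring is entirely hypothetical: fileUrl, the chunked fetch loop, and the naive prefetch profile are illustrative only and not how the package itself configures the class.

// Hypothetical wiring; the real source supplies its own runWorker and profile.
const fileUrl = 'https://example.com/file.mp4';

const orchestrator = new ReadOrchestrator({
	maxCacheSize: 64 * 2 ** 20, // keep at most ~64 MiB of file data cached
	maxWorkerCount: 4,
	prefetchProfile: (start, end) => ({ start, end: end + 2 ** 16 }), // naive fixed lookahead
	runWorker: async (worker) => {
		while (!worker.aborted && worker.currentPos < worker.targetPos) {
			const chunkEnd = Math.min(worker.currentPos + 2 ** 18, worker.targetPos);
			const response = await fetch(fileUrl, {
				headers: { Range: `bytes=${worker.currentPos}-${chunkEnd - 1}` },
			});
			const bytes = new Uint8Array(await response.arrayBuffer());
			if (worker.aborted) return; // Disposal may have happened mid-fetch
			orchestrator.supplyWorkerData(worker, bytes); // Caches the data + advances currentPos
		}
		worker.running = false;
	},
});

orchestrator.fileSize = 12_345_678; // Must be known before calling read() (asserted above)
const result = await orchestrator.read(0, 1024); // May resolve from cache or via workers
console.log(result.bytes.byteLength, result.offset);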