@php-wasm/stream-compression 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.eslintrc.json +18 -0
- package/README.md +11 -0
- package/package.json +31 -0
- package/project.json +34 -0
- package/src/index.ts +7 -0
- package/src/test/append-bytes.spec.ts +25 -0
- package/src/test/decode-zip.spec.ts +22 -0
- package/src/test/encode-zip.spec.ts +47 -0
- package/src/test/fixtures/hello-dolly.zip +0 -0
- package/src/test/prepend-bytes.spec.ts +25 -0
- package/src/test/skip-first-bytes.spec.ts +41 -0
- package/src/test/skip-last-bytes.spec.ts +27 -0
- package/src/test/vitest-setup-file.ts +7 -0
- package/src/utils/append-bytes.ts +16 -0
- package/src/utils/collect-bytes.ts +24 -0
- package/src/utils/collect-file.ts +16 -0
- package/src/utils/collect-string.ts +25 -0
- package/src/utils/concat-bytes.ts +38 -0
- package/src/utils/concat-string.ts +17 -0
- package/src/utils/concat-uint8-array.ts +17 -0
- package/src/utils/filter-stream.ts +15 -0
- package/src/utils/iterable-stream-polyfill.ts +35 -0
- package/src/utils/iterator-to-stream.ts +39 -0
- package/src/utils/limit-bytes.ts +40 -0
- package/src/utils/prepend-bytes.ts +18 -0
- package/src/utils/skip-first-bytes.ts +21 -0
- package/src/utils/skip-last-bytes.ts +24 -0
- package/src/utils/streamed-file.ts +58 -0
- package/src/zip/decode-remote-zip.ts +409 -0
- package/src/zip/decode-zip.ts +349 -0
- package/src/zip/encode-zip.ts +278 -0
- package/src/zip/index.ts +5 -0
- package/src/zip/types.ts +76 -0
- package/tsconfig.json +23 -0
- package/tsconfig.lib.json +14 -0
- package/tsconfig.spec.json +25 -0
- package/vite.config.ts +55 -0
package/src/utils/streamed-file.ts
@@ -0,0 +1,58 @@
+import { collectBytes } from './collect-bytes';
+
+/**
+ * Represents a file that is streamed and not fully
+ * loaded into memory.
+ */
+export class StreamedFile extends File {
+	/**
+	 * Creates a new StreamedFile instance.
+	 *
+	 * @param readableStream The readable stream containing the file data.
+	 * @param name The name of the file.
+	 * @param type The MIME type of the file.
+	 */
+	constructor(
+		private readableStream: ReadableStream<Uint8Array>,
+		name: string,
+		type?: string
+	) {
+		super([], name, { type });
+	}
+
+	/**
+	 * Overrides the slice() method of the File class.
+	 *
+	 * @returns A Blob representing a portion of the file.
+	 */
+	override slice(): Blob {
+		throw new Error('slice() is not possible on a StreamedFile');
+	}
+
+	/**
+	 * Returns the readable stream associated with the file.
+	 *
+	 * @returns The readable stream.
+	 */
+	override stream() {
+		return this.readableStream;
+	}
+
+	/**
+	 * Loads the file data into memory and then returns it as a string.
+	 *
+	 * @returns File data as text.
+	 */
+	override async text() {
+		return new TextDecoder().decode(await this.arrayBuffer());
+	}
+
+	/**
+	 * Loads the file data into memory and then returns it as an ArrayBuffer.
+	 *
+	 * @returns File data as an ArrayBuffer.
+	 */
+	override async arrayBuffer() {
+		return await collectBytes(this.stream());
+	}
+}
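For orientation, here is a minimal usage sketch of the `StreamedFile` class added above. It assumes the class is re-exported from the package entry point (`package/src/index.ts`); the URL, file name, and MIME type are placeholders, not part of the package.

```ts
// Hypothetical usage of StreamedFile (a sketch, not from the package itself).
// Assumes the package entry point re-exports StreamedFile.
import { StreamedFile } from '@php-wasm/stream-compression';

async function wrapResponseAsFile(url: string): Promise<File> {
	const response = await fetch(url);
	// Wraps the network stream in a File-compatible object without buffering
	// it in memory. slice() throws, but stream(), text(), and arrayBuffer()
	// work as overridden above (the latter two load the data on demand).
	return new StreamedFile(response.body!, 'backup.zip', 'application/zip');
}
```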
package/src/zip/decode-remote-zip.ts
@@ -0,0 +1,409 @@
+import { Semaphore } from '@php-wasm/util';
+import { filterStream } from '../utils/filter-stream';
+import { concatUint8Array } from '../utils/concat-uint8-array';
+import { collectBytes } from '../utils/collect-bytes';
+import {
+	readCentralDirectoryEntry,
+	readFileEntry,
+	decodeZip,
+} from './decode-zip';
+import { CentralDirectoryEntry, FileEntry } from './types';
+import { SIGNATURE_CENTRAL_DIRECTORY_END } from './types';
+import { IterableReadableStream } from '../utils/iterable-stream-polyfill';
+
+const CENTRAL_DIRECTORY_END_SCAN_CHUNK_SIZE = 110 * 1024;
+const BATCH_DOWNLOAD_OF_FILES_IF_CLOSER_THAN = 10 * 1024;
+const PREFER_RANGES_IF_FILE_LARGER_THAN = 1024 * 1024 * 1;
+const fetchSemaphore = new Semaphore({ concurrency: 10 });
+
+const DEFAULT_PREDICATE = () => true;
+
+/**
+ * Streams the contents of a remote zip file.
+ *
+ * If the zip is large and the predicate is filtering the zip contents,
+ * only the matching files will be downloaded using the Range header
+ * (if supported by the server).
+ *
+ * @param url The URL of the zip file.
+ * @param predicate Optional. A function that returns true if the file should be downloaded.
+ * @returns A stream of zip entries.
+ */
+export async function decodeRemoteZip(
+	url: string,
+	predicate: (
+		dirEntry: CentralDirectoryEntry | FileEntry
+	) => boolean = DEFAULT_PREDICATE
+) {
+	if (predicate === DEFAULT_PREDICATE) {
+		// If we're not filtering the zip contents, let's just
+		// grab the entire zip.
+		const response = await fetch(url);
+		return decodeZip(response.body!);
+	}
+
+	const contentLength = await fetchContentLength(url);
+	if (contentLength <= PREFER_RANGES_IF_FILE_LARGER_THAN) {
+		// If the zip is small enough, let's just grab it.
+		const response = await fetch(url);
+		return decodeZip(response.body!);
+	}
+
+	// Ensure ranges query support:
+	// Fetch one byte
+	const response = await fetch(url, {
+		headers: {
+			// 0-0 looks weird, doesn't it?
+			// The Range header is inclusive so it's actually
+			// a valid header asking for the first byte.
+			Range: 'bytes=0-0',
+			'Accept-Encoding': 'none',
+		},
+	});
+
+	// Fork the stream so that we can reuse it in case
+	// the Range header is unsupported and we're now streaming
+	// the entire file
+	const [peekStream, responseStream] = response.body!.tee();
+
+	// Read from the forked stream and close it.
+	const peekReader = peekStream.getReader();
+	const { value: peekBytes } = await peekReader.read();
+	const { done: peekDone } = await peekReader.read();
+	peekReader.releaseLock();
+	peekStream.cancel();
+
+	// Confirm our Range query worked as intended:
+	const rangesSupported = peekBytes?.length === 1 && peekDone;
+	if (!rangesSupported) {
+		// Uh-oh, we're actually streaming the entire file.
+		// Let's reuse the forked stream as our response stream.
+		return decodeZip(responseStream);
+	}
+
+	// We're good, let's clean up the other branch of the response stream.
+	responseStream.cancel();
+	const source = await createFetchSource(url, contentLength);
+	return streamCentralDirectoryEntries(source)
+		.pipeThrough(filterStream(predicate))
+		.pipeThrough(partitionNearbyEntries())
+		.pipeThrough(
+			fetchPartitionedEntries(source)
+		) as IterableReadableStream<FileEntry>;
+}
+
+/**
+ * Streams the central directory entries of a zip file.
+ *
+ * @param source
+ * @returns
+ */
+function streamCentralDirectoryEntries(source: BytesSource) {
+	let centralDirectoryStream: ReadableStream<Uint8Array>;
+
+	return new ReadableStream<CentralDirectoryEntry>({
+		async start() {
+			centralDirectoryStream = await streamCentralDirectoryBytes(source);
+		},
+		async pull(controller) {
+			const entry = await readCentralDirectoryEntry(
+				centralDirectoryStream
+			);
+			if (!entry) {
+				controller.close();
+				return;
+			}
+			controller.enqueue(entry);
+		},
+	});
+}
+
+/**
+ * Streams the central directory bytes of a zip file.
+ *
+ * @param source
+ * @returns
+ */
+async function streamCentralDirectoryBytes(source: BytesSource) {
+	const chunkSize = CENTRAL_DIRECTORY_END_SCAN_CHUNK_SIZE;
+	let centralDirectory: Uint8Array = new Uint8Array();
+
+	let chunkStart = source.length;
+	do {
+		chunkStart = Math.max(0, chunkStart - chunkSize);
+		const chunkEnd = Math.min(
+			chunkStart + chunkSize - 1,
+			source.length - 1
+		);
+		const bytes = await collectBytes(
+			await source.streamBytes(chunkStart, chunkEnd)
+		);
+		centralDirectory = concatUint8Array(bytes!, centralDirectory);
+
+		// Scan the buffer for the signature
+		const view = new DataView(bytes!.buffer);
+		for (let i = view.byteLength - 4; i >= 0; i--) {
+			if (view.getUint32(i, true) !== SIGNATURE_CENTRAL_DIRECTORY_END) {
+				continue;
+			}
+
+			// Confirm we have enough data to read the offset and the
+			// length of the central directory.
+			const centralDirectoryLengthAt = i + 12;
+			const centralDirectoryOffsetAt = centralDirectoryLengthAt + 4;
+			if (centralDirectory.byteLength < centralDirectoryOffsetAt + 4) {
+				throw new Error('Central directory not found');
+			}
+
+			// Read where the central directory starts
+			const dirStart = view.getUint32(centralDirectoryOffsetAt, true);
+			if (dirStart < chunkStart) {
+				// We're missing some bytes, let's grab them
+				const missingBytes = await collectBytes(
+					await source.streamBytes(dirStart, chunkStart - 1)
+				);
+				centralDirectory = concatUint8Array(
+					missingBytes!,
+					centralDirectory
+				);
+			} else if (dirStart > chunkStart) {
+				// We've read too many bytes, let's trim them
+				centralDirectory = centralDirectory.slice(
+					dirStart - chunkStart
+				);
+			}
+			return new Blob([centralDirectory]).stream();
+		}
+	} while (chunkStart >= 0);
+
+	throw new Error('Central directory not found');
+}
+
+/**
+ * Partitions files that are no further apart in the zip
+ * archive than BATCH_DOWNLOAD_OF_FILES_IF_CLOSER_THAN.
+ * It may download some extra files living within the gaps
+ * between the partitions.
+ */
+function partitionNearbyEntries() {
+	let lastFileEndsAt = 0;
+	let currentChunk: CentralDirectoryEntry[] = [];
+	return new TransformStream<CentralDirectoryEntry, CentralDirectoryEntry[]>({
+		transform(zipEntry, controller) {
+			// Byte distance too large, flush and start a new chunk
+			if (
+				zipEntry.firstByteAt >
+				lastFileEndsAt + BATCH_DOWNLOAD_OF_FILES_IF_CLOSER_THAN
+			) {
+				controller.enqueue(currentChunk);
+				currentChunk = [];
+			}
+			lastFileEndsAt = zipEntry.lastByteAt;
+			currentChunk.push(zipEntry);
+		},
+		flush(controller) {
+			controller.enqueue(currentChunk);
+		},
+	});
+}
+
+/**
+ * Fetches a chunk of files from the zip archive.
+ *
+ * If any extra files are present in the received
+ * bytes stream, they are filtered out.
+ */
+function fetchPartitionedEntries(
+	source: BytesSource
+): ReadableWritablePair<FileEntry, CentralDirectoryEntry[]> {
+	/**
+	 * This function implements a ReadableStream and a WritableStream
+	 * instead of a TransformStream. This is intentional.
+	 *
+	 * In TransformStream, the `transform` function may return a
+	 * promise. The next call to `transform` will be delayed until
+	 * the promise resolves. This is a problem for us because we
+	 * want to issue many fetch() requests in parallel.
+	 *
+	 * The only way to do that seems to be creating separate ReadableStream
+	 * and WritableStream implementations.
+	 */
+	let isWritableClosed = false;
+	let requestsInProgress = 0;
+	let readableController: ReadableStreamDefaultController<FileEntry>;
+	const byteStreams: Array<
+		[CentralDirectoryEntry[], ReadableStream<Uint8Array>]
+	> = [];
+	/**
+	 * Receives chunks of CentralDirectoryEntries, and fetches
+	 * the corresponding byte ranges from the remote zip file.
+	 */
+	const writable = new WritableStream<CentralDirectoryEntry[]>({
+		write(zipEntries, controller) {
+			if (!zipEntries.length) {
+				return;
+			}
+			++requestsInProgress;
+			// If the write() method returns a promise, the next
+			// call will be delayed until the promise resolves.
+			// Let's not return the promise, then.
+			// This will effectively issue many requests in parallel.
+			requestChunkRange(source, zipEntries)
+				.then((byteStream) => {
+					byteStreams.push([zipEntries, byteStream]);
+				})
+				.catch((e) => {
+					controller.error(e);
+				})
+				.finally(() => {
+					--requestsInProgress;
+				});
+		},
+		abort() {
+			isWritableClosed = true;
+			readableController.close();
+		},
+		async close() {
+			isWritableClosed = true;
+		},
+	});
+	/**
+	 * Decodes zipped bytes into FileEntry objects.
+	 */
+	const readable = new ReadableStream<FileEntry>({
+		start(controller) {
+			readableController = controller;
+		},
+		async pull(controller) {
+			while (true) {
+				const allChunksProcessed =
+					isWritableClosed &&
+					!byteStreams.length &&
+					requestsInProgress === 0;
+				if (allChunksProcessed) {
+					controller.close();
+					return;
+				}
+
+				// There's no bytes available, but the writable
+				// stream is still open or there are still requests
+				// in progress. Let's wait for more bytes.
+				const waitingForMoreBytes = !byteStreams.length;
+				if (waitingForMoreBytes) {
+					await new Promise((resolve) => setTimeout(resolve, 50));
+					continue;
+				}
+
+				const [requestedPaths, stream] = byteStreams[0];
+				const file = await readFileEntry(stream);
+				// The stream is exhausted, let's remove it from the queue
+				// and try the next one.
+				const streamExhausted = !file;
+				if (streamExhausted) {
+					byteStreams.shift();
+					continue;
+				}
+
+				// There may be some extra files between the ones we're
+				// interested in. Let's filter out any files that got
+				// intertwined in the byte stream.
+				const isOneOfRequestedPaths = requestedPaths.find(
+					(entry) => entry.path === file.path
+				);
+				if (!isOneOfRequestedPaths) {
+					continue;
+				}
+
+				// Finally! We've got a file we're interested in.
+				controller.enqueue(file);
+				break;
+			}
+		},
+	});
+
+	return {
+		readable,
+		writable,
+	};
+}
+
+/**
+ * Requests a chunk of bytes from the bytes source.
+ *
+ * @param source
+ * @param zipEntries
+ */
+async function requestChunkRange(
+	source: BytesSource,
+	zipEntries: CentralDirectoryEntry[]
+) {
+	const release = await fetchSemaphore.acquire();
+	try {
+		const lastZipEntry = zipEntries[zipEntries.length - 1];
+		const substream = await source.streamBytes(
+			zipEntries[0].firstByteAt,
+			lastZipEntry.lastByteAt
+		);
+		return substream;
+	} finally {
+		release();
+	}
+}
+
+/**
+ * Fetches the Content-Length header from a remote URL.
+ */
+async function fetchContentLength(url: string) {
+	return await fetch(url, { method: 'HEAD' })
+		.then((response) => response.headers.get('Content-Length'))
+		.then((contentLength) => {
+			if (!contentLength) {
+				throw new Error('Content-Length header is missing');
+			}
+
+			const parsedLength = parseInt(contentLength, 10);
+			if (isNaN(parsedLength) || parsedLength < 0) {
+				throw new Error('Content-Length header is invalid');
+			}
+			return parsedLength;
+		});
+}
+
+/**
+ * Private and experimental API: Range-based data sources.
+ *
+ * The idea is that if we can read arbitrary byte ranges from
+ * a file, we can retrieve a specific subset of a zip file.
+ */
+type BytesSource = {
+	length: number;
+	streamBytes: (
+		start: number,
+		end: number
+	) => Promise<ReadableStream<Uint8Array>>;
+};
+
+/**
+ * Creates a BytesSource enabling fetching ranges of bytes
+ * from a remote URL.
+ */
+async function createFetchSource(
+	url: string,
+	contentLength?: number
+): Promise<BytesSource> {
+	if (contentLength === undefined) {
+		contentLength = await fetchContentLength(url);
+	}
+
+	return {
+		length: contentLength,
+		streamBytes: async (from: number, to: number) =>
+			await fetch(url, {
+				headers: {
+					// The Range header is inclusive, so we need to subtract 1
+					Range: `bytes=${from}-${to - 1}`,
+					'Accept-Encoding': 'none',
+				},
+			}).then((response) => response.body!),
+	};
+}
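As a rough usage sketch of `decodeRemoteZip()` above: when a filtering predicate is passed, only the central directory and the byte ranges of matching entries are fetched (provided the server honors `Range` requests). The sketch assumes the package entry point re-exports `decodeRemoteZip`, that the iterable-stream polyfill makes the returned stream usable with `for await`, and that entries expose a string `path` field, as the `entry.path === file.path` comparison above suggests.

```ts
// Hypothetical usage of decodeRemoteZip() (a sketch under the assumptions above).
import { decodeRemoteZip } from '@php-wasm/stream-compression';

async function listPhpEntries(zipUrl: string) {
	// Only entries matching the predicate are downloaded via Range requests;
	// unrelated parts of the archive are skipped when the server supports ranges.
	const entries = await decodeRemoteZip(zipUrl, (entry) =>
		entry.path.endsWith('.php')
	);
	for await (const entry of entries) {
		console.log(entry.path);
	}
}
```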