unzipit 1.4.3 → 2.0.1

This diff shows the published contents of two package versions as they appear in their respective public registries. It is provided for informational purposes only.
@@ -1,379 +1,110 @@
- /* unzipit@1.4.3, license MIT */
- /* global SharedArrayBuffer, process */
-
+ /* unzipit@2.0.1, license MIT */
+ var _a, _b;
  function readBlobAsArrayBuffer(blob) {
-   if (blob.arrayBuffer) {
-     return blob.arrayBuffer();
-   }
-   return new Promise((resolve, reject) => {
-     const reader = new FileReader();
-     reader.addEventListener('loadend', () => {
-       resolve(reader.result);
+   if (blob.arrayBuffer) {
+     return blob.arrayBuffer();
+   }
+   return new Promise((resolve, reject) => {
+     const reader = new FileReader();
+     reader.addEventListener('loadend', () => {
+       resolve(reader.result);
+     });
+     reader.addEventListener('error', reject);
+     reader.readAsArrayBuffer(blob);
    });
-     reader.addEventListener('error', reject);
-     reader.readAsArrayBuffer(blob);
-   });
  }
-
  async function readBlobAsUint8Array(blob) {
-   const arrayBuffer = await readBlobAsArrayBuffer(blob);
-   return new Uint8Array(arrayBuffer);
+   const arrayBuffer = await readBlobAsArrayBuffer(blob);
+   return new Uint8Array(arrayBuffer);
  }
-
  function isBlob(v) {
-   return typeof Blob !== 'undefined' && v instanceof Blob;
+   return typeof Blob !== 'undefined' && v instanceof Blob;
  }
-
  function isSharedArrayBuffer(b) {
-   return typeof SharedArrayBuffer !== 'undefined' && b instanceof SharedArrayBuffer;
+   return typeof SharedArrayBuffer !== 'undefined' && b instanceof SharedArrayBuffer;
  }
-
- const isNode =
-   (typeof process !== 'undefined') &&
-   process.versions &&
-   (typeof process.versions.node !== 'undefined') &&
-   (typeof process.versions.electron === 'undefined');
-
+ const isNode = (typeof process !== 'undefined') &&
+   !!(process === null || process === void 0 ? void 0 : process.versions) &&
+   (typeof ((_a = process === null || process === void 0 ? void 0 : process.versions) === null || _a === void 0 ? void 0 : _a.node) !== 'undefined') &&
+   (typeof ((_b = process === null || process === void 0 ? void 0 : process.versions) === null || _b === void 0 ? void 0 : _b.electron) === 'undefined');
  function isTypedArraySameAsArrayBuffer(typedArray) {
-   return typedArray.byteOffset === 0 && typedArray.byteLength === typedArray.buffer.byteLength;
+   return typedArray.byteOffset === 0 && typedArray.byteLength === typedArray.buffer.byteLength;
  }

  class ArrayBufferReader {
-   constructor(arrayBufferOrView) {
-     this.typedArray = (arrayBufferOrView instanceof ArrayBuffer || isSharedArrayBuffer(arrayBufferOrView))
-       ? new Uint8Array(arrayBufferOrView)
-       : new Uint8Array(arrayBufferOrView.buffer, arrayBufferOrView.byteOffset, arrayBufferOrView.byteLength);
-   }
-   async getLength() {
-     return this.typedArray.byteLength;
-   }
-   async read(offset, length) {
-     return new Uint8Array(this.typedArray.buffer, this.typedArray.byteOffset + offset, length);
-   }
+   constructor(arrayBufferOrView) {
+     this.typedArray = (arrayBufferOrView instanceof ArrayBuffer || isSharedArrayBuffer(arrayBufferOrView))
+       ? new Uint8Array(arrayBufferOrView)
+       : new Uint8Array(arrayBufferOrView.buffer, arrayBufferOrView.byteOffset, arrayBufferOrView.byteLength);
+   }
+   async getLength() {
+     return this.typedArray.byteLength;
+   }
+   async read(offset, length) {
+     // Cast is necessary: backing buffer may be SharedArrayBuffer (ArrayBufferLike),
+     // but callers need Uint8Array<ArrayBuffer>. The data is read-only view so this is safe.
+     return new Uint8Array(this.typedArray.buffer, this.typedArray.byteOffset + offset, length);
+   }
  }

  class BlobReader {
-   constructor(blob) {
-     this.blob = blob;
-   }
-   async getLength() {
-     return this.blob.size;
-   }
-   async read(offset, length) {
-     const blob = this.blob.slice(offset, offset + length);
-     const arrayBuffer = await readBlobAsArrayBuffer(blob);
-     return new Uint8Array(arrayBuffer);
-   }
-   async sliceAsBlob(offset, length, type = '') {
-     return this.blob.slice(offset, offset + length, type);
-   }
+   constructor(blob) {
+     this.blob = blob;
+   }
+   async getLength() {
+     return this.blob.size;
+   }
+   async read(offset, length) {
+     const blob = this.blob.slice(offset, offset + length);
+     const arrayBuffer = await readBlobAsArrayBuffer(blob);
+     return new Uint8Array(arrayBuffer);
+   }
+   async sliceAsBlob(offset, length, type = '') {
+     return this.blob.slice(offset, offset + length, type);
+   }
  }

  class HTTPRangeReader {
-   constructor(url) {
-     this.url = url;
-   }
-   async getLength() {
-     if (this.length === undefined) {
-       const req = await fetch(this.url, { method: 'HEAD' });
-       if (!req.ok) {
-         throw new Error(`failed http request ${this.url}, status: ${req.status}: ${req.statusText}`);
-       }
-       this.length = parseInt(req.headers.get('content-length'));
-       if (Number.isNaN(this.length)) {
-         throw Error('could not get length');
-       }
+   constructor(url) {
+     this.url = url;
    }
-     return this.length;
-   }
-   async read(offset, size) {
-     if (size === 0) {
-       return new Uint8Array(0);
+   async getLength() {
+     if (this.length === undefined) {
+       const req = await fetch(this.url, { method: 'HEAD' });
+       if (!req.ok) {
+         throw new Error(`failed http request ${this.url}, status: ${req.status}: ${req.statusText}`);
+       }
+       this.length = parseInt(req.headers.get('content-length'));
+       if (Number.isNaN(this.length)) {
+         throw Error('could not get length');
+       }
+     }
+     return this.length;
    }
-     const req = await fetch(this.url, {
-       headers: {
-         Range: `bytes=${offset}-${offset + size - 1}`,
-       },
-     });
-     if (!req.ok) {
-       throw new Error(`failed http request ${this.url}, status: ${req.status} offset: ${offset} size: ${size}: ${req.statusText}`);
+   async read(offset, size) {
+     if (size === 0) {
+       return new Uint8Array(0);
+     }
+     const req = await fetch(this.url, {
+       headers: {
+         Range: `bytes=${offset}-${offset + size - 1}`,
+       },
+     });
+     if (!req.ok) {
+       throw new Error(`failed http request ${this.url}, status: ${req.status} offset: ${offset} size: ${size}: ${req.statusText}`);
+     }
+     const buffer = await req.arrayBuffer();
+     return new Uint8Array(buffer);
    }
-     const buffer = await req.arrayBuffer();
-     return new Uint8Array(buffer);
-   }
  }

- function inflate(data, buf) {
-   var u8=Uint8Array;
-   if(data[0]==3 && data[1]==0) return (buf ? buf : new u8(0));
-   var bitsF = _bitsF, bitsE = _bitsE, decodeTiny = _decodeTiny, get17 = _get17;
-
-   var noBuf = (buf==null);
-   if(noBuf) buf = new u8((data.length>>>2)<<3);
-
-   var BFINAL=0, BTYPE=0, HLIT=0, HDIST=0, HCLEN=0, ML=0, MD=0;
-   var off = 0, pos = 0;
-   var lmap, dmap;
-
-   while(BFINAL==0) {
-     BFINAL = bitsF(data, pos , 1);
-     BTYPE = bitsF(data, pos+1, 2); pos+=3;
-     //console.log(BFINAL, BTYPE);
-
-     if(BTYPE==0) {
-       if((pos&7)!=0) pos+=8-(pos&7);
-       var p8 = (pos>>>3)+4, len = data[p8-4]|(data[p8-3]<<8); //console.log(len);//bitsF(data, pos, 16),
-       if(noBuf) buf=_check(buf, off+len);
-       buf.set(new u8(data.buffer, data.byteOffset+p8, len), off);
-       //for(var i=0; i<len; i++) buf[off+i] = data[p8+i];
-       //for(var i=0; i<len; i++) if(buf[off+i] != data[p8+i]) throw "e";
-       pos = ((p8+len)<<3); off+=len; continue;
-     }
-     if(noBuf) buf=_check(buf, off+(1<<17)); // really not enough in many cases (but PNG and ZIP provide buffer in advance)
-     if(BTYPE==1) { lmap = U.flmap; dmap = U.fdmap; ML = (1<<9)-1; MD = (1<<5)-1; }
-     if(BTYPE==2) {
-       HLIT = bitsE(data, pos , 5)+257;
-       HDIST = bitsE(data, pos+ 5, 5)+ 1;
-       HCLEN = bitsE(data, pos+10, 4)+ 4; pos+=14;
-       for(var i=0; i<38; i+=2) { U.itree[i]=0; U.itree[i+1]=0; }
-       var tl = 1;
-       for(var i=0; i<HCLEN; i++) { var l=bitsE(data, pos+i*3, 3); U.itree[(U.ordr[i]<<1)+1] = l; if(l>tl)tl=l; } pos+=3*HCLEN; //console.log(itree);
-       makeCodes(U.itree, tl);
-       codes2map(U.itree, tl, U.imap);
-
-       lmap = U.lmap; dmap = U.dmap;
-
-       pos = decodeTiny(U.imap, (1<<tl)-1, HLIT+HDIST, data, pos, U.ttree);
-       var mx0 = _copyOut(U.ttree, 0, HLIT , U.ltree); ML = (1<<mx0)-1;
-       var mx1 = _copyOut(U.ttree, HLIT, HDIST, U.dtree); MD = (1<<mx1)-1;
-
-       //var ml = decodeTiny(U.imap, (1<<tl)-1, HLIT , data, pos, U.ltree); ML = (1<<(ml>>>24))-1; pos+=(ml&0xffffff);
-       makeCodes(U.ltree, mx0);
-       codes2map(U.ltree, mx0, lmap);
-
-       //var md = decodeTiny(U.imap, (1<<tl)-1, HDIST, data, pos, U.dtree); MD = (1<<(md>>>24))-1; pos+=(md&0xffffff);
-       makeCodes(U.dtree, mx1);
-       codes2map(U.dtree, mx1, dmap);
-     }
-     //var ooff=off, opos=pos;
-     while(true) {
-       var code = lmap[get17(data, pos) & ML]; pos += code&15;
-       var lit = code>>>4; //U.lhst[lit]++;
-       if((lit>>>8)==0) { buf[off++] = lit; }
-       else if(lit==256) { break; }
-       else {
-         var end = off+lit-254;
-         if(lit>264) { var ebs = U.ldef[lit-257]; end = off + (ebs>>>3) + bitsE(data, pos, ebs&7); pos += ebs&7; }
-         //dst[end-off]++;
-
-         var dcode = dmap[get17(data, pos) & MD]; pos += dcode&15;
-         var dlit = dcode>>>4;
-         var dbs = U.ddef[dlit], dst = (dbs>>>4) + bitsF(data, pos, dbs&15); pos += dbs&15;
-
-         //var o0 = off-dst, stp = Math.min(end-off, dst);
-         //if(stp>20) while(off<end) { buf.copyWithin(off, o0, o0+stp); off+=stp; } else
-         //if(end-dst<=off) buf.copyWithin(off, off-dst, end-dst); else
-         //if(dst==1) buf.fill(buf[off-1], off, end); else
-         if(noBuf) buf=_check(buf, off+(1<<17));
-         while(off<end) { buf[off]=buf[off++-dst]; buf[off]=buf[off++-dst]; buf[off]=buf[off++-dst]; buf[off]=buf[off++-dst]; }
-         off=end;
-         //while(off!=end) { buf[off]=buf[off++-dst]; }
-       }
-     }
-     //console.log(off-ooff, (pos-opos)>>>3);
-   }
-   //console.log(dst);
-   //console.log(tlen, dlen, off-tlen+tcnt);
-   return buf.length==off ? buf : buf.slice(0,off);
- }
- function _check(buf, len) {
-   var bl=buf.length; if(len<=bl) return buf;
-   var nbuf = new Uint8Array(Math.max(bl<<1,len)); nbuf.set(buf,0);
-   //for(var i=0; i<bl; i+=4) { nbuf[i]=buf[i]; nbuf[i+1]=buf[i+1]; nbuf[i+2]=buf[i+2]; nbuf[i+3]=buf[i+3]; }
-   return nbuf;
- }
-
- function _decodeTiny(lmap, LL, len, data, pos, tree) {
-   var bitsE = _bitsE, get17 = _get17;
-   var i = 0;
-   while(i<len) {
-     var code = lmap[get17(data, pos)&LL]; pos+=code&15;
-     var lit = code>>>4;
-     if(lit<=15) { tree[i]=lit; i++; }
-     else {
-       var ll = 0, n = 0;
-       if(lit==16) {
-         n = (3 + bitsE(data, pos, 2)); pos += 2; ll = tree[i-1];
-       }
-       else if(lit==17) {
-         n = (3 + bitsE(data, pos, 3)); pos += 3;
-       }
-       else if(lit==18) {
-         n = (11 + bitsE(data, pos, 7)); pos += 7;
-       }
-       var ni = i+n;
-       while(i<ni) { tree[i]=ll; i++; }
-     }
-   }
-   return pos;
- }
- function _copyOut(src, off, len, tree) {
-   var mx=0, i=0, tl=tree.length>>>1;
-   while(i<len) { var v=src[i+off]; tree[(i<<1)]=0; tree[(i<<1)+1]=v; if(v>mx)mx=v; i++; }
-   while(i<tl ) { tree[(i<<1)]=0; tree[(i<<1)+1]=0; i++; }
-   return mx;
- }
-
- function makeCodes(tree, MAX_BITS) { // code, length
-   var max_code = tree.length;
-   var code, bits, n, i, len;
-
-   var bl_count = U.bl_count; for(var i=0; i<=MAX_BITS; i++) bl_count[i]=0;
-   for(i=1; i<max_code; i+=2) bl_count[tree[i]]++;
-
-   var next_code = U.next_code; // smallest code for each length
-
-   code = 0;
-   bl_count[0] = 0;
-   for (bits = 1; bits <= MAX_BITS; bits++) {
-     code = (code + bl_count[bits-1]) << 1;
-     next_code[bits] = code;
-   }
-
-   for (n = 0; n < max_code; n+=2) {
-     len = tree[n+1];
-     if (len != 0) {
-       tree[n] = next_code[len];
-       next_code[len]++;
-     }
-   }
- }
- function codes2map(tree, MAX_BITS, map) {
-   var max_code = tree.length;
-   var r15 = U.rev15;
-   for(var i=0; i<max_code; i+=2) if(tree[i+1]!=0) {
-     var lit = i>>1;
-     var cl = tree[i+1], val = (lit<<4)|cl; // : (0x8000 | (U.of0[lit-257]<<7) | (U.exb[lit-257]<<4) | cl);
-     var rest = (MAX_BITS-cl), i0 = tree[i]<<rest, i1 = i0 + (1<<rest);
-     //tree[i]=r15[i0]>>>(15-MAX_BITS);
-     while(i0!=i1) {
-       var p0 = r15[i0]>>>(15-MAX_BITS);
-       map[p0]=val; i0++;
-     }
-   }
- }
- function revCodes(tree, MAX_BITS) {
-   var r15 = U.rev15, imb = 15-MAX_BITS;
-   for(var i=0; i<tree.length; i+=2) { var i0 = (tree[i]<<(MAX_BITS-tree[i+1])); tree[i] = r15[i0]>>>imb; }
- }
-
- function _bitsE(dt, pos, length) { return ((dt[pos>>>3] | (dt[(pos>>>3)+1]<<8) )>>>(pos&7))&((1<<length)-1); }
- function _bitsF(dt, pos, length) { return ((dt[pos>>>3] | (dt[(pos>>>3)+1]<<8) | (dt[(pos>>>3)+2]<<16))>>>(pos&7))&((1<<length)-1); }
- /*
- function _get9(dt, pos) {
-   return ((dt[pos>>>3] | (dt[(pos>>>3)+1]<<8))>>>(pos&7))&511;
- } */
- function _get17(dt, pos) { // return at least 17 meaningful bytes
-   return (dt[pos>>>3] | (dt[(pos>>>3)+1]<<8) | (dt[(pos>>>3)+2]<<16) )>>>(pos&7);
- }
- const U = function(){
-   var u16=Uint16Array, u32=Uint32Array;
-   return {
-     next_code : new u16(16),
-     bl_count : new u16(16),
-     ordr : [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ],
-     of0 : [3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,999,999,999],
-     exb : [0,0,0,0,0,0,0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0, 0],
-     ldef : new u16(32),
-     df0 : [1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577, 65535, 65535],
-     dxb : [0,0,0,0,1,1,2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 0, 0],
-     ddef : new u32(32),
-     flmap: new u16( 512), fltree: [],
-     fdmap: new u16( 32), fdtree: [],
-     lmap : new u16(32768), ltree : [], ttree:[],
-     dmap : new u16(32768), dtree : [],
-     imap : new u16( 512), itree : [],
-     //rev9 : new u16( 512)
-     rev15: new u16(1<<15),
-     lhst : new u32(286), dhst : new u32( 30), ihst : new u32(19),
-     lits : new u32(15000),
-     strt : new u16(1<<16),
-     prev : new u16(1<<15)
-   };
- } ();
-
- (function(){
-   var len = 1<<15;
-   for(var i=0; i<len; i++) {
-     var x = i;
-     x = (((x & 0xaaaaaaaa) >>> 1) | ((x & 0x55555555) << 1));
-     x = (((x & 0xcccccccc) >>> 2) | ((x & 0x33333333) << 2));
-     x = (((x & 0xf0f0f0f0) >>> 4) | ((x & 0x0f0f0f0f) << 4));
-     x = (((x & 0xff00ff00) >>> 8) | ((x & 0x00ff00ff) << 8));
-     U.rev15[i] = (((x >>> 16) | (x << 16)))>>>17;
-   }
-
-   function pushV(tgt, n, sv) { while(n--!=0) tgt.push(0,sv); }
-
-   for(var i=0; i<32; i++) { U.ldef[i]=(U.of0[i]<<3)|U.exb[i]; U.ddef[i]=(U.df0[i]<<4)|U.dxb[i]; }
-
-   pushV(U.fltree, 144, 8); pushV(U.fltree, 255-143, 9); pushV(U.fltree, 279-255, 7); pushV(U.fltree,287-279,8);
-   /*
-   var i = 0;
-   for(; i<=143; i++) U.fltree.push(0,8);
-   for(; i<=255; i++) U.fltree.push(0,9);
-   for(; i<=279; i++) U.fltree.push(0,7);
-   for(; i<=287; i++) U.fltree.push(0,8);
-   */
-   makeCodes(U.fltree, 9);
-   codes2map(U.fltree, 9, U.flmap);
-   revCodes (U.fltree, 9);
-
-   pushV(U.fdtree,32,5);
-   //for(i=0;i<32; i++) U.fdtree.push(0,5);
-   makeCodes(U.fdtree, 5);
-   codes2map(U.fdtree, 5, U.fdmap);
-   revCodes (U.fdtree, 5);
-
-   pushV(U.itree,19,0); pushV(U.ltree,286,0); pushV(U.dtree,30,0); pushV(U.ttree,320,0);
-   /*
-   for(var i=0; i< 19; i++) U.itree.push(0,0);
-   for(var i=0; i<286; i++) U.ltree.push(0,0);
-   for(var i=0; i< 30; i++) U.dtree.push(0,0);
-   for(var i=0; i<320; i++) U.ttree.push(0,0);
-   */
- })();
-
- const crc = {
-   table : ( function() {
-     var tab = new Uint32Array(256);
-     for (var n=0; n<256; n++) {
-       var c = n;
-       for (var k=0; k<8; k++) {
-         if (c & 1) c = 0xedb88320 ^ (c >>> 1);
-         else c = c >>> 1;
-       }
-       tab[n] = c; }
-     return tab; })(),
-   update : function(c, buf, off, len) {
-     for (var i=0; i<len; i++) c = crc.table[(c ^ buf[off+i]) & 0xff] ^ (c >>> 8);
-     return c;
-   },
-   crc : function(b,o,l) { return crc.update(0xffffffff,b,o,l) ^ 0xffffffff; }
- };
-
- function inflateRaw(file, buf) { return inflate(file, buf); }
-
- /* global module */
-
+ /* global DecompressionStream */
  const config = {
-   numWorkers: 1,
-   workerURL: '',
-   useWorkers: false,
+   numWorkers: 1,
+   workerURL: '',
+   useWorkers: false,
  };
-
  let nextId = 0;
-
  // Requests are put on a queue.
  // We don't send the request to the worker until the worker
  // is finished. This probably adds a small amount of latency
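
The `_a`/`_b` temporaries and the repeated `process === null || process === void 0` chains in the new `isNode` check above are what TypeScript emits when it downlevels optional chaining. Written with optional chaining directly, the same check would read roughly like this (a sketch, not the published source):

    const isNode =
      (typeof process !== 'undefined') &&
      !!process?.versions &&
      (typeof process?.versions?.node !== 'undefined') &&
      (typeof process?.versions?.electron === 'undefined');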
@@ -382,225 +113,247 @@ let nextId = 0;
  // do. You don't know which will finish first. If you give
  // the worker with more work to do the request then you'll
  // waste time.
-
  // note: we can't check `workers.length` for deciding if
  // we've reached `config.numWorkers` because creation the worker
  // is async which means other requests to make workers might
  // come in before a worker gets added to `workers`
  let numWorkers = 0;
- let canUseWorkers = true; // gets set to false if we can't start a worker
+ let canUseWorkers = true; // gets set to false if we can't start a worker
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const workers = [];
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const availableWorkers = [];
  const waitingForWorkerQueue = [];
  const currentlyProcessingIdToRequestMap = new Map();
-
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
  function handleResult(e) {
-   makeWorkerAvailable(e.target);
-   const {id, error, data} = e.data;
-   const request = currentlyProcessingIdToRequestMap.get(id);
-   currentlyProcessingIdToRequestMap.delete(id);
-   if (error) {
-     request.reject(error);
-   } else {
-     request.resolve(data);
-   }
+   makeWorkerAvailable(e.target);
+   const { id, error, data } = e.data;
+   const request = currentlyProcessingIdToRequestMap.get(id);
+   currentlyProcessingIdToRequestMap.delete(id);
+   if (error) {
+     request.reject(error);
+   }
+   else {
+     request.resolve(data);
+   }
  }
-
  // Because Firefox uses non-standard onerror to signal an error.
  function startWorker(url) {
-   return new Promise((resolve, reject) => {
-     const worker = new Worker(url);
-     worker.onmessage = (e) => {
-       if (e.data === 'start') {
-         worker.onerror = undefined;
-         worker.onmessage = undefined;
-         resolve(worker);
-       } else {
-         reject(new Error(`unexpected message: ${e.data}`));
-       }
-     };
-     worker.onerror = reject;
-   });
+   return new Promise((resolve, reject) => {
+     const worker = new Worker(url);
+     worker.onmessage = (e) => {
+       if (e.data === 'start') {
+         worker.onerror = null;
+         worker.onmessage = null;
+         resolve(worker);
+       }
+       else {
+         reject(new Error(`unexpected message: ${e.data}`));
+       }
+     };
+     worker.onerror = reject;
+   });
  }
-
- function dynamicRequire(mod, request) {
-   return mod.require ? mod.require(request) : {};
+ const workerHelper = (function () {
+   if (isNode) {
+     return {
+       // eslint-disable-next-line @typescript-eslint/no-explicit-any
+       async createWorker(url) {
+         const moduleId = 'node:worker_threads';
+         const { Worker } = await import(moduleId);
+         return new Worker(url);
+       },
+       // eslint-disable-next-line @typescript-eslint/no-explicit-any
+       addEventListener(worker, fn) {
+         // eslint-disable-next-line @typescript-eslint/no-explicit-any
+         worker.on('message', (data) => {
+           fn({ target: worker, data });
+         });
+       },
+       // eslint-disable-next-line @typescript-eslint/no-explicit-any
+       async terminate(worker) {
+         await worker.terminate();
+       },
+     };
+   }
+   else {
+     return {
+       // eslint-disable-next-line @typescript-eslint/no-explicit-any
+       async createWorker(url) {
+         // I don't understand this security issue
+         // Apparently there is some iframe setting or http header
+         // that prevents cross domain workers. But, I can manually
+         // download the text and do it. I reported this to Chrome
+         // and they said it was fine so ¯\_(ツ)_/¯
+         try {
+           const worker = await startWorker(url);
+           return worker;
+         }
+         catch (_e) {
+           console.warn('could not load worker:', url);
+         }
+         let text;
+         try {
+           const req = await fetch(url, { mode: 'cors' });
+           if (!req.ok) {
+             throw new Error(`could not load: ${url}`);
+           }
+           text = await req.text();
+           url = URL.createObjectURL(new Blob([text], { type: 'application/javascript' }));
+           const worker = await startWorker(url);
+           config.workerURL = url; // this is a hack. What's a better way to structure this code?
+           return worker;
+         }
+         catch (_e) {
+           console.warn('could not load worker via fetch:', url);
+         }
+         if (text !== undefined) {
+           try {
+             url = `data:application/javascript;base64,${btoa(text)}`;
+             const worker = await startWorker(url);
+             config.workerURL = url;
+             return worker;
+           }
+           catch (_e) {
+             console.warn('could not load worker via dataURI');
+           }
+         }
+         console.warn('workers will not be used');
+         throw new Error('can not start workers');
+       },
+       // eslint-disable-next-line @typescript-eslint/no-explicit-any
+       addEventListener(worker, fn) {
+         worker.addEventListener('message', fn);
+       },
+       // eslint-disable-next-line @typescript-eslint/no-explicit-any
+       async terminate(worker) {
+         worker.terminate();
+       },
+     };
+   }
+ }());
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ function makeWorkerAvailable(worker) {
+   availableWorkers.push(worker);
+   processWaitingForWorkerQueue();
  }
-
- const workerHelper = (function() {
-   if (isNode) {
-     // We need to use `dynamicRequire` because `require` on it's own will be optimized by webpack.
-     const {Worker} = dynamicRequire(module, 'worker_threads');
-     return {
-       async createWorker(url) {
-         return new Worker(url);
-       },
-       addEventListener(worker, fn) {
-         worker.on('message', (data) => {
-           fn({target: worker, data});
-         });
-       },
-       async terminate(worker) {
-         await worker.terminate();
-       },
-     };
-   } else {
-     return {
-       async createWorker(url) {
-         // I don't understand this security issue
-         // Apparently there is some iframe setting or http header
-         // that prevents cross domain workers. But, I can manually
-         // download the text and do it. I reported this to Chrome
-         // and they said it was fine so ¯\_(ツ)_/¯
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ async function getAvailableWorker() {
+   if (availableWorkers.length === 0 && numWorkers < config.numWorkers) {
+     ++numWorkers; // see comment at numWorkers declaration
      try {
-         const worker = await startWorker(url);
-         return worker;
-       } catch (e) {
-         console.warn('could not load worker:', url);
+       const worker = await workerHelper.createWorker(config.workerURL);
+       workers.push(worker);
+       availableWorkers.push(worker);
+       workerHelper.addEventListener(worker, handleResult);
      }
-
-       let text;
-       try {
-         const req = await fetch(url, {mode: 'cors'});
-         if (!req.ok) {
-           throw new Error(`could not load: ${url}`);
-         }
-         text = await req.text();
-         url = URL.createObjectURL(new Blob([text], {type: 'application/javascript'}));
-         const worker = await startWorker(url);
-         config.workerURL = url; // this is a hack. What's a better way to structure this code?
-         return worker;
-       } catch (e) {
-         console.warn('could not load worker via fetch:', url);
+     catch (_e) {
+       // set this global out-of-band (needs refactor)
+       canUseWorkers = false;
      }
-
-       if (text !== undefined) {
-         try {
-           url = `data:application/javascript;base64,${btoa(text)}`;
-           const worker = await startWorker(url);
-           config.workerURL = url;
-           return worker;
-         } catch (e) {
-           console.warn('could not load worker via dataURI');
-         }
+   }
+   return availableWorkers.pop();
+ }
+ async function decompressRaw(src) {
+   const ds = new DecompressionStream('deflate-raw');
+   const writer = ds.writable.getWriter();
+   // Do not await the write — doing so before reading causes a deadlock when
+   // the internal buffer fills due to backpressure.
+   writer.write(src).then(() => writer.close()).catch(() => { });
+   const chunks = [];
+   const reader = ds.readable.getReader();
+   for (;;) {
+     const { done, value } = await reader.read();
+     if (done) {
+       break;
      }
-
-       console.warn('workers will not be used');
-       throw new Error('can not start workers');
-     },
-     addEventListener(worker, fn) {
-       worker.addEventListener('message', fn);
-     },
-     async terminate(worker) {
-       worker.terminate();
-     },
-     };
-   }
- }());
-
- function makeWorkerAvailable(worker) {
-   availableWorkers.push(worker);
-   processWaitingForWorkerQueue();
- }
-
- async function getAvailableWorker() {
-   if (availableWorkers.length === 0 && numWorkers < config.numWorkers) {
-     ++numWorkers; // see comment at numWorkers declaration
-     try {
-       const worker = await workerHelper.createWorker(config.workerURL);
-       workers.push(worker);
-       availableWorkers.push(worker);
-       workerHelper.addEventListener(worker, handleResult);
-     } catch (e) {
-       // set this global out-of-band (needs refactor)
-       canUseWorkers = false;
+     chunks.push(value);
+   }
+   const size = chunks.reduce((s, c) => s + c.byteLength, 0);
+   const result = new Uint8Array(size);
+   let offset = 0;
+   for (const chunk of chunks) {
+     result.set(chunk, offset);
+     offset += chunk.byteLength;
    }
-   }
-   return availableWorkers.pop();
+   return result;
  }
-
  // @param {Uint8Array} src
- // @param {number} uncompressedSize
  // @param {string} [type] mime-type
  // @returns {ArrayBuffer|Blob} ArrayBuffer if type is falsy or Blob otherwise.
- function inflateRawLocal(src, uncompressedSize, type, resolve) {
-   const dst = new Uint8Array(uncompressedSize);
-   inflateRaw(src, dst);
-   resolve(type
-     ? new Blob([dst], {type})
-     : dst.buffer);
+ async function inflateRawLocal(src, type, resolve, reject) {
+   try {
+     const dst = await decompressRaw(src);
+     resolve(type ? new Blob([dst], { type }) : dst.buffer);
+   }
+   catch (e) {
+     reject(e);
+   }
  }
-
  async function processWaitingForWorkerQueue() {
-   if (waitingForWorkerQueue.length === 0) {
-     return;
-   }
-
-   if (config.useWorkers && canUseWorkers) {
-     const worker = await getAvailableWorker();
-     // canUseWorkers might have been set out-of-band (need refactor)
-     if (canUseWorkers) {
-       if (worker) {
-         if (waitingForWorkerQueue.length === 0) {
-           // the queue might be empty while we awaited for a worker.
-           makeWorkerAvailable(worker);
-           return;
+   if (waitingForWorkerQueue.length === 0) {
+     return;
+   }
+   if (config.useWorkers && canUseWorkers) {
+     const worker = await getAvailableWorker();
+     // canUseWorkers might have been set out-of-band (need refactor)
+     if (canUseWorkers) {
+       if (worker) {
+         if (waitingForWorkerQueue.length === 0) {
+           // the queue might be empty while we awaited for a worker.
+           makeWorkerAvailable(worker);
+           return;
+         }
+         const { id, src, uncompressedSize, type, resolve, reject } = waitingForWorkerQueue.shift();
+         currentlyProcessingIdToRequestMap.set(id, { id, src, uncompressedSize, type, resolve, reject });
+         const transferables = [];
+         // NOTE: Originally I thought you could transfer an ArrayBuffer.
+         // The code on this side is often using views into the entire file
+         // which means if we transferred we'd lose the entire file. That sucks
+         // because it means there's an expensive copy to send the uncompressed
+         // data to the worker.
+         //
+         // Also originally I thought we could send a Blob but we'd need to refactor
+         // the code in unzipit/readEntryData as currently it reads the uncompressed
+         // bytes.
+         //
+         //if (!isBlob(src) && !isSharedArrayBuffer(src)) {
+         //  transferables.push(src);
+         //}
+         worker.postMessage({
+           type: 'inflate',
+           data: {
+             id,
+             type,
+             src,
+             uncompressedSize,
+           },
+         }, transferables);
+       }
+       return;
      }
-         const {id, src, uncompressedSize, type, resolve, reject} = waitingForWorkerQueue.shift();
-         currentlyProcessingIdToRequestMap.set(id, {id, resolve, reject});
-         const transferables = [];
-         // NOTE: Originally I thought you could transfer an ArrayBuffer.
-         // The code on this side is often using views into the entire file
-         // which means if we transferred we'd lose the entire file. That sucks
-         // because it means there's an expensive copy to send the uncompressed
-         // data to the worker.
-         //
-         // Also originally I thought we could send a Blob but we'd need to refactor
-         // the code in unzipit/readEntryData as currently it reads the uncompressed
-         // bytes.
-         //
-         //if (!isBlob(src) && !isSharedArrayBuffer(src)) {
-         //  transferables.push(src);
-         //}
-         worker.postMessage({
-           type: 'inflate',
-           data: {
-             id,
-             type,
-             src,
-             uncompressedSize,
-           },
-         }, transferables);
-       }
-       return;
    }
-   }
-
-   // inflate locally
-   // We loop here because what happens if many requests happen at once
-   // the first N requests will try to async make a worker. Other requests
-   // will then be on the queue. But if we fail to make workers then there
-   // are pending requests.
-   while (waitingForWorkerQueue.length) {
-     const {src, uncompressedSize, type, resolve} = waitingForWorkerQueue.shift();
-     let data = src;
-     if (isBlob(src)) {
-       data = await readBlobAsUint8Array(src);
+   // inflate locally
+   // We loop here because what happens if many requests happen at once
+   // the first N requests will try to async make a worker. Other requests
+   // will then be on the queue. But if we fail to make workers then there
+   // are pending requests.
+   while (waitingForWorkerQueue.length) {
+     const { src, type, resolve, reject } = waitingForWorkerQueue.shift();
+     const data = isBlob(src) ? await readBlobAsUint8Array(src) : src;
+     inflateRawLocal(data, type, resolve, reject);
    }
-     inflateRawLocal(data, uncompressedSize, type, resolve);
-   }
  }
-
- function setOptions(options) {
-   config.workerURL = options.workerURL || config.workerURL;
-   // there's no reason to set the workerURL if you're not going to use workers
-   if (options.workerURL) {
-     config.useWorkers = true;
-   }
-   config.useWorkers = options.useWorkers !== undefined ? options.useWorkers : config.useWorkers;
-   config.numWorkers = options.numWorkers || config.numWorkers;
+ function setOptions$1(options) {
+   config.workerURL = options.workerURL || config.workerURL;
+   // there's no reason to set the workerURL if you're not going to use workers
+   if (options.workerURL) {
+     config.useWorkers = true;
+   }
+   config.useWorkers = options.useWorkers !== undefined ? options.useWorkers : config.useWorkers;
+   config.numWorkers = options.numWorkers || config.numWorkers;
  }
-
  // It has to take non-zero time to put a large typed array in a Blob since the very
  // next instruction you could change the contents of the array. So, if you're reading
  // the zip file for images/video/audio then all you want is a Blob on which to get a URL.
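
The new `decompressRaw` above swaps 1.4.3's bundled inflate for the platform's Compression Streams API. A minimal sketch of the same operation, assuming a runtime with `DecompressionStream` support (current browsers, Node 18+); it drains the stream through `Response` instead of the explicit reader loop the library uses:

    // Inflate raw-deflate bytes (a sketch; `compressed` is a Uint8Array).
    async function inflateRawSketch(compressed) {
      const stream = new Blob([compressed])
        .stream()
        .pipeThrough(new DecompressionStream('deflate-raw'));
      // Response collects the ReadableStream and handles backpressure for us.
      return new Uint8Array(await new Response(stream).arrayBuffer());
    }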
@@ -614,113 +367,97 @@ function setOptions(options) {
  // @param {string} [type] falsy or mimeType string (eg: 'image/png')
  // @returns {ArrayBuffer|Blob} ArrayBuffer if type is falsy or Blob otherwise.
  function inflateRawAsync(src, uncompressedSize, type) {
-   return new Promise((resolve, reject) => {
-     // note: there is potential an expensive copy here. In order for the data
-     // to make it into the worker we need to copy the data to the worker unless
-     // it's a Blob or a SharedArrayBuffer.
-     //
-     // Solutions:
-     //
-     // 1. A minor enhancement, if `uncompressedSize` is small don't call the worker.
-     //
-     // might be a win period as their is overhead calling the worker
-     //
-     // 2. Move the entire library to the worker
-     //
-     // Good, Maybe faster if you pass a URL, Blob, or SharedArrayBuffer? Not sure about that
-     // as those are also easy to transfer. Still slow if you pass an ArrayBuffer
-     // as the ArrayBuffer has to be copied to the worker.
-     //
-     // I guess benchmarking is really the only thing to try.
-     waitingForWorkerQueue.push({src, uncompressedSize, type, resolve, reject, id: nextId++});
-     processWaitingForWorkerQueue();
-   });
+   return new Promise((resolve, reject) => {
+     // note: there is potential an expensive copy here. In order for the data
+     // to make it into the worker we need to copy the data to the worker unless
+     // it's a Blob or a SharedArrayBuffer.
+     //
+     // Solutions:
+     //
+     // 1. A minor enhancement, if `uncompressedSize` is small don't call the worker.
+     //
+     // might be a win period as their is overhead calling the worker
+     //
+     // 2. Move the entire library to the worker
+     //
+     // Good, Maybe faster if you pass a URL, Blob, or SharedArrayBuffer? Not sure about that
+     // as those are also easy to transfer. Still slow if you pass an ArrayBuffer
+     // as the ArrayBuffer has to be copied to the worker.
+     //
+     // I guess benchmarking is really the only thing to try.
+     waitingForWorkerQueue.push({ src, uncompressedSize, type, resolve, reject, id: nextId++ });
+     processWaitingForWorkerQueue();
+   });
  }
-
  function clearArray(arr) {
-   arr.splice(0, arr.length);
+   arr.splice(0, arr.length);
  }
-
- async function cleanup() {
-   for (const worker of workers) {
-     await workerHelper.terminate(worker);
-   }
-   clearArray(workers);
-   clearArray(availableWorkers);
-   clearArray(waitingForWorkerQueue);
-   currentlyProcessingIdToRequestMap.clear();
-   numWorkers = 0;
-   canUseWorkers = true;
- }
-
- /*
- class Zip {
-   constructor(reader) {
-     comment, // the comment for this entry
-     commentBytes, // the raw comment for this entry
-   }
+ async function cleanup$1() {
+   for (const worker of workers) {
+     await workerHelper.terminate(worker);
+   }
+   clearArray(workers);
+   clearArray(availableWorkers);
+   clearArray(waitingForWorkerQueue);
+   currentlyProcessingIdToRequestMap.clear();
+   numWorkers = 0;
+   canUseWorkers = true;
  }
- */

  function dosDateTimeToDate(date, time) {
-   const day = date & 0x1f; // 1-31
-   const month = (date >> 5 & 0xf) - 1; // 1-12, 0-11
-   const year = (date >> 9 & 0x7f) + 1980; // 0-128, 1980-2108
-
-   const millisecond = 0;
-   const second = (time & 0x1f) * 2; // 0-29, 0-58 (even numbers)
-   const minute = time >> 5 & 0x3f; // 0-59
-   const hour = time >> 11 & 0x1f; // 0-23
-
-   return new Date(year, month, day, hour, minute, second, millisecond);
+   const day = date & 0x1f; // 1-31
+   const month = (date >> 5 & 0xf) - 1; // 1-12, 0-11
+   const year = (date >> 9 & 0x7f) + 1980; // 0-128, 1980-2108
+   const millisecond = 0;
+   const second = (time & 0x1f) * 2; // 0-29, 0-58 (even numbers)
+   const minute = time >> 5 & 0x3f; // 0-59
+   const hour = time >> 11 & 0x1f; // 0-23
+   return new Date(year, month, day, hour, minute, second, millisecond);
  }
-
  class ZipEntry {
-   constructor(reader, rawEntry) {
-     this._reader = reader;
-     this._rawEntry = rawEntry;
-     this.name = rawEntry.name;
-     this.nameBytes = rawEntry.nameBytes;
-     this.size = rawEntry.uncompressedSize;
-     this.compressedSize = rawEntry.compressedSize;
-     this.comment = rawEntry.comment;
-     this.commentBytes = rawEntry.commentBytes;
-     this.compressionMethod = rawEntry.compressionMethod;
-     this.lastModDate = dosDateTimeToDate(rawEntry.lastModFileDate, rawEntry.lastModFileTime);
-     this.isDirectory = rawEntry.uncompressedSize === 0 && rawEntry.name.endsWith('/');
-     this.encrypted = !!(rawEntry.generalPurposeBitFlag & 0x1);
-     this.externalFileAttributes = rawEntry.externalFileAttributes;
-     this.versionMadeBy = rawEntry.versionMadeBy;
-   }
-   // returns a promise that returns a Blob for this entry
-   async blob(type = 'application/octet-stream') {
-     return await readEntryDataAsBlob(this._reader, this._rawEntry, type);
-   }
-   // returns a promise that returns an ArrayBuffer for this entry
-   async arrayBuffer() {
-     return await readEntryDataAsArrayBuffer(this._reader, this._rawEntry);
-   }
-   // returns text, assumes the text is valid utf8. If you want more options decode arrayBuffer yourself
-   async text() {
-     const buffer = await this.arrayBuffer();
-     return decodeBuffer(new Uint8Array(buffer));
-   }
-   // returns text with JSON.parse called on it. If you want more options decode arrayBuffer yourself
-   async json() {
-     const text = await this.text();
-     return JSON.parse(text);
-   }
+   constructor(reader, rawEntry) {
+     this._reader = reader;
+     this._rawEntry = rawEntry;
+     this.name = rawEntry.name;
+     this.nameBytes = rawEntry.nameBytes;
+     this.size = rawEntry.uncompressedSize;
+     this.compressedSize = rawEntry.compressedSize;
+     this.comment = rawEntry.comment;
+     this.commentBytes = rawEntry.commentBytes;
+     this.compressionMethod = rawEntry.compressionMethod;
+     this.lastModDate = dosDateTimeToDate(rawEntry.lastModFileDate, rawEntry.lastModFileTime);
+     this.isDirectory = rawEntry.uncompressedSize === 0 && rawEntry.name.endsWith('/');
+     this.encrypted = !!(rawEntry.generalPurposeBitFlag & 0x1);
+     this.externalFileAttributes = rawEntry.externalFileAttributes;
+     this.versionMadeBy = rawEntry.versionMadeBy;
+   }
+   // returns a promise that returns a Blob for this entry
+   async blob(type = 'application/octet-stream') {
+     return await readEntryDataAsBlob(this._reader, this._rawEntry, type);
+   }
+   // returns a promise that returns an ArrayBuffer for this entry
+   async arrayBuffer() {
+     return await readEntryDataAsArrayBuffer(this._reader, this._rawEntry);
+   }
+   // returns text, assumes the text is valid utf8. If you want more options decode arrayBuffer yourself
+   async text() {
+     const buffer = await this.arrayBuffer();
+     return decodeBuffer(new Uint8Array(buffer));
+   }
+   // returns text with JSON.parse called on it. If you want more options decode arrayBuffer yourself
+   // eslint-disable-next-line @typescript-eslint/no-explicit-any
+   async json() {
+     const text = await this.text();
+     return JSON.parse(text);
+   }
  }
-
  const EOCDR_WITHOUT_COMMENT_SIZE = 22;
  const MAX_COMMENT_SIZE = 0xffff; // 2-byte size
  const EOCDR_SIGNATURE = 0x06054b50;
  const ZIP64_EOCDR_SIGNATURE = 0x06064b50;
-
  async function readAs(reader, offset, length) {
-   return await reader.read(offset, length);
+   return await reader.read(offset, length);
  }
-
  // The point of this function is we want to be able to pass the data
  // to a worker as fast as possible so when decompressing if the data
  // is already a blob and we can get a blob then get a blob.
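
For reference, `dosDateTimeToDate` above unpacks the MS-DOS timestamp stored with each entry: the date word holds year-since-1980 (7 bits), month (4 bits), and day (5 bits); the time word holds hours (5 bits), minutes (6 bits), and seconds divided by two (5 bits). A worked example with made-up values:

    // date 0x5862 → year 1980 + 44 = 2024, month 3, day 2
    // time 0x7a44 → 15 hours, 18 minutes, 4 * 2 = 8 seconds
    const when = dosDateTimeToDate(0x5862, 0x7a44);
    // → local Date for 2024-03-02 15:18:08 (the month is 0-indexed inside Date)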
@@ -742,447 +479,412 @@ async function readAs(reader, offset, length) {
742
479
  //
743
480
  // For now this hack works even if it's not generic.
744
481
  async function readAsBlobOrTypedArray(reader, offset, length, type) {
745
- if (reader.sliceAsBlob) {
746
- return await reader.sliceAsBlob(offset, length, type);
747
- }
748
- return await reader.read(offset, length);
482
+ if (reader.sliceAsBlob) {
483
+ return await reader.sliceAsBlob(offset, length, type);
484
+ }
485
+ return await reader.read(offset, length);
749
486
  }
750
-
751
- const crc$1 = {
752
- unsigned() {
753
- return 0;
754
- },
487
+ const crc = {
488
+ unsigned() {
489
+ return 0;
490
+ },
755
491
  };
756
-
757
492
  function getUint16LE(uint8View, offset) {
758
- return uint8View[offset ] +
759
- uint8View[offset + 1] * 0x100;
493
+ return uint8View[offset] +
494
+ uint8View[offset + 1] * 0x100;
760
495
  }
761
-
762
496
  function getUint32LE(uint8View, offset) {
763
- return uint8View[offset ] +
764
- uint8View[offset + 1] * 0x100 +
765
- uint8View[offset + 2] * 0x10000 +
766
- uint8View[offset + 3] * 0x1000000;
497
+ return uint8View[offset] +
498
+ uint8View[offset + 1] * 0x100 +
499
+ uint8View[offset + 2] * 0x10000 +
500
+ uint8View[offset + 3] * 0x1000000;
767
501
  }
768
-
769
502
  function getUint64LE(uint8View, offset) {
770
- return getUint32LE(uint8View, offset) +
771
- getUint32LE(uint8View, offset + 4) * 0x100000000;
503
+ return getUint32LE(uint8View, offset) +
504
+ getUint32LE(uint8View, offset + 4) * 0x100000000;
772
505
  }
773
-
774
- /* eslint-disable no-irregular-whitespace */
775
506
  // const decodeCP437 = (function() {
776
- // const cp437 = '\u0000☺☻♥♦♣♠•◘○◙♂♀♪♫☼►◄↕‼¶§▬↨↑↓→←∟↔▲▼ !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~⌂ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜ¢£¥₧ƒáíóúñѪº¿⌐¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐└┴┬├─┼╞╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀αßΓπΣσµτΦΘΩδ∞φε∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■ ';
507
+ // const cp437 = '\u0000☺☻♥♦♣♠•◘○◙♂♀♪♫☼►◄↕‼¶§▬↨↑↓→←∟↔▲▼ !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~⌂ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜ¢£¥₧ƒáíóúñѪº¿⌐¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐└┴┬├─┼╞╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀αßΓπΣσµτΦΘΩδ∞φε∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■ ';
777
508
  //
778
509
  // return function(uint8view) {
779
510
  // return Array.from(uint8view).map(v => cp437[v]).join('');
780
511
  // };
781
512
  // }());
782
- /* eslint-enable no-irregular-whitespace */
783
-
784
513
  const utf8Decoder = new TextDecoder();
785
- function decodeBuffer(uint8View, isUTF8) { /* eslint-disable-line no-unused-vars */ /* lgtm [js/superfluous-trailing-arguments] */
786
- if (isSharedArrayBuffer(uint8View.buffer)) {
787
- uint8View = new Uint8Array(uint8View);
788
- }
789
- return utf8Decoder.decode(uint8View);
790
- /*
791
- AFAICT the UTF8 flat is not set so it's 100% up to the user
792
- to self decode if their file is not utf8 filenames
793
- return isUTF8
794
- ? utf8Decoder.decode(uint8View)
795
- : decodeCP437(uint8View);
796
- */
514
+ function decodeBuffer(uint8View, _isUTF8) {
515
+ if (isSharedArrayBuffer(uint8View.buffer)) {
516
+ uint8View = new Uint8Array(uint8View);
517
+ }
518
+ return utf8Decoder.decode(uint8View);
519
+ /*
520
+ AFAICT the UTF8 flat is not set so it's 100% up to the user
521
+ to self decode if their file is not utf8 filenames
522
+ return isUTF8
523
+ ? utf8Decoder.decode(uint8View)
524
+ : decodeCP437(uint8View);
525
+ */
797
526
  }
798
-
799
527
  async function findEndOfCentralDirector(reader, totalLength) {
800
- const size = Math.min(EOCDR_WITHOUT_COMMENT_SIZE + MAX_COMMENT_SIZE, totalLength);
801
- const readStart = totalLength - size;
802
- const data = await readAs(reader, readStart, size);
803
- for (let i = size - EOCDR_WITHOUT_COMMENT_SIZE; i >= 0; --i) {
804
- if (getUint32LE(data, i) !== EOCDR_SIGNATURE) {
805
- continue;
806
- }
807
-
808
- // 0 - End of central directory signature
809
- const eocdr = new Uint8Array(data.buffer, data.byteOffset + i, data.byteLength - i);
810
- // 4 - Number of this disk
811
- const diskNumber = getUint16LE(eocdr, 4);
812
- if (diskNumber !== 0) {
813
- throw new Error(`multi-volume zip files are not supported. This is volume: ${diskNumber}`);
814
- }
815
-
816
- // 6 - Disk where central directory starts
817
- // 8 - Number of central directory records on this disk
818
- // 10 - Total number of central directory records
819
- const entryCount = getUint16LE(eocdr, 10);
820
- // 12 - Size of central directory (bytes)
821
- const centralDirectorySize = getUint32LE(eocdr, 12);
822
- // 16 - Offset of start of central directory, relative to start of archive
823
- const centralDirectoryOffset = getUint32LE(eocdr, 16);
824
- // 20 - Comment length
825
- const commentLength = getUint16LE(eocdr, 20);
826
- const expectedCommentLength = eocdr.length - EOCDR_WITHOUT_COMMENT_SIZE;
827
- if (commentLength !== expectedCommentLength) {
828
- throw new Error(`invalid comment length. expected: ${expectedCommentLength}, actual: ${commentLength}`);
829
- }
830
-
831
- // 22 - Comment
832
- // the encoding is always cp437.
833
- const commentBytes = new Uint8Array(eocdr.buffer, eocdr.byteOffset + 22, commentLength);
834
- const comment = decodeBuffer(commentBytes);
835
-
836
- if (entryCount === 0xffff || centralDirectoryOffset === 0xffffffff) {
837
- return await readZip64CentralDirectory(reader, readStart + i, comment, commentBytes);
838
- } else {
839
- return await readEntries(reader, centralDirectoryOffset, centralDirectorySize, entryCount, comment, commentBytes);
528
+ const size = Math.min(EOCDR_WITHOUT_COMMENT_SIZE + MAX_COMMENT_SIZE, totalLength);
529
+ const readStart = totalLength - size;
530
+ const data = await readAs(reader, readStart, size);
531
+ for (let i = size - EOCDR_WITHOUT_COMMENT_SIZE; i >= 0; --i) {
532
+ if (getUint32LE(data, i) !== EOCDR_SIGNATURE) {
533
+ continue;
534
+ }
535
+ // 0 - End of central directory signature
536
+ const eocdr = new Uint8Array(data.buffer, data.byteOffset + i, data.byteLength - i);
537
+ // 4 - Number of this disk
538
+ const diskNumber = getUint16LE(eocdr, 4);
539
+ if (diskNumber !== 0) {
540
+ throw new Error(`multi-volume zip files are not supported. This is volume: ${diskNumber}`);
541
+ }
542
+ // 6 - Disk where central directory starts
543
+ // 8 - Number of central directory records on this disk
544
+ // 10 - Total number of central directory records
545
+ const entryCount = getUint16LE(eocdr, 10);
546
+ // 12 - Size of central directory (bytes)
547
+ const centralDirectorySize = getUint32LE(eocdr, 12);
548
+ // 16 - Offset of start of central directory, relative to start of archive
549
+ const centralDirectoryOffset = getUint32LE(eocdr, 16);
550
+ // 20 - Comment length
551
+ const commentLength = getUint16LE(eocdr, 20);
552
+ const expectedCommentLength = eocdr.length - EOCDR_WITHOUT_COMMENT_SIZE;
553
+ if (commentLength !== expectedCommentLength) {
554
+ throw new Error(`invalid comment length. expected: ${expectedCommentLength}, actual: ${commentLength}`);
555
+ }
556
+ // 22 - Comment
557
+ // the encoding is always cp437.
558
+ const commentBytes = new Uint8Array(eocdr.buffer, eocdr.byteOffset + 22, commentLength);
559
+ const comment = decodeBuffer(commentBytes);
560
+ if (entryCount === 0xffff || centralDirectoryOffset === 0xffffffff) {
561
+ return await readZip64CentralDirectory(reader, readStart + i, comment, commentBytes);
562
+ }
563
+ else {
564
+ return await readEntries(reader, centralDirectoryOffset, centralDirectorySize, entryCount, comment, commentBytes);
565
+ }
840
566
  }
841
- }
842
-
843
- throw new Error('could not find end of central directory. maybe not zip file');
567
+ throw new Error('could not find end of central directory. maybe not zip file');
844
568
  }
845
-
846
569
  const END_OF_CENTRAL_DIRECTORY_LOCATOR_SIGNATURE = 0x07064b50;
847
-
848
570
  async function readZip64CentralDirectory(reader, offset, comment, commentBytes) {
849
- // ZIP64 Zip64 end of central directory locator
850
- const zip64EocdlOffset = offset - 20;
851
- const eocdl = await readAs(reader, zip64EocdlOffset, 20);
852
-
853
- // 0 - zip64 end of central dir locator signature
854
- if (getUint32LE(eocdl, 0) !== END_OF_CENTRAL_DIRECTORY_LOCATOR_SIGNATURE) {
855
- throw new Error('invalid zip64 end of central directory locator signature');
856
- }
857
-
858
- // 4 - number of the disk with the start of the zip64 end of central directory
859
- // 8 - relative offset of the zip64 end of central directory record
860
- const zip64EocdrOffset = getUint64LE(eocdl, 8);
861
- // 16 - total number of disks
862
-
863
- // ZIP64 end of central directory record
864
- const zip64Eocdr = await readAs(reader, zip64EocdrOffset, 56);
865
-
866
- // 0 - zip64 end of central dir signature 4 bytes (0x06064b50)
867
- if (getUint32LE(zip64Eocdr, 0) !== ZIP64_EOCDR_SIGNATURE) {
868
- throw new Error('invalid zip64 end of central directory record signature');
869
- }
870
- // 4 - size of zip64 end of central directory record 8 bytes
871
- // 12 - version made by 2 bytes
872
- // 14 - version needed to extract 2 bytes
873
- // 16 - number of this disk 4 bytes
874
- // 20 - number of the disk with the start of the central directory 4 bytes
875
- // 24 - total number of entries in the central directory on this disk 8 bytes
876
- // 32 - total number of entries in the central directory 8 bytes
877
- const entryCount = getUint64LE(zip64Eocdr, 32);
878
- // 40 - size of the central directory 8 bytes
879
- const centralDirectorySize = getUint64LE(zip64Eocdr, 40);
880
- // 48 - offset of start of central directory with respect to the starting disk number 8 bytes
881
- const centralDirectoryOffset = getUint64LE(zip64Eocdr, 48);
882
- // 56 - zip64 extensible data sector (variable size)
883
- return readEntries(reader, centralDirectoryOffset, centralDirectorySize, entryCount, comment, commentBytes);
571
+ // ZIP64 Zip64 end of central directory locator
572
+ const zip64EocdlOffset = offset - 20;
573
+ const eocdl = await readAs(reader, zip64EocdlOffset, 20);
574
+ // 0 - zip64 end of central dir locator signature
575
+ if (getUint32LE(eocdl, 0) !== END_OF_CENTRAL_DIRECTORY_LOCATOR_SIGNATURE) {
576
+ throw new Error('invalid zip64 end of central directory locator signature');
577
+ }
578
+ // 4 - number of the disk with the start of the zip64 end of central directory
579
+ // 8 - relative offset of the zip64 end of central directory record
580
+ const zip64EocdrOffset = getUint64LE(eocdl, 8);
581
+ // 16 - total number of disks
582
+ // ZIP64 end of central directory record
583
+ const zip64Eocdr = await readAs(reader, zip64EocdrOffset, 56);
584
+ // 0 - zip64 end of central dir signature 4 bytes (0x06064b50)
585
+ if (getUint32LE(zip64Eocdr, 0) !== ZIP64_EOCDR_SIGNATURE) {
586
+ throw new Error('invalid zip64 end of central directory record signature');
587
+ }
588
+ // 4 - size of zip64 end of central directory record 8 bytes
589
+ // 12 - version made by 2 bytes
590
+ // 14 - version needed to extract 2 bytes
591
+ // 16 - number of this disk 4 bytes
592
+ // 20 - number of the disk with the start of the central directory 4 bytes
593
+ // 24 - total number of entries in the central directory on this disk 8 bytes
594
+ // 32 - total number of entries in the central directory 8 bytes
595
+ const entryCount = getUint64LE(zip64Eocdr, 32);
596
+ // 40 - size of the central directory 8 bytes
597
+ const centralDirectorySize = getUint64LE(zip64Eocdr, 40);
598
+ // 48 - offset of start of central directory with respect to the starting disk number 8 bytes
599
+ const centralDirectoryOffset = getUint64LE(zip64Eocdr, 48);
600
+ // 56 - zip64 extensible data sector (variable size)
601
+ return readEntries(reader, centralDirectoryOffset, centralDirectorySize, entryCount, comment, commentBytes);
884
602
  }
885
-
886
603
  const CENTRAL_DIRECTORY_FILE_HEADER_SIGNATURE = 0x02014b50;
887
-
888
604
  async function readEntries(reader, centralDirectoryOffset, centralDirectorySize, rawEntryCount, comment, commentBytes) {
889
- let readEntryCursor = 0;
890
- const allEntriesBuffer = await readAs(reader, centralDirectoryOffset, centralDirectorySize);
891
- const rawEntries = [];
892
-
893
- for (let e = 0; e < rawEntryCount; ++e) {
894
- const buffer = allEntriesBuffer.subarray(readEntryCursor, readEntryCursor + 46);
895
- // 0 - Central directory file header signature
896
- const signature = getUint32LE(buffer, 0);
897
- if (signature !== CENTRAL_DIRECTORY_FILE_HEADER_SIGNATURE) {
898
- throw new Error(`invalid central directory file header signature: 0x${signature.toString(16)}`);
899
- }
900
- const rawEntry = {
901
- // 4 - Version made by
902
- versionMadeBy: getUint16LE(buffer, 4),
903
- // 6 - Version needed to extract (minimum)
904
- versionNeededToExtract: getUint16LE(buffer, 6),
905
- // 8 - General purpose bit flag
906
- generalPurposeBitFlag: getUint16LE(buffer, 8),
907
- // 10 - Compression method
908
- compressionMethod: getUint16LE(buffer, 10),
909
- // 12 - File last modification time
910
- lastModFileTime: getUint16LE(buffer, 12),
911
- // 14 - File last modification date
912
- lastModFileDate: getUint16LE(buffer, 14),
913
- // 16 - CRC-32
914
- crc32: getUint32LE(buffer, 16),
915
- // 20 - Compressed size
916
- compressedSize: getUint32LE(buffer, 20),
917
- // 24 - Uncompressed size
918
- uncompressedSize: getUint32LE(buffer, 24),
919
- // 28 - File name length (n)
920
- fileNameLength: getUint16LE(buffer, 28),
921
- // 30 - Extra field length (m)
922
- extraFieldLength: getUint16LE(buffer, 30),
923
- // 32 - File comment length (k)
924
- fileCommentLength: getUint16LE(buffer, 32),
925
- // 34 - Disk number where file starts
926
- // 36 - Internal file attributes
927
- internalFileAttributes: getUint16LE(buffer, 36),
928
- // 38 - External file attributes
929
- externalFileAttributes: getUint32LE(buffer, 38),
930
- // 42 - Relative offset of local file header
931
- relativeOffsetOfLocalHeader: getUint32LE(buffer, 42),
932
- };
933
-
934
- if (rawEntry.generalPurposeBitFlag & 0x40) {
935
- throw new Error('strong encryption is not supported');
936
- }
937
-
938
- readEntryCursor += 46;
939
-
940
- const data = allEntriesBuffer.subarray(readEntryCursor, readEntryCursor + rawEntry.fileNameLength + rawEntry.extraFieldLength + rawEntry.fileCommentLength);
941
- rawEntry.nameBytes = data.slice(0, rawEntry.fileNameLength);
942
- rawEntry.name = decodeBuffer(rawEntry.nameBytes);
943
-
944
- // 46+n - Extra field
945
- const fileCommentStart = rawEntry.fileNameLength + rawEntry.extraFieldLength;
946
- const extraFieldBuffer = data.slice(rawEntry.fileNameLength, fileCommentStart);
947
- rawEntry.extraFields = [];
948
- let i = 0;
949
- while (i < extraFieldBuffer.length - 3) {
950
- const headerId = getUint16LE(extraFieldBuffer, i + 0);
951
- const dataSize = getUint16LE(extraFieldBuffer, i + 2);
952
- const dataStart = i + 4;
953
- const dataEnd = dataStart + dataSize;
954
- if (dataEnd > extraFieldBuffer.length) {
955
- throw new Error('extra field length exceeds extra field buffer size');
956
- }
957
- rawEntry.extraFields.push({
958
- id: headerId,
959
- data: extraFieldBuffer.slice(dataStart, dataEnd),
960
- });
961
- i = dataEnd;
962
- }
963
-
964
- // 46+n+m - File comment
965
- rawEntry.commentBytes = data.slice(fileCommentStart, fileCommentStart + rawEntry.fileCommentLength);
966
- rawEntry.comment = decodeBuffer(rawEntry.commentBytes);
967
-
968
- readEntryCursor += data.length;
969
-
970
- if (rawEntry.uncompressedSize === 0xffffffff ||
971
- rawEntry.compressedSize === 0xffffffff ||
972
- rawEntry.relativeOffsetOfLocalHeader === 0xffffffff) {
973
- // ZIP64 format
974
- // find the Zip64 Extended Information Extra Field
975
- const zip64ExtraField = rawEntry.extraFields.find(e => e.id === 0x0001);
976
- if (!zip64ExtraField) {
977
- throw new Error('expected zip64 extended information extra field');
978
- }
979
- const zip64EiefBuffer = zip64ExtraField.data;
980
- let index = 0;
981
- // 0 - Original Size 8 bytes
982
- if (rawEntry.uncompressedSize === 0xffffffff) {
983
- if (index + 8 > zip64EiefBuffer.length) {
984
- throw new Error('zip64 extended information extra field does not include uncompressed size');
605
+ let readEntryCursor = 0;
606
+ const allEntriesBuffer = await readAs(reader, centralDirectoryOffset, centralDirectorySize);
607
+ const rawEntries = [];
608
+ for (let e = 0; e < rawEntryCount; ++e) {
609
+ const buffer = allEntriesBuffer.subarray(readEntryCursor, readEntryCursor + 46);
610
+ // 0 - Central directory file header signature
611
+ const signature = getUint32LE(buffer, 0);
612
+ if (signature !== CENTRAL_DIRECTORY_FILE_HEADER_SIGNATURE) {
613
+ throw new Error(`invalid central directory file header signature: 0x${signature.toString(16)}`);
614
+ }
615
+ const rawEntry = {
616
+ // 4 - Version made by
617
+ versionMadeBy: getUint16LE(buffer, 4),
618
+ // 6 - Version needed to extract (minimum)
619
+ versionNeededToExtract: getUint16LE(buffer, 6),
620
+ // 8 - General purpose bit flag
621
+ generalPurposeBitFlag: getUint16LE(buffer, 8),
622
+ // 10 - Compression method
623
+ compressionMethod: getUint16LE(buffer, 10),
624
+ // 12 - File last modification time
625
+ lastModFileTime: getUint16LE(buffer, 12),
626
+ // 14 - File last modification date
627
+ lastModFileDate: getUint16LE(buffer, 14),
628
+ // 16 - CRC-32
629
+ crc32: getUint32LE(buffer, 16),
630
+ // 20 - Compressed size
631
+ compressedSize: getUint32LE(buffer, 20),
632
+ // 24 - Uncompressed size
633
+ uncompressedSize: getUint32LE(buffer, 24),
634
+ // 28 - File name length (n)
635
+ fileNameLength: getUint16LE(buffer, 28),
636
+ // 30 - Extra field length (m)
637
+ extraFieldLength: getUint16LE(buffer, 30),
638
+ // 32 - File comment length (k)
639
+ fileCommentLength: getUint16LE(buffer, 32),
640
+ // 34 - Disk number where file starts
641
+ // 36 - Internal file attributes
642
+ internalFileAttributes: getUint16LE(buffer, 36),
643
+ // 38 - External file attributes
644
+ externalFileAttributes: getUint32LE(buffer, 38),
645
+ // 42 - Relative offset of local file header
646
+ relativeOffsetOfLocalHeader: getUint32LE(buffer, 42),
647
+ };
648
+ if (rawEntry.generalPurposeBitFlag & 0x40) {
649
+ throw new Error('strong encryption is not supported');
985
650
  }
986
- rawEntry.uncompressedSize = getUint64LE(zip64EiefBuffer, index);
987
- index += 8;
988
- }
989
- // 8 - Compressed Size 8 bytes
990
- if (rawEntry.compressedSize === 0xffffffff) {
991
- if (index + 8 > zip64EiefBuffer.length) {
992
- throw new Error('zip64 extended information extra field does not include compressed size');
651
+ readEntryCursor += 46;
652
+ const data = allEntriesBuffer.subarray(readEntryCursor, readEntryCursor + rawEntry.fileNameLength + rawEntry.extraFieldLength + rawEntry.fileCommentLength);
653
+ // 46 - File name
654
+ (rawEntry.generalPurposeBitFlag & 0x800) !== 0; // general purpose bit 11: name and comment are UTF-8 (result currently unused)
655
+ rawEntry.nameBytes = data.slice(0, rawEntry.fileNameLength);
656
+ rawEntry.name = decodeBuffer(rawEntry.nameBytes);
657
+ // 46+n - Extra field
658
+ const fileCommentStart = rawEntry.fileNameLength + rawEntry.extraFieldLength;
659
+ const extraFieldBuffer = data.slice(rawEntry.fileNameLength, fileCommentStart);
660
+ rawEntry.extraFields = [];
661
+ let i = 0;
662
+ while (i < extraFieldBuffer.length - 3) {
663
+ const headerId = getUint16LE(extraFieldBuffer, i + 0);
664
+ const dataSize = getUint16LE(extraFieldBuffer, i + 2);
665
+ const dataStart = i + 4;
666
+ const dataEnd = dataStart + dataSize;
667
+ if (dataEnd > extraFieldBuffer.length) {
668
+ throw new Error('extra field length exceeds extra field buffer size');
669
+ }
670
+ rawEntry.extraFields.push({
671
+ id: headerId,
672
+ data: extraFieldBuffer.slice(dataStart, dataEnd),
673
+ });
674
+ i = dataEnd;
993
675
  }
994
- rawEntry.compressedSize = getUint64LE(zip64EiefBuffer, index);
995
- index += 8;
996
- }
997
- // 16 - Relative Header Offset 8 bytes
998
- if (rawEntry.relativeOffsetOfLocalHeader === 0xffffffff) {
999
- if (index + 8 > zip64EiefBuffer.length) {
1000
- throw new Error('zip64 extended information extra field does not include relative header offset');
676
+ // 46+n+m - File comment
677
+ rawEntry.commentBytes = data.slice(fileCommentStart, fileCommentStart + rawEntry.fileCommentLength);
678
+ rawEntry.comment = decodeBuffer(rawEntry.commentBytes);
679
+ readEntryCursor += data.length;
680
+ if (rawEntry.uncompressedSize === 0xffffffff ||
681
+ rawEntry.compressedSize === 0xffffffff ||
682
+ rawEntry.relativeOffsetOfLocalHeader === 0xffffffff) {
683
+ // ZIP64 format
684
+ // find the Zip64 Extended Information Extra Field
685
+ const zip64ExtraField = rawEntry.extraFields.find(e => e.id === 0x0001);
686
+ if (!zip64ExtraField) {
687
+ throw new Error('expected zip64 extended information extra field');
688
+ }
689
+ const zip64EiefBuffer = zip64ExtraField.data;
690
+ let index = 0;
691
+ // 0 - Original Size 8 bytes
692
+ if (rawEntry.uncompressedSize === 0xffffffff) {
693
+ if (index + 8 > zip64EiefBuffer.length) {
694
+ throw new Error('zip64 extended information extra field does not include uncompressed size');
695
+ }
696
+ rawEntry.uncompressedSize = getUint64LE(zip64EiefBuffer, index);
697
+ index += 8;
698
+ }
699
+ // 8 - Compressed Size 8 bytes
700
+ if (rawEntry.compressedSize === 0xffffffff) {
701
+ if (index + 8 > zip64EiefBuffer.length) {
702
+ throw new Error('zip64 extended information extra field does not include compressed size');
703
+ }
704
+ rawEntry.compressedSize = getUint64LE(zip64EiefBuffer, index);
705
+ index += 8;
706
+ }
707
+ // 16 - Relative Header Offset 8 bytes
708
+ if (rawEntry.relativeOffsetOfLocalHeader === 0xffffffff) {
709
+ if (index + 8 > zip64EiefBuffer.length) {
710
+ throw new Error('zip64 extended information extra field does not include relative header offset');
711
+ }
712
+ rawEntry.relativeOffsetOfLocalHeader = getUint64LE(zip64EiefBuffer, index);
713
+ index += 8;
714
+ }
715
+ // 24 - Disk Start Number 4 bytes
1001
716
  }
1002
- rawEntry.relativeOffsetOfLocalHeader = getUint64LE(zip64EiefBuffer, index);
1003
- index += 8;
1004
- }
1005
- // 24 - Disk Start Number 4 bytes
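The ZIP64 fields above are read with getUint64LE, whose actual definition appears earlier in this file. For reference, a minimal equivalent sketch (assuming the value fits in Number.MAX_SAFE_INTEGER, the same limit unzipRaw enforces below) could be:

    // illustrative sketch only, not the package's actual helper
    function readUint64LE(uint8View, offset) {
      // little-endian 64-bit value = low 32 bits + high 32 bits * 2^32
      return getUint32LE(uint8View, offset) +
             getUint32LE(uint8View, offset + 4) * 0x100000000;
    }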
717
+ // check for Info-ZIP Unicode Path Extra Field (0x7075)
718
+ // see https://github.com/thejoshwolfe/yauzl/issues/33
719
+ const nameField = rawEntry.extraFields.find(e => e.id === 0x7075 &&
720
+ e.data.length >= 6 && // too short to be meaningful
721
+ e.data[0] === 1 && // Version 1 byte version of this extra field, currently 1
722
+ getUint32LE(e.data, 1) === crc.unsigned(rawEntry.nameBytes)); // NameCRC32 4 bytes File Name Field CRC32 Checksum
723
+ // > If the CRC check fails, this UTF-8 Path Extra Field should be
724
+ // > ignored and the File Name field in the header should be used instead.
725
+ if (nameField) {
726
+ // UnicodeName Variable UTF-8 version of the entry File Name
727
+ rawEntry.fileName = decodeBuffer(nameField.data.slice(5));
728
+ }
729
+ // validate file size
730
+ if (rawEntry.compressionMethod === 0) {
731
+ let expectedCompressedSize = rawEntry.uncompressedSize;
732
+ if ((rawEntry.generalPurposeBitFlag & 0x1) !== 0) {
733
+ // traditional encryption prefixes the file data with a header
734
+ expectedCompressedSize += 12;
735
+ }
736
+ if (rawEntry.compressedSize !== expectedCompressedSize) {
737
+ throw new Error(`compressed size mismatch for stored file: ${rawEntry.compressedSize} != ${expectedCompressedSize}`);
738
+ }
739
+ }
740
+ rawEntries.push(rawEntry);
1006
741
  }
1007
-
1008
- // check for Info-ZIP Unicode Path Extra Field (0x7075)
1009
- // see https://github.com/thejoshwolfe/yauzl/issues/33
1010
- const nameField = rawEntry.extraFields.find(e =>
1011
- e.id === 0x7075 &&
1012
- e.data.length >= 6 && // too short to be meaningful
1013
- e.data[0] === 1 && // Version 1 byte version of this extra field, currently 1
1014
- getUint32LE(e.data, 1), crc$1.unsigned(rawEntry.nameBytes)); // NameCRC32 4 bytes File Name Field CRC32 Checksum
1015
- // > If the CRC check fails, this UTF-8 Path Extra Field should be
1016
- // > ignored and the File Name field in the header should be used instead.
1017
- if (nameField) {
1018
- // UnicodeName Variable UTF-8 version of the entry File Name
1019
- rawEntry.fileName = decodeBuffer(nameField.data.slice(5));
742
+ const zip = {
743
+ comment,
744
+ commentBytes,
745
+ };
746
+ return {
747
+ zip,
748
+ entries: rawEntries.map(e => new ZipEntry(reader, e)),
749
+ };
750
+ }
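As an aside for readers tracking the byte offsets in the comments above, the fixed 46-byte central directory record can also be decoded with a plain DataView. This is an illustrative sketch (all reads little-endian), not code from either package version:

    function parseCentralHeaderSketch(bytes) {
      const view = new DataView(bytes.buffer, bytes.byteOffset, 46);
      return {
        signature: view.getUint32(0, true),           // must equal 0x02014b50
        compressionMethod: view.getUint16(10, true),  // 0 = stored, 8 = deflated
        crc32: view.getUint32(16, true),
        compressedSize: view.getUint32(20, true),     // 0xffffffff defers to the ZIP64 extra field
        uncompressedSize: view.getUint32(24, true),
        fileNameLength: view.getUint16(28, true),
        extraFieldLength: view.getUint16(30, true),
        fileCommentLength: view.getUint16(32, true),
        relativeOffsetOfLocalHeader: view.getUint32(42, true),
      };
    }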
751
+ async function readEntryDataHeader(reader, rawEntry) {
752
+ if (rawEntry.generalPurposeBitFlag & 0x1) {
753
+ throw new Error('encrypted entries not supported');
1020
754
  }
1021
-
1022
- // validate file size
755
+ const buffer = await readAs(reader, rawEntry.relativeOffsetOfLocalHeader, 30);
756
+ // note: maybe this should be passed in or cached on entry
757
+ // as it's async so there will be at least one tick (not sure about that)
758
+ const totalLength = await reader.getLength();
759
+ // 0 - Local file header signature = 0x04034b50
760
+ const signature = getUint32LE(buffer, 0);
761
+ if (signature !== 0x04034b50) {
762
+ throw new Error(`invalid local file header signature: 0x${signature.toString(16)}`);
763
+ }
764
+ // all this should be redundant
765
+ // 4 - Version needed to extract (minimum)
766
+ // 6 - General purpose bit flag
767
+ // 8 - Compression method
768
+ // 10 - File last modification time
769
+ // 12 - File last modification date
770
+ // 14 - CRC-32
771
+ // 18 - Compressed size
772
+ // 22 - Uncompressed size
773
+ // 26 - File name length (n)
774
+ const fileNameLength = getUint16LE(buffer, 26);
775
+ // 28 - Extra field length (m)
776
+ const extraFieldLength = getUint16LE(buffer, 28);
777
+ // 30 - File name
778
+ // 30+n - Extra field
779
+ const localFileHeaderEnd = rawEntry.relativeOffsetOfLocalHeader + buffer.length + fileNameLength + extraFieldLength;
780
+ let decompress;
1023
781
  if (rawEntry.compressionMethod === 0) {
1024
- let expectedCompressedSize = rawEntry.uncompressedSize;
1025
- if ((rawEntry.generalPurposeBitFlag & 0x1) !== 0) {
1026
- // traditional encryption prefixes the file data with a header
1027
- expectedCompressedSize += 12;
1028
- }
1029
- if (rawEntry.compressedSize !== expectedCompressedSize) {
1030
- throw new Error(`compressed size mismatch for stored file: ${rawEntry.compressedSize} != ${expectedCompressedSize}`);
1031
- }
782
+ // 0 - The file is stored (no compression)
783
+ decompress = false;
1032
784
  }
1033
- rawEntries.push(rawEntry);
1034
- }
1035
- const zip = {
1036
- comment,
1037
- commentBytes,
1038
- };
1039
- return {
1040
- zip,
1041
- entries: rawEntries.map(e => new ZipEntry(reader, e)),
1042
- };
1043
- }
1044
-
1045
- async function readEntryDataHeader(reader, rawEntry) {
1046
- if (rawEntry.generalPurposeBitFlag & 0x1) {
1047
- throw new Error('encrypted entries not supported');
1048
- }
1049
- const buffer = await readAs(reader, rawEntry.relativeOffsetOfLocalHeader, 30);
1050
- // note: maybe this should be passed in or cached on entry
1051
- // as it's async so there will be at least one tick (not sure about that)
1052
- const totalLength = await reader.getLength();
1053
-
1054
- // 0 - Local file header signature = 0x04034b50
1055
- const signature = getUint32LE(buffer, 0);
1056
- if (signature !== 0x04034b50) {
1057
- throw new Error(`invalid local file header signature: 0x${signature.toString(16)}`);
1058
- }
1059
-
1060
- // all this should be redundant
1061
- // 4 - Version needed to extract (minimum)
1062
- // 6 - General purpose bit flag
1063
- // 8 - Compression method
1064
- // 10 - File last modification time
1065
- // 12 - File last modification date
1066
- // 14 - CRC-32
1067
- // 18 - Compressed size
1068
- // 22 - Uncompressed size
1069
- // 26 - File name length (n)
1070
- const fileNameLength = getUint16LE(buffer, 26);
1071
- // 28 - Extra field length (m)
1072
- const extraFieldLength = getUint16LE(buffer, 28);
1073
- // 30 - File name
1074
- // 30+n - Extra field
1075
- const localFileHeaderEnd = rawEntry.relativeOffsetOfLocalHeader + buffer.length + fileNameLength + extraFieldLength;
1076
- let decompress;
1077
- if (rawEntry.compressionMethod === 0) {
1078
- // 0 - The file is stored (no compression)
1079
- decompress = false;
1080
- } else if (rawEntry.compressionMethod === 8) {
1081
- // 8 - The file is Deflated
1082
- decompress = true;
1083
- } else {
1084
- throw new Error(`unsupported compression method: ${rawEntry.compressionMethod}`);
1085
- }
1086
- const fileDataStart = localFileHeaderEnd;
1087
- const fileDataEnd = fileDataStart + rawEntry.compressedSize;
1088
- if (rawEntry.compressedSize !== 0) {
1089
- // bounds check now, because the read streams will probably not complain loud enough.
1090
- // since we're dealing with an unsigned offset plus an unsigned size,
1091
- // we only have 1 thing to check for.
1092
- if (fileDataEnd > totalLength) {
1093
- throw new Error(`file data overflows file bounds: ${fileDataStart} + ${rawEntry.compressedSize} > ${totalLength}`);
785
+ else if (rawEntry.compressionMethod === 8) {
786
+ // 8 - The file is Deflated
787
+ decompress = true;
788
+ }
789
+ else {
790
+ throw new Error(`unsupported compression method: ${rawEntry.compressionMethod}`);
791
+ }
792
+ const fileDataStart = localFileHeaderEnd;
793
+ const fileDataEnd = fileDataStart + rawEntry.compressedSize;
794
+ if (rawEntry.compressedSize !== 0) {
795
+ // bounds check now, because the read streams will probably not complain loud enough.
796
+ // since we're dealing with an unsigned offset plus an unsigned size,
797
+ // we only have 1 thing to check for.
798
+ if (fileDataEnd > totalLength) {
799
+ throw new Error(`file data overflows file bounds: ${fileDataStart} + ${rawEntry.compressedSize} > ${totalLength}`);
800
+ }
1094
801
  }
1095
- }
1096
- return {
1097
- decompress,
1098
- fileDataStart,
1099
- };
802
+ return {
803
+ decompress,
804
+ fileDataStart,
805
+ };
1100
806
  }
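As a worked example of the offsets above: an entry whose local file header starts at byte 1000, with an 11-byte file name and a 24-byte extra field, has its compressed data starting at 1000 + 30 + 11 + 24 = 1065, which is the fileDataStart value returned here.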
1101
-
1102
807
  async function readEntryDataAsArrayBuffer(reader, rawEntry) {
1103
- const {decompress, fileDataStart} = await readEntryDataHeader(reader, rawEntry);
1104
- if (!decompress) {
1105
- const dataView = await readAs(reader, fileDataStart, rawEntry.compressedSize);
1106
- // make copy?
1107
- //
1108
- // 1. The source is a Blob/file. In this case we'll get back TypedArray we can just hand to the user
1109
- // 2. The source is a TypedArray. In this case we'll get back TypedArray that is a view into a larger buffer
1110
- // but because ultimately this is used to return an ArrayBuffer to `someEntry.arrayBuffer()`
1111
- // we need to return copy since we need the `ArrayBuffer`, not the TypedArray to exactly match the data.
1112
- // Note: We could add another API function `bytes()` or something that returned a `Uint8Array`
1113
- // instead of an `ArrayBuffer`. This would let us skip a copy here. But this case only happens for uncompressed
1114
- // data. That seems like a rare enough case that adding a new API is not worth it? Or is it? A zip of jpegs or mp3s
1115
- // might not be compressed. For now that's a TBD.
1116
- return isTypedArraySameAsArrayBuffer(dataView) ? dataView.buffer : dataView.slice().buffer;
1117
- }
1118
- // see comment in readEntryDateAsBlob
1119
- const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, rawEntry.compressedSize);
1120
- const result = await inflateRawAsync(typedArrayOrBlob, rawEntry.uncompressedSize);
1121
- return result;
808
+ const { decompress, fileDataStart } = await readEntryDataHeader(reader, rawEntry);
809
+ if (!decompress) {
810
+ const dataView = await readAs(reader, fileDataStart, rawEntry.compressedSize);
811
+ // make copy?
812
+ //
813
+ // 1. The source is a Blob/file. In this case we'll get back TypedArray we can just hand to the user
814
+ // 2. The source is a TypedArray. In this case we'll get back TypedArray that is a view into a larger buffer
815
+ // but because ultimately this is used to return an ArrayBuffer to `someEntry.arrayBuffer()`
816
+ // we need to return copy since we need the `ArrayBuffer`, not the TypedArray to exactly match the data.
817
+ // Note: We could add another API function `bytes()` or something that returned a `Uint8Array`
818
+ // instead of an `ArrayBuffer`. This would let us skip a copy here. But this case only happens for uncompressed
819
+ // data. That seems like a rare enough case that adding a new API is not worth it? Or is it? A zip of jpegs or mp3s
820
+ // might not be compressed. For now that's a TBD.
821
+ return isTypedArraySameAsArrayBuffer(dataView) ? dataView.buffer : dataView.slice().buffer;
822
+ }
823
+ // see comment in readEntryDataAsBlob
824
+ const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, rawEntry.compressedSize);
825
+ const result = await inflateRawAsync(typedArrayOrBlob, rawEntry.uncompressedSize);
826
+ return result;
1122
827
  }
1123
-
1124
828
  async function readEntryDataAsBlob(reader, rawEntry, type) {
1125
- const {decompress, fileDataStart} = await readEntryDataHeader(reader, rawEntry);
1126
- if (!decompress) {
1127
- const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, rawEntry.compressedSize, type);
1128
- if (isBlob(typedArrayOrBlob)) {
1129
- return typedArrayOrBlob;
829
+ const { decompress, fileDataStart } = await readEntryDataHeader(reader, rawEntry);
830
+ if (!decompress) {
831
+ const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, rawEntry.compressedSize, type);
832
+ if (isBlob(typedArrayOrBlob)) {
833
+ return typedArrayOrBlob;
834
+ }
835
+ return new Blob([typedArrayOrBlob], { type });
1130
836
  }
1131
- return new Blob([isSharedArrayBuffer(typedArrayOrBlob.buffer) ? new Uint8Array(typedArrayOrBlob) : typedArrayOrBlob], {type});
1132
- }
1133
- // Here's the issue with this mess (should refactor?)
1134
- // if the source is a blob then we really want to pass a blob to inflateRawAsync to avoid a large
1135
- // copy if we're going to a worker.
1136
- const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, rawEntry.compressedSize);
1137
- const result = await inflateRawAsync(typedArrayOrBlob, rawEntry.uncompressedSize, type);
1138
- return result;
837
+ // Here's the issue with this mess (should refactor?)
838
+ // if the source is a blob then we really want to pass a blob to inflateRawAsync to avoid a large
839
+ // copy if we're going to a worker.
840
+ const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, rawEntry.compressedSize);
841
+ const result = await inflateRawAsync(typedArrayOrBlob instanceof Uint8Array ? typedArrayOrBlob : typedArrayOrBlob, rawEntry.uncompressedSize, type);
842
+ return result;
1139
843
  }
1140
-
1141
- function setOptions$1(options) {
1142
- setOptions(options);
844
+ function setOptions(options) {
845
+ setOptions$1(options);
1143
846
  }
1144
-
1145
847
  async function unzipRaw(source) {
1146
- let reader;
1147
- if (typeof Blob !== 'undefined' && source instanceof Blob) {
1148
- reader = new BlobReader(source);
1149
- } else if (source instanceof ArrayBuffer || (source && source.buffer && source.buffer instanceof ArrayBuffer)) {
1150
- reader = new ArrayBufferReader(source);
1151
- } else if (isSharedArrayBuffer(source) || isSharedArrayBuffer(source.buffer)) {
1152
- reader = new ArrayBufferReader(source);
1153
- } else if (typeof source === 'string') {
1154
- const req = await fetch(source);
1155
- if (!req.ok) {
1156
- throw new Error(`failed http request ${source}, status: ${req.status}: ${req.statusText}`);
848
+ let reader;
849
+ if (typeof Blob !== 'undefined' && source instanceof Blob) {
850
+ reader = new BlobReader(source);
1157
851
  }
1158
- const blob = await req.blob();
1159
- reader = new BlobReader(blob);
1160
- } else if (typeof source.getLength === 'function' && typeof source.read === 'function') {
1161
- reader = source;
1162
- } else {
1163
- throw new Error('unsupported source type');
1164
- }
1165
-
1166
- const totalLength = await reader.getLength();
1167
-
1168
- if (totalLength > Number.MAX_SAFE_INTEGER) {
1169
- throw new Error(`file too large. size: ${totalLength}. Only file sizes up 4503599627370496 bytes are supported`);
1170
- }
1171
-
1172
- return await findEndOfCentralDirector(reader, totalLength);
852
+ else if (source instanceof ArrayBuffer || (source && source.buffer && source.buffer instanceof ArrayBuffer)) {
853
+ reader = new ArrayBufferReader(source);
854
+ }
855
+ else if (isSharedArrayBuffer(source) || isSharedArrayBuffer(source.buffer)) {
856
+ reader = new ArrayBufferReader(source);
857
+ }
858
+ else if (typeof source === 'string') {
859
+ const req = await fetch(source);
860
+ if (!req.ok) {
861
+ throw new Error(`failed http request ${source}, status: ${req.status}: ${req.statusText}`);
862
+ }
863
+ const blob = await req.blob();
864
+ reader = new BlobReader(blob);
865
+ }
866
+ else if (typeof source.getLength === 'function' && typeof source.read === 'function') {
867
+ reader = source;
868
+ }
869
+ else {
870
+ throw new Error('unsupported source type');
871
+ }
872
+ const totalLength = await reader.getLength();
873
+ if (totalLength > Number.MAX_SAFE_INTEGER) {
874
+ throw new Error(`file too large. size: ${totalLength}. Only file sizes up to ${Number.MAX_SAFE_INTEGER} bytes are supported`);
875
+ }
876
+ return await findEndOfCentralDirector(reader, totalLength);
1173
877
  }
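Note that the final accepted source type above is any duck-typed reader with getLength() and read(offset, length) methods. A hypothetical Node.js reader over node:fs/promises (the class name and file path are illustrative, not part of the package) might look like:

    import {open} from 'node:fs/promises';

    class FileHandleReader {
      constructor(fileHandle, byteLength) {
        this.fileHandle = fileHandle;
        this.byteLength = byteLength;
      }
      async getLength() {
        return this.byteLength;
      }
      async read(offset, length) {
        const data = new Uint8Array(length);
        // FileHandle.read(buffer, bufferOffset, length, filePosition)
        await this.fileHandle.read(data, 0, length, offset);
        return data;
      }
    }

    const fileHandle = await open('archive.zip');  // illustrative path
    const {size} = await fileHandle.stat();
    const {entries} = await unzipRaw(new FileHandleReader(fileHandle, size));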
1174
-
1175
878
  // If the names are not utf8 you should use unzipRaw
1176
879
  async function unzip(source) {
1177
- const {zip, entries} = await unzipRaw(source);
1178
- return {
1179
- zip,
1180
- entries: Object.fromEntries(entries.map(v => [v.name, v])),
1181
- };
880
+ const { zip, entries } = await unzipRaw(source);
881
+ return {
882
+ zip,
883
+ entries: Object.fromEntries(entries.map(v => [v.name, v])),
884
+ };
1182
885
  }
1183
-
1184
- function cleanup$1() {
1185
- cleanup();
886
+ function cleanup() {
887
+ cleanup$1();
1186
888
  }
1187
889
 
1188
- export { HTTPRangeReader, cleanup$1 as cleanup, setOptions$1 as setOptions, unzip, unzipRaw };
890
+ export { ArrayBufferReader, BlobReader, HTTPRangeReader, ZipEntry, cleanup, setOptions, unzip, unzipRaw };
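Taken together, the exports above can be exercised with a short usage sketch (the archive URL is a placeholder):

    import {unzip} from 'unzipit';

    const {zip, entries} = await unzip('https://example.com/archive.zip');
    console.log(zip.comment);                    // archive-level comment, if any
    for (const [name, entry] of Object.entries(entries)) {
      const buffer = await entry.arrayBuffer();  // see readEntryDataAsArrayBuffer above
      console.log(name, buffer.byteLength);
    }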