unzipit 1.4.3 → 2.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +14 -5
- package/dist/ArrayBufferReader.d.ts +7 -0
- package/dist/BlobReader.d.ts +12 -0
- package/dist/HTTPRangeReader.d.ts +8 -0
- package/dist/inflate-worker.d.ts +1 -0
- package/dist/inflate.d.ts +8 -0
- package/dist/unzipit-worker.js +92 -355
- package/dist/unzipit-worker.min.js +1 -12
- package/dist/unzipit-worker.module.js +90 -353
- package/dist/unzipit.d.ts +71 -56
- package/dist/unzipit.js +734 -1031
- package/dist/unzipit.min.js +1 -31
- package/dist/unzipit.module.js +728 -1026
- package/dist/utils.d.ts +6 -0
- package/package.json +26 -25
- package/dist/unzipit.module.d.ts +0 -59
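
Of note in dist/unzipit.js below: 2.0.1 deletes the bundled JavaScript inflate (the uzip-derived code removed in the first hunk) and decompresses with the platform's native DecompressionStream('deflate-raw'); workers become opt-in (useWorkers defaults to false). The sketch below is illustrative only and not part of the package: it mirrors the pattern of the new decompressRaw function visible in the diff, and assumes a runtime that supports DecompressionStream('deflate-raw') (recent browsers, recent Node).

    // Minimal sketch of deflate-raw decompression via DecompressionStream,
    // following the decompressRaw pattern in the 2.0.1 dist. The name
    // inflateRawSketch is hypothetical, not an unzipit export.
    async function inflateRawSketch(src /* Uint8Array */) {
      const ds = new DecompressionStream('deflate-raw');
      const writer = ds.writable.getWriter();
      // Write without awaiting: awaiting before reading can deadlock once
      // the stream's internal buffer fills (backpressure).
      writer.write(src).then(() => writer.close()).catch(() => {});
      const chunks = [];
      const reader = ds.readable.getReader();
      for (;;) {
        const {done, value} = await reader.read();
        if (done) {
          break;
        }
        chunks.push(value);
      }
      // Concatenate the decompressed chunks into one Uint8Array.
      const size = chunks.reduce((sum, chunk) => sum + chunk.byteLength, 0);
      const result = new Uint8Array(size);
      let offset = 0;
      for (const chunk of chunks) {
        result.set(chunk, offset);
        offset += chunk.byteLength;
      }
      return result;
    }

The non-awaited write is the same design choice the package makes: reading and writing must proceed concurrently, or a large input stalls the writable side.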
package/dist/unzipit.js
CHANGED
@@ -1,385 +1,116 @@
-/* unzipit@
+/* unzipit@2.0.1, license MIT */
 (function (global, factory) {
     typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
     typeof define === 'function' && define.amd ? define(['exports'], factory) :
     (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global.unzipit = {}));
-}(this, (function (exports) { 'use strict';
-
-    /* global SharedArrayBuffer, process */
+})(this, (function (exports) { 'use strict';
 
+    var _a, _b;
     function readBlobAsArrayBuffer(blob) {
-
-
-
-
-
-
-
+        if (blob.arrayBuffer) {
+            return blob.arrayBuffer();
+        }
+        return new Promise((resolve, reject) => {
+            const reader = new FileReader();
+            reader.addEventListener('loadend', () => {
+                resolve(reader.result);
+            });
+            reader.addEventListener('error', reject);
+            reader.readAsArrayBuffer(blob);
         });
-        reader.addEventListener('error', reject);
-        reader.readAsArrayBuffer(blob);
-        });
     }
-
     async function readBlobAsUint8Array(blob) {
-
-
+        const arrayBuffer = await readBlobAsArrayBuffer(blob);
+        return new Uint8Array(arrayBuffer);
     }
-
     function isBlob(v) {
-
+        return typeof Blob !== 'undefined' && v instanceof Blob;
     }
-
     function isSharedArrayBuffer(b) {
-
+        return typeof SharedArrayBuffer !== 'undefined' && b instanceof SharedArrayBuffer;
     }
-
-
-    (typeof process !== 'undefined') &&
-    process.versions
-    (typeof process.versions.node !== 'undefined') &&
-    (typeof process.versions.electron === 'undefined');
-
+    const isNode = (typeof process !== 'undefined') &&
+        !!(process === null || process === void 0 ? void 0 : process.versions) &&
+        (typeof ((_a = process === null || process === void 0 ? void 0 : process.versions) === null || _a === void 0 ? void 0 : _a.node) !== 'undefined') &&
+        (typeof ((_b = process === null || process === void 0 ? void 0 : process.versions) === null || _b === void 0 ? void 0 : _b.electron) === 'undefined');
     function isTypedArraySameAsArrayBuffer(typedArray) {
-
+        return typedArray.byteOffset === 0 && typedArray.byteLength === typedArray.buffer.byteLength;
     }
 
     class ArrayBufferReader {
-
-
-
-
-
-
-
-
-
-
-
+        constructor(arrayBufferOrView) {
+            this.typedArray = (arrayBufferOrView instanceof ArrayBuffer || isSharedArrayBuffer(arrayBufferOrView))
+                ? new Uint8Array(arrayBufferOrView)
+                : new Uint8Array(arrayBufferOrView.buffer, arrayBufferOrView.byteOffset, arrayBufferOrView.byteLength);
+        }
+        async getLength() {
+            return this.typedArray.byteLength;
+        }
+        async read(offset, length) {
+            // Cast is necessary: backing buffer may be SharedArrayBuffer (ArrayBufferLike),
+            // but callers need Uint8Array<ArrayBuffer>. The data is read-only view so this is safe.
+            return new Uint8Array(this.typedArray.buffer, this.typedArray.byteOffset + offset, length);
+        }
     }
 
     class BlobReader {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        constructor(blob) {
+            this.blob = blob;
+        }
+        async getLength() {
+            return this.blob.size;
+        }
+        async read(offset, length) {
+            const blob = this.blob.slice(offset, offset + length);
+            const arrayBuffer = await readBlobAsArrayBuffer(blob);
+            return new Uint8Array(arrayBuffer);
+        }
+        async sliceAsBlob(offset, length, type = '') {
+            return this.blob.slice(offset, offset + length, type);
+        }
     }
 
     class HTTPRangeReader {
-
-
-    }
-    async getLength() {
-        if (this.length === undefined) {
-            const req = await fetch(this.url, { method: 'HEAD' });
-            if (!req.ok) {
-                throw new Error(`failed http request ${this.url}, status: ${req.status}: ${req.statusText}`);
-            }
-            this.length = parseInt(req.headers.get('content-length'));
-            if (Number.isNaN(this.length)) {
-                throw Error('could not get length');
-            }
+        constructor(url) {
+            this.url = url;
         }
-
-
-
-
-
+        async getLength() {
+            if (this.length === undefined) {
+                const req = await fetch(this.url, { method: 'HEAD' });
+                if (!req.ok) {
+                    throw new Error(`failed http request ${this.url}, status: ${req.status}: ${req.statusText}`);
+                }
+                this.length = parseInt(req.headers.get('content-length'));
+                if (Number.isNaN(this.length)) {
+                    throw Error('could not get length');
+                }
+            }
+            return this.length;
         }
-
-
-
-
-
-
-
+        async read(offset, size) {
+            if (size === 0) {
+                return new Uint8Array(0);
+            }
+            const req = await fetch(this.url, {
+                headers: {
+                    Range: `bytes=${offset}-${offset + size - 1}`,
+                },
+            });
+            if (!req.ok) {
+                throw new Error(`failed http request ${this.url}, status: ${req.status} offset: ${offset} size: ${size}: ${req.statusText}`);
+            }
+            const buffer = await req.arrayBuffer();
+            return new Uint8Array(buffer);
         }
-        const buffer = await req.arrayBuffer();
-        return new Uint8Array(buffer);
-    }
-    }
-
-    function inflate(data, buf) {
-        var u8=Uint8Array;
-        if(data[0]==3 && data[1]==0) return (buf ? buf : new u8(0));
-        var bitsF = _bitsF, bitsE = _bitsE, decodeTiny = _decodeTiny, get17 = _get17;
-
-        var noBuf = (buf==null);
-        if(noBuf) buf = new u8((data.length>>>2)<<3);
-
-        var BFINAL=0, BTYPE=0, HLIT=0, HDIST=0, HCLEN=0, ML=0, MD=0;
-        var off = 0, pos = 0;
-        var lmap, dmap;
-
-        while(BFINAL==0) {
-            BFINAL = bitsF(data, pos , 1);
-            BTYPE = bitsF(data, pos+1, 2); pos+=3;
-            //console.log(BFINAL, BTYPE);
-
-            if(BTYPE==0) {
-                if((pos&7)!=0) pos+=8-(pos&7);
-                var p8 = (pos>>>3)+4, len = data[p8-4]|(data[p8-3]<<8); //console.log(len);//bitsF(data, pos, 16),
-                if(noBuf) buf=_check(buf, off+len);
-                buf.set(new u8(data.buffer, data.byteOffset+p8, len), off);
-                //for(var i=0; i<len; i++) buf[off+i] = data[p8+i];
-                //for(var i=0; i<len; i++) if(buf[off+i] != data[p8+i]) throw "e";
-                pos = ((p8+len)<<3); off+=len; continue;
-            }
-            if(noBuf) buf=_check(buf, off+(1<<17)); // really not enough in many cases (but PNG and ZIP provide buffer in advance)
-            if(BTYPE==1) { lmap = U.flmap; dmap = U.fdmap; ML = (1<<9)-1; MD = (1<<5)-1; }
-            if(BTYPE==2) {
-                HLIT = bitsE(data, pos , 5)+257;
-                HDIST = bitsE(data, pos+ 5, 5)+ 1;
-                HCLEN = bitsE(data, pos+10, 4)+ 4; pos+=14;
-                for(var i=0; i<38; i+=2) { U.itree[i]=0; U.itree[i+1]=0; }
-                var tl = 1;
-                for(var i=0; i<HCLEN; i++) { var l=bitsE(data, pos+i*3, 3); U.itree[(U.ordr[i]<<1)+1] = l; if(l>tl)tl=l; } pos+=3*HCLEN; //console.log(itree);
-                makeCodes(U.itree, tl);
-                codes2map(U.itree, tl, U.imap);
-
-                lmap = U.lmap; dmap = U.dmap;
-
-                pos = decodeTiny(U.imap, (1<<tl)-1, HLIT+HDIST, data, pos, U.ttree);
-                var mx0 = _copyOut(U.ttree, 0, HLIT , U.ltree); ML = (1<<mx0)-1;
-                var mx1 = _copyOut(U.ttree, HLIT, HDIST, U.dtree); MD = (1<<mx1)-1;
-
-                //var ml = decodeTiny(U.imap, (1<<tl)-1, HLIT , data, pos, U.ltree); ML = (1<<(ml>>>24))-1; pos+=(ml&0xffffff);
-                makeCodes(U.ltree, mx0);
-                codes2map(U.ltree, mx0, lmap);
-
-                //var md = decodeTiny(U.imap, (1<<tl)-1, HDIST, data, pos, U.dtree); MD = (1<<(md>>>24))-1; pos+=(md&0xffffff);
-                makeCodes(U.dtree, mx1);
-                codes2map(U.dtree, mx1, dmap);
-            }
-            //var ooff=off, opos=pos;
-            while(true) {
-                var code = lmap[get17(data, pos) & ML]; pos += code&15;
-                var lit = code>>>4; //U.lhst[lit]++;
-                if((lit>>>8)==0) { buf[off++] = lit; }
-                else if(lit==256) { break; }
-                else {
-                    var end = off+lit-254;
-                    if(lit>264) { var ebs = U.ldef[lit-257]; end = off + (ebs>>>3) + bitsE(data, pos, ebs&7); pos += ebs&7; }
-                    //dst[end-off]++;
-
-                    var dcode = dmap[get17(data, pos) & MD]; pos += dcode&15;
-                    var dlit = dcode>>>4;
-                    var dbs = U.ddef[dlit], dst = (dbs>>>4) + bitsF(data, pos, dbs&15); pos += dbs&15;
-
-                    //var o0 = off-dst, stp = Math.min(end-off, dst);
-                    //if(stp>20) while(off<end) { buf.copyWithin(off, o0, o0+stp); off+=stp; } else
-                    //if(end-dst<=off) buf.copyWithin(off, off-dst, end-dst); else
-                    //if(dst==1) buf.fill(buf[off-1], off, end); else
-                    if(noBuf) buf=_check(buf, off+(1<<17));
-                    while(off<end) { buf[off]=buf[off++-dst]; buf[off]=buf[off++-dst]; buf[off]=buf[off++-dst]; buf[off]=buf[off++-dst]; }
-                    off=end;
-                    //while(off!=end) { buf[off]=buf[off++-dst]; }
-                }
-            }
-            //console.log(off-ooff, (pos-opos)>>>3);
-        }
-        //console.log(dst);
-        //console.log(tlen, dlen, off-tlen+tcnt);
-        return buf.length==off ? buf : buf.slice(0,off);
     }
-    function _check(buf, len) {
-        var bl=buf.length; if(len<=bl) return buf;
-        var nbuf = new Uint8Array(Math.max(bl<<1,len)); nbuf.set(buf,0);
-        //for(var i=0; i<bl; i+=4) { nbuf[i]=buf[i]; nbuf[i+1]=buf[i+1]; nbuf[i+2]=buf[i+2]; nbuf[i+3]=buf[i+3]; }
-        return nbuf;
-    }
-
-    function _decodeTiny(lmap, LL, len, data, pos, tree) {
-        var bitsE = _bitsE, get17 = _get17;
-        var i = 0;
-        while(i<len) {
-            var code = lmap[get17(data, pos)&LL]; pos+=code&15;
-            var lit = code>>>4;
-            if(lit<=15) { tree[i]=lit; i++; }
-            else {
-                var ll = 0, n = 0;
-                if(lit==16) {
-                    n = (3 + bitsE(data, pos, 2)); pos += 2; ll = tree[i-1];
-                }
-                else if(lit==17) {
-                    n = (3 + bitsE(data, pos, 3)); pos += 3;
-                }
-                else if(lit==18) {
-                    n = (11 + bitsE(data, pos, 7)); pos += 7;
-                }
-                var ni = i+n;
-                while(i<ni) { tree[i]=ll; i++; }
-            }
-        }
-        return pos;
-    }
-    function _copyOut(src, off, len, tree) {
-        var mx=0, i=0, tl=tree.length>>>1;
-        while(i<len) { var v=src[i+off]; tree[(i<<1)]=0; tree[(i<<1)+1]=v; if(v>mx)mx=v; i++; }
-        while(i<tl ) { tree[(i<<1)]=0; tree[(i<<1)+1]=0; i++; }
-        return mx;
-    }
-
-    function makeCodes(tree, MAX_BITS) { // code, length
-        var max_code = tree.length;
-        var code, bits, n, i, len;
-
-        var bl_count = U.bl_count; for(var i=0; i<=MAX_BITS; i++) bl_count[i]=0;
-        for(i=1; i<max_code; i+=2) bl_count[tree[i]]++;
-
-        var next_code = U.next_code; // smallest code for each length
-
-        code = 0;
-        bl_count[0] = 0;
-        for (bits = 1; bits <= MAX_BITS; bits++) {
-            code = (code + bl_count[bits-1]) << 1;
-            next_code[bits] = code;
-        }
-
-        for (n = 0; n < max_code; n+=2) {
-            len = tree[n+1];
-            if (len != 0) {
-                tree[n] = next_code[len];
-                next_code[len]++;
-            }
-        }
-    }
-    function codes2map(tree, MAX_BITS, map) {
-        var max_code = tree.length;
-        var r15 = U.rev15;
-        for(var i=0; i<max_code; i+=2) if(tree[i+1]!=0) {
-            var lit = i>>1;
-            var cl = tree[i+1], val = (lit<<4)|cl; // : (0x8000 | (U.of0[lit-257]<<7) | (U.exb[lit-257]<<4) | cl);
-            var rest = (MAX_BITS-cl), i0 = tree[i]<<rest, i1 = i0 + (1<<rest);
-            //tree[i]=r15[i0]>>>(15-MAX_BITS);
-            while(i0!=i1) {
-                var p0 = r15[i0]>>>(15-MAX_BITS);
-                map[p0]=val; i0++;
-            }
-        }
-    }
-    function revCodes(tree, MAX_BITS) {
-        var r15 = U.rev15, imb = 15-MAX_BITS;
-        for(var i=0; i<tree.length; i+=2) { var i0 = (tree[i]<<(MAX_BITS-tree[i+1])); tree[i] = r15[i0]>>>imb; }
-    }
-
-    function _bitsE(dt, pos, length) { return ((dt[pos>>>3] | (dt[(pos>>>3)+1]<<8) )>>>(pos&7))&((1<<length)-1); }
-    function _bitsF(dt, pos, length) { return ((dt[pos>>>3] | (dt[(pos>>>3)+1]<<8) | (dt[(pos>>>3)+2]<<16))>>>(pos&7))&((1<<length)-1); }
-    /*
-    function _get9(dt, pos) {
-        return ((dt[pos>>>3] | (dt[(pos>>>3)+1]<<8))>>>(pos&7))&511;
-    } */
-    function _get17(dt, pos) { // return at least 17 meaningful bytes
-        return (dt[pos>>>3] | (dt[(pos>>>3)+1]<<8) | (dt[(pos>>>3)+2]<<16) )>>>(pos&7);
-    }
-    const U = function(){
-        var u16=Uint16Array, u32=Uint32Array;
-        return {
-            next_code : new u16(16),
-            bl_count : new u16(16),
-            ordr : [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ],
-            of0 : [3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,999,999,999],
-            exb : [0,0,0,0,0,0,0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0, 0],
-            ldef : new u16(32),
-            df0 : [1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577, 65535, 65535],
-            dxb : [0,0,0,0,1,1,2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 0, 0],
-            ddef : new u32(32),
-            flmap: new u16( 512), fltree: [],
-            fdmap: new u16( 32), fdtree: [],
-            lmap : new u16(32768), ltree : [], ttree:[],
-            dmap : new u16(32768), dtree : [],
-            imap : new u16( 512), itree : [],
-            //rev9 : new u16( 512)
-            rev15: new u16(1<<15),
-            lhst : new u32(286), dhst : new u32( 30), ihst : new u32(19),
-            lits : new u32(15000),
-            strt : new u16(1<<16),
-            prev : new u16(1<<15)
-        };
-    } ();
-
-    (function(){
-        var len = 1<<15;
-        for(var i=0; i<len; i++) {
-            var x = i;
-            x = (((x & 0xaaaaaaaa) >>> 1) | ((x & 0x55555555) << 1));
-            x = (((x & 0xcccccccc) >>> 2) | ((x & 0x33333333) << 2));
-            x = (((x & 0xf0f0f0f0) >>> 4) | ((x & 0x0f0f0f0f) << 4));
-            x = (((x & 0xff00ff00) >>> 8) | ((x & 0x00ff00ff) << 8));
-            U.rev15[i] = (((x >>> 16) | (x << 16)))>>>17;
-        }
-
-        function pushV(tgt, n, sv) { while(n--!=0) tgt.push(0,sv); }
-
-        for(var i=0; i<32; i++) { U.ldef[i]=(U.of0[i]<<3)|U.exb[i]; U.ddef[i]=(U.df0[i]<<4)|U.dxb[i]; }
-
-        pushV(U.fltree, 144, 8); pushV(U.fltree, 255-143, 9); pushV(U.fltree, 279-255, 7); pushV(U.fltree,287-279,8);
-        /*
-        var i = 0;
-        for(; i<=143; i++) U.fltree.push(0,8);
-        for(; i<=255; i++) U.fltree.push(0,9);
-        for(; i<=279; i++) U.fltree.push(0,7);
-        for(; i<=287; i++) U.fltree.push(0,8);
-        */
-        makeCodes(U.fltree, 9);
-        codes2map(U.fltree, 9, U.flmap);
-        revCodes (U.fltree, 9);
-
-        pushV(U.fdtree,32,5);
-        //for(i=0;i<32; i++) U.fdtree.push(0,5);
-        makeCodes(U.fdtree, 5);
-        codes2map(U.fdtree, 5, U.fdmap);
-        revCodes (U.fdtree, 5);
-
-        pushV(U.itree,19,0); pushV(U.ltree,286,0); pushV(U.dtree,30,0); pushV(U.ttree,320,0);
-        /*
-        for(var i=0; i< 19; i++) U.itree.push(0,0);
-        for(var i=0; i<286; i++) U.ltree.push(0,0);
-        for(var i=0; i< 30; i++) U.dtree.push(0,0);
-        for(var i=0; i<320; i++) U.ttree.push(0,0);
-        */
-    })();
-
-    const crc = {
-        table : ( function() {
-            var tab = new Uint32Array(256);
-            for (var n=0; n<256; n++) {
-                var c = n;
-                for (var k=0; k<8; k++) {
-                    if (c & 1) c = 0xedb88320 ^ (c >>> 1);
-                    else c = c >>> 1;
-                }
-                tab[n] = c; }
-            return tab; })(),
-        update : function(c, buf, off, len) {
-            for (var i=0; i<len; i++) c = crc.table[(c ^ buf[off+i]) & 0xff] ^ (c >>> 8);
-            return c;
-        },
-        crc : function(b,o,l) { return crc.update(0xffffffff,b,o,l) ^ 0xffffffff; }
-    };
-
-    function inflateRaw(file, buf) { return inflate(file, buf); }
-
-    /* global module */
 
+    /* global DecompressionStream */
     const config = {
-
-
-
+        numWorkers: 1,
+        workerURL: '',
+        useWorkers: false,
     };
-
     let nextId = 0;
-
     // Requests are put on a queue.
     // We don't send the request to the worker until the worker
     // is finished. This probably adds a small amount of latency
@@ -388,225 +119,247 @@
     // do. You don't know which will finish first. If you give
     // the worker with more work to do the request then you'll
     // waste time.
-
     // note: we can't check `workers.length` for deciding if
     // we've reached `config.numWorkers` because creation the worker
     // is async which means other requests to make workers might
     // come in before a worker gets added to `workers`
     let numWorkers = 0;
-    let canUseWorkers = true;
+    let canUseWorkers = true; // gets set to false if we can't start a worker
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
     const workers = [];
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
     const availableWorkers = [];
     const waitingForWorkerQueue = [];
     const currentlyProcessingIdToRequestMap = new Map();
-
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
     function handleResult(e) {
-
-
-
-
-
-
-
-
-
+        makeWorkerAvailable(e.target);
+        const { id, error, data } = e.data;
+        const request = currentlyProcessingIdToRequestMap.get(id);
+        currentlyProcessingIdToRequestMap.delete(id);
+        if (error) {
+            request.reject(error);
+        }
+        else {
+            request.resolve(data);
+        }
     }
-
     // Because Firefox uses non-standard onerror to signal an error.
     function startWorker(url) {
-
-
-
-
-
-
-
-
-
-
-
-
-
+        return new Promise((resolve, reject) => {
+            const worker = new Worker(url);
+            worker.onmessage = (e) => {
+                if (e.data === 'start') {
+                    worker.onerror = null;
+                    worker.onmessage = null;
+                    resolve(worker);
+                }
+                else {
+                    reject(new Error(`unexpected message: ${e.data}`));
+                }
+            };
+            worker.onerror = reject;
+        });
     }
-
-
-
+    const workerHelper = (function () {
+        if (isNode) {
+            return {
+                // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                async createWorker(url) {
+                    const moduleId = 'node:worker_threads';
+                    const { Worker } = await import(moduleId);
+                    return new Worker(url);
+                },
+                // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                addEventListener(worker, fn) {
+                    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                    worker.on('message', (data) => {
+                        fn({ target: worker, data });
+                    });
+                },
+                // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                async terminate(worker) {
+                    await worker.terminate();
+                },
+            };
+        }
+        else {
+            return {
+                // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                async createWorker(url) {
+                    // I don't understand this security issue
+                    // Apparently there is some iframe setting or http header
+                    // that prevents cross domain workers. But, I can manually
+                    // download the text and do it. I reported this to Chrome
+                    // and they said it was fine so ¯\_(ツ)_/¯
+                    try {
+                        const worker = await startWorker(url);
+                        return worker;
+                    }
+                    catch (_e) {
+                        console.warn('could not load worker:', url);
+                    }
+                    let text;
+                    try {
+                        const req = await fetch(url, { mode: 'cors' });
+                        if (!req.ok) {
+                            throw new Error(`could not load: ${url}`);
+                        }
+                        text = await req.text();
+                        url = URL.createObjectURL(new Blob([text], { type: 'application/javascript' }));
+                        const worker = await startWorker(url);
+                        config.workerURL = url; // this is a hack. What's a better way to structure this code?
+                        return worker;
+                    }
+                    catch (_e) {
+                        console.warn('could not load worker via fetch:', url);
+                    }
+                    if (text !== undefined) {
+                        try {
+                            url = `data:application/javascript;base64,${btoa(text)}`;
+                            const worker = await startWorker(url);
+                            config.workerURL = url;
+                            return worker;
+                        }
+                        catch (_e) {
+                            console.warn('could not load worker via dataURI');
+                        }
+                    }
+                    console.warn('workers will not be used');
+                    throw new Error('can not start workers');
+                },
+                // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                addEventListener(worker, fn) {
+                    worker.addEventListener('message', fn);
+                },
+                // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                async terminate(worker) {
+                    worker.terminate();
+                },
+            };
+        }
+    }());
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    function makeWorkerAvailable(worker) {
+        availableWorkers.push(worker);
+        processWaitingForWorkerQueue();
     }
-
-
-
-
-        const {Worker} = dynamicRequire(module, 'worker_threads');
-        return {
-            async createWorker(url) {
-                return new Worker(url);
-            },
-            addEventListener(worker, fn) {
-                worker.on('message', (data) => {
-                    fn({target: worker, data});
-                });
-            },
-            async terminate(worker) {
-                await worker.terminate();
-            },
-        };
-    } else {
-        return {
-            async createWorker(url) {
-                // I don't understand this security issue
-                // Apparently there is some iframe setting or http header
-                // that prevents cross domain workers. But, I can manually
-                // download the text and do it. I reported this to Chrome
-                // and they said it was fine so ¯\_(ツ)_/¯
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    async function getAvailableWorker() {
+        if (availableWorkers.length === 0 && numWorkers < config.numWorkers) {
+            ++numWorkers; // see comment at numWorkers declaration
             try {
-
-
-
-
+                const worker = await workerHelper.createWorker(config.workerURL);
+                workers.push(worker);
+                availableWorkers.push(worker);
+                workerHelper.addEventListener(worker, handleResult);
             }
-
-
-
-                const req = await fetch(url, {mode: 'cors'});
-                if (!req.ok) {
-                    throw new Error(`could not load: ${url}`);
-                }
-                text = await req.text();
-                url = URL.createObjectURL(new Blob([text], {type: 'application/javascript'}));
-                const worker = await startWorker(url);
-                config.workerURL = url; // this is a hack. What's a better way to structure this code?
-                return worker;
-            } catch (e) {
-                console.warn('could not load worker via fetch:', url);
+            catch (_e) {
+                // set this global out-of-band (needs refactor)
+                canUseWorkers = false;
             }
-
-
-
-
-
-
-
-
-
-
+        }
+        return availableWorkers.pop();
+    }
+    async function decompressRaw(src) {
+        const ds = new DecompressionStream('deflate-raw');
+        const writer = ds.writable.getWriter();
+        // Do not await the write — doing so before reading causes a deadlock when
+        // the internal buffer fills due to backpressure.
+        writer.write(src).then(() => writer.close()).catch(() => { });
+        const chunks = [];
+        const reader = ds.readable.getReader();
+        for (;;) {
+            const { done, value } = await reader.read();
+            if (done) {
+                break;
             }
-
-
-
-
-
-
-
-
-                worker.terminate();
-            },
-        };
-    }
-    }());
-
-    function makeWorkerAvailable(worker) {
-        availableWorkers.push(worker);
-        processWaitingForWorkerQueue();
-    }
-
-    async function getAvailableWorker() {
-        if (availableWorkers.length === 0 && numWorkers < config.numWorkers) {
-            ++numWorkers; // see comment at numWorkers declaration
-            try {
-                const worker = await workerHelper.createWorker(config.workerURL);
-                workers.push(worker);
-                availableWorkers.push(worker);
-                workerHelper.addEventListener(worker, handleResult);
-            } catch (e) {
-                // set this global out-of-band (needs refactor)
-                canUseWorkers = false;
+            chunks.push(value);
+        }
+        const size = chunks.reduce((s, c) => s + c.byteLength, 0);
+        const result = new Uint8Array(size);
+        let offset = 0;
+        for (const chunk of chunks) {
+            result.set(chunk, offset);
+            offset += chunk.byteLength;
         }
-
-        return availableWorkers.pop();
+        return result;
     }
-
     // @param {Uint8Array} src
-    // @param {number} uncompressedSize
     // @param {string} [type] mime-type
     // @returns {ArrayBuffer|Blob} ArrayBuffer if type is falsy or Blob otherwise.
-    function inflateRawLocal(src,
-
-
-
-
-
+    async function inflateRawLocal(src, type, resolve, reject) {
+        try {
+            const dst = await decompressRaw(src);
+            resolve(type ? new Blob([dst], { type }) : dst.buffer);
+        }
+        catch (e) {
+            reject(e);
+        }
     }
-
     async function processWaitingForWorkerQueue() {
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if (waitingForWorkerQueue.length === 0) {
+            return;
+        }
+        if (config.useWorkers && canUseWorkers) {
+            const worker = await getAvailableWorker();
+            // canUseWorkers might have been set out-of-band (need refactor)
+            if (canUseWorkers) {
+                if (worker) {
+                    if (waitingForWorkerQueue.length === 0) {
+                        // the queue might be empty while we awaited for a worker.
+                        makeWorkerAvailable(worker);
+                        return;
+                    }
+                    const { id, src, uncompressedSize, type, resolve, reject } = waitingForWorkerQueue.shift();
+                    currentlyProcessingIdToRequestMap.set(id, { id, src, uncompressedSize, type, resolve, reject });
+                    const transferables = [];
+                    // NOTE: Originally I thought you could transfer an ArrayBuffer.
+                    // The code on this side is often using views into the entire file
+                    // which means if we transferred we'd lose the entire file. That sucks
+                    // because it means there's an expensive copy to send the uncompressed
+                    // data to the worker.
+                    //
+                    // Also originally I thought we could send a Blob but we'd need to refactor
+                    // the code in unzipit/readEntryData as currently it reads the uncompressed
+                    // bytes.
+                    //
+                    //if (!isBlob(src) && !isSharedArrayBuffer(src)) {
+                    // transferables.push(src);
+                    //}
+                    worker.postMessage({
+                        type: 'inflate',
+                        data: {
+                            id,
+                            type,
+                            src,
+                            uncompressedSize,
+                        },
+                    }, transferables);
+                }
+                return;
             }
-            const {id, src, uncompressedSize, type, resolve, reject} = waitingForWorkerQueue.shift();
-            currentlyProcessingIdToRequestMap.set(id, {id, resolve, reject});
-            const transferables = [];
-            // NOTE: Originally I thought you could transfer an ArrayBuffer.
-            // The code on this side is often using views into the entire file
-            // which means if we transferred we'd lose the entire file. That sucks
-            // because it means there's an expensive copy to send the uncompressed
-            // data to the worker.
-            //
-            // Also originally I thought we could send a Blob but we'd need to refactor
-            // the code in unzipit/readEntryData as currently it reads the uncompressed
-            // bytes.
-            //
-            //if (!isBlob(src) && !isSharedArrayBuffer(src)) {
-            // transferables.push(src);
-            //}
-            worker.postMessage({
-                type: 'inflate',
-                data: {
-                    id,
-                    type,
-                    src,
-                    uncompressedSize,
-                },
-            }, transferables);
-        }
-        return;
         }
-
-
-
-
-
-
-
-
-
-        let data = src;
-        if (isBlob(src)) {
-            data = await readBlobAsUint8Array(src);
+        // inflate locally
+        // We loop here because what happens if many requests happen at once
+        // the first N requests will try to async make a worker. Other requests
+        // will then be on the queue. But if we fail to make workers then there
+        // are pending requests.
+        while (waitingForWorkerQueue.length) {
+            const { src, type, resolve, reject } = waitingForWorkerQueue.shift();
+            const data = isBlob(src) ? await readBlobAsUint8Array(src) : src;
+            inflateRawLocal(data, type, resolve, reject);
         }
-        inflateRawLocal(data, uncompressedSize, type, resolve);
-    }
     }
-
-
-
-
-
-
-
-
-
-    config.numWorkers = options.numWorkers || config.numWorkers;
+    function setOptions$1(options) {
+        config.workerURL = options.workerURL || config.workerURL;
+        // there's no reason to set the workerURL if you're not going to use workers
+        if (options.workerURL) {
+            config.useWorkers = true;
+        }
+        config.useWorkers = options.useWorkers !== undefined ? options.useWorkers : config.useWorkers;
+        config.numWorkers = options.numWorkers || config.numWorkers;
     }
-
     // It has to take non-zero time to put a large typed array in a Blob since the very
     // next instruction you could change the contents of the array. So, if you're reading
     // the zip file for images/video/audio then all you want is a Blob on which to get a URL.
@@ -620,113 +373,97 @@
     // @param {string} [type] falsy or mimeType string (eg: 'image/png')
     // @returns {ArrayBuffer|Blob} ArrayBuffer if type is falsy or Blob otherwise.
     function inflateRawAsync(src, uncompressedSize, type) {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        return new Promise((resolve, reject) => {
+            // note: there is potential an expensive copy here. In order for the data
+            // to make it into the worker we need to copy the data to the worker unless
+            // it's a Blob or a SharedArrayBuffer.
+            //
+            // Solutions:
+            //
+            // 1. A minor enhancement, if `uncompressedSize` is small don't call the worker.
+            //
+            // might be a win period as their is overhead calling the worker
+            //
+            // 2. Move the entire library to the worker
+            //
+            // Good, Maybe faster if you pass a URL, Blob, or SharedArrayBuffer? Not sure about that
+            // as those are also easy to transfer. Still slow if you pass an ArrayBuffer
+            // as the ArrayBuffer has to be copied to the worker.
+            //
+            // I guess benchmarking is really the only thing to try.
+            waitingForWorkerQueue.push({ src, uncompressedSize, type, resolve, reject, id: nextId++ });
+            processWaitingForWorkerQueue();
+        });
     }
-
     function clearArray(arr) {
-
-    }
-
-    async function cleanup() {
-        for (const worker of workers) {
-            await workerHelper.terminate(worker);
-        }
-        clearArray(workers);
-        clearArray(availableWorkers);
-        clearArray(waitingForWorkerQueue);
-        currentlyProcessingIdToRequestMap.clear();
-        numWorkers = 0;
-        canUseWorkers = true;
+        arr.splice(0, arr.length);
     }
-
-
-
-
-
-
-
+    async function cleanup$1() {
+        for (const worker of workers) {
+            await workerHelper.terminate(worker);
+        }
+        clearArray(workers);
+        clearArray(availableWorkers);
+        clearArray(waitingForWorkerQueue);
+        currentlyProcessingIdToRequestMap.clear();
+        numWorkers = 0;
+        canUseWorkers = true;
     }
-    */
 
     function dosDateTimeToDate(date, time) {
-
-
-
-
-
-
-
-
-
-        return new Date(year, month, day, hour, minute, second, millisecond);
+        const day = date & 0x1f; // 1-31
+        const month = (date >> 5 & 0xf) - 1; // 1-12, 0-11
+        const year = (date >> 9 & 0x7f) + 1980; // 0-128, 1980-2108
+        const millisecond = 0;
+        const second = (time & 0x1f) * 2; // 0-29, 0-58 (even numbers)
+        const minute = time >> 5 & 0x3f; // 0-59
+        const hour = time >> 11 & 0x1f; // 0-23
+        return new Date(year, month, day, hour, minute, second, millisecond);
     }
-
     class ZipEntry {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        constructor(reader, rawEntry) {
+            this._reader = reader;
+            this._rawEntry = rawEntry;
+            this.name = rawEntry.name;
+            this.nameBytes = rawEntry.nameBytes;
+            this.size = rawEntry.uncompressedSize;
+            this.compressedSize = rawEntry.compressedSize;
+            this.comment = rawEntry.comment;
+            this.commentBytes = rawEntry.commentBytes;
+            this.compressionMethod = rawEntry.compressionMethod;
+            this.lastModDate = dosDateTimeToDate(rawEntry.lastModFileDate, rawEntry.lastModFileTime);
+            this.isDirectory = rawEntry.uncompressedSize === 0 && rawEntry.name.endsWith('/');
+            this.encrypted = !!(rawEntry.generalPurposeBitFlag & 0x1);
+            this.externalFileAttributes = rawEntry.externalFileAttributes;
+            this.versionMadeBy = rawEntry.versionMadeBy;
+        }
+        // returns a promise that returns a Blob for this entry
+        async blob(type = 'application/octet-stream') {
+            return await readEntryDataAsBlob(this._reader, this._rawEntry, type);
+        }
+        // returns a promise that returns an ArrayBuffer for this entry
+        async arrayBuffer() {
+            return await readEntryDataAsArrayBuffer(this._reader, this._rawEntry);
+        }
+        // returns text, assumes the text is valid utf8. If you want more options decode arrayBuffer yourself
+        async text() {
+            const buffer = await this.arrayBuffer();
+            return decodeBuffer(new Uint8Array(buffer));
+        }
+        // returns text with JSON.parse called on it. If you want more options decode arrayBuffer yourself
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        async json() {
+            const text = await this.text();
+            return JSON.parse(text);
+        }
     }
-
     const EOCDR_WITHOUT_COMMENT_SIZE = 22;
     const MAX_COMMENT_SIZE = 0xffff; // 2-byte size
     const EOCDR_SIGNATURE = 0x06054b50;
     const ZIP64_EOCDR_SIGNATURE = 0x06064b50;
-
     async function readAs(reader, offset, length) {
-
+        return await reader.read(offset, length);
     }
-
     // The point of this function is we want to be able to pass the data
     // to a worker as fast as possible so when decompressing if the data
     // is already a blob and we can get a blob then get a blob.
@@ -748,455 +485,421 @@
|
|
|
748
485
|
//
|
|
749
486
|
// For now this hack works even if it's not generic.
|
|
750
487
|
async function readAsBlobOrTypedArray(reader, offset, length, type) {
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
|
|
488
|
+
if (reader.sliceAsBlob) {
|
|
489
|
+
return await reader.sliceAsBlob(offset, length, type);
|
|
490
|
+
}
|
|
491
|
+
return await reader.read(offset, length);
|
|
755
492
|
}
|
|
756
|
-
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
},
|
|
493
|
+
const crc = {
|
|
494
|
+
unsigned() {
|
|
495
|
+
return 0;
|
|
496
|
+
},
|
|
761
497
|
};
|
|
762
|
-
|
|
763
498
|
function getUint16LE(uint8View, offset) {
|
|
764
|
-
|
|
765
|
-
|
|
499
|
+
return uint8View[offset] +
|
|
500
|
+
uint8View[offset + 1] * 0x100;
|
|
766
501
|
}
|
|
767
|
-
|
|
768
502
|
function getUint32LE(uint8View, offset) {
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
503
|
+
return uint8View[offset] +
|
|
504
|
+
uint8View[offset + 1] * 0x100 +
|
|
505
|
+
uint8View[offset + 2] * 0x10000 +
|
|
506
|
+
uint8View[offset + 3] * 0x1000000;
|
|
773
507
|
}
|
|
774
|
-
|
|
775
508
|
function getUint64LE(uint8View, offset) {
|
|
776
|
-
|
|
777
|
-
|
|
509
|
+
return getUint32LE(uint8View, offset) +
|
|
510
|
+
getUint32LE(uint8View, offset + 4) * 0x100000000;
|
|
778
511
|
}
|
|
779
|
-
|
|
780
|
-
/* eslint-disable no-irregular-whitespace */
|
|
781
512
|
// const decodeCP437 = (function() {
|
|
782
|
-
// const cp437 = '\u0000☺☻♥♦♣♠•◘○◙♂♀♪♫☼►◄↕‼¶§▬↨↑↓→←∟↔▲▼ !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~⌂ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜ¢£¥₧ƒáíóúñѪº¿⌐¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐└┴┬├─┼╞╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀αßΓπΣσµτΦΘΩδ∞φε∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■
|
|
513
|
+
// const cp437 = '\u0000☺☻♥♦♣♠•◘○◙♂♀♪♫☼►◄↕‼¶§▬↨↑↓→←∟↔▲▼ !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~⌂ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜ¢£¥₧ƒáíóúñѪº¿⌐¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐└┴┬├─┼╞╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀αßΓπΣσµτΦΘΩδ∞φε∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■ ';
|
|
783
514
|
//
|
|
784
515
|
// return function(uint8view) {
|
|
785
516
|
// return Array.from(uint8view).map(v => cp437[v]).join('');
|
|
786
517
|
// };
|
|
787
518
|
// }());
|
|
788
|
-
/* eslint-enable no-irregular-whitespace */
|
|
789
|
-
|
|
790
519
|
const utf8Decoder = new TextDecoder();
|
|
791
|
-
function decodeBuffer(uint8View,
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
520
|
+
function decodeBuffer(uint8View, _isUTF8) {
|
|
521
|
+
if (isSharedArrayBuffer(uint8View.buffer)) {
|
|
522
|
+
uint8View = new Uint8Array(uint8View);
|
|
523
|
+
}
|
|
524
|
+
return utf8Decoder.decode(uint8View);
|
|
525
|
+
/*
|
|
526
|
+
AFAICT the UTF8 flat is not set so it's 100% up to the user
|
|
527
|
+
to self decode if their file is not utf8 filenames
|
|
528
|
+
return isUTF8
|
|
529
|
+
? utf8Decoder.decode(uint8View)
|
|
530
|
+
: decodeCP437(uint8View);
|
|
531
|
+
*/
|
|
803
532
|
}
|
|
804
|
-
|
|
805
533
|
async function findEndOfCentralDirector(reader, totalLength) {
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
811
|
-
|
|
812
|
-
|
|
813
|
-
|
|
814
|
-
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
|
|
818
|
-
|
|
819
|
-
|
|
820
|
-
|
|
821
|
-
|
|
822
|
-
|
|
823
|
-
|
|
824
|
-
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
|
|
828
|
-
|
|
829
|
-
|
|
830
|
-
|
|
831
|
-
|
|
832
|
-
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
|
|
839
|
-
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
|
|
844
|
-
} else {
|
|
845
|
-
return await readEntries(reader, centralDirectoryOffset, centralDirectorySize, entryCount, comment, commentBytes);
|
|
534
|
+
const size = Math.min(EOCDR_WITHOUT_COMMENT_SIZE + MAX_COMMENT_SIZE, totalLength);
|
|
535
|
+
const readStart = totalLength - size;
|
|
536
|
+
const data = await readAs(reader, readStart, size);
|
|
537
|
+
for (let i = size - EOCDR_WITHOUT_COMMENT_SIZE; i >= 0; --i) {
|
|
538
|
+
if (getUint32LE(data, i) !== EOCDR_SIGNATURE) {
|
|
539
|
+
continue;
|
|
540
|
+
}
|
|
541
|
+
// 0 - End of central directory signature
|
|
542
|
+
const eocdr = new Uint8Array(data.buffer, data.byteOffset + i, data.byteLength - i);
|
|
543
|
+
// 4 - Number of this disk
|
|
544
|
+
const diskNumber = getUint16LE(eocdr, 4);
|
|
545
|
+
if (diskNumber !== 0) {
|
|
546
|
+
throw new Error(`multi-volume zip files are not supported. This is volume: ${diskNumber}`);
|
|
547
|
+
}
|
|
548
|
+
// 6 - Disk where central directory starts
|
|
549
|
+
// 8 - Number of central directory records on this disk
|
|
550
|
+
// 10 - Total number of central directory records
|
|
551
|
+
const entryCount = getUint16LE(eocdr, 10);
|
|
552
|
+
// 12 - Size of central directory (bytes)
|
|
553
|
+
const centralDirectorySize = getUint32LE(eocdr, 12);
|
|
554
|
+
// 16 - Offset of start of central directory, relative to start of archive
|
|
555
|
+
const centralDirectoryOffset = getUint32LE(eocdr, 16);
|
|
556
|
+
// 20 - Comment length
|
|
557
|
+
const commentLength = getUint16LE(eocdr, 20);
|
|
558
|
+
const expectedCommentLength = eocdr.length - EOCDR_WITHOUT_COMMENT_SIZE;
|
|
559
|
+
if (commentLength !== expectedCommentLength) {
|
|
560
|
+
throw new Error(`invalid comment length. expected: ${expectedCommentLength}, actual: ${commentLength}`);
|
|
561
|
+
}
|
|
562
|
+
// 22 - Comment
|
|
563
|
+
// the encoding is always cp437.
|
|
564
|
+
const commentBytes = new Uint8Array(eocdr.buffer, eocdr.byteOffset + 22, commentLength);
|
|
565
|
+
const comment = decodeBuffer(commentBytes);
|
|
566
|
+
if (entryCount === 0xffff || centralDirectoryOffset === 0xffffffff) {
|
|
567
|
+
return await readZip64CentralDirectory(reader, readStart + i, comment, commentBytes);
|
|
568
|
+
}
|
|
569
|
+
else {
|
|
570
|
+
return await readEntries(reader, centralDirectoryOffset, centralDirectorySize, entryCount, comment, commentBytes);
|
|
571
|
+
}
|
|
846
572
|
}
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
throw new Error('could not find end of central directory. maybe not zip file');
|
|
573
|
+
throw new Error('could not find end of central directory. maybe not zip file');
|
|
850
574
|
}
|
|
851
|
-
|
|
852
575
|
const END_OF_CENTRAL_DIRECTORY_LOCATOR_SIGNATURE = 0x07064b50;
|
|
853
|
-
|
|
854
576
|
async function readZip64CentralDirectory(reader, offset, comment, commentBytes) {
|
|
855
|
-
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
|
|
877
|
-
|
|
878
|
-
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
// 48 - offset of start of central directory with respect to the starting disk number 8 bytes
|
|
887
|
-
const centralDirectoryOffset = getUint64LE(zip64Eocdr, 48);
|
|
888
|
-
// 56 - zip64 extensible data sector (variable size)
|
|
889
|
-
return readEntries(reader, centralDirectoryOffset, centralDirectorySize, entryCount, comment, commentBytes);
|
|
577
|
+
// ZIP64 Zip64 end of central directory locator
|
|
578
|
+
const zip64EocdlOffset = offset - 20;
|
|
579
|
+
const eocdl = await readAs(reader, zip64EocdlOffset, 20);
|
|
580
|
+
// 0 - zip64 end of central dir locator signature
|
|
581
|
+
if (getUint32LE(eocdl, 0) !== END_OF_CENTRAL_DIRECTORY_LOCATOR_SIGNATURE) {
|
|
582
|
+
throw new Error('invalid zip64 end of central directory locator signature');
|
|
583
|
+
}
|
|
584
|
+
// 4 - number of the disk with the start of the zip64 end of central directory
|
|
585
|
+
// 8 - relative offset of the zip64 end of central directory record
|
|
586
|
+
const zip64EocdrOffset = getUint64LE(eocdl, 8);
|
|
587
|
+
// 16 - total number of disks
|
|
588
|
+
// ZIP64 end of central directory record
|
|
589
|
+
const zip64Eocdr = await readAs(reader, zip64EocdrOffset, 56);
|
|
590
|
+
// 0 - zip64 end of central dir signature 4 bytes (0x06064b50)
|
|
591
|
+
if (getUint32LE(zip64Eocdr, 0) !== ZIP64_EOCDR_SIGNATURE) {
|
|
592
|
+
throw new Error('invalid zip64 end of central directory record signature');
|
|
593
|
+
}
|
|
594
|
+
// 4 - size of zip64 end of central directory record 8 bytes
|
|
595
|
+
// 12 - version made by 2 bytes
|
|
596
|
+
// 14 - version needed to extract 2 bytes
|
|
597
|
+
// 16 - number of this disk 4 bytes
|
|
598
|
+
// 20 - number of the disk with the start of the central directory 4 bytes
|
|
599
|
+
// 24 - total number of entries in the central directory on this disk 8 bytes
|
|
600
|
+
// 32 - total number of entries in the central directory 8 bytes
|
|
601
|
+
const entryCount = getUint64LE(zip64Eocdr, 32);
|
|
602
|
+
// 40 - size of the central directory 8 bytes
|
|
603
|
+
const centralDirectorySize = getUint64LE(zip64Eocdr, 40);
|
|
604
|
+
// 48 - offset of start of central directory with respect to the starting disk number 8 bytes
|
|
605
|
+
const centralDirectoryOffset = getUint64LE(zip64Eocdr, 48);
|
|
606
|
+
// 56 - zip64 extensible data sector (variable size)
|
|
607
|
+
return readEntries(reader, centralDirectoryOffset, centralDirectorySize, entryCount, comment, commentBytes);
|
|
890
608
|
}
|
|
891
|
-
|
|
892
609
|
const CENTRAL_DIRECTORY_FILE_HEADER_SIGNATURE = 0x02014b50;
|
|
893
|
-
|
|
894
610
|
async function readEntries(reader, centralDirectoryOffset, centralDirectorySize, rawEntryCount, comment, commentBytes) {
|
|
895
|
-
|
|
896
|
-
|
|
897
|
-
|
|
898
|
-
|
|
899
|
-
|
|
900
|
-
|
|
901
|
-
|
|
902
|
-
|
|
903
|
-
|
|
904
|
-
|
|
905
|
-
|
|
906
|
-
|
|
907
|
-
|
|
908
|
-
|
|
909
|
-
|
|
910
|
-
|
|
911
|
-
|
|
912
|
-
|
|
913
|
-
|
|
914
|
-
|
|
915
|
-
|
|
916
|
-
|
|
917
|
-
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
|
|
926
|
-
|
|
927
|
-
|
|
928
|
-
|
|
929
|
-
|
|
930
|
-
|
|
931
|
-
|
|
932
|
-
|
|
933
|
-
|
|
934
|
-
|
|
935
|
-
|
|
936
|
-
|
|
937
|
-
|
|
938
|
-
|
|
939
|
-
|
|
940
|
-
|
|
941
|
-
|
|
942
|
-
|
|
943
|
-
|
|
944
|
-
|
|
945
|
-
|
|
946
|
-
|
|
947
|
-
|
|
948
|
-
|
|
949
|
-
|
|
950
|
-
|
|
951
|
-
|
|
952
|
-
|
|
953
|
-
|
|
954
|
-
|
|
955
|
-
|
|
956
|
-
|
|
957
|
-
|
|
958
|
-
|
|
959
|
-
|
|
960
|
-
|
|
961
|
-
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
data: extraFieldBuffer.slice(dataStart, dataEnd),
|
|
966
|
-
});
|
|
967
|
-
i = dataEnd;
|
|
968
|
-
}
|
|
969
|
-
|
|
970
|
-
// 46+n+m - File comment
|
|
971
|
-
rawEntry.commentBytes = data.slice(fileCommentStart, fileCommentStart + rawEntry.fileCommentLength);
|
|
972
|
-
rawEntry.comment = decodeBuffer(rawEntry.commentBytes);
|
|
973
|
-
|
|
974
|
-
readEntryCursor += data.length;
|
|
975
|
-
|
|
976
|
-
if (rawEntry.uncompressedSize === 0xffffffff ||
|
|
977
|
-
rawEntry.compressedSize === 0xffffffff ||
|
|
978
|
-
rawEntry.relativeOffsetOfLocalHeader === 0xffffffff) {
|
|
979
|
-
// ZIP64 format
|
|
980
|
-
// find the Zip64 Extended Information Extra Field
|
|
981
|
-
const zip64ExtraField = rawEntry.extraFields.find(e => e.id === 0x0001);
|
|
982
|
-
if (!zip64ExtraField) {
|
|
983
|
-
throw new Error('expected zip64 extended information extra field');
|
|
984
|
-
}
|
|
985
|
-
const zip64EiefBuffer = zip64ExtraField.data;
|
|
986
|
-
let index = 0;
|
|
987
|
-
// 0 - Original Size 8 bytes
|
|
988
|
-
if (rawEntry.uncompressedSize === 0xffffffff) {
|
|
989
|
-
if (index + 8 > zip64EiefBuffer.length) {
|
|
990
|
-
+            throw new Error('zip64 extended information extra field does not include uncompressed size');
+    let readEntryCursor = 0;
+    const allEntriesBuffer = await readAs(reader, centralDirectoryOffset, centralDirectorySize);
+    const rawEntries = [];
+    for (let e = 0; e < rawEntryCount; ++e) {
+        const buffer = allEntriesBuffer.subarray(readEntryCursor, readEntryCursor + 46);
+        // 0 - Central directory file header signature
+        const signature = getUint32LE(buffer, 0);
+        if (signature !== CENTRAL_DIRECTORY_FILE_HEADER_SIGNATURE) {
+            throw new Error(`invalid central directory file header signature: 0x${signature.toString(16)}`);
+        }
+        const rawEntry = {
+            // 4 - Version made by
+            versionMadeBy: getUint16LE(buffer, 4),
+            // 6 - Version needed to extract (minimum)
+            versionNeededToExtract: getUint16LE(buffer, 6),
+            // 8 - General purpose bit flag
+            generalPurposeBitFlag: getUint16LE(buffer, 8),
+            // 10 - Compression method
+            compressionMethod: getUint16LE(buffer, 10),
+            // 12 - File last modification time
+            lastModFileTime: getUint16LE(buffer, 12),
+            // 14 - File last modification date
+            lastModFileDate: getUint16LE(buffer, 14),
+            // 16 - CRC-32
+            crc32: getUint32LE(buffer, 16),
+            // 20 - Compressed size
+            compressedSize: getUint32LE(buffer, 20),
+            // 24 - Uncompressed size
+            uncompressedSize: getUint32LE(buffer, 24),
+            // 28 - File name length (n)
+            fileNameLength: getUint16LE(buffer, 28),
+            // 30 - Extra field length (m)
+            extraFieldLength: getUint16LE(buffer, 30),
+            // 32 - File comment length (k)
+            fileCommentLength: getUint16LE(buffer, 32),
+            // 34 - Disk number where file starts
+            // 36 - Internal file attributes
+            internalFileAttributes: getUint16LE(buffer, 36),
+            // 38 - External file attributes
+            externalFileAttributes: getUint32LE(buffer, 38),
+            // 42 - Relative offset of local file header
+            relativeOffsetOfLocalHeader: getUint32LE(buffer, 42),
+        };
+        if (rawEntry.generalPurposeBitFlag & 0x40) {
+            throw new Error('strong encryption is not supported');
+        }
+        readEntryCursor += 46;
+        const data = allEntriesBuffer.subarray(readEntryCursor, readEntryCursor + rawEntry.fileNameLength + rawEntry.extraFieldLength + rawEntry.fileCommentLength);
+        // 46 - File name
+        (rawEntry.generalPurposeBitFlag & 0x800) !== 0;
+        rawEntry.nameBytes = data.slice(0, rawEntry.fileNameLength);
+        rawEntry.name = decodeBuffer(rawEntry.nameBytes);
+        // 46+n - Extra field
+        const fileCommentStart = rawEntry.fileNameLength + rawEntry.extraFieldLength;
+        const extraFieldBuffer = data.slice(rawEntry.fileNameLength, fileCommentStart);
+        rawEntry.extraFields = [];
+        let i = 0;
+        while (i < extraFieldBuffer.length - 3) {
+            const headerId = getUint16LE(extraFieldBuffer, i + 0);
+            const dataSize = getUint16LE(extraFieldBuffer, i + 2);
+            const dataStart = i + 4;
+            const dataEnd = dataStart + dataSize;
+            if (dataEnd > extraFieldBuffer.length) {
+                throw new Error('extra field length exceeds extra field buffer size');
+            }
+            rawEntry.extraFields.push({
+                id: headerId,
+                data: extraFieldBuffer.slice(dataStart, dataEnd),
+            });
+            i = dataEnd;
         }
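
Note: every field above is decoded with the package's little-endian helpers (getUint16LE, getUint32LE, from its utils module). A minimal sketch of readers of that shape, for reference only (illustrative, not the package's exact code):

    function getUint16LE(uint8View, offset) {
      // low byte first; ZIP structures are little-endian throughout
      return uint8View[offset] + uint8View[offset + 1] * 256;
    }

    function getUint32LE(uint8View, offset) {
      // multiply rather than shift so the result stays unsigned
      return getUint16LE(uint8View, offset) +
             getUint16LE(uint8View, offset + 2) * 65536;
    }

So getUint16LE(buffer, 10) is the compression method, getUint32LE(buffer, 20) the compressed size, and so on down the offsets in the comments.
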
- … (7 lines)
+        // 46+n+m - File comment
+        rawEntry.commentBytes = data.slice(fileCommentStart, fileCommentStart + rawEntry.fileCommentLength);
+        rawEntry.comment = decodeBuffer(rawEntry.commentBytes);
+        readEntryCursor += data.length;
+        if (rawEntry.uncompressedSize === 0xffffffff ||
+            rawEntry.compressedSize === 0xffffffff ||
+            rawEntry.relativeOffsetOfLocalHeader === 0xffffffff) {
+            // ZIP64 format
+            // find the Zip64 Extended Information Extra Field
+            const zip64ExtraField = rawEntry.extraFields.find(e => e.id === 0x0001);
+            if (!zip64ExtraField) {
+                throw new Error('expected zip64 extended information extra field');
+            }
+            const zip64EiefBuffer = zip64ExtraField.data;
+            let index = 0;
+            // 0 - Original Size 8 bytes
+            if (rawEntry.uncompressedSize === 0xffffffff) {
+                if (index + 8 > zip64EiefBuffer.length) {
+                    throw new Error('zip64 extended information extra field does not include uncompressed size');
+                }
+                rawEntry.uncompressedSize = getUint64LE(zip64EiefBuffer, index);
+                index += 8;
+            }
+            // 8 - Compressed Size 8 bytes
+            if (rawEntry.compressedSize === 0xffffffff) {
+                if (index + 8 > zip64EiefBuffer.length) {
+                    throw new Error('zip64 extended information extra field does not include compressed size');
+                }
+                rawEntry.compressedSize = getUint64LE(zip64EiefBuffer, index);
+                index += 8;
+            }
+            // 16 - Relative Header Offset 8 bytes
+            if (rawEntry.relativeOffsetOfLocalHeader === 0xffffffff) {
+                if (index + 8 > zip64EiefBuffer.length) {
+                    throw new Error('zip64 extended information extra field does not include relative header offset');
+                }
+                rawEntry.relativeOffsetOfLocalHeader = getUint64LE(zip64EiefBuffer, index);
+                index += 8;
+            }
+            // 24 - Disk Start Number 4 bytes
         }
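
Note: a 0xffffffff in any of the three fields above is a sentinel meaning "the real value is in the zip64 extra field", where it is stored as a 64-bit little-endian integer and read with getUint64LE. A sketch of such a reader (illustrative; JavaScript numbers represent integers exactly only up to 2^53 - 1, which is why unzipRaw below rejects lengths above Number.MAX_SAFE_INTEGER):

    function getUint64LE(uint8View, offset) {
      // combine the 32-bit halves with multiplication, not shifts,
      // since JS bitwise operators truncate to 32 bits
      return getUint32LE(uint8View, offset) +
             getUint32LE(uint8View, offset + 4) * 4294967296; // 2 ** 32
    }
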
- … (7 lines)
+        // check for Info-ZIP Unicode Path Extra Field (0x7075)
+        // see https://github.com/thejoshwolfe/yauzl/issues/33
+        const nameField = rawEntry.extraFields.find(e => e.id === 0x7075 &&
+            e.data.length >= 6 && // too short to be meaningful
+            e.data[0] === 1 && // Version 1 byte version of this extra field, currently 1
+            getUint32LE(e.data, 1), crc.unsigned()); // NameCRC32 4 bytes File Name Field CRC32 Checksum
+        // > If the CRC check fails, this UTF-8 Path Extra Field should be
+        // > ignored and the File Name field in the header should be used instead.
+        if (nameField) {
+            // UnicodeName Variable UTF-8 version of the entry File Name
+            rawEntry.fileName = decodeBuffer(nameField.data.slice(5));
         }
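
Note: decodeBuffer converts name/comment bytes to a string. A minimal sketch of a UTF-8 decoder of that shape (the real helper may also need to copy SharedArrayBuffer-backed views, which TextDecoder rejects):

    const utf8Decoder = new TextDecoder('utf-8');

    function decodeBufferSketch(uint8View) {
      return utf8Decoder.decode(uint8View);
    }
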
- … (4 lines)
+        // validate file size
+        if (rawEntry.compressionMethod === 0) {
+            let expectedCompressedSize = rawEntry.uncompressedSize;
+            if ((rawEntry.generalPurposeBitFlag & 0x1) !== 0) {
+                // traditional encryption prefixes the file data with a header
+                expectedCompressedSize += 12;
+            }
+            if (rawEntry.compressedSize !== expectedCompressedSize) {
+                throw new Error(`compressed size mismatch for stored file: ${rawEntry.compressedSize} != ${expectedCompressedSize}`);
+            }
+        }
+        rawEntries.push(rawEntry);
     }
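
Note: this validation applies only to stored (method 0) entries, where the compressed bytes are the file bytes verbatim. A worked example of the arithmetic above (values hypothetical):

    // stored, unencrypted, 10-byte file:
    //   expectedCompressedSize = 10            -> must equal compressedSize
    // stored with the traditional-encryption bit (0x1) set:
    //   expectedCompressedSize = 10 + 12 = 22  // 12-byte encryption header precedes the data
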
- … (6 lines)
-e
- … (3 lines)
-if (
-
-rawEntry.fileName = decodeBuffer(nameField.data.slice(5));
+    const zip = {
+        comment,
+        commentBytes,
+    };
+    return {
+        zip,
+        entries: rawEntries.map(e => new ZipEntry(reader, e)),
+    };
+}
+async function readEntryDataHeader(reader, rawEntry) {
+    if (rawEntry.generalPurposeBitFlag & 0x1) {
+        throw new Error('encrypted entries not supported');
     }
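
Note: the return value above is the shape the exported unzipRaw resolves to: archive-level metadata plus an array of ZipEntry objects. A usage sketch (URL and loop body illustrative; entry fields per the package README):

    const {zip, entries} = await unzipRaw('https://example.com/stuff.zip');
    console.log(zip.comment);
    for (const entry of entries) {
      console.log(entry.name, entry.size);
    }
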
-
-//
+    const buffer = await readAs(reader, rawEntry.relativeOffsetOfLocalHeader, 30);
+    // note: maybe this should be passed in or cached on entry
+    // as it's async so there will be at least one tick (not sure about that)
+    const totalLength = await reader.getLength();
+    // 0 - Local file header signature = 0x04034b50
+    const signature = getUint32LE(buffer, 0);
+    if (signature !== 0x04034b50) {
+        throw new Error(`invalid local file header signature: 0x${signature.toString(16)}`);
+    }
+    // all this should be redundant
+    // 4 - Version needed to extract (minimum)
+    // 6 - General purpose bit flag
+    // 8 - Compression method
+    // 10 - File last modification time
+    // 12 - File last modification date
+    // 14 - CRC-32
+    // 18 - Compressed size
+    // 22 - Uncompressed size
+    // 26 - File name length (n)
+    const fileNameLength = getUint16LE(buffer, 26);
+    // 28 - Extra field length (m)
+    const extraFieldLength = getUint16LE(buffer, 28);
+    // 30 - File name
+    // 30+n - Extra field
+    const localFileHeaderEnd = rawEntry.relativeOffsetOfLocalHeader + buffer.length + fileNameLength + extraFieldLength;
+    let decompress;
     if (rawEntry.compressionMethod === 0) {
- … (2 lines)
-        // traditional encryption prefixes the file data with a header
-        expectedCompressedSize += 12;
-    }
-    if (rawEntry.compressedSize !== expectedCompressedSize) {
-        throw new Error(`compressed size mismatch for stored file: ${rawEntry.compressedSize} != ${expectedCompressedSize}`);
-    }
+        // 0 - The file is stored (no compression)
+        decompress = false;
     }
- … (6 lines)
-    return {
-        zip,
-        entries: rawEntries.map(e => new ZipEntry(reader, e)),
-    };
-}
-
-async function readEntryDataHeader(reader, rawEntry) {
-    if (rawEntry.generalPurposeBitFlag & 0x1) {
-        throw new Error('encrypted entries not supported');
-    }
-    const buffer = await readAs(reader, rawEntry.relativeOffsetOfLocalHeader, 30);
-    // note: maybe this should be passed in or cached on entry
-    // as it's async so there will be at least one tick (not sure about that)
-    const totalLength = await reader.getLength();
-
-    // 0 - Local file header signature = 0x04034b50
-    const signature = getUint32LE(buffer, 0);
-    if (signature !== 0x04034b50) {
-        throw new Error(`invalid local file header signature: 0x${signature.toString(16)}`);
-    }
-
-    // all this should be redundant
-    // 4 - Version needed to extract (minimum)
-    // 6 - General purpose bit flag
-    // 8 - Compression method
-    // 10 - File last modification time
-    // 12 - File last modification date
-    // 14 - CRC-32
-    // 18 - Compressed size
-    // 22 - Uncompressed size
-    // 26 - File name length (n)
-    const fileNameLength = getUint16LE(buffer, 26);
-    // 28 - Extra field length (m)
-    const extraFieldLength = getUint16LE(buffer, 28);
-    // 30 - File name
-    // 30+n - Extra field
-    const localFileHeaderEnd = rawEntry.relativeOffsetOfLocalHeader + buffer.length + fileNameLength + extraFieldLength;
-    let decompress;
-    if (rawEntry.compressionMethod === 0) {
-        // 0 - The file is stored (no compression)
-        decompress = false;
-    } else if (rawEntry.compressionMethod === 8) {
-        // 8 - The file is Deflated
-        decompress = true;
-    } else {
-        throw new Error(`unsupported compression method: ${rawEntry.compressionMethod}`);
-    }
-    const fileDataStart = localFileHeaderEnd;
-    const fileDataEnd = fileDataStart + rawEntry.compressedSize;
-    if (rawEntry.compressedSize !== 0) {
-        // bounds check now, because the read streams will probably not complain loud enough.
-        // since we're dealing with an unsigned offset plus an unsigned size,
-        // we only have 1 thing to check for.
-        if (fileDataEnd > totalLength) {
-            throw new Error(`file data overflows file bounds: ${fileDataStart} + ${rawEntry.compressedSize} > ${totalLength}`);
+    else if (rawEntry.compressionMethod === 8) {
+        // 8 - The file is Deflated
+        decompress = true;
+    }
+    else {
+        throw new Error(`unsupported compression method: ${rawEntry.compressionMethod}`);
     }
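
Note: method 8 entries hold a raw-deflate stream. unzipit decompresses them with its own inflateRawAsync (optionally in workers; see setOptions below), but as a point of reference, a standalone sketch of the same transform using the web Compression Streams API, in runtimes that support the 'deflate-raw' format:

    // decompress a raw-deflate Uint8Array (what ZIP method 8 stores)
    async function inflateRawSketch(compressedBytes) {
      const stream = new Blob([compressedBytes])
          .stream()
          .pipeThrough(new DecompressionStream('deflate-raw'));
      return new Uint8Array(await new Response(stream).arrayBuffer());
    }
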
- … (5 lines)
+    const fileDataStart = localFileHeaderEnd;
+    const fileDataEnd = fileDataStart + rawEntry.compressedSize;
+    if (rawEntry.compressedSize !== 0) {
+        // bounds check now, because the read streams will probably not complain loud enough.
+        // since we're dealing with an unsigned offset plus an unsigned size,
+        // we only have 1 thing to check for.
+        if (fileDataEnd > totalLength) {
+            throw new Error(`file data overflows file bounds: ${fileDataStart} + ${rawEntry.compressedSize} > ${totalLength}`);
+        }
+    }
+    return {
+        decompress,
+        fileDataStart,
+    };
 }
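
Note: the central directory's relativeOffsetOfLocalHeader points at the 30-byte local file header, not at the entry's data, and the local copies of the name and extra field may differ in length from the central copies, which is why the lengths are re-read above. The resulting layout:

    // fileDataStart = relativeOffsetOfLocalHeader
    //               + 30               // fixed-size local header (buffer.length above)
    //               + fileNameLength   // n, from the local header
    //               + extraFieldLength // m, from the local header
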
-
 async function readEntryDataAsArrayBuffer(reader, rawEntry) {
- … (19 lines)
+    const { decompress, fileDataStart } = await readEntryDataHeader(reader, rawEntry);
+    if (!decompress) {
+        const dataView = await readAs(reader, fileDataStart, rawEntry.compressedSize);
+        // make copy?
+        //
+        // 1. The source is a Blob/file. In this case we'll get back TypedArray we can just hand to the user
+        // 2. The source is a TypedArray. In this case we'll get back TypedArray that is a view into a larger buffer
+        //    but because ultimately this is used to return an ArrayBuffer to `someEntry.arrayBuffer()`
+        //    we need to return copy since we need the `ArrayBuffer`, not the TypedArray to exactly match the data.
+        // Note: We could add another API function `bytes()` or something that returned a `Uint8Array`
+        //    instead of an `ArrayBuffer`. This would let us skip a copy here. But this case only happens for uncompressed
+        //    data. That seems like a rare enough case that adding a new API is not worth it? Or is it? A zip of jpegs or mp3s
+        //    might not be compressed. For now that's a TBD.
+        return isTypedArraySameAsArrayBuffer(dataView) ? dataView.buffer : dataView.slice().buffer;
+    }
+    // see comment in readEntryDateAsBlob
+    const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, rawEntry.compressedSize);
+    const result = await inflateRawAsync(typedArrayOrBlob instanceof Uint8Array ? typedArrayOrBlob : typedArrayOrBlob, rawEntry.uncompressedSize);
+    return result;
 }
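
Note: readEntryDataAsArrayBuffer is what backs ZipEntry.arrayBuffer(); the long comment above is weighing when a copy is required so the returned ArrayBuffer contains exactly the entry's bytes. Typical use (entry name hypothetical):

    const {entries} = await unzip(url);
    const buf = await entries['model.bin'].arrayBuffer(); // ArrayBuffer of just this entry
    const bytes = new Uint8Array(buf);
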
-
 async function readEntryDataAsBlob(reader, rawEntry, type) {
- … (5 lines)
+    const { decompress, fileDataStart } = await readEntryDataHeader(reader, rawEntry);
+    if (!decompress) {
+        const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, rawEntry.compressedSize, type);
+        if (isBlob(typedArrayOrBlob)) {
+            return typedArrayOrBlob;
+        }
+        return new Blob([typedArrayOrBlob], { type });
     }
- … (6 lines)
-    const result = await inflateRawAsync(typedArrayOrBlob, rawEntry.uncompressedSize, type);
-    return result;
+    // Here's the issue with this mess (should refactor?)
+    // if the source is a blob then we really want to pass a blob to inflateRawAsync to avoid a large
+    // copy if we're going to a worker.
+    const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, rawEntry.compressedSize);
+    const result = await inflateRawAsync(typedArrayOrBlob instanceof Uint8Array ? typedArrayOrBlob : typedArrayOrBlob, rawEntry.uncompressedSize, type);
+    return result;
 }
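
Note: readEntryDataAsBlob backs ZipEntry.blob(type); passing a MIME type produces a Blob usable directly, e.g. as an image source. Sketch (entry name hypothetical):

    const {entries} = await unzip(url);
    const blob = await entries['photo.png'].blob('image/png');
    const img = new Image();
    img.src = URL.createObjectURL(blob);
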
- … (2 lines)
-setOptions(options);
+function setOptions(options) {
+    setOptions$1(options);
 }
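
Note: the exported setOptions simply forwards to the inflate module's setOptions (renamed setOptions$1 by the bundler). The option names below are from the package README, not visible in this hunk:

    import {setOptions, unzip} from 'unzipit';

    setOptions({
      workerURL: 'path/to/unzipit-worker.module.js', // decompress on workers
      numWorkers: 2,
    });
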
-
 async function unzipRaw(source) {
- … (3 lines)
-    } else if (source instanceof ArrayBuffer || (source && source.buffer && source.buffer instanceof ArrayBuffer)) {
-        reader = new ArrayBufferReader(source);
-    } else if (isSharedArrayBuffer(source) || isSharedArrayBuffer(source.buffer)) {
-        reader = new ArrayBufferReader(source);
-    } else if (typeof source === 'string') {
-        const req = await fetch(source);
-        if (!req.ok) {
-            throw new Error(`failed http request ${source}, status: ${req.status}: ${req.statusText}`);
+    let reader;
+    if (typeof Blob !== 'undefined' && source instanceof Blob) {
+        reader = new BlobReader(source);
     }
- … (15 lines)
+    else if (source instanceof ArrayBuffer || (source && source.buffer && source.buffer instanceof ArrayBuffer)) {
+        reader = new ArrayBufferReader(source);
+    }
+    else if (isSharedArrayBuffer(source) || isSharedArrayBuffer(source.buffer)) {
+        reader = new ArrayBufferReader(source);
+    }
+    else if (typeof source === 'string') {
+        const req = await fetch(source);
+        if (!req.ok) {
+            throw new Error(`failed http request ${source}, status: ${req.status}: ${req.statusText}`);
+        }
+        const blob = await req.blob();
+        reader = new BlobReader(blob);
+    }
+    else if (typeof source.getLength === 'function' && typeof source.read === 'function') {
+        reader = source;
+    }
+    else {
+        throw new Error('unsupported source type');
+    }
+    const totalLength = await reader.getLength();
+    if (totalLength > Number.MAX_SAFE_INTEGER) {
+        throw new Error(`file too large. size: ${totalLength}. Only file sizes up 4503599627370496 bytes are supported`);
+    }
+    return await findEndOfCentralDirector(reader, totalLength);
 }
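
Note: the getLength/read duck-typing above means any object implementing async getLength() and async read(offset, length) can be a source; HTTPRangeReader works this way, and so can a custom reader. A hypothetical Node file reader (not part of the package):

    const fs = require('fs').promises;

    class NodeFileReader {
      constructor(filehandle, size) {
        this.filehandle = filehandle;
        this.size = size;
      }
      async getLength() {
        return this.size;
      }
      async read(offset, length) {
        // read `length` bytes starting at `offset` into a fresh view
        const data = new Uint8Array(length);
        await this.filehandle.read(data, 0, length, offset);
        return data;
      }
    }

    // const fh = await fs.open('some.zip', 'r');
    // const {entries} = await unzip(new NodeFileReader(fh, (await fh.stat()).size));
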
-
 // If the names are not utf8 you should use unzipitRaw
 async function unzip(source) {
- … (5 lines)
+    const { zip, entries } = await unzipRaw(source);
+    return {
+        zip,
+        entries: Object.fromEntries(entries.map(v => [v.name, v])),
+    };
 }
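
Note: unzip is the name-keyed convenience wrapper over unzipRaw (Object.fromEntries requires an ES2019+ runtime). Typical use, per the package README (entry names hypothetical):

    const {entries} = await unzip('https://example.com/stuff.zip');
    const text = await entries['readme.txt'].text();
    const json = await entries['data.json'].json();
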
- … (2 lines)
-cleanup();
+function cleanup() {
+    cleanup$1();
 }

+exports.ArrayBufferReader = ArrayBufferReader;
+exports.BlobReader = BlobReader;
 exports.HTTPRangeReader = HTTPRangeReader;
-exports.
-exports.
+exports.ZipEntry = ZipEntry;
+exports.cleanup = cleanup;
+exports.setOptions = setOptions;
 exports.unzip = unzip;
 exports.unzipRaw = unzipRaw;

- … (2 lines)
-})));
+}));
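
Note: cleanup forwards to the inflate module's cleanup (renamed cleanup$1 by the bundler); per the package README it shuts down any workers unzipit started, e.g. once a page is done unzipping:

    import {cleanup} from 'unzipit';
    cleanup();
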
|