unzipit 1.4.3 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +10 -5
- package/dist/ArrayBufferReader.d.ts +7 -0
- package/dist/BlobReader.d.ts +12 -0
- package/dist/HTTPRangeReader.d.ts +8 -0
- package/dist/inflate-worker.d.ts +1 -0
- package/dist/inflate.d.ts +8 -0
- package/dist/unzipit-worker.js +91 -355
- package/dist/unzipit-worker.min.js +1 -12
- package/dist/unzipit-worker.module.js +89 -353
- package/dist/unzipit.d.ts +71 -56
- package/dist/unzipit.js +733 -1031
- package/dist/unzipit.min.js +1 -31
- package/dist/unzipit.module.js +727 -1026
- package/dist/utils.d.ts +6 -0
- package/package.json +25 -25
- package/dist/unzipit.module.d.ts +0 -59
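
For orientation before the per-file diff: a minimal usage sketch of the 2.0.0 build. It assumes the same `unzip` entry point as 1.x (not shown in this diff); the URL and entry name are placeholders.

```js
import { unzip, HTTPRangeReader } from 'unzipit';

async function main() {
  // HTTPRangeReader (shown in the diff below) fetches only the byte ranges it needs.
  const reader = new HTTPRangeReader('https://example.com/archive.zip');
  const { entries } = await unzip(reader);
  for (const [name, entry] of Object.entries(entries)) {
    console.log(name, entry.size);
  }
  const text = await entries['hello.txt'].text(); // hypothetical entry name
  console.log(text);
}
main();
```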
package/dist/unzipit.module.js
CHANGED
@@ -1,379 +1,110 @@
-/* unzipit@1.4.3, license MIT */
-… (2 lines not shown)
+/* unzipit@2.0.0, license MIT */
+var _a, _b;
 function readBlobAsArrayBuffer(blob) {
-… (7 lines not shown)
+    if (blob.arrayBuffer) {
+        return blob.arrayBuffer();
+    }
+    return new Promise((resolve, reject) => {
+        const reader = new FileReader();
+        reader.addEventListener('loadend', () => {
+            resolve(reader.result);
+        });
+        reader.addEventListener('error', reject);
+        reader.readAsArrayBuffer(blob);
     });
-    reader.addEventListener('error', reject);
-    reader.readAsArrayBuffer(blob);
-  });
 }
-
 async function readBlobAsUint8Array(blob) {
-… (2 lines not shown)
+    const arrayBuffer = await readBlobAsArrayBuffer(blob);
+    return new Uint8Array(arrayBuffer);
 }
-
 function isBlob(v) {
-… (1 line not shown)
+    return typeof Blob !== 'undefined' && v instanceof Blob;
 }
-
 function isSharedArrayBuffer(b) {
-… (1 line not shown)
+    return typeof SharedArrayBuffer !== 'undefined' && b instanceof SharedArrayBuffer;
 }
-… (2 lines not shown)
-  (typeof process !== 'undefined') &&
-  process.versions &&
-  (typeof process.versions.node !== 'undefined') &&
-  (typeof process.versions.electron === 'undefined');
-
+const isNode = (typeof process !== 'undefined') &&
+    !!(process === null || process === void 0 ? void 0 : process.versions) &&
+    (typeof ((_a = process === null || process === void 0 ? void 0 : process.versions) === null || _a === void 0 ? void 0 : _a.node) !== 'undefined') &&
+    (typeof ((_b = process === null || process === void 0 ? void 0 : process.versions) === null || _b === void 0 ? void 0 : _b.electron) === 'undefined');
 function isTypedArraySameAsArrayBuffer(typedArray) {
-… (1 line not shown)
+    return typedArray.byteOffset === 0 && typedArray.byteLength === typedArray.buffer.byteLength;
 }
 
 class ArrayBufferReader {
-… (11 lines not shown)
+    constructor(arrayBufferOrView) {
+        this.typedArray = (arrayBufferOrView instanceof ArrayBuffer || isSharedArrayBuffer(arrayBufferOrView))
+            ? new Uint8Array(arrayBufferOrView)
+            : new Uint8Array(arrayBufferOrView.buffer, arrayBufferOrView.byteOffset, arrayBufferOrView.byteLength);
+    }
+    async getLength() {
+        return this.typedArray.byteLength;
+    }
+    async read(offset, length) {
+        // Cast is necessary: backing buffer may be SharedArrayBuffer (ArrayBufferLike),
+        // but callers need Uint8Array<ArrayBuffer>. The data is read-only view so this is safe.
+        return new Uint8Array(this.typedArray.buffer, this.typedArray.byteOffset + offset, length);
+    }
 }
 
 class BlobReader {
-… (14 lines not shown)
+    constructor(blob) {
+        this.blob = blob;
+    }
+    async getLength() {
+        return this.blob.size;
+    }
+    async read(offset, length) {
+        const blob = this.blob.slice(offset, offset + length);
+        const arrayBuffer = await readBlobAsArrayBuffer(blob);
+        return new Uint8Array(arrayBuffer);
+    }
+    async sliceAsBlob(offset, length, type = '') {
+        return this.blob.slice(offset, offset + length, type);
+    }
 }
 
 class HTTPRangeReader {
-… (2 lines not shown)
-  }
-  async getLength() {
-    if (this.length === undefined) {
-      const req = await fetch(this.url, { method: 'HEAD' });
-      if (!req.ok) {
-        throw new Error(`failed http request ${this.url}, status: ${req.status}: ${req.statusText}`);
-      }
-      this.length = parseInt(req.headers.get('content-length'));
-      if (Number.isNaN(this.length)) {
-        throw Error('could not get length');
-      }
+    constructor(url) {
+        this.url = url;
     }
-… (5 lines not shown)
+    async getLength() {
+        if (this.length === undefined) {
+            const req = await fetch(this.url, { method: 'HEAD' });
+            if (!req.ok) {
+                throw new Error(`failed http request ${this.url}, status: ${req.status}: ${req.statusText}`);
+            }
+            this.length = parseInt(req.headers.get('content-length'));
+            if (Number.isNaN(this.length)) {
+                throw Error('could not get length');
+            }
+        }
+        return this.length;
    }
-… (7 lines not shown)
+    async read(offset, size) {
+        if (size === 0) {
+            return new Uint8Array(0);
+        }
+        const req = await fetch(this.url, {
+            headers: {
+                Range: `bytes=${offset}-${offset + size - 1}`,
+            },
+        });
+        if (!req.ok) {
+            throw new Error(`failed http request ${this.url}, status: ${req.status} offset: ${offset} size: ${size}: ${req.statusText}`);
+        }
+        const buffer = await req.arrayBuffer();
+        return new Uint8Array(buffer);
    }
-    const buffer = await req.arrayBuffer();
-    return new Uint8Array(buffer);
-  }
 }
 
-
-var u8=Uint8Array;
-if(data[0]==3 && data[1]==0) return (buf ? buf : new u8(0));
-var bitsF = _bitsF, bitsE = _bitsE, decodeTiny = _decodeTiny, get17 = _get17;
-
-var noBuf = (buf==null);
-if(noBuf) buf = new u8((data.length>>>2)<<3);
-
-var BFINAL=0, BTYPE=0, HLIT=0, HDIST=0, HCLEN=0, ML=0, MD=0;
-var off = 0, pos = 0;
-var lmap, dmap;
-
-while(BFINAL==0) {
-BFINAL = bitsF(data, pos , 1);
-BTYPE = bitsF(data, pos+1, 2); pos+=3;
-//console.log(BFINAL, BTYPE);
-
-if(BTYPE==0) {
-if((pos&7)!=0) pos+=8-(pos&7);
-var p8 = (pos>>>3)+4, len = data[p8-4]|(data[p8-3]<<8); //console.log(len);//bitsF(data, pos, 16),
-if(noBuf) buf=_check(buf, off+len);
-buf.set(new u8(data.buffer, data.byteOffset+p8, len), off);
-//for(var i=0; i<len; i++) buf[off+i] = data[p8+i];
-//for(var i=0; i<len; i++) if(buf[off+i] != data[p8+i]) throw "e";
-pos = ((p8+len)<<3); off+=len; continue;
-}
-if(noBuf) buf=_check(buf, off+(1<<17)); // really not enough in many cases (but PNG and ZIP provide buffer in advance)
-if(BTYPE==1) { lmap = U.flmap; dmap = U.fdmap; ML = (1<<9)-1; MD = (1<<5)-1; }
-if(BTYPE==2) {
-HLIT = bitsE(data, pos , 5)+257;
-HDIST = bitsE(data, pos+ 5, 5)+ 1;
-HCLEN = bitsE(data, pos+10, 4)+ 4; pos+=14;
-for(var i=0; i<38; i+=2) { U.itree[i]=0; U.itree[i+1]=0; }
-var tl = 1;
-for(var i=0; i<HCLEN; i++) { var l=bitsE(data, pos+i*3, 3); U.itree[(U.ordr[i]<<1)+1] = l; if(l>tl)tl=l; } pos+=3*HCLEN; //console.log(itree);
-makeCodes(U.itree, tl);
-codes2map(U.itree, tl, U.imap);
-
-lmap = U.lmap; dmap = U.dmap;
-
-pos = decodeTiny(U.imap, (1<<tl)-1, HLIT+HDIST, data, pos, U.ttree);
-var mx0 = _copyOut(U.ttree, 0, HLIT , U.ltree); ML = (1<<mx0)-1;
-var mx1 = _copyOut(U.ttree, HLIT, HDIST, U.dtree); MD = (1<<mx1)-1;
-
-//var ml = decodeTiny(U.imap, (1<<tl)-1, HLIT , data, pos, U.ltree); ML = (1<<(ml>>>24))-1; pos+=(ml&0xffffff);
-makeCodes(U.ltree, mx0);
-codes2map(U.ltree, mx0, lmap);
-
-//var md = decodeTiny(U.imap, (1<<tl)-1, HDIST, data, pos, U.dtree); MD = (1<<(md>>>24))-1; pos+=(md&0xffffff);
-makeCodes(U.dtree, mx1);
-codes2map(U.dtree, mx1, dmap);
-}
-//var ooff=off, opos=pos;
-while(true) {
-var code = lmap[get17(data, pos) & ML]; pos += code&15;
-var lit = code>>>4; //U.lhst[lit]++;
-if((lit>>>8)==0) { buf[off++] = lit; }
-else if(lit==256) { break; }
-else {
-var end = off+lit-254;
-if(lit>264) { var ebs = U.ldef[lit-257]; end = off + (ebs>>>3) + bitsE(data, pos, ebs&7); pos += ebs&7; }
-//dst[end-off]++;
-
-var dcode = dmap[get17(data, pos) & MD]; pos += dcode&15;
-var dlit = dcode>>>4;
-var dbs = U.ddef[dlit], dst = (dbs>>>4) + bitsF(data, pos, dbs&15); pos += dbs&15;
-
-//var o0 = off-dst, stp = Math.min(end-off, dst);
-//if(stp>20) while(off<end) { buf.copyWithin(off, o0, o0+stp); off+=stp; } else
-//if(end-dst<=off) buf.copyWithin(off, off-dst, end-dst); else
-//if(dst==1) buf.fill(buf[off-1], off, end); else
-if(noBuf) buf=_check(buf, off+(1<<17));
-while(off<end) { buf[off]=buf[off++-dst]; buf[off]=buf[off++-dst]; buf[off]=buf[off++-dst]; buf[off]=buf[off++-dst]; }
-off=end;
-//while(off!=end) { buf[off]=buf[off++-dst]; }
-}
-}
-//console.log(off-ooff, (pos-opos)>>>3);
-}
-//console.log(dst);
-//console.log(tlen, dlen, off-tlen+tcnt);
-return buf.length==off ? buf : buf.slice(0,off);
-}
-function _check(buf, len) {
-var bl=buf.length; if(len<=bl) return buf;
-var nbuf = new Uint8Array(Math.max(bl<<1,len)); nbuf.set(buf,0);
-//for(var i=0; i<bl; i+=4) { nbuf[i]=buf[i]; nbuf[i+1]=buf[i+1]; nbuf[i+2]=buf[i+2]; nbuf[i+3]=buf[i+3]; }
-return nbuf;
-}
-
-function _decodeTiny(lmap, LL, len, data, pos, tree) {
-var bitsE = _bitsE, get17 = _get17;
-var i = 0;
-while(i<len) {
-var code = lmap[get17(data, pos)&LL]; pos+=code&15;
-var lit = code>>>4;
-if(lit<=15) { tree[i]=lit; i++; }
-else {
-var ll = 0, n = 0;
-if(lit==16) {
-n = (3 + bitsE(data, pos, 2)); pos += 2; ll = tree[i-1];
-}
-else if(lit==17) {
-n = (3 + bitsE(data, pos, 3)); pos += 3;
-}
-else if(lit==18) {
-n = (11 + bitsE(data, pos, 7)); pos += 7;
-}
-var ni = i+n;
-while(i<ni) { tree[i]=ll; i++; }
-}
-}
-return pos;
-}
-function _copyOut(src, off, len, tree) {
-var mx=0, i=0, tl=tree.length>>>1;
-while(i<len) { var v=src[i+off]; tree[(i<<1)]=0; tree[(i<<1)+1]=v; if(v>mx)mx=v; i++; }
-while(i<tl ) { tree[(i<<1)]=0; tree[(i<<1)+1]=0; i++; }
-return mx;
-}
-
-function makeCodes(tree, MAX_BITS) {  // code, length
-var max_code = tree.length;
-var code, bits, n, i, len;
-
-var bl_count = U.bl_count; for(var i=0; i<=MAX_BITS; i++) bl_count[i]=0;
-for(i=1; i<max_code; i+=2) bl_count[tree[i]]++;
-
-var next_code = U.next_code; // smallest code for each length
-
-code = 0;
-bl_count[0] = 0;
-for (bits = 1; bits <= MAX_BITS; bits++) {
-code = (code + bl_count[bits-1]) << 1;
-next_code[bits] = code;
-}
-
-for (n = 0; n < max_code; n+=2) {
-len = tree[n+1];
-if (len != 0) {
-tree[n] = next_code[len];
-next_code[len]++;
-}
-}
-}
-function codes2map(tree, MAX_BITS, map) {
-var max_code = tree.length;
-var r15 = U.rev15;
-for(var i=0; i<max_code; i+=2) if(tree[i+1]!=0) {
-var lit = i>>1;
-var cl = tree[i+1], val = (lit<<4)|cl; // : (0x8000 | (U.of0[lit-257]<<7) | (U.exb[lit-257]<<4) | cl);
-var rest = (MAX_BITS-cl), i0 = tree[i]<<rest, i1 = i0 + (1<<rest);
-//tree[i]=r15[i0]>>>(15-MAX_BITS);
-while(i0!=i1) {
-var p0 = r15[i0]>>>(15-MAX_BITS);
-map[p0]=val; i0++;
-}
-}
-}
-function revCodes(tree, MAX_BITS) {
-var r15 = U.rev15, imb = 15-MAX_BITS;
-for(var i=0; i<tree.length; i+=2) { var i0 = (tree[i]<<(MAX_BITS-tree[i+1])); tree[i] = r15[i0]>>>imb; }
-}
-
-function _bitsE(dt, pos, length) { return ((dt[pos>>>3] | (dt[(pos>>>3)+1]<<8) )>>>(pos&7))&((1<<length)-1); }
-function _bitsF(dt, pos, length) { return ((dt[pos>>>3] | (dt[(pos>>>3)+1]<<8) | (dt[(pos>>>3)+2]<<16))>>>(pos&7))&((1<<length)-1); }
-/*
-function _get9(dt, pos) {
-return ((dt[pos>>>3] | (dt[(pos>>>3)+1]<<8))>>>(pos&7))&511;
-} */
-function _get17(dt, pos) {	// return at least 17 meaningful bytes
-return (dt[pos>>>3] | (dt[(pos>>>3)+1]<<8) | (dt[(pos>>>3)+2]<<16) )>>>(pos&7);
-}
-const U = function(){
-var u16=Uint16Array, u32=Uint32Array;
-return {
-next_code : new u16(16),
-bl_count : new u16(16),
-ordr : [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ],
-of0 : [3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,999,999,999],
-exb : [0,0,0,0,0,0,0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0, 0],
-ldef : new u16(32),
-df0 : [1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577, 65535, 65535],
-dxb : [0,0,0,0,1,1,2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 0, 0],
-ddef : new u32(32),
-flmap: new u16( 512), fltree: [],
-fdmap: new u16( 32), fdtree: [],
-lmap : new u16(32768), ltree : [], ttree:[],
-dmap : new u16(32768), dtree : [],
-imap : new u16( 512), itree : [],
-//rev9 : new u16( 512)
-rev15: new u16(1<<15),
-lhst : new u32(286), dhst : new u32( 30), ihst : new u32(19),
-lits : new u32(15000),
-strt : new u16(1<<16),
-prev : new u16(1<<15)
-};
-} ();
-
-(function(){
-var len = 1<<15;
-for(var i=0; i<len; i++) {
-var x = i;
-x = (((x & 0xaaaaaaaa) >>> 1) | ((x & 0x55555555) << 1));
-x = (((x & 0xcccccccc) >>> 2) | ((x & 0x33333333) << 2));
-x = (((x & 0xf0f0f0f0) >>> 4) | ((x & 0x0f0f0f0f) << 4));
-x = (((x & 0xff00ff00) >>> 8) | ((x & 0x00ff00ff) << 8));
-U.rev15[i] = (((x >>> 16) | (x << 16)))>>>17;
-}
-
-function pushV(tgt, n, sv) { while(n--!=0) tgt.push(0,sv); }
-
-for(var i=0; i<32; i++) { U.ldef[i]=(U.of0[i]<<3)|U.exb[i]; U.ddef[i]=(U.df0[i]<<4)|U.dxb[i]; }
-
-pushV(U.fltree, 144, 8); pushV(U.fltree, 255-143, 9); pushV(U.fltree, 279-255, 7); pushV(U.fltree,287-279,8);
-/*
-var i = 0;
-for(; i<=143; i++) U.fltree.push(0,8);
-for(; i<=255; i++) U.fltree.push(0,9);
-for(; i<=279; i++) U.fltree.push(0,7);
-for(; i<=287; i++) U.fltree.push(0,8);
-*/
-makeCodes(U.fltree, 9);
-codes2map(U.fltree, 9, U.flmap);
-revCodes (U.fltree, 9);
-
-pushV(U.fdtree,32,5);
-//for(i=0;i<32; i++) U.fdtree.push(0,5);
-makeCodes(U.fdtree, 5);
-codes2map(U.fdtree, 5, U.fdmap);
-revCodes (U.fdtree, 5);
-
-pushV(U.itree,19,0); pushV(U.ltree,286,0); pushV(U.dtree,30,0); pushV(U.ttree,320,0);
-/*
-for(var i=0; i< 19; i++) U.itree.push(0,0);
-for(var i=0; i<286; i++) U.ltree.push(0,0);
-for(var i=0; i< 30; i++) U.dtree.push(0,0);
-for(var i=0; i<320; i++) U.ttree.push(0,0);
-*/
-})();
-
-const crc = {
-table : ( function() {
-var tab = new Uint32Array(256);
-for (var n=0; n<256; n++) {
-var c = n;
-for (var k=0; k<8; k++) {
-if (c & 1) c = 0xedb88320 ^ (c >>> 1);
-else c = c >>> 1;
-}
-tab[n] = c; }
-return tab; })(),
-update : function(c, buf, off, len) {
-for (var i=0; i<len; i++) c = crc.table[(c ^ buf[off+i]) & 0xff] ^ (c >>> 8);
-return c;
-},
-crc : function(b,o,l) { return crc.update(0xffffffff,b,o,l) ^ 0xffffffff; }
-};
-
-function inflateRaw(file, buf) { return inflate(file, buf); }
-
-/* global module */
-
+/* global DecompressionStream */
 const config = {
-… (3 lines not shown)
+    numWorkers: 1,
+    workerURL: '',
+    useWorkers: false,
 };
-
 let nextId = 0;
-
 // Requests are put on a queue.
 // We don't send the request to the worker until the worker
 // is finished. This probably adds a small amount of latency
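
The hunk above drops the bundled hand-rolled inflate entirely and keeps only the readers. Note the reader shape it establishes: anything with async `getLength()` and `read(offset, length)` appears to satisfy it, with `sliceAsBlob()` as an optional fast path (only `BlobReader` has it). Below is a hedged sketch of a custom Node reader built on that assumption; `FileHandleReader` is hypothetical, not part of the package.

```js
import { open } from 'node:fs/promises';

// Duck-typed reader: async getLength() and read(offset, length) returning a Uint8Array.
class FileHandleReader {
  constructor(fileHandle, size) {
    this.fileHandle = fileHandle;
    this.size = size;
  }
  async getLength() {
    return this.size;
  }
  async read(offset, length) {
    const buffer = new Uint8Array(length);
    // fs.promises FileHandle.read(buffer, offset, length, position)
    await this.fileHandle.read(buffer, 0, length, offset);
    return buffer;
  }
}

// const handle = await open('archive.zip', 'r');
// const reader = new FileHandleReader(handle, (await handle.stat()).size);
```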
@@ -382,225 +113,246 @@ let nextId = 0;
 // do. You don't know which will finish first. If you give
 // the worker with more work to do the request then you'll
 // waste time.
-
 // note: we can't check `workers.length` for deciding if
 // we've reached `config.numWorkers` because creation the worker
 // is async which means other requests to make workers might
 // come in before a worker gets added to `workers`
 let numWorkers = 0;
-let canUseWorkers = true;
+let canUseWorkers = true; // gets set to false if we can't start a worker
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
 const workers = [];
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
 const availableWorkers = [];
 const waitingForWorkerQueue = [];
 const currentlyProcessingIdToRequestMap = new Map();
-
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
 function handleResult(e) {
-… (9 lines not shown)
+    makeWorkerAvailable(e.target);
+    const { id, error, data } = e.data;
+    const request = currentlyProcessingIdToRequestMap.get(id);
+    currentlyProcessingIdToRequestMap.delete(id);
+    if (error) {
+        request.reject(error);
+    }
+    else {
+        request.resolve(data);
+    }
 }
-
 // Because Firefox uses non-standard onerror to signal an error.
 function startWorker(url) {
-… (13 lines not shown)
+    return new Promise((resolve, reject) => {
+        const worker = new Worker(url);
+        worker.onmessage = (e) => {
+            if (e.data === 'start') {
+                worker.onerror = null;
+                worker.onmessage = null;
+                resolve(worker);
+            }
+            else {
+                reject(new Error(`unexpected message: ${e.data}`));
+            }
+        };
+        worker.onerror = reject;
+    });
 }
-… (3 lines not shown)
+const workerHelper = (function () {
+    if (isNode) {
+        return {
+            // eslint-disable-next-line @typescript-eslint/no-explicit-any
+            async createWorker(url) {
+                const { Worker } = await import('worker_threads');
+                return new Worker(url);
+            },
+            // eslint-disable-next-line @typescript-eslint/no-explicit-any
+            addEventListener(worker, fn) {
+                // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                worker.on('message', (data) => {
+                    fn({ target: worker, data });
+                });
+            },
+            // eslint-disable-next-line @typescript-eslint/no-explicit-any
+            async terminate(worker) {
+                await worker.terminate();
+            },
+        };
+    }
+    else {
+        return {
+            // eslint-disable-next-line @typescript-eslint/no-explicit-any
+            async createWorker(url) {
+                // I don't understand this security issue
+                // Apparently there is some iframe setting or http header
+                // that prevents cross domain workers. But, I can manually
+                // download the text and do it. I reported this to Chrome
+                // and they said it was fine so ¯\_(ツ)_/¯
+                try {
+                    const worker = await startWorker(url);
+                    return worker;
+                }
+                catch (_e) {
+                    console.warn('could not load worker:', url);
+                }
+                let text;
+                try {
+                    const req = await fetch(url, { mode: 'cors' });
+                    if (!req.ok) {
+                        throw new Error(`could not load: ${url}`);
+                    }
+                    text = await req.text();
+                    url = URL.createObjectURL(new Blob([text], { type: 'application/javascript' }));
+                    const worker = await startWorker(url);
+                    config.workerURL = url; // this is a hack. What's a better way to structure this code?
+                    return worker;
+                }
+                catch (_e) {
+                    console.warn('could not load worker via fetch:', url);
+                }
+                if (text !== undefined) {
+                    try {
+                        url = `data:application/javascript;base64,${btoa(text)}`;
+                        const worker = await startWorker(url);
+                        config.workerURL = url;
+                        return worker;
+                    }
+                    catch (_e) {
+                        console.warn('could not load worker via dataURI');
+                    }
+                }
+                console.warn('workers will not be used');
+                throw new Error('can not start workers');
+            },
+            // eslint-disable-next-line @typescript-eslint/no-explicit-any
+            addEventListener(worker, fn) {
+                worker.addEventListener('message', fn);
+            },
+            // eslint-disable-next-line @typescript-eslint/no-explicit-any
+            async terminate(worker) {
+                worker.terminate();
+            },
+        };
+    }
+}());
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+function makeWorkerAvailable(worker) {
+    availableWorkers.push(worker);
+    processWaitingForWorkerQueue();
 }
-… (4 lines not shown)
-    const {Worker} = dynamicRequire(module, 'worker_threads');
-    return {
-      async createWorker(url) {
-        return new Worker(url);
-      },
-      addEventListener(worker, fn) {
-        worker.on('message', (data) => {
-          fn({target: worker, data});
-        });
-      },
-      async terminate(worker) {
-        await worker.terminate();
-      },
-    };
-  } else {
-    return {
-      async createWorker(url) {
-        // I don't understand this security issue
-        // Apparently there is some iframe setting or http header
-        // that prevents cross domain workers. But, I can manually
-        // download the text and do it. I reported this to Chrome
-        // and they said it was fine so ¯\_(ツ)_/¯
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+async function getAvailableWorker() {
+    if (availableWorkers.length === 0 && numWorkers < config.numWorkers) {
+        ++numWorkers; // see comment at numWorkers declaration
         try {
-… (4 lines not shown)
+            const worker = await workerHelper.createWorker(config.workerURL);
+            workers.push(worker);
+            availableWorkers.push(worker);
+            workerHelper.addEventListener(worker, handleResult);
         }
-… (3 lines not shown)
-          const req = await fetch(url, {mode: 'cors'});
-          if (!req.ok) {
-            throw new Error(`could not load: ${url}`);
-          }
-          text = await req.text();
-          url = URL.createObjectURL(new Blob([text], {type: 'application/javascript'}));
-          const worker = await startWorker(url);
-          config.workerURL = url;  // this is a hack. What's a better way to structure this code?
-          return worker;
-        } catch (e) {
-          console.warn('could not load worker via fetch:', url);
+        catch (_e) {
+            // set this global out-of-band (needs refactor)
+            canUseWorkers = false;
         }
-… (10 lines not shown)
+    }
+    return availableWorkers.pop();
+}
+async function decompressRaw(src) {
+    const ds = new DecompressionStream('deflate-raw');
+    const writer = ds.writable.getWriter();
+    // Do not await the write — doing so before reading causes a deadlock when
+    // the internal buffer fills due to backpressure.
+    writer.write(src).then(() => writer.close()).catch(() => { });
+    const chunks = [];
+    const reader = ds.readable.getReader();
+    for (;;) {
+        const { done, value } = await reader.read();
+        if (done) {
+            break;
         }
-… (8 lines not shown)
-        worker.terminate();
-      },
-    };
-  }
-}());
-
-function makeWorkerAvailable(worker) {
-  availableWorkers.push(worker);
-  processWaitingForWorkerQueue();
-}
-
-async function getAvailableWorker() {
-  if (availableWorkers.length === 0 && numWorkers < config.numWorkers) {
-    ++numWorkers;  // see comment at numWorkers declaration
-    try {
-      const worker = await workerHelper.createWorker(config.workerURL);
-      workers.push(worker);
-      availableWorkers.push(worker);
-      workerHelper.addEventListener(worker, handleResult);
-    } catch (e) {
-      // set this global out-of-band (needs refactor)
-      canUseWorkers = false;
+        chunks.push(value);
+    }
+    const size = chunks.reduce((s, c) => s + c.byteLength, 0);
+    const result = new Uint8Array(size);
+    let offset = 0;
+    for (const chunk of chunks) {
+        result.set(chunk, offset);
+        offset += chunk.byteLength;
     }
-
-  return availableWorkers.pop();
+    return result;
 }
-
 // @param {Uint8Array} src
-// @param {number} uncompressedSize
 // @param {string} [type] mime-type
 // @returns {ArrayBuffer|Blob} ArrayBuffer if type is falsy or Blob otherwise.
-function inflateRawLocal(src,
-… (5 lines not shown)
+async function inflateRawLocal(src, type, resolve, reject) {
+    try {
+        const dst = await decompressRaw(src);
+        resolve(type ? new Blob([dst], { type }) : dst.buffer);
+    }
+    catch (e) {
+        reject(e);
+    }
 }
-
 async function processWaitingForWorkerQueue() {
-… (13 lines not shown)
+    if (waitingForWorkerQueue.length === 0) {
+        return;
+    }
+    if (config.useWorkers && canUseWorkers) {
+        const worker = await getAvailableWorker();
+        // canUseWorkers might have been set out-of-band (need refactor)
+        if (canUseWorkers) {
+            if (worker) {
+                if (waitingForWorkerQueue.length === 0) {
+                    // the queue might be empty while we awaited for a worker.
+                    makeWorkerAvailable(worker);
+                    return;
+                }
+                const { id, src, uncompressedSize, type, resolve, reject } = waitingForWorkerQueue.shift();
+                currentlyProcessingIdToRequestMap.set(id, { id, src, uncompressedSize, type, resolve, reject });
+                const transferables = [];
+                // NOTE: Originally I thought you could transfer an ArrayBuffer.
+                // The code on this side is often using views into the entire file
+                // which means if we transferred we'd lose the entire file. That sucks
+                // because it means there's an expensive copy to send the uncompressed
+                // data to the worker.
+                //
+                // Also originally I thought we could send a Blob but we'd need to refactor
+                // the code in unzipit/readEntryData as currently it reads the uncompressed
+                // bytes.
+                //
+                //if (!isBlob(src) && !isSharedArrayBuffer(src)) {
+                //  transferables.push(src);
+                //}
+                worker.postMessage({
+                    type: 'inflate',
+                    data: {
+                        id,
+                        type,
+                        src,
+                        uncompressedSize,
+                    },
+                }, transferables);
+            }
+            return;
 }
-      const {id, src, uncompressedSize, type, resolve, reject} = waitingForWorkerQueue.shift();
-      currentlyProcessingIdToRequestMap.set(id, {id, resolve, reject});
-      const transferables = [];
-      // NOTE: Originally I thought you could transfer an ArrayBuffer.
-      // The code on this side is often using views into the entire file
-      // which means if we transferred we'd lose the entire file. That sucks
-      // because it means there's an expensive copy to send the uncompressed
-      // data to the worker.
-      //
-      // Also originally I thought we could send a Blob but we'd need to refactor
-      // the code in unzipit/readEntryData as currently it reads the uncompressed
-      // bytes.
-      //
-      //if (!isBlob(src) && !isSharedArrayBuffer(src)) {
-      //  transferables.push(src);
-      //}
-      worker.postMessage({
-        type: 'inflate',
-        data: {
-          id,
-          type,
-          src,
-          uncompressedSize,
-        },
-      }, transferables);
-    }
-    return;
 }
-… (9 lines not shown)
-  let data = src;
-  if (isBlob(src)) {
-    data = await readBlobAsUint8Array(src);
+    // inflate locally
+    // We loop here because what happens if many requests happen at once
+    // the first N requests will try to async make a worker. Other requests
+    // will then be on the queue. But if we fail to make workers then there
+    // are pending requests.
+    while (waitingForWorkerQueue.length) {
+        const { src, type, resolve, reject } = waitingForWorkerQueue.shift();
+        const data = isBlob(src) ? await readBlobAsUint8Array(src) : src;
+        inflateRawLocal(data, type, resolve, reject);
     }
-  inflateRawLocal(data, uncompressedSize, type, resolve);
-  }
 }
-… (8 lines not shown)
-  config.numWorkers = options.numWorkers || config.numWorkers;
+function setOptions$1(options) {
+    config.workerURL = options.workerURL || config.workerURL;
+    // there's no reason to set the workerURL if you're not going to use workers
+    if (options.workerURL) {
+        config.useWorkers = true;
+    }
+    config.useWorkers = options.useWorkers !== undefined ? options.useWorkers : config.useWorkers;
+    config.numWorkers = options.numWorkers || config.numWorkers;
 }
-
 // It has to take non-zero time to put a large typed array in a Blob since the very
 // next instruction you could change the contents of the array. So, if you're reading
 // the zip file for images/video/audio then all you want is a Blob on which to get a URL.
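
The `decompressRaw` added above drains the `DecompressionStream` with a manual read loop and deliberately leaves `writer.write()` un-awaited. A sketch of the same backpressure point using `Response` to drain instead (browser-only; this is not how the package does it, just an equivalent way to see the pattern):

```js
async function inflateViaResponse(src /* Uint8Array of raw-deflate data */) {
  const ds = new DecompressionStream('deflate-raw');
  const writer = ds.writable.getWriter();
  // Intentionally not awaited: awaiting the write before the readable side is
  // consumed can deadlock once the stream's internal buffer fills.
  writer.write(src).then(() => writer.close()).catch(() => {});
  // Response concatenates the readable side for us.
  return new Uint8Array(await new Response(ds.readable).arrayBuffer());
}
```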
@@ -614,113 +366,97 @@ function setOptions(options) {
 // @param {string} [type] falsy or mimeType string (eg: 'image/png')
 // @returns {ArrayBuffer|Blob} ArrayBuffer if type is falsy or Blob otherwise.
 function inflateRawAsync(src, uncompressedSize, type) {
-… (21 lines not shown)
+    return new Promise((resolve, reject) => {
+        // note: there is potential an expensive copy here. In order for the data
+        // to make it into the worker we need to copy the data to the worker unless
+        // it's a Blob or a SharedArrayBuffer.
+        //
+        // Solutions:
+        //
+        // 1. A minor enhancement, if `uncompressedSize` is small don't call the worker.
+        //
+        //    might be a win period as their is overhead calling the worker
+        //
+        // 2. Move the entire library to the worker
+        //
+        //    Good, Maybe faster if you pass a URL, Blob, or SharedArrayBuffer? Not sure about that
+        //    as those are also easy to transfer. Still slow if you pass an ArrayBuffer
+        //    as the ArrayBuffer has to be copied to the worker.
+        //
+        // I guess benchmarking is really the only thing to try.
+        waitingForWorkerQueue.push({ src, uncompressedSize, type, resolve, reject, id: nextId++ });
+        processWaitingForWorkerQueue();
+    });
 }
-
 function clearArray(arr) {
-
+    arr.splice(0, arr.length);
 }
-… (10 lines not shown)
-  canUseWorkers = true;
-}
-
-/*
-class Zip {
-  constructor(reader) {
-    comment,  // the comment for this entry
-    commentBytes,  // the raw comment for this entry
-  }
+async function cleanup$1() {
+    for (const worker of workers) {
+        await workerHelper.terminate(worker);
+    }
+    clearArray(workers);
+    clearArray(availableWorkers);
+    clearArray(waitingForWorkerQueue);
+    currentlyProcessingIdToRequestMap.clear();
+    numWorkers = 0;
+    canUseWorkers = true;
 }
-*/
 
 function dosDateTimeToDate(date, time) {
-… (9 lines not shown)
-  return new Date(year, month, day, hour, minute, second, millisecond);
+    const day = date & 0x1f; // 1-31
+    const month = (date >> 5 & 0xf) - 1; // 1-12, 0-11
+    const year = (date >> 9 & 0x7f) + 1980; // 0-128, 1980-2108
+    const millisecond = 0;
+    const second = (time & 0x1f) * 2; // 0-29, 0-58 (even numbers)
+    const minute = time >> 5 & 0x3f; // 0-59
+    const hour = time >> 11 & 0x1f; // 0-23
+    return new Date(year, month, day, hour, minute, second, millisecond);
 }
-
 class ZipEntry {
-… (34 lines not shown)
+    constructor(reader, rawEntry) {
+        this._reader = reader;
+        this._rawEntry = rawEntry;
+        this.name = rawEntry.name;
+        this.nameBytes = rawEntry.nameBytes;
+        this.size = rawEntry.uncompressedSize;
+        this.compressedSize = rawEntry.compressedSize;
+        this.comment = rawEntry.comment;
+        this.commentBytes = rawEntry.commentBytes;
+        this.compressionMethod = rawEntry.compressionMethod;
+        this.lastModDate = dosDateTimeToDate(rawEntry.lastModFileDate, rawEntry.lastModFileTime);
+        this.isDirectory = rawEntry.uncompressedSize === 0 && rawEntry.name.endsWith('/');
+        this.encrypted = !!(rawEntry.generalPurposeBitFlag & 0x1);
+        this.externalFileAttributes = rawEntry.externalFileAttributes;
+        this.versionMadeBy = rawEntry.versionMadeBy;
+    }
+    // returns a promise that returns a Blob for this entry
+    async blob(type = 'application/octet-stream') {
+        return await readEntryDataAsBlob(this._reader, this._rawEntry, type);
+    }
+    // returns a promise that returns an ArrayBuffer for this entry
+    async arrayBuffer() {
+        return await readEntryDataAsArrayBuffer(this._reader, this._rawEntry);
+    }
+    // returns text, assumes the text is valid utf8. If you want more options decode arrayBuffer yourself
+    async text() {
+        const buffer = await this.arrayBuffer();
+        return decodeBuffer(new Uint8Array(buffer));
+    }
+    // returns text with JSON.parse called on it. If you want more options decode arrayBuffer yourself
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    async json() {
+        const text = await this.text();
+        return JSON.parse(text);
+    }
 }
-
 const EOCDR_WITHOUT_COMMENT_SIZE = 22;
 const MAX_COMMENT_SIZE = 0xffff; // 2-byte size
 const EOCDR_SIGNATURE = 0x06054b50;
 const ZIP64_EOCDR_SIGNATURE = 0x06064b50;
-
 async function readAs(reader, offset, length) {
-
+    return await reader.read(offset, length);
 }
-
 // The point of this function is we want to be able to pass the data
 // to a worker as fast as possible so when decompressing if the data
 // is already a blob and we can get a blob then get a blob.
@@ -742,447 +478,412 @@ async function readAs(reader, offset, length) {
|
|
|
742
478
|
//
|
|
743
479
|
// For now this hack works even if it's not generic.
|
|
744
480
|
async function readAsBlobOrTypedArray(reader, offset, length, type) {
|
|
745
|
-
|
|
746
|
-
|
|
747
|
-
|
|
748
|
-
|
|
481
|
+
if (reader.sliceAsBlob) {
|
|
482
|
+
return await reader.sliceAsBlob(offset, length, type);
|
|
483
|
+
}
|
|
484
|
+
return await reader.read(offset, length);
|
|
749
485
|
}
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
},
|
|
486
|
+
const crc = {
|
|
487
|
+
unsigned() {
|
|
488
|
+
return 0;
|
|
489
|
+
},
|
|
755
490
|
};
|
|
756
|
-
|
|
757
491
|
function getUint16LE(uint8View, offset) {
|
|
758
|
-
|
|
759
|
-
|
|
492
|
+
return uint8View[offset] +
|
|
493
|
+
uint8View[offset + 1] * 0x100;
|
|
760
494
|
}
|
|
761
|
-
|
|
762
495
|
function getUint32LE(uint8View, offset) {
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
496
|
+
return uint8View[offset] +
|
|
497
|
+
uint8View[offset + 1] * 0x100 +
|
|
498
|
+
uint8View[offset + 2] * 0x10000 +
|
|
499
|
+
uint8View[offset + 3] * 0x1000000;
|
|
767
500
|
}
|
|
768
|
-
|
|
769
501
|
function getUint64LE(uint8View, offset) {
|
|
770
|
-
|
|
771
|
-
|
|
502
|
+
return getUint32LE(uint8View, offset) +
|
|
503
|
+
getUint32LE(uint8View, offset + 4) * 0x100000000;
|
|
772
504
|
}
|
|
773
|
-
|
|
774
|
-
/* eslint-disable no-irregular-whitespace */
|
|
775
505
|
// const decodeCP437 = (function() {
|
|
776
|
-
// const cp437 = '\u0000☺☻♥♦♣♠•◘○◙♂♀♪♫☼►◄↕‼¶§▬↨↑↓→←∟↔▲▼ !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~⌂ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜ¢£¥₧ƒáíóúñѪº¿⌐¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐└┴┬├─┼╞╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀αßΓπΣσµτΦΘΩδ∞φε∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■
|
|
506
|
+
// const cp437 = '\u0000☺☻♥♦♣♠•◘○◙♂♀♪♫☼►◄↕‼¶§▬↨↑↓→←∟↔▲▼ !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~⌂ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜ¢£¥₧ƒáíóúñѪº¿⌐¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐└┴┬├─┼╞╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀αßΓπΣσµτΦΘΩδ∞φε∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■ ';
|
|
777
507
|
//
|
|
778
508
|
// return function(uint8view) {
|
|
779
509
|
// return Array.from(uint8view).map(v => cp437[v]).join('');
|
|
780
510
|
// };
|
|
781
511
|
// }());
|
|
782
|
-
/* eslint-enable no-irregular-whitespace */
|
|
783
|
-
|
|
784
512
|
const utf8Decoder = new TextDecoder();
|
|
785
|
-
function decodeBuffer(uint8View,
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
513
|
+
function decodeBuffer(uint8View, _isUTF8) {
|
|
514
|
+
if (isSharedArrayBuffer(uint8View.buffer)) {
|
|
515
|
+
uint8View = new Uint8Array(uint8View);
|
|
516
|
+
}
|
|
517
|
+
return utf8Decoder.decode(uint8View);
|
|
518
|
+
/*
|
|
519
|
+
AFAICT the UTF8 flat is not set so it's 100% up to the user
|
|
520
|
+
to self decode if their file is not utf8 filenames
|
|
521
|
+
return isUTF8
|
|
522
|
+
? utf8Decoder.decode(uint8View)
|
|
523
|
+
: decodeCP437(uint8View);
|
|
524
|
+
*/
|
|
797
525
|
}
|
|
798
|
-
|
|
799
526
|
async function findEndOfCentralDirector(reader, totalLength) {
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
811
|
-
|
|
812
|
-
|
|
813
|
-
|
|
814
|
-
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
|
|
818
|
-
|
|
819
|
-
|
|
820
|
-
|
|
821
|
-
|
|
822
|
-
|
|
823
|
-
|
|
824
|
-
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
|
|
828
|
-
|
|
829
|
-
|
|
830
|
-
|
|
831
|
-
|
|
832
|
-
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
} else {
|
|
839
|
-
return await readEntries(reader, centralDirectoryOffset, centralDirectorySize, entryCount, comment, commentBytes);
|
|
527
|
+
const size = Math.min(EOCDR_WITHOUT_COMMENT_SIZE + MAX_COMMENT_SIZE, totalLength);
|
|
528
|
+
const readStart = totalLength - size;
|
|
529
|
+
const data = await readAs(reader, readStart, size);
|
|
530
|
+
for (let i = size - EOCDR_WITHOUT_COMMENT_SIZE; i >= 0; --i) {
|
|
531
|
+
if (getUint32LE(data, i) !== EOCDR_SIGNATURE) {
|
|
532
|
+
continue;
|
|
533
|
+
}
|
|
534
|
+
// 0 - End of central directory signature
|
|
535
|
+
const eocdr = new Uint8Array(data.buffer, data.byteOffset + i, data.byteLength - i);
|
|
536
|
+
// 4 - Number of this disk
|
|
537
|
+
const diskNumber = getUint16LE(eocdr, 4);
|
|
538
|
+
if (diskNumber !== 0) {
|
|
539
|
+
throw new Error(`multi-volume zip files are not supported. This is volume: ${diskNumber}`);
|
|
540
|
+
}
|
|
541
|
+
// 6 - Disk where central directory starts
|
|
542
|
+
// 8 - Number of central directory records on this disk
|
|
543
|
+
// 10 - Total number of central directory records
|
|
544
|
+
const entryCount = getUint16LE(eocdr, 10);
|
|
545
|
+
// 12 - Size of central directory (bytes)
|
|
546
|
+
const centralDirectorySize = getUint32LE(eocdr, 12);
|
|
547
|
+
// 16 - Offset of start of central directory, relative to start of archive
|
|
548
|
+
const centralDirectoryOffset = getUint32LE(eocdr, 16);
|
|
549
|
+
// 20 - Comment length
|
|
550
|
+
const commentLength = getUint16LE(eocdr, 20);
|
|
551
|
+
const expectedCommentLength = eocdr.length - EOCDR_WITHOUT_COMMENT_SIZE;
|
|
552
|
+
if (commentLength !== expectedCommentLength) {
|
|
553
|
+
throw new Error(`invalid comment length. expected: ${expectedCommentLength}, actual: ${commentLength}`);
|
|
554
|
+
}
|
|
555
|
+
// 22 - Comment
|
|
556
|
+
// the encoding is always cp437.
|
|
557
|
+
const commentBytes = new Uint8Array(eocdr.buffer, eocdr.byteOffset + 22, commentLength);
|
|
558
|
+
const comment = decodeBuffer(commentBytes);
|
|
559
|
+
if (entryCount === 0xffff || centralDirectoryOffset === 0xffffffff) {
|
|
560
|
+
return await readZip64CentralDirectory(reader, readStart + i, comment, commentBytes);
|
|
561
|
+
}
|
|
562
|
+
else {
|
|
563
|
+
return await readEntries(reader, centralDirectoryOffset, centralDirectorySize, entryCount, comment, commentBytes);
|
|
564
|
+
}
|
|
840
565
|
}
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
throw new Error('could not find end of central directory. maybe not zip file');
|
|
566
|
+
throw new Error('could not find end of central directory. maybe not zip file');
|
|
844
567
|
}
|
|
845
|
-
|
|
846
568
|
const END_OF_CENTRAL_DIRECTORY_LOCATOR_SIGNATURE = 0x07064b50;
|
|
847
|
-
|
|
848
569
|
async function readZip64CentralDirectory(reader, offset, comment, commentBytes) {
|
|
849
|
-
|
|
850
|
-
|
|
851
|
-
|
|
852
|
-
|
|
853
|
-
|
|
854
|
-
|
|
855
|
-
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
|
|
877
|
-
|
|
878
|
-
|
|
879
|
-
|
|
880
|
-
// 48 - offset of start of central directory with respect to the starting disk number 8 bytes
|
|
881
|
-
const centralDirectoryOffset = getUint64LE(zip64Eocdr, 48);
|
|
882
|
-
// 56 - zip64 extensible data sector (variable size)
|
|
883
|
-
return readEntries(reader, centralDirectoryOffset, centralDirectorySize, entryCount, comment, commentBytes);
|
|
570
|
+
// ZIP64 Zip64 end of central directory locator
|
|
571
|
+
const zip64EocdlOffset = offset - 20;
|
|
572
|
+
const eocdl = await readAs(reader, zip64EocdlOffset, 20);
|
|
573
|
+
// 0 - zip64 end of central dir locator signature
|
|
574
|
+
if (getUint32LE(eocdl, 0) !== END_OF_CENTRAL_DIRECTORY_LOCATOR_SIGNATURE) {
|
|
575
|
+
throw new Error('invalid zip64 end of central directory locator signature');
|
|
576
|
+
}
|
|
577
|
+
// 4 - number of the disk with the start of the zip64 end of central directory
|
|
578
|
+
// 8 - relative offset of the zip64 end of central directory record
|
|
579
|
+
const zip64EocdrOffset = getUint64LE(eocdl, 8);
|
|
580
|
+
// 16 - total number of disks
|
|
581
|
+
// ZIP64 end of central directory record
|
|
582
|
+
const zip64Eocdr = await readAs(reader, zip64EocdrOffset, 56);
|
|
583
|
+
// 0 - zip64 end of central dir signature 4 bytes (0x06064b50)
|
|
584
|
+
if (getUint32LE(zip64Eocdr, 0) !== ZIP64_EOCDR_SIGNATURE) {
|
|
585
|
+
throw new Error('invalid zip64 end of central directory record signature');
|
|
586
|
+
}
|
|
587
|
+
// 4 - size of zip64 end of central directory record 8 bytes
|
|
588
|
+
// 12 - version made by 2 bytes
|
|
589
|
+
// 14 - version needed to extract 2 bytes
|
|
590
|
+
// 16 - number of this disk 4 bytes
|
|
591
|
+
// 20 - number of the disk with the start of the central directory 4 bytes
|
|
592
|
+
// 24 - total number of entries in the central directory on this disk 8 bytes
|
|
593
|
+
// 32 - total number of entries in the central directory 8 bytes
|
|
594
|
+
const entryCount = getUint64LE(zip64Eocdr, 32);
|
|
595
|
+
// 40 - size of the central directory 8 bytes
|
|
596
|
+
const centralDirectorySize = getUint64LE(zip64Eocdr, 40);
|
|
597
|
+
// 48 - offset of start of central directory with respect to the starting disk number 8 bytes
|
|
598
|
+
const centralDirectoryOffset = getUint64LE(zip64Eocdr, 48);
|
|
599
|
+
// 56 - zip64 extensible data sector (variable size)
|
|
600
|
+
return readEntries(reader, centralDirectoryOffset, centralDirectorySize, entryCount, comment, commentBytes);
|
|
884
601
|
}
|
|
885
|
-
|
|
886
602
|
const CENTRAL_DIRECTORY_FILE_HEADER_SIGNATURE = 0x02014b50;
|
|
887
|
-
|
|
888
603
|
async function readEntries(reader, centralDirectoryOffset, centralDirectorySize, rawEntryCount, comment, commentBytes) {
|
|
889
|
-
|
|
890
|
-
|
|
891
|
-
|
|
892
|
-
|
|
893
|
-
|
|
894
|
-
|
|
895
|
-
|
|
896
|
-
|
|
897
|
-
|
|
898
|
-
|
|
899
|
-
|
|
900
|
-
|
|
901
|
-
|
|
902
|
-
|
|
903
|
-
|
|
904
|
-
|
|
905
|
-
|
|
906
|
-
|
|
907
|
-
|
|
908
|
-
|
|
909
|
-
|
|
910
|
-
|
|
911
|
-
|
|
912
|
-
|
|
913
|
-
|
|
914
|
-
|
|
915
|
-
|
|
916
|
-
|
|
917
|
-
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
|
|
926
|
-
|
|
927
|
-
|
|
928
|
-
|
|
929
|
-
|
|
930
|
-
|
|
931
|
-
|
|
932
|
-
|
|
933
|
-
|
|
934
|
-
if (rawEntry.generalPurposeBitFlag & 0x40) {
|
|
935
|
-
throw new Error('strong encryption is not supported');
|
|
936
|
-
}
|
|
937
|
-
|
|
938
|
-
readEntryCursor += 46;
|
|
939
|
-
|
|
940
|
-
const data = allEntriesBuffer.subarray(readEntryCursor, readEntryCursor + rawEntry.fileNameLength + rawEntry.extraFieldLength + rawEntry.fileCommentLength);
|
|
941
|
-
rawEntry.nameBytes = data.slice(0, rawEntry.fileNameLength);
|
|
942
|
-
rawEntry.name = decodeBuffer(rawEntry.nameBytes);
|
|
943
|
-
|
|
944
|
-
// 46+n - Extra field
|
|
945
|
-
const fileCommentStart = rawEntry.fileNameLength + rawEntry.extraFieldLength;
|
|
946
|
-
const extraFieldBuffer = data.slice(rawEntry.fileNameLength, fileCommentStart);
|
|
947
|
-
rawEntry.extraFields = [];
|
|
948
|
-
let i = 0;
|
|
949
|
-
while (i < extraFieldBuffer.length - 3) {
|
|
950
|
-
const headerId = getUint16LE(extraFieldBuffer, i + 0);
|
|
951
|
-
const dataSize = getUint16LE(extraFieldBuffer, i + 2);
|
|
952
|
-
const dataStart = i + 4;
|
|
953
|
-
const dataEnd = dataStart + dataSize;
|
|
954
|
-
if (dataEnd > extraFieldBuffer.length) {
|
|
955
|
-
throw new Error('extra field length exceeds extra field buffer size');
|
|
956
|
-
}
|
|
957
|
-
rawEntry.extraFields.push({
|
|
958
|
-
id: headerId,
|
|
959
|
-
data: extraFieldBuffer.slice(dataStart, dataEnd),
|
|
960
|
-
});
|
|
961
|
-
i = dataEnd;
|
|
962
|
-
}
|
|
963
|
-
|
|
964
|
-
// 46+n+m - File comment
|
|
965
|
-
rawEntry.commentBytes = data.slice(fileCommentStart, fileCommentStart + rawEntry.fileCommentLength);
|
|
966
|
-
rawEntry.comment = decodeBuffer(rawEntry.commentBytes);
|
|
967
|
-
|
|
968
|
-
readEntryCursor += data.length;
|
|
969
|
-
|
|
970
|
-
if (rawEntry.uncompressedSize === 0xffffffff ||
|
|
971
|
-
rawEntry.compressedSize === 0xffffffff ||
|
|
972
|
-
rawEntry.relativeOffsetOfLocalHeader === 0xffffffff) {
|
|
973
|
-
// ZIP64 format
|
|
974
|
-
// find the Zip64 Extended Information Extra Field
|
|
975
|
-
const zip64ExtraField = rawEntry.extraFields.find(e => e.id === 0x0001);
|
|
976
|
-
-      if (!zip64ExtraField) {
-        throw new Error('expected zip64 extended information extra field');
-      }
-      const zip64EiefBuffer = zip64ExtraField.data;
-      let index = 0;
-      // 0 - Original Size 8 bytes
-      if (rawEntry.uncompressedSize === 0xffffffff) {
-        if (index + 8 > zip64EiefBuffer.length) {
-          throw new Error('zip64 extended information extra field does not include uncompressed size');
+    let readEntryCursor = 0;
+    const allEntriesBuffer = await readAs(reader, centralDirectoryOffset, centralDirectorySize);
+    const rawEntries = [];
+    for (let e = 0; e < rawEntryCount; ++e) {
+        const buffer = allEntriesBuffer.subarray(readEntryCursor, readEntryCursor + 46);
+        // 0 - Central directory file header signature
+        const signature = getUint32LE(buffer, 0);
+        if (signature !== CENTRAL_DIRECTORY_FILE_HEADER_SIGNATURE) {
+            throw new Error(`invalid central directory file header signature: 0x${signature.toString(16)}`);
+        }
+        const rawEntry = {
+            // 4 - Version made by
+            versionMadeBy: getUint16LE(buffer, 4),
+            // 6 - Version needed to extract (minimum)
+            versionNeededToExtract: getUint16LE(buffer, 6),
+            // 8 - General purpose bit flag
+            generalPurposeBitFlag: getUint16LE(buffer, 8),
+            // 10 - Compression method
+            compressionMethod: getUint16LE(buffer, 10),
+            // 12 - File last modification time
+            lastModFileTime: getUint16LE(buffer, 12),
+            // 14 - File last modification date
+            lastModFileDate: getUint16LE(buffer, 14),
+            // 16 - CRC-32
+            crc32: getUint32LE(buffer, 16),
+            // 20 - Compressed size
+            compressedSize: getUint32LE(buffer, 20),
+            // 24 - Uncompressed size
+            uncompressedSize: getUint32LE(buffer, 24),
+            // 28 - File name length (n)
+            fileNameLength: getUint16LE(buffer, 28),
+            // 30 - Extra field length (m)
+            extraFieldLength: getUint16LE(buffer, 30),
+            // 32 - File comment length (k)
+            fileCommentLength: getUint16LE(buffer, 32),
+            // 34 - Disk number where file starts
+            // 36 - Internal file attributes
+            internalFileAttributes: getUint16LE(buffer, 36),
+            // 38 - External file attributes
+            externalFileAttributes: getUint32LE(buffer, 38),
+            // 42 - Relative offset of local file header
+            relativeOffsetOfLocalHeader: getUint32LE(buffer, 42),
+        };
+        if (rawEntry.generalPurposeBitFlag & 0x40) {
+            throw new Error('strong encryption is not supported');
         }
- … (7 lines not preserved)
+        readEntryCursor += 46;
+        const data = allEntriesBuffer.subarray(readEntryCursor, readEntryCursor + rawEntry.fileNameLength + rawEntry.extraFieldLength + rawEntry.fileCommentLength);
+        // 46 - File name
+        (rawEntry.generalPurposeBitFlag & 0x800) !== 0;
+        rawEntry.nameBytes = data.slice(0, rawEntry.fileNameLength);
+        rawEntry.name = decodeBuffer(rawEntry.nameBytes);
+        // 46+n - Extra field
+        const fileCommentStart = rawEntry.fileNameLength + rawEntry.extraFieldLength;
+        const extraFieldBuffer = data.slice(rawEntry.fileNameLength, fileCommentStart);
+        rawEntry.extraFields = [];
+        let i = 0;
+        while (i < extraFieldBuffer.length - 3) {
+            const headerId = getUint16LE(extraFieldBuffer, i + 0);
+            const dataSize = getUint16LE(extraFieldBuffer, i + 2);
+            const dataStart = i + 4;
+            const dataEnd = dataStart + dataSize;
+            if (dataEnd > extraFieldBuffer.length) {
+                throw new Error('extra field length exceeds extra field buffer size');
+            }
+            rawEntry.extraFields.push({
+                id: headerId,
+                data: extraFieldBuffer.slice(dataStart, dataEnd),
+            });
+            i = dataEnd;
         }
- … (7 lines not preserved)
+        // 46+n+m - File comment
+        rawEntry.commentBytes = data.slice(fileCommentStart, fileCommentStart + rawEntry.fileCommentLength);
+        rawEntry.comment = decodeBuffer(rawEntry.commentBytes);
+        readEntryCursor += data.length;
+        if (rawEntry.uncompressedSize === 0xffffffff ||
+            rawEntry.compressedSize === 0xffffffff ||
+            rawEntry.relativeOffsetOfLocalHeader === 0xffffffff) {
+            // ZIP64 format
+            // find the Zip64 Extended Information Extra Field
+            const zip64ExtraField = rawEntry.extraFields.find(e => e.id === 0x0001);
+            if (!zip64ExtraField) {
+                throw new Error('expected zip64 extended information extra field');
+            }
+            const zip64EiefBuffer = zip64ExtraField.data;
+            let index = 0;
+            // 0 - Original Size 8 bytes
+            if (rawEntry.uncompressedSize === 0xffffffff) {
+                if (index + 8 > zip64EiefBuffer.length) {
+                    throw new Error('zip64 extended information extra field does not include uncompressed size');
+                }
+                rawEntry.uncompressedSize = getUint64LE(zip64EiefBuffer, index);
+                index += 8;
+            }
+            // 8 - Compressed Size 8 bytes
+            if (rawEntry.compressedSize === 0xffffffff) {
+                if (index + 8 > zip64EiefBuffer.length) {
+                    throw new Error('zip64 extended information extra field does not include compressed size');
+                }
+                rawEntry.compressedSize = getUint64LE(zip64EiefBuffer, index);
+                index += 8;
+            }
+            // 16 - Relative Header Offset 8 bytes
+            if (rawEntry.relativeOffsetOfLocalHeader === 0xffffffff) {
+                if (index + 8 > zip64EiefBuffer.length) {
+                    throw new Error('zip64 extended information extra field does not include relative header offset');
+                }
+                rawEntry.relativeOffsetOfLocalHeader = getUint64LE(zip64EiefBuffer, index);
+                index += 8;
+            }
+            // 24 - Disk Start Number 4 bytes
         }
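
The parsing above leans on three little-endian helpers, `getUint16LE`, `getUint32LE`, and `getUint64LE`, defined elsewhere in this file. A minimal sketch of how such helpers can be written over a `Uint8Array` (illustrative only, not the package's exact code):

// Little-endian reads over a Uint8Array (sketch; not the package's exact helpers).
// Multiplication rather than bit-shifting keeps values at or above 2^31 positive.
function getUint16LE(u8, offset) {
  return u8[offset] + u8[offset + 1] * 0x100;
}
function getUint32LE(u8, offset) {
  return u8[offset] +
         u8[offset + 1] * 0x100 +
         u8[offset + 2] * 0x10000 +
         u8[offset + 3] * 0x1000000;
}
function getUint64LE(u8, offset) {
  // combines two 32-bit halves; exact only up to Number.MAX_SAFE_INTEGER (2^53 - 1),
  // which is why unzipRaw rejects larger sources below
  return getUint32LE(u8, offset) + getUint32LE(u8, offset + 4) * 0x100000000;
}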
- … (4 lines not preserved)
+        // check for Info-ZIP Unicode Path Extra Field (0x7075)
+        // see https://github.com/thejoshwolfe/yauzl/issues/33
+        const nameField = rawEntry.extraFields.find(e => e.id === 0x7075 &&
+            e.data.length >= 6 && // too short to be meaningful
+            e.data[0] === 1 && // Version 1 byte version of this extra field, currently 1
+            getUint32LE(e.data, 1), crc.unsigned()); // NameCRC32 4 bytes File Name Field CRC32 Checksum
+        // > If the CRC check fails, this UTF-8 Path Extra Field should be
+        // > ignored and the File Name field in the header should be used instead.
+        if (nameField) {
+            // UnicodeName Variable UTF-8 version of the entry File Name
+            rawEntry.fileName = decodeBuffer(nameField.data.slice(5));
+        }
+        // validate file size
+        if (rawEntry.compressionMethod === 0) {
+            let expectedCompressedSize = rawEntry.uncompressedSize;
+            if ((rawEntry.generalPurposeBitFlag & 0x1) !== 0) {
+                // traditional encryption prefixes the file data with a header
+                expectedCompressedSize += 12;
+            }
+            if (rawEntry.compressedSize !== expectedCompressedSize) {
+                throw new Error(`compressed size mismatch for stored file: ${rawEntry.compressedSize} != ${expectedCompressedSize}`);
+            }
+        }
+        rawEntries.push(rawEntry);
     }
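
Entry names and comments pass through `decodeBuffer`. A simplified sketch of what such a helper can look like with `TextDecoder` (an assumption-based illustration; the package's real helper may handle more cases):

// Sketch of a decodeBuffer-style helper (assumes TextDecoder, as in browsers
// and modern Node). TextDecoder will not accept a view backed by a
// SharedArrayBuffer, so copy to a plain ArrayBuffer first in that case.
const decoder = new TextDecoder();
function decodeBuffer(uint8View) {
  if (typeof SharedArrayBuffer !== 'undefined' &&
      uint8View.buffer instanceof SharedArrayBuffer) {
    uint8View = new Uint8Array(uint8View);  // copy out of the shared buffer
  }
  return decoder.decode(uint8View);
}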
- … (6 lines not preserved)
- e
- … (3 lines not preserved)
- if (
-
-      rawEntry.fileName = decodeBuffer(nameField.data.slice(5));
+    const zip = {
+        comment,
+        commentBytes,
+    };
+    return {
+        zip,
+        entries: rawEntries.map(e => new ZipEntry(reader, e)),
+    };
+}
+async function readEntryDataHeader(reader, rawEntry) {
+    if (rawEntry.generalPurposeBitFlag & 0x1) {
+        throw new Error('encrypted entries not supported');
     }
-
- //
+    const buffer = await readAs(reader, rawEntry.relativeOffsetOfLocalHeader, 30);
+    // note: maybe this should be passed in or cached on entry
+    // as it's async so there will be at least one tick (not sure about that)
+    const totalLength = await reader.getLength();
+    // 0 - Local file header signature = 0x04034b50
+    const signature = getUint32LE(buffer, 0);
+    if (signature !== 0x04034b50) {
+        throw new Error(`invalid local file header signature: 0x${signature.toString(16)}`);
+    }
+    // all this should be redundant
+    // 4 - Version needed to extract (minimum)
+    // 6 - General purpose bit flag
+    // 8 - Compression method
+    // 10 - File last modification time
+    // 12 - File last modification date
+    // 14 - CRC-32
+    // 18 - Compressed size
+    // 22 - Uncompressed size
+    // 26 - File name length (n)
+    const fileNameLength = getUint16LE(buffer, 26);
+    // 28 - Extra field length (m)
+    const extraFieldLength = getUint16LE(buffer, 28);
+    // 30 - File name
+    // 30+n - Extra field
+    const localFileHeaderEnd = rawEntry.relativeOffsetOfLocalHeader + buffer.length + fileNameLength + extraFieldLength;
+    let decompress;
     if (rawEntry.compressionMethod === 0) {
-
-
-        // traditional encryption prefixes the file data with a header
-        expectedCompressedSize += 12;
-      }
-      if (rawEntry.compressedSize !== expectedCompressedSize) {
-        throw new Error(`compressed size mismatch for stored file: ${rawEntry.compressedSize} != ${expectedCompressedSize}`);
-      }
+        // 0 - The file is stored (no compression)
+        decompress = false;
     }
- … (16 lines not preserved)
-  const buffer = await readAs(reader, rawEntry.relativeOffsetOfLocalHeader, 30);
-  // note: maybe this should be passed in or cached on entry
-  // as it's async so there will be at least one tick (not sure about that)
-  const totalLength = await reader.getLength();
-
-  // 0 - Local file header signature = 0x04034b50
-  const signature = getUint32LE(buffer, 0);
-  if (signature !== 0x04034b50) {
-    throw new Error(`invalid local file header signature: 0x${signature.toString(16)}`);
-  }
-
-  // all this should be redundant
-  // 4 - Version needed to extract (minimum)
-  // 6 - General purpose bit flag
-  // 8 - Compression method
-  // 10 - File last modification time
-  // 12 - File last modification date
-  // 14 - CRC-32
-  // 18 - Compressed size
-  // 22 - Uncompressed size
-  // 26 - File name length (n)
-  const fileNameLength = getUint16LE(buffer, 26);
-  // 28 - Extra field length (m)
-  const extraFieldLength = getUint16LE(buffer, 28);
-  // 30 - File name
-  // 30+n - Extra field
-  const localFileHeaderEnd = rawEntry.relativeOffsetOfLocalHeader + buffer.length + fileNameLength + extraFieldLength;
-  let decompress;
-  if (rawEntry.compressionMethod === 0) {
-    // 0 - The file is stored (no compression)
-    decompress = false;
-  } else if (rawEntry.compressionMethod === 8) {
-    // 8 - The file is Deflated
-    decompress = true;
-  } else {
-    throw new Error(`unsupported compression method: ${rawEntry.compressionMethod}`);
-  }
-  const fileDataStart = localFileHeaderEnd;
-  const fileDataEnd = fileDataStart + rawEntry.compressedSize;
-  if (rawEntry.compressedSize !== 0) {
-    // bounds check now, because the read streams will probably not complain loud enough.
-    // since we're dealing with an unsigned offset plus an unsigned size,
-    // we only have 1 thing to check for.
-    if (fileDataEnd > totalLength) {
-      throw new Error(`file data overflows file bounds: ${fileDataStart} + ${rawEntry.compressedSize} > ${totalLength}`);
+    else if (rawEntry.compressionMethod === 8) {
+        // 8 - The file is Deflated
+        decompress = true;
+    }
+    else {
+        throw new Error(`unsupported compression method: ${rawEntry.compressionMethod}`);
+    }
+    const fileDataStart = localFileHeaderEnd;
+    const fileDataEnd = fileDataStart + rawEntry.compressedSize;
+    if (rawEntry.compressedSize !== 0) {
+        // bounds check now, because the read streams will probably not complain loud enough.
+        // since we're dealing with an unsigned offset plus an unsigned size,
+        // we only have 1 thing to check for.
+        if (fileDataEnd > totalLength) {
+            throw new Error(`file data overflows file bounds: ${fileDataStart} + ${rawEntry.compressedSize} > ${totalLength}`);
+        }
     }
- … (4 lines not preserved)
-  };
+    return {
+        decompress,
+        fileDataStart,
+    };
 }
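
`readEntryDataHeader` locates the compressed bytes with plain arithmetic: a local file header is a fixed 30 bytes followed by the variable-length file name and extra field. A worked example with hypothetical numbers:

// Hypothetical entry: local header at byte 1000, 9-byte name, 24-byte extra field.
const relativeOffsetOfLocalHeader = 1000;
const fixedHeaderSize = 30;  // the fixed-size portion read above
const fileNameLength = 9;
const extraFieldLength = 24;
const fileDataStart = relativeOffsetOfLocalHeader + fixedHeaderSize + fileNameLength + extraFieldLength;
console.log(fileDataStart);  // 1063 — the compressed data begins here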
-
 async function readEntryDataAsArrayBuffer(reader, rawEntry) {
- … (19 lines not preserved)
+    const { decompress, fileDataStart } = await readEntryDataHeader(reader, rawEntry);
+    if (!decompress) {
+        const dataView = await readAs(reader, fileDataStart, rawEntry.compressedSize);
+        // make copy?
+        //
+        // 1. The source is a Blob/file. In this case we'll get back TypedArray we can just hand to the user
+        // 2. The source is a TypedArray. In this case we'll get back TypedArray that is a view into a larger buffer
+        //    but because ultimately this is used to return an ArrayBuffer to `someEntry.arrayBuffer()`
+        //    we need to return copy since we need the `ArrayBuffer`, not the TypedArray to exactly match the data.
+        //    Note: We could add another API function `bytes()` or something that returned a `Uint8Array`
+        //    instead of an `ArrayBuffer`. This would let us skip a copy here. But this case only happens for uncompressed
+        //    data. That seems like a rare enough case that adding a new API is not worth it? Or is it? A zip of jpegs or mp3s
+        //    might not be compressed. For now that's a TBD.
+        return isTypedArraySameAsArrayBuffer(dataView) ? dataView.buffer : dataView.slice().buffer;
+    }
+    // see comment in readEntryDateAsBlob
+    const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, rawEntry.compressedSize);
+    const result = await inflateRawAsync(typedArrayOrBlob instanceof Uint8Array ? typedArrayOrBlob : typedArrayOrBlob, rawEntry.uncompressedSize);
+    return result;
 }
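
The copy-versus-view decision above matters because a `subarray` shares its backing store: returning `view.buffer` directly would hand the caller bytes outside the entry. A small self-contained illustration:

// A subarray is a view; its .buffer is the entire underlying allocation.
const big = new Uint8Array([1, 2, 3, 4]);
const view = big.subarray(1, 3);
console.log(view.byteLength);                 // 2
console.log(view.buffer.byteLength);          // 4 — more than the entry's bytes
console.log(view.slice().buffer.byteLength);  // 2 — slice() makes a tight copy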
-
 async function readEntryDataAsBlob(reader, rawEntry, type) {
- … (5 lines not preserved)
+    const { decompress, fileDataStart } = await readEntryDataHeader(reader, rawEntry);
+    if (!decompress) {
+        const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, rawEntry.compressedSize, type);
+        if (isBlob(typedArrayOrBlob)) {
+            return typedArrayOrBlob;
+        }
+        return new Blob([typedArrayOrBlob], { type });
     }
- … (6 lines not preserved)
-  const result = await inflateRawAsync(typedArrayOrBlob, rawEntry.uncompressedSize, type);
-  return result;
+    // Here's the issue with this mess (should refactor?)
+    // if the source is a blob then we really want to pass a blob to inflateRawAsync to avoid a large
+    // copy if we're going to a worker.
+    const typedArrayOrBlob = await readAsBlobOrTypedArray(reader, fileDataStart, rawEntry.compressedSize);
+    const result = await inflateRawAsync(typedArrayOrBlob instanceof Uint8Array ? typedArrayOrBlob : typedArrayOrBlob, rawEntry.uncompressedSize, type);
+    return result;
 }
-
-
-   setOptions(options);
+function setOptions(options) {
+    setOptions$1(options);
 }
-
 async function unzipRaw(source) {
- … (3 lines not preserved)
-  } else if (source instanceof ArrayBuffer || (source && source.buffer && source.buffer instanceof ArrayBuffer)) {
-    reader = new ArrayBufferReader(source);
-  } else if (isSharedArrayBuffer(source) || isSharedArrayBuffer(source.buffer)) {
-    reader = new ArrayBufferReader(source);
-  } else if (typeof source === 'string') {
-    const req = await fetch(source);
-    if (!req.ok) {
-      throw new Error(`failed http request ${source}, status: ${req.status}: ${req.statusText}`);
+    let reader;
+    if (typeof Blob !== 'undefined' && source instanceof Blob) {
+        reader = new BlobReader(source);
     }
- … (15 lines not preserved)
+    else if (source instanceof ArrayBuffer || (source && source.buffer && source.buffer instanceof ArrayBuffer)) {
+        reader = new ArrayBufferReader(source);
+    }
+    else if (isSharedArrayBuffer(source) || isSharedArrayBuffer(source.buffer)) {
+        reader = new ArrayBufferReader(source);
+    }
+    else if (typeof source === 'string') {
+        const req = await fetch(source);
+        if (!req.ok) {
+            throw new Error(`failed http request ${source}, status: ${req.status}: ${req.statusText}`);
+        }
+        const blob = await req.blob();
+        reader = new BlobReader(blob);
+    }
+    else if (typeof source.getLength === 'function' && typeof source.read === 'function') {
+        reader = source;
+    }
+    else {
+        throw new Error('unsupported source type');
+    }
+    const totalLength = await reader.getLength();
+    if (totalLength > Number.MAX_SAFE_INTEGER) {
+        throw new Error(`file too large. size: ${totalLength}. Only file sizes up 4503599627370496 bytes are supported`);
+    }
+    return await findEndOfCentralDirector(reader, totalLength);
 }
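
The duck-type check above means `unzipRaw` accepts any object with async `getLength()` and `read(offset, length)` methods, not just the built-in readers. A hypothetical Node.js reader sketch (class name and details are illustrative, not part of the package):

import { open } from 'node:fs/promises';

// Hypothetical custom reader over a local file; it satisfies the
// getLength/read duck type checked above. It opens the file per call for
// simplicity; a real reader would likely keep the handle open.
class NodeFileReader {
  constructor(filename) {
    this.filename = filename;
  }
  async getLength() {
    if (this.length === undefined) {
      const fh = await open(this.filename, 'r');
      this.length = (await fh.stat()).size;
      await fh.close();
    }
    return this.length;
  }
  async read(offset, length) {
    const fh = await open(this.filename, 'r');
    const data = new Uint8Array(length);
    await fh.read(data, 0, length, offset);
    await fh.close();
    return data;
  }
}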
-
 // If the names are not utf8 you should use unzipitRaw
 async function unzip(source) {
- … (5 lines not preserved)
+    const { zip, entries } = await unzipRaw(source);
+    return {
+        zip,
+        entries: Object.fromEntries(entries.map(v => [v.name, v])),
+    };
 }
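
`unzip` resolves to `{ zip, entries }`, with `entries` now built via `Object.fromEntries`, i.e. a plain object keyed by file name. A usage sketch (the URL and entry name are placeholders; `size` and `text()` follow the entry API described in the package README):

import { unzip } from 'unzipit';

async function main() {
  const { entries } = await unzip('https://example.com/archive.zip');  // placeholder URL
  for (const [name, entry] of Object.entries(entries)) {
    console.log(name, entry.size);
  }
  const text = await entries['hello.txt'].text();  // placeholder entry name
  console.log(text);
}
main();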
-
-
-   cleanup();
+function cleanup() {
+    cleanup$1();
 }
 
-export { HTTPRangeReader,
+export { ArrayBufferReader, BlobReader, HTTPRangeReader, ZipEntry, cleanup, setOptions, unzip, unzipRaw };