@helloao/cli 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110)
  1. package/actions.d.ts +57 -0
  2. package/actions.js +262 -0
  3. package/cli.d.ts +2 -0
  4. package/cli.js +139 -0
  5. package/db.d.ts +110 -0
  6. package/db.js +754 -0
  7. package/downloads.d.ts +2 -0
  8. package/downloads.js +12 -0
  9. package/files.d.ts +56 -0
  10. package/files.js +232 -0
  11. package/index.d.ts +8 -0
  12. package/index.js +38 -0
  13. package/migrations/20240420231455_initial/migration.sql +66 -0
  14. package/migrations/20240623183848_add_book_order/migration.sql +26 -0
  15. package/migrations/20240629194121_add_chapter_links/migration.sql +45 -0
  16. package/migrations/20240629194513_add_chapter_content/migration.sql +30 -0
  17. package/migrations/20240705221833_remove_unused_columns/migration.sql +27 -0
  18. package/migrations/20240711173108_add_chapter_audio/migration.sql +13 -0
  19. package/migrations/20240724212651_add_hashing/migration.sql +25 -0
  20. package/node_modules/@zip.js/zip.js/LICENSE +28 -0
  21. package/node_modules/@zip.js/zip.js/README.md +173 -0
  22. package/node_modules/@zip.js/zip.js/deno.json +8 -0
  23. package/node_modules/@zip.js/zip.js/dist/README.md +28 -0
  24. package/node_modules/@zip.js/zip.js/dist/z-worker-fflate.js +1 -0
  25. package/node_modules/@zip.js/zip.js/dist/z-worker-pako.js +1 -0
  26. package/node_modules/@zip.js/zip.js/dist/z-worker.js +1 -0
  27. package/node_modules/@zip.js/zip.js/dist/zip-fs-full.js +11935 -0
  28. package/node_modules/@zip.js/zip.js/dist/zip-fs-full.min.js +1 -0
  29. package/node_modules/@zip.js/zip.js/dist/zip-fs.js +6079 -0
  30. package/node_modules/@zip.js/zip.js/dist/zip-fs.min.js +1 -0
  31. package/node_modules/@zip.js/zip.js/dist/zip-full.js +9463 -0
  32. package/node_modules/@zip.js/zip.js/dist/zip-full.min.js +1 -0
  33. package/node_modules/@zip.js/zip.js/dist/zip-no-worker-deflate.min.js +1 -0
  34. package/node_modules/@zip.js/zip.js/dist/zip-no-worker-inflate.min.js +1 -0
  35. package/node_modules/@zip.js/zip.js/dist/zip-no-worker.min.js +1 -0
  36. package/node_modules/@zip.js/zip.js/dist/zip.js +5240 -0
  37. package/node_modules/@zip.js/zip.js/dist/zip.min.js +1 -0
  38. package/node_modules/@zip.js/zip.js/index-fflate.js +82 -0
  39. package/node_modules/@zip.js/zip.js/index.cjs +11927 -0
  40. package/node_modules/@zip.js/zip.js/index.d.ts +2048 -0
  41. package/node_modules/@zip.js/zip.js/index.js +87 -0
  42. package/node_modules/@zip.js/zip.js/index.min.js +1 -0
  43. package/node_modules/@zip.js/zip.js/lib/core/codec-pool.js +127 -0
  44. package/node_modules/@zip.js/zip.js/lib/core/codec-worker.js +348 -0
  45. package/node_modules/@zip.js/zip.js/lib/core/configuration.js +127 -0
  46. package/node_modules/@zip.js/zip.js/lib/core/constants.js +114 -0
  47. package/node_modules/@zip.js/zip.js/lib/core/io.js +749 -0
  48. package/node_modules/@zip.js/zip.js/lib/core/streams/aes-crypto-stream.js +326 -0
  49. package/node_modules/@zip.js/zip.js/lib/core/streams/codec-stream.js +154 -0
  50. package/node_modules/@zip.js/zip.js/lib/core/streams/codecs/crc32.js +63 -0
  51. package/node_modules/@zip.js/zip.js/lib/core/streams/codecs/deflate.js +2063 -0
  52. package/node_modules/@zip.js/zip.js/lib/core/streams/codecs/inflate.js +2167 -0
  53. package/node_modules/@zip.js/zip.js/lib/core/streams/codecs/sjcl.js +827 -0
  54. package/node_modules/@zip.js/zip.js/lib/core/streams/common-crypto.js +55 -0
  55. package/node_modules/@zip.js/zip.js/lib/core/streams/crc32-stream.js +56 -0
  56. package/node_modules/@zip.js/zip.js/lib/core/streams/stream-adapter.js +55 -0
  57. package/node_modules/@zip.js/zip.js/lib/core/streams/zip-crypto-stream.js +162 -0
  58. package/node_modules/@zip.js/zip.js/lib/core/streams/zip-entry-stream.js +165 -0
  59. package/node_modules/@zip.js/zip.js/lib/core/util/cp437-decode.js +48 -0
  60. package/node_modules/@zip.js/zip.js/lib/core/util/decode-text.js +43 -0
  61. package/node_modules/@zip.js/zip.js/lib/core/util/default-mime-type.js +38 -0
  62. package/node_modules/@zip.js/zip.js/lib/core/util/encode-text.js +48 -0
  63. package/node_modules/@zip.js/zip.js/lib/core/util/mime-type.js +1639 -0
  64. package/node_modules/@zip.js/zip.js/lib/core/util/stream-codec-shim.js +91 -0
  65. package/node_modules/@zip.js/zip.js/lib/core/z-worker-core.js +176 -0
  66. package/node_modules/@zip.js/zip.js/lib/core/zip-entry.js +86 -0
  67. package/node_modules/@zip.js/zip.js/lib/core/zip-fs-core.js +865 -0
  68. package/node_modules/@zip.js/zip.js/lib/core/zip-reader.js +757 -0
  69. package/node_modules/@zip.js/zip.js/lib/core/zip-writer.js +1186 -0
  70. package/node_modules/@zip.js/zip.js/lib/z-worker-bootstrap-fflate.js +40 -0
  71. package/node_modules/@zip.js/zip.js/lib/z-worker-bootstrap-pako.js +39 -0
  72. package/node_modules/@zip.js/zip.js/lib/z-worker-fflate.js +40 -0
  73. package/node_modules/@zip.js/zip.js/lib/z-worker-inline-template.js +42 -0
  74. package/node_modules/@zip.js/zip.js/lib/z-worker-inline.js +1 -0
  75. package/node_modules/@zip.js/zip.js/lib/z-worker.js +38 -0
  76. package/node_modules/@zip.js/zip.js/lib/zip-data-uri.js +53 -0
  77. package/node_modules/@zip.js/zip.js/lib/zip-fflate-shim.js +37 -0
  78. package/node_modules/@zip.js/zip.js/lib/zip-fs.js +53 -0
  79. package/node_modules/@zip.js/zip.js/lib/zip-full-fflate.js +53 -0
  80. package/node_modules/@zip.js/zip.js/lib/zip-full.js +54 -0
  81. package/node_modules/@zip.js/zip.js/lib/zip-no-worker-deflate.js +42 -0
  82. package/node_modules/@zip.js/zip.js/lib/zip-no-worker-fflate-deflate.js +42 -0
  83. package/node_modules/@zip.js/zip.js/lib/zip-no-worker-fflate-inflate.js +42 -0
  84. package/node_modules/@zip.js/zip.js/lib/zip-no-worker-fflate.js +43 -0
  85. package/node_modules/@zip.js/zip.js/lib/zip-no-worker-inflate.js +42 -0
  86. package/node_modules/@zip.js/zip.js/lib/zip-no-worker.js +44 -0
  87. package/node_modules/@zip.js/zip.js/lib/zip.js +52 -0
  88. package/node_modules/@zip.js/zip.js/package.json +86 -0
  89. package/package.json +43 -0
  90. package/prisma-gen/default.d.ts +1 -0
  91. package/prisma-gen/default.js +1 -0
  92. package/prisma-gen/edge.d.ts +1 -0
  93. package/prisma-gen/edge.js +242 -0
  94. package/prisma-gen/index-browser.js +236 -0
  95. package/prisma-gen/index.d.ts +13248 -0
  96. package/prisma-gen/index.js +265 -0
  97. package/prisma-gen/runtime/edge-esm.js +28 -0
  98. package/prisma-gen/runtime/edge.js +28 -0
  99. package/prisma-gen/runtime/index-browser.d.ts +365 -0
  100. package/prisma-gen/runtime/index-browser.js +13 -0
  101. package/prisma-gen/runtime/library.d.ts +3168 -0
  102. package/prisma-gen/runtime/library.js +140 -0
  103. package/prisma-gen/runtime/wasm.js +29 -0
  104. package/prisma-gen/wasm.d.ts +1 -0
  105. package/prisma-gen/wasm.js +236 -0
  106. package/s3.d.ts +14 -0
  107. package/s3.js +76 -0
  108. package/schema.prisma +154 -0
  109. package/uploads.d.ts +54 -0
  110. package/uploads.js +141 -0
@@ -0,0 +1,2063 @@
1
+ /*
2
+ Copyright (c) 2022 Gildas Lormeau. All rights reserved.
3
+
4
+ Redistribution and use in source and binary forms, with or without
5
+ modification, are permitted provided that the following conditions are met:
6
+
7
+ 1. Redistributions of source code must retain the above copyright notice,
8
+ this list of conditions and the following disclaimer.
9
+
10
+ 2. Redistributions in binary form must reproduce the above copyright
11
+ notice, this list of conditions and the following disclaimer in
12
+ the documentation and/or other materials provided with the distribution.
13
+
14
+ 3. The names of the authors may not be used to endorse or promote products
15
+ derived from this software without specific prior written permission.
16
+
17
+ THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
18
+ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
19
+ FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
20
+ INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
21
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
23
+ OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
24
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
25
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
26
+ EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+ */
28
+
29
+ /*
30
+ * This program is based on JZlib 1.0.2 ymnk, JCraft,Inc.
31
+ * JZlib is based on zlib-1.1.3, so all credit should go authors
32
+ * Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
33
+ * and contributors of zlib.
34
+ */
35
+
36
+ // deno-lint-ignore-file no-this-alias prefer-const
37
+
38
// Global constants (zlib/JZlib deflate parameters)

const MAX_BITS = 15; // maximum bit length of any Huffman code
const D_CODES = 30; // number of distance codes
const BL_CODES = 19; // number of codes used to transfer the bit lengths

const LENGTH_CODES = 29; // number of length codes, not counting the special END_BLOCK code
const LITERALS = 256; // number of literal bytes 0..255
const L_CODES = (LITERALS + 1 + LENGTH_CODES); // number of literal/length codes, including END_BLOCK
const HEAP_SIZE = (2 * L_CODES + 1); // maximum size of the heap used to build trees

const END_BLOCK = 256; // end-of-block literal code

// Bit length codes must not exceed MAX_BL_BITS bits
const MAX_BL_BITS = 7;

// repeat previous bit length 3-6 times (2 bits of repeat count)
const REP_3_6 = 16;

// repeat a zero length 3-10 times (3 bits of repeat count)
const REPZ_3_10 = 17;

// repeat a zero length 11-138 times (7 bits of repeat count)
const REPZ_11_138 = 18;

// The lengths of the bit length codes are sent in order of decreasing
// probability, to avoid transmitting the lengths for unused bit
// length codes.

const Buf_size = 8 * 2; // size in bits of the output bit buffer

// JZlib version : "1.0.2"
const Z_DEFAULT_COMPRESSION = -1;

// compression strategy
const Z_FILTERED = 1;
const Z_HUFFMAN_ONLY = 2;
const Z_DEFAULT_STRATEGY = 0;

// flush modes passed to deflate()
const Z_NO_FLUSH = 0;
const Z_PARTIAL_FLUSH = 1;
const Z_FULL_FLUSH = 3;
const Z_FINISH = 4;

// return codes
const Z_OK = 0;
const Z_STREAM_END = 1;
const Z_NEED_DICT = 2;
const Z_STREAM_ERROR = -2;
const Z_DATA_ERROR = -3;
const Z_BUF_ERROR = -5;
88
+
89
+ // Tree
90
+
91
// Expand an array of [length, value] run pairs into a single flat array,
// where each pair contributes `length` copies of `value`.
function extractArray(array) {
	const runs = array.map(([length, value]) => new Array(length).fill(value, 0, length));
	return flatArray(runs);
}

// Recursively flatten an arbitrarily nested array into one level.
function flatArray(array) {
	const result = [];
	for (const entry of array) {
		if (Array.isArray(entry)) {
			result.push(...flatArray(entry));
		} else {
			result.push(entry);
		}
	}
	return result;
}
98
+
99
// see definition of array dist_code below
// Mapping table used by Tree.d_code: entries 0..255 cover small distances
// directly, entries from 256 on cover larger distances via (dist >>> 7).
const _dist_code = [0, 1, 2, 3].concat(...extractArray([
	[2, 4], [2, 5], [4, 6], [4, 7], [8, 8], [8, 9], [16, 10], [16, 11], [32, 12], [32, 13], [64, 14], [64, 15], [2, 0], [1, 16],
	[1, 17], [2, 18], [2, 19], [4, 20], [4, 21], [8, 22], [8, 23], [16, 24], [16, 25], [32, 26], [32, 27], [64, 28], [64, 29]
]));
104
+
105
// Tree: constructs one dynamic Huffman tree for the current block and
// assigns the code bit strings and lengths (JZlib/zlib port).
function Tree() {
	const that = this;

	// Fields assigned by callers before build_tree is invoked:
	// dyn_tree; // the dynamic tree (freq in even slots, dad/len in odd slots)
	// max_code; // largest code with non zero frequency
	// stat_desc; // the corresponding static tree

	// Compute the optimal bit lengths for a tree and update the total bit
	// length
	// for the current block.
	// IN assertion: the fields freq and dad are set, heap[heap_max] and
	// above are the tree nodes sorted by increasing frequency.
	// OUT assertions: the field len is set to the optimal bit length, the
	// array bl_count contains the frequencies for each bit length.
	// The length opt_len is updated; static_len is also updated if stree is
	// not null.
	function gen_bitlen(s) {
		const tree = that.dyn_tree;
		const stree = that.stat_desc.static_tree;
		const extra = that.stat_desc.extra_bits;
		const base = that.stat_desc.extra_base;
		const max_length = that.stat_desc.max_length;
		let h; // heap index
		let n, m; // iterate over the tree elements
		let bits; // bit length
		let xbits; // extra bits
		let f; // frequency
		let overflow = 0; // number of elements with bit length too large

		for (bits = 0; bits <= MAX_BITS; bits++)
			s.bl_count[bits] = 0;

		// In a first pass, compute the optimal bit lengths (which may
		// overflow in the case of the bit length tree).
		tree[s.heap[s.heap_max] * 2 + 1] = 0; // root of the heap

		for (h = s.heap_max + 1; h < HEAP_SIZE; h++) {
			n = s.heap[h];
			bits = tree[tree[n * 2 + 1] * 2 + 1] + 1;
			if (bits > max_length) {
				bits = max_length;
				overflow++;
			}
			tree[n * 2 + 1] = bits;
			// We overwrite tree[n*2+1] which is no longer needed

			if (n > that.max_code)
				continue; // not a leaf node

			s.bl_count[bits]++;
			xbits = 0;
			if (n >= base)
				xbits = extra[n - base];
			f = tree[n * 2];
			s.opt_len += f * (bits + xbits);
			if (stree)
				s.static_len += f * (stree[n * 2 + 1] + xbits);
		}
		if (overflow === 0)
			return;

		// This happens for example on obj2 and pic of the Calgary corpus
		// Find the first bit length which could increase:
		do {
			bits = max_length - 1;
			while (s.bl_count[bits] === 0)
				bits--;
			s.bl_count[bits]--; // move one leaf down the tree
			s.bl_count[bits + 1] += 2; // move one overflow item as its brother
			s.bl_count[max_length]--;
			// The brother of the overflow item also moves one step up,
			// but this does not affect bl_count[max_length]
			overflow -= 2;
		} while (overflow > 0);

		// Recompute all bit lengths, scanning in increasing frequency.
		// h still equals HEAP_SIZE after the first pass above.
		for (bits = max_length; bits !== 0; bits--) {
			n = s.bl_count[bits];
			while (n !== 0) {
				m = s.heap[--h];
				if (m > that.max_code)
					continue;
				if (tree[m * 2 + 1] != bits) {
					s.opt_len += (bits - tree[m * 2 + 1]) * tree[m * 2];
					tree[m * 2 + 1] = bits;
				}
				n--;
			}
		}
	}

	// Reverse the first len bits of a code, using straightforward code (a
	// faster
	// method would use a table)
	// IN assertion: 1 <= len <= 15
	function bi_reverse(code, // the value to invert
		len // its bit length
	) {
		let res = 0;
		do {
			res |= code & 1;
			code >>>= 1;
			res <<= 1;
		} while (--len > 0);
		return res >>> 1;
	}

	// Generate the codes for a given tree and bit counts (which need not be
	// optimal).
	// IN assertion: the array bl_count contains the bit length statistics for
	// the given tree and the field len is set for all tree elements.
	// OUT assertion: the field code is set for all tree elements of non
	// zero code length.
	function gen_codes(tree, // the tree to decorate
		max_code, // largest code with non zero frequency
		bl_count // number of codes at each bit length
	) {
		const next_code = []; // next code value for each
		// bit length
		let code = 0; // running code value
		let bits; // bit index
		let n; // code index
		let len;

		// The distribution counts are first used to generate the code values
		// without bit reversal.
		for (bits = 1; bits <= MAX_BITS; bits++) {
			next_code[bits] = code = ((code + bl_count[bits - 1]) << 1);
		}

		// Check that the bit counts in bl_count are consistent. The last code
		// must be all ones.
		// Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
		// "inconsistent bit counts");
		// Tracev((stderr,"gen_codes: max_code %d ", max_code));

		for (n = 0; n <= max_code; n++) {
			len = tree[n * 2 + 1];
			if (len === 0)
				continue;
			// Now reverse the bits
			tree[n * 2] = bi_reverse(next_code[len]++, len);
		}
	}

	// Construct one Huffman tree and assigns the code bit strings and lengths.
	// Update the total bit length for the current block.
	// IN assertion: the field freq is set for all tree elements.
	// OUT assertions: the fields len and code are set to the optimal bit length
	// and corresponding code. The length opt_len is updated; static_len is
	// also updated if stree is not null. The field max_code is set.
	that.build_tree = function (s) {
		const tree = that.dyn_tree;
		const stree = that.stat_desc.static_tree;
		const elems = that.stat_desc.elems;
		let n, m; // iterate over heap elements
		let max_code = -1; // largest code with non zero frequency
		let node; // new node being created

		// Construct the initial heap, with least frequent element in
		// heap[1]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
		// heap[0] is not used.
		s.heap_len = 0;
		s.heap_max = HEAP_SIZE;

		for (n = 0; n < elems; n++) {
			if (tree[n * 2] !== 0) {
				s.heap[++s.heap_len] = max_code = n;
				s.depth[n] = 0;
			} else {
				tree[n * 2 + 1] = 0;
			}
		}

		// The pkzip format requires that at least one distance code exists,
		// and that at least one bit should be sent even if there is only one
		// possible code. So to avoid special checks later on we force at least
		// two codes of non zero frequency.
		while (s.heap_len < 2) {
			node = s.heap[++s.heap_len] = max_code < 2 ? ++max_code : 0;
			tree[node * 2] = 1;
			s.depth[node] = 0;
			s.opt_len--;
			if (stree)
				s.static_len -= stree[node * 2 + 1];
			// node is 0 or 1 so it does not have extra bits
		}
		that.max_code = max_code;

		// The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
		// establish sub-heaps of increasing lengths:

		for (n = Math.floor(s.heap_len / 2); n >= 1; n--)
			s.pqdownheap(tree, n);

		// Construct the Huffman tree by repeatedly combining the least two
		// frequent nodes.

		node = elems; // next internal node of the tree
		do {
			// n = node of least frequency
			n = s.heap[1];
			s.heap[1] = s.heap[s.heap_len--];
			s.pqdownheap(tree, 1);
			m = s.heap[1]; // m = node of next least frequency

			s.heap[--s.heap_max] = n; // keep the nodes sorted by frequency
			s.heap[--s.heap_max] = m;

			// Create a new node father of n and m
			tree[node * 2] = (tree[n * 2] + tree[m * 2]);
			s.depth[node] = Math.max(s.depth[n], s.depth[m]) + 1;
			tree[n * 2 + 1] = tree[m * 2 + 1] = node;

			// and insert the new node in the heap
			s.heap[1] = node++;
			s.pqdownheap(tree, 1);
		} while (s.heap_len >= 2);

		s.heap[--s.heap_max] = s.heap[1];

		// At this point, the fields freq and dad are set. We can now
		// generate the bit lengths.

		gen_bitlen(s);

		// The field len is now set, we can generate the bit codes
		gen_codes(tree, that.max_code, s.bl_count);
	};

}
335
+
336
// Mapping from a match length (minus MIN_MATCH) to a length code.
Tree._length_code = [0, 1, 2, 3, 4, 5, 6, 7].concat(...extractArray([
	[2, 8], [2, 9], [2, 10], [2, 11], [4, 12], [4, 13], [4, 14], [4, 15], [8, 16], [8, 17], [8, 18], [8, 19],
	[16, 20], [16, 21], [16, 22], [16, 23], [32, 24], [32, 25], [32, 26], [31, 27], [1, 28]]));

// First normalized length for each length code (actual length = value + MIN_MATCH)
Tree.base_length = [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 0];

// First normalized distance for each distance code
Tree.base_dist = [0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384,
	24576];

// Mapping from a distance to a distance code. dist is the distance - 1 and
// must not have side effects. _dist_code[256] and _dist_code[257] are never
// used.
Tree.d_code = function (dist) {
	return ((dist) < 256 ? _dist_code[dist] : _dist_code[256 + ((dist) >>> 7)]);
};

// extra bits for each length code
Tree.extra_lbits = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0];

// extra bits for each distance code
Tree.extra_dbits = [0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13];

// extra bits for each bit length code
Tree.extra_blbits = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7];

// Order in which the code lengths of the bit length tree are transmitted
Tree.bl_order = [16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15];
362
+
363
+ // StaticTree
364
+
365
// Descriptor for a static Huffman tree: the tree data plus the extra-bit
// parameters used when encoding with it.
function StaticTree(static_tree, extra_bits, extra_base, elems, max_length) {
	this.static_tree = static_tree; // static tree, or null for the bit-length tree
	this.extra_bits = extra_bits; // extra bits for each code, or null
	this.extra_base = extra_base; // base index for extra_bits
	this.elems = elems; // max number of elements in the tree
	this.max_length = max_length; // max bit length for the codes
}
373
+
374
// Static literal/length tree: codes interleaved with their bit lengths.
// The first-part array holds the (bit-reversed) code values; the second-part
// array holds the bit length for each code (144 codes of 8 bits, 112 of 9,
// 24 of 7, 8 of 8 — the fixed Huffman code of the deflate format).
const static_ltree2_first_part = [12, 140, 76, 204, 44, 172, 108, 236, 28, 156, 92, 220, 60, 188, 124, 252, 2, 130, 66, 194, 34, 162, 98, 226, 18, 146, 82,
	210, 50, 178, 114, 242, 10, 138, 74, 202, 42, 170, 106, 234, 26, 154, 90, 218, 58, 186, 122, 250, 6, 134, 70, 198, 38, 166, 102, 230, 22, 150, 86,
	214, 54, 182, 118, 246, 14, 142, 78, 206, 46, 174, 110, 238, 30, 158, 94, 222, 62, 190, 126, 254, 1, 129, 65, 193, 33, 161, 97, 225, 17, 145, 81,
	209, 49, 177, 113, 241, 9, 137, 73, 201, 41, 169, 105, 233, 25, 153, 89, 217, 57, 185, 121, 249, 5, 133, 69, 197, 37, 165, 101, 229, 21, 149, 85,
	213, 53, 181, 117, 245, 13, 141, 77, 205, 45, 173, 109, 237, 29, 157, 93, 221, 61, 189, 125, 253, 19, 275, 147, 403, 83, 339, 211, 467, 51, 307,
	179, 435, 115, 371, 243, 499, 11, 267, 139, 395, 75, 331, 203, 459, 43, 299, 171, 427, 107, 363, 235, 491, 27, 283, 155, 411, 91, 347, 219, 475,
	59, 315, 187, 443, 123, 379, 251, 507, 7, 263, 135, 391, 71, 327, 199, 455, 39, 295, 167, 423, 103, 359, 231, 487, 23, 279, 151, 407, 87, 343, 215,
	471, 55, 311, 183, 439, 119, 375, 247, 503, 15, 271, 143, 399, 79, 335, 207, 463, 47, 303, 175, 431, 111, 367, 239, 495, 31, 287, 159, 415, 95,
	351, 223, 479, 63, 319, 191, 447, 127, 383, 255, 511, 0, 64, 32, 96, 16, 80, 48, 112, 8, 72, 40, 104, 24, 88, 56, 120, 4, 68, 36, 100, 20, 84, 52,
	116, 3, 131, 67, 195, 35, 163, 99, 227];
const static_ltree2_second_part = extractArray([[144, 8], [112, 9], [24, 7], [8, 8]]);
StaticTree.static_ltree = flatArray(static_ltree2_first_part.map((value, index) => [value, static_ltree2_second_part[index]]));

// Static distance tree: 30 codes, all 5 bits long.
const static_dtree_first_part = [0, 16, 8, 24, 4, 20, 12, 28, 2, 18, 10, 26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13, 29, 3, 19, 11, 27, 7, 23];
const static_dtree_second_part = extractArray([[30, 5]]);
StaticTree.static_dtree = flatArray(static_dtree_first_part.map((value, index) => [value, static_dtree_second_part[index]]));

// Descriptors combining each static tree with its extra-bit tables.
StaticTree.static_l_desc = new StaticTree(StaticTree.static_ltree, Tree.extra_lbits, LITERALS + 1, L_CODES, MAX_BITS);

StaticTree.static_d_desc = new StaticTree(StaticTree.static_dtree, Tree.extra_dbits, 0, D_CODES, MAX_BITS);

// The bit-length tree has no static tree of its own.
StaticTree.static_bl_desc = new StaticTree(null, Tree.extra_blbits, 0, BL_CODES, MAX_BL_BITS);
396
+
397
// Deflate

const MAX_MEM_LEVEL = 9; // maximum memory level
const DEF_MEM_LEVEL = 8; // default memory level
401
+
402
// Per-compression-level tuning parameters for the deflate match finder.
function Config(good_length, max_lazy, nice_length, max_chain, func) {
	this.good_length = good_length; // reduce lazy search above this match length
	this.max_lazy = max_lazy; // do not perform lazy search above this match length
	this.nice_length = nice_length; // quit search above this match length
	this.max_chain = max_chain; // maximum hash-chain length to search
	this.func = func; // which deflate function to use (STORED/FAST/SLOW)
}
410
+
411
// Deflate function selectors stored in Config.func
const STORED = 0;
const FAST = 1;
const SLOW = 2;

// Tuning table indexed by compression level 0..9.
const config_table = [
	new Config(0, 0, 0, 0, STORED), // level 0: store only
	new Config(4, 4, 8, 4, FAST), // level 1: fastest
	new Config(4, 5, 16, 8, FAST),
	new Config(4, 6, 32, 32, FAST),
	new Config(4, 4, 16, 16, SLOW), // level 4: lazy matching starts here
	new Config(8, 16, 32, 32, SLOW),
	new Config(8, 16, 128, 128, SLOW),
	new Config(8, 32, 128, 256, SLOW),
	new Config(32, 128, 258, 1024, SLOW),
	new Config(32, 258, 258, 4096, SLOW) // level 9: best compression
];
426
+
427
// Error message table, indexed by (2 - error_code); Z_NEED_DICT (2) maps to
// index 0, Z_VERSION_ERROR (-6) to index 8.
const z_errmsg = ["need dictionary", // Z_NEED_DICT
	// 2
	"stream end", // Z_STREAM_END 1
	"", // Z_OK 0
	"", // Z_ERRNO (-1)
	"stream error", // Z_STREAM_ERROR (-2)
	"data error", // Z_DATA_ERROR (-3)
	"", // Z_MEM_ERROR (-4)
	"buffer error", // Z_BUF_ERROR (-5)
	"",// Z_VERSION_ERROR (-6)
	""];
438
+
439
// block not completed, need more input or more output
const NeedMore = 0;

// block flush performed
const BlockDone = 1;

// finish started, need only more output at next deflate
const FinishStarted = 2;

// finish done, accept no more input or output
const FinishDone = 3;

// preset dictionary flag in zlib header
const PRESET_DICT = 0x20;

// deflate stream states
const INIT_STATE = 42;
const BUSY_STATE = 113;
const FINISH_STATE = 666;

// The deflate compression method
const Z_DEFLATED = 8;

// block types emitted in the compressed stream
const STORED_BLOCK = 0;
const STATIC_TREES = 1;
const DYN_TREES = 2;

// match length bounds for the LZ77 matcher
const MIN_MATCH = 3;
const MAX_MATCH = 258;
// Minimum amount of lookahead, except at the end of the input.
const MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);
468
+
469
// Heap ordering predicate: true when node n sorts before node m, comparing
// frequency first and using subtree depth as the tie breaker.
function smaller(tree, n, m, depth) {
	const freqN = tree[n * 2];
	const freqM = tree[m * 2];
	if (freqN !== freqM) {
		return freqN < freqM;
	}
	return depth[n] <= depth[m];
}
474
+
475
+ function Deflate() {
476
+
477
+ const that = this;
478
+ let strm; // pointer back to this zlib stream
479
+ let status; // as the name implies
480
+ // pending_buf; // output still pending
481
+ let pending_buf_size; // size of pending_buf
482
+ // pending_out; // next pending byte to output to the stream
483
+ // pending; // nb of bytes in the pending buffer
484
+
485
+ // dist_buf; // buffer for distances
486
+ // lc_buf; // buffer for literals or lengths
487
+ // To simplify the code, dist_buf and lc_buf have the same number of elements.
488
+ // To use different lengths, an extra flag array would be necessary.
489
+
490
+ let last_flush; // value of flush param for previous deflate call
491
+
492
+ let w_size; // LZ77 win size (32K by default)
493
+ let w_bits; // log2(w_size) (8..16)
494
+ let w_mask; // w_size - 1
495
+
496
+ let win;
497
+ // Sliding win. Input bytes are read into the second half of the win,
498
+ // and move to the first half later to keep a dictionary of at least wSize
499
+ // bytes. With this organization, matches are limited to a distance of
500
+ // wSize-MAX_MATCH bytes, but this ensures that IO is always
501
+ // performed with a length multiple of the block size. Also, it limits
502
+ // the win size to 64K, which is quite useful on MSDOS.
503
+ // To do: use the user input buffer as sliding win.
504
+
505
+ let window_size;
506
+ // Actual size of win: 2*wSize, except when the user input buffer
507
+ // is directly used as sliding win.
508
+
509
+ let prev;
510
+ // Link to older string with same hash index. To limit the size of this
511
+ // array to 64K, this link is maintained only for the last 32K strings.
512
+ // An index in this array is thus a win index modulo 32K.
513
+
514
+ let head; // Heads of the hash chains or NIL.
515
+
516
+ let ins_h; // hash index of string to be inserted
517
+ let hash_size; // number of elements in hash table
518
+ let hash_bits; // log2(hash_size)
519
+ let hash_mask; // hash_size-1
520
+
521
+ // Number of bits by which ins_h must be shifted at each input
522
+ // step. It must be such that after MIN_MATCH steps, the oldest
523
+ // byte no longer takes part in the hash key, that is:
524
+ // hash_shift * MIN_MATCH >= hash_bits
525
+ let hash_shift;
526
+
527
+ // Window position at the beginning of the current output block. Gets
528
+ // negative when the win is moved backwards.
529
+
530
+ let block_start;
531
+
532
+ let match_length; // length of best match
533
+ let prev_match; // previous match
534
+ let match_available; // set if previous match exists
535
+ let strstart; // start of string to insert
536
+ let match_start; // start of matching string
537
+ let lookahead; // number of valid bytes ahead in win
538
+
539
+ // Length of the best match at previous step. Matches not greater than this
540
+ // are discarded. This is used in the lazy match evaluation.
541
+ let prev_length;
542
+
543
+ // To speed up deflation, hash chains are never searched beyond this
544
+ // length. A higher limit improves compression ratio but degrades the speed.
545
+ let max_chain_length;
546
+
547
+ // Attempt to find a better match only when the current match is strictly
548
+ // smaller than this value. This mechanism is used only for compression
549
+ // levels >= 4.
550
+ let max_lazy_match;
551
+
552
+ // Insert new strings in the hash table only if the match length is not
553
+ // greater than this length. This saves time but degrades compression.
554
+ // max_insert_length is used only for compression levels <= 3.
555
+
556
+ let level; // compression level (1..9)
557
+ let strategy; // favor or force Huffman coding
558
+
559
+ // Use a faster search when the previous match is longer than this
560
+ let good_match;
561
+
562
+ // Stop searching when current match exceeds this
563
+ let nice_match;
564
+
565
+ let dyn_ltree; // literal and length tree
566
+ let dyn_dtree; // distance tree
567
+ let bl_tree; // Huffman tree for bit lengths
568
+
569
+ const l_desc = new Tree(); // desc for literal tree
570
+ const d_desc = new Tree(); // desc for distance tree
571
+ const bl_desc = new Tree(); // desc for bit length tree
572
+
573
+ // that.heap_len; // number of elements in the heap
574
+ // that.heap_max; // element of largest frequency
575
+ // The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
576
+ // The same heap array is used to build all trees.
577
+
578
+ // Depth of each subtree used as tie breaker for trees of equal frequency
579
+ that.depth = [];
580
+
581
+ // Size of match buffer for literals/lengths. There are 4 reasons for
582
+ // limiting lit_bufsize to 64K:
583
+ // - frequencies can be kept in 16 bit counters
584
+ // - if compression is not successful for the first block, all input
585
+ // data is still in the win so we can still emit a stored block even
586
+ // when input comes from standard input. (This can also be done for
587
+ // all blocks if lit_bufsize is not greater than 32K.)
588
+ // - if compression is not successful for a file smaller than 64K, we can
589
+ // even emit a stored file instead of a stored block (saving 5 bytes).
590
+ // This is applicable only for zip (not gzip or zlib).
591
+ // - creating new Huffman trees less frequently may not provide fast
592
+ // adaptation to changes in the input data statistics. (Take for
593
+ // example a binary file with poorly compressible code followed by
594
+ // a highly compressible string table.) Smaller buffer sizes give
595
+ // fast adaptation but have of course the overhead of transmitting
596
+ // trees more frequently.
597
+ // - I can't count above 4
598
+ let lit_bufsize;
599
+
600
+ let last_lit; // running index in dist_buf and lc_buf
601
+
602
+ // that.opt_len; // bit length of current block with optimal trees
603
+ // that.static_len; // bit length of current block with static trees
604
+ let matches; // number of string matches in current block
605
+ let last_eob_len; // bit length of EOB code for last block
606
+
607
+ // Output buffer. bits are inserted starting at the bottom (least
608
+ // significant bits).
609
+ let bi_buf;
610
+
611
+ // Number of valid bits in bi_buf. All bits above the last valid bit
612
+ // are always zero.
613
+ let bi_valid;
614
+
615
+ // number of codes at each bit length for an optimal tree
616
+ that.bl_count = [];
617
+
618
+ // heap used to build the Huffman trees
619
+ that.heap = [];
620
+
621
+ dyn_ltree = [];
622
+ dyn_dtree = [];
623
+ bl_tree = [];
624
+
625
// (Re)initialize the LZ77 matcher state for a new compression stream.
function lm_init() {
    window_size = 2 * w_size;

    // Clear the head of every hash chain so no stale matches are found.
    for (let i = hash_size - 1; i >= 0; i--) {
        head[i] = 0;
    }

    // Pull the tuning parameters for the configured compression level.
    const cfg = config_table[level];
    max_lazy_match = cfg.max_lazy;
    good_match = cfg.good_length;
    nice_match = cfg.nice_length;
    max_chain_length = cfg.max_chain;

    // Reset the scanning state to the start of the window.
    strstart = 0;
    block_start = 0;
    lookahead = 0;
    match_length = prev_length = MIN_MATCH - 1;
    match_available = 0;
    ins_h = 0;
}
646
+
647
// Reset the per-block statistics: zero the frequency half of each
// dynamic tree and clear the literal/match counters.
function init_block() {
    const clearFrequencies = (tree, codes) => {
        for (let i = 0; i < codes; i++) {
            tree[i * 2] = 0;
        }
    };
    clearFrequencies(dyn_ltree, L_CODES);
    clearFrequencies(dyn_dtree, D_CODES);
    clearFrequencies(bl_tree, BL_CODES);

    // Every block ends with an end-of-block code, so seed its count.
    dyn_ltree[END_BLOCK * 2] = 1;
    that.opt_len = that.static_len = 0;
    last_lit = matches = 0;
}
661
+
662
+ // Initialize the tree data structures for a new zlib stream.
663
// Initialize the tree data structures for a new zlib stream: wire each
// tree descriptor to its dynamic tree and static template, reset the
// bit buffer, and start the first block.
function tr_init() {
    l_desc.dyn_tree = dyn_ltree;
    l_desc.stat_desc = StaticTree.static_l_desc;

    d_desc.dyn_tree = dyn_dtree;
    d_desc.stat_desc = StaticTree.static_d_desc;

    bl_desc.dyn_tree = bl_tree;
    bl_desc.stat_desc = StaticTree.static_bl_desc;

    // Empty bit buffer; nothing pending.
    bi_buf = 0;
    bi_valid = 0;
    last_eob_len = 8; // enough lookahead for inflate

    // Initialize the first block of the first file.
    init_block();
}
681
+
682
+ // Restore the heap property by moving down the tree starting at node k,
683
+ // exchanging a node with the smallest of its two sons if necessary,
684
+ // stopping
685
+ // when the heap property is re-established (each father smaller than its
686
+ // two sons).
687
// Restore the heap property by sifting the node at index k down the
// tree, swapping it with the smaller of its two children until each
// parent is smaller than both children. `smaller` breaks frequency
// ties using the depth array.
that.pqdownheap = function (tree, // the tree to restore
    k // node to move down
) {
    const heap = that.heap;
    const node = heap[k];
    let child = k << 1; // left child of k
    while (child <= that.heap_len) {
        // Pick the smaller of the two children.
        if (child < that.heap_len && smaller(tree, heap[child + 1], heap[child], that.depth)) {
            child++;
        }
        // Stop once the node is no larger than its smallest child.
        if (smaller(tree, node, heap[child], that.depth)) {
            break;
        }
        // Pull the child up and continue from its position.
        heap[k] = heap[child];
        k = child;
        child <<= 1;
    }
    heap[k] = node;
};
710
+
711
+ // Scan a literal or distance tree to determine the frequencies of the codes
712
+ // in the bit length tree.
713
// Scan a literal or distance tree and accumulate, into bl_tree, the
// frequencies of the codes that will describe its code lengths:
// literal lengths 0..15 plus the three run-length codes REP_3_6,
// REPZ_3_10 and REPZ_11_138. Mirrors zlib's scan_tree() in trees.c.
function scan_tree(tree,// the tree to be scanned
    max_code // and its largest code of non zero frequency
) {
    let prevlen = -1; // last emitted length
    let curlen; // length of current code
    let nextlen = tree[0 * 2 + 1]; // length of next code
    let count = 0; // repeat count of the current code
    let max_count = 7; // max repeat count
    let min_count = 4; // min repeat count

    // A run of zero lengths can be encoded much longer (up to 138).
    if (nextlen === 0) {
        max_count = 138;
        min_count = 3;
    }
    tree[(max_code + 1) * 2 + 1] = 0xffff; // guard: forces the last run to flush

    for (let n = 0; n <= max_code; n++) {
        curlen = nextlen;
        nextlen = tree[(n + 1) * 2 + 1];
        if (++count < max_count && curlen == nextlen) {
            continue; // keep extending the current run
        } else if (count < min_count) {
            // Run too short for a repeat code: count each length directly.
            bl_tree[curlen * 2] += count;
        } else if (curlen !== 0) {
            if (curlen != prevlen)
                bl_tree[curlen * 2]++;
            bl_tree[REP_3_6 * 2]++; // repeat previous length 3-6 times
        } else if (count <= 10) {
            bl_tree[REPZ_3_10 * 2]++; // repeat zero 3-10 times
        } else {
            bl_tree[REPZ_11_138 * 2]++; // repeat zero 11-138 times
        }
        count = 0;
        prevlen = curlen;
        // Choose the run limits for the next run based on what follows.
        if (nextlen === 0) {
            max_count = 138;
            min_count = 3;
        } else if (curlen == nextlen) {
            max_count = 6;
            min_count = 3;
        } else {
            max_count = 7;
            min_count = 4;
        }
    }
}
759
+
760
+ // Construct the Huffman tree for the bit lengths and return the index in
761
+ // bl_order of the last bit length code to send.
762
// Construct the Huffman tree for the bit lengths and return the index
// in bl_order of the last bit length code that must be sent.
function build_bl_tree() {
    // Tally code-length frequencies from the literal and distance trees.
    scan_tree(dyn_ltree, l_desc.max_code);
    scan_tree(dyn_dtree, d_desc.max_code);

    // Build the bit length tree from those frequencies. After this,
    // opt_len includes the tree representations, except the lengths of
    // the bit length codes and the 5+5+4 bits for the counts.
    bl_desc.build_tree(that);

    // The pkzip format requires at least 4 bit length codes to be sent
    // (appnote.txt says 3 but the actual value used is 4), so search
    // backwards for the last non-zero code but never below index 3.
    let max_blindex = BL_CODES - 1;
    while (max_blindex >= 3 && bl_tree[Tree.bl_order[max_blindex] * 2 + 1] === 0) {
        max_blindex--;
    }

    // Account for the bit length tree and the 5+5+4 bits of counts.
    that.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;

    return max_blindex;
}
787
+
788
+ // Output a byte on the stream.
789
+ // IN assertion: there is enough room in pending_buf.
790
// Append one byte to the pending output buffer.
// IN assertion: there is enough room in pending_buf.
function put_byte(p) {
    that.pending_buf[that.pending] = p;
    that.pending += 1;
}
793
+
794
// Append a 16-bit value to the pending buffer, least significant byte first.
function put_short(w) {
    const lo = w & 0xff;
    const hi = (w >>> 8) & 0xff;
    put_byte(lo);
    put_byte(hi);
}
798
+
799
// Append a 16-bit value to the pending buffer, most significant byte
// first (big-endian order, used for zlib header fields).
function putShortMSB(b) {
    put_byte((b >> 8) & 0xff);
    // The original masked twice: `(b & 0xff) & 0xff`; a single mask is
    // equivalent.
    put_byte(b & 0xff);
}
803
+
804
// Insert `length` bits of `value` into the output bit buffer, least
// significant bits first. If the 16-bit buffer would overflow, flush
// the low 16 bits with put_short and keep the spill-over bits.
function send_bits(value, length) {
    let val;
    const len = length;
    if (bi_valid > Buf_size - len) {
        // Not enough room: fill the buffer, emit it, keep the remainder.
        val = value;
        // bi_buf |= (val << bi_valid);
        bi_buf |= ((val << bi_valid) & 0xffff);
        put_short(bi_buf);
        bi_buf = val >>> (Buf_size - bi_valid);
        bi_valid += len - Buf_size;
    } else {
        // Enough room: just append the bits above the valid ones.
        // bi_buf |= (value) << bi_valid;
        bi_buf |= (((value) << bi_valid) & 0xffff);
        bi_valid += len;
    }
}
820
+
821
// Emit code `c` from `tree`: tree[2*c] holds the code bits and
// tree[2*c+1] its length in bits.
function send_code(c, tree) {
    const idx = c * 2;
    send_bits(tree[idx] & 0xffff, tree[idx + 1] & 0xffff);
}
825
+
826
+ // Send a literal or distance tree in compressed form, using the codes in
827
+ // bl_tree.
828
// Send a literal or distance tree in compressed form, using the codes
// in bl_tree. Emits the same runs that scan_tree counted, so the two
// functions must stay in lockstep. Mirrors zlib's send_tree().
function send_tree(tree,// the tree to be sent
    max_code // and its largest code of non zero frequency
) {
    let n; // iterates over all tree elements
    let prevlen = -1; // last emitted length
    let curlen; // length of current code
    let nextlen = tree[0 * 2 + 1]; // length of next code
    let count = 0; // repeat count of the current code
    let max_count = 7; // max repeat count
    let min_count = 4; // min repeat count

    // A run of zero lengths can be encoded much longer (up to 138).
    if (nextlen === 0) {
        max_count = 138;
        min_count = 3;
    }

    for (n = 0; n <= max_code; n++) {
        curlen = nextlen;
        nextlen = tree[(n + 1) * 2 + 1];
        if (++count < max_count && curlen == nextlen) {
            continue; // keep extending the current run
        } else if (count < min_count) {
            // Run too short for a repeat code: emit each length directly.
            do {
                send_code(curlen, bl_tree);
            } while (--count !== 0);
        } else if (curlen !== 0) {
            // Non-zero run: emit the length once if it changed, then
            // a "repeat previous" code with a 2-bit extra count.
            if (curlen != prevlen) {
                send_code(curlen, bl_tree);
                count--;
            }
            send_code(REP_3_6, bl_tree);
            send_bits(count - 3, 2);
        } else if (count <= 10) {
            // Short zero run: 3-bit extra count.
            send_code(REPZ_3_10, bl_tree);
            send_bits(count - 3, 3);
        } else {
            // Long zero run: 7-bit extra count.
            send_code(REPZ_11_138, bl_tree);
            send_bits(count - 11, 7);
        }
        count = 0;
        prevlen = curlen;
        // Choose the run limits for the next run based on what follows.
        if (nextlen === 0) {
            max_count = 138;
            min_count = 3;
        } else if (curlen == nextlen) {
            max_count = 6;
            min_count = 3;
        } else {
            max_count = 7;
            min_count = 4;
        }
    }
}
881
+
882
+ // Send the header for a block using dynamic Huffman trees: the counts, the
883
+ // lengths of the bit length codes, the literal tree and the distance tree.
884
+ // IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
885
// Send the header for a block using dynamic Huffman trees: the counts,
// the lengths of the bit length codes, the literal tree and the
// distance tree.
// IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
function send_all_trees(lcodes, dcodes, blcodes) {
    // Code counts; the offsets are mandated by the DEFLATE format.
    send_bits(lcodes - 257, 5); // not +255 as stated in appnote.txt
    send_bits(dcodes - 1, 5);
    send_bits(blcodes - 4, 4); // not -3 as stated in appnote.txt

    // Bit lengths of the bit length codes, in bl_order, 3 bits each.
    for (let rank = 0; rank < blcodes; rank++) {
        send_bits(bl_tree[Tree.bl_order[rank] * 2 + 1], 3);
    }

    send_tree(dyn_ltree, lcodes - 1); // literal tree
    send_tree(dyn_dtree, dcodes - 1); // distance tree
}
897
+
898
+ // Flush the bit buffer, keeping at most 7 bits in it.
899
// Flush whole bytes out of the bit buffer, keeping at most 7 bits in it.
function bi_flush() {
    if (bi_valid == 16) {
        // Buffer completely full: emit both bytes and reset.
        put_short(bi_buf);
        bi_buf = 0;
        bi_valid = 0;
        return;
    }
    if (bi_valid >= 8) {
        // At least one whole byte: emit it and shift the rest down.
        put_byte(bi_buf & 0xff);
        bi_buf >>>= 8;
        bi_valid -= 8;
    }
}
910
+
911
+ // Send one empty static block to give enough lookahead for inflate.
912
+ // This takes 10 bits, of which 7 may remain in the bit buffer.
913
+ // The current inflate code requires 9 bits of lookahead. If the
914
+ // last two codes for the previous block (real code plus EOB) were coded
915
+ // on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
916
+ // the last real code. In this case we send two empty static blocks instead
917
+ // of one. (There are no problems if the previous block is stored or fixed.)
918
+ // To simplify the code, we assume the worst case of last real code encoded
919
+ // on one bit only.
920
// Send one empty static block to give enough lookahead for inflate
// (see the long comment above for the 10-bit / 9-bit reasoning).
function _tr_align() {
    send_bits(STATIC_TREES << 1, 3);
    send_code(END_BLOCK, StaticTree.static_ltree);

    bi_flush();

    // Of the 10 bits for the empty block, we have already sent
    // (10 - bi_valid) bits. The lookahead for the last real code (before
    // the EOB of the previous block) was thus at least one plus the length
    // of the EOB plus what we have just sent of the empty static block.
    if (1 + last_eob_len + 10 - bi_valid < 9) {
        // Not enough lookahead yet: send a second empty static block.
        send_bits(STATIC_TREES << 1, 3);
        send_code(END_BLOCK, StaticTree.static_ltree);
        bi_flush();
    }
    last_eob_len = 7; // length of the static EOB code just sent
}
937
+
938
+ // Save the match info and tally the frequency counts. Return true if
939
+ // the current block must be flushed.
940
// Save the match info into dist_buf/lc_buf and tally the frequency
// counts. Returns true if the current block must be flushed, either
// because the match buffer is full or because the compression-ratio
// heuristic below says a flush is worthwhile.
function _tr_tally(dist, // distance of matched string
    lc // match length-MIN_MATCH or unmatched char (if dist==0)
) {
    let out_length, in_length, dcode;
    that.dist_buf[last_lit] = dist;
    that.lc_buf[last_lit] = lc & 0xff;
    last_lit++;

    if (dist === 0) {
        // lc is the unmatched char
        dyn_ltree[lc * 2]++;
    } else {
        matches++;
        // Here, lc is the match length - MIN_MATCH
        dist--; // dist = match distance - 1
        dyn_ltree[(Tree._length_code[lc] + LITERALS + 1) * 2]++;
        dyn_dtree[Tree.d_code(dist) * 2]++;
    }

    // Every 8192 tallies (at levels > 2), estimate whether flushing now
    // would pay off.
    if ((last_lit & 0x1fff) === 0 && level > 2) {
        // Compute an upper bound for the compressed length
        out_length = last_lit * 8;
        in_length = strstart - block_start;
        for (dcode = 0; dcode < D_CODES; dcode++) {
            out_length += dyn_dtree[dcode * 2] * (5 + Tree.extra_dbits[dcode]);
        }
        out_length >>>= 3;
        // Flush if matches are scarce and the estimate beats half the input.
        if ((matches < Math.floor(last_lit / 2)) && out_length < Math.floor(in_length / 2))
            return true;
    }

    return (last_lit == lit_bufsize - 1);
    // We avoid equality with lit_bufsize because of wraparound at 64K
    // on 16 bit machines and because stored blocks are restricted to
    // 64K-1 bytes.
}
976
+
977
+ // Send the block data compressed using the given Huffman trees
978
// Send the block data compressed using the given Huffman trees: replay
// the tallied literals/matches from dist_buf/lc_buf, then emit the
// end-of-block code. The exact emission order defines the bitstream.
function compress_block(ltree, dtree) {
    let dist; // distance of matched string
    let lc; // match length or unmatched char (if dist === 0)
    let lx = 0; // running index in dist_buf and lc_buf
    let code; // the code to send
    let extra; // number of extra bits to send

    if (last_lit !== 0) {
        do {
            dist = that.dist_buf[lx];
            lc = that.lc_buf[lx];
            lx++;

            if (dist === 0) {
                send_code(lc, ltree); // send a literal byte
            } else {
                // Here, lc is the match length - MIN_MATCH
                code = Tree._length_code[lc];

                send_code(code + LITERALS + 1, ltree); // send the length
                // code
                extra = Tree.extra_lbits[code];
                if (extra !== 0) {
                    lc -= Tree.base_length[code];
                    send_bits(lc, extra); // send the extra length bits
                }
                dist--; // dist is now the match distance - 1
                code = Tree.d_code(dist);

                send_code(code, dtree); // send the distance code
                extra = Tree.extra_dbits[code];
                if (extra !== 0) {
                    dist -= Tree.base_dist[code];
                    send_bits(dist, extra); // send the extra distance bits
                }
            } // literal or match pair ?
        } while (lx < last_lit);
    }

    send_code(END_BLOCK, ltree);
    // Remember the EOB length so _tr_align can reason about lookahead.
    last_eob_len = ltree[END_BLOCK * 2 + 1];
}
1020
+
1021
+ // Flush the bit buffer and align the output on a byte boundary
1022
// Flush the bit buffer entirely and align the output on a byte boundary.
function bi_windup() {
    const bits = bi_valid;
    if (bits > 8) {
        put_short(bi_buf);
    } else if (bits > 0) {
        put_byte(bi_buf & 0xff);
    }
    bi_buf = 0;
    bi_valid = 0;
}
1031
+
1032
+ // Copy a stored block, storing first the length and its
1033
+ // one's complement if requested.
1034
// Copy a stored block from the window into the pending buffer, writing
// first the length and its one's complement if requested.
function copy_block(buf, // the input data
    len, // its length
    header // true if block header must be written
) {
    bi_windup(); // align on byte boundary
    last_eob_len = 8; // enough lookahead for inflate

    if (header) {
        // Stored-block header: length followed by its one's complement.
        put_short(len);
        put_short(~len);
    }

    // Bulk-copy the raw bytes from the sliding window.
    const source = win.subarray(buf, buf + len);
    that.pending_buf.set(source, that.pending);
    that.pending += len;
}
1049
+
1050
+ // Send a stored block
1051
// Send a stored (uncompressed) block.
function _tr_stored_block(buf, // input block
    stored_len, // length of input block
    eof // true if this is the last block for a file
) {
    // 3-bit block header: block type STORED plus the final-block flag.
    const blockHeader = (STORED_BLOCK << 1) + (eof ? 1 : 0);
    send_bits(blockHeader, 3);
    copy_block(buf, stored_len, true); // with header
}
1058
+
1059
+ // Determine the best encoding for the current block: dynamic trees, static
1060
+ // trees or store, and output the encoded block to the zip file.
1061
// Determine the best encoding for the current block — dynamic trees,
// static trees or stored — and output the encoded block.
function _tr_flush_block(buf, // input block, or NULL if too old
    stored_len, // length of input block
    eof // true if this is the last block for a file
) {
    let opt_lenb, static_lenb;// opt_len and static_len in bytes
    let max_blindex = 0; // index of last bit length code of non zero freq

    // Build the Huffman trees unless a stored block is forced
    if (level > 0) {
        // Construct the literal and distance trees
        l_desc.build_tree(that);

        d_desc.build_tree(that);

        // At this point, opt_len and static_len are the total bit lengths
        // of
        // the compressed block data, excluding the tree representations.

        // Build the bit length tree for the above two trees, and get the
        // index
        // in bl_order of the last bit length code to send.
        max_blindex = build_bl_tree();

        // Determine the best encoding. Compute first the block length in
        // bytes
        opt_lenb = (that.opt_len + 3 + 7) >>> 3;
        static_lenb = (that.static_len + 3 + 7) >>> 3;

        if (static_lenb <= opt_lenb)
            opt_lenb = static_lenb;
    } else {
        opt_lenb = static_lenb = stored_len + 5; // force a stored block
    }

    if ((stored_len + 4 <= opt_lenb) && buf != -1) {
        // 4: two words for the lengths
        // The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
        // Otherwise we can't have processed more than WSIZE input bytes
        // since
        // the last block flush, because compression would have been
        // successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
        // transform a block into a stored block.
        _tr_stored_block(buf, stored_len, eof);
    } else if (static_lenb == opt_lenb) {
        // Static trees win (or tie): no tree description needed.
        send_bits((STATIC_TREES << 1) + (eof ? 1 : 0), 3);
        compress_block(StaticTree.static_ltree, StaticTree.static_dtree);
    } else {
        // Dynamic trees win: send the tree descriptions first.
        send_bits((DYN_TREES << 1) + (eof ? 1 : 0), 3);
        send_all_trees(l_desc.max_code + 1, d_desc.max_code + 1, max_blindex + 1);
        compress_block(dyn_ltree, dyn_dtree);
    }

    // The above check is made mod 2^32, for files larger than 512 MB
    // and uLong implemented on 32 bits.

    init_block();

    if (eof) {
        bi_windup();
    }
}
1122
+
1123
// Flush everything accumulated since block_start as one block, advance
// block_start, and push pending output to the stream.
function flush_block_only(eof) {
    const blockStart = block_start >= 0 ? block_start : -1;
    _tr_flush_block(blockStart, strstart - block_start, eof);
    block_start = strstart;
    strm.flush_pending();
}
1128
+
1129
+ // Fill the win when the lookahead becomes insufficient.
1130
+ // Updates strstart and lookahead.
1131
+ //
1132
+ // IN assertion: lookahead < MIN_LOOKAHEAD
1133
+ // OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
1134
+ // At least one byte has been read, or avail_in === 0; reads are
1135
+ // performed for at least two bytes (required for the zip translate_eol
1136
+ // option -- not supported here).
1137
// Fill the win when the lookahead becomes insufficient; updates
// strstart and lookahead. When the window is nearly full, slides the
// upper half down and rebases the hash chains accordingly.
//
// IN assertion: lookahead < MIN_LOOKAHEAD
// OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
// At least one byte has been read, or avail_in === 0.
function fill_window() {
    let n, m;
    let p;
    let more; // Amount of free space at the end of the win.

    do {
        more = (window_size - lookahead - strstart);

        // Deal with !@#$% 64K limit:
        if (more === 0 && strstart === 0 && lookahead === 0) {
            more = w_size;
        } else if (more == -1) {
            // Very unlikely, but possible on 16 bit machine if strstart ==
            // 0
            // and lookahead == 1 (input done one byte at time)
            more--;

            // If the win is almost full and there is insufficient
            // lookahead,
            // move the upper half to the lower one to make room in the
            // upper half.
        } else if (strstart >= w_size + w_size - MIN_LOOKAHEAD) {
            // Slide the window down by w_size.
            win.set(win.subarray(w_size, w_size + w_size), 0);

            match_start -= w_size;
            strstart -= w_size; // we now have strstart >= MAX_DIST
            block_start -= w_size;

            // Slide the hash table (could be avoided with 32 bit values
            // at the expense of memory usage). We slide even when level ==
            // 0
            // to keep the hash table consistent if we switch back to level
            // > 0
            // later. (Using level 0 permanently is not an optimal usage of
            // zlib, so we don't care about this pathological case.)

            n = hash_size;
            p = n;
            do {
                m = (head[--p] & 0xffff);
                head[p] = (m >= w_size ? m - w_size : 0);
            } while (--n !== 0);

            n = w_size;
            p = n;
            do {
                m = (prev[--p] & 0xffff);
                prev[p] = (m >= w_size ? m - w_size : 0);
                // If n is not on any hash chain, prev[n] is garbage but
                // its value will never be used.
            } while (--n !== 0);
            more += w_size;
        }

        if (strm.avail_in === 0)
            return;

        // If there was no sliding:
        // strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
        // more == window_size - lookahead - strstart
        // => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
        // => more >= window_size - 2*WSIZE + 2
        // In the BIG_MEM or MMAP case (not yet supported),
        // window_size == input_size + MIN_LOOKAHEAD &&
        // strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
        // Otherwise, window_size == 2*WSIZE so more >= 2.
        // If there was sliding, more >= WSIZE. So in all cases, more >= 2.

        n = strm.read_buf(win, strstart + lookahead, more);
        lookahead += n;

        // Initialize the hash value now that we have some input:
        if (lookahead >= MIN_MATCH) {
            ins_h = win[strstart] & 0xff;
            ins_h = (((ins_h) << hash_shift) ^ (win[strstart + 1] & 0xff)) & hash_mask;
        }
        // If the whole input has less than MIN_MATCH bytes, ins_h is
        // garbage,
        // but this is not important since only literal bytes will be
        // emitted.
    } while (lookahead < MIN_LOOKAHEAD && strm.avail_in !== 0);
}
1219
+
1220
+ // Copy without compression as much as possible from the input stream,
1221
+ // return
1222
+ // the current block state.
1223
+ // This function does not insert new strings in the dictionary since
1224
+ // uncompressible data is probably not useful. This function is used
1225
+ // only for the level=0 compression option.
1226
+ // NOTE: this function should be optimized to avoid extra copying from
1227
+ // win to pending_buf.
1228
// Copy without compression as much as possible from the input stream
// and return the current block state. Used only for level=0. Does not
// insert strings in the dictionary since uncompressible data is
// probably not useful.
function deflate_stored(flush) {
    // Stored blocks are limited to 0xffff bytes, pending_buf is limited
    // to pending_buf_size, and each stored block has a 5 byte header:

    let max_block_size = 0xffff;
    let max_start;

    if (max_block_size > pending_buf_size - 5) {
        max_block_size = pending_buf_size - 5;
    }

    // Copy as much as possible from input to output:
    // eslint-disable-next-line no-constant-condition
    while (true) {
        // Fill the win as much as possible:
        if (lookahead <= 1) {
            fill_window();
            if (lookahead === 0 && flush == Z_NO_FLUSH)
                return NeedMore;
            if (lookahead === 0)
                break; // flush the current block
        }

        strstart += lookahead;
        lookahead = 0;

        // Emit a stored block if pending_buf will be full:
        max_start = block_start + max_block_size;
        if (strstart === 0 || strstart >= max_start) {
            // strstart === 0 is possible when wraparound on 16-bit machine
            lookahead = (strstart - max_start);
            strstart = max_start;

            flush_block_only(false);
            if (strm.avail_out === 0)
                return NeedMore;

        }

        // Flush if we may have to slide, otherwise block_start may become
        // negative and the data will be gone:
        if (strstart - block_start >= w_size - MIN_LOOKAHEAD) {
            flush_block_only(false);
            if (strm.avail_out === 0)
                return NeedMore;
        }
    }

    // Final flush for this call; the return value encodes whether the
    // stream is finishing and whether output space remains.
    flush_block_only(flush == Z_FINISH);
    if (strm.avail_out === 0)
        return (flush == Z_FINISH) ? FinishStarted : NeedMore;

    return flush == Z_FINISH ? FinishDone : BlockDone;
}
1282
+
1283
// Find the longest match in the window for the string starting at
// strstart, walking the hash chain from cur_match. Returns the match
// length (capped at lookahead); sets match_start as a side effect.
function longest_match(cur_match) {
    let chain_length = max_chain_length; // max hash chain length
    let scan = strstart; // current string
    let match; // matched string
    let len; // length of current match
    let best_len = prev_length; // best match length so far
    const limit = strstart > (w_size - MIN_LOOKAHEAD) ? strstart - (w_size - MIN_LOOKAHEAD) : 0;
    let _nice_match = nice_match;

    // Stop when cur_match becomes <= limit. To simplify the code,
    // we prevent matches with the string of win index 0.

    const wmask = w_mask;

    const strend = strstart + MAX_MATCH;
    let scan_end1 = win[scan + best_len - 1];
    let scan_end = win[scan + best_len];

    // The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of
    // 16.
    // It is easy to get rid of this optimization if necessary.

    // Do not waste too much time if we already have a good match:
    if (prev_length >= good_match) {
        chain_length >>= 2;
    }

    // Do not look for matches beyond the end of the input. This is
    // necessary
    // to make deflate deterministic.
    if (_nice_match > lookahead)
        _nice_match = lookahead;

    do {
        match = cur_match;

        // Skip to next match if the match length cannot increase
        // or if the match length is less than 2:
        if (win[match + best_len] != scan_end || win[match + best_len - 1] != scan_end1 || win[match] != win[scan]
            || win[++match] != win[scan + 1])
            continue;

        // The check at best_len-1 can be removed because it will be made
        // again later. (This heuristic is not always a win.)
        // It is not necessary to compare scan[2] and match[2] since they
        // are always equal when the other bytes match, given that
        // the hash keys are equal and that HASH_BITS >= 8.
        scan += 2;
        match++;

        // We check for insufficient lookahead only every 8th comparison;
        // the 256th check will be made at strstart+258.
        // eslint-disable-next-line no-empty
        do {
            // empty block: all the work happens in the unrolled condition
        } while (win[++scan] == win[++match] && win[++scan] == win[++match] && win[++scan] == win[++match]
            && win[++scan] == win[++match] && win[++scan] == win[++match] && win[++scan] == win[++match]
            && win[++scan] == win[++match] && win[++scan] == win[++match] && scan < strend);

        len = MAX_MATCH - (strend - scan);
        scan = strend - MAX_MATCH;

        if (len > best_len) {
            // New best match: remember where it starts.
            match_start = cur_match;
            best_len = len;
            if (len >= _nice_match)
                break; // good enough, stop searching
            scan_end1 = win[scan + best_len - 1];
            scan_end = win[scan + best_len];
        }

    } while ((cur_match = (prev[cur_match & wmask] & 0xffff)) > limit && --chain_length !== 0);

    if (best_len <= lookahead)
        return best_len;
    return lookahead;
}
1360
+
1361
+ // Compress as much as possible from the input stream, return the current
1362
+ // block state.
1363
+ // This function does not perform lazy evaluation of matches and inserts
1364
+ // new strings in the dictionary only for unmatched strings or for short
1365
+ // matches. It is used only for the fast compression options.
1366
// Compress as much as possible from the input stream and return the
// current block state. No lazy evaluation of matches: new strings are
// inserted in the dictionary only for unmatched strings or for short
// matches. Used only for the fast compression options.
function deflate_fast(flush) {
    // short hash_head = 0; // head of the hash chain
    let hash_head = 0; // head of the hash chain
    let bflush; // set if current block must be flushed

    // eslint-disable-next-line no-constant-condition
    while (true) {
        // Make sure that we always have enough lookahead, except
        // at the end of the input file. We need MAX_MATCH bytes
        // for the next match, plus MIN_MATCH bytes to insert the
        // string following the next match.
        if (lookahead < MIN_LOOKAHEAD) {
            fill_window();
            if (lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
                return NeedMore;
            }
            if (lookahead === 0)
                break; // flush the current block
        }

        // Insert the string win[strstart .. strstart+2] in the
        // dictionary, and set hash_head to the head of the hash chain:
        if (lookahead >= MIN_MATCH) {
            ins_h = (((ins_h) << hash_shift) ^ (win[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;

            // prev[strstart&w_mask]=hash_head=head[ins_h];
            hash_head = (head[ins_h] & 0xffff);
            prev[strstart & w_mask] = head[ins_h];
            head[ins_h] = strstart;
        }

        // Find the longest match, discarding those <= prev_length.
        // At this point we have always match_length < MIN_MATCH

        if (hash_head !== 0 && ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD) {
            // To simplify the code, we prevent matches with the string
            // of win index 0 (in particular we have to avoid a match
            // of the string with itself at the start of the input file).
            if (strategy != Z_HUFFMAN_ONLY) {
                match_length = longest_match(hash_head);
            }
            // longest_match() sets match_start
        }
        if (match_length >= MIN_MATCH) {
            // check_match(strstart, match_start, match_length);

            bflush = _tr_tally(strstart - match_start, match_length - MIN_MATCH);

            lookahead -= match_length;

            // Insert new strings in the hash table only if the match length
            // is not too large. This saves time but degrades compression.
            if (match_length <= max_lazy_match && lookahead >= MIN_MATCH) {
                match_length--; // string at strstart already in hash table
                do {
                    strstart++;

                    ins_h = ((ins_h << hash_shift) ^ (win[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
                    // prev[strstart&w_mask]=hash_head=head[ins_h];
                    hash_head = (head[ins_h] & 0xffff);
                    prev[strstart & w_mask] = head[ins_h];
                    head[ins_h] = strstart;

                    // strstart never exceeds WSIZE-MAX_MATCH, so there are
                    // always MIN_MATCH bytes ahead.
                } while (--match_length !== 0);
                strstart++;
            } else {
                strstart += match_length;
                match_length = 0;
                ins_h = win[strstart] & 0xff;

                ins_h = (((ins_h) << hash_shift) ^ (win[strstart + 1] & 0xff)) & hash_mask;
                // If lookahead < MIN_MATCH, ins_h is garbage, but it does
                // not
                // matter since it will be recomputed at next deflate call.
            }
        } else {
            // No match, output a literal byte

            bflush = _tr_tally(0, win[strstart] & 0xff);
            lookahead--;
            strstart++;
        }
        if (bflush) {

            flush_block_only(false);
            if (strm.avail_out === 0)
                return NeedMore;
        }
    }

    // Final flush; encode finish/need-more in the return value.
    flush_block_only(flush == Z_FINISH);
    if (strm.avail_out === 0) {
        if (flush == Z_FINISH)
            return FinishStarted;
        else
            return NeedMore;
    }
    return flush == Z_FINISH ? FinishDone : BlockDone;
}
1467
+
1468
// Same as above, but achieves better compression. We use a lazy
// evaluation for matches: a match is finally adopted only if there is
// no better match at the next win position.
// Returns one of NeedMore / BlockDone / FinishStarted / FinishDone to
// tell the caller how to proceed.
function deflate_slow(flush) {
	// short hash_head = 0; // head of hash chain
	let hash_head = 0; // head of hash chain
	let bflush; // set if current block must be flushed
	let max_insert;

	// Process the input block.
	// eslint-disable-next-line no-constant-condition
	while (true) {
		// Make sure that we always have enough lookahead, except
		// at the end of the input file. We need MAX_MATCH bytes
		// for the next match, plus MIN_MATCH bytes to insert the
		// string following the next match.

		if (lookahead < MIN_LOOKAHEAD) {
			fill_window();
			if (lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
				return NeedMore;
			}
			if (lookahead === 0)
				break; // flush the current block
		}

		// Insert the string win[strstart .. strstart+2] in the
		// dictionary, and set hash_head to the head of the hash chain:

		if (lookahead >= MIN_MATCH) {
			ins_h = (((ins_h) << hash_shift) ^ (win[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
			// prev[strstart&w_mask]=hash_head=head[ins_h];
			hash_head = (head[ins_h] & 0xffff);
			prev[strstart & w_mask] = head[ins_h];
			head[ins_h] = strstart;
		}

		// Find the longest match, discarding those <= prev_length.
		prev_length = match_length;
		prev_match = match_start;
		match_length = MIN_MATCH - 1;

		if (hash_head !== 0 && prev_length < max_lazy_match && ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD) {
			// To simplify the code, we prevent matches with the string
			// of win index 0 (in particular we have to avoid a match
			// of the string with itself at the start of the input file).

			if (strategy != Z_HUFFMAN_ONLY) {
				match_length = longest_match(hash_head);
			}
			// longest_match() sets match_start

			if (match_length <= 5 && (strategy == Z_FILTERED || (match_length == MIN_MATCH && strstart - match_start > 4096))) {

				// If prev_match is also MIN_MATCH, match_start is garbage
				// but we will ignore the current match anyway.
				match_length = MIN_MATCH - 1;
			}
		}

		// If there was a match at the previous step and the current
		// match is not better, output the previous match:
		if (prev_length >= MIN_MATCH && match_length <= prev_length) {
			max_insert = strstart + lookahead - MIN_MATCH;
			// Do not insert strings in hash table beyond this.

			// check_match(strstart-1, prev_match, prev_length);

			bflush = _tr_tally(strstart - 1 - prev_match, prev_length - MIN_MATCH);

			// Insert in hash table all strings up to the end of the match.
			// strstart-1 and strstart are already inserted. If there is not
			// enough lookahead, the last two strings are not inserted in
			// the hash table.
			lookahead -= prev_length - 1;
			prev_length -= 2;
			do {
				if (++strstart <= max_insert) {
					ins_h = (((ins_h) << hash_shift) ^ (win[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
					// prev[strstart&w_mask]=hash_head=head[ins_h];
					hash_head = (head[ins_h] & 0xffff);
					prev[strstart & w_mask] = head[ins_h];
					head[ins_h] = strstart;
				}
			} while (--prev_length !== 0);
			match_available = 0;
			match_length = MIN_MATCH - 1;
			strstart++;

			if (bflush) {
				flush_block_only(false);
				if (strm.avail_out === 0)
					return NeedMore;
			}
		} else if (match_available !== 0) {

			// If there was no match at the previous position, output a
			// single literal. If there was a match but the current match
			// is longer, truncate the previous match to a single literal.

			bflush = _tr_tally(0, win[strstart - 1] & 0xff);

			if (bflush) {
				flush_block_only(false);
			}
			strstart++;
			lookahead--;
			if (strm.avail_out === 0)
				return NeedMore;
		} else {
			// There is no previous match to compare with, wait for
			// the next step to decide.

			match_available = 1;
			strstart++;
			lookahead--;
		}
	}

	// Emit the literal still pending from the lazy look-ahead, if any.
	if (match_available !== 0) {
		bflush = _tr_tally(0, win[strstart - 1] & 0xff);
		match_available = 0;
	}
	flush_block_only(flush == Z_FINISH);

	if (strm.avail_out === 0) {
		if (flush == Z_FINISH)
			return FinishStarted;
		else
			return NeedMore;
	}

	return flush == Z_FINISH ? FinishDone : BlockDone;
}
1602
+
1603
// Reset the stream and internal deflate state so compression can start
// over without reallocating buffers. Always returns Z_OK.
function deflateReset(strm) {
	// Clear the caller-visible counters and diagnostic message.
	strm.total_in = 0;
	strm.total_out = 0;
	strm.msg = null; //

	// Discard any output that was produced but not yet flushed.
	that.pending = 0;
	that.pending_out = 0;

	status = BUSY_STATE;
	last_flush = Z_NO_FLUSH;

	// Re-initialize the Huffman tree state and the LZ77 matcher.
	tr_init();
	lm_init();
	return Z_OK;
}
1618
+
1619
// Initialize the deflate state for the given stream.
// _level:  compression level 0..9 (Z_DEFAULT_COMPRESSION maps to 6)
// bits:    window size in bits, 9..15
// _method: must be Z_DEFLATED
// memLevel / _strategy: memory usage and matching strategy knobs
// Returns Z_OK on success or Z_STREAM_ERROR for invalid parameters.
that.deflateInit = function (strm, _level, bits, _method, memLevel, _strategy) {
	// Fill in defaults for omitted arguments.
	_method = _method || Z_DEFLATED;
	memLevel = memLevel || DEF_MEM_LEVEL;
	_strategy = _strategy || Z_DEFAULT_STRATEGY;

	// byte[] my_version=ZLIB_VERSION;

	//
	// if (!version || version[0] != my_version[0]
	// || stream_size != sizeof(z_stream)) {
	// return Z_VERSION_ERROR;
	// }

	strm.msg = null;

	if (_level == Z_DEFAULT_COMPRESSION)
		_level = 6;

	// Validate every parameter before touching any state.
	if (memLevel < 1 || memLevel > MAX_MEM_LEVEL)
		return Z_STREAM_ERROR;
	if (_method != Z_DEFLATED)
		return Z_STREAM_ERROR;
	if (bits < 9 || bits > 15)
		return Z_STREAM_ERROR;
	if (_level < 0 || _level > 9)
		return Z_STREAM_ERROR;
	if (_strategy < 0 || _strategy > Z_HUFFMAN_ONLY)
		return Z_STREAM_ERROR;

	strm.dstate = that;

	// Sliding-window geometry.
	w_bits = bits;
	w_size = 1 << w_bits;
	w_mask = w_size - 1;

	// Hash-table geometry for the LZ77 matcher.
	hash_bits = memLevel + 7;
	hash_size = 1 << hash_bits;
	hash_mask = hash_size - 1;
	hash_shift = Math.floor((hash_bits + MIN_MATCH - 1) / MIN_MATCH);

	win = new Uint8Array(w_size * 2);
	prev = [];
	head = [];

	lit_bufsize = 1 << (memLevel + 6); // 16K elements by default

	// Pending output buffer plus the distance/length-code buffers.
	pending_buf_size = lit_bufsize * 4;
	that.pending_buf = new Uint8Array(pending_buf_size);

	that.dist_buf = new Uint16Array(lit_bufsize);
	that.lc_buf = new Uint8Array(lit_bufsize);

	level = _level;
	strategy = _strategy;

	return deflateReset(strm);
};
1674
+
1675
// Release all buffers held by this deflate state.
// Returns Z_OK, Z_DATA_ERROR if the stream was ended mid-compression,
// or Z_STREAM_ERROR if called in an invalid state.
that.deflateEnd = function () {
	const canEnd = status == INIT_STATE || status == BUSY_STATE || status == FINISH_STATE;
	if (!canEnd) {
		return Z_STREAM_ERROR;
	}
	// Remember whether we are aborting mid-stream before tearing down.
	const endedMidStream = status == BUSY_STATE;
	// Deallocate in reverse order of allocations:
	that.lc_buf = null;
	that.dist_buf = null;
	that.pending_buf = null;
	head = null;
	prev = null;
	win = null;
	// free
	that.dstate = null;
	return endedMidStream ? Z_DATA_ERROR : Z_OK;
};
1690
+
1691
// Change the compression level and strategy of a live stream.
// Returns Z_OK (or the result of the intermediate flush) on success,
// Z_STREAM_ERROR for out-of-range arguments.
that.deflateParams = function (strm, _level, _strategy) {
	let result = Z_OK;

	if (_level == Z_DEFAULT_COMPRESSION) {
		_level = 6;
	}
	if (_level < 0 || _level > 9 || _strategy < 0 || _strategy > Z_HUFFMAN_ONLY) {
		return Z_STREAM_ERROR;
	}

	// Switching between compression functions (stored/fast/slow) on a
	// stream that already consumed input requires flushing whatever the
	// current function has buffered.
	const switchingFunction = config_table[level].func != config_table[_level].func;
	if (switchingFunction && strm.total_in !== 0) {
		// Flush the last buffer:
		result = strm.deflate(Z_PARTIAL_FLUSH);
	}

	if (level != _level) {
		level = _level;
		// Pull the tuning parameters for the new level.
		const cfg = config_table[level];
		max_lazy_match = cfg.max_lazy;
		good_match = cfg.good_length;
		nice_match = cfg.nice_length;
		max_chain_length = cfg.max_chain;
	}
	strategy = _strategy;
	return result;
};
1716
+
1717
// Preload the sliding window with a dictionary so the first bytes of
// input can match against it. Only valid immediately after deflateInit.
// Returns Z_OK or Z_STREAM_ERROR.
that.deflateSetDictionary = function (_strm, dictionary, dictLength) {
	if (!dictionary || status != INIT_STATE)
		return Z_STREAM_ERROR;

	let length = dictLength;
	let index = 0;

	// A dictionary shorter than one match is useless but not an error.
	if (length < MIN_MATCH)
		return Z_OK;
	// Only the tail of an oversized dictionary fits in the window.
	if (length > w_size - MIN_LOOKAHEAD) {
		length = w_size - MIN_LOOKAHEAD;
		index = dictLength - length; // use the tail of the dictionary
	}
	win.set(dictionary.subarray(index, index + length), 0);

	strstart = length;
	block_start = length;

	// Insert all strings in the hash table (except for the last two bytes).
	// lookahead stays 0, so ins_h will be recomputed at the next call of
	// fill_window. Prime the rolling hash with the first two bytes first.
	ins_h = win[0] & 0xff;
	ins_h = (((ins_h) << hash_shift) ^ (win[1] & 0xff)) & hash_mask;

	for (let n = 0; n <= length - MIN_MATCH; n++) {
		ins_h = (((ins_h) << hash_shift) ^ (win[(n) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
		prev[n & w_mask] = head[ins_h];
		head[ins_h] = n;
	}
	return Z_OK;
};
1749
+
1750
// Compress from _strm.next_in to _strm.next_out according to the given
// flush mode (Z_NO_FLUSH .. Z_FINISH). Returns Z_OK, Z_STREAM_END when
// the stream is complete, or Z_STREAM_ERROR / Z_BUF_ERROR on misuse —
// mirroring zlib's deflate() contract.
that.deflate = function (_strm, flush) {
	let i, header, level_flags, old_flush, bstate;

	if (flush > Z_FINISH || flush < 0) {
		return Z_STREAM_ERROR;
	}

	// Reject unusable streams: missing buffers, or a finished stream
	// being driven with anything other than Z_FINISH.
	if (!_strm.next_out || (!_strm.next_in && _strm.avail_in !== 0) || (status == FINISH_STATE && flush != Z_FINISH)) {
		_strm.msg = z_errmsg[Z_NEED_DICT - (Z_STREAM_ERROR)];
		return Z_STREAM_ERROR;
	}
	if (_strm.avail_out === 0) {
		_strm.msg = z_errmsg[Z_NEED_DICT - (Z_BUF_ERROR)];
		return Z_BUF_ERROR;
	}

	strm = _strm; // just in case
	old_flush = last_flush;
	last_flush = flush;

	// Write the zlib header
	if (status == INIT_STATE) {
		header = (Z_DEFLATED + ((w_bits - 8) << 4)) << 8;
		level_flags = ((level - 1) & 0xff) >> 1;

		if (level_flags > 3)
			level_flags = 3;
		header |= (level_flags << 6);
		if (strstart !== 0)
			header |= PRESET_DICT;
		// The two header bytes must be a multiple of 31 (zlib check bits).
		header += 31 - (header % 31);

		status = BUSY_STATE;
		putShortMSB(header);
	}

	// Flush as much pending output as possible
	if (that.pending !== 0) {
		strm.flush_pending();
		if (strm.avail_out === 0) {
			// console.log(" avail_out==0");
			// Since avail_out is 0, deflate will be called again with
			// more output space, but possibly with both pending and
			// avail_in equal to zero. There won't be anything to do,
			// but this is not an error situation so make sure we
			// return OK instead of BUF_ERROR at next call of deflate:
			last_flush = -1;
			return Z_OK;
		}

		// Make sure there is something to do and avoid duplicate
		// consecutive
		// flushes. For repeated and useless calls with Z_FINISH, we keep
		// returning Z_STREAM_END instead of Z_BUFF_ERROR.
	} else if (strm.avail_in === 0 && flush <= old_flush && flush != Z_FINISH) {
		strm.msg = z_errmsg[Z_NEED_DICT - (Z_BUF_ERROR)];
		return Z_BUF_ERROR;
	}

	// User must not provide more input after the first FINISH:
	if (status == FINISH_STATE && strm.avail_in !== 0) {
		_strm.msg = z_errmsg[Z_NEED_DICT - (Z_BUF_ERROR)];
		return Z_BUF_ERROR;
	}

	// Start a new block or continue the current one.
	if (strm.avail_in !== 0 || lookahead !== 0 || (flush != Z_NO_FLUSH && status != FINISH_STATE)) {
		bstate = -1;
		// Dispatch to the compression function configured for this level.
		switch (config_table[level].func) {
			case STORED:
				bstate = deflate_stored(flush);
				break;
			case FAST:
				bstate = deflate_fast(flush);
				break;
			case SLOW:
				bstate = deflate_slow(flush);
				break;
			default:
		}

		if (bstate == FinishStarted || bstate == FinishDone) {
			status = FINISH_STATE;
		}
		if (bstate == NeedMore || bstate == FinishStarted) {
			if (strm.avail_out === 0) {
				last_flush = -1; // avoid BUF_ERROR next call, see above
			}
			return Z_OK;
			// If flush != Z_NO_FLUSH && avail_out === 0, the next call
			// of deflate should use the same flush parameter to make sure
			// that the flush is complete. So we don't have to output an
			// empty block here, this will be done at next call. This also
			// ensures that for a very small output buffer, we emit at most
			// one empty block.
		}

		if (bstate == BlockDone) {
			if (flush == Z_PARTIAL_FLUSH) {
				_tr_align();
			} else { // FULL_FLUSH or SYNC_FLUSH
				_tr_stored_block(0, 0, false);
				// For a full flush, this empty block will be recognized
				// as a special marker by inflate_sync().
				if (flush == Z_FULL_FLUSH) {
					// state.head[s.hash_size-1]=0;
					for (i = 0; i < hash_size/*-1*/; i++)
						// forget history
						head[i] = 0;
				}
			}
			strm.flush_pending();
			if (strm.avail_out === 0) {
				last_flush = -1; // avoid BUF_ERROR at next call, see above
				return Z_OK;
			}
		}
	}

	if (flush != Z_FINISH)
		return Z_OK;
	return Z_STREAM_END;
};
1873
+ }
1874
+
1875
+ // ZStream
1876
+
1877
// ZStream: the stream object shared between the caller and the deflate
// state, mirroring zlib's z_stream structure. The caller supplies
// next_in / next_out buffers; the counters below track progress.
function ZStream() {
	// Read cursor into next_in and write cursor into next_out.
	this.next_in_index = 0;
	this.next_out_index = 0;
	// this.next_in; // next input byte
	this.avail_in = 0; // number of bytes available at next_in
	this.total_in = 0; // total nb of input bytes read so far
	// this.next_out; // next output byte should be put there
	this.avail_out = 0; // remaining free space at next_out
	this.total_out = 0; // total nb of bytes output so far
	// this.msg;
	// this.dstate;
}
1890
+
1891
ZStream.prototype = {
	// Create the deflate state lazily and initialize it for this stream.
	deflateInit(level, bits) {
		this.dstate = new Deflate();
		return this.dstate.deflateInit(this, level, bits || MAX_BITS);
	},

	// Drive compression; fails with Z_STREAM_ERROR if uninitialized.
	deflate(flush) {
		if (!this.dstate) {
			return Z_STREAM_ERROR;
		}
		return this.dstate.deflate(this, flush);
	},

	// Tear down the deflate state and detach it from this stream.
	deflateEnd() {
		if (!this.dstate)
			return Z_STREAM_ERROR;
		const ret = this.dstate.deflateEnd();
		this.dstate = null;
		return ret;
	},

	deflateParams(level, strategy) {
		return this.dstate ? this.dstate.deflateParams(this, level, strategy) : Z_STREAM_ERROR;
	},

	deflateSetDictionary(dictionary, dictLength) {
		return this.dstate ? this.dstate.deflateSetDictionary(this, dictionary, dictLength) : Z_STREAM_ERROR;
	},

	// Read a new buffer from the current input stream, update the
	// total number of bytes read. All deflate() input goes through
	// this function so some applications may wish to modify it to avoid
	// allocating a large strm->next_in buffer and copying from it.
	// (See also flush_pending()).
	read_buf(buf, start, size) {
		const len = Math.min(this.avail_in, size);
		if (len === 0)
			return 0;
		this.avail_in -= len;
		buf.set(this.next_in.subarray(this.next_in_index, this.next_in_index + len), start);
		this.next_in_index += len;
		this.total_in += len;
		return len;
	},

	// Flush as much pending output as possible. All deflate() output goes
	// through this function so some applications may wish to modify it
	// to avoid allocating a large strm->next_out buffer and copying into it.
	// (See also read_buf()).
	flush_pending() {
		const len = Math.min(this.dstate.pending, this.avail_out);
		if (len === 0)
			return;

		this.next_out.set(this.dstate.pending_buf.subarray(this.dstate.pending_out, this.dstate.pending_out + len), this.next_out_index);

		this.next_out_index += len;
		this.dstate.pending_out += len;
		this.total_out += len;
		this.avail_out -= len;
		this.dstate.pending -= len;
		// Once the pending buffer drains completely, rewind its cursor.
		if (this.dstate.pending === 0) {
			this.dstate.pending_out = 0;
		}
	}
};
1983
+
1984
+ // Deflate
1985
+
1986
// ZipDeflate: streaming raw-deflate compressor built on ZStream.
// options.chunkSize sizes the internal output buffer; options.level
// selects the compression level (default Z_DEFAULT_COMPRESSION).
function ZipDeflate(options) {
	const that = this;
	const z = new ZStream();
	const chunkSize = options && options.chunkSize ? options.chunkSize : 64 * 1024;
	const bufsize = getMaximumCompressedSize(chunkSize);
	const flush = Z_NO_FLUSH;
	const buf = new Uint8Array(bufsize);
	let level = options ? options.level : Z_DEFAULT_COMPRESSION;
	if (typeof level == "undefined")
		level = Z_DEFAULT_COMPRESSION;
	z.deflateInit(level);
	z.next_out = buf;

	// Concatenate a list of chunks into one freshly-allocated Uint8Array.
	function concat(chunks, totalSize) {
		const merged = new Uint8Array(totalSize);
		let offset = 0;
		chunks.forEach(function (chunk) {
			merged.set(chunk, offset);
			offset += chunk.length;
		});
		return merged;
	}

	// Compress one input chunk; returns the produced bytes (or undefined
	// for empty input). onprogress, if given, receives the input offset.
	that.append = function (data, onprogress) {
		const buffers = [];
		let lastIndex = 0;
		let bufferSize = 0;
		if (!data.length)
			return;
		z.next_in_index = 0;
		z.next_in = data;
		z.avail_in = data.length;
		do {
			z.next_out_index = 0;
			z.avail_out = bufsize;
			const err = z.deflate(flush);
			if (err != Z_OK)
				throw new Error("deflating: " + z.msg);
			if (z.next_out_index)
				if (z.next_out_index == bufsize)
					buffers.push(new Uint8Array(buf));
				else
					buffers.push(buf.subarray(0, z.next_out_index));
			bufferSize += z.next_out_index;
			if (onprogress && z.next_in_index > 0 && z.next_in_index != lastIndex) {
				onprogress(z.next_in_index);
				lastIndex = z.next_in_index;
			}
		} while (z.avail_in > 0 || z.avail_out === 0);
		if (buffers.length > 1)
			return concat(buffers, bufferSize);
		// Zero or one chunk: copy it (a subarray view must not escape).
		return buffers[0] ? new Uint8Array(buffers[0]) : new Uint8Array();
	};

	// Finish the stream: emit everything still buffered and end deflate.
	that.flush = function () {
		const buffers = [];
		let bufferSize = 0;
		do {
			z.next_out_index = 0;
			z.avail_out = bufsize;
			const err = z.deflate(Z_FINISH);
			if (err != Z_STREAM_END && err != Z_OK)
				throw new Error("deflating: " + z.msg);
			if (bufsize - z.avail_out > 0)
				buffers.push(buf.slice(0, z.next_out_index));
			bufferSize += z.next_out_index;
		} while (z.avail_in > 0 || z.avail_out === 0);
		z.deflateEnd();
		return concat(buffers, bufferSize);
	};
}
2056
+
2057
// Worst-case deflate output size for a given input size: the input
// itself plus 5 bytes of stored-block overhead per 16383-byte block.
function getMaximumCompressedSize(uncompressedSize) {
	const storedBlockCount = Math.floor(uncompressedSize / 16383) + 1;
	return uncompressedSize + storedBlockCount * 5;
}
2060
+
2061
+ export {
2062
+ ZipDeflate as Deflate
2063
+ };