zstdlib 0.1.0

Files changed (99)
  1. checksums.yaml +7 -0
  2. data/CHANGES.md +9 -0
  3. data/Gemfile +3 -0
  4. data/README.md +96 -0
  5. data/Rakefile +29 -0
  6. data/ext/zstdlib/extconf.rb +53 -0
  7. data/ext/zstdlib/ruby/zlib-2.2/zlib.c +4659 -0
  8. data/ext/zstdlib/ruby/zlib-2.3/zlib.c +4686 -0
  9. data/ext/zstdlib/ruby/zlib-2.4/zlib.c +4843 -0
  10. data/ext/zstdlib/ruby/zlib-2.5/zlib.c +4848 -0
  11. data/ext/zstdlib/ruby/zlib-2.6/zlib.c +4890 -0
  12. data/ext/zstdlib/zlib-1.2.11/adler32.c +186 -0
  13. data/ext/zstdlib/zlib-1.2.11/compress.c +86 -0
  14. data/ext/zstdlib/zlib-1.2.11/crc32.c +442 -0
  15. data/ext/zstdlib/zlib-1.2.11/crc32.h +441 -0
  16. data/ext/zstdlib/zlib-1.2.11/deflate.c +2163 -0
  17. data/ext/zstdlib/zlib-1.2.11/deflate.h +349 -0
  18. data/ext/zstdlib/zlib-1.2.11/gzclose.c +25 -0
  19. data/ext/zstdlib/zlib-1.2.11/gzguts.h +218 -0
  20. data/ext/zstdlib/zlib-1.2.11/gzlib.c +637 -0
  21. data/ext/zstdlib/zlib-1.2.11/gzread.c +654 -0
  22. data/ext/zstdlib/zlib-1.2.11/gzwrite.c +665 -0
  23. data/ext/zstdlib/zlib-1.2.11/infback.c +640 -0
  24. data/ext/zstdlib/zlib-1.2.11/inffast.c +323 -0
  25. data/ext/zstdlib/zlib-1.2.11/inffast.h +11 -0
  26. data/ext/zstdlib/zlib-1.2.11/inffixed.h +94 -0
  27. data/ext/zstdlib/zlib-1.2.11/inflate.c +1561 -0
  28. data/ext/zstdlib/zlib-1.2.11/inflate.h +125 -0
  29. data/ext/zstdlib/zlib-1.2.11/inftrees.c +304 -0
  30. data/ext/zstdlib/zlib-1.2.11/inftrees.h +62 -0
  31. data/ext/zstdlib/zlib-1.2.11/trees.c +1203 -0
  32. data/ext/zstdlib/zlib-1.2.11/trees.h +128 -0
  33. data/ext/zstdlib/zlib-1.2.11/uncompr.c +93 -0
  34. data/ext/zstdlib/zlib-1.2.11/zconf.h +534 -0
  35. data/ext/zstdlib/zlib-1.2.11/zlib.h +1912 -0
  36. data/ext/zstdlib/zlib-1.2.11/zutil.c +325 -0
  37. data/ext/zstdlib/zlib-1.2.11/zutil.h +271 -0
  38. data/ext/zstdlib/zlib.mk +14 -0
  39. data/ext/zstdlib/zlibwrapper/zlibwrapper.c +14 -0
  40. data/ext/zstdlib/zlibwrapper.mk +14 -0
  41. data/ext/zstdlib/zstd-1.3.8/lib/common/bitstream.h +455 -0
  42. data/ext/zstdlib/zstd-1.3.8/lib/common/compiler.h +140 -0
  43. data/ext/zstdlib/zstd-1.3.8/lib/common/cpu.h +215 -0
  44. data/ext/zstdlib/zstd-1.3.8/lib/common/debug.c +44 -0
  45. data/ext/zstdlib/zstd-1.3.8/lib/common/debug.h +134 -0
  46. data/ext/zstdlib/zstd-1.3.8/lib/common/entropy_common.c +236 -0
  47. data/ext/zstdlib/zstd-1.3.8/lib/common/error_private.c +54 -0
  48. data/ext/zstdlib/zstd-1.3.8/lib/common/error_private.h +76 -0
  49. data/ext/zstdlib/zstd-1.3.8/lib/common/fse.h +708 -0
  50. data/ext/zstdlib/zstd-1.3.8/lib/common/fse_decompress.c +309 -0
  51. data/ext/zstdlib/zstd-1.3.8/lib/common/huf.h +358 -0
  52. data/ext/zstdlib/zstd-1.3.8/lib/common/mem.h +380 -0
  53. data/ext/zstdlib/zstd-1.3.8/lib/common/pool.c +340 -0
  54. data/ext/zstdlib/zstd-1.3.8/lib/common/pool.h +84 -0
  55. data/ext/zstdlib/zstd-1.3.8/lib/common/threading.c +75 -0
  56. data/ext/zstdlib/zstd-1.3.8/lib/common/threading.h +123 -0
  57. data/ext/zstdlib/zstd-1.3.8/lib/common/xxhash.c +876 -0
  58. data/ext/zstdlib/zstd-1.3.8/lib/common/xxhash.h +305 -0
  59. data/ext/zstdlib/zstd-1.3.8/lib/common/zstd_common.c +83 -0
  60. data/ext/zstdlib/zstd-1.3.8/lib/common/zstd_errors.h +93 -0
  61. data/ext/zstdlib/zstd-1.3.8/lib/common/zstd_internal.h +266 -0
  62. data/ext/zstdlib/zstd-1.3.8/lib/compress/fse_compress.c +721 -0
  63. data/ext/zstdlib/zstd-1.3.8/lib/compress/hist.c +203 -0
  64. data/ext/zstdlib/zstd-1.3.8/lib/compress/hist.h +95 -0
  65. data/ext/zstdlib/zstd-1.3.8/lib/compress/huf_compress.c +798 -0
  66. data/ext/zstdlib/zstd-1.3.8/lib/compress/zstd_compress.c +4290 -0
  67. data/ext/zstdlib/zstd-1.3.8/lib/compress/zstd_compress_internal.h +860 -0
  68. data/ext/zstdlib/zstd-1.3.8/lib/compress/zstd_double_fast.c +499 -0
  69. data/ext/zstdlib/zstd-1.3.8/lib/compress/zstd_double_fast.h +38 -0
  70. data/ext/zstdlib/zstd-1.3.8/lib/compress/zstd_fast.c +391 -0
  71. data/ext/zstdlib/zstd-1.3.8/lib/compress/zstd_fast.h +37 -0
  72. data/ext/zstdlib/zstd-1.3.8/lib/compress/zstd_lazy.c +1106 -0
  73. data/ext/zstdlib/zstd-1.3.8/lib/compress/zstd_lazy.h +67 -0
  74. data/ext/zstdlib/zstd-1.3.8/lib/compress/zstd_ldm.c +597 -0
  75. data/ext/zstdlib/zstd-1.3.8/lib/compress/zstd_ldm.h +105 -0
  76. data/ext/zstdlib/zstd-1.3.8/lib/compress/zstd_opt.c +1217 -0
  77. data/ext/zstdlib/zstd-1.3.8/lib/compress/zstd_opt.h +56 -0
  78. data/ext/zstdlib/zstd-1.3.8/lib/compress/zstdmt_compress.c +2107 -0
  79. data/ext/zstdlib/zstd-1.3.8/lib/compress/zstdmt_compress.h +174 -0
  80. data/ext/zstdlib/zstd-1.3.8/lib/decompress/huf_decompress.c +1232 -0
  81. data/ext/zstdlib/zstd-1.3.8/lib/decompress/zstd_ddict.c +240 -0
  82. data/ext/zstdlib/zstd-1.3.8/lib/decompress/zstd_ddict.h +44 -0
  83. data/ext/zstdlib/zstd-1.3.8/lib/decompress/zstd_decompress.c +1672 -0
  84. data/ext/zstdlib/zstd-1.3.8/lib/decompress/zstd_decompress_block.c +1307 -0
  85. data/ext/zstdlib/zstd-1.3.8/lib/decompress/zstd_decompress_block.h +59 -0
  86. data/ext/zstdlib/zstd-1.3.8/lib/decompress/zstd_decompress_internal.h +168 -0
  87. data/ext/zstdlib/zstd-1.3.8/lib/zstd.h +1766 -0
  88. data/ext/zstdlib/zstd-1.3.8/zlibWrapper/gzclose.c +28 -0
  89. data/ext/zstdlib/zstd-1.3.8/zlibWrapper/gzcompatibility.h +68 -0
  90. data/ext/zstdlib/zstd-1.3.8/zlibWrapper/gzguts.h +227 -0
  91. data/ext/zstdlib/zstd-1.3.8/zlibWrapper/gzlib.c +640 -0
  92. data/ext/zstdlib/zstd-1.3.8/zlibWrapper/gzread.c +670 -0
  93. data/ext/zstdlib/zstd-1.3.8/zlibWrapper/gzwrite.c +671 -0
  94. data/ext/zstdlib/zstd-1.3.8/zlibWrapper/zstd_zlibwrapper.c +1105 -0
  95. data/ext/zstdlib/zstd-1.3.8/zlibWrapper/zstd_zlibwrapper.h +88 -0
  96. data/ext/zstdlib/zstd.mk +14 -0
  97. data/lib/zstdlib.rb +6 -0
  98. data/test/zstdlib_test.rb +21 -0
  99. metadata +198 -0
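
The bundled sources above pair zlib 1.2.11 with zstd 1.3.8 and its zlibWrapper, which suggests the gem exposes a Zlib-compatible Ruby interface backed by zstd. As a rough usage sketch only — the Zstdlib module methods below are an assumption inferred from data/lib/zstdlib.rb and the gem's zlib-style layout, not something shown in this diff:

    # Hypothetical drop-in usage, assuming Zstdlib mirrors Ruby's Zlib module API.
    require 'zstdlib'

    data       = 'hello ' * 1000
    compressed = Zstdlib.deflate(data)        # assumed one-shot compress, Zlib-style
    restored   = Zstdlib.inflate(compressed)  # assumed one-shot decompress
    puts restored == data                     # => true if the API matches Zlib's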
data/ext/zstdlib/zlib-1.2.11/deflate.c
@@ -0,0 +1,2163 @@
+ /* deflate.c -- compress data using the deflation algorithm
+ * Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+ /*
+ * ALGORITHM
+ *
+ * The "deflation" process depends on being able to identify portions
+ * of the input text which are identical to earlier input (within a
+ * sliding window trailing behind the input currently being processed).
+ *
+ * The most straightforward technique turns out to be the fastest for
+ * most input files: try all possible matches and select the longest.
+ * The key feature of this algorithm is that insertions into the string
+ * dictionary are very simple and thus fast, and deletions are avoided
+ * completely. Insertions are performed at each input character, whereas
+ * string matches are performed only when the previous match ends. So it
+ * is preferable to spend more time in matches to allow very fast string
+ * insertions and avoid deletions. The matching algorithm for small
+ * strings is inspired from that of Rabin & Karp. A brute force approach
+ * is used to find longer strings when a small match has been found.
+ * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
+ * (by Leonid Broukhis).
+ * A previous version of this file used a more sophisticated algorithm
+ * (by Fiala and Greene) which is guaranteed to run in linear amortized
+ * time, but has a larger average cost, uses more memory and is patented.
+ * However the F&G algorithm may be faster for some highly redundant
+ * files if the parameter max_chain_length (described below) is too large.
+ *
+ * ACKNOWLEDGEMENTS
+ *
+ * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
+ * I found it in 'freeze' written by Leonid Broukhis.
+ * Thanks to many people for bug reports and testing.
+ *
+ * REFERENCES
+ *
+ * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
+ * Available in http://tools.ietf.org/html/rfc1951
+ *
+ * A description of the Rabin and Karp algorithm is given in the book
+ * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
+ *
+ * Fiala,E.R., and Greene,D.H.
+ * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595
+ *
+ */
+
+ /* @(#) $Id$ */
+
+ #include "deflate.h"
+
+ const char deflate_copyright[] =
+ " deflate 1.2.11 Copyright 1995-2017 Jean-loup Gailly and Mark Adler ";
+ /*
+ If you use the zlib library in a product, an acknowledgment is welcome
+ in the documentation of your product. If for some reason you cannot
+ include such an acknowledgment, I would appreciate that you keep this
+ copyright string in the executable of your product.
+ */
+
+ /* ===========================================================================
+ * Function prototypes.
+ */
+ typedef enum {
+ need_more, /* block not completed, need more input or more output */
+ block_done, /* block flush performed */
+ finish_started, /* finish started, need only more output at next deflate */
+ finish_done /* finish done, accept no more input or output */
+ } block_state;
+
+ typedef block_state (*compress_func) OF((deflate_state *s, int flush));
+ /* Compression function. Returns the block state after the call. */
+
+ local int deflateStateCheck OF((z_streamp strm));
+ local void slide_hash OF((deflate_state *s));
+ local void fill_window OF((deflate_state *s));
+ local block_state deflate_stored OF((deflate_state *s, int flush));
+ local block_state deflate_fast OF((deflate_state *s, int flush));
+ #ifndef FASTEST
+ local block_state deflate_slow OF((deflate_state *s, int flush));
+ #endif
+ local block_state deflate_rle OF((deflate_state *s, int flush));
+ local block_state deflate_huff OF((deflate_state *s, int flush));
+ local void lm_init OF((deflate_state *s));
+ local void putShortMSB OF((deflate_state *s, uInt b));
+ local void flush_pending OF((z_streamp strm));
+ local unsigned read_buf OF((z_streamp strm, Bytef *buf, unsigned size));
+ #ifdef ASMV
+ # pragma message("Assembler code may have bugs -- use at your own risk")
+ void match_init OF((void)); /* asm code initialization */
+ uInt longest_match OF((deflate_state *s, IPos cur_match));
+ #else
+ local uInt longest_match OF((deflate_state *s, IPos cur_match));
+ #endif
+
+ #ifdef ZLIB_DEBUG
+ local void check_match OF((deflate_state *s, IPos start, IPos match,
+ int length));
+ #endif
+
+ /* ===========================================================================
+ * Local data
+ */
+
+ #define NIL 0
+ /* Tail of hash chains */
+
+ #ifndef TOO_FAR
+ # define TOO_FAR 4096
+ #endif
+ /* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
+
+ /* Values for max_lazy_match, good_match and max_chain_length, depending on
+ * the desired pack level (0..9). The values given below have been tuned to
+ * exclude worst case performance for pathological files. Better values may be
+ * found for specific files.
+ */
+ typedef struct config_s {
+ ush good_length; /* reduce lazy search above this match length */
+ ush max_lazy; /* do not perform lazy search above this match length */
+ ush nice_length; /* quit search above this match length */
+ ush max_chain;
+ compress_func func;
+ } config;
+
+ #ifdef FASTEST
+ local const config configuration_table[2] = {
+ /* good lazy nice chain */
+ /* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
+ /* 1 */ {4, 4, 8, 4, deflate_fast}}; /* max speed, no lazy matches */
+ #else
+ local const config configuration_table[10] = {
+ /* good lazy nice chain */
+ /* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
+ /* 1 */ {4, 4, 8, 4, deflate_fast}, /* max speed, no lazy matches */
+ /* 2 */ {4, 5, 16, 8, deflate_fast},
+ /* 3 */ {4, 6, 32, 32, deflate_fast},
+
+ /* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
+ /* 5 */ {8, 16, 32, 32, deflate_slow},
+ /* 6 */ {8, 16, 128, 128, deflate_slow},
+ /* 7 */ {8, 32, 128, 256, deflate_slow},
+ /* 8 */ {32, 128, 258, 1024, deflate_slow},
+ /* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */
+ #endif
+
149
+ /* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
150
+ * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
151
+ * meaning.
152
+ */
153
+
154
+ /* rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH */
155
+ #define RANK(f) (((f) * 2) - ((f) > 4 ? 9 : 0))
156
+
157
+ /* ===========================================================================
158
+ * Update a hash value with the given input byte
159
+ * IN assertion: all calls to UPDATE_HASH are made with consecutive input
160
+ * characters, so that a running hash key can be computed from the previous
161
+ * key instead of complete recalculation each time.
162
+ */
163
+ #define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
164
+
165
+
166
+ /* ===========================================================================
167
+ * Insert string str in the dictionary and set match_head to the previous head
168
+ * of the hash chain (the most recent string with same hash key). Return
169
+ * the previous length of the hash chain.
170
+ * If this file is compiled with -DFASTEST, the compression level is forced
171
+ * to 1, and no hash chains are maintained.
172
+ * IN assertion: all calls to INSERT_STRING are made with consecutive input
173
+ * characters and the first MIN_MATCH bytes of str are valid (except for
174
+ * the last MIN_MATCH-1 bytes of the input file).
175
+ */
176
+ #ifdef FASTEST
177
+ #define INSERT_STRING(s, str, match_head) \
178
+ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
179
+ match_head = s->head[s->ins_h], \
180
+ s->head[s->ins_h] = (Pos)(str))
181
+ #else
182
+ #define INSERT_STRING(s, str, match_head) \
183
+ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
184
+ match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \
185
+ s->head[s->ins_h] = (Pos)(str))
186
+ #endif
187
+
188
+ /* ===========================================================================
189
+ * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
190
+ * prev[] will be initialized on the fly.
191
+ */
192
+ #define CLEAR_HASH(s) \
193
+ s->head[s->hash_size-1] = NIL; \
194
+ zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
195
+
196
+ /* ===========================================================================
197
+ * Slide the hash table when sliding the window down (could be avoided with 32
198
+ * bit values at the expense of memory usage). We slide even when level == 0 to
199
+ * keep the hash table consistent if we switch back to level > 0 later.
200
+ */
201
+ local void slide_hash(s)
202
+ deflate_state *s;
203
+ {
204
+ unsigned n, m;
205
+ Posf *p;
206
+ uInt wsize = s->w_size;
207
+
208
+ n = s->hash_size;
209
+ p = &s->head[n];
210
+ do {
211
+ m = *--p;
212
+ *p = (Pos)(m >= wsize ? m - wsize : NIL);
213
+ } while (--n);
214
+ n = wsize;
215
+ #ifndef FASTEST
216
+ p = &s->prev[n];
217
+ do {
218
+ m = *--p;
219
+ *p = (Pos)(m >= wsize ? m - wsize : NIL);
220
+ /* If n is not on any hash chain, prev[n] is garbage but
221
+ * its value will never be used.
222
+ */
223
+ } while (--n);
224
+ #endif
225
+ }
226
+
227
+ /* ========================================================================= */
228
+ int ZEXPORT deflateInit_(strm, level, version, stream_size)
229
+ z_streamp strm;
230
+ int level;
231
+ const char *version;
232
+ int stream_size;
233
+ {
234
+ return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
235
+ Z_DEFAULT_STRATEGY, version, stream_size);
236
+ /* To do: ignore strm->next_in if we use it as window */
237
+ }
238
+
239
+ /* ========================================================================= */
240
+ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
241
+ version, stream_size)
242
+ z_streamp strm;
243
+ int level;
244
+ int method;
245
+ int windowBits;
246
+ int memLevel;
247
+ int strategy;
248
+ const char *version;
249
+ int stream_size;
250
+ {
251
+ deflate_state *s;
252
+ int wrap = 1;
253
+ static const char my_version[] = ZLIB_VERSION;
254
+
255
+ ushf *overlay;
256
+ /* We overlay pending_buf and d_buf+l_buf. This works since the average
257
+ * output size for (length,distance) codes is <= 24 bits.
258
+ */
259
+
260
+ if (version == Z_NULL || version[0] != my_version[0] ||
261
+ stream_size != sizeof(z_stream)) {
262
+ return Z_VERSION_ERROR;
263
+ }
264
+ if (strm == Z_NULL) return Z_STREAM_ERROR;
265
+
266
+ strm->msg = Z_NULL;
267
+ if (strm->zalloc == (alloc_func)0) {
268
+ #ifdef Z_SOLO
269
+ return Z_STREAM_ERROR;
270
+ #else
271
+ strm->zalloc = zcalloc;
272
+ strm->opaque = (voidpf)0;
273
+ #endif
274
+ }
275
+ if (strm->zfree == (free_func)0)
276
+ #ifdef Z_SOLO
277
+ return Z_STREAM_ERROR;
278
+ #else
279
+ strm->zfree = zcfree;
280
+ #endif
281
+
282
+ #ifdef FASTEST
283
+ if (level != 0) level = 1;
284
+ #else
285
+ if (level == Z_DEFAULT_COMPRESSION) level = 6;
286
+ #endif
287
+
288
+ if (windowBits < 0) { /* suppress zlib wrapper */
289
+ wrap = 0;
290
+ windowBits = -windowBits;
291
+ }
292
+ #ifdef GZIP
293
+ else if (windowBits > 15) {
294
+ wrap = 2; /* write gzip wrapper instead */
295
+ windowBits -= 16;
296
+ }
297
+ #endif
298
+ if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
299
+ windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
300
+ strategy < 0 || strategy > Z_FIXED || (windowBits == 8 && wrap != 1)) {
301
+ return Z_STREAM_ERROR;
302
+ }
303
+ if (windowBits == 8) windowBits = 9; /* until 256-byte window bug fixed */
304
+ s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
305
+ if (s == Z_NULL) return Z_MEM_ERROR;
306
+ strm->state = (struct internal_state FAR *)s;
307
+ s->strm = strm;
308
+ s->status = INIT_STATE; /* to pass state test in deflateReset() */
309
+
310
+ s->wrap = wrap;
311
+ s->gzhead = Z_NULL;
312
+ s->w_bits = (uInt)windowBits;
313
+ s->w_size = 1 << s->w_bits;
314
+ s->w_mask = s->w_size - 1;
315
+
316
+ s->hash_bits = (uInt)memLevel + 7;
317
+ s->hash_size = 1 << s->hash_bits;
318
+ s->hash_mask = s->hash_size - 1;
319
+ s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
320
+
321
+ s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
322
+ s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
323
+ s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
324
+
325
+ s->high_water = 0; /* nothing written to s->window yet */
326
+
327
+ s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
328
+
329
+ overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
330
+ s->pending_buf = (uchf *) overlay;
331
+ s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
332
+
333
+ if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
334
+ s->pending_buf == Z_NULL) {
335
+ s->status = FINISH_STATE;
336
+ strm->msg = ERR_MSG(Z_MEM_ERROR);
337
+ deflateEnd (strm);
338
+ return Z_MEM_ERROR;
339
+ }
340
+ s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
341
+ s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
342
+
343
+ s->level = level;
344
+ s->strategy = strategy;
345
+ s->method = (Byte)method;
346
+
347
+ return deflateReset(strm);
348
+ }
349
+
350
+ /* =========================================================================
351
+ * Check for a valid deflate stream state. Return 0 if ok, 1 if not.
352
+ */
353
+ local int deflateStateCheck (strm)
354
+ z_streamp strm;
355
+ {
356
+ deflate_state *s;
357
+ if (strm == Z_NULL ||
358
+ strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0)
359
+ return 1;
360
+ s = strm->state;
361
+ if (s == Z_NULL || s->strm != strm || (s->status != INIT_STATE &&
362
+ #ifdef GZIP
363
+ s->status != GZIP_STATE &&
364
+ #endif
365
+ s->status != EXTRA_STATE &&
366
+ s->status != NAME_STATE &&
367
+ s->status != COMMENT_STATE &&
368
+ s->status != HCRC_STATE &&
369
+ s->status != BUSY_STATE &&
370
+ s->status != FINISH_STATE))
371
+ return 1;
372
+ return 0;
373
+ }
374
+
375
+ /* ========================================================================= */
376
+ int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
377
+ z_streamp strm;
378
+ const Bytef *dictionary;
379
+ uInt dictLength;
380
+ {
381
+ deflate_state *s;
382
+ uInt str, n;
383
+ int wrap;
384
+ unsigned avail;
385
+ z_const unsigned char *next;
386
+
387
+ if (deflateStateCheck(strm) || dictionary == Z_NULL)
388
+ return Z_STREAM_ERROR;
389
+ s = strm->state;
390
+ wrap = s->wrap;
391
+ if (wrap == 2 || (wrap == 1 && s->status != INIT_STATE) || s->lookahead)
392
+ return Z_STREAM_ERROR;
393
+
394
+ /* when using zlib wrappers, compute Adler-32 for provided dictionary */
395
+ if (wrap == 1)
396
+ strm->adler = adler32(strm->adler, dictionary, dictLength);
397
+ s->wrap = 0; /* avoid computing Adler-32 in read_buf */
398
+
399
+ /* if dictionary would fill window, just replace the history */
400
+ if (dictLength >= s->w_size) {
401
+ if (wrap == 0) { /* already empty otherwise */
402
+ CLEAR_HASH(s);
403
+ s->strstart = 0;
404
+ s->block_start = 0L;
405
+ s->insert = 0;
406
+ }
407
+ dictionary += dictLength - s->w_size; /* use the tail */
408
+ dictLength = s->w_size;
409
+ }
410
+
411
+ /* insert dictionary into window and hash */
412
+ avail = strm->avail_in;
413
+ next = strm->next_in;
414
+ strm->avail_in = dictLength;
415
+ strm->next_in = (z_const Bytef *)dictionary;
416
+ fill_window(s);
417
+ while (s->lookahead >= MIN_MATCH) {
418
+ str = s->strstart;
419
+ n = s->lookahead - (MIN_MATCH-1);
420
+ do {
421
+ UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
422
+ #ifndef FASTEST
423
+ s->prev[str & s->w_mask] = s->head[s->ins_h];
424
+ #endif
425
+ s->head[s->ins_h] = (Pos)str;
426
+ str++;
427
+ } while (--n);
428
+ s->strstart = str;
429
+ s->lookahead = MIN_MATCH-1;
430
+ fill_window(s);
431
+ }
432
+ s->strstart += s->lookahead;
433
+ s->block_start = (long)s->strstart;
434
+ s->insert = s->lookahead;
435
+ s->lookahead = 0;
436
+ s->match_length = s->prev_length = MIN_MATCH-1;
437
+ s->match_available = 0;
438
+ strm->next_in = next;
439
+ strm->avail_in = avail;
440
+ s->wrap = wrap;
441
+ return Z_OK;
442
+ }
443
+
444
+ /* ========================================================================= */
445
+ int ZEXPORT deflateGetDictionary (strm, dictionary, dictLength)
446
+ z_streamp strm;
447
+ Bytef *dictionary;
448
+ uInt *dictLength;
449
+ {
450
+ deflate_state *s;
451
+ uInt len;
452
+
453
+ if (deflateStateCheck(strm))
454
+ return Z_STREAM_ERROR;
455
+ s = strm->state;
456
+ len = s->strstart + s->lookahead;
457
+ if (len > s->w_size)
458
+ len = s->w_size;
459
+ if (dictionary != Z_NULL && len)
460
+ zmemcpy(dictionary, s->window + s->strstart + s->lookahead - len, len);
461
+ if (dictLength != Z_NULL)
462
+ *dictLength = len;
463
+ return Z_OK;
464
+ }
465
+
466
+ /* ========================================================================= */
467
+ int ZEXPORT deflateResetKeep (strm)
468
+ z_streamp strm;
469
+ {
470
+ deflate_state *s;
471
+
472
+ if (deflateStateCheck(strm)) {
473
+ return Z_STREAM_ERROR;
474
+ }
475
+
476
+ strm->total_in = strm->total_out = 0;
477
+ strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
478
+ strm->data_type = Z_UNKNOWN;
479
+
480
+ s = (deflate_state *)strm->state;
481
+ s->pending = 0;
482
+ s->pending_out = s->pending_buf;
483
+
484
+ if (s->wrap < 0) {
485
+ s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */
486
+ }
487
+ s->status =
488
+ #ifdef GZIP
489
+ s->wrap == 2 ? GZIP_STATE :
490
+ #endif
491
+ s->wrap ? INIT_STATE : BUSY_STATE;
492
+ strm->adler =
493
+ #ifdef GZIP
494
+ s->wrap == 2 ? crc32(0L, Z_NULL, 0) :
495
+ #endif
496
+ adler32(0L, Z_NULL, 0);
497
+ s->last_flush = Z_NO_FLUSH;
498
+
499
+ _tr_init(s);
500
+
501
+ return Z_OK;
502
+ }
503
+
504
+ /* ========================================================================= */
505
+ int ZEXPORT deflateReset (strm)
506
+ z_streamp strm;
507
+ {
508
+ int ret;
509
+
510
+ ret = deflateResetKeep(strm);
511
+ if (ret == Z_OK)
512
+ lm_init(strm->state);
513
+ return ret;
514
+ }
515
+
516
+ /* ========================================================================= */
517
+ int ZEXPORT deflateSetHeader (strm, head)
518
+ z_streamp strm;
519
+ gz_headerp head;
520
+ {
521
+ if (deflateStateCheck(strm) || strm->state->wrap != 2)
522
+ return Z_STREAM_ERROR;
523
+ strm->state->gzhead = head;
524
+ return Z_OK;
525
+ }
526
+
527
+ /* ========================================================================= */
528
+ int ZEXPORT deflatePending (strm, pending, bits)
529
+ unsigned *pending;
530
+ int *bits;
531
+ z_streamp strm;
532
+ {
533
+ if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
534
+ if (pending != Z_NULL)
535
+ *pending = strm->state->pending;
536
+ if (bits != Z_NULL)
537
+ *bits = strm->state->bi_valid;
538
+ return Z_OK;
539
+ }
540
+
541
+ /* ========================================================================= */
542
+ int ZEXPORT deflatePrime (strm, bits, value)
543
+ z_streamp strm;
544
+ int bits;
545
+ int value;
546
+ {
547
+ deflate_state *s;
548
+ int put;
549
+
550
+ if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
551
+ s = strm->state;
552
+ if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3))
553
+ return Z_BUF_ERROR;
554
+ do {
555
+ put = Buf_size - s->bi_valid;
556
+ if (put > bits)
557
+ put = bits;
558
+ s->bi_buf |= (ush)((value & ((1 << put) - 1)) << s->bi_valid);
559
+ s->bi_valid += put;
560
+ _tr_flush_bits(s);
561
+ value >>= put;
562
+ bits -= put;
563
+ } while (bits);
564
+ return Z_OK;
565
+ }
566
+
567
+ /* ========================================================================= */
568
+ int ZEXPORT deflateParams(strm, level, strategy)
569
+ z_streamp strm;
570
+ int level;
571
+ int strategy;
572
+ {
573
+ deflate_state *s;
574
+ compress_func func;
575
+
576
+ if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
577
+ s = strm->state;
578
+
579
+ #ifdef FASTEST
580
+ if (level != 0) level = 1;
581
+ #else
582
+ if (level == Z_DEFAULT_COMPRESSION) level = 6;
583
+ #endif
584
+ if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) {
585
+ return Z_STREAM_ERROR;
586
+ }
587
+ func = configuration_table[s->level].func;
588
+
589
+ if ((strategy != s->strategy || func != configuration_table[level].func) &&
590
+ s->high_water) {
591
+ /* Flush the last buffer: */
592
+ int err = deflate(strm, Z_BLOCK);
593
+ if (err == Z_STREAM_ERROR)
594
+ return err;
595
+ if (strm->avail_out == 0)
596
+ return Z_BUF_ERROR;
597
+ }
598
+ if (s->level != level) {
599
+ if (s->level == 0 && s->matches != 0) {
600
+ if (s->matches == 1)
601
+ slide_hash(s);
602
+ else
603
+ CLEAR_HASH(s);
604
+ s->matches = 0;
605
+ }
606
+ s->level = level;
607
+ s->max_lazy_match = configuration_table[level].max_lazy;
608
+ s->good_match = configuration_table[level].good_length;
609
+ s->nice_match = configuration_table[level].nice_length;
610
+ s->max_chain_length = configuration_table[level].max_chain;
611
+ }
612
+ s->strategy = strategy;
613
+ return Z_OK;
614
+ }
615
+
616
+ /* ========================================================================= */
617
+ int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain)
618
+ z_streamp strm;
619
+ int good_length;
620
+ int max_lazy;
621
+ int nice_length;
622
+ int max_chain;
623
+ {
624
+ deflate_state *s;
625
+
626
+ if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
627
+ s = strm->state;
628
+ s->good_match = (uInt)good_length;
629
+ s->max_lazy_match = (uInt)max_lazy;
630
+ s->nice_match = nice_length;
631
+ s->max_chain_length = (uInt)max_chain;
632
+ return Z_OK;
633
+ }
634
+
635
+ /* =========================================================================
636
+ * For the default windowBits of 15 and memLevel of 8, this function returns
637
+ * a close to exact, as well as small, upper bound on the compressed size.
638
+ * They are coded as constants here for a reason--if the #define's are
639
+ * changed, then this function needs to be changed as well. The return
640
+ * value for 15 and 8 only works for those exact settings.
641
+ *
642
+ * For any setting other than those defaults for windowBits and memLevel,
643
+ * the value returned is a conservative worst case for the maximum expansion
644
+ * resulting from using fixed blocks instead of stored blocks, which deflate
645
+ * can emit on compressed data for some combinations of the parameters.
646
+ *
647
+ * This function could be more sophisticated to provide closer upper bounds for
648
+ * every combination of windowBits and memLevel. But even the conservative
649
+ * upper bound of about 14% expansion does not seem onerous for output buffer
650
+ * allocation.
651
+ */
652
+ uLong ZEXPORT deflateBound(strm, sourceLen)
653
+ z_streamp strm;
654
+ uLong sourceLen;
655
+ {
656
+ deflate_state *s;
657
+ uLong complen, wraplen;
658
+
659
+ /* conservative upper bound for compressed data */
660
+ complen = sourceLen +
661
+ ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5;
662
+
663
+ /* if can't get parameters, return conservative bound plus zlib wrapper */
664
+ if (deflateStateCheck(strm))
665
+ return complen + 6;
666
+
667
+ /* compute wrapper length */
668
+ s = strm->state;
669
+ switch (s->wrap) {
670
+ case 0: /* raw deflate */
671
+ wraplen = 0;
672
+ break;
673
+ case 1: /* zlib wrapper */
674
+ wraplen = 6 + (s->strstart ? 4 : 0);
675
+ break;
676
+ #ifdef GZIP
677
+ case 2: /* gzip wrapper */
678
+ wraplen = 18;
679
+ if (s->gzhead != Z_NULL) { /* user-supplied gzip header */
680
+ Bytef *str;
681
+ if (s->gzhead->extra != Z_NULL)
682
+ wraplen += 2 + s->gzhead->extra_len;
683
+ str = s->gzhead->name;
684
+ if (str != Z_NULL)
685
+ do {
686
+ wraplen++;
687
+ } while (*str++);
688
+ str = s->gzhead->comment;
689
+ if (str != Z_NULL)
690
+ do {
691
+ wraplen++;
692
+ } while (*str++);
693
+ if (s->gzhead->hcrc)
694
+ wraplen += 2;
695
+ }
696
+ break;
697
+ #endif
698
+ default: /* for compiler happiness */
699
+ wraplen = 6;
700
+ }
701
+
702
+ /* if not default parameters, return conservative bound */
703
+ if (s->w_bits != 15 || s->hash_bits != 8 + 7)
704
+ return complen + wraplen;
705
+
706
+ /* default settings: return tight bound for that case */
707
+ return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
708
+ (sourceLen >> 25) + 13 - 6 + wraplen;
709
+ }
710
+
711
+ /* =========================================================================
712
+ * Put a short in the pending buffer. The 16-bit value is put in MSB order.
713
+ * IN assertion: the stream state is correct and there is enough room in
714
+ * pending_buf.
715
+ */
716
+ local void putShortMSB (s, b)
717
+ deflate_state *s;
718
+ uInt b;
719
+ {
720
+ put_byte(s, (Byte)(b >> 8));
721
+ put_byte(s, (Byte)(b & 0xff));
722
+ }
723
+
724
+ /* =========================================================================
725
+ * Flush as much pending output as possible. All deflate() output, except for
726
+ * some deflate_stored() output, goes through this function so some
727
+ * applications may wish to modify it to avoid allocating a large
728
+ * strm->next_out buffer and copying into it. (See also read_buf()).
729
+ */
730
+ local void flush_pending(strm)
731
+ z_streamp strm;
732
+ {
733
+ unsigned len;
734
+ deflate_state *s = strm->state;
735
+
736
+ _tr_flush_bits(s);
737
+ len = s->pending;
738
+ if (len > strm->avail_out) len = strm->avail_out;
739
+ if (len == 0) return;
740
+
741
+ zmemcpy(strm->next_out, s->pending_out, len);
742
+ strm->next_out += len;
743
+ s->pending_out += len;
744
+ strm->total_out += len;
745
+ strm->avail_out -= len;
746
+ s->pending -= len;
747
+ if (s->pending == 0) {
748
+ s->pending_out = s->pending_buf;
749
+ }
750
+ }
751
+
752
+ /* ===========================================================================
753
+ * Update the header CRC with the bytes s->pending_buf[beg..s->pending - 1].
754
+ */
755
+ #define HCRC_UPDATE(beg) \
756
+ do { \
757
+ if (s->gzhead->hcrc && s->pending > (beg)) \
758
+ strm->adler = crc32(strm->adler, s->pending_buf + (beg), \
759
+ s->pending - (beg)); \
760
+ } while (0)
761
+
762
+ /* ========================================================================= */
763
+ int ZEXPORT deflate (strm, flush)
764
+ z_streamp strm;
765
+ int flush;
766
+ {
767
+ int old_flush; /* value of flush param for previous deflate call */
768
+ deflate_state *s;
769
+
770
+ if (deflateStateCheck(strm) || flush > Z_BLOCK || flush < 0) {
771
+ return Z_STREAM_ERROR;
772
+ }
773
+ s = strm->state;
774
+
775
+ if (strm->next_out == Z_NULL ||
776
+ (strm->avail_in != 0 && strm->next_in == Z_NULL) ||
777
+ (s->status == FINISH_STATE && flush != Z_FINISH)) {
778
+ ERR_RETURN(strm, Z_STREAM_ERROR);
779
+ }
780
+ if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
781
+
782
+ old_flush = s->last_flush;
783
+ s->last_flush = flush;
784
+
785
+ /* Flush as much pending output as possible */
786
+ if (s->pending != 0) {
787
+ flush_pending(strm);
788
+ if (strm->avail_out == 0) {
789
+ /* Since avail_out is 0, deflate will be called again with
790
+ * more output space, but possibly with both pending and
791
+ * avail_in equal to zero. There won't be anything to do,
792
+ * but this is not an error situation so make sure we
793
+ * return OK instead of BUF_ERROR at next call of deflate:
794
+ */
795
+ s->last_flush = -1;
796
+ return Z_OK;
797
+ }
798
+
799
+ /* Make sure there is something to do and avoid duplicate consecutive
800
+ * flushes. For repeated and useless calls with Z_FINISH, we keep
801
+ * returning Z_STREAM_END instead of Z_BUF_ERROR.
802
+ */
803
+ } else if (strm->avail_in == 0 && RANK(flush) <= RANK(old_flush) &&
804
+ flush != Z_FINISH) {
805
+ ERR_RETURN(strm, Z_BUF_ERROR);
806
+ }
807
+
808
+ /* User must not provide more input after the first FINISH: */
809
+ if (s->status == FINISH_STATE && strm->avail_in != 0) {
810
+ ERR_RETURN(strm, Z_BUF_ERROR);
811
+ }
812
+
813
+ /* Write the header */
814
+ if (s->status == INIT_STATE) {
815
+ /* zlib header */
816
+ uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
817
+ uInt level_flags;
818
+
819
+ if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2)
820
+ level_flags = 0;
821
+ else if (s->level < 6)
822
+ level_flags = 1;
823
+ else if (s->level == 6)
824
+ level_flags = 2;
825
+ else
826
+ level_flags = 3;
827
+ header |= (level_flags << 6);
828
+ if (s->strstart != 0) header |= PRESET_DICT;
829
+ header += 31 - (header % 31);
830
+
831
+ putShortMSB(s, header);
832
+
833
+ /* Save the adler32 of the preset dictionary: */
834
+ if (s->strstart != 0) {
835
+ putShortMSB(s, (uInt)(strm->adler >> 16));
836
+ putShortMSB(s, (uInt)(strm->adler & 0xffff));
837
+ }
838
+ strm->adler = adler32(0L, Z_NULL, 0);
839
+ s->status = BUSY_STATE;
840
+
841
+ /* Compression must start with an empty pending buffer */
842
+ flush_pending(strm);
843
+ if (s->pending != 0) {
844
+ s->last_flush = -1;
845
+ return Z_OK;
846
+ }
847
+ }
848
+ #ifdef GZIP
849
+ if (s->status == GZIP_STATE) {
850
+ /* gzip header */
851
+ strm->adler = crc32(0L, Z_NULL, 0);
852
+ put_byte(s, 31);
853
+ put_byte(s, 139);
854
+ put_byte(s, 8);
855
+ if (s->gzhead == Z_NULL) {
856
+ put_byte(s, 0);
857
+ put_byte(s, 0);
858
+ put_byte(s, 0);
859
+ put_byte(s, 0);
860
+ put_byte(s, 0);
861
+ put_byte(s, s->level == 9 ? 2 :
862
+ (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
863
+ 4 : 0));
864
+ put_byte(s, OS_CODE);
865
+ s->status = BUSY_STATE;
866
+
867
+ /* Compression must start with an empty pending buffer */
868
+ flush_pending(strm);
869
+ if (s->pending != 0) {
870
+ s->last_flush = -1;
871
+ return Z_OK;
872
+ }
873
+ }
874
+ else {
875
+ put_byte(s, (s->gzhead->text ? 1 : 0) +
876
+ (s->gzhead->hcrc ? 2 : 0) +
877
+ (s->gzhead->extra == Z_NULL ? 0 : 4) +
878
+ (s->gzhead->name == Z_NULL ? 0 : 8) +
879
+ (s->gzhead->comment == Z_NULL ? 0 : 16)
880
+ );
881
+ put_byte(s, (Byte)(s->gzhead->time & 0xff));
882
+ put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff));
883
+ put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff));
884
+ put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff));
885
+ put_byte(s, s->level == 9 ? 2 :
886
+ (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
887
+ 4 : 0));
888
+ put_byte(s, s->gzhead->os & 0xff);
889
+ if (s->gzhead->extra != Z_NULL) {
890
+ put_byte(s, s->gzhead->extra_len & 0xff);
891
+ put_byte(s, (s->gzhead->extra_len >> 8) & 0xff);
892
+ }
893
+ if (s->gzhead->hcrc)
894
+ strm->adler = crc32(strm->adler, s->pending_buf,
895
+ s->pending);
896
+ s->gzindex = 0;
897
+ s->status = EXTRA_STATE;
898
+ }
899
+ }
900
+ if (s->status == EXTRA_STATE) {
901
+ if (s->gzhead->extra != Z_NULL) {
902
+ ulg beg = s->pending; /* start of bytes to update crc */
903
+ uInt left = (s->gzhead->extra_len & 0xffff) - s->gzindex;
904
+ while (s->pending + left > s->pending_buf_size) {
905
+ uInt copy = s->pending_buf_size - s->pending;
906
+ zmemcpy(s->pending_buf + s->pending,
907
+ s->gzhead->extra + s->gzindex, copy);
908
+ s->pending = s->pending_buf_size;
909
+ HCRC_UPDATE(beg);
910
+ s->gzindex += copy;
911
+ flush_pending(strm);
912
+ if (s->pending != 0) {
913
+ s->last_flush = -1;
914
+ return Z_OK;
915
+ }
916
+ beg = 0;
917
+ left -= copy;
918
+ }
919
+ zmemcpy(s->pending_buf + s->pending,
920
+ s->gzhead->extra + s->gzindex, left);
921
+ s->pending += left;
922
+ HCRC_UPDATE(beg);
923
+ s->gzindex = 0;
924
+ }
925
+ s->status = NAME_STATE;
926
+ }
927
+ if (s->status == NAME_STATE) {
928
+ if (s->gzhead->name != Z_NULL) {
929
+ ulg beg = s->pending; /* start of bytes to update crc */
930
+ int val;
931
+ do {
932
+ if (s->pending == s->pending_buf_size) {
933
+ HCRC_UPDATE(beg);
934
+ flush_pending(strm);
935
+ if (s->pending != 0) {
936
+ s->last_flush = -1;
937
+ return Z_OK;
938
+ }
939
+ beg = 0;
940
+ }
941
+ val = s->gzhead->name[s->gzindex++];
942
+ put_byte(s, val);
943
+ } while (val != 0);
944
+ HCRC_UPDATE(beg);
945
+ s->gzindex = 0;
946
+ }
947
+ s->status = COMMENT_STATE;
948
+ }
949
+ if (s->status == COMMENT_STATE) {
950
+ if (s->gzhead->comment != Z_NULL) {
951
+ ulg beg = s->pending; /* start of bytes to update crc */
952
+ int val;
953
+ do {
954
+ if (s->pending == s->pending_buf_size) {
955
+ HCRC_UPDATE(beg);
956
+ flush_pending(strm);
957
+ if (s->pending != 0) {
958
+ s->last_flush = -1;
959
+ return Z_OK;
960
+ }
961
+ beg = 0;
962
+ }
963
+ val = s->gzhead->comment[s->gzindex++];
964
+ put_byte(s, val);
965
+ } while (val != 0);
966
+ HCRC_UPDATE(beg);
967
+ }
968
+ s->status = HCRC_STATE;
969
+ }
970
+ if (s->status == HCRC_STATE) {
971
+ if (s->gzhead->hcrc) {
972
+ if (s->pending + 2 > s->pending_buf_size) {
973
+ flush_pending(strm);
974
+ if (s->pending != 0) {
975
+ s->last_flush = -1;
976
+ return Z_OK;
977
+ }
978
+ }
979
+ put_byte(s, (Byte)(strm->adler & 0xff));
980
+ put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
981
+ strm->adler = crc32(0L, Z_NULL, 0);
982
+ }
983
+ s->status = BUSY_STATE;
984
+
985
+ /* Compression must start with an empty pending buffer */
986
+ flush_pending(strm);
987
+ if (s->pending != 0) {
988
+ s->last_flush = -1;
989
+ return Z_OK;
990
+ }
991
+ }
992
+ #endif
993
+
994
+ /* Start a new block or continue the current one.
995
+ */
996
+ if (strm->avail_in != 0 || s->lookahead != 0 ||
997
+ (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
998
+ block_state bstate;
999
+
1000
+ bstate = s->level == 0 ? deflate_stored(s, flush) :
1001
+ s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) :
1002
+ s->strategy == Z_RLE ? deflate_rle(s, flush) :
1003
+ (*(configuration_table[s->level].func))(s, flush);
1004
+
1005
+ if (bstate == finish_started || bstate == finish_done) {
1006
+ s->status = FINISH_STATE;
1007
+ }
1008
+ if (bstate == need_more || bstate == finish_started) {
1009
+ if (strm->avail_out == 0) {
1010
+ s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
1011
+ }
1012
+ return Z_OK;
1013
+ /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
1014
+ * of deflate should use the same flush parameter to make sure
1015
+ * that the flush is complete. So we don't have to output an
1016
+ * empty block here, this will be done at next call. This also
1017
+ * ensures that for a very small output buffer, we emit at most
1018
+ * one empty block.
1019
+ */
1020
+ }
1021
+ if (bstate == block_done) {
1022
+ if (flush == Z_PARTIAL_FLUSH) {
1023
+ _tr_align(s);
1024
+ } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */
1025
+ _tr_stored_block(s, (char*)0, 0L, 0);
1026
+ /* For a full flush, this empty block will be recognized
1027
+ * as a special marker by inflate_sync().
1028
+ */
1029
+ if (flush == Z_FULL_FLUSH) {
1030
+ CLEAR_HASH(s); /* forget history */
1031
+ if (s->lookahead == 0) {
1032
+ s->strstart = 0;
1033
+ s->block_start = 0L;
1034
+ s->insert = 0;
1035
+ }
1036
+ }
1037
+ }
1038
+ flush_pending(strm);
1039
+ if (strm->avail_out == 0) {
1040
+ s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
1041
+ return Z_OK;
1042
+ }
1043
+ }
1044
+ }
1045
+
1046
+ if (flush != Z_FINISH) return Z_OK;
1047
+ if (s->wrap <= 0) return Z_STREAM_END;
1048
+
1049
+ /* Write the trailer */
1050
+ #ifdef GZIP
1051
+ if (s->wrap == 2) {
1052
+ put_byte(s, (Byte)(strm->adler & 0xff));
1053
+ put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
1054
+ put_byte(s, (Byte)((strm->adler >> 16) & 0xff));
1055
+ put_byte(s, (Byte)((strm->adler >> 24) & 0xff));
1056
+ put_byte(s, (Byte)(strm->total_in & 0xff));
1057
+ put_byte(s, (Byte)((strm->total_in >> 8) & 0xff));
1058
+ put_byte(s, (Byte)((strm->total_in >> 16) & 0xff));
1059
+ put_byte(s, (Byte)((strm->total_in >> 24) & 0xff));
1060
+ }
1061
+ else
1062
+ #endif
1063
+ {
1064
+ putShortMSB(s, (uInt)(strm->adler >> 16));
1065
+ putShortMSB(s, (uInt)(strm->adler & 0xffff));
1066
+ }
1067
+ flush_pending(strm);
1068
+ /* If avail_out is zero, the application will call deflate again
1069
+ * to flush the rest.
1070
+ */
1071
+ if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */
1072
+ return s->pending != 0 ? Z_OK : Z_STREAM_END;
1073
+ }
1074
+
1075
+ /* ========================================================================= */
1076
+ int ZEXPORT deflateEnd (strm)
1077
+ z_streamp strm;
1078
+ {
1079
+ int status;
1080
+
1081
+ if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
1082
+
1083
+ status = strm->state->status;
1084
+
1085
+ /* Deallocate in reverse order of allocations: */
1086
+ TRY_FREE(strm, strm->state->pending_buf);
1087
+ TRY_FREE(strm, strm->state->head);
1088
+ TRY_FREE(strm, strm->state->prev);
1089
+ TRY_FREE(strm, strm->state->window);
1090
+
1091
+ ZFREE(strm, strm->state);
1092
+ strm->state = Z_NULL;
1093
+
1094
+ return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
1095
+ }
1096
+
1097
+ /* =========================================================================
1098
+ * Copy the source state to the destination state.
1099
+ * To simplify the source, this is not supported for 16-bit MSDOS (which
1100
+ * doesn't have enough memory anyway to duplicate compression states).
1101
+ */
1102
+ int ZEXPORT deflateCopy (dest, source)
1103
+ z_streamp dest;
1104
+ z_streamp source;
1105
+ {
1106
+ #ifdef MAXSEG_64K
1107
+ return Z_STREAM_ERROR;
1108
+ #else
1109
+ deflate_state *ds;
1110
+ deflate_state *ss;
1111
+ ushf *overlay;
1112
+
1113
+
1114
+ if (deflateStateCheck(source) || dest == Z_NULL) {
1115
+ return Z_STREAM_ERROR;
1116
+ }
1117
+
1118
+ ss = source->state;
1119
+
1120
+ zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream));
1121
+
1122
+ ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
1123
+ if (ds == Z_NULL) return Z_MEM_ERROR;
1124
+ dest->state = (struct internal_state FAR *) ds;
1125
+ zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state));
1126
+ ds->strm = dest;
1127
+
1128
+ ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
1129
+ ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
1130
+ ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
1131
+ overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
1132
+ ds->pending_buf = (uchf *) overlay;
1133
+
1134
+ if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
1135
+ ds->pending_buf == Z_NULL) {
1136
+ deflateEnd (dest);
1137
+ return Z_MEM_ERROR;
1138
+ }
1139
+ /* following zmemcpy do not work for 16-bit MSDOS */
1140
+ zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
1141
+ zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos));
1142
+ zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos));
1143
+ zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
1144
+
1145
+ ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
1146
+ ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
1147
+ ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
1148
+
1149
+ ds->l_desc.dyn_tree = ds->dyn_ltree;
1150
+ ds->d_desc.dyn_tree = ds->dyn_dtree;
1151
+ ds->bl_desc.dyn_tree = ds->bl_tree;
1152
+
1153
+ return Z_OK;
1154
+ #endif /* MAXSEG_64K */
1155
+ }
1156
+
1157
+ /* ===========================================================================
1158
+ * Read a new buffer from the current input stream, update the adler32
1159
+ * and total number of bytes read. All deflate() input goes through
1160
+ * this function so some applications may wish to modify it to avoid
1161
+ * allocating a large strm->next_in buffer and copying from it.
1162
+ * (See also flush_pending()).
1163
+ */
1164
+ local unsigned read_buf(strm, buf, size)
1165
+ z_streamp strm;
1166
+ Bytef *buf;
1167
+ unsigned size;
1168
+ {
1169
+ unsigned len = strm->avail_in;
1170
+
1171
+ if (len > size) len = size;
1172
+ if (len == 0) return 0;
1173
+
1174
+ strm->avail_in -= len;
1175
+
1176
+ zmemcpy(buf, strm->next_in, len);
1177
+ if (strm->state->wrap == 1) {
1178
+ strm->adler = adler32(strm->adler, buf, len);
1179
+ }
1180
+ #ifdef GZIP
1181
+ else if (strm->state->wrap == 2) {
1182
+ strm->adler = crc32(strm->adler, buf, len);
1183
+ }
1184
+ #endif
1185
+ strm->next_in += len;
1186
+ strm->total_in += len;
1187
+
1188
+ return len;
1189
+ }
1190
+
1191
+ /* ===========================================================================
1192
+ * Initialize the "longest match" routines for a new zlib stream
1193
+ */
1194
+ local void lm_init (s)
1195
+ deflate_state *s;
1196
+ {
1197
+ s->window_size = (ulg)2L*s->w_size;
1198
+
1199
+ CLEAR_HASH(s);
1200
+
1201
+ /* Set the default configuration parameters:
1202
+ */
1203
+ s->max_lazy_match = configuration_table[s->level].max_lazy;
1204
+ s->good_match = configuration_table[s->level].good_length;
1205
+ s->nice_match = configuration_table[s->level].nice_length;
1206
+ s->max_chain_length = configuration_table[s->level].max_chain;
1207
+
1208
+ s->strstart = 0;
1209
+ s->block_start = 0L;
1210
+ s->lookahead = 0;
1211
+ s->insert = 0;
1212
+ s->match_length = s->prev_length = MIN_MATCH-1;
1213
+ s->match_available = 0;
1214
+ s->ins_h = 0;
1215
+ #ifndef FASTEST
1216
+ #ifdef ASMV
1217
+ match_init(); /* initialize the asm code */
1218
+ #endif
1219
+ #endif
1220
+ }
1221
+
1222
+ #ifndef FASTEST
1223
+ /* ===========================================================================
1224
+ * Set match_start to the longest match starting at the given string and
1225
+ * return its length. Matches shorter or equal to prev_length are discarded,
1226
+ * in which case the result is equal to prev_length and match_start is
1227
+ * garbage.
1228
+ * IN assertions: cur_match is the head of the hash chain for the current
1229
+ * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
1230
+ * OUT assertion: the match length is not greater than s->lookahead.
1231
+ */
1232
+ #ifndef ASMV
1233
+ /* For 80x86 and 680x0, an optimized version will be provided in match.asm or
1234
+ * match.S. The code will be functionally equivalent.
1235
+ */
1236
+ local uInt longest_match(s, cur_match)
1237
+ deflate_state *s;
1238
+ IPos cur_match; /* current match */
1239
+ {
1240
+ unsigned chain_length = s->max_chain_length;/* max hash chain length */
1241
+ register Bytef *scan = s->window + s->strstart; /* current string */
1242
+ register Bytef *match; /* matched string */
1243
+ register int len; /* length of current match */
1244
+ int best_len = (int)s->prev_length; /* best match length so far */
1245
+ int nice_match = s->nice_match; /* stop if match long enough */
1246
+ IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
1247
+ s->strstart - (IPos)MAX_DIST(s) : NIL;
1248
+ /* Stop when cur_match becomes <= limit. To simplify the code,
1249
+ * we prevent matches with the string of window index 0.
1250
+ */
1251
+ Posf *prev = s->prev;
1252
+ uInt wmask = s->w_mask;
1253
+
1254
+ #ifdef UNALIGNED_OK
1255
+ /* Compare two bytes at a time. Note: this is not always beneficial.
1256
+ * Try with and without -DUNALIGNED_OK to check.
1257
+ */
1258
+ register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
1259
+ register ush scan_start = *(ushf*)scan;
1260
+ register ush scan_end = *(ushf*)(scan+best_len-1);
1261
+ #else
1262
+ register Bytef *strend = s->window + s->strstart + MAX_MATCH;
1263
+ register Byte scan_end1 = scan[best_len-1];
1264
+ register Byte scan_end = scan[best_len];
1265
+ #endif
1266
+
1267
+ /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
1268
+ * It is easy to get rid of this optimization if necessary.
1269
+ */
1270
+ Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
1271
+
1272
+ /* Do not waste too much time if we already have a good match: */
1273
+ if (s->prev_length >= s->good_match) {
1274
+ chain_length >>= 2;
1275
+ }
1276
+ /* Do not look for matches beyond the end of the input. This is necessary
1277
+ * to make deflate deterministic.
1278
+ */
1279
+ if ((uInt)nice_match > s->lookahead) nice_match = (int)s->lookahead;
1280
+
1281
+ Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
1282
+
1283
+ do {
1284
+ Assert(cur_match < s->strstart, "no future");
1285
+ match = s->window + cur_match;
1286
+
1287
+ /* Skip to next match if the match length cannot increase
1288
+ * or if the match length is less than 2. Note that the checks below
1289
+ * for insufficient lookahead only occur occasionally for performance
1290
+ * reasons. Therefore uninitialized memory will be accessed, and
1291
+ * conditional jumps will be made that depend on those values.
1292
+ * However the length of the match is limited to the lookahead, so
1293
+ * the output of deflate is not affected by the uninitialized values.
1294
+ */
1295
+ #if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
1296
+ /* This code assumes sizeof(unsigned short) == 2. Do not use
1297
+ * UNALIGNED_OK if your compiler uses a different size.
1298
+ */
1299
+ if (*(ushf*)(match+best_len-1) != scan_end ||
1300
+ *(ushf*)match != scan_start) continue;
1301
+
1302
+ /* It is not necessary to compare scan[2] and match[2] since they are
1303
+ * always equal when the other bytes match, given that the hash keys
1304
+ * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
1305
+ * strstart+3, +5, ... up to strstart+257. We check for insufficient
1306
+ * lookahead only every 4th comparison; the 128th check will be made
1307
+ * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
1308
+ * necessary to put more guard bytes at the end of the window, or
1309
+ * to check more often for insufficient lookahead.
1310
+ */
1311
+ Assert(scan[2] == match[2], "scan[2]?");
1312
+ scan++, match++;
1313
+ do {
1314
+ } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1315
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1316
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1317
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1318
+ scan < strend);
1319
+ /* The funny "do {}" generates better code on most compilers */
1320
+
1321
+ /* Here, scan <= window+strstart+257 */
1322
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
1323
+ if (*scan == *match) scan++;
1324
+
1325
+ len = (MAX_MATCH - 1) - (int)(strend-scan);
1326
+ scan = strend - (MAX_MATCH-1);
1327
+
1328
+ #else /* UNALIGNED_OK */
1329
+
1330
+ if (match[best_len] != scan_end ||
1331
+ match[best_len-1] != scan_end1 ||
1332
+ *match != *scan ||
1333
+ *++match != scan[1]) continue;
1334
+
1335
+ /* The check at best_len-1 can be removed because it will be made
1336
+ * again later. (This heuristic is not always a win.)
1337
+ * It is not necessary to compare scan[2] and match[2] since they
1338
+ * are always equal when the other bytes match, given that
1339
+ * the hash keys are equal and that HASH_BITS >= 8.
1340
+ */
1341
+ scan += 2, match++;
1342
+ Assert(*scan == *match, "match[2]?");
1343
+
1344
+ /* We check for insufficient lookahead only every 8th comparison;
1345
+ * the 256th check will be made at strstart+258.
1346
+ */
1347
+ do {
1348
+ } while (*++scan == *++match && *++scan == *++match &&
1349
+ *++scan == *++match && *++scan == *++match &&
1350
+ *++scan == *++match && *++scan == *++match &&
1351
+ *++scan == *++match && *++scan == *++match &&
1352
+ scan < strend);
1353
+
1354
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
1355
+
1356
+ len = MAX_MATCH - (int)(strend - scan);
1357
+ scan = strend - MAX_MATCH;
1358
+
1359
+ #endif /* UNALIGNED_OK */
1360
+
1361
+ if (len > best_len) {
1362
+ s->match_start = cur_match;
1363
+ best_len = len;
1364
+ if (len >= nice_match) break;
1365
+ #ifdef UNALIGNED_OK
1366
+ scan_end = *(ushf*)(scan+best_len-1);
1367
+ #else
1368
+ scan_end1 = scan[best_len-1];
1369
+ scan_end = scan[best_len];
1370
+ #endif
1371
+ }
1372
+ } while ((cur_match = prev[cur_match & wmask]) > limit
1373
+ && --chain_length != 0);
1374
+
1375
+ if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
1376
+ return s->lookahead;
1377
+ }
1378
+ #endif /* ASMV */
1379
+
1380
+ #else /* FASTEST */
1381
+
1382
+ /* ---------------------------------------------------------------------------
1383
+ * Optimized version for FASTEST only
1384
+ */
1385
+ local uInt longest_match(s, cur_match)
1386
+ deflate_state *s;
1387
+ IPos cur_match; /* current match */
1388
+ {
1389
+ register Bytef *scan = s->window + s->strstart; /* current string */
1390
+ register Bytef *match; /* matched string */
1391
+ register int len; /* length of current match */
1392
+ register Bytef *strend = s->window + s->strstart + MAX_MATCH;
1393
+
1394
+ /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
1395
+ * It is easy to get rid of this optimization if necessary.
1396
+ */
1397
+ Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
1398
+
1399
+ Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
1400
+
1401
+ Assert(cur_match < s->strstart, "no future");
1402
+
1403
+ match = s->window + cur_match;
1404
+
1405
+ /* Return failure if the match length is less than 2:
1406
+ */
1407
+ if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;
1408
+
1409
+ /* The check at best_len-1 can be removed because it will be made
1410
+ * again later. (This heuristic is not always a win.)
1411
+ * It is not necessary to compare scan[2] and match[2] since they
1412
+ * are always equal when the other bytes match, given that
1413
+ * the hash keys are equal and that HASH_BITS >= 8.
1414
+ */
1415
+ scan += 2, match += 2;
1416
+ Assert(*scan == *match, "match[2]?");
1417
+
1418
+ /* We check for insufficient lookahead only every 8th comparison;
1419
+ * the 256th check will be made at strstart+258.
1420
+ */
1421
+ do {
1422
+ } while (*++scan == *++match && *++scan == *++match &&
1423
+ *++scan == *++match && *++scan == *++match &&
1424
+ *++scan == *++match && *++scan == *++match &&
1425
+ *++scan == *++match && *++scan == *++match &&
1426
+ scan < strend);
1427
+
1428
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
1429
+
1430
+ len = MAX_MATCH - (int)(strend - scan);
1431
+
1432
+ if (len < MIN_MATCH) return MIN_MATCH - 1;
1433
+
1434
+ s->match_start = cur_match;
1435
+ return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead;
1436
+ }
1437
+
1438
+ #endif /* FASTEST */
1439
+
1440
+ #ifdef ZLIB_DEBUG
+
+ #define EQUAL 0
+ /* result of memcmp for equal strings */
+
+ /* ===========================================================================
+ * Check that the match at match_start is indeed a match.
+ */
+ local void check_match(s, start, match, length)
+ deflate_state *s;
+ IPos start, match;
+ int length;
+ {
+ /* check that the match is indeed a match */
+ if (zmemcmp(s->window + match,
+ s->window + start, length) != EQUAL) {
+ fprintf(stderr, " start %u, match %u, length %d\n",
+ start, match, length);
+ do {
+ fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
+ } while (--length != 0);
+ z_error("invalid match");
+ }
+ if (z_verbose > 1) {
+ fprintf(stderr,"\\[%d,%d]", start-match, length);
+ do { putc(s->window[start++], stderr); } while (--length != 0);
+ }
+ }
+ #else
+ # define check_match(s, start, match, length)
+ #endif /* ZLIB_DEBUG */
+
+ /* ===========================================================================
+ * Fill the window when the lookahead becomes insufficient.
+ * Updates strstart and lookahead.
+ *
+ * IN assertion: lookahead < MIN_LOOKAHEAD
+ * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
+ * At least one byte has been read, or avail_in == 0; reads are
+ * performed for at least two bytes (required for the zip translate_eol
+ * option -- not supported here).
+ */
+ local void fill_window(s)
+ deflate_state *s;
+ {
+ unsigned n;
+ unsigned more; /* Amount of free space at the end of the window. */
+ uInt wsize = s->w_size;
+
+ Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");
+
+ do {
+ more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
+
+ /* Deal with !@#$% 64K limit: */
+ if (sizeof(int) <= 2) {
+ if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
+ more = wsize;
+
+ } else if (more == (unsigned)(-1)) {
+ /* Very unlikely, but possible on 16 bit machine if
+ * strstart == 0 && lookahead == 1 (input done a byte at a time)
+ */
+ more--;
+ }
+ }
+
+ /* If the window is almost full and there is insufficient lookahead,
+ * move the upper half to the lower one to make room in the upper half.
+ */
+ if (s->strstart >= wsize+MAX_DIST(s)) {
+
+ zmemcpy(s->window, s->window+wsize, (unsigned)wsize - more);
+ s->match_start -= wsize;
+ s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
+ s->block_start -= (long) wsize;
+ slide_hash(s);
+ more += wsize;
+ }
+ if (s->strm->avail_in == 0) break;
+
+ /* If there was no sliding:
+ * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
+ * more == window_size - lookahead - strstart
+ * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
+ * => more >= window_size - 2*WSIZE + 2
+ * In the BIG_MEM or MMAP case (not yet supported),
+ * window_size == input_size + MIN_LOOKAHEAD &&
+ * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
+ * Otherwise, window_size == 2*WSIZE so more >= 2.
+ * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
+ */
+ Assert(more >= 2, "more < 2");
+
+ n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
+ s->lookahead += n;
+
+ /* Initialize the hash value now that we have some input: */
+ if (s->lookahead + s->insert >= MIN_MATCH) {
+ uInt str = s->strstart - s->insert;
+ s->ins_h = s->window[str];
+ UPDATE_HASH(s, s->ins_h, s->window[str + 1]);
+ #if MIN_MATCH != 3
+ Call UPDATE_HASH() MIN_MATCH-3 more times
+ #endif
+ while (s->insert) {
+ UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
+ #ifndef FASTEST
+ s->prev[str & s->w_mask] = s->head[s->ins_h];
+ #endif
+ s->head[s->ins_h] = (Pos)str;
+ str++;
+ s->insert--;
+ if (s->lookahead + s->insert < MIN_MATCH)
+ break;
+ }
+ }
+ /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
+ * but this is not important since only literal bytes will be emitted.
+ */
+
+ } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
+
+ /* If the WIN_INIT bytes after the end of the current data have never been
+ * written, then zero those bytes in order to avoid memory check reports of
+ * the use of uninitialized (or uninitialised as Julian writes) bytes by
+ * the longest match routines. Update the high water mark for the next
+ * time through here. WIN_INIT is set to MAX_MATCH since the longest match
+ * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
+ */
+ if (s->high_water < s->window_size) {
+ ulg curr = s->strstart + (ulg)(s->lookahead);
+ ulg init;
+
+ if (s->high_water < curr) {
+ /* Previous high water mark below current data -- zero WIN_INIT
+ * bytes or up to end of window, whichever is less.
+ */
+ init = s->window_size - curr;
+ if (init > WIN_INIT)
+ init = WIN_INIT;
+ zmemzero(s->window + curr, (unsigned)init);
+ s->high_water = curr + init;
+ }
+ else if (s->high_water < (ulg)curr + WIN_INIT) {
+ /* High water mark at or above current data, but below current data
+ * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
+ * to end of window, whichever is less.
+ */
+ init = (ulg)curr + WIN_INIT - s->high_water;
+ if (init > s->window_size - s->high_water)
+ init = s->window_size - s->high_water;
+ zmemzero(s->window + s->high_water, (unsigned)init);
+ s->high_water += init;
+ }
+ }
+
+ Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
+ "not enough room for search");
+ }
+
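fill_window() above keeps a window of 2*w_size bytes: once strstart moves past w_size+MAX_DIST, the upper half is copied down, every stored position is rebased by w_size, and slide_hash() applies the same rebasing to the hash chains. A minimal sketch of just that slide step, assuming a toy state struct (toy_window and toy_slide are hypothetical names, not zlib's deflate_state):

    #include <string.h>

    struct toy_window {
        unsigned char *win;        /* 2 * wsize bytes of window storage */
        unsigned long  wsize;      /* size of one window half */
        unsigned long  strstart;   /* caller guarantees strstart >= wsize */
        unsigned long  match_start;
        long           block_start;
    };

    /* Illustrative only: drop the oldest wsize bytes, keep the most recent
     * wsize bytes as match history, and rebase all positions by wsize. */
    static void toy_slide(struct toy_window *w)
    {
        memcpy(w->win, w->win + w->wsize, w->wsize);
        w->strstart    -= w->wsize;
        w->match_start -= w->wsize;
        w->block_start -= (long)w->wsize;
        /* a real implementation must also rebase the head/prev hash chains,
         * which is what slide_hash(s) does in the code above */
    }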
+ /* ===========================================================================
+ * Flush the current block, with given end-of-file flag.
+ * IN assertion: strstart is set to the end of the current match.
+ */
+ #define FLUSH_BLOCK_ONLY(s, last) { \
+ _tr_flush_block(s, (s->block_start >= 0L ? \
+ (charf *)&s->window[(unsigned)s->block_start] : \
+ (charf *)Z_NULL), \
+ (ulg)((long)s->strstart - s->block_start), \
+ (last)); \
+ s->block_start = s->strstart; \
+ flush_pending(s->strm); \
+ Tracev((stderr,"[FLUSH]")); \
+ }
+
+ /* Same but force premature exit if necessary. */
+ #define FLUSH_BLOCK(s, last) { \
+ FLUSH_BLOCK_ONLY(s, last); \
+ if (s->strm->avail_out == 0) return (last) ? finish_started : need_more; \
+ }
+
+ /* Maximum stored block length in deflate format (not including header). */
+ #define MAX_STORED 65535
+
+ /* Minimum of a and b. */
+ #define MIN(a, b) ((a) > (b) ? (b) : (a))
+
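MAX_STORED is 65535 because a stored block's LEN field is a 16-bit little-endian count followed by its one's complement (NLEN), which is exactly what deflate_stored() below patches into the dummy header it emits. A minimal sketch of writing that 4-byte length trailer for a given block length, assuming a caller-supplied byte buffer (write_stored_len is an illustrative name):

    /* Illustrative only: store LEN and NLEN for a stored block of len bytes
     * (len must be <= 65535) into hdr[0..3], least significant byte first. */
    static void write_stored_len(unsigned char hdr[4], unsigned len)
    {
        hdr[0] = (unsigned char)(len & 0xff);
        hdr[1] = (unsigned char)((len >> 8) & 0xff);
        hdr[2] = (unsigned char)(~len & 0xff);
        hdr[3] = (unsigned char)((~len >> 8) & 0xff);
    }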
+ /* ===========================================================================
+ * Copy without compression as much as possible from the input stream, return
+ * the current block state.
+ *
+ * In case deflateParams() is used to later switch to a non-zero compression
+ * level, s->matches (otherwise unused when storing) keeps track of the number
+ * of hash table slides to perform. If s->matches is 1, then one hash table
+ * slide will be done when switching. If s->matches is 2, the maximum value
+ * allowed here, then the hash table will be cleared, since two or more slides
+ * is the same as a clear.
+ *
+ * deflate_stored() is written to minimize the number of times an input byte is
+ * copied. It is most efficient with large input and output buffers, which
+ * maximizes the opportunities to have a single copy from next_in to next_out.
+ */
+ local block_state deflate_stored(s, flush)
+ deflate_state *s;
+ int flush;
+ {
+ /* Smallest worthy block size when not flushing or finishing. By default
+ * this is 32K. This can be as small as 507 bytes for memLevel == 1. For
+ * large input and output buffers, the stored block size will be larger.
+ */
+ unsigned min_block = MIN(s->pending_buf_size - 5, s->w_size);
+
+ /* Copy as many min_block or larger stored blocks directly to next_out as
+ * possible. If flushing, copy the remaining available input to next_out as
+ * stored blocks, if there is enough space.
+ */
+ unsigned len, left, have, last = 0;
+ unsigned used = s->strm->avail_in;
+ do {
+ /* Set len to the maximum size block that we can copy directly with the
+ * available input data and output space. Set left to how much of that
+ * would be copied from what's left in the window.
+ */
+ len = MAX_STORED; /* maximum deflate stored block length */
+ have = (s->bi_valid + 42) >> 3; /* number of header bytes */
+ if (s->strm->avail_out < have) /* need room for header */
+ break;
+ /* maximum stored block length that will fit in avail_out: */
+ have = s->strm->avail_out - have;
+ left = s->strstart - s->block_start; /* bytes left in window */
+ if (len > (ulg)left + s->strm->avail_in)
+ len = left + s->strm->avail_in; /* limit len to the input */
+ if (len > have)
+ len = have; /* limit len to the output */
+
+ /* If the stored block would be less than min_block in length, or if
+ * unable to copy all of the available input when flushing, then try
+ * copying to the window and the pending buffer instead. Also don't
+ * write an empty block when flushing -- deflate() does that.
+ */
+ if (len < min_block && ((len == 0 && flush != Z_FINISH) ||
+ flush == Z_NO_FLUSH ||
+ len != left + s->strm->avail_in))
+ break;
+
+ /* Make a dummy stored block in pending to get the header bytes,
+ * including any pending bits. This also updates the debugging counts.
+ */
+ last = flush == Z_FINISH && len == left + s->strm->avail_in ? 1 : 0;
+ _tr_stored_block(s, (char *)0, 0L, last);
+
+ /* Replace the lengths in the dummy stored block with len. */
+ s->pending_buf[s->pending - 4] = len;
+ s->pending_buf[s->pending - 3] = len >> 8;
+ s->pending_buf[s->pending - 2] = ~len;
+ s->pending_buf[s->pending - 1] = ~len >> 8;
+
+ /* Write the stored block header bytes. */
+ flush_pending(s->strm);
+
+ #ifdef ZLIB_DEBUG
+ /* Update debugging counts for the data about to be copied. */
+ s->compressed_len += len << 3;
+ s->bits_sent += len << 3;
+ #endif
+
+ /* Copy uncompressed bytes from the window to next_out. */
+ if (left) {
+ if (left > len)
+ left = len;
+ zmemcpy(s->strm->next_out, s->window + s->block_start, left);
+ s->strm->next_out += left;
+ s->strm->avail_out -= left;
+ s->strm->total_out += left;
+ s->block_start += left;
+ len -= left;
+ }
+
+ /* Copy uncompressed bytes directly from next_in to next_out, updating
+ * the check value.
+ */
+ if (len) {
+ read_buf(s->strm, s->strm->next_out, len);
+ s->strm->next_out += len;
+ s->strm->avail_out -= len;
+ s->strm->total_out += len;
+ }
+ } while (last == 0);
+
+ /* Update the sliding window with the last s->w_size bytes of the copied
+ * data, or append all of the copied data to the existing window if less
+ * than s->w_size bytes were copied. Also update the number of bytes to
+ * insert in the hash tables, in the event that deflateParams() switches to
+ * a non-zero compression level.
+ */
+ used -= s->strm->avail_in; /* number of input bytes directly copied */
+ if (used) {
+ /* If any input was used, then no unused input remains in the window,
+ * therefore s->block_start == s->strstart.
+ */
+ if (used >= s->w_size) { /* supplant the previous history */
+ s->matches = 2; /* clear hash */
+ zmemcpy(s->window, s->strm->next_in - s->w_size, s->w_size);
+ s->strstart = s->w_size;
+ }
+ else {
+ if (s->window_size - s->strstart <= used) {
+ /* Slide the window down. */
+ s->strstart -= s->w_size;
+ zmemcpy(s->window, s->window + s->w_size, s->strstart);
+ if (s->matches < 2)
+ s->matches++; /* add a pending slide_hash() */
+ }
+ zmemcpy(s->window + s->strstart, s->strm->next_in - used, used);
+ s->strstart += used;
+ }
+ s->block_start = s->strstart;
+ s->insert += MIN(used, s->w_size - s->insert);
+ }
+ if (s->high_water < s->strstart)
+ s->high_water = s->strstart;
+
+ /* If the last block was written to next_out, then done. */
+ if (last)
+ return finish_done;
+
+ /* If flushing and all input has been consumed, then done. */
+ if (flush != Z_NO_FLUSH && flush != Z_FINISH &&
+ s->strm->avail_in == 0 && (long)s->strstart == s->block_start)
+ return block_done;
+
+ /* Fill the window with any remaining input. */
+ have = s->window_size - s->strstart - 1;
+ if (s->strm->avail_in > have && s->block_start >= (long)s->w_size) {
+ /* Slide the window down. */
+ s->block_start -= s->w_size;
+ s->strstart -= s->w_size;
+ zmemcpy(s->window, s->window + s->w_size, s->strstart);
+ if (s->matches < 2)
+ s->matches++; /* add a pending slide_hash() */
+ have += s->w_size; /* more space now */
+ }
+ if (have > s->strm->avail_in)
+ have = s->strm->avail_in;
+ if (have) {
+ read_buf(s->strm, s->window + s->strstart, have);
+ s->strstart += have;
+ }
+ if (s->high_water < s->strstart)
+ s->high_water = s->strstart;
+
+ /* There was not enough avail_out to write a complete worthy or flushed
+ * stored block to next_out. Write a stored block to pending instead, if we
+ * have enough input for a worthy block, or if flushing and there is enough
+ * room for the remaining input as a stored block in the pending buffer.
+ */
+ have = (s->bi_valid + 42) >> 3; /* number of header bytes */
+ /* maximum stored block length that will fit in pending: */
+ have = MIN(s->pending_buf_size - have, MAX_STORED);
+ min_block = MIN(have, s->w_size);
+ left = s->strstart - s->block_start;
+ if (left >= min_block ||
+ ((left || flush == Z_FINISH) && flush != Z_NO_FLUSH &&
+ s->strm->avail_in == 0 && left <= have)) {
+ len = MIN(left, have);
+ last = flush == Z_FINISH && s->strm->avail_in == 0 &&
+ len == left ? 1 : 0;
+ _tr_stored_block(s, (charf *)s->window + s->block_start, len, last);
+ s->block_start += len;
+ flush_pending(s->strm);
+ }
+
+ /* We've done all we can with the available input and output. */
+ return last ? finish_started : need_more;
+ }
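The two `(s->bi_valid + 42) >> 3` computations above budget the stored-block header: up to bi_valid pending bits, 3 bits of block header, padding to the next byte boundary, and 32 bits of LEN/NLEN, i.e. ceil((bi_valid + 3) / 8) + 4 bytes. A small standalone check of that identity (not part of the library):

    #include <assert.h>

    int main(void)
    {
        int bi_valid;
        /* the identity holds for any non-negative count of pending bits */
        for (bi_valid = 0; bi_valid <= 31; bi_valid++) {
            int header_bytes = (bi_valid + 3 + 7) / 8;   /* round up to bytes */
            assert(header_bytes + 4 == ((bi_valid + 42) >> 3));
        }
        return 0;
    }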
+
+ /* ===========================================================================
+ * Compress as much as possible from the input stream, return the current
+ * block state.
+ * This function does not perform lazy evaluation of matches and inserts
+ * new strings in the dictionary only for unmatched strings or for short
+ * matches. It is used only for the fast compression options.
+ */
+ local block_state deflate_fast(s, flush)
+ deflate_state *s;
+ int flush;
+ {
+ IPos hash_head; /* head of the hash chain */
+ int bflush; /* set if current block must be flushed */
+
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the next match, plus MIN_MATCH bytes to insert the
+ * string following the next match.
+ */
+ if (s->lookahead < MIN_LOOKAHEAD) {
+ fill_window(s);
+ if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+ return need_more;
+ }
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+
+ /* Insert the string window[strstart .. strstart+2] in the
+ * dictionary, and set hash_head to the head of the hash chain:
+ */
+ hash_head = NIL;
+ if (s->lookahead >= MIN_MATCH) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+
+ /* Find the longest match, discarding those <= prev_length.
+ * At this point we have always match_length < MIN_MATCH
+ */
+ if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
+ /* To simplify the code, we prevent matches with the string
+ * of window index 0 (in particular we have to avoid a match
+ * of the string with itself at the start of the input file).
+ */
+ s->match_length = longest_match (s, hash_head);
+ /* longest_match() sets match_start */
+ }
+ if (s->match_length >= MIN_MATCH) {
+ check_match(s, s->strstart, s->match_start, s->match_length);
+
+ _tr_tally_dist(s, s->strstart - s->match_start,
+ s->match_length - MIN_MATCH, bflush);
+
+ s->lookahead -= s->match_length;
+
+ /* Insert new strings in the hash table only if the match length
+ * is not too large. This saves time but degrades compression.
+ */
+ #ifndef FASTEST
+ if (s->match_length <= s->max_insert_length &&
+ s->lookahead >= MIN_MATCH) {
+ s->match_length--; /* string at strstart already in table */
+ do {
+ s->strstart++;
+ INSERT_STRING(s, s->strstart, hash_head);
+ /* strstart never exceeds WSIZE-MAX_MATCH, so there are
+ * always MIN_MATCH bytes ahead.
+ */
+ } while (--s->match_length != 0);
+ s->strstart++;
+ } else
+ #endif
+ {
+ s->strstart += s->match_length;
+ s->match_length = 0;
+ s->ins_h = s->window[s->strstart];
+ UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+ #if MIN_MATCH != 3
+ Call UPDATE_HASH() MIN_MATCH-3 more times
+ #endif
+ /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
+ * matter since it will be recomputed at next deflate call.
+ */
+ }
+ } else {
+ /* No match, output a literal byte */
+ Tracevv((stderr,"%c", s->window[s->strstart]));
+ _tr_tally_lit (s, s->window[s->strstart], bflush);
+ s->lookahead--;
+ s->strstart++;
+ }
+ if (bflush) FLUSH_BLOCK(s, 0);
+ }
+ s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
+ if (flush == Z_FINISH) {
+ FLUSH_BLOCK(s, 1);
+ return finish_done;
+ }
+ if (s->last_lit)
+ FLUSH_BLOCK(s, 0);
+ return block_done;
+ }
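deflate_fast() and fill_window() index the window through INSERT_STRING: head[h] holds the most recent position whose 3-byte string hashes to h, and prev[pos & w_mask] records the position that previously headed that chain, which is the candidate list longest_match() walks. A minimal sketch of that chain update with hypothetical fixed-size tables (toy_insert, head_tab, prev_tab and the masks are illustrative names, not zlib's):

    #include <stdint.h>

    #define HASH_SIZE    (1u << 15)
    #define HASH_MASK    (HASH_SIZE - 1)
    #define WINDOW_MASK  ((1u << 15) - 1)

    static uint16_t head_tab[HASH_SIZE];    /* most recent position per hash */
    static uint16_t prev_tab[1u << 15];     /* previous position in the chain */

    /* Illustrative only: link position pos (whose 3-byte string hashes to h)
     * into the chain for h and return the old chain head, i.e. the nearest
     * earlier candidate that a longest-match search would try first. */
    static unsigned toy_insert(unsigned h, unsigned pos)
    {
        unsigned hash_head = head_tab[h & HASH_MASK];
        prev_tab[pos & WINDOW_MASK] = (uint16_t)hash_head;
        head_tab[h & HASH_MASK] = (uint16_t)pos;
        return hash_head;
    }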
+
+ #ifndef FASTEST
+ /* ===========================================================================
+ * Same as above, but achieves better compression. We use a lazy
+ * evaluation for matches: a match is finally adopted only if there is
+ * no better match at the next window position.
+ */
+ local block_state deflate_slow(s, flush)
+ deflate_state *s;
+ int flush;
+ {
+ IPos hash_head; /* head of hash chain */
+ int bflush; /* set if current block must be flushed */
+
+ /* Process the input block. */
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the next match, plus MIN_MATCH bytes to insert the
+ * string following the next match.
+ */
+ if (s->lookahead < MIN_LOOKAHEAD) {
+ fill_window(s);
+ if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+ return need_more;
+ }
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+
+ /* Insert the string window[strstart .. strstart+2] in the
+ * dictionary, and set hash_head to the head of the hash chain:
+ */
+ hash_head = NIL;
+ if (s->lookahead >= MIN_MATCH) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+
+ /* Find the longest match, discarding those <= prev_length.
+ */
+ s->prev_length = s->match_length, s->prev_match = s->match_start;
+ s->match_length = MIN_MATCH-1;
+
+ if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
+ s->strstart - hash_head <= MAX_DIST(s)) {
+ /* To simplify the code, we prevent matches with the string
+ * of window index 0 (in particular we have to avoid a match
+ * of the string with itself at the start of the input file).
+ */
+ s->match_length = longest_match (s, hash_head);
+ /* longest_match() sets match_start */
+
+ if (s->match_length <= 5 && (s->strategy == Z_FILTERED
+ #if TOO_FAR <= 32767
+ || (s->match_length == MIN_MATCH &&
+ s->strstart - s->match_start > TOO_FAR)
+ #endif
+ )) {
+
+ /* If prev_match is also MIN_MATCH, match_start is garbage
+ * but we will ignore the current match anyway.
+ */
+ s->match_length = MIN_MATCH-1;
+ }
+ }
+ /* If there was a match at the previous step and the current
+ * match is not better, output the previous match:
+ */
+ if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
+ uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
+ /* Do not insert strings in hash table beyond this. */
+
+ check_match(s, s->strstart-1, s->prev_match, s->prev_length);
+
+ _tr_tally_dist(s, s->strstart -1 - s->prev_match,
+ s->prev_length - MIN_MATCH, bflush);
+
+ /* Insert in hash table all strings up to the end of the match.
+ * strstart-1 and strstart are already inserted. If there is not
+ * enough lookahead, the last two strings are not inserted in
+ * the hash table.
+ */
+ s->lookahead -= s->prev_length-1;
+ s->prev_length -= 2;
+ do {
+ if (++s->strstart <= max_insert) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+ } while (--s->prev_length != 0);
+ s->match_available = 0;
+ s->match_length = MIN_MATCH-1;
+ s->strstart++;
+
+ if (bflush) FLUSH_BLOCK(s, 0);
+
+ } else if (s->match_available) {
+ /* If there was no match at the previous position, output a
+ * single literal. If there was a match but the current match
+ * is longer, truncate the previous match to a single literal.
+ */
+ Tracevv((stderr,"%c", s->window[s->strstart-1]));
+ _tr_tally_lit(s, s->window[s->strstart-1], bflush);
+ if (bflush) {
+ FLUSH_BLOCK_ONLY(s, 0);
+ }
+ s->strstart++;
+ s->lookahead--;
+ if (s->strm->avail_out == 0) return need_more;
+ } else {
+ /* There is no previous match to compare with, wait for
+ * the next step to decide.
+ */
+ s->match_available = 1;
+ s->strstart++;
+ s->lookahead--;
+ }
+ }
+ Assert (flush != Z_NO_FLUSH, "no flush?");
+ if (s->match_available) {
+ Tracevv((stderr,"%c", s->window[s->strstart-1]));
+ _tr_tally_lit(s, s->window[s->strstart-1], bflush);
+ s->match_available = 0;
+ }
+ s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
+ if (flush == Z_FINISH) {
+ FLUSH_BLOCK(s, 1);
+ return finish_done;
+ }
+ if (s->last_lit)
+ FLUSH_BLOCK(s, 0);
+ return block_done;
+ }
+ #endif /* FASTEST */
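The lazy scheme above holds each match for one step: the match found at position i is emitted only if the match found at i+1 is not longer; otherwise position i degrades to a single literal and the longer match wins. A minimal sketch of that decision, assuming the per-position match lengths are already known (emit_lazy, len, emit_match and emit_literal are illustrative names, not zlib's control flow):

    /* Illustrative only: len[i] is the best match length found starting at i
     * (0 or 1 means no usable match). Emits via caller-supplied callbacks. */
    static void emit_lazy(const unsigned *len, const unsigned char *data,
                          unsigned n,
                          void (*emit_match)(unsigned pos, unsigned length),
                          void (*emit_literal)(unsigned char c))
    {
        unsigned i = 0;
        while (i < n) {
            unsigned cur = len[i];
            unsigned next = (i + 1 < n) ? len[i + 1] : 0;
            if (cur >= 3 && cur >= next) {
                emit_match(i, cur);      /* adopt the match at i */
                i += cur;
            } else {
                emit_literal(data[i]);   /* defer: i+1 may hold a better match */
                i += 1;
            }
        }
    }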
+
+ /* ===========================================================================
+ * For Z_RLE, simply look for runs of bytes, generate matches only of distance
+ * one. Do not maintain a hash table. (It will be regenerated if this run of
+ * deflate switches away from Z_RLE.)
+ */
+ local block_state deflate_rle(s, flush)
+ deflate_state *s;
+ int flush;
+ {
+ int bflush; /* set if current block must be flushed */
+ uInt prev; /* byte at distance one to match */
+ Bytef *scan, *strend; /* scan goes up to strend for length of run */
+
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the longest run, plus one for the unrolled loop.
+ */
+ if (s->lookahead <= MAX_MATCH) {
+ fill_window(s);
+ if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH) {
+ return need_more;
+ }
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+
+ /* See how many times the previous byte repeats */
+ s->match_length = 0;
+ if (s->lookahead >= MIN_MATCH && s->strstart > 0) {
+ scan = s->window + s->strstart - 1;
+ prev = *scan;
+ if (prev == *++scan && prev == *++scan && prev == *++scan) {
+ strend = s->window + s->strstart + MAX_MATCH;
+ do {
+ } while (prev == *++scan && prev == *++scan &&
+ prev == *++scan && prev == *++scan &&
+ prev == *++scan && prev == *++scan &&
+ prev == *++scan && prev == *++scan &&
+ scan < strend);
+ s->match_length = MAX_MATCH - (uInt)(strend - scan);
+ if (s->match_length > s->lookahead)
+ s->match_length = s->lookahead;
+ }
+ Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan");
+ }
+
+ /* Emit match if have run of MIN_MATCH or longer, else emit literal */
+ if (s->match_length >= MIN_MATCH) {
+ check_match(s, s->strstart, s->strstart - 1, s->match_length);
+
+ _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush);
+
+ s->lookahead -= s->match_length;
+ s->strstart += s->match_length;
+ s->match_length = 0;
+ } else {
+ /* No match, output a literal byte */
+ Tracevv((stderr,"%c", s->window[s->strstart]));
+ _tr_tally_lit (s, s->window[s->strstart], bflush);
+ s->lookahead--;
+ s->strstart++;
+ }
+ if (bflush) FLUSH_BLOCK(s, 0);
+ }
+ s->insert = 0;
+ if (flush == Z_FINISH) {
+ FLUSH_BLOCK(s, 1);
+ return finish_done;
+ }
+ if (s->last_lit)
+ FLUSH_BLOCK(s, 0);
+ return block_done;
+ }
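Z_RLE reduces matching to counting how often the byte just before strstart repeats, so every emitted match has distance one. A minimal standalone sketch of that run scan (run_length is an illustrative name; the real code above additionally caps the run at MAX_MATCH and at the available lookahead):

    #include <stddef.h>

    /* Illustrative only: number of bytes at p[0..] that equal the byte
     * immediately before p, scanning at most limit bytes. */
    static size_t run_length(const unsigned char *p, size_t limit)
    {
        unsigned char prev = p[-1];   /* caller guarantees p > start of buffer */
        size_t n = 0;
        while (n < limit && p[n] == prev)
            n++;
        return n;
    }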
+
+ /* ===========================================================================
+ * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.
+ * (It will be regenerated if this run of deflate switches away from Huffman.)
+ */
+ local block_state deflate_huff(s, flush)
+ deflate_state *s;
+ int flush;
+ {
+ int bflush; /* set if current block must be flushed */
+
+ for (;;) {
+ /* Make sure that we have a literal to write. */
+ if (s->lookahead == 0) {
+ fill_window(s);
+ if (s->lookahead == 0) {
+ if (flush == Z_NO_FLUSH)
+ return need_more;
+ break; /* flush the current block */
+ }
+ }
+
+ /* Output a literal byte */
+ s->match_length = 0;
+ Tracevv((stderr,"%c", s->window[s->strstart]));
+ _tr_tally_lit (s, s->window[s->strstart], bflush);
+ s->lookahead--;
+ s->strstart++;
+ if (bflush) FLUSH_BLOCK(s, 0);
+ }
+ s->insert = 0;
+ if (flush == Z_FINISH) {
+ FLUSH_BLOCK(s, 1);
+ return finish_done;
+ }
+ if (s->last_lit)
+ FLUSH_BLOCK(s, 0);
+ return block_done;
+ }
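deflate_rle() and deflate_huff() are only reached when the stream was configured with the matching strategy. A minimal usage sketch, assuming an ordinary zlib-style build of this wrapper, that selects Z_RLE at init time and later switches strategy with deflateParams() (buffer handling and error paths omitted; make_rle_stream is an illustrative name):

    #include <zlib.h>
    #include <string.h>

    int make_rle_stream(z_stream *strm)
    {
        memset(strm, 0, sizeof(*strm));   /* zalloc/zfree/opaque left at Z_NULL */
        /* 15 window bits and memLevel 8 are the usual defaults; Z_RLE routes
         * compression through deflate_rle() above. */
        int ret = deflateInit2(strm, Z_BEST_SPEED, Z_DEFLATED, 15, 8, Z_RLE);
        if (ret != Z_OK)
            return ret;
        /* Later, switch to the normal lazy matcher at level 6 if desired;
         * deflateParams() may flush buffered output before changing strategy. */
        return deflateParams(strm, 6, Z_DEFAULT_STRATEGY);
    }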