zstdlib 0.10.0-x86-mingw32 → 0.12.0-x86-mingw32

Sign up to get free protection for your applications and to get access to all the features.
Files changed (113) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGES.md +17 -0
  3. data/ext/zstdlib_c/extconf.rb +9 -4
  4. data/ext/zstdlib_c/ruby/zlib-3.2/zstdlib.c +5090 -0
  5. data/ext/zstdlib_c/ruby/zlib-3.3/zstdlib.c +5090 -0
  6. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/adler32.c +5 -27
  7. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/compress.c +5 -16
  8. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/crc32.c +94 -161
  9. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/deflate.c +362 -434
  10. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/deflate.h +43 -12
  11. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/gzclose.c +1 -3
  12. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/gzguts.h +13 -18
  13. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/gzlib.c +28 -85
  14. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/gzread.c +23 -73
  15. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/gzwrite.c +19 -65
  16. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/infback.c +17 -30
  17. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/inffast.c +1 -4
  18. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/inffast.h +1 -1
  19. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/inflate.c +36 -102
  20. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/inftrees.c +6 -11
  21. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/inftrees.h +6 -6
  22. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/trees.c +290 -355
  23. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/uncompr.c +4 -12
  24. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/zconf.h +23 -14
  25. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/zlib.h +202 -199
  26. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/zutil.c +18 -44
  27. data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/zutil.h +13 -33
  28. data/ext/zstdlib_c/zstd-1.5.5/lib/common/allocations.h +55 -0
  29. data/ext/zstdlib_c/zstd-1.5.5/lib/common/bits.h +200 -0
  30. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/bitstream.h +19 -60
  31. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/compiler.h +26 -3
  32. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/cpu.h +1 -1
  33. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/debug.c +1 -1
  34. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/debug.h +1 -1
  35. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/entropy_common.c +12 -40
  36. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/error_private.c +9 -2
  37. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/error_private.h +1 -1
  38. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/fse.h +5 -83
  39. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/fse_decompress.c +7 -99
  40. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/huf.h +65 -156
  41. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/mem.h +39 -46
  42. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/pool.c +26 -10
  43. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/pool.h +7 -1
  44. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/portability_macros.h +22 -3
  45. data/ext/zstdlib_c/zstd-1.5.5/lib/common/threading.c +176 -0
  46. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/threading.h +5 -10
  47. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/xxhash.c +2 -2
  48. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/xxhash.h +8 -8
  49. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/zstd_common.c +1 -36
  50. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/zstd_deps.h +1 -1
  51. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/zstd_internal.h +17 -118
  52. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/zstd_trace.h +3 -3
  53. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/clevels.h +1 -1
  54. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/fse_compress.c +7 -124
  55. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/hist.c +1 -1
  56. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/hist.h +1 -1
  57. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/huf_compress.c +234 -169
  58. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_compress.c +1243 -538
  59. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_compress_internal.h +225 -151
  60. data/ext/zstdlib_c/zstd-1.5.5/lib/compress/zstd_compress_literals.c +235 -0
  61. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_compress_literals.h +16 -8
  62. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_compress_sequences.c +3 -3
  63. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_compress_sequences.h +1 -1
  64. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_compress_superblock.c +25 -21
  65. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_compress_superblock.h +1 -1
  66. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_cwksp.h +128 -62
  67. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_double_fast.c +95 -33
  68. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_double_fast.h +3 -2
  69. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_fast.c +433 -148
  70. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_fast.h +3 -2
  71. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_lazy.c +398 -345
  72. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_lazy.h +4 -2
  73. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_ldm.c +5 -5
  74. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_ldm.h +1 -1
  75. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_ldm_geartab.h +1 -1
  76. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_opt.c +106 -80
  77. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_opt.h +1 -1
  78. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstdmt_compress.c +17 -9
  79. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstdmt_compress.h +1 -1
  80. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/decompress/huf_decompress.c +434 -441
  81. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/decompress/huf_decompress_amd64.S +30 -39
  82. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/decompress/zstd_ddict.c +4 -4
  83. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/decompress/zstd_ddict.h +1 -1
  84. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/decompress/zstd_decompress.c +205 -80
  85. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/decompress/zstd_decompress_block.c +201 -81
  86. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/decompress/zstd_decompress_block.h +6 -1
  87. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/decompress/zstd_decompress_internal.h +4 -2
  88. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/zdict.h +53 -31
  89. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/zstd.h +580 -135
  90. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/zstd_errors.h +27 -8
  91. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/zlibWrapper/gzclose.c +1 -1
  92. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/zlibWrapper/gzcompatibility.h +8 -8
  93. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/zlibWrapper/gzguts.h +10 -10
  94. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/zlibWrapper/gzlib.c +3 -3
  95. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/zlibWrapper/gzread.c +10 -10
  96. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/zlibWrapper/gzwrite.c +5 -5
  97. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/zlibWrapper/zstd_zlibwrapper.c +46 -44
  98. data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/zlibWrapper/zstd_zlibwrapper.h +4 -1
  99. data/lib/2.4/zstdlib_c.so +0 -0
  100. data/lib/2.5/zstdlib_c.so +0 -0
  101. data/lib/2.6/zstdlib_c.so +0 -0
  102. data/lib/2.7/zstdlib_c.so +0 -0
  103. data/lib/3.0/zstdlib_c.so +0 -0
  104. data/lib/3.1/zstdlib_c.so +0 -0
  105. data/lib/3.2/zstdlib_c.so +0 -0
  106. data/lib/3.3/zstdlib_c.so +0 -0
  107. metadata +111 -105
  108. data/ext/zstdlib_c/zstd-1.5.2/lib/common/threading.c +0 -122
  109. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_compress_literals.c +0 -159
  110. /data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/crc32.h +0 -0
  111. /data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/inffixed.h +0 -0
  112. /data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/inflate.h +0 -0
  113. /data/ext/zstdlib_c/{zlib-1.2.12 → zlib-1.3.1}/trees.h +0 -0
@@ -1,5 +1,5 @@
1
1
  /* deflate.c -- compress data using the deflation algorithm
2
- * Copyright (C) 1995-2022 Jean-loup Gailly and Mark Adler
2
+ * Copyright (C) 1995-2024 Jean-loup Gailly and Mark Adler
3
3
  * For conditions of distribution and use, see copyright notice in zlib.h
4
4
  */
5
5
 
@@ -52,7 +52,7 @@
52
52
  #include "deflate.h"
53
53
 
54
54
  const char deflate_copyright[] =
55
- " deflate 1.2.12 Copyright 1995-2022 Jean-loup Gailly and Mark Adler ";
55
+ " deflate 1.3.1 Copyright 1995-2024 Jean-loup Gailly and Mark Adler ";
56
56
  /*
57
57
  If you use the zlib library in a product, an acknowledgment is welcome
58
58
  in the documentation of your product. If for some reason you cannot
@@ -60,9 +60,6 @@ const char deflate_copyright[] =
60
60
  copyright string in the executable of your product.
61
61
  */
62
62
 
63
- /* ===========================================================================
64
- * Function prototypes.
65
- */
66
63
  typedef enum {
67
64
  need_more, /* block not completed, need more input or more output */
68
65
  block_done, /* block flush performed */
@@ -70,35 +67,16 @@ typedef enum {
70
67
  finish_done /* finish done, accept no more input or output */
71
68
  } block_state;
72
69
 
73
- typedef block_state (*compress_func) OF((deflate_state *s, int flush));
70
+ typedef block_state (*compress_func)(deflate_state *s, int flush);
74
71
  /* Compression function. Returns the block state after the call. */
75
72
 
76
- local int deflateStateCheck OF((z_streamp strm));
77
- local void slide_hash OF((deflate_state *s));
78
- local void fill_window OF((deflate_state *s));
79
- local block_state deflate_stored OF((deflate_state *s, int flush));
80
- local block_state deflate_fast OF((deflate_state *s, int flush));
73
+ local block_state deflate_stored(deflate_state *s, int flush);
74
+ local block_state deflate_fast(deflate_state *s, int flush);
81
75
  #ifndef FASTEST
82
- local block_state deflate_slow OF((deflate_state *s, int flush));
83
- #endif
84
- local block_state deflate_rle OF((deflate_state *s, int flush));
85
- local block_state deflate_huff OF((deflate_state *s, int flush));
86
- local void lm_init OF((deflate_state *s));
87
- local void putShortMSB OF((deflate_state *s, uInt b));
88
- local void flush_pending OF((z_streamp strm));
89
- local unsigned read_buf OF((z_streamp strm, Bytef *buf, unsigned size));
90
- #ifdef ASMV
91
- # pragma message("Assembler code may have bugs -- use at your own risk")
92
- void match_init OF((void)); /* asm code initialization */
93
- uInt longest_match OF((deflate_state *s, IPos cur_match));
94
- #else
95
- local uInt longest_match OF((deflate_state *s, IPos cur_match));
96
- #endif
97
-
98
- #ifdef ZLIB_DEBUG
99
- local void check_match OF((deflate_state *s, IPos start, IPos match,
100
- int length));
76
+ local block_state deflate_slow(deflate_state *s, int flush);
101
77
  #endif
78
+ local block_state deflate_rle(deflate_state *s, int flush);
79
+ local block_state deflate_huff(deflate_state *s, int flush);
102
80
 
103
81
  /* ===========================================================================
104
82
  * Local data
@@ -160,7 +138,7 @@ local const config configuration_table[10] = {
160
138
  * characters, so that a running hash key can be computed from the previous
161
139
  * key instead of complete recalculation each time.
162
140
  */
163
- #define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
141
+ #define UPDATE_HASH(s,h,c) (h = (((h) << s->hash_shift) ^ (c)) & s->hash_mask)
164
142
 
165
143
 
166
144
  /* ===========================================================================
@@ -191,9 +169,9 @@ local const config configuration_table[10] = {
191
169
  */
192
170
  #define CLEAR_HASH(s) \
193
171
  do { \
194
- s->head[s->hash_size-1] = NIL; \
172
+ s->head[s->hash_size - 1] = NIL; \
195
173
  zmemzero((Bytef *)s->head, \
196
- (unsigned)(s->hash_size-1)*sizeof(*s->head)); \
174
+ (unsigned)(s->hash_size - 1)*sizeof(*s->head)); \
197
175
  } while (0)
198
176
 
199
177
  /* ===========================================================================
@@ -201,9 +179,12 @@ local const config configuration_table[10] = {
201
179
  * bit values at the expense of memory usage). We slide even when level == 0 to
202
180
  * keep the hash table consistent if we switch back to level > 0 later.
203
181
  */
204
- local void slide_hash(s)
205
- deflate_state *s;
206
- {
182
+ #if defined(__has_feature)
183
+ # if __has_feature(memory_sanitizer)
184
+ __attribute__((no_sanitize("memory")))
185
+ # endif
186
+ #endif
187
+ local void slide_hash(deflate_state *s) {
207
188
  unsigned n, m;
208
189
  Posf *p;
209
190
  uInt wsize = s->w_size;
@@ -227,30 +208,177 @@ local void slide_hash(s)
227
208
  #endif
228
209
  }
229
210
 
211
+ /* ===========================================================================
212
+ * Read a new buffer from the current input stream, update the adler32
213
+ * and total number of bytes read. All deflate() input goes through
214
+ * this function so some applications may wish to modify it to avoid
215
+ * allocating a large strm->next_in buffer and copying from it.
216
+ * (See also flush_pending()).
217
+ */
218
+ local unsigned read_buf(z_streamp strm, Bytef *buf, unsigned size) {
219
+ unsigned len = strm->avail_in;
220
+
221
+ if (len > size) len = size;
222
+ if (len == 0) return 0;
223
+
224
+ strm->avail_in -= len;
225
+
226
+ zmemcpy(buf, strm->next_in, len);
227
+ if (strm->state->wrap == 1) {
228
+ strm->adler = adler32(strm->adler, buf, len);
229
+ }
230
+ #ifdef GZIP
231
+ else if (strm->state->wrap == 2) {
232
+ strm->adler = crc32(strm->adler, buf, len);
233
+ }
234
+ #endif
235
+ strm->next_in += len;
236
+ strm->total_in += len;
237
+
238
+ return len;
239
+ }
240
+
241
+ /* ===========================================================================
242
+ * Fill the window when the lookahead becomes insufficient.
243
+ * Updates strstart and lookahead.
244
+ *
245
+ * IN assertion: lookahead < MIN_LOOKAHEAD
246
+ * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
247
+ * At least one byte has been read, or avail_in == 0; reads are
248
+ * performed for at least two bytes (required for the zip translate_eol
249
+ * option -- not supported here).
250
+ */
251
+ local void fill_window(deflate_state *s) {
252
+ unsigned n;
253
+ unsigned more; /* Amount of free space at the end of the window. */
254
+ uInt wsize = s->w_size;
255
+
256
+ Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");
257
+
258
+ do {
259
+ more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
260
+
261
+ /* Deal with !@#$% 64K limit: */
262
+ if (sizeof(int) <= 2) {
263
+ if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
264
+ more = wsize;
265
+
266
+ } else if (more == (unsigned)(-1)) {
267
+ /* Very unlikely, but possible on 16 bit machine if
268
+ * strstart == 0 && lookahead == 1 (input done a byte at time)
269
+ */
270
+ more--;
271
+ }
272
+ }
273
+
274
+ /* If the window is almost full and there is insufficient lookahead,
275
+ * move the upper half to the lower one to make room in the upper half.
276
+ */
277
+ if (s->strstart >= wsize + MAX_DIST(s)) {
278
+
279
+ zmemcpy(s->window, s->window + wsize, (unsigned)wsize - more);
280
+ s->match_start -= wsize;
281
+ s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
282
+ s->block_start -= (long) wsize;
283
+ if (s->insert > s->strstart)
284
+ s->insert = s->strstart;
285
+ slide_hash(s);
286
+ more += wsize;
287
+ }
288
+ if (s->strm->avail_in == 0) break;
289
+
290
+ /* If there was no sliding:
291
+ * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
292
+ * more == window_size - lookahead - strstart
293
+ * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
294
+ * => more >= window_size - 2*WSIZE + 2
295
+ * In the BIG_MEM or MMAP case (not yet supported),
296
+ * window_size == input_size + MIN_LOOKAHEAD &&
297
+ * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
298
+ * Otherwise, window_size == 2*WSIZE so more >= 2.
299
+ * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
300
+ */
301
+ Assert(more >= 2, "more < 2");
302
+
303
+ n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
304
+ s->lookahead += n;
305
+
306
+ /* Initialize the hash value now that we have some input: */
307
+ if (s->lookahead + s->insert >= MIN_MATCH) {
308
+ uInt str = s->strstart - s->insert;
309
+ s->ins_h = s->window[str];
310
+ UPDATE_HASH(s, s->ins_h, s->window[str + 1]);
311
+ #if MIN_MATCH != 3
312
+ Call UPDATE_HASH() MIN_MATCH-3 more times
313
+ #endif
314
+ while (s->insert) {
315
+ UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
316
+ #ifndef FASTEST
317
+ s->prev[str & s->w_mask] = s->head[s->ins_h];
318
+ #endif
319
+ s->head[s->ins_h] = (Pos)str;
320
+ str++;
321
+ s->insert--;
322
+ if (s->lookahead + s->insert < MIN_MATCH)
323
+ break;
324
+ }
325
+ }
326
+ /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
327
+ * but this is not important since only literal bytes will be emitted.
328
+ */
329
+
330
+ } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
331
+
332
+ /* If the WIN_INIT bytes after the end of the current data have never been
333
+ * written, then zero those bytes in order to avoid memory check reports of
334
+ * the use of uninitialized (or uninitialised as Julian writes) bytes by
335
+ * the longest match routines. Update the high water mark for the next
336
+ * time through here. WIN_INIT is set to MAX_MATCH since the longest match
337
+ * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
338
+ */
339
+ if (s->high_water < s->window_size) {
340
+ ulg curr = s->strstart + (ulg)(s->lookahead);
341
+ ulg init;
342
+
343
+ if (s->high_water < curr) {
344
+ /* Previous high water mark below current data -- zero WIN_INIT
345
+ * bytes or up to end of window, whichever is less.
346
+ */
347
+ init = s->window_size - curr;
348
+ if (init > WIN_INIT)
349
+ init = WIN_INIT;
350
+ zmemzero(s->window + curr, (unsigned)init);
351
+ s->high_water = curr + init;
352
+ }
353
+ else if (s->high_water < (ulg)curr + WIN_INIT) {
354
+ /* High water mark at or above current data, but below current data
355
+ * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
356
+ * to end of window, whichever is less.
357
+ */
358
+ init = (ulg)curr + WIN_INIT - s->high_water;
359
+ if (init > s->window_size - s->high_water)
360
+ init = s->window_size - s->high_water;
361
+ zmemzero(s->window + s->high_water, (unsigned)init);
362
+ s->high_water += init;
363
+ }
364
+ }
365
+
366
+ Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
367
+ "not enough room for search");
368
+ }
369
+
230
370
  /* ========================================================================= */
231
- int ZEXPORT deflateInit_(strm, level, version, stream_size)
232
- z_streamp strm;
233
- int level;
234
- const char *version;
235
- int stream_size;
236
- {
371
+ int ZEXPORT deflateInit_(z_streamp strm, int level, const char *version,
372
+ int stream_size) {
237
373
  return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
238
374
  Z_DEFAULT_STRATEGY, version, stream_size);
239
375
  /* To do: ignore strm->next_in if we use it as window */
240
376
  }
241
377
 
242
378
  /* ========================================================================= */
243
- int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
244
- version, stream_size)
245
- z_streamp strm;
246
- int level;
247
- int method;
248
- int windowBits;
249
- int memLevel;
250
- int strategy;
251
- const char *version;
252
- int stream_size;
253
- {
379
+ int ZEXPORT deflateInit2_(z_streamp strm, int level, int method,
380
+ int windowBits, int memLevel, int strategy,
381
+ const char *version, int stream_size) {
254
382
  deflate_state *s;
255
383
  int wrap = 1;
256
384
  static const char my_version[] = ZLIB_VERSION;
@@ -285,6 +413,8 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
285
413
 
286
414
  if (windowBits < 0) { /* suppress zlib wrapper */
287
415
  wrap = 0;
416
+ if (windowBits < -15)
417
+ return Z_STREAM_ERROR;
288
418
  windowBits = -windowBits;
289
419
  }
290
420
  #ifdef GZIP
@@ -314,7 +444,7 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
314
444
  s->hash_bits = (uInt)memLevel + 7;
315
445
  s->hash_size = 1 << s->hash_bits;
316
446
  s->hash_mask = s->hash_size - 1;
317
- s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
447
+ s->hash_shift = ((s->hash_bits + MIN_MATCH-1) / MIN_MATCH);
318
448
 
319
449
  s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
320
450
  s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
@@ -340,11 +470,11 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
340
470
  * sym_buf value to read moves forward three bytes. From that symbol, up to
341
471
  * 31 bits are written to pending_buf. The closest the written pending_buf
342
472
  * bits gets to the next sym_buf symbol to read is just before the last
343
- * code is written. At that time, 31*(n-2) bits have been written, just
344
- * after 24*(n-2) bits have been consumed from sym_buf. sym_buf starts at
345
- * 8*n bits into pending_buf. (Note that the symbol buffer fills when n-1
473
+ * code is written. At that time, 31*(n - 2) bits have been written, just
474
+ * after 24*(n - 2) bits have been consumed from sym_buf. sym_buf starts at
475
+ * 8*n bits into pending_buf. (Note that the symbol buffer fills when n - 1
346
476
  * symbols are written.) The closest the writing gets to what is unread is
347
- * then n+14 bits. Here n is lit_bufsize, which is 16384 by default, and
477
+ * then n + 14 bits. Here n is lit_bufsize, which is 16384 by default, and
348
478
  * can range from 128 to 32768.
349
479
  *
350
480
  * Therefore, at a minimum, there are 142 bits of space between what is
@@ -363,7 +493,7 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
363
493
  * symbols from which it is being constructed.
364
494
  */
365
495
 
366
- s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, 4);
496
+ s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, LIT_BUFS);
367
497
  s->pending_buf_size = (ulg)s->lit_bufsize * 4;
368
498
 
369
499
  if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
@@ -373,8 +503,14 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
373
503
  deflateEnd (strm);
374
504
  return Z_MEM_ERROR;
375
505
  }
506
+ #ifdef LIT_MEM
507
+ s->d_buf = (ushf *)(s->pending_buf + (s->lit_bufsize << 1));
508
+ s->l_buf = s->pending_buf + (s->lit_bufsize << 2);
509
+ s->sym_end = s->lit_bufsize - 1;
510
+ #else
376
511
  s->sym_buf = s->pending_buf + s->lit_bufsize;
377
512
  s->sym_end = (s->lit_bufsize - 1) * 3;
513
+ #endif
378
514
  /* We avoid equality with lit_bufsize*3 because of wraparound at 64K
379
515
  * on 16 bit machines and because stored blocks are restricted to
380
516
  * 64K-1 bytes.
@@ -390,9 +526,7 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
390
526
  /* =========================================================================
391
527
  * Check for a valid deflate stream state. Return 0 if ok, 1 if not.
392
528
  */
393
- local int deflateStateCheck (strm)
394
- z_streamp strm;
395
- {
529
+ local int deflateStateCheck(z_streamp strm) {
396
530
  deflate_state *s;
397
531
  if (strm == Z_NULL ||
398
532
  strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0)
@@ -413,11 +547,8 @@ local int deflateStateCheck (strm)
413
547
  }
414
548
 
415
549
  /* ========================================================================= */
416
- int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
417
- z_streamp strm;
418
- const Bytef *dictionary;
419
- uInt dictLength;
420
- {
550
+ int ZEXPORT deflateSetDictionary(z_streamp strm, const Bytef *dictionary,
551
+ uInt dictLength) {
421
552
  deflate_state *s;
422
553
  uInt str, n;
423
554
  int wrap;
@@ -482,11 +613,8 @@ int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
482
613
  }
483
614
 
484
615
  /* ========================================================================= */
485
- int ZEXPORT deflateGetDictionary (strm, dictionary, dictLength)
486
- z_streamp strm;
487
- Bytef *dictionary;
488
- uInt *dictLength;
489
- {
616
+ int ZEXPORT deflateGetDictionary(z_streamp strm, Bytef *dictionary,
617
+ uInt *dictLength) {
490
618
  deflate_state *s;
491
619
  uInt len;
492
620
 
@@ -504,9 +632,7 @@ int ZEXPORT deflateGetDictionary (strm, dictionary, dictLength)
504
632
  }
505
633
 
506
634
  /* ========================================================================= */
507
- int ZEXPORT deflateResetKeep (strm)
508
- z_streamp strm;
509
- {
635
+ int ZEXPORT deflateResetKeep(z_streamp strm) {
510
636
  deflate_state *s;
511
637
 
512
638
  if (deflateStateCheck(strm)) {
@@ -541,10 +667,32 @@ int ZEXPORT deflateResetKeep (strm)
541
667
  return Z_OK;
542
668
  }
543
669
 
670
+ /* ===========================================================================
671
+ * Initialize the "longest match" routines for a new zlib stream
672
+ */
673
+ local void lm_init(deflate_state *s) {
674
+ s->window_size = (ulg)2L*s->w_size;
675
+
676
+ CLEAR_HASH(s);
677
+
678
+ /* Set the default configuration parameters:
679
+ */
680
+ s->max_lazy_match = configuration_table[s->level].max_lazy;
681
+ s->good_match = configuration_table[s->level].good_length;
682
+ s->nice_match = configuration_table[s->level].nice_length;
683
+ s->max_chain_length = configuration_table[s->level].max_chain;
684
+
685
+ s->strstart = 0;
686
+ s->block_start = 0L;
687
+ s->lookahead = 0;
688
+ s->insert = 0;
689
+ s->match_length = s->prev_length = MIN_MATCH-1;
690
+ s->match_available = 0;
691
+ s->ins_h = 0;
692
+ }
693
+
544
694
  /* ========================================================================= */
545
- int ZEXPORT deflateReset (strm)
546
- z_streamp strm;
547
- {
695
+ int ZEXPORT deflateReset(z_streamp strm) {
548
696
  int ret;
549
697
 
550
698
  ret = deflateResetKeep(strm);
@@ -554,10 +702,7 @@ int ZEXPORT deflateReset (strm)
554
702
  }
555
703
 
556
704
  /* ========================================================================= */
557
- int ZEXPORT deflateSetHeader (strm, head)
558
- z_streamp strm;
559
- gz_headerp head;
560
- {
705
+ int ZEXPORT deflateSetHeader(z_streamp strm, gz_headerp head) {
561
706
  if (deflateStateCheck(strm) || strm->state->wrap != 2)
562
707
  return Z_STREAM_ERROR;
563
708
  strm->state->gzhead = head;
@@ -565,11 +710,7 @@ int ZEXPORT deflateSetHeader (strm, head)
565
710
  }
566
711
 
567
712
  /* ========================================================================= */
568
- int ZEXPORT deflatePending (strm, pending, bits)
569
- unsigned *pending;
570
- int *bits;
571
- z_streamp strm;
572
- {
713
+ int ZEXPORT deflatePending(z_streamp strm, unsigned *pending, int *bits) {
573
714
  if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
574
715
  if (pending != Z_NULL)
575
716
  *pending = strm->state->pending;
@@ -579,19 +720,21 @@ int ZEXPORT deflatePending (strm, pending, bits)
579
720
  }
580
721
 
581
722
  /* ========================================================================= */
582
- int ZEXPORT deflatePrime (strm, bits, value)
583
- z_streamp strm;
584
- int bits;
585
- int value;
586
- {
723
+ int ZEXPORT deflatePrime(z_streamp strm, int bits, int value) {
587
724
  deflate_state *s;
588
725
  int put;
589
726
 
590
727
  if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
591
728
  s = strm->state;
729
+ #ifdef LIT_MEM
730
+ if (bits < 0 || bits > 16 ||
731
+ (uchf *)s->d_buf < s->pending_out + ((Buf_size + 7) >> 3))
732
+ return Z_BUF_ERROR;
733
+ #else
592
734
  if (bits < 0 || bits > 16 ||
593
735
  s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3))
594
736
  return Z_BUF_ERROR;
737
+ #endif
595
738
  do {
596
739
  put = Buf_size - s->bi_valid;
597
740
  if (put > bits)
@@ -606,11 +749,7 @@ int ZEXPORT deflatePrime (strm, bits, value)
606
749
  }
607
750
 
608
751
  /* ========================================================================= */
609
- int ZEXPORT deflateParams(strm, level, strategy)
610
- z_streamp strm;
611
- int level;
612
- int strategy;
613
- {
752
+ int ZEXPORT deflateParams(z_streamp strm, int level, int strategy) {
614
753
  deflate_state *s;
615
754
  compress_func func;
616
755
 
@@ -655,13 +794,8 @@ int ZEXPORT deflateParams(strm, level, strategy)
655
794
  }
656
795
 
657
796
  /* ========================================================================= */
658
- int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain)
659
- z_streamp strm;
660
- int good_length;
661
- int max_lazy;
662
- int nice_length;
663
- int max_chain;
664
- {
797
+ int ZEXPORT deflateTune(z_streamp strm, int good_length, int max_lazy,
798
+ int nice_length, int max_chain) {
665
799
  deflate_state *s;
666
800
 
667
801
  if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
@@ -674,36 +808,47 @@ int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain)
674
808
  }
675
809
 
676
810
  /* =========================================================================
677
- * For the default windowBits of 15 and memLevel of 8, this function returns
678
- * a close to exact, as well as small, upper bound on the compressed size.
679
- * They are coded as constants here for a reason--if the #define's are
680
- * changed, then this function needs to be changed as well. The return
681
- * value for 15 and 8 only works for those exact settings.
811
+ * For the default windowBits of 15 and memLevel of 8, this function returns a
812
+ * close to exact, as well as small, upper bound on the compressed size. This
813
+ * is an expansion of ~0.03%, plus a small constant.
814
+ *
815
+ * For any setting other than those defaults for windowBits and memLevel, one
816
+ * of two worst case bounds is returned. This is at most an expansion of ~4% or
817
+ * ~13%, plus a small constant.
682
818
  *
683
- * For any setting other than those defaults for windowBits and memLevel,
684
- * the value returned is a conservative worst case for the maximum expansion
685
- * resulting from using fixed blocks instead of stored blocks, which deflate
686
- * can emit on compressed data for some combinations of the parameters.
819
+ * Both the 0.03% and 4% derive from the overhead of stored blocks. The first
820
+ * one is for stored blocks of 16383 bytes (memLevel == 8), whereas the second
821
+ * is for stored blocks of 127 bytes (the worst case memLevel == 1). The
822
+ * expansion results from five bytes of header for each stored block.
687
823
  *
688
- * This function could be more sophisticated to provide closer upper bounds for
689
- * every combination of windowBits and memLevel. But even the conservative
690
- * upper bound of about 14% expansion does not seem onerous for output buffer
691
- * allocation.
824
+ * The larger expansion of 13% results from a window size less than or equal to
825
+ * the symbols buffer size (windowBits <= memLevel + 7). In that case some of
826
+ * the data being compressed may have slid out of the sliding window, impeding
827
+ * a stored block from being emitted. Then the only choice is a fixed or
828
+ * dynamic block, where a fixed block limits the maximum expansion to 9 bits
829
+ * per 8-bit byte, plus 10 bits for every block. The smallest block size for
830
+ * which this can occur is 255 (memLevel == 2).
831
+ *
832
+ * Shifts are used to approximate divisions, for speed.
692
833
  */
693
- uLong ZEXPORT deflateBound(strm, sourceLen)
694
- z_streamp strm;
695
- uLong sourceLen;
696
- {
834
+ uLong ZEXPORT deflateBound(z_streamp strm, uLong sourceLen) {
697
835
  deflate_state *s;
698
- uLong complen, wraplen;
836
+ uLong fixedlen, storelen, wraplen;
837
+
838
+ /* upper bound for fixed blocks with 9-bit literals and length 255
839
+ (memLevel == 2, which is the lowest that may not use stored blocks) --
840
+ ~13% overhead plus a small constant */
841
+ fixedlen = sourceLen + (sourceLen >> 3) + (sourceLen >> 8) +
842
+ (sourceLen >> 9) + 4;
699
843
 
700
- /* conservative upper bound for compressed data */
701
- complen = sourceLen +
702
- ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5;
844
+ /* upper bound for stored blocks with length 127 (memLevel == 1) --
845
+ ~4% overhead plus a small constant */
846
+ storelen = sourceLen + (sourceLen >> 5) + (sourceLen >> 7) +
847
+ (sourceLen >> 11) + 7;
703
848
 
704
- /* if can't get parameters, return conservative bound plus zlib wrapper */
849
+ /* if can't get parameters, return larger bound plus a zlib wrapper */
705
850
  if (deflateStateCheck(strm))
706
- return complen + 6;
851
+ return (fixedlen > storelen ? fixedlen : storelen) + 6;
707
852
 
708
853
  /* compute wrapper length */
709
854
  s = strm->state;
@@ -740,11 +885,13 @@ uLong ZEXPORT deflateBound(strm, sourceLen)
740
885
  wraplen = 6;
741
886
  }
742
887
 
743
- /* if not default parameters, return conservative bound */
888
+ /* if not default parameters, return one of the conservative bounds */
744
889
  if (s->w_bits != 15 || s->hash_bits != 8 + 7)
745
- return complen + wraplen;
890
+ return (s->w_bits <= s->hash_bits && s->level ? fixedlen : storelen) +
891
+ wraplen;
746
892
 
747
- /* default settings: return tight bound for that case */
893
+ /* default settings: return tight bound for that case -- ~0.03% overhead
894
+ plus a small constant */
748
895
  return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
749
896
  (sourceLen >> 25) + 13 - 6 + wraplen;
750
897
  }
@@ -754,10 +901,7 @@ uLong ZEXPORT deflateBound(strm, sourceLen)
754
901
  * IN assertion: the stream state is correct and there is enough room in
755
902
  * pending_buf.
756
903
  */
757
- local void putShortMSB (s, b)
758
- deflate_state *s;
759
- uInt b;
760
- {
904
+ local void putShortMSB(deflate_state *s, uInt b) {
761
905
  put_byte(s, (Byte)(b >> 8));
762
906
  put_byte(s, (Byte)(b & 0xff));
763
907
  }
@@ -768,9 +912,7 @@ local void putShortMSB (s, b)
768
912
  * applications may wish to modify it to avoid allocating a large
769
913
  * strm->next_out buffer and copying into it. (See also read_buf()).
770
914
  */
771
- local void flush_pending(strm)
772
- z_streamp strm;
773
- {
915
+ local void flush_pending(z_streamp strm) {
774
916
  unsigned len;
775
917
  deflate_state *s = strm->state;
776
918
 
@@ -801,10 +943,7 @@ local void flush_pending(strm)
801
943
  } while (0)
802
944
 
803
945
  /* ========================================================================= */
804
- int ZEXPORT deflate (strm, flush)
805
- z_streamp strm;
806
- int flush;
807
- {
946
+ int ZEXPORT deflate(z_streamp strm, int flush) {
808
947
  int old_flush; /* value of flush param for previous deflate call */
809
948
  deflate_state *s;
810
949
 
@@ -856,7 +995,7 @@ int ZEXPORT deflate (strm, flush)
856
995
  s->status = BUSY_STATE;
857
996
  if (s->status == INIT_STATE) {
858
997
  /* zlib header */
859
- uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
998
+ uInt header = (Z_DEFLATED + ((s->w_bits - 8) << 4)) << 8;
860
999
  uInt level_flags;
861
1000
 
862
1001
  if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2)
@@ -1116,9 +1255,7 @@ int ZEXPORT deflate (strm, flush)
1116
1255
  }
1117
1256
 
1118
1257
  /* ========================================================================= */
1119
- int ZEXPORT deflateEnd (strm)
1120
- z_streamp strm;
1121
- {
1258
+ int ZEXPORT deflateEnd(z_streamp strm) {
1122
1259
  int status;
1123
1260
 
1124
1261
  if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
@@ -1142,11 +1279,10 @@ int ZEXPORT deflateEnd (strm)
1142
1279
  * To simplify the source, this is not supported for 16-bit MSDOS (which
1143
1280
  * doesn't have enough memory anyway to duplicate compression states).
1144
1281
  */
1145
- int ZEXPORT deflateCopy (dest, source)
1146
- z_streamp dest;
1147
- z_streamp source;
1148
- {
1282
+ int ZEXPORT deflateCopy(z_streamp dest, z_streamp source) {
1149
1283
  #ifdef MAXSEG_64K
1284
+ (void)dest;
1285
+ (void)source;
1150
1286
  return Z_STREAM_ERROR;
1151
1287
  #else
1152
1288
  deflate_state *ds;
@@ -1170,7 +1306,7 @@ int ZEXPORT deflateCopy (dest, source)
1170
1306
  ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
1171
1307
  ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
1172
1308
  ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
1173
- ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, 4);
1309
+ ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, LIT_BUFS);
1174
1310
 
1175
1311
  if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
1176
1312
  ds->pending_buf == Z_NULL) {
@@ -1181,10 +1317,15 @@ int ZEXPORT deflateCopy (dest, source)
1181
1317
  zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
1182
1318
  zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos));
1183
1319
  zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos));
1184
- zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
1320
+ zmemcpy(ds->pending_buf, ss->pending_buf, ds->lit_bufsize * LIT_BUFS);
1185
1321
 
1186
1322
  ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
1323
+ #ifdef LIT_MEM
1324
+ ds->d_buf = (ushf *)(ds->pending_buf + (ds->lit_bufsize << 1));
1325
+ ds->l_buf = ds->pending_buf + (ds->lit_bufsize << 2);
1326
+ #else
1187
1327
  ds->sym_buf = ds->pending_buf + ds->lit_bufsize;
1328
+ #endif
1188
1329
 
1189
1330
  ds->l_desc.dyn_tree = ds->dyn_ltree;
1190
1331
  ds->d_desc.dyn_tree = ds->dyn_dtree;
@@ -1194,71 +1335,6 @@ int ZEXPORT deflateCopy (dest, source)
1194
1335
  #endif /* MAXSEG_64K */
1195
1336
  }
1196
1337
 
1197
- /* ===========================================================================
1198
- * Read a new buffer from the current input stream, update the adler32
1199
- * and total number of bytes read. All deflate() input goes through
1200
- * this function so some applications may wish to modify it to avoid
1201
- * allocating a large strm->next_in buffer and copying from it.
1202
- * (See also flush_pending()).
1203
- */
1204
- local unsigned read_buf(strm, buf, size)
1205
- z_streamp strm;
1206
- Bytef *buf;
1207
- unsigned size;
1208
- {
1209
- unsigned len = strm->avail_in;
1210
-
1211
- if (len > size) len = size;
1212
- if (len == 0) return 0;
1213
-
1214
- strm->avail_in -= len;
1215
-
1216
- zmemcpy(buf, strm->next_in, len);
1217
- if (strm->state->wrap == 1) {
1218
- strm->adler = adler32(strm->adler, buf, len);
1219
- }
1220
- #ifdef GZIP
1221
- else if (strm->state->wrap == 2) {
1222
- strm->adler = crc32(strm->adler, buf, len);
1223
- }
1224
- #endif
1225
- strm->next_in += len;
1226
- strm->total_in += len;
1227
-
1228
- return len;
1229
- }
1230
-
1231
- /* ===========================================================================
1232
- * Initialize the "longest match" routines for a new zlib stream
1233
- */
1234
- local void lm_init (s)
1235
- deflate_state *s;
1236
- {
1237
- s->window_size = (ulg)2L*s->w_size;
1238
-
1239
- CLEAR_HASH(s);
1240
-
1241
- /* Set the default configuration parameters:
1242
- */
1243
- s->max_lazy_match = configuration_table[s->level].max_lazy;
1244
- s->good_match = configuration_table[s->level].good_length;
1245
- s->nice_match = configuration_table[s->level].nice_length;
1246
- s->max_chain_length = configuration_table[s->level].max_chain;
1247
-
1248
- s->strstart = 0;
1249
- s->block_start = 0L;
1250
- s->lookahead = 0;
1251
- s->insert = 0;
1252
- s->match_length = s->prev_length = MIN_MATCH-1;
1253
- s->match_available = 0;
1254
- s->ins_h = 0;
1255
- #ifndef FASTEST
1256
- #ifdef ASMV
1257
- match_init(); /* initialize the asm code */
1258
- #endif
1259
- #endif
1260
- }
1261
-
1262
1338
  #ifndef FASTEST
1263
1339
  /* ===========================================================================
1264
1340
  * Set match_start to the longest match starting at the given string and
@@ -1269,14 +1345,7 @@ local void lm_init (s)
1269
1345
  * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
1270
1346
  * OUT assertion: the match length is not greater than s->lookahead.
1271
1347
  */
1272
- #ifndef ASMV
1273
- /* For 80x86 and 680x0, an optimized version will be provided in match.asm or
1274
- * match.S. The code will be functionally equivalent.
1275
- */
1276
- local uInt longest_match(s, cur_match)
1277
- deflate_state *s;
1278
- IPos cur_match; /* current match */
1279
- {
1348
+ local uInt longest_match(deflate_state *s, IPos cur_match) {
1280
1349
  unsigned chain_length = s->max_chain_length;/* max hash chain length */
1281
1350
  register Bytef *scan = s->window + s->strstart; /* current string */
1282
1351
  register Bytef *match; /* matched string */
@@ -1297,10 +1366,10 @@ local uInt longest_match(s, cur_match)
1297
1366
  */
1298
1367
  register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
1299
1368
  register ush scan_start = *(ushf*)scan;
1300
- register ush scan_end = *(ushf*)(scan+best_len-1);
1369
+ register ush scan_end = *(ushf*)(scan + best_len - 1);
1301
1370
  #else
1302
1371
  register Bytef *strend = s->window + s->strstart + MAX_MATCH;
1303
- register Byte scan_end1 = scan[best_len-1];
1372
+ register Byte scan_end1 = scan[best_len - 1];
1304
1373
  register Byte scan_end = scan[best_len];
1305
1374
  #endif
1306
1375
 
@@ -1318,7 +1387,8 @@ local uInt longest_match(s, cur_match)
1318
1387
  */
1319
1388
  if ((uInt)nice_match > s->lookahead) nice_match = (int)s->lookahead;
1320
1389
 
1321
- Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
1390
+ Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
1391
+ "need lookahead");
1322
1392
 
1323
1393
  do {
1324
1394
  Assert(cur_match < s->strstart, "no future");
@@ -1336,43 +1406,44 @@ local uInt longest_match(s, cur_match)
1336
1406
  /* This code assumes sizeof(unsigned short) == 2. Do not use
1337
1407
  * UNALIGNED_OK if your compiler uses a different size.
1338
1408
  */
1339
- if (*(ushf*)(match+best_len-1) != scan_end ||
1409
+ if (*(ushf*)(match + best_len - 1) != scan_end ||
1340
1410
  *(ushf*)match != scan_start) continue;
1341
1411
 
1342
1412
  /* It is not necessary to compare scan[2] and match[2] since they are
1343
1413
  * always equal when the other bytes match, given that the hash keys
1344
1414
  * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
1345
- * strstart+3, +5, ... up to strstart+257. We check for insufficient
1415
+ * strstart + 3, + 5, up to strstart + 257. We check for insufficient
1346
1416
  * lookahead only every 4th comparison; the 128th check will be made
1347
- * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
1417
+ * at strstart + 257. If MAX_MATCH-2 is not a multiple of 8, it is
1348
1418
  * necessary to put more guard bytes at the end of the window, or
1349
1419
  * to check more often for insufficient lookahead.
1350
1420
  */
1351
1421
  Assert(scan[2] == match[2], "scan[2]?");
1352
1422
  scan++, match++;
1353
1423
  do {
1354
- } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1355
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1356
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1357
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1424
+ } while (*(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
1425
+ *(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
1426
+ *(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
1427
+ *(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
1358
1428
  scan < strend);
1359
1429
  /* The funny "do {}" generates better code on most compilers */
1360
1430
 
1361
- /* Here, scan <= window+strstart+257 */
1362
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
1431
+ /* Here, scan <= window + strstart + 257 */
1432
+ Assert(scan <= s->window + (unsigned)(s->window_size - 1),
1433
+ "wild scan");
1363
1434
  if (*scan == *match) scan++;
1364
1435
 
1365
- len = (MAX_MATCH - 1) - (int)(strend-scan);
1436
+ len = (MAX_MATCH - 1) - (int)(strend - scan);
1366
1437
  scan = strend - (MAX_MATCH-1);
1367
1438
 
1368
1439
  #else /* UNALIGNED_OK */
1369
1440
 
1370
- if (match[best_len] != scan_end ||
1371
- match[best_len-1] != scan_end1 ||
1372
- *match != *scan ||
1373
- *++match != scan[1]) continue;
1441
+ if (match[best_len] != scan_end ||
1442
+ match[best_len - 1] != scan_end1 ||
1443
+ *match != *scan ||
1444
+ *++match != scan[1]) continue;
1374
1445
 
1375
- /* The check at best_len-1 can be removed because it will be made
1446
+ /* The check at best_len - 1 can be removed because it will be made
1376
1447
  * again later. (This heuristic is not always a win.)
1377
1448
  * It is not necessary to compare scan[2] and match[2] since they
1378
1449
  * are always equal when the other bytes match, given that
@@ -1382,7 +1453,7 @@ local uInt longest_match(s, cur_match)
1382
1453
  Assert(*scan == *match, "match[2]?");
1383
1454
 
1384
1455
  /* We check for insufficient lookahead only every 8th comparison;
1385
- * the 256th check will be made at strstart+258.
1456
+ * the 256th check will be made at strstart + 258.
1386
1457
  */
1387
1458
  do {
1388
1459
  } while (*++scan == *++match && *++scan == *++match &&
@@ -1391,7 +1462,8 @@ local uInt longest_match(s, cur_match)
1391
1462
  *++scan == *++match && *++scan == *++match &&
1392
1463
  scan < strend);
1393
1464
 
1394
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
1465
+ Assert(scan <= s->window + (unsigned)(s->window_size - 1),
1466
+ "wild scan");
1395
1467
 
1396
1468
  len = MAX_MATCH - (int)(strend - scan);
1397
1469
  scan = strend - MAX_MATCH;
@@ -1403,9 +1475,9 @@ local uInt longest_match(s, cur_match)
1403
1475
  best_len = len;
1404
1476
  if (len >= nice_match) break;
1405
1477
  #ifdef UNALIGNED_OK
1406
- scan_end = *(ushf*)(scan+best_len-1);
1478
+ scan_end = *(ushf*)(scan + best_len - 1);
1407
1479
  #else
1408
- scan_end1 = scan[best_len-1];
1480
+ scan_end1 = scan[best_len - 1];
1409
1481
  scan_end = scan[best_len];
1410
1482
  #endif
1411
1483
  }
@@ -1415,17 +1487,13 @@ local uInt longest_match(s, cur_match)
1415
1487
  if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
1416
1488
  return s->lookahead;
1417
1489
  }
1418
- #endif /* ASMV */
1419
1490
 
1420
1491
  #else /* FASTEST */
1421
1492
 
1422
1493
  /* ---------------------------------------------------------------------------
1423
1494
  * Optimized version for FASTEST only
1424
1495
  */
1425
- local uInt longest_match(s, cur_match)
1426
- deflate_state *s;
1427
- IPos cur_match; /* current match */
1428
- {
1496
+ local uInt longest_match(deflate_state *s, IPos cur_match) {
1429
1497
  register Bytef *scan = s->window + s->strstart; /* current string */
1430
1498
  register Bytef *match; /* matched string */
1431
1499
  register int len; /* length of current match */
@@ -1436,7 +1504,8 @@ local uInt longest_match(s, cur_match)
1436
1504
  */
1437
1505
  Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
1438
1506
 
1439
- Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
1507
+ Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
1508
+ "need lookahead");
1440
1509
 
1441
1510
  Assert(cur_match < s->strstart, "no future");
1442
1511
 
@@ -1446,7 +1515,7 @@ local uInt longest_match(s, cur_match)
1446
1515
  */
1447
1516
  if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;
1448
1517
 
1449
- /* The check at best_len-1 can be removed because it will be made
1518
+ /* The check at best_len - 1 can be removed because it will be made
1450
1519
  * again later. (This heuristic is not always a win.)
1451
1520
  * It is not necessary to compare scan[2] and match[2] since they
1452
1521
  * are always equal when the other bytes match, given that
@@ -1456,7 +1525,7 @@ local uInt longest_match(s, cur_match)
1456
1525
  Assert(*scan == *match, "match[2]?");
1457
1526
 
1458
1527
  /* We check for insufficient lookahead only every 8th comparison;
1459
- * the 256th check will be made at strstart+258.
1528
+ * the 256th check will be made at strstart + 258.
1460
1529
  */
1461
1530
  do {
1462
1531
  } while (*++scan == *++match && *++scan == *++match &&
@@ -1465,7 +1534,7 @@ local uInt longest_match(s, cur_match)
1465
1534
  *++scan == *++match && *++scan == *++match &&
1466
1535
  scan < strend);
1467
1536
 
1468
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
1537
+ Assert(scan <= s->window + (unsigned)(s->window_size - 1), "wild scan");
1469
1538
 
1470
1539
  len = MAX_MATCH - (int)(strend - scan);
1471
1540
 
@@ -1485,23 +1554,27 @@ local uInt longest_match(s, cur_match)
1485
1554
  /* ===========================================================================
1486
1555
  * Check that the match at match_start is indeed a match.
1487
1556
  */
1488
- local void check_match(s, start, match, length)
1489
- deflate_state *s;
1490
- IPos start, match;
1491
- int length;
1492
- {
1557
+ local void check_match(deflate_state *s, IPos start, IPos match, int length) {
1493
1558
  /* check that the match is indeed a match */
1494
- if (zmemcmp(s->window + match,
1495
- s->window + start, length) != EQUAL) {
1496
- fprintf(stderr, " start %u, match %u, length %d\n",
1497
- start, match, length);
1559
+ Bytef *back = s->window + (int)match, *here = s->window + start;
1560
+ IPos len = length;
1561
+ if (match == (IPos)-1) {
1562
+ /* match starts one byte before the current window -- just compare the
1563
+ subsequent length-1 bytes */
1564
+ back++;
1565
+ here++;
1566
+ len--;
1567
+ }
1568
+ if (zmemcmp(back, here, len) != EQUAL) {
1569
+ fprintf(stderr, " start %u, match %d, length %d\n",
1570
+ start, (int)match, length);
1498
1571
  do {
1499
- fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
1500
- } while (--length != 0);
1572
+ fprintf(stderr, "(%02x %02x)", *back++, *here++);
1573
+ } while (--len != 0);
1501
1574
  z_error("invalid match");
1502
1575
  }
1503
1576
  if (z_verbose > 1) {
1504
- fprintf(stderr,"\\[%d,%d]", start-match, length);
1577
+ fprintf(stderr,"\\[%d,%d]", start - match, length);
1505
1578
  do { putc(s->window[start++], stderr); } while (--length != 0);
1506
1579
  }
1507
1580
  }
@@ -1509,137 +1582,6 @@ local void check_match(s, start, match, length)
1509
1582
  # define check_match(s, start, match, length)
1510
1583
  #endif /* ZLIB_DEBUG */
1511
1584
 
1512
- /* ===========================================================================
1513
- * Fill the window when the lookahead becomes insufficient.
1514
- * Updates strstart and lookahead.
1515
- *
1516
- * IN assertion: lookahead < MIN_LOOKAHEAD
1517
- * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
1518
- * At least one byte has been read, or avail_in == 0; reads are
1519
- * performed for at least two bytes (required for the zip translate_eol
1520
- * option -- not supported here).
1521
- */
1522
- local void fill_window(s)
1523
- deflate_state *s;
1524
- {
1525
- unsigned n;
1526
- unsigned more; /* Amount of free space at the end of the window. */
1527
- uInt wsize = s->w_size;
1528
-
1529
- Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");
1530
-
1531
- do {
1532
- more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
1533
-
1534
- /* Deal with !@#$% 64K limit: */
1535
- if (sizeof(int) <= 2) {
1536
- if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
1537
- more = wsize;
1538
-
1539
- } else if (more == (unsigned)(-1)) {
1540
- /* Very unlikely, but possible on 16 bit machine if
1541
- * strstart == 0 && lookahead == 1 (input done a byte at time)
1542
- */
1543
- more--;
1544
- }
1545
- }
1546
-
1547
- /* If the window is almost full and there is insufficient lookahead,
1548
- * move the upper half to the lower one to make room in the upper half.
1549
- */
1550
- if (s->strstart >= wsize+MAX_DIST(s)) {
1551
-
1552
- zmemcpy(s->window, s->window+wsize, (unsigned)wsize - more);
1553
- s->match_start -= wsize;
1554
- s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
1555
- s->block_start -= (long) wsize;
1556
- if (s->insert > s->strstart)
1557
- s->insert = s->strstart;
1558
- slide_hash(s);
1559
- more += wsize;
1560
- }
1561
- if (s->strm->avail_in == 0) break;
1562
-
1563
- /* If there was no sliding:
1564
- * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
1565
- * more == window_size - lookahead - strstart
1566
- * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
1567
- * => more >= window_size - 2*WSIZE + 2
1568
- * In the BIG_MEM or MMAP case (not yet supported),
1569
- * window_size == input_size + MIN_LOOKAHEAD &&
1570
- * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
1571
- * Otherwise, window_size == 2*WSIZE so more >= 2.
1572
- * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
1573
- */
1574
- Assert(more >= 2, "more < 2");
1575
-
1576
- n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
1577
- s->lookahead += n;
1578
-
1579
- /* Initialize the hash value now that we have some input: */
1580
- if (s->lookahead + s->insert >= MIN_MATCH) {
1581
- uInt str = s->strstart - s->insert;
1582
- s->ins_h = s->window[str];
1583
- UPDATE_HASH(s, s->ins_h, s->window[str + 1]);
1584
- #if MIN_MATCH != 3
1585
- Call UPDATE_HASH() MIN_MATCH-3 more times
1586
- #endif
1587
- while (s->insert) {
1588
- UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
1589
- #ifndef FASTEST
1590
- s->prev[str & s->w_mask] = s->head[s->ins_h];
1591
- #endif
1592
- s->head[s->ins_h] = (Pos)str;
1593
- str++;
1594
- s->insert--;
1595
- if (s->lookahead + s->insert < MIN_MATCH)
1596
- break;
1597
- }
1598
- }
1599
- /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
1600
- * but this is not important since only literal bytes will be emitted.
1601
- */
1602
-
1603
- } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
1604
-
1605
- /* If the WIN_INIT bytes after the end of the current data have never been
1606
- * written, then zero those bytes in order to avoid memory check reports of
1607
- * the use of uninitialized (or uninitialised as Julian writes) bytes by
1608
- * the longest match routines. Update the high water mark for the next
1609
- * time through here. WIN_INIT is set to MAX_MATCH since the longest match
1610
- * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
1611
- */
1612
- if (s->high_water < s->window_size) {
1613
- ulg curr = s->strstart + (ulg)(s->lookahead);
1614
- ulg init;
1615
-
1616
- if (s->high_water < curr) {
1617
- /* Previous high water mark below current data -- zero WIN_INIT
1618
- * bytes or up to end of window, whichever is less.
1619
- */
1620
- init = s->window_size - curr;
1621
- if (init > WIN_INIT)
1622
- init = WIN_INIT;
1623
- zmemzero(s->window + curr, (unsigned)init);
1624
- s->high_water = curr + init;
1625
- }
1626
- else if (s->high_water < (ulg)curr + WIN_INIT) {
1627
- /* High water mark at or above current data, but below current data
1628
- * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
1629
- * to end of window, whichever is less.
1630
- */
1631
- init = (ulg)curr + WIN_INIT - s->high_water;
1632
- if (init > s->window_size - s->high_water)
1633
- init = s->window_size - s->high_water;
1634
- zmemzero(s->window + s->high_water, (unsigned)init);
1635
- s->high_water += init;
1636
- }
1637
- }
1638
-
1639
- Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
1640
- "not enough room for search");
1641
- }
1642
-
1643
1585
  /* ===========================================================================
1644
1586
  * Flush the current block, with given end-of-file flag.
1645
1587
  * IN assertion: strstart is set to the end of the current match.
@@ -1680,12 +1622,9 @@ local void fill_window(s)
  *
  * deflate_stored() is written to minimize the number of times an input byte is
  * copied. It is most efficient with large input and output buffers, which
- * maximizes the opportunites to have a single copy from next_in to next_out.
+ * maximizes the opportunities to have a single copy from next_in to next_out.
  */
- local block_state deflate_stored(s, flush)
-     deflate_state *s;
-     int flush;
- {
+ local block_state deflate_stored(deflate_state *s, int flush) {
      /* Smallest worthy block size when not flushing or finishing. By default
       * this is 32K. This can be as small as 507 bytes for memLevel == 1. For
       * large input and output buffers, the stored block size will be larger.
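The signature change in the hunk above is the pattern repeated for deflate_fast(), deflate_slow(), deflate_rle() and deflate_huff() below: zlib 1.3.1 drops the old K&R (identifier-list) function definitions in favour of ANSI prototypes. A minimal sketch of the two forms, using a hypothetical function rather than anything from deflate.c:

#include <stdio.h>

/* K&R (identifier-list) definition -- the form this diff removes.
 * Shown only in a comment because modern compilers warn on it and C23
 * removes it from the language:
 *
 *   local block_state example(s, flush)
 *       deflate_state *s;
 *       int flush;
 *   {
 *       ...
 *   }
 */

/* ANSI prototype-style definition -- the form this diff converts to. */
static int example(int s, int flush) {
    return s + flush;                 /* stand-in body */
}

int main(void) {
    printf("%d\n", example(2, 3));
    return 0;
}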
@@ -1869,10 +1808,7 @@ local block_state deflate_stored(s, flush)
  * new strings in the dictionary only for unmatched strings or for short
  * matches. It is used only for the fast compression options.
  */
- local block_state deflate_fast(s, flush)
-     deflate_state *s;
-     int flush;
- {
+ local block_state deflate_fast(deflate_state *s, int flush) {
      IPos hash_head;       /* head of the hash chain */
      int bflush;           /* set if current block must be flushed */
 
@@ -1890,7 +1826,7 @@ local block_state deflate_fast(s, flush)
      if (s->lookahead == 0) break; /* flush the current block */
  }
 
- /* Insert the string window[strstart .. strstart+2] in the
+ /* Insert the string window[strstart .. strstart + 2] in the
   * dictionary, and set hash_head to the head of the hash chain:
   */
  hash_head = NIL;
@@ -1938,7 +1874,7 @@ local block_state deflate_fast(s, flush)
  s->strstart += s->match_length;
  s->match_length = 0;
  s->ins_h = s->window[s->strstart];
- UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+ UPDATE_HASH(s, s->ins_h, s->window[s->strstart + 1]);
  #if MIN_MATCH != 3
      Call UPDATE_HASH() MIN_MATCH-3 more times
  #endif
@@ -1949,7 +1885,7 @@ local block_state deflate_fast(s, flush)
  } else {
      /* No match, output a literal byte */
      Tracevv((stderr,"%c", s->window[s->strstart]));
-     _tr_tally_lit (s, s->window[s->strstart], bflush);
+     _tr_tally_lit(s, s->window[s->strstart], bflush);
      s->lookahead--;
      s->strstart++;
  }
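deflate_fast(), whose hunks end here, implements the greedy strategy its header comment describes: take the longest match available at the current position immediately, and fall back to a literal when it is shorter than MIN_MATCH. The toy pass below illustrates only that decision rule; the brute-force longest_match() stands in for zlib's hash-chain search and is not part of the patch.

#include <stdio.h>

#define MIN_MATCH 3
#define MAX_MATCH 258

/* Brute-force stand-in for zlib's hash-chain longest_match(). */
static unsigned longest_match(const unsigned char *buf, unsigned pos,
                              unsigned len, unsigned *dist) {
    unsigned best = 0, cand, j;
    for (cand = 0; cand < pos; cand++) {
        for (j = 0; pos + j < len && j < MAX_MATCH &&
                    buf[cand + j] == buf[pos + j]; j++)
            ;
        if (j > best) { best = j; *dist = pos - cand; }
    }
    return best;
}

/* Greedy pass: accept the longest match at each position immediately. */
static void greedy_pass(const unsigned char *buf, unsigned len) {
    unsigned pos = 0;
    while (pos < len) {
        unsigned dist = 0;
        unsigned mlen = longest_match(buf, pos, len, &dist);
        if (mlen >= MIN_MATCH) {
            printf("match   len=%u dist=%u\n", mlen, dist);
            pos += mlen;              /* no lazy re-evaluation */
        } else {
            printf("literal '%c'\n", buf[pos]);
            pos++;
        }
    }
}

int main(void) {
    const unsigned char s[] = "abcabcabcd";
    greedy_pass(s, (unsigned)(sizeof s - 1));
    return 0;
}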
@@ -1971,10 +1907,7 @@ local block_state deflate_fast(s, flush)
  * evaluation for matches: a match is finally adopted only if there is
  * no better match at the next window position.
  */
- local block_state deflate_slow(s, flush)
-     deflate_state *s;
-     int flush;
- {
+ local block_state deflate_slow(deflate_state *s, int flush) {
      IPos hash_head;       /* head of hash chain */
      int bflush;           /* set if current block must be flushed */
 
@@ -1993,7 +1926,7 @@ local block_state deflate_slow(s, flush)
      if (s->lookahead == 0) break; /* flush the current block */
  }
 
- /* Insert the string window[strstart .. strstart+2] in the
+ /* Insert the string window[strstart .. strstart + 2] in the
   * dictionary, and set hash_head to the head of the hash chain:
   */
  hash_head = NIL;
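The deflate_slow() hunks around here cover the lazy-evaluation rule stated in the function's comment above: a match found at one position is only adopted if the position immediately after it does not offer a longer one; otherwise a single literal is emitted and the decision is deferred. The sketch below shows just that rule, with a hard-coded, hypothetical table of match lengths standing in for the real match finder; it simplifies zlib's prev_length/match_available bookkeeping and is not part of the patch.

#include <stdio.h>

#define MIN_MATCH 3

/* Hypothetical match lengths a match finder might report at successive
 * positions of some input. */
static const unsigned match_len_at[] = { 0, 3, 7, 0, 0, 4, 4, 0 };
#define N (sizeof match_len_at / sizeof match_len_at[0])

static unsigned find_match_len(unsigned pos) {
    return pos < N ? match_len_at[pos] : 0;
}

int main(void) {
    unsigned pos = 0;
    while (pos < N) {
        unsigned cur = find_match_len(pos);
        unsigned nxt = find_match_len(pos + 1);
        if (cur >= MIN_MATCH && cur >= nxt) {
            /* No better match one byte later: adopt this one. */
            printf("pos %u: emit match len=%u\n", pos, cur);
            pos += cur;
        } else {
            /* Either no usable match, or the next position promises a
             * longer one: emit a literal and re-evaluate at pos + 1. */
            printf("pos %u: emit literal\n", pos);
            pos++;
        }
    }
    return 0;
}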
@@ -2035,17 +1968,17 @@ local block_state deflate_slow(s, flush)
      uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
      /* Do not insert strings in hash table beyond this. */
 
-     check_match(s, s->strstart-1, s->prev_match, s->prev_length);
+     check_match(s, s->strstart - 1, s->prev_match, s->prev_length);
 
-     _tr_tally_dist(s, s->strstart -1 - s->prev_match,
+     _tr_tally_dist(s, s->strstart - 1 - s->prev_match,
                     s->prev_length - MIN_MATCH, bflush);
 
      /* Insert in hash table all strings up to the end of the match.
-      * strstart-1 and strstart are already inserted. If there is not
+      * strstart - 1 and strstart are already inserted. If there is not
       * enough lookahead, the last two strings are not inserted in
       * the hash table.
       */
-     s->lookahead -= s->prev_length-1;
+     s->lookahead -= s->prev_length - 1;
      s->prev_length -= 2;
      do {
          if (++s->strstart <= max_insert) {
@@ -2063,8 +1996,8 @@ local block_state deflate_slow(s, flush)
      * single literal. If there was a match but the current match
      * is longer, truncate the previous match to a single literal.
      */
-     Tracevv((stderr,"%c", s->window[s->strstart-1]));
-     _tr_tally_lit(s, s->window[s->strstart-1], bflush);
+     Tracevv((stderr,"%c", s->window[s->strstart - 1]));
+     _tr_tally_lit(s, s->window[s->strstart - 1], bflush);
      if (bflush) {
          FLUSH_BLOCK_ONLY(s, 0);
      }
@@ -2082,8 +2015,8 @@ local block_state deflate_slow(s, flush)
  }
  Assert (flush != Z_NO_FLUSH, "no flush?");
  if (s->match_available) {
-     Tracevv((stderr,"%c", s->window[s->strstart-1]));
-     _tr_tally_lit(s, s->window[s->strstart-1], bflush);
+     Tracevv((stderr,"%c", s->window[s->strstart - 1]));
+     _tr_tally_lit(s, s->window[s->strstart - 1], bflush);
      s->match_available = 0;
  }
  s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
@@ -2102,10 +2035,7 @@ local block_state deflate_slow(s, flush)
  * one. Do not maintain a hash table. (It will be regenerated if this run of
  * deflate switches away from Z_RLE.)
  */
- local block_state deflate_rle(s, flush)
-     deflate_state *s;
-     int flush;
- {
+ local block_state deflate_rle(deflate_state *s, int flush) {
      int bflush;             /* set if current block must be flushed */
      uInt prev;              /* byte at distance one to match */
      Bytef *scan, *strend;   /* scan goes up to strend for length of run */
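As its header comment says, deflate_rle() only ever considers a match against the byte at distance one, so a run of identical bytes becomes a single distance-1 match and everything else is emitted literally. The self-contained sketch below illustrates that rule with simplified bounds; buffer handling is nothing like deflate.c's and the code is not part of the patch.

#include <stdio.h>

#define MIN_MATCH 3
#define MAX_MATCH 258

int main(void) {
    const unsigned char buf[] = "aaaaabbcccc";
    unsigned len = (unsigned)(sizeof buf - 1);
    unsigned pos = 0;

    while (pos < len) {
        unsigned run = 0;
        if (pos > 0) {
            unsigned char prev = buf[pos - 1];        /* byte at distance one */
            while (pos + run < len && run < MAX_MATCH && buf[pos + run] == prev)
                run++;
        }
        if (run >= MIN_MATCH) {
            printf("match   len=%u dist=1\n", run);   /* run-length match */
            pos += run;
        } else {
            printf("literal '%c'\n", buf[pos]);
            pos++;
        }
    }
    return 0;
}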
@@ -2140,7 +2070,8 @@ local block_state deflate_rle(s, flush)
          if (s->match_length > s->lookahead)
              s->match_length = s->lookahead;
      }
-     Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan");
+     Assert(scan <= s->window + (uInt)(s->window_size - 1),
+            "wild scan");
  }
 
  /* Emit match if have run of MIN_MATCH or longer, else emit literal */
@@ -2155,7 +2086,7 @@ local block_state deflate_rle(s, flush)
  } else {
      /* No match, output a literal byte */
      Tracevv((stderr,"%c", s->window[s->strstart]));
-     _tr_tally_lit (s, s->window[s->strstart], bflush);
+     _tr_tally_lit(s, s->window[s->strstart], bflush);
      s->lookahead--;
      s->strstart++;
  }
@@ -2175,10 +2106,7 @@ local block_state deflate_rle(s, flush)
  * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.
  * (It will be regenerated if this run of deflate switches away from Huffman.)
  */
- local block_state deflate_huff(s, flush)
-     deflate_state *s;
-     int flush;
- {
+ local block_state deflate_huff(deflate_state *s, int flush) {
      int bflush;             /* set if current block must be flushed */
 
      for (;;) {
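deflate_rle() and deflate_huff() are the code paths behind zlib's Z_RLE and Z_HUFFMAN_ONLY strategies, which callers select through the strategy argument of deflateInit2(). A minimal single-shot usage sketch follows (error handling trimmed, and it assumes the whole input fits one deflate() call); swapping Z_HUFFMAN_ONLY for Z_RLE or Z_DEFAULT_STRATEGY exercises the other paths shown in this diff.

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void) {
    unsigned char in[] = "aaaaaaaaaabbbbbbbbbbcccccccccc";
    unsigned char out[128];
    z_stream strm;
    memset(&strm, 0, sizeof strm);   /* zalloc/zfree/opaque all Z_NULL */

    if (deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                     15, 8, Z_HUFFMAN_ONLY) != Z_OK)
        return 1;

    strm.next_in   = in;
    strm.avail_in  = (unsigned)(sizeof in - 1);
    strm.next_out  = out;
    strm.avail_out = (unsigned)sizeof out;

    if (deflate(&strm, Z_FINISH) != Z_STREAM_END) {  /* single-shot deflate */
        deflateEnd(&strm);
        return 1;
    }
    printf("compressed %u bytes to %lu bytes\n",
           (unsigned)(sizeof in - 1), strm.total_out);
    deflateEnd(&strm);
    return 0;
}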
@@ -2195,7 +2123,7 @@ local block_state deflate_huff(s, flush)
          /* Output a literal byte */
          s->match_length = 0;
          Tracevv((stderr,"%c", s->window[s->strstart]));
-         _tr_tally_lit (s, s->window[s->strstart], bflush);
+         _tr_tally_lit(s, s->window[s->strstart], bflush);
          s->lookahead--;
          s->strstart++;
          if (bflush) FLUSH_BLOCK(s, 0);