rugged 1.4.3 → 1.4.4

@@ -1,5 +1,5 @@
 /* deflate.c -- compress data using the deflation algorithm
- * Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler
+ * Copyright (C) 1995-2022 Jean-loup Gailly and Mark Adler
  * For conditions of distribution and use, see copyright notice in zlib.h
  */
 
@@ -52,7 +52,7 @@
 #include "deflate.h"
 
 const char deflate_copyright[] =
-   " deflate 1.2.11 Copyright 1995-2017 Jean-loup Gailly and Mark Adler ";
+   " deflate 1.2.12 Copyright 1995-2022 Jean-loup Gailly and Mark Adler ";
 /*
   If you use the zlib library in a product, an acknowledgment is welcome
   in the documentation of your product. If for some reason you cannot
@@ -190,14 +190,22 @@ local const config configuration_table[10] = {
  * prev[] will be initialized on the fly.
  */
 #define CLEAR_HASH(s) \
-    s->head[s->hash_size-1] = NIL; \
-    zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
+    do { \
+        s->head[s->hash_size-1] = NIL; \
+        zmemzero((Bytef *)s->head, \
+                 (unsigned)(s->hash_size-1)*sizeof(*s->head)); \
+    } while (0)
 
 /* ===========================================================================
  * Slide the hash table when sliding the window down (could be avoided with 32
  * bit values at the expense of memory usage). We slide even when level == 0 to
  * keep the hash table consistent if we switch back to level > 0 later.
  */
+#if defined(__has_feature)
+#  if __has_feature(memory_sanitizer)
+     __attribute__((no_sanitize("memory")))
+#  endif
+#endif
 local void slide_hash(s)
     deflate_state *s;
 {
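Note on the CLEAR_HASH hunk above: the macro body is now wrapped in do { ... } while (0), so its two statements expand as a single statement and stay grouped under an if or before an else. A minimal standalone sketch of the difference, using toy macro names rather than zlib code:

    #include <stdio.h>

    /* Without the do/while(0) wrapper only the first statement is guarded
     * by the if below; the second one always executes. */
    #define RESET_BARE(a, b) a = 0; b = 0;
    #define RESET_SAFE(a, b) do { a = 0; b = 0; } while (0)

    int main(void) {
        int x = 1, y = 1;
        if (0)
            RESET_BARE(x, y);                 /* y = 0 still runs */
        printf("bare: x=%d y=%d\n", x, y);    /* x=1 y=0 */

        x = 1; y = 1;
        if (0)
            RESET_SAFE(x, y);                 /* whole body is guarded */
        printf("safe: x=%d y=%d\n", x, y);    /* x=1 y=1 */
        return 0;
    }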
@@ -252,11 +260,6 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
     int wrap = 1;
     static const char my_version[] = ZLIB_VERSION;
 
-    ushf *overlay;
-    /* We overlay pending_buf and d_buf+l_buf. This works since the average
-     * output size for (length,distance) codes is <= 24 bits.
-     */
-
     if (version == Z_NULL || version[0] != my_version[0] ||
         stream_size != sizeof(z_stream)) {
         return Z_VERSION_ERROR;
@@ -320,16 +323,53 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
 
     s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
     s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
-    memset(s->prev, 0, s->w_size * sizeof(Pos));
     s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
 
     s->high_water = 0;      /* nothing written to s->window yet */
 
     s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
 
-    overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
-    s->pending_buf = (uchf *) overlay;
-    s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
+    /* We overlay pending_buf and sym_buf. This works since the average size
+     * for length/distance pairs over any compressed block is assured to be 31
+     * bits or less.
+     *
+     * Analysis: The longest fixed codes are a length code of 8 bits plus 5
+     * extra bits, for lengths 131 to 257. The longest fixed distance codes are
+     * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest
+     * possible fixed-codes length/distance pair is then 31 bits total.
+     *
+     * sym_buf starts one-fourth of the way into pending_buf. So there are
+     * three bytes in sym_buf for every four bytes in pending_buf. Each symbol
+     * in sym_buf is three bytes -- two for the distance and one for the
+     * literal/length. As each symbol is consumed, the pointer to the next
+     * sym_buf value to read moves forward three bytes. From that symbol, up to
+     * 31 bits are written to pending_buf. The closest the written pending_buf
+     * bits gets to the next sym_buf symbol to read is just before the last
+     * code is written. At that time, 31*(n-2) bits have been written, just
+     * after 24*(n-2) bits have been consumed from sym_buf. sym_buf starts at
+     * 8*n bits into pending_buf. (Note that the symbol buffer fills when n-1
+     * symbols are written.) The closest the writing gets to what is unread is
+     * then n+14 bits. Here n is lit_bufsize, which is 16384 by default, and
+     * can range from 128 to 32768.
+     *
+     * Therefore, at a minimum, there are 142 bits of space between what is
+     * written and what is read in the overlain buffers, so the symbols cannot
+     * be overwritten by the compressed data. That space is actually 139 bits,
+     * due to the three-bit fixed-code block header.
+     *
+     * That covers the case where either Z_FIXED is specified, forcing fixed
+     * codes, or when the use of fixed codes is chosen, because that choice
+     * results in a smaller compressed block than dynamic codes. That latter
+     * condition then assures that the above analysis also covers all dynamic
+     * blocks. A dynamic-code block will only be chosen to be emitted if it has
+     * fewer bits than a fixed-code block would for the same set of symbols.
+     * Therefore its average symbol length is assured to be less than 31. So
+     * the compressed data for a dynamic block also cannot overwrite the
+     * symbols from which it is being constructed.
+     */
+
+    s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, 4);
+    s->pending_buf_size = (ulg)s->lit_bufsize * 4;
 
     if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
         s->pending_buf == Z_NULL) {
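The long comment added above carries the bound that lets pending_buf and sym_buf share one allocation. A quick numeric check of that arithmetic (a sketch, not zlib code): with n = lit_bufsize, the writer has emitted at most 31*(n-2) bits of fixed codes while the next unread symbol sits at 8*n + 24*(n-2) bits into pending_buf, a margin of n + 14 bits, i.e. at least 142 bits at the minimum n of 128 (139 once the three-bit block header is subtracted):

    #include <stdio.h>

    int main(void) {
        for (long n = 128; n <= 32768; n *= 2) {    /* valid lit_bufsize range */
            long written = 31 * (n - 2);            /* bits written so far */
            long unread  = 8 * n + 24 * (n - 2);    /* bit offset of next unread symbol */
            printf("lit_bufsize=%5ld  margin=%ld bits\n", n, unread - written);
        }
        return 0;
    }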
@@ -338,8 +378,12 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
         deflateEnd (strm);
         return Z_MEM_ERROR;
     }
-    s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
-    s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
+    s->sym_buf = s->pending_buf + s->lit_bufsize;
+    s->sym_end = (s->lit_bufsize - 1) * 3;
+    /* We avoid equality with lit_bufsize*3 because of wraparound at 64K
+     * on 16 bit machines and because stored blocks are restricted to
+     * 64K-1 bytes.
+     */
 
     s->level = level;
     s->strategy = strategy;
@@ -489,13 +533,13 @@ int ZEXPORT deflateResetKeep (strm)
 #ifdef GZIP
         s->wrap == 2 ? GZIP_STATE :
 #endif
-        s->wrap ? INIT_STATE : BUSY_STATE;
+        INIT_STATE;
     strm->adler =
 #ifdef GZIP
         s->wrap == 2 ? crc32(0L, Z_NULL, 0) :
 #endif
         adler32(0L, Z_NULL, 0);
-    s->last_flush = Z_NO_FLUSH;
+    s->last_flush = -2;
 
     _tr_init(s);
 
@@ -550,7 +594,8 @@ int ZEXPORT deflatePrime (strm, bits, value)
 
     if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
     s = strm->state;
-    if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3))
+    if (bits < 0 || bits > 16 ||
+        s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3))
         return Z_BUF_ERROR;
     do {
         put = Buf_size - s->bi_valid;
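The deflatePrime hunk above adds a range check on bits. A hedged usage sketch against zlib's public API (error handling trimmed): priming a raw-deflate stream with a few bits is accepted, while a bit count above 16 now fails with Z_BUF_ERROR:

    #include <stdio.h>
    #include <zlib.h>

    int main(void) {
        z_stream strm = {0};                      /* zalloc/zfree/opaque default to Z_NULL */
        if (deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                         -15, 8, Z_DEFAULT_STRATEGY) != Z_OK)  /* raw deflate */
            return 1;

        printf("deflatePrime(3, 5)  -> %d\n", deflatePrime(&strm, 3, 5));   /* Z_OK */
        printf("deflatePrime(17, 0) -> %d\n", deflatePrime(&strm, 17, 0));  /* Z_BUF_ERROR */

        deflateEnd(&strm);
        return 0;
    }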
@@ -588,12 +633,12 @@ int ZEXPORT deflateParams(strm, level, strategy)
     func = configuration_table[s->level].func;
 
     if ((strategy != s->strategy || func != configuration_table[level].func) &&
-        s->high_water) {
+        s->last_flush != -2) {
         /* Flush the last buffer: */
         int err = deflate(strm, Z_BLOCK);
         if (err == Z_STREAM_ERROR)
             return err;
-        if (strm->avail_out == 0)
+        if (strm->avail_in || (s->strstart - s->block_start) + s->lookahead)
             return Z_BUF_ERROR;
     }
     if (s->level != level) {
@@ -812,6 +857,8 @@ int ZEXPORT deflate (strm, flush)
     }
 
     /* Write the header */
+    if (s->status == INIT_STATE && s->wrap == 0)
+        s->status = BUSY_STATE;
     if (s->status == INIT_STATE) {
         /* zlib header */
         uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
@@ -1109,7 +1156,6 @@ int ZEXPORT deflateCopy (dest, source)
 #else
     deflate_state *ds;
     deflate_state *ss;
-    ushf *overlay;
 
 
     if (deflateStateCheck(source) || dest == Z_NULL) {
@@ -1129,8 +1175,7 @@ int ZEXPORT deflateCopy (dest, source)
     ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
     ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
     ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
-    overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
-    ds->pending_buf = (uchf *) overlay;
+    ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, 4);
 
     if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
         ds->pending_buf == Z_NULL) {
@@ -1144,8 +1189,7 @@ int ZEXPORT deflateCopy (dest, source)
     zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
 
     ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
-    ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
-    ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
+    ds->sym_buf = ds->pending_buf + ds->lit_bufsize;
 
     ds->l_desc.dyn_tree = ds->dyn_ltree;
     ds->d_desc.dyn_tree = ds->dyn_dtree;
@@ -1514,6 +1558,8 @@ local void fill_window(s)
             s->match_start -= wsize;
             s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
             s->block_start -= (long) wsize;
+            if (s->insert > s->strstart)
+                s->insert = s->strstart;
             slide_hash(s);
             more += wsize;
         }
@@ -1743,6 +1789,7 @@ local block_state deflate_stored(s, flush)
             s->matches = 2;         /* clear hash */
             zmemcpy(s->window, s->strm->next_in - s->w_size, s->w_size);
             s->strstart = s->w_size;
+            s->insert = s->strstart;
         }
         else {
             if (s->window_size - s->strstart <= used) {
@@ -1751,12 +1798,14 @@ local block_state deflate_stored(s, flush)
                 zmemcpy(s->window, s->window + s->w_size, s->strstart);
                 if (s->matches < 2)
                     s->matches++;   /* add a pending slide_hash() */
+                if (s->insert > s->strstart)
+                    s->insert = s->strstart;
             }
             zmemcpy(s->window + s->strstart, s->strm->next_in - used, used);
             s->strstart += used;
+            s->insert += MIN(used, s->w_size - s->insert);
         }
         s->block_start = s->strstart;
-        s->insert += MIN(used, s->w_size - s->insert);
     }
     if (s->high_water < s->strstart)
         s->high_water = s->strstart;
@@ -1771,7 +1820,7 @@ local block_state deflate_stored(s, flush)
         return block_done;
 
     /* Fill the window with any remaining input. */
-    have = s->window_size - s->strstart - 1;
+    have = s->window_size - s->strstart;
     if (s->strm->avail_in > have && s->block_start >= (long)s->w_size) {
         /* Slide the window down. */
         s->block_start -= s->w_size;
@@ -1780,12 +1829,15 @@ local block_state deflate_stored(s, flush)
         if (s->matches < 2)
             s->matches++;   /* add a pending slide_hash() */
         have += s->w_size;  /* more space now */
+        if (s->insert > s->strstart)
+            s->insert = s->strstart;
     }
     if (have > s->strm->avail_in)
         have = s->strm->avail_in;
     if (have) {
         read_buf(s->strm, s->window + s->strstart, have);
         s->strstart += have;
+        s->insert += MIN(have, s->w_size - s->insert);
     }
     if (s->high_water < s->strstart)
         s->high_water = s->strstart;
@@ -1913,7 +1965,7 @@ local block_state deflate_fast(s, flush)
         FLUSH_BLOCK(s, 1);
         return finish_done;
     }
-    if (s->last_lit)
+    if (s->sym_next)
         FLUSH_BLOCK(s, 0);
     return block_done;
 }
@@ -2044,7 +2096,7 @@ local block_state deflate_slow(s, flush)
         FLUSH_BLOCK(s, 1);
         return finish_done;
     }
-    if (s->last_lit)
+    if (s->sym_next)
         FLUSH_BLOCK(s, 0);
     return block_done;
 }
@@ -2119,7 +2171,7 @@ local block_state deflate_rle(s, flush)
         FLUSH_BLOCK(s, 1);
         return finish_done;
     }
-    if (s->last_lit)
+    if (s->sym_next)
         FLUSH_BLOCK(s, 0);
     return block_done;
 }
@@ -2158,7 +2210,7 @@ local block_state deflate_huff(s, flush)
         FLUSH_BLOCK(s, 1);
         return finish_done;
     }
-    if (s->last_lit)
+    if (s->sym_next)
         FLUSH_BLOCK(s, 0);
     return block_done;
 }
@@ -1,5 +1,5 @@
 /* deflate.h -- internal compression state
- * Copyright (C) 1995-2016 Jean-loup Gailly
+ * Copyright (C) 1995-2018 Jean-loup Gailly
  * For conditions of distribution and use, see copyright notice in zlib.h
  */
 
@@ -217,7 +217,7 @@ typedef struct internal_state {
     /* Depth of each subtree used as tie breaker for trees of equal frequency
      */
 
-    uchf *l_buf;          /* buffer for literals or lengths */
+    uchf *sym_buf;        /* buffer for distances and literals/lengths */
 
     uInt  lit_bufsize;
     /* Size of match buffer for literals/lengths. There are 4 reasons for
@@ -239,13 +239,8 @@ typedef struct internal_state {
      * - I can't count above 4
      */
 
-    uInt last_lit;      /* running index in l_buf */
-
-    ushf *d_buf;
-    /* Buffer for distances. To simplify the code, d_buf and l_buf have
-     * the same number of elements. To use different lengths, an extra flag
-     * array would be necessary.
-     */
+    uInt sym_next;      /* running index in sym_buf */
+    uInt sym_end;       /* symbol table full when sym_next reaches this */
 
     ulg opt_len;        /* bit length of current block with optimal trees */
     ulg static_len;     /* bit length of current block with static trees */
@@ -325,20 +320,22 @@ void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf,
 
 # define _tr_tally_lit(s, c, flush) \
   { uch cc = (c); \
-    s->d_buf[s->last_lit] = 0; \
-    s->l_buf[s->last_lit++] = cc; \
+    s->sym_buf[s->sym_next++] = 0; \
+    s->sym_buf[s->sym_next++] = 0; \
+    s->sym_buf[s->sym_next++] = cc; \
     s->dyn_ltree[cc].Freq++; \
-    flush = (s->last_lit == s->lit_bufsize-1); \
+    flush = (s->sym_next == s->sym_end); \
   }
 # define _tr_tally_dist(s, distance, length, flush) \
   { uch len = (uch)(length); \
     ush dist = (ush)(distance); \
-    s->d_buf[s->last_lit] = dist; \
-    s->l_buf[s->last_lit++] = len; \
+    s->sym_buf[s->sym_next++] = (uch)dist; \
+    s->sym_buf[s->sym_next++] = (uch)(dist >> 8); \
+    s->sym_buf[s->sym_next++] = len; \
     dist--; \
     s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \
     s->dyn_dtree[d_code(dist)].Freq++; \
-    flush = (s->last_lit == s->lit_bufsize-1); \
+    flush = (s->sym_next == s->sym_end); \
   }
 #else
 # define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c)
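The _tr_tally_* hunks above replace the separate d_buf/l_buf arrays with a single sym_buf holding three bytes per symbol: two little-endian distance bytes (zero for a plain literal) followed by the literal/length byte. A standalone sketch of that packing and of how the reading side recovers each pair (local names, not zlib code):

    #include <stdio.h>

    static unsigned char sym_buf[64];
    static unsigned sym_next = 0;

    static void tally_lit(unsigned char c) {
        sym_buf[sym_next++] = 0;                   /* distance low byte */
        sym_buf[sym_next++] = 0;                   /* distance high byte */
        sym_buf[sym_next++] = c;                   /* literal byte */
    }

    static void tally_dist(unsigned dist, unsigned char len) {
        sym_buf[sym_next++] = (unsigned char)dist;         /* distance, low byte */
        sym_buf[sym_next++] = (unsigned char)(dist >> 8);  /* distance, high byte */
        sym_buf[sym_next++] = len;                         /* length byte */
    }

    int main(void) {
        tally_lit('A');
        tally_dist(16385, 200);

        for (unsigned i = 0; i < sym_next; i += 3) {
            unsigned dist = sym_buf[i] + (sym_buf[i + 1] << 8);
            unsigned lc   = sym_buf[i + 2];
            if (dist == 0)
                printf("literal 0x%02x\n", lc);
            else
                printf("distance %u, length byte %u\n", dist, lc);
        }
        return 0;
    }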
@@ -1,5 +1,5 @@
 /* gzguts.h -- zlib internal header definitions for gz* operations
- * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013, 2016 Mark Adler
+ * Copyright (C) 2004-2019 Mark Adler
  * For conditions of distribution and use, see copyright notice in zlib.h
  */
 
@@ -39,7 +39,7 @@
 #  include <io.h>
 #endif
 
-#if defined(_WIN32) || defined(__CYGWIN__)
+#if defined(_WIN32)
 #  define WIDECHAR
 #endif
 
@@ -190,6 +190,7 @@ typedef struct {
         /* just for writing */
     int level;              /* compression level */
     int strategy;           /* compression strategy */
+    int reset;              /* true if a reset is pending after a Z_FINISH */
         /* seek request */
     z_off64_t skip;         /* amount to skip (already rewound if backwards) */
     int seek;               /* true if seek request pending */
@@ -1,5 +1,5 @@
 /* infback.c -- inflate using a call-back interface
- * Copyright (C) 1995-2016 Mark Adler
+ * Copyright (C) 1995-2022 Mark Adler
  * For conditions of distribution and use, see copyright notice in zlib.h
  */
 
@@ -477,6 +477,7 @@ void FAR *out_desc;
             }
             Tracev((stderr, "inflate: codes ok\n"));
             state->mode = LEN;
+            /* fallthrough */
 
         case LEN:
             /* use inflate_fast() if we have enough input and output */
@@ -70,7 +70,7 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
     code const FAR *dcode;      /* local strm->distcode */
     unsigned lmask;             /* mask for first level of length codes */
     unsigned dmask;             /* mask for first level of distance codes */
-    code here;                  /* retrieved table entry */
+    code const *here;           /* retrieved table entry */
     unsigned op;                /* code bits, operation, extra bits, or */
                                 /* window position, window bytes to copy */
     unsigned len;               /* match length, unused bytes */
@@ -107,20 +107,20 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
             hold += (unsigned long)(*in++) << bits;
             bits += 8;
         }
-        here = lcode[hold & lmask];
+        here = lcode + (hold & lmask);
      dolen:
-        op = (unsigned)(here.bits);
+        op = (unsigned)(here->bits);
         hold >>= op;
         bits -= op;
-        op = (unsigned)(here.op);
+        op = (unsigned)(here->op);
         if (op == 0) {                          /* literal */
-            Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
+            Tracevv((stderr, here->val >= 0x20 && here->val < 0x7f ?
                     "inflate: literal '%c'\n" :
-                    "inflate: literal 0x%02x\n", here.val));
-            *out++ = (unsigned char)(here.val);
+                    "inflate: literal 0x%02x\n", here->val));
+            *out++ = (unsigned char)(here->val);
         }
         else if (op & 16) {                     /* length base */
-            len = (unsigned)(here.val);
+            len = (unsigned)(here->val);
             op &= 15;                           /* number of extra bits */
             if (op) {
                 if (bits < op) {
@@ -138,14 +138,14 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
                 hold += (unsigned long)(*in++) << bits;
                 bits += 8;
             }
-            here = dcode[hold & dmask];
+            here = dcode + (hold & dmask);
          dodist:
-            op = (unsigned)(here.bits);
+            op = (unsigned)(here->bits);
             hold >>= op;
             bits -= op;
-            op = (unsigned)(here.op);
+            op = (unsigned)(here->op);
             if (op & 16) {                      /* distance base */
-                dist = (unsigned)(here.val);
+                dist = (unsigned)(here->val);
                 op &= 15;                       /* number of extra bits */
                 if (bits < op) {
                     hold += (unsigned long)(*in++) << bits;
@@ -264,7 +264,7 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
                 }
             }
             else if ((op & 64) == 0) {          /* 2nd level distance code */
-                here = dcode[here.val + (hold & ((1U << op) - 1))];
+                here = dcode + here->val + (hold & ((1U << op) - 1));
                 goto dodist;
             }
             else {
@@ -274,7 +274,7 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */
             }
         }
         else if ((op & 64) == 0) {              /* 2nd level length code */
-            here = lcode[here.val + (hold & ((1U << op) - 1))];
+            here = lcode + here->val + (hold & ((1U << op) - 1));
             goto dolen;
         }
         else if (op & 32) {                     /* end-of-block */
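The inflate_fast hunks above (inffast.c) change here from a copied table entry to a pointer into the code table, so each lookup dereferences the entry in place instead of copying a four-byte struct. A minimal sketch of the two forms, using a local stand-in for zlib's code struct:

    #include <stdio.h>

    typedef struct { unsigned char op, bits; unsigned short val; } code;

    int main(void) {
        static const code lcode[4] = { {0, 7, 10}, {16, 8, 3}, {0, 9, 42}, {32, 7, 0} };
        unsigned long hold = 0x2;
        unsigned lmask = 0x3;

        code byval        = lcode[hold & lmask];      /* old style: copy the entry */
        code const *byptr = lcode + (hold & lmask);   /* new style: point at it */

        printf("copy: val=%u  pointer: val=%u\n",
               (unsigned)byval.val, (unsigned)byptr->val);
        return 0;
    }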