zstdlib 0.6.0-x86-mingw32 → 0.9.0-x86-mingw32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. checksums.yaml +4 -4
  2. data/CHANGES.md +20 -0
  3. data/README.md +7 -1
  4. data/Rakefile +38 -8
  5. data/ext/{zstdlib → zstdlib_c}/extconf.rb +10 -5
  6. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.2/zstdlib.c +2 -2
  7. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.3/zstdlib.c +2 -2
  8. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.4/zstdlib.c +2 -2
  9. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.5/zstdlib.c +2 -2
  10. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.6/zstdlib.c +2 -2
  11. data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.7/zstdlib.c +2 -2
  12. data/ext/zstdlib_c/ruby/zlib-3.0/zstdlib.c +4994 -0
  13. data/ext/zstdlib_c/ruby/zlib-3.1/zstdlib.c +5076 -0
  14. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/adler32.c +0 -0
  15. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/compress.c +0 -0
  16. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/crc32.c +0 -0
  17. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/crc32.h +0 -0
  18. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/deflate.c +0 -0
  19. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/deflate.h +0 -0
  20. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/gzclose.c +0 -0
  21. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/gzguts.h +0 -0
  22. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/gzlib.c +0 -0
  23. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/gzread.c +0 -0
  24. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/gzwrite.c +0 -0
  25. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/infback.c +0 -0
  26. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inffast.c +0 -0
  27. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inffast.h +0 -0
  28. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inffixed.h +0 -0
  29. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inflate.c +0 -0
  30. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inflate.h +0 -0
  31. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inftrees.c +0 -0
  32. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inftrees.h +0 -0
  33. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/trees.c +0 -0
  34. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/trees.h +0 -0
  35. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/uncompr.c +0 -0
  36. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/zconf.h +0 -0
  37. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/zlib.h +0 -0
  38. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/zutil.c +0 -0
  39. data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/zutil.h +0 -0
  40. data/ext/{zstdlib → zstdlib_c}/zlib.mk +0 -0
  41. data/ext/{zstdlib → zstdlib_c}/zlibwrapper/zlibwrapper.c +1 -5
  42. data/ext/{zstdlib → zstdlib_c}/zlibwrapper.mk +0 -0
  43. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/bitstream.h +75 -57
  44. data/ext/zstdlib_c/zstd-1.5.2/lib/common/compiler.h +335 -0
  45. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/cpu.h +1 -3
  46. data/ext/zstdlib_c/zstd-1.5.2/lib/common/debug.c +24 -0
  47. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/debug.h +22 -49
  48. data/ext/zstdlib_c/zstd-1.5.2/lib/common/entropy_common.c +368 -0
  49. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/error_private.c +3 -1
  50. data/ext/zstdlib_c/zstd-1.5.2/lib/common/error_private.h +159 -0
  51. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/fse.h +51 -42
  52. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/fse_decompress.c +149 -57
  53. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/huf.h +60 -54
  54. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/mem.h +87 -98
  55. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/pool.c +34 -23
  56. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/pool.h +5 -5
  57. data/ext/zstdlib_c/zstd-1.5.2/lib/common/portability_macros.h +137 -0
  58. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/threading.c +10 -8
  59. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/threading.h +4 -3
  60. data/ext/zstdlib_c/zstd-1.5.2/lib/common/xxhash.c +24 -0
  61. data/ext/zstdlib_c/zstd-1.5.2/lib/common/xxhash.h +5686 -0
  62. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/common/zstd_common.c +10 -10
  63. data/ext/zstdlib_c/zstd-1.5.2/lib/common/zstd_deps.h +111 -0
  64. data/ext/zstdlib_c/zstd-1.5.2/lib/common/zstd_internal.h +493 -0
  65. data/ext/zstdlib_c/zstd-1.5.2/lib/common/zstd_trace.h +163 -0
  66. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/clevels.h +134 -0
  67. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/fse_compress.c +105 -85
  68. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/hist.c +41 -63
  69. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/hist.h +13 -33
  70. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/huf_compress.c +1370 -0
  71. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_compress.c +6327 -0
  72. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_internal.h +537 -82
  73. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_literals.c +21 -16
  74. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_literals.h +4 -2
  75. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_sequences.c +61 -34
  76. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_sequences.h +10 -3
  77. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_compress_superblock.c +573 -0
  78. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_compress_superblock.h +32 -0
  79. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_cwksp.h +236 -95
  80. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_double_fast.c +321 -143
  81. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_double_fast.h +2 -2
  82. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_fast.c +328 -137
  83. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_fast.h +2 -2
  84. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_lazy.c +2104 -0
  85. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_lazy.h +125 -0
  86. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_ldm.c +336 -209
  87. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_ldm.h +15 -3
  88. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_ldm_geartab.h +106 -0
  89. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_opt.c +439 -239
  90. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_opt.h +1 -1
  91. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstdmt_compress.c +205 -462
  92. data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstdmt_compress.h +113 -0
  93. data/ext/zstdlib_c/zstd-1.5.2/lib/decompress/huf_decompress.c +1889 -0
  94. data/ext/zstdlib_c/zstd-1.5.2/lib/decompress/huf_decompress_amd64.S +585 -0
  95. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_ddict.c +20 -16
  96. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_ddict.h +3 -3
  97. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress.c +691 -230
  98. data/ext/zstdlib_c/zstd-1.5.2/lib/decompress/zstd_decompress_block.c +2072 -0
  99. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress_block.h +16 -7
  100. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress_internal.h +71 -10
  101. data/ext/zstdlib_c/zstd-1.5.2/lib/zdict.h +452 -0
  102. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/zstd.h +760 -234
  103. data/ext/{zstdlib/zstd-1.4.4/lib/common → zstdlib_c/zstd-1.5.2/lib}/zstd_errors.h +3 -1
  104. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzclose.c +0 -0
  105. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzcompatibility.h +1 -1
  106. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzguts.h +0 -0
  107. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzlib.c +0 -0
  108. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzread.c +0 -0
  109. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzwrite.c +0 -0
  110. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/zlibWrapper/zstd_zlibwrapper.c +133 -44
  111. data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/zlibWrapper/zstd_zlibwrapper.h +1 -1
  112. data/ext/zstdlib_c/zstd.mk +15 -0
  113. data/lib/2.4/zstdlib_c.so +0 -0
  114. data/lib/2.5/zstdlib_c.so +0 -0
  115. data/lib/2.6/zstdlib_c.so +0 -0
  116. data/lib/2.7/zstdlib_c.so +0 -0
  117. data/lib/3.0/zstdlib_c.so +0 -0
  118. data/lib/3.1/zstdlib_c.so +0 -0
  119. data/lib/zstdlib.rb +2 -2
  120. metadata +125 -114
  121. data/ext/zstdlib/zstd-1.4.4/lib/common/compiler.h +0 -159
  122. data/ext/zstdlib/zstd-1.4.4/lib/common/debug.c +0 -44
  123. data/ext/zstdlib/zstd-1.4.4/lib/common/entropy_common.c +0 -236
  124. data/ext/zstdlib/zstd-1.4.4/lib/common/error_private.h +0 -76
  125. data/ext/zstdlib/zstd-1.4.4/lib/common/xxhash.c +0 -882
  126. data/ext/zstdlib/zstd-1.4.4/lib/common/xxhash.h +0 -305
  127. data/ext/zstdlib/zstd-1.4.4/lib/common/zstd_internal.h +0 -350
  128. data/ext/zstdlib/zstd-1.4.4/lib/compress/huf_compress.c +0 -798
  129. data/ext/zstdlib/zstd-1.4.4/lib/compress/zstd_compress.c +0 -4103
  130. data/ext/zstdlib/zstd-1.4.4/lib/compress/zstd_lazy.c +0 -1115
  131. data/ext/zstdlib/zstd-1.4.4/lib/compress/zstd_lazy.h +0 -67
  132. data/ext/zstdlib/zstd-1.4.4/lib/compress/zstdmt_compress.h +0 -192
  133. data/ext/zstdlib/zstd-1.4.4/lib/decompress/huf_decompress.c +0 -1234
  134. data/ext/zstdlib/zstd-1.4.4/lib/decompress/zstd_decompress_block.c +0 -1323
  135. data/ext/zstdlib/zstd.mk +0 -14
  136. data/lib/2.2/zstdlib.so +0 -0
  137. data/lib/2.3/zstdlib.so +0 -0
  138. data/lib/2.4/zstdlib.so +0 -0
  139. data/lib/2.5/zstdlib.so +0 -0
  140. data/lib/2.6/zstdlib.so +0 -0
  141. data/lib/2.7/zstdlib.so +0 -0
data/ext/{zstdlib/zstd-1.4.4 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_cwksp.h
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -14,7 +14,7 @@
  /*-*************************************
  * Dependencies
  ***************************************/
- #include "zstd_internal.h"
+ #include "../common/zstd_internal.h"

  #if defined (__cplusplus)
  extern "C" {
@@ -24,16 +24,6 @@ extern "C" {
  * Constants
  ***************************************/

- /* define "workspace is too large" as this number of times larger than needed */
- #define ZSTD_WORKSPACETOOLARGE_FACTOR 3
-
- /* when workspace is continuously too large
- * during at least this number of times,
- * context's memory usage is considered wasteful,
- * because it's sized to handle a worst case scenario which rarely happens.
- * In which case, resize it down to free some memory */
- #define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
-
  /* Since the workspace is effectively its own little malloc implementation /
  * arena, when we run under ASAN, we should similarly insert redzones between
  * each internal element of the workspace, so ASAN will catch overruns that
@@ -45,6 +35,10 @@ extern "C" {
  #define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
  #endif

+
+ /* Set our tables and aligneds to align by 64 bytes */
+ #define ZSTD_CWKSP_ALIGNMENT_BYTES 64
+
  /*-*************************************
  * Structures
  ***************************************/
@@ -54,6 +48,16 @@ typedef enum {
  ZSTD_cwksp_alloc_aligned
  } ZSTD_cwksp_alloc_phase_e;

+ /**
+ * Used to describe whether the workspace is statically allocated (and will not
+ * necessarily ever be freed), or if it's dynamically allocated and we can
+ * expect a well-formed caller to free this.
+ */
+ typedef enum {
+ ZSTD_cwksp_dynamic_alloc,
+ ZSTD_cwksp_static_alloc
+ } ZSTD_cwksp_static_alloc_e;
+
  /**
  * Zstd fits all its internal datastructures into a single continuous buffer,
  * so that it only needs to perform a single OS allocation (or so that a buffer
@@ -102,7 +106,7 @@ typedef enum {
  *
  * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
  * so that literally everything fits in a single buffer. Note: if present,
- * this must be the first object in the workspace, since ZSTD_free{CCtx,
+ * this must be the first object in the workspace, since ZSTD_customFree{CCtx,
  * CDict}() rely on a pointer comparison to see whether one or two frees are
  * required.
  *
@@ -117,10 +121,11 @@ typedef enum {
  * - Tables: these are any of several different datastructures (hash tables,
  * chain tables, binary trees) that all respect a common format: they are
  * uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
- * Their sizes depend on the cparams.
+ * Their sizes depend on the cparams. These tables are 64-byte aligned.
  *
  * - Aligned: these buffers are used for various purposes that require 4 byte
- * alignment, but don't require any initialization before they're used.
+ * alignment, but don't require any initialization before they're used. These
+ * buffers are each aligned to 64 bytes.
  *
  * - Buffers: these buffers are used for various purposes that don't require
  * any alignment or initialization before they're used. This means they can
@@ -133,8 +138,7 @@ typedef enum {
  *
  * 1. Objects
  * 2. Buffers
- * 3. Aligned
- * 4. Tables
+ * 3. Aligned/Tables
  *
  * Attempts to reserve objects of different types out of order will fail.
  */
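(For orientation only, not a comment taken from the header: the allocation order described above yields roughly the following address layout, sketched here as an illustrative C comment.)

/* Illustrative sketch of a fully allocated ZSTD_cwksp, low to high addresses:
 *
 *   workspace                                                   workspaceEnd
 *   |--objects--|--tables-->|   ...free...   |<--aligned--|<--buffers--|
 *               ^objectEnd  ^tableEnd        ^allocStart
 *
 * Objects and tables are carved off the front of the arena; buffers and
 * aligned allocations are carved off the back, so the remaining free space
 * is the gap between tableEnd and allocStart.
 */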
@@ -147,9 +151,10 @@ typedef struct {
  void* tableValidEnd;
  void* allocStart;

- int allocFailed;
+ BYTE allocFailed;
  int workspaceOversizedDuration;
  ZSTD_cwksp_alloc_phase_e phase;
+ ZSTD_cwksp_static_alloc_e isStatic;
  } ZSTD_cwksp;

  /*-*************************************
@@ -186,82 +191,166 @@ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
  * Since tables aren't currently redzoned, you don't need to call through this
  * to figure out how much space you need for the matchState tables. Everything
  * else is though.
+ *
+ * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
  */
  MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
- #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ if (size == 0)
+ return 0;
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
  return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
  #else
  return size;
  #endif
  }

- MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
- ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
+ /**
+ * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
+ * Used to determine the number of bytes required for a given "aligned".
+ */
+ MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
+ return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
+ }
+
+ /**
+ * Returns the amount of additional space the cwksp must allocate
+ * for internal purposes (currently only alignment).
+ */
+ MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
+ /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
+ * to align the beginning of tables section, as well as another n_2=[0, 63] bytes
+ * to align the beginning of the aligned section.
+ *
+ * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
+ * aligneds being sized in multiples of 64 bytes.
+ */
+ size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
+ return slackSpace;
+ }
+
+
+ /**
+ * Return the number of additional bytes required to align a pointer to the given number of bytes.
+ * alignBytes must be a power of two.
+ */
+ MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
+ size_t const alignBytesMask = alignBytes - 1;
+ size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
+ assert((alignBytes & alignBytesMask) == 0);
+ assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
+ return bytes;
+ }
+
+ /**
+ * Internal function. Do not use directly.
+ * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
+ * which counts from the end of the wksp (as opposed to the object/table segment).
+ *
+ * Returns a pointer to the beginning of that space.
+ */
+ MEM_STATIC void*
+ ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
+ {
+ void* const alloc = (BYTE*)ws->allocStart - bytes;
+ void* const bottom = ws->tableEnd;
+ DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
+ alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ assert(alloc >= bottom);
+ if (alloc < bottom) {
+ DEBUGLOG(4, "cwksp: alloc failed!");
+ ws->allocFailed = 1;
+ return NULL;
+ }
+ /* the area is reserved from the end of wksp.
+ * If it overlaps with tableValidEnd, it voids guarantees on values' range */
+ if (alloc < ws->tableValidEnd) {
+ ws->tableValidEnd = alloc;
+ }
+ ws->allocStart = alloc;
+ return alloc;
+ }
+
+ /**
+ * Moves the cwksp to the next phase, and does any necessary allocations.
+ * cwksp initialization must necessarily go through each phase in order.
+ * Returns a 0 on success, or zstd error
+ */
+ MEM_STATIC size_t
+ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
+ {
  assert(phase >= ws->phase);
  if (phase > ws->phase) {
+ /* Going from allocating objects to allocating buffers */
  if (ws->phase < ZSTD_cwksp_alloc_buffers &&
  phase >= ZSTD_cwksp_alloc_buffers) {
  ws->tableValidEnd = ws->objectEnd;
  }
+
+ /* Going from allocating buffers to allocating aligneds/tables */
  if (ws->phase < ZSTD_cwksp_alloc_aligned &&
  phase >= ZSTD_cwksp_alloc_aligned) {
- /* If unaligned allocations down from a too-large top have left us
- * unaligned, we need to realign our alloc ptr. Technically, this
- * can consume space that is unaccounted for in the neededSpace
- * calculation. However, I believe this can only happen when the
- * workspace is too large, and specifically when it is too large
- * by a larger margin than the space that will be consumed. */
- /* TODO: cleaner, compiler warning friendly way to do this??? */
- ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
- if (ws->allocStart < ws->tableValidEnd) {
- ws->tableValidEnd = ws->allocStart;
+ { /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
+ size_t const bytesToAlign =
+ ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
+ DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
+ ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
+ RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
+ memory_allocation, "aligned phase - alignment initial allocation failed!");
  }
- }
+ { /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
+ void* const alloc = ws->objectEnd;
+ size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
+ void* const objectEnd = (BYTE*)alloc + bytesToAlign;
+ DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
+ RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
+ "table phase - alignment initial allocation failed!");
+ ws->objectEnd = objectEnd;
+ ws->tableEnd = objectEnd; /* table area starts being empty */
+ if (ws->tableValidEnd < ws->tableEnd) {
+ ws->tableValidEnd = ws->tableEnd;
+ } } }
  ws->phase = phase;
+ ZSTD_cwksp_assert_internal_consistency(ws);
  }
+ return 0;
  }

  /**
  * Returns whether this object/buffer/etc was allocated in this workspace.
  */
- MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
+ MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
+ {
  return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
  }

  /**
  * Internal function. Do not use directly.
  */
- MEM_STATIC void* ZSTD_cwksp_reserve_internal(
- ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
+ MEM_STATIC void*
+ ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
+ {
  void* alloc;
- void* bottom = ws->tableEnd;
- ZSTD_cwksp_internal_advance_phase(ws, phase);
- alloc = (BYTE *)ws->allocStart - bytes;
+ if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
+ return NULL;
+ }

- #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
  /* over-reserve space */
- alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+ bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
  #endif

- DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
- alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
- ZSTD_cwksp_assert_internal_consistency(ws);
- assert(alloc >= bottom);
- if (alloc < bottom) {
- DEBUGLOG(4, "cwksp: alloc failed!");
- ws->allocFailed = 1;
- return NULL;
- }
- if (alloc < ws->tableValidEnd) {
- ws->tableValidEnd = alloc;
- }
- ws->allocStart = alloc;
+ alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);

- #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
  /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
  * either size. */
- alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
- __asan_unpoison_memory_region(alloc, bytes);
+ if (alloc) {
+ alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+ __asan_unpoison_memory_region(alloc, bytes);
+ }
+ }
  #endif

  return alloc;
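The ZSTD_cwksp_bytes_to_align_ptr() helper added above uses standard power-of-two mask arithmetic. A minimal standalone sketch of the same computation (the function name and the main() driver here are illustrative, not part of zstd):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Bytes needed to move ptr up to the next multiple of alignBytes (a power of two). */
static size_t bytes_to_align_ptr(const void* ptr, size_t alignBytes) {
    size_t const mask = alignBytes - 1;
    assert((alignBytes & mask) == 0);  /* power of two */
    return (alignBytes - ((uintptr_t)ptr & mask)) & mask;
}

int main(void) {
    char buf[256];
    size_t const pad = bytes_to_align_ptr(buf + 3, 64);
    printf("padding to 64-byte boundary: %zu\n", pad);    /* some value in [0, 63] */
    assert((((uintptr_t)(buf + 3) + pad) & 63) == 0);     /* buf + 3 + pad is now aligned */
    return 0;
}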
@@ -270,33 +359,44 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
  /**
  * Reserves and returns unaligned memory.
  */
- MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
+ MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
+ {
  return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
  }

  /**
- * Reserves and returns memory sized on and aligned on sizeof(unsigned).
+ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
  */
- MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
- assert((bytes & (sizeof(U32)-1)) == 0);
- return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
+ MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
+ {
+ void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
+ ZSTD_cwksp_alloc_aligned);
+ assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+ return ptr;
  }

  /**
- * Aligned on sizeof(unsigned). These buffers have the special property that
+ * Aligned on 64 bytes. These buffers have the special property that
  * their values remain constrained, allowing us to re-use them without
  * memset()-ing them.
  */
- MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
+ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
+ {
  const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
- void* alloc = ws->tableEnd;
- void* end = (BYTE *)alloc + bytes;
- void* top = ws->allocStart;
+ void* alloc;
+ void* end;
+ void* top;
+
+ if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
+ return NULL;
+ }
+ alloc = ws->tableEnd;
+ end = (BYTE *)alloc + bytes;
+ top = ws->allocStart;

  DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
  alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
  assert((bytes & (sizeof(U32)-1)) == 0);
- ZSTD_cwksp_internal_advance_phase(ws, phase);
  ZSTD_cwksp_assert_internal_consistency(ws);
  assert(end <= top);
  if (end > top) {
@@ -306,35 +406,41 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
  }
  ws->tableEnd = end;

- #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- __asan_unpoison_memory_region(alloc, bytes);
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+ __asan_unpoison_memory_region(alloc, bytes);
+ }
  #endif

+ assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+ assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
  return alloc;
  }

  /**
  * Aligned on sizeof(void*).
+ * Note : should happen only once, at workspace first initialization
  */
- MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
- size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
+ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
+ {
+ size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
  void* alloc = ws->objectEnd;
  void* end = (BYTE*)alloc + roundedBytes;

- #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
  /* over-reserve space */
  end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
  #endif

- DEBUGLOG(5,
+ DEBUGLOG(4,
  "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
  alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
- assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
- assert((bytes & (sizeof(void*)-1)) == 0);
+ assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
+ assert(bytes % ZSTD_ALIGNOF(void*) == 0);
  ZSTD_cwksp_assert_internal_consistency(ws);
  /* we must be in the first phase, no advance is possible */
  if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
- DEBUGLOG(4, "cwksp: object alloc failed!");
+ DEBUGLOG(3, "cwksp: object alloc failed!");
  ws->allocFailed = 1;
  return NULL;
  }
@@ -342,20 +448,23 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
  ws->tableEnd = end;
  ws->tableValidEnd = end;

- #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
  /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
  * either size. */
- alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
- __asan_unpoison_memory_region(alloc, bytes);
+ alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+ __asan_unpoison_memory_region(alloc, bytes);
+ }
  #endif

  return alloc;
  }

- MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
+ MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
+ {
  DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

- #if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+ #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
  /* To validate that the table re-use logic is sound, and that we don't
  * access table space that we haven't cleaned, we re-"poison" the table
  * space every time we mark it dirty. */
@@ -390,7 +499,7 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
  assert(ws->tableValidEnd >= ws->objectEnd);
  assert(ws->tableValidEnd <= ws->allocStart);
  if (ws->tableValidEnd < ws->tableEnd) {
- memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+ ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
  }
  ZSTD_cwksp_mark_tables_clean(ws);
  }
@@ -402,8 +511,12 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
  MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
  DEBUGLOG(4, "cwksp: clearing tables!");

- #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- {
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ /* We don't do this when the workspace is statically allocated, because
+ * when that is the case, we have no capability to hook into the end of the
+ * workspace's lifecycle to unpoison the memory.
+ */
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
  size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
  __asan_poison_memory_region(ws->objectEnd, size);
  }
@@ -420,7 +533,7 @@ MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
  MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
  DEBUGLOG(4, "cwksp: clearing!");

- #if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+ #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
  /* To validate that the context re-use logic is sound, and that we don't
  * access stuff that this compression hasn't initialized, we re-"poison"
  * the workspace (or at least the non-static, non-table parts of it)
@@ -431,8 +544,12 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
  }
  #endif

- #if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- {
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ /* We don't do this when the workspace is statically allocated, because
+ * when that is the case, we have no capability to hook into the end of the
+ * workspace's lifecycle to unpoison the memory.
+ */
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
  size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
  __asan_poison_memory_region(ws->objectEnd, size);
  }
@@ -452,7 +569,7 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
  * Any existing values in the workspace are ignored (the previously managed
  * buffer, if present, must be separately freed).
  */
- MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
+ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
  DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
  assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
  ws->workspace = start;
@@ -460,39 +577,45 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
  ws->objectEnd = ws->workspace;
  ws->tableValidEnd = ws->objectEnd;
  ws->phase = ZSTD_cwksp_alloc_objects;
+ ws->isStatic = isStatic;
  ZSTD_cwksp_clear(ws);
  ws->workspaceOversizedDuration = 0;
  ZSTD_cwksp_assert_internal_consistency(ws);
  }

  MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
- void* workspace = ZSTD_malloc(size, customMem);
+ void* workspace = ZSTD_customMalloc(size, customMem);
  DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
- RETURN_ERROR_IF(workspace == NULL, memory_allocation);
- ZSTD_cwksp_init(ws, workspace, size);
+ RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
+ ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
  return 0;
  }

  MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
  void *ptr = ws->workspace;
  DEBUGLOG(4, "cwksp: freeing workspace");
- memset(ws, 0, sizeof(ZSTD_cwksp));
- ZSTD_free(ptr, customMem);
+ ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
+ ZSTD_customFree(ptr, customMem);
  }

  /**
  * Moves the management of a workspace from one cwksp to another. The src cwksp
- * is left in an invalid state (src must be re-init()'ed before its used again).
+ * is left in an invalid state (src must be re-init()'ed before it's used again).
  */
  MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
  *dst = *src;
- memset(src, 0, sizeof(ZSTD_cwksp));
+ ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
  }

  MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
  return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
  }

+ MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+ + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
+ }
+
  MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
  return ws->allocFailed;
  }
@@ -501,6 +624,24 @@ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
  * Functions Checking Free Space
  ***************************************/

+ /* ZSTD_alignmentSpaceWithinBounds() :
+ * Returns if the estimated space needed for a wksp is within an acceptable limit of the
+ * actual amount of space used.
+ */
+ MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
+ size_t const estimatedSpace, int resizedWorkspace) {
+ if (resizedWorkspace) {
+ /* Resized/newly allocated wksp should have exact bounds */
+ return ZSTD_cwksp_used(ws) == estimatedSpace;
+ } else {
+ /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
+ * than estimatedSpace. See the comments in zstd_cwksp.h for details.
+ */
+ return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
+ }
+ }
+
+
  MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
  return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
  }
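The new ZSTD_cwksp_used() and ZSTD_cwksp_estimated_space_within_bounds() in the hunk above account for the two occupied ends of the arena and tolerate up to 63 bytes of alignment drift when a workspace is reused. A simplified sketch of that accounting, using a hypothetical cut-down struct rather than the real ZSTD_cwksp:

#include <stddef.h>

/* Hypothetical, reduced view of the workspace bookkeeping pointers. */
typedef struct {
    char* workspace;     /* start of the arena                      */
    char* tableEnd;      /* end of the objects + tables segment     */
    char* allocStart;    /* start of the aligned + buffers segment  */
    char* workspaceEnd;  /* end of the arena                        */
} cwksp_view;

/* Bytes in use: front segment (objects/tables) plus back segment (aligned/buffers). */
static size_t cwksp_used(const cwksp_view* ws) {
    return (size_t)(ws->tableEnd - ws->workspace)
         + (size_t)(ws->workspaceEnd - ws->allocStart);
}

/* A freshly sized workspace should match the estimate exactly; a reused one may
 * drift by up to 63 bytes in either direction because of alignment padding. */
static int estimate_within_bounds(const cwksp_view* ws, size_t estimated, int resized) {
    size_t const used = cwksp_used(ws);
    if (resized) return used == estimated;
    return used >= estimated - 63 && used <= estimated + 63;
}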