zstd-ruby 1.4.4.0 → 1.5.5.0

Files changed (115)
  1. checksums.yaml +4 -4
  2. data/.gitignore +2 -0
  3. data/README.md +78 -5
  4. data/Rakefile +8 -2
  5. data/ext/zstdruby/common.h +15 -0
  6. data/ext/zstdruby/extconf.rb +3 -2
  7. data/ext/zstdruby/libzstd/common/allocations.h +55 -0
  8. data/ext/zstdruby/libzstd/common/bits.h +200 -0
  9. data/ext/zstdruby/libzstd/common/bitstream.h +74 -97
  10. data/ext/zstdruby/libzstd/common/compiler.h +219 -20
  11. data/ext/zstdruby/libzstd/common/cpu.h +1 -3
  12. data/ext/zstdruby/libzstd/common/debug.c +11 -31
  13. data/ext/zstdruby/libzstd/common/debug.h +22 -49
  14. data/ext/zstdruby/libzstd/common/entropy_common.c +184 -80
  15. data/ext/zstdruby/libzstd/common/error_private.c +11 -2
  16. data/ext/zstdruby/libzstd/common/error_private.h +87 -4
  17. data/ext/zstdruby/libzstd/common/fse.h +47 -116
  18. data/ext/zstdruby/libzstd/common/fse_decompress.c +127 -127
  19. data/ext/zstdruby/libzstd/common/huf.h +112 -197
  20. data/ext/zstdruby/libzstd/common/mem.h +124 -142
  21. data/ext/zstdruby/libzstd/common/pool.c +54 -27
  22. data/ext/zstdruby/libzstd/common/pool.h +11 -5
  23. data/ext/zstdruby/libzstd/common/portability_macros.h +156 -0
  24. data/ext/zstdruby/libzstd/common/threading.c +78 -22
  25. data/ext/zstdruby/libzstd/common/threading.h +9 -13
  26. data/ext/zstdruby/libzstd/common/xxhash.c +15 -873
  27. data/ext/zstdruby/libzstd/common/xxhash.h +5572 -191
  28. data/ext/zstdruby/libzstd/common/zstd_common.c +2 -37
  29. data/ext/zstdruby/libzstd/common/zstd_deps.h +111 -0
  30. data/ext/zstdruby/libzstd/common/zstd_internal.h +186 -144
  31. data/ext/zstdruby/libzstd/common/zstd_trace.h +163 -0
  32. data/ext/zstdruby/libzstd/compress/clevels.h +134 -0
  33. data/ext/zstdruby/libzstd/compress/fse_compress.c +99 -196
  34. data/ext/zstdruby/libzstd/compress/hist.c +41 -63
  35. data/ext/zstdruby/libzstd/compress/hist.h +13 -33
  36. data/ext/zstdruby/libzstd/compress/huf_compress.c +968 -331
  37. data/ext/zstdruby/libzstd/compress/zstd_compress.c +4120 -1191
  38. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +688 -159
  39. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +121 -40
  40. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +16 -6
  41. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +62 -35
  42. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h +10 -3
  43. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +577 -0
  44. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.h +32 -0
  45. data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +322 -115
  46. data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +394 -154
  47. data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +4 -3
  48. data/ext/zstdruby/libzstd/compress/zstd_fast.c +729 -253
  49. data/ext/zstdruby/libzstd/compress/zstd_fast.h +4 -3
  50. data/ext/zstdruby/libzstd/compress/zstd_lazy.c +1289 -247
  51. data/ext/zstdruby/libzstd/compress/zstd_lazy.h +61 -1
  52. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +339 -212
  53. data/ext/zstdruby/libzstd/compress/zstd_ldm.h +15 -3
  54. data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +106 -0
  55. data/ext/zstdruby/libzstd/compress/zstd_opt.c +508 -282
  56. data/ext/zstdruby/libzstd/compress/zstd_opt.h +1 -1
  57. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +217 -466
  58. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +35 -114
  59. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +1220 -572
  60. data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +576 -0
  61. data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +23 -19
  62. data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +3 -3
  63. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +859 -273
  64. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +1244 -375
  65. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +21 -7
  66. data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +74 -11
  67. data/ext/zstdruby/libzstd/dictBuilder/cover.c +75 -54
  68. data/ext/zstdruby/libzstd/dictBuilder/cover.h +20 -9
  69. data/ext/zstdruby/libzstd/dictBuilder/divsufsort.c +1 -1
  70. data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +55 -36
  71. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +126 -110
  72. data/ext/zstdruby/libzstd/{dictBuilder/zdict.h → zdict.h} +248 -56
  73. data/ext/zstdruby/libzstd/zstd.h +1277 -306
  74. data/ext/zstdruby/libzstd/{common/zstd_errors.h → zstd_errors.h} +29 -8
  75. data/ext/zstdruby/main.c +20 -0
  76. data/ext/zstdruby/skippable_frame.c +63 -0
  77. data/ext/zstdruby/streaming_compress.c +177 -0
  78. data/ext/zstdruby/streaming_compress.h +5 -0
  79. data/ext/zstdruby/streaming_decompress.c +123 -0
  80. data/ext/zstdruby/zstdruby.c +114 -32
  81. data/lib/zstd-ruby/version.rb +1 -1
  82. data/lib/zstd-ruby.rb +0 -1
  83. data/zstd-ruby.gemspec +1 -1
  84. metadata +24 -39
  85. data/.travis.yml +0 -14
  86. data/ext/zstdruby/libzstd/.gitignore +0 -3
  87. data/ext/zstdruby/libzstd/BUCK +0 -234
  88. data/ext/zstdruby/libzstd/Makefile +0 -289
  89. data/ext/zstdruby/libzstd/README.md +0 -159
  90. data/ext/zstdruby/libzstd/deprecated/zbuff.h +0 -214
  91. data/ext/zstdruby/libzstd/deprecated/zbuff_common.c +0 -26
  92. data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +0 -147
  93. data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +0 -75
  94. data/ext/zstdruby/libzstd/dll/example/Makefile +0 -47
  95. data/ext/zstdruby/libzstd/dll/example/README.md +0 -69
  96. data/ext/zstdruby/libzstd/dll/example/build_package.bat +0 -20
  97. data/ext/zstdruby/libzstd/dll/example/fullbench-dll.sln +0 -25
  98. data/ext/zstdruby/libzstd/dll/example/fullbench-dll.vcxproj +0 -181
  99. data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +0 -415
  100. data/ext/zstdruby/libzstd/legacy/zstd_v01.c +0 -2152
  101. data/ext/zstdruby/libzstd/legacy/zstd_v01.h +0 -94
  102. data/ext/zstdruby/libzstd/legacy/zstd_v02.c +0 -3514
  103. data/ext/zstdruby/libzstd/legacy/zstd_v02.h +0 -93
  104. data/ext/zstdruby/libzstd/legacy/zstd_v03.c +0 -3156
  105. data/ext/zstdruby/libzstd/legacy/zstd_v03.h +0 -93
  106. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +0 -3641
  107. data/ext/zstdruby/libzstd/legacy/zstd_v04.h +0 -142
  108. data/ext/zstdruby/libzstd/legacy/zstd_v05.c +0 -4046
  109. data/ext/zstdruby/libzstd/legacy/zstd_v05.h +0 -162
  110. data/ext/zstdruby/libzstd/legacy/zstd_v06.c +0 -4150
  111. data/ext/zstdruby/libzstd/legacy/zstd_v06.h +0 -172
  112. data/ext/zstdruby/libzstd/legacy/zstd_v07.c +0 -4533
  113. data/ext/zstdruby/libzstd/legacy/zstd_v07.h +0 -187
  114. data/ext/zstdruby/libzstd/libzstd.pc.in +0 -15
  115. data/ext/zstdruby/zstdruby.h +0 -6
data/ext/zstdruby/libzstd/common/mem.h
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -18,8 +18,10 @@ extern "C" {
  /*-****************************************
  *  Dependencies
  ******************************************/
- #include <stddef.h>     /* size_t, ptrdiff_t */
- #include <string.h>     /* memcpy */
+ #include <stddef.h>     /* size_t, ptrdiff_t */
+ #include "compiler.h"   /* __has_builtin */
+ #include "debug.h"      /* DEBUG_STATIC_ASSERT */
+ #include "zstd_deps.h"  /* ZSTD_memcpy */


  /*-****************************************
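Note: zstd_deps.h is new in this release (entry 29 in the file list). It gathers libzstd's libc dependencies behind ZSTD_-prefixed wrappers so embedded or freestanding builds can redirect them in one place. In the default configuration the wrappers reduce to the plain libc calls; roughly, as a sketch of the default mapping rather than a verbatim copy of the header:

    /* Default mapping in zstd_deps.h (sketch): */
    #include <string.h>
    #define ZSTD_memcpy(d, s, n)  memcpy((d), (s), (n))
    #define ZSTD_memset(p, v, n)  memset((p), (v), (n))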
@@ -39,94 +41,18 @@ extern "C" {
  # define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
  #endif

- #ifndef __has_builtin
- #  define __has_builtin(x) 0  /* compat. with non-clang compilers */
- #endif
-
- /* code only tested on 32 and 64 bits systems */
- #define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
- MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
-
- /* detects whether we are being compiled under msan */
- #if defined (__has_feature)
- #  if __has_feature(memory_sanitizer)
- #    define MEMORY_SANITIZER 1
- #  endif
- #endif
-
- #if defined (MEMORY_SANITIZER)
- /* Not all platforms that support msan provide sanitizers/msan_interface.h.
-  * We therefore declare the functions we need ourselves, rather than trying to
-  * include the header file... */
-
- #include <stdint.h> /* intptr_t */
-
- /* Make memory region fully initialized (without changing its contents). */
- void __msan_unpoison(const volatile void *a, size_t size);
-
- /* Make memory region fully uninitialized (without changing its contents).
-    This is a legacy interface that does not update origin information. Use
-    __msan_allocated_memory() instead. */
- void __msan_poison(const volatile void *a, size_t size);
-
- /* Returns the offset of the first (at least partially) poisoned byte in the
-    memory range, or -1 if the whole range is good. */
- intptr_t __msan_test_shadow(const volatile void *x, size_t size);
- #endif
-
- /* detects whether we are being compiled under asan */
- #if defined (__has_feature)
- #  if __has_feature(address_sanitizer)
- #    define ADDRESS_SANITIZER 1
- #  endif
- #elif defined(__SANITIZE_ADDRESS__)
- #  define ADDRESS_SANITIZER 1
- #endif
-
- #if defined (ADDRESS_SANITIZER)
- /* Not all platforms that support asan provide sanitizers/asan_interface.h.
-  * We therefore declare the functions we need ourselves, rather than trying to
-  * include the header file... */
-
- /**
-  * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
-  *
-  * This memory must be previously allocated by your program. Instrumented
-  * code is forbidden from accessing addresses in this region until it is
-  * unpoisoned. This function is not guaranteed to poison the entire region -
-  * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
-  * alignment restrictions.
-  *
-  * \note This function is not thread-safe because no two threads can poison or
-  *       unpoison memory in the same memory region simultaneously.
-  *
-  * \param addr Start of memory region.
-  * \param size Size of memory region. */
- void __asan_poison_memory_region(void const volatile *addr, size_t size);
-
- /**
-  * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
-  *
-  * This memory must be previously allocated by your program. Accessing
-  * addresses in this region is allowed until this region is poisoned again.
-  * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
-  * to ASan alignment restrictions.
-  *
-  * \note This function is not thread-safe because no two threads can
-  *       poison or unpoison memory in the same memory region simultaneously.
-  *
-  * \param addr Start of memory region.
-  * \param size Size of memory region. */
- void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
- #endif
-
-
  /*-**************************************************************
  *  Basic Types
  *****************************************************************/
  #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
- #  include <stdint.h>
+ #  if defined(_AIX)
+ #    include <inttypes.h>
+ #  else
+ #    include <stdint.h>  /* intptr_t */
+ #  endif
    typedef uint8_t  BYTE;
+   typedef uint8_t  U8;
+   typedef int8_t   S8;
    typedef uint16_t U16;
    typedef int16_t  S16;
    typedef uint32_t U32;
@@ -139,6 +65,8 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
  #   error "this implementation requires char to be exactly 8-bit type"
  #endif
    typedef unsigned char   BYTE;
+   typedef unsigned char   U8;
+   typedef signed char     S8;
  #if USHRT_MAX != 65535
  #   error "this implementation requires short to be exactly 16-bit type"
  #endif
@@ -157,25 +85,63 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);


  /*-**************************************************************
- *  Memory I/O
+ *  Memory I/O API
+ *****************************************************************/
+ /*=== Static platform detection ===*/
+ MEM_STATIC unsigned MEM_32bits(void);
+ MEM_STATIC unsigned MEM_64bits(void);
+ MEM_STATIC unsigned MEM_isLittleEndian(void);
+
+ /*=== Native unaligned read/write ===*/
+ MEM_STATIC U16 MEM_read16(const void* memPtr);
+ MEM_STATIC U32 MEM_read32(const void* memPtr);
+ MEM_STATIC U64 MEM_read64(const void* memPtr);
+ MEM_STATIC size_t MEM_readST(const void* memPtr);
+
+ MEM_STATIC void MEM_write16(void* memPtr, U16 value);
+ MEM_STATIC void MEM_write32(void* memPtr, U32 value);
+ MEM_STATIC void MEM_write64(void* memPtr, U64 value);
+
+ /*=== Little endian unaligned read/write ===*/
+ MEM_STATIC U16 MEM_readLE16(const void* memPtr);
+ MEM_STATIC U32 MEM_readLE24(const void* memPtr);
+ MEM_STATIC U32 MEM_readLE32(const void* memPtr);
+ MEM_STATIC U64 MEM_readLE64(const void* memPtr);
+ MEM_STATIC size_t MEM_readLEST(const void* memPtr);
+
+ MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
+ MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
+ MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
+ MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
+ MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);
+
+ /*=== Big endian unaligned read/write ===*/
+ MEM_STATIC U32 MEM_readBE32(const void* memPtr);
+ MEM_STATIC U64 MEM_readBE64(const void* memPtr);
+ MEM_STATIC size_t MEM_readBEST(const void* memPtr);
+
+ MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
+ MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
+ MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);
+
+ /*=== Byteswap ===*/
+ MEM_STATIC U32 MEM_swap32(U32 in);
+ MEM_STATIC U64 MEM_swap64(U64 in);
+ MEM_STATIC size_t MEM_swapST(size_t in);
+
+
+ /*-**************************************************************
+ *  Memory I/O Implementation
  *****************************************************************/
- /* MEM_FORCE_MEMORY_ACCESS :
-  * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
-  * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
-  * The below switch allow to select different access method for improved performance.
-  * Method 0 (default) : use `memcpy()`. Safe and portable.
-  * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable).
-  *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ /* MEM_FORCE_MEMORY_ACCESS : For accessing unaligned memory:
+  * Method 0 : always use `memcpy()`. Safe and portable.
+  * Method 1 : Use compiler extension to set unaligned access.
  * Method 2 : direct access. This method is portable but violate C standard.
  *            It can generate buggy code on targets depending on alignment.
-  * In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6)
-  * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
-  * Prefer these methods in priority order (0 > 1 > 2)
+  * Default : method 1 if supported, else method 0
  */
  #ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
- #  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
- #    define MEM_FORCE_MEMORY_ACCESS 2
- #  elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
+ #  ifdef __GNUC__
  #    define MEM_FORCE_MEMORY_ACCESS 1
  #  endif
  #endif
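Note: the rewritten selection logic drops method 2 from auto-selection entirely (no target picks direct casts anymore) and defaults GCC-family compilers to method 1. For reference, this is the shape of method 0, the portable default; the function name here is illustrative, not part of the patch:

    #include <string.h>

    /* Method 0: route the unaligned access through memcpy(). The compiler
     * usually inlines this to a single load where unaligned access is legal,
     * and to byte loads elsewhere -- correct in both cases. */
    static unsigned read32_portable(const void* p)
    {
        unsigned v;
        memcpy(&v, p, sizeof v);   /* no misaligned pointer is ever dereferenced */
        return v;
    }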
@@ -185,8 +151,22 @@ MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }

  MEM_STATIC unsigned MEM_isLittleEndian(void)
  {
+ #if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+     return 1;
+ #elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+     return 0;
+ #elif defined(__clang__) && __LITTLE_ENDIAN__
+     return 1;
+ #elif defined(__clang__) && __BIG_ENDIAN__
+     return 0;
+ #elif defined(_MSC_VER) && (_M_AMD64 || _M_IX86)
+     return 1;
+ #elif defined(__DMC__) && defined(_M_IX86)
+     return 1;
+ #else
      const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
      return one.c[0];
+ #endif
  }

  #if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
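Note: the new preprocessor ladder lets MEM_isLittleEndian() fold to a constant 1 or 0 on common toolchains, so endian-dependent branches disappear at compile time; the union read survives only as the last-resort runtime probe. A self-contained sketch of the same two-tier idea (illustrative names, not from the patch):

    #include <stdio.h>

    static unsigned is_little_endian(void)
    {
    #if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) \
        && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
        return 1;   /* decided by the preprocessor: zero runtime cost */
    #else
        /* runtime probe: inspect the lowest-addressed byte of a known value */
        const union { unsigned u; unsigned char c[4]; } one = { 1 };
        return one.c[0];
    #endif
    }

    int main(void) { printf("little-endian: %u\n", is_little_endian()); return 0; }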
@@ -204,30 +184,19 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }

  #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)

- /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
- /* currently only defined for gcc and icc */
- #if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
-     __pragma( pack(push, 1) )
-     typedef struct { U16 v; } unalign16;
-     typedef struct { U32 v; } unalign32;
-     typedef struct { U64 v; } unalign64;
-     typedef struct { size_t v; } unalignArch;
-     __pragma( pack(pop) )
- #else
-     typedef struct { U16 v; } __attribute__((packed)) unalign16;
-     typedef struct { U32 v; } __attribute__((packed)) unalign32;
-     typedef struct { U64 v; } __attribute__((packed)) unalign64;
-     typedef struct { size_t v; } __attribute__((packed)) unalignArch;
- #endif
+ typedef __attribute__((aligned(1))) U16 unalign16;
+ typedef __attribute__((aligned(1))) U32 unalign32;
+ typedef __attribute__((aligned(1))) U64 unalign64;
+ typedef __attribute__((aligned(1))) size_t unalignArch;

- MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
- MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
- MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
- MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }
+ MEM_STATIC U16 MEM_read16(const void* ptr) { return *(const unalign16*)ptr; }
+ MEM_STATIC U32 MEM_read32(const void* ptr) { return *(const unalign32*)ptr; }
+ MEM_STATIC U64 MEM_read64(const void* ptr) { return *(const unalign64*)ptr; }
+ MEM_STATIC size_t MEM_readST(const void* ptr) { return *(const unalignArch*)ptr; }

- MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
- MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
- MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }
+ MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(unalign16*)memPtr = value; }
+ MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(unalign32*)memPtr = value; }
+ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(unalign64*)memPtr = value; }

  #else

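Note: method 1 now expresses unaligned access with __attribute__((aligned(1))) typedefs instead of packed single-member structs. The attribute lowers the assumed alignment of the scalar type to one byte, so a plain dereference through it becomes a well-defined unaligned load or store in the GCC/Clang dialect, which is why this path is only auto-selected under __GNUC__. A minimal standalone illustration, assuming a GCC-compatible compiler:

    /* GCC/Clang extension: a 1-byte-aligned scalar typedef. */
    typedef __attribute__((aligned(1))) unsigned int u32_unaligned;

    static unsigned int read32_ext(const void* p)
    {
        /* The compiler knows p may be misaligned and emits a safe load. */
        return *(const u32_unaligned*)p;
    }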
@@ -236,41 +205,49 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v =

  MEM_STATIC U16 MEM_read16(const void* memPtr)
  {
-     U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+     U16 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
  }

  MEM_STATIC U32 MEM_read32(const void* memPtr)
  {
-     U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+     U32 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
  }

  MEM_STATIC U64 MEM_read64(const void* memPtr)
  {
-     U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+     U64 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
  }

  MEM_STATIC size_t MEM_readST(const void* memPtr)
  {
-     size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
+     size_t val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
  }

  MEM_STATIC void MEM_write16(void* memPtr, U16 value)
  {
-     memcpy(memPtr, &value, sizeof(value));
+     ZSTD_memcpy(memPtr, &value, sizeof(value));
  }

  MEM_STATIC void MEM_write32(void* memPtr, U32 value)
  {
-     memcpy(memPtr, &value, sizeof(value));
+     ZSTD_memcpy(memPtr, &value, sizeof(value));
  }

  MEM_STATIC void MEM_write64(void* memPtr, U64 value)
  {
-     memcpy(memPtr, &value, sizeof(value));
+     ZSTD_memcpy(memPtr, &value, sizeof(value));
  }

  #endif /* MEM_FORCE_MEMORY_ACCESS */

+ MEM_STATIC U32 MEM_swap32_fallback(U32 in)
+ {
+     return ((in << 24) & 0xff000000 ) |
+            ((in <<  8) & 0x00ff0000 ) |
+            ((in >>  8) & 0x0000ff00 ) |
+            ((in >> 24) & 0x000000ff );
+ }
+
  MEM_STATIC U32 MEM_swap32(U32 in)
  {
  #if defined(_MSC_VER)     /* Visual Studio */
@@ -279,22 +256,13 @@ MEM_STATIC U32 MEM_swap32(U32 in)
      || (defined(__clang__) && __has_builtin(__builtin_bswap32))
      return __builtin_bswap32(in);
  #else
-     return ((in << 24) & 0xff000000 ) |
-            ((in <<  8) & 0x00ff0000 ) |
-            ((in >>  8) & 0x0000ff00 ) |
-            ((in >> 24) & 0x000000ff );
+     return MEM_swap32_fallback(in);
  #endif
  }

- MEM_STATIC U64 MEM_swap64(U64 in)
+ MEM_STATIC U64 MEM_swap64_fallback(U64 in)
  {
- #if defined(_MSC_VER)     /* Visual Studio */
-     return _byteswap_uint64(in);
- #elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
-     || (defined(__clang__) && __has_builtin(__builtin_bswap64))
-     return __builtin_bswap64(in);
- #else
-     return ((in << 56) & 0xff00000000000000ULL) |
+     return ((in << 56) & 0xff00000000000000ULL) |
            ((in << 40) & 0x00ff000000000000ULL) |
            ((in << 24) & 0x0000ff0000000000ULL) |
            ((in <<  8) & 0x000000ff00000000ULL) |
@@ -302,6 +270,17 @@ MEM_STATIC U64 MEM_swap64(U64 in)
            ((in >> 24) & 0x0000000000ff0000ULL) |
            ((in >> 40) & 0x000000000000ff00ULL) |
            ((in >> 56) & 0x00000000000000ffULL);
+ }
+
+ MEM_STATIC U64 MEM_swap64(U64 in)
+ {
+ #if defined(_MSC_VER)     /* Visual Studio */
+     return _byteswap_uint64(in);
+ #elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
+     || (defined(__clang__) && __has_builtin(__builtin_bswap64))
+     return __builtin_bswap64(in);
+ #else
+     return MEM_swap64_fallback(in);
  #endif
  }

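Note: extracting MEM_swap32_fallback()/MEM_swap64_fallback() leaves a single copy of the shift-and-mask byteswap that the #else branches reuse; the intrinsic paths (_byteswap_uint64, __builtin_bswap*) are unchanged. A quick runnable check of what the fallback computes (standalone sketch, names illustrative):

    #include <assert.h>

    static unsigned swap32(unsigned in)
    {
        return ((in << 24) & 0xff000000u) |   /* byte 0 -> byte 3 */
               ((in <<  8) & 0x00ff0000u) |   /* byte 1 -> byte 2 */
               ((in >>  8) & 0x0000ff00u) |   /* byte 2 -> byte 1 */
               ((in >> 24) & 0x000000ffu);    /* byte 3 -> byte 0 */
    }

    int main(void) { assert(swap32(0x11223344u) == 0x44332211u); return 0; }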
@@ -338,7 +317,7 @@ MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)

  MEM_STATIC U32 MEM_readLE24(const void* memPtr)
  {
-     return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
+     return (U32)MEM_readLE16(memPtr) + ((U32)(((const BYTE*)memPtr)[2]) << 16);
  }

  MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
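Note: the MEM_readLE24 change is integer-promotion hygiene: `((const BYTE*)memPtr)[2] << 16` shifts an int-promoted byte, so the new code casts both operands to U32 before shifting and adding. The function composes a little-endian 24-bit value as b[0] | b[1]<<8 | b[2]<<16; for example, bytes {0x01, 0x02, 0x03} yield 0x030201. A hypothetical standalone equivalent:

    #include <assert.h>

    static unsigned read_le24(const unsigned char* b)
    {
        return (unsigned)b[0]
             | ((unsigned)b[1] << 8)
             | ((unsigned)b[2] << 16);   /* cast before shifting, as in the patch */
    }

    int main(void)
    {
        const unsigned char b[3] = { 0x01, 0x02, 0x03 };
        assert(read_le24(b) == 0x030201u);
        return 0;
    }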
@@ -445,6 +424,9 @@ MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
      MEM_writeBE64(memPtr, (U64)val);
  }

+ /* code only tested on 32 and 64 bits systems */
+ MEM_STATIC void MEM_check(void) { DEBUG_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
+

  #if defined (__cplusplus)
  }
data/ext/zstdruby/libzstd/common/pool.c
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -10,9 +10,9 @@


  /* ======   Dependencies   ======= */
- #include <stddef.h>    /* size_t */
+ #include "../common/allocations.h"  /* ZSTD_customCalloc, ZSTD_customFree */
+ #include "zstd_deps.h"  /* size_t */
  #include "debug.h"      /* assert */
- #include "zstd_internal.h"  /* ZSTD_malloc, ZSTD_free */
  #include "pool.h"

  /* ======   Compiler specifics   ====== */
@@ -86,7 +86,7 @@ static void* POOL_thread(void* opaque) {
          {   POOL_job const job = ctx->queue[ctx->queueHead];
              ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
              ctx->numThreadsBusy++;
-             ctx->queueEmpty = ctx->queueHead == ctx->queueTail;
+             ctx->queueEmpty = (ctx->queueHead == ctx->queueTail);
              /* Unlock the mutex, signal a pusher, and run the job */
              ZSTD_pthread_cond_signal(&ctx->queuePushCond);
              ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
@@ -96,33 +96,37 @@ static void* POOL_thread(void* opaque) {
          /* If the intended queue size was 0, signal after finishing job */
          ZSTD_pthread_mutex_lock(&ctx->queueMutex);
          ctx->numThreadsBusy--;
-         if (ctx->queueSize == 1) {
-             ZSTD_pthread_cond_signal(&ctx->queuePushCond);
-         }
+         ZSTD_pthread_cond_signal(&ctx->queuePushCond);
          ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
      }
  }  /* for (;;) */
  assert(0);  /* Unreachable */
  }

+ /* ZSTD_createThreadPool() : public access point */
+ POOL_ctx* ZSTD_createThreadPool(size_t numThreads) {
+     return POOL_create (numThreads, 0);
+ }
+
  POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
      return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
  }

  POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
-                                ZSTD_customMem customMem) {
+                                ZSTD_customMem customMem)
+ {
      POOL_ctx* ctx;
      /* Check parameters */
      if (!numThreads) { return NULL; }
      /* Allocate the context and zero initialize */
-     ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem);
+     ctx = (POOL_ctx*)ZSTD_customCalloc(sizeof(POOL_ctx), customMem);
      if (!ctx) { return NULL; }
      /* Initialize the job queue.
       * It needs one extra space since one space is wasted to differentiate
       * empty and full queues.
       */
      ctx->queueSize = queueSize + 1;
-     ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem);
+     ctx->queue = (POOL_job*)ZSTD_customCalloc(ctx->queueSize * sizeof(POOL_job), customMem);
      ctx->queueHead = 0;
      ctx->queueTail = 0;
      ctx->numThreadsBusy = 0;
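Note: ZSTD_createThreadPool() gives applications a public way to build one worker pool and share it across several compression contexts instead of letting each context spawn its own threads. A hedged usage sketch; ZSTD_CCtx_refThreadPool() belongs to zstd's advanced API behind ZSTD_STATIC_LINKING_ONLY, and exact availability depends on the libzstd version you build against:

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_threadPool, ZSTD_CCtx_refThreadPool */
    #include <zstd.h>

    static int use_shared_pool(ZSTD_CCtx* cctx1, ZSTD_CCtx* cctx2)
    {
        ZSTD_threadPool* const pool = ZSTD_createThreadPool(4);   /* 4 workers */
        if (pool == NULL) return -1;
        /* Both contexts now queue their jobs on the same workers. */
        ZSTD_CCtx_refThreadPool(cctx1, pool);
        ZSTD_CCtx_refThreadPool(cctx2, pool);
        /* ... compress with cctx1/cctx2; free/detach them before the pool ... */
        ZSTD_freeThreadPool(pool);
        return 0;
    }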
@@ -136,7 +140,7 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
      }
      ctx->shutdown = 0;
      /* Allocate space for the thread handles */
-     ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
+     ctx->threads = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
      ctx->threadCapacity = 0;
      ctx->customMem = customMem;
      /* Check for errors */
@@ -169,7 +173,7 @@ static void POOL_join(POOL_ctx* ctx) {
      /* Join all of the threads */
      {   size_t i;
          for (i = 0; i < ctx->threadCapacity; ++i) {
-             ZSTD_pthread_join(ctx->threads[i], NULL);  /* note : could fail */
+             ZSTD_pthread_join(ctx->threads[i]);  /* note : could fail */
      }   }
  }

@@ -179,14 +183,27 @@ void POOL_free(POOL_ctx *ctx) {
      ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
      ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
      ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
-     ZSTD_free(ctx->queue, ctx->customMem);
-     ZSTD_free(ctx->threads, ctx->customMem);
-     ZSTD_free(ctx, ctx->customMem);
+     ZSTD_customFree(ctx->queue, ctx->customMem);
+     ZSTD_customFree(ctx->threads, ctx->customMem);
+     ZSTD_customFree(ctx, ctx->customMem);
  }

+ /*! POOL_joinJobs() :
+  *  Waits for all queued jobs to finish executing.
+  */
+ void POOL_joinJobs(POOL_ctx* ctx) {
+     ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+     while(!ctx->queueEmpty || ctx->numThreadsBusy > 0) {
+         ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
+     }
+     ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+ }

+ void ZSTD_freeThreadPool (ZSTD_threadPool* pool) {
+     POOL_free (pool);
+ }

- size_t POOL_sizeof(POOL_ctx *ctx) {
+ size_t POOL_sizeof(const POOL_ctx* ctx) {
      if (ctx==NULL) return 0;  /* supports sizeof NULL */
      return sizeof(*ctx)
          + ctx->queueSize * sizeof(POOL_job)
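Note: POOL_joinJobs() follows the canonical condition-variable pattern: re-test the predicate (queue empty and no busy workers) in a loop around the wait, because condition waits can wake spuriously and another thread may consume the state change first. The same pattern in plain pthreads, with illustrative names:

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t mutex;
        pthread_cond_t  cond;
        int pending;   /* jobs queued or currently running */
    } workqueue_t;

    /* Block until no work is left. A while-loop (never a plain if)
     * guards against spurious wakeups and lost races with other waiters. */
    static void workqueue_wait_idle(workqueue_t* q)
    {
        pthread_mutex_lock(&q->mutex);
        while (q->pending > 0)
            pthread_cond_wait(&q->cond, &q->mutex);
        pthread_mutex_unlock(&q->mutex);
    }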
@@ -203,11 +220,11 @@ static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
          return 0;
      }
      /* numThreads > threadCapacity */
-     {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
+     {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
          if (!threadPool) return 1;
          /* replace existing thread pool */
-         memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
-         ZSTD_free(ctx->threads, ctx->customMem);
+         ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
+         ZSTD_customFree(ctx->threads, ctx->customMem);
          ctx->threads = threadPool;
          /* Initialize additional threads */
          {   size_t threadId;
@@ -251,9 +268,12 @@ static int isQueueFull(POOL_ctx const* ctx) {
  }


- static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
+ static void
+ POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
  {
-     POOL_job const job = {function, opaque};
+     POOL_job job;
+     job.function = function;
+     job.opaque = opaque;
      assert(ctx != NULL);
      if (ctx->shutdown) return;

@@ -301,21 +321,28 @@ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
  struct POOL_ctx_s {
      int dummy;
  };
- static POOL_ctx g_ctx;
+ static POOL_ctx g_poolCtx;

  POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
      return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
  }

- POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
+ POOL_ctx*
+ POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem)
+ {
      (void)numThreads;
      (void)queueSize;
      (void)customMem;
-     return &g_ctx;
+     return &g_poolCtx;
  }

  void POOL_free(POOL_ctx* ctx) {
-     assert(!ctx || ctx == &g_ctx);
+     assert(!ctx || ctx == &g_poolCtx);
+     (void)ctx;
+ }
+
+ void POOL_joinJobs(POOL_ctx* ctx){
+     assert(!ctx || ctx == &g_poolCtx);
      (void)ctx;
  }

@@ -335,9 +362,9 @@ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
      return 1;
  }

- size_t POOL_sizeof(POOL_ctx* ctx) {
+ size_t POOL_sizeof(const POOL_ctx* ctx) {
      if (ctx==NULL) return 0;  /* supports sizeof NULL */
-     assert(ctx == &g_ctx);
+     assert(ctx == &g_poolCtx);
      return sizeof(*ctx);
  }

data/ext/zstdruby/libzstd/common/pool.h
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -16,9 +16,9 @@ extern "C" {
  #endif


- #include <stddef.h>   /* size_t */
+ #include "zstd_deps.h"
  #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem */
- #include "zstd.h"
+ #include "../zstd.h"

  typedef struct POOL_ctx_s POOL_ctx;

@@ -38,6 +38,12 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
   */
  void POOL_free(POOL_ctx* ctx);

+
+ /*! POOL_joinJobs() :
+  *  Waits for all queued jobs to finish executing.
+  */
+ void POOL_joinJobs(POOL_ctx* ctx);
+
  /*! POOL_resize() :
   *  Expands or shrinks pool's number of threads.
   *  This is more efficient than releasing + creating a new context,
@@ -53,7 +59,7 @@ int POOL_resize(POOL_ctx* ctx, size_t numThreads);
   * @return threadpool memory usage
   *  note : compatible with NULL (returns 0 in this case)
   */
- size_t POOL_sizeof(POOL_ctx* ctx);
+ size_t POOL_sizeof(const POOL_ctx* ctx);


  /*! POOL_function :
@@ -70,7 +76,7 @@ void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);


  /*! POOL_tryAdd() :
-  *  Add the job `function(opaque)` to thread pool _if_ a worker is available.
+  *  Add the job `function(opaque)` to thread pool _if_ a queue slot is available.
   *  Returns immediately even if not (does not block).
   * @return : 1 if successful, 0 if not.
   */