zstd-ruby 1.5.2.3 → 1.5.5.0

Files changed (74)
  1. checksums.yaml +4 -4
  2. data/README.md +13 -5
  3. data/ext/zstdruby/extconf.rb +1 -1
  4. data/ext/zstdruby/libzstd/common/allocations.h +55 -0
  5. data/ext/zstdruby/libzstd/common/bits.h +200 -0
  6. data/ext/zstdruby/libzstd/common/bitstream.h +19 -60
  7. data/ext/zstdruby/libzstd/common/compiler.h +26 -3
  8. data/ext/zstdruby/libzstd/common/cpu.h +1 -1
  9. data/ext/zstdruby/libzstd/common/debug.c +1 -1
  10. data/ext/zstdruby/libzstd/common/debug.h +1 -1
  11. data/ext/zstdruby/libzstd/common/entropy_common.c +12 -40
  12. data/ext/zstdruby/libzstd/common/error_private.c +9 -2
  13. data/ext/zstdruby/libzstd/common/error_private.h +1 -1
  14. data/ext/zstdruby/libzstd/common/fse.h +5 -83
  15. data/ext/zstdruby/libzstd/common/fse_decompress.c +7 -99
  16. data/ext/zstdruby/libzstd/common/huf.h +65 -156
  17. data/ext/zstdruby/libzstd/common/mem.h +39 -46
  18. data/ext/zstdruby/libzstd/common/pool.c +26 -10
  19. data/ext/zstdruby/libzstd/common/pool.h +7 -1
  20. data/ext/zstdruby/libzstd/common/portability_macros.h +22 -3
  21. data/ext/zstdruby/libzstd/common/threading.c +68 -14
  22. data/ext/zstdruby/libzstd/common/threading.h +5 -10
  23. data/ext/zstdruby/libzstd/common/xxhash.c +2 -2
  24. data/ext/zstdruby/libzstd/common/xxhash.h +8 -8
  25. data/ext/zstdruby/libzstd/common/zstd_common.c +1 -36
  26. data/ext/zstdruby/libzstd/common/zstd_deps.h +1 -1
  27. data/ext/zstdruby/libzstd/common/zstd_internal.h +17 -118
  28. data/ext/zstdruby/libzstd/common/zstd_trace.h +3 -3
  29. data/ext/zstdruby/libzstd/compress/clevels.h +1 -1
  30. data/ext/zstdruby/libzstd/compress/fse_compress.c +7 -124
  31. data/ext/zstdruby/libzstd/compress/hist.c +1 -1
  32. data/ext/zstdruby/libzstd/compress/hist.h +1 -1
  33. data/ext/zstdruby/libzstd/compress/huf_compress.c +234 -169
  34. data/ext/zstdruby/libzstd/compress/zstd_compress.c +1243 -538
  35. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +225 -151
  36. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +115 -39
  37. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +16 -8
  38. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +3 -3
  39. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h +1 -1
  40. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +25 -21
  41. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.h +1 -1
  42. data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +128 -62
  43. data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +95 -33
  44. data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +3 -2
  45. data/ext/zstdruby/libzstd/compress/zstd_fast.c +433 -148
  46. data/ext/zstdruby/libzstd/compress/zstd_fast.h +3 -2
  47. data/ext/zstdruby/libzstd/compress/zstd_lazy.c +398 -345
  48. data/ext/zstdruby/libzstd/compress/zstd_lazy.h +4 -2
  49. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +5 -5
  50. data/ext/zstdruby/libzstd/compress/zstd_ldm.h +1 -1
  51. data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +1 -1
  52. data/ext/zstdruby/libzstd/compress/zstd_opt.c +106 -80
  53. data/ext/zstdruby/libzstd/compress/zstd_opt.h +1 -1
  54. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +17 -9
  55. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +1 -1
  56. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +434 -441
  57. data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +30 -39
  58. data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +4 -4
  59. data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +1 -1
  60. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +205 -80
  61. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +201 -81
  62. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +6 -1
  63. data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +4 -2
  64. data/ext/zstdruby/libzstd/dictBuilder/cover.c +19 -15
  65. data/ext/zstdruby/libzstd/dictBuilder/cover.h +1 -1
  66. data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +2 -2
  67. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +11 -89
  68. data/ext/zstdruby/libzstd/zdict.h +53 -31
  69. data/ext/zstdruby/libzstd/zstd.h +580 -135
  70. data/ext/zstdruby/libzstd/zstd_errors.h +27 -8
  71. data/ext/zstdruby/main.c +6 -0
  72. data/ext/zstdruby/skippable_frame.c +63 -0
  73. data/lib/zstd-ruby/version.rb +1 -1
  74. metadata +9 -6

data/ext/zstdruby/libzstd/common/threading.c

@@ -23,8 +23,7 @@ int g_ZSTD_threading_useless_symbol;
 #if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
 
 /**
- * Windows minimalist Pthread Wrapper, based on :
- * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
+ * Windows minimalist Pthread Wrapper
 */
 
 
@@ -35,37 +34,92 @@ int g_ZSTD_threading_useless_symbol;
 
 /* === Implementation === */
 
+typedef struct {
+    void* (*start_routine)(void*);
+    void* arg;
+    int initialized;
+    ZSTD_pthread_cond_t initialized_cond;
+    ZSTD_pthread_mutex_t initialized_mutex;
+} ZSTD_thread_params_t;
+
 static unsigned __stdcall worker(void *arg)
 {
-    ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg;
-    thread->arg = thread->start_routine(thread->arg);
+    void* (*start_routine)(void*);
+    void* thread_arg;
+
+    /* Initialized thread_arg and start_routine and signal main thread that we don't need it
+     * to wait any longer.
+     */
+    {
+        ZSTD_thread_params_t* thread_param = (ZSTD_thread_params_t*)arg;
+        thread_arg = thread_param->arg;
+        start_routine = thread_param->start_routine;
+
+        /* Signal main thread that we are running and do not depend on its memory anymore */
+        ZSTD_pthread_mutex_lock(&thread_param->initialized_mutex);
+        thread_param->initialized = 1;
+        ZSTD_pthread_cond_signal(&thread_param->initialized_cond);
+        ZSTD_pthread_mutex_unlock(&thread_param->initialized_mutex);
+    }
+
+    start_routine(thread_arg);
+
     return 0;
 }
 
 int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
             void* (*start_routine) (void*), void* arg)
 {
+    ZSTD_thread_params_t thread_param;
     (void)unused;
-    thread->arg = arg;
-    thread->start_routine = start_routine;
-    thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL);
 
-    if (!thread->handle)
+    thread_param.start_routine = start_routine;
+    thread_param.arg = arg;
+    thread_param.initialized = 0;
+    *thread = NULL;
+
+    /* Setup thread initialization synchronization */
+    if(ZSTD_pthread_cond_init(&thread_param.initialized_cond, NULL)) {
+        /* Should never happen on Windows */
+        return -1;
+    }
+    if(ZSTD_pthread_mutex_init(&thread_param.initialized_mutex, NULL)) {
+        /* Should never happen on Windows */
+        ZSTD_pthread_cond_destroy(&thread_param.initialized_cond);
+        return -1;
+    }
+
+    /* Spawn thread */
+    *thread = (HANDLE)_beginthreadex(NULL, 0, worker, &thread_param, 0, NULL);
+    if (!thread) {
+        ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex);
+        ZSTD_pthread_cond_destroy(&thread_param.initialized_cond);
         return errno;
-    else
-        return 0;
+    }
+
+    /* Wait for thread to be initialized */
+    ZSTD_pthread_mutex_lock(&thread_param.initialized_mutex);
+    while(!thread_param.initialized) {
+        ZSTD_pthread_cond_wait(&thread_param.initialized_cond, &thread_param.initialized_mutex);
+    }
+    ZSTD_pthread_mutex_unlock(&thread_param.initialized_mutex);
+    ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex);
+    ZSTD_pthread_cond_destroy(&thread_param.initialized_cond);
+
+    return 0;
 }
 
-int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)
+int ZSTD_pthread_join(ZSTD_pthread_t thread)
 {
     DWORD result;
 
-    if (!thread.handle) return 0;
+    if (!thread) return 0;
+
+    result = WaitForSingleObject(thread, INFINITE);
+    CloseHandle(thread);
 
-    result = WaitForSingleObject(thread.handle, INFINITE);
     switch (result) {
     case WAIT_OBJECT_0:
-        if (value_ptr) *value_ptr = thread.arg;
         return 0;
     case WAIT_ABANDONED:
        return EINVAL;
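
Note: together with the threading.h hunks just below (where ZSTD_pthread_t becomes a bare HANDLE), this rewrite changes the Windows wrapper API: ZSTD_pthread_create() now parks the start routine and argument in a stack-local ZSTD_thread_params_t and waits on a condition variable until the worker has copied them out, and ZSTD_pthread_join() no longer reports the thread's return value. A minimal caller-side sketch of the resulting API (illustrative only, not part of this diff):

    #include <stddef.h>
    #include "threading.h"   /* ZSTD_pthread_* wrappers */

    static void* task(void* arg) { (void)arg; /* do some work */ return NULL; }

    static int run_one_task(void)
    {
        ZSTD_pthread_t t;
        /* returns only after the worker has copied its parameters (internal handshake) */
        if (ZSTD_pthread_create(&t, NULL, task, NULL)) return -1;
        /* join now takes just the thread; the old void** value_ptr argument is gone */
        return ZSTD_pthread_join(t);
    }
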

data/ext/zstdruby/libzstd/common/threading.h

@@ -23,8 +23,7 @@ extern "C" {
 #if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
 
 /**
- * Windows minimalist Pthread Wrapper, based on :
- * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
+ * Windows minimalist Pthread Wrapper
 */
 #ifdef WINVER
 #  undef WINVER
@@ -62,16 +61,12 @@ extern "C" {
 #define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a))
 
 /* ZSTD_pthread_create() and ZSTD_pthread_join() */
-typedef struct {
-    HANDLE handle;
-    void* (*start_routine)(void*);
-    void* arg;
-} ZSTD_pthread_t;
+typedef HANDLE ZSTD_pthread_t;
 
 int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
                    void* (*start_routine) (void*), void* arg);
 
-int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);
+int ZSTD_pthread_join(ZSTD_pthread_t thread);
 
 /**
  * add here more wrappers as required
@@ -99,7 +94,7 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);
 
 #define ZSTD_pthread_t pthread_t
 #define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
-#define ZSTD_pthread_join(a, b) pthread_join((a),(b))
+#define ZSTD_pthread_join(a) pthread_join((a),NULL)
 
 #else /* DEBUGLEVEL >= 1 */
 
@@ -124,7 +119,7 @@ int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond);
 
 #define ZSTD_pthread_t pthread_t
 #define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
-#define ZSTD_pthread_join(a, b) pthread_join((a),(b))
+#define ZSTD_pthread_join(a) pthread_join((a),NULL)
 
 #endif
 

data/ext/zstdruby/libzstd/common/xxhash.c

@@ -1,9 +1,9 @@
 /*
  * xxHash - Fast Hash algorithm
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  *
  * You can contact the author at :
- * - xxHash homepage: http://www.xxhash.com
+ * - xxHash homepage: https://cyan4973.github.io/xxHash/
  * - xxHash source repository : https://github.com/Cyan4973/xxHash
  *
  * This source code is licensed under both the BSD-style license (found in the

data/ext/zstdruby/libzstd/common/xxhash.h

@@ -1,9 +1,9 @@
 /*
  * xxHash - Fast Hash algorithm
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  *
  * You can contact the author at :
- * - xxHash homepage: http://www.xxhash.com
+ * - xxHash homepage: https://cyan4973.github.io/xxHash/
  * - xxHash source repository : https://github.com/Cyan4973/xxHash
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -1314,7 +1314,7 @@ XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
  * care, as what works on one compiler/platform/optimization level may cause
  * another to read garbage data or even crash.
  *
- * See http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
+ * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
  *
  * Prefer these methods in priority order (0 > 3 > 1 > 2)
  */
@@ -1534,7 +1534,7 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_
  * @brief Used to prevent unwanted optimizations for @p var.
  *
  * It uses an empty GCC inline assembly statement with a register constraint
- * which forces @p var into a general purpose register (eg eax, ebx, ecx
+ * which forces @p var into a general purpose register (e.g. eax, ebx, ecx
  * on x86) and marks it as modified.
  *
  * This is used in a few places to avoid unwanted autovectorization (e.g.
@@ -1655,7 +1655,7 @@ static xxh_u32 XXH_read32(const void* ptr)
 
 /*
  * Portable and safe solution. Generally efficient.
- * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
+ * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
 */
 static xxh_u32 XXH_read32(const void* memPtr)
 {
@@ -2296,7 +2296,7 @@ static xxh_u64 XXH_read64(const void* ptr)
 
 /*
  * Portable and safe solution. Generally efficient.
- * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
+ * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
 */
 static xxh_u64 XXH_read64(const void* memPtr)
 {
@@ -2809,7 +2809,7 @@ enum XXH_VECTOR_TYPE /* fake enum */ {
  * @ingroup tuning
  * @brief Selects the minimum alignment for XXH3's accumulators.
 *
- * When using SIMD, this should match the alignment reqired for said vector
+ * When using SIMD, this should match the alignment required for said vector
  * type, so, for example, 32 for AVX2.
 *
 * Default: Auto detected.
@@ -3026,7 +3026,7 @@ enum XXH_VECTOR_TYPE /* fake enum */ {
 * have more than 2 NEON (F0/F1) micro-ops. If you are only using NEON instructions,
 * you are only using 2/3 of the CPU bandwidth.
 *
-* This is even more noticable on the more advanced cores like the A76 which
+* This is even more noticeable on the more advanced cores like the A76 which
 * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
 *
 * Therefore, @ref XXH3_NEON_LANES lanes will be processed using NEON, and the

data/ext/zstdruby/libzstd/common/zstd_common.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
@@ -14,7 +14,6 @@
 *  Dependencies
 ***************************************/
 #define ZSTD_DEPS_NEED_MALLOC
-#include "zstd_deps.h"   /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
 #include "error_private.h"
 #include "zstd_internal.h"
 
@@ -47,37 +46,3 @@ ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
 /*! ZSTD_getErrorString() :
  *  provides error code string from enum */
 const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
-
-
-
-/*=**************************************************************
-*  Custom allocator
-****************************************************************/
-void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
-{
-    if (customMem.customAlloc)
-        return customMem.customAlloc(customMem.opaque, size);
-    return ZSTD_malloc(size);
-}
-
-void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
-{
-    if (customMem.customAlloc) {
-        /* calloc implemented as malloc+memset;
-         * not as efficient as calloc, but next best guess for custom malloc */
-        void* const ptr = customMem.customAlloc(customMem.opaque, size);
-        ZSTD_memset(ptr, 0, size);
-        return ptr;
-    }
-    return ZSTD_calloc(1, size);
-}
-
-void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
-{
-    if (ptr!=NULL) {
-        if (customMem.customFree)
-            customMem.customFree(customMem.opaque, ptr);
-        else
-            ZSTD_free(ptr);
-    }
-}
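
Note: the ZSTD_customMalloc()/ZSTD_customCalloc()/ZSTD_customFree() helpers deleted above are not dropped from the library; they appear to move into the new common/allocations.h added in this release (entry 4 in the file list). The public ZSTD_customMem hook they implement is unchanged. A hedged usage sketch of that hook (my_alloc/my_free are placeholder names, not part of zstd):

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_createCCtx_advanced, ZSTD_customMem */
    #include <stdlib.h>
    #include <zstd.h>

    static void* my_alloc(void* opaque, size_t size) { (void)opaque; return malloc(size); }
    static void  my_free (void* opaque, void* addr)  { (void)opaque; free(addr); }

    int main(void)
    {
        ZSTD_customMem const mem = { my_alloc, my_free, NULL };   /* customAlloc, customFree, opaque */
        ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(mem);    /* context allocations route through mem */
        ZSTD_freeCCtx(cctx);
        return 0;
    }
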

data/ext/zstdruby/libzstd/common/zstd_deps.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the

data/ext/zstdruby/libzstd/common/zstd_internal.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
@@ -28,7 +28,6 @@
 #include "../zstd.h"
 #define FSE_STATIC_LINKING_ONLY
 #include "fse.h"
-#define HUF_STATIC_LINKING_ONLY
 #include "huf.h"
 #ifndef XXH_STATIC_LINKING_ONLY
 #  define XXH_STATIC_LINKING_ONLY  /* XXH64_state_t */
@@ -93,9 +92,9 @@ typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
 #define ZSTD_FRAMECHECKSUMSIZE 4
 
 #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
-#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */)   /* for a non-null block */
+#define MIN_LITERALS_FOR_4_STREAMS 6
 
-#define HufLog 12
 typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
 
 #define LONGNBSEQ 0x7F00
@@ -103,6 +102,7 @@ typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingTy
 #define MINMATCH 3
 
 #define Litbits 8
+#define LitHufLog 11
 #define MaxLit ((1<<Litbits) - 1)
 #define MaxML 52
 #define MaxLL 35
@@ -113,6 +113,8 @@ typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingTy
 #define LLFSELog 9
 #define OffFSELog 8
 #define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
+#define MaxMLBits 16
+#define MaxLLBits 16
 
 #define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
 /* Each table cannot take more than #symbols * FSELog bits */
@@ -235,12 +237,6 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e
      * one COPY16() in the first call. Then, do two calls per loop since
      * at that point it is more likely to have a high trip count.
      */
-#ifdef __aarch64__
-    do {
-        COPY16(op, ip);
-    }
-    while (op < oend);
-#else
     ZSTD_copy16(op, ip);
     if (16 >= length) return;
     op += 16;
@@ -250,7 +246,6 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e
         COPY16(op, ip);
     }
     while (op < oend);
-#endif
     }
 }
 
@@ -299,11 +294,11 @@ typedef enum {
 typedef struct {
     seqDef* sequencesStart;
     seqDef* sequences;      /* ptr to end of sequences */
-    BYTE* litStart;
-    BYTE* lit;              /* ptr to end of literals */
-    BYTE* llCode;
-    BYTE* mlCode;
-    BYTE* ofCode;
+    BYTE* litStart;
+    BYTE* lit;              /* ptr to end of literals */
+    BYTE* llCode;
+    BYTE* mlCode;
+    BYTE* ofCode;
     size_t maxNbSeq;
     size_t maxNbLit;
 
@@ -311,8 +306,8 @@ typedef struct {
      * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
     * the existing value of the litLength or matchLength by 0x10000.
     */
-    ZSTD_longLengthType_e longLengthType;
-    U32 longLengthPos;  /* Index of the sequence to apply long length modification to */
+    ZSTD_longLengthType_e longLengthType;
+    U32 longLengthPos;  /* Index of the sequence to apply long length modification to */
 } seqStore_t;
 
 typedef struct {
@@ -331,10 +326,10 @@ MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore
     seqLen.matchLength = seq->mlBase + MINMATCH;
     if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
         if (seqStore->longLengthType == ZSTD_llt_literalLength) {
-            seqLen.litLength += 0xFFFF;
+            seqLen.litLength += 0x10000;
         }
         if (seqStore->longLengthType == ZSTD_llt_matchLength) {
-            seqLen.matchLength += 0xFFFF;
+            seqLen.matchLength += 0x10000;
         }
     }
     return seqLen;
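
Note: this hunk fixes an off-by-one in ZSTD_getSequenceLength(). seqDef keeps litLength and mlBase in 16-bit fields; when the true length does not fit, the sequence is flagged via longLengthType/longLengthPos and, per the struct comment above, the full value is reconstructed by adding 0x10000 to the stored (truncated) value, not 0xFFFF. A small illustrative sketch of that reconstruction (not zstd source):

    typedef unsigned short U16;

    /* Recover a "long" length from its truncated 16-bit stored value. */
    static unsigned long full_length(U16 stored, int flagged_long)
    {
        return (unsigned long)stored + (flagged_long ? 0x10000UL : 0UL);
    }
    /* e.g. a true litLength of 0x10003 is stored as 0x0003 with the flag set;
     * adding 0xFFFF instead would reconstruct 0x10002. */
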
@@ -347,109 +342,13 @@ MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore
  * `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
  */
 typedef struct {
+    size_t nbBlocks;
     size_t compressedSize;
     unsigned long long decompressedBound;
 } ZSTD_frameSizeInfo;   /* decompress & legacy */
 
 const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);   /* compress & dictBuilder */
-void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);   /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
-
-/* custom memory allocation functions */
-void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem);
-void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem);
-void ZSTD_customFree(void* ptr, ZSTD_customMem customMem);
-
-
-MEM_STATIC U32 ZSTD_highbit32(U32 val)   /* compress, dictBuilder, decodeCorpus */
-{
-    assert(val != 0);
-    {
-#   if defined(_MSC_VER)   /* Visual */
-#       if STATIC_BMI2 == 1
-            return _lzcnt_u32(val)^31;
-#       else
-            if (val != 0) {
-                unsigned long r;
-                _BitScanReverse(&r, val);
-                return (unsigned)r;
-            } else {
-                /* Should not reach this code path */
-                __assume(0);
-            }
-#       endif
-#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */
-        return __builtin_clz (val) ^ 31;
-#   elif defined(__ICCARM__)    /* IAR Intrinsic */
-        return 31 - __CLZ(val);
-#   else   /* Software version */
-        static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
-        U32 v = val;
-        v |= v >> 1;
-        v |= v >> 2;
-        v |= v >> 4;
-        v |= v >> 8;
-        v |= v >> 16;
-        return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
-#   endif
-    }
-}
-
-/**
- * Counts the number of trailing zeros of a `size_t`.
- * Most compilers should support CTZ as a builtin. A backup
- * implementation is provided if the builtin isn't supported, but
- * it may not be terribly efficient.
- */
-MEM_STATIC unsigned ZSTD_countTrailingZeros(size_t val)
-{
-    if (MEM_64bits()) {
-#       if defined(_MSC_VER) && defined(_WIN64)
-#           if STATIC_BMI2
-                return _tzcnt_u64(val);
-#           else
-                if (val != 0) {
-                    unsigned long r;
-                    _BitScanForward64(&r, (U64)val);
-                    return (unsigned)r;
-                } else {
-                    /* Should not reach this code path */
-                    __assume(0);
-                }
-#           endif
-#       elif defined(__GNUC__) && (__GNUC__ >= 4)
-            return __builtin_ctzll((U64)val);
-#       else
-            static const int DeBruijnBytePos[64] = { 0, 1, 2, 7, 3, 13, 8, 19,
-                                                     4, 25, 14, 28, 9, 34, 20, 56,
-                                                     5, 17, 26, 54, 15, 41, 29, 43,
-                                                     10, 31, 38, 35, 21, 45, 49, 57,
-                                                     63, 6, 12, 18, 24, 27, 33, 55,
-                                                     16, 53, 40, 42, 30, 37, 44, 48,
-                                                     62, 11, 23, 32, 52, 39, 36, 47,
-                                                     61, 22, 51, 46, 60, 50, 59, 58 };
-            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
-#       endif
-    } else { /* 32 bits */
-#       if defined(_MSC_VER)
-            if (val != 0) {
-                unsigned long r;
-                _BitScanForward(&r, (U32)val);
-                return (unsigned)r;
-            } else {
-                /* Should not reach this code path */
-                __assume(0);
-            }
-#       elif defined(__GNUC__) && (__GNUC__ >= 3)
-            return __builtin_ctz((U32)val);
-#       else
-            static const int DeBruijnBytePos[32] = { 0, 1, 28, 2, 29, 14, 24, 3,
-                                                     30, 22, 20, 15, 25, 17, 4, 8,
-                                                     31, 27, 13, 23, 21, 19, 16, 7,
-                                                     26, 12, 18, 6, 11, 5, 10, 9 };
-            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
-#       endif
-    }
-}
+int ZSTD_seqToCodes(const seqStore_t* seqStorePtr);   /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
 
 
 /* ZSTD_invalidateRepCodes() :
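
Note: the ZSTD_highbit32() and ZSTD_countTrailingZeros() helpers removed above likewise do not disappear; the bit-scan utilities appear to be consolidated into the new common/bits.h added in this release (entry 5 in the file list, +200 lines). For reference, the core highbit operation reduces to a count-leading-zeros, roughly (GCC/Clang path only; the real header also handles MSVC and a software fallback):

    #include <assert.h>

    /* Index of the highest set bit, i.e. floor(log2(val)) for val != 0. */
    static unsigned highbit32(unsigned val)
    {
        assert(val != 0);
        return (unsigned)(__builtin_clz(val) ^ 31);
    }
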

data/ext/zstdruby/libzstd/common/zstd_trace.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
@@ -21,13 +21,13 @@ extern "C" {
  * For now, enable conservatively:
  * - Only GNUC
  * - Only ELF
- * - Only x86-64 and i386
+ * - Only x86-64, i386 and aarch64
  * Also, explicitly disable on platforms known not to work so they aren't
  * forgotten in the future.
  */
 #if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && \
     defined(__GNUC__) && defined(__ELF__) && \
-    (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86)) && \
+    (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86) || defined(__aarch64__)) && \
     !defined(__APPLE__) && !defined(_WIN32) && !defined(__MINGW32__) && \
     !defined(__CYGWIN__) && !defined(_AIX)
 #  define ZSTD_HAVE_WEAK_SYMBOLS 1

data/ext/zstdruby/libzstd/compress/clevels.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the