zstd-ruby 1.5.4.0 → 1.5.5.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 5025a83454e6841492b428638df26d5bbacfae564e3e0dcaabce5955e2bd3688
- data.tar.gz: f50bf1bb09958e116917344fddaa8bf1d45088894769d791bbe3664a74a3b907
+ metadata.gz: 76dbffc6a0a13fccd92ea93abd90e33c4a26ab2ac2972877b7928a515e37033e
+ data.tar.gz: 21e3eba574ac94d9f34ac0d33732435ed3fd2373e9db921702a74d2a2c9d606a
  SHA512:
- metadata.gz: c754293689a65269f3f262605ad07cd72c0e162dde804dab2d0e144b67c251cbeaf5306db451ac15878388113617d09aaef5c065349c893b233804d6f0883812
- data.tar.gz: f2bc576e57766984c1d81e05f69a499461ffc437383f084d6cd83f22f3621f368a9ee668ad786165d537780112b42ade0c7d3e43b8fb35ac77634fbbad12d5c8
+ metadata.gz: 4f8c40ad6eaa9b014467fc57651f857713767f7930b2e7e2060c50ff58d05262416d0e9cb1427f440298af577eeef5247daec1866a55003ca6d9b2e2ea6212de
+ data.tar.gz: 5836f8061f7588081df400bf0705fcc74003029ec9ef9225e43f5ba5a0b19ec747af71cf0e34f150212a5efd90b9e1ea5ebe49a7231297e3f2ab8080bbf8247d
data/README.md CHANGED
@@ -1,5 +1,5 @@
  [![Gem Version](https://badge.fury.io/rb/zstd-ruby.svg)](https://badge.fury.io/rb/zstd-ruby)
- ![Build Status](https://github.com/SpringMT/zstd-ruby/actions/workflows/ruby.yml/badge.svg?branch=master)
+ ![Build Status](https://github.com/SpringMT/zstd-ruby/actions/workflows/ruby.yml/badge.svg?branch=main)
 
  # zstd-ruby
 
@@ -10,7 +10,7 @@ See https://github.com/facebook/zstd
  Fork from https://github.com/jarredholman/ruby-zstd.
 
  ## Zstd version
- v1.5.2 (https://github.com/facebook/zstd/tree/v1.5.2)
+ v1.5.5 (https://github.com/facebook/zstd/tree/v1.5.5)
 
  ## Installation
 
@@ -87,6 +87,14 @@ result << stream.decompress(cstr[0, 10])
  result << stream.decompress(cstr[10..-1])
  ```
 
+ ### Skippable frame
+
+ ```ruby
+ compressed_data_with_skippable_frame = Zstd.write_skippable_frame(compressed_data, "sample data")
+
+ Zstd.read_skippable_frame(compressed_data_with_skippable_frame)
+ # => "sample data"
+ ```
 
  ## JRuby
  This gem does not support JRuby.
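Editor's note: the new `Zstd.write_skippable_frame` / `Zstd.read_skippable_frame` methods expose zstd's skippable frames — self-delimiting frames that carry arbitrary user metadata and are ignored during decompression. They presumably map onto libzstd's `ZSTD_writeSkippableFrame` / `ZSTD_readSkippableFrame`, experimental declarations that are only visible when `ZSTD_STATIC_LINKING_ONLY` is defined (hence the `extconf.rb` change below). A minimal C sketch of the underlying calls; buffer sizes are illustrative:

```c
/* Sketch only: write a skippable frame carrying "sample data", then read
 * the payload back. Requires zstd's experimental (static-only) API. */
#define ZSTD_STATIC_LINKING_ONLY   /* exposes the skippable-frame functions */
#include <zstd.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char payload[] = "sample data";
    unsigned char frame[64];   /* skippable frame = 8-byte header + payload */
    char out[64];
    unsigned magic = 0;

    /* Emit a standalone skippable frame (magic variant 0..15). */
    size_t const fsize = ZSTD_writeSkippableFrame(frame, sizeof(frame),
                                                  payload, strlen(payload), 0);
    if (ZSTD_isError(fsize)) return 1;

    /* Extract the payload back out of the frame. */
    size_t const psize = ZSTD_readSkippableFrame(out, sizeof(out), &magic,
                                                 frame, fsize);
    if (ZSTD_isError(psize)) return 1;
    printf("%.*s\n", (int)psize, out);   /* => sample data */
    return 0;
}
```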
data/ext/zstdruby/extconf.rb CHANGED
@@ -1,6 +1,6 @@
  require "mkmf"
 
- $CFLAGS = '-I. -O3 -std=c99'
+ $CFLAGS = '-I. -O3 -std=c99 -DZSTD_STATIC_LINKING_ONLY'
  $CPPFLAGS += " -fdeclspec" if CONFIG['CXX'] =~ /clang/
 
  Dir.chdir File.expand_path('..', __FILE__) do
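Defining `ZSTD_STATIC_LINKING_ONLY` for every compilation unit makes zstd's experimental, ABI-unstable declarations (including the skippable-frame functions used above) visible to the extension. The gate in `zstd.h` looks roughly like this (abridged sketch, not the verbatim header):

```c
/* Abridged illustration of how zstd.h gates its experimental section. */
#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
/* ... experimental declarations, e.g.: */
ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
                                                   const void* src, size_t srcSize,
                                                   unsigned magicVariant);
#endif
```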
data/ext/zstdruby/libzstd/common/allocations.h ADDED
@@ -0,0 +1,55 @@
+ /*
+  * Copyright (c) Meta Platforms, Inc. and affiliates.
+  * All rights reserved.
+  *
+  * This source code is licensed under both the BSD-style license (found in the
+  * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+  * in the COPYING file in the root directory of this source tree).
+  * You may select, at your option, one of the above-listed licenses.
+  */
+
+ /* This file provides custom allocation primitives
+  */
+
+ #define ZSTD_DEPS_NEED_MALLOC
+ #include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
+
+ #include "mem.h" /* MEM_STATIC */
+ #define ZSTD_STATIC_LINKING_ONLY
+ #include "../zstd.h" /* ZSTD_customMem */
+
+ #ifndef ZSTD_ALLOCATIONS_H
+ #define ZSTD_ALLOCATIONS_H
+
+ /* custom memory allocation functions */
+
+ MEM_STATIC void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
+ {
+     if (customMem.customAlloc)
+         return customMem.customAlloc(customMem.opaque, size);
+     return ZSTD_malloc(size);
+ }
+
+ MEM_STATIC void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
+ {
+     if (customMem.customAlloc) {
+         /* calloc implemented as malloc+memset;
+          * not as efficient as calloc, but next best guess for custom malloc */
+         void* const ptr = customMem.customAlloc(customMem.opaque, size);
+         ZSTD_memset(ptr, 0, size);
+         return ptr;
+     }
+     return ZSTD_calloc(1, size);
+ }
+
+ MEM_STATIC void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
+ {
+     if (ptr!=NULL) {
+         if (customMem.customFree)
+             customMem.customFree(customMem.opaque, ptr);
+         else
+             ZSTD_free(ptr);
+     }
+ }
+
+ #endif /* ZSTD_ALLOCATIONS_H */
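`allocations.h` is new in upstream zstd 1.5.5: the custom-allocation helpers move out of `zstd_common.c` (see the removal further down) into a `MEM_STATIC` header so every translation unit can inline them. For context, callers inject an allocator through `ZSTD_customMem`; a sketch using the experimental `ZSTD_createCCtx_advanced`, where the counting allocator is a hypothetical example, not part of zstd:

```c
/* Sketch: routing zstd's internal allocations through a custom allocator.
 * ZSTD_createCCtx_advanced is part of zstd's experimental (static-only) API;
 * countingAlloc/countingFree are hypothetical helpers for illustration. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdio.h>
#include <stdlib.h>

static size_t allocated = 0;

static void* countingAlloc(void* opaque, size_t size)
{
    (void)opaque;
    allocated += size;   /* crude accounting, for demonstration only */
    return malloc(size);
}

static void countingFree(void* opaque, void* address)
{
    (void)opaque;
    free(address);
}

int main(void)
{
    ZSTD_customMem const mem = { countingAlloc, countingFree, NULL };
    ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(mem);
    if (cctx == NULL) return 1;
    printf("context allocation requested %zu bytes\n", allocated);
    ZSTD_freeCCtx(cctx);
    return 0;
}
```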
data/ext/zstdruby/libzstd/common/bits.h CHANGED
@@ -17,7 +17,7 @@ MEM_STATIC unsigned ZSTD_countTrailingZeros32_fallback(U32 val)
  {
      assert(val != 0);
      {
-         static const int DeBruijnBytePos[32] = {0, 1, 28, 2, 29, 14, 24, 3,
+         static const U32 DeBruijnBytePos[32] = {0, 1, 28, 2, 29, 14, 24, 3,
                                                  30, 22, 20, 15, 25, 17, 4, 8,
                                                  31, 27, 13, 23, 21, 19, 16, 7,
                                                  26, 12, 18, 6, 11, 5, 10, 9};
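Only the table's element type changes here (`int` → `U32`), but the surrounding fallback is worth a note: when no count-trailing-zeros intrinsic is available, zstd isolates the lowest set bit and multiplies by the De Bruijn constant `0x077CB531`, whose top five product bits index the table. A standalone demonstration of the trick, independent of zstd's headers:

```c
/* Standalone demonstration of the De Bruijn count-trailing-zeros trick
 * used by ZSTD_countTrailingZeros32_fallback (table copied from the diff). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static unsigned ctz32_debruijn(uint32_t val)
{
    static const uint32_t DeBruijnBytePos[32] = {0, 1, 28, 2, 29, 14, 24, 3,
                                                 30, 22, 20, 15, 25, 17, 4, 8,
                                                 31, 27, 13, 23, 21, 19, 16, 7,
                                                 26, 12, 18, 6, 11, 5, 10, 9};
    assert(val != 0);
    /* (val & -val) isolates the lowest set bit; the multiply then maps each
     * of the 32 possible one-hot values to a distinct top-5-bit pattern. */
    return DeBruijnBytePos[((val & (0u - val)) * 0x077CB531u) >> 27];
}

int main(void)
{
    printf("%u %u %u\n",
           ctz32_debruijn(1), ctz32_debruijn(8), ctz32_debruijn(0x80000000u));
    /* => 0 3 31 */
    return 0;
}
```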
@@ -30,7 +30,7 @@ MEM_STATIC unsigned ZSTD_countTrailingZeros32(U32 val)
      assert(val != 0);
  #   if defined(_MSC_VER)
  #       if STATIC_BMI2 == 1
-             return _tzcnt_u32(val);
+             return (unsigned)_tzcnt_u32(val);
  #       else
              if (val != 0) {
                  unsigned long r;
@@ -69,7 +69,7 @@ MEM_STATIC unsigned ZSTD_countLeadingZeros32(U32 val)
      assert(val != 0);
  #   if defined(_MSC_VER)
  #       if STATIC_BMI2 == 1
-             return _lzcnt_u32(val);
+             return (unsigned)_lzcnt_u32(val);
  #       else
              if (val != 0) {
                  unsigned long r;
@@ -92,7 +92,7 @@ MEM_STATIC unsigned ZSTD_countTrailingZeros64(U64 val)
      assert(val != 0);
  #   if defined(_MSC_VER) && defined(_WIN64)
  #       if STATIC_BMI2 == 1
-             return _tzcnt_u64(val);
+             return (unsigned)_tzcnt_u64(val);
  #       else
              if (val != 0) {
                  unsigned long r;
@@ -123,7 +123,7 @@ MEM_STATIC unsigned ZSTD_countLeadingZeros64(U64 val)
      assert(val != 0);
  #   if defined(_MSC_VER) && defined(_WIN64)
  #       if STATIC_BMI2 == 1
-             return _lzcnt_u64(val);
+             return (unsigned)_lzcnt_u64(val);
  #       else
              if (val != 0) {
                  unsigned long r;
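The four casts above address the same issue: on MSVC with `STATIC_BMI2`, `_tzcnt_u64` and `_lzcnt_u64` return `unsigned __int64`, so returning them from functions declared `unsigned` narrows implicitly and triggers conversion warnings; the explicit `(unsigned)` casts make the narrowing intentional, and are presumably applied to the 32-bit variants as well for uniformity.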
@@ -172,4 +172,29 @@ MEM_STATIC unsigned ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCo
      return 31 - ZSTD_countLeadingZeros32(val);
  }
 
+ /* ZSTD_rotateRight_*():
+  * Rotates a bitfield to the right by "count" bits.
+  * https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts
+  */
+ MEM_STATIC
+ U64 ZSTD_rotateRight_U64(U64 const value, U32 count) {
+     assert(count < 64);
+     count &= 0x3F; /* for fickle pattern recognition */
+     return (value >> count) | (U64)(value << ((0U - count) & 0x3F));
+ }
+
+ MEM_STATIC
+ U32 ZSTD_rotateRight_U32(U32 const value, U32 count) {
+     assert(count < 32);
+     count &= 0x1F; /* for fickle pattern recognition */
+     return (value >> count) | (U32)(value << ((0U - count) & 0x1F));
+ }
+
+ MEM_STATIC
+ U16 ZSTD_rotateRight_U16(U16 const value, U32 count) {
+     assert(count < 16);
+     count &= 0x0F; /* for fickle pattern recognition */
+     return (value >> count) | (U16)(value << ((0U - count) & 0x0F));
+ }
+
 
  #endif /* ZSTD_BITS_H */
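These rotate helpers are new in this release. The `count &= mask` steps look redundant next to the asserts, but per the upstream comments they help compilers recognize the idiom and emit a single rotate instruction; writing the left shift as `(0U - count) & mask` also keeps the expression well-defined when `count == 0`. A quick standalone check of the semantics:

```c
/* Sketch: verifying the ZSTD_rotateRight_U32-style semantics with stdint types. */
#include <stdint.h>
#include <stdio.h>

static uint32_t rotr32(uint32_t value, uint32_t count)
{
    count &= 0x1F;   /* keeps both shift amounts in range, even for count == 0 */
    return (value >> count) | (uint32_t)(value << ((0u - count) & 0x1F));
}

int main(void)
{
    printf("%08x\n", rotr32(0x00000001u, 1));  /* => 80000000 */
    printf("%08x\n", rotr32(0xdeadbeefu, 8));  /* => efdeadbe */
    return 0;
}
```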
data/ext/zstdruby/libzstd/common/bitstream.h CHANGED
@@ -396,7 +396,7 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
   * This function is safe, it guarantees it will not read beyond src buffer.
   * @return : status of `BIT_DStream_t` internal register.
   *           when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
- MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
+ MEM_STATIC FORCE_INLINE_ATTR BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
  {
      if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */
          return BIT_DStream_overflow;
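`FORCE_INLINE_ATTR` is zstd's portable force-inline macro; applying it to `BIT_reloadDStream` keeps this hot reload path inlined into the FSE/Huffman decoding loops regardless of the compiler's inlining heuristics. It expands along these lines (abridged sketch, not the verbatim `compiler.h`):

```c
/* Abridged sketch of the FORCE_INLINE_ATTR definition pattern. */
#if defined(__GNUC__)
#  define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
#  define FORCE_INLINE_ATTR __forceinline
#else
#  define FORCE_INLINE_ATTR
#endif
```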
data/ext/zstdruby/libzstd/common/compiler.h CHANGED
@@ -311,6 +311,10 @@ void __msan_poison(const volatile void *a, size_t size);
  /* Returns the offset of the first (at least partially) poisoned byte in the
     memory range, or -1 if the whole range is good. */
  intptr_t __msan_test_shadow(const volatile void *x, size_t size);
+
+ /* Print shadow and origin for the memory range to stderr in a human-readable
+    format. */
+ void __msan_print_shadow(const volatile void *x, size_t size);
  #endif
 
  #if ZSTD_ADDRESS_SANITIZER && !defined(ZSTD_ASAN_DONT_POISON_WORKSPACE)
data/ext/zstdruby/libzstd/common/pool.c CHANGED
@@ -10,9 +10,9 @@
 
 
  /* ======   Dependencies   ======= */
+ #include "../common/allocations.h" /* ZSTD_customCalloc, ZSTD_customFree */
  #include "zstd_deps.h" /* size_t */
  #include "debug.h" /* assert */
- #include "zstd_internal.h" /* ZSTD_customCalloc, ZSTD_customFree */
  #include "pool.h"
 
  /* ======   Compiler specifics   ====== */
data/ext/zstdruby/libzstd/common/threading.c CHANGED
@@ -47,7 +47,7 @@ static unsigned __stdcall worker(void *arg)
      void* (*start_routine)(void*);
      void* thread_arg;
 
-     /* Inialized thread_arg and start_routine and signal main thread that we don't need it
+     /* Initialized thread_arg and start_routine and signal main thread that we don't need it
       * to wait any longer.
       */
      {
data/ext/zstdruby/libzstd/common/zstd_common.c CHANGED
@@ -14,7 +14,6 @@
  * Dependencies
  ***************************************/
  #define ZSTD_DEPS_NEED_MALLOC
- #include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
  #include "error_private.h"
  #include "zstd_internal.h"
 
@@ -47,37 +46,3 @@ ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
  /*! ZSTD_getErrorString() :
   * provides error code string from enum */
  const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
-
-
-
- /*=**************************************************************
- *  Custom allocator
- ****************************************************************/
- void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
- {
-     if (customMem.customAlloc)
-         return customMem.customAlloc(customMem.opaque, size);
-     return ZSTD_malloc(size);
- }
-
- void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
- {
-     if (customMem.customAlloc) {
-         /* calloc implemented as malloc+memset;
-          * not as efficient as calloc, but next best guess for custom malloc */
-         void* const ptr = customMem.customAlloc(customMem.opaque, size);
-         ZSTD_memset(ptr, 0, size);
-         return ptr;
-     }
-     return ZSTD_calloc(1, size);
- }
-
- void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
- {
-     if (ptr!=NULL) {
-         if (customMem.customFree)
-             customMem.customFree(customMem.opaque, ptr);
-         else
-             ZSTD_free(ptr);
-     }
- }
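The block removed here is the same code that reappears above as `MEM_STATIC` functions in the new `allocations.h` (modulo the `MEM_STATIC` qualifiers); `zstd_common.c` now keeps only the version and error-string helpers, and the matching prototypes disappear from `zstd_internal.h` below.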
data/ext/zstdruby/libzstd/common/zstd_internal.h CHANGED
@@ -350,11 +350,6 @@ typedef struct {
  const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
  int ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
 
- /* custom memory allocation functions */
- void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem);
- void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem);
- void ZSTD_customFree(void* ptr, ZSTD_customMem customMem);
-
 
  /* ZSTD_invalidateRepCodes() :
   * ensures next compression will not use repcodes from previous block.