numcodecs 0.14.1__cp313-cp313-macosx_10_13_x86_64.whl → 0.15.0__cp313-cp313-macosx_10_13_x86_64.whl

This diff shows the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.



Files changed (42)
  1. numcodecs/__init__.py +43 -46
  2. numcodecs/_shuffle.cpython-313-darwin.so +0 -0
  3. numcodecs/abc.py +1 -1
  4. numcodecs/astype.py +2 -6
  5. numcodecs/base64.py +1 -2
  6. numcodecs/blosc.cpython-313-darwin.so +0 -0
  7. numcodecs/categorize.py +7 -10
  8. numcodecs/compat_ext.cpython-313-darwin.so +0 -0
  9. numcodecs/delta.py +3 -10
  10. numcodecs/fixedscaleoffset.py +8 -10
  11. numcodecs/fletcher32.cpython-313-darwin.so +0 -0
  12. numcodecs/gzip.py +1 -3
  13. numcodecs/jenkins.cpython-313-darwin.so +0 -0
  14. numcodecs/json.py +11 -11
  15. numcodecs/lz4.cpython-313-darwin.so +0 -0
  16. numcodecs/lzma.py +1 -1
  17. numcodecs/msgpacks.py +6 -6
  18. numcodecs/ndarray_like.py +2 -2
  19. numcodecs/pcodec.py +59 -29
  20. numcodecs/pickles.py +1 -1
  21. numcodecs/quantize.py +7 -9
  22. numcodecs/registry.py +1 -1
  23. numcodecs/tests/common.py +3 -4
  24. numcodecs/tests/test_blosc.py +9 -11
  25. numcodecs/tests/test_lzma.py +1 -1
  26. numcodecs/tests/test_pcodec.py +18 -8
  27. numcodecs/tests/test_registry.py +2 -2
  28. numcodecs/tests/test_shuffle.py +2 -4
  29. numcodecs/tests/test_vlen_bytes.py +3 -0
  30. numcodecs/tests/test_zarr3.py +65 -37
  31. numcodecs/version.py +2 -2
  32. numcodecs/vlen.cpython-313-darwin.so +0 -0
  33. numcodecs/zarr3.py +44 -22
  34. numcodecs/zfpy.py +1 -1
  35. numcodecs/zstd.cpython-313-darwin.so +0 -0
  36. {numcodecs-0.14.1.dist-info → numcodecs-0.15.0.dist-info}/METADATA +4 -4
  37. numcodecs-0.15.0.dist-info/RECORD +78 -0
  38. {numcodecs-0.14.1.dist-info → numcodecs-0.15.0.dist-info}/WHEEL +1 -1
  39. numcodecs-0.14.1.dist-info/RECORD +0 -78
  40. {numcodecs-0.14.1.dist-info → numcodecs-0.15.0.dist-info}/LICENSE.txt +0 -0
  41. {numcodecs-0.14.1.dist-info → numcodecs-0.15.0.dist-info}/entry_points.txt +0 -0
  42. {numcodecs-0.14.1.dist-info → numcodecs-0.15.0.dist-info}/top_level.txt +0 -0
numcodecs/__init__.py CHANGED
@@ -36,41 +36,32 @@ from numcodecs.bz2 import BZ2
 
 register_codec(BZ2)
 
-with suppress(ImportError):
-    from numcodecs.lzma import LZMA
+from numcodecs.lzma import LZMA
 
-    register_codec(LZMA)
+register_codec(LZMA)
 
-with suppress(ImportError):
-    from numcodecs import blosc
-    from numcodecs.blosc import Blosc
-
-    register_codec(Blosc)
-    # initialize blosc
-    try:
-        ncores = multiprocessing.cpu_count()
-    except OSError:  # pragma: no cover
-        ncores = 1
-    blosc.init()
-    blosc.set_nthreads(min(8, ncores))
-    atexit.register(blosc.destroy)
+from numcodecs import blosc
+from numcodecs.blosc import Blosc
 
-with suppress(ImportError):
-    from numcodecs import zstd as zstd
-    from numcodecs.zstd import Zstd
+register_codec(Blosc)
+# initialize blosc
+try:
+    ncores = multiprocessing.cpu_count()
+except OSError:  # pragma: no cover
+    ncores = 1
+blosc._init()
+blosc.set_nthreads(min(8, ncores))
+atexit.register(blosc.destroy)
 
-    register_codec(Zstd)
+from numcodecs import zstd as zstd
+from numcodecs.zstd import Zstd
 
-with suppress(ImportError):
-    from numcodecs import lz4 as lz4
-    from numcodecs.lz4 import LZ4
+register_codec(Zstd)
 
-    register_codec(LZ4)
+from numcodecs import lz4 as lz4
+from numcodecs.lz4 import LZ4
 
-with suppress(ImportError):
-    from numcodecs.zfpy import ZFPY
-
-    register_codec(ZFPY)
+register_codec(LZ4)
 
 from numcodecs.astype import AsType
 
@@ -112,38 +103,44 @@ from numcodecs.bitround import BitRound
 
 register_codec(BitRound)
 
-with suppress(ImportError):
-    from numcodecs.msgpacks import MsgPack
-
-    register_codec(MsgPack)
-
 from numcodecs.checksum32 import CRC32, Adler32, JenkinsLookup3
 
 register_codec(CRC32)
 register_codec(Adler32)
 register_codec(JenkinsLookup3)
 
-with suppress(ImportError):
-    from numcodecs.checksum32 import CRC32C
-
-    register_codec(CRC32C)
-
 from numcodecs.json import JSON
 
 register_codec(JSON)
 
-with suppress(ImportError):
-    from numcodecs import vlen as vlen
-    from numcodecs.vlen import VLenArray, VLenBytes, VLenUTF8
+from numcodecs import vlen as vlen
+from numcodecs.vlen import VLenArray, VLenBytes, VLenUTF8
 
-    register_codec(VLenUTF8)
-    register_codec(VLenBytes)
-    register_codec(VLenArray)
+register_codec(VLenUTF8)
+register_codec(VLenBytes)
+register_codec(VLenArray)
 
 from numcodecs.fletcher32 import Fletcher32
 
 register_codec(Fletcher32)
 
-from numcodecs.pcodec import PCodec
+# Optional depenedencies
+with suppress(ImportError):
+    from numcodecs.zfpy import ZFPY
+
+    register_codec(ZFPY)
+
+with suppress(ImportError):
+    from numcodecs.msgpacks import MsgPack
+
+    register_codec(MsgPack)
+
+with suppress(ImportError):
+    from numcodecs.checksum32 import CRC32C
+
+    register_codec(CRC32C)
+
+with suppress(ImportError):
+    from numcodecs.pcodec import PCodec
 
-register_codec(PCodec)
+    register_codec(PCodec)
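
Net effect of this reorganisation: codecs that ship as compiled extensions in the wheel (Blosc, Zstd, LZ4, the VLen codecs, Fletcher32, ...) are now imported and registered unconditionally, while only the codecs with optional third-party dependencies (ZFPY, MsgPack, CRC32C, PCodec) remain wrapped in suppress(ImportError). A minimal sketch of how this surfaces at runtime (registry keys assumed to follow each codec's codec_id):

    from numcodecs.registry import codec_registry

    # compiled-in codecs are always registered on a normal wheel install
    print('blosc' in codec_registry)   # True
    # optional-dependency codecs are registered only if their import succeeds
    print('pcodec' in codec_registry)  # True only when the pcodec package is installed
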
numcodecs/_shuffle.cpython-313-darwin.so CHANGED (binary file)
numcodecs/abc.py CHANGED
@@ -84,7 +84,7 @@ class Codec(ABC):
         # override in sub-class if need special encoding of config values
 
         # setup config object
-        config = dict(id=self.codec_id)
+        config = {'id': self.codec_id}
 
         # by default, assume all non-private members are configuration
         # parameters - override this in sub-class if not the case
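
This is a behaviour-preserving rewrite: get_config() still starts from the codec id and then, per the comment above, copies every non-private attribute into the dict. A small illustrative sketch (the printed result is an assumption based on that default behaviour):

    from numcodecs import GZip

    print(GZip(level=5).get_config())  # expected: {'id': 'gzip', 'level': 5}
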
numcodecs/astype.py CHANGED
@@ -49,9 +49,7 @@ class AsType(Codec):
         arr = ensure_ndarray(buf).view(self.decode_dtype)
 
         # convert and copy
-        enc = arr.astype(self.encode_dtype)
-
-        return enc
+        return arr.astype(self.encode_dtype)
 
     def decode(self, buf, out=None):
         # normalise input
@@ -61,9 +59,7 @@ class AsType(Codec):
         dec = enc.astype(self.decode_dtype)
 
         # handle output
-        out = ndarray_copy(dec, out)
-
-        return out
+        return ndarray_copy(dec, out)
 
     def get_config(self):
         return {
numcodecs/base64.py CHANGED
@@ -13,8 +13,7 @@ class Base64(Codec):
         # normalise inputs
         buf = ensure_contiguous_ndarray(buf)
         # do compression
-        compressed = _base64.standard_b64encode(buf)
-        return compressed
+        return _base64.standard_b64encode(buf)
 
     def decode(self, buf, out=None):
         # normalise inputs
numcodecs/blosc.cpython-313-darwin.so CHANGED (binary file)
numcodecs/categorize.py CHANGED
@@ -80,18 +80,15 @@ class Categorize(Codec):
             dec[enc == (i + 1)] = label
 
         # handle output
-        dec = ndarray_copy(dec, out)
-
-        return dec
+        return ndarray_copy(dec, out)
 
     def get_config(self):
-        config = dict(
-            id=self.codec_id,
-            labels=self.labels,
-            dtype=self.dtype.str,
-            astype=self.astype.str,
-        )
-        return config
+        return {
+            'id': self.codec_id,
+            'labels': self.labels,
+            'dtype': self.dtype.str,
+            'astype': self.astype.str,
+        }
 
     def __repr__(self):
         # make sure labels part is not too long
numcodecs/compat_ext.cpython-313-darwin.so CHANGED (binary file)
numcodecs/delta.py CHANGED
@@ -63,12 +63,7 @@ class Delta(Codec):
         enc[0] = arr[0]
 
         # compute differences
-        # using np.subtract for in-place operations
-        if arr.dtype == bool:
-            np.not_equal(arr[1:], arr[:-1], out=enc[1:])
-        else:
-            np.subtract(arr[1:], arr[:-1], out=enc[1:])
-
+        enc[1:] = np.diff(arr)
         return enc
 
     def decode(self, buf, out=None):
@@ -85,13 +80,11 @@ class Delta(Codec):
         np.cumsum(enc, out=dec)
 
         # handle output
-        out = ndarray_copy(dec, out)
-
-        return out
+        return ndarray_copy(dec, out)
 
     def get_config(self):
         # override to handle encoding dtypes
-        return dict(id=self.codec_id, dtype=self.dtype.str, astype=self.astype.str)
+        return {'id': self.codec_id, 'dtype': self.dtype.str, 'astype': self.astype.str}
 
     def __repr__(self):
         r = f'{type(self).__name__}(dtype={self.dtype.str!r}'
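
A short numpy sketch of the round trip the rewritten encode step relies on (the codec's dtype/astype handling and the ndarray_copy output step are omitted); note that the removed branch special-cased boolean arrays via np.not_equal, which the np.diff path no longer does:

    import numpy as np

    arr = np.array([3, 4, 7, 11])
    enc = np.empty_like(arr)
    enc[0] = arr[0]          # first value stored as-is
    enc[1:] = np.diff(arr)   # then consecutive differences: [1, 3, 4]
    dec = np.cumsum(enc)     # decode: running sum restores [3, 4, 7, 11]
    assert (dec == arr).all()
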
numcodecs/fixedscaleoffset.py CHANGED
@@ -94,9 +94,7 @@ class FixedScaleOffset(Codec):
         enc = np.around(enc)
 
         # convert dtype
-        enc = enc.astype(self.astype, copy=False)
-
-        return enc
+        return enc.astype(self.astype, copy=False)
 
     def decode(self, buf, out=None):
         # interpret buffer as numpy array
@@ -116,13 +114,13 @@ class FixedScaleOffset(Codec):
 
     def get_config(self):
         # override to handle encoding dtypes
-        return dict(
-            id=self.codec_id,
-            scale=self.scale,
-            offset=self.offset,
-            dtype=self.dtype.str,
-            astype=self.astype.str,
-        )
+        return {
+            'id': self.codec_id,
+            'scale': self.scale,
+            'offset': self.offset,
+            'dtype': self.dtype.str,
+            'astype': self.astype.str,
+        }
 
     def __repr__(self):
         r = f'{type(self).__name__}(scale={self.scale}, offset={self.offset}, dtype={self.dtype.str!r}'
numcodecs/fletcher32.cpython-313-darwin.so CHANGED (binary file)
numcodecs/gzip.py CHANGED
@@ -28,9 +28,7 @@ class GZip(Codec):
         compressed = io.BytesIO()
         with _gzip.GzipFile(fileobj=compressed, mode='wb', compresslevel=self.level) as compressor:
             compressor.write(buf)
-        compressed = compressed.getvalue()
-
-        return compressed
+        return compressed.getvalue()
 
     # noinspection PyMethodMayBeStatic
     def decode(self, buf, out=None):
numcodecs/jenkins.cpython-313-darwin.so CHANGED (binary file)
numcodecs/json.py CHANGED
@@ -52,17 +52,17 @@ class JSON(Codec):
         else:
             separators = ', ', ': '
         separators = tuple(separators)
-        self._encoder_config = dict(
-            skipkeys=skipkeys,
-            ensure_ascii=ensure_ascii,
-            check_circular=check_circular,
-            allow_nan=allow_nan,
-            indent=indent,
-            separators=separators,
-            sort_keys=sort_keys,
-        )
+        self._encoder_config = {
+            'skipkeys': skipkeys,
+            'ensure_ascii': ensure_ascii,
+            'check_circular': check_circular,
+            'allow_nan': allow_nan,
+            'indent': indent,
+            'separators': separators,
+            'sort_keys': sort_keys,
+        }
         self._encoder = _json.JSONEncoder(**self._encoder_config)
-        self._decoder_config = dict(strict=strict)
+        self._decoder_config = {'strict': strict}
         self._decoder = _json.JSONDecoder(**self._decoder_config)
 
     def encode(self, buf):
@@ -89,7 +89,7 @@ class JSON(Codec):
         return dec
 
     def get_config(self):
-        config = dict(id=self.codec_id, encoding=self._text_encoding)
+        config = {'id': self.codec_id, 'encoding': self._text_encoding}
         config.update(self._encoder_config)
         config.update(self._decoder_config)
         return config
numcodecs/lz4.cpython-313-darwin.so CHANGED (binary file)
numcodecs/lzma.py CHANGED
@@ -5,7 +5,7 @@ _lzma: Optional[ModuleType] = None
 try:
     import lzma as _lzma
 except ImportError:  # pragma: no cover
-    try:
+    try:  # noqa: SIM105
         from backports import lzma as _lzma  # type: ignore[no-redef]
     except ImportError:
         pass
numcodecs/msgpacks.py CHANGED
@@ -75,12 +75,12 @@ class MsgPack(Codec):
         return dec
 
     def get_config(self):
-        return dict(
-            id=self.codec_id,
-            raw=self.raw,
-            use_single_float=self.use_single_float,
-            use_bin_type=self.use_bin_type,
-        )
+        return {
+            'id': self.codec_id,
+            'raw': self.raw,
+            'use_single_float': self.use_single_float,
+            'use_bin_type': self.use_bin_type,
+        }
 
     def __repr__(self):
         return f'MsgPack(raw={self.raw!r}, use_bin_type={self.use_bin_type!r}, use_single_float={self.use_single_float!r})'
numcodecs/ndarray_like.py CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, ClassVar, Optional, Protocol, runtime_checkable
+from typing import Any, ClassVar, Protocol, runtime_checkable
 
 
 class _CachedProtocolMeta(Protocol.__class__):  # type: ignore[name-defined]
@@ -53,7 +53,7 @@ class NDArrayLike(Protocol, metaclass=_CachedProtocolMeta):
 
     def __setitem__(self, key, value): ...  # pragma: no cover
 
-    def tobytes(self, order: Optional[str] = ...) -> bytes: ...  # pragma: no cover
+    def tobytes(self, order: str | None = ...) -> bytes: ...  # pragma: no cover
 
     def reshape(self, *shape: int, order: str = ...) -> "NDArrayLike": ...  # pragma: no cover
 
numcodecs/pcodec.py CHANGED
@@ -1,13 +1,8 @@
-from typing import Literal, Optional
+from typing import Literal
 
 from numcodecs.abc import Codec
 from numcodecs.compat import ensure_contiguous_ndarray
-
-try:
-    from pcodec import ChunkConfig, ModeSpec, PagingSpec, standalone
-except ImportError:  # pragma: no cover
-    standalone = None
-
+from pcodec import ChunkConfig, DeltaSpec, ModeSpec, PagingSpec, standalone
 
 DEFAULT_MAX_PAGE_N = 262144
 
@@ -27,14 +22,21 @@ class PCodec(Codec):
     level : int
         A compression level from 0-12, where 12 take the longest and compresses
         the most.
-    delta_encoding_order : init or None
-        Either a delta encoding level from 0-7 or None. If set to None, pcodec
-        will try to infer the optimal delta encoding order.
-    mode_spec : {'auto', 'classic'}
+    mode_spec : {"auto", "classic"}
         Configures whether Pcodec should try to infer the best "mode" or
         structure of the data (e.g. approximate multiples of 0.1) to improve
         compression ratio, or skip this step and just use the numbers as-is
-        (Classic mode).
+        (Classic mode). Note that the "try*" specs are not currently supported.
+    delta_spec : {"auto", "none", "try_consecutive", "try_lookback"}
+        Configures the delta encoding strategy. By default, uses "auto" which
+        will try to infer the best encoding order.
+    paging_spec : {"equal_pages_up_to"}
+        Configures the paging strategy. Only "equal_pages_up_to" is currently
+        supported.
+    delta_encoding_order : int or None
+        Explicit delta encoding level from 0-7. Only valid if delta_spec is
+        "try_consecutive" or "auto" (to support backwards compatibility with
+        older versions of this codec).
     equal_pages_up_to : int
         Divide the chunk into equal pages of up to this many numbers.
     """
@@ -44,39 +46,67 @@ class PCodec(Codec):
     def __init__(
         self,
         level: int = 8,
-        delta_encoding_order: Optional[int] = None,
-        equal_pages_up_to: int = 262144,
-        # TODO one day, add support for the Try* mode specs
-        mode_spec: Literal['auto', 'classic'] = 'auto',
+        *,
+        mode_spec: Literal["auto", "classic"] = "auto",
+        delta_spec: Literal["auto", "none", "try_consecutive", "try_lookback"] = "auto",
+        paging_spec: Literal["equal_pages_up_to"] = "equal_pages_up_to",
+        delta_encoding_order: int | None = None,
+        equal_pages_up_to: int = DEFAULT_MAX_PAGE_N,
     ):
-        if standalone is None:  # pragma: no cover
-            raise ImportError("pcodec must be installed to use the PCodec codec.")
-
         # note that we use `level` instead of `compression_level` to
         # match other codecs
         self.level = level
+        self.mode_spec = mode_spec
+        self.delta_spec = delta_spec
+        self.paging_spec = paging_spec
        self.delta_encoding_order = delta_encoding_order
        self.equal_pages_up_to = equal_pages_up_to
-        self.mode_spec = mode_spec
-
-    def encode(self, buf):
-        buf = ensure_contiguous_ndarray(buf)
 
+    def _get_chunk_config(self):
         match self.mode_spec:
-            case 'auto':
+            case "auto":
                 mode_spec = ModeSpec.auto()
-            case 'classic':
+            case "classic":
                 mode_spec = ModeSpec.classic()
             case _:
-                raise ValueError(f"unknown value for mode_spec: {self.mode_spec}")
-        paging_spec = PagingSpec.equal_pages_up_to(self.equal_pages_up_to)
+                raise ValueError(f"mode_spec {self.mode_spec} is not supported")
+
+        if self.delta_encoding_order is not None and self.delta_spec == "auto":
+            # backwards compat for before delta_spec was introduced
+            delta_spec = DeltaSpec.try_consecutive(self.delta_encoding_order)
+        elif self.delta_encoding_order is not None and self.delta_spec != "try_consecutive":
+            raise ValueError(
+                "delta_encoding_order can only be set for delta_spec='try_consecutive'"
+            )
+        else:
+            match self.delta_spec:
+                case "auto":
+                    delta_spec = DeltaSpec.auto()
+                case "none":
+                    delta_spec = DeltaSpec.none()
+                case "try_consecutive":
+                    delta_spec = DeltaSpec.try_consecutive(self.delta_encoding_order)
+                case "try_lookback":
+                    delta_spec = DeltaSpec.try_lookback()
+                case _:
+                    raise ValueError(f"delta_spec {self.delta_spec} is not supported")
+
+        match self.paging_spec:
+            case "equal_pages_up_to":
+                paging_spec = PagingSpec.equal_pages_up_to(self.equal_pages_up_to)
+            case _:
+                raise ValueError(f"paging_spec {self.paging_spec} is not supported")
 
-        config = ChunkConfig(
+        return ChunkConfig(
             compression_level=self.level,
-            delta_encoding_order=self.delta_encoding_order,
+            delta_spec=delta_spec,
             mode_spec=mode_spec,
             paging_spec=paging_spec,
         )
+
+    def encode(self, buf):
+        buf = ensure_contiguous_ndarray(buf)
+        config = self._get_chunk_config()
         return standalone.simple_compress(buf, config)
 
     def decode(self, buf, out=None):
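
Taken together, PCodec's configuration is now keyword-only and ChunkConfig construction is deferred to the new _get_chunk_config() helper. A hedged usage sketch based on the signature above (assumes the optional pcodec dependency is installed):

    import numpy as np
    from numcodecs import PCodec

    data = np.linspace(0, 1, 10_000)

    # delta_encoding_order is only accepted together with
    # delta_spec='try_consecutive' (or the default 'auto', kept for
    # backwards compatibility with the old constructor)
    codec = PCodec(level=8, mode_spec='classic', delta_spec='try_consecutive', delta_encoding_order=2)
    enc = codec.encode(data)
    dec = codec.decode(enc)
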
numcodecs/pickles.py CHANGED
@@ -49,7 +49,7 @@ class Pickle(Codec):
         return dec
 
     def get_config(self):
-        return dict(id=self.codec_id, protocol=self.protocol)
+        return {'id': self.codec_id, 'protocol': self.protocol}
 
     def __repr__(self):
         return f'Pickle(protocol={self.protocol})'
numcodecs/quantize.py CHANGED
@@ -73,9 +73,7 @@ class Quantize(Codec):
         enc = np.around(scale * arr) / scale
 
         # cast dtype
-        enc = enc.astype(self.astype, copy=False)
-
-        return enc
+        return enc.astype(self.astype, copy=False)
 
     def decode(self, buf, out=None):
         # filter is lossy, decoding is no-op
@@ -85,12 +83,12 @@ class Quantize(Codec):
 
     def get_config(self):
         # override to handle encoding dtypes
-        return dict(
-            id=self.codec_id,
-            digits=self.digits,
-            dtype=self.dtype.str,
-            astype=self.astype.str,
-        )
+        return {
+            'id': self.codec_id,
+            'digits': self.digits,
+            'dtype': self.dtype.str,
+            'astype': self.astype.str,
+        }
 
     def __repr__(self):
         r = f'{type(self).__name__}(digits={self.digits}, dtype={self.dtype.str!r}'
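
For reference, the rounding kept as context above is the whole of Quantize's lossy step. A tiny sketch (deriving scale as 10**digits is an assumption about the codec's internals; it is not shown in this hunk):

    import numpy as np

    digits = 2
    scale = 10.0 ** digits                 # assumed: scale derived from `digits`
    arr = np.array([0.1234, 9.8765])
    enc = np.around(scale * arr) / scale   # the step shown above -> [0.12, 9.88]
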
numcodecs/registry.py CHANGED
@@ -8,7 +8,7 @@ from numcodecs.abc import Codec
 
 logger = logging.getLogger("numcodecs")
 codec_registry: dict[str, Codec] = {}
-entries: dict[str, "EntryPoints"] = {}
+entries: dict[str, EntryPoints] = {}
 
 
 def run_entrypoints():
numcodecs/tests/common.py CHANGED
@@ -7,8 +7,7 @@ import numpy as np
 import pytest
 from numpy.testing import assert_array_almost_equal, assert_array_equal
 
-# star import needed for repr tests so eval finds names
-from numcodecs import *  # noqa: F403
+from numcodecs import *  # noqa: F403 # for eval to find names in repr tests
 from numcodecs.compat import ensure_bytes, ensure_ndarray
 from numcodecs.registry import get_codec
 
@@ -19,9 +18,9 @@ greetings = [
     'Hei maailma!',
     'Xin chào thế giới',
     'Njatjeta Botë!',
-    'Γεια σου κόσμε!',
+    'Γεια σου κόσμε!',  # noqa: RUF001
     'こんにちは世界',
-    '世界,你好!',
+    '世界,你好!',  # noqa: RUF001
     'Helló, világ!',
     'Zdravo svete!',
     'เฮลโลเวิลด์',
numcodecs/tests/test_blosc.py CHANGED
@@ -124,12 +124,12 @@ def test_compress_blocksize_default(use_threads):
 
     # default blocksize
     enc = blosc.compress(arr, b'lz4', 1, Blosc.NOSHUFFLE)
-    _, _, blocksize = blosc.cbuffer_sizes(enc)
+    _, _, blocksize = blosc._cbuffer_sizes(enc)
     assert blocksize > 0
 
     # explicit default blocksize
     enc = blosc.compress(arr, b'lz4', 1, Blosc.NOSHUFFLE, 0)
-    _, _, blocksize = blosc.cbuffer_sizes(enc)
+    _, _, blocksize = blosc._cbuffer_sizes(enc)
     assert blocksize > 0
 
 
@@ -140,7 +140,7 @@ def test_compress_blocksize(use_threads, bs):
     blosc.use_threads = use_threads
 
     enc = blosc.compress(arr, b'lz4', 1, Blosc.NOSHUFFLE, bs)
-    _, _, blocksize = blosc.cbuffer_sizes(enc)
+    _, _, blocksize = blosc._cbuffer_sizes(enc)
     assert blocksize == bs
 
 
@@ -174,7 +174,7 @@ def test_compress_metainfo(dtype, use_threads):
     blosc.use_threads = use_threads
     for cname in blosc.list_compressors():
         enc = blosc.compress(arr, cname.encode(), 1, shuffle)
-        typesize, did_shuffle, _ = blosc.cbuffer_metainfo(enc)
+        typesize, did_shuffle, _ = blosc._cbuffer_metainfo(enc)
         assert typesize == arr.dtype.itemsize
         assert did_shuffle == shuffle
 
@@ -186,7 +186,7 @@ def test_compress_autoshuffle(use_threads):
     blosc.use_threads = use_threads
     for cname in blosc.list_compressors():
         enc = blosc.compress(varr, cname.encode(), 1, Blosc.AUTOSHUFFLE)
-        typesize, did_shuffle, _ = blosc.cbuffer_metainfo(enc)
+        typesize, did_shuffle, _ = blosc._cbuffer_metainfo(enc)
         assert typesize == varr.dtype.itemsize
         if typesize == 1:
             assert did_shuffle == Blosc.BITSHUFFLE
@@ -199,12 +199,12 @@ def test_config_blocksize():
     # explicitly stated
 
     # blocksize not stated
-    config = dict(cname='lz4', clevel=1, shuffle=Blosc.SHUFFLE)
+    config = {"cname": 'lz4', "clevel": 1, "shuffle": Blosc.SHUFFLE}
     codec = Blosc.from_config(config)
     assert codec.blocksize == 0
 
     # blocksize stated
-    config = dict(cname='lz4', clevel=1, shuffle=Blosc.SHUFFLE, blocksize=2**8)
+    config = {"cname": 'lz4', "clevel": 1, "shuffle": Blosc.SHUFFLE, "blocksize": 2**8}
     codec = Blosc.from_config(config)
     assert codec.blocksize == 2**8
 
@@ -215,14 +215,12 @@ def test_backwards_compatibility():
 
 def _encode_worker(data):
     compressor = Blosc(cname='zlib', clevel=9, shuffle=Blosc.SHUFFLE)
-    enc = compressor.encode(data)
-    return enc
+    return compressor.encode(data)
 
 
 def _decode_worker(enc):
     compressor = Blosc()
-    data = compressor.decode(enc)
-    return data
+    return compressor.decode(enc)
 
 
 @pytest.mark.parametrize('pool', [Pool, ThreadPool])
numcodecs/tests/test_lzma.py CHANGED
@@ -29,7 +29,7 @@ codecs = [
     LZMA(preset=1),
     LZMA(preset=5),
     LZMA(preset=9),
-    LZMA(format=_lzma.FORMAT_RAW, filters=[dict(id=_lzma.FILTER_LZMA2, preset=1)]),
+    LZMA(format=_lzma.FORMAT_RAW, filters=[{"id": _lzma.FILTER_LZMA2, "preset": 1}]),
 ]
 
 