numcodecs 0.12.1__cp311-cp311-win_amd64.whl → 0.13.1__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (71)
  1. numcodecs/__init__.py +33 -7
  2. numcodecs/_shuffle.cp311-win_amd64.pyd +0 -0
  3. numcodecs/abc.py +4 -6
  4. numcodecs/astype.py +2 -10
  5. numcodecs/bitround.py +0 -1
  6. numcodecs/blosc.cp311-win_amd64.pyd +0 -0
  7. numcodecs/bz2.py +1 -4
  8. numcodecs/categorize.py +12 -18
  9. numcodecs/checksum32.py +3 -8
  10. numcodecs/compat.py +6 -12
  11. numcodecs/compat_ext.cp311-win_amd64.pyd +0 -0
  12. numcodecs/delta.py +4 -11
  13. numcodecs/fixedscaleoffset.py +5 -9
  14. numcodecs/fletcher32.cp311-win_amd64.pyd +0 -0
  15. numcodecs/gzip.py +1 -5
  16. numcodecs/jenkins.cp311-win_amd64.pyd +0 -0
  17. numcodecs/json.py +35 -18
  18. numcodecs/lz4.cp311-win_amd64.pyd +0 -0
  19. numcodecs/lzma.py +9 -10
  20. numcodecs/msgpacks.py +13 -12
  21. numcodecs/ndarray_like.py +11 -16
  22. numcodecs/packbits.py +1 -4
  23. numcodecs/pcodec.py +89 -0
  24. numcodecs/pickles.py +2 -3
  25. numcodecs/quantize.py +7 -11
  26. numcodecs/registry.py +4 -8
  27. numcodecs/shuffle.py +5 -7
  28. numcodecs/tests/common.py +42 -32
  29. numcodecs/tests/package_with_entrypoint/__init__.py +0 -1
  30. numcodecs/tests/test_astype.py +7 -7
  31. numcodecs/tests/test_base64.py +10 -13
  32. numcodecs/tests/test_bitround.py +0 -1
  33. numcodecs/tests/test_blosc.py +22 -22
  34. numcodecs/tests/test_bz2.py +12 -11
  35. numcodecs/tests/test_categorize.py +10 -14
  36. numcodecs/tests/test_checksum32.py +8 -7
  37. numcodecs/tests/test_compat.py +8 -13
  38. numcodecs/tests/test_delta.py +7 -5
  39. numcodecs/tests/test_entrypoints.py +0 -1
  40. numcodecs/tests/test_entrypoints_backport.py +8 -5
  41. numcodecs/tests/test_fixedscaleoffset.py +7 -6
  42. numcodecs/tests/test_fletcher32.py +13 -6
  43. numcodecs/tests/test_gzip.py +12 -11
  44. numcodecs/tests/test_jenkins.py +41 -42
  45. numcodecs/tests/test_json.py +17 -7
  46. numcodecs/tests/test_lz4.py +14 -15
  47. numcodecs/tests/test_lzma.py +15 -14
  48. numcodecs/tests/test_msgpacks.py +10 -8
  49. numcodecs/tests/test_packbits.py +6 -4
  50. numcodecs/tests/test_pcodec.py +80 -0
  51. numcodecs/tests/test_pickles.py +11 -8
  52. numcodecs/tests/test_quantize.py +7 -6
  53. numcodecs/tests/test_registry.py +4 -4
  54. numcodecs/tests/test_shuffle.py +34 -26
  55. numcodecs/tests/test_vlen_array.py +14 -16
  56. numcodecs/tests/test_vlen_bytes.py +13 -8
  57. numcodecs/tests/test_vlen_utf8.py +14 -9
  58. numcodecs/tests/test_zfpy.py +7 -17
  59. numcodecs/tests/test_zlib.py +12 -11
  60. numcodecs/tests/test_zstd.py +32 -16
  61. numcodecs/version.py +2 -2
  62. numcodecs/vlen.cp311-win_amd64.pyd +0 -0
  63. numcodecs/zfpy.py +35 -20
  64. numcodecs/zlib.py +1 -4
  65. numcodecs/zstd.cp311-win_amd64.pyd +0 -0
  66. {numcodecs-0.12.1.dist-info → numcodecs-0.13.1.dist-info}/METADATA +8 -5
  67. numcodecs-0.13.1.dist-info/RECORD +74 -0
  68. {numcodecs-0.12.1.dist-info → numcodecs-0.13.1.dist-info}/WHEEL +1 -1
  69. numcodecs-0.12.1.dist-info/RECORD +0 -72
  70. {numcodecs-0.12.1.dist-info → numcodecs-0.13.1.dist-info}/LICENSE.txt +0 -0
  71. {numcodecs-0.12.1.dist-info → numcodecs-0.13.1.dist-info}/top_level.txt +0 -0
numcodecs/msgpacks.py CHANGED
@@ -1,6 +1,5 @@
-import numpy as np
 import msgpack
-
+import numpy as np
 
 from .abc import Codec
 from .compat import ensure_contiguous_ndarray
@@ -58,8 +57,11 @@ class MsgPack(Codec):
         buf = np.asarray(buf, dtype=object)
         items = buf.tolist()
         items.extend((buf.dtype.str, buf.shape))
-        return msgpack.packb(items, use_bin_type=self.use_bin_type,
-                             use_single_float=self.use_single_float)
+        return msgpack.packb(
+            items,
+            use_bin_type=self.use_bin_type,
+            use_single_float=self.use_single_float,
+        )
 
     def decode(self, buf, out=None):
         buf = ensure_contiguous_ndarray(buf)
@@ -73,13 +75,12 @@ class MsgPack(Codec):
         return dec
 
     def get_config(self):
-        return dict(id=self.codec_id,
-                    raw=self.raw,
-                    use_single_float=self.use_single_float,
-                    use_bin_type=self.use_bin_type)
+        return dict(
+            id=self.codec_id,
+            raw=self.raw,
+            use_single_float=self.use_single_float,
+            use_bin_type=self.use_bin_type,
+        )
 
     def __repr__(self):
-        return (
-            'MsgPack(raw={!r}, use_bin_type={!r}, use_single_float={!r})'
-            .format(self.raw, self.use_bin_type, self.use_single_float)
-        )
+        return f'MsgPack(raw={self.raw!r}, use_bin_type={self.use_bin_type!r}, use_single_float={self.use_single_float!r})'
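For orientation, a minimal round-trip sketch of the codec touched above (a sketch only, assuming the optional msgpack dependency is installed; encode packs the items plus dtype.str and shape, and decode restores an object ndarray):

    import numpy as np
    from numcodecs import MsgPack

    codec = MsgPack()
    arr = np.array(['foo', 'bar', 'baz'], dtype=object)
    enc = codec.encode(arr)   # items + dtype.str + shape packed via msgpack.packb
    dec = codec.decode(enc)   # object ndarray reconstructed from the packed items
    assert dec.tolist() == arr.tolist()
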
numcodecs/ndarray_like.py CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional, Protocol, Tuple, Type, runtime_checkable
+from typing import Any, ClassVar, Optional, Protocol, runtime_checkable
 
 
 class _CachedProtocolMeta(Protocol.__class__):
@@ -10,7 +10,8 @@ class _CachedProtocolMeta(Protocol.__class__):
     This metaclass keeps an unbounded cache of the result of
     isinstance checks using the object's class as the cache key.
     """
-    _instancecheck_cache: Dict[Tuple[Type, Type], bool] = {}
+
+    _instancecheck_cache: ClassVar[dict[tuple[type, type], bool]] = {}
 
     def __instancecheck__(self, instance):
         key = (self, instance.__class__)
@@ -38,31 +39,25 @@ class FlagsObj(Protocol, metaclass=_CachedProtocolMeta):
 @runtime_checkable
 class NDArrayLike(Protocol, metaclass=_CachedProtocolMeta):
     dtype: DType
-    shape: Tuple[int, ...]
-    strides: Tuple[int, ...]
+    shape: tuple[int, ...]
+    strides: tuple[int, ...]
     ndim: int
     size: int
     itemsize: int
     nbytes: int
     flags: FlagsObj
 
-    def __len__(self) -> int:
-        ...  # pragma: no cover
+    def __len__(self) -> int: ...  # pragma: no cover
 
-    def __getitem__(self, key) -> Any:
-        ...  # pragma: no cover
+    def __getitem__(self, key) -> Any: ...  # pragma: no cover
 
-    def __setitem__(self, key, value):
-        ...  # pragma: no cover
+    def __setitem__(self, key, value): ...  # pragma: no cover
 
-    def tobytes(self, order: Optional[str] = ...) -> bytes:
-        ...  # pragma: no cover
+    def tobytes(self, order: Optional[str] = ...) -> bytes: ...  # pragma: no cover
 
-    def reshape(self, *shape: int, order: str = ...) -> "NDArrayLike":
-        ...  # pragma: no cover
+    def reshape(self, *shape: int, order: str = ...) -> "NDArrayLike": ...  # pragma: no cover
 
-    def view(self, dtype: DType = ...) -> "NDArrayLike":
-        ...  # pragma: no cover
+    def view(self, dtype: DType = ...) -> "NDArrayLike": ...  # pragma: no cover
 
 
 def is_ndarray_like(obj: object) -> bool:
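As context for the typing changes above, a rough sketch of the isinstance-caching idea behind _CachedProtocolMeta (illustrative only, with hypothetical names; the real metaclass keys its cache on the (protocol, instance class) pair exactly as shown in the diff):

    from typing import ClassVar, Protocol, runtime_checkable

    class _CachingProtocolMeta(type(Protocol)):
        # memoise the (potentially slow) runtime Protocol check per
        # (protocol class, concrete class) pair
        _cache: ClassVar[dict[tuple[type, type], bool]] = {}

        def __instancecheck__(self, instance):
            key = (self, instance.__class__)
            if key not in self._cache:
                self._cache[key] = super().__instancecheck__(instance)
            return self._cache[key]

    @runtime_checkable
    class HasDtype(Protocol, metaclass=_CachingProtocolMeta):
        dtype: object
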
numcodecs/packbits.py CHANGED
@@ -1,6 +1,5 @@
 import numpy as np
 
-
 from .abc import Codec
 from .compat import ensure_ndarray, ndarray_copy
 
@@ -34,7 +33,6 @@ class PackBits(Codec):
         pass
 
     def encode(self, buf):
-
         # normalise input
         arr = ensure_ndarray(buf).view(bool)
 
@@ -43,7 +41,7 @@ class PackBits(Codec):
 
         # determine size of packed data
         n = arr.size
-        n_bytes_packed = (n // 8)
+        n_bytes_packed = n // 8
         n_bits_leftover = n % 8
         if n_bits_leftover > 0:
             n_bytes_packed += 1
@@ -64,7 +62,6 @@ class PackBits(Codec):
         return enc
 
     def decode(self, buf, out=None):
-
         # normalise input
         enc = ensure_ndarray(buf).view('u1')
 
numcodecs/pcodec.py ADDED
@@ -0,0 +1,89 @@
+from typing import Literal, Optional
+
+import numcodecs
+import numcodecs.abc
+from numcodecs.compat import ensure_contiguous_ndarray
+
+try:
+    from pcodec import ChunkConfig, ModeSpec, PagingSpec, standalone
+except ImportError:  # pragma: no cover
+    standalone = None
+
+
+DEFAULT_MAX_PAGE_N = 262144
+
+
+class PCodec(numcodecs.abc.Codec):
+    """
+    PCodec (or pco, pronounced "pico") losslessly compresses and decompresses
+    numerical sequences with high compression ratio and fast speed.
+
+    See `PCodec Repo <https://github.com/mwlon/pcodec>`_ for more information.
+
+    PCodec supports only the following numerical dtypes: uint16, uint32, uint64,
+    int16, int32, int64, float16, float32, and float64.
+
+    Parameters
+    ----------
+    level : int
+        A compression level from 0-12, where 12 takes the longest and compresses
+        the most.
+    delta_encoding_order : int or None
+        Either a delta encoding level from 0-7 or None. If set to None, pcodec
+        will try to infer the optimal delta encoding order.
+    mode_spec : {'auto', 'classic'}
+        Configures whether Pcodec should try to infer the best "mode" or
+        structure of the data (e.g. approximate multiples of 0.1) to improve
+        compression ratio, or skip this step and just use the numbers as-is
+        (Classic mode).
+    equal_pages_up_to : int
+        Divide the chunk into equal pages of up to this many numbers.
+    """
+
+    codec_id = "pcodec"
+
+    def __init__(
+        self,
+        level: int = 8,
+        delta_encoding_order: Optional[int] = None,
+        equal_pages_up_to: int = 262144,
+        # TODO one day, add support for the Try* mode specs
+        mode_spec: Literal['auto', 'classic'] = 'auto',
+    ):
+        if standalone is None:  # pragma: no cover
+            raise ImportError("pcodec must be installed to use the PCodec codec.")
+
+        # note that we use `level` instead of `compression_level` to
+        # match other codecs
+        self.level = level
+        self.delta_encoding_order = delta_encoding_order
+        self.equal_pages_up_to = equal_pages_up_to
+        self.mode_spec = mode_spec
+
+    def encode(self, buf):
+        buf = ensure_contiguous_ndarray(buf)
+
+        match self.mode_spec:
+            case 'auto':
+                mode_spec = ModeSpec.auto()
+            case 'classic':
+                mode_spec = ModeSpec.classic()
+            case _:
+                raise ValueError(f"unknown value for mode_spec: {self.mode_spec}")
+        paging_spec = PagingSpec.equal_pages_up_to(self.equal_pages_up_to)
+
+        config = ChunkConfig(
+            compression_level=self.level,
+            delta_encoding_order=self.delta_encoding_order,
+            mode_spec=mode_spec,
+            paging_spec=paging_spec,
+        )
+        return standalone.simple_compress(buf, config)
+
+    def decode(self, buf, out=None):
+        if out is not None:
+            out = ensure_contiguous_ndarray(out)
+            standalone.simple_decompress_into(buf, out)
+            return out
+        else:
+            return standalone.simple_decompress(buf)
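A minimal usage sketch of the new codec, assuming the optional pcodec package is installed (the argument values shown are the defaults from the module above; the exact return type of decode comes from pcodec's standalone API):

    import numpy as np
    from numcodecs.pcodec import PCodec

    codec = PCodec(level=8, delta_encoding_order=None, mode_spec='auto')
    arr = np.linspace(0, 1, 10_000, dtype='float32')
    enc = codec.encode(arr)   # compressed pco bytes
    dec = codec.decode(enc)   # losslessly decompressed values
    np.testing.assert_array_equal(np.asarray(dec, dtype=arr.dtype), arr)
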
numcodecs/pickles.py CHANGED
@@ -49,8 +49,7 @@ class Pickle(Codec):
         return dec
 
     def get_config(self):
-        return dict(id=self.codec_id,
-                    protocol=self.protocol)
+        return dict(id=self.codec_id, protocol=self.protocol)
 
     def __repr__(self):
-        return 'Pickle(protocol=%s)' % self.protocol
+        return f'Pickle(protocol={self.protocol})'
numcodecs/quantize.py CHANGED
@@ -1,9 +1,7 @@
 import math
 
-
 import numpy as np
 
-
 from .abc import Codec
 from .compat import ensure_ndarray, ndarray_copy
 
@@ -60,19 +58,18 @@ class Quantize(Codec):
             raise ValueError('only floating point data types are supported')
 
     def encode(self, buf):
-
         # normalise input
         arr = ensure_ndarray(buf).view(self.dtype)
 
         # apply scaling
-        precision = 10. ** -self.digits
-        exp = math.log(precision, 10)
+        precision = 10.0**-self.digits
+        exp = math.log10(precision)
         if exp < 0:
             exp = int(math.floor(exp))
         else:
             exp = int(math.ceil(exp))
-        bits = math.ceil(math.log(10. ** -exp, 2))
-        scale = 2. ** bits
+        bits = math.ceil(math.log2(10.0**-exp))
+        scale = 2.0**bits
         enc = np.around(scale * arr) / scale
 
         # cast dtype
@@ -92,13 +89,12 @@ class Quantize(Codec):
             id=self.codec_id,
             digits=self.digits,
             dtype=self.dtype.str,
-            astype=self.astype.str
+            astype=self.astype.str,
         )
 
     def __repr__(self):
-        r = '%s(digits=%s, dtype=%r' % \
-            (type(self).__name__, self.digits, self.dtype.str)
+        r = f'{type(self).__name__}(digits={self.digits}, dtype={self.dtype.str!r}'
         if self.astype != self.dtype:
-            r += ', astype=%r' % self.astype.str
+            r += f', astype={self.astype.str!r}'
         r += ')'
         return r
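The refactor above only swaps math.log(x, 10) and math.log(x, 2) for math.log10 and math.log2 and tightens the ** spacing; the arithmetic is unchanged. Tracing the negative-exponent branch for digits=3 as a sanity check:

    import math

    digits = 3
    precision = 10.0**-digits                     # 0.001
    exp = int(math.floor(math.log10(precision)))  # -3 (the exp < 0 branch)
    bits = math.ceil(math.log2(10.0**-exp))       # ceil(log2(1000)) == 10
    scale = 2.0**bits                             # 1024.0
    # encode() then rounds to the nearest 1/scale: np.around(scale * arr) / scale
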
numcodecs/registry.py CHANGED
@@ -1,7 +1,8 @@
 """The registry module provides some simple convenience functions to enable
 applications to dynamically register and look-up codec classes."""
-from importlib.metadata import entry_points
+
 import logging
+from importlib.metadata import entry_points
 
 logger = logging.getLogger("numcodecs")
 codec_registry = {}
@@ -11,12 +12,7 @@ entries = {}
 def run_entrypoints():
     entries.clear()
     eps = entry_points()
-    if hasattr(eps, 'select'):
-        # If entry_points() has a select method, use that. Python 3.10+
-        entries.update({e.name: e for e in eps.select(group="numcodecs.codecs")})
-    else:
-        # Otherwise, fallback to using get
-        entries.update({e.name: e for e in eps.get("numcodecs.codecs", [])})
+    entries.update({e.name: e for e in eps.select(group="numcodecs.codecs")})
 
 
 run_entrypoints()
@@ -53,7 +49,7 @@ def get_codec(config):
        register_codec(cls, codec_id=codec_id)
     if cls:
         return cls.from_config(config)
-    raise ValueError('codec not available: %r' % codec_id)
+    raise ValueError(f'codec not available: {codec_id!r}')
 
 
 def register_codec(cls, codec_id=None):
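The removed branch was a fallback for entry_points() implementations without .select(), i.e. Python older than 3.10; the new code assumes .select() is always available. For context, a hypothetical third-party codec would be exposed through the same "numcodecs.codecs" entry-point group and resolved lazily by id (package and codec names below are made up):

    # In the third-party package's pyproject.toml (not part of numcodecs):
    #   [project.entry-points."numcodecs.codecs"]
    #   mycodec = "mypkg.codecs:MyCodec"
    from numcodecs.registry import get_codec

    # looked up via the entries dict populated by eps.select(group="numcodecs.codecs")
    codec = get_codec({"id": "mycodec"})
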
numcodecs/shuffle.py CHANGED
@@ -1,7 +1,8 @@
 import numpy as np
-from .compat import ensure_contiguous_ndarray
-from .abc import Codec
+
 from ._shuffle import _doShuffle, _doUnshuffle
+from .abc import Codec
+from .compat import ensure_contiguous_ndarray
 
 
 class Shuffle(Codec):
@@ -28,7 +29,7 @@ class Shuffle(Codec):
             out = ensure_contiguous_ndarray(out)
 
         if self.elementsize <= 1:
-            out.view(buf.dtype)[:len(buf)] = buf[:]  # no shuffling needed
+            out.view(buf.dtype)[: len(buf)] = buf[:]  # no shuffling needed
             return buf, out
 
         if buf.nbytes % self.elementsize != 0:
@@ -57,7 +58,4 @@ class Shuffle(Codec):
         return out
 
     def __repr__(self):
-        r = '%s(elementsize=%s)' % \
-            (type(self).__name__,
-             self.elementsize)
-        return r
+        return f'{type(self).__name__}(elementsize={self.elementsize})'
numcodecs/tests/common.py CHANGED
@@ -3,26 +3,32 @@ import json as _json
 import os
 from glob import glob
 
-
 import numpy as np
-from numpy.testing import assert_array_almost_equal, assert_array_equal
 import pytest
+from numpy.testing import assert_array_almost_equal, assert_array_equal
 
-
+# star import needed for repr tests so eval finds names
+from numcodecs import *  # noqa: F403
 from numcodecs.compat import ensure_bytes, ensure_ndarray
 from numcodecs.registry import get_codec
-# star import needed for repr tests so eval finds names
-from numcodecs import *  # noqa
 
-
-greetings = ['¡Hola mundo!', 'Hej Världen!', 'Servus Woid!', 'Hei maailma!',
-             'Xin chào thế giới', 'Njatjeta Botë!', 'Γεια σου κόσμε!',
-             'こんにちは世界', '世界,你好!', 'Helló, világ!', 'Zdravo svete!',
-             'เฮลโลเวิลด์']
+greetings = [
+    '¡Hola mundo!',
+    'Hej Världen!',
+    'Servus Woid!',
+    'Hei maailma!',
+    'Xin chào thế giới',
+    'Njatjeta Botë!',
+    'Γεια σου κόσμε!',
+    'こんにちは世界',
+    '世界,你好!',
+    'Helló, világ!',
+    'Zdravo svete!',
+    'เฮลโลเวิลด์',
+]
 
 
 def compare_arrays(arr, res, precision=None):
-
     # ensure numpy array with matching dtype
     res = ensure_ndarray(res).view(arr.dtype)
 
@@ -43,7 +49,6 @@ def compare_arrays(arr, res, precision=None):
 
 
 def check_encode_decode(arr, codec, precision=None):
-
     # N.B., watch out here with blosc compressor, if the itemsize of
     # the source buffer is different then the results of encoding
     # (i.e., compression) may be different. Hence we *do not* require that
@@ -112,7 +117,6 @@ def check_encode_decode(arr, codec, precision=None):
 
 
 def check_encode_decode_partial(arr, codec, precision=None):
-
     # N.B., watch out here with blosc compressor, if the itemsize of
     # the source buffer is different then the results of encoding
     # (i.e., compression) may be different. Hence we *do not* require that
@@ -122,7 +126,7 @@ def check_encode_decode_partial(arr, codec, precision=None):
 
     itemsize = arr.itemsize
     start, nitems = 5, 10
-    compare_arr = arr[start:start+nitems]
+    compare_arr = arr[start : start + nitems]
     # test encoding of numpy array
     enc = codec.encode(arr)
     dec = codec.decode_partial(enc, start, nitems)
@@ -135,19 +139,19 @@ def check_encode_decode_partial(arr, codec, precision=None):
     # test partial decode of encoded bytes
     buf = arr.tobytes(order='A')
     enc = codec.encode(buf)
-    dec = codec.decode_partial(enc, start*itemsize, nitems*itemsize, out=out)
+    dec = codec.decode_partial(enc, start * itemsize, nitems * itemsize, out=out)
     compare_arrays(compare_arr, dec, precision=precision)
 
     # test partial decode of encoded bytearray
     buf = bytearray(arr.tobytes(order='A'))
     enc = codec.encode(buf)
-    dec = codec.decode_partial(enc, start*itemsize, nitems*itemsize, out=out)
+    dec = codec.decode_partial(enc, start * itemsize, nitems * itemsize, out=out)
     compare_arrays(compare_arr, dec, precision=precision)
 
     # test partial decode of encoded array.array
     buf = array.array('b', arr.tobytes(order='A'))
     enc = codec.encode(buf)
-    dec = codec.decode_partial(enc, start*itemsize, nitems*itemsize, out=out)
+    dec = codec.decode_partial(enc, start * itemsize, nitems * itemsize, out=out)
     compare_arrays(compare_arr, dec, precision=precision)
 
     # # decoding should support any object exporting the buffer protocol,
@@ -156,32 +160,31 @@ def check_encode_decode_partial(arr, codec, precision=None):
     enc_bytes = ensure_bytes(enc)
 
     # test decoding of raw bytes into numpy array
-    dec = codec.decode_partial(enc_bytes, start*itemsize, nitems*itemsize, out=out)
+    dec = codec.decode_partial(enc_bytes, start * itemsize, nitems * itemsize, out=out)
     compare_arrays(compare_arr, dec, precision=precision)
 
     # test partial decoding of bytearray
-    dec = codec.decode_partial(bytearray(enc_bytes), start*itemsize, nitems*itemsize, out=out)
+    dec = codec.decode_partial(bytearray(enc_bytes), start * itemsize, nitems * itemsize, out=out)
     compare_arrays(compare_arr, dec, precision=precision)
 
     # test partial decoding of array.array
     buf = array.array('b', enc_bytes)
-    dec = codec.decode_partial(buf, start*itemsize, nitems*itemsize, out=out)
+    dec = codec.decode_partial(buf, start * itemsize, nitems * itemsize, out=out)
     compare_arrays(compare_arr, dec, precision=precision)
 
     # test decoding of numpy array into numpy array
     buf = np.frombuffer(enc_bytes, dtype='u1')
-    dec = codec.decode_partial(buf, start*itemsize, nitems*itemsize, out=out)
+    dec = codec.decode_partial(buf, start * itemsize, nitems * itemsize, out=out)
     compare_arrays(compare_arr, dec, precision=precision)
 
     # test decoding directly into bytearray
     out = bytearray(compare_arr.nbytes)
-    codec.decode_partial(enc_bytes, start*itemsize, nitems*itemsize, out=out)
+    codec.decode_partial(enc_bytes, start * itemsize, nitems * itemsize, out=out)
     # noinspection PyTypeChecker
     compare_arrays(compare_arr, out, precision=precision)
 
 
 def assert_array_items_equal(res, arr):
-
     assert isinstance(res, np.ndarray)
     res = res.reshape(-1, order='A')
     arr = arr.reshape(-1, order='A')
@@ -193,7 +196,7 @@ def assert_array_items_equal(res, arr):
     # and values
     arr = arr.ravel().tolist()
     res = res.ravel().tolist()
-    for a, r in zip(arr, res):
+    for a, r in zip(arr, res, strict=True):
         if isinstance(a, np.ndarray):
             assert_array_equal(a, r)
         elif a != a:
@@ -203,7 +206,6 @@ def assert_array_items_equal(res, arr):
 
 
 def check_encode_decode_array(arr, codec):
-
     enc = codec.encode(arr)
     dec = codec.decode(enc)
     assert_array_items_equal(arr, dec)
@@ -217,6 +219,16 @@ def check_encode_decode_array(arr, codec):
     assert_array_items_equal(arr, dec)
 
 
+def check_encode_decode_array_to_bytes(arr, codec):
+    enc = codec.encode(arr)
+    dec = codec.decode(enc)
+    assert_array_items_equal(arr, dec)
+
+    out = np.empty_like(arr)
+    codec.decode(enc, out=out)
+    assert_array_items_equal(arr, out)
+
+
 def check_config(codec):
     config = codec.get_config()
     # round-trip through JSON to check serialization
@@ -232,7 +244,6 @@ def check_repr(stmt):
 
 
 def check_backwards_compatibility(codec_id, arrays, codecs, precision=None, prefix=None):
-
     # setup directory to hold data fixture
     if prefix:
         fixture_dir = os.path.join('fixture', codec_id, prefix)
@@ -243,13 +254,12 @@ def check_backwards_compatibility(codec_id, arrays, codecs, precision=None, pref
 
     # save fixture data
     for i, arr in enumerate(arrays):
-        arr_fn = os.path.join(fixture_dir, 'array.{:02d}.npy'.format(i))
+        arr_fn = os.path.join(fixture_dir, f'array.{i:02d}.npy')
         if not os.path.exists(arr_fn):  # pragma: no cover
            np.save(arr_fn, arr)
 
     # load fixture data
     for arr_fn in glob(os.path.join(fixture_dir, 'array.*.npy')):
-
         # setup
         i = int(arr_fn.split('.')[-2])
         arr = np.load(arr_fn, allow_pickle=True)
@@ -260,12 +270,11 @@ def check_backwards_compatibility(codec_id, arrays, codecs, precision=None, pref
             order = 'C'
 
         for j, codec in enumerate(codecs):
-
             if codec is None:
                 pytest.skip("codec has been removed")
 
             # setup a directory to hold encoded data
-            codec_dir = os.path.join(fixture_dir, 'codec.{:02d}'.format(j))
+            codec_dir = os.path.join(fixture_dir, f'codec.{j:02d}')
             if not os.path.exists(codec_dir):  # pragma: no cover
                 os.makedirs(codec_dir)
 
@@ -280,7 +289,7 @@ def check_backwards_compatibility(codec_id, arrays, codecs, precision=None, pref
                 config = _json.load(cf)
             assert codec == get_codec(config)
 
-            enc_fn = os.path.join(codec_dir, 'encoded.{:02d}.dat'.format(i))
+            enc_fn = os.path.join(codec_dir, f'encoded.{i:02d}.dat')
 
             # one time encode and save array
             if not os.path.exists(enc_fn):  # pragma: no cover
@@ -334,7 +343,8 @@ def check_max_buffer_size(codec):
         np.zeros(max_buffer_size + 1, dtype=np.int8),
         np.zeros(max_buffer_size + 2, dtype=np.int8),
         np.zeros(max_buffer_size, dtype=np.int16),
-        np.zeros(max_buffer_size, dtype=np.int32)]
+        np.zeros(max_buffer_size, dtype=np.int32),
+    ]
     for buf in buffers:
         with pytest.raises(ValueError):
            codec.encode(buf)
numcodecs/tests/package_with_entrypoint/__init__.py CHANGED
@@ -2,7 +2,6 @@ from numcodecs.abc import Codec
 
 
 class TestCodec(Codec):
-
     codec_id = "test"
 
     def encode(self, buf):  # pragma: no cover
numcodecs/tests/test_astype.py CHANGED
@@ -1,11 +1,13 @@
 import numpy as np
 from numpy.testing import assert_array_equal
 
-
 from numcodecs.astype import AsType
-from numcodecs.tests.common import check_encode_decode, check_config, \
-    check_repr, check_backwards_compatibility
-
+from numcodecs.tests.common import (
+    check_backwards_compatibility,
+    check_config,
+    check_encode_decode,
+    check_repr,
+)
 
 # mix of dtypes: integer, float
 # mix of shapes: 1D, 2D, 3D
@@ -55,7 +57,6 @@ def test_repr():
 
 
 def test_backwards_compatibility():
-
     # integers
     arrs = [
         np.arange(1000, dtype='<i4'),
@@ -70,5 +71,4 @@ def test_backwards_compatibility():
         np.random.normal(loc=1000, scale=1, size=(10, 10, 10)).astype('<f8'),
     ]
     codec = AsType(encode_dtype='<f4', decode_dtype='<f8')
-    check_backwards_compatibility(AsType.codec_id, arrs, [codec], precision=[3],
-                                  prefix='f')
+    check_backwards_compatibility(AsType.codec_id, arrs, [codec], precision=[3], prefix='f')
numcodecs/tests/test_base64.py CHANGED
@@ -1,20 +1,17 @@
 import itertools
 
-
 import numpy as np
 import pytest
 
-
 from numcodecs.base64 import Base64
 from numcodecs.tests.common import (
-    check_encode_decode,
-    check_repr,
     check_backwards_compatibility,
+    check_encode_decode,
     check_err_decode_object_buffer,
     check_err_encode_object_buffer,
+    check_repr,
 )
 
-
 codecs = [
     Base64(),
 ]
@@ -29,14 +26,14 @@ arrays = [
     np.random.normal(loc=1000, scale=1, size=(100, 10)),
     np.random.randint(0, 2, size=1000, dtype=bool).reshape(100, 10, order="F"),
     np.random.choice([b"a", b"bb", b"ccc"], size=1000).reshape(10, 10, 10),
-    np.random.randint(0, 2 ** 60, size=1000, dtype="u8").view("M8[ns]"),
-    np.random.randint(0, 2 ** 60, size=1000, dtype="u8").view("m8[ns]"),
-    np.random.randint(0, 2 ** 25, size=1000, dtype="u8").view("M8[m]"),
-    np.random.randint(0, 2 ** 25, size=1000, dtype="u8").view("m8[m]"),
-    np.random.randint(-(2 ** 63), -(2 ** 63) + 20, size=1000, dtype="i8").view("M8[ns]"),
-    np.random.randint(-(2 ** 63), -(2 ** 63) + 20, size=1000, dtype="i8").view("m8[ns]"),
-    np.random.randint(-(2 ** 63), -(2 ** 63) + 20, size=1000, dtype="i8").view("M8[m]"),
-    np.random.randint(-(2 ** 63), -(2 ** 63) + 20, size=1000, dtype="i8").view("m8[m]"),
+    np.random.randint(0, 2**60, size=1000, dtype="u8").view("M8[ns]"),
+    np.random.randint(0, 2**60, size=1000, dtype="u8").view("m8[ns]"),
+    np.random.randint(0, 2**25, size=1000, dtype="u8").view("M8[m]"),
+    np.random.randint(0, 2**25, size=1000, dtype="u8").view("m8[m]"),
+    np.random.randint(-(2**63), -(2**63) + 20, size=1000, dtype="i8").view("M8[ns]"),
+    np.random.randint(-(2**63), -(2**63) + 20, size=1000, dtype="i8").view("m8[ns]"),
+    np.random.randint(-(2**63), -(2**63) + 20, size=1000, dtype="i8").view("M8[m]"),
+    np.random.randint(-(2**63), -(2**63) + 20, size=1000, dtype="i8").view("m8[m]"),
 ]
 
 
numcodecs/tests/test_bitround.py CHANGED
@@ -1,5 +1,4 @@
 import numpy as np
-
 import pytest
 
 from numcodecs.bitround import BitRound, max_bits