legend_pydataobj-1.7.0-py3-none-any.whl → legend_pydataobj-1.7.1-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
legend_pydataobj-1.7.1.dist-info/METADATA CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: legend_pydataobj
-Version: 1.7.0
+Version: 1.7.1
 Summary: LEGEND Python Data Objects
 Author: The LEGEND Collaboration
 Maintainer: The LEGEND Collaboration
@@ -707,7 +707,7 @@ Requires-Dist: numexpr
 Requires-Dist: numpy >=1.21
 Requires-Dist: pandas >=1.4.4
 Requires-Dist: parse
-Requires-Dist: pint
+Requires-Dist: pint !=0.24
 Requires-Dist: pint-pandas
 Provides-Extra: all
 Requires-Dist: legend-pydataobj[docs,test] ; extra == 'all'
```
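The only dependency change is a new exclusion pin on pint. As a quick sanity check (not part of the package), the `packaging` library shows that a `!=0.24` specifier rules out exactly the 0.24 release while still admitting earlier and later versions:

```python
# Illustrative check of the new "pint !=0.24" pin using the `packaging`
# library; the version strings below are examples, not vetted releases.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet("!=0.24")
for v in ("0.23", "0.24", "0.24.1", "0.25"):
    print(v, "allowed" if Version(v) in spec else "excluded")
# only 0.24 (== 0.24.0) is excluded; 0.24.1 and later still match
```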
legend_pydataobj-1.7.1.dist-info/RECORD CHANGED
```diff
@@ -1,10 +1,10 @@
 lgdo/__init__.py,sha256=nv9kORuX2FCA6rQLbH959E0fuGMfZvHb0H5uyrLr2WI,3046
-lgdo/_version.py,sha256=2fEqxujmrV2dsREie2BmOYFLu66FowyHtZT2AoLuIzU,411
+lgdo/_version.py,sha256=kn-QYzzAhfbnfKK6EpE9gJz8TDZkEk52evaid1DHkG4,411
 lgdo/cli.py,sha256=vB1Oj6kZ5gWaY9HBPBRRRyiepp72hm3bFvQeUUWeMYg,8214
 lgdo/lgdo_utils.py,sha256=6a2YWEwpyEMXlAyTHZMO01aqxy6SxJzPZkGNWKNWuS0,2567
 lgdo/lh5_store.py,sha256=xHwzbKNueEtFwScxrgfvCo2_bWKS6j7ojrpeF9kQflc,8483
 lgdo/logging.py,sha256=82wIOj7l7xr3WYyeHdpSXbbjzHJsy-uRyKYUYx2vMfQ,1003
-lgdo/units.py,sha256=nbJ0JTNqlhHUXiBXT3k6qhRpSfMk5_9yW7EeC0dhMuQ,151
+lgdo/units.py,sha256=VQYME86_ev9S7Fq8RyCOQNqYr29MphTTYemmEouZafk,161
 lgdo/utils.py,sha256=9t_GYdB8aQhZ4Vz6ujmASzwCgTuP7ZdINtPTVPyIR6E,3661
 lgdo/compression/__init__.py,sha256=gqbdx4NnpCcW-C7kUXV-hVUZFiNlbCwIbs3uzFe4AFE,1127
 lgdo/compression/base.py,sha256=82cQJujfvoAOKBFx761dEcx_xM02TBCBBuBo6i78tuI,838
@@ -18,12 +18,12 @@ lgdo/lh5/datatype.py,sha256=VhPWeWv3FW8XM6ZOFOdTZOYK3_hRZ3i0fYsBOEOIF5U,1623
 lgdo/lh5/exceptions.py,sha256=QWStQD27Qrm4oYs5Z3UAIoq4y7X-f_Z6QWCBCH0DXwE,1006
 lgdo/lh5/iterator.py,sha256=eqH9a_ZjEhgqJUZbMj36jXK_1Xbx86450DVw7LHNB3Y,12369
 lgdo/lh5/store.py,sha256=sYX1harVGRyP0oq1LGq2qrFhorutkev9MOovwhzEWZ4,6670
-lgdo/lh5/tools.py,sha256=MqYvyag2Uo3RxO0jLphml6UfScLPI2a2MCz3kjkIwmU,9430
+lgdo/lh5/tools.py,sha256=nb4zaBbVbQZTEzawi_faMhRvaPQf9Iea2xbuN6MPSQg,9922
 lgdo/lh5/utils.py,sha256=7QYhKd8MqpeFevLyWuasv63WfzEPx7Fd9wA-l3JjnIQ,6984
 lgdo/lh5/_serializers/__init__.py,sha256=7zvTmBdp-pqS0ium6cKKjEvcqIND-kBC7319G5wMq5Y,1213
 lgdo/lh5/_serializers/read/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lgdo/lh5/_serializers/read/array.py,sha256=DLguBkiVNXZuUk7LdalixA8uISx_SETVPXWk-26HYmk,933
-lgdo/lh5/_serializers/read/composite.py,sha256=M3RR4SEyFGtb4qsscJOtB-P4HhdMQw0_JDIWKkUOsKA,12066
+lgdo/lh5/_serializers/read/composite.py,sha256=xyi1raVlflgBwc855Bfj8m0zTLIme-LPXxQcMrKUhGA,12069
 lgdo/lh5/_serializers/read/encoded.py,sha256=LTdSqEH5tJOdqcSCrMLWutYiQ4FwBFIPF7FDZ-iallw,3744
 lgdo/lh5/_serializers/read/ndarray.py,sha256=4vdgHwj_yIon1KvSiOkDSHq7CJhYtFcjjSHijsvp1aU,3481
 lgdo/lh5/_serializers/read/scalar.py,sha256=YwvA6kyNUh6H0kh2L7bzfgLkA8Et2dQFjp2nFnRmeGI,755
@@ -33,22 +33,22 @@ lgdo/lh5/_serializers/write/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
 lgdo/lh5/_serializers/write/array.py,sha256=Gosg8rOCH_2dRMj_oNSWyXuoYXDjy0OK--GCYWswR4U,2803
 lgdo/lh5/_serializers/write/composite.py,sha256=f3b4YeOoUr8y1wA7zsKEFT5mIwX8SD0MYQ40unMRyQc,8460
 lgdo/lh5/_serializers/write/scalar.py,sha256=gkcF2WVBR3aQYl0EynbVUocx4y3r8tvPfQYQJjkPvP4,643
-lgdo/lh5/_serializers/write/vector_of_vectors.py,sha256=mZuC7NIb-IkmJ9wgn37TTvFTLLAFR71iivrY4yiSJZM,2912
+lgdo/lh5/_serializers/write/vector_of_vectors.py,sha256=puGQX9XF5P_5DVbm_Cc6TvPrsDywgBLSYtkqFNltbB4,3493
 lgdo/types/__init__.py,sha256=_cHZEpsh7OGsA243WBK2a2UmCp3NSA0MjXYf95y728k,771
 lgdo/types/array.py,sha256=sUxh1CNCaefrnybt5qdjmmMpVQa_RqFxUv1tJ_pyBbc,6537
 lgdo/types/arrayofequalsizedarrays.py,sha256=DOGJiTmc1QCdm7vLbE6uIRXoMPtt8uuCfmwQawgWf5s,4949
 lgdo/types/encoded.py,sha256=JW4U5ow7KLMzhKnmhdnxbC3SZJAs4bOEDZWKG4KY1uU,15293
-lgdo/types/fixedsizearray.py,sha256=7Fj4QS9ubaeEf2tM3HwjSs6AuG8hKSYaT6Hy7Y_VHdQ,1525
+lgdo/types/fixedsizearray.py,sha256=7RjUwTz1bW0pcrdy27JlfrXPAuOU89Kj7pOuSUCojK8,1527
 lgdo/types/lgdo.py,sha256=UnJDi1emQYVgH_H29Vipfs4LelPopxG5pgZUu1eKOlw,2761
 lgdo/types/scalar.py,sha256=c5Es2vyDqyWTPV6mujzfIzMpC1jNWkEIcvYyWQUxH3Q,1933
 lgdo/types/struct.py,sha256=Q0OWLVd4B0ciLb8t6VsxU3MPbmGLZ7WfQNno1lSQS0Q,4918
 lgdo/types/table.py,sha256=PYxHXRmuNZkz1UK6MzUVWGhEsRFf6t-xXGFFrXUP0EY,17936
-lgdo/types/vectorofvectors.py,sha256=d_n0lK6rut3_DdPcmMro0ObJAIRhMQnYj0cqAEpckPc,24368
+lgdo/types/vectorofvectors.py,sha256=Q53K8wiHwRHpGw3ARqrLnOXu3kLHptTYMp0ay9KK1vs,24386
 lgdo/types/vovutils.py,sha256=7BWPP0BSj-92ifbCIUBcfqxG5-TS8uxujTyJJuDFI04,10302
 lgdo/types/waveformtable.py,sha256=f2tS4f1OEoYaTM5ldCX9zmw8iSISCT3t3wS1SrPdu_o,9901
-legend_pydataobj-1.7.0.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
-legend_pydataobj-1.7.0.dist-info/METADATA,sha256=rcfERzQLFd45YcFRMTusx2x7i1LuU7faypmwka_83Ws,44353
-legend_pydataobj-1.7.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-legend_pydataobj-1.7.0.dist-info/entry_points.txt,sha256=Uu5MTlppBZxB4QGlLv-oX8FqACWjAZDNii__TBDJwLQ,72
-legend_pydataobj-1.7.0.dist-info/top_level.txt,sha256=KyR-EUloqiXcQ62IWnzBmtInDtvsHl4q2ZJAZgTcLXE,5
-legend_pydataobj-1.7.0.dist-info/RECORD,,
+legend_pydataobj-1.7.1.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+legend_pydataobj-1.7.1.dist-info/METADATA,sha256=ZHdaQ1DJm7BXQEsppDekmab5BZFFuBtjjCF3fQagPh0,44360
+legend_pydataobj-1.7.1.dist-info/WHEEL,sha256=y4mX-SOX4fYIkonsAGA5N0Oy-8_gI4FXw5HNI1xqvWg,91
+legend_pydataobj-1.7.1.dist-info/entry_points.txt,sha256=Uu5MTlppBZxB4QGlLv-oX8FqACWjAZDNii__TBDJwLQ,72
+legend_pydataobj-1.7.1.dist-info/top_level.txt,sha256=KyR-EUloqiXcQ62IWnzBmtInDtvsHl4q2ZJAZgTcLXE,5
+legend_pydataobj-1.7.1.dist-info/RECORD,,
```
legend_pydataobj-1.7.1.dist-info/WHEEL CHANGED
```diff
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.43.0)
+Generator: setuptools (70.2.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
```
lgdo/_version.py CHANGED
```diff
@@ -12,5 +12,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '1.7.0'
-__version_tuple__ = version_tuple = (1, 7, 0)
+__version__ = version = '1.7.1'
+__version_tuple__ = version_tuple = (1, 7, 1)
```
lgdo/lh5/_serializers/read/composite.py CHANGED
```diff
@@ -56,7 +56,7 @@ def _h5_read_lgdo(
         lh5_file = list(h5f)
         n_rows_read = 0
 
-        for i, h5f in enumerate(lh5_file):
+        for i, _h5f in enumerate(lh5_file):
             if isinstance(idx, list) and len(idx) > 0 and not np.isscalar(idx[0]):
                 # a list of lists: must be one per file
                 idx_i = idx[i]
@@ -65,7 +65,7 @@ def _h5_read_lgdo(
                 if not (isinstance(idx, tuple) and len(idx) == 1):
                     idx = (idx,)
                 # idx is a long continuous array
-                n_rows_i = read_n_rows(name, h5f)
+                n_rows_i = read_n_rows(name, _h5f)
                 # find the length of the subset of idx that contains indices
                 # that are less than n_rows_i
                 n_rows_to_read_i = bisect.bisect_left(idx[0], n_rows_i)
@@ -78,7 +78,7 @@ def _h5_read_lgdo(
 
             obj_buf, n_rows_read_i = _h5_read_lgdo(
                 name,
-                h5f,
+                _h5f,
                 start_row=start_row,
                 n_rows=n_rows_i,
                 idx=idx_i,
```
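The rename from `h5f` to `_h5f` fixes a classic loop-variable shadowing bug: the loop reused the name of the function argument, so the original file handle was silently replaced by whichever file the loop touched last. A minimal sketch of the failure mode (the function and file names here are illustrative stand-ins, not the package's API):

```python
# Illustrative sketch: reusing an outer name as a loop variable clobbers it
# for everything that runs after (or recurses from within) the loop.
def summarize(h5f):
    files = list(h5f)
    for i, h5f in enumerate(files):   # BUG: shadows the argument
        pass
    return h5f                        # now the *last* file, not the input list

def summarize_fixed(h5f):
    files = list(h5f)
    for i, _h5f in enumerate(files):  # distinct name keeps the argument intact
        pass
    return h5f

print(summarize(["a.lh5", "b.lh5"]))        # 'b.lh5'
print(summarize_fixed(["a.lh5", "b.lh5"]))  # ['a.lh5', 'b.lh5']
```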
lgdo/lh5/_serializers/write/vector_of_vectors.py CHANGED
```diff
@@ -2,6 +2,8 @@ from __future__ import annotations
 
 import logging
 
+import numpy as np
+
 from .... import types
 from ... import utils
 from ...exceptions import LH5EncodeError
@@ -31,12 +33,15 @@ def _h5_write_vector_of_vectors(
 
     # if appending we need to add an appropriate offset to the
     # cumulative lengths as appropriate for the in-file object
-    offset = 0  # declare here because we have to subtract it off at the end
+    # declare here because we have to subtract it off at the end
+    offset = np.int64(0)
     if (wo_mode in ("a", "o")) and "cumulative_length" in group:
         len_cl = len(group["cumulative_length"])
+        # if append, ignore write_start and set it to total number of vectors
         if wo_mode == "a":
             write_start = len_cl
         if len_cl > 0:
+            # set offset to correct number of elements in flattened_data until write_start
            offset = group["cumulative_length"][write_start - 1]
 
     # First write flattened_data array. Only write rows with data.
```
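The offset bookkeeping is easiest to follow with concrete numbers. Suppose the file already holds two vectors with on-disk `cumulative_length` of `[2, 5]` (so five elements in `flattened_data`), and we append `[[1, 2], [3]]`, whose in-memory `cumulative_length` is `[2, 3]`. A plain-NumPy sketch (not the serializer itself) of the values that end up on disk:

```python
import numpy as np

# In-file state: two vectors, five elements already in flattened_data.
cl_on_disk = np.array([2, 5], dtype="uint32")

# Object being appended: [[1, 2], [3]] -> in-memory cumulative_length [2, 3].
cl_in_memory = np.array([2, 3], dtype="uint32")

# As in the code above: offset = last on-disk cumulative_length entry,
# i.e. the number of elements already written to flattened_data.
offset = np.int64(cl_on_disk[-1])

# Values actually stored for the appended rows: [7, 8].
print(cl_in_memory + offset)
```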
```diff
@@ -71,15 +76,23 @@ def _h5_write_vector_of_vectors(
     )
 
     # now offset is used to give appropriate in-file values for
-    # cumulative_length. Need to adjust it for start_row
+    # cumulative_length. Need to adjust it for start_row, if different from zero
     if start_row > 0:
         offset -= obj.cumulative_length.nda[start_row - 1]
 
     # Add offset to obj.cumulative_length itself to avoid memory allocation.
     # Then subtract it off after writing! (otherwise it will be changed
     # upon return)
-    cl_dtype = obj.cumulative_length.nda.dtype.type
-    obj.cumulative_length.nda += cl_dtype(offset)
+
+    # NOTE: this operation is not numerically safe (uint overflow in the lower
+    # part of the array), but this is not a problem because those values are
+    # not written to disk and we are going to restore the offset at the end
+    np.add(
+        obj.cumulative_length.nda,
+        offset,
+        out=obj.cumulative_length.nda,
+        casting="unsafe",
+    )
 
     _h5_write_array(
         obj.cumulative_length,
@@ -92,4 +105,10 @@ def _h5_write_vector_of_vectors(
         write_start=write_start,
         **h5py_kwargs,
     )
-    obj.cumulative_length.nda -= cl_dtype(offset)
+
+    np.subtract(
+        obj.cumulative_length.nda,
+        offset,
+        out=obj.cumulative_length.nda,
+        casting="unsafe",
+    )
```
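Replacing the in-place `+=` (which required pre-casting the offset to the array's own dtype) with `np.add(..., casting="unsafe")` lets a signed offset be applied to an unsigned `cumulative_length` buffer. As the NOTE in the diff says, intermediate values may wrap around, but the matching `np.subtract` restores them exactly, because unsigned arithmetic is modular. A standalone illustration:

```python
import numpy as np

cl = np.array([2, 5, 9], dtype="uint32")
offset = np.int64(-5)  # e.g. start_row skipped the first 5 flattened elements

# In-place add with unsafe casting: the first entry wraps around ...
np.add(cl, offset, out=cl, casting="unsafe")
print(cl)  # [4294967293 0 4] -- wrapped, but these values never reach disk

# ... and the matching subtract restores the originals exactly, because
# uint32 arithmetic is carried out modulo 2**32.
np.subtract(cl, offset, out=cl, casting="unsafe")
print(cl)  # [2 5 9]
```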
lgdo/lh5/tools.py CHANGED
```diff
@@ -188,7 +188,20 @@ def show(
             else:
                 toprint += f", \033[3mnumchunks\033[0m={val.id.get_num_chunks()}"
                 toprint += f", \033[3mchunkshape\033[0m={chunkshape}"
-            toprint += f", \033[3mcompression\033[0m={val.compression}"
+            toprint += ", \033[3mfilters\033[0m="
+
+            numfilters = val.id.get_create_plist().get_nfilters()
+            if numfilters == 0:
+                toprint += "None"
+            else:
+                toprint += "("
+                for i in range(numfilters):
+                    thisfilter = val.id.get_create_plist().get_filter(i)[3].decode()
+                    if "lz4" in thisfilter:
+                        thisfilter = "lz4"
+                    toprint += f"{thisfilter},"
+                toprint += ")"
+
         except TypeError:
             toprint += "(scalar)"
 
```
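Instead of `val.compression`, which only reports the compressor h5py knows by name, the new code walks the dataset creation property list, so `show()` can display the full HDF5 filter pipeline (shuffle, third-party compressors, and so on). A self-contained sketch of those low-level h5py calls; the file and dataset names are illustrative:

```python
# Sketch of the h5py low-level filter inspection used above.
import h5py
import numpy as np

with h5py.File("demo.h5", "w") as f:
    dset = f.create_dataset(
        "data", data=np.arange(1000), chunks=(100,),
        compression="gzip", shuffle=True,
    )
    plist = dset.id.get_create_plist()
    for i in range(plist.get_nfilters()):
        # get_filter(i) -> (filter code, flags, client data, name as bytes)
        print(plist.get_filter(i)[3].decode())
# prints "shuffle" and "deflate", the HDF5 names of the pipeline filters
```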
```diff
@@ -268,7 +281,7 @@ def load_nda(
         f = sto.gimme_file(ff, "r")
         for par in par_list:
             if f"{lh5_group}/{par}" not in f:
-                msg = f"'{lh5_group}/{par}' not in file {f_list[ii]}"
+                msg = f"'{lh5_group}/{par}' not in file {ff}"
                 raise RuntimeError(msg)
 
             if idx_list is None:
```
lgdo/types/fixedsizearray.py CHANGED
```diff
@@ -50,4 +50,4 @@ class FixedSizeArray(Array):
         --------
         .LGDO.view_as
         """
-        return super.view_as(library, with_units=with_units)
+        return super().view_as(library, with_units=with_units)
```
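The fix above is easy to miss: `super` without parentheses is the builtin type itself, not a bound proxy for the parent class, so `super.view_as(...)` raises `AttributeError` at call time. A minimal illustration with stand-in classes:

```python
# Why the one-character fix matters; Base/Child are illustrative stand-ins.
class Base:
    def view_as(self, library):
        return f"viewed with {library}"

class Child(Base):
    def view_as(self, library):
        # AttributeError: type object 'super' has no attribute 'view_as'
        # return super.view_as(library)
        return super().view_as(library)  # correct: bound parent method

print(Child().view_as("pandas"))  # viewed with pandas
```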
lgdo/types/vectorofvectors.py CHANGED
```diff
@@ -480,7 +480,7 @@ class VectorOfVectors(LGDO):
             lens = np.array([lens], dtype="u4")
 
         # calculate stop index in flattened_data
-        cum_lens = start + lens.cumsum()
+        cum_lens = np.add(start, lens.cumsum(), dtype=int)
 
         # fill with fast vectorized routine
         vovutils._nb_fill(vec, lens, self.flattened_data.nda[start : cum_lens[-1]])
```
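With `lens` stored as `u4`, NumPy upcasts its `cumsum()` to an unsigned 64-bit array, so the dtype of `start + lens.cumsum()` is left to promotion rules; under NumPy 1.x those rules can even yield `float64` when an unsigned array meets a negative Python int, which is unusable as a slice index. Pinning `dtype=int` keeps `cum_lens` a signed-integer array in every case. A quick demonstration (dtype outputs as observed under NumPy 1.x promotion rules):

```python
import numpy as np

lens = np.array([2, 3, 4], dtype="u4")
print(lens.cumsum().dtype)  # uint64: cumsum upcasts small unsigned ints

# Promotion corner case under NumPy 1.x: an unsigned array combined with a
# negative Python int is promoted to float64.
print((lens.cumsum() + (-1)).dtype)  # float64 on NumPy 1.x

# The fix pins the output dtype, so the stop indices stay signed integers.
cum_lens = np.add(5, lens.cumsum(), dtype=int)
print(cum_lens, cum_lens.dtype)  # [ 7 10 14] int64
```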
lgdo/units.py CHANGED
```diff
@@ -3,4 +3,4 @@ from __future__ import annotations
 import pint
 
 default_units_registry = pint.get_application_registry()
-default_units_registry.default_format = "~P"
+default_units_registry.formatter.default_format = "~P"
```
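This tracks pint's reorganization of formatting options onto a registry-level `formatter` object; setting `default_format` directly on the registry is deprecated in recent pint releases (and pint 0.24.0, excluded above in METADATA, is presumably the release this pair of changes works around). A small sketch of what the `"~P"` default does, assuming a pint version recent enough to expose `registry.formatter`:

```python
# "~" = abbreviated unit symbols, "P" = pretty-printed exponents.
import pint

ureg = pint.get_application_registry()
ureg.formatter.default_format = "~P"

q = 3.5 * ureg.meter / ureg.second**2
print(f"{q}")  # 3.5 m/s² -- short, pretty-printed units by default
```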