reboost 0.9.1__py3-none-any.whl → 0.10.0a0__py3-none-any.whl

This diff compares the contents of two package versions publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective registries.
reboost/_version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
  commit_id: COMMIT_ID
  __commit_id__: COMMIT_ID

- __version__ = version = '0.9.1'
- __version_tuple__ = version_tuple = (0, 9, 1)
+ __version__ = version = '0.10.0a0'
+ __version_tuple__ = version_tuple = (0, 10, 0, 'a0')

  __commit_id__ = commit_id = None
reboost/build_hit.py CHANGED
@@ -323,7 +323,7 @@ def build_hit(
  if time_dict is not None:
  start_time = time.time()

- ak_obj = stps.view_as("ak")
+ ak_obj = stps.view_as("ak", with_units=True)

  if time_dict is not None:
  time_dict[proc_name].update_field("conv", start_time)
@@ -349,9 +349,6 @@ def build_hit(
  if out_detector not in output_tables and files.hit[file_idx] is None:
  output_tables[out_detector] = None

- # get the attributes
- attrs = utils.copy_units(stps)
-
  # if we have more than one output detector, make an independent copy.
  hit_table = (
  copy.deepcopy(hit_table_layouted)
@@ -376,9 +373,6 @@ def build_hit(
  hit_table, outputs=proc_group["outputs"]
  )

- # assign units in the output table
- hit_table = utils.assign_units(hit_table, attrs)
-
  # now write
  if files.hit[file_idx] is not None:
  # get modes to write with
@@ -443,12 +437,7 @@
  def _evaluate_operation(
  hit_table, field: str, info: str | dict, local_dict: dict, time_dict: ProfileDict
  ) -> None:
- if isinstance(info, str):
- expression = info
- units = None
- else:
- expression = info["expression"]
- units = info.get("units", None)
+ expression = info if isinstance(info, str) else info["expression"]

  # evaluate the expression
  col = core.evaluate_output_column(
@@ -460,7 +449,4 @@
  name=field,
  )

- if units is not None:
- col.attrs["units"] = units
-
  core.add_field_with_nesting(hit_table, field, col)
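
The hunks above replace the old copy_units/assign_units round-trip with a single units-aware read of the steps table. A minimal sketch of what that read does, assuming legend-pydataobj >= 1.17.2 (the requirement bumped in METADATA below), where view_as("ak", with_units=True) carries the LGDO units attr into the awkward layer as a parameter that reboost.units.get_unit_str can read:

    import numpy as np
    from lgdo import Array

    from reboost import units

    # an LGDO column with a units attribute, as found in a steps (stp) file
    col = Array(np.array([10.0, 25.0]), attrs={"units": "keV"})

    # with_units=True keeps the units attached while converting to awkward,
    # so they travel with the data instead of being copied around by hand
    obj = col.view_as("ak", with_units=True)
    assert units.get_unit_str(obj) == "keV"
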
reboost/core.py CHANGED
@@ -18,18 +18,9 @@ log = logging.getLogger(__name__)


  def read_data_at_channel_as_ak(
- channels: ak.Array,
- rows: ak.Array,
- file: str,
- field: str,
- group: str,
- tab_map: dict[int, str],
- with_units: bool = False,
+ channels: ak.Array, rows: ak.Array, file: str, field: str, group: str, tab_map: dict[int, str]
  ) -> ak.Array:
- r"""Read the data from a particular field to an Awkward array.
-
- This replaces the TCM like object defined by the channels and rows with the
- corresponding data field.
+ r"""Read the data from a particular field to an awkward array. This replaces the TCM like object defined by the channels and rows with the corresponding data field.

  Parameters
  ----------
@@ -77,9 +68,7 @@ def read_data_at_channel_as_ak(
  tcm_rows = np.where(ak.flatten(channels == key))[0]

  # read the data with sorted idx
- data_ch = lh5.read(f"{group}/{tab_name}/{field}", file, idx=idx[arg_idx])
- units = data_ch.attrs.get("units", None)
- data_ch = data_ch.view_as("ak")
+ data_ch = lh5.read(f"{group}/{tab_name}/{field}", file, idx=idx[arg_idx]).view_as("ak")

  # sort back to order for tcm
  data_ch = data_ch[np.argsort(arg_idx)]
@@ -96,12 +85,8 @@ def read_data_at_channel_as_ak(

  # sort the final data
  data_flat = data_flat[np.argsort(tcm_rows_full)]
- data_unflat = ak.unflatten(data_flat, reorder)
-
- if with_units and units is not None:
- return ak.with_parameter(data_unflat, "units", units)

- return data_unflat
+ return ak.unflatten(data_flat, reorder)


  def evaluate_output_column(
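
With the with_units flag gone, read_data_at_channel_as_ak always returns a bare awkward array. A hedged usage sketch with hypothetical file, group and table names (only the signature is taken from the diff above):

    import awkward as ak

    from reboost import core

    # TCM-like coincidence map: per event, which channel and which row to read
    channels = ak.Array([[0, 1], [0]])
    rows = ak.Array([[0, 0], [1]])

    # "stp.lh5", "stp" and the detector table names are placeholders
    edep = core.read_data_at_channel_as_ak(
        channels, rows, "stp.lh5", "edep", "stp", {0: "det001", 1: "det002"}
    )
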
reboost/hpge/psd.py CHANGED
@@ -18,7 +18,7 @@ from .utils import HPGePulseShapeLibrary, HPGeRZField
  log = logging.getLogger(__name__)


- def r90(edep: ak.Array, xloc: ak.Array, yloc: ak.Array, zloc: ak.Array) -> Array:
+ def r90(edep: ak.Array, xloc: ak.Array, yloc: ak.Array, zloc: ak.Array) -> ak.Array:
  """R90 HPGe pulse shape heuristic.

  Parameters
@@ -31,18 +31,22 @@ def r90(edep: ak.Array, xloc: ak.Array, yloc: ak.Array, zloc: ak.Array) -> Array
  array of y coordinate position.
  zloc
  array of z coordinate position.
+
+ Returns
+ -------
+ calculated r90 for each hit
  """
+ pos = [units.units_conv_ak(pos, "mm") for pos in [xloc, yloc, zloc]]
+
+ edep = units.units_conv_ak(edep, "keV")
+
  tot_energy = ak.sum(edep, axis=-1, keepdims=True)

  def eweight_mean(field, energy):
  return ak.sum(energy * field, axis=-1, keepdims=True) / tot_energy

  # Compute distance of each edep to the weighted mean
- dist = np.sqrt(
- (xloc - eweight_mean(edep, xloc)) ** 2
- + (yloc - eweight_mean(edep, yloc)) ** 2
- + (zloc - eweight_mean(edep, zloc)) ** 2
- )
+ dist = np.sqrt(sum((p - eweight_mean(edep, p)) ** 2 for p in pos))

  # Sort distances and corresponding edep within each event
  sorted_indices = ak.argsort(dist, axis=-1)
@@ -76,16 +80,16 @@ def r90(edep: ak.Array, xloc: ak.Array, yloc: ak.Array, zloc: ak.Array) -> Array
  r90_indices = ak.argmax(cumsum_edep_corrected >= threshold, axis=-1, keepdims=True)
  r90 = sorted_dist[r90_indices]

- return Array(ak.flatten(r90).to_numpy())
+ return units.attach_units(ak.Array(ak.flatten(r90)), "mm")


  def drift_time(
- xloc: ArrayLike,
- yloc: ArrayLike,
- zloc: ArrayLike,
+ xloc: ak.Array | VectorOfVectors,
+ yloc: ak.Array | VectorOfVectors,
+ zloc: ak.Array | VectorOfVectors,
  dt_map: HPGeRZField,
  coord_offset: pint.Quantity | pyg4ometry.gdml.Position = (0, 0, 0) * u.m,
- ) -> VectorOfVectors:
+ ) -> ak.Array:
  """Calculates drift times for each step (cluster) in an HPGe detector.

  Parameters
@@ -106,7 +110,7 @@
  # sanitize coord_offset
  coord_offset = units.pg4_to_pint(coord_offset)

- # unit handling (for matching with drift time map units)
+ # unit handling
  xu, yu = [units.units_convfact(data, dt_map.r_units) for data in (xloc, yloc)]
  zu = units.units_convfact(zloc, dt_map.z_units)

@@ -122,7 +126,6 @@

  return None

- # transform coordinates
  xloc = xu * xloc - coord_offset[0].to(dt_map.r_units).m
  yloc = yu * yloc - coord_offset[1].to(dt_map.r_units).m
  zloc = zu * zloc - coord_offset[2].to(dt_map.z_units).m
@@ -133,16 +136,13 @@
  np.sqrt(xloc**2 + yloc**2),
  zloc,
  )
- return VectorOfVectors(
- dt_values,
- attrs={"units": units.unit_to_lh5_attr(dt_map.φ_units)},
- )
+ return units.attach_units(ak.Array(dt_values), units.unit_to_lh5_attr(dt_map.φ_units))


  def drift_time_heuristic(
- drift_time: ArrayLike,
- edep: ArrayLike,
- ) -> Array:
+ drift_time: ak.Array | VectorOfVectors,
+ edep: ak.Array | VectorOfVectors,
+ ) -> ak.Array:
  """HPGe drift-time-based pulse-shape heuristic.

  See :func:`_drift_time_heuristic_impl` for a description of the algorithm.
@@ -160,11 +160,11 @@
  edep, e_units = units.unwrap_lgdo(edep)

  # we want to attach the right units to the dt heuristic, if possible
- attrs = {}
  if t_units is not None and e_units is not None:
- attrs["units"] = units.unit_to_lh5_attr(t_units / e_units)
+ unit = units.unit_to_lh5_attr(t_units / e_units)
+ return units.attach_units(ak.Array(_drift_time_heuristic_impl(drift_time, edep)), unit)

- return Array(_drift_time_heuristic_impl(drift_time, edep), attrs=attrs)
+ return ak.Array(_drift_time_heuristic_impl(drift_time, edep))


  @numba.njit(cache=True)
@@ -828,15 +828,15 @@
  activeness_surface: ArrayLike | None = None,
  surface_step_in_um: float = 10,
  return_mode: str = "current",
- ) -> Array:
+ ) -> ak.Array:
  """Estimate the maximum current in the HPGe detector based on :func:`_estimate_current_impl`.

  Parameters
  ----------
  edep
- Array of energies for each step.
+ Energies for each step.
  drift_time
- Array of drift times for each step.
+ Drift times for each step.
  dist_to_nplus
  Distance to n-plus electrode, only needed if surface heuristics are enabled.
  r
@@ -844,7 +844,7 @@
  z
  z coordinate (only needed if a full PSS library is used).
  template
- array of the bulk pulse template, can also be a :class:`HPGePulseShapeLibrary`.
+ Array of the bulk pulse template
  times
  time-stamps for the bulk pulse template
  fccd_in_um
@@ -864,12 +864,12 @@
  -------
  An Array of the maximum current/ time / energy for each hit.
  """
- # extract LGDO data and units
- drift_time, _ = units.unwrap_lgdo(drift_time)
- edep, _ = units.unwrap_lgdo(edep)
- dist_to_nplus, _ = units.unwrap_lgdo(dist_to_nplus)
- r, _ = units.unwrap_lgdo(r)
- z, _ = units.unwrap_lgdo(z)
+ # convert to target units
+ drift_time = units.units_conv_ak(drift_time, "ns")
+ edep = units.units_conv_ak(edep, "keV")
+ dist_to_nplus = units.units_conv_ak(dist_to_nplus, "mm")
+ r = units.units_conv_ak(r, "mm")
+ z = units.units_conv_ak(z, "mm")

  # prepare inputs for surface sims
  include_surface_effects, dist_to_nplus, templates_surface, activeness_surface = (
@@ -901,11 +901,12 @@

  # return
  if return_mode == "max_time":
- return Array(time)
+ return units.attach_units(ak.Array(time), "ns")
  if return_mode == "current":
- return Array(curr)
+ # current has no unit (depends on the template)
+ return ak.Array(curr)
  if return_mode == "energy":
- return Array(energy)
+ return units.attach_units(ak.Array(energy), "keV")

  msg = f"Return mode {return_mode} is not implemented."
  raise ValueError(msg)
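
The heuristics now coerce their inputs to fixed units (mm, keV, ns) instead of unwrapping LGDOs. A minimal sketch of calling r90 with unit-tagged awkward arrays, using the attach_units helper added in reboost/units.py below:

    import awkward as ak

    from reboost import units
    from reboost.hpge import psd

    # two hits with jagged steps; r90 converts to mm and keV internally
    edep = units.attach_units(ak.Array([[100.0, 50.0], [200.0]]), "keV")
    xloc = units.attach_units(ak.Array([[0.0, 1.0], [2.0]]), "mm")
    yloc = units.attach_units(ak.Array([[0.0, 0.5], [0.0]]), "mm")
    zloc = units.attach_units(ak.Array([[0.0, 0.2], [1.0]]), "mm")

    r90 = psd.r90(edep, xloc, yloc, zloc)  # ak.Array with "mm" attached
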
reboost/hpge/surface.py CHANGED
@@ -5,27 +5,32 @@ import logging
  import awkward as ak
  import numba
  import numpy as np
+ import pint
+ import pyg4ometry
  import pygeomhpges
  from lgdo import VectorOfVectors
  from lgdo.types import LGDO
- from numpy.typing import ArrayLike
+ from numpy.typing import NDArray
  from scipy import stats

+ from reboost.units import ureg as u
+
+ from .. import units
+
  log = logging.getLogger(__name__)


  def distance_to_surface(
- positions_x: VectorOfVectors,
- positions_y: VectorOfVectors,
- positions_z: VectorOfVectors,
+ positions_x: ak.Array | VectorOfVectors,
+ positions_y: ak.Array | VectorOfVectors,
+ positions_z: ak.Array | VectorOfVectors,
  hpge: pygeomhpges.base.HPGe,
- det_pos: ArrayLike,
+ det_pos: pint.Quantity | pyg4ometry.gdml.Position | tuple = (0, 0, 0) * u.m,
  *,
  surface_type: str | None = None,
- unit: str = "mm",
- distances_precompute: VectorOfVectors | None = None,
+ distances_precompute: ak.Array | None = None,
  precompute_cutoff: float | None = None,
- ) -> VectorOfVectors:
+ ) -> ak.Array:
  """Computes the distance from each step to the detector surface.

  The calculation can be performed for any surface type `nplus`, `pplus`,
@@ -44,32 +49,36 @@
  hpge
  HPGe object.
  det_pos
- position of the detector origin, must be a 3 component array corresponding to `(x,y,z)`.
+ position of the detector origin, must be a 3 component array corresponding to `(x,y,z)`. If no units
+ are specified mm is assumed.
  surface_type
  string of which surface to use, can be `nplus`, `pplus` `passive` or None (in which case the distance to any surface is calculated).
  unit
  unit for the hit tier positions table.
  distances_precompute
- VectorOfVectors of distance to any surface computed by remage.
+ Distance to any surface computed by remage.
  precompute_cutoff
  cutoff on distances_precompute to not compute the distance for (in mm)

  Returns
  -------
- VectorOfVectors with the same shape as `positions_x/y/z` of the distance to the surface.
+ Distance to the surface for each hit with the same shape as `positions_x/y/z`.

  Note
  ----
  `positions_x/positions_y/positions_z` must all have the same shape.
  """
- factor = np.array([1, 100, 1000])[unit == np.array(["mm", "cm", "m"])][0]
-
  # compute local positions
  pos = []
  sizes = []

+ if not isinstance(det_pos, pint.Quantity | pyg4ometry.gdml.Position):
+ det_pos = det_pos * u.mm
+
+ det_pos = units.pg4_to_pint(det_pos)
+
  for idx, pos_tmp in enumerate([positions_x, positions_y, positions_z]):
- local_pos_tmp = ak.Array(pos_tmp) * factor - det_pos[idx]
+ local_pos_tmp = units.units_conv_ak(pos_tmp, "mm") - det_pos[idx].to("mm").m
  local_pos_flat_tmp = ak.flatten(local_pos_tmp).to_numpy()
  pos.append(local_pos_flat_tmp)
  sizes.append(ak.num(local_pos_tmp, axis=1))
@@ -106,7 +115,7 @@
  local_positions[indices], surface_indices=surface_indices
  )

- return VectorOfVectors(ak.unflatten(distances, size), dtype=np.float32)
+ return units.attach_units(ak.Array(ak.unflatten(distances, size)), "mm")


  @numba.njit(cache=True)
@@ -222,7 +231,7 @@ def get_surface_response(
  factor: float = 0.29,
  nsteps: int = 10000,
  delta_x: float = 10,
- ):
+ ) -> NDArray:
  """Extract the surface response current pulse based on diffusion.

  This extracts the amount of charge arrived (cumulative) at the p-n
@@ -248,6 +257,10 @@
  the number of time steps.
  delta_x
  the width of each position bin.
+
+ Returns
+ -------
+ 2D array of the amount of charge arriving at the p-n junction as a function of time for each depth.
  """
  # number of position steps
  nx = int(fccd / delta_x)
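
distance_to_surface now takes det_pos as a pint quantity or pyg4ometry position, falling back to mm for bare sequences. A short sketch of that normalization, mirroring the branch added above (the pyg4ometry.gdml.Position case is omitted here):

    import pint

    from reboost.units import ureg as u

    det_pos = (0, 0, 5)  # a bare 3-vector is interpreted as mm
    if not isinstance(det_pos, pint.Quantity):
        det_pos = det_pos * u.mm

    # components are then converted per axis, as done inside the function
    z_offset_mm = det_pos[2].to("mm").m  # -> 5.0
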
reboost/math/functions.py CHANGED
@@ -5,14 +5,13 @@ import logging
  import awkward as ak
  import numpy as np
  from lgdo import Array, VectorOfVectors
- from lgdo.types import LGDO
+
+ from .. import units

  log = logging.getLogger(__name__)


- def piecewise_linear_activeness(
- distances: VectorOfVectors | ak.Array, fccd: float, dlf: float
- ) -> VectorOfVectors | Array:
+ def piecewise_linear_activeness(distances: ak.Array, fccd_in_mm: float, dlf: float) -> ak.Array:
  r"""Piecewise linear HPGe activeness model.

  Based on:
@@ -38,11 +37,10 @@
  Parameters
  ----------
  distances
- the distance from each step to the detector surface. Can be either a
- `numpy` or `awkward` array, or a LGDO `VectorOfVectors` or `Array`. The computation
+ the distance from each step to the detector surface. The computation
  is performed for each element and the shape preserved in the output.

- fccd
+ fccd_in_mm
  the value of the FCCD
  dlf
  the fraction of the FCCD which is fully inactive.
@@ -52,14 +50,9 @@
  a :class:`VectorOfVectors` or :class:`Array` of the activeness
  """
  # convert to ak
- if isinstance(distances, LGDO):
- distances_ak = distances.view_as("ak")
- elif not isinstance(distances, ak.Array):
- distances_ak = ak.Array(distances)
- else:
- distances_ak = distances
+ distances_ak = units.units_conv_ak(distances, "mm")

- dl = fccd * dlf
+ dl = fccd_in_mm * dlf
  distances_flat = (
  ak.flatten(distances_ak).to_numpy() if distances_ak.ndim > 1 else distances_ak.to_numpy()
  )
@@ -68,24 +61,26 @@
  results = np.full_like(distances_flat, np.nan, dtype=np.float64)
  lengths = ak.num(distances_ak) if distances_ak.ndim > 1 else len(distances_ak)

- mask1 = (distances_flat > fccd) | np.isnan(distances_flat)
+ mask1 = (distances_flat > fccd_in_mm) | np.isnan(distances_flat)
  mask2 = (distances_flat <= dl) & (~mask1)
  mask3 = ~(mask1 | mask2)

  # assign the values
  results[mask1] = 1
  results[mask2] = 0
- results[mask3] = (distances_flat[mask3] - dl) / (fccd - dl)
+ results[mask3] = (distances_flat[mask3] - dl) / (fccd_in_mm - dl)

  # reshape
  results = ak.unflatten(ak.Array(results), lengths) if distances_ak.ndim > 1 else results

- return VectorOfVectors(results) if results.ndim > 1 else Array(results)
+ results = ak.Array(results)
+
+ return units.attach_units(results, "mm")


  def vectorised_active_energy(
- distances: VectorOfVectors | ak.Array,
- edep: VectorOfVectors | ak.Array,
+ distances: ak.Array,
+ edep: ak.Array,
  fccd: float | list,
  dlf: float | list,
  ) -> VectorOfVectors | Array:
@@ -115,7 +110,7 @@

  Returns
  -------
- a :class:`VectorOfVectors` or :class:`Array` of the activeness
+ Activeness for each set of parameters
  """
  # add checks on fccd, dlf
  fccd = np.array(fccd)
@@ -133,20 +128,14 @@

  dl = fccd * dlf

- def _convert(field):
+ def _convert(field, unit):
  # convert to ak
- if isinstance(field, VectorOfVectors):
- field_ak = field.view_as("ak")
- elif not isinstance(field, ak.Array):
- field_ak = ak.Array(field)
- else:
- msg = f"{field} must be an awkward array or VectorOfVectors"
- raise TypeError(msg)
+ field_ak = units.units_conv_ak(field, unit)

  return field_ak, ak.flatten(field_ak).to_numpy()[:, np.newaxis]

- distances_ak, distances_flat = _convert(distances)
- _, edep_flat = _convert(edep)
+ distances_ak, distances_flat = _convert(distances, "mm")
+ _, edep_flat = _convert(edep, "keV")
  runs = ak.num(distances_ak, axis=-1)

  # vectorise fccd or tl
@@ -172,4 +161,4 @@

  energy = ak.sum(ak.unflatten(results * edep_flat, runs), axis=-2)

- return VectorOfVectors(energy) if energy.ndim > 1 else Array(energy.to_numpy())
+ return units.attach_units(energy, "keV")
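
A usage sketch of the renamed fccd_in_mm parameter, with the module path taken from the RECORD listing below; the numbers illustrate the piecewise model (fully dead below dl = fccd * dlf, a linear rise up to the FCCD, fully active beyond):

    import awkward as ak

    from reboost.math import functions

    # distance from each step to the surface, in mm (jagged: steps per hit)
    dist = ak.Array([[0.1, 0.75, 2.0], [1.2]])

    # 1 mm FCCD, half of it fully inactive: 0 below 0.5 mm, then a linear
    # rise to 1 at 1 mm, and 1 in the bulk
    activeness = functions.piecewise_linear_activeness(dist, fccd_in_mm=1.0, dlf=0.5)
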
reboost/math/stats.py CHANGED
@@ -6,7 +6,6 @@ from collections.abc import Callable
  import awkward as ak
  import numpy as np
  from lgdo import Array
- from numpy.typing import ArrayLike

  log = logging.getLogger(__name__)

@@ -72,7 +71,7 @@
  return ak.unflatten(energies_flat_smear, num)


- def gaussian_sample(mu: ArrayLike, sigma: ArrayLike | float, *, seed: int | None = None) -> Array:
+ def gaussian_sample(mu: ak.Array, sigma: ak.Array | float, *, seed: int | None = None) -> ak.Array:
  r"""Generate samples from a gaussian.

  Based on:
@@ -99,9 +98,7 @@ def gaussian_sample(mu: ArrayLike, sigma: ArrayLike | float, *, seed: int | None
  """
  # convert inputs

- if isinstance(mu, Array):
- mu = mu.view_as("np")
- elif isinstance(mu, ak.Array):
+ if isinstance(mu, ak.Array):
  mu = mu.to_numpy()
  elif not isinstance(mu, np.ndarray):
  mu = np.array(mu)
@@ -116,4 +113,4 @@ def gaussian_sample(mu: ArrayLike, sigma: ArrayLike | float, *, seed: int | None

  rng = np.random.default_rng(seed=seed) # Create a random number generator

- return Array(rng.normal(loc=mu, scale=sigma))
+ return ak.Array(rng.normal(loc=mu, scale=sigma))
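
gaussian_sample now returns a plain awkward array rather than an lgdo.Array. A minimal example:

    import awkward as ak

    from reboost.math import stats

    # smear hit energies with a fixed 1.5 keV resolution; reproducible via seed
    mu = ak.Array([100.0, 250.0, 40.0])
    smeared = stats.gaussian_sample(mu, sigma=1.5, seed=42)
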
reboost/shape/cluster.py CHANGED
@@ -7,12 +7,12 @@
  import numpy as np
  from lgdo import VectorOfVectors

+ from .. import units
+
  log = logging.getLogger(__name__)


- def apply_cluster(
- cluster_run_lengths: VectorOfVectors | ak.Array, field: ak.Array | VectorOfVectors
- ) -> VectorOfVectors:
+ def apply_cluster(cluster_run_lengths: ak.Array, field: ak.Array) -> ak.Array:
  """Apply clustering to a field.

  Parameters
@@ -26,24 +26,26 @@
  cluster_run_lengths = cluster_run_lengths.view_as("ak")

  if isinstance(field, VectorOfVectors):
- field = field.view_as("ak")
+ field = field.view_as("ak", with_units=True)

  n_cluster = ak.num(cluster_run_lengths, axis=-1)
  clusters = ak.unflatten(ak.flatten(field), ak.flatten(cluster_run_lengths))

  # reshape into cluster oriented
- return VectorOfVectors(ak.unflatten(clusters, n_cluster))
+ return units.attach_units(
+ ak.Array(ak.unflatten(clusters, n_cluster)), units.get_unit_str(field)
+ )


  def cluster_by_step_length(
- trackid: ak.Array | VectorOfVectors,
- pos_x: ak.Array | VectorOfVectors,
- pos_y: ak.Array | VectorOfVectors,
- pos_z: ak.Array | VectorOfVectors,
- dist: ak.Array | VectorOfVectors | None = None,
+ trackid: ak.Array,
+ pos_x: ak.Array,
+ pos_y: ak.Array,
+ pos_z: ak.Array,
+ dist: ak.Array | None = None,
  surf_cut: float | None = None,
- threshold: float = 0.1,
- threshold_surf: float | None = None,
+ threshold_in_mm: float = 0.1,
+ threshold_surf_in_mm: float | None = None,
  ) -> VectorOfVectors:
  """Perform clustering based on the step length.
@@ -59,20 +61,20 @@
  Parameters
  ----------
  trackid
- index of the track.
+ index of the tracks.
  pos_x
- x position of the step.
+ x position of the steps.
  pos_y
- y position of the step.
+ y position of the steps.
  pos_z
- z position of the step.
+ z position of the steps.
  dist
  distance to the detector surface. Can be `None` in which case all steps are treated as being in the "bulk".
  surf_cut
  Size of the surface region (in mm), if `None` no selection is applied (default).
- threshold
+ threshold_in_mm
  Distance threshold in mm to combine steps in the bulk.
- threshold_surf
+ threshold_surf_in_mm
  Distance threshold in mm to combine steps in the surface.

  Returns
@@ -80,37 +82,22 @@
  Array of the run lengths of each cluster within a hit.
  """
  # type conversions
- if isinstance(pos_x, VectorOfVectors):
- pos_x = pos_x.view_as("ak")
-
- if isinstance(pos_y, VectorOfVectors):
- pos_y = pos_y.view_as("ak")
-
- if isinstance(pos_z, VectorOfVectors):
- pos_z = pos_z.view_as("ak")
-
- if isinstance(trackid, VectorOfVectors):
- trackid = trackid.view_as("ak")
-
- if isinstance(dist, VectorOfVectors):
- dist = dist.view_as("ak")

  pos = np.vstack(
  [
- ak.flatten(pos_x).to_numpy().astype(np.float64),
- ak.flatten(pos_y).to_numpy().astype(np.float64),
- ak.flatten(pos_z).to_numpy().astype(np.float64),
+ ak.flatten(units.units_conv_ak(p, "mm")).to_numpy().astype(np.float64)
+ for p in [pos_x, pos_y, pos_z]
  ]
  ).T

- indices_flat = cluster_by_distance_numba(
+ indices_flat = _cluster_by_distance_numba(
  ak.flatten(ak.local_index(trackid)).to_numpy(),
  ak.flatten(trackid).to_numpy(),
  pos,
  dist_to_surf=ak.flatten(dist).to_numpy() if dist is not None else dist,
  surf_cut=surf_cut,
- threshold=threshold,
- threshold_surf=threshold_surf,
+ threshold=threshold_in_mm,
+ threshold_surf=threshold_surf_in_mm,
  )

  # reshape into being event oriented
@@ -119,11 +106,11 @@

  # number of steps per cluster
  counts = ak.run_lengths(indices)

- return VectorOfVectors(counts)
+ return ak.Array(counts)


  @numba.njit
- def cluster_by_distance_numba(
+ def _cluster_by_distance_numba(
  local_index: np.ndarray,
  trackid: np.ndarray,
  pos: np.ndarray,
@@ -210,10 +197,10 @@


  def step_lengths(
- x_cluster: ak.Array | VectorOfVectors,
- y_cluster: ak.Array | VectorOfVectors,
- z_cluster: ak.Array | VectorOfVectors,
- ) -> VectorOfVectors:
+ x_cluster: ak.Array,
+ y_cluster: ak.Array,
+ z_cluster: ak.Array,
+ ) -> ak.Array:
  """Compute the distance between consecutive steps.

  This is based on calculating the distance between consecutive steps in the same track,
@@ -242,13 +229,14 @@
  data = [x_cluster, y_cluster, z_cluster]

  for idx, var in enumerate(data):
- if isinstance(var, VectorOfVectors):
- data[idx] = var.view_as("ak")
  # check shape
- if data[idx].ndim != 3:
+ if var.ndim != 3:
  msg = f"The input array for step lengths must be 3 dimensional not {data[idx.dim]}"
  raise ValueError(msg)

+ # type convert
+ data[idx] = units.units_conv_ak(data[idx], "mm")
+
  counts = ak.num(data[0], axis=-1)
  data = np.vstack([ak.flatten(ak.flatten(var)).to_numpy() for var in data])
  dist = np.append(np.sqrt(np.sum(np.diff(data, axis=1) ** 2, axis=0)), 0)
@@ -257,4 +245,4 @@
  clusters = ak.unflatten(ak.Array(dist), ak.flatten(counts))

  out = ak.unflatten(clusters, n_cluster)
- return VectorOfVectors(out[:, :, :-1])
+ return units.attach_units(ak.Array(out[:, :, :-1]), "mm")
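
A hedged sketch of the renamed clustering entry points (the numba kernel is now private); the thresholds carry their unit in the argument name:

    import awkward as ak

    from reboost.shape import cluster

    # one hit, three steps on one track; the first two are 0.05 mm apart
    trackid = ak.Array([[1, 1, 1]])
    pos_x = ak.Array([[0.0, 0.05, 5.0]])
    pos_y = ak.Array([[0.0, 0.0, 0.0]])
    pos_z = ak.Array([[0.0, 0.0, 0.0]])

    lengths = cluster.cluster_by_step_length(
        trackid, pos_x, pos_y, pos_z, threshold_in_mm=0.5
    )

    # regroup any step-wise field into clusters; attached units propagate
    edep = ak.Array([[10.0, 5.0, 100.0]])
    edep_clustered = cluster.apply_cluster(lengths, edep)
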
reboost/shape/group.py CHANGED
@@ -8,6 +8,8 @@ from dbetto import AttrsDict
  from lgdo import Table, VectorOfVectors
  from numpy.typing import ArrayLike

+ from reboost import units
+
  log = logging.getLogger(__name__)


@@ -63,13 +65,19 @@ def _sort_data(obj: ak.Array, *, time_name: str = "time", evtid_name: str = "evt
  -------
  sorted awkward array
  """
+ units_dict = {field: units.get_unit_str(obj[field]) for field in obj.fields}
+
  obj = obj[ak.argsort(obj[evtid_name])]
  obj_unflat = ak.unflatten(obj, ak.run_lengths(obj[evtid_name]))

  indices = ak.argsort(obj_unflat[time_name], axis=-1)
  sorted_obj = obj_unflat[indices]

- return ak.flatten(sorted_obj)
+ out = {}
+ for field in sorted_obj.fields:
+ out[field] = units.attach_units(ak.flatten(sorted_obj[field]), units_dict[field])
+
+ return ak.Array(out)


  def group_by_evtid(data: Table | ak.Array, *, evtid_name: str = "evtid") -> Table:
@@ -95,7 +103,9 @@ def group_by_evtid(data: Table | ak.Array, *, evtid_name: str = "evtid") -> Tabl
  The input table must be sorted (by `evtid`).
  """
  # convert to awkward
- obj_ak = data.view_as("ak") if isinstance(data, Table) else data
+ obj_ak = data.view_as("ak", with_units=True) if isinstance(data, Table) else data
+
+ units_dict = {field: units.get_unit_str(obj_ak[field]) for field in obj_ak.fields}

  # extract cumulative lengths
  counts = ak.run_lengths(obj_ak[evtid_name])
@@ -111,10 +121,14 @@
  for f in obj_ak.fields:
  out_tbl.add_field(
  f,
- VectorOfVectors(
- cumulative_length=cumulative_length, flattened_data=obj_ak[f].to_numpy()
+ units.attach_units(
+ VectorOfVectors(
+ cumulative_length=cumulative_length, flattened_data=obj_ak[f].to_numpy()
+ ),
+ units_dict[f],
  ),
  )
+
  return out_tbl


@@ -139,7 +153,7 @@
  data
  :class:`lgdo.Table` or `ak.Array` which must contain the time_name and evtid_name fields
  window
- time window in us used to search for coincident hits
+ time window in us used to search for coincident hits.
  time_name
  name of the timing field
  evtid_name
@@ -155,15 +169,22 @@
  ----
  The input table must be sorted (first by `evtid` then `time`).
  """
- obj = data.view_as("ak") if isinstance(data, Table) else data
+ obj = data.view_as("ak", with_units=True) if isinstance(data, Table) else data
+ units_dict = {field: units.get_unit_str(obj[field]) for field in obj.fields}
+
+ window = window * 1000.0 # convert to ns
  obj = _sort_data(obj, time_name=time_name, evtid_name=evtid_name)

+ # convert to ns
+ obj[time_name] = units.units_conv_ak(obj[time_name], "ns")
+ units_dict[time_name] = "ns"
+
  # get difference
  time_diffs = np.diff(obj[time_name])
  index_diffs = np.diff(obj[evtid_name])

  # index of the last element in each run
- time_change = (time_diffs > window * 1000) & (index_diffs == 0)
+ time_change = (time_diffs > window) & (index_diffs == 0)
  index_change = index_diffs > 0

  # cumulative length is just the index of changes plus 1
@@ -183,7 +204,12 @@
  for f in fields:
  out_tbl.add_field(
  f,
- VectorOfVectors(cumulative_length=cumulative_length, flattened_data=obj[f].to_numpy()),
+ units.attach_units(
+ VectorOfVectors(
+ cumulative_length=cumulative_length, flattened_data=obj[f].to_numpy()
+ ),
+ units_dict[f],
+ ),
  )

  return out_tbl
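
group_by_time still takes the window in us but now converts both the window and the time field to ns before comparing. A minimal example with the default field names from the signatures above:

    import awkward as ak

    from reboost.shape.group import group_by_time

    # two Geant4 events; the second has steps 5 ms apart and splits in two
    stps = ak.Array(
        {
            "evtid": [0, 0, 1, 1],
            "time": [0.0, 10.0, 0.0, 5e6],  # ns
            "edep": [100.0, 20.0, 50.0, 300.0],
        }
    )
    hits = group_by_time(stps, window=10)  # 10 us coincidence window
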
reboost/units.py CHANGED
@@ -4,6 +4,7 @@
  from typing import Any

  import awkward as ak
+ import numpy as np
  import pint
  import pyg4ometry as pg4
  from lgdo import LGDO
@@ -45,7 +46,30 @@ def units_convfact(data: Any | LGDO | ak.Array, target_units: pint.Unit | str) -
  return 1


- def units_conv_ak(data: Any | LGDO | ak.Array, target_units: pint.Unit | str) -> ak.Array:
+ def attach_units(data: ak.Array | LGDO, unit: str | None) -> ak.Array | LGDO:
+ """Convenience function to attach units to `ak.Array` or LGDO.
+
+ Parameters
+ ----------
+ data
+ the array to add units to
+ unit
+ the unit
+ """
+ if isinstance(data, ak.Array):
+ if unit is not None:
+ return ak.with_parameter(data, parameter="units", value=unit)
+ elif isinstance(data, LGDO):
+ if unit is not None:
+ data.attrs["units"] = unit
+ else:
+ msg = f"to attach units data must be an ak.Array or LGDO not {type(data)}"
+ raise TypeError(msg)
+
+ return data
+
+
+ def units_conv_ak(data: Any | LGDO | ak.Array, target_units: pint.Unit | str) -> Any | ak.Array:
  """Calculate numeric conversion factor to reach `target_units`, and apply to data converted to ak.

  Parameters
@@ -62,7 +86,13 @@ def units_conv_ak(data: Any | LGDO | ak.Array, target_units: pint.Unit | str) ->
  return ak.without_parameters(data.view_as("ak") * fact)
  if isinstance(data, ak.Array) and fact != 1:
  return ak.without_parameters(data * fact)
- return data.view_as("ak") if isinstance(data, LGDO) else data
+
+ # try to return ak.Array if possible
+ if isinstance(data, LGDO):
+ return data.view_as("ak")
+ if isinstance(data, np.ndarray):
+ return ak.Array(data)
+ return data


  def unwrap_lgdo(data: Any | LGDO | ak.Array, library: str = "ak") -> tuple[Any, pint.Unit | None]:
@@ -99,6 +129,31 @@ def unwrap_lgdo(data: Any | LGDO | ak.Array, library: str = "ak") -> tuple[Any,
  return ret_data, ret_units


+ def get_unit_str(data: ak.Array | LGDO) -> str | None:
+ """Get the units as a string for an awkward array with attached units.
+
+ Parameters
+ ----------
+ data
+ the array with attached units
+
+ Returns
+ -------
+ a string of the units.
+ """
+ if isinstance(data, ak.Array):
+ attrs = ak.parameters(data)
+ elif isinstance(data, LGDO):
+ attrs = data.attrs
+ else:
+ msg = f"Cannot extract units: {data} is not an LGDO or ak.Array"
+ raise ValueError(msg)
+
+ if "units" in attrs:
+ return attrs["units"]
+ return None
+
+
  def unit_to_lh5_attr(unit: pint.Unit) -> str:
  """Convert Pint unit to a string that can be used as attrs["units"] in an LGDO."""
  # TODO: we should check if this can be always parsed by Unitful.jl
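
The three helpers above form the new units round-trip: attach_units stores the unit as the awkward parameter "units", get_unit_str reads it back, and units_conv_ak rescales using a factor derived by units_convfact. A short demonstration:

    import awkward as ak

    from reboost import units

    # attach: recorded as a parameter on the outermost awkward layout
    edep = units.attach_units(ak.Array([[100.0, 250.0], [40.0]]), "keV")
    assert units.get_unit_str(edep) == "keV"

    # convert: the result is rescaled and returned stripped of parameters
    edep_mev = units.units_conv_ak(edep, "MeV")  # [[0.1, 0.25], [0.04]]
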
reboost-0.9.1.dist-info/METADATA → reboost-0.10.0a0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: reboost
- Version: 0.9.1
+ Version: 0.10.0a0
  Summary: New LEGEND Monte-Carlo simulation post-processing
  Author-email: Manuel Huber <info@manuelhu.de>, Toby Dixon <toby.dixon.23@ucl.ac.uk>, Luigi Pertoldi <gipert@pm.me>
  Maintainer: The LEGEND Collaboration
@@ -26,7 +26,7 @@ Requires-Dist: colorlog
  Requires-Dist: numpy
  Requires-Dist: scipy
  Requires-Dist: numba>=0.60
- Requires-Dist: legend-pydataobj>=1.15.1
+ Requires-Dist: legend-pydataobj>=1.17.2
  Requires-Dist: legend-pygeom-optics>=0.15.0
  Requires-Dist: legend-pygeom-tools>=0.0.26
  Requires-Dist: legend-pygeom-hpges
reboost-0.9.1.dist-info/RECORD → reboost-0.10.0a0.dist-info/RECORD CHANGED
@@ -1,26 +1,26 @@
  reboost/__init__.py,sha256=VZz9uo7i2jgAx8Zi15SptLZnE_qcnGuNWwqkD3rYHFA,278
  reboost/__main__.py,sha256=42koSxY2st4mMIRSAnKz06nP5HppMPxBVFf2jaHljGs,95
- reboost/_version.py,sha256=LwGndsRSpclYq-j3wgRr2nzOXwUYj0Jtg7Kof7R0BEw,704
+ reboost/_version.py,sha256=uvugpfFX1pEScMzJ2JeDcYh7lOMZajzrzSxwt8iaOLU,714
  reboost/build_evt.py,sha256=VXIfK_pfe_Cgym6gI8dESwONZi-v_4fll0Pn09vePQY,3767
  reboost/build_glm.py,sha256=IerSLQfe51ZO7CQP2kmfPnOIVaDtcfw3byOM02Vaz6o,9472
- reboost/build_hit.py,sha256=N_nxvH69SvILVNmyvVfhQwQdD_PDW8tlsqj2ciO5nKE,17409
+ reboost/build_hit.py,sha256=fg0vDPnWCQN7Px0KKLuQvwdsA4DBT2z6IoAb_KjG9CY,17034
  reboost/cli.py,sha256=68EzKiWTHJ2u1RILUv7IX9HaVq6nTTM80_W_MUnWRe4,6382
- reboost/core.py,sha256=NtCDTZ6QQyFhj0BiGuIXBu6WBSrrY7C4GTfaw1u5K-w,15501
+ reboost/core.py,sha256=TPxvZgUaHZdxfQSDdX2zIerQXt3Gq-zQaA6AeXZKNvA,15232
  reboost/iterator.py,sha256=qlEqRv5qOh8eIs-dyVOLYTvH-ZpQDx9fLckpcAdtWjs,6975
  reboost/log_utils.py,sha256=VqS_9OC5NeNU3jcowVOBB0NJ6ssYvNWnirEY-JVduEA,766
  reboost/profile.py,sha256=EOTmjmS8Rm_nYgBWNh6Rntl2XDsxdyed7yEdWtsZEeg,2598
- reboost/units.py,sha256=LUwl6swLQoG09Rt9wcDdu6DTrwDsy-C751BNGzX4sz8,3651
+ reboost/units.py,sha256=Jj1OjYoY6xoWTVGId_w8Cn5ugN0QLGluVhWvLeqeyII,5019
  reboost/utils.py,sha256=vl-_BUOeXcazNs4zN-9k-OVEptdf3FtCeej2QZhClKc,14599
  reboost/daq/__init__.py,sha256=rNPhxx1Yawt3tENYhmOYSum9_TdV57ZU5kjxlWFAGuo,107
  reboost/daq/core.py,sha256=Rs6Q-17fzEod2iX_2WqEmnqKnNRFoWTYURl3wYhFihU,9915
  reboost/daq/utils.py,sha256=KcH6zvlInmD2YiF6V--DSYBTYudJw3G-hp2JGOcES2o,1042
  reboost/hpge/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- reboost/hpge/psd.py,sha256=P7dUJQPvxW6vndJ79r0j7ANZvSuV_IuauERhvWD74j0,26989
- reboost/hpge/surface.py,sha256=feH-kxRRp3HkikRRJ-LCu6zvVONEOYVd3THx12emGTM,8494
+ reboost/hpge/psd.py,sha256=ORGZepsevHq2m5QVPuPLJ-AKLChLpCZZHkQpLR5TTcM,27246
+ reboost/hpge/surface.py,sha256=9c0Ks1xJidqtnwMl3tI0sufSZTekGfSAhuk_b57daWU,8886
  reboost/hpge/utils.py,sha256=0hHu5S1lDOiMMVPgfHY03R5ggyeWX2OwONHVZeFmcpU,5652
  reboost/math/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- reboost/math/functions.py,sha256=OymiYTcA0NXxxm-MBDw5kqyNwHoLCmuv4J48AwnSrbU,5633
- reboost/math/stats.py,sha256=Rq4Wdzv-3aoSK7EsPZCuOEHfnOz3w0moIzCEHbC07xw,3173
+ reboost/math/functions.py,sha256=Nw6p4fxF68TmpHYcpDriisXvudOB8Ep26TlckVFGrME,5045
+ reboost/math/stats.py,sha256=FOC9Qyixt8L6ccqGoavADU3LqWkbJkYfURiR-XIwSpE,3080
  reboost/optmap/__init__.py,sha256=imvuyld-GLw8qdwqW-lXCg2feptcTyQo3wIzPvDHwmY,93
  reboost/optmap/__main__.py,sha256=DfzkXQ7labOe53hd7jH5pAbTW491jjQYSMLyl72L4Rk,111
  reboost/optmap/cli.py,sha256=_7WBlx55eRyW_wWB-ELbFaWXin2d3xsh6Q5bFoNJaHE,8694
@@ -31,14 +31,14 @@ reboost/optmap/mapview.py,sha256=fswwXolA6au8u8gljBKy8PSXC2W7Cy_GwOV86-duYG8,688
  reboost/optmap/numba_pdg.py,sha256=y8cXR5PWE2Liprp4ou7vl9do76dl84vXU52ZJD9_I7A,731
  reboost/optmap/optmap.py,sha256=3clc1RA8jA4YJte83w085MY8zLpG-G7DBkpZ2UeKPpM,12825
  reboost/shape/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- reboost/shape/cluster.py,sha256=nwR1Dnf00SDICGPqpXeM1Q7_DwTtO9uP3wmuML45c3g,8195
- reboost/shape/group.py,sha256=gOCYgir2gZqmW1JXtbNRPlQqP0gmUcbe7RVb9CbY1pU,5540
+ reboost/shape/cluster.py,sha256=EHPgjUrbF-uMXQ3D7VPKkGKgLnea6ZBR-R_1DSuGmLU,7671
+ reboost/shape/group.py,sha256=PY7FGUnralj1BkRl4YtguCnzvOr-ymMd4ZFVmzPCwmM,6352
  reboost/shape/reduction.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  reboost/spms/__init__.py,sha256=8I6WT8i_kUPqEDnSD0aCf6A26cjKjQQZSNrvwZ3o-Ac,415
  reboost/spms/pe.py,sha256=LwqrK1HOZWzGcNZnntaqI6r4rnDww4KW9Mao4xLFbDE,8226
- reboost-0.9.1.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
- reboost-0.9.1.dist-info/METADATA,sha256=nW1jNoPEgO6PPTKtW0U_eDQ8ua35E3MVgumUrkYb1Y4,3877
- reboost-0.9.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- reboost-0.9.1.dist-info/entry_points.txt,sha256=DxhD6BidSWNot9BrejHJjQ7RRLmrMaBIl52T75oWTwM,93
- reboost-0.9.1.dist-info/top_level.txt,sha256=q-IBsDepaY_AbzbRmQoW8EZrITXRVawVnNrB-_zyXZs,8
- reboost-0.9.1.dist-info/RECORD,,
+ reboost-0.10.0a0.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+ reboost-0.10.0a0.dist-info/METADATA,sha256=o7lnoQcj8MkJ-VW7NQKwE75vlHx4qJAV4ycWgb8Yvuc,3880
+ reboost-0.10.0a0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ reboost-0.10.0a0.dist-info/entry_points.txt,sha256=DxhD6BidSWNot9BrejHJjQ7RRLmrMaBIl52T75oWTwM,93
+ reboost-0.10.0a0.dist-info/top_level.txt,sha256=q-IBsDepaY_AbzbRmQoW8EZrITXRVawVnNrB-_zyXZs,8
+ reboost-0.10.0a0.dist-info/RECORD,,
reboost-0.9.1.dist-info/WHEEL → reboost-0.10.0a0.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.10.2)
+ Generator: setuptools (80.9.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
