reboost 0.2.6-py3-none-any.whl → 0.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
reboost/_version.py CHANGED
@@ -17,5 +17,5 @@ __version__: str
  __version_tuple__: VERSION_TUPLE
  version_tuple: VERSION_TUPLE

- __version__ = version = '0.2.6'
- __version_tuple__ = version_tuple = (0, 2, 6)
+ __version__ = version = '0.3.0'
+ __version_tuple__ = version_tuple = (0, 3, 0)
reboost/build_glm.py CHANGED
@@ -238,7 +238,7 @@ def build_glm(
  # start row for each table
  start_row = dict.fromkeys(lh5_table_list, 0)

- vfield = f"stp/vertices/{id_name}"
+ vfield = f"vtx/{id_name}"

  # iterate over the vertex table
  for vert_obj, vidx, n_evtid in LH5Iterator(stp_file, vfield, buffer_len=evtid_buffer):
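In 0.3.0 the vertex table is expected at a top-level "vtx" group instead of "stp/vertices". A minimal sketch of iterating the relocated table with lgdo's LH5Iterator, mirroring the loop in build_glm above; the file name "sim.lh5" and the buffer length are placeholders, not values taken from this diff:

    from lgdo.lh5 import LH5Iterator

    # the vertex table now lives at "vtx" (it was "stp/vertices" in 0.2.x)
    for vert_obj, entry, n_rows in LH5Iterator("sim.lh5", "vtx", buffer_len=10_000):
        ...  # process the first n_rows rows of vert_obj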
reboost/build_hit.py CHANGED
@@ -290,7 +290,7 @@ def build_hit(
  start_row=start_evtid,
  stp_field=in_field,
  n_rows=n_evtid,
- read_vertices=True,
+ read_vertices=False,
  buffer=buffer,
  time_dict=time_dict[proc_name],
  )
@@ -299,8 +299,15 @@ def build_hit(
  if stps is None:
  continue

+ # convert to awkward
+ if time_dict is not None:
+ start_time = time.time()
+
  ak_obj = stps.view_as("ak")

+ if time_dict is not None:
+ time_dict[proc_name].update_field("conv", start_time)
+
  # produce the hit table
  for out_det_idx, out_detector in enumerate(out_detectors):
  # loop over the rows
reboost/iterator.py CHANGED
@@ -4,6 +4,7 @@ import logging
  import time
  import typing

+ import awkward as ak
  from lgdo.lh5 import LH5Store
  from lgdo.types import LGDO, Table
@@ -94,7 +95,7 @@ class GLMIterator:
  # read the glm rows]
  if self.glm_file is not None:
  glm_rows, n_rows_read = self.sto.read(
- f"glm/{self.lh5_group}", self.glm_file, start_row=self.start_row_tmp, n_rows=n_rows
+ f"/glm/{self.lh5_group}", self.glm_file, start_row=self.start_row_tmp, n_rows=n_rows
  )
  else:
  # get the maximum row to read
@@ -124,12 +125,13 @@ class GLMIterator:
  if len(glm_ak) > 0:
  # extract range of stp rows to read
  start = glm_ak.start_row[0]
- n = sum(glm_ak.n_rows)
+ n = ak.sum(glm_ak.n_rows)
+
  if self.time_dict is not None:
  time_start = time.time()

  stp_rows, n_steps = self.sto.read(
- f"{self.stp_field}/{self.lh5_group}",
+ f"/{self.stp_field}/{self.lh5_group}",
  self.stp_file,
  start_row=int(start),
  n_rows=int(n),
@@ -143,7 +145,7 @@

  if self.read_vertices:
  vert_rows, _ = self.sto.read(
- f"{self.stp_field}/vertices",
+ "/vtx",
  self.stp_file,
  start_row=self.start_row,
  n_rows=n_rows,
reboost/optmap/cli.py CHANGED
@@ -166,7 +166,12 @@ def optical_cli() -> None:
  action="store",
  required=True,
  metavar="LGDO_PATH",
- help="path to LGDO inside non-optical LH5 hit file (e.g. /hit/detXX)",
+ help="path to LGDO inside non-optical LH5 hit file (e.g. /stp/detXX)",
+ )
+ convolve_parser.add_argument(
+ "--dist-mode",
+ action="store",
+ default="multinomial+no-fano",
  )
  convolve_parser.add_argument("--output", help="output hit LH5 file", metavar="OUTPUT_HIT")
@@ -267,8 +272,16 @@ def optical_cli() -> None:
  from reboost.optmap.convolve import convolve

  _check_input_file(parser, [args.map, args.edep])
- _check_output_file(parser, args.output)
- convolve(args.map, args.edep, args.edep_lgdo, args.material, args.output, args.bufsize)
+ _check_output_file(parser, args.output, optional=True)
+ convolve(
+ args.map,
+ args.edep,
+ args.edep_lgdo,
+ args.material,
+ args.output,
+ args.bufsize,
+ dist_mode=args.dist_mode,
+ )

  # STEP X: rebin maps
  if args.command == "rebin":
reboost/optmap/convolve.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
  import logging

  import legendoptics.scintillate as sc
+ import numba
  import numpy as np
  import pint
  from legendoptics import lar
@@ -23,7 +24,8 @@ OPTMAP_SUM_CH = -2

  def open_optmap(optmap_fn: str):
  maps = lh5.ls(optmap_fn)
- det_ntuples = [m for m in maps if m not in ("all", "_hitcounts", "_hitcounts_exp")]
+ # TODO: rewrite logic to only accept _<number> instead of a blacklist
+ det_ntuples = [m for m in maps if m not in ("all", "_hitcounts", "_hitcounts_exp", "all_orig")]
  detids = np.array([int(m.lstrip("_")) for m in det_ntuples])
  detidx = np.arange(0, detids.shape[0])
@@ -69,15 +71,16 @@ def iterate_stepwise_depositions(
  optmap_for_convolve,
  scint_mat_params: sc.ComputedScintParams,
  rng: np.random.Generator = None,
+ dist: str = "multinomial",
+ mode: str = "no-fano",
  ):
  # those np functions are not supported by numba, but needed for efficient array access below.
  x0 = structured_to_unstructured(edep_df[["xloc_pre", "yloc_pre", "zloc_pre"]], np.float64)
  x1 = structured_to_unstructured(edep_df[["xloc_post", "yloc_post", "zloc_post"]], np.float64)

  rng = np.random.default_rng() if rng is None else rng
-
  output_map, res = _iterate_stepwise_depositions(
- edep_df, x0, x1, rng, *optmap_for_convolve, scint_mat_params
+ edep_df, x0, x1, rng, *optmap_for_convolve, scint_mat_params, dist, mode
  )
  if res["any_no_stats"] > 0 or res["det_no_stats"] > 0:
  log.warning(
@@ -119,6 +122,9 @@ def _pdgid_to_particle(pdgid: int) -> sc.ParticleIndex:
  return sc.PARTICLE_INDEX_ELECTRON


+ __counts_per_bin_key_type = numba.types.UniTuple(numba.types.int64, 3)
+
+
  # - run with NUMBA_FULL_TRACEBACKS=1 NUMBA_BOUNDSCHECK=1 for testing/checking
  # - cache=True does not work with outer prange, i.e. loading the cached file fails (numba bug?)
  # - the output dictionary is not threadsafe, so parallel=True is not working with it.
@@ -134,6 +140,8 @@ def _iterate_stepwise_depositions(
  optmap_weights,
  optmap_multi_det_exp,
  scint_mat_params: sc.ComputedScintParams,
+ dist: str,
+ mode: str,
  ):
  pdgid_map = {}
  output_map = {}
@@ -160,6 +168,7 @@ def _iterate_stepwise_depositions(
  charge,
  t.edep,
  rng,
+ emission_term_model=("poisson" if mode == "no-fano" else "normal_fano"),
  )
  if scint_times.shape[0] == 0: # short-circuit if we have no photons at all.
  continue
@@ -175,6 +184,11 @@ def _iterate_stepwise_depositions(
  # there are _much_ less unique bins, unfortunately np.unique(..., axis=n) does not work
  # with numba; also np.sort(..., axis=n) also does not work.

+ counts_per_bin = numba.typed.Dict.empty(
+ key_type=__counts_per_bin_key_type,
+ value_type=np.int64,
+ )
+
  # get probabilities from map.
  hitcount = np.zeros((detidx.shape[0], bins.shape[0]), dtype=np.int64)
  for j in prange(bins.shape[0]):
@@ -191,35 +205,65 @@ def _iterate_stepwise_depositions(
  continue
  if px_any == 0.0:
  continue
- if rng.uniform() >= px_any:
- continue
- ph_det += 1
- # we detect this energy deposition; we should at least get one photon out here!
-
- detsel_size = 1
- if np.isfinite(optmap_multi_det_exp):
- detsel_size = rng.geometric(1 - np.exp(-optmap_multi_det_exp))
-
- px_sum = optmap_weights[OPTMAP_SUM_CH, cur_bins[0], cur_bins[1], cur_bins[2]]
- assert px_sum >= 0.0 # should not be negative.
- detp = np.empty(detidx.shape, dtype=np.float64)
- had_det_no_stats = 0
- for d in detidx:
- # normalize so that sum(detp) = 1
- detp[d] = optmap_weights[d, cur_bins[0], cur_bins[1], cur_bins[2]] / px_sum
- if detp[d] < 0.0:
- had_det_no_stats = 1
- detp[d] = 0.0
- det_no_stats += had_det_no_stats
-
- # should be equivalent to rng.choice(detidx, size=(detsel_size, p=detp)
- detsel = detidx[
- np.searchsorted(np.cumsum(detp), rng.random(size=(detsel_size,)), side="right")
- ]
- for d in detsel:
- hitcount[d, j] += 1
- ph_det2 += detsel.shape[0]

+ if dist == "multinomial":
+ if rng.uniform() >= px_any:
+ continue
+ ph_det += 1
+ # we detect this energy deposition; we should at least get one photon out here!
+
+ detsel_size = 1
+ if np.isfinite(optmap_multi_det_exp):
+ detsel_size = rng.geometric(1 - np.exp(-optmap_multi_det_exp))
+
+ px_sum = optmap_weights[OPTMAP_SUM_CH, cur_bins[0], cur_bins[1], cur_bins[2]]
+ assert px_sum >= 0.0 # should not be negative.
+ detp = np.empty(detidx.shape, dtype=np.float64)
+ had_det_no_stats = 0
+ for d in detidx:
+ # normalize so that sum(detp) = 1
+ detp[d] = optmap_weights[d, cur_bins[0], cur_bins[1], cur_bins[2]] / px_sum
+ if detp[d] < 0.0:
+ had_det_no_stats = 1
+ detp[d] = 0.0
+ det_no_stats += had_det_no_stats
+
+ # should be equivalent to rng.choice(detidx, size=(detsel_size, p=detp)
+ detsel = detidx[
+ np.searchsorted(np.cumsum(detp), rng.random(size=(detsel_size,)), side="right")
+ ]
+ for d in detsel:
+ hitcount[d, j] += 1
+ ph_det2 += detsel.shape[0]
+
+ elif dist == "poisson":
+ # store the photon count in each bin, to sample them all at once below.
+ if cur_bins not in counts_per_bin:
+ counts_per_bin[cur_bins] = 1
+ else:
+ counts_per_bin[cur_bins] += 1
+
+ else:
+ msg = "unknown distribution"
+ raise RuntimeError(msg)
+
+ if dist == "poisson":
+ for j, (cur_bins, ph_counts_to_poisson) in enumerate(counts_per_bin.items()):
+ had_det_no_stats = 0
+ had_any = 0
+ for d in detidx:
+ detp = optmap_weights[d, cur_bins[0], cur_bins[1], cur_bins[2]]
+ if detp < 0.0:
+ had_det_no_stats = 1
+ continue
+ pois_cnt = rng.poisson(lam=ph_counts_to_poisson * detp)
+ hitcount[d, j] += pois_cnt
+ ph_det2 += pois_cnt
+ had_any = 1
+ ph_det += had_any
+ det_no_stats += had_det_no_stats
+
+ assert scint_times.shape[0] >= hitcount.shape[1] # TODO: use the right assertion here.
  out_hits_len = np.sum(hitcount)
  if out_hits_len > 0:
  out_times = np.empty(out_hits_len, dtype=np.float64)
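The hunk above splits the per-photon sampling into two strategies: the original "multinomial" path decides detection photon by photon and then picks a channel, while the new "poisson" path accumulates photon counts per map bin and draws one Poisson variate per detector. A plain-numpy sketch of the idea for a single bin, with made-up detection probabilities; it is an illustration only, not the package's numba implementation, and it omits the geometric multi-detection draw of the multinomial path:

    import numpy as np

    rng = np.random.default_rng(42)
    detp = np.array([0.020, 0.005, 0.010])  # hypothetical per-detector detection probabilities
    n_photons = 1000                        # photons emitted in this map bin

    # "multinomial": decide per photon whether anything is detected, then pick a channel
    # with probability proportional to detp (the diff uses a cumsum/searchsorted equivalent).
    px_any = detp.sum()
    hits_multinomial = np.zeros(detp.size, dtype=np.int64)
    for _ in range(n_photons):
        if rng.uniform() >= px_any:
            continue
        hits_multinomial[rng.choice(detp.size, p=detp / px_any)] += 1

    # "poisson": skip the per-photon loop and draw one count per detector,
    # with lam = n_photons * detp[d], as in the new counts_per_bin post-processing loop.
    hits_poisson = rng.poisson(lam=n_photons * detp)

Both strategies have the same expected per-detector counts, so the Poisson variant trades per-photon bookkeeping for a single vectorised draw per occupied bin.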
@@ -285,6 +329,7 @@ def convolve(
  material: str,
  output_file: str | None = None,
  buffer_len: int = int(1e6),
+ dist_mode: str = "multinomial+no-fano",
  ):
  if material not in ["lar", "pen"]:
  msg = f"unknown material {material} for scintillation"
@@ -304,6 +349,11 @@ def convolve(
  log.info("opening map %s", map_file)
  optmap_for_convolve = open_optmap(map_file)

+ # special handling of distributions and flags.
+ dist, mode = dist_mode.split("+")
+ assert dist in ("multinomial", "poisson")
+ assert mode in ("", "no-fano")
+
  log.info("opening energy deposition hit output %s", edep_file)
  it = LH5Iterator(edep_file, edep_path, buffer_len=buffer_len)
  for it_count, (edep_lgdo, edep_events, edep_n_rows) in enumerate(it):
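The new dist_mode string is a distribution name and an optional flag joined by "+". A self-contained sketch of how a value supplied on the command line ends up as the (dist, mode) pair used above; the argparse parser here is a stand-in, and only the option name, its default and the split logic come from this diff:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--dist-mode", action="store", default="multinomial+no-fano")
    args = parser.parse_args(["--dist-mode", "poisson+no-fano"])

    dist, mode = args.dist_mode.split("+")  # -> ("poisson", "no-fano")
    assert dist in ("multinomial", "poisson")
    assert mode in ("", "no-fano")  # an empty mode, e.g. "poisson+", selects the normal_fano emission term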
@@ -311,7 +361,9 @@ def convolve(
  edep_df = edep_lgdo.view_as("pd").iloc[0:edep_n_rows].to_records()

  log.info("start event processing (%d)", it_count)
- output_map = iterate_stepwise_depositions(edep_df, optmap_for_convolve, scint_mat_params)
+ output_map = iterate_stepwise_depositions(
+ edep_df, optmap_for_convolve, scint_mat_params, dist=dist, mode=mode
+ )

  log.info("store output photon hits (%d)", it_count)
  ph_count_o, tbl = get_output_table(output_map)
@@ -319,4 +371,4 @@ def convolve(
  "output photons: %d energy depositions -> %d photons", len(output_map), ph_count_o
  )
  if output_file is not None:
- lh5.write(tbl, "optical", lh5_file=output_file, group="hit", wo_mode="append")
+ lh5.write(tbl, "optical", lh5_file=output_file, group="stp", wo_mode="append")
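Output layout change: the convolved optical hits are now written under the "stp" group instead of "hit". A hedged read-back sketch using the LH5Store API already used elsewhere in this diff; "out.lh5" is a placeholder file name:

    from lgdo.lh5 import LH5Store

    store = LH5Store()
    tbl, n_rows = store.read("/stp/optical", "out.lh5")  # files written by 0.2.x used "/hit/optical"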
reboost/optmap/evt.py CHANGED
@@ -26,7 +26,7 @@ def build_optmap_evt(
  msg = f"temporary output file {lh5_out_file_tmp} already exists"
  raise RuntimeError(msg)

- vert_it = LH5Iterator(lh5_in_file, "stp/vertices", buffer_len=buffer_len)
+ vert_it = LH5Iterator(lh5_in_file, "vtx", buffer_len=buffer_len)
  opti_it = LH5Iterator(lh5_in_file, "stp/optical", buffer_len=buffer_len)

  detectors = [str(d) for d in detectors]
reboost/utils.py CHANGED
@@ -296,7 +296,10 @@ def _check_input_file(parser, file: str | Iterable[str], descr: str = "input") -
  parser.error(f"{descr} file(s) {''.join(not_existing)} missing")


- def _check_output_file(parser, file: str | Iterable[str]) -> None:
+ def _check_output_file(parser, file: str | Iterable[str] | None, optional: bool = False) -> None:
+ if file is None and optional:
+ return
+
  file = (file,) if isinstance(file, str) else file
  for f in file:
  if Path(f).exists():
reboost-0.2.6.dist-info/METADATA → reboost-0.3.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: reboost
- Version: 0.2.6
+ Version: 0.3.0
  Summary: New LEGEND Monte-Carlo simulation post-processing
  Author-email: Manuel Huber <info@manuelhu.de>, Toby Dixon <toby.dixon.23@ucl.ac.uk>, Luigi Pertoldi <gipert@pm.me>
  Maintainer: The LEGEND Collaboration
reboost-0.2.6.dist-info/RECORD → reboost-0.3.0.dist-info/RECORD CHANGED
@@ -1,16 +1,16 @@
  reboost/__init__.py,sha256=RVNl3Qgx_hTUeBGXaWYmiTcmXUDhTfvlAGGC8bo_jP8,316
- reboost/_version.py,sha256=nObnONsicQ3YX6SG5MVBxmIp5dmRacXDauSqZijWQbY,511
+ reboost/_version.py,sha256=AGmG_Lx0-9ztFw_7d9mYbaYuC-2abxE1oXOUNAY29YY,511
  reboost/build_evt.py,sha256=zj3wG_kaV3EoRMQ33AkCNa_2Fv8cLtRuhyRyRmSrOYQ,4797
- reboost/build_glm.py,sha256=kSY9hQjEsOE-0PiblhdBy_SvFIlgXLX6CUlgpxW-_OI,9389
- reboost/build_hit.py,sha256=OyXkYdLIpSBmq3MLNXD_kLjVqxeSQcs7RGAOZSy1Bns,14695
+ reboost/build_glm.py,sha256=VIzRyCc53FQvcXAVbARszNH5wH5Pr0a9WWlSCZuUY5w,9380
+ reboost/build_hit.py,sha256=Zu8WoeFYtFU_xUO3I8OZM5psNaiv1boMJPnBWC58nfQ,14958
  reboost/build_tcm.py,sha256=-PawBHoHj0zsm4XsZu5bco9d9a09STicZchduefSNfI,2951
  reboost/cli.py,sha256=swPJcYzvg18rSOMN-mpe0PCMf1-a9V7osIssX7JP7k0,6459
  reboost/core.py,sha256=7Nclc6RUCOSJ1CWVAX0rFNJGM1LEgqvc4tD04CxEAtg,10766
- reboost/iterator.py,sha256=0KmrekpZwOYZJaP0nmp-SNrr2WmGUKeNUVcqO-OChhY,4757
+ reboost/iterator.py,sha256=D0aDCwtUKeo3_JQQKrmYmFc7jbqcKb5jrpMkUuaJ76s,4762
  reboost/log_utils.py,sha256=VqS_9OC5NeNU3jcowVOBB0NJ6ssYvNWnirEY-JVduEA,766
  reboost/profile.py,sha256=EOTmjmS8Rm_nYgBWNh6Rntl2XDsxdyed7yEdWtsZEeg,2598
  reboost/units.py,sha256=3EH8XlpbsObdu5vLgxhm1600L6UNYD5jng4SjJT_1QE,2202
- reboost/utils.py,sha256=eBw0ZzwhlniTLbjz9tnstCXSYrjSeH4FJ0fkJ9-uqps,8450
+ reboost/utils.py,sha256=VheyCMcaXT5VPXAotiI5YqLKTf45KhTMpwULg2xyB3o,8531
  reboost/hpge/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  reboost/hpge/psd.py,sha256=868OUJzO9TNja0YSrZ3NDGeEAbUtpDZnmvBDm0jCC9E,6856
  reboost/hpge/surface.py,sha256=SZyTmOCTipf27jYaJhtdInzGF1RZ2wKpbtf6HlOQYwM,3662
@@ -19,10 +19,10 @@ reboost/math/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  reboost/math/functions.py,sha256=OymiYTcA0NXxxm-MBDw5kqyNwHoLCmuv4J48AwnSrbU,5633
  reboost/math/stats.py,sha256=iiOEi87x93kqPWeSmlRiA5Oe-R8XR-plm6Z532PhC9M,1401
  reboost/optmap/__init__.py,sha256=imvuyld-GLw8qdwqW-lXCg2feptcTyQo3wIzPvDHwmY,93
- reboost/optmap/cli.py,sha256=wBexh-zrr5ABherEyk9xigxdArvOAKiiRQwAYon9Sro,9408
- reboost/optmap/convolve.py,sha256=5FksUrVIG8ysn42QbWBrAx8M1HfAVJtaJJyE8oJ1NGM,12043
+ reboost/optmap/cli.py,sha256=TszAYZIwHTXwJYQOMKxreCZ6pXlGUb6q6YZ3iW3Bxf4,9670
+ reboost/optmap/convolve.py,sha256=jCH_d04yioB8hsJEPunm0zynA0ne4lx0ldSC1GJG_eY,14129
  reboost/optmap/create.py,sha256=Nm5-xEe8M9q2GFQnUv8oN8qpAz9nZArIrQcPboqRmCQ,17153
- reboost/optmap/evt.py,sha256=m3NWuLEk4zDQJO5vXq_XLLnqmkQwmtdKI3fqmZQBBvc,4707
+ reboost/optmap/evt.py,sha256=9rfAdN9MqL6UuUxUcMDGVwpcuqRVc2RwmEmd87jgYww,4698
  reboost/optmap/mapview.py,sha256=73kpe0_SKDj9bIhEx1ybX1sBP8TyvufiLfps84A_ijA,6798
  reboost/optmap/numba_pdg.py,sha256=y8cXR5PWE2Liprp4ou7vl9do76dl84vXU52ZJD9_I7A,731
  reboost/optmap/optmap.py,sha256=j4rfbQ84PYSpE-BvP4Rdt96ZjPdwy8P4e4eZz1mATys,12817
@@ -30,9 +30,9 @@ reboost/shape/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  reboost/shape/cluster.py,sha256=RIvBlhHzp88aaUZGofp5SD9bimnoiqIOddhQ84jiwoM,8135
  reboost/shape/group.py,sha256=Q3DhEPxbhw3p4bwvpswSd0A-p224l5vRZnfQIEkOVJE,4475
  reboost/shape/reduction.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- reboost-0.2.6.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
- reboost-0.2.6.dist-info/METADATA,sha256=Irdm3IrpsoXDPWyqeYSa4QHI6MdACh0ZYAnGMQwofbg,44251
- reboost-0.2.6.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
- reboost-0.2.6.dist-info/entry_points.txt,sha256=DxhD6BidSWNot9BrejHJjQ7RRLmrMaBIl52T75oWTwM,93
- reboost-0.2.6.dist-info/top_level.txt,sha256=q-IBsDepaY_AbzbRmQoW8EZrITXRVawVnNrB-_zyXZs,8
- reboost-0.2.6.dist-info/RECORD,,
+ reboost-0.3.0.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+ reboost-0.3.0.dist-info/METADATA,sha256=GCn2W1jn_cplxbS1-bJCRjghoEaPgscoTuAhVkizXVY,44251
+ reboost-0.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ reboost-0.3.0.dist-info/entry_points.txt,sha256=DxhD6BidSWNot9BrejHJjQ7RRLmrMaBIl52T75oWTwM,93
+ reboost-0.3.0.dist-info/top_level.txt,sha256=q-IBsDepaY_AbzbRmQoW8EZrITXRVawVnNrB-_zyXZs,8
+ reboost-0.3.0.dist-info/RECORD,,
reboost-0.2.6.dist-info/WHEEL → reboost-0.3.0.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.7.1)
+ Generator: setuptools (80.9.0)
  Root-Is-Purelib: true
  Tag: py3-none-any