reboost-0.2.1-py3-none-any.whl → reboost-0.2.2-py3-none-any.whl
- reboost/_version.py +2 -2
- reboost/build_glm.py +1 -1
- reboost/build_hit.py +6 -2
- reboost/core.py +28 -21
- reboost/math/functions.py +119 -11
- reboost/shape/cluster.py +28 -21
- {reboost-0.2.1.dist-info → reboost-0.2.2.dist-info}/METADATA +1 -1
- {reboost-0.2.1.dist-info → reboost-0.2.2.dist-info}/RECORD +12 -12
- {reboost-0.2.1.dist-info → reboost-0.2.2.dist-info}/WHEEL +0 -0
- {reboost-0.2.1.dist-info → reboost-0.2.2.dist-info}/entry_points.txt +0 -0
- {reboost-0.2.1.dist-info → reboost-0.2.2.dist-info}/licenses/LICENSE +0 -0
- {reboost-0.2.1.dist-info → reboost-0.2.2.dist-info}/top_level.txt +0 -0
reboost/_version.py
CHANGED
reboost/build_glm.py
CHANGED
reboost/build_hit.py
CHANGED
```diff
@@ -246,6 +246,8 @@ def build_hit(
     # loop over processing groups
     for group_idx, proc_group in enumerate(config["processing_groups"]):
         proc_name = proc_group.get("name", "default")
+        msg = f"... starting group {proc_name}"
+        log.info(msg)

         if proc_name not in time_dict:
             time_dict[proc_name] = ProfileDict()
@@ -261,9 +263,11 @@ def build_hit(
                 for mapping in proc_group.get("detector_mapping")
             ]
         )
-
         # loop over detectors
         for in_det_idx, (in_detector, out_detectors) in enumerate(detectors_mapping.items()):
+            msg = f"... processing {in_detector} (to {out_detectors})"
+            log.info(msg)
+
             # get detector objects
             det_objects = core.get_detector_objects(
                 output_detectors=out_detectors,
@@ -286,7 +290,7 @@ def build_hit(
                 time_dict=time_dict[proc_name],
             )
             for stps, _, chunk_idx, _ in glm_it:
-                # converting to
+                # converting to awkward
                 if stps is None:
                     continue

```
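The new messages are emitted at INFO level through the standard `logging` module (reboost's modules use `log = logging.getLogger(__name__)`, as visible in the `functions.py` context below), so they only appear if INFO logging is enabled. A minimal sketch:

```python
import logging

# Surface the new "... starting group X" / "... processing Y (to Z)" messages
# emitted by build_hit; nothing reboost-specific is needed beyond INFO level.
logging.basicConfig(level=logging.INFO)
logging.getLogger("reboost").setLevel(logging.INFO)
```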
reboost/core.py
CHANGED
```diff
@@ -59,6 +59,7 @@ def evaluate_output_column(
     expr = expression.replace(f"{table_name}.", "")

     # get func call and modules to import
+
     func_call, globals_dict = utils.get_function_string(expr)

     msg = f"evaluating table with command {expr} and local_dict {local_dict.keys()}"
@@ -153,7 +154,7 @@ def get_global_objects(


 def get_detectors_mapping(
-    output_detector_expression: str,
+    output_detector_expression: str | list,
     objects: AttrsDict | None = None,
     input_detector_name: str | None = None,
 ) -> dict:
@@ -210,15 +211,21 @@ def get_detectors_mapping(
     input_detector_name = "dets",objects=objs)
     {'dets': ['ch0', 'ch1', 'ch2']}
     """
-    func, globs = utils.get_function_string(output_detector_expression)
     out_names = []
+    if isinstance(output_detector_expression, str):
+        out_list = [output_detector_expression]
+    else:
+        out_list = list(output_detector_expression)
+
+    for expression_tmp in out_list:
+        func, globs = utils.get_function_string(expression_tmp)

-
-
-
-
-
-
+        # if no package was imported its just a name
+        try:
+            objs = evaluate_object(expression_tmp, local_dict={"OBJECTS": objects})
+            out_names.extend(objs)
+        except Exception:
+            out_names.append(expression_tmp)

     # simple one to one mapping
     if input_detector_name is None:
@@ -273,19 +280,19 @@ def get_detector_objects(

     det_objects_dict = {}
     for output_detector in output_detectors:
-
-
-
-
-
-
-
-
-
-
-
-
-        )
+        obj_dict = {}
+        for obj_name, obj_expression in expressions.items():
+            obj_dict[obj_name] = evaluate_object(
+                obj_expression,
+                local_dict={
+                    "ARGS": args,
+                    "DETECTOR": output_detector,
+                    "OBJECTS": global_objects,
+                    "DETECTOR_OBJECTS": AttrsDict(obj_dict),
+                },
+            )
+
+        det_objects_dict[output_detector] = AttrsDict(obj_dict)
     res = AttrsDict(det_objects_dict)

     if time_dict is not None:
```
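The widened `output_detector_expression: str | list` signature of `get_detectors_mapping` can now be fed a plain list of names; expressions that do not evaluate fall back to being used as names (the new try/except). A minimal sketch, with invented detector names and the expected result inferred from the docstring example rather than from a run:

```python
from reboost import core

# New in 0.2.2: a list of expressions/names is accepted, not only a single string.
mapping = core.get_detectors_mapping(
    ["ch0", "ch1", "ch2"],
    input_detector_name="dets",
)
print(mapping)  # expected, by analogy with the docstring example: {'dets': ['ch0', 'ch1', 'ch2']}
```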
reboost/math/functions.py
CHANGED
```diff
@@ -11,7 +11,7 @@ log = logging.getLogger(__name__)


 def piecewise_linear_activeness(
-    distances: VectorOfVectors | ak.Array, fccd: float,
+    distances: VectorOfVectors | ak.Array, fccd: float, dlf: float
 ) -> VectorOfVectors | Array:
     r"""Piecewise linear HPGe activeness model.

@@ -21,14 +21,15 @@ def piecewise_linear_activeness(

         f(d) =
         \begin{cases}
-        0 & \text{if } d <
-        \frac{x-l}{f - l} & \text{if } t \leq d < f, \\
+        0 & \text{if } d < f*l, \\
+        \frac{x-f*l}{f - f*l} & \text{if } t \leq d < f, \\
         1 & \text{otherwise.}
         \end{cases}

     Where:
+
     - `d`: Distance to surface,
-    - `l`:
+    - `l`: Dead layer fraction, the fraction of the FCCD which is fully inactive
     - `f`: Full charge collection depth (FCCD).

     In addition, any distance of `np.nan` (for example if the calculation
@@ -43,8 +44,8 @@ def piecewise_linear_activeness(

     fccd
         the value of the FCCD
-
-        the
+    dlf
+        the fraction of the FCCD which is fully inactive.

     Returns
     -------
@@ -58,10 +59,117 @@ def piecewise_linear_activeness(
     else:
         distances_ak = distances

+    dl = fccd * dlf
+    distances_flat = (
+        ak.flatten(distances_ak).to_numpy() if distances_ak.ndim > 1 else distances_ak.to_numpy()
+    )
+
     # compute the linear piecewise
-    results =
-
-
-
+    results = np.full_like(distances_flat, np.nan, dtype=np.float64)
+    lengths = ak.num(distances_ak) if distances_ak.ndim > 1 else len(distances_ak)
+
+    mask1 = (distances_flat > fccd) | np.isnan(distances_flat)
+    mask2 = (distances_flat <= dl) & (~mask1)
+    mask3 = ~(mask1 | mask2)
+
+    # assign the values
+    results[mask1] = 1
+    results[mask2] = 0
+    results[mask3] = (distances_flat[mask3] - dl) / (fccd - dl)
+
+    # reshape
+    results = ak.unflatten(ak.Array(results), lengths) if distances_ak.ndim > 1 else results
+
+    return VectorOfVectors(results) if results.ndim > 1 else Array(results)
+
+
+def vectorised_active_energy(
+    distances: VectorOfVectors | ak.Array,
+    edep: VectorOfVectors | ak.Array,
+    fccd: float | list,
+    dlf: float | list,
+) -> VectorOfVectors | Array:
+    r"""Energy after piecewise linear HPGe activeness model vectorised over FCCD or dead layer fraction.
+
+    Based on the same linear activeness function as :func:`piecewise_linear_activeness`. However,
+    this function vectorises the calculation to provide a range of output energies varying the fccd or
+    dead layer fraction. Either fccd or dlf can be a list. This adds an extra dimension to the
+    output, with the same length as the input fccd or dlf list.
+
+    .. warning:
+        It is not currently implemented to vary both dlf and fccd.
+
+    Parameters
+    ----------
+    distances
+        the distance from each step to the detector surface. Can be either a
+        `awkward` array, or a LGDO `VectorOfVectors` . The computation
+        is performed for each element and the first dimension is preserved, a
+        new dimension is added vectorising over the FCCD or DLF.
+    edep
+        the energy for each step.
+    fccd
+        the value of the FCCD, can be a list.
+    dlf
+        the fraction of the FCCD which is fully inactive, can be a list.
+
+    Returns
+    -------
+    a :class:`VectorOfVectors` or :class:`Array` of the activeness
+    """
+    # add checks on fccd, dlf
+    fccd = np.array(fccd)
+    dlf = np.array(dlf)
+
+    if (fccd.ndim + dlf.ndim) > 1:
+        msg = "Currently only one of FCCD and dlf can be varied"
+        raise NotImplementedError(msg)
+
+    # convert fccd and or dlf to the right shape
+    if fccd.ndim == 0:
+        if dlf.ndim == 0:
+            dlf = dlf[np.newaxis]
+        fccd = np.full_like(dlf, fccd)
+
+    dl = fccd * dlf
+
+    def _convert(field):
+        # convert to ak
+        if isinstance(field, VectorOfVectors):
+            field_ak = field.view_as("ak")
+        elif not isinstance(field, ak.Array):
+            field_ak = ak.Array(field)
+        else:
+            msg = f"{field} must be an awkward array or VectorOfVectors"
+            raise TypeError(msg)
+
+        return field_ak, ak.flatten(field_ak).to_numpy()[:, np.newaxis]
+
+    distances_ak, distances_flat = _convert(distances)
+    _, edep_flat = _convert(edep)
+    runs = ak.num(distances_ak, axis=-1)
+
+    # vectorise fccd or tl
+
+    fccd_list = np.tile(fccd, (len(distances_flat), 1))
+    dl_list = np.tile(dl, (len(distances_flat), 1))
+    distances_shaped = np.tile(distances_flat, (1, len(dl)))
+
+    # compute the linear piecewise
+    results = np.full_like(fccd_list, np.nan, dtype=np.float64)
+
+    # Masks
+    mask1 = (distances_shaped > fccd_list) | np.isnan(distances_shaped)
+    mask2 = ((distances_shaped <= dl_list) | (fccd_list == dl_list)) & ~mask1
+    mask3 = ~(mask1 | mask2)  # Safe, avoids recomputing anything expensive
+
+    # Assign values
+    results[mask1] = 1.0
+    results[mask2] = 0.0
+    results[mask3] = (distances_shaped[mask3] - dl_list[mask3]) / (
+        fccd_list[mask3] - dl_list[mask3]
     )
-
+
+    energy = ak.sum(ak.unflatten(results * edep_flat, runs), axis=-2)
+
+    return VectorOfVectors(energy) if energy.ndim > 1 else Array(energy.to_numpy())
```
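As a sanity check on the activeness model above: with the dead layer defined as `dl = fccd * dlf`, a step is fully inactive below `dl`, fully active above `fccd` (or when the distance is `nan`), and scales linearly in between. A standalone NumPy sketch mirroring the mask logic in `piecewise_linear_activeness` (the FCCD and DLF values are made up):

```python
import numpy as np

fccd, dlf = 1.0, 0.5          # assumed FCCD (mm) and dead-layer fraction
dl = fccd * dlf               # fully inactive depth: 0.5 mm

d = np.array([0.2, 0.75, 2.0, np.nan])   # distances to the surface

act = np.full_like(d, np.nan)
dead = d <= dl                            # fully dead region -> 0
full = (d > fccd) | np.isnan(d)           # full charge collection (nan treated as bulk) -> 1
lin = ~(dead | full)                      # linear transition between dl and fccd

act[full] = 1.0
act[dead] = 0.0
act[lin] = (d[lin] - dl) / (fccd - dl)

print(act)  # [0.  0.5 1.  1. ]
```

`vectorised_active_energy` applies the same per-step activeness, multiplies by `edep` and sums over the steps of each event, so the returned energy gains one trailing axis with one entry per supplied FCCD (or DLF) value.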
reboost/shape/cluster.py
CHANGED
```diff
@@ -40,17 +40,17 @@ def cluster_by_step_length(
     pos_x: ak.Array | VectorOfVectors,
     pos_y: ak.Array | VectorOfVectors,
     pos_z: ak.Array | VectorOfVectors,
-    dist: ak.Array | VectorOfVectors,
-    surf_cut: float =
+    dist: ak.Array | VectorOfVectors | None = None,
+    surf_cut: float | None = None,
     threshold: float = 0.1,
-    threshold_surf: float =
+    threshold_surf: float | None = None,
 ) -> VectorOfVectors:
     """Perform clustering based on the step length.

     Steps are clustered based on distance, if either:
     - a step is in a new track,
     - a step moves from surface to bulk region (or visa versa),
-    - the distance between the
+    - the distance between the current step and the first step of the current cluster is above a threshold.

     Then a new cluster is started. The surface region is defined as the volume
     less than surf_cut distance to the surface. This allows for a fine tuning of the
@@ -67,9 +67,9 @@ def cluster_by_step_length(
     pos_z
         z position of the step.
     dist
-        distance to the detector surface.
+        distance to the detector surface. Can be `None` in which case all steps are treated as being in the "bulk".
     surf_cut
-        Size of the surface region (in mm)
+        Size of the surface region (in mm), if `None` no selection is applied (default).
     threshold
         Distance threshold in mm to combine steps in the bulk.
     threshold_surf
@@ -107,7 +107,7 @@ def cluster_by_step_length(
         ak.flatten(ak.local_index(trackid)).to_numpy(),
         ak.flatten(trackid).to_numpy(),
         pos,
-        ak.flatten(dist).to_numpy(),
+        dist_to_surf=ak.flatten(dist).to_numpy() if dist is not None else dist,
         surf_cut=surf_cut,
         threshold=threshold,
         threshold_surf=threshold_surf,
@@ -127,10 +127,10 @@ def cluster_by_distance_numba(
     local_index: np.ndarray,
     trackid: np.ndarray,
     pos: np.ndarray,
-    dist_to_surf: np.ndarray,
-    surf_cut: float =
+    dist_to_surf: np.ndarray | None,
+    surf_cut: float | None = None,
     threshold: float = 0.1,
-    threshold_surf: float =
+    threshold_surf: float | None = None,
 ) -> np.ndarray:
     """Cluster steps by the distance between points in the same track.

@@ -146,9 +146,9 @@ def cluster_by_distance_numba(
     pos
         `(n,3)` size array of the positions
     dist_to_surf
-        1D array of the distance to the detector surface.
+        1D array of the distance to the detector surface. Can be `None` in which case all steps are treated as being in the bulk.
     surf_cut
-        Size of the surface region (in mm)
+        Size of the surface region (in mm), if `None` no selection is applied.
     threshold
         Distance threshold in mm to combine steps in the bulk.
     threshold_surf
@@ -172,14 +172,20 @@ def cluster_by_distance_numba(
     is_surf_prev = False

     for idx in range(n):
-
-
-
-
-
-
-
-
+        # consider a surface and a bulk region
+        if dist_to_surf is not None:
+            thr = threshold if dist_to_surf[idx] > surf_cut else threshold_surf
+
+            new_cluster = (
+                (trackid[idx] != trackid_prev)
+                or (is_surf_prev and (dist_to_surf[idx] > surf_cut))
+                or ((not is_surf_prev) and (dist_to_surf[idx] < surf_cut))
+                or (_dist(pos[idx, :], pos_prev) > thr)
+            )
+        # basic clustering without split into surface / bulk
+        else:
+            thr = threshold
+            new_cluster = (trackid[idx] != trackid_prev) or (_dist(pos[idx, :], pos_prev) > thr)

         # New hit, reset cluster index
         if idx == 0 or local_index[idx] == 0:
@@ -197,7 +203,8 @@ def cluster_by_distance_numba(

         # Update previous values
         trackid_prev = trackid[idx]
-
+        if dist_to_surf is not None:
+            is_surf_prev = dist_to_surf[idx] < surf_cut

     return out

```
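With `dist_to_surf=None` the clustering rule reduces to "new track, or farther than `threshold` from the first step of the current cluster". A simplified pure-Python sketch of that branch (not the numba implementation; positions, track ids and the cluster numbering are only illustrative):

```python
import numpy as np

def _dist(a, b):
    # Euclidean distance between two 3D points
    return float(np.sqrt(np.sum((a - b) ** 2)))

trackid = np.array([1, 1, 1, 2])
pos = np.array([[0.0, 0, 0], [0.05, 0, 0], [0.3, 0, 0], [0.3, 0, 0]])
threshold = 0.1  # mm

cluster = np.zeros(len(trackid), dtype=np.int32)
cl_idx, trackid_prev, pos_prev = 0, -1, pos[0]

for i in range(len(trackid)):
    new_cluster = (trackid[i] != trackid_prev) or (_dist(pos[i], pos_prev) > threshold)
    if new_cluster:
        cl_idx += 1
        pos_prev = pos[i]      # clusters are compared against their first step
    cluster[i] = cl_idx
    trackid_prev = trackid[i]

print(cluster)  # [1 1 2 3]
```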
{reboost-0.2.1.dist-info → reboost-0.2.2.dist-info}/METADATA
CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: reboost
-Version: 0.2.1
+Version: 0.2.2
 Summary: New LEGEND Monte-Carlo simulation post-processing
 Author-email: Manuel Huber <info@manuelhu.de>, Toby Dixon <toby.dixon.23@ucl.ac.uk>, Luigi Pertoldi <gipert@pm.me>
 Maintainer: The LEGEND Collaboration
```
{reboost-0.2.1.dist-info → reboost-0.2.2.dist-info}/RECORD
CHANGED
```diff
@@ -1,11 +1,11 @@
 reboost/__init__.py,sha256=RVNl3Qgx_hTUeBGXaWYmiTcmXUDhTfvlAGGC8bo_jP8,316
-reboost/_version.py,sha256=
+reboost/_version.py,sha256=OjGGK5TcHVG44Y62aAqeJH4CskkZoY9ydbHOtCDew50,511
 reboost/build_evt.py,sha256=5Q3T0LCl8xMtyRRhcs6layC1xh4vp2f26PgB1yab2zs,4798
-reboost/build_glm.py,sha256=
-reboost/build_hit.py,sha256=
+reboost/build_glm.py,sha256=5cuHnMogRKGaVCi4eqTrgDroLFy4s8mGp2nKcKfKQGI,9243
+reboost/build_hit.py,sha256=yAjYmBJUZvnyTuU_kpuJle_TKeSU027OIJr7No9z2Ms,13476
 reboost/build_tcm.py,sha256=N1rZwht88ZaKWmURch1VrVUbQROXfP56D0aj_JLsRhU,2951
 reboost/cli.py,sha256=HTZ05DRnDodcf_D6BJCCavx5HqhKDadJCgf-oh8HTJk,6365
-reboost/core.py,sha256=
+reboost/core.py,sha256=7Nclc6RUCOSJ1CWVAX0rFNJGM1LEgqvc4tD04CxEAtg,10766
 reboost/iterator.py,sha256=cqfh3c0uLP67S0YGaw05-McZQzdMb8BISULIm3PEbKA,3990
 reboost/log_utils.py,sha256=VqS_9OC5NeNU3jcowVOBB0NJ6ssYvNWnirEY-JVduEA,766
 reboost/profile.py,sha256=EOTmjmS8Rm_nYgBWNh6Rntl2XDsxdyed7yEdWtsZEeg,2598
@@ -14,7 +14,7 @@ reboost/hpge/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 reboost/hpge/psd.py,sha256=vFs8Y5XVW261pB6aOvWmIDzqOaBg-gEOLhL9PbjlEKI,2113
 reboost/hpge/surface.py,sha256=SZyTmOCTipf27jYaJhtdInzGF1RZ2wKpbtf6HlOQYwM,3662
 reboost/math/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-reboost/math/functions.py,sha256=
+reboost/math/functions.py,sha256=OymiYTcA0NXxxm-MBDw5kqyNwHoLCmuv4J48AwnSrbU,5633
 reboost/math/stats.py,sha256=iiOEi87x93kqPWeSmlRiA5Oe-R8XR-plm6Z532PhC9M,1401
 reboost/optmap/__init__.py,sha256=imvuyld-GLw8qdwqW-lXCg2feptcTyQo3wIzPvDHwmY,93
 reboost/optmap/cli.py,sha256=wBexh-zrr5ABherEyk9xigxdArvOAKiiRQwAYon9Sro,9408
@@ -25,12 +25,12 @@ reboost/optmap/mapview.py,sha256=73kpe0_SKDj9bIhEx1ybX1sBP8TyvufiLfps84A_ijA,679
 reboost/optmap/numba_pdg.py,sha256=y8cXR5PWE2Liprp4ou7vl9do76dl84vXU52ZJD9_I7A,731
 reboost/optmap/optmap.py,sha256=j4rfbQ84PYSpE-BvP4Rdt96ZjPdwy8P4e4eZz1mATys,12817
 reboost/shape/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-reboost/shape/cluster.py,sha256=
+reboost/shape/cluster.py,sha256=RIvBlhHzp88aaUZGofp5SD9bimnoiqIOddhQ84jiwoM,8135
 reboost/shape/group.py,sha256=bSmFCl_yi1hGaKudjiicDEJsiBNyAHiKYdr8ZuH4pSM,4406
 reboost/shape/reduction.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-reboost-0.2.
-reboost-0.2.
-reboost-0.2.
-reboost-0.2.
-reboost-0.2.
-reboost-0.2.
+reboost-0.2.2.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+reboost-0.2.2.dist-info/METADATA,sha256=F4NsVNDavleMAL-xEYSpR5ElKHtjQXLevD_-XAaT3y4,44219
+reboost-0.2.2.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+reboost-0.2.2.dist-info/entry_points.txt,sha256=DxhD6BidSWNot9BrejHJjQ7RRLmrMaBIl52T75oWTwM,93
+reboost-0.2.2.dist-info/top_level.txt,sha256=q-IBsDepaY_AbzbRmQoW8EZrITXRVawVnNrB-_zyXZs,8
+reboost-0.2.2.dist-info/RECORD,,
```
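For reference, the `sha256=` values in RECORD follow the wheel convention: URL-safe base64 of the SHA-256 digest with the `=` padding stripped. A small sketch for spot-checking an entry against an unpacked wheel (the path is only an example):

```python
import base64
import hashlib

def record_hash(path: str) -> str:
    # RECORD-style hash: urlsafe base64 of the sha256 digest, '=' padding removed
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

# record_hash("reboost/_version.py")  # compare with the RECORD line above
```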
The remaining dist-info files (WHEEL, entry_points.txt, licenses/LICENSE, top_level.txt) are unchanged between 0.2.1 and 0.2.2.