multipers 2.3.2b1__cp313-cp313-win_amd64.whl → 2.3.3__cp313-cp313-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of multipers has been flagged as possibly problematic by the registry.
- multipers/_signed_measure_meta.py +22 -8
- multipers/array_api/__init__.py +25 -2
- multipers/array_api/numpy.py +70 -0
- multipers/array_api/torch.py +82 -0
- multipers/filtrations/density.py +11 -52
- multipers/filtrations/filtrations.py +21 -8
- multipers/function_rips.cp313-win_amd64.pyd +0 -0
- multipers/grids.cp313-win_amd64.pyd +0 -0
- multipers/grids.pyx +91 -43
- multipers/gudhi/gudhi/Multi_critical_filtration.h +1 -1
- multipers/io.cp313-win_amd64.pyd +0 -0
- multipers/ml/mma.py +1 -1
- multipers/ml/signed_measures.py +106 -26
- multipers/mma_structures.cp313-win_amd64.pyd +0 -0
- multipers/mma_structures.pyx +2 -2
- multipers/mma_structures.pyx.tp +1 -1
- multipers/multiparameter_module_approximation.cp313-win_amd64.pyd +0 -0
- multipers/multiparameter_module_approximation.pyx +2 -1
- multipers/plots.py +164 -37
- multipers/point_measure.cp313-win_amd64.pyd +0 -0
- multipers/point_measure.pyx +71 -2
- multipers/simplex_tree_multi.cp313-win_amd64.pyd +0 -0
- multipers/simplex_tree_multi.pxd +2 -2
- multipers/simplex_tree_multi.pyx +236 -36
- multipers/simplex_tree_multi.pyx.tp +18 -7
- multipers/slicer.cp313-win_amd64.pyd +0 -0
- multipers/slicer.pxd +20 -20
- multipers/slicer.pyx +67 -63
- multipers/slicer.pyx.tp +8 -4
- multipers/tbb12.dll +0 -0
- multipers/tbbbind_2_5.dll +0 -0
- multipers/tbbmalloc.dll +0 -0
- multipers/tbbmalloc_proxy.dll +0 -0
- {multipers-2.3.2b1.dist-info → multipers-2.3.3.dist-info}/METADATA +6 -1
- {multipers-2.3.2b1.dist-info → multipers-2.3.3.dist-info}/RECORD +38 -38
- {multipers-2.3.2b1.dist-info → multipers-2.3.3.dist-info}/WHEEL +1 -1
- {multipers-2.3.2b1.dist-info → multipers-2.3.3.dist-info}/licenses/LICENSE +0 -0
- {multipers-2.3.2b1.dist-info → multipers-2.3.3.dist-info}/top_level.txt +0 -0
multipers/grids.pyx
CHANGED

@@ -11,6 +11,7 @@ from typing import Iterable,Literal,Optional
 from itertools import product
 from multipers.array_api import api_from_tensor, api_from_tensors
 from multipers.array_api import numpy as npapi
+from multipers.array_api import check_keops
 
 available_strategies = ["regular","regular_closest", "regular_left", "partition", "quantile", "precomputed"]
 Lstrategies = Literal["regular","regular_closest", "regular_left", "partition", "quantile", "precomputed"]
@@ -123,8 +124,7 @@ def compute_grid(
     except TypeError:
         pass
 
-
-    return _compute_grid_numpy(
+    grid = _compute_grid_numpy(
         initial_grid,
         resolution=resolution,
         strategy = strategy,
@@ -132,9 +132,9 @@ def compute_grid(
         _q_factor=_q_factor,
         drop_quantiles=drop_quantiles,
         dense = dense,
-
-    from multipers.torch.diff_grids import get_grid
-    grid = get_grid(strategy)(initial_grid,resolution)
+    )
+    # from multipers.torch.diff_grids import get_grid
+    # grid = get_grid(strategy)(initial_grid,resolution)
     if dense:
         grid = todense(grid)
     return grid
@@ -168,41 +168,41 @@ def _compute_grid_numpy(
     Iterable[array[float, ndim=1]] : the 1d-grid for each parameter.
     """
     num_parameters = len(filtrations_values)
+    api = api_from_tensors(*filtrations_values)
     try:
         a,b=drop_quantiles
     except:
         a,b=drop_quantiles,drop_quantiles
 
     if a != 0 or b != 0:
-        boxes =
-        min_filtration, max_filtration =
+        boxes = api.astensor([api.quantile_closest(filtration, [a, b], axis=1) for filtration in filtrations_values])
+        min_filtration, max_filtration = api.minvalues(boxes, axis=(0,1)), api.maxvalues(boxes, axis=(0,1)) # box, birth/death, filtration
         filtrations_values = [
             filtration[(m<filtration) * (filtration <M)]
             for filtration, m,M in zip(filtrations_values, min_filtration, max_filtration)
         ]
 
-    to_unique = lambda f : np.unique(f) if isinstance(f,np.ndarray) else f.unique()
     ## match doesn't work with cython BUG
     if strategy == "exact":
-        F=tuple(
+        F=tuple(api.unique(f) for f in filtrations_values)
     elif strategy == "quantile":
-        F = tuple(
+        F = tuple(api.unique(f) for f in filtrations_values)
         max_resolution = [min(len(f),r) for f,r in zip(F,resolution)]
-        F = tuple(
+        F = tuple( api.quantile_closest(f, q=api.linspace(0,1,int(r*_q_factor)), axis=0) for f,r in zip(F, resolution) )
         if unique:
-            F = tuple(
+            F = tuple(api.unique(f) for f in F)
         if np.all(np.asarray(max_resolution) > np.asarray([len(f) for f in F])):
             return _compute_grid_numpy(filtrations_values=filtrations_values, resolution=resolution, strategy="quantile",_q_factor=1.5*_q_factor)
     elif strategy == "regular":
-        F = tuple(
+        F = tuple(_todo_regular(f,r,api) for f,r in zip(filtrations_values, resolution))
     elif strategy == "regular_closest":
-        F = tuple(_todo_regular_closest(f,r, unique) for f,r in zip(filtrations_values, resolution))
+        F = tuple(_todo_regular_closest(f,r, unique,api) for f,r in zip(filtrations_values, resolution))
     elif strategy == "regular_left":
-        F = tuple(_todo_regular_left(f,r, unique) for f,r in zip(filtrations_values, resolution))
-    elif strategy == "torch_regular_closest":
-
+        F = tuple(_todo_regular_left(f,r, unique,api) for f,r in zip(filtrations_values, resolution))
+    # elif strategy == "torch_regular_closest":
+    #     F = tuple(_torch_regular_closest(f,r, unique) for f,r in zip(filtrations_values, resolution))
     elif strategy == "partition":
-        F = tuple(_todo_partition(f,r, unique) for f,r in zip(filtrations_values, resolution))
+        F = tuple(_todo_partition(f,r, unique, api) for f,r in zip(filtrations_values, resolution))
     elif strategy == "precomputed":
        F=filtrations_values
    else:
@@ -214,43 +214,85 @@ def _compute_grid_numpy(
 def todense(grid, bool product_order=False):
     if len(grid) == 0:
         return np.empty(0)
-
-
-
-
-
-
-    if
-
-
-
-    return
-
-
-
-
-
+    api = api_from_tensors(*grid)
+    # if product_order:
+    #     if not api.backend ==np:
+    #         raise NotImplementedError("only numpy here.")
+    #     return np.fromiter(product(*grid), dtype=np.dtype((dtype, len(grid))), count=np.prod([len(f) for f in grid]))
+    return api.cartesian_product(*grid)
+    # if not isinstance(grid[0], np.ndarray):
+    #     import torch
+    #     assert isinstance(grid[0], torch.Tensor)
+    #     from multipers.torch.diff_grids import todense
+    #     return todense(grid)
+    # dtype = grid[0].dtype
+    # if product_order:
+    #     return np.fromiter(product(*grid), dtype=np.dtype((dtype, len(grid))), count=np.prod([len(f) for f in grid]))
+    # mesh = np.meshgrid(*grid)
+    # coordinates = np.stack(mesh, axis=-1).reshape(-1, len(grid)).astype(dtype)
+    # return coordinates
+
+
+
+def _todo_regular(f, int r, api):
+    if api.has_grad(f):
+        from warnings import warn
+        warn("`strategy=regular` is not differentiable. Removing grad.")
+    with api.no_grad():
+        return api.linspace(api.min(f), api.max(f), r)
+
+def _project_on_1d_grid(f,grid, bool unique, api):
+    # api=api_from_tensors(f,grid)
+    if f.ndim != 1:
+        raise ValueError(f"Got ndim!=1. {f=}")
+    f = api.unique(f)
+    with api.no_grad():
+        _f = api.LazyTensor(f[:, None, None])
+        _f_reg = api.LazyTensor(grid[None, :, None])
+        indices = (_f - _f_reg).abs().argmin(0).ravel()
+    f = api.cat([f, api.tensor([api.inf], dtype=f.dtype)])
+    f_proj = f[indices]
+    if unique:
+        f_proj = api.unique(f_proj)
+    return f_proj
+
+def _todo_regular_closest_keops(f, int r, bool unique, api):
+    f = api.astensor(f)
+    with api.no_grad():
+        f_regular = api.linspace(api.min(f), api.max(f), r, device = api.device(f),dtype=f.dtype)
+    return _project_on_1d_grid(f,f_regular,unique,api)
+
+def _todo_regular_closest_old(some_float[:] f, int r, bool unique, api=None):
     f_array = np.asarray(f)
     f_regular = np.linspace(np.min(f), np.max(f),num=r, dtype=f_array.dtype)
-    f_regular_closest = np.asarray([f[<int64_t>np.argmin(np.abs(f_array-f_regular[i]))] for i in range(r)])
+    f_regular_closest = np.asarray([f[<int64_t>np.argmin(np.abs(f_array-f_regular[i]))] for i in range(r)], dtype=f_array.dtype)
     if unique: f_regular_closest = np.unique(f_regular_closest)
     return f_regular_closest
 
-def _todo_regular_left(
+def _todo_regular_left(f, int r, bool unique,api):
+    sorted_f = api.sort(f)
+    with api.no_grad():
+        f_regular = api.linspace(sorted_f[0],sorted_f[-1],r, dtype=sorted_f.dtype, device=api.device(sorted_f))
+    idx=api.searchsorted(sorted_f,f_regular)
+    f_regular_closest = sorted_f[idx]
+    if unique: f_regular_closest = api.unique(f_regular_closest)
+    return f_regular_closest
+
+def _todo_regular_left_old(some_float[:] f, int r, bool unique):
     sorted_f = np.sort(f)
     f_regular = np.linspace(sorted_f[0],sorted_f[-1],num=r, dtype=sorted_f.dtype)
     f_regular_closest = sorted_f[np.searchsorted(sorted_f,f_regular)]
     if unique: f_regular_closest = np.unique(f_regular_closest)
     return f_regular_closest
 
-def
-
-
-
-
-    return
+def _todo_partition(x, int resolution, bool unique, api):
+    if api.has_grad(x):
+        from warnings import warn
+        warn("`strategy=partition` is not differentiable. Removing grad.")
+    out = _todo_partition_(api.asnumpy(x), resolution, unique)
+    return api.from_numpy(out)
 
-def
+def _todo_partition_(some_float[:] data,int resolution, bool unique):
     if data.shape[0] < resolution: resolution=data.shape[0]
     k = data.shape[0] // resolution
     partitions = np.partition(data, k)
@@ -259,6 +301,12 @@ def _todo_partition(some_float[:] data,int resolution, bool unique):
     return f
 
 
+if check_keops():
+    _todo_regular_closest = _todo_regular_closest_keops
+else:
+    _todo_regular_closest = _todo_regular_closest_old
+
+
 def compute_bounding_box(stuff, inflate = 0.):
     r"""
     Returns a array of shape (2, num_parameters)
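As a usage illustration (not part of the diff: the compute_grid call below assumes the signature visible in the signed_measures.py hunk further down, and the plain-numpy lines mirror the "regular_left" fallback kept in this file), coarsening raw filtration values onto a small grid might look like:

import numpy as np
from multipers.grids import compute_grid  # module changed in this release

f1 = np.random.rand(1000)  # filtration values of the first parameter
f2 = np.random.rand(1000)  # filtration values of the second parameter

# "regular_closest" snaps a regular grid onto the closest observed values;
# when pykeops is importable, 2.3.3 routes it to _todo_regular_closest_keops.
grid = compute_grid([f1, f2], resolution=[50, 50], strategy="regular_closest")

# Plain-numpy equivalent of the "regular_left" fallback shown above.
sorted_f = np.sort(f1)
f_regular = np.linspace(sorted_f[0], sorted_f[-1], num=50, dtype=sorted_f.dtype)
f_left = np.unique(sorted_f[np.searchsorted(sorted_f, f_regular)])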
multipers/io.cp313-win_amd64.pyd
CHANGED
Binary file
multipers/ml/mma.py
CHANGED

@@ -478,7 +478,7 @@ class MMAFormatter(BaseEstimator, TransformerMixin):
             if self.weights is None
             else np.asarray(self.weights)
         )
-        standard_box =
+        standard_box = np.array([[0] * self._num_parameters, w])
 
         X_copy = [
             [
multipers/ml/signed_measures.py
CHANGED

@@ -9,12 +9,107 @@ from sklearn.base import BaseEstimator, TransformerMixin
 from tqdm import tqdm
 
 import multipers as mp
-from multipers.array_api import api_from_tensor
+from multipers.array_api import api_from_tensor, api_from_tensors
 from multipers.filtrations.density import available_kernels, convolution_signed_measures
-from multipers.grids import compute_grid
+from multipers.grids import compute_grid, todense
 from multipers.point_measure import rank_decomposition_by_rectangles, signed_betti
 
 
+def batch_signed_measure_convolutions(
+    signed_measures,  # array of shape (num_data,num_pts,D)
+    x,  # array of shape (num_x, D) or (num_data, num_x, D)
+    bandwidth,  # either float or matrix if multivariate kernel
+    kernel: available_kernels,
+    api=None,
+):
+    """
+    Input
+    -----
+     - signed_measures: unragged, of shape (num_data, num_pts, D+1)
+       where last coord is weights, (0 for dummy points)
+     - x : the points to convolve (num_x,D)
+     - bandwidth : the bandwidths or covariance matrix inverse or ... of the kernel
+     - kernel : "gaussian", "multivariate_gaussian", "exponential", or Callable (x_i, y_i, bandwidth)->float
+
+    Output
+    ------
+    Array of shape (num_convolutions, (num_axis), num_data,
+    Array of shape (num_convolutions, (num_axis), num_data, max_x_size)
+    """
+    from multipers.filtrations.density import _kernel
+
+    if api is None:
+        api = api_from_tensors(signed_measures, x)
+    if signed_measures.ndim == 2:
+        signed_measures = signed_measures[None, :, :]
+    sms = signed_measures[..., :-1]
+    weights = signed_measures[..., -1]
+    _sms = api.LazyTensor(api.ascontiguous(sms[..., None, :]))
+    _x = api.ascontiguous(x[..., None, :, :])
+
+    sms_kernel = _kernel(kernel)(_sms, _x, bandwidth)
+    out = (sms_kernel * api.ascontiguous(weights[..., None, None])).sum(
+        signed_measures.ndim - 2
+    )
+    assert out.shape[-1] == 1, "Pykeops bug fixed, TODO : refix this "
+    out = out[..., 0]  ## pykeops bug + ensures its a tensor
+    # assert out.shape == (x.shape[0], x.shape[1]), f"{x.shape=}, {out.shape=}"
+    return out
+
+
+def sm2deep(signed_measure, api=None):
+    if api is None:
+        api = api_from_tensor(signed_measure[0])
+    dirac_positions, dirac_signs = signed_measure
+    dtype = dirac_positions.dtype
+    new_shape = list(dirac_positions.shape)
+    new_shape[1] += 1
+    c = api.empty(new_shape, dtype=dtype)
+    c[:, :-1] = dirac_positions
+    c[:, -1] = api.astensor(dirac_signs)
+    return c
+
+
+def deep_unrag(sms, api=None):
+    if api is None:
+        api = api_from_tensor(sms[0][0])
+    num_sm = len(sms)
+    if num_sm == 0:
+        return api.tensor([])
+    first = sms[0][0]
+    num_parameters = first.shape[1]
+    dtype = first.dtype
+    deep_sms = tuple(sm2deep(sm, api=api) for sm in sms)
+    max_num_pts = np.max([sm[0].shape[0] for sm in sms])
+    unragged_sms = api.zeros((num_sm, max_num_pts, num_parameters + 1), dtype=dtype)
+
+    for data in range(num_sm):
+        sm = deep_sms[data]
+        a, b = sm.shape
+        unragged_sms[data, :a, :b] = sm
+    return unragged_sms
+
+
+def sm_convolution(
+    sms,
+    grid,
+    bandwidth,
+    kernel: available_kernels = "gaussian",
+    plot: bool = False,
+    **plt_kwargs,
+):
+    dense_grid = todense(grid)
+    api = api_from_tensors(sms[0][0], dense_grid)
+    sms = deep_unrag(sms, api=api)
+    convs = batch_signed_measure_convolutions(
+        sms, dense_grid, bandwidth, kernel, api=api
+    ).reshape(sms.shape[0], *(len(g) for g in grid))
+    if plot:
+        from multipers.plots import plot_surfaces
+        plot_surfaces((grid, convs), **plt_kwargs)
+    return convs
+
+
 class FilteredComplex2SignedMeasure(BaseEstimator, TransformerMixin):
     """
     Input
@@ -547,27 +642,6 @@ def rescale_sparse_signed_measure(
     return out
 
 
-def sm2deep(signed_measure):
-    dirac_positions, dirac_signs = signed_measure
-    dtype = dirac_positions.dtype
-    new_shape = list(dirac_positions.shape)
-    new_shape[1] += 1
-    if isinstance(dirac_positions, np.ndarray):
-        c = np.empty(new_shape, dtype=dtype)
-        c[:, :-1] = dirac_positions
-        c[:, -1] = dirac_signs
-
-    else:
-        import torch
-
-        c = torch.empty(new_shape, dtype=dtype)
-        c[:, :-1] = dirac_positions
-        if isinstance(dirac_signs, np.ndarray):
-            dirac_signs = torch.from_numpy(dirac_signs)
-        c[:, -1] = dirac_signs
-    return c
-
-
 class SignedMeasureFormatter(BaseEstimator, TransformerMixin):
     """
     Input
@@ -758,6 +832,10 @@ class SignedMeasureFormatter(BaseEstimator, TransformerMixin):
                 ) = self._get_filtration_bounds(X, axis=ax)
                 self._filtrations_bounds.append(filtration_bounds)
                 self._normalization_factors.append(normalization_factors)
+            self._filtrations_bounds = self._backend.astensor(self._filtrations_bounds)
+            self._normalization_factors = self._backend.astensor(
+                self._normalization_factors
+            )
             # else:
             #     (
             #         self._filtrations_bounds,
@@ -782,8 +860,10 @@ class SignedMeasureFormatter(BaseEstimator, TransformerMixin):
         ]
         # axis, filtration_values
         filtration_values = [
-
-
+            self._backend.astensor(
+                compute_grid(
+                    f_ax.T, resolution=self.resolution, strategy=self.grid_strategy
+                )
             )
             for f_ax in filtration_values
         ]
@@ -848,7 +928,7 @@ class SignedMeasureFormatter(BaseEstimator, TransformerMixin):
 
         if self.flatten:
             out = np.concatenate(out).flatten()
-
+        elif self.axis == -1:
             return np.asarray(out)
         else:
            return np.asarray(out)[0]
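For context on the new helpers above: batch_signed_measure_convolutions consumes an unragged array whose last coordinate holds the Dirac weights, with zero-weight rows as padding. A plain-numpy sketch of that layout (illustration only, mirroring deep_unrag; the toy measures below are made up):

import numpy as np

# Two ragged signed measures, each a (dirac_positions, dirac_signs) pair.
sms = [
    (np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]), np.array([1.0, -1.0, 1.0])),
    (np.array([[0.0, 0.9]]), np.array([-1.0])),
]

num_parameters = sms[0][0].shape[1]
max_pts = max(pos.shape[0] for pos, _ in sms)
unragged = np.zeros((len(sms), max_pts, num_parameters + 1))
for i, (pos, signs) in enumerate(sms):
    unragged[i, : len(pos), :num_parameters] = pos  # Dirac positions
    unragged[i, : len(pos), -1] = signs             # weights; padded rows keep weight 0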
multipers/mma_structures.cp313-win_amd64.pyd
CHANGED
Binary file
multipers/mma_structures.pyx
CHANGED

@@ -751,7 +751,7 @@ cdef class PyModule_f64:
             axs = [plt.gca()]
         for image, degree, i in zip(image_vector, degrees, range(num_degrees)):
             ax = axs[i]
-            temp = multipers.plots.plot_surface(grid, image
+            temp = multipers.plots.plot_surface(grid, image, ax=ax)
             plt.colorbar(temp, ax = ax)
             if degree < 0 :
                 ax.set_title(rf"$H_{i}$ $2$-persistence image")
@@ -1712,7 +1712,7 @@ cdef class PyModule_f32:
             axs = [plt.gca()]
         for image, degree, i in zip(image_vector, degrees, range(num_degrees)):
             ax = axs[i]
-            temp = multipers.plots.plot_surface(grid, image
+            temp = multipers.plots.plot_surface(grid, image, ax=ax)
             plt.colorbar(temp, ax = ax)
             if degree < 0 :
                 ax.set_title(rf"$H_{i}$ $2$-persistence image")
multipers/mma_structures.pyx.tp
CHANGED

@@ -773,7 +773,7 @@ cdef class PyModule_{{SHORT}}:
             axs = [plt.gca()]
         for image, degree, i in zip(image_vector, degrees, range(num_degrees)):
             ax = axs[i]
-            temp = multipers.plots.plot_surface(grid, image
+            temp = multipers.plots.plot_surface(grid, image, ax=ax)
             plt.colorbar(temp, ax = ax)
             if degree < 0 :
                 ax.set_title(rf"$H_{i}$ $2$-persistence image")
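The one-line fix above (and the identical one in mma_structures.pyx) passes the target axes to plot_surface so that each image and its colorbar land on the same subplot. A generic matplotlib sketch of that pattern (pcolormesh stands in for multipers.plots.plot_surface; the data is made up):

import matplotlib.pyplot as plt
import numpy as np

x, y = np.meshgrid(np.linspace(0, 1, 50), np.linspace(0, 1, 50))
image = np.exp(-20 * ((x - 0.5) ** 2 + (y - 0.5) ** 2))

fig, ax = plt.subplots()
mappable = ax.pcolormesh(x, y, image)  # drawn on an explicit axes, like plot_surface(grid, image, ax=ax)
plt.colorbar(mappable, ax=ax)          # colorbar attached to that same axes
ax.set_title(r"$H_0$ $2$-persistence image")
plt.show()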
multipers/multiparameter_module_approximation.cp313-win_amd64.pyd
CHANGED
Binary file
multipers/multiparameter_module_approximation.pyx
CHANGED

@@ -168,7 +168,8 @@ def module_approximation(
            delayed(module_approximation)(slicer, box, max_error, nlines, slicer_backend, minpres, degree, complete, threshold, verbose, ignore_warnings, id, direction, swap_box_coords)
            for slicer in input
        ))
-
+        box = modules[0].get_box()
+        mod = PyModule_f64().set_box(box)
        for i,m in enumerate(modules):
            mod.merge(m, input[i].minpres_degree)
        return mod