multipers-2.3.0-cp312-cp312-win_amd64.whl → multipers-2.3.2-cp312-cp312-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (54)
  1. multipers/_signed_measure_meta.py +71 -65
  2. multipers/array_api/__init__.py +39 -0
  3. multipers/array_api/numpy.py +34 -0
  4. multipers/array_api/torch.py +35 -0
  5. multipers/distances.py +6 -2
  6. multipers/{ml/convolutions.py → filtrations/density.py} +67 -13
  7. multipers/filtrations/filtrations.py +76 -17
  8. multipers/function_rips.cp312-win_amd64.pyd +0 -0
  9. multipers/grids.cp312-win_amd64.pyd +0 -0
  10. multipers/grids.pyx +144 -61
  11. multipers/gudhi/Simplex_tree_multi_interface.h +36 -1
  12. multipers/gudhi/gudhi/Multi_persistence/Box.h +3 -0
  13. multipers/gudhi/gudhi/One_critical_filtration.h +18 -9
  14. multipers/gudhi/mma_interface_h0.h +1 -1
  15. multipers/gudhi/mma_interface_matrix.h +10 -1
  16. multipers/gudhi/naive_merge_tree.h +1 -1
  17. multipers/gudhi/truc.h +555 -42
  18. multipers/io.cp312-win_amd64.pyd +0 -0
  19. multipers/io.pyx +26 -93
  20. multipers/ml/mma.py +4 -4
  21. multipers/ml/point_clouds.py +2 -2
  22. multipers/ml/signed_measures.py +63 -65
  23. multipers/mma_structures.cp312-win_amd64.pyd +0 -0
  24. multipers/mma_structures.pxd +2 -1
  25. multipers/mma_structures.pyx +56 -16
  26. multipers/mma_structures.pyx.tp +14 -5
  27. multipers/multiparameter_module_approximation/approximation.h +48 -14
  28. multipers/multiparameter_module_approximation.cp312-win_amd64.pyd +0 -0
  29. multipers/multiparameter_module_approximation.pyx +27 -8
  30. multipers/plots.py +2 -1
  31. multipers/point_measure.cp312-win_amd64.pyd +0 -0
  32. multipers/point_measure.pyx +6 -2
  33. multipers/simplex_tree_multi.cp312-win_amd64.pyd +0 -0
  34. multipers/simplex_tree_multi.pxd +1 -0
  35. multipers/simplex_tree_multi.pyx +632 -146
  36. multipers/simplex_tree_multi.pyx.tp +92 -24
  37. multipers/slicer.cp312-win_amd64.pyd +0 -0
  38. multipers/slicer.pxd +779 -177
  39. multipers/slicer.pxd.tp +24 -5
  40. multipers/slicer.pyx +5657 -1427
  41. multipers/slicer.pyx.tp +211 -48
  42. multipers/tbb12.dll +0 -0
  43. multipers/tbbbind_2_5.dll +0 -0
  44. multipers/tbbmalloc.dll +0 -0
  45. multipers/tbbmalloc_proxy.dll +0 -0
  46. multipers/tensor/tensor.h +1 -1
  47. multipers/tests/__init__.py +9 -4
  48. multipers/torch/diff_grids.py +30 -7
  49. multipers/torch/rips_density.py +1 -1
  50. {multipers-2.3.0.dist-info → multipers-2.3.2.dist-info}/METADATA +4 -25
  51. {multipers-2.3.0.dist-info → multipers-2.3.2.dist-info}/RECORD +54 -51
  52. {multipers-2.3.0.dist-info → multipers-2.3.2.dist-info}/WHEEL +1 -1
  53. {multipers-2.3.0.dist-info → multipers-2.3.2.dist-info/licenses}/LICENSE +0 -0
  54. {multipers-2.3.0.dist-info → multipers-2.3.2.dist-info}/top_level.txt +0 -0
multipers/filtrations/filtrations.py CHANGED
@@ -1,22 +1,25 @@
  from collections.abc import Sequence
  from typing import Optional
+ from warnings import warn

  import gudhi as gd
  import numpy as np
  from numpy.typing import ArrayLike
  from scipy.spatial import KDTree
- from scipy.spatial.distance import cdist

- from multipers.ml.convolutions import DTM, available_kernels
+ from multipers.array_api import api_from_tensor, api_from_tensors
+ from multipers.filtrations.density import DTM, available_kernels
+ from multipers.grids import compute_grid
  from multipers.simplex_tree_multi import SimplexTreeMulti, SimplexTreeMulti_type

  try:
  import pykeops

- from multipers.ml.convolutions import KDE
+ from multipers.filtrations.density import KDE
  except ImportError:
+
  from sklearn.neighbors import KernelDensity
- from warnings import warn
+
  warn("pykeops not found. Falling back to sklearn.")

  def KDE(bandwidth, kernel, return_log):
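The imports above switch this file onto the new multipers.array_api dispatch layer, which replaces the hand-written numpy/torch branches further down. The module itself is not shown in this diff; the following is only a hypothetical sketch of the dispatch idea implied by the calls that do appear here (api_from_tensor, astensor, asnumpy, has_grad, cdist), not the module's actual contents:

import numpy as np

class _NumpyAPI:
    astensor = staticmethod(np.asarray)
    asnumpy = staticmethod(np.asarray)

    @staticmethod
    def has_grad(x):
        return False  # numpy arrays never track gradients

    @staticmethod
    def cdist(a, b):  # pairwise Euclidean distances
        return np.linalg.norm(a[:, None, :] - b[None, :, :], axis=-1)

class _TorchAPI:
    @staticmethod
    def astensor(x):
        import torch
        return torch.as_tensor(x)

    @staticmethod
    def asnumpy(x):
        return x.detach().cpu().numpy()

    @staticmethod
    def has_grad(x):
        return x.requires_grad

    @staticmethod
    def cdist(a, b):
        import torch
        return torch.cdist(a, b)

def api_from_tensor(x):
    # Pick the backend matching the input's type (sketch only).
    return _TorchAPI() if type(x).__module__.startswith("torch") else _NumpyAPI()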
@@ -28,8 +31,8 @@ def RipsLowerstar(
  *,
  points: Optional[ArrayLike] = None,
  distance_matrix: Optional[ArrayLike] = None,
- function=None,
- threshold_radius=None,
+ function: Optional[ArrayLike] = None,
+ threshold_radius: Optional[float] = None,
  ):
  """
  Computes the Rips complex, with the usual rips filtration as a first parameter,
@@ -44,22 +47,37 @@ def RipsLowerstar(
  points is not None or distance_matrix is not None
  ), "`points` or `distance_matrix` has to be given."
  if distance_matrix is None:
- distance_matrix = cdist(points, points) # this may be slow...
+ api = api_from_tensor(points)
+ points = api.astensor(points)
+ D = api.cdist(points, points) # this may be slow...
+ else:
+ api = api_from_tensor(distance_matrix)
+ D = api.astensor(distance_matrix)
+
  if threshold_radius is None:
- threshold_radius = np.min(np.max(distance_matrix, axis=1))
+ threshold_radius = api.min(api.maxvalues(D, axis=1))
  st = gd.SimplexTree.create_from_array(
- distance_matrix, max_filtration=threshold_radius
+ api.asnumpy(D), max_filtration=threshold_radius
  )
  if function is None:
  return SimplexTreeMulti(st, num_parameters=1)

- function = np.asarray(function)
+ function = api.astensor(function)
  if function.ndim == 1:
  function = function[:, None]
+ if function.ndim != 2:
+ raise ValueError(
+ f"`function.ndim` should be 0 or 1 . Got {function.ndim=}.{function=}"
+ )
  num_parameters = function.shape[1] + 1
  st = SimplexTreeMulti(st, num_parameters=num_parameters)
  for i in range(function.shape[1]):
- st.fill_lowerstar(function[:, i], parameter=1 + i)
+ st.fill_lowerstar(api.asnumpy(function[:, i]), parameter=1 + i)
+ if api.has_grad(D) or api.has_grad(function):
+ from multipers.grids import compute_grid
+
+ grid = compute_grid([D.ravel(), *[f for f in function.T]])
+ st = st.grid_squeeze(grid)
  return st

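With the change above, RipsLowerstar accepts either numpy arrays or torch tensors, and when any input carries gradients the returned simplex tree is squeezed onto a grid built from the distances and function values. A minimal usage sketch (hypothetical; it assumes RipsLowerstar is importable from multipers.filtrations, matching the file path above):

import numpy as np
from multipers.filtrations import RipsLowerstar

points = np.random.rand(50, 2)
codensity = np.random.rand(50)  # one value per point, used as a lower-star filtration
# Bifiltration: Rips radius as first parameter, lower-star of `codensity` as second.
st = RipsLowerstar(points=points, function=codensity)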
@@ -99,6 +117,7 @@ def DelaunayLowerstar(
  dtype=np.float64,
  verbose: bool = False,
  clear: bool = True,
+ flagify: bool = False,
  ):
  """
  Computes the Function Delaunay bifiltration. Similar to RipsLowerstar, but most suited for low-dimensional euclidean data.
@@ -110,23 +129,44 @@ def DelaunayLowerstar(
  - threshold_radius: max edge length of the rips. Defaults at min(max(distance_matrix, axis=1)).
  """
  from multipers.slicer import from_function_delaunay
+
+ if flagify and reduce_degree >= 0:
+ raise ValueError(
+ "Got {reduce_degree=} and {flagify=}. Cannot flagify with reduce degree."
+ )
  assert distance_matrix is None, "Delaunay cannot be built from distance matrices"
  if threshold_radius is not None:
  raise NotImplementedError("Delaunay with threshold not implemented yet.")
- points = np.asarray(points)
- function = np.asarray(function).squeeze()
+ api = api_from_tensors(points, function)
+ if not flagify and (api.has_grad(points) or api.has_grad(function)):
+ warn("Cannot keep points gradient unless using `flagify=True`.")
+ points = api.astensor(points)
+ function = api.astensor(function).squeeze()
  assert (
  function.ndim == 1
  ), "Delaunay Lowerstar is only compatible with 1 additional parameter."
- return from_function_delaunay(
- points,
- function,
+ slicer = from_function_delaunay(
+ api.asnumpy(points),
+ api.asnumpy(function),
  degree=reduce_degree,
  vineyard=vineyard,
  dtype=dtype,
  verbose=verbose,
  clear=clear,
  )
+ if flagify:
+ from multipers.slicer import to_simplextree
+
+ slicer = to_simplextree(slicer)
+ slicer.flagify(2)
+
+ if api.has_grad(points) or api.has_grad(function):
+ distances = api.cdist(points, points) / 2
+ grid = compute_grid([distances.ravel(), function])
+ slicer = slicer.grid_squeeze(grid)
+ slicer = slicer._clean_filtration_grid()
+
+ return slicer


  def DelaunayCodensity(
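The new flagify option converts the function-Delaunay slicer back into a simplex tree and flagifies it; per the warning added above, that is what allows gradients on the points to be kept (a plain slicer only keeps gradients through the grid squeeze). A hypothetical call with differentiable torch inputs (parameter names `points` and `function` are assumed from the function body shown above):

import torch
from multipers.filtrations import DelaunayLowerstar

points = torch.rand(100, 2, requires_grad=True)
density = torch.rand(100, requires_grad=True)
# Per the warning above, flagify=True is needed to keep the gradient on `points`.
st = DelaunayLowerstar(points=points, function=density, flagify=True)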
@@ -142,6 +182,7 @@ def DelaunayCodensity(
  dtype=np.float64,
  verbose: bool = False,
  clear: bool = True,
+ flagify: bool = False,
  ):
  """
  TODO
@@ -165,6 +206,7 @@ def DelaunayCodensity(
  dtype=dtype,
  verbose=verbose,
  clear=clear,
+ flagify=flagify,
  )


@@ -178,6 +220,23 @@ def Cubical(image: ArrayLike, **slicer_kwargs):
  - ** args : specify non-default slicer parameters
  """
  from multipers.slicer import from_bitmap
+
+ api = api_from_tensor(image)
+ image = api.astensor(image)
+ if api.has_grad(image):
+ img2 = image.reshape(-1, image.shape[-1]).T
+ grid = compute_grid(img2)
+ coord_img = np.empty(image.shape, dtype=np.int32)
+ slice_shape = image.shape[:-1]
+ for i in range(image.shape[-1]):
+ coord_img[..., i] = np.searchsorted(
+ api.asnumpy(grid[i]),
+ api.asnumpy(image[..., i]).reshape(-1),
+ ).reshape(slice_shape)
+ slicer = from_bitmap(coord_img, **slicer_kwargs)
+ slicer.filtration_grid = grid
+ return slicer
+
  return from_bitmap(image, **slicer_kwargs)

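The differentiable branch added to Cubical replaces each filtration value by its index in a per-parameter grid (via np.searchsorted), runs the bitmap construction on those integer coordinates, and re-attaches the gradient-carrying grid to the slicer. A standalone numpy illustration of the coordinate-image step:

import numpy as np

image = np.random.rand(4, 4, 2)  # last axis = filtration parameters
grids = [np.unique(image[..., i]) for i in range(image.shape[-1])]

coord_img = np.empty(image.shape, dtype=np.int32)
for i, g in enumerate(grids):
    coord_img[..., i] = np.searchsorted(g, image[..., i].reshape(-1)).reshape(image.shape[:-1])
# coord_img now holds indices into `grids`; the float grid can be re-attached afterwards.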
@@ -214,7 +273,7 @@ def CoreDelaunay(
  ks = np.arange(1, len(points) + 1)
  else:
  ks = np.asarray(ks, dtype=int)
- ks:np.ndarray
+ ks: np.ndarray

  assert len(ks) > 0, "The parameter ks must contain at least one value."
  assert np.all(ks > 0), "All values in ks must be positive."
Binary file
Binary file
multipers/grids.pyx CHANGED
@@ -1,6 +1,6 @@

  from libc.stdint cimport intptr_t, int32_t, int64_t
- from libcpp cimport bool,int,long, float
+ from libcpp cimport bool,int, float

  cimport numpy as cnp
  import numpy as np
@@ -9,7 +9,8 @@ cnp.import_array()

  from typing import Iterable,Literal,Optional
  from itertools import product
-
+ from multipers.array_api import api_from_tensor, api_from_tensors
+ from multipers.array_api import numpy as npapi

  available_strategies = ["regular","regular_closest", "regular_left", "partition", "quantile", "precomputed"]
  Lstrategies = Literal["regular","regular_closest", "regular_left", "partition", "quantile", "precomputed"]
@@ -17,13 +18,22 @@ Lstrategies = Literal["regular","regular_closest", "regular_left", "partition",
  ctypedef fused some_int:
  int32_t
  int64_t
- int
- long

  ctypedef fused some_float:
  float
  double

+ def sanitize_grid(grid, bool numpyfy=False):
+ if len(grid) == 0:
+ raise ValueError("empty filtration grid")
+ api = api_from_tensors(*grid)
+ if numpyfy:
+ grid = tuple(api.asnumpy(g) for g in grid)
+ else:
+ # copy here may not be necessary, but cheap
+ grid = tuple(api.astensor(g) for g in grid)
+ assert np.all([g.ndim==1 for g in grid])
+ return grid

  def compute_grid(
  x,
@@ -63,37 +73,57 @@ def compute_grid(


  cdef bool is_numpy_compatible = True
- if is_slicer(x):
+ if (is_slicer(x) or is_simplextree_multi(x)) and x.is_squeezed:
+ initial_grid = x.filtration_grid
+ api = api_from_tensors(*initial_grid)
+ elif is_slicer(x):
  initial_grid = x.get_filtrations_values().T
+ api = npapi
  elif is_simplextree_multi(x):
  initial_grid = x.get_filtration_grid()
+ api = npapi
  elif is_mma(x):
  initial_grid = x.get_filtration_values()
+ api = npapi
  elif isinstance(x, np.ndarray):
  initial_grid = x
+ api = npapi
  else:
  x = tuple(x)
  if len(x) == 0: return []
  first = x[0]
  ## is_sm, i.e., iterable tuple(pts,weights)
- if isinstance(x[0], tuple) and getattr(x[0][0], "shape", None) is not None:
+ if isinstance(first, tuple) and getattr(first[0], "shape", None) is not None:
  initial_grid = tuple(f[0].T for f in x)
- if isinstance(initial_grid[0], np.ndarray):
- initial_grid = np.concatenate(initial_grid, axis=1)
- else:
- is_numpy_compatbile = False
- import torch
- assert isinstance(first[0], torch.Tensor), "Only numpy and torch are supported ftm."
- initial_grid = torch.cat(initial_grid, axis=1)
+ api = api_from_tensors(*initial_grid)
+ initial_grid = api.cat(initial_grid, axis=1)
+ # if isinstance(initial_grid[0], np.ndarray):
+ # initial_grid = np.concatenate(initial_grid, axis=1)
+ # else:
+ # is_numpy_compatible = False
+ # import torch
+ # assert isinstance(first[0], torch.Tensor), "Only numpy and torch are supported ftm."
+ # initial_grid = torch.cat(initial_grid, axis=1)
  ## is grid-like (num_params, num_pts)
- elif isinstance(first,list) or isinstance(first, tuple) or isinstance(first, np.ndarray):
- initial_grid = tuple(f for f in x)
  else:
- is_numpy_compatible = False
- import torch
- assert isinstance(first, torch.Tensor), "Only numpy and torch are supported ftm."
- initial_grid = x
- if is_numpy_compatible:
+ api = api_from_tensors(*x)
+ initial_grid = tuple(api.astensor(f) for f in x)
+ # elif isinstance(first,list) or isinstance(first, tuple) or isinstance(first, np.ndarray):
+ # initial_grid = tuple(f for f in x)
+ # else:
+ # is_numpy_compatible = False
+ # import torch
+ # assert isinstance(first, torch.Tensor), "Only numpy and torch are supported ftm."
+ # initial_grid = x
+
+ num_parameters = len(initial_grid)
+ try:
+ int(resolution)
+ resolution = [resolution]*num_parameters
+ except TypeError:
+ pass
+
+ if api is npapi:
  return _compute_grid_numpy(
  initial_grid,
  resolution=resolution,
@@ -104,8 +134,10 @@
  dense = dense,
  )
  from multipers.torch.diff_grids import get_grid
- return get_grid(strategy)(initial_grid,resolution)
-
+ grid = get_grid(strategy)(initial_grid,resolution)
+ if dense:
+ grid = todense(grid)
+ return grid



@@ -136,14 +168,6 @@ def _compute_grid_numpy(
  Iterable[array[float, ndim=1]] : the 1d-grid for each parameter.
  """
  num_parameters = len(filtrations_values)
- if resolution is None and strategy not in ["exact", "precomputed"]:
- raise ValueError("Resolution must be provided for this strategy.")
- elif resolution is not None:
- try:
- int(resolution)
- resolution = [resolution]*num_parameters
- except:
- pass
  try:
  a,b=drop_quantiles
  except:
@@ -208,7 +232,7 @@ def todense(grid, bool product_order=False):
  def _todo_regular_closest(some_float[:] f, int r, bool unique):
  f_array = np.asarray(f)
  f_regular = np.linspace(np.min(f), np.max(f),num=r, dtype=f_array.dtype)
- f_regular_closest = np.asarray([f[<long>np.argmin(np.abs(f_array-f_regular[i]))] for i in range(r)])
+ f_regular_closest = np.asarray([f[<int64_t>np.argmin(np.abs(f_array-f_regular[i]))] for i in range(r)])
  if unique: f_regular_closest = np.unique(f_regular_closest)
  return f_regular_closest

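The int64_t cast above only changes the index type; the "regular_closest" strategy itself is unchanged: take r regularly spaced values between min(f) and max(f) and snap each one to the closest value actually present in f. A small worked example:

import numpy as np

f = np.array([0.0, 0.1, 0.15, 0.8, 1.0])
r = 4
f_regular = np.linspace(f.min(), f.max(), num=r)                  # [0., 0.333, 0.667, 1.]
closest = np.array([f[np.argmin(np.abs(f - v))] for v in f_regular])
print(np.unique(closest))                                          # [0.   0.15 0.8  1.  ]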
@@ -254,7 +278,7 @@ def push_to_grid(some_float[:,:] points, grid, bool return_coordinate=False):
  pushes the points onto the grid.
  """
  num_points, num_parameters = points.shape[0], points.shape[1]
- cdef cnp.ndarray[long,ndim=2] coordinates = np.empty((num_points, num_parameters),dtype=np.int64)
+ cdef cnp.ndarray[int64_t,ndim=2] coordinates = np.empty((num_points, num_parameters),dtype=np.int64)
  for parameter in range(num_parameters):
  coordinates[:,parameter] = np.searchsorted(grid[parameter],points[:,parameter])
  if return_coordinate:
@@ -271,7 +295,31 @@ def coarsen_points(some_float[:,:] points, strategy="exact", int resolution=-1,
  return push_to_grid(points, grid, coordinate), grid
  return push_to_grid(points, grid, coordinate)

-
+ def _inf_value(array):
+ if isinstance(array, type|np.dtype):
+ dtype = np.dtype(array) # torch types are not types
+ elif isinstance(array, np.ndarray):
+ dtype = np.dtype(array.dtype)
+ else:
+ import torch
+ if isinstance(array, torch.Tensor):
+ dtype=array.dtype
+ elif isinstance(array, torch.dtype):
+ dtype=array
+ else:
+ raise ValueError(f"unknown input of type {type(array)=} {array=}")
+
+ if isinstance(dtype, np.dtype):
+ if dtype.kind == 'f':
+ return np.asarray(np.inf,dtype=dtype)
+ if dtype.kind == 'i':
+ return np.iinfo(dtype).max
+ # torch only here.
+ if dtype.is_floating_point:
+ return torch.tensor(torch.inf, dtype=dtype)
+ else:
+ return torch.iinfo(dtype).max
+ raise ValueError(f"Dtype must be integer or floating like (got {dtype})")

  def evaluate_in_grid(pts, grid, mass_default=None):
  """
@@ -280,6 +328,7 @@ def evaluate_in_grid(pts, grid, mass_default=None):
  - pts: of the form array[int, ndim=2]
  - grid of the form Iterable[array[float, ndim=1]]
  """
+ assert pts.ndim == 2
  first_filtration = grid[0]
  dtype = first_filtration.dtype
  if isinstance(first_filtration, np.ndarray):
@@ -295,12 +344,18 @@ def evaluate_in_grid(pts, grid, mass_default=None):
  def empty_like(x):
  return torch.empty(x.shape,dtype=dtype)

- coords= empty_like(pts)
- for i in range(coords.shape[1]):
+ coords=empty_like(pts)
+ cdef int dim = coords.shape[1]
+ pts_inf = _inf_value(pts)
+ coords_inf = _inf_value(coords)
+ idx = np.argwhere(pts == pts_inf)
+ pts[idx] == 0
+ for i in range(dim):
  coords[:,i] = grid[i][pts[:,i]]
+ coords[idx] = coords_inf
  return coords

- def sm_in_grid(pts, weights, grid, int num_parameters=-1, mass_default=None):
+ def sm_in_grid(pts, weights, grid, mass_default=None):
  """Given a measure whose points are coordinates,
  pushes this measure in this grid.
  Input
@@ -310,34 +365,33 @@ def sm_in_grid(pts, weights, grid, int num_parameters=-1, mass_default=None):
  - grid of the form Iterable[array[float, ndim=1]]
  - num_parameters: number of parameters
  """
- first_filtration = grid[0]
- dtype = first_filtration.dtype
- def to_int(x):
- return np.asarray(x,dtype=np.int64)
- if isinstance(first_filtration, np.ndarray):
- if mass_default is not None:
- grid = tuple(np.concatenate([g, [m]]) for g,m in zip(grid, mass_default))
- def empty_like(x, weights):
- return np.empty_like(x, dtype=dtype), np.asarray(weights)
- else:
- import torch
- # assert isinstance(first_filtration, torch.Tensor), f"Invalid grid type. Got {type(grid[0])}, expected numpy or torch array."
+ if pts.ndim != 2:
+ raise ValueError(f"invalid dirac locations. got {pts.ndim=} != 2")
+ if len(grid) == 0:
+ raise ValueError(f"Empty grid given. Got {grid=}")
+ cdef int num_parameters = pts.shape[1]
+ if mass_default is None:
+ api = api_from_tensors(*grid)
+ else:
+ api = api_from_tensors(*grid, mass_default)
+
+ _grid = list(grid)
+ _mass_default = None if mass_default is None else api.astensor(mass_default)
+ while len(_grid) < num_parameters:
+ _grid += [api.cat([
+ (gt:=api.astensor(g))[1:],
+ api.astensor(_inf_value(api.asnumpy(gt))).reshape(1)
+ ]) for g in grid]
  if mass_default is not None:
- grid = tuple(torch.cat([g, torch.tensor(m)[None]]) for g,m in zip(grid, mass_default))
- def empty_like(x, weights):
- return torch.empty(x.shape,dtype=dtype), torch.from_numpy(weights)
-
- pts = to_int(pts)
- coords,weights = empty_like(pts,weights)
- for i in range(coords.shape[1]):
- if num_parameters > 0:
- coords[:,i] = grid[i%num_parameters][pts[:,i]]
- else:
- coords[:,i] = grid[i][pts[:,i]]
+ _mass_default = api.cat([_mass_default,mass_default])
+ grid = tuple(_grid)
+ mass_default = _mass_default
+
+ coords = evaluate_in_grid(np.asarray(pts, dtype=int), grid, mass_default)
  return (coords, weights)

  # TODO : optimize with memoryviews / typing
- def sms_in_grid(sms, grid, int num_parameters=-1, mass_default=None):
+ def sms_in_grid(sms, grid, mass_default=None):
  """Given a measure whose points are coordinates,
  pushes this measure in this grid.
  Input
@@ -346,5 +400,34 @@ def sms_in_grid(sms, grid, mass_default=None):
  where signed_measure_like = tuple(array[int, ndim=2], array[int])
  - grid of the form Iterable[array[float, ndim=1]]
  """
- sms = tuple(sm_in_grid(pts,weights,grid=grid,num_parameters=num_parameters, mass_default=mass_default) for pts,weights in sms)
+ sms = tuple(sm_in_grid(pts,weights,grid=grid, mass_default=mass_default) for pts,weights in sms)
  return sms
+
+
+ def _push_pts_to_line(pts, basepoint, direction=None):
+ api = api_from_tensors(pts, basepoint)
+ pts = api.astensor(pts)
+ basepoint = api.astensor(basepoint)
+ num_parameters = basepoint.shape[0]
+ if direction is not None:
+ if not api.is_promotable(direction):
+ raise ValueError(f"Incompatible input types. Got {type(pts)=}, {type(basepoint)=}, {type(direction)=}")
+
+ direction = api.astensor(direction)
+ ok_idx = direction > 0
+ if ok_idx.sum() == 0:
+ raise ValueError(f"Got invalid direction {direction}")
+ zero_idx = None if ok_idx.all() else direction == 0
+ else:
+ direction = api.tensor([1], dtype=int)
+ ok_idx = slice(None)
+ zero_idx = None
+ xa = api.maxvalues(
+ (pts[:, ok_idx] - basepoint[ok_idx]) / direction[ok_idx], axis=1, keepdims=True
+ )
+ if zero_idx is not None:
+ xb = api.where(pts[:, zero_idx] <= basepoint[zero_idx], -np.inf, np.inf)
+ xs = api.maxvalues(api.cat([xa, xb], axis=1), axis=1, keepdims=True)
+ else:
+ xs = xa
+ return xs.squeeze()
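The new _push_pts_to_line sends each point p of a measure onto the line {basepoint + t * direction}: over coordinates with a positive direction it computes t = max_i (p_i - b_i) / d_i, i.e. the smallest t for which the line point dominates p, and coordinates with zero direction contribute ±inf depending on whether p is already dominated there. A worked numpy example for the diagonal direction:

import numpy as np

pts = np.array([[0.2, 0.7], [1.0, 0.1]])
basepoint = np.array([0.0, 0.0])
direction = np.array([1.0, 1.0])
t = np.max((pts - basepoint) / direction, axis=1)
print(t)  # [0.7 1. ] : smallest t with basepoint + t*direction >= p coordinate-wise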
multipers/gudhi/Simplex_tree_multi_interface.h CHANGED
@@ -21,6 +21,8 @@
  #include "multiparameter_module_approximation/format_python-cpp.h"

  #include <iostream>
+ #include <ranges>
+ #include <stdexcept>
  #include <utility> // std::pair
  #include <vector>
  #include <limits> // has_quiet_NaN
@@ -165,7 +167,7 @@ class Simplex_tree_multi_interface

  void compute_extended_filtration() { throw std::logic_error("Incompatible with multipers"); }

- Simplex_tree_multi_interface *collapse_edges(int nb_collapse_iteration) {
+ Simplex_tree_multi_interface *collapse_edges([[maybe_unused]] int nb_collapse_iteration) {
  throw std::logic_error("Incompatible with multipers");
  }

@@ -292,6 +294,39 @@ class Simplex_tree_multi_interface
  }
  }

+ void unsqueeze_filtration(const intptr_t grid_st_ptr,
+ const std::vector<std::vector<double>> &grid) { // TODO : this is const but GUDHI
+ if constexpr (Filtration::is_multicritical())
+ throw std::invalid_argument("Multicritical not supported yet");
+ else {
+ constexpr const bool verbose = false;
+ using int_fil_type = decltype(std::declval<Filtration>().template as_type<std::int32_t>());
+ using st_coord_type = Simplex_tree_multi_interface<int_fil_type, int32_t>;
+ st_coord_type &grid_st = *(st_coord_type *)grid_st_ptr; // TODO : maybe fix this.
+ std::vector<int> simplex_vertex;
+ int num_parameters = grid_st.get_number_of_parameters();
+ for (auto &simplex_handle : grid_st.complex_simplex_range()) {
+ const auto &simplex_filtration = grid_st.filtration(simplex_handle);
+ if constexpr (verbose) std::cout << "Filtration " << simplex_filtration << "\n";
+ Filtration splx_filtration(simplex_filtration.size(), 1.);
+ if (simplex_filtration.is_finite()) {
+ for (auto i : std::views::iota(num_parameters)) splx_filtration[i] = grid[i][simplex_filtration[i]];
+ } else if (simplex_filtration.is_plus_inf()) {
+ splx_filtration = Filtration().inf();
+ } else if (simplex_filtration.is_minus_inf()) {
+ splx_filtration = Filtration().minus_inf();
+ } else if (simplex_filtration.is_nan()) {
+ splx_filtration = Filtration().nan();
+ }
+ if constexpr (verbose) std::cout << "Filtration " << splx_filtration << "\n";
+ for (const auto s : grid_st.simplex_vertex_range(simplex_handle)) simplex_vertex.push_back(s);
+ this->insert_simplex(simplex_vertex, splx_filtration);
+ if constexpr (verbose) std::cout << "Coords in st" << this->filtration(this->find(simplex_vertex)) << std::endl;
+ simplex_vertex.clear();
+ }
+ }
+ }
+
  void squeeze_filtration(const intptr_t outptr,
  const std::vector<std::vector<double>> &grid) { // TODO : this is const but GUDHI
  constexpr const bool verbose = false;
multipers/gudhi/gudhi/Multi_persistence/Box.h CHANGED
@@ -149,6 +149,9 @@ class Box {
  lowerCorner_ -= delta;
  upperCorner_ += delta;
  }
+ friend bool operator==(const Box& a, const Box&b){
+ return a.upperCorner_ == b.upperCorner_ && a.lowerCorner_ == b.lowerCorner_;
+ }

  /**
  * @brief Outstream operator.
multipers/gudhi/gudhi/One_critical_filtration.h CHANGED
@@ -30,7 +30,7 @@
  #include <gudhi/Debug_utils.h>

  namespace Gudhi {
- namespace multi_filtration {
+ namespace multi_filtration {

  /**
  * @class One_critical_filtration one_critical_filtration.h gudhi/one_critical_filtration.h
@@ -1034,13 +1034,21 @@ class One_critical_filtration : public std::vector<T> {
  "The grid should not be smaller than the number of parameters in the filtration value.");
  for (std::size_t parameter = 0u; parameter < Base::size(); ++parameter) {
  const auto &filtration = grid[parameter];
- auto d =
- std::distance(filtration.begin(),
- std::lower_bound(filtration.begin(),
- filtration.end(),
- static_cast<typename oned_array::value_type>(Base::operator[](parameter))));
- d = d == filtration.size() ? filtration.size() - 1 : d;
- Base::operator[](parameter) = coordinate ? static_cast<T>(d) : static_cast<T>(filtration[d]);
+ int num_filtration_values = filtration.size();
+ auto this_at_param = static_cast<typename oned_array::value_type>(Base::operator[](parameter));
+ auto it = std::lower_bound(filtration.begin(), filtration.end(), this_at_param);
+ std::size_t idx;
+ if (it == filtration.end()) {
+ idx = num_filtration_values - 1;
+ } else if (it == filtration.begin()) {
+ idx = 0;
+ } else {
+ auto prev = it - 1;
+ idx = (std::abs(*prev - this_at_param) <= std::abs(*it - this_at_param))
+ ? std::distance(filtration.begin(), prev)
+ : std::distance(filtration.begin(), it);
+ }
+ Base::operator[](parameter) = coordinate ? static_cast<T>(idx) : static_cast<T>(filtration[idx]);
  }
  }

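The rewritten loop changes the projection of a filtration value onto a grid from "first grid entry not below the value" (lower_bound) to the nearest grid entry, clamped at both ends. The same logic, approximated in Python:

import numpy as np

grid = np.array([0.0, 1.0, 10.0])
value = 1.4
i = np.searchsorted(grid, value)   # lower_bound would pick index 2, i.e. 10.0
prev = i - 1
# Nearest-value rule used above (end-of-grid clamping omitted for brevity):
nearest = prev if abs(grid[prev] - value) <= abs(grid[i] - value) else i
print(grid[i], grid[nearest])      # 10.0 1.0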
@@ -1393,7 +1401,8 @@ class One_critical_filtration : public std::vector<T> {
  }
  };

- }} // namespace Gudhi::multi_filtration
+ } // namespace multi_filtration
+ } // namespace Gudhi

  namespace std {

multipers/gudhi/mma_interface_h0.h CHANGED
@@ -39,7 +39,7 @@ class Persistence_backend_h0 {
  using cycle_type = std::vector<unsigned int>;
  static const bool is_vine = true;

- std::vector<cycle_type> get_representative_cycles(bool update = true) const { throw "Unimplemented"; }
+ std::vector<cycle_type> get_representative_cycles([[maybe_unused]] bool update = true) const { throw "Unimplemented"; }

  class Barcode_iterator : public boost::iterator_facade<Barcode_iterator, const Bar &, boost::forward_traversal_tag> {
  public:
multipers/gudhi/mma_interface_matrix.h CHANGED
@@ -60,10 +60,19 @@ struct No_vine_multi_persistence_options : Gudhi::persistence_matrix::Default_op
  static const bool has_vine_update = false;
  };

+ template <Gudhi::persistence_matrix::Column_types column_type = Gudhi::persistence_matrix::Column_types::INTRUSIVE_SET, bool row_access = true>
+ struct fix_presentation_options : Gudhi::persistence_matrix::Default_options<column_type, true> {
+ using Index = std::uint32_t;
+ static const bool has_row_access = row_access;
+ static const bool has_map_column_container = false;
+ static const bool has_removable_columns = false; // WARN : idx will change if map is not true
+ };
+
  template <class Matrix_options, class Boundary_matrix_type>
  class Persistence_backend_matrix {
  public:
  using matrix_type = Gudhi::persistence_matrix::Matrix<Matrix_options>;
+ using options = Matrix_options;
  using cycle_type = typename matrix_type::Cycle;
  static const bool is_vine = Matrix_options::has_vine_update;

@@ -228,7 +237,7 @@ class Persistence_backend_matrix {

  inline Barcode get_barcode() { return Barcode(matrix_, permutation_); }

- inline std::size_t size() { return this->matrix_.get_number_of_columns(); }
+ inline std::size_t size() const { return this->matrix_.get_number_of_columns(); }

  inline friend std::ostream &operator<<(std::ostream &stream, Persistence_backend_matrix &structure) {
  stream << "[\n";
multipers/gudhi/naive_merge_tree.h CHANGED
@@ -314,7 +314,7 @@ class Naive_merge_forest {
  struct Barcode {
  Barcode() {}

- Barcode(int numberOfSimplices, int numberOfVertices)
+ Barcode(int numberOfSimplices, [[maybe_unused]] int numberOfVertices)
  : barcode_(numberOfSimplices), positionToBar_(numberOfSimplices), nextBarIndex_(0) {}

  friend void swap(Barcode &mf1, Barcode &mf2) {