multipers 2.3.0-cp310-cp310-win_amd64.whl → 2.3.2b1-cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of multipers might be problematic.
Files changed (54)
  1. multipers/_signed_measure_meta.py +71 -65
  2. multipers/array_api/__init__.py +39 -0
  3. multipers/array_api/numpy.py +34 -0
  4. multipers/array_api/torch.py +35 -0
  5. multipers/distances.py +6 -2
  6. multipers/{ml/convolutions.py → filtrations/density.py} +67 -13
  7. multipers/filtrations/filtrations.py +76 -17
  8. multipers/function_rips.cp310-win_amd64.pyd +0 -0
  9. multipers/grids.cp310-win_amd64.pyd +0 -0
  10. multipers/grids.pyx +144 -61
  11. multipers/gudhi/Simplex_tree_multi_interface.h +36 -1
  12. multipers/gudhi/gudhi/Multi_persistence/Box.h +3 -0
  13. multipers/gudhi/gudhi/One_critical_filtration.h +18 -9
  14. multipers/gudhi/mma_interface_h0.h +1 -1
  15. multipers/gudhi/mma_interface_matrix.h +10 -1
  16. multipers/gudhi/naive_merge_tree.h +1 -1
  17. multipers/gudhi/truc.h +555 -42
  18. multipers/io.cp310-win_amd64.pyd +0 -0
  19. multipers/io.pyx +26 -93
  20. multipers/ml/mma.py +3 -3
  21. multipers/ml/point_clouds.py +2 -2
  22. multipers/ml/signed_measures.py +63 -65
  23. multipers/mma_structures.cp310-win_amd64.pyd +0 -0
  24. multipers/mma_structures.pxd +2 -1
  25. multipers/mma_structures.pyx +56 -16
  26. multipers/mma_structures.pyx.tp +14 -5
  27. multipers/multiparameter_module_approximation/approximation.h +48 -14
  28. multipers/multiparameter_module_approximation.cp310-win_amd64.pyd +0 -0
  29. multipers/multiparameter_module_approximation.pyx +25 -7
  30. multipers/plots.py +2 -1
  31. multipers/point_measure.cp310-win_amd64.pyd +0 -0
  32. multipers/point_measure.pyx +6 -2
  33. multipers/simplex_tree_multi.cp310-win_amd64.pyd +0 -0
  34. multipers/simplex_tree_multi.pxd +1 -0
  35. multipers/simplex_tree_multi.pyx +584 -142
  36. multipers/simplex_tree_multi.pyx.tp +80 -23
  37. multipers/slicer.cp310-win_amd64.pyd +0 -0
  38. multipers/slicer.pxd +799 -197
  39. multipers/slicer.pxd.tp +24 -5
  40. multipers/slicer.pyx +5653 -1426
  41. multipers/slicer.pyx.tp +208 -48
  42. multipers/tbb12.dll +0 -0
  43. multipers/tbbbind_2_5.dll +0 -0
  44. multipers/tbbmalloc.dll +0 -0
  45. multipers/tbbmalloc_proxy.dll +0 -0
  46. multipers/tensor/tensor.h +1 -1
  47. multipers/tests/__init__.py +9 -4
  48. multipers/torch/diff_grids.py +30 -7
  49. multipers/torch/rips_density.py +1 -1
  50. {multipers-2.3.0.dist-info → multipers-2.3.2b1.dist-info}/METADATA +4 -25
  51. {multipers-2.3.0.dist-info → multipers-2.3.2b1.dist-info}/RECORD +54 -51
  52. {multipers-2.3.0.dist-info → multipers-2.3.2b1.dist-info}/WHEEL +1 -1
  53. {multipers-2.3.0.dist-info → multipers-2.3.2b1.dist-info/licenses}/LICENSE +0 -0
  54. {multipers-2.3.0.dist-info → multipers-2.3.2b1.dist-info}/top_level.txt +0 -0
multipers/slicer.pyx.tp CHANGED
@@ -11,6 +11,7 @@ import pickle
  with open("build/tmp/_slicer_names.pkl", "rb") as f:
  slicers=pickle.load(f)

+ dtypes = set([(D['PY_VALUE_TYPE'], D['C_VALUE_TYPE'], D['SHORT_VALUE_TYPE']) for D in slicers])

  }}

@@ -20,13 +21,15 @@ from typing import Optional,Literal
  import threading
  import os
  from joblib import Parallel, delayed
+ from warnings import warn

  from multipers.slicer cimport *
  from multipers.filtrations cimport *
  from multipers.filtration_conversions cimport *
  ## TODO: these two are not needed, remove that by updating rank code.
  from multipers.point_measure import sparsify, rank_decomposition_by_rectangles
- from multipers.grids import compute_grid
+ from multipers.grids import compute_grid, sanitize_grid, evaluate_in_grid, _push_pts_to_line
+ from multipers.array_api import api_from_tensor

  import numpy as np
  cimport cython
@@ -104,7 +107,7 @@ _valid_pers_backend = Literal[
  #------------------------------------------------------------------------------
  cdef class {{D['PYTHON_TYPE']}}:
  cdef {{D['C_TEMPLATE_TYPE']}} truc
- cdef public vector[vector[double]] filtration_grid
+ cdef public object filtration_grid
  cdef public int minpres_degree ## TODO : maybe change directly the degree in the minpres ?

  def __repr__(self):
@@ -112,7 +115,7 @@ cdef class {{D['PYTHON_TYPE']}}:

  @property
  def is_squeezed(self)->bool:
- return self.filtration_grid.size() > 0 and self.filtration_grid[0].size() > 0
+ return self.filtration_grid is not None and len(self.filtration_grid) > 0 and len(self.filtration_grid[0]) > 0
  @property
  def is_minpres(self)->bool:
  return self.minpres_degree>=0
@@ -215,6 +218,25 @@ cdef class {{D['PYTHON_TYPE']}}:
  self.minpres_degree = -1
  {{endif}}

+ def to_colexical(self, bool return_permutation = False)->{{D['PYTHON_TYPE']}}|tuple[{{D['PYTHON_TYPE']}},np.ndarray]:
+ # assert not self.is_squeezed, "Unsqueeze first, this is not implented yet for squeezed slicers"
+ new_slicer = {{D['PYTHON_TYPE']}}()
+ cdef pair[{{D['C_TEMPLATE_TYPE']}}, vector[unsigned int]] stuff = self.truc.colexical_rearange()
+
+ new_slicer.truc = stuff.first
+ new_slicer.minpres_degree = self.minpres_degree
+ new_slicer.filtration_grid = self.filtration_grid
+
+ if return_permutation:
+ return new_slicer, np.array(stuff.second, dtype=np.int32)
+ return new_slicer
+ def permute_generators(self, permutation)->{{D['PYTHON_TYPE']}}:
+ cdef vector[unsigned int] c_perm = permutation
+ new_slicer = {{D['PYTHON_TYPE']}}()
+ new_slicer.truc = self.truc.permute(c_perm)
+ new_slicer.minpres_degree = self.minpres_degree
+ return new_slicer
+
  def copy(self)->{{D['PYTHON_TYPE']}}:
  """
  Returns a copy of the slicer.
@@ -224,6 +246,21 @@ cdef class {{D['PYTHON_TYPE']}}:
  copy_.minpres_degree = self.minpres_degree
  copy_.filtration_grid = self.filtration_grid
  return copy_
+ def compute_kernel_projective_cover(self, dim:Optional[int]=None)->{{D['PYTHON_TYPE']}}:
+ if len(self) == 0:
+ return {{D['PYTHON_TYPE']}}()
+ if dim is None:
+ dim = self.truc.get_dimension(0)
+ out = {{D['PYTHON_TYPE']}}()
+ out.truc = self.truc.projective_cover_kernel(dim)
+ out.filtration_grid = self.filtration_grid
+ return out
+
+ def get_barcode_idx(self, bool keep_inf = False):
+ """
+ Returns the current barcode.
+ """
+ return tuple(np.asarray(x) if len(x) else np.empty((0,2), dtype=int)for x in self.truc.get_barcode_idx())
  def get_barcode(self, bool keep_inf = False):
  """
  Returns the current barcode.
@@ -232,12 +269,13 @@ cdef class {{D['PYTHON_TYPE']}}:
  bcs = tuple(np.asarray(stuff, dtype = {{D['PY_VALUE_TYPE']}}) for stuff in self.truc.get_barcode())
  else:
  bcs = {{D['PYTHON_TYPE']}}._threshold_bcs(self.truc.get_barcode())
- return self.truc.get_barcode()
+ return bcs
  def push_to_line(self, basepoint, direction=None)->{{D['PYTHON_TYPE']}}:
  """
  Pushes the current slicer to the line defined by a basepoint and an optional direction.
  If the direction is not provided, it is assumed to be diagonal.
  """
+ {{if D['IS_FLOAT']}}
  basepoint = np.asarray(basepoint, dtype = {{D['PY_VALUE_TYPE']}})
  cdef Line[{{D['C_VALUE_TYPE']}}] line
  if direction is None:
@@ -247,6 +285,9 @@ cdef class {{D['PYTHON_TYPE']}}:
  line = Line[{{D['C_VALUE_TYPE']}}](_py21c_{{D['SHORT_VALUE_TYPE']}}(basepoint),_py21c_{{D['SHORT_VALUE_TYPE']}}(direction))
  self.truc.push_to(line)
  return self
+ {{else}}
+ raise NotImplementedError("There is no `int` slicing.")
+ {{endif}}

  @staticmethod
  cdef _threshold_bcs(vector[vector[pair[{{D['C_VALUE_TYPE']}}, {{D['C_VALUE_TYPE']}}]]] bcs):
@@ -254,11 +295,11 @@ cdef class {{D['PYTHON_TYPE']}}:
  @staticmethod
  def _bc_to_full(bcs, basepoint, direction=None):
  # i, (b sv d), coords
- basepoint = np.asarray(basepoint)[None,None,:]
- direction = 1 if direction is None else np.asarray(direction)[None,None,:]
+ basepoint = basepoint[None,None,:]
+ direction = 1 if direction is None else direction[None,None,:]
  return tuple(bc[:,:,None]*direction + basepoint for bc in bcs)

- def persistence_on_line(self,basepoint,direction=None, bool keep_inf=True, bool full=False):
+ def persistence_on_line(self,basepoint,direction=None, bool keep_inf=True, bool full=False, bool ignore_infinite_filtration_values = True):
  """
  Computes the persistence on a line L defined by
  - a basepoint (num_parameters,) array
@@ -266,8 +307,9 @@

  Warning: This is not parallelizable. Use `persitence_on_lines`.
  """
+ {{if D['IS_FLOAT']}}
  self.push_to_line(basepoint,direction)
- self.truc.compute_persistence()
+ self.truc.compute_persistence(ignore_infinite_filtration_values)
  if keep_inf:
  bcs = tuple(np.asarray(stuff, dtype = {{D['PY_VALUE_TYPE']}}) for stuff in self.truc.get_barcode())
  else:
@@ -276,8 +318,36 @@ cdef class {{D['PYTHON_TYPE']}}:
  if full:
  bcs = {{D['PYTHON_TYPE']}}._bc_to_full(bcs, basepoint, direction)
  return bcs
+ {{else}}
+ if not self.is_squeezed:
+ raise ValueError("Unsqueeze tensor, or provide a filtration grid. Cannot slice lines with integers...")
+ api = api_from_tensor(self.filtration_grid[0])
+ s = self.unsqueeze()
+ fil = evaluate_in_grid(np.asarray(self.get_filtrations()), self.filtration_grid)
+ projected_fil =_push_pts_to_line(fil, basepoint, direction)
+ s.compute_persistence(projected_fil)
+ bcs_idx = s.get_barcode_idx()
+ bcs = tuple(
+ api.stack([projected_fil[bc[:,0]], projected_fil[bc[:,1]]], axis=1)
+ if bc.size>0
+ else api.empty((0,2), dtype = self.filtration_grid[0].dtype)
+ for bc in bcs_idx
+ )
+ if full:
+ bcs = self._bc_to_full(bcs, basepoint, direction)
+ return bcs
+ {{endif}}

- def persistence_on_lines(self, basepoints=None, directions=None, bool keep_inf=True, bool full=False):
+ def _custom_persistences_idx(self, filtration_array, bool ignore_inf=True):
+ filtration_array = np.asarray(filtration_array, dtype= {{D['PY_VALUE_TYPE']}})
+ cdef {{D['C_VALUE_TYPE']}}[:,:] arr_view = filtration_array
+ cdef int size = arr_view.shape[0]
+ if arr_view.shape[1] != self.truc.num_generators():
+ raise ValueError(f"Got filtration array of shape {filtration_array.shape=} / {arr_view.shape=}. Was expecting (-1, {len(self)=})")
+
+ return tuple(tuple(np.array(bc_idx_degree, dtype=int) for bc_idx_degree in bc_idx) for bc_idx in self.truc.custom_persistences(&arr_view[0,0], size, ignore_inf))
+
+ def persistence_on_lines(self, basepoints=None, directions=None, bool keep_inf=True, bool full=False, bool ignore_infinite_filtration_values = True):
  """
  Same as `persistence_on_line`, but with vineyards operation between
  lines if `self.is_vine`, and in parallel otherwise.
@@ -288,15 +358,17 @@
  if directions is None:
  c_basepoints = basepoints
  with nogil:
- c_out = self.truc.persistence_on_lines(c_basepoints)
+ c_out = self.truc.persistence_on_lines(c_basepoints, ignore_infinite_filtration_values)
  else:
  c_truc = zip(basepoints,directions)
  with nogil:
- c_out = self.truc.persistence_on_lines(c_truc)
+ c_out = self.truc.persistence_on_lines(c_truc, ignore_infinite_filtration_values)
  cdef int num_bc = c_basepoints.size()

  if keep_inf:
- out = tuple(tuple(np.asarray(y, dtype = {{D['PY_VALUE_TYPE']}}) for y in x) for x in c_out)
+ out = tuple(tuple(
+ np.asarray(y, dtype = {{D['PY_VALUE_TYPE']}}) if len(y)>0 else np.empty((0,2), dtype = {{D['PY_VALUE_TYPE']}})
+ for y in x) for x in c_out)
  else:
  out = tuple({{D['PYTHON_TYPE']}}._threshold_bcs(x) for x in c_out)

@@ -311,7 +383,7 @@ cdef class {{D['PYTHON_TYPE']}}:
  self.get_boundaries(),
  self.get_dimensions(),
  self.get_filtrations(),
- tuple(np.array(f) for f in self.filtration_grid),
+ self.filtration_grid,
  self.minpres_degree,
  )
  def __setstate__(self, tuple dump):
@@ -340,26 +412,50 @@ cdef class {{D['PYTHON_TYPE']}}:



- def compute_persistence(self,one_filtration=None, bool ignore_infinite_filtration_values = True)->{{D['PYTHON_TYPE']}}:
+ def compute_persistence(self,one_filtration=None, bool ignore_infinite_filtration_values = True)->tuple:
  """
  Computes the current persistence, or the persistence
  given by the filtration one_filtration (num_generators,).
  """
  if one_filtration is not None:
- self.truc.set_one_filtration(one_filtration)
+ api = api_from_tensor(one_filtration)
+ one_filtration=api.astensor(one_filtration)
+ # self.truc.set_one_filtration(one_filtration)
+ # s = self.unsqueeze()
+ # fil = evaluate_in_grid(np.asarray(self.get_filtrations()), self.filtration_grid)
+ # projected_fil =_push_pts_to_line(fil, basepoint, direction)
+ if one_filtration.ndim > 2 or one_filtration.ndim == 0:
+ raise ValueError(f"Expected a filtration shape of the form ((num_1_param), num_generators). Got {one_filtration.shape=}")
+ squeeze = False
+ if one_filtration.ndim == 1:
+ one_filtration = one_filtration[None]
+ squeeze = True
+
+ bcs = self._custom_persistences_idx(api.asnumpy(one_filtration),ignore_infinite_filtration_values)
+
+ bcs = tuple(tuple(
+ api.stack([F[bc[:,0]], F[bc[:,1]]], axis=1)
+ if bc.size>0
+ else api.empty((0,2), dtype = F.dtype)
+ for bc in bcs_idx
+ )
+ for bcs_idx,F in zip(bcs,one_filtration)
+ )
+ return bcs[0] if squeeze else bcs
+
  # TODO: Later
  # if len(degrees)>0:
  # self.truc.compute_persistence(degrees)
  # else:
  # self.truc.compute_persistence()
  self.truc.compute_persistence(ignore_infinite_filtration_values)
- return self
- # return self.truc.get_barcode()
+ # return self
+ return self.get_barcode()
  def get_barcode(self):
  """
  Returns the barcode of the current 1d-persistence.
  """
- return self.truc.get_barcode()
+ return tuple(np.asarray(bc) for bc in self.truc.get_barcode())
  def sliced_filtration(self,basepoint, direction=None):
  """
  Computes the filtration on a line L defined by
@@ -432,7 +528,15 @@ cdef class {{D['PYTHON_TYPE']}}:
  Returns the boundaries of the generators.
  """
  return tuple(tuple(b) for b in self.truc.get_boundaries())
- def grid_squeeze(self, filtration_grid=None, grid_strategy="exact", resolution:Optional[int]=None, bool coordinates=True, bool inplace = False, bool force=False)->{{D['PYTHON_TYPE'][:-3]+"i32"}}|{{D['PYTHON_TYPE']}}:
+ def grid_squeeze(
+ self,
+ filtration_grid=None,
+ strategy="exact",
+ resolution:Optional[int]=None,
+ bool coordinates=True,
+ bool inplace = False,
+ grid_strategy=None
+ )->{{D['PYTHON_TYPE'][:-3]+"i32"}}|{{D['PYTHON_TYPE']}}:
  """
  Coarsen the filtration values on a grid. This is necessary to compute some invariants.

@@ -443,28 +547,53 @@ cdef class {{D['PYTHON_TYPE']}}:

  - inplace:bool if true, does the operation inplace, i.e., doesn't return a copy.
  """
- if not force and self.is_squeezed:
- raise ValueError("The slicer seems to be already squeezed. Use force=True to resqueeze.")
+ if grid_strategy is not None:
+ warn("`grid_strategy` is deprecated, use `strategy` instead.",DeprecationWarning)
+ strategy=grid_strategy
+
+ if self.is_squeezed:
+ warn("(copy warning) Squeezing an already squeezed slicer.")
+ temp = self.unsqueeze()
+ subgrid = compute_grid(self.filtration_grid, strategy=strategy, resolution=resolution)
+ return temp.grid_squeeze(subgrid, coordinates=coordinates, inplace=inplace)
+
  if filtration_grid is None:
  filtration_grid = compute_grid(
  self.get_filtrations_values().T,
- strategy=grid_strategy,
+ strategy=strategy,
  resolution=resolution)
  cdef vector[vector[{{D['C_VALUE_TYPE']}}]] grid = filtration_grid
  if inplace or not coordinates:
  self.truc.coarsen_on_grid_inplace(grid, coordinates)
- self.filtration_grid = filtration_grid
+ if coordinates:
+ self.filtration_grid = filtration_grid
  else:
  {{if D['COLUMN_TYPE'] is None}}
  raise ValueError("WIP")
  {{else}}
  out = {{D['PYTHON_TYPE'][:-3]+"i32"}}()
  out.truc = self.truc.coarsen_on_grid(grid)
- out.filtration_grid = filtration_grid
+ if coordinates:
+ out.filtration_grid = sanitize_grid(filtration_grid)
  out.minpres_degree = self.minpres_degree
  return out
  {{endif}}
  return self
+ def _clean_filtration_grid(self):
+ """
+ Removes the values in filtration_grid that are not linked to any splx.
+ """
+ if not self.is_squeezed:
+ raise ValueError("No grid to clean.")
+ F = self.filtration_grid
+ self.filtration_grid=None
+ cleaned_coordinates = compute_grid(self)
+ new_slicer = self.grid_squeeze(cleaned_coordinates)
+
+ self._from_ptr(new_slicer.get_ptr())
+ self.filtration_grid = tuple(f[g] for f,g in zip(F,cleaned_coordinates))
+ return self
+
  def minpres(self,
  int degree=-1,
  list[int] degrees=[],
@@ -479,7 +608,7 @@ cdef class {{D['PYTHON_TYPE']}}:
  Computes the minimal presentation of the slicer, and returns it as a new slicer.
  See :func:`multipers.slicer.minimal_presentation`.
  """
- new_slicer = minimal_presentation(self, degree=degree, degrees=degrees, backend=backend, slicer_backend=slicer_backend, vineyard=vineyard, id=id, **minpres_kwargs)
+ new_slicer = minimal_presentation(self, degree=degree, degrees=degrees, backend=backend, slicer_backend=slicer_backend, dtype=dtype, vineyard=vineyard, id=id, **minpres_kwargs)
  return new_slicer

  @property
@@ -556,10 +685,11 @@ cdef class {{D['PYTHON_TYPE']}}:
  self.truc.build_from_scc_file(c_path, rivet_compatible, reverse, shift_dimension)
  return self

- def unsqueeze(self, grid=None, dtype = np.float64):
- from multipers.grids import evaluate_in_grid
+ def unsqueeze(self, grid=None)->{{D['PYTHON_TYPE'][:-3]+"f64"}}:
+ from multipers.grids import evaluate_in_grid, sanitize_grid
  from multipers import Slicer
- grid = tuple(np.asarray(f) for f in self.filtration_grid) if grid is None else grid
+ grid = self.filtration_grid if grid is None else grid
+ grid = sanitize_grid(grid, numpyfy=True)
  new_filtrations = evaluate_in_grid(np.asarray(self.get_filtrations(), dtype=np.int32), grid)
  new_slicer = {{D['PYTHON_TYPE'][:-3]+"f64"}}(
  self.get_boundaries(),
@@ -580,10 +710,13 @@ cdef extern from "gudhi/cubical_to_boundary.h" namespace "":
  void get_vertices(unsigned int, cset[unsigned int]&, const vector[vector[unsigned int]]&) nogil


- def from_bitmap(image, **slicer_kwargs):
+ {{for pytype,ctype,fshort in dtypes}}
+ def _from_bitmap{{fshort}}(image, **slicer_kwargs):
  from multipers import Slicer
- image = np.asarray(image)
- slicer_kwargs["dtype"] = slicer_kwargs.get("dtype", image.dtype)
+ dtype = slicer_kwargs.get("dtype", image.dtype)
+ slicer_kwargs["dtype"] = dtype
+ if image.dtype != dtype:
+ raise ValueError(f"Invalid type matching. Got {dtype=} and {image.dtype=}")
  _Slicer = Slicer(return_type_only=True, **slicer_kwargs)
  cdef vector[unsigned int] img_shape = image.shape[:-1]
  cdef unsigned int num_parameters = image.shape[-1]
@@ -593,11 +726,12 @@ def from_bitmap(image, **slicer_kwargs):
  with nogil:
  _to_boundary(img_shape,gen_maps, gen_dims)

- cdef unsigned int num_gens = gen_dims.size()
- filtration_values = np.zeros(shape=(num_gens, num_parameters), dtype = np.double) - np.inf
- cdef double[:,:] F = filtration_values
- cdef double[:,:] c_img = image.reshape(-1,num_parameters)
  cdef cset[unsigned int] vertices
+
+ cdef unsigned int num_gens = gen_dims.size()
+ filtration_values = np.zeros(shape=(num_gens, num_parameters), dtype = {{pytype}}) - _Slicer._inf_value()
+ cdef {{ctype}}[:,:] F = filtration_values
+ cdef {{ctype}}[:,:] c_img = image.reshape(-1,num_parameters)
  with nogil:
  for i in range(num_gens):
  # with gil:
@@ -615,13 +749,22 @@ def from_bitmap(image, **slicer_kwargs):
  # print(f"F = {np.asarray(F[i])}")
  slicer = _Slicer(gen_maps, gen_dims, filtration_values)
  return slicer
+ {{endfor}}
+
+ def from_bitmap(img, **kwargs):
+ img = np.asarray(img)
+ {{for pytype,ctype,stype in dtypes}}
+ if img.dtype == {{pytype}}:
+ return _from_bitmap{{stype}}(img, **kwargs)
+ {{endfor}}
+ raise ValueError(f"Invalid dtype. Got {img.dtype=}, was expecting {available_dtype=}.")

  def from_function_delaunay(
  points,
  grades,
  int degree=-1,
- backend: Optional[_valid_pers_backend]=None,
- vineyard:Optional[bool]=None,
+ backend:Optional[_valid_pers_backend]=None,
+ vineyard=None, # TODO : Optional[bool] when cython fixes it
  dtype=np.float64,
  bool verbose = False,
  bool clear = True,
@@ -636,9 +779,9 @@ def from_function_delaunay(
  backend : slicer backend, e.g. "matrix", "clement"
  vineyard : bool, use a vineyard-compatible backend
  """
- from multipers.io import _init_external_softwares, function_delaunay_presentation_to_slicer
+ from multipers.io import _check_available, function_delaunay_presentation_to_slicer
  s = multipers.Slicer(None, backend=backend, vineyard=vineyard, dtype=dtype)
- _init_external_softwares(requires=["function_delaunay"])
+ assert _check_available("function_delaunay"), f"Could not find function_delaunay"
  function_delaunay_presentation_to_slicer(s, points, grades, degree=degree, verbose=verbose,clear=clear)
  if degree >= 0:
  s.minpres_degree = degree
@@ -685,7 +828,7 @@ def minimal_presentation(
  from multipers.io import _init_external_softwares, input_path, scc_reduce_from_str_to_slicer
  if is_slicer(slicer) and slicer.is_minpres and not force:
  from warnings import warn
- warn(f"The slicer seems to be already reduced, from homology of degree {slicer.minpres_degree}.")
+ warn(f"(unnecessary computation) The slicer seems to be already reduced, from homology of degree {slicer.minpres_degree}.")
  return slicer
  _init_external_softwares(requires=[backend])
  if len(degrees)>0:
@@ -696,22 +839,23 @@
  )
  # return tuple(minimal_presentation(slicer, degree=d, backend=backend, slicer_backend=slicer_backend, vineyard=vineyard, id=id, **minpres_kwargs) for d in degrees)
  assert degree>=0, f"Degree not provided."
- filtration_grid = slicer.filtration_grid if slicer.is_squeezed else None
+ if not np.any(slicer.get_dimensions() == degree):
+ return type(slicer)()
  if id is None:
  id = str(threading.get_native_id())
  if dtype is None:
  dtype = slicer.dtype
  dimension = slicer.dimension - degree # latest = L-1, which is empty, -1 for degree 0, -2 for degree 1 etc.
- slicer.to_scc(path=input_path+id, strip_comments=True, degree=degree-1)
+ slicer.to_scc(path=input_path+id, strip_comments=True, degree=degree-1, unsqueeze = False)
  new_slicer = multipers.Slicer(None,backend=slicer_backend, vineyard=vineyard, dtype=dtype)
  if backend=="mpfree":
  shift_dimension=degree-1
  else:
  shift_dimension=degree
  scc_reduce_from_str_to_slicer(path=input_path+id, slicer=new_slicer, dimension=dimension, backend=backend, shift_dimension=shift_dimension, **minpres_kwargs)
+
  new_slicer.minpres_degree = degree
- if filtration_grid is not None:
- new_slicer.filtration_grid = filtration_grid
+ new_slicer.filtration_grid = slicer.filtration_grid if slicer.is_squeezed else None
  return new_slicer


@@ -890,24 +1034,38 @@ def _rank_from_slicer(

  cdef int num_parameters = len(grid_shape)

- if zero_pad:
- for i, _ in enumerate(grid_shape):
- grid_shape[i] += 1 # adds a 0
+ # if zero_pad:
+ # grid_shape += 1
+ # for i, _ in enumerate(grid_shape):
+ # grid_shape[i] += 1 # adds a 0
  # for i,f in enumerate(grid_conversion):
  # grid_conversion[i] = np.concatenate([f, [mass_default[i]]])

+
  grid_shape_with_degree = np.asarray(np.concatenate([[len(degrees)], grid_shape, grid_shape]), dtype=python_indices_type)
+ if verbose:
+ print("Container shape: ", grid_shape_with_degree)
  container_array = np.ascontiguousarray(np.zeros(grid_shape_with_degree, dtype=python_tensor_dtype).ravel())
  assert len(container_array) < np.iinfo(python_indices_type).max, "Too large container. Raise an issue on github if you encounter this issue. (Due to tensor's operator[])"
+ # if zero_pad:
+ # grid_shape_with_degree[1:] -= 1
  cdef vector[indices_type] c_grid_shape = grid_shape_with_degree
  cdef tensor_dtype[::1] container = container_array
  cdef tensor_dtype* container_ptr = &container[0]

  ## SLICERS
+ if verbose:
+ print("Computing rank invariant...", end="")
  _compute_rank_invariant(slicer, container_ptr, c_grid_shape, degrees, n_jobs, ignore_inf)
+ if verbose:
+ print("Done.")

+ if verbose:
+ print("Computing Möbius inversion...", end="")
+ # if zero_pad:
+ # grid_shape_with_degree[1:] += 1
  rank = container_array.reshape(grid_shape_with_degree)
- rank = tuple(rank_decomposition_by_rectangles(rank_of_degree, threshold = zero_pad) for rank_of_degree in rank)
+ rank = tuple(rank_decomposition_by_rectangles(rank_of_degree, threshold=zero_pad) for rank_of_degree in rank)
  if return_raw:
  return rank
  out = []
@@ -921,4 +1079,6 @@ def _rank_from_slicer(
  return coords, weights

  out = tuple(clean_rank(rank_decomposition) for rank_decomposition in rank)
+ if verbose:
+ print("Done.")
  return out
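For orientation, here is a minimal usage sketch of the slicer API touched above. Only `from_bitmap`, `grid_squeeze(strategy=...)`, `persistence_on_line` and the new return value of `compute_persistence` are taken from this diff; the input data, the import path and the chosen parameters are assumptions, not part of the release.

# Hedged sketch, not an official example: it only exercises signatures visible in the
# diff above; the bitmap, its shape and the resolution are made up.
import numpy as np
from multipers.slicer import from_bitmap  # module-level helper defined in slicer.pyx.tp

img = np.random.rand(8, 8, 2)                  # (H, W, num_parameters); dtype selects the slicer type
slicer = from_bitmap(img)                      # dispatches on img.dtype

# `grid_strategy=` is deprecated in favor of `strategy=` (a DeprecationWarning is emitted).
squeezed = slicer.grid_squeeze(strategy="regular_closest", resolution=16)

# `persistence_on_line` gained `ignore_infinite_filtration_values`, and
# `compute_persistence` now returns the barcodes instead of the slicer itself.
bars = slicer.persistence_on_line(basepoint=[0.0, 0.0])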
multipers/tbb12.dll CHANGED
Binary file
multipers/tbbbind_2_5.dll CHANGED
Binary file
multipers/tbbmalloc.dll CHANGED
Binary file
multipers/tbbmalloc_proxy.dll CHANGED
Binary file
multipers/tensor/tensor.h CHANGED
@@ -583,7 +583,7 @@ class static_tensor_view_view
  // }
  // }
  template <typename T = std::initializer_list<indices_type>>
- inline dtype &operator[](T coords) {
+ inline dtype &operator[]([[maybe_unused]] T coords) {
  throw std::logic_error("Not yet implemented");
  // this->compute_cum_res();
  // assert(this->cum_resolution_view.size() == coords.size());
multipers/tests/__init__.py CHANGED
@@ -21,11 +21,15 @@ def sort_sm(sms):
  return tuple((sm[0][idx], sm[1][idx]) for sm in sms)


- def assert_sm_pair(sm1, sm2, exact=True, max_error=1e-3, reg=0.1):
+ def assert_sm_pair(sm1, sm2, exact=True, max_error=1e-3, reg=0.1, threshold=None):
  if not exact:
  from multipers.distances import sm_distance
+ if threshold is not None:
+ _inf_value_fix = threshold
+ sm1[0][sm1[0] >threshold] = _inf_value_fix
+ sm2[0][sm2[0] >threshold] = _inf_value_fix

- d = sm_distance(sm1, sm2, reg=0.1)
+ d = sm_distance(sm1, sm2, reg=reg)
  assert d < max_error, f"Failed comparison:\n{sm1}\n{sm2},\n with distance {d}."
  return
  assert np.all(
@@ -37,10 +41,11 @@ def assert_sm_pair(sm1, sm2, exact=True, max_error=1e-3, reg=0.1):
  ), f"Failed comparison:\n-----------------\n{sm1}\n-----------------\n{sm2}"


- def assert_sm(*args, exact=True, max_error=1e-5, reg=0.1):
+ def assert_sm(*args, exact=True, max_error=1e-5, reg=0.1, threshold=None):
  sms = tuple(args)
  for i in range(len(sms) - 1):
- assert_sm_pair(sms[i], sms[i + 1], exact=exact, max_error=max_error, reg=reg)
+ print(i)
+ assert_sm_pair(sms[i], sms[i + 1], exact=exact, max_error=max_error, reg=reg, threshold=threshold)


  def random_st(npts=100, num_parameters=2, max_dim=2):
multipers/torch/diff_grids.py CHANGED
@@ -15,6 +15,8 @@ def get_grid(strategy: Literal["exact", "regular_closest", "regular_left", "quan
  match strategy:
  case "exact":
  return _exact_grid
+ case "regular":
+ return _regular_grid
  case "regular_closest":
  return _regular_closest_grid
  case "regular_left":
@@ -35,27 +37,44 @@ def todense(grid: list[torch.Tensor]):


  def _exact_grid(filtration_values, r=None):
+ assert r is None
  grid = tuple(_unique_any(f) for f in filtration_values)
  return grid


- def _regular_closest_grid(filtration_values, r: int):
- grid = tuple(_regular_closest(f, r) for f in filtration_values)
+ def _regular_closest_grid(filtration_values, res):
+ grid = tuple(_regular_closest(f, r) for f,r in zip(filtration_values, res))
  return grid

+ def _regular_grid(filtration_values, res):
+ grid = tuple(_regular(g,r) for g,r in zip(filtration_values, res))
+ return grid
+
+ def _regular(x, r:int):
+ if x.ndim != 1:
+ raise ValueError(f"Got ndim!=1. {x=}")
+ return torch.linspace(start=torch.min(x), end=torch.max(x), steps=r, dtype=x.dtype)

- def _regular_left_grid(filtration_values, r: int):
- grid = tuple(_regular_left(f, r) for f in filtration_values)
+ def _regular_left_grid(filtration_values, res):
+ grid = tuple(_regular_left(f, r) for f,r in zip(filtration_values,res))
  return grid


- def _quantile_grid(filtration_values, r: int):
- qs = torch.linspace(0, 1, r)
- grid = tuple(_unique_any(torch.quantile(f, q=qs)) for f in filtration_values)
+ def _quantile_grid(filtration_values, res):
+ grid = tuple(_quantile(f, r) for f,r in zip(filtration_values,res))
  return grid
+ def _quantile(x, r):
+ if x.ndim != 1:
+ raise ValueError(f"Got ndim!=1. {x=}")
+ qs = torch.linspace(0, 1, r, dtype=x.dtype)
+ return _unique_any(torch.quantile(x, q=qs))
+
+


  def _unique_any(x, assume_sorted=False, remove_inf: bool = True):
+ if x.ndim != 1:
+ raise ValueError(f"Got ndim!=1. {x=}")
  if not assume_sorted:
  x, _ = x.sort()
  if remove_inf and x[-1] == torch.inf:
@@ -68,6 +87,8 @@ def _unique_any(x, assume_sorted=False, remove_inf: bool = True):


  def _regular_left(f, r: int, unique: bool = True):
+ if f.ndim != 1:
+ raise ValueError(f"Got ndim!=1. {f=}")
  f = _unique_any(f)
  with torch.no_grad():
  f_regular = torch.linspace(f[0].item(), f[-1].item(), r, device=f.device)
@@ -79,6 +100,8 @@


  def _regular_closest(f, r: int, unique: bool = True):
+ if f.ndim != 1:
+ raise ValueError(f"Got ndim!=1. {f=}")
  f = _unique_any(f)
  with torch.no_grad():
  f_reg = torch.linspace(
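The torch grid helpers above now take one resolution per parameter, and a new "regular" strategy is dispatched by `get_grid`. A small sketch of the assumed calling convention (the tensors and resolutions below are made up):

import torch
from multipers.torch.diff_grids import get_grid

filtration_values = (torch.rand(100), torch.rand(100))  # one 1-d tensor per parameter
grid_fn = get_grid("regular")                            # strategy added in this diff
grid = grid_fn(filtration_values, (20, 30))              # per-parameter resolutions, not a single int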
multipers/torch/rips_density.py CHANGED
@@ -5,7 +5,7 @@ import torch
  import gudhi as gd

  import multipers as mp
- from multipers.ml.convolutions import DTM, KDE
+ from multipers.filtrations.density import DTM, KDE
  from multipers.simplex_tree_multi import _available_strategies
  from multipers.torch.diff_grids import get_grid
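Finally, consistent with file 6 in the list above (multipers/ml/convolutions.py → multipers/filtrations/density.py), the kernel-density and distance-to-measure helpers moved; downstream imports presumably need updating along these lines:

# from multipers.ml.convolutions import DTM, KDE     # 2.3.0 location
from multipers.filtrations.density import DTM, KDE   # 2.3.2b1 location, as used above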