multipers-1.1.3-cp310-cp310-macosx_11_0_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of multipers has been flagged as possibly problematic.

Files changed (63)
  1. multipers/.dylibs/libtbb.12.12.dylib +0 -0
  2. multipers/.dylibs/libtbbmalloc.2.12.dylib +0 -0
  3. multipers/__init__.py +5 -0
  4. multipers/_old_rank_invariant.pyx +328 -0
  5. multipers/_signed_measure_meta.py +193 -0
  6. multipers/data/MOL2.py +350 -0
  7. multipers/data/UCR.py +18 -0
  8. multipers/data/__init__.py +1 -0
  9. multipers/data/graphs.py +466 -0
  10. multipers/data/immuno_regions.py +27 -0
  11. multipers/data/minimal_presentation_to_st_bf.py +0 -0
  12. multipers/data/pytorch2simplextree.py +91 -0
  13. multipers/data/shape3d.py +101 -0
  14. multipers/data/synthetic.py +68 -0
  15. multipers/distances.py +172 -0
  16. multipers/euler_characteristic.cpython-310-darwin.so +0 -0
  17. multipers/euler_characteristic.pyx +137 -0
  18. multipers/function_rips.cpython-310-darwin.so +0 -0
  19. multipers/function_rips.pyx +102 -0
  20. multipers/hilbert_function.cpython-310-darwin.so +0 -0
  21. multipers/hilbert_function.pyi +46 -0
  22. multipers/hilbert_function.pyx +151 -0
  23. multipers/io.cpython-310-darwin.so +0 -0
  24. multipers/io.pyx +176 -0
  25. multipers/ml/__init__.py +0 -0
  26. multipers/ml/accuracies.py +61 -0
  27. multipers/ml/convolutions.py +510 -0
  28. multipers/ml/invariants_with_persistable.py +79 -0
  29. multipers/ml/kernels.py +128 -0
  30. multipers/ml/mma.py +657 -0
  31. multipers/ml/one.py +472 -0
  32. multipers/ml/point_clouds.py +191 -0
  33. multipers/ml/signed_betti.py +50 -0
  34. multipers/ml/signed_measures.py +1479 -0
  35. multipers/ml/sliced_wasserstein.py +313 -0
  36. multipers/ml/tools.py +116 -0
  37. multipers/mma_structures.cpython-310-darwin.so +0 -0
  38. multipers/mma_structures.pxd +155 -0
  39. multipers/mma_structures.pyx +651 -0
  40. multipers/multiparameter_edge_collapse.py +29 -0
  41. multipers/multiparameter_module_approximation.cpython-310-darwin.so +0 -0
  42. multipers/multiparameter_module_approximation.pyi +439 -0
  43. multipers/multiparameter_module_approximation.pyx +311 -0
  44. multipers/pickle.py +53 -0
  45. multipers/plots.py +292 -0
  46. multipers/point_measure_integration.cpython-310-darwin.so +0 -0
  47. multipers/point_measure_integration.pyx +59 -0
  48. multipers/rank_invariant.cpython-310-darwin.so +0 -0
  49. multipers/rank_invariant.pyx +154 -0
  50. multipers/simplex_tree_multi.cpython-310-darwin.so +0 -0
  51. multipers/simplex_tree_multi.pxd +121 -0
  52. multipers/simplex_tree_multi.pyi +715 -0
  53. multipers/simplex_tree_multi.pyx +1417 -0
  54. multipers/slicer.cpython-310-darwin.so +0 -0
  55. multipers/slicer.pxd +94 -0
  56. multipers/slicer.pyx +276 -0
  57. multipers/tensor.pxd +13 -0
  58. multipers/test.pyx +44 -0
  59. multipers-1.1.3.dist-info/LICENSE +21 -0
  60. multipers-1.1.3.dist-info/METADATA +22 -0
  61. multipers-1.1.3.dist-info/RECORD +63 -0
  62. multipers-1.1.3.dist-info/WHEEL +5 -0
  63. multipers-1.1.3.dist-info/top_level.txt +1 -0
multipers/ml/convolutions.py
@@ -0,0 +1,510 @@
+ from collections.abc import Callable
+ from typing import Iterable, Literal
+ import numpy as np
+ from itertools import product
+
+ # from numba import njit, prange
+ # import numba.np.unsafe.ndarray  ## WORKAROUND FOR NUMBA
+
+ # @njit(nogil=True, fastmath=True, inline="always", cache=True)
+ # def _pts_convolution_gaussian_pt(pts, weights, pt, bandwidth):
+ #     """
+ #     Evaluates the convolution of the signed measure (pts, weights) with a Gaussian measure of bandwidth `bandwidth`, at the point `pt`.
+ #
+ #     Parameters
+ #     ----------
+ #     - pts : (npts) x (num_parameters)
+ #     - weights : (npts)
+ #     - pt : (num_parameters)
+ #     - bandwidth : real
+ #
+ #     Outputs
+ #     -------
+ #     The float value
+ #     """
+ #     num_parameters = pts.shape[1]
+ #     distances = np.empty(len(pts), dtype=float)
+ #     for i in prange(len(pts)):
+ #         distances[i] = np.sum((pt - pts[i]) ** 2) / (2 * bandwidth**2)
+ #     distances = np.exp(-distances) * weights / (np.sqrt(2 * np.pi) * (bandwidth ** (num_parameters / 2)))  # This last renormalization is not necessary
+ #     return np.mean(distances)
+
+
+ # @njit(nogil=True, fastmath=True, inline="always", cache=True)
+ # def _pts_convolution_exponential_pt(pts, weights, pt, bandwidth):
+ #     """
+ #     Evaluates the convolution of the signed measure (pts, weights) with an exponential measure of bandwidth `bandwidth`, at the point `pt`.
+ #
+ #     Parameters
+ #     ----------
+ #     - pts : (npts) x (num_parameters)
+ #     - weights : (npts)
+ #     - pt : (num_parameters)
+ #     - bandwidth : real
+ #
+ #     Outputs
+ #     -------
+ #     The float value
+ #     """
+ #     num_parameters = pts.shape[1]
+ #     distances = np.empty(len(pts), dtype=float)
+ #     for i in prange(len(pts)):
+ #         distances[i] = np.linalg.norm(pt - pts[i])
+ #     # distances = np.linalg.norm(pts - pt, axis=1)
+ #     distances = np.exp(-distances / bandwidth) * weights / (bandwidth**num_parameters)  # This last renormalization is not necessary
+ #     return np.mean(distances)
+
+
+ # @njit(nogil=True, cache=True)  # not sure if parallel here is worth it...
+ # def _pts_convolution_sparse_pts(pts: np.ndarray, weights: np.ndarray, pt_list: np.ndarray, bandwidth, kernel: int = 0):
+ #     """
+ #     Evaluates the convolution of the signed measure (pts, weights) with a Gaussian measure of bandwidth `bandwidth`, at the points of `pt_list`.
+ #
+ #     Parameters
+ #     ----------
+ #     - pts : (npts) x (num_parameters)
+ #     - weights : (npts)
+ #     - pt_list : (n) x (num_parameters)
+ #     - bandwidth : real
+ #
+ #     Outputs
+ #     -------
+ #     The values : (n)
+ #     """
+ #     if kernel == 0:
+ #         return np.array([_pts_convolution_gaussian_pt(pts, weights, pt_list[i], bandwidth) for i in prange(pt_list.shape[0])])
+ #     elif kernel == 1:
+ #         return np.array([_pts_convolution_exponential_pt(pts, weights, pt_list[i], bandwidth) for i in prange(pt_list.shape[0])])
+ #     else:
+ #         raise Exception("Unsupported kernel")
+
+
+ def convolution_signed_measures(
+     iterable_of_signed_measures,
+     filtrations,
+     bandwidth,
+     flatten: bool = True,
+     n_jobs: int = 1,
+     backend="pykeops",
+     kernel="gaussian",
+     **kwargs,
+ ):
+     """
+     Evaluates the convolution of each signed measure (pts, weights) with a kernel of bandwidth `bandwidth`, on the grid given by `filtrations`.
+
+     Parameters
+     ----------
+     - iterable_of_signed_measures : (num_signed_measure) x [ (npts) x (num_parameters), (npts) ]
+     - filtrations : (num_parameters) x (filtration values)
+     - flatten : bool
+     - n_jobs : int
+
+     Outputs
+     -------
+     The concatenated images, for each signed measure : (num_signed_measures) x (len(f) for f in filtrations)
+     """
+     grid_iterator = np.array(list(product(*filtrations)), dtype=float)
+     match backend:
+         case "sklearn":
+
+             def convolution_signed_measures_on_grid(
+                 signed_measures: Iterable[tuple[np.ndarray, np.ndarray]]
+             ):
+                 return np.concatenate(
+                     [
+                         _pts_convolution_sparse_old(
+                             pts=pts,
+                             pts_weights=weights,
+                             grid_iterator=grid_iterator,
+                             bandwidth=bandwidth,
+                             kernel=kernel,
+                             **kwargs,
+                         )
+                         for pts, weights in signed_measures
+                     ],
+                     axis=0,
+                 )
+
+         # case "numba":
+         #     kernel2int = {"gaussian": 0, "exponential": 1, "other": 2}
+         #     def convolution_signed_measures_on_grid(signed_measures: Iterable[tuple[np.ndarray, np.ndarray]]):
+         #         return np.concatenate([
+         #             _pts_convolution_sparse_pts(pts, weights, grid_iterator, bandwidth, kernel=kernel2int[kernel]) for pts, weights in signed_measures
+         #         ], axis=0)
+         case "pykeops":
+
+             def convolution_signed_measures_on_grid(
+                 signed_measures: Iterable[tuple[np.ndarray, np.ndarray]]
+             ):
+                 return np.concatenate(
+                     [
+                         _pts_convolution_pykeops(
+                             pts=pts,
+                             pts_weights=weights,
+                             grid_iterator=grid_iterator,
+                             bandwidth=bandwidth,
+                             kernel=kernel,
+                             **kwargs,
+                         )
+                         for pts, weights in signed_measures
+                     ],
+                     axis=0,
+                 )
+
+             # compile the PyKeops kernel once, on a small input
+             pts, weights = iterable_of_signed_measures[0][0]
+             small_pts, small_weights = pts[:2], weights[:2]
+             _pts_convolution_pykeops(
+                 small_pts,
+                 small_weights,
+                 grid_iterator=grid_iterator,
+                 bandwidth=bandwidth,
+                 kernel=kernel,
+                 **kwargs,
+             )
+         case _:
+             raise ValueError(f"Unsupported backend {backend}.")
+
+     if n_jobs > 1 or n_jobs == -1:
+         prefer = "processes" if backend == "sklearn" else "threads"
+         from joblib import Parallel, delayed
+
+         convolutions = Parallel(n_jobs=n_jobs, prefer=prefer)(
+             delayed(convolution_signed_measures_on_grid)(sms)
+             for sms in iterable_of_signed_measures
+         )
+     else:
+         convolutions = [
+             convolution_signed_measures_on_grid(sms)
+             for sms in iterable_of_signed_measures
+         ]
+     if not flatten:
+         out_shape = [-1] + [len(f) for f in filtrations]  # Degree
+         convolutions = [x.reshape(out_shape) for x in convolutions]
+     return np.asarray(convolutions, dtype=float)
+
+
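For context, a minimal sketch of how this entry point might be called, assuming PyKeops is installed (it is the default backend; pass backend="sklearn" to fall back to scikit-learn). The two-point signed measure mirrors the toy measure of the commented `_test` helper below; the grid and bandwidth are illustrative only:

    import numpy as np

    # one datum carrying one signed measure: two weighted points in R^2
    pts = np.array([[1.0, 1.0], [1.1, 1.1]])
    weights = np.array([1.0, -1.0])
    sms = [[(pts, weights)]]  # (num data) x (signed measures per datum)

    filtrations = [np.linspace(0, 2, 50), np.linspace(0, 2, 50)]
    imgs = convolution_signed_measures(sms, filtrations, bandwidth=0.2, flatten=False)
    # imgs.shape == (1, 1, 50, 50): (data, measures per datum, grid, grid)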
+ # def _test(r=1000, b=0.5, plot=True, kernel=0):
+ #     import matplotlib.pyplot as plt
+ #     pts, weights = np.array([[1.0, 1.0], [1.1, 1.1]]), np.array([1, -1])
+ #     pt_list = np.array(list(product(*[np.linspace(0, 2, r)] * 2)))
+ #     img = _pts_convolution_sparse_pts(pts, weights, pt_list, b, kernel=kernel)
+ #     if plot:
+ #         plt.imshow(img.reshape(r, -1).T, origin="lower")
+ #         plt.show()
+
+
+ def _pts_convolution_sparse_old(
+     pts: np.ndarray,
+     pts_weights: np.ndarray,
+     grid_iterator,
+     kernel="gaussian",
+     bandwidth=0.1,
+     **more_kde_args,
+ ):
+     """
+     Old version of `convolution_signed_measures`. scikit-learn's convolution is slower than the PyKeops code above.
+     """
+     from sklearn.neighbors import KernelDensity
+
+     if len(pts) == 0:
+         # warn("Found a trivial signed measure !")
+         return np.zeros(len(grid_iterator))
+     kde = KernelDensity(
+         kernel=kernel, bandwidth=bandwidth, rtol=1e-4, **more_kde_args
+     )  # TODO : check rtol
+     pos_indices = pts_weights > 0
+     neg_indices = pts_weights < 0
+     # score_samples returns log-densities; an empty part of the measure has
+     # density 0, i.e. log-density -inf
+     img_pos = (
+         np.full(len(grid_iterator), -np.inf)
+         if pos_indices.sum() == 0
+         else kde.fit(
+             pts[pos_indices], sample_weight=pts_weights[pos_indices]
+         ).score_samples(grid_iterator)
+     )
+     img_neg = (
+         np.full(len(grid_iterator), -np.inf)
+         if neg_indices.sum() == 0
+         else kde.fit(
+             pts[neg_indices], sample_weight=-pts_weights[neg_indices]
+         ).score_samples(grid_iterator)
+     )
+     return np.exp(img_pos) - np.exp(img_neg)
+
+
+ def _pts_convolution_pykeops(
+     pts: np.ndarray,
+     pts_weights: np.ndarray,
+     grid_iterator,
+     kernel="gaussian",
+     bandwidth=0.1,
+     **more_kde_args,
+ ):
+     """
+     PyKeops convolution of a signed measure, via the `KDE` class below.
+     """
+     kde = KDE(kernel=kernel, bandwidth=bandwidth, return_log=False, **more_kde_args)
+     return kde.fit(
+         pts, sample_weights=np.asarray(pts_weights, dtype=pts.dtype)
+     ).score_samples(grid_iterator)
+
+
+ # TODO : multiple bandwidths at once with lazy tensors
+ class KDE:
+     """
+     Fast, scikit-learn-style, and differentiable kernel density estimation, using PyKeops.
+     """
+
+     def __init__(
+         self,
+         bandwidth: float = 1,
+         kernel: Literal["gaussian", "exponential"] | Callable = "gaussian",
+         return_log=True,
+     ):
+         """
+         bandwidth : numeric
+             bandwidth of the kernel
+         """
+         self.X = None
+         self.bandwidth = bandwidth
+         self.kernel = kernel
+         self._kernel = None
+         self._backend = None
+         self._sample_weights = None
+         self.return_log = return_log
+
+     def fit(self, X, sample_weights=None, y=None):
+         self.X = X
+         self._sample_weights = sample_weights
+         if isinstance(X, np.ndarray):
+             self._backend = np
+         else:
+             import torch
+
+             if isinstance(X, torch.Tensor):
+                 self._backend = torch
+             else:
+                 raise Exception("Unsupported backend.")
+         match self.kernel:
+             case "gaussian":
+                 self._kernel = self.gaussian_kernel
+             case "exponential":
+                 self._kernel = self.exponential_kernel
+             case _:
+                 assert callable(self.kernel), (
+                     f"Unknown kernel {self.kernel}. A custom kernel has to be callable: "
+                     "(x: LazyTensor (n,1,D), y: LazyTensor (1,m,D), bandwidth: float) -> kernel matrix."
+                 )
+                 self._kernel = self.kernel
+         return self
+
+     @staticmethod
+     def gaussian_kernel(x_i, y_j, bandwidth):
+         exponent = -(((x_i - y_j) / bandwidth) ** 2).sum(dim=2) / 2
+         # float is necessary for some reason (pykeops fails)
+         kernel = exponent.exp() / (bandwidth * float(np.sqrt(2 * np.pi)))
+         return kernel
+
+     @staticmethod
+     def exponential_kernel(x_i, y_j, bandwidth):
+         # exp(-||x_i - y_j|| / bandwidth) / bandwidth
+         exponent = -(((x_i - y_j) ** 2).sum(dim=2).sqrt()) / bandwidth
+         kernel = exponent.exp() / bandwidth
+         return kernel
+
+     @staticmethod
+     def to_lazy(X, Y, x_weights):
+         if isinstance(X, np.ndarray):
+             from pykeops.numpy import LazyTensor
+
+             lazy_x = LazyTensor(X.reshape((X.shape[0], 1, X.shape[1])))  # numpts, 1, dim
+             lazy_y = LazyTensor(Y.reshape((1, Y.shape[0], Y.shape[1])))  # 1, numpts, dim
+             if x_weights is not None:
+                 w = LazyTensor(x_weights[:, None], axis=0)
+                 return lazy_x, lazy_y, w
+             return lazy_x, lazy_y, None
+         import torch
+
+         if isinstance(X, torch.Tensor):
+             from pykeops.torch import LazyTensor
+
+             lazy_x = LazyTensor(X.view(X.shape[0], 1, X.shape[1]))
+             lazy_y = LazyTensor(Y.view(1, Y.shape[0], Y.shape[1]))
+             if x_weights is not None:
+                 w = LazyTensor(x_weights[:, None], axis=0)
+                 return lazy_x, lazy_y, w
+             return lazy_x, lazy_y, None
+         raise Exception("Bad tensor type.")
+
+     def score_samples(self, Y, X=None, return_kernel=False):
+         """Returns the kernel density estimate at each point of `Y`.
+
+         Parameters
+         ----------
+         Y : tensor (m, d)
+             `m` points with `d` dimensions at which the probability density
+             will be evaluated
+         X : tensor (n, d), optional
+             `n` points with `d` dimensions to which the KDE is fit. Provided
+             to allow batch calculations in `log_prob`. By default, `X` is None
+             and all points used to initialize the estimator are included.
+
+         Returns
+         -------
+         log_probs : tensor (m)
+             (log-)probability densities at each of the queried points in `Y`
+         """
+         X = self.X if X is None else X
+         assert Y.shape[1] == X.shape[1] and X.ndim == Y.ndim == 2
+         lazy_x, lazy_y, w = self.to_lazy(X, Y, x_weights=self._sample_weights)
+         kernel = self._kernel(lazy_x, lazy_y, self.bandwidth)
+         if w is not None:
+             kernel *= w
+         if return_kernel:
+             return kernel
+         density_estimation = kernel.sum(dim=0).flatten() / kernel.shape[0]  # mean over sample points
+         return (
+             self._backend.log(density_estimation)
+             if self.return_log
+             else density_estimation
+         )
+
+
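A short sketch of the estimator used directly, again assuming PyKeops is available; the data, seed, and bandwidth below are arbitrary:

    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.normal(size=(100, 2))            # sample points
    Y = np.array([[0.0, 0.0], [1.0, 1.0]])   # query points

    kde = KDE(bandwidth=0.5, kernel="gaussian", return_log=False)
    densities = kde.fit(X).score_samples(Y)  # shape (2,)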
+ class DTM:
+     """
+     Fast, scikit-learn-style, and differentiable distance-to-measure (DTM) estimation.
+     Tuned version of a KNN query, backed by a scikit-learn KDTree.
+     """
+
+     def __init__(self, masses=(0.1,), metric: str = "euclidean", **_kdtree_kwargs):
+         """
+         masses : list of floats in (0,1]
+             The mass thresholds
+         metric :
+             The distance between points to consider
+         """
+         self.masses = masses
+         self.metric = metric
+         self._kdtree_kwargs = _kdtree_kwargs
+         self._ks = None
+         self._kdtree = None
+         self._X = None
+         self._backend = None
+
+     def fit(self, X, sample_weights=None, y=None):
+         if len(self.masses) == 0:
+             return self
+         assert np.max(self.masses) <= 1, "All masses should be in (0,1]."
+         from sklearn.neighbors import KDTree
+
+         if not isinstance(X, np.ndarray):
+             import torch
+
+             assert isinstance(X, torch.Tensor), "Backend has to be numpy or torch"
+             _X = X.detach()
+             self._backend = "torch"
+         else:
+             _X = X
+             self._backend = "numpy"
+         self._ks = np.array([int(mass * X.shape[0]) + 1 for mass in self.masses])
+         self._kdtree = KDTree(_X, metric=self.metric, **self._kdtree_kwargs)
+         self._X = X
+         return self
+
+     def score_samples(self, Y, X=None):
+         """Returns the distance to measure (DTM) of each point in `Y`.
+
+         Parameters
+         ----------
+         Y : tensor (m, d)
+             `m` points with `d` dimensions at which the DTM will be evaluated
+
+         Returns
+         -------
+         The DTMs of Y, for each mass in `masses`.
+         """
+         if len(self.masses) == 0:
+             return np.empty((0, len(Y)))
+         assert Y.ndim == 2
+         if self._backend == "torch":
+             _Y = Y.detach().numpy()
+         else:
+             _Y = Y
+         NN_Dist, NN = self._kdtree.query(_Y, self._ks.max(), return_distance=True)
+         DTMs = np.array([((NN_Dist**2)[:, :k].mean(1)) ** 0.5 for k in self._ks])
+         return DTMs
+
+     def score_samples_diff(self, Y):
+         """Differentiable (torch) version of `score_samples`.
+
+         Parameters
+         ----------
+         Y : tensor (m, d)
+             `m` points with `d` dimensions at which the DTM will be evaluated
+
+         Returns
+         -------
+         The DTMs of Y, for each mass in `masses`, as a tuple of torch tensors.
+         """
+         import torch
+
+         assert Y.ndim == 2
+         assert self._backend == "torch", "Use the non-diff version with numpy."
+         if len(self.masses) == 0:
+             return torch.empty(0, len(Y))
+         NN = self._kdtree.query(Y.detach(), self._ks.max(), return_distance=False)
+         DTMs = tuple(
+             (((self._X[NN] - Y[:, None, :]) ** 2)[:, :k].sum(dim=(1, 2)) / k) ** 0.5
+             for k in self._ks
+         )  # TODO : kdtree already computes distances; find a kdtree implementation that is pytorch-differentiable
+         return DTMs
+
+
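A corresponding sketch for the DTM estimator, which only needs scikit-learn; the mass values and data are illustrative:

    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 2))

    dtm = DTM(masses=[0.05, 0.1]).fit(X)
    values = dtm.score_samples(np.array([[0.0, 0.0], [2.0, 2.0]]))
    # values.shape == (2, 2): one row per mass, one column per query point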
+ # def _pts_convolution_sparse(pts: np.ndarray, pts_weights: np.ndarray, filtration_grid: Iterable[np.ndarray], kernel="gaussian", bandwidth=0.1, **more_kde_args):
+ #     """
+ #     Old version of `convolution_signed_measures`. scikit-learn's convolution is slower than the code above.
+ #     """
+ #     from sklearn.neighbors import KernelDensity
+ #     grid_iterator = np.asarray(list(product(*filtration_grid)))
+ #     grid_shape = [len(f) for f in filtration_grid]
+ #     if len(pts) == 0:
+ #         # warn("Found a trivial signed measure !")
+ #         return np.zeros(shape=grid_shape)
+ #     kde = KernelDensity(kernel=kernel, bandwidth=bandwidth, rtol=1e-4, **more_kde_args)  # TODO : check rtol
+
+ #     pos_indices = pts_weights > 0
+ #     neg_indices = pts_weights < 0
+ #     img_pos = kde.fit(pts[pos_indices], sample_weight=pts_weights[pos_indices]).score_samples(grid_iterator).reshape(grid_shape)
+ #     img_neg = kde.fit(pts[neg_indices], sample_weight=-pts_weights[neg_indices]).score_samples(grid_iterator).reshape(grid_shape)
+ #     return np.exp(img_pos) - np.exp(img_neg)
+
+
+ # Precompiles the convolution
+ # _test(r=2, b=.5, plot=False)
multipers/ml/invariants_with_persistable.py
@@ -0,0 +1,79 @@
+ import persistable
+
+
+ # Requires installing ripser (pip install ripser) as well as persistable from the higher-homology branch,
+ # which can be done as follows:
+ #     pip install git+https://github.com/LuisScoccola/persistable.git@higher-homology
+ # NOTE: only accepts a distance matrix as input.
+ def hf_degree_rips(
+     distance_matrix,
+     min_rips_value,
+     max_rips_value,
+     max_normalized_degree,
+     min_normalized_degree,
+     grid_granularity,
+     max_homological_dimension,
+     subsample_size=None,
+ ):
+     if subsample_size is None:
+         p = persistable.Persistable(distance_matrix, metric="precomputed")
+     else:
+         p = persistable.Persistable(distance_matrix, metric="precomputed", subsample=subsample_size)
+
+     rips_values, normalized_degree_values, hilbert_functions, minimal_hilbert_decompositions = p._hilbert_function(
+         min_rips_value,
+         max_rips_value,
+         max_normalized_degree,
+         min_normalized_degree,
+         grid_granularity,
+         homological_dimension=max_homological_dimension,
+     )
+
+     return rips_values, normalized_degree_values, hilbert_functions, minimal_hilbert_decompositions
+
+
+ def hf_h0_degree_rips(
+     point_cloud,
+     min_rips_value,
+     max_rips_value,
+     max_normalized_degree,
+     min_normalized_degree,
+     grid_granularity,
+ ):
+     p = persistable.Persistable(point_cloud, n_neighbors="all")
+
+     rips_values, normalized_degree_values, hilbert_functions, minimal_hilbert_decompositions = p._hilbert_function(
+         min_rips_value,
+         max_rips_value,
+         max_normalized_degree,
+         min_normalized_degree,
+         grid_granularity,
+     )
+
+     return rips_values, normalized_degree_values, hilbert_functions[0], minimal_hilbert_decompositions[0]
+
+
+ def ri_h0_degree_rips(
+     point_cloud,
+     min_rips_value,
+     max_rips_value,
+     max_normalized_degree,
+     min_normalized_degree,
+     grid_granularity,
+ ):
+     p = persistable.Persistable(point_cloud, n_neighbors="all")
+
+     rips_values, normalized_degree_values, rank_invariant, _, _ = p._rank_invariant(
+         min_rips_value,
+         max_rips_value,
+         max_normalized_degree,
+         min_normalized_degree,
+         grid_granularity,
+     )
+
+     return rips_values, normalized_degree_values, rank_invariant
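A minimal sketch of calling the degree-Rips helper above on a toy point cloud, assuming the higher-homology branch of persistable mentioned in the file header is installed; the grid bounds and granularity are illustrative only:

    import numpy as np

    rng = np.random.default_rng(0)
    point_cloud = rng.normal(size=(100, 2))

    rips_values, degree_values, hf, hd = hf_h0_degree_rips(
        point_cloud,
        min_rips_value=0.0,
        max_rips_value=2.0,
        max_normalized_degree=0.5,
        min_normalized_degree=0.01,
        grid_granularity=50,
    )
    # hf: the degree-0 Hilbert function sampled on the (rips value, normalized degree) grid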