multipers 2.2.3__cp312-cp312-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of multipers might be problematic. Click here for more details.

Files changed (189) hide show
  1. multipers/__init__.py +31 -0
  2. multipers/_signed_measure_meta.py +430 -0
  3. multipers/_slicer_meta.py +212 -0
  4. multipers/data/MOL2.py +458 -0
  5. multipers/data/UCR.py +18 -0
  6. multipers/data/__init__.py +1 -0
  7. multipers/data/graphs.py +466 -0
  8. multipers/data/immuno_regions.py +27 -0
  9. multipers/data/minimal_presentation_to_st_bf.py +0 -0
  10. multipers/data/pytorch2simplextree.py +91 -0
  11. multipers/data/shape3d.py +101 -0
  12. multipers/data/synthetic.py +111 -0
  13. multipers/distances.py +198 -0
  14. multipers/filtration_conversions.pxd +229 -0
  15. multipers/filtration_conversions.pxd.tp +84 -0
  16. multipers/filtrations.pxd +224 -0
  17. multipers/function_rips.cp312-win_amd64.pyd +0 -0
  18. multipers/function_rips.pyx +105 -0
  19. multipers/grids.cp312-win_amd64.pyd +0 -0
  20. multipers/grids.pyx +350 -0
  21. multipers/gudhi/Persistence_slices_interface.h +132 -0
  22. multipers/gudhi/Simplex_tree_interface.h +245 -0
  23. multipers/gudhi/Simplex_tree_multi_interface.h +561 -0
  24. multipers/gudhi/cubical_to_boundary.h +59 -0
  25. multipers/gudhi/gudhi/Bitmap_cubical_complex.h +450 -0
  26. multipers/gudhi/gudhi/Bitmap_cubical_complex_base.h +1070 -0
  27. multipers/gudhi/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h +579 -0
  28. multipers/gudhi/gudhi/Debug_utils.h +45 -0
  29. multipers/gudhi/gudhi/Fields/Multi_field.h +484 -0
  30. multipers/gudhi/gudhi/Fields/Multi_field_operators.h +455 -0
  31. multipers/gudhi/gudhi/Fields/Multi_field_shared.h +450 -0
  32. multipers/gudhi/gudhi/Fields/Multi_field_small.h +531 -0
  33. multipers/gudhi/gudhi/Fields/Multi_field_small_operators.h +507 -0
  34. multipers/gudhi/gudhi/Fields/Multi_field_small_shared.h +531 -0
  35. multipers/gudhi/gudhi/Fields/Z2_field.h +355 -0
  36. multipers/gudhi/gudhi/Fields/Z2_field_operators.h +376 -0
  37. multipers/gudhi/gudhi/Fields/Zp_field.h +420 -0
  38. multipers/gudhi/gudhi/Fields/Zp_field_operators.h +400 -0
  39. multipers/gudhi/gudhi/Fields/Zp_field_shared.h +418 -0
  40. multipers/gudhi/gudhi/Flag_complex_edge_collapser.h +337 -0
  41. multipers/gudhi/gudhi/Matrix.h +2107 -0
  42. multipers/gudhi/gudhi/Multi_critical_filtration.h +1038 -0
  43. multipers/gudhi/gudhi/Multi_persistence/Box.h +171 -0
  44. multipers/gudhi/gudhi/Multi_persistence/Line.h +282 -0
  45. multipers/gudhi/gudhi/Off_reader.h +173 -0
  46. multipers/gudhi/gudhi/One_critical_filtration.h +1431 -0
  47. multipers/gudhi/gudhi/Persistence_matrix/Base_matrix.h +769 -0
  48. multipers/gudhi/gudhi/Persistence_matrix/Base_matrix_with_column_compression.h +686 -0
  49. multipers/gudhi/gudhi/Persistence_matrix/Boundary_matrix.h +842 -0
  50. multipers/gudhi/gudhi/Persistence_matrix/Chain_matrix.h +1350 -0
  51. multipers/gudhi/gudhi/Persistence_matrix/Id_to_index_overlay.h +1105 -0
  52. multipers/gudhi/gudhi/Persistence_matrix/Position_to_index_overlay.h +859 -0
  53. multipers/gudhi/gudhi/Persistence_matrix/RU_matrix.h +910 -0
  54. multipers/gudhi/gudhi/Persistence_matrix/allocators/entry_constructors.h +139 -0
  55. multipers/gudhi/gudhi/Persistence_matrix/base_pairing.h +230 -0
  56. multipers/gudhi/gudhi/Persistence_matrix/base_swap.h +211 -0
  57. multipers/gudhi/gudhi/Persistence_matrix/boundary_cell_position_to_id_mapper.h +60 -0
  58. multipers/gudhi/gudhi/Persistence_matrix/boundary_face_position_to_id_mapper.h +60 -0
  59. multipers/gudhi/gudhi/Persistence_matrix/chain_pairing.h +136 -0
  60. multipers/gudhi/gudhi/Persistence_matrix/chain_rep_cycles.h +190 -0
  61. multipers/gudhi/gudhi/Persistence_matrix/chain_vine_swap.h +616 -0
  62. multipers/gudhi/gudhi/Persistence_matrix/columns/chain_column_extra_properties.h +150 -0
  63. multipers/gudhi/gudhi/Persistence_matrix/columns/column_dimension_holder.h +106 -0
  64. multipers/gudhi/gudhi/Persistence_matrix/columns/column_utilities.h +219 -0
  65. multipers/gudhi/gudhi/Persistence_matrix/columns/entry_types.h +327 -0
  66. multipers/gudhi/gudhi/Persistence_matrix/columns/heap_column.h +1140 -0
  67. multipers/gudhi/gudhi/Persistence_matrix/columns/intrusive_list_column.h +934 -0
  68. multipers/gudhi/gudhi/Persistence_matrix/columns/intrusive_set_column.h +934 -0
  69. multipers/gudhi/gudhi/Persistence_matrix/columns/list_column.h +980 -0
  70. multipers/gudhi/gudhi/Persistence_matrix/columns/naive_vector_column.h +1092 -0
  71. multipers/gudhi/gudhi/Persistence_matrix/columns/row_access.h +192 -0
  72. multipers/gudhi/gudhi/Persistence_matrix/columns/set_column.h +921 -0
  73. multipers/gudhi/gudhi/Persistence_matrix/columns/small_vector_column.h +1093 -0
  74. multipers/gudhi/gudhi/Persistence_matrix/columns/unordered_set_column.h +1012 -0
  75. multipers/gudhi/gudhi/Persistence_matrix/columns/vector_column.h +1244 -0
  76. multipers/gudhi/gudhi/Persistence_matrix/matrix_dimension_holders.h +186 -0
  77. multipers/gudhi/gudhi/Persistence_matrix/matrix_row_access.h +164 -0
  78. multipers/gudhi/gudhi/Persistence_matrix/ru_pairing.h +156 -0
  79. multipers/gudhi/gudhi/Persistence_matrix/ru_rep_cycles.h +376 -0
  80. multipers/gudhi/gudhi/Persistence_matrix/ru_vine_swap.h +540 -0
  81. multipers/gudhi/gudhi/Persistent_cohomology/Field_Zp.h +118 -0
  82. multipers/gudhi/gudhi/Persistent_cohomology/Multi_field.h +173 -0
  83. multipers/gudhi/gudhi/Persistent_cohomology/Persistent_cohomology_column.h +128 -0
  84. multipers/gudhi/gudhi/Persistent_cohomology.h +745 -0
  85. multipers/gudhi/gudhi/Points_off_io.h +171 -0
  86. multipers/gudhi/gudhi/Simple_object_pool.h +69 -0
  87. multipers/gudhi/gudhi/Simplex_tree/Simplex_tree_iterators.h +463 -0
  88. multipers/gudhi/gudhi/Simplex_tree/Simplex_tree_node_explicit_storage.h +83 -0
  89. multipers/gudhi/gudhi/Simplex_tree/Simplex_tree_siblings.h +106 -0
  90. multipers/gudhi/gudhi/Simplex_tree/Simplex_tree_star_simplex_iterators.h +277 -0
  91. multipers/gudhi/gudhi/Simplex_tree/hooks_simplex_base.h +62 -0
  92. multipers/gudhi/gudhi/Simplex_tree/indexing_tag.h +27 -0
  93. multipers/gudhi/gudhi/Simplex_tree/serialization_utils.h +62 -0
  94. multipers/gudhi/gudhi/Simplex_tree/simplex_tree_options.h +157 -0
  95. multipers/gudhi/gudhi/Simplex_tree.h +2794 -0
  96. multipers/gudhi/gudhi/Simplex_tree_multi.h +163 -0
  97. multipers/gudhi/gudhi/distance_functions.h +62 -0
  98. multipers/gudhi/gudhi/graph_simplicial_complex.h +104 -0
  99. multipers/gudhi/gudhi/persistence_interval.h +253 -0
  100. multipers/gudhi/gudhi/persistence_matrix_options.h +170 -0
  101. multipers/gudhi/gudhi/reader_utils.h +367 -0
  102. multipers/gudhi/mma_interface_coh.h +255 -0
  103. multipers/gudhi/mma_interface_h0.h +231 -0
  104. multipers/gudhi/mma_interface_matrix.h +282 -0
  105. multipers/gudhi/naive_merge_tree.h +575 -0
  106. multipers/gudhi/scc_io.h +289 -0
  107. multipers/gudhi/truc.h +888 -0
  108. multipers/io.cp312-win_amd64.pyd +0 -0
  109. multipers/io.pyx +711 -0
  110. multipers/ml/__init__.py +0 -0
  111. multipers/ml/accuracies.py +90 -0
  112. multipers/ml/convolutions.py +520 -0
  113. multipers/ml/invariants_with_persistable.py +79 -0
  114. multipers/ml/kernels.py +176 -0
  115. multipers/ml/mma.py +714 -0
  116. multipers/ml/one.py +472 -0
  117. multipers/ml/point_clouds.py +346 -0
  118. multipers/ml/signed_measures.py +1589 -0
  119. multipers/ml/sliced_wasserstein.py +461 -0
  120. multipers/ml/tools.py +113 -0
  121. multipers/mma_structures.cp312-win_amd64.pyd +0 -0
  122. multipers/mma_structures.pxd +127 -0
  123. multipers/mma_structures.pyx +2746 -0
  124. multipers/mma_structures.pyx.tp +1085 -0
  125. multipers/multi_parameter_rank_invariant/diff_helpers.h +93 -0
  126. multipers/multi_parameter_rank_invariant/euler_characteristic.h +97 -0
  127. multipers/multi_parameter_rank_invariant/function_rips.h +322 -0
  128. multipers/multi_parameter_rank_invariant/hilbert_function.h +769 -0
  129. multipers/multi_parameter_rank_invariant/persistence_slices.h +148 -0
  130. multipers/multi_parameter_rank_invariant/rank_invariant.h +369 -0
  131. multipers/multiparameter_edge_collapse.py +41 -0
  132. multipers/multiparameter_module_approximation/approximation.h +2295 -0
  133. multipers/multiparameter_module_approximation/combinatory.h +129 -0
  134. multipers/multiparameter_module_approximation/debug.h +107 -0
  135. multipers/multiparameter_module_approximation/euler_curves.h +0 -0
  136. multipers/multiparameter_module_approximation/format_python-cpp.h +286 -0
  137. multipers/multiparameter_module_approximation/heap_column.h +238 -0
  138. multipers/multiparameter_module_approximation/images.h +79 -0
  139. multipers/multiparameter_module_approximation/list_column.h +174 -0
  140. multipers/multiparameter_module_approximation/list_column_2.h +232 -0
  141. multipers/multiparameter_module_approximation/ru_matrix.h +347 -0
  142. multipers/multiparameter_module_approximation/set_column.h +135 -0
  143. multipers/multiparameter_module_approximation/structure_higher_dim_barcode.h +36 -0
  144. multipers/multiparameter_module_approximation/unordered_set_column.h +166 -0
  145. multipers/multiparameter_module_approximation/utilities.h +419 -0
  146. multipers/multiparameter_module_approximation/vector_column.h +223 -0
  147. multipers/multiparameter_module_approximation/vector_matrix.h +331 -0
  148. multipers/multiparameter_module_approximation/vineyards.h +464 -0
  149. multipers/multiparameter_module_approximation/vineyards_trajectories.h +649 -0
  150. multipers/multiparameter_module_approximation.cp312-win_amd64.pyd +0 -0
  151. multipers/multiparameter_module_approximation.pyx +217 -0
  152. multipers/pickle.py +53 -0
  153. multipers/plots.py +334 -0
  154. multipers/point_measure.cp312-win_amd64.pyd +0 -0
  155. multipers/point_measure.pyx +320 -0
  156. multipers/simplex_tree_multi.cp312-win_amd64.pyd +0 -0
  157. multipers/simplex_tree_multi.pxd +133 -0
  158. multipers/simplex_tree_multi.pyx +10335 -0
  159. multipers/simplex_tree_multi.pyx.tp +1935 -0
  160. multipers/slicer.cp312-win_amd64.pyd +0 -0
  161. multipers/slicer.pxd +2371 -0
  162. multipers/slicer.pxd.tp +214 -0
  163. multipers/slicer.pyx +15467 -0
  164. multipers/slicer.pyx.tp +914 -0
  165. multipers/tbb12.dll +0 -0
  166. multipers/tbbbind_2_5.dll +0 -0
  167. multipers/tbbmalloc.dll +0 -0
  168. multipers/tbbmalloc_proxy.dll +0 -0
  169. multipers/tensor/tensor.h +672 -0
  170. multipers/tensor.pxd +13 -0
  171. multipers/test.pyx +44 -0
  172. multipers/tests/__init__.py +57 -0
  173. multipers/tests/test_diff_helper.py +73 -0
  174. multipers/tests/test_hilbert_function.py +82 -0
  175. multipers/tests/test_mma.py +83 -0
  176. multipers/tests/test_point_clouds.py +49 -0
  177. multipers/tests/test_python-cpp_conversion.py +82 -0
  178. multipers/tests/test_signed_betti.py +181 -0
  179. multipers/tests/test_signed_measure.py +89 -0
  180. multipers/tests/test_simplextreemulti.py +221 -0
  181. multipers/tests/test_slicer.py +221 -0
  182. multipers/torch/__init__.py +1 -0
  183. multipers/torch/diff_grids.py +217 -0
  184. multipers/torch/rips_density.py +304 -0
  185. multipers-2.2.3.dist-info/LICENSE +21 -0
  186. multipers-2.2.3.dist-info/METADATA +134 -0
  187. multipers-2.2.3.dist-info/RECORD +189 -0
  188. multipers-2.2.3.dist-info/WHEEL +5 -0
  189. multipers-2.2.3.dist-info/top_level.txt +1 -0
File without changes
@@ -0,0 +1,90 @@
1
+ import pandas as pd
2
+ from warnings import warn
3
+ import numpy as np
4
+ from tqdm import tqdm
5
+ from os.path import exists
6
+
7
+
8
def accuracy_to_csv(
    X,
    Y,
    cl,
    k: float = 10,
    dataset: str = "",
    shuffle=True,
    verbose: bool = True,
    **more_columns,
):
    """Cross-validate classifier `cl` on (X, Y) and append the result to a CSV.

    Parameters
    ----------
    X, Y
        Samples and labels; indexable sequences of the same length.
    cl
        Scikit-learn-style estimator (``fit`` / ``score``). If it exposes
        ``best_params_`` (e.g. a fitted hyper-parameter search), those are
        printed as well.
    k : float
        If ``k > 1``: number of stratified folds. If ``0 < k <= 1``: test
        size of a single train/test split.
    dataset : str
        Name used for logging and for the output file name.
    shuffle : bool
        Whether to shuffle before splitting.
    verbose : bool
        Print per-fold scores.
    **more_columns
        Extra values appended as additional CSV columns; keys colliding with
        the standard columns are skipped with a warning.

    Side effects
    ------------
    Appends one line to ``result_<dataset>.csv`` with columns
    ``["dataset", "cv", "mean", "std"]`` plus any extra columns.
    """
    assert k > 0, "k is either the number of kfold > 1 or the test size > 0."
    if k > 1:
        k = int(k)
        from sklearn.model_selection import StratifiedKFold as KFold

        kfold = KFold(k, shuffle=shuffle).split(X, Y)
        accuracies = np.zeros(k)
        for i, (train_idx, test_idx) in enumerate(
            tqdm(kfold, total=k, desc="Computing kfold")
        ):
            # use `j` for the inner index so the fold counter `i` is not shadowed
            xtrain = [X[j] for j in train_idx]
            ytrain = [Y[j] for j in train_idx]
            cl.fit(xtrain, ytrain)
            xtest = [X[j] for j in test_idx]
            ytest = [Y[j] for j in test_idx]
            accuracies[i] = cl.score(xtest, ytest)
            if verbose:
                print(f"step {i+1}, {dataset} : {accuracies[i]}", flush=True)
                # `best_params_` only exists on hyper-parameter searches; the
                # previous bare `except:` silently swallowed *every* error here.
                try:
                    print("Best classification parameters : ", cl.best_params_)
                except AttributeError:
                    pass

        print(
            f"""Accuracy {dataset} : {np.mean(accuracies).round(decimals=3)}±{np.std(accuracies).round(decimals=3)}"""
        )
    elif k > 0:
        from sklearn.model_selection import train_test_split

        print("Computing accuracy, with train test split", flush=True)
        xtrain, xtest, ytrain, ytest = train_test_split(
            X, Y, shuffle=shuffle, test_size=k
        )
        print("Fitting...", end="", flush=True)
        cl.fit(xtrain, ytrain)
        print("Computing score...", end="", flush=True)
        accuracies = cl.score(xtest, ytest)
        # same narrowing as above: only ignore a genuinely missing attribute
        try:
            print("Best classification parameters : ", cl.best_params_)
        except AttributeError:
            pass
        print("Done.")
        if verbose:
            print(f"Accuracy {dataset} : {accuracies} ")
    file_path: str = f"result_{dataset}.csv".replace("/", "_").replace(".off", "")
    columns: list[str] = ["dataset", "cv", "mean", "std"]
    if exists(file_path):
        df: pd.DataFrame = pd.read_csv(file_path)
    else:
        df: pd.DataFrame = pd.DataFrame(columns=columns)
    more_names = []
    more_values = []
    for key, value in more_columns.items():
        if key not in columns:
            more_names.append(key)
            more_values.append(value)
        else:
            warn(f"Duplicate key {key} ! with value {value}")
    new_line: pd.DataFrame = pd.DataFrame(
        [
            [
                dataset,
                k,
                np.mean(accuracies).round(decimals=3),
                np.std(accuracies).round(decimals=3),
            ]
            + more_values
        ],
        columns=columns + more_names,
    )
    print(new_line)
    df = pd.concat([df, new_line])
    df.to_csv(file_path, index=False)
@@ -0,0 +1,520 @@
1
+ from collections.abc import Callable, Iterable
2
+ from typing import Any, Literal, Union
3
+
4
+ import numpy as np
5
+
6
# Type alias for the accepted kernel specifications: one of the built-in kernel
# names, or a custom callable (x_i, y_j, bandwidth) -> kernel matrix.
# NOTE: the previous `global available_kernels` statement at module scope was a
# no-op (`global` only has an effect inside a function body) and was removed.
available_kernels = Union[
    Literal[
        "gaussian", "exponential", "exponential_kernel", "multivariate_gaussian", "sinc"
    ],
    Callable,
]
13
+
14
+
15
def convolution_signed_measures(
    iterable_of_signed_measures,
    filtrations,
    bandwidth,
    flatten: bool = True,
    n_jobs: int = 1,
    backend="pykeops",
    kernel: available_kernels = "gaussian",
    **kwargs,
):
    """
    Evaluates the convolution of the signed measures Iterable(pts, weights) with a gaussian measure of bandwidth bandwidth, on a grid given by the filtrations

    Parameters
    ----------

    - iterable_of_signed_measures : (num_signed_measure) x [ (npts) x (num_parameters), (npts)]
    - filtrations : (num_parameter) x (filtration values)
    - bandwidth : kernel bandwidth (or covariance-inverse for multivariate kernels)
    - flatten : bool — if False, reshape each image back onto the grid shape
    - n_jobs : int — >1 or -1 enables joblib parallelism
    - backend : "pykeops" (fast, threads) or "sklearn" (KernelDensity, processes)
    - kernel : name or callable, see `available_kernels`

    Outputs
    -------

    The concatenated images, for each signed measure (num_signed_measures) x (len(f) for f in filtration_values)
    """
    from multipers.grids import todense

    # Dense grid of evaluation points (cartesian product of the 1d filtrations).
    grid_iterator = todense(filtrations, product_order=True)
    # NOTE(review): there is no `case _:` — an unknown backend leaves
    # `convolution_signed_measures_on_grid` undefined and fails later with a
    # NameError; verify callers only pass "sklearn"/"pykeops".
    match backend:
        case "sklearn":

            def convolution_signed_measures_on_grid(
                signed_measures: Iterable[tuple[np.ndarray, np.ndarray]],
            ):
                # One image per (pts, weights) pair, concatenated along axis 0.
                return np.concatenate(
                    [
                        _pts_convolution_sparse_old(
                            pts=pts,
                            pts_weights=weights,
                            grid_iterator=grid_iterator,
                            bandwidth=bandwidth,
                            kernel=kernel,
                            **kwargs,
                        )
                        for pts, weights in signed_measures
                    ],
                    axis=0,
                )

        case "pykeops":

            def convolution_signed_measures_on_grid(
                signed_measures: Iterable[tuple[np.ndarray, np.ndarray]],
            ) -> np.ndarray:
                return np.concatenate(
                    [
                        _pts_convolution_pykeops(
                            pts=pts,
                            pts_weights=weights,
                            grid_iterator=grid_iterator,
                            bandwidth=bandwidth,
                            kernel=kernel,
                            **kwargs,
                        )
                        for pts, weights in signed_measures
                    ],
                    axis=0,
                )

            # compiles first once
            # Warm-up call on a 2-point slice so the pykeops kernel is compiled
            # before any (possibly multi-threaded) real work starts.
            pts, weights = iterable_of_signed_measures[0][0]
            small_pts, small_weights = pts[:2], weights[:2]

            _pts_convolution_pykeops(
                small_pts,
                small_weights,
                grid_iterator=grid_iterator,
                bandwidth=bandwidth,
                kernel=kernel,
                **kwargs,
            )

    if n_jobs > 1 or n_jobs == -1:
        # sklearn's KernelDensity releases little of the GIL -> processes;
        # pykeops is thread-friendly once compiled -> threads.
        prefer = "processes" if backend == "sklearn" else "threads"
        from joblib import Parallel, delayed

        convolutions = Parallel(n_jobs=n_jobs, prefer=prefer)(
            delayed(convolution_signed_measures_on_grid)(sms)
            for sms in iterable_of_signed_measures
        )
    else:
        convolutions = [
            convolution_signed_measures_on_grid(sms)
            for sms in iterable_of_signed_measures
        ]
    if not flatten:
        # -1 leading axis: one slice per signed measure / degree.
        out_shape = [-1] + [len(f) for f in filtrations]  # Degree
        convolutions = [x.reshape(out_shape) for x in convolutions]
    return np.asarray(convolutions)
115
+
116
+
117
+ # def _test(r=1000, b=0.5, plot=True, kernel=0):
118
+ # import matplotlib.pyplot as plt
119
+ # pts, weigths = np.array([[1.,1.], [1.1,1.1]]), np.array([1,-1])
120
+ # pt_list = np.array(list(product(*[np.linspace(0,2,r)]*2)))
121
+ # img = _pts_convolution_sparse_pts(pts,weigths, pt_list,b,kernel=kernel)
122
+ # if plot:
123
+ # plt.imshow(img.reshape(r,-1).T, origin="lower")
124
+ # plt.show()
125
+
126
+
127
def _pts_convolution_sparse_old(
    pts: np.ndarray,
    pts_weights: np.ndarray,
    grid_iterator,
    kernel: "available_kernels" = "gaussian",
    bandwidth=0.1,
    **more_kde_args,
):
    """
    Old version of `convolution_signed_measures`. Scikitlearn's convolution is slower than the code above.

    Evaluates (density of positive part) - (density of negative part) of the
    signed measure (pts, pts_weights) on the points of `grid_iterator`.

    Bug fix: `KernelDensity.score_samples` returns *log*-densities. The previous
    code exponentiated a zeros array when a sign component was empty
    (exp(0) = 1), adding a spurious constant ±1 to the result. An empty
    component now contributes exactly 0.
    """
    from sklearn.neighbors import KernelDensity

    if len(pts) == 0:
        # warn("Found a trivial signed measure !")
        return np.zeros(len(grid_iterator))
    kde = KernelDensity(
        kernel=kernel, bandwidth=bandwidth, rtol=1e-4, **more_kde_args
    )  # TODO : check rtol
    pos_indices = pts_weights > 0
    neg_indices = pts_weights < 0
    img_pos = (
        np.zeros(len(grid_iterator))
        if pos_indices.sum() == 0
        else np.exp(
            kde.fit(
                pts[pos_indices], sample_weight=pts_weights[pos_indices]
            ).score_samples(grid_iterator)
        )
    )
    img_neg = (
        np.zeros(len(grid_iterator))
        if neg_indices.sum() == 0
        else np.exp(
            kde.fit(
                pts[neg_indices], sample_weight=-pts_weights[neg_indices]
            ).score_samples(grid_iterator)
        )
    )
    return img_pos - img_neg
163
+
164
+
165
def _pts_convolution_pykeops(
    pts: np.ndarray,
    pts_weights: np.ndarray,
    grid_iterator,
    kernel: available_kernels = "gaussian",
    bandwidth=0.1,
    **more_kde_args,
):
    """
    Pykeops convolution: evaluate the `kernel` convolution of the signed
    measure (pts, pts_weights) at the points of `grid_iterator`, via `KDE`.
    """
    estimator = KDE(kernel=kernel, bandwidth=bandwidth, **more_kde_args)
    # Cast weights and grid to the points' dtype so pykeops sees one dtype.
    weights = np.asarray(pts_weights, dtype=pts.dtype)
    grid = np.asarray(grid_iterator, dtype=pts.dtype)
    estimator.fit(pts, sample_weights=weights)
    return estimator.score_samples(grid)
180
+
181
+
182
def gaussian_kernel(x_i, y_j, bandwidth):
    """1d-normalized Gaussian kernel exp(-|x-y|² / (2h²)) / (h·√(2π)),
    reducing the squared distance over the last axis of the (lazy) tensors."""
    scaled_diff = (x_i - y_j) / bandwidth
    sq_dist = (scaled_diff**2).sum(dim=-1)
    # plain Python float is required here (pykeops fails on numpy scalars)
    normalization = bandwidth * float(np.sqrt(2 * np.pi))
    return (-sq_dist / 2).exp() / normalization
187
+
188
+
189
def multivariate_gaussian_kernel(x_i, y_j, covariance_matrix_inverse):
    # Multivariate Gaussian density:
    # 1 / \sqrt(2 \pi^dim * \Sigma.det()) * exp( -(x-y).T @ \Sigma ^{-1} @ (x-y))
    # `covariance_matrix_inverse` is Σ⁻¹, so det(Σ⁻¹)^(1/2) = 1/√det(Σ).
    # `weightedsqnorm` and `.det()` are pykeops LazyTensor operations:
    # CF https://www.kernel-operations.io/keops/_auto_examples/pytorch/plot_anisotropic_kernels.html#sphx-glr-auto-examples-pytorch-plot-anisotropic-kernels-py
    # and https://www.kernel-operations.io/keops/api/math-operations.html
    dim = x_i.shape[-1]
    z = x_i - y_j
    # (x-y)ᵀ Σ⁻¹ (x-y), with Σ⁻¹ flattened row-major as pykeops expects
    exponent = -(z.weightedsqnorm(covariance_matrix_inverse.flatten()) / 2)
    return (
        # float(...) : pykeops requires a plain Python scalar here
        float((2 * np.pi) ** (-dim / 2))
        * (covariance_matrix_inverse.det().sqrt())
        * exponent.exp()
    )
201
+
202
+
203
def exponential_kernel(x_i, y_j, bandwidth):
    """Exponential kernel exp(-|x - y| / h) / h, where |·| is the Euclidean
    norm reduced over the last axis (cf. the intended formula below)."""
    # 1 / \sigma * exp( norm(x-y, dim=-1))
    # Bug fix: the distance was previously written `** 1 / 2`, which Python
    # parses as (d² ** 1) / 2 — half the *squared* distance — not the square
    # root. `** 0.5` restores the documented Euclidean norm.
    distance = (((x_i - y_j) ** 2).sum(dim=-1)) ** 0.5
    kernel = (-(distance / bandwidth)).exp() / bandwidth
    return kernel
208
+
209
+
210
def sinc_kernel(x_i, y_j, bandwidth):
    """Sinc kernel 2·sinc(2r) − sinc(r) with r = |x − y| / h (Euclidean norm
    over the last axis); `sinc` is the backend's normalized sinc sin(πx)/(πx)."""
    # Bug fix: `** 1 / 2` parsed as (d² ** 1) / 2, i.e. half the squared
    # distance instead of the square root; `** 0.5` is the intended norm.
    norm = ((((x_i - y_j) ** 2).sum(dim=-1)) ** 0.5) / bandwidth
    # resolve sinc on the tensor type so this works for torch and pykeops alike
    sinc = type(x_i).sinc
    kernel = 2 * sinc(2 * norm) - sinc(norm)
    return kernel
215
+
216
+
217
+ def _kernel(
218
+ kernel: available_kernels = "gaussian",
219
+ ):
220
+ match kernel:
221
+ case "gaussian":
222
+ return gaussian_kernel
223
+ case "exponential":
224
+ return exponential_kernel
225
+ case "multivariate_gaussian":
226
+ return multivariate_gaussian_kernel
227
+ case "sinc":
228
+ return sinc_kernel
229
+ case _:
230
+ assert callable(
231
+ kernel
232
+ ), f"""
233
+ --------------------------
234
+ Unknown kernel {kernel}.
235
+ --------------------------
236
+ Custom kernel has to be callable,
237
+ (x:LazyTensor(n,1,D),y:LazyTensor(1,m,D),bandwidth:float) ---> kernel matrix
238
+
239
+ Valid operations are given here:
240
+ https://www.kernel-operations.io/keops/python/api/index.html
241
+ """
242
+ return kernel
243
+
244
+
245
+ # TODO : multiple bandwidths at once with lazy tensors
246
class KDE:
    """
    Fast, scikit-style, and differentiable kernel density estimation, using PyKeops.

    Supports numpy arrays and torch tensors; the array module (the "backend")
    is detected from the data passed to `fit`.
    """

    def __init__(
        self,
        bandwidth: Any = 1,
        kernel: available_kernels = "gaussian",
        return_log: bool = False,
    ):
        """
        bandwidth : numeric
            bandwidth for Gaussian kernel (or covariance-inverse for the
            multivariate kernel)
        kernel :
            built-in kernel name or callable (x_i, y_j, bandwidth)
        return_log :
            if True, `score_samples` returns log-densities
        """
        # fitted sample points; set by `fit`
        self.X = None
        self.bandwidth = bandwidth
        self.kernel: available_kernels = kernel
        # resolved kernel callable; set by `fit`
        self._kernel = None
        # array module (numpy or torch); detected in `fit`
        self._backend = None
        self._sample_weights = None
        self.return_log = return_log

    def fit(self, X, sample_weights=None, y=None):
        """Store sample points `X` (numpy or torch) with optional weights,
        detect the backend, and resolve the kernel. `y` is ignored."""
        self.X = X
        self._sample_weights = sample_weights
        if isinstance(X, np.ndarray):
            self._backend = np
        else:
            # torch is imported lazily so numpy-only users don't need it
            import torch

            if isinstance(X, torch.Tensor):
                self._backend = torch
            else:
                raise Exception("Unsupported backend.")
        self._kernel = _kernel(self.kernel)
        return self

    @staticmethod
    def to_lazy(X, Y, x_weights):
        """Wrap X (axis 0), Y (axis 1) and optional per-X weights into pykeops
        LazyTensors with broadcasting-compatible shapes."""
        if isinstance(X, np.ndarray):
            from pykeops.numpy import LazyTensor

            lazy_x = LazyTensor(
                X.reshape((X.shape[0], 1, X.shape[1]))
            )  # numpts, 1, dim
            lazy_y = LazyTensor(
                Y.reshape((1, Y.shape[0], Y.shape[1]))
            )  # 1, numpts, dim
            if x_weights is not None:
                w = LazyTensor(x_weights[:, None], axis=0)
                return lazy_x, lazy_y, w
            return lazy_x, lazy_y, None
        import torch

        if isinstance(X, torch.Tensor):
            from pykeops.torch import LazyTensor

            lazy_x = LazyTensor(X.view(X.shape[0], 1, X.shape[1]))
            lazy_y = LazyTensor(Y.view(1, Y.shape[0], Y.shape[1]))
            if x_weights is not None:
                w = LazyTensor(x_weights[:, None], axis=0)
                return lazy_x, lazy_y, w
            return lazy_x, lazy_y, None
        raise Exception("Bad tensor type.")

    def score_samples(self, Y, X=None, return_kernel=False):
        """Returns the kernel density estimates of each point in `Y`.

        Parameters
        ----------
        Y : tensor (m, d)
            `m` points with `d` dimensions for which the probability density will
            be calculated
        X : tensor (n, d), optional
            `n` points with `d` dimensions to which KDE will be fit. Provided to
            allow batch calculations in `log_prob`. By default, `X` is None and
            all points used to initialize KernelDensityEstimator are included.
        return_kernel : bool, optional
            if True, return the (lazy) kernel matrix instead of the reduced
            density estimates.


        Returns
        -------
        log_probs : tensor (m)
            log probability densities for each of the queried points in `Y`
            (plain densities unless `return_log` was set)
        """
        assert self._backend is not None and self._kernel is not None, "Fit first."
        X = self.X if X is None else X
        if X.shape[0] == 0:
            # no fitted points: density is identically zero
            return self._backend.zeros((Y.shape[0]))
        assert Y.shape[1] == X.shape[1] and X.ndim == Y.ndim == 2
        lazy_x, lazy_y, w = self.to_lazy(X, Y, x_weights=self._sample_weights)
        kernel = self._kernel(lazy_x, lazy_y, self.bandwidth)
        if w is not None:
            kernel *= w
        if return_kernel:
            return kernel
        # NOTE(review): normalizes by the number of sample points even when
        # weights are given (weighted sum / n, not / sum(w)) — verify intended.
        density_estimation = kernel.sum(dim=0).ravel() / kernel.shape[0]  # mean
        return (
            self._backend.log(density_estimation)
            if self.return_log
            else density_estimation
        )
348
+
349
+
350
def batch_signed_measure_convolutions(
    signed_measures,  # array of shape (num_data,num_pts,D)
    x,  # array of shape (num_x, D) or (num_data, num_x, D)
    bandwidth,  # either float or matrix if multivariate kernel
    kernel: available_kernels,
):
    """
    Convolve a batch of (padded) signed measures with `kernel` at points `x`.

    Input
    -----
    - signed_measures: unragged, of shape (num_data, num_pts, D+1)
      where last coord is weights, (0 for dummy points)
    - x : the points to convolve (num_x,D)
    - bandwidth : the bandwidths or covariance matrix inverse or ... of the kernel
    - kernel : "gaussian", "multivariate_gaussian", "exponential", or Callable (x_i, y_i, bandwidth)->float

    Output
    ------
    Array of shape (num_convolutions, (num_axis), num_data,
    Array of shape (num_convolutions, (num_axis), num_data, max_x_size)
    """
    # promote a single measure (num_pts, D+1) to a batch of one
    if signed_measures.ndim == 2:
        signed_measures = signed_measures[None, :, :]
    # split point coordinates from the trailing weight column
    sms = signed_measures[..., :-1]
    weights = signed_measures[..., -1]
    if isinstance(signed_measures, np.ndarray):
        from pykeops.numpy import LazyTensor
    else:
        import torch

        assert isinstance(signed_measures, torch.Tensor)
        from pykeops.torch import LazyTensor

    # NOTE(review): `.contiguous()` suggests this path is primarily exercised
    # with torch tensors — confirm the numpy branch is actually used.
    _sms = LazyTensor(sms[..., None, :].contiguous())
    _x = x[..., None, :, :].contiguous()

    sms_kernel = _kernel(kernel)(_sms, _x, bandwidth)
    # weighted reduction over the measure-points axis; dummy points have
    # weight 0 and therefore do not contribute
    out = (sms_kernel * weights[..., None, None].contiguous()).sum(
        signed_measures.ndim - 2
    )
    assert out.shape[-1] == 1, "Pykeops bug fixed, TODO : refix this "
    out = out[..., 0]  ## pykeops bug + ensures its a tensor
    # assert out.shape == (x.shape[0], x.shape[1]), f"{x.shape=}, {out.shape=}"
    return out
393
+
394
+
395
class DTM:
    """
    Distance To Measure

    For each mass m in `masses`, DTM_m(y) is the root-mean of the squared
    distances from y to its k nearest fitted points, with k = int(m·n) + 1.
    """

    def __init__(self, masses, metric: str = "euclidean", **_kdtree_kwargs):
        """
        masses : iterable of floats in [0,1]
            The mass thresholds (one DTM per mass)
        metric :
            The distance between points to consider
        _kdtree_kwargs :
            forwarded to sklearn.neighbors.KDTree
        """
        self.masses = masses
        self.metric = metric
        self._kdtree_kwargs = _kdtree_kwargs
        # per-mass neighbor counts; set in `fit`
        self._ks = None
        self._kdtree = None
        self._X = None
        # "numpy" or "torch"; detected in `fit`
        self._backend = None

    def fit(self, X, sample_weights=None, y=None):
        """Build a KDTree on `X`; `sample_weights` and `y` are ignored."""
        if len(self.masses) == 0:
            return self
        assert np.max(self.masses) <= 1, "All masses should be in (0,1]."
        from sklearn.neighbors import KDTree

        if not isinstance(X, np.ndarray):
            import torch

            assert isinstance(X, torch.Tensor), "Backend has to be numpy of torch"
            # KDTree needs plain data; detach to drop the autograd graph
            _X = X.detach()
            self._backend = "torch"
        else:
            _X = X
            self._backend = "numpy"
        # k = int(mass * n) + 1 nearest neighbors realize each mass fraction
        self._ks = np.array([int(mass * X.shape[0]) + 1 for mass in self.masses])
        self._kdtree = KDTree(_X, metric=self.metric, **self._kdtree_kwargs)
        self._X = X
        return self

    def score_samples(self, Y, X=None):
        """Returns the kernel density estimates of each point in `Y`.

        Parameters
        ----------
        Y : tensor (m, d)
            `m` points with `d` dimensions for which the probability density will
            be calculated


        Returns
        -------
        the DTMs of Y, for each mass in masses.
        """
        if len(self.masses) == 0:
            return np.empty((0, len(Y)))
        assert (
            self._ks is not None and self._kdtree is not None and self._X is not None
        ), f"Fit first. Got {self._ks=}, {self._kdtree=}, {self._X=}."
        assert Y.ndim == 2
        if self._backend == "torch":
            _Y = Y.detach().numpy()
        else:
            _Y = Y
        # query once with the largest k; smaller masses reuse a prefix of the
        # sorted neighbor distances (NN indices are unused here)
        NN_Dist, NN = self._kdtree.query(_Y, self._ks.max(), return_distance=True)
        # DTM_k(y) = sqrt( mean of the k smallest squared distances )
        DTMs = np.array([((NN_Dist**2)[:, :k].mean(1)) ** 0.5 for k in self._ks])
        return DTMs

    def score_samples_diff(self, Y):
        """Returns the kernel density estimates of each point in `Y`.

        Torch-differentiable variant: neighbor *indices* come from the KDTree
        (non-differentiable), but distances are recomputed in torch so that
        gradients flow through `Y` and the fitted points.

        Parameters
        ----------
        Y : tensor (m, d)
            `m` points with `d` dimensions for which the probability density will
            be calculated
        X : tensor (n, d), optional
            `n` points with `d` dimensions to which KDE will be fit. Provided to
            allow batch calculations in `log_prob`. By default, `X` is None and
            all points used to initialize KernelDensityEstimator are included.


        Returns
        -------
        log_probs : tensor (m)
            log probability densities for each of the queried points in `Y`
        """
        import torch

        if len(self.masses) == 0:
            return torch.empty(0, len(Y))

        assert Y.ndim == 2
        assert self._backend == "torch", "Use the non-diff version with numpy."
        assert (
            self._ks is not None and self._kdtree is not None and self._X is not None
        ), f"Fit first. Got {self._ks=}, {self._kdtree=}, {self._X=}."
        NN = self._kdtree.query(Y.detach(), self._ks.max(), return_distance=False)
        # per mass: mean of squared distances to the first k neighbors, then sqrt
        DTMs = tuple(
            (((self._X[NN] - Y[:, None, :]) ** 2)[:, :k].sum(dim=(1, 2)) / k) ** 0.5
            for k in self._ks
        )  # TODO : kdtree already computes distance, find implementation of kdtree that is pytorch differentiable
        return DTMs
498
+
499
+
500
+ # def _pts_convolution_sparse(pts:np.ndarray, pts_weights:np.ndarray, filtration_grid:Iterable[np.ndarray], kernel="gaussian", bandwidth=0.1, **more_kde_args):
501
+ # """
502
+ # Old version of `convolution_signed_measures`. Scikitlearn's convolution is slower than the code above.
503
+ # """
504
+ # from sklearn.neighbors import KernelDensity
505
+ # grid_iterator = np.asarray(list(product(*filtration_grid)))
506
+ # grid_shape = [len(f) for f in filtration_grid]
507
+ # if len(pts) == 0:
508
+ # # warn("Found a trivial signed measure !")
509
+ # return np.zeros(shape=grid_shape)
510
+ # kde = KernelDensity(kernel=kernel, bandwidth=bandwidth, rtol = 1e-4, **more_kde_args) # TODO : check rtol
511
+
512
+ # pos_indices = pts_weights>0
513
+ # neg_indices = pts_weights<0
514
+ # img_pos = kde.fit(pts[pos_indices], sample_weight=pts_weights[pos_indices]).score_samples(grid_iterator).reshape(grid_shape)
515
+ # img_neg = kde.fit(pts[neg_indices], sample_weight=-pts_weights[neg_indices]).score_samples(grid_iterator).reshape(grid_shape)
516
+ # return np.exp(img_pos) - np.exp(img_neg)
517
+
518
+
519
+ # Precompiles the convolution
520
+ # _test(r=2,b=.5, plot=False)
@@ -0,0 +1,79 @@
1
+ import persistable
2
+
3
+
4
+ # requires installing ripser (pip install ripser) as well as persistable from the higher-homology branch,
5
+ # which can be done as follows:
6
+ # pip install git+https://github.com/LuisScoccola/persistable.git@higher-homology
7
+ # NOTE: only accepts as input a distance matrix
8
def hf_degree_rips(
    distance_matrix,
    min_rips_value,
    max_rips_value,
    max_normalized_degree,
    min_normalized_degree,
    grid_granularity,
    max_homological_dimension,
    subsample_size=None,
):
    """Hilbert functions of the degree-Rips bifiltration of a distance matrix,
    computed via `persistable`, up to `max_homological_dimension`.

    Returns (rips_values, normalized_degree_values, hilbert_functions,
    minimal_hilbert_decompositions) as produced by
    `Persistable._hilbert_function`.
    """
    # `is None` (identity test) instead of the previous `== None`, which can
    # be hijacked by __eq__ overloads; also deduplicates the constructor call.
    extra = {} if subsample_size is None else {"subsample": subsample_size}
    p = persistable.Persistable(distance_matrix, metric="precomputed", **extra)

    (
        rips_values,
        normalized_degree_values,
        hilbert_functions,
        minimal_hilbert_decompositions,
    ) = p._hilbert_function(
        min_rips_value,
        max_rips_value,
        max_normalized_degree,
        min_normalized_degree,
        grid_granularity,
        homological_dimension=max_homological_dimension,
    )

    return (
        rips_values,
        normalized_degree_values,
        hilbert_functions,
        minimal_hilbert_decompositions,
    )
33
+
34
+
35
+
36
def hf_h0_degree_rips(
    point_cloud,
    min_rips_value,
    max_rips_value,
    max_normalized_degree,
    min_normalized_degree,
    grid_granularity,
):
    """Degree-Rips Hilbert function of a point cloud in homological degree 0.

    Returns (rips_values, normalized_degree_values, hilbert_function,
    minimal_hilbert_decomposition), keeping only the degree-0 entries of the
    per-degree outputs of `Persistable._hilbert_function`.
    """
    p = persistable.Persistable(point_cloud, n_neighbors="all")

    result = p._hilbert_function(
        min_rips_value,
        max_rips_value,
        max_normalized_degree,
        min_normalized_degree,
        grid_granularity,
    )
    rips_values, normalized_degree_values, hilbert_functions, minimal_hilbert_decompositions = result

    return (
        rips_values,
        normalized_degree_values,
        hilbert_functions[0],
        minimal_hilbert_decompositions[0],
    )
55
+
56
+
57
def ri_h0_degree_rips(
    point_cloud,
    min_rips_value,
    max_rips_value,
    max_normalized_degree,
    min_normalized_degree,
    grid_granularity,
):
    """Rank invariant of the degree-Rips bifiltration of a point cloud in
    homological degree 0.

    Returns (rips_values, normalized_degree_values, rank_invariant); the two
    trailing outputs of `Persistable._rank_invariant` are discarded.
    """
    p = persistable.Persistable(point_cloud, n_neighbors="all")

    result = p._rank_invariant(
        min_rips_value,
        max_rips_value,
        max_normalized_degree,
        min_normalized_degree,
        grid_granularity,
    )
    rips_values, normalized_degree_values, rank_invariant = result[:3]

    return rips_values, normalized_degree_values, rank_invariant
76
+
77
+
78
+
79
+