multipers 2.0.0__cp312-cp312-macosx_13_0_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of multipers has been flagged as potentially problematic.
- multipers/.dylibs/libc++.1.0.dylib +0 -0
- multipers/.dylibs/libtbb.12.12.dylib +0 -0
- multipers/.dylibs/libtbbmalloc.2.12.dylib +0 -0
- multipers/__init__.py +11 -0
- multipers/_signed_measure_meta.py +268 -0
- multipers/_slicer_meta.py +171 -0
- multipers/data/MOL2.py +350 -0
- multipers/data/UCR.py +18 -0
- multipers/data/__init__.py +1 -0
- multipers/data/graphs.py +466 -0
- multipers/data/immuno_regions.py +27 -0
- multipers/data/minimal_presentation_to_st_bf.py +0 -0
- multipers/data/pytorch2simplextree.py +91 -0
- multipers/data/shape3d.py +101 -0
- multipers/data/synthetic.py +68 -0
- multipers/distances.py +198 -0
- multipers/euler_characteristic.pyx +132 -0
- multipers/filtration_conversions.pxd +229 -0
- multipers/filtrations.pxd +225 -0
- multipers/function_rips.cpython-312-darwin.so +0 -0
- multipers/function_rips.pyx +105 -0
- multipers/grids.cpython-312-darwin.so +0 -0
- multipers/grids.pyx +281 -0
- multipers/hilbert_function.pyi +46 -0
- multipers/hilbert_function.pyx +153 -0
- multipers/io.cpython-312-darwin.so +0 -0
- multipers/io.pyx +571 -0
- multipers/ml/__init__.py +0 -0
- multipers/ml/accuracies.py +90 -0
- multipers/ml/convolutions.py +532 -0
- multipers/ml/invariants_with_persistable.py +79 -0
- multipers/ml/kernels.py +176 -0
- multipers/ml/mma.py +659 -0
- multipers/ml/one.py +472 -0
- multipers/ml/point_clouds.py +238 -0
- multipers/ml/signed_betti.py +50 -0
- multipers/ml/signed_measures.py +1542 -0
- multipers/ml/sliced_wasserstein.py +461 -0
- multipers/ml/tools.py +113 -0
- multipers/mma_structures.cpython-312-darwin.so +0 -0
- multipers/mma_structures.pxd +127 -0
- multipers/mma_structures.pyx +2433 -0
- multipers/multiparameter_edge_collapse.py +41 -0
- multipers/multiparameter_module_approximation.cpython-312-darwin.so +0 -0
- multipers/multiparameter_module_approximation.pyx +211 -0
- multipers/pickle.py +53 -0
- multipers/plots.py +326 -0
- multipers/point_measure_integration.cpython-312-darwin.so +0 -0
- multipers/point_measure_integration.pyx +139 -0
- multipers/rank_invariant.cpython-312-darwin.so +0 -0
- multipers/rank_invariant.pyx +229 -0
- multipers/simplex_tree_multi.cpython-312-darwin.so +0 -0
- multipers/simplex_tree_multi.pxd +129 -0
- multipers/simplex_tree_multi.pyi +715 -0
- multipers/simplex_tree_multi.pyx +4655 -0
- multipers/slicer.cpython-312-darwin.so +0 -0
- multipers/slicer.pxd +781 -0
- multipers/slicer.pyx +3393 -0
- multipers/tensor.pxd +13 -0
- multipers/test.pyx +44 -0
- multipers/tests/__init__.py +40 -0
- multipers/tests/old_test_rank_invariant.py +91 -0
- multipers/tests/test_diff_helper.py +74 -0
- multipers/tests/test_hilbert_function.py +82 -0
- multipers/tests/test_mma.py +51 -0
- multipers/tests/test_point_clouds.py +59 -0
- multipers/tests/test_python-cpp_conversion.py +82 -0
- multipers/tests/test_signed_betti.py +181 -0
- multipers/tests/test_simplextreemulti.py +98 -0
- multipers/tests/test_slicer.py +63 -0
- multipers/torch/__init__.py +1 -0
- multipers/torch/diff_grids.py +217 -0
- multipers/torch/rips_density.py +257 -0
- multipers-2.0.0.dist-info/LICENSE +21 -0
- multipers-2.0.0.dist-info/METADATA +29 -0
- multipers-2.0.0.dist-info/RECORD +78 -0
- multipers-2.0.0.dist-info/WHEEL +5 -0
- multipers-2.0.0.dist-info/top_level.txt +1 -0
multipers/data/synthetic.py
ADDED

@@ -0,0 +1,68 @@
import numpy as np

def noisy_annulus(n1: int = 1000, n2: int = 200, r1: float = 1, r2: float = 2, dim: int = 2, center: np.ndarray | list | None = None, **kwargs) -> np.ndarray:
    """Generates a noisy annulus dataset.

    Parameters
    ----------
    r1 : float
        Lower radius of the annulus.
    r2 : float
        Upper radius of the annulus.
    n1 : int
        Number of points in the annulus.
    n2 : int
        Number of points in the square.
    dim : int
        Dimension of the annulus.
    center : list or array
        Center of the annulus.

    Returns
    -------
    numpy array
        Dataset. size : (n1+n2) x dim

    """
    from numpy.random import uniform
    from numpy.linalg import norm

    set = []
    while len(set) < n1:
        draw = uniform(low=-r2, high=r2, size=dim)
        if norm(draw) > r1 and norm(draw) < r2:
            set.append(draw)
    annulus = np.array(set) if center is None else np.array(set) + np.array(center)
    diffuse_noise = uniform(size=(n2, dim), low=-1.1 * r2, high=1.1 * r2)
    if center is not None:
        diffuse_noise += np.array(center)
    return np.vstack([annulus, diffuse_noise])


def three_annulus(num_pts: int = 500, num_outliers: int = 500):
    X = np.block([
        [np.random.uniform(low=-2, high=2, size=(num_outliers, 2))],
        [np.array(noisy_annulus(r1=0.6, r2=0.9, n1=(int)(num_pts * 1 / 3), n2=0, center=[1, -0.2]))],
        [np.array(noisy_annulus(r1=0.4, r2=0.55, n1=(int)(num_pts * 1 / 3), n2=0, center=[-1.2, -1]))],
        [np.array(noisy_annulus(r1=0.3, r2=0.4, n1=(int)(num_pts * 1 / 3), n2=0, center=[-0.7, 1.1]))],
    ])
    return X

def orbit(n: int = 1000, r: float = 1., x0=[]):
    point_list = []
    if len(x0) != 2:
        x, y = np.random.uniform(size=2)
    else:
        x, y = x0
    point_list.append([x, y])
    for _ in range(n - 1):
        x = (x + r * y * (1 - y)) % 1
        y = (y + r * x * (1 - x)) % 1
        point_list.append([x, y])
    return np.asarray(point_list, dtype=float)

def get_orbit5k(num_pts=1000, num_data=5000):
    from sklearn.preprocessing import LabelEncoder
    rs = [2.5, 3.5, 4, 4.1, 4.3]
    labels = np.random.choice(rs, size=num_data, replace=True)
    X = [orbit(n=num_pts, r=r) for r in labels]
    labels = LabelEncoder().fit_transform(labels)
    return X, labels
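For reference, a minimal usage sketch of the generators above (plain NumPy, plus scikit-learn for get_orbit5k; the import path follows the wheel layout listed at the top, and the expected shapes follow from the code shown):

# Illustrative usage of multipers/data/synthetic.py (not part of the shipped test suite)
import numpy as np
from multipers.data.synthetic import noisy_annulus, three_annulus, orbit, get_orbit5k

X = noisy_annulus(n1=1000, n2=200, r1=1.0, r2=2.0, dim=2)  # (1200, 2): annulus points + uniform noise
Y = three_annulus(num_pts=600, num_outliers=100)           # (700, 2): three annuli + outliers
Z = orbit(n=1000, r=4.1, x0=[0.2, 0.3])                    # (1000, 2): one orbit of the discrete dynamical system
orbits, labels = get_orbit5k(num_pts=1000, num_data=10)    # list of 10 orbits and their integer class labels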
multipers/distances.py
ADDED
@@ -0,0 +1,198 @@
import numpy as np
import ot

from multipers.mma_structures import PyMultiDiagrams_type
from multipers.multiparameter_module_approximation import PyModule_type
from multipers.simplex_tree_multi import SimplexTreeMulti_type


def sm2diff(sm1, sm2):
    pts = sm1[0]
    dtype = pts.dtype
    if isinstance(pts, np.ndarray):

        def backend_concatenate(a, b):
            return np.concatenate([a, b], axis=0, dtype=dtype)

        def backend_tensor(x):
            return np.asarray(x, dtype=int)

    else:
        import torch

        assert isinstance(pts, torch.Tensor), "Invalid backend. Numpy or torch."

        def backend_concatenate(a, b):
            return torch.concatenate([a, b], dim=0)

        def backend_tensor(x):
            return torch.tensor(x).type(torch.int)

    pts1, w1 = sm1
    pts2, w2 = sm2
    ## TODO: optimize this
    pos_indices1 = backend_tensor(
        [i for i, w in enumerate(w1) for _ in range(w) if w > 0]
    )
    pos_indices2 = backend_tensor(
        [i for i, w in enumerate(w2) for _ in range(w) if w > 0]
    )
    neg_indices1 = backend_tensor(
        [i for i, w in enumerate(w1) for _ in range(-w) if w < 0]
    )
    neg_indices2 = backend_tensor(
        [i for i, w in enumerate(w2) for _ in range(-w) if w < 0]
    )
    x = backend_concatenate(pts1[pos_indices1], pts2[neg_indices2])
    y = backend_concatenate(pts1[neg_indices1], pts2[pos_indices2])
    return x, y


def sm_distance(
    sm1: tuple,
    sm2: tuple,
    reg: float = 0,
    reg_m: float = 0,
    numItermax: int = 10000,
    p: float = 1,
):
    """
    Computes the Wasserstein distance between two signed measures, each given
    in the form
     - (pts, weights)
    with
     - pts : (num_pts, dim) float array
     - weights : (num_pts,) int array

    Regularisation:
     - sinkhorn if reg != 0
     - sinkhorn unbalanced if reg_m != 0
    """
    x, y = sm2diff(sm1, sm2)
    loss = ot.dist(
        x, y, metric="sqeuclidean", p=p
    )  # only euclidean + sqeuclidean are implemented in POT for the moment with the torch backend  # TODO : check later
    if isinstance(x, np.ndarray):
        empty_tensor = np.array([])  # uniform weights
    else:
        import torch

        assert isinstance(x, torch.Tensor), "Unimplemented backend."
        empty_tensor = torch.tensor([])  # uniform weights

    if reg == 0:
        return ot.lp.emd2(empty_tensor, empty_tensor, M=loss) * len(x)
    if reg_m == 0:
        return ot.sinkhorn2(
            a=empty_tensor, b=empty_tensor, M=loss, reg=reg, numItermax=numItermax
        )
    return ot.sinkhorn_unbalanced2(
        a=empty_tensor,
        b=empty_tensor,
        M=loss,
        reg=reg,
        reg_m=reg_m,
        numItermax=numItermax,
    )
    # return ot.sinkhorn2(a=onesx,b=onesy,M=loss,reg=reg, numItermax=numItermax)
    # return ot.bregman.empirical_sinkhorn2(x,y,reg=reg)


def estimate_matching(b1: PyMultiDiagrams_type, b2: PyMultiDiagrams_type):
    assert len(b1) == len(b2)
    from gudhi.bottleneck import bottleneck_distance

    def get_bc(b: PyMultiDiagrams_type, i: int) -> np.ndarray:
        temp = b[i].get_points()
        out = (
            np.array(temp)[:, :, 0] if len(temp) > 0 else np.empty((0, 2))
        )  # GUDHI FIX
        return out

    return max(
        (bottleneck_distance(get_bc(b1, i), get_bc(b2, i)) for i in range(len(b1)))
    )


# Functions to estimate precision
def estimate_error(
    st: SimplexTreeMulti_type,
    module: PyModule_type,
    degree: int,
    nlines: int = 100,
    verbose: bool = False,
):
    """
    Given an MMA SimplexTree and PyModule, estimates the bottleneck distance using barcodes given by gudhi.

    Parameters
    ----------
    - st : SimplexTree
        The simplextree representing the n-filtered complex. Used to define the gudhi simplextrees on different lines.
    - module : PyModule
        The module on which to estimate the approximation error, w.r.t. the original simplextree st.
    - degree : int
        The homology degree to consider.

    Returns
    -------
    - float : The estimation of the matching distance, i.e., the maximum of the sampled bottleneck distances.

    """
    from time import perf_counter

    parameter = 0

    def _get_bc_ST(st, basepoint, degree: int):
        """
        Slices an MMA simplextree to a gudhi simplextree, and computes its persistence on the diagonal line crossing the given basepoint.
        """
        gst = st.project_on_line(
            basepoint=basepoint, parameter=parameter
        )  # we consider only the 1st coordinate (as )
        gst.compute_persistence()
        return gst.persistence_intervals_in_dimension(degree)

    from gudhi.bottleneck import bottleneck_distance

    low, high = module.get_box()
    nfiltration = len(low)
    basepoints = np.random.uniform(low=low, high=high, size=(nlines, nfiltration))
    # barcodes from module
    print("Computing mma barcodes...", flush=True, end="") if verbose else None
    time = perf_counter()
    bcs_from_mod = module.barcodes(degree=degree, basepoints=basepoints).get_points()
    print(f"Done. {perf_counter() - time}s.") if verbose else None

    def clean(dgm):
        return np.array(
            [
                [birth[parameter], death[parameter]]
                for birth, death in dgm
                if len(birth) > 0 and birth[parameter] != np.inf
            ]
        )

    bcs_from_mod = [
        clean(dgm) for dgm in bcs_from_mod
    ]  # we only consider the 1st coordinate of the barcode
    # Compute gudhi barcodes
    from tqdm import tqdm

    bcs_from_gudhi = [
        _get_bc_ST(st, basepoint=basepoint, degree=degree)
        for basepoint in tqdm(
            basepoints, disable=not verbose, desc="Computing gudhi barcodes"
        )
    ]
    return max(
        (
            bottleneck_distance(a, b)
            for a, b in tqdm(
                zip(bcs_from_mod, bcs_from_gudhi),
                disable=not verbose,
                total=nlines,
                desc="Computing bottleneck distances",
            )
        )
    )
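A minimal sketch of the signed-measure format consumed by sm2diff and sm_distance, using two toy measures built by hand (illustrative only; it assumes the POT package `ot` imported above is installed):

# Illustrative comparison of two toy signed measures
import numpy as np
from multipers.distances import sm2diff, sm_distance

# A signed measure is a pair (points, integer weights); negative weights are allowed.
sm1 = (np.array([[0.0, 0.0], [1.0, 1.0]]), np.array([1, -1]))
sm2 = (np.array([[0.1, 0.0], [1.0, 0.9]]), np.array([1, -1]))

x, y = sm2diff(sm1, sm2)  # positive part of sm1 with negative part of sm2, and vice versa
print(x.shape, y.shape)   # (2, 2) (2, 2): the two point clouds matched by optimal transport
print(sm_distance(sm1, sm2))           # exact transport cost (reg=0), squared-euclidean ground cost
print(sm_distance(sm1, sm2, reg=0.1))  # entropic (Sinkhorn) approximation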
multipers/euler_characteristic.pyx
ADDED

@@ -0,0 +1,132 @@
# cimport multipers.tensor as mt
from libc.stdint cimport intptr_t, uint16_t, uint32_t, int32_t
from libcpp.vector cimport vector
from libcpp cimport bool, int, float
from libcpp.utility cimport pair
from typing import Optional, Iterable, Callable

import numpy as np
cimport numpy as cnp
cnp.import_array()

ctypedef float value_type
python_value_type = np.float32

ctypedef int32_t indices_type  # uint fails for some reason
python_indices_type = np.int32

ctypedef int32_t tensor_dtype
python_tensor_dtype = np.int32


ctypedef pair[vector[vector[indices_type]], vector[tensor_dtype]] signed_measure_type

cdef extern from "multi_parameter_rank_invariant/euler_characteristic.h" namespace "Gudhi::multiparameter::euler_characteristic":
    void get_euler_surface_python(const intptr_t, tensor_dtype*, const vector[indices_type], bool, bool, bool) except + nogil
    signed_measure_type get_euler_signed_measure(const intptr_t, tensor_dtype*, const vector[indices_type], bool, bool) except + nogil

def euler_signed_measure(simplextree, mass_default=None, bool verbose=False, bool plot=False, grid_conversion=None):
    """
    Computes the signed measure given by the decomposition of the Euler characteristic.

    Input
    -----
    - simplextree:SimplexTreeMulti, the multifiltered simplicial complex
    - mass_default: Either None, or 'auto' or 'inf', or array-like of floats. Where to put the default mass to get a zero-mass measure.
    - plot:bool, plots the computed measure if true.
    - verbose:bool, prints c++ logs.

    Output
    ------
    The signed measure, of the form `(dirac locations, dirac weights)`.
    """
    assert len(simplextree.filtration_grid[0]) > 0, "Squeeze grid first."
    cdef bool zero_pad = mass_default is not None
    # assert simplextree.num_parameters == 2
    grid_shape = np.array([len(f) for f in simplextree.filtration_grid])

    # match mass_default: ## Cython bug
    #     case None:
    #         pass
    #     case "inf":
    #         mass_default = np.array([np.inf]*simplextree.num_parameters)
    #     case "auto":
    #         mass_default = np.array([1.1*np.max(f) - 0.1*np.min(f) for f in grid_conversion])
    #     case _:
    #         mass_default = np.asarray(mass_default)
    #         assert mass_default.ndim == 1 and mass_default.shape[0] == simplextree.num_parameters
    if mass_default is None:
        mass_default = mass_default
    else:
        mass_default = np.asarray(mass_default)
        assert mass_default.ndim == 1 and mass_default.shape[0] == simplextree.num_parameters
    if zero_pad:
        for i, _ in enumerate(grid_shape):
            grid_shape[i] += 1  # adds a 0
        if grid_conversion is not None:
            for i, f in enumerate(grid_conversion):
                grid_conversion[i] = np.concatenate([f, [mass_default[i]]])
    assert len(grid_shape) == simplextree.num_parameters, "Grid shape size has to be the number of parameters."
    container_array = np.ascontiguousarray(np.zeros(grid_shape, dtype=python_tensor_dtype).flatten())
    assert len(container_array) < np.iinfo(python_indices_type).max, "Too large container. Raise an issue on github if you encounter this issue. (Due to tensor's operator[])"
    cdef intptr_t simplextree_ptr = simplextree.thisptr
    cdef vector[indices_type] c_grid_shape = grid_shape
    cdef tensor_dtype[::1] container = container_array
    cdef tensor_dtype* container_ptr = &container[0]
    cdef signed_measure_type out
    with nogil:
        out = get_euler_signed_measure(simplextree_ptr, container_ptr, c_grid_shape, zero_pad, verbose)
    pts, weights = np.asarray(out.first, dtype=int).reshape(-1, simplextree.num_parameters), np.asarray(out.second, dtype=int)
    # return pts, weights
    sm = (pts, weights)

    if grid_conversion is not None:
        from multipers.hilbert_function import sms_in_grid
        sm, = sms_in_grid([sm], grid_conversion)
    if plot:
        from multipers.plots import plot_signed_measures
        plot_signed_measures([sm])
    return sm


def euler_surface(simplextree, bool mobius_inversion=False, bool zero_pad=False, plot=False, bool verbose=False):
    """
    Computes the Euler characteristic surface.

    Input
    -----
    - simplextree:SimplexTreeMulti, the multifiltered simplicial complex
    - mobius_inversion:bool, whether to return the Möbius inversion of the surface
    - zero_pad:bool, whether to zero-pad the surface
    - plot:bool, plots the computed surface if true.
    - verbose:bool, prints c++ logs.

    Output
    ------
    The filtration grid, together with an integer array of shape `(num_filtration_values_of_parameter 1, ..., num_filtration_values_of_parameter n)`.
    """
    assert len(simplextree.filtration_grid[0]) > 0, "Squeeze grid first."
    grid_conversion = [np.asarray(f) for f in simplextree.filtration_grid] if len(simplextree.filtration_grid[0]) > 0 else None
    # assert simplextree.num_parameters == 2
    grid_shape = [len(f) for f in grid_conversion]
    assert len(grid_shape) == simplextree.num_parameters
    container_array = np.ascontiguousarray(np.zeros(grid_shape, dtype=python_tensor_dtype).flatten())
    cdef intptr_t simplextree_ptr = simplextree.thisptr
    cdef vector[indices_type] c_grid_shape = grid_shape
    cdef tensor_dtype[::1] container = container_array
    cdef tensor_dtype* container_ptr = &container[0]
    # cdef signed_measure_type out
    # cdef indices_type i = 0
    # cdef indices_type j = 1
    # cdef vector[indices_type] fixed_values = np.asarray([0,0], dtype=int)
    with nogil:
        get_euler_surface_python(simplextree_ptr, container_ptr, c_grid_shape, mobius_inversion, zero_pad, verbose)
    out = (grid_conversion, container_array.reshape(grid_shape))
    if plot:
        from multipers.plots import plot_surface
        plot_surface(*out)
    return out
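To make the output format concrete: the returned pair (dirac locations, dirac weights) is, up to the grid conventions above, the Möbius inversion of the Euler-characteristic function, so summing the weights of all Diracs dominated by a query point recovers the Euler characteristic at that point. A small, library-free sketch of that reconstruction (the helper below is illustrative and not part of the package):

# Hypothetical helper: evaluate an Euler-characteristic signed measure at a query point
import numpy as np

def evaluate_signed_measure(pts: np.ndarray, weights: np.ndarray, query: np.ndarray) -> int:
    """Sum the weights of all Dirac locations that are coordinate-wise <= query."""
    dominated = np.all(pts <= query, axis=1)
    return int(weights[dominated].sum())

# e.g. a measure with a +1 Dirac at (0, 0) and a -1 Dirac at (2, 3):
pts = np.array([[0, 0], [2, 3]])
weights = np.array([1, -1])
print(evaluate_signed_measure(pts, weights, np.array([1, 1])))  # 1: only the (0, 0) Dirac contributes
print(evaluate_signed_measure(pts, weights, np.array([2, 3])))  # 0: the two Diracs cancel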
multipers/filtration_conversions.pxd
ADDED

@@ -0,0 +1,229 @@
# WARNING: Do not edit this file directly.
# It is automatically generated from 'multipers/filtration_conversions.pxd.tp'.
# Changes must be made there.


# Python to C++ conversions
from multipers.filtrations cimport Finitely_critical_multi_filtration, KCriticalFiltration
from libcpp.vector cimport vector
from libcpp cimport bool
cimport numpy as cnp
import numpy as np
from libc.stdint cimport int32_t, int64_t
from cython.operator cimport dereference
###### ------------------- PY TO CPP
#### ----------

cdef inline KCriticalFiltration[int32_t] _py2kc_i32(object filtrations_):
    cdef int32_t[:,:] filtrations = np.asarray(filtrations_, dtype=np.int32)
    cdef KCriticalFiltration[int32_t] out
    with nogil:
        out.set_num_generators(filtrations.shape[0])
        for i in range(filtrations.shape[0]):
            out[i].resize(filtrations.shape[1])
            for j in range(filtrations.shape[1]):
                out[i][j] = filtrations[i,j]
    return out

cdef inline Finitely_critical_multi_filtration[int32_t] _py21c_i32(object filtration_):
    cdef int32_t[:] filtration = np.asarray(filtration_, dtype=np.int32)
    cdef Finitely_critical_multi_filtration[int32_t] out
    with nogil:
        out.reserve(len(filtration))
        for i in range(len(filtration)):
            out.push_back(filtration[i])
    return out


cdef inline vector[Finitely_critical_multi_filtration[int32_t]] _py2v1c_i32(object filtrations_):
    cdef int32_t[:,:] filtrations = np.asarray(filtrations_, dtype=np.int32)
    cdef vector[Finitely_critical_multi_filtration[int32_t]] out
    out.reserve(filtrations.shape[0])
    for i in range(filtrations.shape[0]):
        out.push_back(_py21c_i32(filtrations[i,:]))
    return out


###### ------------------- CPP to PY

## CYTHON BUG: using tuples here will cause some weird issues.
cdef inline _ff21cview_i32(Finitely_critical_multi_filtration[int32_t]* x, bool copy=False):
    cdef Py_ssize_t num_parameters = dereference(x).num_parameters()
    cdef int32_t[:] x_view = <int32_t[:num_parameters]>(&(dereference(x)[0]))
    return np.array(x_view) if copy else np.asarray(x_view)

cdef inline _ff2kcview_i32(KCriticalFiltration[int32_t]* x, bool copy=False):
    cdef Py_ssize_t k = dereference(x).num_generators()
    return [_ff21cview_i32(&(dereference(x)[i]), copy=copy) for i in range(k)]


cdef inline _vff21cview_i32(vector[Finitely_critical_multi_filtration[int32_t]]& x, bool copy = False):
    cdef Py_ssize_t num_stuff = x.size()
    return [_ff21cview_i32(&(x[i]), copy=copy) for i in range(num_stuff)]

cdef inline _vff2kcview_i32(vector[KCriticalFiltration[int32_t]]& x, bool copy = False):
    cdef Py_ssize_t num_stuff = x.size()
    return [_ff2kcview_i32(&(x[i]), copy=copy) for i in range(num_stuff)]
###### ------------------- PY TO CPP
#### ----------

cdef inline KCriticalFiltration[int64_t] _py2kc_i64(object filtrations_):
    cdef int64_t[:,:] filtrations = np.asarray(filtrations_, dtype=np.int64)
    cdef KCriticalFiltration[int64_t] out
    with nogil:
        out.set_num_generators(filtrations.shape[0])
        for i in range(filtrations.shape[0]):
            out[i].resize(filtrations.shape[1])
            for j in range(filtrations.shape[1]):
                out[i][j] = filtrations[i,j]
    return out

cdef inline Finitely_critical_multi_filtration[int64_t] _py21c_i64(object filtration_):
    cdef int64_t[:] filtration = np.asarray(filtration_, dtype=np.int64)
    cdef Finitely_critical_multi_filtration[int64_t] out
    with nogil:
        out.reserve(len(filtration))
        for i in range(len(filtration)):
            out.push_back(filtration[i])
    return out


cdef inline vector[Finitely_critical_multi_filtration[int64_t]] _py2v1c_i64(object filtrations_):
    cdef int64_t[:,:] filtrations = np.asarray(filtrations_, dtype=np.int64)
    cdef vector[Finitely_critical_multi_filtration[int64_t]] out
    out.reserve(filtrations.shape[0])
    for i in range(filtrations.shape[0]):
        out.push_back(_py21c_i64(filtrations[i,:]))
    return out


###### ------------------- CPP to PY

## CYTHON BUG: using tuples here will cause some weird issues.
cdef inline _ff21cview_i64(Finitely_critical_multi_filtration[int64_t]* x, bool copy=False):
    cdef Py_ssize_t num_parameters = dereference(x).num_parameters()
    cdef int64_t[:] x_view = <int64_t[:num_parameters]>(&(dereference(x)[0]))
    return np.array(x_view) if copy else np.asarray(x_view)

cdef inline _ff2kcview_i64(KCriticalFiltration[int64_t]* x, bool copy=False):
    cdef Py_ssize_t k = dereference(x).num_generators()
    return [_ff21cview_i64(&(dereference(x)[i]), copy=copy) for i in range(k)]


cdef inline _vff21cview_i64(vector[Finitely_critical_multi_filtration[int64_t]]& x, bool copy = False):
    cdef Py_ssize_t num_stuff = x.size()
    return [_ff21cview_i64(&(x[i]), copy=copy) for i in range(num_stuff)]

cdef inline _vff2kcview_i64(vector[KCriticalFiltration[int64_t]]& x, bool copy = False):
    cdef Py_ssize_t num_stuff = x.size()
    return [_ff2kcview_i64(&(x[i]), copy=copy) for i in range(num_stuff)]
###### ------------------- PY TO CPP
#### ----------

cdef inline KCriticalFiltration[float] _py2kc_f32(object filtrations_):
    cdef float[:,:] filtrations = np.asarray(filtrations_, dtype=np.float32)
    cdef KCriticalFiltration[float] out
    with nogil:
        out.set_num_generators(filtrations.shape[0])
        for i in range(filtrations.shape[0]):
            out[i].resize(filtrations.shape[1])
            for j in range(filtrations.shape[1]):
                out[i][j] = filtrations[i,j]
    return out

cdef inline Finitely_critical_multi_filtration[float] _py21c_f32(object filtration_):
    cdef float[:] filtration = np.asarray(filtration_, dtype=np.float32)
    cdef Finitely_critical_multi_filtration[float] out
    with nogil:
        out.reserve(len(filtration))
        for i in range(len(filtration)):
            out.push_back(filtration[i])
    return out


cdef inline vector[Finitely_critical_multi_filtration[float]] _py2v1c_f32(object filtrations_):
    cdef float[:,:] filtrations = np.asarray(filtrations_, dtype=np.float32)
    cdef vector[Finitely_critical_multi_filtration[float]] out
    out.reserve(filtrations.shape[0])
    for i in range(filtrations.shape[0]):
        out.push_back(_py21c_f32(filtrations[i,:]))
    return out


###### ------------------- CPP to PY

## CYTHON BUG: using tuples here will cause some weird issues.
cdef inline _ff21cview_f32(Finitely_critical_multi_filtration[float]* x, bool copy=False):
    cdef Py_ssize_t num_parameters = dereference(x).num_parameters()
    cdef float[:] x_view = <float[:num_parameters]>(&(dereference(x)[0]))
    return np.array(x_view) if copy else np.asarray(x_view)

cdef inline _ff2kcview_f32(KCriticalFiltration[float]* x, bool copy=False):
    cdef Py_ssize_t k = dereference(x).num_generators()
    return [_ff21cview_f32(&(dereference(x)[i]), copy=copy) for i in range(k)]


cdef inline _vff21cview_f32(vector[Finitely_critical_multi_filtration[float]]& x, bool copy = False):
    cdef Py_ssize_t num_stuff = x.size()
    return [_ff21cview_f32(&(x[i]), copy=copy) for i in range(num_stuff)]

cdef inline _vff2kcview_f32(vector[KCriticalFiltration[float]]& x, bool copy = False):
    cdef Py_ssize_t num_stuff = x.size()
    return [_ff2kcview_f32(&(x[i]), copy=copy) for i in range(num_stuff)]
###### ------------------- PY TO CPP
#### ----------

cdef inline KCriticalFiltration[double] _py2kc_f64(object filtrations_):
    cdef double[:,:] filtrations = np.asarray(filtrations_, dtype=np.float64)
    cdef KCriticalFiltration[double] out
    with nogil:
        out.set_num_generators(filtrations.shape[0])
        for i in range(filtrations.shape[0]):
            out[i].resize(filtrations.shape[1])
            for j in range(filtrations.shape[1]):
                out[i][j] = filtrations[i,j]
    return out

cdef inline Finitely_critical_multi_filtration[double] _py21c_f64(object filtration_):
    cdef double[:] filtration = np.asarray(filtration_, dtype=np.float64)
    cdef Finitely_critical_multi_filtration[double] out
    with nogil:
        out.reserve(len(filtration))
        for i in range(len(filtration)):
            out.push_back(filtration[i])
    return out


cdef inline vector[Finitely_critical_multi_filtration[double]] _py2v1c_f64(object filtrations_):
    cdef double[:,:] filtrations = np.asarray(filtrations_, dtype=np.float64)
    cdef vector[Finitely_critical_multi_filtration[double]] out
    out.reserve(filtrations.shape[0])
    for i in range(filtrations.shape[0]):
        out.push_back(_py21c_f64(filtrations[i,:]))
    return out


###### ------------------- CPP to PY

## CYTHON BUG: using tuples here will cause some weird issues.
cdef inline _ff21cview_f64(Finitely_critical_multi_filtration[double]* x, bool copy=False):
    cdef Py_ssize_t num_parameters = dereference(x).num_parameters()
    cdef double[:] x_view = <double[:num_parameters]>(&(dereference(x)[0]))
    return np.array(x_view) if copy else np.asarray(x_view)

cdef inline _ff2kcview_f64(KCriticalFiltration[double]* x, bool copy=False):
    cdef Py_ssize_t k = dereference(x).num_generators()
    return [_ff21cview_f64(&(dereference(x)[i]), copy=copy) for i in range(k)]


cdef inline _vff21cview_f64(vector[Finitely_critical_multi_filtration[double]]& x, bool copy = False):
    cdef Py_ssize_t num_stuff = x.size()
    return [_ff21cview_f64(&(x[i]), copy=copy) for i in range(num_stuff)]

cdef inline _vff2kcview_f64(vector[KCriticalFiltration[double]]& x, bool copy = False):
    cdef Py_ssize_t num_stuff = x.size()
    return [_ff2kcview_f64(&(x[i]), copy=copy) for i in range(num_stuff)]
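For context, all of the conversion helpers above consume the same two NumPy layouts; the arrays below only illustrate the expected shapes and dtypes (the cdef inline functions themselves are not callable from Python):

# Shapes expected by the filtration_conversions helpers (illustrative only)
import numpy as np

# One-critical filtration value (_py21c_*): one grade per simplex, shape (num_parameters,)
one_critical = np.array([0.3, 1.2], dtype=np.float32)

# k-critical filtration value (_py2kc_*): several generators, shape (num_generators, num_parameters)
k_critical = np.array([[0.3, 1.2],
                       [0.8, 0.5]], dtype=np.float32)

# Batch of one-critical values (_py2v1c_*): shape (num_simplices, num_parameters);
# the i32/i64/f32/f64 suffixes select the matching C++ scalar type.
batch = np.stack([one_critical, one_critical + np.float32(0.1)])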