modulo-vki 2.0.6__py3-none-any.whl → 2.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- modulo_vki/__init__.py +0 -22
- modulo_vki/core/__init__.py +9 -9
- modulo_vki/core/_dft.py +132 -61
- modulo_vki/core/_dmd_s.py +81 -72
- modulo_vki/core/_k_matrix.py +209 -81
- modulo_vki/core/_mpod_space.py +180 -180
- modulo_vki/core/_mpod_time.py +175 -154
- modulo_vki/core/_pod_space.py +185 -184
- modulo_vki/core/_pod_time.py +49 -48
- modulo_vki/core/_spod_s.py +101 -101
- modulo_vki/core/_spod_t.py +104 -104
- modulo_vki/core/spatial_structures.py +367 -0
- modulo_vki/core/temporal_structures.py +241 -0
- modulo_vki/core/utils.py +474 -0
- modulo_vki/modulo.py +897 -828
- modulo_vki/modulo_old.py +1368 -0
- modulo_vki/utils/__init__.py +4 -4
- modulo_vki/utils/_plots.py +51 -51
- modulo_vki/utils/_utils.py +358 -341
- modulo_vki/utils/others.py +461 -452
- modulo_vki/utils/read_db.py +339 -339
- {modulo_vki-2.0.6.dist-info → modulo_vki-2.1.0.dist-info}/METADATA +397 -304
- modulo_vki-2.1.0.dist-info/RECORD +26 -0
- {modulo_vki-2.0.6.dist-info → modulo_vki-2.1.0.dist-info}/WHEEL +1 -1
- {modulo_vki-2.0.6.dist-info → modulo_vki-2.1.0.dist-info/licenses}/LICENSE +21 -21
- modulo_vki-2.0.6.dist-info/RECORD +0 -22
- {modulo_vki-2.0.6.dist-info → modulo_vki-2.1.0.dist-info}/top_level.txt +0 -0
modulo_vki/utils/_utils.py
CHANGED
@@ -1,342 +1,359 @@
 import numpy as np
 from scipy import signal
 from scipy.sparse.linalg import svds, eigsh
 from sklearn.decomposition import TruncatedSVD
 from scipy.linalg import eigh
 from sklearn.utils.extmath import randomized_svd


 def Bound_EXT(S, Ex, boundaries):
     """
     This function computes the extension of a signal for
     filtering purposes

     :param S: The Input signal
     :param Nf: The Size of the Kernel (must be an odd number!)
     :param boundaries: The type of extension:
         ‘reflect’ (d c b a | a b c d | d c b a) The input is extended by reflecting about the edge of the last pixel.
         ‘nearest’ (a a a a | a b c d | d d d d) The input is extended by replicating the last pixel.
         ‘wrap’ (a b c d | a b c d | a b c d) The input is extended by wrapping around to the opposite edge.
         ‘extrap’ Extrapolation The input is extended via linear extrapolation.


     """
     # We first perform a zero padding
     # Ex=int((Nf-1)/2) # Extension on each size
     size_Ext = 2 * Ex + len(S)  # Compute the size of the extended signal
     S_extend = np.zeros((int(size_Ext)))  # Initialize extended signal
-    S_extend[Ex:int((size_Ext - Ex))] = S
+    S_extend[Ex:int((size_Ext - Ex))] = S  # Assign the Signal on the zeroes

     if boundaries == "reflect":
         LEFT = np.flip(S[0:Ex])  # Prepare the reflection on the left
-        RIGHT = np.flip(S[len(S) - Ex:len(S)])  # Prepare the
+        RIGHT = np.flip(S[len(S) - Ex:len(S)])  # Prepare the reflection on the right
         S_extend[0:Ex] = LEFT
         S_extend[len(S_extend) - Ex:len(S_extend)] = RIGHT
     elif boundaries == "nearest":
         LEFT = np.ones(Ex) * S[0]  # Prepare the constant on the left
         RIGHT = np.ones(Ex) * S[len(S) - 1]  # Prepare the constant on the Right
         S_extend[0:Ex] = LEFT
         S_extend[len(S_extend) - Ex:len(S_extend)] = RIGHT
     elif boundaries == "wrap":
         LEFT = S[len(S) - Ex:len(S)]  # Wrap on the Left
         RIGHT = S[0:Ex]  # Wrap on the Right
         S_extend[0:Ex] = LEFT
         S_extend[len(S_extend) - Ex:len(S_extend)] = RIGHT
     elif boundaries == "extrap":
         LEFT = np.ones(Ex) * S[0]  # Prepare the constant on the left
         RIGHT = np.ones(Ex) * S[len(S) - 1]  # Prepare the constant on the Right
         S_extend[0:Ex] = LEFT
         S_extend[len(S_extend) - Ex:len(S_extend)] = RIGHT
         print('Not active yet, replaced by nearest')
     return S_extend


 def conv_m(K, h, Ex, boundaries):
     """
     This function computes the 2D convolution by perfoming 2 sets of 1D convolutions.
     Moreover, we here use the fft with an appropriate extension
     that avoids the periodicity condition.

     :param K: Matrix to be filtered
     :param h: The 1D Kernel of the filter
     :param boundaries: The type of extension:
         ‘reflect’ (d c b a | a b c d | d c b a) The input is extended by reflecting about the edge of the last pixel.
         ‘nearest’ (a a a a | a b c d | d d d d) The input is extended by replicating the last pixel.
         ‘wrap’ (a b c d | a b c d | a b c d) The input is extended by wrapping around to the opposite edge.
         ‘extrap’ Extrapolation The input is extended via linear extrapolation.
     """
     # Filter along the raws
     n_t = np.shape(K)[0]
     # Ex=int(n_t/2)
     K_F1 = np.zeros(np.shape(K))
     K_F2 = np.zeros(np.shape(K))
     # K_F=np.zeros(np.shape(K))
     for k in range(0, n_t):
         S = K[:, k]
         S_Ext = Bound_EXT(S, Ex, boundaries)
         S_Filt = signal.fftconvolve(S_Ext, h, mode='valid')
         # Compute where to take the signal
         Ex1 = int((len(S_Filt) - len(S)) / 2)
         # K_F1[k,:]=S_Filt[Ex:(len(S_Filt)-Ex)]
         K_F1[:, k] = S_Filt[Ex1:(len(S_Filt) - Ex1)]
     for k in range(0, n_t):
         S = K_F1[k, :]
         S_Ext = Bound_EXT(S, Ex, boundaries)
         S_Filt = signal.fftconvolve(S_Ext, h, mode='valid')
         # Compute where to take the signal
         Ex1 = int((len(S_Filt) - len(S)) / 2)
         # K_F2[:,k]=S_Filt[Ex:(len(S_Filt)-Ex)]
         K_F2[k, :] = S_Filt[Ex1:(len(S_Filt) - Ex1)]
     # K_F=K_F1+K_F2
     return K_F2

-[old lines 92-341: not legible in this rendering of the diff]
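`Bound_EXT` and `conv_m` carry over from 2.0.6 essentially unchanged (only the inline comments were restored). Together they filter a matrix along its columns and then its rows with non-periodic boundary handling; since both loops run over `np.shape(K)[0]`, the matrix is effectively assumed square. A minimal usage sketch, assuming modulo_vki 2.1.0 is installed; the box kernel and sizes are hypothetical, with `Ex` taken from the `Ex=int((Nf-1)/2)` hint in the comments:

```python
import numpy as np
from modulo_vki.utils._utils import conv_m

K = np.random.rand(100, 100)  # hypothetical square matrix to be smoothed
Nf = 11                       # kernel size (odd, per the docstring)
h = np.ones(Nf) / Nf          # 1D box kernel, applied along both axes
Ex = int((Nf - 1) / 2)        # extension on each side of the signal

K_filt = conv_m(K, h, Ex, boundaries='nearest')
print(K_filt.shape)           # (100, 100): the shape is preserved
```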
+
+def conv_m_2D(K, h, Ex, boundaries):
+
+    # Extended K
+    K_ext = np.pad(K, Ex, mode=boundaries)
+
+    # Filtering matrix
+    h_mat = np.outer(np.atleast_2d(h).T, np.atleast_2d(h))
+
+    # Filtering
+    K_filt = signal.fftconvolve(K_ext, h_mat, mode='valid')
+
+    # Interior K
+    Ex1 = int((len(K_filt) - len(K)) / 2)
+    K_F2 = K_filt[Ex1:(len(K_filt) - Ex1), Ex1:(len(K_filt) - Ex1)]
+
+    return K_F2
+
+
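The new `conv_m_2D` does the same separable smoothing in a single 2D pass, building the kernel as an outer product and delegating the extension to `np.pad`. Note that `np.pad` mode names differ from `Bound_EXT`'s options: `'reflect'` and `'wrap'` carry over, but the nearest-neighbour mode is called `'edge'` and there is no `'extrap'`. A sketch with the same hypothetical kernel as above:

```python
import numpy as np
from modulo_vki.utils._utils import conv_m_2D

K = np.random.rand(100, 100)
Nf = 11
h = np.ones(Nf) / Nf
Ex = int((Nf - 1) / 2)

# 'reflect' is a valid mode for both conv_m and np.pad; a 'nearest'
# extension would have to be requested as 'edge' here.
K_filt = conv_m_2D(K, h, Ex, boundaries='reflect')
print(K_filt.shape)  # (100, 100)
```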
+def _loop_gemm(a, b, c=None, chunksize=100):
+    size_i = a.shape[0]
+    size_zip = a.shape[1]
+
+    size_j = b.shape[1]
+    size_alt_zip = b.shape[0]
+
+    if size_zip != size_alt_zip:
+        ValueError("Loop GEMM zip index is not of the same size for both tensors")
+
+    if c is None:
+        c = np.zeros((size_i, size_j))
+
+    istart = 0
+    for i in range(int(np.ceil(size_i / float(chunksize)))):
+
+        left_slice = slice(istart, istart + chunksize)
+        left_view = a[left_slice]
+
+        jstart = 0
+        for j in range(int(np.ceil(size_j / float(chunksize)))):
+            right_slice = slice(jstart, jstart + chunksize)
+            right_view = b[:, right_slice]
+
+            c[left_slice, right_slice] = np.dot(left_view, right_view)
+            jstart += chunksize
+
+        istart += chunksize
+
+    return c
+
+
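`_loop_gemm` evaluates `a @ b` one `chunksize x chunksize` output block at a time, which bounds peak memory when the operands are, for example, memory-mapped arrays. One thing to watch: the shape check constructs a `ValueError` without raising it, so mismatched inner dimensions only fail later inside `np.dot`. A sketch with hypothetical sizes:

```python
import numpy as np
from modulo_vki.utils._utils import _loop_gemm

a = np.random.rand(250, 300)
b = np.random.rand(300, 120)

c = _loop_gemm(a, b, chunksize=100)  # 3 x 2 grid of output blocks
print(np.allclose(c, a @ b))         # True: matches a plain matrix product
```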
+def svds_RND(K, R_K):
+    """
+    Quick and dirty implementation of randomized SVD
+    for computing eigenvalues of K. We follow same input/output structure
+    as for the svds in scipy
+    """
+    svd = TruncatedSVD(R_K)
+    svd.fit_transform(K)
+    Psi_P = svd.components_.T
+    Lambda_P = svd.singular_values_
+    return Psi_P, Lambda_P
+
+
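`svds_RND` wraps scikit-learn's `TruncatedSVD` behind the same output structure as `scipy.sparse.linalg.svds`, so the two can be swapped. The matrices it is pointed at (see `switch_eigs` below) are symmetric and positive semi-definite, for which singular values coincide with eigenvalues; that is what lets an SVD routine stand in for an eigensolver, as the commented-out `switch_svds_K` further down also argues. A sketch on a hypothetical correlation matrix:

```python
import numpy as np
from modulo_vki.utils._utils import svds_RND

D = np.random.rand(2000, 200)  # hypothetical snapshot matrix
K = D.T @ D                    # symmetric, positive semi-definite

Psi_P, Lambda_P = svds_RND(K, R_K=10)  # leading 10 modes
print(Psi_P.shape, Lambda_P.shape)     # (200, 10) (10,)
```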
+def overlap(array, len_chunk, len_sep=1):
+    """
+    Returns a matrix of all full overlapping chunks of the input `array`, with a chunk
+    length of `len_chunk` and a separation length of `len_sep`. Begins with the first full
+    chunk in the array.
+    This function is taken from https://stackoverflow.com/questions/38163366/split-list-into-separate-but-overlapping-chunks
+    it is designed to split an array with certain overlaps
+
+    :param array:
+    :param len_chunk:
+    :param len_sep:
+    :return array_matrix:
+    """
+
+    n_arrays = int(np.ceil((array.size - len_chunk + 1) / len_sep))
+
+    array_matrix = np.tile(array, n_arrays).reshape(n_arrays, -1)
+
+    columns = np.array(((len_sep * np.arange(0, n_arrays)).reshape(n_arrays, -1) + np.tile(
+        np.arange(0, len_chunk), n_arrays).reshape(n_arrays, -1)), dtype=np.intp)
+
+    rows = np.array((np.arange(n_arrays).reshape(n_arrays, -1) + np.tile(
+        np.zeros(len_chunk), n_arrays).reshape(n_arrays, -1)), dtype=np.intp)
+
+    return array_matrix[rows, columns]
+
+
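`overlap` is a vectorized chunker: it returns every full window of `len_chunk` samples, advancing `len_sep` samples between window starts, and drops any trailing partial window. A small worked example:

```python
import numpy as np
from modulo_vki.utils._utils import overlap

print(overlap(np.arange(6), len_chunk=3, len_sep=2))
# [[0 1 2]
#  [2 3 4]]
```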
+# def switch_svds_K(A, n_modes, svd_solver):
+#     """
+#     Utility function to switch between different svd solvers
+#     for the diagonalization of K. Being K symmetric and positive definite,
+#     Its eigenvalue decomposition is equivalent to an SVD.
+#     Thus we are using SVD solvers as eig solvers here.
+#     The options are the same used for switch_svds (which goes for the SVD of D)
+
+#     --------------------------------------------------------------------------------------------------------------------
+#     Parameters:
+#     -----------
+#     :param A: np.array,
+#         Array of which compute the SVD
+#     :param n_modes: int,
+#         Number of modes to be computed. Note that if the `svd_numpy` method is chosen, the full matrix are
+#         computed, but then only the first n_modes are returned. Thus, it is not more computationally efficient.
+#     :param svd_solver: str,
+#         can be:
+#         'svd_numpy'.
+#         This uses np.linalg.svd.
+#         It is the most accurate but the slowest and most expensive.
+#         It computes all the modes.
+
+#         'svd_sklearn_truncated'
+#         This uses the TruncatedSVD from scikitlearn. This uses either
+#         svds from scipy or randomized from sklearn depending on the size
+#         of the matrix. These are the two remaining options.
+#         To the merit of chosing this is to let sklearn take the decision as to
+#         what to use.
+
+#         'svd_scipy_sparse'
+#         This uses the svds from scipy.
+
+#         'svd_sklearn_randomized',
+#         This uses the randomized from sklearn.
+#     Returns
+#     --------
+#     :return Psi_P, np.array (N_S x n_modes)
+#     :return Sigma_P, np.array (n_modes)
+#     """
+#     if svd_solver.lower() == 'svd_sklearn_truncated':
+#         svd = TruncatedSVD(n_modes)
+#         svd.fit_transform(A)
+#         Psi_P = svd.components_.T
+#         Lambda_P = svd.singular_values_
+#         Sigma_P = np.sqrt(Lambda_P)
+#     elif svd_solver.lower() == 'svd_numpy':
+#         Psi_P, Lambda_P, _ = np.linalg.svd(A)
+#         Sigma_P = np.sqrt(Lambda_P)
+#         Psi_P = Psi_P[:, :n_modes]
+#         Sigma_P = Sigma_P[:n_modes]
+#     elif svd_solver.lower() == 'svd_sklearn_randomized':
+#         Psi_P, Lambda_P = randomized_svd(A, n_modes)
+#         Sigma_P = np.sqrt(Lambda_P)
+#     elif svd_solver.lower() == 'svd_scipy_sparse':
+#         Psi_P, Lambda_P, _ = svds(A, k=n_modes)
+#         Sigma_P = np.sqrt(Lambda_P)
+
+#     return Psi_P, Sigma_P
+
+
+def switch_svds(A, n_modes, svd_solver='svd_sklearn_truncated'):
+    """
+    Utility function to switch between different svd solvers
+    for the SVD of the snapshot matrix. This is a true SVD solver.
+
+    --------------------------------------------------------------------------------------------------------------------
+    Parameters:
+    -----------
+    :param A: np.array,
+        Array of which compute the SVD
+    :param n_modes: int,
+        Number of modes to be computed. Note that if the `svd_numpy` method is chosen, the full matrix are
+        computed, but then only the first n_modes are returned. Thus, it is not more computationally efficient.
+    :param svd_solver: str,
+        can be:
+        'svd_numpy'.
+        This uses np.linalg.svd.
+        It is the most accurate but the slowest and most expensive.
+        It computes all the modes.
+
+        'svd_sklearn_truncated'
+        This uses the TruncatedSVD from scikitlearn. This uses either
+        svds from scipy or randomized from sklearn depending on the size
+        of the matrix. These are the two remaining options.
+        The merit of chosing this is to let sklearn take the decision as to
+        what to use. One prefers to force the usage of any of those with the other two
+        options
+
+        'svd_scipy_sparse'
+        This uses the svds from scipy.
+
+        'svd_sklearn_randomized',
+        This uses the randomized from sklearn.
+
+    Returns
+    --------
+    :return Psi_P, np.array (N_S x n_modes)
+    :return Sigma_P, np.array (n_modes)
+    """
+    if svd_solver.lower() == 'svd_sklearn_truncated':
+        svd = TruncatedSVD(n_modes)
+        X_transformed = svd.fit_transform(A)
+        Phi_P = X_transformed / svd.singular_values_
+        Psi_P = svd.components_.T
+        Sigma_P = svd.singular_values_
+    elif svd_solver.lower() == 'svd_numpy':
+        Phi_P, Sigma_P, Psi_P = np.linalg.svd(A)
+        Phi_P = Phi_P[:, 0:n_modes]
+        Psi_P = Psi_P.T[:, 0:n_modes]
+        Sigma_P = Sigma_P[0:n_modes]
+    elif svd_solver.lower() == 'svd_sklearn_randomized':
+        Phi_P, Sigma_P, Psi_P = randomized_svd(A, n_modes)
+        Psi_P = Psi_P.T
+    elif svd_solver.lower() == 'svd_scipy_sparse':
+        Phi_P, Sigma_P, Psi_P = svds(A, k=n_modes)
+        Psi_P = Psi_P.T
+        # It turns out that this does not rank them in decreasing order.
+        # Hence we do it manually:
+        idx = np.flip(np.argsort(Sigma_P))
+        Sigma_P = Sigma_P[idx]
+        Phi_P = Phi_P[:, idx]
+        Psi_P = Psi_P[:, idx]
+
+    return Phi_P, Psi_P, Sigma_P
+
+
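`switch_svds` dispatches the SVD of the snapshot matrix to one of four backends and returns the triplet `(Phi_P, Psi_P, Sigma_P)` with modes in decreasing order of singular value; only the `svd_scipy_sparse` branch needs the manual re-sort, since `svds` does not guarantee that ordering. A sketch on hypothetical data:

```python
import numpy as np
from modulo_vki.utils._utils import switch_svds

D = np.random.rand(500, 60)  # hypothetical snapshot matrix
Phi, Psi, Sigma = switch_svds(D, n_modes=5, svd_solver='svd_scipy_sparse')

print(Phi.shape, Psi.shape, Sigma.shape)  # (500, 5) (60, 5) (5,)
print(np.all(np.diff(Sigma) <= 0))        # True: re-sorted to decreasing order
```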
+def switch_eigs(A, n_modes, eig_solver):
+    """
+    Utility function to switch between different eig solvers in a consistent way across different
+    methods of the package.
+    --------------------------------------------------------------------------------------------------------------------
+    Parameters:
+    -----------
+    :param A: np.array,
+        Array of which compute the eigenvalues
+    :param n_modes: int,
+        Number of modes to be computed. Note that if the `svd_numpy` method is chosen, the full matrix are
+        computed, but then only the first n_modes are returned. Thus, it is not more computationally efficient.
+    :param eig_solver: str,
+        can be:
+        'svd_sklearn_randomized',
+        This uses svd truncated approach, which picks either randomized svd or scipy svds.
+        By default, it should pick mostly the first.
+
+        'eigsh' from scipy sparse. This is a compromise between the previous and the following.
+
+        'eigh' from scipy lin alg. This is the most precise, although a bit more expensive
+
+    Returns
+    --------
+    :return Psi_P, np.array (N_S x n_modes)
+    :return Sigma_P, np.array (n_modes)
+    """
+    if eig_solver.lower() == 'svd_sklearn_randomized':
+        Psi_P, Lambda_P = svds_RND(A, n_modes)
+    elif eig_solver.lower() == 'eigh':
+        n = np.shape(A)[0]
+        Lambda_P, Psi_P = eigh(A, subset_by_index=[n - n_modes, n - 1])
+        # It turns out that this does not rank them in decreasing order.
+        # Hence we do it manually:
+        idx = np.flip(np.argsort(Lambda_P))
+        Lambda_P = Lambda_P[idx]
+        Psi_P = Psi_P[:, idx]
+    elif eig_solver.lower() == 'eigsh':
+        Lambda_P, Psi_P = eigsh(A, k=n_modes)
+        # It turns out that this does not rank them in decreasing order.
+        # Hence we do it manually:
+        idx = np.flip(np.argsort(Lambda_P))
+        Lambda_P = Lambda_P[idx]
+        Psi_P = Psi_P[:, idx]
+    else:
+        raise ValueError('eig_solver must be svd_sklearn_randomized, eigh or eigsh')
+
+    Sigma_P = np.sqrt(Lambda_P)
+
     return Psi_P, Sigma_P
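`switch_eigs` is the eigendecomposition counterpart, intended for symmetric positive semi-definite matrices (the solvers it wraps assume symmetry): it returns the leading eigenvectors together with `Sigma_P = np.sqrt(Lambda_P)`, re-sorting the `eigh`/`eigsh` results into decreasing order. A sketch on a hypothetical matrix of that kind (the `eigh` branch relies on `subset_by_index`, available in SciPy 1.5 and later):

```python
import numpy as np
from modulo_vki.utils._utils import switch_eigs

D = np.random.rand(1000, 50)
K = D.T @ D  # symmetric, positive semi-definite

Psi, Sigma = switch_eigs(K, n_modes=5, eig_solver='eigh')
print(Psi.shape, Sigma.shape)       # (50, 5) (5,)
print(np.all(np.diff(Sigma) <= 0))  # True: decreasing order
```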