modulo-vki 2.0.7__py3-none-any.whl → 2.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- modulo_vki/__init__.py +0 -22
- modulo_vki/core/_dft.py +92 -21
- modulo_vki/core/_dmd_s.py +48 -39
- modulo_vki/core/_k_matrix.py +145 -17
- modulo_vki/core/_mpod_time.py +46 -25
- modulo_vki/core/_pod_space.py +3 -2
- modulo_vki/core/_pod_time.py +2 -1
- modulo_vki/core/spatial_structures.py +367 -0
- modulo_vki/core/temporal_structures.py +241 -0
- modulo_vki/core/utils.py +474 -0
- modulo_vki/modulo.py +751 -682
- modulo_vki/modulo_old.py +1368 -0
- modulo_vki/utils/_utils.py +19 -2
- modulo_vki/utils/others.py +18 -9
- {modulo_vki-2.0.7.dist-info → modulo_vki-2.1.1.dist-info}/METADATA +104 -82
- modulo_vki-2.1.1.dist-info/RECORD +26 -0
- {modulo_vki-2.0.7.dist-info → modulo_vki-2.1.1.dist-info}/WHEEL +1 -1
- modulo_vki-2.0.7.dist-info/RECORD +0 -22
- {modulo_vki-2.0.7.dist-info → modulo_vki-2.1.1.dist-info/licenses}/LICENSE +0 -0
- {modulo_vki-2.0.7.dist-info → modulo_vki-2.1.1.dist-info}/top_level.txt +0 -0
modulo_vki/core/utils.py
ADDED
@@ -0,0 +1,474 @@
import numpy as np
from scipy import signal
from scipy.sparse.linalg import svds, eigsh
from sklearn.decomposition import TruncatedSVD
from scipy.linalg import eigh
from sklearn.utils.extmath import randomized_svd
import warnings


def Bound_EXT(S, Ex, boundaries):
    """
    This function computes the extension of a signal for filtering purposes.

    :param S: The input signal
    :param Ex: The extension length on each side (for a kernel of odd size Nf, this is (Nf - 1) / 2)
    :param boundaries: The type of extension:
        'reflect' (d c b a | a b c d | d c b a)  The input is extended by reflecting about the edge of the last pixel.
        'nearest' (a a a a | a b c d | d d d d)  The input is extended by replicating the last pixel.
        'wrap'    (a b c d | a b c d | a b c d)  The input is extended by wrapping around to the opposite edge.
        'extrap'  The input is extended via linear extrapolation (not active yet; falls back to 'nearest').
    """
    # We first perform a zero padding
    size_Ext = 2 * Ex + len(S)  # Compute the size of the extended signal
    S_extend = np.zeros(int(size_Ext))  # Initialize extended signal
    S_extend[Ex:int(size_Ext - Ex)] = S  # Assign the signal onto the zeros

    if boundaries == "reflect":
        LEFT = np.flip(S[0:Ex])  # Prepare the reflection on the left
        RIGHT = np.flip(S[len(S) - Ex:len(S)])  # Prepare the reflection on the right
        S_extend[0:Ex] = LEFT
        S_extend[len(S_extend) - Ex:len(S_extend)] = RIGHT
    elif boundaries == "nearest":
        LEFT = np.ones(Ex) * S[0]  # Prepare the constant on the left
        RIGHT = np.ones(Ex) * S[len(S) - 1]  # Prepare the constant on the right
        S_extend[0:Ex] = LEFT
        S_extend[len(S_extend) - Ex:len(S_extend)] = RIGHT
    elif boundaries == "wrap":
        LEFT = S[len(S) - Ex:len(S)]  # Wrap on the left
        RIGHT = S[0:Ex]  # Wrap on the right
        S_extend[0:Ex] = LEFT
        S_extend[len(S_extend) - Ex:len(S_extend)] = RIGHT
    elif boundaries == "extrap":
        LEFT = np.ones(Ex) * S[0]  # Prepare the constant on the left
        RIGHT = np.ones(Ex) * S[len(S) - 1]  # Prepare the constant on the right
        S_extend[0:Ex] = LEFT
        S_extend[len(S_extend) - Ex:len(S_extend)] = RIGHT
        print("'extrap' is not active yet; replaced by 'nearest'")
    return S_extend


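# A minimal usage sketch (hypothetical values, not part of the shipped module):
# with Ex=2, each boundary mode pads two samples on both sides of S = [1, 2, 3, 4]:
#
#   >>> Bound_EXT(np.array([1., 2., 3., 4.]), 2, 'reflect')
#   array([2., 1., 1., 2., 3., 4., 4., 3.])
#   >>> Bound_EXT(np.array([1., 2., 3., 4.]), 2, 'wrap')
#   array([3., 4., 1., 2., 3., 4., 1., 2.])

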
def conv_m(K, h, Ex, boundaries):
    """
    This function computes a 2D convolution by performing two sets of 1D convolutions.
    Moreover, the FFT is used here together with an appropriate signal extension
    that avoids the implicit periodicity condition.

    :param K: Matrix to be filtered
    :param h: The 1D kernel of the filter
    :param Ex: The extension length on each side (see Bound_EXT)
    :param boundaries: The type of extension:
        'reflect' (d c b a | a b c d | d c b a)  The input is extended by reflecting about the edge of the last pixel.
        'nearest' (a a a a | a b c d | d d d d)  The input is extended by replicating the last pixel.
        'wrap'    (a b c d | a b c d | a b c d)  The input is extended by wrapping around to the opposite edge.
        'extrap'  The input is extended via linear extrapolation.
    """
    n_t = np.shape(K)[0]
    K_F1 = np.zeros(np.shape(K))
    K_F2 = np.zeros(np.shape(K))
    # Filter along the columns (axis 0)
    for k in range(0, n_t):
        S = K[:, k]
        S_Ext = Bound_EXT(S, Ex, boundaries)
        S_Filt = signal.fftconvolve(S_Ext, h, mode='valid')
        # Compute where to take the signal
        Ex1 = int((len(S_Filt) - len(S)) / 2)
        K_F1[:, k] = S_Filt[Ex1:(len(S_Filt) - Ex1)]
    # Filter along the rows (axis 1)
    for k in range(0, n_t):
        S = K_F1[k, :]
        S_Ext = Bound_EXT(S, Ex, boundaries)
        S_Filt = signal.fftconvolve(S_Ext, h, mode='valid')
        # Compute where to take the signal
        Ex1 = int((len(S_Filt) - len(S)) / 2)
        K_F2[k, :] = S_Filt[Ex1:(len(S_Filt) - Ex1)]
    return K_F2


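# A minimal usage sketch (hypothetical sizes): smoothing a 100 x 100 matrix K
# with a normalized Gaussian kernel, using half the signal length as extension:
#
#   >>> K = np.random.rand(100, 100)
#   >>> h = signal.windows.gaussian(21, std=4)
#   >>> h = h / h.sum()
#   >>> K_filt = conv_m(K, h, Ex=50, boundaries='nearest')   # shape (100, 100)

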
def conv_m_2D(K, h, Ex, boundaries):
    """
    Same as conv_m, but the two 1D passes are replaced by a single 2D
    convolution with the separable kernel h x h on a padded copy of K.
    Note that `boundaries` must here be a valid np.pad mode.
    """
    # Extended K
    K_ext = np.pad(K, Ex, mode=boundaries)

    # Filtering matrix
    h_mat = np.outer(np.atleast_2d(h).T, np.atleast_2d(h))

    # Filtering
    K_filt = signal.fftconvolve(K_ext, h_mat, mode='valid')

    # Interior K
    Ex1 = int((len(K_filt) - len(K)) / 2)
    K_F2 = K_filt[Ex1:(len(K_filt) - Ex1), Ex1:(len(K_filt) - Ex1)]

    return K_F2


def _loop_gemm(a, b, c=None, chunksize=100):
    """
    Blocked (chunked) matrix product c = a @ b, computed chunksize x chunksize
    blocks at a time to limit peak memory usage.
    """
    size_i = a.shape[0]
    size_zip = a.shape[1]

    size_j = b.shape[1]
    size_alt_zip = b.shape[0]

    if size_zip != size_alt_zip:
        raise ValueError("Loop GEMM zip index is not of the same size for both tensors")

    if c is None:
        c = np.zeros((size_i, size_j))

    istart = 0
    for i in range(int(np.ceil(size_i / float(chunksize)))):

        left_slice = slice(istart, istart + chunksize)
        left_view = a[left_slice]

        jstart = 0
        for j in range(int(np.ceil(size_j / float(chunksize)))):
            right_slice = slice(jstart, jstart + chunksize)
            right_view = b[:, right_slice]

            c[left_slice, right_slice] = np.dot(left_view, right_view)
            jstart += chunksize

        istart += chunksize

    return c


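# A minimal sanity check (hypothetical sizes): the blocked product matches np.dot
# while only touching chunksize x chunksize blocks of the output at a time:
#
#   >>> a = np.random.rand(250, 300); b = np.random.rand(300, 180)
#   >>> np.allclose(_loop_gemm(a, b, chunksize=100), a @ b)
#   True

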
def svds_RND(K, R_K):
    """
    Quick-and-dirty randomized SVD used to approximate the R_K dominant
    eigenpairs of the (symmetric) matrix K. It follows the same input/output
    structure as scipy's svds.
    """
    svd = TruncatedSVD(R_K)
    svd.fit_transform(K)
    Psi_P = svd.components_.T
    Lambda_P = svd.singular_values_
    return Psi_P, Lambda_P


def overlap(array, len_chunk, len_sep=1):
    """
    Returns a matrix of all full overlapping chunks of the input `array`, with a chunk
    length of `len_chunk` and a separation length of `len_sep`. Begins with the first full
    chunk in the array.
    This function is taken from
    https://stackoverflow.com/questions/38163366/split-list-into-separate-but-overlapping-chunks
    and is designed to split an array into chunks with a prescribed overlap.

    :param array: 1D array to be split
    :param len_chunk: length of each chunk
    :param len_sep: separation (stride) between the starts of consecutive chunks
    :return array_matrix: matrix whose rows are the overlapping chunks
    """

    n_arrays = int(np.ceil((array.size - len_chunk + 1) / len_sep))

    array_matrix = np.tile(array, n_arrays).reshape(n_arrays, -1)

    columns = np.array(((len_sep * np.arange(0, n_arrays)).reshape(n_arrays, -1) + np.tile(
        np.arange(0, len_chunk), n_arrays).reshape(n_arrays, -1)), dtype=np.intp)

    rows = np.array((np.arange(n_arrays).reshape(n_arrays, -1) + np.tile(
        np.zeros(len_chunk), n_arrays).reshape(n_arrays, -1)), dtype=np.intp)

    return array_matrix[rows, columns]


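# A minimal usage sketch: chunks of length 4 whose starts are 2 samples apart,
# i.e. a 50% overlap, as used further below for the Welch-style segmentation:
#
#   >>> overlap(np.arange(6), len_chunk=4, len_sep=2)
#   array([[0, 1, 2, 3],
#          [2, 3, 4, 5]])

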
def switch_svds(A, n_modes, svd_solver='svd_sklearn_truncated'):
    """
    Utility function to switch between different SVD solvers
    for the SVD of the snapshot matrix. This is a true SVD solver.

    --------------------------------------------------------------------------------------------------------------------
    Parameters:
    -----------
    :param A: np.array,
        Array of which to compute the SVD.
    :param n_modes: int,
        Number of modes to be computed. Note that if the 'svd_numpy' solver is chosen, the full matrices are
        computed, but only the first n_modes are returned. It is thus not more computationally efficient.
    :param svd_solver: str,
        can be:

        'svd_numpy'
            This uses np.linalg.svd.
            It is the most accurate but the slowest and most expensive.
            It computes all the modes.

        'svd_sklearn_truncated'
            This uses TruncatedSVD from scikit-learn, which in turn uses either
            svds from scipy or randomized_svd from sklearn depending on the size
            of the matrix. These are the two remaining options below.
            The merit of choosing this option is to let sklearn decide which of
            the two to use; the other two options force one of them.

        'svd_scipy_sparse'
            This uses svds from scipy.

        'svd_sklearn_randomized'
            This uses randomized_svd from sklearn.

    Returns
    --------
    :return Phi_P: np.array (N_S x n_modes), left singular vectors
    :return Psi_P: np.array (N_T x n_modes), right singular vectors
    :return Sigma_P: np.array (n_modes,), singular values
    """
    if svd_solver.lower() == 'svd_sklearn_truncated':
        svd = TruncatedSVD(n_modes)
        X_transformed = svd.fit_transform(A)
        Phi_P = X_transformed / svd.singular_values_
        Psi_P = svd.components_.T
        Sigma_P = svd.singular_values_
    elif svd_solver.lower() == 'svd_numpy':
        Phi_P, Sigma_P, Psi_P = np.linalg.svd(A)
        Phi_P = Phi_P[:, 0:n_modes]
        Psi_P = Psi_P.T[:, 0:n_modes]
        Sigma_P = Sigma_P[0:n_modes]
    elif svd_solver.lower() == 'svd_sklearn_randomized':
        Phi_P, Sigma_P, Psi_P = randomized_svd(A, n_modes)
        Psi_P = Psi_P.T
    elif svd_solver.lower() == 'svd_scipy_sparse':
        Phi_P, Sigma_P, Psi_P = svds(A, k=n_modes)
        Psi_P = Psi_P.T
        # svds does not return the triplets in decreasing order of the
        # singular values, hence we sort them manually:
        idx = np.flip(np.argsort(Sigma_P))
        Sigma_P = Sigma_P[idx]
        Phi_P = Phi_P[:, idx]
        Psi_P = Psi_P[:, idx]

    return Phi_P, Psi_P, Sigma_P


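# A minimal usage sketch (hypothetical sizes): a rank-10 factorization of a
# 2000 x 200 snapshot matrix with the randomized solver:
#
#   >>> D = np.random.rand(2000, 200)
#   >>> Phi, Psi, Sigma = switch_svds(D, 10, 'svd_sklearn_randomized')
#   >>> Phi.shape, Psi.shape, Sigma.shape
#   ((2000, 10), (200, 10), (10,))

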
def switch_eigs(A, n_modes, eig_solver):
    """
    Utility function to switch between different eigenvalue solvers in a consistent way across the
    methods of the package.
    --------------------------------------------------------------------------------------------------------------------
    Parameters:
    -----------
    :param A: np.array,
        Array of which to compute the eigenvalues (assumed symmetric positive semi-definite).
    :param n_modes: int,
        Number of modes to be computed.
    :param eig_solver: str,
        can be:

        'svd_sklearn_randomized'
            This uses the truncated SVD approach, which picks either randomized_svd or scipy's svds.
            By default, it should mostly pick the first.

        'eigsh'
            From scipy sparse. A compromise between the previous and the following.

        'eigh'
            From scipy.linalg. The most precise, although a bit more expensive.

    Returns
    --------
    :return Psi_P: np.array (N_T x n_modes), eigenvectors
    :return Sigma_P: np.array (n_modes,), square roots of the eigenvalues
    """
    if eig_solver.lower() == 'svd_sklearn_randomized':
        Psi_P, Lambda_P = svds_RND(A, n_modes)
    elif eig_solver.lower() == 'eigh':
        n = np.shape(A)[0]
        Lambda_P, Psi_P = eigh(A, subset_by_index=[n - n_modes, n - 1])
        # eigh does not return the eigenpairs in decreasing order of the
        # eigenvalues, hence we sort them manually:
        idx = np.flip(np.argsort(Lambda_P))
        Lambda_P = Lambda_P[idx]
        Psi_P = Psi_P[:, idx]
    elif eig_solver.lower() == 'eigsh':
        Lambda_P, Psi_P = eigsh(A, k=n_modes)
        # eigsh does not return the eigenpairs in decreasing order either,
        # hence the same manual sorting:
        idx = np.flip(np.argsort(Lambda_P))
        Lambda_P = Lambda_P[idx]
        Psi_P = Psi_P[:, idx]
    else:
        raise ValueError('eig_solver must be svd_sklearn_randomized, eigh or eigsh')

    Sigma_P = np.sqrt(Lambda_P)

    return Psi_P, Sigma_P


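# A minimal usage sketch (hypothetical sizes): the leading 10 eigenpairs of a
# symmetric positive semi-definite temporal correlation matrix K = D.T @ D:
#
#   >>> K = D.T @ D                       # (200, 200), with D as above
#   >>> Psi, Sigma = switch_eigs(K, 10, 'eigh')
#   >>> Psi.shape, Sigma.shape
#   ((200, 10), (10,))

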
def segment_and_fft(D: np.ndarray,
                    F_S: float,
                    L_B: int,
                    O_B: int,
                    n_processes: int = 1) -> tuple:
    """
    Partition the snapshot matrix D into overlapping time windows and compute
    the FFT of each block (optionally in parallel). Auxiliary function for the
    SPOD (Towne approach).

    Parameters
    ----------
    D : ndarray, shape (n_space, n_time)
        Snapshot matrix, where each column is the state at a time instant.
    F_S : float
        Sampling frequency in Hz.
    L_B : int
        Length of each FFT block (window size).
    O_B : int
        Number of samples to overlap between consecutive blocks.
    n_processes : int, optional
        Number of parallel processes for block FFT. If 1, uses vectorized np.fft.

    Returns
    -------
    D_hat_full : ndarray, shape (n_space, L_B, n_blocks)
        FFT of each block along the time axis (all frequency bins).
    freqs_pos : ndarray, shape (n_freq_bins,)
        Non-negative frequency vector; the corresponding bins are the first
        len(freqs_pos) entries of the second axis of D_hat_full.
    n_processes : int
        The number of processes actually used (1 if joblib is unavailable).
    """

    n_s, n_t = D.shape

    '''
    For this SPOD, we assemble a tensor of overlapping chunks (similar to the Welch method).
    This tensor has size

    D_t \in \mathbb{R}^{n_s \times n_B \times n_P}

    where:
     - n_s is the size of the spatial grid (unchanged)
     - n_B is the block length
     - n_P is the number of partitions (blocks)
    '''
    ind = np.arange(n_t)
    indices = overlap(ind, len_chunk=L_B, len_sep=O_B)

    n_p, n_b = indices.shape

    # Fancy indexing preserves the data type of the main D (memory saving)
    D_t = D[:, indices].transpose(0, 2, 1)

    if n_processes > 1:
        try:
            from joblib import Parallel, delayed
        except ImportError:
            warnings.warn("Parallel library joblib is not installed. Reverting to a single process.")
            n_processes = 1

    if n_processes > 1:
        print('-------------------------------------------------- \n')
        print('|\t Performing FFT of D_t in parallel on {} processors \t|'.format(n_processes))
        print('-------------------------------------------------- \n')

        def _fft_block(block):
            # block is D_t[:, :, k]
            return np.fft.fft(block, n=n_b, axis=1)

        fft_blocks = Parallel(n_jobs=n_processes, backend='threading')(
            delayed(_fft_block)(D_t[:, :, k]) for k in range(n_p)
        )
        # stack back into (n_s, n_b, n_p)
        D_hat_full = np.stack(fft_blocks, axis=2)
    else:
        D_hat_full = np.fft.fft(D_t, n=n_b, axis=1)

    freqs = np.fft.fftfreq(n_b) * F_S
    pos = freqs >= 0

    # only returning the non-negative frequencies and discarding the complex conjugates
    freqs_pos = freqs[pos]

    return D_hat_full, freqs_pos, n_processes


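# A minimal usage sketch (hypothetical sampling parameters): blocks of 64
# snapshots with 50% overlap from a signal sampled at 1 kHz, with D as above:
#
#   >>> D_hat, freqs_pos, n_proc = segment_and_fft(D, F_S=1000.0, L_B=64, O_B=32)
#   >>> D_hat.shape                       # (n_s, L_B, n_blocks)
#   (2000, 64, 5)
#   >>> freqs_pos.size                    # non-negative bins only
#   32

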
def pod_from_dhat(D_hat: np.ndarray,
                  n_modes: int,
                  n_freqs: int,
                  svd_solver: str,
                  n_processes: int = 1):
    """
    Compute SPOD modes & energies from the FFT tensor.

    Parameters
    ----------
    D_hat : ndarray, shape (n_s, n_freq_bins, n_blocks)
        FFT of each block; only the first n_freqs (non-negative) frequency bins are used.
    n_modes : int
        Number of modes to extract per frequency.
    n_freqs : int
        Number of non-negative frequency bins to process.
    svd_solver : str
        Kept for interface consistency; each frequency slice is currently
        decomposed with np.linalg.svd.
    n_processes : int
        If >1, parallelize the SVDs over frequencies.

    Returns
    -------
    Phi : ndarray, shape (n_s, n_modes, n_freqs)
        Spatial SPOD modes.
    Sigma : ndarray, shape (n_modes, n_freqs)
        Modal energies.
    """
    # n_freqs is n_b but considering only the non-negative frequencies.

    n_s, n_b, n_p = D_hat.shape

    Phi = np.zeros((n_s, n_modes, n_freqs), dtype=complex)
    Sigma = np.zeros((n_modes, n_freqs))

    if n_processes > 1:
        try:
            from joblib import Parallel, delayed
        except ImportError:
            warnings.warn(
                'joblib not installed: falling back to serial POD'
            )
            n_processes = 1

    if n_processes > 1:
        def _svd_slice(j):
            U, s, _ = np.linalg.svd(D_hat[:, j, :], full_matrices=False)
            return U[:, :n_modes], s[:n_modes] / (n_s * n_b)

        results = Parallel(
            n_jobs=n_processes,
            backend='threading'
        )(
            delayed(_svd_slice)(j) for j in range(n_freqs)
        )
        # joblib preserves the order of the outputs
        for j, (Uj, sj) in enumerate(results):
            Phi[:, :, j] = Uj
            Sigma[:, j] = sj
    else:
        for j in range(n_freqs):
            U, s, _ = np.linalg.svd(
                D_hat[:, j, :], full_matrices=False
            )
            Phi[:, :, j] = U[:, :n_modes]
            Sigma[:, j] = s[:n_modes] / (n_s * n_b)

    return Phi, Sigma


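# A minimal usage sketch continuing the segment_and_fft example above: SPOD
# modes and energies per non-negative frequency bin, keeping 3 modes:
#
#   >>> Phi, Sigma = pod_from_dhat(D_hat, n_modes=3, n_freqs=freqs_pos.size,
#   ...                            svd_solver='svd_sklearn_truncated')
#   >>> Phi.shape, Sigma.shape
#   ((2000, 3, 32), (3, 32))

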
def apply_weights(D: np.ndarray, weights: np.ndarray) -> np.ndarray:
    """
    Apply (quadrature) weights to the snapshot matrix.

    If `weights` is empty, return D unchanged.
    If `weights` has length n_s, it provides one weight per row of D and is used directly.
    If `weights` has length n_s/2, the grid is assumed to carry a two-component
    (2D) field stacked row-wise, and the weights are tiled over both components.
    Returns D_star = D * sqrt(w) applied row-wise.
    """
    n_s, n_t = D.shape
    if weights.size == 0:
        return D
    w = np.asarray(weights, dtype=D.dtype)
    if w.size == n_s:
        # one weight per row of D
        w_full = w
    elif 2 * w.size == n_s:
        # 2D field: repeat the per-point weights for both components
        w_full = np.tile(w, 2)
    else:
        raise ValueError(f"weights must be length {n_s} or {n_s // 2}, got {w.size}")
    # apply weights: scale each row i by sqrt(w_full[i])
    return (D.T * np.sqrt(w_full)).T


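# A minimal usage sketch, with D as above: uniform weights (one per row of D)
# return the matrix unchanged; non-uniform weights scale row i by sqrt(w[i]):
#
#   >>> w = np.ones(2000)
#   >>> D_star = apply_weights(D, w)
#   >>> np.allclose(D_star, D)
#   True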