modulo-vki 2.0.7__py3-none-any.whl → 2.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- modulo_vki/__init__.py +0 -22
- modulo_vki/core/_dft.py +92 -21
- modulo_vki/core/_dmd_s.py +48 -39
- modulo_vki/core/_k_matrix.py +145 -17
- modulo_vki/core/_mpod_time.py +46 -25
- modulo_vki/core/_pod_space.py +3 -2
- modulo_vki/core/_pod_time.py +2 -1
- modulo_vki/core/spatial_structures.py +367 -0
- modulo_vki/core/temporal_structures.py +241 -0
- modulo_vki/core/utils.py +474 -0
- modulo_vki/modulo.py +751 -682
- modulo_vki/modulo_old.py +1368 -0
- modulo_vki/utils/_utils.py +19 -2
- modulo_vki/utils/others.py +18 -9
- {modulo_vki-2.0.7.dist-info → modulo_vki-2.1.0.dist-info}/METADATA +123 -30
- modulo_vki-2.1.0.dist-info/RECORD +26 -0
- {modulo_vki-2.0.7.dist-info → modulo_vki-2.1.0.dist-info}/WHEEL +1 -1
- modulo_vki-2.0.7.dist-info/RECORD +0 -22
- {modulo_vki-2.0.7.dist-info → modulo_vki-2.1.0.dist-info/licenses}/LICENSE +0 -0
- {modulo_vki-2.0.7.dist-info → modulo_vki-2.1.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,367 @@
+import math
+import numpy as np
+from tqdm import tqdm
+import os
+
+
+def Spatial_basis_POD(D, PSI_P, Sigma_P, MEMORY_SAVING,
+                      N_T, FOLDER_OUT='./', N_PARTITIONS=1, SAVE_SPATIAL_POD=False,
+                      rescale=False, verbose=True):
+    """
+    This function computes the POD spatial basis from the temporal one.
+
+    :param D: np.array.
+           Matrix on which to project the temporal basis
+    :param PSI_P: np.array.
+           POD's Psis
+    :param Sigma_P: np.array.
+           POD's Sigmas
+    :param MEMORY_SAVING: bool.
+           Inherited from the main class; if True, turns on the MEMORY_SAVING feature, loading the partitions and starting the proper algorithm
+    :param N_T: int.
+           Number of temporal snapshots
+    :param FOLDER_OUT: str.
+           Folder in which the results are saved if SAVE_SPATIAL_POD = True
+    :param N_PARTITIONS: int.
+           Number of partitions to be loaded. If D has been partitioned using MODULO, this parameter is automatically inherited from the main class. To be specified otherwise.
+
+    :param SAVE_SPATIAL_POD: bool.
+           If True, results are saved on disk and released from memory
+
+    :param rescale: bool.
+           If False, the provided Sigmas are used for the normalization. If True, they are ignored and the normalization is recomputed column by column.
+           For the standard POD, False is the way to go.
+           However, for other decompositions (e.g. the SPOD_s) you must use rescale=True
+
+    :return Phi_P: np.array.
+           POD's Phis
+    """
+
+    R = PSI_P.shape[1]
+
+    if not MEMORY_SAVING:
+        N_S = D.shape[0]
+
+        if rescale:
+            # The following is the general normalization approach.
+            # Not needed for POD but required for SPOD.
+            Phi_P = np.zeros((N_S, R))
+            # N_S = D.shape[0] unused variable
+            PHI_P_SIGMA_P = np.dot(D, PSI_P)
+            if verbose:
+                print("Completing Spatial Structures Modes: \n")
+
+            for i in tqdm(range(0, R)):
+                # Normalize the columns of C to get spatial modes
+                Phi_P[:, i] = PHI_P_SIGMA_P[:, i] / Sigma_P[i]
+
+        else:
+            # We take only the first R modes.
+            Sigma_P_t = Sigma_P[0:R]
+            Sigma_P_Inv_V = 1 / Sigma_P_t
+            # So we have the inverse
+            Sigma_P_Inv = np.diag(Sigma_P_Inv_V)
+            # Here is the one shot projection:
+            Phi_P = np.linalg.multi_dot([D, PSI_P[:, 0:R], Sigma_P_Inv])
+
+        if SAVE_SPATIAL_POD:
+            os.makedirs(FOLDER_OUT + 'POD', exist_ok=True)
+            np.savez(FOLDER_OUT + '/POD/pod_spatial_basis', phis=Phi_P)
+            # removed PHI_P_SIGMA_P=PHI_P_SIGMA_P, not present if not rescale and not needed (?)
+
+        return Phi_P
+
+    else:
+
+        N_S = np.shape(np.load(FOLDER_OUT + "/data_partitions/di_1.npz")['di'])[0]
+        dim_col = math.floor(N_T / N_PARTITIONS)
+        dim_row = math.floor(N_S / N_PARTITIONS)
+        dr = np.zeros((dim_row, N_T))
+
+        # 1 -- Converting partitions dC to dR
+        if N_S % N_PARTITIONS != 0:
+            tot_blocks_row = N_PARTITIONS + 1
+        else:
+            tot_blocks_row = N_PARTITIONS
+
+        if N_T % N_PARTITIONS != 0:
+            tot_blocks_col = N_PARTITIONS + 1
+        else:
+            tot_blocks_col = N_PARTITIONS
+
+        # --- Loading Psi_P
+        fixed = 0
+        R1 = 0
+        R2 = 0
+        C1 = 0
+        C2 = 0
+
+        for i in range(1, tot_blocks_row + 1):
+
+            if (i == tot_blocks_row) and (N_S - dim_row * N_PARTITIONS > 0):
+                dim_row_fix = N_S - dim_row * N_PARTITIONS
+                dr = np.zeros((dim_row_fix, N_T))
+
+            for b in range(1, tot_blocks_col + 1):
+                di = np.load(FOLDER_OUT + f"/data_partitions/di_{b}.npz")['di']
+                if (i == tot_blocks_row) and (N_S - dim_row * N_PARTITIONS > 0) and fixed == 0:
+                    R1 = R2
+                    R2 = R1 + (N_S - dim_row * N_PARTITIONS)
+                    fixed = 1
+                elif fixed == 0:
+                    R1 = (i - 1) * dim_row
+                    R2 = i * dim_row
+
+                if (b == tot_blocks_col) and (N_T - dim_col * N_PARTITIONS > 0):
+                    C1 = C2
+                    C2 = C1 + (N_T - dim_col * N_PARTITIONS)
+                else:
+                    C1 = (b - 1) * dim_col
+                    C2 = b * dim_col
+
+                np.copyto(dr[:, C1:C2], di[R1:R2, :])
+
+            PHI_SIGMA_BLOCK = np.dot(dr, PSI_P)
+            np.savez(FOLDER_OUT + f"/PHI_SIGMA_{i}",
+                     phi_sigma=PHI_SIGMA_BLOCK)
+
+        # 3 - Converting partitions R to partitions C and get Sigmas
+        dim_col = math.floor(R / N_PARTITIONS)
+        dim_row = math.floor(N_S / N_PARTITIONS)
+        dps = np.zeros((N_S, dim_col))
+        Phi_P = np.zeros((N_S, R))
+
+        if R % N_PARTITIONS != 0:
+            tot_blocks_col = N_PARTITIONS + 1
+        else:
+            tot_blocks_col = N_PARTITIONS
+
+        fixed = 0
+
+        for i in range(1, tot_blocks_col + 1):
+
+            if (i == tot_blocks_col) and (R - dim_col * N_PARTITIONS > 0):
+                dim_col_fix = R - dim_col * N_PARTITIONS
+                dps = np.zeros((N_S, dim_col_fix))
+
+            for b in range(1, tot_blocks_row + 1):
+
+                PHI_SIGMA_BLOCK = np.load(FOLDER_OUT + f"/PHI_SIGMA_{b}.npz")['phi_sigma']
+
+                if (i == tot_blocks_col) and (R - dim_col * N_PARTITIONS > 0) and fixed == 0:
+                    R1 = R2
+                    R2 = R1 + (R - dim_col * N_PARTITIONS)
+                    fixed = 1
+                elif fixed == 0:
+                    R1 = (i - 1) * dim_col
+                    R2 = i * dim_col
+
+                if (b == tot_blocks_row) and (N_S - dim_row * N_PARTITIONS > 0):  # Change here
+                    C1 = C2
+                    C2 = C1 + (N_S - dim_row * N_PARTITIONS)
+                else:
+                    C1 = (b - 1) * dim_row
+                    C2 = b * dim_row
+
+                dps[C1:C2, :] = PHI_SIGMA_BLOCK[:, R1:R2]
+
+            # Computing Sigmas and Phis
+            if rescale:
+                for j in range(R1, R2):
+                    jj = j - R1
+                    Sigma_P[jj] = np.linalg.norm(dps[:, jj])
+                    Phi_P = dps[:, jj] / Sigma_P[jj]
+                    np.savez(FOLDER_OUT + f"/phi_{j + 1}", phi_p=Phi_P)
+            else:
+                for j in range(R1, R2):
+                    jj = j - R1
+                    Phi_P = dps[:, jj] / Sigma_P[j]  # Change here
+                    np.savez(FOLDER_OUT + f"/phi_{j + 1}", phi_p=Phi_P)
+
+        Phi_P_M = np.zeros((N_S, R))
+        for j in range(R):
+            Phi_P_V = np.load(FOLDER_OUT + f"/phi_{j + 1}.npz")['phi_p']
+            Phi_P_M[:, j] = Phi_P_V
+
+        return Phi_P_M
+
+
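A minimal sketch of the in-memory path (MEMORY_SAVING=False) on synthetic data; the random snapshot matrix, the mode count, and the use of numpy's eigendecomposition to build the temporal basis are assumptions of the example, not values from the package:

    import numpy as np
    from modulo_vki.core.spatial_structures import Spatial_basis_POD

    rng = np.random.default_rng(0)
    D = rng.standard_normal((500, 100))          # 500 grid points, 100 snapshots (assumed sizes)

    # Temporal basis from the snapshot correlation matrix K = D^T D
    lam, Psi = np.linalg.eigh(D.T @ D)
    idx = np.argsort(lam)[::-1][:10]             # keep the 10 most energetic modes
    Psi_P, Sigma_P = Psi[:, idx], np.sqrt(lam[idx])

    # One-shot projection: Phi = D @ Psi @ diag(1/Sigma)
    Phi_P = Spatial_basis_POD(D, PSI_P=Psi_P, Sigma_P=Sigma_P,
                              MEMORY_SAVING=False, N_T=D.shape[1])
    print(Phi_P.shape)                           # (500, 10)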
+def spatial_basis_mPOD(D, PSI_M, N_T, N_PARTITIONS, N_S, MEMORY_SAVING, FOLDER_OUT, SAVE: bool = False, weights: np.array = np.array([]), SIGMA_TYPE: str = "accurate", SIGMA_M: np.array = np.array([])):
+    """
+    Given the temporal basis of the mPOD, the spatial structures are now computed.
+
+    :param D:
+           Snapshot matrix D; if memory saving is active, this is ignored.
+    :param PSI_M: np.array.
+           The mPOD temporal basis Psi tentatively assembled from all scales
+    :param N_T: int.
+           Number of snapshots
+    :param N_PARTITIONS: int.
+           Number of partitions in the memory saving
+    :param N_S: int.
+           Number of grid points in space
+    :param MEMORY_SAVING: bool.
+           Inherited from the main class; if True, turns on the MEMORY_SAVING feature, loading the partitions and starting the proper algorithm
+    :param FOLDER_OUT: str.
+           Folder in which the results are saved if SAVE = True
+    :param SAVE: bool.
+           If True, results are saved on disk and released from memory
+    :param weights: np.array
+           Weight vector [w_1, ..., w_{N_s}] where w_i = area_cell_i / area_grid. Only needed if the grid is non-uniform and MEMORY_SAVING == True
+    :param SIGMA_TYPE: {'accurate', 'fast'}
+           If 'accurate', recompute the Sigmas after the QR polishing. Slightly slower than the 'fast' option, in which the Sigmas are not recomputed.
+    :param SIGMA_M: np.array.
+           The mPOD Sigmas before the QR polishing, tentatively assembled from all scales
+    :return: Phi_M, Psi_M, Sigma_M: np.arrays. The final (sorted) mPOD decomposition
+    """
+
+    R1 = 0; R2 = 0
+    if MEMORY_SAVING:
+        SAVE = True
+        os.makedirs(FOLDER_OUT + '/mPOD/', exist_ok=True)
+        dim_col = math.floor(N_T / N_PARTITIONS)
+        dim_row = math.floor(N_S / N_PARTITIONS)
+        dr = np.zeros((dim_row, N_T))
+
+        # 1 --- Converting partitions dC to dR
+        if N_S % N_PARTITIONS != 0:
+            tot_blocks_row = N_PARTITIONS + 1
+        else:
+            tot_blocks_row = N_PARTITIONS
+
+        if N_T % N_PARTITIONS != 0:
+            tot_blocks_col = N_PARTITIONS + 1
+        else:
+            tot_blocks_col = N_PARTITIONS
+
+        fixed = 0
+
+        for i in range(1, tot_blocks_row + 1):
+            # --- Check if dim_row has to be fixed:
+            if i == tot_blocks_row and (N_S - dim_row * N_PARTITIONS > 0):
+                dim_row_fix = N_S - dim_row * N_PARTITIONS
+                dr = np.zeros((dim_row_fix, N_T))
+
+            for cont in range(1, tot_blocks_col + 1):
+                di = np.load(FOLDER_OUT + f"/data_partitions/di_{cont}.npz")['di']
+
+                if i == tot_blocks_row and (N_S - dim_row * N_PARTITIONS > 0) and fixed == 0:
+                    R1 = R2
+                    R2 = R1 + (N_S - dim_row * N_PARTITIONS)
+                    fixed = 1
+                elif fixed == 0:
+                    R1 = (i - 1) * dim_row
+                    R2 = i * dim_row
+
+                # Same as before, but we don't need the variable fixed because if
+                # the code runs this loop, it will be the last time
+
+                if cont == tot_blocks_col and (N_T - dim_col * N_PARTITIONS > 0):
+                    C1 = C2
+                    C2 = C1 + (N_T - dim_col * N_PARTITIONS)
+                else:
+                    C1 = (cont - 1) * dim_col
+                    C2 = cont * dim_col
+
+                dr[:, C1:C2] = di[R1:R2, :]
+
+            # 2 --- Computing partitions R of PHI_SIGMA
+            PHI_SIGMA_BLOCK = dr @ PSI_M
+            np.savez(FOLDER_OUT + f'/mPOD/phi_sigma_{i}', PHI_SIGMA_BLOCK)
+
+        # 3 --- Convert partitions R to partitions C and get SIGMA
+        R = PSI_M.shape[1]
+        dim_col = math.floor(R / N_PARTITIONS)
+        dim_row = math.floor(N_S / N_PARTITIONS)
+        dps = np.zeros((N_S, dim_col))
+        SIGMA_M = []
+        PHI_M = []
+
+        if R % N_PARTITIONS != 0:
+            tot_blocks_col = N_PARTITIONS + 1
+        else:
+            tot_blocks_col = N_PARTITIONS
+
+        fixed = 0
+
+        # Here we apply the same logic of the loop before
+
+        for j in range(1, tot_blocks_col + 1):
+
+            if j == tot_blocks_col and (R - dim_col * N_PARTITIONS > 0):
+                dim_col_fix = R - dim_col * N_PARTITIONS
+                dps = np.zeros((N_S, dim_col_fix))
+
+            for k in range(1, tot_blocks_row + 1):
+                PHI_SIGMA_BLOCK = np.load(FOLDER_OUT + f"/mPOD/phi_sigma_{k}.npz")['arr_0']
+
+                if j == tot_blocks_col and (R - dim_col * N_PARTITIONS > 0) and fixed == 0:
+                    R1 = R2
+                    R2 = R1 + (R - dim_col * N_PARTITIONS)
+                    fixed = 1
+                elif fixed == 0:
+                    R1 = (j - 1) * dim_col
+                    R2 = j * dim_col
+
+                if k == tot_blocks_row and (N_S - dim_row * N_PARTITIONS > 0):
+                    C1 = C2
+                    C2 = C1 + (N_S - dim_row * N_PARTITIONS)
+                else:
+                    C1 = (k - 1) * dim_row
+                    C2 = k * dim_row
+
+                dps[C1:C2, :] = PHI_SIGMA_BLOCK[:, R1:R2]
+
+            # Getting sigmas and phis
+            for z in range(R1, R2):
+                zz = z - R1
+                if weights.size == 0:
+                    SIGMA_M.append(np.linalg.norm(dps[:, zz]))
+                else:
+                    SIGMA_M.append(np.linalg.norm(dps[:, zz] * np.sqrt(weights)))
+                tmp = dps[:, zz] / SIGMA_M[z]
+                # print(f'Shape tmp = {np.shape(tmp)}')
+                PHI_M.append(tmp)
+                np.savez(FOLDER_OUT + f'/mPOD/phi_{z + 1}', tmp)
+
+        Indices = np.argsort(SIGMA_M)[::-1]  # find indices for sorting in decreasing order
+        SIGMA_M = np.asarray(SIGMA_M)
+        PHI_M = np.asarray(PHI_M).T
+        PSI_M = np.asarray(PSI_M)
+        Sorted_Sigmas = SIGMA_M[Indices]  # Sort all the sigmas
+        Phi_M = PHI_M[:, Indices]  # Sorted Spatial Structures Matrix
+        Psi_M = PSI_M[:, Indices]  # Sorted Temporal Structures Matrix
+        Sigma_M = Sorted_Sigmas  # Sorted Amplitude Matrix
+
+    else:
+        R = PSI_M.shape[1]
+        PHI_M_SIGMA_M = np.dot(D, PSI_M)
+
+        # Re-compute the sigmas after the QR polishing
+        SIGMA_TYPE = SIGMA_TYPE.lower()
+        if SIGMA_TYPE != 'fast':
+            if SIGMA_TYPE != 'accurate':
+                print('Warning: MODULO continues to run although SIGMA_TYPE was wrongly defined. Please set it to \'accurate\' or \'fast\' ')
+            if weights.size == 0:
+                SIGMA_M = np.linalg.norm(PHI_M_SIGMA_M, axis=0)
+            else:
+                SIGMA_M = np.linalg.norm(PHI_M_SIGMA_M * np.sqrt(weights), axis=0)
+
+        # Normalize the columns of C to get spatial modes
+        PHI_M = PHI_M_SIGMA_M / SIGMA_M
+
+        Indices = np.flipud(np.argsort(SIGMA_M))  # find indices for sorting in decreasing order
+        Sorted_Sigmas = SIGMA_M[Indices]  # Sort all the sigmas
+        Phi_M = PHI_M[:, Indices]  # Sorted Spatial Structures Matrix
+        Psi_M = PSI_M[:, Indices]  # Sorted Temporal Structures Matrix
+        Sigma_M = Sorted_Sigmas  # Sorted Amplitude Matrix
+
+    if SAVE:
+        '''Saving results in MODULO tmp proper folder'''
+        os.makedirs(FOLDER_OUT + '/mPOD/', exist_ok=True)
+        np.savez(FOLDER_OUT + "/mPOD/sorted_phis", Phi_M)
+        np.savez(FOLDER_OUT + "/mPOD/sorted_psis", Psi_M)
+        np.savez(FOLDER_OUT + "/mPOD/sorted_sigma", Sorted_Sigmas)
+
+    return Phi_M, Psi_M, Sigma_M
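A minimal sketch of the in-memory branch (MEMORY_SAVING=False) on synthetic data; the random snapshot matrix and the QR-generated orthonormal temporal basis are assumptions of the example (in a real run PSI_M would come from temporal_basis_mPOD):

    import numpy as np
    from modulo_vki.core.spatial_structures import spatial_basis_mPOD

    rng = np.random.default_rng(1)
    D = rng.standard_normal((500, 100))
    PSI_M, _ = np.linalg.qr(rng.standard_normal((100, 8)))   # stand-in temporal basis

    Phi_M, Psi_M, Sigma_M = spatial_basis_mPOD(D, PSI_M, N_T=100, N_PARTITIONS=1,
                                               N_S=500, MEMORY_SAVING=False,
                                               FOLDER_OUT='./', SAVE=False)
    # Amplitudes are returned sorted in decreasing order; spatial modes have unit norm
    print(Sigma_M[:3], np.allclose(np.linalg.norm(Phi_M, axis=0), 1.0))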
@@ -0,0 +1,241 @@
+import os
+import numpy as np
+from scipy.signal import firwin  # To create FIR kernels
+from tqdm import tqdm
+from modulo_vki.utils import conv_m, conv_m_2D, switch_eigs
+
+
+def temporal_basis_mPOD(K, Nf, Ex, F_V, Keep,
+                        boundaries, MODE='reduced',
+                        dt=1, FOLDER_OUT: str = "./",
+                        MEMORY_SAVING: bool = False, SAT: int = 100,
+                        n_Modes=10, eig_solver: str = 'svd_sklearn_randomized', conv_type: str = '1d', verbose: bool = True):
+    '''
+    This function computes the PSIs for the mPOD. In this implementation, a "dft-trick" is proposed, in order to avoid
+    expensive SVDs. Randomized SVD is used by default for the diagonalization.
+
+    :param K:
+           np.array Temporal correlation matrix
+    :param dt: float.
+           1/fs, the dt between snapshots. Units in seconds.
+    :param Nf:
+           np.array. Vector collecting the order of the FIR filters used in each scale. It must be of size len(F_V) + 1, where the first element defines
+           the low pass filter, and the last one is the high pass filter. The rest are for the band-pass filters.
+    :param Ex: int.
+           Extension at the boundaries of K to impose the boundary conditions (see boundaries). It must be at least as large as the largest entry of Nf.
+    :param F_V: np.array.
+           Frequency splitting vector, containing the frequencies of each scale (see article). If the time axis is in seconds, these frequencies are in Hz.
+    :param Keep: np.array.
+           Vector defining which scale to keep.
+    :param boundaries: str -> {'nearest', 'reflect', 'wrap' or 'extrap'}.
+           In order to avoid 'edge effects' if the time correlation matrix is not periodic, several boundary conditions can be used. Options are (from scipy.ndimage.convolve):
+           ‘reflect’ (d c b a | a b c d | d c b a) The input is extended by reflecting about the edge of the last pixel.
+           ‘nearest’ (a a a a | a b c d | d d d d) The input is extended by replicating the last pixel.
+           ‘wrap’ (a b c d | a b c d | a b c d) The input is extended by wrapping around to the opposite edge.
+    :param MODE: str -> {‘reduced’, ‘complete’, ‘r’, ‘raw’}
+           As a final step of this algorithm, the orthogonality is imposed via a QR-factorization. This parameter defines how to perform such factorization, according to numpy.
+           Options: this is a wrapper to np.linalg.qr(_, mode=MODE). Check numpy's documentation.
+           If ‘reduced’, the final basis will not necessarily be full. If ‘complete’, the final basis will always be full.
+    :param FOLDER_OUT: str.
+           This is the directory where intermediate results will be stored if the memory saving is active. It will be ignored if MEMORY_SAVING=False.
+    :param MEMORY_SAVING: bool.
+           If memory saving is active, the results will be saved locally. Nevertheless, since Psi_M is usually not expensive, it will be returned.
+    :param SAT: int.
+           Maximum number of modes per scale. The user can decide how many modes to compute; otherwise, MODULO sets the default SAT=100.
+    :param n_Modes: int.
+           Total number of modes that will be finally exported
+    :param eig_solver: str.
+           This is the eigenvalue solver that will be used. Refer to switch_eigs for the options.
+    :param conv_type: {'1d', '2d'}
+           If 1d, compute K_hat applying 1d FIR filters to the columns and then rows of the extended K.
+           More robust against windowing effects but more expensive (useful for modes that are slow compared to the observation time).
+           If 2d, compute K_hat applying a 2d FIR filter on the extended K.
+    :return PSI_M: np.array.
+           The mPOD PSIs. Yet to be sorted!
+    '''
+
+    if Ex < np.max(Nf):
+        raise RuntimeError("For the mPOD temporal basis computation Ex must be larger than or equal to max(Nf)")
+
+    # Normalize F_V by the Nyquist frequency and initialise the number of scales M
+    fs = 1.0 / dt
+    nyq = fs / 2.0
+
+    cuts = np.asarray(F_V) / nyq
+    edges = np.concatenate(([0], cuts, [1]))
+    M = len(edges) - 1
+
+    assert len(Nf) == M, "Nf must be of size M, where M is the number of scales. Instead got sizes {}, {}".format(len(Nf), M)
+    assert len(F_V) == M - 1, "F_V must be of size M-1, where M is the number of scales. Instead got sizes {}, {}".format(len(F_V), M - 1)
+    assert len(Keep) == M, "Keep must be of size M, where M is the number of scales. Instead got sizes {}, {}".format(len(Keep), M)
+
+    n_t = K.shape[1]
+
+    # DFT-trick below: computing frequency bins.
+    freqs = np.fft.fftfreq(n_t)  # * fs
+
+    # init modes accumulators
+    Psi_M = np.empty((n_t, 0))
+    Sigma_M = np.empty((0,))
+
+    def _design_filter_and_mask(m):
+        # use normalized cutoffs for firwin, in agreement with the normalized freqs above
+        low, high = edges[m], edges[m + 1]
+        if m == 0:
+            # low-pass
+            h = firwin(numtaps=Nf[m], cutoff=high,  # fs=Fs,
+                       window='hamming', pass_zero=True)
+            mask = lambda f: np.abs(f) <= high / 2
+        elif m == M - 1:
+            # high-pass
+            h = firwin(numtaps=Nf[m], cutoff=low,  # fs=Fs,
+                       window='hamming', pass_zero=False)
+            mask = lambda f: np.abs(f) >= low / 2
+        else:
+            # band-pass
+            h = firwin(numtaps=Nf[m], cutoff=[low, high],  # fs=Fs,
+                       window='hamming', pass_zero=False)
+            mask = lambda f: (np.abs(f) >= low / 2) & (np.abs(f) <= high / 2)
+        return h, mask
+
+    for m in range(M):
+        if not Keep[m]:
+            if verbose:
+                print(f"Skipping band {m+1}/{M}")
+            continue
+
+        # design filter & mask
+        h, mask_fn = _design_filter_and_mask(m)
+        band_label = f"{edges[m]*nyq:.2f}–{edges[m+1]*nyq:.2f} Hz"
+        if verbose:
+            print(f"\nFiltering band {m+1}/{M}: {band_label}")
+
+        # rank estimate
+        mask_idxs = mask_fn(freqs)
+
+        R_K = min(np.count_nonzero(mask_idxs), SAT, n_Modes)
+        if verbose:
+            print(f" → estimating {R_K} modes from {mask_idxs.sum()} freq bins")
+
+        if R_K == 0:
+            print("skipping")
+            continue
+
+        # apply filter to correlation matrix
+        conv_type = conv_type.lower()
+        if conv_type == '2d':
+            Kf = conv_m_2D(K, h, Ex, boundaries)
+        else:
+            Kf = conv_m(K, h, Ex, boundaries)
+
+        # diagonalize
+        Psi_P, Sigma_P = switch_eigs(Kf, R_K, eig_solver)
+
+        # append
+        Psi_M = np.hstack((Psi_M, Psi_P))
+        Sigma_M = np.concatenate((Sigma_M, Sigma_P))
+
+    # 5) Sort modes by energy and QR-polish
+    order = np.argsort(Sigma_M)[::-1]
+    Psi_M = Psi_M[:, order]
+    if verbose:
+        print("\nQR polishing...")
+    PSI_M, _ = np.linalg.qr(Psi_M, mode=MODE)
+
+    Sigma_M = Sigma_M[order]  # potentially used if the effect of the QR polishing on PSI_M is negligible
+
+    if MEMORY_SAVING:
+        os.makedirs(FOLDER_OUT + '/mPOD', exist_ok=True)
+        np.savez(FOLDER_OUT + '/mPOD/Psis', Psis=PSI_M)
+
+    return PSI_M[:, :n_Modes], Sigma_M[:n_Modes]
+
+
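The scale splitting inside temporal_basis_mPOD can be illustrated without MODULO's internal helpers (conv_m, switch_eigs). The sketch below, with assumed cut-off frequencies and filter orders, designs the same low-pass / band-pass / high-pass firwin kernels and checks that their magnitude responses roughly tile the spectrum up to the Nyquist frequency:

    import numpy as np
    from scipy.signal import firwin, freqz

    # Two splitting frequencies (Hz) -> three scales; values are illustrative assumptions
    F_V, Nf, fs = [100.0, 300.0], [201, 201, 201], 2000.0
    edges = np.concatenate(([0.0], np.asarray(F_V) / (fs / 2.0), [1.0]))

    kernels = []
    for m in range(len(edges) - 1):
        low, high = edges[m], edges[m + 1]
        if m == 0:                                   # low-pass scale
            h = firwin(Nf[m], high, window='hamming', pass_zero=True)
        elif m == len(edges) - 2:                    # high-pass scale
            h = firwin(Nf[m], low, window='hamming', pass_zero=False)
        else:                                        # band-pass scale
            h = firwin(Nf[m], [low, high], window='hamming', pass_zero=False)
        kernels.append(h)

    # Sum of the magnitude responses of the three scales: close to 1 away from the cut-offs
    H_sum = sum(np.abs(freqz(h, worN=512)[1]) for h in kernels)
    print(np.round(H_sum[::128], 2))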
+def dft(N_T, F_S, D, FOLDER_OUT, SAVE_DFT=False):
+    """
+    Computes the Discrete Fourier Transform (DFT) from the provided dataset.
+
+    Note
+    ----
+    Memory saving feature is currently not supported by this function.
+
+    Parameters
+    ----------
+    N_T : int
+        Number of temporal snapshots.
+
+    F_S : float
+        Sampling frequency in Hz.
+
+    D : np.ndarray
+        Snapshot matrix.
+
+    FOLDER_OUT : str
+        Directory path where results are saved if `SAVE_DFT` is True.
+
+    SAVE_DFT : bool, default=False
+        If True, computed results are saved to disk and released from memory.
+
+    Returns
+    -------
+    Phi_F : np.ndarray
+        Complex spatial structures corresponding to each frequency mode.
+
+    Sorted_Freqs : np.ndarray
+        Frequency bins in Hz, sorted by decreasing mode amplitude.
+
+    SIGMA_F : np.ndarray
+        Real amplitudes associated with each frequency mode.
+    """
+    n_t = int(N_T)
+    Freqs = np.fft.fftfreq(n_t) * F_S  # Compute the frequency bins
+
+    # FFT along the snapshot axis
+    PHI_SIGMA = np.fft.fft(D, axis=1) / np.sqrt(n_t)
+    sigma_F = np.linalg.norm(PHI_SIGMA, axis=0)  # Compute the norm of each column
+
+    # make phi_F orthonormal
+    Phi_F = PHI_SIGMA / sigma_F
+
+    # Sort
+    Indices = np.argsort(-sigma_F)  # find indices for sorting in decreasing order
+    Sorted_Sigmas = sigma_F[Indices]  # Sort all the sigmas
+    Sorted_Freqs = Freqs[Indices]  # Sort all the frequencies accordingly.
+    Phi_F = Phi_F[:, Indices]  # Sorted Spatial Structures Matrix
+    SIGMA_F = Sorted_Sigmas  # Sorted Amplitude Matrix (vector)
+
+    if SAVE_DFT:
+        os.makedirs(FOLDER_OUT + 'DFT', exist_ok=True)
+        np.savez(FOLDER_OUT + 'DFT/dft_fitted', Freqs=Sorted_Freqs, Phis=Phi_F, Sigmas=SIGMA_F)
+
+    return Phi_F, Sorted_Freqs, SIGMA_F
+
+
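A quick check of dft on synthetic data; the 5 Hz test signal, grid size, noise level, and sampling rate are assumptions of the example:

    import numpy as np
    from modulo_vki.core.temporal_structures import dft

    rng = np.random.default_rng(2)
    F_S, N_T, N_S = 100.0, 200, 64
    t = np.arange(N_T) / F_S
    x = np.linspace(0, 1, N_S)
    D = np.sin(2 * np.pi * 5 * t)[None, :] * np.cos(2 * np.pi * x)[:, None]
    D += 0.01 * rng.standard_normal((N_S, N_T))   # weak noise so every frequency bin has a finite norm

    Phi_F, Sorted_Freqs, SIGMA_F = dft(N_T, F_S, D, FOLDER_OUT='./', SAVE_DFT=False)
    print(abs(Sorted_Freqs[0]))   # ~5.0: the most energetic bin sits at the forcing frequency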
+def Temporal_basis_POD(K, SAVE_T_POD=False, FOLDER_OUT='./',
+                       n_Modes=10, eig_solver: str = 'eigh', verbose=True):
+    """
+    This method computes the POD basis. You can find the theoretical background of the proper orthogonal decomposition in a nutshell here: https://youtu.be/8fhupzhAR_M
+
+    :param FOLDER_OUT: str. Folder in which the results will be saved (if SAVE_T_POD=True)
+    :param K: np.array. Temporal correlation matrix
+    :param SAVE_T_POD: bool. A flag deciding whether the results are saved on disk or not. If the MEMORY_SAVING feature is active, it is switched to True by default.
+    :param n_Modes: int. Number of modes that will be computed
+    :param eig_solver: str. Eigenvalue solver to be used throughout the computation
+    :return: Psi_P: np.array. POD's Psis
+    :return: Sigma_P: np.array. POD's Sigmas
+    """
+    if verbose:
+        print("Diagonalizing K...")
+    Psi_P, Sigma_P = switch_eigs(K, n_Modes, eig_solver)
+
+    if SAVE_T_POD:
+        os.makedirs(FOLDER_OUT + "/POD/", exist_ok=True)
+        print("Saving POD temporal basis")
+        np.savez(FOLDER_OUT + '/POD/temporal_basis', Psis=Psi_P, Sigmas=Sigma_P)
+
+    return Psi_P, Sigma_P