modulo_vki-2.0.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- modulo_vki/__init__.py +23 -0
- modulo_vki/core/__init__.py +9 -0
- modulo_vki/core/_dft.py +61 -0
- modulo_vki/core/_dmd_s.py +72 -0
- modulo_vki/core/_k_matrix.py +81 -0
- modulo_vki/core/_mpod_space.py +180 -0
- modulo_vki/core/_mpod_time.py +154 -0
- modulo_vki/core/_pod_space.py +184 -0
- modulo_vki/core/_pod_time.py +48 -0
- modulo_vki/core/_spod_s.py +102 -0
- modulo_vki/core/_spod_t.py +104 -0
- modulo_vki/modulo.py +828 -0
- modulo_vki/utils/__init__.py +4 -0
- modulo_vki/utils/_plots.py +52 -0
- modulo_vki/utils/_utils.py +340 -0
- modulo_vki/utils/others.py +449 -0
- modulo_vki/utils/read_db.py +339 -0
- modulo_vki-2.0.5.dist-info/LICENSE +21 -0
- modulo_vki-2.0.5.dist-info/METADATA +96 -0
- modulo_vki-2.0.5.dist-info/RECORD +22 -0
- modulo_vki-2.0.5.dist-info/WHEEL +5 -0
- modulo_vki-2.0.5.dist-info/top_level.txt +1 -0
modulo_vki/core/_pod_space.py
@@ -0,0 +1,184 @@
+import math
+import numpy as np
+from tqdm import tqdm
+import os
+
+
+def Spatial_basis_POD(D, PSI_P, Sigma_P, MEMORY_SAVING, N_T, FOLDER_OUT='./', N_PARTITIONS=1, SAVE_SPATIAL_POD=False,
+                      rescale=False):
+    """
+    This function computes the POD spatial basis from the temporal basis.
+
+    :param D: np.array.
+        Matrix on which to project the temporal basis
+    :param PSI_P: np.array.
+        POD's Psis
+    :param Sigma_P: np.array.
+        POD's Sigmas
+    :param MEMORY_SAVING: bool.
+        Inherited from the main class; if True, the MEMORY_SAVING feature is used: the partitions are loaded and the appropriate algorithm is run
+    :param N_T: int.
+        Number of temporal snapshots
+    :param FOLDER_OUT: str.
+        Folder in which the results are saved if SAVE_SPATIAL_POD = True
+    :param N_PARTITIONS: int.
+        Number of partitions to be loaded. If D has been partitioned using MODULO, this parameter is automatically inherited from the main class. It must be specified otherwise.
+
+    :param SAVE_SPATIAL_POD: bool.
+        If True, results are saved on disk and released from memory
+
+    :param rescale: bool.
+        If False, the Sigmas are used for the normalization. If True, these are ignored and the normalization is carried out mode by mode.
+        For the standard POD, False is the way to go.
+        However, for other decompositions (e.g. the SPOD_s) you must use rescale=True
+
+    :return Phi_P: np.array.
+        POD's Phis
+    """
+
+    R = PSI_P.shape[1]
+
+    if not MEMORY_SAVING:
+        N_S = D.shape[0]
+
+        if rescale:
+            # The following is the general normalization approach:
+            # not needed for POD but required for SPOD
+            Phi_P = np.zeros((N_S, R))
+            # N_S = D.shape[0] unused variable
+            PHI_P_SIGMA_P = np.dot(D, PSI_P)
+            print("Completing Spatial Structures Modes: \n")
+
+            for i in tqdm(range(0, R)):
+                # Normalize the columns of C to get spatial modes
+                Phi_P[:, i] = PHI_P_SIGMA_P[:, i] / Sigma_P[i]
+
+        else:
+            # We take only the first R modes.
+            Sigma_P_t = Sigma_P[0:R]
+            Sigma_P_Inv_V = 1 / Sigma_P_t
+            # So we have the inverse
+            Sigma_P_Inv = np.diag(Sigma_P_Inv_V)
+            # Here is the one-shot projection:
+            Phi_P = np.linalg.multi_dot([D, PSI_P[:, 0:R], Sigma_P_Inv])
+
+        if SAVE_SPATIAL_POD:
+            os.makedirs(FOLDER_OUT + 'POD', exist_ok=True)
+            np.savez(FOLDER_OUT + '/POD/pod_spatial_basis', phis=Phi_P)
+            # removed PHI_P_SIGMA_P=PHI_P_SIGMA_P, not present if not rescale and not needed (?)
+
+        return Phi_P
+
+    else:
+
+        N_S = np.shape(np.load(FOLDER_OUT + "/data_partitions/di_1.npz")['di'])[0]
+        dim_col = math.floor(N_T / N_PARTITIONS)
+        dim_row = math.floor(N_S / N_PARTITIONS)
+        dr = np.zeros((dim_row, N_T))
+
+        # 1 -- Converting partitions dC to dR
+        if N_S % N_PARTITIONS != 0:
+            tot_blocks_row = N_PARTITIONS + 1
+        else:
+            tot_blocks_row = N_PARTITIONS
+
+        if N_T % N_PARTITIONS != 0:
+            tot_blocks_col = N_PARTITIONS + 1
+        else:
+            tot_blocks_col = N_PARTITIONS
+
+        # --- Loading Psi_P
+        fixed = 0
+        R1 = 0
+        R2 = 0
+        C1 = 0
+        C2 = 0
+
+        for i in range(1, tot_blocks_row + 1):
+
+            if (i == tot_blocks_row) and (N_S - dim_row * N_PARTITIONS > 0):
+                dim_row_fix = N_S - dim_row * N_PARTITIONS
+                dr = np.zeros((dim_row_fix, N_T))
+
+            for b in range(1, tot_blocks_col + 1):
+                di = np.load(FOLDER_OUT + f"/data_partitions/di_{b}.npz")['di']
+                if (i == tot_blocks_row) and (N_S - dim_row * N_PARTITIONS > 0) and fixed == 0:
+                    R1 = R2
+                    R2 = R1 + (N_S - dim_row * N_PARTITIONS)
+                    fixed = 1
+                elif fixed == 0:
+                    R1 = (i - 1) * dim_row
+                    R2 = i * dim_row
+
+                if (b == tot_blocks_col) and (N_T - dim_col * N_PARTITIONS > 0):
+                    C1 = C2
+                    C2 = C1 + (N_T - dim_col * N_PARTITIONS)
+                else:
+                    C1 = (b - 1) * dim_col
+                    C2 = b * dim_col
+
+                np.copyto(dr[:, C1:C2], di[R1:R2, :])
+
+            PHI_SIGMA_BLOCK = np.dot(dr, PSI_P)
+            np.savez(FOLDER_OUT + f"/PHI_SIGMA_{i}",
+                     phi_sigma=PHI_SIGMA_BLOCK)
+
+        # 3 - Converting partitions R to partitions C and get Sigmas
+        dim_col = math.floor(R / N_PARTITIONS)
+        dim_row = math.floor(N_S / N_PARTITIONS)
+        dps = np.zeros((N_S, dim_col))
+        Phi_P = np.zeros((N_S, R))
+
+        if R % N_PARTITIONS != 0:
+            tot_blocks_col = N_PARTITIONS + 1
+        else:
+            tot_blocks_col = N_PARTITIONS
+
+        fixed = 0
+
+        for i in range(1, tot_blocks_col + 1):
+
+            if (i == tot_blocks_col) and (R - dim_col * N_PARTITIONS > 0):
+                dim_col_fix = R - dim_col * N_PARTITIONS
+                dps = np.zeros((N_S, dim_col_fix))
+
+            for b in range(1, tot_blocks_row + 1):
+
+                PHI_SIGMA_BLOCK = np.load(FOLDER_OUT + f"/PHI_SIGMA_{b}.npz")['phi_sigma']
+
+                if (i == tot_blocks_col) and (R - dim_col * N_PARTITIONS > 0) and fixed == 0:
+                    R1 = R2
+                    R2 = R1 + (R - dim_col * N_PARTITIONS)
+                    fixed = 1
+                elif fixed == 0:
+                    R1 = (i - 1) * dim_col
+                    R2 = i * dim_col
+
+                if (b == tot_blocks_col) and (N_S - dim_row * N_PARTITIONS > 0):
+                    C1 = C2
+                    C2 = C1 + (N_S - dim_row * N_PARTITIONS)
+                else:
+                    C1 = (b - 1) * dim_row
+                    C2 = b * dim_row
+
+                dps[C1:C2, :] = PHI_SIGMA_BLOCK[:, R1:R2]
+
+            # Computing Sigmas and Phis
+            if rescale:
+                for j in range(R1, R2):
+                    jj = j - R1
+                    Sigma_P[jj] = np.linalg.norm(dps[:, jj])
+                    Phi_P = dps[:, jj] / Sigma_P[jj]
+                    np.savez(FOLDER_OUT + f"/phi_{j + 1}", phi_p=Phi_P)
+            else:
+                for j in range(R1, R2):
+                    jj = j - R1
+                    Phi_P = dps[:, jj] / Sigma_P[jj]
+                    np.savez(FOLDER_OUT + f"/phi_{j + 1}", phi_p=Phi_P)
+
+        Phi_P_M = np.zeros((N_S, R))
+        for j in range(R):
+            Phi_P_V = np.load(FOLDER_OUT + f"/phi_{j + 1}.npz")['phi_p']
+            Phi_P_M[:, j] = Phi_P_V
+
+        return Phi_P_M
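For orientation, the sketch below shows how the in-memory branch (MEMORY_SAVING=False) of Spatial_basis_POD, added above in modulo_vki/core/_pod_space.py, might be called on synthetic data. The snapshot matrix, temporal basis and amplitudes are made-up placeholders, not data shipped with the package:

    import numpy as np
    from modulo_vki.core._pod_space import Spatial_basis_POD

    # Synthetic snapshot matrix: 500 spatial points, 100 snapshots (placeholder data)
    N_S, N_T, R = 500, 100, 5
    D = np.random.rand(N_S, N_T)

    # Toy orthonormal temporal basis and amplitudes; in a real run these come
    # from the temporal POD of the correlation matrix K = D.T @ D
    PSI_P, _ = np.linalg.qr(np.random.rand(N_T, R))
    Sigma_P = np.linalg.norm(D @ PSI_P, axis=0)

    # rescale=False (default): one-shot projection Phi = D Psi diag(1/Sigma)
    Phi_P = Spatial_basis_POD(D, PSI_P, Sigma_P, MEMORY_SAVING=False, N_T=N_T)
    print(Phi_P.shape)  # expected (500, 5)

With MEMORY_SAVING=True the same call instead expects the partitions di_*.npz previously written by MODULO under FOLDER_OUT/data_partitions/.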
modulo_vki/core/_pod_time.py
@@ -0,0 +1,48 @@
+import os
+import numpy as np
+from ..utils._utils import switch_eigs
+
+
+def Temporal_basis_POD(K, SAVE_T_POD=False, FOLDER_OUT='./', n_Modes=10, eig_solver: str = 'eigh'):
+    """
+    This method computes the POD temporal basis. For the theoretical background of the proper orthogonal decomposition in a nutshell, see: https://youtu.be/8fhupzhAR_M
+
+    :param FOLDER_OUT: str. Folder in which the results will be saved (if SAVE_T_POD=True)
+    :param K: np.array. Temporal correlation matrix
+    :param SAVE_T_POD: bool. A flag deciding whether the results are saved on disk or not. If the MEMORY_SAVING feature is active, it is switched to True by default.
+    :param n_Modes: int. Number of modes that will be computed
+    :param eig_solver: str. Eigenvalue solver used to diagonalize K (passed to switch_eigs)
+    :return: Psi_P: np.array. POD's Psis
+    :return: Sigma_P: np.array. POD's Sigmas
+    """
+    # Solver 1: Use the standard SVD
+    # Psi_P, Lambda_P, _ = np.linalg.svd(K)
+    # Sigma_P = np.sqrt(Lambda_P)
+
+    # Solver 2: Use randomized SVD ############## WARNING #################
+    # if svd_solver.lower() == 'svd_sklearn_truncated':
+    #     svd = TruncatedSVD(n_Modes)
+    #     svd.fit_transform(K)
+    #     Psi_P = svd.components_.T
+    #     Lambda_P = svd.singular_values_
+    #     Sigma_P = np.sqrt(Lambda_P)
+    # elif svd_solver.lower() == 'svd_numpy':
+    #     Psi_P, Lambda_P, _ = np.linalg.svd(K)
+    #     Sigma_P = np.sqrt(Lambda_P)
+    # elif svd_solver.lower() == 'svd_sklearn_randomized':
+    #     Psi_P, Lambda_P, _ = svds_RND(K, n_Modes)
+    #     Sigma_P = np.sqrt(Lambda_P)
+    # elif svd_solver.lower() == 'svd_scipy_sparse':
+    #     Psi_P, Lambda_P, _ = svds(K, k=n_Modes)
+    #     Sigma_P = np.sqrt(Lambda_P)
+
+    print("diagonalizing K....")
+    Psi_P, Sigma_P = switch_eigs(K, n_Modes, eig_solver)
+
+
+    if SAVE_T_POD:
+        os.makedirs(FOLDER_OUT + "/POD/", exist_ok=True)
+        print("Saving POD temporal basis")
+        np.savez(FOLDER_OUT + '/POD/temporal_basis', Psis=Psi_P, Sigmas=Sigma_P)
+
+    return Psi_P, Sigma_P
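A minimal sketch of calling Temporal_basis_POD (modulo_vki/core/_pod_time.py above) directly on a toy temporal correlation matrix; the data is synthetic and standalone use outside the MODULO main class is assumed:

    import numpy as np
    from modulo_vki.core._pod_time import Temporal_basis_POD

    # Toy snapshot matrix and its temporal correlation matrix K = D.T D
    D = np.random.rand(400, 64)   # 400 spatial points, 64 snapshots
    K = D.T @ D                   # symmetric positive semi-definite, 64 x 64

    # Diagonalize K with the default 'eigh' solver and keep 10 modes
    Psi_P, Sigma_P = Temporal_basis_POD(K, n_Modes=10)
    print(Psi_P.shape)            # expected (64, 10): one temporal mode per column

Pairing this with Spatial_basis_POD from the previous file then yields the spatial modes Phi, which is exactly what compute_SPOD_s below does after filtering K.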
modulo_vki/core/_spod_s.py
@@ -0,0 +1,102 @@
+import numpy as np
+from scipy import signal
+from scipy.signal import firwin
+from ._pod_time import Temporal_basis_POD
+from ._pod_space import Spatial_basis_POD
+
+def compute_SPOD_s(D, K, F_S, n_s, n_t, N_o=100, f_c=0.3, n_Modes=10, SAVE_SPOD=True,
+                   FOLDER_OUT='./', MEMORY_SAVING=False, N_PARTITIONS=1):
+    """
+    This method computes the Spectral POD of your data.
+    This is the formulation by Sieber et al.
+    (https://www.cambridge.org/core/journals/journal-of-fluid-mechanics/article/abs/spectral-proper-orthogonal-decomposition/DCD8A6EDEFD56F5A9715DBAD38BD461A)
+
+    :param F_S: float,
+         Sampling Frequency [Hz]
+    :param N_o: int,
+         Semi-order of the diagonal filter.
+         Note that the filter order will be 2*N_o + 1 (to make sure it is odd)
+    :param f_c: float,
+         cut-off frequency of the diagonal filter
+    :param n_Modes: int,
+         number of modes to be computed
+    :param SAVE_SPOD: bool,
+         If True, MODULO will save the output in self.FOLDER_OUT/MODULO_tmp
+    :param FOLDER_OUT: string
+         Define where the output will be stored (ignored if SAVE_SPOD=False)
+    :param MEMORY_SAVING: bool
+         Define if memory saving is active or not (redundant; to be improved)
+         Currently left for compatibility with the rest of MODULO.
+    :param N_PARTITIONS: int
+         number of partitions (if memory saving = False, it should be 1).
+         (redundant; to be improved)
+         Currently left for compatibility with the rest of MODULO.
+    :return Phi_P: np.array
+         SPOD Phis
+    :return Psi_P: np.array
+         SPOD Psis
+    :return Sigma_P: np.array
+         SPOD Sigmas.
+    """
+    # if self.D is None:
+    #     D = np.load(self.FOLDER_OUT + '/MODULO_tmp/data_matrix/database.npz')['D']
+    #     SAVE_SPOD = True
+    #     # TODO : Lorenzo check this stuff
+    # else:
+    #     D = self.D
+    #
+    #     n_s = self.N_S  # Repeat variable for debugging compatibility
+    #     n_t = self.N_T
+    #
+    #     print('Computing Correlation Matrix \n')
+
+    # The first step is the same as the POD: we compute the correlation matrix
+    # K = CorrelationMatrix(self.N_T, self.N_PARTITIONS, self.MEMORY_SAVING,
+    #                       self.FOLDER_OUT, D=self.D)
+
+    # 1. Initialize the extended correlation matrix
+    K_e = np.zeros((n_t + 2 * N_o, n_t + 2 * N_o))
+    # and place the original K at its centre:
+    K_e[N_o:n_t + N_o, N_o:n_t + N_o] = K
+
+    # 2. Fill the edges (with some repetition, but that is fine)
+
+    # Row-wise, upper part
+    for i in range(0, N_o):
+        K_e[i, i:i + n_t] = K[0, :]
+
+    # Row-wise, bottom part
+    for i in range(N_o + n_t, n_t + 2 * N_o):
+        K_e[i, i - n_t + 1:i + 1] = K[-1, :]
+
+    # Column-wise, left part
+    for j in range(0, N_o):
+        K_e[j:j + n_t, j] = K[:, 0]
+
+    # Column-wise, right part
+    for j in range(N_o + n_t, 2 * N_o + n_t):
+        K_e[j - n_t + 1:j + 1, j] = K[:, -1]
+
+    # Now create the diagonal kernel in 2D
+    h_f = firwin(N_o, f_c)  # Kernel in 1D
+    # This is also something that should be put in a separate file:
+    # To cancel the phase lag we make this non-causal with a symmetric
+    # shift, hence with zero padding as equal as possible on both sides
+    n_padd_l = round((n_t - N_o) / 2)
+    n_padd_r = n_t - N_o - n_padd_l
+
+    h_f_pad = np.pad(h_f, (n_padd_l, n_padd_r))  # symmetrically padded kernel in 1D
+    h_f_2 = np.diag(h_f_pad)
+
+    # Finally, the filtered K is just
+    K_F = signal.fftconvolve(K_e, h_f_2, mode='same')[N_o:n_t + N_o, N_o:n_t + N_o]
+    # plt.plot(np.diag(K),'b--'); plt.plot(np.diag(K_F_e),'r')
+
+    # From now on it's just POD:
+    Psi_P, Sigma_P = Temporal_basis_POD(K_F, SAVE_SPOD, FOLDER_OUT, n_Modes)
+    # but with a normalization aspect to be careful about!
+    Phi_P = Spatial_basis_POD(D, N_T=n_t, PSI_P=Psi_P, Sigma_P=Sigma_P,
+                              MEMORY_SAVING=MEMORY_SAVING, FOLDER_OUT=FOLDER_OUT,
+                              N_PARTITIONS=N_PARTITIONS, rescale=True)
+
+    return Phi_P, Psi_P, Sigma_P
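The sketch below exercises compute_SPOD_s (modulo_vki/core/_spod_s.py above) as a standalone call on synthetic data; the snapshot matrix, correlation matrix and sampling frequency are illustrative placeholders, and in a normal MODULO run K would come from the correlation-matrix step referenced in the commented-out code:

    import numpy as np
    from modulo_vki.core._spod_s import compute_SPOD_s

    n_s, n_t = 200, 512          # toy grid size and number of snapshots
    F_S = 1000.0                 # sampling frequency in Hz (placeholder)
    D = np.random.rand(n_s, n_t)
    K = D.T @ D                  # temporal correlation matrix, n_t x n_t

    # Filter the correlation matrix along its diagonals, then run a plain POD
    Phi_S, Psi_S, Sigma_S = compute_SPOD_s(D, K, F_S, n_s, n_t,
                                           N_o=64, f_c=0.3, n_Modes=5,
                                           SAVE_SPOD=False, FOLDER_OUT='./')
    print(Phi_S.shape, Psi_S.shape)   # expected (200, 5) and (512, 5)

With SAVE_SPOD=True, Temporal_basis_POD would also write the temporal basis under FOLDER_OUT/POD/.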
modulo_vki/core/_spod_t.py
@@ -0,0 +1,104 @@
+import numpy as np
+from modulo_vki.utils._utils import overlap
+from tqdm import tqdm
+import os
+
+from modulo_vki.utils._utils import switch_svds
+
+
+
+def compute_SPOD_t(D, F_S, L_B=500, O_B=250, n_Modes=10, SAVE_SPOD=True, FOLDER_OUT='/',
+                   possible_svds='svd_sklearn_truncated'):
+    """
+    This method computes the Spectral POD of your data.
+    This is the formulation by Towne et al.
+    (https://www.cambridge.org/core/journals/journal-of-fluid-mechanics/article/spectral-proper-orthogonal-decomposition-and-its-relationship-to-dynamic-mode-decomposition-and-resolvent-analysis/EC2A6DF76490A0B9EB208CC2CA037717)
+
+    :param D: np.array,
+         Snapshot matrix to decompose, of size (N_S, N_T)
+    :param F_S: float,
+         Sampling Frequency [Hz]
+    :param L_B: int,
+         Length of the chunks
+    :param O_B: int,
+         Overlap between consecutive blocks
+    :param n_Modes: int,
+         Number of modes to be computed FOR EACH FREQUENCY
+    :param SAVE_SPOD: bool,
+         If True, MODULO will save the output in FOLDER_OUT/MODULO_tmp
+    :param possible_svds: str,
+         Svd solver to be used throughout the computation
+
+    :return Phi_SP: np.array
+         SPOD spatial modes
+         (one set of n_Modes per frequency bin)
+    :return Sigma_SP: np.array
+         Amplitudes of the SPOD modes at each frequency.
+    :return Freqs_Pos: np.array
+         Positive frequency bins for the Spectral POD,
+         i.e. np.fft.fftfreq(N_B) scaled by F_S, keeping only Freqs >= 0
+    """
+
+    # if D is None:
+    #     D = np.load(FOLDER_OUT + '/MODULO_tmp/data_matrix/database.npz')['D']
+    #     SAVE_SPOD = True
+    # else:
+    #     D = D
+    #
+    #     n_s = N_S  # Repeat variable for debugging compatibility
+    #     n_t = N_T
+    #
+    # # First compute the PSD in each point (this is very time consuming and should be parallelized)
+    # # Note: this can be improved a lot...! ok for the moment
+    print('Computing PSD at all points\n')
+    N_S, N_T = np.shape(D)
+
+    # Step 1: Partition the data into blocks (potentially overlapping)
+    Ind = np.arange(N_T)
+    Indices = overlap(Ind, len_chunk=L_B, len_sep=O_B)
+
+    N_B = np.shape(Indices)[1]
+    N_P = np.shape(Indices)[0]
+    print('Partitioned into blocks of length n_B=' + str(N_B))
+    print('Number of partitions retained is n_P=' + str(N_P))
+
+    # The frequency bins are thus defined:
+    Freqs = np.fft.fftfreq(N_B) * F_S  # Compute the frequency bins
+    Keep_IND = np.where(Freqs >= 0)
+    N_B2 = len(Keep_IND[0])  # indices for non-negative frequencies
+    Freqs_Pos = Freqs[Keep_IND]  # non-negative frequencies
+
+    # Step 2: Construct the D_hats in each partition
+    D_P_hat_Tens = np.zeros((N_S, N_B, N_P))
+    print('Computing DFTs in each partition')
+    for k in tqdm(range(0, N_P)):  # Loop over the partitions
+        D_p = D[:, Indices[k]]  # Take the portion of data
+        D_P_hat_Tens[:, :, k] = np.fft.fft(D_p, N_B, 1)
+
+    # This would be the mean over the frequencies
+    # D_hat_Mean = np.mean(D_P_hat_Tens, axis=1)
+
+    # Initialize the outputs
+    Sigma_SP = np.zeros((n_Modes, N_B2))
+    Phi_SP = np.zeros((N_S, n_Modes, N_B2))
+
+    # Step 3: Loop over frequencies to build the modes.
+    # Note: you only care about half of these frequencies.
+    # This is why you loop over N_B2, not N_B
+    print('Computing POD for each frequency')
+    for j in tqdm(range(0, N_B2)):
+        # Get D_hat of the chunk
+        D_hat_f = D_P_hat_Tens[:, j, :]
+        # Go for the SVD
+
+        U, V, Sigma = switch_svds(D_hat_f, n_Modes, svd_solver=possible_svds)
+
+        Phi_SP[:, :, j] = U
+        Sigma_SP[:, j] = Sigma / (N_S * N_B)
+
+    if SAVE_SPOD:
+        folder_dir = FOLDER_OUT + '/SPOD_T'
+        os.makedirs(folder_dir, exist_ok=True)
+        np.savez(folder_dir + '/spod_t.npz', Phi=Phi_SP, Sigma=Sigma_SP, Freqs=Freqs_Pos)
+
+    return Phi_SP, Sigma_SP, Freqs_Pos
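Finally, a sketch of a standalone call to compute_SPOD_t (modulo_vki/core/_spod_t.py above); the data, sampling frequency and block sizes are illustrative placeholders, and the default 'svd_sklearn_truncated' solver is assumed to be available through the package's switch_svds utility:

    import numpy as np
    from modulo_vki.core._spod_t import compute_SPOD_t

    N_S, N_T = 200, 2000         # toy grid size and number of snapshots
    F_S = 100.0                  # sampling frequency in Hz (placeholder)
    D = np.random.rand(N_S, N_T)

    # Blocks of 256 snapshots, with O_B controlling their overlap (see the
    # overlap utility in modulo_vki.utils._utils), 3 modes per frequency bin
    Phi_SP, Sigma_SP, Freqs = compute_SPOD_t(D, F_S, L_B=256, O_B=128,
                                             n_Modes=3, SAVE_SPOD=False)
    print(Phi_SP.shape)          # expected (N_S, 3, number of non-negative frequency bins)

With SAVE_SPOD=True the modes, amplitudes and frequency bins are also written to FOLDER_OUT/SPOD_T/spod_t.npz, as in the code above.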