modulo_vki-2.0.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
modulo_vki/__init__.py ADDED
@@ -0,0 +1,23 @@
+
+ #from ._version import get_versions
+ #__version__ = get_versions()['version']
+ #del get_versions
+
+
+ # from .utils.read_db import *
+ # from .utils._utils import *
+ # from .utils._plots import *
+ # from .utils.others import *
+
+ # from .core._k_matrix import *
+ # from .core._dft import *
+ # from .core._dmd_s import *
+ # from .core._k_matrix import *
+ # from .core._mpod_time import *
+ # from .core._mpod_space import *
+ # from .core._pod_time import *
+ # from .core._pod_space import *
+ # from .core._spod_s import *
+ # from .core._spod_t import *
+
+ from modulo_vki.modulo import ModuloVKI
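
Since ModuloVKI is the only public re-export in this file, both of the following imports resolve to the same class:

import modulo_vki
from modulo_vki import ModuloVKI          # via the package root (re-export above)
from modulo_vki.modulo import ModuloVKI   # direct import, equivalent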
modulo_vki/core/__init__.py ADDED
@@ -0,0 +1,9 @@
+ from ._dft import *
+ from ._dmd_s import *
+ from ._k_matrix import *
+ from ._mpod_space import *
+ from ._mpod_time import *
+ from ._pod_space import *
+ from ._pod_time import *
+ from ._spod_s import *
+ from ._spod_t import *
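
Assuming this file is modulo_vki/core/__init__.py (consistent with the relative imports above and the commented imports in the package root), the star imports re-export the per-method drivers, so they can be reached directly from the subpackage:

from modulo_vki.core import dft_fit, dmd_s, CorrelationMatrix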
modulo_vki/core/_dft.py ADDED
@@ -0,0 +1,61 @@
+ import os
+
+ import numpy as np
+ from tqdm import tqdm
+
+
+ def dft_fit(N_T, F_S, D, FOLDER_OUT, SAVE_DFT=False):
+     """
+     This function computes the DFT from the dataset D.
+     Currently, this does not handle the memory saving feature.
+
+     :param N_T: int.
+       Number of snapshots
+     :param F_S: float.
+       Sampling frequency (in Hz)
+     :param D: np.array.
+       Snapshot matrix
+     :param FOLDER_OUT: str.
+       Folder in which the results are saved if SAVE_DFT = True
+     :param SAVE_DFT: bool.
+       If True, results are saved on disk and released from memory
+
+     :return: Sorted_Freqs, np.array
+       Frequency bins, in Hz.
+     :return: Phi_F, np.array
+       (Complex) spatial structures for each mode
+     :return: SIGMA_F, np.array
+       (Real) amplitude of each mode
+
+     """
+     n_t = int(N_T)
+     Freqs = np.fft.fftfreq(n_t) * F_S  # Compute the frequency bins
+     # PSI_F = np.conj(np.fft.fft(np.eye(n_t)) / np.sqrt(n_t))  # Prepare the Fourier Matrix.
+
+     # Method 1 (didactic!)
+     # PHI_SIGMA = np.dot(D, np.conj(PSI_F))  # This is PHI * SIGMA
+
+     # Method 2: FFT along the time axis, normalized so the Fourier basis is orthonormal
+     PHI_SIGMA = (np.fft.fft(D, n_t, 1)) / (n_t ** 0.5)
+
+     PHI_F = np.zeros((D.shape[0], n_t), dtype=complex)  # Initialize the PHI_F matrix
+     SIGMA_F = np.zeros(n_t)  # Initialize the SIGMA_F vector
+
+     # Now we proceed with the normalization. This can be expensive, so we show progress.
+     for r in tqdm(range(0, n_t)):  # Loop over the columns of PHI_SIGMA to normalize
+         SIGMA_F[r] = abs(np.vdot(PHI_SIGMA[:, r], PHI_SIGMA[:, r])) ** 0.5
+         PHI_F[:, r] = PHI_SIGMA[:, r] / SIGMA_F[r]
+
+     Indices = np.flipud(np.argsort(SIGMA_F))  # find indices for sorting in decreasing order
+     Sorted_Sigmas = SIGMA_F[Indices]  # Sort all the sigmas
+     Sorted_Freqs = Freqs[Indices]  # Sort all the frequencies accordingly
+     Phi_F = PHI_F[:, Indices]  # Sorted spatial structures matrix
+     SIGMA_F = Sorted_Sigmas  # Sorted amplitude vector
+
+     if SAVE_DFT:
+         os.makedirs(FOLDER_OUT + 'DFT', exist_ok=True)
+         np.savez(FOLDER_OUT + 'DFT/dft_fitted', Freqs=Sorted_Freqs, Phis=Phi_F, Sigmas=SIGMA_F)
+
+     return Sorted_Freqs, Phi_F, SIGMA_F
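
A minimal usage sketch for dft_fit (not part of the package; the grid size, sampling frequency, and test signal are invented for illustration):

import numpy as np
from modulo_vki.core._dft import dft_fit

F_S = 100.0                         # hypothetical sampling frequency (Hz)
n_s, n_t = 64, 128                  # hypothetical grid points and snapshots
t = np.arange(n_t) / F_S
x = np.linspace(0, 1, n_s)
# A single 10 Hz travelling structure plus weak noise
D = np.sin(2 * np.pi * (10 * t[None, :] - x[:, None])) + 0.01 * np.random.randn(n_s, n_t)

Sorted_Freqs, Phi_F, SIGMA_F = dft_fit(n_t, F_S, D, FOLDER_OUT='./', SAVE_DFT=False)
print(Sorted_Freqs[:4])  # the dominant bins should sit near +/-10 Hz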
modulo_vki/core/_dmd_s.py ADDED
@@ -0,0 +1,72 @@
+ import os
+ import numpy as np
+ from numpy import linalg as LA
+ from ..utils._utils import switch_svds
+
+
+ def dmd_s(D_1, D_2, n_Modes, F_S,
+           SAVE_T_DMD=False,
+           FOLDER_OUT='./',
+           svd_solver: str = 'svd_sklearn_truncated'):
+     """
+     This method computes the Dynamic Mode Decomposition (DMD) using the PIP algorithm from Penland.
+
+     :param D_1: np.array
+        First portion of the data, i.e. D[:,0:n_t-1]
+     :param D_2: np.array
+        Second portion of the data, i.e. D[:,1:n_t]
+     :param n_Modes: int
+        Number of modes that will be computed
+     :param F_S: float
+        Sampling frequency in Hz
+     :param SAVE_T_DMD: bool
+        A flag deciding whether the results are saved on disk or not. If the MEMORY_SAVING feature is active, it is switched to True by default.
+     :param FOLDER_OUT: str
+        Folder in which the results will be saved (if SAVE_T_DMD=True)
+     :param svd_solver: str
+        SVD solver to be used for the POD of D_1
+
+     :return Phi_D: np.array.
+        DMD's complex spatial structures
+     :return Lambda: np.array.
+        DMD eigenvalues (of the reduced propagator)
+     :return freqs: np.array.
+        Frequencies (in Hz) associated to the DMD modes
+     :return a0s: np.array.
+        Initial coefficients of the modes
+     """
+
+     # %% Steps 1-2: POD of D_1 via the selected SVD solver
+     Phi_P, Psi_P, Sigma_P = switch_svds(D_1, n_Modes, svd_solver)
+     print('SVD of D1 rdy')
+     Sigma_inv = np.diag(1 / Sigma_P)
+     dt = 1 / F_S
+     # %% Step 3: Compute the approximated propagator
+     P_A = LA.multi_dot([np.transpose(Phi_P), D_2, Psi_P, Sigma_inv])
+     print('reduced propagator rdy')
+
+     # %% Step 4: Compute the eigenvalues of the system
+     Lambda, Q = LA.eig(P_A)  # not necessarily symmetric positive definite! Avoid eigsh, eigh
+     freqs = np.imag(np.log(Lambda)) / (2 * np.pi * dt)
+     print('lambdas and freqs rdy')
+
+     # %% Step 5: Spatial structures of the DMD in the PIP style
+     Phi_D = LA.multi_dot([D_2, Psi_P, Sigma_inv, Q])
+     print('Phi_D rdy')
+
+     # %% Step 6: Compute the initial coefficients
+     # a0s = LA.lstsq(Phi_D, D_1[:, 0], rcond=None)
+     a0s = LA.pinv(Phi_D).dot(D_1[:, 0])
+     print('a0s rdy')
+
+     if SAVE_T_DMD:
+         os.makedirs(FOLDER_OUT + "/DMD/", exist_ok=True)
+         print("Saving DMD results")
+         np.savez(FOLDER_OUT + '/DMD/dmd_decomposition',
+                  Phi_D=Phi_D, Lambda=Lambda, freqs=freqs, a0s=a0s)
+
+     return Phi_D, Lambda, freqs, a0s
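
A usage sketch for dmd_s, with a generic snapshot matrix D (random data, invented here just to exercise the call; the default svd_solver requires scikit-learn):

import numpy as np
from modulo_vki.core._dmd_s import dmd_s

rng = np.random.default_rng(0)
D = rng.standard_normal((64, 200))   # hypothetical (n_s x n_t) snapshot matrix
D_1, D_2 = D[:, :-1], D[:, 1:]       # time-shifted pair, as the docstring prescribes
Phi_D, Lambda, freqs, a0s = dmd_s(D_1, D_2, n_Modes=5, F_S=100.0)
print(np.abs(Lambda))                # |Lambda| near 1 indicates nearly neutral modes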
modulo_vki/core/_k_matrix.py ADDED
@@ -0,0 +1,81 @@
+ import os
+ from tqdm import tqdm
+ import numpy as np
+ import math
+
+
+ def CorrelationMatrix(N_T, N_PARTITIONS=1, MEMORY_SAVING=False, FOLDER_OUT='./', SAVE_K=False, D=None, weights=np.array([])):
+     """
+     This method computes the temporal correlation matrix from a data matrix. If the computation is too heavy,
+     the memory saving feature can split it into tranches. If D has been partitioned using MODULO, the dimension
+     dim_col and N_PARTITIONS are loaded automatically.
+
+     :param N_T: int. Number of temporal snapshots
+     :param D: np.array. Data matrix
+     :param SAVE_K: bool. If SAVE_K=True, the matrix K is saved on disk. If the MEMORY_SAVING feature is active, this is done by default.
+     :param MEMORY_SAVING: bool. If MEMORY_SAVING=True, the computation of the correlation matrix is done in steps. It requires the data matrix to be partitioned, following the algorithm in MODULO._data_processing.
+     :param FOLDER_OUT: str. Folder in which the temporal correlation matrix will be stored
+     :param N_PARTITIONS: int. Number of partitions to be read in computing the correlation matrix. If _data_processing is used to partition the data matrix, this is inherited from the main class.
+     :param weights: weight vector [w_1,...,w_{N_s}] where w_i = area_cell_i/area_grid. Only needed if the grid is non-uniform and MEMORY_SAVING=True
+     :return: K (np.array) if the memory saving is not active; None otherwise.
+     """
+
+     if not MEMORY_SAVING:
+         print("\n Computing Temporal correlation matrix K ...")
+         K = np.dot(D.T, D)
+         print("\n Done.")
+
+     else:
+         SAVE_K = True
+         print("\n Using Memory Saving feature...")
+         K = np.zeros((N_T, N_T))
+         dim_col = math.floor(N_T / N_PARTITIONS)
+
+         if N_T % N_PARTITIONS != 0:
+             tot_blocks_col = N_PARTITIONS + 1
+         else:
+             tot_blocks_col = N_PARTITIONS
+
+         for k in tqdm(range(tot_blocks_col)):
+
+             di = np.load(FOLDER_OUT + f"/data_partitions/di_{k + 1}.npz")['di']
+             if weights.size != 0:
+                 di = np.transpose(np.transpose(di) * np.sqrt(weights))
+
+             ind_start = k * dim_col
+             ind_end = ind_start + dim_col
+
+             # If this is the last (smaller) block, fix the column dimension
+             if (k == tot_blocks_col - 1) and (N_T - dim_col * N_PARTITIONS > 0):
+                 dim_col = N_T - dim_col * N_PARTITIONS
+                 ind_end = ind_start + dim_col
+
+             # Diagonal block
+             K[ind_start:ind_end, ind_start:ind_end] = np.dot(di.transpose(), di)
+
+             block = k + 2
+
+             # Off-diagonal blocks; K is symmetric, so each is computed once and mirrored
+             while block <= tot_blocks_col:
+                 dj = np.load(FOLDER_OUT + f"/data_partitions/di_{block}.npz")['di']
+                 if weights.size != 0:
+                     dj = np.transpose(np.transpose(dj) * np.sqrt(weights))
+
+                 ind_start_out = (block - 1) * dim_col
+                 ind_end_out = ind_start_out + dim_col
+
+                 if (block == tot_blocks_col) and (N_T - dim_col * N_PARTITIONS > 0):
+                     dim_col = N_T - dim_col * N_PARTITIONS
+                     ind_end_out = ind_start_out + dim_col
+                     dj = dj[:, :dim_col]
+
+                 K[ind_start:ind_end, ind_start_out:ind_end_out] = np.dot(di.T, dj)
+
+                 K[ind_start_out:ind_end_out, ind_start:ind_end] = K[ind_start:ind_end, ind_start_out:ind_end_out].T
+
+                 block += 1
+
+             dim_col = math.floor(N_T / N_PARTITIONS)
+
+     if SAVE_K:
+         os.makedirs(FOLDER_OUT + '/correlation_matrix', exist_ok=True)
+         np.savez(FOLDER_OUT + "/correlation_matrix/k_matrix", K=K)
+
+     return K if not MEMORY_SAVING else None
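
A quick check of the direct (in-memory) path, with an invented snapshot matrix; with MEMORY_SAVING=True the function instead reads the di_*.npz partitions written by MODULO._data_processing:

import numpy as np
from modulo_vki.core._k_matrix import CorrelationMatrix

D = np.random.randn(64, 200)                 # hypothetical snapshot matrix
K = CorrelationMatrix(N_T=D.shape[1], D=D)   # direct computation, K = D^T D
assert K.shape == (200, 200)
assert np.allclose(K, K.T)                   # the temporal correlation matrix is symmetric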
modulo_vki/core/_mpod_space.py ADDED
@@ -0,0 +1,180 @@
+ import numpy as np
+ import os
+ from tqdm import tqdm
+ import math
+
+
+ def spatial_basis_mPOD(D, PSI_M, N_T, N_PARTITIONS, N_S, MEMORY_SAVING, FOLDER_OUT, SAVE: bool = False,
+                        weights: np.ndarray = np.array([])):
+     """
+     Given the temporal basis of the mPOD, this function computes the spatial one.
+
+     :param D: np.array.
+       Snapshot matrix D; if memory saving is active, this is ignored.
+     :param PSI_M: np.array.
+       The mPOD temporal basis Psi, tentatively assembled from all scales
+     :param N_T: int.
+       Number of snapshots
+     :param N_PARTITIONS: int.
+       Number of partitions in the memory saving
+     :param N_S: int.
+       Number of grid points in space
+     :param MEMORY_SAVING: bool.
+       Inherited from the main class; if True, turns on the MEMORY_SAVING feature, loading the partitions and starting the proper algorithm
+     :param FOLDER_OUT: str.
+       Folder in which the results are saved if SAVE = True
+     :param SAVE: bool.
+       If True, results are saved on disk and released from memory
+     :param weights: np.array.
+       Weight vector [w_1,...,w_{N_s}] where w_i = area_cell_i/area_grid. Only needed if the grid is non-uniform and MEMORY_SAVING=True
+     :return: Phi_M, Psi_M, Sigma_M: np.arrays. The final (sorted) mPOD decomposition
+     """
+
+     R1 = 0; R2 = 0
+     if MEMORY_SAVING:
+         SAVE = True
+         os.makedirs(FOLDER_OUT + '/mPOD/', exist_ok=True)
+         dim_col = math.floor(N_T / N_PARTITIONS)
+         dim_row = math.floor(N_S / N_PARTITIONS)
+         dr = np.zeros((dim_row, N_T))
+
+         # 1 --- Converting column partitions dC to row partitions dR
+         if N_S % N_PARTITIONS != 0:
+             tot_blocks_row = N_PARTITIONS + 1
+         else:
+             tot_blocks_row = N_PARTITIONS
+
+         if N_T % N_PARTITIONS != 0:
+             tot_blocks_col = N_PARTITIONS + 1
+         else:
+             tot_blocks_col = N_PARTITIONS
+
+         fixed = 0
+
+         for i in range(1, tot_blocks_row + 1):
+             # --- Check if dim_row has to be fixed:
+             if i == tot_blocks_row and (N_S - dim_row * N_PARTITIONS > 0):
+                 dim_row_fix = N_S - dim_row * N_PARTITIONS
+                 dr = np.zeros((dim_row_fix, N_T))
+
+             for cont in range(1, tot_blocks_col + 1):
+                 di = np.load(FOLDER_OUT + f"/data_partitions/di_{cont}.npz")['di']
+
+                 if i == tot_blocks_row and (N_S - dim_row * N_PARTITIONS > 0) and fixed == 0:
+                     R1 = R2
+                     R2 = R1 + (N_S - dim_row * N_PARTITIONS)
+                     fixed = 1
+                 elif fixed == 0:
+                     R1 = (i - 1) * dim_row
+                     R2 = i * dim_row
+
+                 # Same as before, but the 'fixed' flag is not needed: if the code
+                 # enters this branch, it is the last pass through this loop
+                 if cont == tot_blocks_col and (N_T - dim_col * N_PARTITIONS > 0):
+                     C1 = C2
+                     C2 = C1 + (N_T - dim_col * N_PARTITIONS)
+                 else:
+                     C1 = (cont - 1) * dim_col
+                     C2 = cont * dim_col
+
+                 dr[:, C1:C2] = di[R1:R2, :]
+
+             # 2 --- Computing row partitions of PHI_SIGMA
+             PHI_SIGMA_BLOCK = dr @ PSI_M
+             np.savez(FOLDER_OUT + f'/mPOD/phi_sigma_{i}', PHI_SIGMA_BLOCK)
+
+         # 3 --- Convert row partitions back to column partitions and get SIGMA
+         R = PSI_M.shape[1]
+         dim_col = math.floor(R / N_PARTITIONS)
+         dim_row = math.floor(N_S / N_PARTITIONS)
+         dps = np.zeros((N_S, dim_col))
+         SIGMA_M = []
+         PHI_M = []
+
+         if R % N_PARTITIONS != 0:
+             tot_blocks_col = N_PARTITIONS + 1
+         else:
+             tot_blocks_col = N_PARTITIONS
+
+         fixed = 0
+
+         # Here we apply the same logic as in the loop before
+         for j in range(1, tot_blocks_col + 1):
+
+             if j == tot_blocks_col and (R - dim_col * N_PARTITIONS > 0):
+                 dim_col_fix = R - dim_col * N_PARTITIONS
+                 dps = np.zeros((N_S, dim_col_fix))
+
+             for k in range(1, tot_blocks_row + 1):
+                 PHI_SIGMA_BLOCK = np.load(FOLDER_OUT + f"/mPOD/phi_sigma_{k}.npz")['arr_0']
+
+                 if j == tot_blocks_col and (R - dim_col * N_PARTITIONS > 0) and fixed == 0:
+                     R1 = R2
+                     R2 = R1 + (R - dim_col * N_PARTITIONS)
+                     fixed = 1
+                 elif fixed == 0:
+                     R1 = (j - 1) * dim_col
+                     R2 = j * dim_col
+
+                 if k == tot_blocks_row and (N_S - dim_row * N_PARTITIONS > 0):
+                     C1 = C2
+                     C2 = C1 + (N_S - dim_row * N_PARTITIONS)
+                 else:
+                     C1 = (k - 1) * dim_row
+                     C2 = k * dim_row
+
+                 dps[C1:C2, :] = PHI_SIGMA_BLOCK[:, R1:R2]
+
+             # Getting sigmas and phis
+             for z in range(R1, R2):
+                 zz = z - R1
+                 if weights.size == 0:
+                     SIGMA_M.append(np.linalg.norm(dps[:, zz]))
+                 else:
+                     SIGMA_M.append(np.linalg.norm(dps[:, zz] * np.sqrt(weights)))
+                 tmp = dps[:, zz] / SIGMA_M[z]
+                 PHI_M.append(tmp)
+                 np.savez(FOLDER_OUT + f'/mPOD/phi_{z + 1}', tmp)
+
+         Indices = np.argsort(SIGMA_M)[::-1]  # find indices for sorting in decreasing order
+         SIGMA_M = np.asarray(SIGMA_M)
+         PHI_M = np.asarray(PHI_M).T
+         PSI_M = np.asarray(PSI_M)
+         Sorted_Sigmas = SIGMA_M[Indices]  # Sort all the sigmas
+         Phi_M = PHI_M[:, Indices]  # Sorted spatial structures matrix
+         Psi_M = PSI_M[:, Indices]  # Sorted temporal structures matrix
+         Sigma_M = Sorted_Sigmas  # Sorted amplitude vector
+
+     else:
+         R = PSI_M.shape[1]
+         PHI_M_SIGMA_M = np.dot(D, PSI_M)
+         # Initialize the output
+         PHI_M = np.zeros((N_S, R))
+         SIGMA_M = np.zeros((R))
+
+         for i in tqdm(range(0, R)):
+             # Assign the norm as amplitude
+             if weights.size == 0:
+                 SIGMA_M[i] = np.linalg.norm(PHI_M_SIGMA_M[:, i])
+             else:
+                 SIGMA_M[i] = np.linalg.norm(PHI_M_SIGMA_M[:, i] * np.sqrt(weights))
+             # Normalize the columns to get the spatial modes
+             PHI_M[:, i] = PHI_M_SIGMA_M[:, i] / SIGMA_M[i]
+
+         Indices = np.flipud(np.argsort(SIGMA_M))  # find indices for sorting in decreasing order
+         Sorted_Sigmas = SIGMA_M[Indices]  # Sort all the sigmas
+         Phi_M = PHI_M[:, Indices]  # Sorted spatial structures matrix
+         Psi_M = PSI_M[:, Indices]  # Sorted temporal structures matrix
+         Sigma_M = Sorted_Sigmas  # Sorted amplitude vector
+
+     if SAVE:
+         # Saving results in the MODULO tmp folder
+         os.makedirs(FOLDER_OUT + '/mPOD/', exist_ok=True)
+         np.savez(FOLDER_OUT + "/mPOD/sorted_phis", Phi_M)
+         np.savez(FOLDER_OUT + "/mPOD/sorted_psis", Psi_M)
+         np.savez(FOLDER_OUT + "/mPOD/sorted_sigma", Sorted_Sigmas)
+
+     return Phi_M, Psi_M, Sigma_M
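
A sketch of the direct branch (MEMORY_SAVING=False); the temporal basis here is a random orthonormal stand-in, whereas in practice PSI_M comes from temporal_basis_mPOD:

import numpy as np
from modulo_vki.core._mpod_space import spatial_basis_mPOD

n_s, n_t, r = 64, 200, 10
D = np.random.randn(n_s, n_t)                     # hypothetical snapshot matrix
PSI_M, _ = np.linalg.qr(np.random.randn(n_t, r))  # stand-in orthonormal temporal basis
Phi_M, Psi_M, Sigma_M = spatial_basis_mPOD(D, PSI_M, N_T=n_t, N_PARTITIONS=1,
                                           N_S=n_s, MEMORY_SAVING=False,
                                           FOLDER_OUT='./', SAVE=False)
print(Sigma_M[:3])  # amplitudes, already sorted in decreasing order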
modulo_vki/core/_mpod_time.py ADDED
@@ -0,0 +1,154 @@
+ import os
+ import numpy as np
+ from scipy.signal import firwin  # To create FIR kernels
+ from tqdm import tqdm
+ from modulo_vki.utils._utils import conv_m, switch_eigs
+
+
+ def temporal_basis_mPOD(K, Nf, Ex, F_V, Keep, boundaries, MODE='reduced', dt=1, FOLDER_OUT: str = "./",
+                         MEMORY_SAVING: bool = False, SAT: int = 100, n_Modes=10,
+                         eig_solver: str = 'svd_sklearn_randomized'):
+     '''
+     This function computes the PSIs for the mPOD. In this implementation, a "dft-trick" is used
+     to avoid expensive SVDs. Randomized SVD is used by default for the diagonalization.
+
+     :param K: np.array.
+       Temporal correlation matrix
+     :param dt: float.
+       1/fs, the dt between snapshots. Units in seconds.
+     :param Nf: np.array.
+       Vector collecting the order of the FIR filters used in each scale.
+     :param Ex: int.
+       Extension at the boundaries of K to impose the boundary conditions (see boundaries). It must be at least as large as Nf.
+     :param F_V: np.array.
+       Frequency splitting vector, containing the frequencies of each scale (see article). If the time axis is in seconds, these frequencies are in Hz.
+     :param Keep: np.array.
+       Vector defining which scale to keep.
+     :param boundaries: str -> {'nearest', 'reflect', 'wrap' or 'extrap'}.
+       In order to avoid 'edge effects' if the time correlation matrix is not periodic, several boundary conditions can be used. Options are (from scipy.ndimage.convolve):
+       'reflect' (d c b a | a b c d | d c b a)  The input is extended by reflecting about the edge of the last pixel.
+       'nearest' (a a a a | a b c d | d d d d)  The input is extended by replicating the last pixel.
+       'wrap'    (a b c d | a b c d | a b c d)  The input is extended by wrapping around to the opposite edge.
+     :param MODE: str -> {'reduced', 'complete', 'r', 'raw'}.
+       As a final step of this algorithm, orthogonality is imposed via a QR factorization. This parameter defines how to perform that factorization, according to numpy.
+       Options: this is a wrapper to np.linalg.qr(_, mode=MODE). Check numpy's documentation.
+       If 'reduced', the final basis will not necessarily be full. If 'complete', the final basis will always be full.
+     :param FOLDER_OUT: str.
+       This is the directory where intermediate results will be stored if the memory saving is active. It will be ignored if MEMORY_SAVING=False.
+     :param MEMORY_SAVING: bool.
+       If memory saving is active, the results will be saved locally. Nevertheless, since Psi_M is usually not expensive, it will be returned.
+     :param SAT: int.
+       Maximum number of modes per scale. The user can decide how many modes to compute; otherwise, MODULO sets the default SAT=100.
+     :param n_Modes: int.
+       Total number of modes that will be finally exported.
+     :param eig_solver: str.
+       This is the eigenvalue solver that will be used. Refer to switch_eigs for the options.
+     :return PSI_M: np.array.
+       The mPOD PSIs. Yet to be sorted!
+     '''
+
+     if Ex < np.max(Nf):
+         raise RuntimeError("For the mPOD temporal basis computation, Ex must be larger than or equal to Nf")
+
+     # Convert F_V to frequencies normalized by the Nyquist frequency and initialise the number of scales M
+     Fs = 1 / dt
+     F_Bank_r = F_V * 2 / Fs
+     M = len(F_Bank_r)
+
+     # Loop over the scales to show the transfer functions
+     Psi_M = np.array([])
+     Lambda_M = np.array([])
+     n_t = K.shape[1]
+
+     # DFT-trick below: computing frequency bins.
+     Freqs = np.fft.fftfreq(n_t) * Fs
+
+     print("Filtering and Diagonalizing H scale: \n")
+
+     # Filtering and computing eigenvectors
+     for m in tqdm(range(0, M)):
+         # Generate the 1d filter for this scale
+         if m < 1:
+             if Keep[m] == 1:
+                 # Low-pass filter
+                 h_A = firwin(Nf[m], F_Bank_r[m], window='hamming')
+                 # Filter K_LP
+                 print('\n Filtering Largest Scale')
+                 K_L = conv_m(K=K, h=h_A, Ex=Ex, boundaries=boundaries)
+                 # R_K = np.linalg.matrix_rank(K_L, tol=None, hermitian=True)
+                 # We replace the rank computation with an estimate based on the number
+                 # of frequency bins below the cut-off frequency of the scale:
+                 F_CUT = F_Bank_r[m] * Fs / 2
+                 Indices = np.argwhere(np.abs(Freqs) < F_CUT)
+                 R_K = np.min([len(Indices), SAT])
+                 print(str(len(Indices)) + ' Modes Estimated')
+                 print('\n Diagonalizing Largest Scale')
+                 Psi_P, Lambda_P = switch_eigs(K_L, R_K, eig_solver)
+                 Psi_M = Psi_P; Lambda_M = Lambda_P
+             else:
+                 print('\n Scale ' + str(m) + ' skipped (Keep[' + str(m) + ']=0)')
+
+         elif m > 0 and m < M - 1:
+             if Keep[m] == 1:
+                 print('\n Working on Scale ' + str(m) + '/' + str(M))
+                 # This is the 1d kernel for the band-pass filter
+                 h1d_H = firwin(Nf[m], [F_Bank_r[m], F_Bank_r[m + 1]], pass_zero=False)  # Band-pass
+                 F_CUT1 = F_Bank_r[m] * Fs / 2
+                 F_CUT2 = F_Bank_r[m + 1] * Fs / 2
+                 Indices = np.argwhere((np.abs(Freqs) > F_CUT1) & (np.abs(Freqs) < F_CUT2))
+                 R_K = np.min([len(Indices), SAT])  # number of frequencies here
+                 print(str(len(Indices)) + ' Modes Estimated')
+                 K_H = conv_m(K, h1d_H, Ex, boundaries)
+                 print('Diagonalizing H Scale ' + str(m + 1) + '/' + str(M))
+                 # R_K = np.linalg.matrix_rank(K_H, tol=None, hermitian=True)
+                 Psi_P, Lambda_P = switch_eigs(K_H, R_K, eig_solver)  # Diagonalize scale
+                 if np.shape(Psi_M)[0] == 0:  # if this is the first contribution to the basis
+                     Psi_M = Psi_P; Lambda_M = Lambda_P
+                 else:
+                     Psi_M = np.concatenate((Psi_M, Psi_P), axis=1)  # append to the previous
+                     Lambda_M = np.concatenate((Lambda_M, Lambda_P), axis=0)
+             else:
+                 print('\n Scale ' + str(m) + ' skipped (Keep[' + str(m) + ']=0)')
+
+         else:  # this is the case m = M - 1: a high-pass filter
+             if Keep[m] == 1:
+                 print('Working on Scale ' + str(m) + '/' + str(M))
+                 # This is the 1d kernel for the high-pass filter (last scale)
+                 h1d_H = firwin(Nf[m], F_Bank_r[m], pass_zero=False)
+                 F_CUT1 = F_Bank_r[m] * Fs / 2
+                 Indices = np.argwhere((np.abs(Freqs) > F_CUT1))
+                 R_K = np.min([len(Indices), SAT])  # number of frequencies here
+                 print(str(len(Indices)) + ' Modes Estimated')
+                 print('Filtering H Scale ' + str(m + 1) + '/ ' + str(M))
+                 K_H = conv_m(K, h1d_H, Ex, boundaries)
+                 print('Diagonalizing H Scale ' + str(m + 1) + '/ ' + str(M))
+                 # R_K = np.linalg.matrix_rank(K_H, tol=None, hermitian=True)
+                 Psi_P, Lambda_P = switch_eigs(K_H, R_K, eig_solver)  # Diagonalize scale
+                 Psi_M = np.concatenate((Psi_M, Psi_P), axis=1)  # append to the previous
+                 Lambda_M = np.concatenate((Lambda_M, Lambda_P), axis=0)
+             else:
+                 print('\n Scale ' + str(m) + ' skipped (Keep[' + str(m) + ']=0)')
+
+     # Now order the scales
+     Indices = np.flip(np.argsort(Lambda_M))  # find indices for sorting in decreasing order
+     Psi_M = Psi_M[:, Indices]  # Sort the temporal structures
+     # Now we complete the basis via re-orthogonalization
+     print('\n QR Polishing...')
+     PSI_M, R = np.linalg.qr(Psi_M, mode=MODE)
+     print('Done!')
+
+     if MEMORY_SAVING:
+         os.makedirs(FOLDER_OUT + '/mPOD', exist_ok=True)
+         np.savez(FOLDER_OUT + '/mPOD/Psis', Psis=PSI_M)
+
+     return PSI_M[:, 0:n_Modes]
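
Finally, an end-to-end sketch chaining the two mPOD stages (all parameters invented for illustration; the filter orders in Nf should be odd for the high-pass scale, and Ex must be at least max(Nf)):

import numpy as np
from modulo_vki.core._k_matrix import CorrelationMatrix
from modulo_vki.core._mpod_time import temporal_basis_mPOD
from modulo_vki.core._mpod_space import spatial_basis_mPOD

F_S = 100.0                                # hypothetical sampling frequency (Hz)
D = np.random.randn(64, 400)               # hypothetical snapshot matrix
K = CorrelationMatrix(N_T=D.shape[1], D=D)
PSI_M = temporal_basis_mPOD(K, Nf=np.array([201, 201]), Ex=203,
                            F_V=np.array([10.0, 20.0]),   # scale-splitting frequencies (Hz)
                            Keep=np.array([1, 1]), boundaries='nearest',
                            MODE='reduced', dt=1 / F_S, n_Modes=10)
Phi_M, Psi_M, Sigma_M = spatial_basis_mPOD(D, PSI_M, N_T=D.shape[1], N_PARTITIONS=1,
                                           N_S=D.shape[0], MEMORY_SAVING=False,
                                           FOLDER_OUT='./', SAVE=False)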