modulo-vki 2.0.7__py3-none-any.whl → 2.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
modulo_vki/modulo.py CHANGED
@@ -1,48 +1,45 @@
1
- # Functional ones:
2
1
  import os
3
2
  import numpy as np
4
- from scipy import linalg
5
- from sklearn.metrics.pairwise import pairwise_kernels
6
- # To have fancy loading bar
3
+ from numpy import linalg as LA
7
4
  from tqdm import tqdm
8
5
 
9
- # All the functions from the modulo package
10
- from modulo_vki.core._dft import dft_fit
6
+ from modulo_vki.core._k_matrix import CorrelationMatrix, spectral_filter, kernelized_K
7
+ from modulo_vki.core.temporal_structures import dft, temporal_basis_mPOD, Temporal_basis_POD
8
+ from modulo_vki.core.spatial_structures import Spatial_basis_POD, spatial_basis_mPOD
11
9
  from modulo_vki.core._dmd_s import dmd_s
12
- from modulo_vki.core._k_matrix import CorrelationMatrix
13
- from modulo_vki.core._mpod_space import spatial_basis_mPOD
14
- from modulo_vki.core._mpod_time import temporal_basis_mPOD
15
- from modulo_vki.core._pod_space import Spatial_basis_POD
16
- from modulo_vki.core._pod_time import Temporal_basis_POD
17
- from modulo_vki.core._spod_s import compute_SPOD_s
18
- from modulo_vki.core._spod_t import compute_SPOD_t
19
- from modulo_vki.utils._utils import switch_svds
20
10
 
21
- from modulo_vki.utils.read_db import ReadData
11
+ from modulo_vki.core.utils import segment_and_fft, pod_from_dhat, apply_weights, switch_svds
12
+ from sklearn.metrics.pairwise import pairwise_kernels
13
+
22
14
 
23
15
  class ModuloVKI:
24
16
  """
25
- MODULO (MODal mULtiscale pOd) is a software developed at the von Karman Institute to perform Multiscale
26
- Modal Analysis of numerical and experimental data using the Multiscale Proper Orthogonal Decomposition (mPOD).
27
-
28
- Theoretical foundation can be found at:
29
- https://arxiv.org/abs/1804.09646
30
-
31
- Presentation of the MODULO framework available here:
32
- https://arxiv.org/pdf/2004.12123.pdf
33
-
34
- YouTube channel with hands-on tutorials can be found at:
35
- https://youtube.com/playlist?list=PLEJZLD0-4PeKW6Ze984q08bNz28GTntkR
36
-
37
- All the codes so far assume that the dataset is equally spaced both in space (i.e. along a Cartesian grid)
38
- and in time. The extension to non-uniformly sampled data will be included in future releases.
39
-
40
-
17
+ MODULO (MODal mULtiscale pOd) is a software developed at the von Karman Institute
18
+ to perform Multiscale Modal Analysis using Multiscale Proper Orthogonal Decomposition (mPOD)
19
+ on numerical and experimental data.
20
+
21
+ References
22
+ ----------
23
+ - Theoretical foundation:
24
+ https://arxiv.org/abs/1804.09646
25
+
26
+ - MODULO framework presentation:
27
+ https://arxiv.org/pdf/2004.12123.pdf
28
+
29
+ - Hands-on tutorial videos:
30
+ https://youtube.com/playlist?list=PLEJZLD0-4PeKW6Ze984q08bNz28GTntkR
31
+
32
+ Notes
33
+ -----
34
+ MODULO operations assume the dataset is uniformly spaced in both space
35
+ (Cartesian grid) and time. For non-cartesian grids, the user must
36
+ provide a weights vector `[w_1, w_2, ..., w_Ns]` where `w_i = area_cell_i / area_grid`.
41
37
  """
42
38
 
43
- def __init__(self, data: np.array,
39
+ def __init__(self,
40
+ data: np.ndarray,
44
41
  N_PARTITIONS: int = 1,
45
- FOLDER_OUT='./',
42
+ FOLDER_OUT: str = './',
46
43
  SAVE_K: bool = False,
47
44
  N_T: int = 100,
48
45
  N_S: int = 200,
@@ -50,42 +47,50 @@ class ModuloVKI:
50
47
  dtype: str = 'float32',
51
48
  eig_solver: str = 'eigh',
52
49
  svd_solver: str = 'svd_sklearn_truncated',
53
- weights: np.array = np.array([])):
50
+ weights: np.ndarray = np.array([])):
54
51
  """
55
- This function initializes the main parameters needed by MODULO.
56
-
57
- Attributes:
58
-
59
- :param data: This is the data matrix to factorize. It is a np.array with
60
- shape ((N_S, N_T)). If the data has not yet been prepared in the form of a np.array,
61
- the method ReadData in MODULO can be used (see ReadData). If the memory saving is active (N_PARTITIONS >1), the folder with partitions should be prepared.
62
- If the memory saving is active, this entry = None. The data matrix is assumed to big to be saved and the
52
+ Initialize the MODULO analysis.
63
53
 
64
- :param N_PARTITIONS: If memory saving feature is active, this parameter sets the number of partitions
65
- that will be used to store the data matrices during the computations.
54
+ Parameters
55
+ ----------
56
+ data : np.ndarray
57
+ Data matrix of shape (N_S, N_T) to factorize. If not yet formatted, use the `ReadData`
58
+ method provided by MODULO. When memory saving mode (N_PARTITIONS > 1) is active,
59
+ set this parameter to None and use prepared partitions instead.
66
60
 
67
- :param FOLDER_OUT: Folder in which the output will be stored.The output includes the matrices Phi, Sigma and Psi (optional) and temporary files
68
- used for some of the calculations (e.g.: for memory saving).
61
+ N_PARTITIONS : int, default=1
62
+ Number of partitions used for memory-saving computation. If set greater than 1,
63
+ data must be partitioned in advance and `data` set to None.
69
64
 
70
- :param SAVE_K: A flag deciding if the matrix will be stored in the disk (in FOLDER_OUT/correlation_matrix) or not.
71
- Default option is 'False'.
65
+ FOLDER_OUT : str, default='./'
66
+ Directory path to store output (Phi, Sigma, Psi matrices) and intermediate
67
+ calculation files (e.g., partitions, correlation matrix).
72
68
 
73
- :param N_T: Number of time steps, must be given when N_PARTITIONS >1
69
+ SAVE_K : bool, default=False
70
+ Whether to store the correlation matrix K to disk in
71
+ `FOLDER_OUT/correlation_matrix`.
74
72
 
75
- :param N_S: Number of grid points, must be given when N_PARTITIONS >1
73
+ N_T : int, default=100
74
+ Number of temporal snapshots. Mandatory when using partitions (N_PARTITIONS > 1).
76
75
 
77
- :param n_Modes: Number of Modes to be computed
76
+ N_S : int, default=200
77
+ Number of spatial grid points. Mandatory when using partitions (N_PARTITIONS > 1).
78
78
 
79
- :param dtype: Cast "data" with type dtype
79
+ n_Modes : int, default=10
80
+ Number of modes to compute.
80
81
 
81
- :param eig_solver: Numerical solver to compute the eigen values
82
+ dtype : str, default='float32'
83
+ Data type for casting input data.
82
84
 
83
- :param svd_solver: Numerical solver to compute the Single Value Decomposition
84
-
85
- :param weights: weight vector [w_i,....,w_{N_s}] where w_i = area_cell_i/area_grid
86
- Only needed if grid is non-uniform.
85
+ eig_solver : str, default='eigh'
86
+ Solver for eigenvalue decomposition.
87
87
 
88
+ svd_solver : str, default='svd_sklearn_truncated'
89
+ Solver for Singular Value Decomposition (SVD).
88
90
 
91
+ weights : np.ndarray, default=np.array([])
92
+ Weights vector `[w_1, w_2, ..., w_Ns]` to account for non-uniform spatial grids.
93
+ Defined as `w_i = area_cell_i / area_grid`. Leave empty for uniform grids.
89
94
  """
90
95
 
91
96
  print("MODULO (MODal mULtiscale pOd) is a software developed at the von Karman Institute to perform "
@@ -95,6 +100,38 @@ class ModuloVKI:
95
100
  raise TypeError(
96
101
  "Please check that your database is in an numpy array format. If D=None, then you must have memory saving (N_PARTITIONS>1)")
97
102
 
103
+ if N_PARTITIONS > 1:
104
+ self.MEMORY_SAVING = True
105
+ else:
106
+ self.MEMORY_SAVING = False
107
+
108
+ # Assign the number of modes
109
+ self.n_Modes = n_Modes
110
+ # If particular needs, override choice for svd and eigen solve
111
+ self.svd_solver = svd_solver.lower()
112
+ self.eig_solver = eig_solver.lower()
113
+ possible_svds = ['svd_numpy', 'svd_scipy_sparse', 'svd_sklearn_randomized', 'svd_sklearn_truncated']
114
+ possible_eigs = ['svd_sklearn_randomized', 'eigsh', 'eigh']
115
+
116
+ if self.svd_solver not in possible_svds:
117
+ raise NotImplementedError("The requested SVD solver is not implemented. Please pick one of the following:"
118
+ "which belongs to: \n {}".format(possible_svds))
119
+
120
+ if self.eig_solver not in possible_eigs:
121
+ raise NotImplementedError("The requested EIG solver is not implemented. Please pick one of the following: "
122
+ " \n {}".format(possible_eigs))
123
+
124
+ # if N_PARTITIONS >= self.N_T:
125
+ # raise AttributeError("The number of requested partitions is greater of the total columns (N_T). Please,"
126
+ # "try again.")
127
+
128
+ self.N_PARTITIONS = N_PARTITIONS
129
+ self.FOLDER_OUT = FOLDER_OUT
130
+ self.SAVE_K = SAVE_K
131
+
132
+ if self.MEMORY_SAVING:
133
+ os.makedirs(self.FOLDER_OUT, exist_ok=True)
134
+
98
135
  # Load the data matrix
99
136
  if isinstance(data, np.ndarray):
100
137
  # Number of points in time and space
@@ -106,8 +143,9 @@ class ModuloVKI:
106
143
  self.D = None # D is never saved when N_partitions >1
107
144
  self.N_S = N_S # so N_S and N_t must be given as parameters of modulo
108
145
  self.N_T = N_T
109
-
110
- # Load and applied the weights to the D matrix
146
+
147
  '''If the grid is not Cartesian, ensure the inner product is properly defined using the weights.'''
148
+
111
149
  if weights.size != 0:
112
150
  if len(weights) == self.N_S:
113
151
  print("The weights you have input have the size of the columns of D \n"
@@ -129,407 +167,299 @@ class ModuloVKI:
129
167
  else:
130
168
  self.Dstar = None
131
169
  else:
170
+
132
171
  print("Modulo assumes you have a uniform grid. "
133
172
  "If not, please give the weights as parameters of MODULO!")
134
173
  self.weights = weights
135
174
  self.Dstar = self.D
136
-
137
- if N_PARTITIONS > 1:
138
- self.MEMORY_SAVING = True
139
- else:
140
- self.MEMORY_SAVING = False
141
-
142
- # Assign the number of modes
143
- self.n_Modes = n_Modes
144
- # If particular needs, override choice for svd and eigen solve
145
- self.svd_solver = svd_solver.lower()
146
- self.eig_solver = eig_solver.lower()
147
- possible_svds = ['svd_numpy', 'svd_scipy_sparse', 'svd_sklearn_randomized', 'svd_sklearn_truncated']
148
- possible_eigs = ['svd_sklearn_randomized', 'eigsh', 'eigh']
149
-
150
- if self.svd_solver not in possible_svds:
151
- raise NotImplementedError("The requested SVD solver is not implemented. Please pick one of the following:"
152
- "which belongs to: \n {}".format(possible_svds))
153
-
154
- if self.eig_solver not in possible_eigs:
155
- raise NotImplementedError("The requested EIG solver is not implemented. Please pick one of the following: "
156
- " \n {}".format(possible_eigs))
157
-
158
- # if N_PARTITIONS >= self.N_T:
159
- # raise AttributeError("The number of requested partitions is greater of the total columns (N_T). Please,"
160
- # "try again.")
161
-
162
- self.N_PARTITIONS = N_PARTITIONS
163
-
164
- self.FOLDER_OUT = FOLDER_OUT
165
-
166
- self.SAVE_K = SAVE_K
167
-
168
- if self.MEMORY_SAVING:
169
- os.makedirs(self.FOLDER_OUT, exist_ok=True)
170
-
171
- def _temporal_basis_POD(self,
172
- SAVE_T_POD: bool = False):
175
+
176
+
177
+ def DFT(self, F_S, SAVE_DFT=False):
173
178
  """
174
- This method computes the temporal structure for the Proper Orthogonal Decomposition (POD) computation.
175
- The theoretical background of the POD is briefly recalled here:
176
-
177
- https://youtu.be/8fhupzhAR_M
178
-
179
- The diagonalization of K is computed via Singular Value Decomposition (SVD).
180
- A speedup is available if the user is on Linux machine, in which case MODULO
181
- exploits the power of JAX and its Numpy implementation.
182
-
183
- For more on JAX:
184
-
185
- https://github.com/google/jax
186
- https://jax.readthedocs.io/en/latest/jax.numpy.html
187
-
188
- If the user is on a Win machine, Linux OS can be used using
189
- the Windows Subsystem for Linux.
190
-
191
- For more on WSL:
192
- https://docs.microsoft.com/en-us/windows/wsl/install-win10
193
-
194
- :param SAVE_T_POD: bool
195
- Flag deciding if the results will be stored on the disk.
196
- Default value is True, to limit the RAM's usage.
197
- Note that this might cause a minor slowdown for the loading,
198
- but the tradeoff seems worthy.
199
- This attribute is passed to the MODULO class.
200
-
201
-
202
- POD temporal basis are returned if MEMORY_SAVING is not active. Otherwise all the results are saved on disk.
203
-
204
- :return Psi_P: np.array
205
- POD Psis
206
-
207
- :return Sigma_P: np.array
208
- POD Sigmas. If needed, Lambdas can be easily computed recalling that: Sigma_P = np.sqrt(Lambda_P)
179
+ Computes the Discrete Fourier Transform (DFT) of the dataset.
180
+
181
+ For detailed guidance, see the tutorial video:
182
+ https://www.youtube.com/watch?v=8fhupzhAR_M&list=PLEJZLD0-4PeKW6Ze984q08bNz28GTntkR&index=2
183
+
184
+ Parameters
185
+ ----------
186
+ F_S : float
187
+ Sampling frequency in Hz.
188
+
189
+ SAVE_DFT : bool, default=False
190
+ If True, saves the computed DFT outputs to disk under:
191
+ `self.FOLDER_OUT/MODULO_tmp`.
192
+
193
+ Returns
194
+ -------
195
+ Phi_F : np.ndarray
196
+ Spatial DFT modes (spatial structures matrix).
197
+
198
+ Psi_F : np.ndarray
199
+ Temporal DFT modes (temporal structures matrix).
200
+
201
+ Sigma_F : np.ndarray
202
+ Modal amplitudes.
209
203
  """
204
+ if self.D is None:
205
+ D = np.load(self.FOLDER_OUT + '/MODULO_tmp/data_matrix/database.npz')['D']
206
+ SAVE_DFT = True
207
+ Phi_F, Psi_F, Sigma_F = dft(self.N_T, F_S, D, self.FOLDER_OUT, SAVE_DFT=SAVE_DFT)
210
208
 
211
- if self.MEMORY_SAVING:
212
- K = np.load(self.FOLDER_OUT + "/correlation_matrix/k_matrix.npz")['K']
213
- SAVE_T_POD = True
214
209
  else:
215
- K = self.K
216
-
217
- Psi_P, Sigma_P = Temporal_basis_POD(K, SAVE_T_POD,
218
- self.FOLDER_OUT, self.n_Modes, self.eig_solver)
219
-
220
- del K
221
- return Psi_P, Sigma_P if not self.MEMORY_SAVING else None
222
-
223
- def _spatial_basis_POD(self, Psi_P, Sigma_P,
224
- SAVE_SPATIAL_POD: bool = True):
225
- """
226
- This method computes the spatial structure for the Proper Orthogonal Decomposition (POD) computation.
227
- The theoretical background of the POD is briefly recalled here:
228
-
229
- https://youtu.be/8fhupzhAR_M
230
-
231
- :param Psi_P: np.array
232
- POD temporal basis
233
- :param Sigma_P: np.array
234
- POD Sigmas
235
- :param SAVE_SPATIAL_POD: bool
236
- Flag deciding if the results will be stored on the disk.
237
- Default value is True, to limit the RAM's usage.
238
- Note that this might cause a minor slowdown for the loading,
239
- but the tradeoff seems worthy.
240
- This attribute is passed to the MODULO class.
241
-
242
- :return Phi_P: np.array
243
- POD Phis
244
-
245
- """
246
-
247
- self.SAVE_SPATIAL_POD = SAVE_SPATIAL_POD
248
-
249
- if self.MEMORY_SAVING:
250
- '''Loading temporal basis from disk. They're already in memory otherwise.'''
251
- Psi_P = np.load(self.FOLDER_OUT + 'POD/temporal_basis.npz')['Psis']
252
- Sigma_P = np.load(self.FOLDER_OUT + 'POD/temporal_basis.npz')['Sigmas']
253
-
254
- Phi_P = Spatial_basis_POD(self.D, N_T=self.N_T, PSI_P=Psi_P, Sigma_P=Sigma_P,
255
- MEMORY_SAVING=self.MEMORY_SAVING, FOLDER_OUT=self.FOLDER_OUT,
256
- N_PARTITIONS=self.N_PARTITIONS, SAVE_SPATIAL_POD=SAVE_SPATIAL_POD)
257
-
258
- return Phi_P if not self.MEMORY_SAVING else None
259
-
260
- def _temporal_basis_mPOD(self, K, Nf, Ex, F_V, Keep, boundaries, MODE, dt, K_S=False):
261
- """
262
- This function computes the temporal structures of each scale in the mPOD, as in step 4 of the algorithm
263
- ref: Multi-Scale Proper Orthogonal Decomposition of Complex Fluid Flows - M. A. Mendez et al.
264
-
265
- :param K: np.array
266
- Temporal correlation matrix
267
- :param Nf: np.array
268
- Order of the FIR filters that are used to isolate each of the scales
269
- :param Ex: int
270
- Extension at the boundaries of K to impose the boundary conditions (see boundaries)
271
- It must be at least as Nf.
272
- :param F_V: np.array
273
- Frequency splitting vector, containing the frequencies of each scale (see article).
274
- If the time axis is in seconds, these frequencies are in Hz.
275
- :param Keep: np.array
276
- Scale keep
277
- :param boundaries: str -> {'nearest', 'reflect', 'wrap' or 'extrap'}
278
- Define the boundary conditions for the filtering process, in order to avoid edge effects.
279
- The available boundary conditions are the classic ones implemented for image processing:
280
- nearest', 'reflect', 'wrap' or 'extrap'. See also https://docs.scipy.org/doc/scipy/reference/tutorial/ndimage.html
281
- :param MODE: str -> {‘reduced’, ‘complete’, ‘r’, ‘raw’}
282
- A QR factorization is used to enforce the orthonormality of the mPOD basis, to compensate
283
- for the non-ideal frequency response of the filters.
284
- The option MODE from np.linalg.qr carries out this operation.
285
-
286
- :return PSI_M: np.array
287
- Multiscale POD temporal basis
288
-
289
- """
290
-
291
- if self.MEMORY_SAVING:
292
- K = np.load(self.FOLDER_OUT + "/correlation_matrix/k_matrix.npz")['K']
293
-
294
- PSI_M = temporal_basis_mPOD(K=K, Nf=Nf, Ex=Ex, F_V=F_V, Keep=Keep, boundaries=boundaries,
295
- MODE=MODE, dt=dt, FOLDER_OUT=self.FOLDER_OUT,
296
- n_Modes=self.n_Modes, K_S=False,
297
- MEMORY_SAVING=self.MEMORY_SAVING, SAT=self.SAT, eig_solver=self.eig_solver)
298
-
299
- return PSI_M if not self.MEMORY_SAVING else None
300
-
301
- def _spatial_basis_mPOD(self, D, PSI_M, SAVE):
302
- """
303
- This function implements the last step of the mPOD algorithm:
304
- completing the decomposition. Here we project from psis, to get phis and sigmas
305
-
306
- :param D: np.array
307
- data matrix
308
- :param PSI_M: np.array
309
- temporal basis for the mPOD. Remember that it is not possible to impose both basis matrices
310
- phis and psis: given one of the two, the other is univocally determined.
311
- :param SAVE: bool
312
- if True, MODULO saves the results on disk.
313
-
314
- :return Phi_M: np.array
315
- mPOD Phis (Matrix of spatial structures)
316
- :return Psi_M: np.array
317
- mPOD Psis (Matrix of temporal structures)
318
- :return Sigma_M: np.array
319
- mPOD Sigmas (vector of amplitudes, i.e. the diagonal of Sigma_M)
320
-
321
- """
322
-
323
- Phi_M, Psi_M, Sigma_M = spatial_basis_mPOD(D, PSI_M, N_T=self.N_T, N_PARTITIONS=self.N_PARTITIONS,
324
- N_S=self.N_S, MEMORY_SAVING=self.MEMORY_SAVING,
325
- FOLDER_OUT=self.FOLDER_OUT,
326
- SAVE=SAVE)
327
-
328
- return Phi_M, Psi_M, Sigma_M
329
-
330
- def compute_mPOD(self, Nf, Ex, F_V, Keep, SAT, boundaries, MODE, dt, SAVE=False):
331
- """
332
- This function computes the temporal structures of each scale in the mPOD, as in step 4 of the algorithm
333
- ref: Multi-Scale Proper Orthogonal Decomposition of Complex Fluid Flows - M. A. Mendez et al.
334
-
335
- :param K: np.array
336
- Temporal correlation matrix
337
-
338
- :param Nf: np.array
339
- Order of the FIR filters that are used to isolate each of the scales
340
-
341
- :param Ex: int
342
- Extension at the boundaries of K to impose the boundary conditions (see boundaries)
343
- It must be at least as Nf.
344
-
345
- :param F_V: np.array
346
- Frequency splitting vector, containing the frequencies of each scale (see article).
347
- If the time axis is in seconds, these frequencies are in Hz.
348
-
349
- :param Keep: np.array
350
- Scale keep
351
-
352
- :param boundaries: str -> {'nearest', 'reflect', 'wrap' or 'extrap'}
353
- Define the boundary conditions for the filtering process, in order to avoid edge effects.
354
- The available boundary conditions are the classic ones implemented for image processing:
355
- nearest', 'reflect', 'wrap' or 'extrap'. See also https://docs.scipy.org/doc/scipy/reference/tutorial/ndimage.html
356
-
357
- :param MODE: str -> {‘reduced’, ‘complete’, ‘r’, ‘raw’}
358
- A QR factorization is used to enforce the orthonormality of the mPOD basis, to compensate
359
- for the non-ideal frequency response of the filters.
360
- The option MODE from np.linalg.qr carries out this operation.
361
-
362
- :param SAT: Maximum number of modes per scale.
363
- Only used for mPOD (max number of modes per scale)
364
-
365
- :param dt: float
366
- temporal step
367
-
368
- :return Phi_M: np.array
369
- mPOD Phis (Matrix of spatial structures)
370
- :return Psi_M: np.array
371
- mPOD Psis (Matrix of temporal structures)
372
- :return Sigma_M: np.array
373
- mPOD Sigmas (vector of amplitudes, i.e. the diagonal of Sigma_M
210
+ Phi_F, Psi_F, Sigma_F = dft(self.N_T, F_S, self.D,
211
+ self.FOLDER_OUT, SAVE_DFT=SAVE_DFT)
374
212
 
213
+ return Phi_F, Psi_F, Sigma_F
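# Usage sketch for DFT (illustrative, not part of the package source): the only
# required argument is the sampling frequency; the stand-in data and F_S value
# below are arbitrary.
import numpy as np
from modulo_vki.modulo import ModuloVKI

D = np.random.rand(64, 1000).astype('float32')       # (N_S, N_T) snapshots
m = ModuloVKI(data=D, n_Modes=10)
Phi_F, Psi_F, Sigma_F = m.DFT(F_S=1000.0)             # sampling frequency in Hz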
214
+
215
+ def POD(self, SAVE_T_POD: bool = False, mode: str = 'K', verbose=True):
375
216
  """
376
-
377
- print('Computing correlation matrix D matrix...')
378
- self.K = CorrelationMatrix(self.N_T, self.N_PARTITIONS,
379
- self.MEMORY_SAVING,
380
- self.FOLDER_OUT, self.SAVE_K, D=self.Dstar)
381
-
382
- if self.MEMORY_SAVING:
383
- self.K = np.load(self.FOLDER_OUT + '/correlation_matrix/k_matrix.npz')['K']
384
-
385
- print("Computing Temporal Basis...")
386
-
387
- PSI_M = temporal_basis_mPOD(K=self.K, Nf=Nf, Ex=Ex, F_V=F_V, Keep=Keep, boundaries=boundaries,
388
- MODE=MODE, dt=dt, FOLDER_OUT=self.FOLDER_OUT,
389
- n_Modes=self.n_Modes, MEMORY_SAVING=self.MEMORY_SAVING, SAT=SAT,
390
- eig_solver=self.eig_solver)
391
-
392
- print("Done.")
393
-
394
- if hasattr(self, 'D'): # if self.D is available:
395
- print('Computing Phi from D...')
396
- Phi_M, Psi_M, Sigma_M = spatial_basis_mPOD(self.D, PSI_M, N_T=self.N_T, N_PARTITIONS=self.N_PARTITIONS,
397
- N_S=self.N_S, MEMORY_SAVING=self.MEMORY_SAVING,
398
- FOLDER_OUT=self.FOLDER_OUT,
399
- SAVE=SAVE)
400
-
401
- else: # if not, the memory saving is on and D will not be used. We pass a dummy D
402
- print('Computing Phi from partitions...')
403
- Phi_M, Psi_M, Sigma_M = spatial_basis_mPOD(np.array([1]), PSI_M, N_T=self.N_T,
404
- N_PARTITIONS=self.N_PARTITIONS,
405
- N_S=self.N_S, MEMORY_SAVING=self.MEMORY_SAVING,
406
- FOLDER_OUT=self.FOLDER_OUT,
407
- SAVE=SAVE)
408
-
409
- print("Done.")
410
-
411
- return Phi_M, Psi_M, Sigma_M
412
-
413
- def compute_POD_K(self, SAVE_T_POD: bool = False):
217
+ Compute the Proper Orthogonal Decomposition (POD) of a dataset.
218
+
219
+ The POD is computed using the snapshot approach, working on the
220
+ temporal correlation matrix. The eigenvalue solver for this
221
+ matrix is defined in the `eig_solver` attribute of the class.
222
+
223
+ Parameters
224
+ ----------
225
+ SAVE_T_POD : bool, optional
226
+ Flag to save time-dependent POD data. Default is False.
227
+ mode : str, optional
228
+ The mode of POD computation. Must be either 'K' or 'svd'.
229
+ 'K' (default) uses the snapshot method on the temporal
230
+ correlation matrix.
231
+ 'svd' uses the SVD decomposition (full dataset must fit in memory).
232
+
233
+ Returns
234
+ -------
235
+ Phi_P : numpy.ndarray
236
+ POD spatial modes.
237
+ Psi_P : numpy.ndarray
238
+ POD temporal modes.
239
+ Sigma_P : numpy.ndarray
240
+ POD singular values (eigenvalues are Sigma_P**2).
241
+ Raises
242
+ ------
243
+ AssertionError
244
+ If `mode` is not 'k' or 'svd'.
245
+
246
+ Notes
247
+ -----
248
+ A brief recall of the theoretical background of the POD is
249
+ available at https://youtu.be/8fhupzhAR_M
414
250
  """
415
- This method computes the Proper Orthogonal Decomposition (POD) of a dataset
416
- using the snapshot approach, i.e. working on the temporal correlation matrix.
417
- The eig solver for K is defined in 'eig_solver'
418
- The theoretical background of the POD is briefly recalled here:
419
-
420
- https://youtu.be/8fhupzhAR_M
421
-
422
- :return Psi_P: np.array
423
- POD Psis
424
-
425
- :return Sigma_P: np.array
426
- POD Sigmas. If needed, Lambdas can be easily computed recalling that: Sigma_P = np.sqrt(Lambda_P)
427
-
428
- :return Phi_P: np.array
429
- POD Phis
430
- """
431
-
432
- print('Computing correlation matrix...')
433
- self.K = CorrelationMatrix(self.N_T, self.N_PARTITIONS,
434
- self.MEMORY_SAVING,
435
- self.FOLDER_OUT, self.SAVE_K, D=self.Dstar, weights=self.weights)
436
-
437
- if self.MEMORY_SAVING:
438
- self.K = np.load(self.FOLDER_OUT + '/correlation_matrix/k_matrix.npz')['K']
439
-
440
- print("Computing Temporal Basis...")
441
- Psi_P, Sigma_P = Temporal_basis_POD(self.K, SAVE_T_POD,
442
- self.FOLDER_OUT, self.n_Modes, eig_solver=self.eig_solver)
443
- print("Done.")
444
- print("Computing Spatial Basis...")
445
-
446
- if self.MEMORY_SAVING: # if self.D is available:
447
- print('Computing Phi from partitions...')
448
- Phi_P = Spatial_basis_POD(np.array([1]), N_T=self.N_T,
449
- PSI_P=Psi_P,
450
- Sigma_P=Sigma_P,
451
- MEMORY_SAVING=self.MEMORY_SAVING,
452
- FOLDER_OUT=self.FOLDER_OUT,
453
- N_PARTITIONS=self.N_PARTITIONS)
454
-
455
- else: # if not, the memory saving is on and D will not be used. We pass a dummy D
456
- print('Computing Phi from D...')
457
- Phi_P = Spatial_basis_POD(self.D, N_T=self.N_T,
458
- PSI_P=Psi_P,
459
- Sigma_P=Sigma_P,
460
- MEMORY_SAVING=self.MEMORY_SAVING,
461
- FOLDER_OUT=self.FOLDER_OUT,
462
- N_PARTITIONS=self.N_PARTITIONS)
463
- print("Done.")
251
+
252
+ mode = mode.lower()
253
+ assert mode in ('k', 'svd'), "POD mode must be either 'K' (snapshot method via the temporal correlation matrix) or 'svd'."
254
+
255
+ if mode == 'k':
256
+ if verbose:
257
+ print('Computing correlation matrix...')
258
+ self.K = CorrelationMatrix(self.N_T, self.N_PARTITIONS,
259
+ self.MEMORY_SAVING,
260
+ self.FOLDER_OUT, self.SAVE_K,
261
+ D=self.Dstar, weights=self.weights,
262
+ verbose=verbose)
263
+
264
+ if self.MEMORY_SAVING:
265
+ self.K = np.load(self.FOLDER_OUT + '/correlation_matrix/k_matrix.npz')['K']
266
+ if verbose:
267
+ print("Computing Temporal Basis...")
268
+ Psi_P, Sigma_P = Temporal_basis_POD(self.K, SAVE_T_POD,
269
+ self.FOLDER_OUT, self.n_Modes, eig_solver=self.eig_solver,verbose=verbose)
270
+
271
+ if verbose:
272
+ print("Done.")
273
+ print("Computing Spatial Basis...")
274
+
275
+ if self.MEMORY_SAVING: # if self.D is available:
276
+ if verbose:
277
+ print('Computing Phi from partitions...')
278
+ Phi_P = Spatial_basis_POD(np.array([1]), N_T=self.N_T,
279
+ PSI_P=Psi_P,
280
+ Sigma_P=Sigma_P,
281
+ MEMORY_SAVING=self.MEMORY_SAVING,
282
+ FOLDER_OUT=self.FOLDER_OUT,
283
+ N_PARTITIONS=self.N_PARTITIONS,
284
+ verbose=verbose)
285
+
286
+ else: # if not, the memory saving is on and D will not be used. We pass a dummy D
287
+ if verbose:
288
+ print('Computing Phi from D...')
289
+ Phi_P = Spatial_basis_POD(self.D, N_T=self.N_T,
290
+ PSI_P=Psi_P,
291
+ Sigma_P=Sigma_P,
292
+ MEMORY_SAVING=self.MEMORY_SAVING,
293
+ FOLDER_OUT=self.FOLDER_OUT,
294
+ N_PARTITIONS=self.N_PARTITIONS,
295
+ verbose=verbose)
296
+ if verbose:
297
+ print("Done.")
298
+
299
+ else:
300
+ if self.MEMORY_SAVING:
301
+
302
+ if self.N_T % self.N_PARTITIONS != 0:
303
+ tot_blocks_col = self.N_PARTITIONS + 1
304
+ else:
305
+ tot_blocks_col = self.N_PARTITIONS
306
+
307
+ # Prepare the D matrix again
308
+ D = np.zeros((self.N_S, self.N_T))
309
+ R1 = 0
310
+
311
+ # print(' \n Reloading D from tmp...')
312
+ for k in tqdm(range(tot_blocks_col)):
313
+ di = np.load(self.FOLDER_OUT + f"/data_partitions/di_{k + 1}.npz")['di']
314
+ R2 = R1 + np.shape(di)[1]
315
+ D[:, R1:R2] = di
316
+ R1 = R2
317
+
318
+ # Now that we have D back, we can proceed with the SVD approach
319
+ Phi_P, Psi_P, Sigma_P = switch_svds(D, self.n_Modes, self.svd_solver)
320
+
321
+ else: # self.MEMORY_SAVING:
322
+ Phi_P, Psi_P, Sigma_P = switch_svds(self.D, self.n_Modes, self.svd_solver)
464
323
 
465
324
  return Phi_P, Psi_P, Sigma_P
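# Usage sketch for POD (illustrative, not part of the package source): both
# computation paths documented above, on a random stand-in dataset.
import numpy as np
from modulo_vki.modulo import ModuloVKI

D = np.random.rand(64, 1000).astype('float32')
m = ModuloVKI(data=D, n_Modes=10)
Phi_P, Psi_P, Sigma_P = m.POD(mode='K')      # snapshot method (temporal correlation matrix)
Phi_P, Psi_P, Sigma_P = m.POD(mode='svd')    # direct SVD of D (dataset must fit in memory)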
466
325
 
467
- def compute_POD_svd(self, SAVE_T_POD: bool = False):
326
+
327
+ def mPOD(self, Nf, Ex, F_V, Keep, SAT, boundaries, MODE, dt, SAVE=False, K_in=None, Sigma_type='accurate', conv_type: str = '1d', verbose=True):
468
328
  """
469
- This method computes the Proper Orthogonal Decomposition (POD) of a dataset
470
- using the SVD decomposition. The svd solver is defined by 'svd_solver'.
471
- Note that in this case, the memory saving option is of no help, since
472
- the SVD must be performed over the entire dataset.
473
-
474
- https://youtu.be/8fhupzhAR_M
475
-
476
- :return Psi_P: np.array
477
- POD Psis
478
-
479
- :return Sigma_P: np.array
480
- POD Sigmas. If needed, Lambdas can be easily computed recalling that: Sigma_P = np.sqrt(Lambda_P)
481
-
482
- :return Phi_P: np.array
483
- POD Phis
329
+ Multi-Scale Proper Orthogonal Decomposition (mPOD) of a signal.
330
+
331
+ Parameters
332
+ ----------
333
+ Nf : np.array
334
+ Orders of the FIR filters used to isolate each scale. Must be of size len(F_V) + 1.
335
+
336
+ Ex : int
337
+ Extension length at the boundaries to impose boundary conditions (must be at least as large as Nf).
338
+
339
+ F_V : np.array
340
+ Frequency splitting vector, containing the cutoff frequencies for each scale. Units depend on the temporal step `dt`.
341
+
342
+ Keep : np.array
343
+ Boolean array indicating scales to retain. Must be of size len(F_V) + 1.
344
+
345
+ SAT : int
346
+ Maximum number of modes per scale.
347
+
348
+ boundaries : {'nearest', 'reflect', 'wrap', 'extrap'}
349
+ Boundary conditions for filtering to avoid edge effects. Refer to:
350
+ https://docs.scipy.org/doc/scipy/reference/tutorial/ndimage.html
351
+
352
+ MODE : {'reduced', 'complete', 'r', 'raw'}
353
+ Mode option for QR factorization, used to enforce orthonormality of the mPOD basis to account for non-ideal filter responses.
354
+
355
+ dt : float
356
+ Temporal step size between snapshots.
357
+
358
+ SAVE : bool, default=False
359
+ Whether to save intermediate results to disk.
360
+
361
+ K_in : np.array, default = none
362
+ K matrix. If none, compute it with D.
363
+
364
+ Sigma_type : {'accurate', 'fast'}
365
+ If accurate, recompute the Sigmas after QR polishing. Slightly slower than the fast option in which the Sigmas are not recomputed.
366
+
367
+ conv_type : {'1d', '2d'}
368
+ If 1d, compute Kf applying 1d FIR filters to the columns and then rows of the extended K.
369
+ More robust against windowing effects but more expensive (useful for modes that are slow compared to the observation time).
370
+ If 2d, compute Kf applying a 2d FIR filter on the extended K.
371
+
372
+ Returns
373
+ -------
374
+ Phi_M : np.array
375
+ Spatial mPOD modes (spatial structures matrix).
376
+
377
+ Psi_M : np.array
378
+ Temporal mPOD modes (temporal structures matrix).
379
+
380
+ Sigma_M : np.array
381
+ Modal amplitudes.
484
382
  """
485
- # If Memory saving is active, we must load back the data.
486
- # This process is memory demanding. Different SVD solver will handle this differently.
487
-
488
- if self.MEMORY_SAVING:
489
- if self.N_T % self.N_PARTITIONS != 0:
490
- tot_blocks_col = self.N_PARTITIONS + 1
491
- else:
492
- tot_blocks_col = self.N_PARTITIONS
493
-
494
- # Prepare the D matrix again
495
- D = np.zeros((self.N_S, self.N_T))
496
- R1 = 0
497
-
498
- # print(' \n Reloading D from tmp...')
499
- for k in tqdm(range(tot_blocks_col)):
500
- di = np.load(self.FOLDER_OUT + f"/data_partitions/di_{k + 1}.npz")['di']
501
- R2 = R1 + np.shape(di)[1]
502
- D[:, R1:R2] = di
503
- R1 = R2
504
-
505
- # Now that we have D back, we can proceed with the SVD approach
506
- Phi_P, Psi_P, Sigma_P = switch_svds(D, self.n_Modes, self.svd_solver)
507
-
508
-
509
- else: # self.MEMORY_SAVING:
510
- Phi_P, Psi_P, Sigma_P = switch_svds(self.D, self.n_Modes, self.svd_solver)
383
+
384
+ if K_in is None:
385
+ if verbose:
386
+ print('Computing the correlation matrix from D...')
387
+ self.K = CorrelationMatrix(self.N_T, self.N_PARTITIONS,
388
+ self.MEMORY_SAVING,
389
+ self.FOLDER_OUT, self.SAVE_K, D=self.Dstar,
390
+ verbose=verbose)
391
+
392
+ if self.MEMORY_SAVING:
393
+ self.K = np.load(self.FOLDER_OUT + '/correlation_matrix/k_matrix.npz')['K']
394
+ else:
395
+ if verbose:
396
+ print('Using K matrix provided by the user...')
397
+ self.K = K_in
398
+
399
+ if verbose:
400
+ print("Computing Temporal Basis...")
401
+ PSI_M,SIGMA_M = temporal_basis_mPOD(
402
+ K=self.K, Nf=Nf, Ex=Ex, F_V=F_V, Keep=Keep, boundaries=boundaries,
403
+ MODE=MODE, dt=dt, FOLDER_OUT=self.FOLDER_OUT,
404
+ n_Modes=self.n_Modes, MEMORY_SAVING=self.MEMORY_SAVING, SAT=SAT,
405
+ eig_solver=self.eig_solver, conv_type=conv_type, verbose=verbose
406
+ )
407
+ if verbose:
408
+ print("Temporal Basis computed.")
409
+
410
+ if hasattr(self, 'D'):
411
+ if verbose:
412
+ print('Computing spatial modes Phi from D...')
413
+ Phi_M, Psi_M, Sigma_M = spatial_basis_mPOD(
414
+ self.D, PSI_M, N_T=self.N_T, N_PARTITIONS=self.N_PARTITIONS,
415
+ N_S=self.N_S, MEMORY_SAVING=self.MEMORY_SAVING,
416
+ FOLDER_OUT=self.FOLDER_OUT, SAVE=SAVE, SIGMA_TYPE=Sigma_type, SIGMA_M=SIGMA_M
417
+ )
418
+ else:
419
+ if verbose:
420
+ print('Computing spatial modes Phi from partitions...')
421
+ Phi_M, Psi_M, Sigma_M = spatial_basis_mPOD(
422
+ np.array([1]), PSI_M, N_T=self.N_T,
423
+ N_PARTITIONS=self.N_PARTITIONS, N_S=self.N_S,
424
+ MEMORY_SAVING=self.MEMORY_SAVING,
425
+ FOLDER_OUT=self.FOLDER_OUT, SAVE=SAVE,SIGMA_TYPE=Sigma_type, SIGMA_M=SIGMA_M
426
+ )
427
+ if verbose:
428
+ print("Spatial modes computed.")
511
429
 
512
- return Phi_P, Psi_P, Sigma_P
430
+ return Phi_M, Psi_M, Sigma_M
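# Usage sketch for mPOD (illustrative, not part of the package source). The
# frequency splitting vector, filter orders, extension and SAT below are
# arbitrary choices for a stand-in dataset sampled at 1 kHz; they only show how
# the documented arguments fit together.
import numpy as np
from modulo_vki.modulo import ModuloVKI

D = np.random.rand(64, 1000).astype('float32')
m = ModuloVKI(data=D, n_Modes=10)
F_S = 1000.0                              # sampling frequency [Hz]
F_V = np.array([50.0, 200.0])             # cut-off frequencies -> 3 scales
Nf = np.array([201, 201, 201])            # FIR order per scale (len(F_V) + 1 entries)
Keep = np.array([1, 1, 1])                # retain all scales
Phi_M, Psi_M, Sigma_M = m.mPOD(Nf=Nf, Ex=403, F_V=F_V, Keep=Keep, SAT=5,
                               boundaries='nearest', MODE='reduced', dt=1.0 / F_S)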
513
431
 
514
- def compute_DMD_PIP(self, SAVE_T_DMD: bool = True, F_S=1):
432
+ def DMD(self, SAVE_T_DMD: bool = True, F_S: float = 1.0, verbose: bool = True):
515
433
  """
516
- This method computes the Dynamic Mode Decomposition of the data
517
- using the algorithm in https://arxiv.org/abs/1312.0041, which is basically the same as
518
- the PIP algorithm proposed in https://www.sciencedirect.com/science/article/abs/pii/0167278996001248
519
- See v1 of this paper https://arxiv.org/abs/2001.01971 for more details (yes, reviewers did ask to omit this detail in v2).
520
-
521
- :return Phi_D: np.array
522
- DMD Phis. As for the DFT, these are complex.
523
-
524
- :return Lambda_D: np.array
525
- DMD Eigenvalues (of the reduced propagator). These are complex.
526
-
527
- :return freqs: np.array
528
- Frequencies (in Hz, associated to the DMD modes)
529
-
530
- :return a0s: np.array
531
- Initial Coefficients of the Modes
532
-
434
+ Compute the Dynamic Mode Decomposition (DMD) of the dataset.
435
+
436
+ This implementation follows the algorithm in Tu et al. (2014) [1]_, which is
437
+ essentially the same as Penland (1996) [2]_. For
438
+ additional low-level details see v1 of Mendez et al. (2020) [3]_.
439
+
440
+ Parameters
441
+ ----------
442
+ SAVE_T_DMD : bool, optional
443
+ If True, save time-dependent DMD results to disk. Default is True.
444
+ F_S : float, optional
445
+ Sampling frequency in Hz. Default is 1.0.
446
+
447
+ Returns
448
+ -------
449
+ Phi_D : numpy.ndarray
450
+ Complex DMD modes.
451
+ Lambda_D : numpy.ndarray
452
+ Complex eigenvalues of the reduced-order propagator.
453
+ freqs : numpy.ndarray
454
+ Frequencies (Hz) associated with each DMD mode.
455
+ a0s : numpy.ndarray
456
+ Initial amplitudes (coefficients) of the DMD modes.
457
+
458
+ References
459
+ ----------
460
+ .. [1] https://arxiv.org/abs/1312.0041
461
+ .. [2] https://www.sciencedirect.com/science/article/pii/0167278996001248
462
+ .. [3] https://arxiv.org/abs/2001.01971
533
463
  """
534
464
 
535
465
  # If Memory saving is active, we must load back the data
@@ -552,277 +482,416 @@ class ModuloVKI:
552
482
 
553
483
  # Compute the DMD
554
484
  Phi_D, Lambda, freqs, a0s = dmd_s(D[:, 0:self.N_T - 1],
555
- D[:, 1:self.N_T], self.n_Modes, F_S, svd_solver=self.svd_solver)
485
+ D[:, 1:self.N_T], self.n_Modes, F_S, svd_solver=self.svd_solver,verbose=verbose)
556
486
 
557
487
  else:
558
488
  Phi_D, Lambda, freqs, a0s = dmd_s(self.D[:, 0:self.N_T - 1],
559
489
  self.D[:, 1:self.N_T], self.n_Modes, F_S, SAVE_T_DMD=SAVE_T_DMD,
560
- svd_solver=self.svd_solver, FOLDER_OUT=self.FOLDER_OUT)
490
+ svd_solver=self.svd_solver, FOLDER_OUT=self.FOLDER_OUT,verbose=verbose)
561
491
 
562
492
  return Phi_D, Lambda, freqs, a0s
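# Usage sketch for DMD (illustrative, not part of the package source): the method
# pairs consecutive snapshots internally, so only the sampling frequency is needed.
import numpy as np
from modulo_vki.modulo import ModuloVKI

D = np.random.rand(64, 1000).astype('float32')
m = ModuloVKI(data=D, n_Modes=10)
Phi_D, Lambda_D, freqs, a0s = m.DMD(F_S=1000.0, SAVE_T_DMD=False)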
563
493
 
564
- def compute_DFT(self, F_S, SAVE_DFT=False):
494
+ def SPOD(
495
+ self,
496
+ mode: str,
497
+ F_S: float,
498
+ n_Modes: int = 10,
499
+ SAVE_SPOD: bool = True,
500
+ **kwargs
501
+ ):
565
502
  """
566
- This method computes the Discrete Fourier Transform of your data.
567
-
568
- Check out this tutorial: https://www.youtube.com/watch?v=8fhupzhAR_M&list=PLEJZLD0-4PeKW6Ze984q08bNz28GTntkR&index=2
569
-
570
- :param F_S: float,
571
- Sampling Frequency [Hz]
572
- :param SAVE_DFT: bool,
573
- If True, MODULO will save the output in self.FOLDER OUT/MODULO_tmp
574
-
575
- :return: Sorted_Freqs: np.array,
576
- Sorted Frequencies
577
- :return Phi_F: np.array,
578
- DFT Phis
579
- :return Sigma_F: np.array,
580
- DFT Sigmas
503
+ Unified Spectral POD interface.
504
+
505
+ Parameters
506
+ ----------
507
+ mode : {'sieber', 'towne'}
508
+ Which SPOD algorithm to run.
509
+ F_S : float
510
+ Sampling frequency [Hz].
511
+ n_Modes : int, optional
512
+ Number of modes to compute, by default 10.
513
+ SAVE_SPOD : bool, optional
514
+ Whether to save outputs, by default True.
515
+ **kwargs
516
+ For mode='sieber', accepts:
517
+ - N_O (int): semi-order of the diagonal filter
518
+ - f_c (float): cutoff frequency
519
+ For mode='towne', accepts:
520
+ - L_B (int): block length
521
+ - O_B (int): block overlap
522
+ - n_processes (int): number of parallel workers
523
+
524
+ Returns
525
+ -------
526
+ Phi : ndarray
527
+ Spatial modes.
528
+ Sigma : ndarray
529
+ Modal amplitudes.
530
+ Aux : tuple
531
+ Additional output: the vector of positive frequencies (Hz) for mode='towne'; note that mode='sieber' instead returns (Phi, Psi, Sigma).
581
532
  """
582
- if self.D is None:
583
- D = np.load(self.FOLDER_OUT + '/MODULO_tmp/data_matrix/database.npz')['D']
584
- SAVE_DFT = True
585
- Sorted_Freqs, Phi_F, SIGMA_F = dft_fit(self.N_T, F_S, D, self.FOLDER_OUT, SAVE_DFT=SAVE_DFT)
586
-
533
+ mode = mode.lower()
534
+ if mode == 'sieber':
535
+ N_O = kwargs.pop('N_O', 100)
536
+ f_c = kwargs.pop('f_c', 0.3)
537
+
538
+ return self.compute_SPOD_s(
539
+ N_O=N_O,
540
+ f_c=f_c,
541
+ n_Modes=n_Modes,
542
+ SAVE_SPOD=SAVE_SPOD
543
+ )
544
+
545
+ elif mode == 'towne':
546
+ L_B = kwargs.pop('L_B', 500)
547
+ O_B = kwargs.pop('O_B', 250)
548
+ n_processes = kwargs.pop('n_processes', 1)
549
+
550
+ # Load or reuse data matrix
551
+
552
+ if self.D is None:
553
+ D = np.load(f"{self.FOLDER_OUT}/MODULO_tmp/data_matrix/database.npz")['D']
554
+ else:
555
+ D = self.D
556
+
557
+ # Segment and FFT - fallback n_processes in case of misassignment
558
+ D_hat, freqs_pos, n_processes = segment_and_fft(
559
+ D=D,
560
+ F_S=F_S,
561
+ L_B=L_B,
562
+ O_B=O_B,
563
+ n_processes=n_processes
564
+ )
565
+
566
+ return self.compute_SPOD_t(D_hat=D_hat,
567
+ freq_pos=freqs_pos,
568
+ n_Modes=n_Modes,
569
+ SAVE_SPOD=SAVE_SPOD,
570
+ svd_solver=self.svd_solver,
571
+ n_processes=n_processes)
572
+
587
573
  else:
588
- Sorted_Freqs, Phi_F, SIGMA_F = dft_fit(self.N_T, F_S, self.D, self.FOLDER_OUT, SAVE_DFT=SAVE_DFT)
574
+ raise ValueError("mode must be 'sieber' or 'towne'")
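# Usage sketch for the unified SPOD interface (illustrative, not part of the
# package source). Keyword names follow the **kwargs documented above; the block
# sizes and filter settings are arbitrary for a 1000-snapshot stand-in dataset.
import numpy as np
from modulo_vki.modulo import ModuloVKI

D = np.random.rand(64, 1000).astype('float32')
m = ModuloVKI(data=D, n_Modes=10)
# Sieber et al. variant: returns (Phi, Psi, Sigma)
Phi_s, Psi_s, Sigma_s = m.SPOD(mode='sieber', F_S=1000.0, N_O=100, f_c=0.3, n_Modes=5)
# Towne et al. variant: returns (Phi, Sigma, positive frequencies)
Phi_t, Sigma_t, freqs = m.SPOD(mode='towne', F_S=1000.0, L_B=256, O_B=128, n_Modes=5)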
589
575
 
590
- return Sorted_Freqs, Phi_F, SIGMA_F
591
576
 
592
- def compute_SPOD_t(self, F_S, L_B=500, O_B=250, n_Modes=10, SAVE_SPOD=True):
577
+ def compute_SPOD_t(self, D_hat, freq_pos, n_Modes=10, SAVE_SPOD=True, svd_solver=None,
578
+ n_processes=1):
593
579
  """
594
- This method computes the Spectral POD of your data. This is the one by Towne et al
595
- (https://www.cambridge.org/core/journals/journal-of-fluid-mechanics/article/abs/spectral-proper-orthogonal-decomposition-and-its-relationship-to-dynamic-mode-decomposition-and-resolvent-analysis/EC2A6DF76490A0B9EB208CC2CA037717)
596
-
597
- :param F_S: float,
598
- Sampling Frequency [Hz]
599
- :param L_B: float,
600
- lenght of the chunks
601
- :param O_B: float,
602
- Overlapping between blocks in the chunk
603
- :param n_Modes: float,
604
- number of modes to be computed for each frequency
605
- :param SAVE_SPOD: bool,
606
- If True, MODULO will save the output in self.FOLDER OUT/MODULO_tmp
607
- :return Psi_P_hat: np.array
608
- Spectra of the SPOD Modes
609
- :return Sigma_P: np.array
610
- Amplitudes of the SPOD Modes.
611
- :return Phi_P: np.array
612
- SPOD Phis
613
- :return freq: float
614
- frequency bins for the Spectral POD
615
-
616
-
580
+ Compute the CSD-based Spectral POD (Towne et al.) from a precomputed FFT tensor.
581
+
582
+ Parameters
583
+ ----------
584
+ D_hat : ndarray, shape (n_s, n_freqs, n_blocks)
585
+ FFT of each block, only nonnegative frequencies retained.
586
+ freq_pos : ndarray, shape (n_freqs,)
587
+ Positive frequency values (Hz) corresponding to D_hat’s second axis.
588
+ n_Modes : int, optional
589
+ Number of SPOD modes per frequency bin. Default is 10.
590
+ SAVE_SPOD : bool, optional
591
+ If True, save outputs under `self.FOLDER_OUT/MODULO_tmp`. Default is True.
592
+ svd_solver : str or None, optional
593
+ Which SVD solver to use (passed to `switch_svds`), by default None.
594
+ n_processes : int, optional
595
+ Number of parallel workers for the POD step. Default is 1 (serial).
596
+
597
+ Returns
598
+ -------
599
+ Phi_SP : ndarray, shape (n_s, n_Modes, n_freqs)
600
+ Spatial SPOD modes at each positive frequency.
601
+ Sigma_SP : ndarray, shape (n_Modes, n_freqs)
602
+ Modal energies per frequency bin.
603
+ freq_pos : ndarray, shape (n_freqs,)
604
+ The positive frequency vector (Hz), returned unchanged.
617
605
  """
618
- if self.D is None:
619
- D = np.load(self.FOLDER_OUT + '/MODULO_tmp/data_matrix/database.npz')['D']
620
- Phi_SP, Sigma_SP, Freqs_Pos = compute_SPOD_t(D, F_S, L_B=L_B, O_B=O_B,
621
- n_Modes=n_Modes, SAVE_SPOD=SAVE_SPOD,
622
- FOLDER_OUT=self.FOLDER_OUT, possible_svds=self.svd_solver)
623
- else:
624
- Phi_SP, Sigma_SP, Freqs_Pos = compute_SPOD_t(self.D, F_S, L_B=L_B, O_B=O_B,
625
- n_Modes=n_Modes, SAVE_SPOD=SAVE_SPOD,
626
- FOLDER_OUT=self.FOLDER_OUT, possible_svds=self.svd_solver)
627
-
628
- return Phi_SP, Sigma_SP, Freqs_Pos
629
-
630
- # New Decomposition: SPOD f
631
-
632
- def compute_SPOD_s(self, F_S, N_O=100, f_c=0.3, n_Modes=10, SAVE_SPOD=True):
606
+ # Perform the POD (parallel if requested)
607
+ # received D_hat_f, this is now just a POD on the transversal direction of the tensor,
608
+ # e.g. the frequency domain.
609
+ n_freqs = len(freq_pos)
610
+
611
+ # also here we can parallelize
612
+ Phi_SP, Sigma_SP = pod_from_dhat(D_hat=D_hat, n_modes=n_Modes, n_freqs=n_freqs,
613
+ svd_solver=self.svd_solver, n_processes=n_processes)
614
+
615
+ # Optionally save the results
616
+ if SAVE_SPOD:
617
+ folder_out = self.FOLDER_OUT + "MODULO_tmp/"
618
+ os.makedirs(folder_out, exist_ok=True)
619
+ np.savez(
620
+ folder_out + "spod_towne.npz",
621
+ Phi=Phi_SP,
622
+ Sigma=Sigma_SP,
623
+ freqs=freq_pos
624
+ )
625
+
626
+ return Phi_SP, Sigma_SP, freq_pos
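# Usage sketch for compute_SPOD_t (illustrative, not part of the package source):
# the FFT tensor is prepared with segment_and_fft, which is imported at the top
# of this module; block length and overlap below are arbitrary.
import numpy as np
from modulo_vki.modulo import ModuloVKI
from modulo_vki.core.utils import segment_and_fft

D = np.random.rand(64, 1000).astype('float32')
m = ModuloVKI(data=D, n_Modes=10)
D_hat, freqs_pos, n_proc = segment_and_fft(D=D, F_S=1000.0, L_B=256, O_B=128, n_processes=1)
Phi_SP, Sigma_SP, freqs = m.compute_SPOD_t(D_hat=D_hat, freq_pos=freqs_pos, n_Modes=5)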
627
+
628
+
629
+
630
+ def compute_SPOD_s(self, N_O=100, f_c=0.3, n_Modes=10, SAVE_SPOD=True):
633
631
  """
634
- This method computes the Spectral POD of your data.
635
- This is the one by Sieber
636
- et al (https://www.cambridge.org/core/journals/journal-of-fluid-mechanics/article/abs/spectral-proper-orthogonal-decomposition/DCD8A6EDEFD56F5A9715DBAD38BD461A)
637
-
638
- :param F_S: float,
639
- Sampling Frequency [Hz]
640
- :param N_o: float,
641
- Semi-Order of the diagonal filter.
642
- Note that the filter order will be 2 N_o +1 (to make sure it is odd)
643
- :param f_c: float,
644
- cut-off frequency of the diagonal filter
645
- :param n_Modes: float,
646
- number of modes to be computed
647
- :param SAVE_SPOD: bool,
648
- If True, MODULO will save the output in self.FOLDER OUT/MODULO_tmp
649
- :return Psi_P: np.array
650
- SPOD Psis
651
- :return Sigma_P: np.array
652
- SPOD Sigmas.
653
- :return Phi_P: np.array
654
- SPOD Phis
632
+ Compute the filtered‐covariance Spectral POD (Sieber et al.) of your data.
633
+
634
+ This implementation follows Sieber et al. (2016), which applies a zero‐phase
635
+ diagonal filter to the time‐lag covariance and then performs a single POD
636
+ on the filtered covariance matrix.
637
+
638
+ Parameters
639
+ ----------
640
+ N_O : int, optional
641
+ Semi‐order of the diagonal FIR filter. The true filter length is
642
+ 2*N_O+1, by default 100.
643
+ f_c : float, optional
644
+ Normalized cutoff frequency of the diagonal filter (0 < f_c < 0.5),
645
+ by default 0.3.
646
+ n_Modes : int, optional
647
+ Number of SPOD modes to compute, by default 10.
648
+ SAVE_SPOD : bool, optional
649
+ If True, save output under `self.FOLDER_OUT/MODULO_tmp`, by default True.
650
+
651
+ Returns
652
+ -------
653
+ Phi_sP : numpy.ndarray, shape (n_S, n_Modes)
654
+ Spatial SPOD modes.
655
+ Psi_sP : numpy.ndarray, shape (n_t, n_Modes)
656
+ Temporal SPOD modes (filtered).
657
+ Sigma_sP : numpy.ndarray, shape (n_Modes,)
658
+ Modal energies (eigenvalues of the filtered covariance).
655
659
  """
656
-
657
660
  if self.D is None:
658
- D = np.load(self.FOLDER_OUT + '/MODULO_tmp/data_matrix/database.npz')['D']
659
-
660
- self.K = CorrelationMatrix(self.N_T, self.N_PARTITIONS, self.MEMORY_SAVING,
661
- self.FOLDER_OUT, self.SAVE_K, D=D)
661
+ D = np.load(self.FOLDER_OUT + '/MODULO_tmp/data_matrix/database.npz')['D']
662
+ else:
663
+ D = self.D
664
+
665
+ self.K = CorrelationMatrix(self.N_T, self.N_PARTITIONS, self.MEMORY_SAVING,
666
+ self.FOLDER_OUT, self.SAVE_K, D=D)
667
+
668
+ # additional step: diagonal spectral filter of K
669
+ K_F = spectral_filter(self.K, N_o=N_O, f_c=f_c)
670
+
671
+ # and then proceed with normal POD procedure
672
+ Psi_P, Sigma_P = Temporal_basis_POD(K_F, SAVE_SPOD, self.FOLDER_OUT, n_Modes)
673
+
674
+ # but with a normalization aspect to handle the non-orthogonality of the SPOD modes
675
+ Phi_P = Spatial_basis_POD(D, N_T=self.K.shape[0],
676
+ PSI_P=Psi_P, Sigma_P=Sigma_P,
677
+ MEMORY_SAVING=self.MEMORY_SAVING,
678
+ FOLDER_OUT=self.FOLDER_OUT,
679
+ N_PARTITIONS=self.N_PARTITIONS,rescale=True)
680
+
662
681
 
663
- Phi_sP, Psi_sP, Sigma_sP = compute_SPOD_s(D, self.K, F_S, self.N_S, self.N_T, N_O, f_c,
664
- n_Modes, SAVE_SPOD, self.FOLDER_OUT, self.MEMORY_SAVING,
665
- self.N_PARTITIONS)
682
+ return Phi_P, Psi_P, Sigma_P
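# Usage sketch for compute_SPOD_s (illustrative, not part of the package source):
# the filter semi-order and cutoff below are the documented defaults.
import numpy as np
from modulo_vki.modulo import ModuloVKI

D = np.random.rand(64, 1000).astype('float32')
m = ModuloVKI(data=D, n_Modes=10)
Phi_sP, Psi_sP, Sigma_sP = m.compute_SPOD_s(N_O=100, f_c=0.3, n_Modes=5)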
666
683
 
667
- else:
668
- self.K = CorrelationMatrix(self.N_T, self.N_PARTITIONS, self.MEMORY_SAVING,
669
- self.FOLDER_OUT, self.SAVE_K, D=self.D)
670
-
671
- Phi_sP, Psi_sP, Sigma_sP = compute_SPOD_s(self.D, self.K, F_S, self.N_S, self.N_T, N_O, f_c,
672
- n_Modes, SAVE_SPOD, self.FOLDER_OUT, self.MEMORY_SAVING,
673
- self.N_PARTITIONS)
674
-
675
- # if self.D is None:
676
- # D = np.load(self.FOLDER_OUT + '/MODULO_tmp/data_matrix/database.npz')['D']
677
- # SAVE_SPOD = True
678
- # # TODO : Lorenzo check this stuff
679
- # else:
680
- # D = self.D
681
- #
682
- # n_s = self.N_S # Repeat variable for debugging compatibility
683
- # n_t = self.N_T
684
- #
685
- # print('Computing Correlation Matrix \n')
686
- #
687
- # # The first step is the same as the POD: we compute the correlation matrix
688
- # K = CorrelationMatrix(self.N_T, self.N_PARTITIONS, self.MEMORY_SAVING,
689
- # self.FOLDER_OUT, D=self.D)
690
- #
691
- # # 1. Initialize the extended
692
- # K_e = np.zeros((n_t + 2 * N_o, n_t + 2 * N_o))
693
- # # From which we clearly know that:
694
- # K_e[N_o:n_t + N_o, N_o:n_t + N_o] = K
695
- #
696
- # # 2. We fill the edges ( a bit of repetition but ok.. )
697
- #
698
- # # Row-wise, Upper part
699
- # for i in range(0, N_o):
700
- # K_e[i, i:i + n_t] = K[0, :]
701
- #
702
- # # Row-wise, bottom part
703
- # for i in range(N_o + n_t, n_t + 2 * N_o):
704
- # K_e[i, i - n_t + 1:i + 1] = K[-1, :]
705
- #
706
- # # Column-wise, left part
707
- # for j in range(0, N_o):
708
- # K_e[j:j + n_t, j] = K[:, 0]
709
- #
710
- # # Column-wise, right part
711
- # for j in range(N_o + n_t, 2 * N_o + n_t):
712
- # K_e[j - n_t + 1:j + 1, j] = K[:, -1]
713
- #
714
- # # Now you create the diagonal kernel in 2D
715
- # h_f = firwin(N_o, f_c) # Kernel in 1D
716
- # # This is also something that must be put in a separate file:
717
- # # To cancel the phase lag we make this non-causal with a symmetric
718
- # # shift, hence with zero padding as equal as possible on both sides
719
- # n_padd_l = round((n_t - N_o) / 2);
720
- # n_padd_r = n_t - N_o - n_padd_l
721
- #
722
- # h_f_pad = np.pad(h_f, (n_padd_l, n_padd_r)) # symmetrically padded kernel in 1D
723
- # h_f_2 = np.diag(h_f_pad)
724
- #
725
- # # Finally the filtered K is just
726
- # K_F = signal.fftconvolve(K_e, h_f_2, mode='same')[N_o:n_t + N_o, N_o:n_t + N_o]
727
- # # plt.plot(np.diag(K),'b--'); plt.plot(np.diag(K_F_e),'r')
728
- #
729
- # # From now on it's just POD:
730
- # Psi_P, Sigma_P = Temporal_basis_POD(K_F, SAVE_SPOD,
731
- # self.FOLDER_OUT, self.n_Modes)
732
- #
733
- # Phi_P = Spatial_basis_POD(self.D, N_T=self.N_T, PSI_P=Psi_P, Sigma_P=Sigma_P,
734
- # MEMORY_SAVING=self.MEMORY_SAVING, FOLDER_OUT=self.FOLDER_OUT,
735
- # N_PARTITIONS=self.N_PARTITIONS)
736
-
737
- return Phi_sP, Psi_sP, Sigma_sP
738
-
739
- def compute_kPOD(self, M_DIST=[1, 10], k_m=0.1, cent=True,
740
- n_Modes=10, alpha=1e-6, metric='rbf', K_out=False):
684
+
685
+ def kPOD(self, M_DIST=[1, 10],
686
+ k_m=0.1, cent=True,
687
+ n_Modes=10,
688
+ alpha=1e-6,
689
+ metric='rbf',
690
+ K_out=False, SAVE_KPOD=False):
741
691
  """
742
- This function implements the kernel PCA as described in the VKI course https://www.vki.ac.be/index.php/events-ls/events/eventdetail/552/-/online-on-site-hands-on-machine-learning-for-fluid-dynamics-2023
743
-
744
- The computation of the kernel function is carried out as in https://arxiv.org/pdf/2208.07746.pdf.
745
-
746
-
747
- :param M_DIST: array,
748
- position of the two snapshots that will be considered to
749
- estimate the minimal k. They should be the most different ones.
750
- :param k_m: float,
751
- minimum value for the kernelized correlation
752
- :param alpha: float
753
- regularization for K_zeta
754
- :param cent: bool,
755
- if True, the matrix K is centered. Else it is not
756
- :param n_Modes: float,
757
- number of modes to be computed
758
- :param metric: string,
759
- This identifies the metric for the kernel matrix. It is a wrapper to 'pairwise_kernels' from sklearn.metrics.pairwise
760
- Note that different metrics would need different set of parameters. For the moment, only rbf was tested; use any other option at your peril !
761
- :param K_out: bool,
762
- If true, the matrix K is also exported as a fourth output.
763
- :return Psi_xi: np.array
764
- kPOD's Psis
765
- :return Sigma_xi: np.array
766
- kPOD's Sigmas.
767
- :return Phi_xi: np.array
768
- kPOD's Phis
769
- :return K_zeta: np.array
770
- Kernel Function from which the decomposition is computed.
771
- (exported only if K_out=True)
772
-
692
+ Perform kernel PCA (kPOD) on snapshot data, as in the VKI Machine Learning for Fluid Dynamics course.
693
+
694
+ Parameters
695
+ ----------
696
+ M_DIST : array-like of shape (2,), optional
697
+ Indices of two snapshots used to estimate the minimal kernel value.
698
+ These should be the most “distant” snapshots in your dataset. Default is [1, 10].
699
+ k_m : float, optional
700
+ Minimum value for the kernelized correlation. Default is 0.1.
701
+ cent : bool, optional
702
+ If True, center the kernel matrix before decomposition. Default is True.
703
+ n_Modes : int, optional
704
+ Number of principal modes to compute. Default is 10.
705
+ alpha : float, optional
706
+ Regularization parameter for the modified kernel matrix \(K_{\zeta}\). Default is 1e-6.
707
+ metric : str, optional
708
+ Kernel function identifier (passed to `sklearn.metrics.pairwise.pairwise_kernels`).
709
+ Only 'rbf' has been tested; other metrics may require different parameters. Default is 'rbf'.
710
+ K_out : bool, optional
711
+ If True, also return the full kernel matrix \(K\). Default is False.
712
+ SAVE_KPOD : bool, optional
713
+ If True, save the computed kPOD results to disk. Default is False.
714
+
715
+ Returns
716
+ -------
717
+ Psi_xi : ndarray of shape (n_samples, n_Modes)
718
+ The kPOD principal component time coefficients.
719
+ Sigma_xi : ndarray of shape (n_Modes,)
720
+ The kPOD singular values (eigenvalues of the centered kernel).
721
+ Phi_xi : ndarray of shape (n_S, n_Modes)
722
+ The mapped eigenvectors (principal modes) in feature space.
723
+ K_zeta : ndarray of shape (n_samples, n_samples)
724
+ The (regularized and centered) kernel matrix used for decomposition.
725
+ Only returned if `K_out` is True.
726
+
727
+ Notes
728
+ -----
729
+ - Follows the hands-on ML for Fluid Dynamics tutorial by VKI
730
+ (https://www.vki.ac.be/index.php/events-ls/events/eventdetail/552).
731
+ - Kernel computed as described in
732
+ Horenko et al., *Machine learning for dynamics and model reduction*, arXiv:2208.07746.
773
733
 
774
734
  """
735
+
775
736
  if self.D is None:
776
737
  D = np.load(self.FOLDER_OUT + '/MODULO_tmp/data_matrix/database.npz')['D']
777
738
  else:
778
739
  D = self.D
779
740
 
780
741
  # Compute Euclidean distances
781
- i, j = M_DIST;
782
- n_s, n_t = np.shape(D)
742
+ i, j = M_DIST
743
+
783
744
  M_ij = np.linalg.norm(D[:, i] - D[:, j]) ** 2
784
745
 
785
- gamma = -np.log(k_m) / M_ij
786
-
787
- K_zeta = pairwise_kernels(D.T, metric='rbf', gamma=gamma)
788
- print('Kernel K ready')
789
-
790
- # Compute the Kernel Matrix
791
- n_t = np.shape(D)[1]
792
- # Center the Kernel Matrix (if cent is True):
793
- if cent:
794
- H = np.eye(n_t) - 1 / n_t * np.ones_like(K_zeta)
795
- K_zeta = H @ K_zeta @ H.T
796
- print('K_zeta centered')
797
- # Diagonalize and Sort
798
- lambdas, Psi_xi = linalg.eigh(K_zeta + alpha * np.eye(n_t), subset_by_index=[n_t - n_Modes, n_t - 1])
799
- lambdas, Psi_xi = lambdas[::-1], Psi_xi[:, ::-1];
800
- Sigma_xi = np.sqrt(lambdas);
801
- print('K_zeta diagonalized')
802
- # Encode
803
- # Z_xi=np.diag(Sigma_xi)@Psi_xi.T
804
- # We compute the spatial structures as projections of the data
805
- # onto the Psi_xi!
806
- R = Psi_xi.shape[1]
807
- PHI_xi_SIGMA_xi = np.dot(D, (Psi_xi))
808
- # Initialize the output
809
- PHI_xi = np.zeros((n_s, R))
810
- SIGMA_xi = np.zeros((R))
811
-
812
- for i in tqdm(range(0, R)):
813
- # Assign the norm as amplitude
814
- SIGMA_xi[i] = np.linalg.norm(PHI_xi_SIGMA_xi[:, i])
815
- # Normalize the columns of C to get spatial modes
816
- PHI_xi[:, i] = PHI_xi_SIGMA_xi[:, i] / SIGMA_xi[i]
817
-
818
- Indices = np.flipud(np.argsort(SIGMA_xi)) # find indices for sorting in decreasing order
819
- Sorted_Sigmas = SIGMA_xi[Indices] # Sort all the sigmas
820
- Phi_xi = PHI_xi[:, Indices] # Sorted Spatial Structures Matrix
821
- Psi_xi = Psi_xi[:, Indices] # Sorted Temporal Structures Matrix
822
- Sigma_xi = Sorted_Sigmas # Sorted Amplitude Matrix
823
- print('Phi_xi computed')
746
+ K_r = kernelized_K(D=D, M_ij=M_ij, k_m=k_m, metric=metric, cent=cent, alpha=alpha)
747
+
748
+ Psi_xi, Sigma_xi = Temporal_basis_POD(K=K_r, n_Modes=n_Modes, eig_solver='eigh')
749
+
750
+ PHI_xi_SIGMA_xi = D @ Psi_xi
751
+
752
+ Sigma_xi = np.linalg.norm(PHI_xi_SIGMA_xi, axis=0) # (R,)
753
+ Phi_xi = PHI_xi_SIGMA_xi / Sigma_xi[None, :] # (n_s, R)
754
+
755
+ sorted_idx = np.argsort(-Sigma_xi)
756
+
757
+ Phi_xi = Phi_xi[:, sorted_idx] # Sorted Spatial Structures Matrix
758
+ Psi_xi = Psi_xi[:, sorted_idx] # Sorted Temporal Structures Matrix
759
+ Sigma_xi = Sigma_xi[sorted_idx]
824
760
 
825
761
  if K_out:
826
- return Phi_xi, Psi_xi, Sigma_xi, K_zeta
762
+ return Phi_xi, Psi_xi, Sigma_xi, K_r
827
763
  else:
828
- return Phi_xi, Psi_xi, Sigma_xi
764
+ return Phi_xi, Psi_xi, Sigma_xi, None
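# Usage sketch for kPOD (illustrative, not part of the package source): the two
# indices in M_DIST should point to very different snapshots; the values here
# are arbitrary for the random stand-in dataset.
import numpy as np
from modulo_vki.modulo import ModuloVKI

D = np.random.rand(64, 1000).astype('float32')
m = ModuloVKI(data=D, n_Modes=10)
Phi_xi, Psi_xi, Sigma_xi, K_zeta = m.kPOD(M_DIST=[0, 500], k_m=0.1, n_Modes=5, K_out=True)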
765
+
766
+
767
+ # def kDMD(self,
768
+ # F_S=1.0,
769
+ # M_DIST=[1, 10],
770
+ # k_m=0.1, cent=True,
771
+ # n_Modes=10,
772
+ # n_modes_latent=None,
773
+ # alpha=1e-6,
774
+ # metric='rbf', K_out=False):
775
+ # """
776
+ # Perform kernel DMD (kDMD) for snapshot data as in VKI’s ML for Fluid Dynamics course.
777
+
778
+ # Parameters
779
+ # ----------
780
+ # M_DIST : array-like of shape (2,), optional
781
+ # Indices of two snapshots used to estimate the minimal kernel value.
782
+ # These should be the most “distant” snapshots in your dataset. Default is [1, 10].
783
+ # F_S: float, sampling frequency.
784
+ # k_m : float, optional
785
+ # Minimum value for the kernelized correlation. Default is 0.1.
786
+ # cent : bool, optional
787
+ # If True, center the kernel matrix before decomposition. Default is True.
788
+ # n_Modes : int, optional
789
+ # Number of principal modes to compute. Default is 10.
790
+ # alpha : float, optional
791
+ # Regularization parameter for the modified kernel matrix \(K_{\zeta}\). Default is 1e-6.
792
+ # metric : str, optional
793
+ # Kernel function identifier (passed to `sklearn.metrics.pairwise.pairwise_kernels`).
794
+ # Only 'rbf' has been tested; other metrics may require different parameters. Default is 'rbf'.
795
+ # K_out : bool, optional
796
+ # If True, also return the full kernel matrix \(K\). Default is False.
797
+ # SAVE_KPOD : bool, optional
798
+ # If True, save the computed kPOD results to disk. Default is False.
799
+
800
+ # Returns
801
+ # -------
802
+ # Psi_xi : ndarray of shape (n_samples, n_Modes)
803
+ # The kPOD principal component time coefficients.
804
+ # Sigma_xi : ndarray of shape (n_Modes,)
805
+ # The kPOD singular values (eigenvalues of the centered kernel).
806
+ # Phi_xi : ndarray of shape (n_samples, n_Modes)
807
+ # The mapped eigenvectors (principal modes) in feature space.
808
+ # K_zeta : ndarray of shape (n_samples, n_samples)
809
+ # The (regularized and centered) kernel matrix used for decomposition.
810
+ # Only returned if `K_out` is True.
811
+
812
+ # Notes
813
+ # -----
814
+ # - Follows the hands-on ML for Fluid Dynamics tutorial by VKI
815
+ # (https://www.vki.ac.be/index.php/events-ls/events/eventdetail/552).
816
+ # - Kernel computed as described in
817
+ # Horenko et al., *Machine learning for dynamics and model reduction*, arXiv:2208.07746.
818
+ # """
819
+ # # we need the snapshot matrix in memory for this decomposition
820
+
821
+ # if self.MEMORY_SAVING:
822
+ # if self.N_T % self.N_PARTITIONS != 0:
823
+ # tot_blocks_col = self.N_PARTITIONS + 1
824
+ # else:
825
+ # tot_blocks_col = self.N_PARTITIONS
826
+
827
+ # # Prepare the D matrix again
828
+ # D = np.zeros((self.N_S, self.N_T))
829
+ # R1 = 0
830
+
831
+ # # print(' \n Reloading D from tmp...')
832
+ # for k in tqdm(range(tot_blocks_col)):
833
+ # di = np.load(self.FOLDER_OUT + f"/data_partitions/di_{k + 1}.npz")['di']
834
+ # R2 = R1 + np.shape(di)[1]
835
+ # D[:, R1:R2] = di
836
+ # R1 = R2
837
+ # else:
838
+ # D = self.D
839
+
840
+ # n_s, n_t = D.shape
841
+ # # as done with the classic dmd, we assume X = D_1 = D(0:n_t - 1) and
842
+ # # Y = D_2 = D(1:n_t)
843
+
844
+ # X = D[:, :-1]
845
+ # Y = D[:, 1:]
846
+
847
+ # # we seek A = argmin_A ||Y - AX|| = YX^+ = Y(Psi_r Sigma_r^+ Phi^*)
848
+ # n_modes_latent = n_Modes if n_modes_latent is None else n_modes_latent
849
+
850
+ # # leverage MODULO kPOD routine to compress the system instead of standard POD
851
+ # # we are now in the kernel (feature) space, thus:
852
+ # i, j = M_DIST
853
+
854
+ # # gamma needs to be the same for the feature spaces otherwise
855
+ # # leads to inconsistent galerkin proj.!
856
+
857
+ # M_ij = np.linalg.norm(X[:, i] - X[:, j]) ** 2
858
+
859
+ # gamma = - np.log(k_m) / M_ij
860
+
861
+ # K_XX = pairwise_kernels(X.T, X.T, metric=metric, gamma=gamma)
862
+ # K_YX = pairwise_kernels(Y.T, X.T, metric=metric, gamma=gamma)
863
+
864
+ # # (optional) center feature‐space mean by centering K_XX only
865
+ # if cent:
866
+ # n = K_XX.shape[0]
867
+ # H = np.eye(n) - np.ones((n, n)) / n
868
+ # K_XX = H @ K_XX @ H
869
+
870
+ # # add ridge to K_XX
871
+ # K_XX += alpha * np.eye(K_XX.shape[0])
872
+
873
+ # # kernel‐POD on the regularized, centered K_XX
874
+ # Psi_xi, sigma_xi = Temporal_basis_POD(K=K_XX, n_Modes=n_modes_latent, eig_solver='eigh')
875
+ # Sigma_inv = np.diag(1.0 / sigma_xi)
876
+
877
+ # # Galerkin projection using the **unmodified** K_YX
878
+ # A_r = Sigma_inv @ Psi_xi.T @ K_YX @ Psi_xi @ Sigma_inv
879
+
880
+ # # eigendecomposition of A gives DMD modes
881
+ # dt = 1/F_S
882
+ # Lambda, Phi_Ar = LA.eig(A_r)
883
+ # freqs = np.imag(np.log(Lambda)) / (2 * np.pi * dt)
884
+
885
+ # # we can trace back the eigenvalues of the not-truncated A (Tu et al.)
886
+ # Phi_D = Y @ Psi_xi @ Sigma_inv @ Phi_Ar
887
+ # a0s = LA.pinv(Phi_D).dot(X[:, 0])
888
+
889
+ # return Phi_D, Lambda, freqs, a0s, None
890
+
891
+
892
+
893
+
894
+
895
+
896
+
897
+