modulo-vki 2.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,339 @@
1
+ import numpy as np
2
+ import os
3
+ from tqdm import tqdm
4
+ import math
5
+
6
+
7
class ReadData:
    """
    A MODULO helper class for input data. ReadData allows loading the data directly
    before using MODULO, hence assembling the data matrix D from the raw data files.
    """

    def __init__(self):
        pass

    @staticmethod
    def _read_snapshot(Name, h, f, c, N):
        """
        Read one snapshot file and stack its N components (columns c .. c+N-1)
        into a single column vector.

        :param Name: str, full path of the file to read.
        :param h: int, lines to skip from the header.
        :param f: int, lines to skip from the footer.
        :param c: int, leading columns to skip (e.g. mesh coordinates).
        :param N: int, number of components (columns) to stack.
        :return: np.ndarray, 1D vector of the stacked components.
        """
        DATA = np.genfromtxt(Name, skip_header=h, skip_footer=f)
        # Stacking the N component columns vertically reproduces the original
        # copy-then-concatenate accumulation in a single call.
        return np.concatenate([DATA[:, ii] for ii in range(c, N + c)], axis=0)

    @classmethod
    def _data_processing(cls, D: np.ndarray, FOLDER_OUT: str = './',
                         N_PARTITIONS: int = 1,
                         MR: bool = False, SAVE_D: bool = False,
                         FOLDER_IN: str = './', filename: str = '',
                         h: int = 0, f: int = 0, c: int = 0,
                         N: int = 0, N_S: int = 0, N_T: int = 0):
        """
        First, if the D matrix is not provided, this method attempts to load the data and
        assembles the D matrix. Then, it performs pre-processing operations on D:
        if MR=True, the mean (per column, i.e. per snapshot at time t_i) is removed;
        if N_PARTITIONS > 1 (memory saving), the data matrix is split into partitions that
        are stored on disk (cast to float32) to optimize memory usage.

        :param D: np.ndarray
            data matrix D (N_S x N_T). If not an ndarray, the data is read from
            FOLDER_IN using the pattern `filename` instead.
        :param FOLDER_OUT: str
            folder in which the data (partitions and/or data matrix itself) will eventually be saved.
        :param N_PARTITIONS: int
            in the memory saving environment, number of partitions applied to the data matrix.
            If (N_T % N_PARTITIONS) != 0, an additional partition is introduced that contains
            the remaining columns.
        :param MR: bool, optional
            if True, the temporal mean (per column) is removed from each snapshot.
        :param SAVE_D: bool, optional
            if True, the matrix D is saved on disk. Forced to True when memory saving is active.
        :param FOLDER_IN: str, optional. Needed only if D is not provided.
            path from which the data files are read.
        :param filename: str, optional. Needed only if D is not provided.
            printf-style pattern of the files: each file is named (filename % k) + '.dat',
            with k going from 1 to the number of saved time steps.
        :param h: int, optional. Needed only if D is not provided.
            lines to be skipped from the header of each file.
        :param f: int, optional. Needed only if D is not provided.
            lines to be skipped from the footer of each file.
        :param c: int, optional. Needed only if D is not provided.
            columns to be skipped (for example if the first c columns contain the mesh grid).
        :param N: int, optional. Needed only if D is not provided.
            number of components to be analysed.
        :param N_S: int, optional. Needed only if D is not provided.
            number of points in space.
        :param N_T: int, optional. Needed only if D is not provided.
            number of time steps (snapshots).

        :return:
            Four possible scenarios:
            1. N_PARTITIONS == 1 and MR=True  -> (D, D_MEAN)  (D_MEAN is the mean snapshot)
            2. N_PARTITIONS == 1 and MR=False -> D
            3. N_PARTITIONS  > 1 and MR=True  -> D_MEAN
            4. N_PARTITIONS  > 1 and MR=False -> None
        """
        if isinstance(D, np.ndarray):  # D was already initialised
            N_S = int(np.shape(D)[0])
            N_T = int(np.shape(D)[1])

            if MR:
                # Remove the temporal average (computed along the columns) from every snapshot.
                print("Removing the mean from D ...")
                D_MEAN = np.mean(D, 1)
                D_Mr = D - D_MEAN.reshape(-1, 1)  # mean-removed data (broadcast over columns)
                print("Computing the mean-removed D ... ")
                np.copyto(D, D_Mr)  # overwrite D in place to avoid a second full copy
                del D_Mr

            if N_PARTITIONS > 1:
                # Memory saving: cast to float32, split into partitions and store everything on disk.
                SAVE_D = True
                database = D.astype('float32', casting='same_kind')
                os.makedirs(FOLDER_OUT + "/data_partitions/", exist_ok=True)
                print("Memory Saving feature is active. Partitioning Data Matrix...")
                if N_T % N_PARTITIONS != 0:
                    # N_T is not a multiple of N_PARTITIONS: split the first
                    # dim_col * N_PARTITIONS columns evenly, then store the
                    # remaining columns in one extra partition.
                    dim_col = math.floor(N_T / N_PARTITIONS)
                    columns_to_part = dim_col * N_PARTITIONS
                    splitted_tmp = np.hsplit(database[:, :columns_to_part], N_PARTITIONS)
                    for ii in range(1, len(splitted_tmp) + 1):
                        np.savez(FOLDER_OUT + f"/data_partitions/di_{ii}", di=splitted_tmp[ii - 1])
                    np.savez(FOLDER_OUT + f"/data_partitions/di_{N_PARTITIONS + 1}",
                             di=database[:, columns_to_part:])
                else:
                    splitted_tmp = np.hsplit(database, N_PARTITIONS)
                    for ii in range(1, len(splitted_tmp) + 1):
                        np.savez(FOLDER_OUT + f"/data_partitions/di_{ii}", di=splitted_tmp[ii - 1])

                print("\n Data Matrix has been successfully splitted. \n")
            else:
                # BUGFIX: with a single partition no float32 copy is made; save D itself.
                # (Previously this path raised NameError on 'database' when SAVE_D=True.)
                database = D

            if SAVE_D:
                # Save the (possibly mean-removed) data matrix in FOLDER_OUT.
                os.makedirs(FOLDER_OUT + "/data_matrix", exist_ok=True)
                print(f"Saving the matrix D in {FOLDER_OUT}")
                np.savez(FOLDER_OUT + '/data_matrix/database', D=database, n_t=N_T, n_s=N_S)
        else:  # try to read the data from disk
            print("Data matrix was not provided, reading it from {}".format(FOLDER_IN))
            # First check if the data were saved in the supported format by probing file 1.
            try:
                Name = FOLDER_IN + os.sep + filename % (0 + 1) + '.dat'  # Name of the file to read
                DATA = np.genfromtxt(Name, skip_header=h, skip_footer=f)
            # Narrowed from a bare except: genfromtxt raises OSError for a missing
            # file/folder and ValueError/TypeError for a malformed filename pattern.
            except (OSError, ValueError, TypeError) as err:
                raise AttributeError(
                    "FOLDER_IN {} does not exist or filename {} has not the good format. Check the help!".format(
                        FOLDER_IN, filename)) from err

            if N_PARTITIONS == 1:  # only one partition (one matrix!)
                D = np.zeros((N_S, N_T))

                print("\n \n Importing data with no partitions... \n \n")

                if MR:
                    print("Mean removal activated")
                    D_MEAN = np.zeros(N_S)

                for k in tqdm(range(0, N_T)):
                    Name = FOLDER_IN + os.sep + filename % (k + 1) + '.dat'  # Name of the file to read
                    V = cls._read_snapshot(Name, h, f, c, N)
                    if MR:
                        D_MEAN += 1 / N_T * V  # Snapshot contribution to the mean
                    D[:, k] = V  # Reshape and assign

                if MR:
                    print("Removing the mean from D ...")
                    D_Mr = D - D_MEAN.reshape(-1, 1)  # Mean Removed
                    print("Computing the mean-removed D ... ")
                    np.copyto(D, D_Mr)
                    del D_Mr

            elif N_PARTITIONS > 1:  # memory saving loop
                # Prepare the folder to store the partitions.
                os.makedirs(FOLDER_OUT + "/data_partitions/", exist_ok=True)
                print("Memory Saving feature is active. Partitioning Data Matrix...")

                dim_col = math.floor(N_T / N_PARTITIONS)
                columns_to_part = dim_col * N_PARTITIONS  # integer multiple of N_PARTITIONS
                vec = np.arange(0, columns_to_part)
                # This gets the column-index blocks, one per partition.
                splitted_tmp = np.hsplit(vec, N_PARTITIONS)
                if columns_to_part != N_T:
                    # NOTE(review): unlike the in-memory branch, the trailing
                    # snapshots are dropped here rather than stored in an extra partition.
                    print("WARNING: the last " + str(
                        N_T - 1 - splitted_tmp[N_PARTITIONS - 1][-1]) + ' snapshots are not considered')

                if MR:
                    print("Mean removal activated")
                    D_MEAN = np.zeros(N_S)

                for ii in range(1, len(splitted_tmp) + 1):
                    count = 0
                    print('Working on block ' + str(ii) + '/' + str(N_PARTITIONS))
                    D = np.zeros((N_S, len(splitted_tmp[0])))
                    i1 = splitted_tmp[ii - 1][0]
                    i2 = splitted_tmp[ii - 1][-1]  # ranges
                    for k in tqdm(range(i1, i2 + 1)):
                        Name = FOLDER_IN + os.sep + filename % (k + 1) + '.dat'  # Name of the file to read
                        V = cls._read_snapshot(Name, h, f, c, N)
                        if MR:
                            D_MEAN += 1 / N_T * V  # Snapshot contribution to the mean
                        D[:, count] = V  # Reshape and assign
                        count += 1
                    np.savez(FOLDER_OUT + f"/data_partitions/di_{ii}", di=D)
                    print('Partition ' + str(ii) + '/' + str(N_PARTITIONS) + ' saved')

                if MR:
                    # Second pass: the mean is only complete after all partitions
                    # were read, so reload each one and centre it.
                    print('Reloading the data for removing the mean')
                    for ii in range(1, len(splitted_tmp) + 1):
                        print(f"Mean centering block {ii}")
                        di = np.load(FOLDER_OUT + f"/data_partitions/di_{ii}.npz")['di']
                        di_mr = di - D_MEAN.reshape(-1, 1)  # Mean Removed
                        np.savez(FOLDER_OUT + f"/data_partitions/di_{ii}", di=di_mr)
            else:
                raise TypeError("number of partitions not valid.")

        if N_PARTITIONS == 1 and MR:
            return D, D_MEAN
        elif N_PARTITIONS == 1 and not MR:
            return D
        elif N_PARTITIONS > 1 and MR:
            return D_MEAN
        else:
            return None
336
+
337
+
338
+ #%%
339
+
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2022 lorenzoschena
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,96 @@
1
+ Metadata-Version: 2.1
2
+ Name: modulo_vki
3
+ Version: 2.0.5
4
+ Summary: MODULO (MODal mULtiscale pOd) is a software developed at the von Karman Institute to perform Multiscale Modal Analysis of numerical and experimental data.
5
+ Home-page: https://github.com/mendezVKI/MODULO/tree/master/modulo_python_package/
6
+ Author: ['R. Poletti', 'L. Schena', 'D. Ninni', 'M. A. Mendez']
7
+ Author-email: mendez@vki.ac.be
8
+ License: BSD (3-clause)
9
+ Classifier: Development Status :: 4 - Beta
10
+ Classifier: Natural Language :: English
11
+ Classifier: Programming Language :: Python :: 3
12
+ Requires-Python: >=3.6
13
+ Description-Content-Type: text/markdown
14
+ License-File: LICENSE
15
+ Requires-Dist: tqdm
16
+ Requires-Dist: numpy
17
+ Requires-Dist: scipy
18
+ Requires-Dist: scikit-learn
19
+ Requires-Dist: ipykernel
20
+ Requires-Dist: ipython
21
+ Requires-Dist: ipython-genutils
22
+ Requires-Dist: ipywidgets
23
+ Requires-Dist: matplotlib
24
+
25
+
26
+
27
+ MODULO - latest update 2.0
28
+ ===================
29
+
30
+ This repository contains version 2.0 of MODULO (MODal mULtiscale pOd), a software developed at the von Karman Institute to perform data-driven modal decompositions and, in particular, the Multiscale Proper Orthogonal Decomposition (mPOD).
31
+
32
+ The old version, based on a MATLAB implementation and a related GUI, is no longer maintained but will remain available on the branch "Old_Matlab_Implementation". We also keep the first Python implementation in the branch "Old_Python_Implementation". See the Readme file in these branches for more information.
33
+
34
+ #### Documentation
35
+
36
+ The full documentation is available at https://modulo.readthedocs.io/en/latest/intro.html.
37
+ This documentation is stored alongside the source code and linked to a specific version of MODULO.
38
+
39
+ ## What is MODULO, and what are data-driven decompositions?
40
+
41
+ MODULO allows to compute data-driven decompositions of experimental and numerical data. To have a concise overview of the context, we refer to:
42
+
43
+ - Ninni, D., Mendez, M. A. (2020), "MODULO: A Software for Multiscale Proper Orthogonal Decomposition of data", Software X, Vol 12, 100622, https://doi.org/10.1016/j.softx.2020.100622.
44
+
45
+ - Poletti, R., Schena, L., Ninni, D., Mendez, M.A (2024) "MODULO: a python toolbox for data-driven modal decomposition", Submitted to Journal of Open Source Software. Preprint available [here](https://www.researchgate.net/publication/376885484_MODULO_a_python_toolbox_for_data-driven_modal_decomposition)
46
+
47
+ The first article also presents the first version of MODULO (available in the OLD_Matlab_Implementation branch) and its GUI developed by D. Ninni. The second introduces MODULO v2 in this branch and alternative open-source projects. While many projects allow for computing common decompositions such as POD, DMD and the SPODs, MODULO is currently the only open-source project allowing the computation of the mPOD.
48
+
49
+ For a more comprehensive overview on the theory of data-driven decompositions, we refer to the chapter:
50
+
51
+ - Mendez, M. A. (2023) "Generalized and Multiscale Modal Analysis". In : Mendez M.A., Ianiro, A., Noack, B.R., Brunton, S. L. (Eds), "Data-Driven Fluid Mechanics: Combining First Principles and Machine Learning". Cambridge University Press, 2023:153-181. https://doi.org/10.1017/9781108896214.013. The pre-print is available at https://arxiv.org/abs/2208.12630.
52
+
53
+ and the article that first presented the complete treatment of the mPOD :
54
+
55
+ - Mendez, M. A., Balabane, M., Buchlin, J.-M. (2019) "Multi-Scale Proper Orthogonal Decomposition of Complex Fluid Flows" Journal of Fluid Mechanics 870:988-1036, https://doi.org/10.1017/9781108896214.013. The pre-print is available at https://arxiv.org/abs/2208.12630.
56
+
57
+ Ongoing works on nonlinear methods are discussed here:
58
+
59
+ - Mendez, M. A. (2023) "Linear and Nonlinear Dimensionality Reduction from Fluid Mechanics to Machine Learning", Meas. Sci. Technol. 34(042001), https://doi.org/10.1088/1361-6501/acaffe. The pre-print is available at https://arxiv.org/abs/2208.07746.
60
+
61
+ ## What is new in this V 2.0?
62
+
63
+ This version expands considerably the version v1 in "Old_Python_Implementation", for which a first tutorial was provided by L. Schena in https://www.youtube.com/watch?v=y2uSvdxAwHk.
64
+ The major updates are the following :
65
+
66
+ 1. Faster EIG/SVD algorithms, using powerful randomized svd solvers from scikit_learn (see [this](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.TruncatedSVD.html) and [this](https://scikit-learn.org/stable/modules/generated/sklearn.utils.extmath.randomized_svd.html) ). It is now possible to select various options as "eig_solver" and "svd_solver", offering different trade-offs in terms of accuracy vs computational time.
67
+
68
+ 2. In addition to the traditional POD computation using the K matrix (Sirovich's method), it is now possible to compute the POD directly via SVD using any of the four "svd_solver" options.
69
+ This is generally faster but requires more memory.
70
+
71
+ 3. Faster subscale estimators for the mPOD: the previous version used the rank of the correlation matrix in each scale to define the number of modes to be computed in each portion of the splitting vector before assembling the full basis. This is computationally very demanding. This estimation has been replaced by a frequency-based threshold (i.e. based on the frequency bins within each portion) since one can show that the frequency-based estimator is always more "conservative" than the rank-based estimator.
72
+
73
+ 4. Major improvement on the memory saving option: the previous version of modulo always required in input the matrix D. Then, if the memory saving option was active, the matrix was partitioned and stored locally to free the RAM before computing the correlation matrix (see [this tutorial by D. Ninni](https://www.youtube.com/watch?v=LclxO1WTuao)). In the new version, it is possible to initialize a modulo object *without* the matrix D (see exercise 5 in the examples). Instead, one can create the partitions without loading the matrix D.
74
+
75
+ 5. Implementation of Dynamic Mode Decomposition (DMD) from [Schmid, P.J 2010](https://www.cambridge.org/core/journals/journal-of-fluid-mechanics/article/dynamic-mode-decomposition-of-numerical-and-experimental-data/AA4C763B525515AD4521A6CC5E10DBD4).
76
+
77
+ 6. Implementation of the two Spectral POD formulations, namely the one from [Sieber et al 2016](https://www.cambridge.org/core/journals/journal-of-fluid-mechanics/article/abs/spectral-proper-orthogonal-decomposition/DCD8A6EDEFD56F5A9715DBAD38BD461A), and the one from [Towne et al 2018](https://www.cambridge.org/core/journals/journal-of-fluid-mechanics/article/abs/spectral-proper-orthogonal-decomposition-and-its-relationship-to-dynamic-mode-decomposition-and-resolvent-analysis/EC2A6DF76490A0B9EB208CC2CA037717)
78
+
79
+ 7. Implementation of a kernel version of the POD, in which the correlation matrix is replaced by a kernel matrix. This is described in Lecture 15 of the course [Hands on Machine Learning for Fluid dynamics 2023](https://www.vki.ac.be/index.php/events-ls/events/eventdetail/552/-/online-on-site-hands-on-machine-learning-for-fluid-dynamics-2023). See also [this](https://arxiv.org/abs/2208.07746).
80
+
81
+ 8. Implementation of a formulation for non-uniform meshes, using a weighted matrix for all the relevant inner products. This is currently available only for POD and mPOD but allows for handling data produced from CFD simulation without resampling on a uniform grid (see exercise 4). It can be used both with and without the memory-saving option.
82
+
83
+ ## New Tutorials
84
+
85
+ The installation provides five exercises to explore MODULO's features while familiarizing with data-driven decompositions. These are available in the /exercise/ folder in plain Python format and jupyter notebooks.
86
+
87
+ - Exercise 1. In this exercise, we consider the flow past a cylinder. The dataset was created via Large Eddy Simulations (LES) by Denis Dumoulin during his STP at VKI in 2016 (Report available on request). For convenience, the data was first mapped to a Cartesian grid. This test case is by far the most popular because it's well-known to have a simple low-order representation with modes that have nearly harmonic temporal structures. We compute the POD and the DMD and compare the results... the difference between DMD and POD modes is hardly distinguishable!
88
+
89
+ - Exercise 2. We consider the flow of an impinging gas jet, taken from [this](https://arxiv.org/abs/1804.09646) paper. This dataset was collected via Time-Resolved Particle Image Velocimetry (TR-PIV). Only the first 200 POD modes were stored. This dataset has much richer dynamics than the previous one and cannot be easily approximated using a few modes. We use it to explore the differences between the DFT, the SPODs and the mPOD. These have different purposes and look for different features.
90
+
91
+ - Exercise 3. We take back the cylinder test case to explore the differences between the POD and the generalized Karhunen–Loève (KL) expansion in which a kernel matrix replaces the correlation matrix. The POD is a particular case of KL where the kernel function generating the kernel matrix is the plain inner product. Here, we also consider a Gaussian kernel. Different kernel functions define similarity in different ways and thus produce widely different modes. Different modal structures tell different stories about the dataset, but... what can you say about efficiency in data compression?
92
+
93
+ - Exercise 4. We consider the flow past a cylinder again, but this time in transient conditions and on an experimental test case taken from [this](https://arxiv.org/abs/2001.01971) paper. In this exercise, you can reproduce the same results from the article to see how the mPOD allows to achieve both time and frequency localization without compromising much of the convergence of the POD. The dataset is quite large, so you might have difficulties handling it if you have less than 32 GB of RAM. But fear not: the memory saving feature allows to compute POD and mPOD without loading the data into memory!
94
+
95
+ - Exercise 5. We consider the flow of an impinging gas jet again, but this time on a numerical test case. This dataset was produced by Yannic Lowenstein during his STP at VKI at the end of 2023, with the help of Dr. Maria Faruoli. The Reynolds number is two orders of magnitude higher than in exercise 2, yet the flow features you will observe are pretty similar, at least qualitatively. From a learning perspective, the key feature of this test case is that the data is not available on a uniform grid. But fear not: with the new features, it is possible to compute the decompositions using appropriate weights!
96
+
@@ -0,0 +1,22 @@
1
+ modulo_vki/__init__.py,sha256=wKu93pAmJcEzH5iKpXZ6rguM43RdI9C-4W4r8frvpc0,594
2
+ modulo_vki/modulo.py,sha256=j6cdcDcHR8omT9i-FSKzRSwh_dT4PbKM1RN35NnVmU0,37654
3
+ modulo_vki/core/__init__.py,sha256=y9mIqtmSg3o8TXMqFxoiMCoMSle6mK0LBrvNJCVM1Zg,226
4
+ modulo_vki/core/_dft.py,sha256=Sq-vH8DlypAjHEGfWBAzBZp7DqKMCQ9G82QwdbYo-vs,2285
5
+ modulo_vki/core/_dmd_s.py,sha256=lD7COE1Zinxyjd4IZl0MxT4XtqKM9JtXLL4U26MbtFc,2678
6
+ modulo_vki/core/_k_matrix.py,sha256=SxS5esKl8ifMtTYWWeX437UWADKB3fW4ozKGryxVHpM,3687
7
+ modulo_vki/core/_mpod_space.py,sha256=0Om-kMQV5n5oI67Ef5ZuXtvBDaRePMVBQYfhJ-_hK0E,7327
8
+ modulo_vki/core/_mpod_time.py,sha256=ItlFTEl-uhj76aIpHpq8U2-vokTnPiE3PL60RKHUYlM,8498
9
+ modulo_vki/core/_pod_space.py,sha256=X4VibnUad9QVRTKEAmk3dcyd97SqaB5MJE2ibPGXEjw,6933
10
+ modulo_vki/core/_pod_time.py,sha256=xWEOX6pO7Cpx1Vm7vF7X4YSORMNuAOpJyfpJiG-foTI,2144
11
+ modulo_vki/core/_spod_s.py,sha256=rxXSsNdSTLv3sjvlUErw3xjHk0eF8H7IkSJr5nMQ8Vo,4149
12
+ modulo_vki/core/_spod_t.py,sha256=csftCPRSqs-OghQa8l0mRDL7cy5eXXSCnW8O4pnXTCY,3866
13
+ modulo_vki/utils/__init__.py,sha256=F5yy5R19dONK9oaBEpKzylorVJNcjT2kiJ5Og-ZX1ek,94
14
+ modulo_vki/utils/_plots.py,sha256=m43t08cVq-TY0BW0YPqT71hN-54hBphIYKZEn8Kw16E,1453
15
+ modulo_vki/utils/_utils.py,sha256=QE-Ksu49AYJBk_Mb4eRAUNx4vkd7ifkoRVU3ePbrTP8,13838
16
+ modulo_vki/utils/others.py,sha256=KnAgXw_VLnyRHTb4P463ZQmIpCVjqTSN3KIpPL-vTgA,16962
17
+ modulo_vki/utils/read_db.py,sha256=EBVOVEVq4qCeh6SLufGRQn_ch68tlhlPhMQ85aPGrOI,15117
18
+ modulo_vki-2.0.5.dist-info/LICENSE,sha256=5TivriXFErrYrJgBq3M72kHNHqtSiCft3xESM1zHc0k,1091
19
+ modulo_vki-2.0.5.dist-info/METADATA,sha256=FDLQYRjcun3g-uHF61DHN008DG7lk_WYJ-k333Nk5SQ,10623
20
+ modulo_vki-2.0.5.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
21
+ modulo_vki-2.0.5.dist-info/top_level.txt,sha256=4PA4AmafKU6M7us7gvt_Q976Khx3qjNUEThRRM5zxeA,11
22
+ modulo_vki-2.0.5.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.43.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1 @@
1
+ modulo_vki