PODImodels 0.0.3 (py3-none-any.whl)
- PODImodels/PODImodels.py +1033 -0
- PODImodels/PODdata.py +522 -0
- PODImodels/__init__.py +61 -0
- PODImodels/podImodelabstract.py +840 -0
- podimodels-0.0.3.dist-info/METADATA +211 -0
- podimodels-0.0.3.dist-info/RECORD +9 -0
- podimodels-0.0.3.dist-info/WHEEL +5 -0
- podimodels-0.0.3.dist-info/licenses/LICENSE +21 -0
- podimodels-0.0.3.dist-info/top_level.txt +1 -0
PODImodels/PODdata.py
ADDED
@@ -0,0 +1,522 @@
"""
POD Data Processing and Utilities
==================================

This module provides classes and functions for handling Proper Orthogonal
Decomposition (POD) of datasets, particularly for computational fluid dynamics
field data. It includes utilities for VTK file writing and POD-based
dimensionality reduction.

Classes
-------
PODDataSet
    Class for performing POD on a single dataset.
subdomainDataSet
    Class for performing POD on both cell and patch data with subdomain analysis.

Functions
---------
vtk_writer
    Write field and point data to VTK MultiBlock datasets.
truncationErrorCal
    Calculate truncation error from singular values.
"""

import numpy as np
from scipy.linalg import svd
import pyvista as pv
from typing import List, Optional


# def vtk_writer(
#     field_data,
#     field_name,
#     data_type,
#     refVTMName,
#     save_path_name,
#     points_data=None,
#     is2D=False,
# ):
#     refVTM = pv.MultiBlock(refVTMName)
#     for block_i in range(refVTM.n_blocks):
#         block = refVTM[block_i]
#         if block is not None:
#             if data_type == "scalar":
#                 for data_i in range(len(field_name)):
#                     block.cell_data[field_name[data_i]] = field_data[data_i]
#             elif data_type == "vector":
#                 if is2D:
#                     for data_i in range(len(field_name)):
#                         block.cell_data[field_name[data_i]] = np.hstack(
#                             (
#                                 field_data[data_i].reshape(2, -1).T,
#                                 np.zeros((block.n_cells, 1)),
#                             )
#                         )
#                 else:
#                     for data_i in range(len(field_name)):
#                         block.cell_data[field_name[data_i]] = (
#                             field_data[data_i].reshape(3, -1).T
#                         )
#             if points_data is not None:
#                 points = points_data.reshape(3, -1).T
#                 block.points = points

#     # Save the modified VTM file
#     output_vtm_file_path = f"{save_path_name}.vtm"
#     refVTM.save(output_vtm_file_path)


def vtk_writer(
    field_data: List[np.ndarray],
    field_name: List[str],
    data_type: str,
    refVTMName: str,
    save_path_name: str,
    points_data: np.ndarray = None,
    is2D: bool = False,
):
    """
    Write field and point data to a VTK MultiBlock dataset (.vtm).

    This function is optimized for large meshes by leveraging PyVista's efficient
    data handling and saving mechanisms. It supports both scalar and vector field
    data and can handle 2D vector data by appending zero Z-components.

    Parameters
    ----------
    field_data : list of np.ndarray
        A list of NumPy arrays containing field data, one for each field.
        For scalar data: each array should be of shape (n_cells,).
        For vector data: each array should be of shape (n_cells, n_components).
    field_name : list of str
        A list of names for the fields corresponding to field_data.
    data_type : {'scalar', 'vector'}
        The type of field data being written.
    refVTMName : str
        The path to the reference .vtm file that provides the mesh structure.
    save_path_name : str
        The base path and name for the output .vtm file (without extension).
    points_data : np.ndarray, optional
        A NumPy array of shape (n_points, 3) containing new point coordinates.
        If None, original mesh points are preserved. Default is None.
    is2D : bool, optional
        If True, treats vector data as 2D and appends a zero Z-component to
        make it 3D for VTK compatibility. Default is False.

    Notes
    -----
    The function automatically saves each block as a separate .vtu file
    (which is efficient for large data) and updates the .vtm file accordingly.
    Binary format and compression are used to optimize file size and I/O performance.

    Examples
    --------
    >>> # Write scalar field data
    >>> field_data = [pressure_field, temperature_field]
    >>> field_names = ['pressure', 'temperature']
    >>> vtk_writer(field_data, field_names, 'scalar', 'mesh.vtm', 'output')

    >>> # Write 2D vector field data
    >>> field_data = [velocity_2d]
    >>> field_names = ['velocity']
    >>> vtk_writer(field_data, field_names, 'vector', 'mesh.vtm', 'output', is2D=True)
    """
    refVTM = pv.MultiBlock(refVTMName)

    # Use a counter to track the data index
    field_data_idx = 0

    for block_i in range(refVTM.n_blocks):
        block = refVTM[block_i]
        if block is None:
            continue

        # Check if we have enough data for this block's fields
        num_fields_per_block = len(field_name)

        if data_type == "scalar":
            for i in range(num_fields_per_block):
                block.cell_data[field_name[i]] = field_data[field_data_idx]
                field_data_idx += 1
        elif data_type == "vector":
            if is2D:
                for i in range(num_fields_per_block):
                    vec_2d = field_data[field_data_idx]
                    vec_3d = np.zeros((block.n_cells, 3), dtype=vec_2d.dtype)
                    vec_3d[:, :2] = vec_2d
                    block.cell_data[field_name[i]] = vec_3d
                    field_data_idx += 1
            else:
                for i in range(num_fields_per_block):
                    block.cell_data[field_name[i]] = field_data[field_data_idx]
                    field_data_idx += 1

        if points_data is not None:
            # Assuming points_data is already (n_points, 3) and correctly segmented per block.
            # This is a major assumption and would need to be handled by the caller.
            # A more robust solution would be to pass a list of per-block point arrays.
            # For simplicity, we'll assume a single large array and slice it.
            # This is still not ideal for memory.
            points_start_idx = block_i * block.n_points
            points_end_idx = points_start_idx + block.n_points
            block.points = points_data[points_start_idx:points_end_idx]

    # Save the modified VTM file. PyVista automatically saves each block as a
    # separate .vtu file (which is efficient for large data) and updates the .vtm.
    # Using compression and binary format is crucial for large files.
    output_vtm_file_path = f"{save_path_name}.vtm"
    refVTM.save(output_vtm_file_path, binary=True, compression_level=9)


def truncationErrorCal(singulars):
    """
    Calculate the truncation error from singular values.

    The truncation error indicates the relative energy content that would be
    lost if the POD representation were truncated at each rank level.

    Parameters
    ----------
    singulars : np.ndarray
        Array of singular values from SVD decomposition, typically in
        descending order.

    Returns
    -------
    np.ndarray
        Array of truncation errors for each rank, representing the fraction
        of total energy lost if truncation occurs at that level.

    Notes
    -----
    The truncation error is calculated as:
        error[i] = 1 - sqrt(sum(σ²[0:i+1])) / ||σ||₂

    where σ are the singular values and ||σ||₂ is the Frobenius norm.

    Examples
    --------
    >>> singulars = np.array([10.0, 5.0, 2.0, 1.0, 0.5])
    >>> errors = truncationErrorCal(singulars)
    >>> print(f"Error for rank 3: {errors[2]:.4f}")
    """
    return 1 - np.sqrt(np.cumsum(np.power(singulars, 2))) / np.linalg.norm(singulars)


class PODDataSet:
    """
    A class for performing Proper Orthogonal Decomposition (POD) on datasets.

    This class handles the decomposition of high-dimensional field data into
    a reduced set of orthogonal modes and corresponding coefficients. It supports
    both truncated and full decompositions and provides utilities for error
    analysis and data export.

    Parameters
    ----------
    data : np.ndarray
        Input data matrix of shape (n_samples, n_features) where each row
        represents a snapshot of the field data.
    rank : int, optional
        Number of POD modes to retain. Default is 10.
    fullData : bool, optional
        If True, compute and store all POD modes for error analysis.
        If False, only compute the first 'rank' modes. Default is True.

    Attributes
    ----------
    data : np.ndarray
        The input data matrix.
    rank : int
        Number of retained POD modes.
    fullData : bool
        Flag indicating whether full decomposition is computed.
    cell_modes : np.ndarray
        Truncated POD modes matrix of shape (rank, n_features).
    cell_coeffs : np.ndarray
        Truncated POD coefficients matrix of shape (n_samples, rank).
    singulars : np.ndarray
        Truncated singular values array of length rank.
    cell_modes_all : np.ndarray, optional
        Full POD modes matrix (if fullData=True).
    cell_coeffs_all : np.ndarray, optional
        Full POD coefficients matrix (if fullData=True).
    singulars_all : np.ndarray, optional
        Full singular values array (if fullData=True).

    Examples
    --------
    >>> # Create POD dataset with rank 5
    >>> data = np.random.rand(100, 1000)  # 100 snapshots, 1000 features
    >>> pod = PODDataSet(data, rank=5, fullData=True)
    >>> print(f"POD modes shape: {pod.cell_modes.shape}")
    >>> print(f"Truncation error at rank 5: {pod.truncationError()[4]:.4f}")

    >>> # Save POD modes to VTK file
    >>> pod.saveModes('pod_modes', 'reference.vtm', 'vector', rank=5)
    """

    def __init__(self, data: np.ndarray, rank: int = 10, fullData: bool = True) -> None:
        self.data: np.ndarray = data
        self.rank: int = rank
        self.fullData: bool = fullData

        # Optional attributes set in POD method
        self.cell_modes_all: Optional[np.ndarray] = None
        self.singulars_all: Optional[np.ndarray] = None
        self.cell_coeffs_all: Optional[np.ndarray] = None

        self.printInfo()
        self.POD()

    def POD(self) -> None:
        """
        Perform Singular Value Decomposition to compute POD modes and coefficients.

        This method decomposes the data matrix using SVD and extracts the POD modes
        (right singular vectors), coefficients (scaled left singular vectors), and
        singular values. Both truncated and full decompositions are computed based
        on the fullData flag.

        Notes
        -----
        The POD is computed using SVD: X = U Σ Vᵀ, where:
        - cell_modes = V (right singular vectors)
        - cell_coeffs = U Σ (scaled left singular vectors)
        - singulars = Σ (singular values)
        """
        s, vh = svd(self.data, full_matrices=False)[1:]
        self.cell_modes: np.ndarray = vh[: self.rank]
        self.cell_coeffs: np.ndarray = self.data @ vh[: self.rank].T
        self.singulars: np.ndarray = s[: self.rank]

        if self.fullData:
            self.cell_modes_all: np.ndarray = vh
            self.singulars_all: np.ndarray = s
            self.cell_coeffs_all: np.ndarray = self.data @ vh.T

    def truncationError(self):
        """
        Calculate the truncation error for all possible ranks.

        Returns
        -------
        np.ndarray
            Array of truncation errors for each rank from 1 to the total
            number of modes. Only available if fullData=True.

        Raises
        ------
        AttributeError
            If fullData=False and full singular values are not available.
        """
        return truncationErrorCal(self.singulars_all)

    def printInfo(self) -> None:
        """
        Print information about the POD configuration.

        Displays the rank parameter used for the POD decomposition.
        """
        print("The POD rank is: ", self.rank)

    def saveModes(
        self,
        saveFileName: str,
        refVTMName: str,
        dataType: str,
        rank: int = 10,
        is2D: bool = False,
    ) -> None:
        """
        Save POD modes to VTK files along with singular values and truncation errors.

        Parameters
        ----------
        saveFileName : str
            Base filename for saving the modes and analysis files.
        refVTMName : str
            Path to the reference VTM file that provides mesh structure.
        dataType : {'scalar', 'vector'}
            Type of data being saved.
        rank : int, optional
            Number of modes to save. Default is 10.
        is2D : bool, optional
            If True, treat vector data as 2D. Default is False.

        Notes
        -----
        This method creates three files:
        1. {saveFileName}.vtm - VTK file with POD modes
        2. {saveFileName}_truncationError.txt - Truncation errors
        3. {saveFileName}_singulars.txt - Singular values
        """
        # Write the velocity data into VTK file
        field_name = [f"mode_{i}" for i in range(rank)]

        # loop all test data and write the data into VTK file
        vtk_writer(
            self.cell_modes_all[:rank],
            field_name,
            dataType,
            refVTMName,
            saveFileName,
            is2D=is2D,
        )

        # write the truncation error and singular values into txt file
        np.savetxt(f"{saveFileName}_truncationError.txt", self.truncationError())
        np.savetxt(f"{saveFileName}_singulars.txt", self.singulars_all)


class subdomainDataSet:
    """
    A class for performing POD on both cell and patch data with subdomain analysis.

    This class handles POD decomposition for datasets that contain both volumetric
    cell data and boundary/patch data. It performs separate POD on each dataset
    and computes projections between the two representations.

    Parameters
    ----------
    cell_data : np.ndarray
        Cell/volume data matrix of shape (n_samples, n_cell_features).
    patch_data : np.ndarray
        Patch/boundary data matrix of shape (n_samples, n_patch_features).
    cell_rank : int, optional
        Number of POD modes to retain for cell data. Default is 10.
    patch_rank : int, optional
        Number of POD modes to retain for patch data. Default is 5.
    cal_fullData : bool, optional
        If True, compute and store all POD modes for error analysis.
        Default is True.

    Attributes
    ----------
    cell_data : np.ndarray
        Input cell data matrix.
    patch_data : np.ndarray
        Input patch data matrix.
    cell_rank : int
        Number of retained cell POD modes.
    patch_rank : int
        Number of retained patch POD modes.
    cal_fullData : bool
        Flag for computing full decomposition.
    cell_modes : np.ndarray
        Cell POD modes matrix.
    cell_coeffs : np.ndarray
        Cell POD coefficients matrix.
    patch_modes : np.ndarray
        Patch POD modes matrix.
    patch_coeffs : np.ndarray
        Patch POD coefficients matrix.
    projPatch_modes : np.ndarray
        Projection of patch modes onto cell coefficient space.

    Examples
    --------
    >>> cell_data = np.random.rand(50, 1000)  # 50 snapshots, 1000 cells
    >>> patch_data = np.random.rand(50, 200)  # 50 snapshots, 200 patch points
    >>> subdomain = subdomainDataSet(cell_data, patch_data, cell_rank=8, patch_rank=4)
    >>> print(f"Cell modes shape: {subdomain.cell_modes.shape}")
    >>> print(f"Patch modes shape: {subdomain.patch_modes.shape}")
    """

    def __init__(
        self,
        cell_data: np.ndarray,
        patch_data: np.ndarray,
        cell_rank: int = 10,
        patch_rank: int = 5,
        cal_fullData: bool = True,
    ) -> None:
        self.cell_data: np.ndarray = cell_data
        self.patch_data: np.ndarray = patch_data
        self.cell_rank: int = cell_rank
        self.patch_rank: int = patch_rank
        self.cal_fullData: bool = cal_fullData

        # Optional attributes
        self.cell_modes_all: Optional[np.ndarray] = None
        self.singulars_all: Optional[np.ndarray] = None
        self.cell_coeffs_all: Optional[np.ndarray] = None
        self.patch_modes_all: Optional[np.ndarray] = None
        self.patch_coeffs_all: Optional[np.ndarray] = None
        self.patch_singulars_all: Optional[np.ndarray] = None

        self.printInfo()
        self.cellPOD()
        self.patchPOD()
        self.calculate_projPatch_modes()

    def cellPOD(self) -> None:
        """
        Perform POD on the cell/volume data.

        Computes SVD decomposition of the cell data and extracts POD modes,
        coefficients, and singular values. Both truncated and full decompositions
        are stored based on the cal_fullData flag.
        """
        s, vh = svd(self.cell_data, full_matrices=False)[1:]
        self.cell_modes: np.ndarray = vh[: self.cell_rank]
        self.cell_coeffs: np.ndarray = self.cell_data @ vh[: self.cell_rank].T
        self.singulars: np.ndarray = s[: self.cell_rank]

        if self.cal_fullData:
            self.cell_modes_all: np.ndarray = vh
            self.singulars_all: np.ndarray = s
            self.cell_coeffs_all: np.ndarray = self.cell_data @ vh.T

    def calculate_projPatch_modes(self) -> None:
        """
        Calculate the projection of patch modes onto the cell coefficient space.

        This method computes the mapping between cell POD coefficients and patch
        data, enabling reconstruction of patch data from cell POD coefficients.
        The projection is computed as: (Σ⁻²) @ (Uᶜᵀ @ Xᵖ), where Σ are the cell
        singular values, Uᶜ are the cell coefficients, and Xᵖ is the patch data.
        """
        self.projPatch_modes: np.ndarray = np.diag(np.power(self.singulars, -2)) @ (
            self.cell_coeffs.T @ self.patch_data
        )

    def truncationError(self):
        """
        Calculate the truncation error for the cell data POD.

        Returns
        -------
        np.ndarray
            Array of truncation errors for each rank from 1 to the total
            number of cell modes. Only available if cal_fullData=True.
        """
        return truncationErrorCal(self.singulars_all)

    def patchPOD(self) -> None:
        """
        Perform POD on the patch/boundary data.

        Computes SVD decomposition of the patch data and extracts POD modes,
        coefficients, and singular values. Both truncated and full decompositions
        are stored based on the cal_fullData flag.
        """
        s, vh = svd(self.patch_data, full_matrices=False)[1:]
        self.patch_modes: np.ndarray = vh[: self.patch_rank]
        self.patch_coeffs: np.ndarray = self.patch_data @ vh[: self.patch_rank].T
        self.patch_singulars: np.ndarray = s[: self.patch_rank]

        if self.cal_fullData:
            self.patch_modes_all: np.ndarray = vh
            self.patch_coeffs_all: np.ndarray = self.patch_data @ vh.T
            self.patch_singulars_all: np.ndarray = s

    def printInfo(self) -> None:
        """
        Print information about the subdomain POD configuration.

        Displays the rank parameters used for both cell and patch POD decompositions.
        """
        print("The cell POD rank is: ", self.cell_rank)
        print("The patch POD rank is: ", self.patch_rank)
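
A minimal usage sketch for this module, based only on the code shown above and written in the same doctest style as its docstrings. The snapshot counts, field sizes, and random arrays are illustrative placeholders, and the package plus its dependencies are assumed to be installed.

>>> import numpy as np
>>> from PODImodels.PODdata import PODDataSet, subdomainDataSet
>>> snapshots = np.random.rand(50, 2000)    # 50 snapshots of a 2000-cell field (placeholder)
>>> pod = PODDataSet(snapshots, rank=8, fullData=True)
The POD rank is:  8
>>> pod.cell_modes.shape, pod.cell_coeffs.shape, pod.singulars.shape
((8, 2000), (50, 8), (8,))
>>> errors = pod.truncationError()          # same quantity as truncationErrorCal(pod.singulars_all)
>>> errors.shape                            # one error value per candidate rank
(50,)
>>> bool(np.all(np.diff(errors) <= 0))      # retained energy grows with rank, so the error never increases
True
>>> patches = np.random.rand(50, 300)       # matching boundary-patch snapshots (placeholder)
>>> sub = subdomainDataSet(snapshots, patches, cell_rank=8, patch_rank=4)
The cell POD rank is:  8
The patch POD rank is:  4
>>> sub.projPatch_modes.shape               # maps 8 cell coefficients onto 300 patch values
(8, 300)
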
PODImodels/__init__.py
ADDED
@@ -0,0 +1,61 @@
"""
PODImodels: Proper Orthogonal Decomposition based Interpolation Models
=====================================================================

A Python package for building reduced-order models using Proper Orthogonal
Decomposition (POD) combined with various machine learning techniques for
interpolation and prediction of high-dimensional field data.

This package provides:
- POD-based dimensionality reduction for computational fluid dynamics fields
- Various interpolation models (Gaussian Process Regression, Radial Basis Functions, Neural Networks)
- Data handling utilities for VTK/OpenFOAM field data
- Validation and visualization tools for model assessment

Main Classes
------------
PODImodelAbstract : Abstract base class
    Base class for all POD-based interpolation models.
fieldsGPR, PODGPR : Gaussian Process Regression models
    GPR models for direct field prediction and POD coefficient prediction.
fieldsRBF, PODRBF : Radial Basis Function models
    RBF models for direct field prediction and POD coefficient prediction.
PODANN : Artificial Neural Network model
    Deep learning model for POD coefficient prediction.
scaledROM : Scaled Reduced Order Model
    Wrapper for applying scaling transformations to ROM models.
PODDataSet : POD data processing
    Class for performing POD on datasets and handling modal decomposition.

Examples
--------
>>> from PODImodels import PODGPR
>>> model = PODGPR(rank=10)
>>> model.fit(parameters, field_data)
>>> predictions = model.predict(new_parameters)
"""

__all__ = [
    "PODImodelAbstract",
    "fieldsLinear",
    "fieldsRidge",
    "fieldsGPR",
    "PODGPR",
    "fieldsRidgeGPR",
    "PODRidgeGPR",
    "fieldsRBF",
    "PODRBF",
    "fieldsRidgeRBF",
    "PODRidgeRBF",
    "scaledROM",
    "PODANN"
]

from .podImodelabstract import PODImodelAbstract
from .PODImodels import fieldsLinear, fieldsRidge
from .PODImodels import fieldsGPR, PODGPR
from .PODImodels import fieldsRidgeGPR, PODRidgeGPR
from .PODImodels import fieldsRBF, PODRBF
from .PODImodels import fieldsRidgeRBF, PODRidgeRBF
from .PODImodels import PODANN
from .PODdata import vtk_writer, truncationErrorCal, PODDataSet, subdomainDataSet
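
A rough sketch of how the public interface wired up above is meant to be used, expanding the docstring example. The parameter and snapshot arrays are made-up placeholders, and the PODGPR constructor/fit/predict calls follow the pattern shown in the module docstring rather than a signature verified here.

>>> import numpy as np
>>> from PODImodels import PODGPR
>>> from PODImodels import PODDataSet, subdomainDataSet, vtk_writer  # re-exported from PODdata
>>> params = np.random.rand(40, 3)      # 40 training cases, 3 input parameters (placeholder)
>>> fields = np.random.rand(40, 5000)   # matching high-dimensional field snapshots (placeholder)
>>> model = PODGPR(rank=10)             # POD compression plus GPR on the retained coefficients
>>> model.fit(params, fields)
>>> prediction = model.predict(np.random.rand(1, 3))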