foamToPython 0.0.1 (py3-none-any.whl)
- PODopenFOAM/__init__.py +7 -0
- PODopenFOAM/ofmodes.py +1228 -0
- foamToPython/__init__.py +11 -0
- foamToPython/headerEnd.py +22 -0
- foamToPython/readOFField.py +1275 -0
- foamToPython/readOFList.py +205 -0
- foamtopython-0.0.1.dist-info/METADATA +111 -0
- foamtopython-0.0.1.dist-info/RECORD +10 -0
- foamtopython-0.0.1.dist-info/WHEEL +5 -0
- foamtopython-0.0.1.dist-info/top_level.txt +2 -0
PODopenFOAM/ofmodes.py
ADDED
@@ -0,0 +1,1228 @@
import sys
import re
import os
import gc
from typing import List, Dict, Any, Optional, Tuple, Union

import multiprocessing

import numpy as np
from scipy.linalg import svd, lu_factor, lu_solve, LinAlgError

from foamToPython.readOFField import OFField

import time


# Class for computing POD modes from OpenFOAM field data
class PODmodes:
    def __init__(
        self,
        fieldList: list,
        POD_algo: str = "eigen",
        rank: int = 10,
        run: bool = True,
    ) -> None:
        """
        Initialize the PODmodes object for computing POD modes from OpenFOAM field data.

        Parameters
        ----------
        fieldList : List[OFField]
            List of OpenFOAM field objects.
        POD_algo : str, optional
            Algorithm for POD ('eigen' or 'svd'), by default 'eigen'.
        rank : int, optional
            Number of modes to compute, by default 10.
        run : bool, optional
            Whether to immediately compute the modes, by default True.

        Raises
        ------
        ValueError
            If rank is greater than the number of fields.
        """
        self.fieldList: List[OFField] = fieldList
        self.POD_algo: str = POD_algo
        if rank > len(fieldList):
            raise ValueError("Rank cannot be greater than the number of fields.")
        self._rank: int = rank
        self._num_processors: int = (
            fieldList[0]._num_processors if fieldList[0].parallel else 1
        )
        self._mode_convert: bool = True

        self.start_time = time.time()
        self.parallel = fieldList[0].parallel

        self.run: bool = run
        if self.run:
            self.getModes()

        if fieldList[0].data_type != "scalar" and fieldList[0].data_type != "vector":
            raise TypeError("Unknown data_type. please use 'scalar' or 'vector'.")

    @property
    def rank(self):
        return self._rank

    @rank.setter
    def rank(self, value):
        if value > len(self.fieldList):
            raise ValueError("Rank cannot be greater than the number of fields.")
        self._rank = value

    @property
    def modes(self):
        if not hasattr(self, "_modes"):
            self.getModes()
        if self.fieldList[0].parallel:
            if not self._mode_convert:
                # Flatten the list of lists into a single list
                return [self._modes[i][: self._rank] for i in range(len(self._modes))]
            else:
                return self._convert_mode_list(self._modes)[: self._rank]
        else:
            return self._modes[: self._rank]

    @property
    def coeffs(self):
        if not hasattr(self, "_coeffs"):
            self.getModes()
        return self._coeffs[:, : self._rank]

    def getModes(self) -> None:
        """
        Compute and store the POD modes from the field list.

        Returns
        -------
        None

        Raises
        ------
        ValueError
            If POD has not been performed yet or if fieldList is empty.
        """
        # Cache frequently accessed properties
        first_field = self.fieldList[0]
        is_parallel = first_field.parallel
        data_type = first_field.data_type

        if not hasattr(self, "data_matrix"):
            if is_parallel:
                self.data_matrix: np.ndarray = self._field2ndarray_parallel(
                    self.fieldList
                )
            else:
                self.data_matrix: np.ndarray = self._field2ndarray_serial(
                    self.fieldList
                )

        print(
            "Convert field to ndarray at time: {:.3f} s".format(
                time.time() - self.start_time
            )
        )

        self._performPOD(self.data_matrix)
        print("Perform POD at time: {:.3f} s".format(time.time() - self.start_time))

        # Free data_matrix memory immediately after POD to reduce memory footprint
        del self.data_matrix
        gc.collect()  # Force garbage collection to release memory immediately

        self.truncation_error, self.projection_error = self._truncation_error()

        if is_parallel:
            procN_idx = self._cal_procN_len(first_field, self._num_processors)
            boundary_field = first_field.boundaryField
            dimensions = first_field.dimensions

            # Compute boundary values sequentially to avoid memory duplication in multiprocessing
            self.boundaryValues = []
            for procN in range(self._num_processors):
                boundaryValue = self._computeBoundary(
                    [f.boundaryField[procN] for f in self.fieldList],
                    self._coeffs,
                    data_type,
                )
                self.boundaryValues.append(boundaryValue)

            print(
                "Compute boundary values at time: {:.3f} s".format(
                    time.time() - self.start_time
                )
            )

            # Create modes sequentially, passing indices instead of sliced arrays
            self._modes = []
            for procN in range(self._num_processors):
                # Use array views (indices) instead of copies
                start_idx = procN_idx[procN]
                end_idx = procN_idx[procN + 1]
                cell_modes_slice = self.cellModes[:, start_idx:end_idx]

                modes = self._createModes(
                    boundary_field[procN],
                    cell_modes_slice,
                    self.boundaryValues[procN],
                    data_type,
                    dimensions,
                    is_parallel,
                )
                self._modes.append(modes)

                # Clear slice reference to help garbage collector
                del cell_modes_slice

            # Force garbage collection after mode creation
            gc.collect()
        else:
            self.boundaryValues = self._computeBoundary(
                [f.boundaryField for f in self.fieldList],
                self._coeffs,
                data_type,
            )

            print(
                "Compute boundary values at time: {:.3f} s".format(
                    time.time() - self.start_time
                )
            )

            self._modes = self._createModes(
                first_field.boundaryField,
                self.cellModes,
                self.boundaryValues,
                data_type,
                first_field.dimensions,
                is_parallel,
            )

        print("Create modes at time: {:.3f} s".format(time.time() - self.start_time))

    @staticmethod
    def _cal_procN_len(field, num_processors) -> np.ndarray:
        """
        Calculate the number of data points in each processor for parallel fields.

        Parameters
        ----------
        field : OFField
            An OpenFOAM field object.
        num_processors : int
            The number of processor subdomains.

        Returns
        -------
        np.ndarray
            Cumulative start offsets of each processor's data block
            (length num_processors + 1).
        """
        if field.data_type == "scalar":
            procN_len = [
                field.internalField[procN].shape[0] for procN in range(num_processors)
            ]
        elif field.data_type == "vector":
            procN_len = [
                field.internalField[procN].shape[0] * 3
                for procN in range(num_processors)
            ]

        return np.cumsum([0] + procN_len)

    @staticmethod
    def _field2ndarray_serial(fieldList: list) -> np.ndarray:
        """
        Convert a list of OpenFOAM field files to a 2D NumPy array (serial version).

        Parameters
        ----------
        fieldList : list
            List of OpenFOAM field objects.

        Returns
        -------
        np.ndarray
            A 2D array where each row is a flattened field.

        Raises
        ------
        ValueError
            If fieldList is empty or field data is invalid.
        SystemExit
            If uniform fields are encountered or unknown data_type is used.
        """
        if not fieldList:
            raise ValueError("fieldList is empty.")

        if fieldList[0].internal_field_type == "uniform":
            sys.exit("Uniform field is not supported yet.")

        num_fields: int = len(fieldList)
        data_type: str = fieldList[0].data_type

        # Calculate num_data based on data type
        if data_type == "scalar":
            num_data: int = fieldList[0].internalField.size
        elif data_type == "vector":
            if (
                fieldList[0].internalField.ndim != 2
                or fieldList[0].internalField.shape[1] != 3
            ):
                raise ValueError(
                    "Vector field internalField must be a 2D array with shape (num_points, 3)."
                )
            num_data: int = fieldList[0].internalField.shape[0] * 3
        else:
            raise TypeError("Unknown data_type. please use 'scalar' or 'vector'.")

        # Preallocate matrix
        data_matrix: np.ndarray = np.zeros((num_fields, num_data))

        # Process fields based on type (check once, not per iteration)
        if data_type == "scalar":
            for i, field in enumerate(fieldList):
                # ravel() creates a view when possible, faster than flatten()
                data_matrix[i, :] = field.internalField.ravel()
        elif data_type == "vector":
            for i, field in enumerate(fieldList):
                # Use ravel() instead of flatten() for better performance
                data_matrix[i, :] = field.internalField.T.ravel()

        return data_matrix

    @staticmethod
    def _field2ndarray_parallel(fieldList: list) -> np.ndarray:
        """
        Convert a list of OpenFOAM field files to a 2D NumPy array (parallel version).

        Parameters
        ----------
        fieldList : list
            List of OpenFOAM field objects.

        Returns
        -------
        np.ndarray
            A 2D array where each row is a flattened field.

        Raises
        ------
        ValueError
            If fieldList is empty or field data is invalid.
        SystemExit
            If uniform fields are encountered or unknown data_type is used.
        """
        if not fieldList:
            raise ValueError("fieldList is empty.")

        if fieldList[0].internal_field_type == "uniform":
            raise SystemExit("Uniform field is not supported yet.")

        num_fields: int = len(fieldList)
        data_type: str = fieldList[0].data_type

        # Calculate num_data more efficiently
        if data_type == "scalar":
            num_data: int = sum(field.shape[0] for field in fieldList[0].internalField)
        elif data_type == "vector":
            if (
                fieldList[0].internalField[0].ndim != 2
                or fieldList[0].internalField[0].shape[1] != 3
            ):
                raise ValueError(
                    "Vector field internalField must be a 2D array with shape (num_points, 3)."
                )
            num_data: int = (
                sum(field.shape[0] for field in fieldList[0].internalField) * 3
            )
        else:
            raise SystemExit("Unknown data_type. please use 'scalar' or 'vector'.")

        # Preallocate matrix
        data_matrix: np.ndarray = np.zeros((num_fields, num_data))

        # Process fields based on type (check once, not per iteration)
        if data_type == "scalar":
            for i, field in enumerate(fieldList):
                # np.concatenate is faster than np.hstack for 1D arrays
                data_matrix[i, :] = np.concatenate(field.internalField)
        elif data_type == "vector":
            for i, field in enumerate(fieldList):
                # Avoid redundant .flatten() - concatenate already returns 1D
                data_matrix[i, :] = np.concatenate(
                    [f.T.ravel() for f in field.internalField]
                )

        return data_matrix

    @staticmethod
    def _computeBoundary(
        boundaryFields: list, coeffs: np.ndarray, data_type: str
    ) -> dict:
        """
        Compute boundary modes for each patch.

        Parameters
        ----------
        boundaryFields : list
            List of boundary field dictionaries in one processor.
        coeffs : np.ndarray
            The coefficients of the POD modes.
        data_type : str
            The data type ('scalar' or 'vector').

        Returns
        -------
        dict
            Dictionary with patch names as keys and boundary mode values as values.

        Raises
        ------
        ValueError
            If boundary field value type is unknown or coeffs matrix is singular.
        """
        supported_types = ("fixedValue", "fixedGradient", "processor", "calculated")
        boundaryValues: dict = {}

        # Pre-compute inverse once (with error handling)
        try:
            coeffs_inv = np.linalg.inv(coeffs)
        except np.linalg.LinAlgError:
            raise ValueError("Coefficient matrix is singular and cannot be inverted.")

        num_fields = len(boundaryFields)
        first_boundary = boundaryFields[0]

        for patch in first_boundary.keys():
            patch_dict = first_boundary[patch]

            # Skip unsupported types
            if patch_dict["type"] not in supported_types:
                continue

            value_type = next((key for key in patch_dict.keys() if key != "type"), None)
            if value_type is None:
                continue
            first_value = patch_dict[value_type]

            # Skip string values
            if isinstance(first_value, str):
                continue

            # Process only numpy arrays
            if not isinstance(first_value, np.ndarray):
                continue

            # Get value length and type info
            value_len, patch_value_type, uniform_indices = PODmodes._get_value_len(
                boundaryFields, patch, value_type, data_type
            )

            # Pre-allocate the boundary values array
            if data_type == "scalar":
                boundaryValues[patch] = np.zeros((num_fields, value_len))
                # NumPy broadcasting handles both scalar and array assignments
                for i, field in enumerate(boundaryFields):
                    boundaryValues[patch][i, :] = field[patch][value_type]

            elif data_type == "vector":
                boundaryValues[patch] = np.zeros((num_fields, value_len))
                uniform_set = set(uniform_indices)

                for i, field in enumerate(boundaryFields):
                    field_value = field[patch][value_type]
                    if i in uniform_set:
                        # Uniform: 1D array (3,) - tile if needed for mixed, else assign directly
                        if patch_value_type == "uniform":
                            boundaryValues[patch][i, :] = field_value
                        else:  # mixed case
                            boundaryValues[patch][i, :] = np.tile(
                                field_value, (value_len // 3, 1)
                            ).T.ravel()
                    else:
                        # Non-uniform: 2D array (n, 3) - transpose and ravel
                        boundaryValues[patch][i, :] = field_value.T.ravel()

            # Apply coefficient inverse transformation
            boundaryValues[patch] = coeffs_inv @ boundaryValues[patch]

        return boundaryValues

    @staticmethod
    def _get_value_len(
        boundaryFields: list, patch: str, value_type: str, data_type: str
    ) -> Tuple[int, str, list]:
        """
        Determine the value length and type for a boundary patch.

        Parameters
        ----------
        boundaryFields : list
            List of boundary field dictionaries.
        patch : str
            The patch name.
        value_type : str
            The value type (e.g., 'value', 'gradient').
        data_type : str
            The data type ('scalar' or 'vector').

        Returns
        -------
        tuple
            (value_len, patch_value_type, uniform_indices)
            - value_len: Length of the value array
            - patch_value_type: 'uniform', 'nonuniform', or 'mixed'
            - uniform_indices: List of indices with uniform values
        """
        uniform_indices = []

        # Single pass through boundary fields to identify uniform values
        for i, field in enumerate(boundaryFields):
            field_value = field[patch][value_type]
            if data_type == "scalar":
                if field_value.size == 1:
                    uniform_indices.append(i)
            elif data_type == "vector":
                if field_value.ndim == 1:
                    uniform_indices.append(i)

        num_uniform = len(uniform_indices)
        num_total = len(boundaryFields)

        # Determine patch type and value length
        if num_uniform == num_total:
            # All uniform
            patch_value_type = "uniform"
            value_len = 1 if data_type == "scalar" else 3
        elif num_uniform == 0:
            # All non-uniform
            patch_value_type = "nonuniform"
            value_len = boundaryFields[0][patch][value_type].size
        else:
            # Mixed: some uniform, some non-uniform
            patch_value_type = "mixed"
            # Find first non-uniform index to get correct size
            non_uniform_idx = next(
                i for i in range(num_total) if i not in uniform_indices
            )
            value_len = boundaryFields[non_uniform_idx][patch][value_type].size

        return value_len, patch_value_type, uniform_indices

    @staticmethod
    def _createModes(
        bField: dict,
        cellModes: np.ndarray,
        boundaryValues: dict,
        data_type: str,
        dimensions: list,
        parallel: bool,
    ) -> list:
        """
        Assemble the POD modes into OpenFOAM field objects with cellModes and boundaryValues.

        Parameters
        ----------
        bField : dict
            The boundary field dictionary.
        cellModes : np.ndarray
            The cell modes array.
        boundaryValues : dict
            The boundary values dictionary.
        data_type : str
            The data type ('scalar' or 'vector').
        dimensions : list
            The physical dimensions of the field.
        parallel : bool
            Whether the field is parallel or not.

        Returns
        -------
        list
            List of OpenFOAM field objects representing the POD modes.

        Raises
        ------
        ValueError
            If boundary field value type is unknown.
        """
        _modes = []
        internal_field_type = "nonuniform"
        supported_types = {"fixedValue", "fixedGradient", "processor", "calculated"}

        num_modes = cellModes.shape[0]

        for i in range(num_modes):
            mode = OFField()
            mode.data_type = data_type
            mode.dimensions = dimensions
            mode.internal_field_type = internal_field_type
            mode._field_loaded = True
            mode.parallel = parallel

            # Set internal field based on data type
            if data_type == "scalar":
                mode.internalField = cellModes[i, :]
            elif data_type == "vector":
                num_points = cellModes.shape[1] // 3
                mode.internalField = cellModes[i, :].reshape((3, num_points)).T

            # Process boundary fields
            mode.boundaryField = {}
            for patch, patch_dict in bField.items():
                patch_type = patch_dict["type"]
                mode.boundaryField[patch] = {"type": patch_type}

                # Handle supported boundary types
                if patch_type in supported_types:
                    # Get value type (last key that's not 'type')
                    value_type = next(
                        (k for k in reversed(patch_dict.keys()) if k != "type"), None
                    )

                    if value_type is None:
                        continue

                    patch_value = patch_dict[value_type]

                    # Handle string values (e.g., "uniform (0 0 0)")
                    if isinstance(patch_value, str):
                        mode.boundaryField[patch][value_type] = patch_value

                    # Handle numpy array values
                    elif isinstance(patch_value, np.ndarray):
                        if patch not in boundaryValues:
                            continue

                        if data_type == "scalar":
                            mode.boundaryField[patch][value_type] = boundaryValues[
                                patch
                            ][i, :]
                        elif data_type == "vector":
                            mode.boundaryField[patch][value_type] = (
                                boundaryValues[patch][i, :].reshape(3, -1).T
                            )
                    else:
                        raise ValueError(
                            f"Unknown boundary field value type for patch '{patch}'. "
                            f"Expected str or np.ndarray, got {type(patch_value)}."
                        )

                elif len(patch_dict) > 1:
                    # Patch has additional keys beyond 'type' but isn't a supported type
                    raise ValueError(
                        f"Unsupported boundary type '{patch_type}' for patch '{patch}' with additional fields. "
                        f"Supported types are: {', '.join(supported_types)}."
                    )

            _modes.append(mode)

        return _modes

    def writeModes(
        self, outputDir: str, fieldName: str = "PODmode", precision: int = 10
    ) -> None:
        """
        Write the POD modes to files. Should be called after `getModes`.

        Parameters
        ----------
        outputDir : str
            The directory where the mode files will be saved, e.g., case folder.
        fieldName : str, optional
            The base name for the mode files, by default 'PODmode'.
        precision : int, optional
            The numerical precision for writing mode files, by default 10.

        Returns
        -------
        None

        Raises
        ------
        ValueError
            If POD modes have not been computed yet.
        """
        if not hasattr(self, "_modes"):
            raise ValueError(
                "POD modes have not been computed yet. Call getModes() first."
            )
        if self.fieldList[0].parallel:
            tasks = [
                (procN, j + 1, mode, outputDir, fieldName, precision)
                for procN, modeList in enumerate(self._modes)
                for j, mode in enumerate(modeList[: self._rank])
            ]
            with multiprocessing.Pool() as pool:
                pool.map(write_mode_worker, tasks)
        else:
            tasks = [
                (i + 1, mode, outputDir, fieldName, precision)
                for i, mode in enumerate(self._modes[: self._rank])
            ]
            with multiprocessing.Pool() as pool:
                pool.map(write_single_mode, tasks)

    def _performPOD(self, y: np.ndarray) -> None:
        """
        Perform Proper Orthogonal Decomposition (POD) on the training data.

        Parameters
        ----------
        y : np.ndarray
            The training data for which POD is to be performed. Should be a 2D array where each row is a flattened field.

        Returns
        -------
        None

        Raises
        ------
        ValueError
            If the rank is greater than the number of modes.
        """
        if not hasattr(self, "cellModes"):
            self.cellModes, self.s_all, self._coeffs = self.reduction(
                y, POD_algo=self.POD_algo
            )
        if self._rank > self.cellModes.shape[0]:
            raise ValueError("Rank is greater than the number of modes.")

    @staticmethod
    def reduction(y: np.ndarray, POD_algo: str, tol: float = 1e-10) -> tuple:
        """
        Perform Proper Orthogonal Decomposition (POD) on the training data using the specified method.

        Parameters
        ----------
        y : np.ndarray
            The training data for which POD is to be performed. Should be a 2D array where each row is a flattened field.
        POD_algo : str
            The algorithm to use ('svd', 'eigen', or 'auto').
            - 'svd': Use Singular Value Decomposition (best for N >> M)
            - 'eigen': Use eigenvalue decomposition of correlation matrix (best for N << M)
            - 'auto': Automatically choose based on matrix dimensions
        tol : float, optional
            Tolerance for filtering out small singular values/eigenvalues to improve numerical stability.
            Default is 1e-10.

        Returns
        -------
        cellModes : np.ndarray
            The POD modes (spatial modes).
        s_all : np.ndarray
            The singular values (non-negative, sorted in descending order).
        _coeffs : np.ndarray
            The temporal coefficients of the POD modes.

        Raises
        ------
        ValueError
            If POD_algo is not 'svd', 'eigen', or 'auto', or if input dimensions are invalid.
        """
        # Validate input
        if y.ndim != 2:
            raise ValueError(f"Input must be a 2D array, got {y.ndim}D array.")

        N, M = y.shape

        if N == 0 or M == 0:
            raise ValueError(f"Input array has invalid shape: ({N}, {M}).")

        # Auto-select algorithm based on matrix dimensions
        if POD_algo == "auto":
            # Use eigenvalue method when snapshots << spatial points (more efficient)
            POD_algo = "eigen" if N < M else "svd"
            print(f"Auto-selected POD algorithm: {POD_algo} (N={N}, M={M})")

        if POD_algo == "svd":
            # SVD-based POD (direct method)
            # Recommended when N >= M (many snapshots or comparable to spatial points)
            u, s_all, cellModes = svd(y, full_matrices=False)

            # Use broadcasting instead of np.diag for efficiency
            _coeffs = u * s_all  # Broadcasting: (N, N) * (N,) -> (N, N)

            print(f"POD_SVD reduction completed (computed {len(s_all)} modes).")

        elif POD_algo == "eigen":
            # Eigenvalue-based POD (snapshot method / method of snapshots)
            # Recommended when N << M (few snapshots, many spatial points)
            # More efficient as it works with NxN correlation matrix instead of MxM

            # Compute correlation matrix C = Y * Y^T (N x N instead of M x M)
            C: np.ndarray = y @ y.T

            # Use eigh for symmetric matrix (faster and more stable than eig)
            eigenvalues, U = np.linalg.eigh(C)

            # Sort in descending order
            sorted_indices = np.argsort(eigenvalues)[::-1]
            sorted_eigenvalues = eigenvalues[sorted_indices]
            U = U[:, sorted_indices]

            # Filter out negative eigenvalues from numerical errors
            positive_mask = sorted_eigenvalues > tol
            if not np.all(positive_mask):
                num_filtered = np.sum(~positive_mask)
                print(
                    f"Warning: Filtered {num_filtered} near-zero/negative eigenvalues (< {tol})."
                )
                sorted_eigenvalues = sorted_eigenvalues[positive_mask]
                U = U[:, positive_mask]

            # Compute singular values from eigenvalues
            s_all = np.sqrt(np.maximum(sorted_eigenvalues, 0))  # Ensure non-negative

            # Use broadcasting for coefficients (more efficient than np.diag)
            _coeffs = U * s_all  # Broadcasting: (N, k) * (k,) -> (N, k)

            # Vectorized computation of spatial modes (instead of loop)
            # cellModes[i, :] = (1 / s_all[i]) * (U[:, i]^T @ y)
            # This is equivalent to: cellModes = diag(1/s_all) @ U^T @ y
            # Using broadcasting: (U^T @ y) / s_all[:, None]
            cellModes = (U.T @ y) / s_all[:, np.newaxis]

            print(f"POD_eigen reduction completed (computed {len(s_all)} modes).")

        else:
            raise ValueError("POD_algo must be 'svd', 'eigen', or 'auto'.")

        return cellModes, s_all, _coeffs

    def _truncation_error(self) -> tuple:
        """
        Compute the truncation error for each mode.

        Returns
        -------
        tuple of np.ndarray
            truncation_error : np.ndarray
                The truncation error for each mode.
            projection_error : np.ndarray
                The projection error for each mode.
        """
        total_energy: float = np.sum(self.s_all**2)
        cumulative_energy: np.ndarray = np.cumsum(self.s_all**2)
        truncation_error: np.ndarray = 1 - cumulative_energy / total_energy
        numerical_noise_indices = np.where(truncation_error < 0)
        truncation_error[numerical_noise_indices] = 0.0
        projection_error: np.ndarray = np.sqrt(truncation_error)
        return truncation_error, projection_error

    def split_cellData(self) -> list:
        """
        Split the cellModes into segments corresponding to each processor.
        Not needed for current implementation but may be useful for future extensions.

        Returns
        -------
        list
            Segmented cellModes (one np.ndarray per processor).

        Raises
        ------
        ValueError
            If cellModes are not computed or field is not parallel.
        """
        if not hasattr(self, "cellModes"):
            raise ValueError("cellModes not computed yet. Call _performPOD first.")

        if not self.fieldList[0].parallel:
            raise ValueError(
                "Field is not parallel. split_cellData is not applicable."
            )

        _length = [0]
        for i in range(self.fieldList[0]._num_processors):
            _length.append(_length[-1] + self.fieldList[0].internalField[i].shape[0])
        total_len = sum([field.shape[0] for field in self.fieldList[0].internalField])
        if total_len != _length[-1]:
            raise ValueError("Mismatch in total number of cells across processors.")

        proc_idx = [
            list(range(_length[i], _length[i + 1])) for i in range(len(_length) - 1)
        ]

        cellModes = []
        if self.fieldList[0].data_type == "scalar":
            for procI in proc_idx:
                proc_cellModes = []
                for i in range(self.cellModes.shape[0]):
                    modes = self.cellModes[i, procI]
                    proc_cellModes.append(modes)
                cellModes.append(np.array(proc_cellModes))
        elif self.fieldList[0].data_type == "vector":
            for procI in proc_idx:
                proc_cellModes = []
                idx_x = procI
                idx_y = [i + total_len for i in procI]
                idx_z = [i + 2 * total_len for i in procI]
                idx = idx_x + idx_y + idx_z
                for i in range(self.cellModes.shape[0]):
                    modes = self.cellModes[i, idx]
                    proc_cellModes.append(modes)
                cellModes.append(np.array(proc_cellModes))

        return cellModes

    @staticmethod
    def _reconstructField_parallel(
        _modes: List[List[OFField]], coeffs: np.ndarray, _num_processors: int
    ):
        """
        Reconstruct the original field from the POD modes and coefficients (parallel version).

        Parameters
        ----------
        _modes : List[List[OFField]]
            The list of POD mode OpenFOAM field objects for each processor.
        coeffs : np.ndarray
            The coefficients for reconstructing the field. Shape should be (rank,).
        _num_processors : int
            The number of processors.

        Returns
        -------
        list
            List of OpenFOAM field objects representing the reconstructed field for each processor.

        Raises
        ------
        ValueError
            If rank is greater than the number of modes or coeffs is not 1D.
        """
        rank = coeffs.shape[0]
        if rank > len(_modes[0]):
            raise ValueError("Rank cannot be greater than the number of modes.")
        if coeffs.ndim != 1:
            raise ValueError("Coefficients should be a 1D array.")

        # Define supported boundary types
        supported_types = {"fixedValue", "fixedGradient", "processor", "calculated"}

        recOFFieldList = []
        for procN in range(_num_processors):
            # Initialize reconstructed field
            recOFField = OFField.from_OFField(_modes[procN][0])
            recOFField.internalField = np.zeros(_modes[procN][0].internalField.shape)

            # Reconstruct internal field using vectorized operation
            for i in range(rank):
                recOFField.internalField += coeffs[i] * _modes[procN][i].internalField

            # Reconstruct boundary field
            first_boundary = _modes[procN][0].boundaryField
            for patch, patch_dict in first_boundary.items():
                patch_type = patch_dict["type"]

                if patch_type in supported_types:
                    # Get value type (last key that's not 'type')
                    value_type = next(
                        (k for k in reversed(patch_dict.keys()) if k != "type"), None
                    )

                    if value_type is None:
                        continue

                    patch_value = patch_dict[value_type]

                    # Handle string values (e.g., "uniform (0 0 0)")
                    if isinstance(patch_value, str):
                        recOFField.boundaryField[patch][value_type] = patch_value

                    # Handle numpy array values
                    elif isinstance(patch_value, np.ndarray):
                        # Initialize with zeros
                        recOFField.boundaryField[patch][value_type] = np.zeros(
                            patch_value.shape
                        )

                        # Accumulate contributions from all modes
                        for i in range(rank):
                            recOFField.boundaryField[patch][value_type] += (
                                coeffs[i]
                                * _modes[procN][i].boundaryField[patch][value_type]
                            )
                    else:
                        raise ValueError(
                            f"Unknown boundary field value type for patch '{patch}'. "
                            f"Expected str or np.ndarray, got {type(patch_value)}."
                        )
                else:
                    # For unsupported types, just copy the type
                    recOFField.boundaryField[patch]["type"] = patch_type

            recOFFieldList.append(recOFField)
        return recOFFieldList

    @staticmethod
    def _reconstructField_serial(_modes: List[OFField], coeffs: np.ndarray):
        """
        Reconstruct the field using the given coefficients and rank (serial version).

        Parameters
        ----------
        _modes : List[OFField]
            The list of POD mode OpenFOAM field objects.
        coeffs : np.ndarray
            The coefficients for reconstructing the field. Shape should be (rank,).

        Returns
        -------
        OFField
            The reconstructed OpenFOAM field object.

        Raises
        ------
        ValueError
            If rank is greater than the number of modes or coeffs is not 1D.
        """
        rank = coeffs.shape[0]
        if rank > len(_modes):
            raise ValueError("Rank cannot be greater than the number of modes.")
        if coeffs.ndim != 1:
            raise ValueError("Coefficients should be a 1D array.")

        # Initialize reconstructed field
        recOFField = OFField.from_OFField(_modes[0])
        recOFField.internalField = np.zeros(_modes[0].internalField.shape)

        # Reconstruct internal field using vectorized operation
        for i in range(rank):
            recOFField.internalField += coeffs[i] * _modes[i].internalField

        # Define supported boundary types
        supported_types = {"fixedValue", "fixedGradient", "calculated"}

        # Reconstruct boundary field
        first_boundary = _modes[0].boundaryField
        for patch, patch_dict in first_boundary.items():
            patch_type = patch_dict["type"]

            if patch_type in supported_types:
                # Get value type (last key that's not 'type')
                value_type = next(
                    (k for k in reversed(patch_dict.keys()) if k != "type"), None
                )

                if value_type is None:
                    continue

                patch_value = patch_dict[value_type]

                # Handle string values (e.g., "uniform (0 0 0)")
                if isinstance(patch_value, str):
                    recOFField.boundaryField[patch][value_type] = patch_value

                # Handle numpy array values
                elif isinstance(patch_value, np.ndarray):
                    # Initialize with zeros
                    recOFField.boundaryField[patch][value_type] = np.zeros(
                        patch_value.shape
                    )

                    # Accumulate contributions from all modes
                    for i in range(rank):
                        recOFField.boundaryField[patch][value_type] += (
                            coeffs[i] * _modes[i].boundaryField[patch][value_type]
                        )
                else:
                    raise ValueError(
                        f"Unknown boundary field value type for patch '{patch}'. "
                        f"Expected str or np.ndarray, got {type(patch_value)}."
                    )
            else:
                # For unsupported types, just copy the type
                recOFField.boundaryField[patch]["type"] = patch_type

        return recOFField

    def writeRecField(
        self,
        coeffs: np.ndarray,
        outputDir: str,
        timeDir: int,
        fieldName: str = "recField",
        precision: int = 10,
    ):
        """
        Write the reconstructed field to files.

        Parameters
        ----------
        coeffs : np.ndarray
            The coefficients for reconstructing the field.
        outputDir : str
            The case directory where the reconstructed field files will be saved.
        timeDir : int
            The time directory for the reconstructed field files.
        fieldName : str, optional
            The base name for the reconstructed field files, by default 'recField'.
        precision : int, optional
            The numerical precision for writing the reconstructed field files, by default 10.

        Returns
        -------
        None

        Raises
        ------
        ValueError
            If the reconstructed field format is incorrect.
        """
        if self.parallel:
            recOFField = self._reconstructField_parallel(
                self._modes, coeffs, self._num_processors
            )
            if (
                not isinstance(recOFField, list)
                or len(recOFField) != self._num_processors
            ):
                raise ValueError(
                    "For parallel fields, recOFFields should be a list with length equal to the number of processors."
                )
            tasks = [
                (procN, timeDir, recOFField[procN], outputDir, fieldName, precision)
                for procN in range(self._num_processors)
            ]
            with multiprocessing.Pool() as pool:
                pool.map(write_mode_worker, tasks)
        else:
            recOFField = self._reconstructField_serial(self._modes, coeffs)
            if not isinstance(recOFField, OFField):
                raise ValueError(
                    "For non-parallel fields, recOFFields should be a single OFField object."
                )
            recOFField.writeField(outputDir, timeDir, fieldName, precision)

    @staticmethod
    def _convert_mode_list(mode_list: List[List[OFField]]) -> List[OFField]:
        """
        Convert processor-wise mode list to mode-wise list for parallel fields.

        Transforms mode_list[processor][mode] structure into a list where each mode
        contains data from all processors, enabling parallel field reconstruction.

        Parameters
        ----------
        mode_list : List[List[OFField]]
            Nested list where mode_list[procN][modeN] contains the mode data for
            processor procN and mode modeN. All sublists must have the same length.

        Returns
        -------
        List[OFField]
            List of merged OFField objects, where each mode aggregates data from
            all processors with parallel=True flag set.

        Raises
        ------
        ValueError
            If mode_list is empty or sublists have inconsistent lengths.
        TypeError
            If mode_list structure is invalid or contains non-OFField objects.

        Examples
        --------
        >>> # mode_list[0] = [mode0_proc0, mode1_proc0]
        >>> # mode_list[1] = [mode0_proc1, mode1_proc1]
        >>> converted = _convert_mode_list(mode_list)
        >>> # converted[0] contains mode0 data from all processors
        >>> # converted[1] contains mode1 data from all processors
        """
        # Validate input
        if not mode_list:
            raise ValueError("mode_list cannot be empty.")

        if not isinstance(mode_list, list) or not all(
            isinstance(sublist, list) for sublist in mode_list
        ):
            raise TypeError("mode_list must be a list of lists.")

        num_processors = len(mode_list)
        num_modes = len(mode_list[0])

        # Check consistency across processors
        if not all(len(sublist) == num_modes for sublist in mode_list):
            raise ValueError(
                f"Inconsistent mode counts across processors. "
                f"Expected {num_modes} modes in all sublists, but got varying lengths."
            )

        # Validate that all elements are OFField objects
        for procN, sublist in enumerate(mode_list):
            for modeN, field in enumerate(sublist):
                if not isinstance(field, OFField):
                    raise TypeError(
                        f"mode_list[{procN}][{modeN}] is not an OFField object. "
                        f"Got {type(field).__name__} instead."
                    )

        # Pre-allocate list for better performance
        converted_modes = []

        # Convert modes: transpose the structure from [proc][mode] to [mode][proc]
        for j in range(num_modes):
            # Clone structure from first processor's mode
            mode = OFField.from_OFField(mode_list[0][j])

            # Pre-allocate lists with known size
            mode.internalField = [None] * num_processors
            mode.boundaryField = [None] * num_processors

            # Gather data from all processors for this mode
            for procN in range(num_processors):
                mode.internalField[procN] = mode_list[procN][j].internalField
                mode.boundaryField[procN] = mode_list[procN][j].boundaryField

            mode.parallel = True
            converted_modes.append(mode)

        return converted_modes


def write_mode_worker(args):
    """
    Worker function to write a single POD mode to disk for parallel fields.

    Parameters
    ----------
    args : tuple
        Tuple containing (procN, j, mode, outputDir, fieldName, precision):
        procN (int): Processor number.
        j (int): Mode index.
        mode (OFField): The POD mode object to write.
        outputDir (str): Output directory path.
        fieldName (str): Name for the output field file.
        precision (int): Numerical precision for writing the field.

    Raises
    ------
    FileNotFoundError
        If the processor directory does not exist.
    """
    procN, j, mode, outputDir, fieldName, precision = args
    mode.parallel = False
    output_path = f"{outputDir}/processor{procN}"
    if not os.path.exists(output_path):
        raise FileNotFoundError(f"Processor directory {output_path} does not exist.")
    mode.writeField(output_path, j, fieldName, precision)
    mode.parallel = True


def write_single_mode(args):
    """
    Worker function to write a single POD mode to disk for serial fields.

    Parameters
    ----------
    args : tuple
        Tuple containing (i, mode, outputDir, fieldName, precision):
        i (int): Mode index.
        mode (OFField): The POD mode object to write.
        outputDir (str): Output directory path.
        fieldName (str): Name for the output field file.
        precision (int): Numerical precision for writing the field.
    """
    i, mode, outputDir, fieldName, precision = args
    mode.writeField(outputDir, i, fieldName, precision)
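
For orientation, a minimal usage sketch of the PODmodes class above (not part of the package source; it assumes a list `fields` of already-loaded OFField objects for a serial case, since the reader side lives in foamToPython/readOFField.py and is not shown in this file):

# Hypothetical example, names other than PODmodes/OFField API calls are placeholders.
from PODopenFOAM.ofmodes import PODmodes

pod = PODmodes(fields, POD_algo="eigen", rank=5, run=True)

print(pod.truncation_error[:5])   # energy-based truncation error per retained mode
pod.writeModes("caseDir", fieldName="PODmode", precision=10)

# Reconstruct the first snapshot from its rank-truncated temporal coefficients
pod.writeRecField(pod.coeffs[0, :], "caseDir", timeDir=1, fieldName="recField")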