foamToPython 0.0.1__py3-none-any.whl

import numpy as np
import sys
import os
import re
import mmap
import multiprocessing

multiprocessing.set_start_method("spawn", force=True)

import copy
from typing import List, Dict, Any, Optional, Tuple, Union

from .headerEnd import *


class OFField:
    """
    A class for reading and writing OpenFOAM field files, supporting both scalar and vector fields.

    This class provides functionality to read OpenFOAM field files in both serial and parallel
    formats, parse internal and boundary field data, and write field data back to OpenFOAM format.

    For serial fields, the internal field is an ndarray (for nonuniform fields), a float (for a
    uniform scalar), or an array of shape (3,) (for a uniform vector), and boundary fields are
    stored in a dictionary.
    For parallel fields, the internal field is a list of ndarrays (one per processor), and boundary
    fields are stored in a list of dictionaries (one per processor).

    Attributes
    ----------
    filename : str
        Path to the OpenFOAM field file.
    fieldName : str
        Name of the field.
    timeName : str
        Time directory name.
    data_type : str, optional
        Type of field ('scalar' or 'vector').
    read_data : bool
        Whether to read field data upon initialization.
    parallel : bool
        Whether the field uses parallel processing.
    internal_field_type : str, optional
        Type of internal field ('uniform', 'nonuniform', or 'nonuniformZero').
    dimensions : np.ndarray
        Physical dimensions of the field [kg m s K mol A cd].
    internalField : Union[float, np.ndarray]
        Internal field data.
    boundaryField : Dict[str, Dict[str, Any]]
        Boundary field data organized by patch names.

    Examples
    --------
    Reading a scalar field file:

    >>> field = OFField('case/0/p', data_type='scalar', read_data=True)
    >>> pressure_values = field.internalField

    Reading a vector field file:

    >>> velocity = OFField('case/0/U', data_type='vector', read_data=True)
    >>> u_components = velocity.internalField  # Shape: (n_cells, 3)
    """

    filename: str
    fieldName: str
    timeName: str
    data_type: str
    read_data: bool
    parallel: bool
    reconstructPar: bool
    caseDir: str
    num_batch: int
    _field_loaded: bool
    _dimensions: np.ndarray
    _internalField: Union[float, np.ndarray, List[np.ndarray]]
    internal_field_type: str
    _boundaryField: Union[Dict[str, Dict[str, Any]], List[Dict[str, Dict[str, Any]]]]
    _num_processors: int

    def __init__(
        self,
        filename: str = None,
        data_type: str = None,
        read_data: bool = False,
        parallel: bool = False,
        reconstructPar: bool = False,
        num_batch: int = 8,
    ) -> None:
        """
        Initialize an OFField object.

        Parameters
        ----------
        filename : str, optional
            Path to the OpenFOAM field file, by default None.
        data_type : str, optional
            Type of field ('scalar' or 'vector'), by default None.
        read_data : bool, optional
            If True, read the field file upon initialization, by default False.
        parallel : bool, optional
            If True, enable parallel processing for multi-processor cases, by default False.
        reconstructPar : bool, optional
            If True, reconstruct the parallel field into a single field, by default False.
        num_batch : int, optional
            Number of worker processes to use for parallel reading, by default 8.

        Notes
        -----
        If filename is provided, the object will automatically extract caseDir, fieldName,
        and timeName from the path. If read_data is True, the field data will be loaded
        immediately upon initialization.
        """
        if filename is not None:
            self.filename = filename
            self.caseDir = "/".join(filename.split("/")[:-2])
            self.fieldName = filename.split("/")[-1]
            self.timeName = filename.split("/")[-2]
        else:
            self.filename = ""
            self.caseDir = ""
            self.fieldName = ""
            self.timeName = ""

        self.parallel = parallel
        self.reconstructPar = reconstructPar
        self.num_batch = num_batch

        if not self.parallel and self.reconstructPar:
            raise ValueError("reconstructPar can only be True if parallel is True.")

        self.data_type = data_type
        self.read_data = read_data
        self.internal_field_type = None

        self._dimensions = np.array([])
        self._internalField = np.array([])
        self._boundaryField = {}
        self._field_loaded = False

        if self.read_data:
            (
                self._dimensions,
                self._internalField,
                self._boundaryField,
                self.internal_field_type,
            ) = self.readField()
            print(f"Field {self.fieldName} read from {self.filename}")
            self._field_loaded = True
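
    # Example (illustrative): reading a decomposed case in parallel. "cavity"
    # is a hypothetical case with four processor directories; the filename only
    # serves to derive caseDir, timeName, and fieldName.
    #
    # >>> U = OFField("cavity/0/U", data_type="vector", parallel=True,
    # ...             read_data=True, num_batch=4)
    # >>> len(U.internalField)      # one array per processor directory
    # 4
    # >>> U.internalField[0].shape  # (n_cells_on_processor0, 3)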

    # An alternative constructor: copy-construct from another OFField object
    @classmethod
    def from_OFField(cls, other: "OFField") -> "OFField":
        """
        Create a new OFField instance by copying another OFField object.

        Parameters
        ----------
        other : OFField
            The source OFField object to copy from.

        Returns
        -------
        OFField
            A new OFField instance with copied attributes from the source object.

        Notes
        -----
        This method performs a deep copy of arrays and dictionaries to ensure
        the new instance is independent of the original object.
        """
        new_field = cls()

        new_field.filename = other.filename
        new_field.caseDir = other.caseDir
        new_field.fieldName = other.fieldName
        new_field.timeName = other.timeName

        # Simple attributes
        new_field.data_type = other.data_type
        new_field.read_data = other.read_data
        new_field.parallel = other.parallel
        new_field.reconstructPar = other.reconstructPar
        new_field.num_batch = other.num_batch
        new_field.internal_field_type = other.internal_field_type

        # Deep copy dimensions
        new_field._dimensions = copy.deepcopy(other._dimensions)

        # internalField: handle a float (uniform scalar), an ndarray, or a
        # list of ndarrays (parallel case)
        if isinstance(other._internalField, list):
            new_field._internalField = [
                other._internalField[procN].copy()
                for procN in range(len(other._internalField))
            ]
        elif isinstance(other._internalField, np.ndarray):
            new_field._internalField = other._internalField.copy()
        elif isinstance(other._internalField, (int, float)):
            new_field._internalField = other._internalField
        else:
            raise ValueError(
                "Unsupported type for internalField. It should be a float, an ndarray, or a list of ndarrays."
            )

        # boundaryField: Dict[str, Dict[str, Any]] for serial,
        # List[Dict[str, Dict[str, Any]]] for parallel
        if isinstance(other._boundaryField, list):
            new_field._boundaryField = [
                {
                    patch: {
                        key: (
                            value.copy()
                            if isinstance(value, np.ndarray)
                            else copy.deepcopy(value)
                        )
                        for key, value in info.items()
                    }
                    for patch, info in other._boundaryField[procN].items()
                }
                for procN in range(len(other._boundaryField))
            ]
        elif isinstance(other._boundaryField, dict):
            new_field._boundaryField = {
                patch: {
                    key: (
                        value.copy()
                        if isinstance(value, np.ndarray)
                        else copy.deepcopy(value)
                    )
                    for key, value in info.items()
                }
                for patch, info in other._boundaryField.items()
            }
        else:
            raise ValueError(
                "Unsupported type for boundaryField. It should be dict or list of dicts."
            )

        new_field._field_loaded = other._field_loaded
        return new_field
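
    # Example (illustrative; assumes a nonuniform field and an "inlet" patch):
    # the copy is fully independent of its source.
    #
    # >>> src = OFField("cavity/0/p", data_type="scalar", read_data=True)
    # >>> dup = OFField.from_OFField(src)
    # >>> dup.internalField is src.internalField
    # False
    # >>> dup.boundaryField["inlet"]["value"] = 2.0
    # >>> src.boundaryField["inlet"]["value"]  # unchanged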

    @property
    def dimensions(self):
        if not self._field_loaded:
            (
                self._dimensions,
                self._internalField,
                self._boundaryField,
                self.internal_field_type,
            ) = self.readField()
            self._field_loaded = True
        return self._dimensions

    @dimensions.setter
    def dimensions(self, value):
        self._dimensions = value

    @property
    def internalField(self):
        """
        Get the internal field data.

        Returns
        -------
        Union[float, np.ndarray, List[np.ndarray]]
            For serial fields: a float (uniform scalar), an array of shape (3,)
            (uniform vector), or an ndarray of cell values (nonuniform).
            For parallel fields: a list of arrays, one per processor.
        """
        if not self._field_loaded:
            (
                self._dimensions,
                self._internalField,
                self._boundaryField,
                self.internal_field_type,
            ) = self.readField()
            self._field_loaded = True
        return self._internalField

    @internalField.setter
    def internalField(self, value):
        self._internalField = value

    @property
    def boundaryField(self):
        """
        Get the boundary field data.

        Returns
        -------
        Union[Dict, List[Dict]]
            For serial fields: a dictionary of boundary field properties.
            For parallel fields: a list of dictionaries, one per processor.
        """
        if not self._field_loaded:
            (
                self._dimensions,
                self._internalField,
                self._boundaryField,
                self.internal_field_type,
            ) = self.readField()
            self._field_loaded = True
        return self._boundaryField

    @boundaryField.setter
    def boundaryField(self, value):
        self._boundaryField = value
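
    # Example (illustrative): with read_data=False (the default), the file is
    # parsed lazily on the first access to dimensions, internalField, or
    # boundaryField; later accesses reuse the cached data.
    #
    # >>> p = OFField("cavity/0/p", data_type="scalar")  # nothing read yet
    # >>> p.dimensions  # first property access triggers readField()
    # array([ 0,  2, -2,  0,  0,  0,  0])
    # >>> p.internalField  # already loaded; the file is not parsed again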

    def readField(self):
        if self.parallel:
            return self._readField_parallel()
        else:
            return self._readField(self.filename, self.data_type)

    @staticmethod
    def _readField(filename: str, data_type: str, parallel: bool = False) -> Tuple[
        np.ndarray,
        Union[float, np.ndarray, List[np.ndarray]],
        Union[Dict[str, Dict[str, Any]], List[Dict[str, Dict[str, Any]]]],
        str,
    ]:
        """
        Read the field file and parse internal and boundary fields.

        Parameters
        ----------
        filename : str
            Path to the OpenFOAM field file.
        data_type : str
            Type of field ('scalar' or 'vector').
        parallel : bool, optional
            If True, indicates a parallel processing context, by default False.

        Returns
        -------
        tuple
            A tuple containing:
            - _dimensions : np.ndarray
                Physical dimensions of the field.
            - _internalField : Union[float, np.ndarray]
                Internal field data.
            - _boundaryField : Dict[str, Dict[str, Any]]
                Boundary field data organized by patch names.
            - internal_field_type : str
                Type of internal field ('uniform', 'nonuniform', or 'nonuniformZero').

        Raises
        ------
        ValueError
            If the internal field type is invalid or the file format is incorrect.
        SystemExit
            If an unknown data_type is encountered.
        """
        with open(f"{filename}", "rb") as f:
            # For very large files, use memory mapping
            file_size = os.path.getsize(filename)
            if (
                file_size > 50 * 1024 * 1024
            ):  # 50 MB threshold (kept low for multiprocessing)
                with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mmapped_file:
                    content = mmapped_file.read().splitlines()
            else:
                content = f.readlines()

        data_idx, boundary_start_idx, dim_idx, data_size, internal_field_type = (
            OFField._num_field(content)
        )

        _dimensions = _process_dimensions(content[dim_idx].decode("utf-8"))

        if internal_field_type == "uniform":
            _internalField = OFField._process_uniform(
                content[data_idx].decode("utf-8"), data_type
            )
        elif internal_field_type == "nonuniform":
            data_start_idx = data_idx + 2
            # Extract the lines containing the data values
            _internalField = OFField._process_field(
                content[data_start_idx : data_start_idx + data_size],
                data_size,
                data_type,
            )
        elif internal_field_type == "nonuniformZero":
            if data_type == "scalar":
                _internalField = np.array([])
            elif data_type == "vector":
                _internalField = np.empty((0, 3))
            else:
                sys.exit("Unknown data_type. Please use 'scalar' or 'vector'.")
        else:
            raise ValueError(
                "internal_field_type should be 'uniform' or 'nonuniform'"
            )

        _boundaryField = OFField._process_boundary(
            content[boundary_start_idx:], data_type, parallel
        )

        return _dimensions, _internalField, _boundaryField, internal_field_type
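
    # The parser above expects the usual OpenFOAM field layout, sketched here
    # for a nonuniform scalar field (FoamFile header omitted):
    #
    #     dimensions      [0 2 -2 0 0 0 0];
    #
    #     internalField   nonuniform List<scalar>
    #     3
    #     (
    #     1.5
    #     2.0
    #     2.5
    #     )
    #     ;
    #
    #     boundaryField
    #     {
    #         inlet
    #         {
    #             type            fixedValue;
    #             value           uniform 1.5;
    #         }
    #     }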

    def _readField_parallel(
        self,
    ) -> Tuple[np.ndarray, List[np.ndarray], List[Dict[str, Dict[str, Any]]], str]:
        case_dir = self.caseDir
        processor_dirs = sorted(
            [d for d in os.listdir(case_dir) if d.startswith("processor")],
            key=lambda x: int(x.replace("processor", "")),
        )
        if not processor_dirs:
            raise FileNotFoundError("No processor directories found.")

        proc_paths = [
            os.path.join(case_dir, proc_dir, self.timeName, self.fieldName)
            for proc_dir in processor_dirs
        ]
        for proc_path in proc_paths:
            if not os.path.isfile(proc_path):
                raise FileNotFoundError(f"Field file not found in {proc_path}")

        with multiprocessing.Pool(processes=self.num_batch) as pool:
            # Read all processor files concurrently
            results = pool.starmap(
                self._readField,
                [(proc_path, self.data_type, True) for proc_path in proc_paths],
            )

        # Unpack results
        _dimensions = results[0][0]
        _internalField = []
        _boundaryField = []
        internal_field_types = []

        for dim, internal, boundary, field_type in results:
            if not np.array_equal(dim, _dimensions):
                raise ValueError("Inconsistent field dimensions across processors.")
            _internalField.append(internal)
            _boundaryField.append(boundary)
            internal_field_types.append(field_type)

        if all("nonuniform" in ft for ft in internal_field_types):
            self.internal_field_type = "nonuniform"
        else:
            self.internal_field_type = "uniform"
        self._num_processors = len(results)

        return _dimensions, _internalField, _boundaryField, self.internal_field_type

    # def _reconstruct_fields_optimized(self, _internalField, _boundaryField, internal_field_types):
    #     """
    #     Efficiently reconstruct parallel fields as a single field for large datasets.
    #     """
    #     if self.internal_field_type == "uniform":
    #         reconstructed_internal = _internalField[0]
    #     elif self.internal_field_type == "nonuniform":
    #         if self.data_type == "scalar":
    #             # Use numpy's concatenate for efficiency
    #             reconstructed_internal = np.concatenate(_internalField)
    #         elif self.data_type == "vector":
    #             # Use vstack for vectors
    #             reconstructed_internal = np.vstack(_internalField)
    #         else:
    #             sys.exit("Unknown data_type. Please use 'scalar' or 'vector'.")
    #     else:
    #         raise ValueError(
    #             "internal_field_type should be 'uniform' or 'nonuniform'"
    #         )
    #
    #     # Merge boundary fields efficiently
    #     merged_boundary = {}
    #     for boundary in _boundaryField:
    #         for patch, props in boundary.items():
    #             if "procBoundary" in patch:
    #                 continue  # Skip processor boundary patches
    #             if patch not in merged_boundary:
    #                 merged_boundary[patch] = props.copy()
    #             else:
    #                 # For overlapping patches, merge arrays if they exist
    #                 for key, value in props.items():
    #                     if key in merged_boundary[patch]:
    #                         if isinstance(value, np.ndarray) and isinstance(merged_boundary[patch][key], np.ndarray):
    #                             # Concatenate arrays
    #                             if value.ndim == 1 and merged_boundary[patch][key].ndim == 1:
    #                                 merged_boundary[patch][key] = np.concatenate([merged_boundary[patch][key], value])
    #                             elif value.ndim == 2 and merged_boundary[patch][key].ndim == 2:
    #                                 merged_boundary[patch][key] = np.vstack([merged_boundary[patch][key], value])
    #
    #     return reconstructed_internal, merged_boundary

    @staticmethod
    def _process_uniform(line: str, data_type: str):
        """
        Process a uniform internal field value.

        Parameters
        ----------
        line : str
            Line containing the uniform value.
        data_type : str
            Type of field ('scalar' or 'vector').

        Returns
        -------
        Union[float, np.ndarray]
            For scalar fields: float value.
            For vector fields: numpy array with shape (3,).

        Raises
        ------
        ValueError
            If the uniform field format is invalid for the specified data_type.
        SystemExit
            If an unknown data_type is encountered.
        """
        if data_type == "scalar":
            # Extract the scalar value after 'uniform'
            match = re.search(r"uniform\s+([-+]?\d*\.?\d+(?:[eE][-+]?\d+)?|\d+)", line)
            if match:
                _internalField = float(match.group(1))
            else:
                raise ValueError("Invalid uniform scalar format")

        elif data_type == "vector":
            # Extract the vector value after 'uniform'; the elements may be integers
            # or floats, for example: uniform (0 0 1.0) or uniform (1 0 0)
            match = re.search(
                r"uniform\s+\(\s*([-+]?\d*\.?\d+(?:[eE][-+]?\d+)?\s+[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?\s+[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?\s*)\)",
                line,
            )
            if match:
                vec_str = match.group(1)
                _internalField = np.array([float(x) for x in vec_str.split()])
            else:
                raise ValueError("Invalid uniform vector format")

        else:
            sys.exit("Unknown data_type. Please use 'scalar' or 'vector'.")

        return _internalField
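
    # Example (illustrative):
    #
    # >>> OFField._process_uniform("internalField uniform 101325;", "scalar")
    # 101325.0
    # >>> OFField._process_uniform("internalField uniform (0 0 1.0);", "vector")
    # array([0., 0., 1.])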

    @staticmethod
    def _process_field(string_coords: List[bytes], data_size: int, data_type: str):
        """
        Process nonuniform internal field values.

        Parameters
        ----------
        string_coords : List[bytes]
            List of byte strings containing field values.
        data_size : int
            Number of data points expected.
        data_type : str
            Type of field ('scalar' or 'vector').

        Returns
        -------
        np.ndarray
            For scalar fields: 1D array with shape (data_size,).
            For vector fields: 2D array with shape (data_size, 3).

        Raises
        ------
        SystemExit
            If an unknown data_type is encountered.
        """
        if data_type == "scalar":
            # Join all lines and strip unwanted characters once
            joined_coords = b" ".join(string_coords).replace(b"\n", b"")
            # Convert to a numpy array in one go
            _internalField = np.fromstring(
                joined_coords.decode("utf-8"), sep=" ", dtype=np.float64
            )

            if len(_internalField) != data_size:
                raise ValueError(
                    f"Expected {data_size} data points, but got {len(_internalField)}."
                )

        elif data_type == "vector":
            # Join all lines and strip unwanted characters once
            joined_coords = (
                b" ".join(string_coords)
                .replace(b")", b"")
                .replace(b"(", b"")
                .replace(b"\n", b"")
            )
            # Convert to a numpy array
            arr = np.fromstring(
                joined_coords.decode("utf-8"), sep=" ", dtype=np.float64
            )
            try:
                _internalField = arr.reshape(data_size, 3)
            except ValueError:
                raise ValueError(
                    f"Cannot reshape internal field of length {arr.size} to shape ({data_size}, 3)."
                )

        else:
            sys.exit("Unknown data_type. Please use 'scalar' or 'vector'.")

        return _internalField
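
    # Example (illustrative): the raw data lines arrive as bytes, one value
    # (or one "(x y z)" triple) per line.
    #
    # >>> OFField._process_field([b"1.5", b"2.0", b"2.5"], 3, "scalar")
    # array([1.5, 2. , 2.5])
    # >>> OFField._process_field([b"(1 0 0)", b"(0 1 0)"], 2, "vector")
    # array([[1., 0., 0.],
    #        [0., 1., 0.]])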

    @staticmethod
    def _process_boundary(
        lines: List[Union[str, bytes]], data_type: str, parallel: bool
    ) -> Dict[str, Dict[str, Any]]:
        """
        Process the boundaryField section and extract patch properties.

        Parameters
        ----------
        lines : List[Union[str, bytes]]
            List of lines (bytes or str) for the boundaryField section.
        data_type : str
            Type of field ('scalar' or 'vector').
        parallel : bool
            If True, zero-size list declarations such as
            "value nonuniform List<vector> 0();" are kept as raw strings
            instead of raising an error.

        Returns
        -------
        Dict[str, Dict[str, Any]]
            Dictionary containing boundary field properties organized by patch
            names; each patch contains properties like 'type', 'value', etc.
            (For parallel cases, _readField_parallel collects one such
            dictionary per processor.)

        Raises
        ------
        ValueError
            If the file format is incorrect or boundary field parsing fails.
        """
        # decode bytes to str if necessary
        if isinstance(lines[0], bytes):
            lines = [line.decode("utf-8") for line in lines]

        bc_dict = {}
        i = 0
        n = len(lines)

        def skip_empty_and_comments(idx):
            while idx < n:
                line = lines[idx].strip()
                if line == "" or line.startswith("//"):
                    idx += 1
                else:
                    break
            return idx

        i = skip_empty_and_comments(i)

        # Expect "boundaryField" followed by "{"
        if not lines[i].strip().startswith("boundaryField"):
            raise ValueError("File does not start with boundaryField")
        i += 1
        i = skip_empty_and_comments(i)
        if lines[i].strip() != "{":
            raise ValueError("Expected '{' after boundaryField")
        i += 1

        # Parse patches
        while i < n:
            i = skip_empty_and_comments(i)
            if i >= n:
                break
            line = lines[i].strip()
            if line == "}":  # end of boundaryField
                break

            patch_name = line
            i += 1
            i = skip_empty_and_comments(i)
            if lines[i].strip() != "{":
                raise ValueError(f"Expected '{{' after {patch_name}")
            i += 1

            # Collect all lines of this patch block in prop_lines
            props = {}
            brace_count = 1
            prop_lines = []
            while i < n and brace_count > 0:
                l = lines[i].strip()
                if "}" in l:
                    brace_count -= l.count("}")
                prop_lines.append(l)
                i += 1

            # Remove the last closing brace
            prop_lines = prop_lines[:-1]

            # Combine multi-line values into single strings
            key = None
            value_lines = []
            for l in prop_lines:
                if ";" in l:
                    parts = l.split(None, 1)
                    # Handle single-line key-value pairs (e.g., "type fixedValue;")
                    if len(parts) == 2:
                        key, value = parts
                        value_lines.append(value)
                    if key:
                        value_str = " ".join(value_lines).replace(";", "").strip()
                        # convert the value string to np.ndarray if it contains numeric data
                        if value_str.startswith("uniform"):
                            if data_type == "scalar":
                                # read a scalar
                                scalar_match = re.match(
                                    r"uniform\s+([-+]?\d*\.?\d+(?:[eE][-+]?\d+)?)",
                                    value_str,
                                )
                                if scalar_match:
                                    props[key] = float(scalar_match.group(1))
                                else:
                                    raise ValueError(
                                        f"Invalid scalar format: {value_str}"
                                    )
                            elif data_type == "vector":
                                # read a vector
                                vec_match = re.match(
                                    r"uniform\s+\(\s*([-+]?\d*\.?\d+(?:[eE][-+]?\d+)?\s+[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?\s+[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?\s*)\)",
                                    value_str,
                                )
                                if vec_match:
                                    props[key] = _parse_vector_string(
                                        vec_match.group(1)
                                    )
                                else:
                                    raise ValueError(
                                        f"Invalid vector format: {value_str}"
                                    )
                        elif value_str.startswith("nonuniform"):
                            if data_type == "scalar":
                                # read a scalar list
                                list_str = value_str.split("(", 1)[1].rsplit(")", 1)[0]
                                scalar_match = re.findall(
                                    r"[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?",
                                    list_str,
                                )
                                if scalar_match:
                                    props[key] = np.array(
                                        [float(x) for x in scalar_match]
                                    )
                                # keep "value nonuniform List<scalar> 0();" as-is for the parallel case
                                elif parallel:
                                    props[key] = value_str
                                else:
                                    raise ValueError(
                                        f"Invalid scalar list format: {value_str}"
                                    )
                            elif data_type == "vector":
                                # read a vector list
                                vecs = re.findall(
                                    r"\(\s*([-+]?\d*\.?\d+(?:[eE][-+]?\d+)?\s+[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?\s+[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?\s*)\)",
                                    value_str,
                                )
                                if vecs:
                                    props[key] = np.array(
                                        [[float(x) for x in v.split()] for v in vecs]
                                    )
                                # keep "value nonuniform List<vector> 0();" as-is for the parallel case
                                elif parallel:
                                    props[key] = value_str
                                else:
                                    raise ValueError(
                                        f"Invalid vector list format: {value_str}"
                                    )
                        else:
                            props[key] = value_str
                        key = None
                        value_lines = []
                else:
                    # Handle multi-line values; this line does not end with ';'
                    if key is None:
                        key = l.split()[0]
                        value_lines = l.split()[1:]
                    else:
                        value_lines.append(l)

            bc_dict[patch_name] = props

        return bc_dict
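
    # Example (illustrative): for a boundaryField section such as
    #
    #     boundaryField
    #     {
    #         inlet
    #         {
    #             type            fixedValue;
    #             value           uniform (1 0 0);
    #         }
    #         outlet
    #         {
    #             type            zeroGradient;
    #         }
    #     }
    #
    # the parser returns (for data_type="vector"):
    #
    #     {"inlet": {"type": "fixedValue", "value": array([1., 0., 0.])},
    #      "outlet": {"type": "zeroGradient"}}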

    @staticmethod
    def _num_field(
        subcontent: List[bytes],
    ) -> Tuple[
        Optional[int], Optional[int], Optional[int], Optional[int], Optional[str]
    ]:
        """
        Find indices for the dimensions, internalField, and boundaryField sections.

        This function performs a single-pass scan through the file content to locate
        all critical sections efficiently.

        Parameters
        ----------
        subcontent : List[bytes]
            List of file lines as bytes.

        Returns
        -------
        Tuple[Optional[int], Optional[int], Optional[int], Optional[int], Optional[str]]
            A tuple containing:
            - data_idx : int or None
                Index of the internalField data.
            - boundary_idx : int or None
                Index where the boundaryField section starts.
            - dim_idx : int or None
                Index of the dimensions line.
            - data_size : int or None
                Number of data points in nonuniform fields.
            - internal_field_type : str or None
                Type of internal field ('uniform', 'nonuniform', or 'nonuniformZero').

        Raises
        ------
        ValueError
            If internalField, dimensions, or boundaryField is not found in the file.
        """
        dim_idx = None
        data_size = None
        data_idx = None
        boundary_idx = None
        internal_field_type = None

        # Flag to track if we need to find the data size
        searching_for_data_size = False

        idx = 0
        n_lines = len(subcontent)

        while idx < n_lines:
            line = subcontent[idx]

            # Search for dimensions (should appear early in the file)
            if dim_idx is None and b"dimensions" in line:
                dim_idx = idx
                idx += 1
                continue

            # Search for the internalField declaration
            if internal_field_type is None and b"internalField" in line:
                if b"nonuniform" in line:
                    if b"0()" in line:
                        # Empty nonuniform field
                        data_idx = idx
                        internal_field_type = "nonuniformZero"
                        data_size = 0
                    else:
                        # Standard nonuniform field - need to find the data size
                        internal_field_type = "nonuniform"
                        searching_for_data_size = True
                else:
                    # Uniform field
                    internal_field_type = "uniform"
                    data_idx = idx
                    data_size = None

            # Search for the data size (line after the nonuniform declaration)
            elif searching_for_data_size:
                stripped_line = line.strip()
                if stripped_line and not stripped_line.startswith(b"//"):
                    try:
                        data_size = int(stripped_line)
                        data_idx = idx
                        searching_for_data_size = False
                        # Skip ahead past the data block for efficiency:
                        # data_idx is the line with the size number, followed by
                        # an opening '(' and data_size data lines, so this jump
                        # lands on the last data line and the scan resumes at
                        # the closing ')'.
                        idx = data_idx + data_size + 1
                        continue
                    except ValueError:
                        # Not a valid integer, continue searching
                        pass

            # Search for boundaryField (should appear after internalField)
            if b"boundaryField" in line:
                boundary_idx = idx
                # Found all required information, exit early
                break

            idx += 1

        # Validation
        if internal_field_type is None:
            raise ValueError("internalField not found in the file.")

        if dim_idx is None:
            raise ValueError("dimensions not found in the file.")

        if boundary_idx is None:
            raise ValueError("boundaryField not found in the file.")

        return data_idx, boundary_idx, dim_idx, data_size, internal_field_type
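
    # For a nonuniform field, the returned data_idx is the index of the line
    # holding the element count and data_size is that count; for a uniform
    # field, data_idx points at the "internalField uniform ..." line itself
    # and data_size is None. dim_idx and boundary_idx point at the
    # "dimensions" and "boundaryField" lines, respectively.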

    def writeField(
        self,
        casePath: str,
        timeDir: int,
        fieldName: str,
        precision: int = 10,
    ) -> None:
        """
        Write field data to a file in OpenFOAM format.

        Parameters
        ----------
        casePath : str
            Path to the case directory.
        timeDir : int
            Time directory name.
        fieldName : str
            Field name.
        precision : int, optional
            Number of significant digits for written values, by default 10.

        Returns
        -------
        None

        Notes
        -----
        Automatically handles both serial and parallel field writing based on the
        parallel attribute of the object.
        """
        if self.parallel:
            self._writeField_parallel(
                casePath, timeDir=timeDir, fieldName=fieldName, precision=precision
            )
        else:
            self._writeField_serial(
                casePath,
                internalField=self.internalField,
                boundaryField=self.boundaryField,
                timeDir=timeDir,
                fieldName=fieldName,
                precision=precision,
            )
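
    # Example (illustrative round trip; "cavity" is a hypothetical case):
    #
    # >>> p = OFField("cavity/0/p", data_type="scalar", read_data=True)
    # >>> p.internalField = p.internalField * 2.0  # rescale the field
    # >>> p.writeField("cavity", timeDir=1, fieldName="p", precision=8)
    # # writes cavity/1/p in OpenFOAM format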

    def _writeField_serial(
        self,
        casePath: str,
        internalField: Union[float, np.ndarray],
        boundaryField: Dict[str, Dict[str, Any]],
        timeDir: int,
        fieldName: str,
        precision: int = 10,
    ) -> None:
        """
        Write field data to a file in OpenFOAM format (serial version).

        Parameters
        ----------
        casePath : str
            Path to the case folder, or case/processorX for a parallel case.
        internalField : Union[float, np.ndarray]
            Internal field data to write.
        boundaryField : Dict[str, Dict[str, Any]]
            Boundary field data organized by patch names.
        timeDir : int
            Time directory name.
        fieldName : str
            Field name.
        precision : int, optional
            Number of significant digits for written values, by default 10.

        Returns
        -------
        None

        Raises
        ------
        ValueError
            If internal_field_type is invalid.
        """

        fieldDir = f"{casePath}/{timeDir}/{fieldName}"

        if not os.path.exists(f"{casePath}/{timeDir}"):
            os.makedirs(f"{casePath}/{timeDir}")

        with open(fieldDir, "w") as f:
            # write header
            thisHeader = header.replace(
                "className;", f"vol{self.data_type.capitalize()}Field;"
            )
            thisHeader = thisHeader.replace("timeDir;", f"{timeDir};")
            thisHeader = thisHeader.replace("object data;", f"object {fieldName};")
            f.write(thisHeader + "\n\n")

            # write dimensions as "dimensions [0 1 -1 0 0 0 0];"
            f.write(f"dimensions [{' '.join(str(d) for d in self._dimensions)}];\n\n")

            # write internalField for scalar or vector
            if self.data_type == "scalar":
                if self.internal_field_type == "uniform":
                    f.write(f"internalField uniform {internalField:.{precision}g};\n\n")
                elif self.internal_field_type == "nonuniform":
                    f.write("internalField nonuniform List<scalar>\n")
                    f.write(f"{internalField.shape[0]}\n")
                    f.write("(\n")
                    for point in internalField:
                        f.write(f"{point:.{precision}g}\n")
                    f.write(")\n;\n")
                else:
                    raise ValueError(
                        "internal_field_type should be 'uniform' or 'nonuniform'"
                    )
            elif self.data_type == "vector":
                if self.internal_field_type == "uniform":
                    f.write(
                        f"internalField uniform ({internalField[0]:.{precision}g} "
                        f"{internalField[1]:.{precision}g} {internalField[2]:.{precision}g});\n\n"
                    )
                elif self.internal_field_type == "nonuniform":
                    f.write("internalField nonuniform List<vector>\n")
                    f.write(f"{internalField.shape[0]}\n")
                    f.write("(\n")
                    for point in internalField:
                        f.write(
                            f"({point[0]:.{precision}g} {point[1]:.{precision}g} "
                            f"{point[2]:.{precision}g})\n"
                        )
                    f.write(")\n;\n")
                else:
                    raise ValueError(
                        "internal_field_type should be 'uniform' or 'nonuniform'"
                    )

            # write boundaryField
            f.write("boundaryField\n")
            f.write("{\n")
            for patch, props in boundaryField.items():
                f.write(f"    {patch}\n")
                f.write("    {\n")
                for key, value in props.items():
                    if isinstance(value, np.ndarray):
                        if value.ndim == 0:
                            # scalar
                            f.write(f"        {key} uniform {value:.{precision}g};\n")
                        elif value.ndim == 1 and value.shape[0] == 3:
                            # vector
                            f.write(
                                f"        {key} uniform ({value[0]:.{precision}g} "
                                f"{value[1]:.{precision}g} {value[2]:.{precision}g});\n"
                            )
                        elif value.ndim == 1:
                            # scalar list
                            f.write(f"        {key} nonuniform List<scalar>\n")
                            f.write(f"{value.shape[0]}\n")
                            f.write("(\n")
                            for v in value:
                                f.write(f"{v:.{precision}g}\n")
                            f.write(");\n")
                        elif (
                            value.ndim == 2
                            and value.shape[0] == 1
                            and value.shape[1] != 3
                        ):
                            # a scalar list stored as a 2D array with shape (1, N)
                            f.write(f"        {key} nonuniform List<scalar>\n")
                            f.write(f"{value.shape[1]}\n")
                            f.write("(\n")
                            for v in value.ravel():
                                f.write(f"{v:.{precision}g}\n")
                            f.write(");\n")
                        elif (
                            value.ndim == 2
                            and value.shape[0] == 1
                            and value.shape[1] == 3
                        ):
                            # a single vector stored as a 2D array
                            f.write(
                                f"        {key} uniform ({value[0, 0]:.{precision}g} "
                                f"{value[0, 1]:.{precision}g} {value[0, 2]:.{precision}g});\n"
                            )
                        elif value.ndim == 2 and value.shape[1] == 3:
                            # vector list
                            f.write(f"        {key} nonuniform List<vector>\n")
                            f.write(f"{value.shape[0]}\n")
                            f.write("(\n")
                            for v in value:
                                f.write(
                                    f"({v[0]:.{precision}g} {v[1]:.{precision}g} "
                                    f"{v[2]:.{precision}g})\n"
                                )
                            f.write(");\n")
                    else:
                        # assume it's a string or other simple type
                        f.write(f"        {key} {value};\n")
                f.write("    }\n")
            f.write("}\n\n")

            # write ender
            f.write(ender)

    def _writeField_parallel(
        self,
        casePath: str,
        timeDir: int,
        fieldName: str,
        precision: int = 10,
    ) -> None:
        """
        Write field data to processor directories in OpenFOAM format.

        Parameters
        ----------
        casePath : str
            Path to the case directory.
        timeDir : int
            Time directory name.
        fieldName : str
            Field name.
        precision : int, optional
            Number of significant digits for written values, by default 10.

        Returns
        -------
        None

        Raises
        ------
        ValueError
            If internalField and boundaryField are not lists for parallel writing.
        """

        if not isinstance(self._internalField, list) or not isinstance(
            self._boundaryField, list
        ):
            raise ValueError(
                "For parallel writing, internalField and boundaryField should be lists."
            )

        num_processors = len(self._internalField)

        proc_field_path = [
            f"{casePath}/processor{idx}" for idx in range(num_processors)
        ]

        with multiprocessing.Pool(processes=self.num_batch) as pool:
            list(
                pool.imap(
                    self._writeField_wrapper,
                    [
                        (
                            proc_path,
                            self._internalField[idx],
                            self._boundaryField[idx],
                            timeDir,
                            fieldName,
                            precision,
                        )
                        for idx, proc_path in enumerate(proc_field_path)
                    ],
                )
            )

    def _writeField_wrapper(self, args) -> None:
        # args: (casePath, internalField, boundaryField, timeDir, fieldName, precision)
        return self._writeField_serial(*args)


def _parse_vector_string(s: str) -> np.ndarray:
    """
    Parse a single vector string like '(0 0 1.0)' into a NumPy array.

    Parameters
    ----------
    s : str
        Vector string with format '(x y z)' or 'x y z'.

    Returns
    -------
    np.ndarray
        1D array with shape (3,) containing the parsed vector components.

    Examples
    --------
    >>> _parse_vector_string("0 0 1.0")
    array([0., 0., 1.])
    >>> _parse_vector_string("(1.5 -2.0 3.14)")
    array([ 1.5 , -2.  ,  3.14])
    """
    s = s.strip("()")
    return np.array([float(x) for x in s.split()])


def _process_dimensions(line: str) -> np.ndarray:
    """
    Parse the dimensions line from an OpenFOAM file.

    Parameters
    ----------
    line : str
        Line containing dimensions in the format '[kg m s K mol A cd]'.

    Returns
    -------
    np.ndarray
        Array of 7 integers representing physical dimensions in SI base units:
        [mass, length, time, temperature, amount, current, luminous_intensity].

    Raises
    ------
    ValueError
        If the dimensions format is invalid or cannot be parsed.

    Examples
    --------
    >>> _process_dimensions("dimensions [0 1 -1 0 0 0 0];")
    array([ 0,  1, -1,  0,  0,  0,  0])
    """
    match = re.search(
        r"\[\s*-?\d+\s+-?\d+\s+-?\d+\s+-?\d+\s+-?\d+\s+-?\d+\s+-?\d+\s*\]\s*", line
    )
    if match:
        dims = match.group(0).strip("[]").split()
        return np.array([int(d) for d in dims])
    else:
        raise ValueError("Invalid dimensions format")


def find_patches(text: List[str]) -> Any:
    """
    Generator that yields complete patch blocks from an OpenFOAM boundaryField section.

    Parameters
    ----------
    text : List[str]
        Lines of the boundaryField file as strings.

    Yields
    ------
    List[str]
        Full text content of a single patch block as a list of lines.
        Each yielded item contains all lines belonging to one patch definition.

    Notes
    -----
    This function parses the hierarchical structure of OpenFOAM boundaryField
    files, correctly handling nested braces and extracting complete patch
    definitions including all properties and values.

    Examples
    --------
    >>> with open('0/U') as f:
    ...     lines = f.readlines()
    >>> for patch_lines in find_patches(lines):
    ...     print(f"Patch: {patch_lines[0]}")  # First line is the patch name
    """
    in_boundary = False
    start_boundary = False
    in_patch = False
    start_patch = False
    brace_level = 0
    current_patch_lines = []

    for line in text:
        # skip empty lines and comments
        stripped_line = line.strip()
        if not stripped_line or stripped_line.startswith("//"):
            continue

        if "boundaryField" in line:
            in_boundary = True
            continue

        if in_boundary and not start_boundary and "{" in line:
            start_boundary = True
            continue

        # Look for a patch name (simple check: a non-empty line that is not just braces)
        if (
            not in_patch
            and stripped_line
            and not stripped_line.startswith("{")
            and not stripped_line.startswith("}")
        ):
            in_patch = True
            current_patch_lines.append(stripped_line)

        if in_patch:
            if brace_level == 0 and "{" in stripped_line:
                start_patch = True
                brace_level += stripped_line.count("{")
                continue
            if not start_patch:
                continue
            if "}" in stripped_line:
                brace_level -= stripped_line.count("}")
            else:
                current_patch_lines.append(stripped_line)

            # If brace_level is 0, we have found the end of the patch
            if brace_level == 0:
                yield current_patch_lines
                # Reset for the next patch
                in_patch = False
                start_patch = False
                current_patch_lines = []
                continue

        # If we find the final closing brace of boundaryField, we can stop
        if stripped_line.startswith("}") and brace_level == 0 and not in_patch:
            break