foamlib 0.9.7__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
foamlib/__init__.py CHANGED
@@ -1,6 +1,6 @@
1
1
  """A Python interface for interacting with OpenFOAM."""
2
2
 
3
- __version__ = "0.9.7"
3
+ __version__ = "1.1.0"
4
4
 
5
5
  from ._cases import (
6
6
  AsyncFoamCase,
foamlib/_cases/_async.py CHANGED
@@ -127,7 +127,7 @@ class AsyncFoamCase(FoamCaseRunBase):
127
127
  async def _rmtree(
128
128
  path: os.PathLike[str] | str, *, ignore_errors: bool = False
129
129
  ) -> None:
130
- await aioshutil.rmtree(path, ignore_errors=ignore_errors) # type: ignore [call-arg]
130
+ await aioshutil.rmtree(path, ignore_errors=ignore_errors, onerror=None)
131
131
 
132
132
  @staticmethod
133
133
  async def _copytree(
@@ -120,7 +120,7 @@ def _ascii_numeric_list(
120
120
 
121
121
  ret: np.ndarray[
122
122
  tuple[int] | tuple[int, int], np.dtype[np.integer | np.floating]
123
- ] = np.fromstring(s, sep=" ", dtype=dtype) # type: ignore[assignment]
123
+ ] = np.fromstring(s, sep=" ", dtype=dtype)
124
124
 
125
125
  if nested is not None:
126
126
  ret = ret.reshape(-1, nested)
@@ -183,7 +183,7 @@ def _binary_numeric_list(
183
183
  if nested is not None:
184
184
  ret = ret.reshape(-1, nested)
185
185
 
186
- return ret # type: ignore[return-value]
186
+ return ret
187
187
 
188
188
  return (
189
189
  common.integer.copy().add_parse_action(process_count) + list_
@@ -236,7 +236,7 @@ def _ascii_face_list(*, ignore: Regex | None = None) -> ParserElement:
236
236
  i = 0
237
237
  while i < raw.size:
238
238
  assert raw[i] in (3, 4)
239
- values.append(raw[i + 1 : i + raw[i] + 1]) # type: ignore[arg-type]
239
+ values.append(raw[i + 1 : i + raw[i] + 1])
240
240
  i += raw[i] + 1
241
241
 
242
242
  return [values]
@@ -58,11 +58,11 @@ def normalize_data(
58
58
  pass
59
59
  else:
60
60
  if np.issubdtype(arr.dtype, np.integer) and arr.ndim == 1:
61
- return arr # type: ignore [return-value]
61
+ return arr
62
62
  if arr.ndim == 2 and arr.shape[1] == 3:
63
63
  if not np.issubdtype(arr.dtype, np.floating):
64
64
  arr = arr.astype(float)
65
- return arr # type: ignore [return-value]
65
+ return arr
66
66
 
67
67
  if keywords is not None and (
68
68
  keywords == ("internalField",)
@@ -86,7 +86,7 @@ def normalize_data(
86
86
  arr = arr.astype(float)
87
87
 
88
88
  if arr.ndim == 1 or (arr.ndim == 2 and arr.shape[1] in (3, 6, 9)):
89
- return arr # type: ignore [return-value]
89
+ return arr
90
90
 
91
91
  return [normalize_data(d) for d in data] # type: ignore [arg-type, return-value]
92
92
 
@@ -197,7 +197,7 @@ def dumps(
197
197
  )
198
198
  and isinstance(data, (int, float, np.ndarray))
199
199
  ):
200
- data = np.asarray(data) # type: ignore [assignment]
200
+ data = np.asarray(data)
201
201
  class_ = header.get("class", "") if header else ""
202
202
  assert isinstance(class_, str)
203
203
  scalar = "Scalar" in class_
foamlib/_files/_types.py CHANGED
@@ -89,7 +89,7 @@ class Dimensioned:
89
89
  name: str | None = None,
90
90
  ) -> None:
91
91
  if is_sequence(value):
92
- self.value: Tensor = np.array(value, dtype=float) # type: ignore [assignment]
92
+ self.value: Tensor = np.array(value, dtype=float)
93
93
  else:
94
94
  assert isinstance(value, (int, float, np.ndarray))
95
95
  self.value = float(value)
@@ -113,7 +113,7 @@ class Dimensioned:
113
113
  other = Dimensioned(other, DimensionSet())
114
114
 
115
115
  return Dimensioned(
116
- self.value + other.value, # type: ignore [arg-type]
116
+ self.value + other.value,
117
117
  self.dimensions + other.dimensions,
118
118
  f"{self.name}+{other.name}"
119
119
  if self.name is not None and other.name is not None
@@ -125,7 +125,7 @@ class Dimensioned:
125
125
  other = Dimensioned(other, DimensionSet())
126
126
 
127
127
  return Dimensioned(
128
- self.value - other.value, # type: ignore [arg-type]
128
+ self.value - other.value,
129
129
  self.dimensions - other.dimensions,
130
130
  f"{self.name}-{other.name}"
131
131
  if self.name is not None and other.name is not None
@@ -137,7 +137,7 @@ class Dimensioned:
137
137
  other = Dimensioned(other, DimensionSet())
138
138
 
139
139
  return Dimensioned(
140
- self.value * other.value, # type: ignore [arg-type]
140
+ self.value * other.value,
141
141
  self.dimensions * other.dimensions,
142
142
  f"{self.name}*{other.name}"
143
143
  if self.name is not None and other.name is not None
@@ -149,7 +149,7 @@ class Dimensioned:
149
149
  other = Dimensioned(other, DimensionSet())
150
150
 
151
151
  return Dimensioned(
152
- self.value / other.value, # type: ignore [arg-type]
152
+ self.value / other.value,
153
153
  self.dimensions / other.dimensions,
154
154
  f"{self.name}/{other.name}"
155
155
  if self.name is not None and other.name is not None
@@ -161,7 +161,7 @@ class Dimensioned:
161
161
  return NotImplemented
162
162
 
163
163
  return Dimensioned(
164
- self.value**exponent, # type: ignore [arg-type]
164
+ self.value**exponent,
165
165
  self.dimensions**exponent,
166
166
  f"pow({self.name},{exponent})" if self.name is not None else None,
167
167
  )
@@ -0,0 +1,6 @@
1
+ """
2
+ The postprocessing module provides utilities for processing and analyzing simulation data.
3
+
4
+ It includes table readers and data extraction tools.
5
+
6
+ """
@@ -0,0 +1,311 @@
1
+ # ruff: noqa: UP045
2
+ """Load OpenFOAM post-processing tables."""
3
+
4
+ from __future__ import annotations
5
+
6
+ import json
7
+ import os
8
+ from dataclasses import dataclass, field
9
+ from pathlib import Path
10
+ from typing import Callable, Optional, Union
11
+
12
+ import pandas as pd
13
+
14
+ from .table_reader import TableReader
15
+
16
+
17
+ def _of_case(dirnames: list[str]) -> bool:
18
+ """Classify directory as OpenFOAM case.
19
+
20
+ Parameters
21
+ ----------
22
+ dirnames : list[str]
23
+ list of directories in the folder
24
+
25
+ Returns
26
+ ofcase : bool
27
+ is the folder an OpenFOAM Case
28
+ """
29
+ has_constant = "constant" in dirnames
30
+ has_system = "system" in dirnames
31
+ return has_constant and has_system
32
+
33
+
34
def of_cases(dir_name: Union[str, Path]) -> list[str]:
    """Recursively collect all OpenFOAM cases below *dir_name*.

    Subdirectories of a detected case are not searched further.

    Parameters
    ----------
    dir_name : str or Path
        Root directory to search.

    Returns
    -------
    list[str]
        Paths of the OpenFOAM case directories.
    """
    found: list[str] = []
    for path, dirnames, _ in os.walk(dir_name):
        if "constant" in dirnames and "system" in dirnames:
            found.append(path)
            # Prune the walk: do not descend into the case's own subfolders.
            dirnames[:] = []
    return found
52
+
53
+
54
@dataclass
class DataSource:
    """Location of simulation output data inside a case directory.

    Attributes
    ----------
    file_name : str
        Name of the file to be read (e.g. ``'forces.dat'``).
    folder : Union[str, Path]
        Subdirectory containing the file, relative to the case path.
    postproc_prefix : str
        Prefix of the post-processing directory, typically ``'postProcessing'``.
    time_resolved : bool
        Whether the data is stored in time-specific subdirectories.
    """

    file_name: str
    folder: Union[str, Path]
    postproc_prefix: str
    time_resolved: bool = True
    # Internal accumulator of discovered time-directory names.
    _times: set[str] = field(default_factory=set, init=False, repr=False)

    def add_time(self, t: str) -> None:
        """Record the time step *t* for this data source.

        Parameters
        ----------
        t : str
            Time step to add.
        """
        self._times.add(t)

    @property
    def times(self) -> list[str]:
        """Sorted list of the recorded time steps.

        Returns
        -------
        list[str]
            Time steps as strings, in lexicographic order.
        """
        return sorted(self._times)

    def postproc_folder(self, case_path: Path) -> Path:
        """Return this source's base directory below *case_path*."""
        return case_path / self.postproc_prefix / self.folder

    def resolve_paths(self, case_path: Path) -> list[Path]:
        """Compute the full file paths of this source inside *case_path*.

        Parameters
        ----------
        case_path : Path
            Root path of the case directory.

        Returns
        -------
        list[Path]
            Resolved file paths to load (one per time step when
            time-resolved, otherwise a single path).
        """
        base = self.postproc_folder(case_path)
        if not self.time_resolved:
            return [base / self.file_name]
        return [base / t / self.file_name for t in self.times]
120
+
121
+
122
def functionobject(file_name: str, folder: Union[str, Path]) -> DataSource:
    """
    Create a DataSource for a standard OpenFOAM function object.

    Function-object output lives under ``postProcessing/<folder>/<time>/``,
    so the returned source is time-resolved.

    Parameters
    ----------
    file_name : str
        The name of the file to look for (e.g. 'forces.dat').
    folder : str or Path
        The function object folder below ``postProcessing/``.

    Returns
    -------
    DataSource
        Time-resolved data source rooted at ``postProcessing/``.
    """
    return DataSource(
        file_name=file_name,
        folder=folder,
        time_resolved=True,
        postproc_prefix="postProcessing",
    )
143
+
144
+
145
def datafile(
    file_name: str, folder: Union[str, Path], *, time_resolved: bool = False
) -> DataSource:
    """
    Create a DataSource for a custom or non-OpenFOAM output file.

    Parameters
    ----------
    file_name : str
        Name of the file (e.g., 'output.xml').
    folder : str or Path
        Subdirectory where the file is located, relative to the case root
        (the prefix is ``'.'``, not ``'postProcessing/'``).
    time_resolved : bool
        Whether the data is organized by time subfolders.

    Returns
    -------
    DataSource
        Data source rooted at the case directory itself.
    """
    return DataSource(
        file_name=file_name,
        folder=folder,
        time_resolved=time_resolved,
        postproc_prefix=".",
    )
170
+
171
+
172
def load_tables(
    source: DataSource,
    dir_name: Union[str, Path],
    filter_table: Optional[
        Callable[[pd.DataFrame, list[dict[str, str]]], pd.DataFrame]
    ] = None,
    reader_fn: Optional[Callable[[Path], Optional[pd.DataFrame]]] = None,
) -> Optional[pd.DataFrame]:
    """
    Load and concatenate all dataframes for *source* across cases and time steps.

    Parameters
    ----------
    source : DataSource
        Data descriptor used to resolve output paths.
    dir_name : str or Path
        Root directory in which OpenFOAM cases are searched.
    filter_table : callable, optional
        Hook to filter or transform each dataframe after reading.
    reader_fn : callable, optional
        File reader returning a DataFrame; defaults to ``TableReader().read``.

    Returns
    -------
    pd.DataFrame or None
        Concatenation of all found data, or None if nothing was found.
    """
    read_file = reader_fn or TableReader().read
    collected: list[pd.DataFrame] = []

    for case_dir in of_cases(dir_name):
        case_path = Path(case_dir)
        target_folder = source.postproc_folder(case_path)
        if not target_folder.exists():
            continue

        # Discover time directories, but only if none were registered yet.
        if source.time_resolved and not source.times:
            for entry in target_folder.iterdir():
                if entry.is_dir() and _is_float(entry.name):
                    source.add_time(entry.name)

        for file_path in source.resolve_paths(case_path):
            if not file_path.exists():
                continue

            table = read_file(file_path)
            if table is None:
                continue

            # Case metadata written by the preprocessing tools, if present.
            json_path = case_path / "case.json"
            parameters: list[dict[str, str]] = []
            if json_path.exists():
                with open(json_path) as f:
                    parameters = json.load(f).get("case_parameters", [])

            # Tag rows with the time directory when several times exist.
            if source.time_resolved and len(source.times) > 1:
                parameters.append(
                    {"category": "timeValue", "name": file_path.parent.name}
                )

            # Attach each parameter as a constant column.
            for parameter in parameters:
                table[parameter["category"]] = parameter["name"]

            if filter_table:
                table = filter_table(table, parameters)

            collected.append(table)

    return pd.concat(collected, ignore_index=True) if collected else None
253
+
254
+
255
+ def _is_float(s: str) -> bool:
256
+ try:
257
+ float(s)
258
+ except ValueError:
259
+ return False
260
+ return True
261
+
262
+
263
def _discover_function_objects(
    file_map: dict[str, DataSource], postproc_root: Path
) -> None:
    """Register every output file found in time directories below *postproc_root*.

    Walks the post-processing tree, treats each directory whose name parses
    as a float as a time directory, and records one DataSource per
    (folder, file name) pair, accumulating the time steps it appears in.
    *file_map* is updated in place; keys have the form ``'<folder>--<file>'``.
    """
    for dirpath, _, filenames in os.walk(postproc_root):
        time_path = Path(dirpath)
        time = time_path.name

        # Only directories whose name is numeric are time directories.
        if not _is_float(time):
            continue

        folder = time_path.relative_to(postproc_root).parent
        folder_str = "" if folder == Path() else str(folder)

        for fname in filenames:
            key = f"{folder_str}--{fname}"
            if key not in file_map:
                file_map[key] = functionobject(file_name=fname, folder=folder)
            file_map[key].add_time(time)
286
+
287
+
288
def list_function_objects(cases_folder: str = "Cases") -> dict[str, DataSource]:
    """List all function-object output files of the OpenFOAM cases below *cases_folder*.

    Parameters
    ----------
    cases_folder : str
        Name of the search directory.

    Returns
    -------
    dict[str, DataSource]
        Mapping from ``'<folder>--<file>'`` keys to the corresponding
        DataSource with its discovered time steps.
    """
    file_map: dict[str, DataSource] = {}

    for case_dir in of_cases(cases_folder):
        postproc_root = Path(case_dir) / "postProcessing"
        if postproc_root.exists():
            _discover_function_objects(file_map, postproc_root)

    return file_map
@@ -0,0 +1,315 @@
1
+ # ruff: noqa: UP045
2
+ """This module provides a utility class for reading tabular data from files with various extensions."""
3
+
4
+ from __future__ import annotations
5
+
6
+ import xml.etree.ElementTree as ET
7
+ from itertools import islice
8
+ from pathlib import Path
9
+ from typing import Callable, ClassVar, Optional, Union
10
+
11
+ import pandas as pd
12
+ from defusedxml.ElementTree import parse
13
+
14
+
15
class ReaderNotRegisteredError(Exception):
    """Raised when no reader is registered for a given file extension."""
17
+
18
+
19
class TableReader:
    """
    Utility for reading tabular data from files with various extensions.

    A class-level registry maps lower-cased file extensions to reader
    functions. Each reader function takes a file path (and optional column
    names) and returns a pandas DataFrame. Readers are added with the
    :meth:`register` decorator; :meth:`read` dispatches on the file suffix.
    """

    # Maps ".ext" (lower-case) -> reader(filepath, column_names) -> DataFrame.
    _registry: ClassVar[
        dict[str, Callable[[Union[str, Path], Optional[list[str]]], pd.DataFrame]]
    ] = {}

    def __init__(self) -> None:
        """Initialize the TableReader instance."""

    @classmethod
    def register(
        cls, extension: str
    ) -> Callable[
        [Callable[[Union[str, Path], Optional[list[str]]], pd.DataFrame]],
        Callable[[Union[str, Path], Optional[list[str]]], pd.DataFrame],
    ]:
        """
        Return a decorator registering a reader for *extension*.

        The extension is matched case-insensitively.

        Args:
            extension (str): The file extension (e.g., ".dat", ".raw").

        Returns:
            A decorator that registers the wrapped function as the reader
            for the specified extension and returns it unchanged.
        """

        def decorator(
            reader: Callable[[Union[str, Path], Optional[list[str]]], pd.DataFrame],
        ) -> Callable[[Union[str, Path], Optional[list[str]]], pd.DataFrame]:
            cls._registry[extension.lower()] = reader
            return reader

        return decorator

    def read(
        self, filepath: Union[str, Path], column_names: Optional[list[str]] = None
    ) -> pd.DataFrame:
        """
        Read *filepath* into a DataFrame using the reader for its extension.

        Args:
            filepath (Union[str, Path]): The path to the file to be read.
            column_names (Optional[list[str]]): Optional column names to
                forward to the reader.

        Raises:
            ReaderNotRegisteredError: If no reader exists for the extension.

        Returns:
            pd.DataFrame: The contents of the file.
        """
        extension = str(Path(filepath).suffix.lower())
        if extension not in self._registry:
            error_message = f"No reader registered for extension: '{extension}'"
            raise ReaderNotRegisteredError(error_message)
        return self._registry[extension](filepath, column_names)
98
+
99
+
100
def is_convertible_to_float(values: list[str]) -> bool:
    """
    Check whether every entry of *values* parses as a float.

    Args:
        values (list[str]): String values to check.

    Returns:
        bool: True if all values convert cleanly (vacuously True for an
        empty list), False otherwise.
    """
    try:
        for value in values:
            float(value)
    except ValueError:
        return False
    return True
115
+
116
+
117
def extract_column_names(filepath: Union[str, Path]) -> Optional[list[str]]:
    """
    Extract column names from the leading comment lines of a file.

    Only the first 20 lines are inspected; the last line among them that
    starts with ``#`` is split into whitespace-separated column names.

    Args:
        filepath (Union[str, Path]): The file to read the header from.

    Returns:
        Optional[list[str]]: The extracted column names, or None when the
        first 20 lines contain no comment line.
    """
    with open(filepath) as f:
        head = [line.strip() for line in islice(f, 20)]

    comments = [line for line in head if line.startswith("#")]
    if not comments:
        return None

    # The last comment line is assumed to hold the column headers.
    return comments[-1].lstrip("#").strip().split()
140
+
141
+
142
def update_column_names(
    table: pd.DataFrame, column_names: Optional[list[str]]
) -> pd.DataFrame:
    """
    Assign new column names to *table* when provided.

    Args:
        table (pd.DataFrame): The DataFrame to rename in place.
        column_names (Optional[list[str]]): Replacement column names, or
            None to leave the table untouched.

    Raises:
        ValueError: If the number of names does not match the column count.

    Returns:
        pd.DataFrame: The (possibly renamed) DataFrame.
    """
    if column_names is None:
        return table

    if len(column_names) != len(table.columns):
        error_message = (
            f"Number of column names ({len(column_names)}) does not match "
            f"number of columns in DataFrame ({len(table.columns)})."
        )
        raise ValueError(error_message)

    table.columns = pd.Index(column_names)
    return table
163
+
164
+
165
def read_oftable(
    filepath: Union[str, Path], column_names: Optional[list[str]] = None
) -> pd.DataFrame:
    """
    Parse an OpenFOAM table file, splitting on parentheses and whitespace.

    Column names are taken from *column_names* when given, otherwise from
    the last leading comment line of the file; either source is discarded
    when its length does not match the parsed column count, falling back
    to default integer column labels.

    Args:
        filepath (Union[str, Path]): The path to the file to be read.
        column_names (Optional[list[str]]): Explicit column names; take
            precedence over names extracted from the file header.

    Returns:
        pd.DataFrame: The contents of the file as a pandas DataFrame.
    """
    table = pd.read_csv(
        filepath, comment="#", sep=r"[()\s]+", engine="python", header=None
    )
    # The regex split leaves all-NaN columns behind; drop them.
    table = table.dropna(axis=1, how="all")

    # Explicit names win over extracted ones; a length mismatch means the
    # header does not describe the parsed columns, so ignore it.
    # (The original also assigned `column_names = None` on mismatch, which
    # was dead code and has been removed.)
    headers = column_names if column_names is not None else extract_column_names(filepath)
    if headers is not None and len(headers) != len(table.columns):
        headers = None

    update_column_names(table, headers)
    return table
192
+
193
+
194
@TableReader.register(".dat")
def read_dat(
    filepath: Union[str, Path], column_names: Optional[list[str]] = None
) -> pd.DataFrame:
    """Read a ``.dat`` file via the generic OpenFOAM table parser."""
    return read_oftable(filepath, column_names=column_names)
200
+
201
+
202
@TableReader.register(".raw")
def read_raw(
    filepath: Union[str, Path], column_names: Optional[list[str]] = None
) -> pd.DataFrame:
    """Read a whitespace-separated ``.raw`` file into a DataFrame.

    When no explicit *column_names* are given, names are taken from the
    file's leading comment lines.
    """
    names = column_names if column_names is not None else extract_column_names(filepath)
    table = pd.read_csv(filepath, comment="#", sep=r"\s+", header=None)
    update_column_names(table, names)
    return table
212
+
213
+
214
@TableReader.register("")
def read_default(
    filepath: Union[str, Path], column_names: Optional[list[str]] = None
) -> pd.DataFrame:
    """Read an extension-less file via the generic OpenFOAM table parser."""
    return read_oftable(filepath, column_names=column_names)
220
+
221
+
222
@TableReader.register(".xy")
def read_xy(
    filepath: Union[str, Path], column_names: Optional[list[str]] = None
) -> pd.DataFrame:
    """Read a whitespace-separated ``.xy`` file into a DataFrame.

    When no explicit *column_names* are given, names are taken from the
    file's leading comment lines.
    """
    names = column_names if column_names is not None else extract_column_names(filepath)
    table = pd.read_csv(filepath, comment="#", sep=r"\s+", header=None)
    update_column_names(table, names)
    return table
232
+
233
+
234
@TableReader.register(".csv")
def read_csv(
    filepath: Union[str, Path], column_names: Optional[list[str]] = None
) -> pd.DataFrame:
    """Read a ``.csv`` file, auto-detecting whether it has a header row.

    The first 20 lines are sampled; a header row is assumed as soon as any
    non-comment sample line contains a comma-separated field that does not
    parse as a float.
    """
    with open(filepath) as f:
        sample = list(islice(f, 20))

    data_lines = [line for line in sample if not line.startswith("#")]
    entries = [line.split(",") for line in data_lines]
    has_header = not all(is_convertible_to_float(fields) for fields in entries)

    if has_header:
        table = pd.read_csv(filepath, comment="#")
    else:
        table = pd.read_csv(filepath, comment="#", header=None)

    update_column_names(table, column_names)
    return table
257
+
258
+
259
def read_catch2_benchmark(
    filepath: Union[str, Path], column_names: Optional[list[str]] = None
) -> pd.DataFrame:
    """Read a Catch2 XML benchmark results file and return a DataFrame.

    Each row holds one benchmark result: the test case name, the benchmark
    name, the mean runtime, and one ``sectionN`` column per nesting level.

    Args:
        filepath (Union[str, Path]): Path of the Catch2 XML report.
        column_names (Optional[list[str]]): Optional subset/order of
            columns to select in the returned DataFrame.

    Raises:
        ValueError: If the XML file cannot be parsed.

    Returns:
        pd.DataFrame: One row per benchmark result.
    """
    tree = parse(filepath)
    root = tree.getroot()
    if root is None:
        err_msg = f"Unable to parse XML file: {filepath}"
        raise ValueError(err_msg)

    records: list[dict[str, object]] = []

    def _parse_sections(
        sections: list[ET.Element], test_case_name: str, section_path: list[str]
    ) -> None:
        # Recurse into nested <Section> elements; leaf sections carry the
        # <BenchmarkResults> payload.
        for section in sections:
            new_path = [*section_path, section.attrib.get("name", "")]

            subsections = section.findall("Section")
            if subsections:
                _parse_sections(subsections, test_case_name, new_path)
                continue

            benchmark = section.find("BenchmarkResults")
            if benchmark is None:
                continue
            mean = benchmark.find("mean")
            if mean is None:
                continue

            record: dict[str, object] = {
                "test_case": test_case_name,
                "benchmark_name": benchmark.attrib.get("name"),
                "avg_runtime": float(mean.attrib.get("value", 0)),
            }
            # One column per section nesting level.
            for i, sec_name in enumerate(new_path):
                record[f"section{i + 1}"] = sec_name
            records.append(record)

    for testcase in root.findall("TestCase"):
        test_case_name = testcase.attrib.get("name")
        if test_case_name:
            _parse_sections(testcase.findall("Section"), str(test_case_name), [])

    table = pd.DataFrame(records)

    # Every record has 3 fixed keys (test_case, benchmark_name, avg_runtime);
    # the rest are sectionN keys. Normalize all sectionN columns to empty
    # strings instead of NaN for records with fewer nesting levels.
    # (The original computed `len(r) - 18`, which is never positive here and
    # only checked for absent columns, so the fill never happened.)
    max_sections = max((len(r) - 3 for r in records), default=0)
    for i in range(1, max_sections + 1):
        col = f"section{i}"
        if col in table.columns:
            table[col] = table[col].fillna("")
        else:
            table[col] = ""

    if column_names:
        table = table[column_names]
    return table
@@ -0,0 +1 @@
1
+ """The preprocessing module provides tools and utilities for generating and modifying OpenFOAM cases."""
@@ -0,0 +1,45 @@
1
+ # ruff: noqa: UP006, D100
2
+ from __future__ import annotations
3
+
4
+ from pathlib import Path
5
+ from typing import List
6
+
7
+ from pydantic import BaseModel
8
+
9
+ from foamlib import FoamCase
10
+ from foamlib.preprocessing.of_dict import FoamDictAssignment
11
+
12
+
13
class CaseParameter(BaseModel):
    """A labelled parameter value attached to a generated case."""

    category: str
    name: str
18
+
19
+
20
class CaseModifier(BaseModel):
    """Modify a copied template case by applying instruction-value pairs."""

    template_case: Path
    output_case: Path
    key_value_pairs: List[FoamDictAssignment]
    case_parameters: List[CaseParameter]

    def create_case(self) -> FoamCase:
        """Create a new case by copying the template case to the output directory."""
        template = FoamCase(path=self.template_case)
        template.copy(dst=self.output_case)

        return template

    def modify_case(self) -> FoamCase:
        """Apply all key-value assignments and persist the case metadata."""
        case = FoamCase(path=self.output_case)

        for assignment in self.key_value_pairs:
            assignment.set_value(case_path=self.output_case)

        # Record the applied parameters next to the case so that the
        # post-processing tools can pick them up from case.json.
        with open(self.output_case / "case.json", "w") as json_file:
            json_file.write(self.model_dump_json(indent=4))

        return case
@@ -0,0 +1,27 @@
1
+ # ruff: noqa: UP006, D100
2
+ from __future__ import annotations
3
+
4
+ from typing import Any, List
5
+
6
+ from pydantic import BaseModel
7
+
8
+ from foamlib.preprocessing.of_dict import FoamDictInstruction
9
+
10
+
11
class CaseParameter(BaseModel):
    """A named list of values for one point of a parameter grid."""

    name: str
    values: List[Any]
16
+
17
+
18
class GridParameter(BaseModel):
    """One sweep axis: the dictionary entries to modify and their candidate values."""

    parameter_name: str
    modify_dict: List[FoamDictInstruction]
    parameters: List[CaseParameter]

    def case_names(self) -> List[str]:
        """Return the case name contributed by each parameter value."""
        return [parameter.name for parameter in self.parameters]
@@ -0,0 +1,40 @@
1
+ # ruff: noqa: UP006, D100, UP045
2
+ from __future__ import annotations
3
+
4
+ from pathlib import Path
5
+ from typing import Any, List, Optional, Union
6
+
7
+ from pydantic import BaseModel
8
+
9
+ from foamlib import FoamFile
10
+
11
+
12
class FoamDictInstruction(BaseModel):
    """Address of a value inside a FoamFile: a file name plus a key path."""

    file_name: Union[str, Path]
    keys: List[str]

    def get_value(self) -> Any:
        """Read the addressed value from the FoamFile."""
        foam_file = FoamFile(self.file_name)
        return foam_file.get(tuple(self.keys))
22
+
23
+
24
class FoamDictAssignment(BaseModel):
    """Pair of a FoamDictInstruction and the value to write to it."""

    instruction: FoamDictInstruction
    value: Any

    def set_value(self, case_path: Optional[Path] = None) -> FoamFile:
        """Write the value into the addressed FoamFile.

        Raises:
            FileNotFoundError: If the target file does not exist.
        """
        target = Path(self.instruction.file_name)
        if case_path is not None:
            target = case_path / target
        if not target.exists():
            err_msg = f"The file {target} does not exist."
            raise FileNotFoundError(err_msg)

        foam_file = FoamFile(target)
        foam_file[tuple(self.instruction.keys)] = self.value
        return foam_file
@@ -0,0 +1,113 @@
1
+ """Parameter study module for generating multiple cases based on parameter combinations."""
2
+
3
+ # ruff: noqa: UP006
4
+ from __future__ import annotations
5
+
6
+ import itertools
7
+ from pathlib import Path
8
+ from typing import List, Union
9
+
10
+ import pandas as pd
11
+ from pydantic import BaseModel
12
+
13
+ from foamlib import FoamFile
14
+ from foamlib.preprocessing.case_modifier import CaseModifier, CaseParameter
15
+ from foamlib.preprocessing.grid_parameter_sweep import GridParameter
16
+ from foamlib.preprocessing.of_dict import FoamDictAssignment, FoamDictInstruction
17
+
18
+
19
class ParameterStudy(BaseModel):
    """A collection of case modifications forming one parameter study."""

    cases: List[CaseModifier]

    def create_study(self, study_base_folder: Path = Path()) -> None:
        """Write the study description and create/modify every case."""
        # Persist the full study definition for later inspection.
        with open(study_base_folder / "parameter_study.json", "w") as json_file:
            json_file.write(self.model_dump_json(indent=2))

        for case in self.cases:
            case.create_case()
            case.modify_case()

    def __add__(self, other: ParameterStudy) -> ParameterStudy:
        """Combine two ParameterStudy instances into a new one."""
        return ParameterStudy(cases=self.cases + other.cases)
36
+
37
+
38
def csv_generator(
    csv_file: str,
    template_case: Union[str, Path],
    output_folder: Union[str, Path] = Path("Cases"),
) -> ParameterStudy:
    """Generate a parameter study from a CSV file.

    Every CSV row describes one case: the ``case_name`` column names the
    output case, columns matching keys of ``system/simulationParameters``
    become dictionary assignments, and all remaining columns are stored as
    case parameters (categories).

    Args:
        csv_file: Path of the CSV file describing the cases.
        template_case: Path of the template case to copy.
        output_folder: Folder in which the cases are created.

    Raises:
        ValueError: If the CSV file contains no rows.

    Returns:
        ParameterStudy: One CaseModifier per CSV row.
    """
    parastudy = pd.read_csv(csv_file).to_dict(orient="records")
    # Guard: the original indexed parastudy[0] and crashed with a bare
    # IndexError on an empty CSV.
    if not parastudy:
        err_msg = f"CSV file {csv_file} contains no cases."
        raise ValueError(err_msg)

    parameter = FoamFile(
        Path(template_case) / "system" / "simulationParameters"
    ).as_dict()
    parameter_keys = set(parameter.keys())
    category_keys = set(parastudy[0].keys()) - parameter_keys - {"case_name"}

    cases = []
    for of_case in parastudy:
        case_mod = CaseModifier(
            template_case=Path(template_case),
            output_case=Path(output_folder) / of_case["case_name"],
            key_value_pairs=[
                FoamDictAssignment(
                    instruction=FoamDictInstruction(
                        file_name=Path("system/simulationParameters"), keys=[str(key)]
                    ),
                    value=value,
                )
                for key, value in of_case.items()
                if key in parameter_keys
            ],
            case_parameters=[
                CaseParameter(category=str(key), name=value)
                for key, value in of_case.items()
                if key in category_keys
            ],
        )
        cases.append(case_mod)

    return ParameterStudy(cases=cases)
76
+
77
+
78
def grid_generator(
    parameters: List[GridParameter],
    template_case: Union[str, Path],
    output_folder: Union[str, Path] = Path("Cases"),
) -> ParameterStudy:
    """Generate a parameter study from the Cartesian product of grid parameters.

    Each combination of the per-axis CaseParameters yields one case; the
    case name is the underscore-joined list of the chosen parameter names.

    Args:
        parameters: One GridParameter per sweep axis.
        template_case: Path of the template case to copy.
        output_folder: Folder in which the cases are created.

    Raises:
        ValueError: If a combination provides a different number of values
            than there are dictionary instructions to fill (the original
            indexed by position, silently ignoring surplus values).

    Returns:
        ParameterStudy: One CaseModifier per parameter combination.
    """
    categories = [param.parameter_name for param in parameters]
    case_instructions = [ins for param in parameters for ins in param.modify_dict]

    cases = []
    for combination in itertools.product(*[param.parameters for param in parameters]):
        # Flatten the per-axis value lists into one value per instruction.
        values = list(
            itertools.chain.from_iterable(choice.values for choice in combination)  # noqa: PD011
        )
        if len(values) != len(case_instructions):
            err_msg = (
                f"Got {len(values)} values for {len(case_instructions)} "
                f"dictionary instructions."
            )
            raise ValueError(err_msg)

        case_name = "_".join(choice.name for choice in combination)
        case_mod = CaseModifier(
            template_case=Path(template_case),
            output_case=Path(output_folder) / case_name,
            key_value_pairs=[
                FoamDictAssignment(instruction=instruction, value=value)
                for instruction, value in zip(case_instructions, values)
            ],
            case_parameters=[
                CaseParameter(category=category, name=str(choice.name))
                for category, choice in zip(categories, combination)
            ],
        )
        cases.append(case_mod)

    return ParameterStudy(cases=cases)
@@ -0,0 +1,38 @@
1
+ # ruff: noqa: N802, D100
2
+ from __future__ import annotations
3
+
4
+ from pathlib import Path
5
+
6
+ from foamlib.preprocessing.of_dict import FoamDictInstruction
7
+
8
+
9
+ def simulationParameters(keys: list[str]) -> FoamDictInstruction:
10
+ """Return the FoamDictInstruction for simulationParameters."""
11
+ return FoamDictInstruction(
12
+ file_name=Path("system/simulationParameters"),
13
+ keys=keys,
14
+ )
15
+
16
+
17
+ def controlDict(keys: list[str]) -> FoamDictInstruction:
18
+ """Return the FoamDictInstruction for controlDict."""
19
+ return FoamDictInstruction(
20
+ file_name=Path("system/controlDict"),
21
+ keys=keys,
22
+ )
23
+
24
+
25
+ def fvSchemes(keys: list[str]) -> FoamDictInstruction:
26
+ """Return the FoamDictInstruction for fvSchemes."""
27
+ return FoamDictInstruction(
28
+ file_name=Path("system/fvSchemes"),
29
+ keys=keys,
30
+ )
31
+
32
+
33
+ def fvSolution(keys: list[str]) -> FoamDictInstruction:
34
+ """Return the FoamDictInstruction for fvSolution."""
35
+ return FoamDictInstruction(
36
+ file_name=Path("system/fvSolution"),
37
+ keys=keys,
38
+ )
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: foamlib
3
- Version: 0.9.7
3
+ Version: 1.1.0
4
4
  Summary: A Python interface for interacting with OpenFOAM
5
5
  Project-URL: Homepage, https://github.com/gerlero/foamlib
6
6
  Project-URL: Repository, https://github.com/gerlero/foamlib
@@ -34,6 +34,7 @@ Provides-Extra: docs
34
34
  Requires-Dist: ruff; extra == 'docs'
35
35
  Requires-Dist: sphinx-rtd-theme; extra == 'docs'
36
36
  Requires-Dist: sphinx<9,>=5; extra == 'docs'
37
+ Requires-Dist: sphinxcontrib-mermaid; extra == 'docs'
37
38
  Description-Content-Type: text/markdown
38
39
 
39
40
  [<img alt="foamlib" src="https://github.com/gerlero/foamlib/raw/main/logo.png" height="65">](https://github.com/gerlero/foamlib)
@@ -51,6 +52,8 @@ Description-Content-Type: text/markdown
51
52
  ![OpenFOAM](https://img.shields.io/badge/openfoam-.com%20|%20.org-informational)
52
53
  [![Docker](https://github.com/gerlero/foamlib/actions/workflows/docker.yml/badge.svg)](https://github.com/gerlero/foamlib/actions/workflows/docker.yml)
53
54
  [![Docker image](https://img.shields.io/badge/docker%20image-microfluidica%2Ffoamlib-0085a0)](https://hub.docker.com/r/microfluidica/foamlib/)
55
+ [![DOI](https://joss.theoj.org/papers/10.21105/joss.07633/status.svg)](https://doi.org/10.21105/joss.07633)
56
+
54
57
 
55
58
  **foamlib** provides a simple, modern, ergonomic and fast Python interface for interacting with [OpenFOAM](https://www.openfoam.com).
56
59
 
@@ -92,6 +95,12 @@ Compared to [PyFoam](https://openfoamwiki.net/index.php/Contrib/PyFoam) and othe
92
95
  conda install -c conda-forge foamlib
93
96
  ```
94
97
 
98
+ * With [Homebrew](https://brew.sh):
99
+
100
+ ```bash
101
+ brew install gerlero/openfoam/foamlib
102
+ ```
103
+
95
104
  ### 🐑 Clone a case
96
105
 
97
106
  ```python
@@ -210,6 +219,39 @@ If you believe you have found a bug in **foamlib**, please open an [issue](https
210
219
 
211
220
  You're welcome to contribute to **foamlib**! Check out the [contributing guidelines](CONTRIBUTING.md) for more information.
212
221
 
213
- ## Footnotes
222
+ ## 🖋️Citation
223
+
224
+ If you find **foamlib** useful for your work, don't forget to cite it!
225
+
226
+ Citations help us a lot. You may find the following snippets useful:
227
+
228
+ <details>
229
+ <summary>BibTeX</summary>
230
+
231
+ ```bibtex
232
+ @article{foamlib,
233
+ author = {Gerlero, Gabriel S. and Kler, Pablo A.},
234
+ doi = {10.21105/joss.07633},
235
+ journal = {Journal of Open Source Software},
236
+ month = may,
237
+ number = {109},
238
+ pages = {7633},
239
+ title = {{foamlib: A modern Python package for working with OpenFOAM}},
240
+ url = {https://joss.theoj.org/papers/10.21105/joss.07633},
241
+ volume = {10},
242
+ year = {2025}
243
+ }
244
+ ```
245
+
246
+ </details>
247
+
248
+ <details>
249
+ <summary>APA</summary>
250
+
251
+ Gerlero, G. S., & Kler, P. A. (2025). foamlib: A modern Python package for working with OpenFOAM. Journal of Open Source Software, 10(109), 7633. https://doi.org/10.21105/joss.07633
252
+
253
+ </details>
254
+
255
+ ## 👟 Footnotes
214
256
 
215
257
  <a id="benchmark">[1]</a> foamlib 0.8.1 vs PyFoam 2023.7 on a MacBook Air (2020, M1) with 8 GB of RAM. [Benchmark script](benchmark/benchmark.py).
@@ -0,0 +1,29 @@
1
+ foamlib/__init__.py,sha256=DQJDDDj9OtWfj8xuZhADwfIJvsAkCeIChgIb7Z_5S-k,452
2
+ foamlib/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
+ foamlib/_cases/__init__.py,sha256=_A1TTHuQfS9FH2_33lSEyLtOJZGFHZBco1tWJCVOHks,358
4
+ foamlib/_cases/_async.py,sha256=1syaHJ_OjB64SntT-rGOeV8IBFYs6ud4ZEcNrCDczIM,11784
5
+ foamlib/_cases/_base.py,sha256=0Bb45FWxxMRnx6njtnJ3Tqh2_NcphrPtVSFjmfYbTjw,7480
6
+ foamlib/_cases/_run.py,sha256=C5sf-PWE73cqyPVmmWDiZU3V9QYVsrhSXpgil7aIp10,15659
7
+ foamlib/_cases/_slurm.py,sha256=X8eSL_tDnip3bPHb2Fot-n1yD0FfiVP5sCxHxjKt1f0,2748
8
+ foamlib/_cases/_subprocess.py,sha256=VHV2SuOLqa711an6kCuvN6UlIkeh4qqFfdrpNoKzQps,5630
9
+ foamlib/_cases/_sync.py,sha256=lsgJV2dMAAmmsiJMtzqy1bhW3yAZQOUMXh3h8jNqyes,9799
10
+ foamlib/_cases/_util.py,sha256=QCizfbuJdOCeF9ogU2R-y-iWX5kfaOA4U2W68t6QlOM,2544
11
+ foamlib/_files/__init__.py,sha256=q1vkjXnjnSZvo45jPAICpWeF2LZv5V6xfzAR6S8fS5A,96
12
+ foamlib/_files/_files.py,sha256=uMCn4kNdVJBbcEl7sTSDn9bpc6JUZtNUBbyio7oMqSg,24346
13
+ foamlib/_files/_io.py,sha256=BGbbm6HKxL2ka0YMCmHqZQZ1R4PPQlkvWWb4FHMAS8k,2217
14
+ foamlib/_files/_parsing.py,sha256=Y8Ft3Jh3TtvU8kp5YoOPkHVOVrec3bTzZTxBdcJPh3o,20450
15
+ foamlib/_files/_serialization.py,sha256=sB3ySNSOEWMfMjmXVEysZa2GXSQ7m1Z7UrPLXlBBQoA,8050
16
+ foamlib/_files/_types.py,sha256=fPoQeWMfFn5VQrCvLNbM1-TKUVUswFLSzIvUQQrro1M,8038
17
+ foamlib/postprocessing/__init__.py,sha256=fKnBSOOL1wEnx5U1kNGqq6a97yVpwynwUpRQS8X_kW8,154
18
+ foamlib/postprocessing/load_tables.py,sha256=cXNA2rCH5Lw8KGrZzCUd7vyVRmb5y1fP_pbrtEFW8p4,8580
19
+ foamlib/postprocessing/table_reader.py,sha256=khCNjlFY-RuU6I29eK1WCK3B2wsq9QmWkzazNUcbvDk,10965
20
+ foamlib/preprocessing/__init__.py,sha256=fHIVJyygymfWrwSf3usSZfrgEwAjaWnb_THUrX7taJI,105
21
+ foamlib/preprocessing/case_modifier.py,sha256=GsqrPpEF3Rox1xcfBvGYhs1fDn6prR2hpSLY0M-2S_g,1292
22
+ foamlib/preprocessing/grid_parameter_sweep.py,sha256=1D60KxVzps0xqinDEaA93wcnURYsWTAgJ8FcTwvIi4k,699
23
+ foamlib/preprocessing/of_dict.py,sha256=MxdZsFd5yIsS1DHsbRa25eqPGe9IOmNE8EDY8Ok66Xg,1293
24
+ foamlib/preprocessing/parameter_study.py,sha256=xQEhzOsOqZTpRcOmNWnyi18rus6BcLyDbg3ZXGHEKHQ,4080
25
+ foamlib/preprocessing/system.py,sha256=EMCobbfg_upGC5-KSeuZagBOePQKcH2itGMjcMOlsFA,1047
26
+ foamlib-1.1.0.dist-info/METADATA,sha256=GK1FTmI-2-0D23tExr1dzId9w7Vz61ZV1ucu22hbqso,9814
27
+ foamlib-1.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
28
+ foamlib-1.1.0.dist-info/licenses/LICENSE.txt,sha256=5Dte9TUnLZzPRs4NQzl-Jc2-Ljd-t_v0ZR5Ng5r0UsY,35131
29
+ foamlib-1.1.0.dist-info/RECORD,,
@@ -1,20 +0,0 @@
1
- foamlib/__init__.py,sha256=TlX6bgqC9lLrRtBY4Bany589hu5CarS_mYUD6XfnJHw,452
2
- foamlib/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
- foamlib/_cases/__init__.py,sha256=_A1TTHuQfS9FH2_33lSEyLtOJZGFHZBco1tWJCVOHks,358
4
- foamlib/_cases/_async.py,sha256=1NuBaKa7NC-320SFNYW7JWZ5rAi344br_SoEdl64dmo,11797
5
- foamlib/_cases/_base.py,sha256=0Bb45FWxxMRnx6njtnJ3Tqh2_NcphrPtVSFjmfYbTjw,7480
6
- foamlib/_cases/_run.py,sha256=C5sf-PWE73cqyPVmmWDiZU3V9QYVsrhSXpgil7aIp10,15659
7
- foamlib/_cases/_slurm.py,sha256=X8eSL_tDnip3bPHb2Fot-n1yD0FfiVP5sCxHxjKt1f0,2748
8
- foamlib/_cases/_subprocess.py,sha256=VHV2SuOLqa711an6kCuvN6UlIkeh4qqFfdrpNoKzQps,5630
9
- foamlib/_cases/_sync.py,sha256=lsgJV2dMAAmmsiJMtzqy1bhW3yAZQOUMXh3h8jNqyes,9799
10
- foamlib/_cases/_util.py,sha256=QCizfbuJdOCeF9ogU2R-y-iWX5kfaOA4U2W68t6QlOM,2544
11
- foamlib/_files/__init__.py,sha256=q1vkjXnjnSZvo45jPAICpWeF2LZv5V6xfzAR6S8fS5A,96
12
- foamlib/_files/_files.py,sha256=uMCn4kNdVJBbcEl7sTSDn9bpc6JUZtNUBbyio7oMqSg,24346
13
- foamlib/_files/_io.py,sha256=BGbbm6HKxL2ka0YMCmHqZQZ1R4PPQlkvWWb4FHMAS8k,2217
14
- foamlib/_files/_parsing.py,sha256=zLRXwv9PEil-vlIr1QiIEw8bhanRQ_vbVIEdTHv4bdI,20534
15
- foamlib/_files/_serialization.py,sha256=kQfPfuTXtc9jryQdieCbAX0-8_Oz__vY_kr7uH9f_rU,8172
16
- foamlib/_files/_types.py,sha256=7reA_TjRjCFV3waQVaGaYWURFoN8u92ao-NH9rESiAk,8202
17
- foamlib-0.9.7.dist-info/METADATA,sha256=tI5zFm2gLiXI1mKn92fMk5kitpUdNWnoO2iv9cjNhFw,8701
18
- foamlib-0.9.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
19
- foamlib-0.9.7.dist-info/licenses/LICENSE.txt,sha256=5Dte9TUnLZzPRs4NQzl-Jc2-Ljd-t_v0ZR5Ng5r0UsY,35131
20
- foamlib-0.9.7.dist-info/RECORD,,