capytaine 2.2.1__cp312-cp312-macosx_13_0_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. capytaine/.dylibs/libgcc_s.1.1.dylib +0 -0
  2. capytaine/.dylibs/libgfortran.5.dylib +0 -0
  3. capytaine/.dylibs/libquadmath.0.dylib +0 -0
  4. capytaine/__about__.py +16 -0
  5. capytaine/__init__.py +35 -0
  6. capytaine/bem/__init__.py +0 -0
  7. capytaine/bem/airy_waves.py +106 -0
  8. capytaine/bem/engines.py +441 -0
  9. capytaine/bem/problems_and_results.py +548 -0
  10. capytaine/bem/solver.py +506 -0
  11. capytaine/bodies/__init__.py +4 -0
  12. capytaine/bodies/bodies.py +1193 -0
  13. capytaine/bodies/dofs.py +19 -0
  14. capytaine/bodies/predefined/__init__.py +6 -0
  15. capytaine/bodies/predefined/cylinders.py +151 -0
  16. capytaine/bodies/predefined/rectangles.py +109 -0
  17. capytaine/bodies/predefined/spheres.py +70 -0
  18. capytaine/green_functions/__init__.py +2 -0
  19. capytaine/green_functions/abstract_green_function.py +12 -0
  20. capytaine/green_functions/delhommeau.py +432 -0
  21. capytaine/green_functions/libs/Delhommeau_float32.cpython-312-darwin.so +0 -0
  22. capytaine/green_functions/libs/Delhommeau_float64.cpython-312-darwin.so +0 -0
  23. capytaine/green_functions/libs/__init__.py +0 -0
  24. capytaine/io/__init__.py +0 -0
  25. capytaine/io/bemio.py +141 -0
  26. capytaine/io/legacy.py +328 -0
  27. capytaine/io/mesh_loaders.py +1086 -0
  28. capytaine/io/mesh_writers.py +692 -0
  29. capytaine/io/meshio.py +38 -0
  30. capytaine/io/xarray.py +524 -0
  31. capytaine/matrices/__init__.py +16 -0
  32. capytaine/matrices/block.py +592 -0
  33. capytaine/matrices/block_toeplitz.py +325 -0
  34. capytaine/matrices/builders.py +89 -0
  35. capytaine/matrices/linear_solvers.py +232 -0
  36. capytaine/matrices/low_rank.py +395 -0
  37. capytaine/meshes/__init__.py +6 -0
  38. capytaine/meshes/clipper.py +464 -0
  39. capytaine/meshes/collections.py +324 -0
  40. capytaine/meshes/geometry.py +409 -0
  41. capytaine/meshes/meshes.py +870 -0
  42. capytaine/meshes/predefined/__init__.py +6 -0
  43. capytaine/meshes/predefined/cylinders.py +314 -0
  44. capytaine/meshes/predefined/rectangles.py +261 -0
  45. capytaine/meshes/predefined/spheres.py +62 -0
  46. capytaine/meshes/properties.py +276 -0
  47. capytaine/meshes/quadratures.py +80 -0
  48. capytaine/meshes/quality.py +448 -0
  49. capytaine/meshes/surface_integrals.py +63 -0
  50. capytaine/meshes/symmetric.py +383 -0
  51. capytaine/post_pro/__init__.py +6 -0
  52. capytaine/post_pro/free_surfaces.py +88 -0
  53. capytaine/post_pro/impedance.py +92 -0
  54. capytaine/post_pro/kochin.py +54 -0
  55. capytaine/post_pro/rao.py +60 -0
  56. capytaine/tools/__init__.py +0 -0
  57. capytaine/tools/cache_on_disk.py +26 -0
  58. capytaine/tools/deprecation_handling.py +18 -0
  59. capytaine/tools/lists_of_points.py +52 -0
  60. capytaine/tools/lru_cache.py +49 -0
  61. capytaine/tools/optional_imports.py +27 -0
  62. capytaine/tools/prony_decomposition.py +94 -0
  63. capytaine/tools/symbolic_multiplication.py +123 -0
  64. capytaine/ui/__init__.py +0 -0
  65. capytaine/ui/cli.py +28 -0
  66. capytaine/ui/rich.py +5 -0
  67. capytaine/ui/vtk/__init__.py +3 -0
  68. capytaine/ui/vtk/animation.py +329 -0
  69. capytaine/ui/vtk/body_viewer.py +28 -0
  70. capytaine/ui/vtk/helpers.py +82 -0
  71. capytaine/ui/vtk/mesh_viewer.py +461 -0
  72. capytaine-2.2.1.dist-info/LICENSE +674 -0
  73. capytaine-2.2.1.dist-info/METADATA +754 -0
  74. capytaine-2.2.1.dist-info/RECORD +76 -0
  75. capytaine-2.2.1.dist-info/WHEEL +4 -0
  76. capytaine-2.2.1.dist-info/entry_points.txt +3 -0

capytaine/green_functions/delhommeau.py ADDED
@@ -0,0 +1,432 @@
+"""Variants of Delhommeau's method for the computation of the Green function."""
+# Copyright (C) 2017-2024 Matthieu Ancellin
+# See LICENSE file at <https://github.com/capytaine/capytaine>
+
+import os
+import logging
+from functools import lru_cache
+from importlib import import_module
+
+import numpy as np
+
+from capytaine.meshes.meshes import Mesh
+from capytaine.meshes.collections import CollectionOfMeshes
+from capytaine.tools.prony_decomposition import exponential_decomposition, error_exponential_decomposition
+from capytaine.tools.cache_on_disk import cache_directory
+
+from capytaine.green_functions.abstract_green_function import AbstractGreenFunction
+
+LOG = logging.getLogger(__name__)
+
+_default_parameters = dict(
+    tabulation_nr=676,
+    tabulation_rmax=100.0,
+    tabulation_nz=372,
+    tabulation_zmin=-251.0,
+    tabulation_nb_integration_points=1001,
+    tabulation_grid_shape="scaled_nemoh3",
+    finite_depth_prony_decomposition_method="fortran",
+    floating_point_precision="float64",
+    gf_singularities="low_freq",
+)
+
+
+class Delhommeau(AbstractGreenFunction):
+    """The Green function as implemented in Aquadyn and Nemoh.
+
+    Parameters
+    ----------
+    tabulation_nr: int, optional
+        Number of tabulation points for horizontal coordinate.
+        If 0 is given, no tabulation is used at all.
+        Default: 676
+    tabulation_rmax: float, optional
+        Maximum value of r range for the tabulation. (Minimum is zero.)
+        Only used with the :code:`"scaled_nemoh3"` method.
+        Default: 100.0
+    tabulation_nz: int, optional
+        Number of tabulation points for vertical coordinate.
+        If 0 is given, no tabulation is used at all.
+        Default: 372
+    tabulation_zmin: float, optional
+        Minimum value of z range for the tabulation. (Maximum is zero.)
+        Only used with the :code:`"scaled_nemoh3"` method.
+        Default: -251.0
+    tabulation_nb_integration_points: int, optional
+        Number of points for the numerical integration w.r.t. :math:`\\theta` of
+        Delhommeau's integrals.
+        Default: 1001
+    tabulation_grid_shape: string, optional
+        Either :code:`"legacy"` or :code:`"scaled_nemoh3"`, which are the two
+        methods currently implemented.
+        Default: :code:`"scaled_nemoh3"`
+    tabulation_cache_dir: str or None, optional
+        Directory in which to save the tabulation file(s).
+        If None, the tabulation is not saved on disk.
+        Default: calls capytaine.tools.cache_on_disk.cache_directory(), which
+        returns the value of the environment variable CAPYTAINE_CACHE_DIR if
+        set, or else the default cache directory on your system.
+    finite_depth_prony_decomposition_method: string, optional
+        The implementation of the Prony decomposition used to compute the
+        finite water_depth Green function. Accepted values: :code:`'fortran'`
+        for Nemoh's implementation (by default), :code:`'python'` for an
+        experimental Python implementation.
+        See :func:`find_best_exponential_decomposition`.
+    floating_point_precision: string, optional
+        Either :code:`'float32'` for single precision computations or
+        :code:`'float64'` for double precision computations.
+        Default: :code:`'float64'`.
+    gf_singularities: string, optional
+        Choice of the variant used to extract the singularities from
+        the Green function. Currently only affects the infinite depth Green
+        function.
+        Default: "low_freq".
+
+    Attributes
+    ----------
+    fortran_core:
+        Compiled Fortran module with functions used to compute the Green
+        function.
+    tabulation_grid_shape_index: int
+        Integer passed to Fortran code to describe which method is used.
+    tabulated_r_range: numpy.array of shape (tabulation_nr,) and type floating_point_precision
+    tabulated_z_range: numpy.array of shape (tabulation_nz,) and type floating_point_precision
+        Coordinates of the tabulation points.
+    tabulated_integrals: numpy.array of shape (tabulation_nr, tabulation_nz, nb_tabulated_values) and type floating_point_precision
+        Tabulated Delhommeau integrals.
+    """
+
+
+
+    def __init__(self, *,
+                 tabulation_nr=_default_parameters["tabulation_nr"],
+                 tabulation_rmax=_default_parameters["tabulation_rmax"],
+                 tabulation_nz=_default_parameters["tabulation_nz"],
+                 tabulation_zmin=_default_parameters["tabulation_zmin"],
+                 tabulation_nb_integration_points=_default_parameters["tabulation_nb_integration_points"],
+                 tabulation_grid_shape=_default_parameters["tabulation_grid_shape"],
+                 tabulation_cache_dir=cache_directory(),
+                 finite_depth_prony_decomposition_method=_default_parameters["finite_depth_prony_decomposition_method"],
+                 floating_point_precision=_default_parameters["floating_point_precision"],
+                 gf_singularities=_default_parameters["gf_singularities"],
+                 ):
+
+        self.fortran_core = import_module(f"capytaine.green_functions.libs.Delhommeau_{floating_point_precision}")
+
+        self.tabulation_grid_shape = tabulation_grid_shape
+        fortran_enum = {
+            'legacy': self.fortran_core.constants.legacy_grid,
+            'scaled_nemoh3': self.fortran_core.constants.scaled_nemoh3_grid,
+        }
+        self.tabulation_grid_shape_index = fortran_enum[tabulation_grid_shape]
+
+        self.gf_singularities = gf_singularities
+        fortran_enum = {
+            'high_freq': self.fortran_core.constants.high_freq,
+            'low_freq': self.fortran_core.constants.low_freq,
+            'low_freq_with_rankine_part': self.fortran_core.constants.low_freq_with_rankine_part,
+        }
+        self.gf_singularities_index = fortran_enum[gf_singularities]
+
+        self.floating_point_precision = floating_point_precision
+        self.tabulation_nb_integration_points = tabulation_nb_integration_points
+
+        self.tabulation_cache_dir = tabulation_cache_dir
+        if tabulation_cache_dir is None:
+            self._create_tabulation(tabulation_nr, tabulation_rmax,
+                                    tabulation_nz, tabulation_zmin,
+                                    tabulation_nb_integration_points)
+        else:
+            self._create_or_load_tabulation(tabulation_nr, tabulation_rmax,
+                                            tabulation_nz, tabulation_zmin,
+                                            tabulation_nb_integration_points,
+                                            tabulation_cache_dir)
+
+        self.finite_depth_prony_decomposition_method = finite_depth_prony_decomposition_method
+
+        self.exportable_settings = {
+            'green_function': self.__class__.__name__,
+            'tabulation_nr': tabulation_nr,
+            'tabulation_rmax': tabulation_rmax,
+            'tabulation_nz': tabulation_nz,
+            'tabulation_zmin': tabulation_zmin,
+            'tabulation_nb_integration_points': tabulation_nb_integration_points,
+            'tabulation_grid_shape': tabulation_grid_shape,
+            'finite_depth_prony_decomposition_method': finite_depth_prony_decomposition_method,
+            'floating_point_precision': floating_point_precision,
+            'gf_singularities': gf_singularities,
+        }
+
+        self._hash = hash(self.exportable_settings.values())
+
+    def __hash__(self):
+        return self._hash
+
+    def __str__(self):
+        # Print only the non-default values.
+        to_be_printed = []
+        for name, value in self.exportable_settings.items():
+            if name in _default_parameters and value != _default_parameters[name]:
+                to_be_printed.append(f"{name}={repr(value)}")
+        return f"{self.__class__.__name__}({', '.join(to_be_printed)})"
+
+    def __repr__(self):
+        # Same as __str__ except all values are printed even when they are the
+        # default value.
+        to_be_printed = []
+        for name, value in self.exportable_settings.items():
+            if name in _default_parameters:
+                to_be_printed.append(f"{name}={repr(value)}")
+        return f"{self.__class__.__name__}({', '.join(to_be_printed)})"
+
+    def _repr_pretty_(self, p, cycle):
+        p.text(self.__repr__())
+
+    def _create_or_load_tabulation(self, tabulation_nr, tabulation_rmax,
+                                   tabulation_nz, tabulation_zmin,
+                                   tabulation_nb_integration_points,
+                                   tabulation_cache_dir):
+        """This method either:
+        - loads an existing tabulation saved on disk
+        - generates a new tabulation with the data provided as arguments and saves it on disk.
+        """
+
+        # Normalize inputs
+        tabulation_rmax = float(tabulation_rmax)
+        tabulation_zmin = float(tabulation_zmin)
+
+        filename = "tabulation_{}_{}_{}_{}_{}_{}_{}.npz".format(
+            self.floating_point_precision, self.tabulation_grid_shape,
+            tabulation_nr, tabulation_rmax, tabulation_nz, tabulation_zmin,
+            tabulation_nb_integration_points
+        )
+        filepath = os.path.join(tabulation_cache_dir, filename)
+
+        if os.path.exists(filepath):
+            LOG.info("Loading tabulation from %s", filepath)
+            loaded_arrays = np.load(filepath)
+            self.tabulated_r_range = loaded_arrays["r_range"]
+            self.tabulated_z_range = loaded_arrays["z_range"]
+            self.tabulated_integrals = loaded_arrays["values"]
+
+        else:
+            self._create_tabulation(tabulation_nr, tabulation_rmax,
+                                    tabulation_nz, tabulation_zmin,
+                                    tabulation_nb_integration_points)
+            LOG.debug("Saving tabulation in %s", filepath)
+            np.savez_compressed(
+                filepath, r_range=self.tabulated_r_range, z_range=self.tabulated_z_range,
+                values=self.tabulated_integrals
+            )
+
+    def _create_tabulation(self, tabulation_nr, tabulation_rmax,
+                           tabulation_nz, tabulation_zmin,
+                           tabulation_nb_integration_points):
+        LOG.warning("Precomputing tabulation, it may take a few seconds.")
+        self.tabulated_r_range = self.fortran_core.delhommeau_integrals.default_r_spacing(
+            tabulation_nr, tabulation_rmax, self.tabulation_grid_shape_index
+        )
+        self.tabulated_z_range = self.fortran_core.delhommeau_integrals.default_z_spacing(
+            tabulation_nz, tabulation_zmin, self.tabulation_grid_shape_index
+        )
+        self.tabulated_integrals = self.fortran_core.delhommeau_integrals.construct_tabulation(
+            self.tabulated_r_range, self.tabulated_z_range, tabulation_nb_integration_points,
+        )
+
+    @lru_cache(maxsize=128)
+    def find_best_exponential_decomposition(self, dimensionless_omega, dimensionless_wavenumber):
+        """Compute the decomposition of a part of the finite water_depth Green function as a sum of exponential functions.
+
+        Two implementations are available: the legacy Fortran implementation from Nemoh and a newer one written in Python.
+        For some still unexplained reasons, the two implementations do not always give the exact same result.
+        Until the problem is better understood, the Fortran implementation is the default one, to ensure consistency with Nemoh.
+        The Fortran version is also significantly faster...
+
+        Results are cached.
+
+        Parameters
+        ----------
+        dimensionless_omega: float
+            dimensionless angular frequency: :math:`kh \\tanh (kh) = \\omega^2 h/g`
+        dimensionless_wavenumber: float
+            dimensionless wavenumber: :math:`kh`
+
+        Returns
+        -------
+        Tuple[np.ndarray, np.ndarray]
+            the amplitude and growth rates of the exponentials
+        """
+
+        LOG.debug("\tCompute Prony decomposition in finite water_depth Green function "
+                  "for dimless_omega=%.2e and dimless_wavenumber=%.2e",
+                  dimensionless_omega, dimensionless_wavenumber)
+
+        if self.finite_depth_prony_decomposition_method.lower() == 'python':
+            # The function that will be approximated.
+            @np.vectorize
+            def f(x):
+                return self.fortran_core.initialize_green_wave.ff(x, dimensionless_omega, dimensionless_wavenumber)
+
+            # Try different increasing number of exponentials
+            for n_exp in range(4, 31, 2):
+
+                # The coefficients are computed on a resolution of 4*n_exp+1 ...
+                X = np.linspace(-0.1, 20.0, 4*n_exp+1)
+                a, lamda = exponential_decomposition(X, f(X), n_exp)
+
+                # ... and they are evaluated on a finer discretization.
+                X = np.linspace(-0.1, 20.0, 8*n_exp+1)
+                if error_exponential_decomposition(X, f(X), a, lamda) < 1e-4:
+                    break
+
+            else:
+                LOG.warning("No suitable exponential decomposition has been found "
+                            "for dimless_omega=%.2e and dimless_wavenumber=%.2e",
+                            dimensionless_omega, dimensionless_wavenumber)
+
+        elif self.finite_depth_prony_decomposition_method.lower() == 'fortran':
+            lamda, a, nexp = self.fortran_core.old_prony_decomposition.lisc(dimensionless_omega, dimensionless_wavenumber)
+            lamda = lamda[:nexp]
+            a = a[:nexp]
+
+        else:
+            raise ValueError("Unrecognized method name for the Prony decomposition.")
+
+        # Add one more exponential function (actually a constant).
+        # It is not clear where it comes from exactly in the theory...
+        a = np.concatenate([a, np.array([2])])
+        lamda = np.concatenate([lamda, np.array([0.0])])
+
+        return a, lamda
+
+    def evaluate(self, mesh1, mesh2, free_surface=0.0, water_depth=np.inf, wavenumber=1.0, adjoint_double_layer=True, early_dot_product=True):
+        r"""The main method of the class, called by the engine to assemble the influence matrices.
+
+        Parameters
+        ----------
+        mesh1: Mesh or CollectionOfMeshes or list of points
+            mesh of the receiving body (where the potential is measured)
+            If only S is needed or early_dot_product is False, an array of points of shape (n, 3) can be passed instead of a mesh.
+        mesh2: Mesh or CollectionOfMeshes
+            mesh of the source body (over which the source distribution is integrated)
+        free_surface: float, optional
+            position of the free surface (default: :math:`z = 0`)
+        water_depth: float, optional
+            constant depth of water (default: :math:`+\infty`)
+        wavenumber: float, optional
+            wavenumber (default: 1.0)
+        adjoint_double_layer: bool, optional
+            if True (default), compute the adjoint double layer (K matrix, indirect method); if False, compute the double layer (D matrix, direct method)
+        early_dot_product: boolean, optional
+            if False, return K as a (n, m, 3) array storing ∫∇G
+            if True, return K as a (n, m) array storing ∫∇G·n
+
+        Returns
+        -------
+        tuple of numpy arrays
+            the matrices :math:`S` and :math:`K`
+        """
+
+        wavenumber = float(wavenumber)
+
+        if free_surface == np.inf:  # No free surface, only a single Rankine source term
+
+            a_exp, lamda_exp = np.empty(1), np.empty(1)  # Dummy arrays that won't actually be used by the fortran code.
+
+            coeffs = np.array((1.0, 0.0, 0.0))
+
+        elif water_depth == np.inf:
+
+            a_exp, lamda_exp = np.empty(1), np.empty(1)  # Idem
+
+            if wavenumber == 0.0:
+                coeffs = np.array((1.0, 1.0, 0.0))
+            elif wavenumber == np.inf:
+                coeffs = np.array((1.0, -1.0, 0.0))
+            else:
+                if self.gf_singularities == "high_freq":
+                    coeffs = np.array((1.0, -1.0, 1.0))
+                else:  # low_freq or low_freq_with_rankine_part
+                    coeffs = np.array((1.0, 1.0, 1.0))
+
+        else:  # Finite water_depth
+            if wavenumber == 0.0 or wavenumber == np.inf:
+                raise NotImplementedError("Zero or infinite frequencies not implemented for finite depth.")
+            else:
+                a_exp, lamda_exp = self.find_best_exponential_decomposition(
+                    wavenumber*water_depth*np.tanh(wavenumber*water_depth),
+                    wavenumber*water_depth,
+                )
+                coeffs = np.array((1.0, 1.0, 1.0))
+
+        if isinstance(mesh1, Mesh) or isinstance(mesh1, CollectionOfMeshes):
+            collocation_points = mesh1.faces_centers
+            nb_collocation_points = mesh1.nb_faces
+            if not adjoint_double_layer:  # Computing the D matrix
+                early_dot_product_normals = mesh2.faces_normals
+            else:  # Computing the K matrix
+                early_dot_product_normals = mesh1.faces_normals
+
+        elif isinstance(mesh1, np.ndarray) and mesh1.ndim == 2 and mesh1.shape[1] == 3:
+            # This is used when computing potential or velocity at given points in postprocessing
+            collocation_points = mesh1
+            nb_collocation_points = mesh1.shape[0]
+            if not adjoint_double_layer:  # Computing the D matrix
+                early_dot_product_normals = mesh2.faces_normals
+            else:  # Computing the K matrix
+                early_dot_product_normals = np.zeros((nb_collocation_points, 3))
+                # Dummy argument since this method is meant to be used either
+                # - to compute potential, then only S is needed and early_dot_product_normals is irrelevant,
+                # - to compute velocity, then the adjoint full gradient is needed and early_dot_product is False and this value is unused.
+                # TODO: add an only_S argument and return an error here if (early_dot_product and not only_S)
+
+        else:
+            raise ValueError(f"Unrecognized first input for {self.__class__.__name__}.evaluate:\n{mesh1}")
+
+        if (np.any(abs(mesh2.faces_centers[:, 2]) < 1e-6)  # free surface panel
+                and self.gf_singularities != "low_freq"):
+            raise NotImplementedError("Free surface panels are only supported for cpt.Delhommeau(..., gf_singularities='low_freq').")
+
+        if self.floating_point_precision == "float32":
+            dtype = "complex64"
+        elif self.floating_point_precision == "float64":
+            dtype = "complex128"
+        else:
+            raise NotImplementedError(f"Unsupported floating point precision: {self.floating_point_precision}")
+
+        S = np.empty((nb_collocation_points, mesh2.nb_faces), order="F", dtype=dtype)
+        K = np.empty((nb_collocation_points, mesh2.nb_faces, 1 if early_dot_product else 3), order="F", dtype=dtype)
+
+        # Main call to Fortran code
+        self.fortran_core.matrices.build_matrices(
+            collocation_points, early_dot_product_normals,
+            mesh2.vertices, mesh2.faces + 1,
+            mesh2.faces_centers, mesh2.faces_normals,
+            mesh2.faces_areas, mesh2.faces_radiuses,
+            *mesh2.quadrature_points,
+            wavenumber, water_depth,
+            coeffs,
+            self.tabulation_nb_integration_points, self.tabulation_grid_shape_index,
+            self.tabulated_r_range, self.tabulated_z_range, self.tabulated_integrals,
+            lamda_exp, a_exp,
+            mesh1 is mesh2, self.gf_singularities_index, adjoint_double_layer,
+            S, K
+        )
+
+        if np.any(np.isnan(S)) or np.any(np.isnan(K)):
+            raise RuntimeError("Green function returned a NaN in the interaction matrix.\n"
+                               "It could be due to overlapping panels.")
+
+        if early_dot_product: K = K.reshape((nb_collocation_points, mesh2.nb_faces))
+
+        return S, K
+
+################################
+
+class XieDelhommeau(Delhommeau):
+    """Legacy way to call the gf_singularities="low_freq" variant."""
+
+    def __init__(self, **kwargs):
+        super().__init__(gf_singularities="low_freq", **kwargs)
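
For context, a minimal usage sketch (not part of the package diff) of the class defined above. It assumes the usual `import capytaine as cpt` convention and that the green function object is passed to `cpt.BEMSolver`, as in the capytaine documentation; the exact solver wiring may differ in your setup.

    import capytaine as cpt

    # The tabulation is built (or loaded from the cache directory) at construction time;
    # __str__ above prints only non-default settings, __repr__ prints all of them.
    gf = cpt.Delhommeau(floating_point_precision="float32")
    print(gf)   # e.g. Delhommeau(floating_point_precision='float32')

    # The same object (and its cached tabulation) is then reused each time the
    # S and K influence matrices are assembled via evaluate().
    solver = cpt.BEMSolver(green_function=gf)
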
capytaine/io/bemio.py ADDED
@@ -0,0 +1,141 @@
+import logging
+
+import numpy as np
+import pandas as pd
+from scipy.optimize import newton
+
+LOG = logging.getLogger(__name__)
+
+#######################
+# Import from Bemio #
+#######################
+
+def dataframe_from_bemio(bemio_obj, wavenumber, wavelength):
+    """Transform a :class:`bemio.data_structures.bem.HydrodynamicData` into a
+    :class:`pandas.DataFrame`.
+
+    Parameters
+    ----------
+    bemio_obj: Bemio data_structures.bem.HydrodynamicData class
+        Loaded NEMOH, AQWA, or WAMIT data created using `bemio.io.nemoh.read`,
+        `bemio.io.aqwa.read`, or `bemio.io.wamit.read` functions, respectively.
+    wavenumber: bool
+        If True, the coordinate 'wavenumber' will be added to the output dataset.
+    wavelength: bool
+        If True, the coordinate 'wavelength' will be added to the output dataset.
+    """
+
+
+    dofs = np.array(['Surge', 'Sway', 'Heave', 'Roll', 'Pitch', 'Yaw'])
+    for i in range(bemio_obj.body[0].num_bodies):
+        difr_dict = []
+        rad_dict = []
+
+        rho = bemio_obj.body[0].rho
+        g = bemio_obj.body[0].g
+
+        if bemio_obj.body[i].water_depth == 'infinite':
+            bemio_obj.body[i].water_depth = np.inf
+
+        if bemio_obj.body[i].bem_code == 'WAMIT':  # WAMIT coefficients need to be dimensionalized
+            from_wamit = True
+
+        for omega_idx, omega in enumerate(np.sort(bemio_obj.body[i].w)):
+
+            # DiffractionProblem variable equivalents
+            for dir_idx, dir in enumerate(bemio_obj.body[i].wave_dir):
+                temp_dict = {}
+                temp_dict['body_name'] = bemio_obj.body[i].name
+                temp_dict['water_depth'] = bemio_obj.body[i].water_depth
+                temp_dict['omega'] = omega
+                temp_dict['period'] = 2*np.pi/omega
+                temp_dict['rho'] = rho
+                temp_dict['g'] = g
+                temp_dict['forward_speed'] = 0.0
+                temp_dict['wave_direction'] = np.radians(dir)
+                temp_dict['influenced_dof'] = dofs
+
+                if wavenumber or wavelength:
+                    if temp_dict['water_depth'] == np.inf or omega**2*temp_dict['water_depth']/temp_dict['g'] > 20:
+                        k = omega**2/temp_dict['g']
+                    else:
+                        k = newton(lambda x: x*np.tanh(x) - omega**2*temp_dict['water_depth']/temp_dict['g'], x0=1.0)/temp_dict['water_depth']
+
+                    if wavenumber:
+                        temp_dict['wavenumber'] = k
+
+                    if wavelength:
+                        if k == 0.0:
+                            temp_dict['wavelength'] = np.inf
+                        else:
+                            temp_dict['wavelength'] = 2*np.pi/k
+
+                Fexc = np.empty(shape=bemio_obj.body[i].ex.re[:, dir_idx, omega_idx].shape, dtype=np.complex128)
+                if from_wamit:
+                    Fexc.real = bemio_obj.body[i].ex.re[:, dir_idx, omega_idx] * rho * g
+                    Fexc.imag = bemio_obj.body[i].ex.im[:, dir_idx, omega_idx] * rho * g
+                else:
+                    Fexc.real = bemio_obj.body[i].ex.re[:, dir_idx, omega_idx]
+                    Fexc.imag = bemio_obj.body[i].ex.im[:, dir_idx, omega_idx]
+                temp_dict['diffraction_force'] = Fexc.flatten()
+
+                try:
+                    Fexc_fk = np.empty(shape=bemio_obj.body[i].ex.fk.re[:, dir_idx, omega_idx].shape, dtype=np.complex128)
+                    if from_wamit:
+                        Fexc_fk.real = bemio_obj.body[i].ex.fk.re[:, dir_idx, omega_idx] * rho * g
+                        Fexc_fk.imag = bemio_obj.body[i].ex.fk.im[:, dir_idx, omega_idx] * rho * g
+                    else:
+                        Fexc_fk.real = bemio_obj.body[i].ex.fk.re[:, dir_idx, omega_idx]
+                        Fexc_fk.imag = bemio_obj.body[i].ex.fk.im[:, dir_idx, omega_idx]
+                    temp_dict['Froude_Krylov_force'] = Fexc_fk.flatten()
+
+                except AttributeError:
+                    # LOG.warning('\tNo Froude-Krylov forces found for ' + bemio_obj.body[i].name + ' at ' + str(dir) + \
+                    #             ' degrees (omega = ' + str(omega) + '), replacing with zeros.')
+                    temp_dict['Froude_Krylov_force'] = np.zeros((bemio_obj.body[i].ex.re[:, dir_idx, omega_idx].size,), dtype=np.complex128)
+
+                difr_dict.append(temp_dict)
+
+            # RadiationProblem + Hydrostatics variable equivalents
+            for radiating_dof_idx, radiating_dof in enumerate(dofs):
+                temp_dict = {}
+                temp_dict['body_name'] = bemio_obj.body[i].name
+                temp_dict['water_depth'] = bemio_obj.body[i].water_depth
+                temp_dict['omega'] = omega
+                temp_dict['rho'] = rho
+                temp_dict['g'] = g
+                temp_dict['forward_speed'] = 0.0
+                temp_dict['wave_direction'] = 0.0
+                temp_dict['influenced_dof'] = dofs
+                temp_dict['radiating_dof'] = radiating_dof
+                temp_dict['added_mass'] = bemio_obj.body[i].am.all[radiating_dof_idx, :, omega_idx].flatten()
+                temp_dict['radiation_damping'] = bemio_obj.body[i].rd.all[radiating_dof_idx, :, omega_idx].flatten()
+
+                if from_wamit:
+                    temp_dict['added_mass'] = temp_dict['added_mass'] * rho
+                    temp_dict['radiation_damping'] = temp_dict['radiation_damping'] * rho * omega
+
+                if wavenumber or wavelength:
+                    if temp_dict['water_depth'] == np.inf or omega**2*temp_dict['water_depth']/temp_dict['g'] > 20:
+                        k = omega**2/temp_dict['g']
+                    else:
+                        k = newton(lambda x: x*np.tanh(x) - omega**2*temp_dict['water_depth']/temp_dict['g'], x0=1.0)/temp_dict['water_depth']
+
+                    if wavenumber:
+                        temp_dict['wavenumber'] = k
+
+                    if wavelength:
+                        if k == 0.0:
+                            temp_dict['wavelength'] = np.inf
+                        else:
+                            temp_dict['wavelength'] = 2*np.pi/k
+
+                rad_dict.append(temp_dict)
+
+    df = pd.concat([
+        pd.DataFrame.from_dict(difr_dict).explode(['influenced_dof', 'diffraction_force', 'Froude_Krylov_force']),
+        pd.DataFrame.from_dict(rad_dict).explode(['influenced_dof', 'added_mass', 'radiation_damping'])
+    ])
+    df = df.astype({'added_mass': np.float64, 'radiation_damping': np.float64, 'diffraction_force': np.complex128, 'Froude_Krylov_force': np.complex128})
+
+    return df
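
For context, a hypothetical usage sketch (not part of the package diff) of the converter above. The separate `bemio` package must be installed; the reader call and the directory path below are placeholders, so check the bemio documentation for the exact reader arguments.

    from bemio.io.nemoh import read              # separate bemio package, assumed installed
    from capytaine.io.bemio import dataframe_from_bemio

    # Placeholder path to an existing NEMOH simulation directory.
    bemio_data = read("path/to/nemoh/simulation")

    # Long-format DataFrame: one row per frequency and wave direction (diffraction)
    # or radiating dof (radiation), per influenced dof, with 'diffraction_force',
    # 'Froude_Krylov_force', 'added_mass' and 'radiation_damping' columns.
    df = dataframe_from_bemio(bemio_data, wavenumber=True, wavelength=True)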