capytaine 3.0.0a1__cp38-cp38-macosx_15_0_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65) hide show
  1. capytaine/.dylibs/libgcc_s.1.1.dylib +0 -0
  2. capytaine/.dylibs/libgfortran.5.dylib +0 -0
  3. capytaine/.dylibs/libquadmath.0.dylib +0 -0
  4. capytaine/__about__.py +21 -0
  5. capytaine/__init__.py +32 -0
  6. capytaine/bem/__init__.py +0 -0
  7. capytaine/bem/airy_waves.py +111 -0
  8. capytaine/bem/engines.py +321 -0
  9. capytaine/bem/problems_and_results.py +601 -0
  10. capytaine/bem/solver.py +718 -0
  11. capytaine/bodies/__init__.py +4 -0
  12. capytaine/bodies/bodies.py +630 -0
  13. capytaine/bodies/dofs.py +146 -0
  14. capytaine/bodies/hydrostatics.py +540 -0
  15. capytaine/bodies/multibodies.py +216 -0
  16. capytaine/green_functions/Delhommeau_float32.cpython-38-darwin.so +0 -0
  17. capytaine/green_functions/Delhommeau_float64.cpython-38-darwin.so +0 -0
  18. capytaine/green_functions/__init__.py +2 -0
  19. capytaine/green_functions/abstract_green_function.py +64 -0
  20. capytaine/green_functions/delhommeau.py +522 -0
  21. capytaine/green_functions/hams.py +210 -0
  22. capytaine/io/__init__.py +0 -0
  23. capytaine/io/bemio.py +153 -0
  24. capytaine/io/legacy.py +228 -0
  25. capytaine/io/wamit.py +479 -0
  26. capytaine/io/xarray.py +673 -0
  27. capytaine/meshes/__init__.py +2 -0
  28. capytaine/meshes/abstract_meshes.py +375 -0
  29. capytaine/meshes/clean.py +302 -0
  30. capytaine/meshes/clip.py +347 -0
  31. capytaine/meshes/export.py +89 -0
  32. capytaine/meshes/geometry.py +259 -0
  33. capytaine/meshes/io.py +433 -0
  34. capytaine/meshes/meshes.py +826 -0
  35. capytaine/meshes/predefined/__init__.py +6 -0
  36. capytaine/meshes/predefined/cylinders.py +280 -0
  37. capytaine/meshes/predefined/rectangles.py +202 -0
  38. capytaine/meshes/predefined/spheres.py +55 -0
  39. capytaine/meshes/quality.py +159 -0
  40. capytaine/meshes/surface_integrals.py +82 -0
  41. capytaine/meshes/symmetric_meshes.py +641 -0
  42. capytaine/meshes/visualization.py +353 -0
  43. capytaine/post_pro/__init__.py +6 -0
  44. capytaine/post_pro/free_surfaces.py +85 -0
  45. capytaine/post_pro/impedance.py +92 -0
  46. capytaine/post_pro/kochin.py +54 -0
  47. capytaine/post_pro/rao.py +60 -0
  48. capytaine/tools/__init__.py +0 -0
  49. capytaine/tools/block_circulant_matrices.py +275 -0
  50. capytaine/tools/cache_on_disk.py +26 -0
  51. capytaine/tools/deprecation_handling.py +18 -0
  52. capytaine/tools/lists_of_points.py +52 -0
  53. capytaine/tools/memory_monitor.py +45 -0
  54. capytaine/tools/optional_imports.py +27 -0
  55. capytaine/tools/prony_decomposition.py +150 -0
  56. capytaine/tools/symbolic_multiplication.py +161 -0
  57. capytaine/tools/timer.py +90 -0
  58. capytaine/ui/__init__.py +0 -0
  59. capytaine/ui/cli.py +28 -0
  60. capytaine/ui/rich.py +5 -0
  61. capytaine-3.0.0a1.dist-info/LICENSE +674 -0
  62. capytaine-3.0.0a1.dist-info/METADATA +755 -0
  63. capytaine-3.0.0a1.dist-info/RECORD +65 -0
  64. capytaine-3.0.0a1.dist-info/WHEEL +4 -0
  65. capytaine-3.0.0a1.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,522 @@
1
+ """Variants of Delhommeau's method for the computation of the Green function."""
2
+ # Copyright (C) 2017-2024 Matthieu Ancellin
3
+ # See LICENSE file at <https://github.com/capytaine/capytaine>
4
+
5
+ import os
6
+ import logging
7
+ from functools import lru_cache
8
+ from importlib import import_module
9
+
10
+ import numpy as np
11
+
12
+ from capytaine.tools.prony_decomposition import find_best_exponential_decomposition, PronyDecompositionFailure
13
+ from capytaine.tools.cache_on_disk import cache_directory
14
+
15
+ from capytaine.green_functions.abstract_green_function import AbstractGreenFunction, GreenFunctionEvaluationError
16
+
17
+ LOG = logging.getLogger(__name__)
18
+
19
+ _default_parameters = dict(
20
+ tabulation_nr=676,
21
+ tabulation_rmax=100.0,
22
+ tabulation_nz=372,
23
+ tabulation_zmin=-251.0,
24
+ tabulation_nb_integration_points=1001,
25
+ tabulation_grid_shape="scaled_nemoh3",
26
+ finite_depth_method="newer",
27
+ finite_depth_prony_decomposition_method="python",
28
+ floating_point_precision="float64",
29
+ gf_singularities="low_freq",
30
+ )
31
+
32
+
33
class Delhommeau(AbstractGreenFunction):
    """The Green function as implemented in Aquadyn and Nemoh.

    Parameters
    ----------
    tabulation_nr: int, optional
        Number of tabulation points for horizontal coordinate.
        If 0 is given, no tabulation is used at all.
        Default: 676
    tabulation_rmax: float, optional
        Maximum value of r range for the tabulation. (Minimum is zero.)
        Only used with the :code:`"scaled_nemoh3"` method.
        Default: 100.0
    tabulation_nz: int, optional
        Number of tabulation points for vertical coordinate.
        If 0 is given, no tabulation is used at all.
        Default: 372
    tabulation_zmin: float, optional
        Minimum value of z range for the tabulation. (Maximum is zero.)
        Only used with the :code:`"scaled_nemoh3"` method.
        Default: -251.0
    tabulation_nb_integration_points: int, optional
        Number of points for the numerical integration w.r.t. :math:`theta` of
        Delhommeau's integrals
        Default: 1001
    tabulation_grid_shape: string, optional
        Either :code:`"legacy"` or :code:`"scaled_nemoh3"`, which are the two
        methods currently implemented.
        Default: :code:`"scaled_nemoh3"`
    tabulation_cache_dir: str or None, optional
        Directory in which to save the tabulation file(s).
        If None, the tabulation is not saved on disk.
        Default: calls capytaine.tools.cache_on_disk.cache_directory(), which
        returns the value of the environment variable CAPYTAINE_CACHE_DIR if
        set, or else the default cache directory on your system.
    finite_depth_method: string, optional
        The method used to compute the finite depth Green function.
        Accepted values: :code:`"legacy"` and :code:`"newer"`.
        Default: :code:`"newer"`
    finite_depth_prony_decomposition_method: string, optional
        The implementation of the Prony decomposition used to compute the
        finite water_depth Green function. Accepted values: :code:`'fortran'`
        for Nemoh's implementation, :code:`'python'` for a Python
        implementation.
        Default: :code:`'python'`.
        See :func:`find_best_exponential_decomposition`.
    floating_point_precision: string, optional
        Either :code:`'float32'` for single precision computations or
        :code:`'float64'` for double precision computations.
        Default: :code:`'float64'`.
    gf_singularities: string, optional
        Choice of the variant among the ways singularities can be extracted
        from the Green function. Accepted values: :code:`"high_freq"`,
        :code:`"low_freq"` and :code:`"low_freq_with_rankine_part"`.
        Currently only affects the infinite depth Green function.
        Default: "low_freq".

    Attributes
    ----------
    fortran_core:
        Compiled Fortran module with functions used to compute the Green
        function.
    tabulation_grid_shape_index: int
    gf_singularities_index: int
    finite_depth_method_index: int
        Integers passed to Fortran code to describe which method is used.
    tabulated_r_range: numpy.array of shape (tabulation_nr,) and type floating_point_precision
    tabulated_z_range: numpy.array of shape (tabulation_nz,) and type floating_point_precision
        Coordinates of the tabulation points.
    tabulated_integrals: numpy.array of shape (tabulation_nr, tabulation_nz, nb_tabulated_values) and type floating_point_precision
        Tabulated Delhommeau integrals.
    """

    # Placeholder passed to the Fortran core (see `evaluate`); this variant
    # does not use precomputed dispersion relation roots, but the Fortran
    # interface expects an array argument nonetheless.
    dispersion_relation_roots = np.empty(1)  # dummy array
103
+
104
+
105
+ def __init__(self, *,
106
+ tabulation_nr=_default_parameters["tabulation_nr"],
107
+ tabulation_rmax=_default_parameters["tabulation_rmax"],
108
+ tabulation_nz=_default_parameters["tabulation_nz"],
109
+ tabulation_zmin=_default_parameters["tabulation_zmin"],
110
+ tabulation_nb_integration_points=_default_parameters["tabulation_nb_integration_points"],
111
+ tabulation_grid_shape=_default_parameters["tabulation_grid_shape"],
112
+ tabulation_cache_dir=cache_directory(),
113
+ finite_depth_method=_default_parameters["finite_depth_method"],
114
+ finite_depth_prony_decomposition_method=_default_parameters["finite_depth_prony_decomposition_method"],
115
+ floating_point_precision=_default_parameters["floating_point_precision"],
116
+ gf_singularities=_default_parameters["gf_singularities"],
117
+ ):
118
+
119
+ self.fortran_core = import_module(f"capytaine.green_functions.Delhommeau_{floating_point_precision}")
120
+
121
+ self.tabulation_grid_shape = tabulation_grid_shape
122
+ fortran_enum = {
123
+ 'legacy': self.fortran_core.constants.legacy_grid,
124
+ 'scaled_nemoh3': self.fortran_core.constants.scaled_nemoh3_grid,
125
+ }
126
+ self.tabulation_grid_shape_index = fortran_enum[tabulation_grid_shape]
127
+
128
+ self.gf_singularities = gf_singularities
129
+ self.gf_singularities_fortran_enum = {
130
+ 'high_freq': self.fortran_core.constants.high_freq,
131
+ 'low_freq': self.fortran_core.constants.low_freq,
132
+ 'low_freq_with_rankine_part': self.fortran_core.constants.low_freq_with_rankine_part,
133
+ }
134
+
135
+ self.finite_depth_method = finite_depth_method
136
+ fortran_enum = {
137
+ 'legacy': self.fortran_core.constants.legacy_finite_depth,
138
+ 'newer': self.fortran_core.constants.newer_finite_depth,
139
+ }
140
+ self.finite_depth_method_index = fortran_enum[finite_depth_method]
141
+
142
+ self.floating_point_precision = floating_point_precision
143
+ self.tabulation_nb_integration_points = tabulation_nb_integration_points
144
+
145
+ self.tabulation_cache_dir = tabulation_cache_dir
146
+ if tabulation_cache_dir is None:
147
+ self._create_tabulation(tabulation_nr, tabulation_rmax,
148
+ tabulation_nz, tabulation_zmin,
149
+ tabulation_nb_integration_points)
150
+ else:
151
+ self._create_or_load_tabulation(tabulation_nr, tabulation_rmax,
152
+ tabulation_nz, tabulation_zmin,
153
+ tabulation_nb_integration_points,
154
+ tabulation_cache_dir)
155
+
156
+ self.finite_depth_prony_decomposition_method = finite_depth_prony_decomposition_method
157
+
158
+ self.exportable_settings = {
159
+ 'green_function': self.__class__.__name__,
160
+ 'tabulation_nr': tabulation_nr,
161
+ 'tabulation_rmax': tabulation_rmax,
162
+ 'tabulation_nz': tabulation_nz,
163
+ 'tabulation_zmin': tabulation_zmin,
164
+ 'tabulation_nb_integration_points': tabulation_nb_integration_points,
165
+ 'tabulation_grid_shape': tabulation_grid_shape,
166
+ 'finite_depth_method': finite_depth_method,
167
+ 'finite_depth_prony_decomposition_method': finite_depth_prony_decomposition_method,
168
+ 'floating_point_precision': floating_point_precision,
169
+ 'gf_singularities': gf_singularities,
170
+ }
171
+
172
+ self._hash = hash(self.exportable_settings.values())
173
+
174
    def __hash__(self):
        # Hash precomputed once in __init__ from the exportable settings.
        return self._hash
176
+
177
+ def __str__(self):
178
+ # Print only the non-default values.
179
+ to_be_printed = []
180
+ for name, value in self.exportable_settings.items():
181
+ if name in _default_parameters and value != _default_parameters[name]:
182
+ to_be_printed.append(f"{name}={repr(value)}")
183
+ return f"{self.__class__.__name__}({', '.join(to_be_printed)})"
184
+
185
+ def __repr__(self):
186
+ # Same as __str__ except all values are printed even when they are the
187
+ # default value.
188
+ to_be_printed = []
189
+ for name, value in self.exportable_settings.items():
190
+ if name in _default_parameters:
191
+ to_be_printed.append(f"{name}={repr(value)}")
192
+ return f"{self.__class__.__name__}({', '.join(to_be_printed)})"
193
+
194
+ def _repr_pretty_(self, p, cycle):
195
+ p.text(self.__repr__())
196
+
197
+ def _create_or_load_tabulation(self, tabulation_nr, tabulation_rmax,
198
+ tabulation_nz, tabulation_zmin,
199
+ tabulation_nb_integration_points,
200
+ tabulation_cache_dir):
201
+ """This method either:
202
+ - loads an existing tabulation saved on disk
203
+ - generates a new tabulation with the data provided as argument and save it on disk.
204
+ """
205
+
206
+ # Normalize inputs
207
+ tabulation_rmax = float(tabulation_rmax)
208
+ tabulation_zmin = float(tabulation_zmin)
209
+
210
+ filename = "tabulation_{}_{}_{}_{}_{}_{}_{}.npz".format(
211
+ self.floating_point_precision, self.tabulation_grid_shape,
212
+ tabulation_nr, tabulation_rmax, tabulation_nz, tabulation_zmin,
213
+ tabulation_nb_integration_points
214
+ )
215
+ filepath = os.path.join(tabulation_cache_dir, filename)
216
+
217
+ if os.path.exists(filepath):
218
+ try:
219
+ LOG.info("Loading tabulation from %s", filepath)
220
+ loaded_arrays = np.load(filepath)
221
+ self.tabulated_r_range = loaded_arrays["r_range"]
222
+ self.tabulated_z_range = loaded_arrays["z_range"]
223
+ self.tabulated_integrals = loaded_arrays["values"]
224
+ return filename
225
+ except (EOFError, FileNotFoundError, KeyError, ValueError):
226
+ LOG.warning("Error loading tabulation from %s", filepath)
227
+
228
+ self._create_tabulation(tabulation_nr, tabulation_rmax,
229
+ tabulation_nz, tabulation_zmin,
230
+ tabulation_nb_integration_points)
231
+ LOG.debug("Saving tabulation in %s", filepath)
232
+ np.savez_compressed(
233
+ filepath, r_range=self.tabulated_r_range, z_range=self.tabulated_z_range,
234
+ values=self.tabulated_integrals
235
+ )
236
+ return filename
237
+
238
    def _create_tabulation(self, tabulation_nr, tabulation_rmax,
                           tabulation_nz, tabulation_zmin,
                           tabulation_nb_integration_points):
        """Compute the tabulation grid and the tabulated integrals by calling the Fortran core."""
        LOG.warning("Precomputing tabulation, it may take a few seconds.")
        # r (horizontal) and z (vertical) coordinates of the tabulation points,
        # spaced according to self.tabulation_grid_shape_index.
        self.tabulated_r_range = self.fortran_core.delhommeau_integrals.default_r_spacing(
            tabulation_nr, tabulation_rmax, self.tabulation_grid_shape_index
        )
        self.tabulated_z_range = self.fortran_core.delhommeau_integrals.default_z_spacing(
            tabulation_nz, tabulation_zmin, self.tabulation_grid_shape_index
        )
        # Numerical integration of Delhommeau's integrals over the whole (r, z) grid.
        self.tabulated_integrals = self.fortran_core.delhommeau_integrals.construct_tabulation(
            self.tabulated_r_range, self.tabulated_z_range, tabulation_nb_integration_points,
        )
251
+
252
    @property
    def all_tabulation_parameters(self):
        """An alias meant to pass to the Fortran functions all the parameters controlling the tabulation in a single item.

        The order of the tuple matches the argument order of the Fortran
        routines (see the `build_matrices` call in :meth:`evaluate`); do not reorder.
        """
        return (self.tabulation_nb_integration_points, self.tabulation_grid_shape_index,
                self.tabulated_r_range, self.tabulated_z_range, self.tabulated_integrals)
257
+
258
    @lru_cache(maxsize=128)
    def find_best_exponential_decomposition(self, dimensionless_wavenumber, *, method=None):
        """Compute the decomposition of a part of the finite water_depth Green function as a sum of exponential functions.

        Two implementations are available: the legacy Fortran implementation from Nemoh and a newer one written in Python.
        For some still unexplained reasons, the two implementations do not always give the exact same result.
        The implementation used by default is the one set in
        `self.finite_depth_prony_decomposition_method` (class default: `'python'`,
        see `_default_parameters`).

        Results are cached. (NOTE(review): `lru_cache` on a method also keys on
        `self` and keeps cached instances alive for the cache's lifetime.)

        Parameters
        ----------
        dimensionless_wavenumber: float
            dimensionless wavenumber: :math:`kh`
        method: str, optional
            "python" or "fortran". If not provided, uses self.finite_depth_prony_decomposition_method.

        Returns
        -------
        np.ndarray of shape (2, nexp)
            the amplitude and growth rates of the exponentials
        """
        kh = dimensionless_wavenumber

        if method is None:
            method = self.finite_depth_prony_decomposition_method

        LOG.debug("\tCompute Prony decomposition in finite water_depth Green function "
                  "for dimensionless_wavenumber=%.2e", dimensionless_wavenumber)

        if method.lower() == 'python':
            if kh <= 0.1:
                # Very shallow-water regime: not supported by this decomposition.
                raise NotImplementedError(
                    f"{self} cannot evaluate finite depth Green function "
                    f"for kh<0.1 (kh={kh})"
                )
            elif kh < 1e5:
                # The function that will be approximated.
                # The `sing_coef/(x - kh)` term removes the singularity at x = kh
                # before fitting by a sum of exponentials.
                sing_coef = (1 + np.tanh(kh))**2/(1 - np.tanh(kh)**2 + np.tanh(kh)/kh)
                def ref_function(x):
                    """The function that should be approximated by a sum of exponentials."""
                    return ((x + kh*np.tanh(kh)) * np.exp(x))/(x*np.sinh(x) - kh*np.tanh(kh)*np.cosh(x)) - sing_coef/(x - kh) - 2
            else:
                # Asymptotic approximation of the function for large kh, including infinite frequency
                def ref_function(x):
                    return -2/(1 + np.exp(-2*x)) + 2

            try:
                # `lamda` (sic): avoids shadowing the `lambda` keyword.
                a, lamda = find_best_exponential_decomposition(ref_function, x_min=-0.1, x_max=20.0, n_exp_range=range(4, 31, 2), tol=1e-4)
                return np.stack([lamda, a])
            except PronyDecompositionFailure as e:
                raise GreenFunctionEvaluationError(
                    f"{self} cannot evaluate finite depth Green function "
                    f"for kh={dimensionless_wavenumber}"
                ) from e

        elif method.lower() == 'fortran':
            if kh > 1e5:
                raise NotImplementedError("Fortran implementation of the Prony decomposition does not support infinite frequency")
            # Dispersion relation: omega^2 h / g = kh tanh(kh)
            omega2_h_over_g = kh*np.tanh(kh)
            nexp, pr_d = self.fortran_core.old_prony_decomposition.lisc(omega2_h_over_g, kh)
            # The Fortran routine returns a fixed-size array; keep only the
            # `nexp` exponentials actually found.
            return pr_d[0:2, :nexp]

        else:
            raise ValueError(f"Unrecognized name for the Prony decomposition method: {repr(method)}. Expected 'python' or 'fortran'.")
324
+
325
    def evaluate_rankine_only(
            self,
            mesh1, mesh2, *,
            adjoint_double_layer=True, early_dot_product=True,
            diagonal_term_in_double_layer=True,
            ):
        r"""Construct the matrices between mesh1 (that can also be a list of points)
        and mesh2 for a Rankine kernel.

        Parameters
        ----------
        mesh1: MeshLike or list of points
            mesh of the receiving body (where the potential is measured)
            if only S is wanted or early_dot_product is False, then only a list
            of points as an array of shape (n, 3) can be passed.
        mesh2: MeshLike
            mesh of the source body (over which the source distribution is integrated)
        adjoint_double_layer: bool, optional
            compute double layer for direct method (F) or adjoint double layer
            for indirect method (T) matrices (default: True)
        early_dot_product: boolean, optional
            if False, return K as a (n, m, 3) array storing ∫∇G
            if True, return K as a (n, m) array storing ∫∇G·n
        diagonal_term_in_double_layer: boolean, optional
            if True, add the I/2 term in the double layer operator
            It is assumed that mesh1 == mesh2, or at least that
            the `n := min(mesh1.nb_faces, mesh2.nb_faces)` first faces
            of each mesh are identical.

        Returns
        -------
        tuple of real-valued numpy arrays
            the matrices :math:`S` and :math:`K`
        """
        collocation_points, early_dot_product_normals = \
            self._get_colocation_points_and_normals(mesh1, mesh2, adjoint_double_layer)

        # S and K are allocated here and filled in place by the Fortran routine.
        S, K = self._init_matrices(
            (collocation_points.shape[0], mesh2.nb_faces), early_dot_product
        )

        # `mesh2.faces + 1`: converts 0-based face indices to Fortran's 1-based indexing.
        self.fortran_core.matrices.add_rankine_term_only(
            collocation_points, early_dot_product_normals,
            mesh2.vertices, mesh2.faces + 1,
            mesh2.faces_centers, mesh2.faces_normals,
            mesh2.faces_areas, mesh2.faces_radiuses,
            *mesh2.quadrature_points,
            adjoint_double_layer,
            S, K)

        if diagonal_term_in_double_layer:
            # Assumes the first n faces of both meshes coincide (see docstring).
            # np.inf plays the role of the free surface position here
            # (no free surface for a pure Rankine kernel; cf. `evaluate`).
            n = min(K.shape[0], K.shape[1])
            self.fortran_core.matrices.add_diagonal_term(
                mesh2.faces_centers[:n, :], early_dot_product_normals, np.inf, K,
            )

        # The Rankine matrices are real-valued; drop the imaginary part.
        S, K = np.real(S), np.real(K)

        if np.any(np.isnan(S)) or np.any(np.isnan(K)):
            raise GreenFunctionEvaluationError(
                "Green function returned a NaN in the interaction matrix.\n"
                "It could be due to overlapping panels.")

        if early_dot_product:
            K = K.reshape((collocation_points.shape[0], mesh2.nb_faces))

        return S, K
392
+
393
+
394
+ def evaluate(
395
+ self, mesh1, mesh2, *,
396
+ free_surface=0.0, water_depth=np.inf, wavenumber=1.0,
397
+ adjoint_double_layer=True, early_dot_product=True,
398
+ diagonal_term_in_double_layer=True,
399
+ ):
400
+ r"""The main method of the class, called by the engine to assemble the influence matrices.
401
+
402
+ Parameters
403
+ ----------
404
+ mesh1: MeshLike or list of points
405
+ mesh of the receiving body (where the potential is measured)
406
+ if only S is wanted or early_dot_product is False, then only a list of points as an array of shape (n, 3) can be passed.
407
+ mesh2: MeshLike
408
+ mesh of the source body (over which the source distribution is integrated)
409
+ free_surface: float, optional
410
+ position of the free surface (default: :math:`z = 0`)
411
+ water_depth: float, optional
412
+ constant depth of water (default: :math:`+\infty`)
413
+ wavenumber: float, optional
414
+ wavenumber (default: 1.0)
415
+ adjoint_double_layer: bool, optional
416
+ compute double layer for direct method (F) or adjoint double layer for indirect method (T) matrices (default: True)
417
+ early_dot_product: boolean, optional
418
+ if False, return K as a (n, m, 3) array storing ∫∇G
419
+ if True, return K as a (n, m) array storing ∫∇G·n
420
+ diagonal_term_in_double_layer: boolean, optional
421
+ if True, add the I/2 term in the double layer operator.
422
+ It is assumed that mesh1 == mesh2, or at least that
423
+ the `n := min(mesh1.nb_faces, mesh2.nb_faces)` first faces
424
+ of each mesh are identical.
425
+
426
+ Returns
427
+ -------
428
+ tuple of numpy arrays
429
+ the matrices :math:`S` and :math:`K`
430
+ the dtype of the matrix can be real or complex and depends on self.floating_point_precision
431
+ """
432
+
433
+ if free_surface == np.inf: # No free surface, only a single Rankine source term
434
+ if water_depth != np.inf:
435
+ raise ValueError("When setting free_surface=inf, "
436
+ "the water depth should also be infinite "
437
+ f"(got water_depth={water_depth})")
438
+
439
+ return self.evaluate_rankine_only(
440
+ mesh1, mesh2,
441
+ adjoint_double_layer=adjoint_double_layer,
442
+ early_dot_product=early_dot_product,
443
+ )
444
+
445
+ # Main case:
446
+ collocation_points, early_dot_product_normals = \
447
+ self._get_colocation_points_and_normals(mesh1, mesh2, adjoint_double_layer)
448
+
449
+ S, K = self._init_matrices(
450
+ (collocation_points.shape[0], mesh2.nb_faces), early_dot_product
451
+ )
452
+
453
+ wavenumber = float(wavenumber)
454
+
455
+ # Overrides gf_singularities setting in some specific cases, else use the class one.
456
+ if water_depth < np.inf and self.finite_depth_method == 'legacy' and not self.gf_singularities == 'low_freq':
457
+ gf_singularities = "low_freq" # Reproduce legacy method behavior
458
+ LOG.debug(
459
+ f"Overriding gf_singularities='{self.gf_singularities}' because of finite_depth_method=='legacy'"
460
+ )
461
+ elif wavenumber == 0.0 and not self.gf_singularities == 'low_freq':
462
+ gf_singularities = "low_freq"
463
+ LOG.debug(
464
+ f"Overriding gf_singularities='{self.gf_singularities}' because of wavenumber==0.0"
465
+ )
466
+ elif wavenumber == np.inf and not self.gf_singularities == 'high_freq':
467
+ gf_singularities = "high_freq"
468
+ LOG.debug(
469
+ f"Overriding gf_singularities='{self.gf_singularities}' because of wavenumber==np.inf"
470
+ )
471
+ elif np.any(abs(mesh2.faces_centers[:, 2]) < 1e-6) and not self.gf_singularities == 'low_freq':
472
+ gf_singularities = "low_freq"
473
+ LOG.warning(
474
+ f"Overriding gf_singularities='{self.gf_singularities}' because of free surface panels, "
475
+ "which are currently only supported by gf_singularities='low_freq'"
476
+ )
477
+ else:
478
+ gf_singularities = self.gf_singularities
479
+ gf_singularities_index = self.gf_singularities_fortran_enum[gf_singularities]
480
+
481
+ if water_depth == np.inf:
482
+ prony_decomposition = np.zeros((1, 1)) # Dummy array that won't actually be used by the fortran code.
483
+ else:
484
+ prony_decomposition = self.find_best_exponential_decomposition(wavenumber*water_depth)
485
+
486
+ # Main call to Fortran code
487
+ self.fortran_core.matrices.build_matrices(
488
+ collocation_points, early_dot_product_normals,
489
+ mesh2.vertices, mesh2.faces + 1,
490
+ mesh2.faces_centers, mesh2.faces_normals,
491
+ mesh2.faces_areas, mesh2.faces_radiuses,
492
+ *mesh2.quadrature_points,
493
+ wavenumber, water_depth,
494
+ *self.all_tabulation_parameters,
495
+ self.finite_depth_method_index, prony_decomposition, self.dispersion_relation_roots,
496
+ gf_singularities_index, adjoint_double_layer,
497
+ S, K
498
+ )
499
+
500
+ if diagonal_term_in_double_layer:
501
+ n = min(K.shape[0], K.shape[1])
502
+ self.fortran_core.matrices.add_diagonal_term(
503
+ mesh2.faces_centers[:n, :], early_dot_product_normals, free_surface, K,
504
+ )
505
+
506
+ if np.any(np.isnan(S)) or np.any(np.isnan(K)):
507
+ raise GreenFunctionEvaluationError(
508
+ "Green function returned a NaN in the interaction matrix.\n"
509
+ "It could be due to overlapping panels.")
510
+
511
+ if early_dot_product:
512
+ K = K.reshape((collocation_points.shape[0], mesh2.nb_faces))
513
+
514
+ return S, K
515
+
516
+ ################################
517
+
518
class XieDelhommeau(Delhommeau):
    """Legacy way to call the gf_singularities="low_freq" variant."""

    def __init__(self, **kwargs):
        # Force the low-frequency singularity variant; every other keyword
        # argument is forwarded unchanged to Delhommeau.
        settings = dict(gf_singularities="low_freq", **kwargs)
        super().__init__(**settings)