capytaine 2.2.1__cp311-cp311-macosx_14_0_arm64.whl → 2.3__cp311-cp311-macosx_14_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48):
  1. capytaine/.dylibs/libgcc_s.1.1.dylib +0 -0
  2. capytaine/.dylibs/libgfortran.5.dylib +0 -0
  3. capytaine/.dylibs/libquadmath.0.dylib +0 -0
  4. capytaine/__about__.py +1 -1
  5. capytaine/__init__.py +2 -1
  6. capytaine/bem/airy_waves.py +7 -2
  7. capytaine/bem/problems_and_results.py +78 -34
  8. capytaine/bem/solver.py +127 -39
  9. capytaine/bodies/bodies.py +30 -10
  10. capytaine/bodies/predefined/rectangles.py +2 -0
  11. capytaine/green_functions/FinGreen3D/.gitignore +1 -0
  12. capytaine/green_functions/FinGreen3D/FinGreen3D.f90 +3589 -0
  13. capytaine/green_functions/FinGreen3D/LICENSE +165 -0
  14. capytaine/green_functions/FinGreen3D/Makefile +16 -0
  15. capytaine/green_functions/FinGreen3D/README.md +24 -0
  16. capytaine/green_functions/FinGreen3D/test_program.f90 +39 -0
  17. capytaine/green_functions/LiangWuNoblesse/.gitignore +1 -0
  18. capytaine/green_functions/LiangWuNoblesse/LICENSE +504 -0
  19. capytaine/green_functions/LiangWuNoblesse/LiangWuNoblesseWaveTerm.f90 +751 -0
  20. capytaine/green_functions/LiangWuNoblesse/Makefile +18 -0
  21. capytaine/green_functions/LiangWuNoblesse/README.md +2 -0
  22. capytaine/green_functions/LiangWuNoblesse/test_program.f90 +28 -0
  23. capytaine/green_functions/abstract_green_function.py +55 -3
  24. capytaine/green_functions/delhommeau.py +186 -115
  25. capytaine/green_functions/hams.py +204 -0
  26. capytaine/green_functions/libs/Delhommeau_float32.cpython-311-darwin.so +0 -0
  27. capytaine/green_functions/libs/Delhommeau_float64.cpython-311-darwin.so +0 -0
  28. capytaine/io/bemio.py +14 -2
  29. capytaine/io/mesh_loaders.py +1 -1
  30. capytaine/io/wamit.py +479 -0
  31. capytaine/io/xarray.py +257 -113
  32. capytaine/matrices/linear_solvers.py +1 -1
  33. capytaine/meshes/clipper.py +1 -0
  34. capytaine/meshes/collections.py +11 -1
  35. capytaine/meshes/mesh_like_protocol.py +37 -0
  36. capytaine/meshes/meshes.py +17 -6
  37. capytaine/meshes/symmetric.py +11 -2
  38. capytaine/post_pro/kochin.py +4 -4
  39. capytaine/tools/lists_of_points.py +3 -3
  40. capytaine/tools/prony_decomposition.py +60 -4
  41. capytaine/tools/symbolic_multiplication.py +12 -0
  42. capytaine/tools/timer.py +64 -0
  43. {capytaine-2.2.1.dist-info → capytaine-2.3.dist-info}/METADATA +9 -2
  44. capytaine-2.3.dist-info/RECORD +92 -0
  45. capytaine-2.2.1.dist-info/RECORD +0 -76
  46. {capytaine-2.2.1.dist-info → capytaine-2.3.dist-info}/LICENSE +0 -0
  47. {capytaine-2.2.1.dist-info → capytaine-2.3.dist-info}/WHEEL +0 -0
  48. {capytaine-2.2.1.dist-info → capytaine-2.3.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,18 @@
1
+ BUILD_DIR=build
2
+
3
+ run_test: $(BUILD_DIR)/test_program
4
+ $(BUILD_DIR)/test_program
5
+
6
+ $(BUILD_DIR)/test_program: test_program.f90 $(BUILD_DIR)/LiangWuNoblesseWaveTerm.o
7
+ mkdir -p $(BUILD_DIR)
8
+ gfortran -fopenmp $^ -o $@ -J$(BUILD_DIR)
9
+
10
+ $(BUILD_DIR)/LiangWuNoblesseWaveTerm.o: LiangWuNoblesseWaveTerm.f90
11
+ mkdir -p $(BUILD_DIR)
12
+ gfortran -c $< -o $@ -J$(BUILD_DIR)
13
+
14
+ clean:
15
+ rm -rf $(BUILD_DIR)
16
+ .PHONY: run_test clean
17
+
18
+
@@ -0,0 +1,2 @@
1
+ # Green-function-in-deep-water
2
+ Fortran routine for the evaluation of the Green function in deep water
@@ -0,0 +1,28 @@
1
+ ! Just test that the library compiles and run
2
+
3
+ program test
4
+ use LiangWuNoblesseWaveTerm, only: HavelockGF
5
+ implicit none
6
+
7
+ integer, parameter :: n = 10
8
+
9
+ real(8), dimension(n) :: r, z
10
+ complex(8), dimension(n) :: gf, gf_r
11
+ integer :: i
12
+
13
+ do i = 1, n
14
+ r(i) = real(i, kind=8)/n
15
+ end do
16
+
17
+ do i = 1, n
18
+ z(i) = -real(i, kind=8)/n
19
+ end do
20
+
21
+ !$OMP PARALLEL DO PRIVATE(i)
22
+ do i = 1, n
23
+ call HavelockGF(r(i), z(i), gf(i), gf_r(i))
24
+ end do
25
+
26
+ print*, real(gf(:))
27
+
28
+ end program test
@@ -1,12 +1,64 @@
1
1
  """Abstract structure of a class used to compute the Green function"""
2
- # Copyright (C) 2017-2019 Matthieu Ancellin
3
- # See LICENSE file at <https://github.com/mancellin/capytaine>
2
+ # Copyright (C) 2017-2024 Matthieu Ancellin
3
+ # See LICENSE file at <https://github.com/capytaine/capytaine>
4
4
 
5
5
  from abc import ABC, abstractmethod
6
6
 
7
+ import numpy as np
8
+
9
+ from capytaine.meshes.mesh_like_protocol import MeshLike
10
+
11
+
12
+ class GreenFunctionEvaluationError(Exception):
13
+ pass
14
+
15
+
7
16
  class AbstractGreenFunction(ABC):
8
17
  """Abstract method to evaluate the Green function."""
9
18
 
19
+ floating_point_precision: str
20
+
21
+ def _get_colocation_points_and_normals(self, mesh1, mesh2, adjoint_double_layer):
22
+ if isinstance(mesh1, MeshLike):
23
+ collocation_points = mesh1.faces_centers
24
+ nb_collocation_points = mesh1.nb_faces
25
+ if not adjoint_double_layer: # Computing the D matrix
26
+ early_dot_product_normals = mesh2.faces_normals
27
+ else: # Computing the K matrix
28
+ early_dot_product_normals = mesh1.faces_normals
29
+
30
+ elif isinstance(mesh1, np.ndarray) and mesh1.ndim == 2 and mesh1.shape[1] == 3:
31
+ # This is used when computing potential or velocity at given points in postprocessing
32
+ collocation_points = mesh1
33
+ nb_collocation_points = mesh1.shape[0]
34
+ if not adjoint_double_layer: # Computing the D matrix
35
+ early_dot_product_normals = mesh2.faces_normals
36
+ else: # Computing the K matrix
37
+ early_dot_product_normals = np.zeros((nb_collocation_points, 3))
38
+ # Dummy argument since this method is meant to be used either
39
+ # - to compute potential, then only S is needed and early_dot_product_normals is irrelevant,
40
+ # - to compute velocity, then the adjoint full gradient is needed and early_dot_product is False and this value is unused.
41
+ # TODO: add an only_S argument and return an error here if (early_dot_product and not only_S)
42
+
43
+ else:
44
+ raise ValueError(f"Unrecognized first input for {self.__class__.__name__}.evaluate:\n{mesh1}")
45
+
46
+ return collocation_points, early_dot_product_normals
47
+
48
+ def _init_matrices(self, shape, early_dot_product):
49
+ if self.floating_point_precision == "float32":
50
+ dtype = "complex64"
51
+ elif self.floating_point_precision == "float64":
52
+ dtype = "complex128"
53
+ else:
54
+ raise NotImplementedError(
55
+ f"Unsupported floating point precision: {self.floating_point_precision}"
56
+ )
57
+
58
+ S = np.zeros(shape, order="F", dtype=dtype)
59
+ K = np.zeros((shape[0], shape[1], 1 if early_dot_product else 3), order="F", dtype=dtype)
60
+ return S, K
61
+
10
62
  @abstractmethod
11
- def evaluate(self, mesh1, mesh2, free_surface, sea_bottom, wavenumber):
63
+ def evaluate(self, mesh1, mesh2, free_surface, water_depth, wavenumber, adjoint_double_layer=True, early_dot_product=True):
12
64
  pass
@@ -9,12 +9,10 @@ from importlib import import_module
9
9
 
10
10
  import numpy as np
11
11
 
12
- from capytaine.meshes.meshes import Mesh
13
- from capytaine.meshes.collections import CollectionOfMeshes
14
- from capytaine.tools.prony_decomposition import exponential_decomposition, error_exponential_decomposition
12
+ from capytaine.tools.prony_decomposition import find_best_exponential_decomposition, PronyDecompositionFailure
15
13
  from capytaine.tools.cache_on_disk import cache_directory
16
14
 
17
- from capytaine.green_functions.abstract_green_function import AbstractGreenFunction
15
+ from capytaine.green_functions.abstract_green_function import AbstractGreenFunction, GreenFunctionEvaluationError
18
16
 
19
17
  LOG = logging.getLogger(__name__)
20
18
 
@@ -25,7 +23,8 @@ _default_parameters = dict(
25
23
  tabulation_zmin=-251.0,
26
24
  tabulation_nb_integration_points=1001,
27
25
  tabulation_grid_shape="scaled_nemoh3",
28
- finite_depth_prony_decomposition_method="fortran",
26
+ finite_depth_method="newer",
27
+ finite_depth_prony_decomposition_method="python",
29
28
  floating_point_precision="float64",
30
29
  gf_singularities="low_freq",
31
30
  )
@@ -66,6 +65,8 @@ class Delhommeau(AbstractGreenFunction):
66
65
  Default: calls capytaine.tools.cache_on_disk.cache_directory(), which
67
66
  returns the value of the environment variable CAPYTAINE_CACHE_DIR if
68
67
  set, or else the default cache directory on your system.
68
+ finite_depth_method: string, optional
69
+ The method used to compute the finite depth Green function.
69
70
  finite_depth_prony_decomposition_method: string, optional
70
71
  The implementation of the Prony decomposition used to compute the
71
72
  finite water_depth Green function. Accepted values: :code:`'fortran'`
@@ -88,7 +89,9 @@ class Delhommeau(AbstractGreenFunction):
88
89
  Compiled Fortran module with functions used to compute the Green
89
90
  function.
90
91
  tabulation_grid_shape_index: int
91
- Integer passed to Fortran code to describe which method is used.
92
+ gf_singularities_index: int
93
+ finite_depth_method_index: int
94
+ Integers passed to Fortran code to describe which method is used.
92
95
  tabulated_r_range: numpy.array of shape (tabulation_nr,) and type floating_point_precision
93
96
  tabulated_z_range: numpy.array of shape (tabulation_nz,) and type floating_point_precision
94
97
  Coordinates of the tabulation points.
@@ -96,6 +99,7 @@ class Delhommeau(AbstractGreenFunction):
96
99
  Tabulated Delhommeau integrals.
97
100
  """
98
101
 
102
+ dispersion_relation_roots = np.empty(1) # dummy array
99
103
 
100
104
 
101
105
  def __init__(self, *,
@@ -106,6 +110,7 @@ class Delhommeau(AbstractGreenFunction):
106
110
  tabulation_nb_integration_points=_default_parameters["tabulation_nb_integration_points"],
107
111
  tabulation_grid_shape=_default_parameters["tabulation_grid_shape"],
108
112
  tabulation_cache_dir=cache_directory(),
113
+ finite_depth_method=_default_parameters["finite_depth_method"],
109
114
  finite_depth_prony_decomposition_method=_default_parameters["finite_depth_prony_decomposition_method"],
110
115
  floating_point_precision=_default_parameters["floating_point_precision"],
111
116
  gf_singularities=_default_parameters["gf_singularities"],
@@ -121,12 +126,18 @@ class Delhommeau(AbstractGreenFunction):
121
126
  self.tabulation_grid_shape_index = fortran_enum[tabulation_grid_shape]
122
127
 
123
128
  self.gf_singularities = gf_singularities
124
- fortran_enum = {
129
+ self.gf_singularities_fortran_enum = {
125
130
  'high_freq': self.fortran_core.constants.high_freq,
126
131
  'low_freq': self.fortran_core.constants.low_freq,
127
132
  'low_freq_with_rankine_part': self.fortran_core.constants.low_freq_with_rankine_part,
133
+ }
134
+
135
+ self.finite_depth_method = finite_depth_method
136
+ fortran_enum = {
137
+ 'legacy': self.fortran_core.constants.legacy_finite_depth,
138
+ 'newer': self.fortran_core.constants.newer_finite_depth,
128
139
  }
129
- self.gf_singularities_index = fortran_enum[gf_singularities]
140
+ self.finite_depth_method_index = fortran_enum[finite_depth_method]
130
141
 
131
142
  self.floating_point_precision = floating_point_precision
132
143
  self.tabulation_nb_integration_points = tabulation_nb_integration_points
@@ -152,6 +163,7 @@ class Delhommeau(AbstractGreenFunction):
152
163
  'tabulation_zmin': tabulation_zmin,
153
164
  'tabulation_nb_integration_points': tabulation_nb_integration_points,
154
165
  'tabulation_grid_shape': tabulation_grid_shape,
166
+ 'finite_depth_method': finite_depth_method,
155
167
  'finite_depth_prony_decomposition_method': finite_depth_prony_decomposition_method,
156
168
  'floating_point_precision': floating_point_precision,
157
169
  'gf_singularities': gf_singularities,
@@ -233,8 +245,14 @@ class Delhommeau(AbstractGreenFunction):
233
245
  self.tabulated_r_range, self.tabulated_z_range, tabulation_nb_integration_points,
234
246
  )
235
247
 
248
+ @property
249
+ def all_tabulation_parameters(self):
250
+ """An alias meant to pass to the Fortran functions all the parameters controlling the tabulation in a single item."""
251
+ return (self.tabulation_nb_integration_points, self.tabulation_grid_shape_index,
252
+ self.tabulated_r_range, self.tabulated_z_range, self.tabulated_integrals)
253
+
236
254
  @lru_cache(maxsize=128)
237
- def find_best_exponential_decomposition(self, dimensionless_omega, dimensionless_wavenumber):
255
+ def find_best_exponential_decomposition(self, dimensionless_wavenumber, *, method=None):
238
256
  """Compute the decomposition of a part of the finite water_depth Green function as a sum of exponential functions.
239
257
 
240
258
  Two implementations are available: the legacy Fortran implementation from Nemoh and a newer one written in Python.
@@ -246,70 +264,134 @@ class Delhommeau(AbstractGreenFunction):
246
264
 
247
265
  Parameters
248
266
  ----------
249
- dimensionless_omega: float
250
- dimensionless angular frequency: :math:`kh \\tanh (kh) = \\omega^2 h/g`
251
267
  dimensionless_wavenumber: float
252
268
  dimensionless wavenumber: :math:`kh`
253
- method: string, optional
254
- the implementation that should be used to compute the Prony decomposition
269
+ method: str, optional
270
+ "python" or "fortran". If not provided, uses self.finite_depth_prony_decomposition_method.
255
271
 
256
272
  Returns
257
273
  -------
258
274
  Tuple[np.ndarray, np.ndarray]
259
275
  the amplitude and growth rates of the exponentials
260
276
  """
277
+ kh = dimensionless_wavenumber
278
+
279
+ if method is None:
280
+ method = self.finite_depth_prony_decomposition_method
261
281
 
262
282
  LOG.debug("\tCompute Prony decomposition in finite water_depth Green function "
263
- "for dimless_omega=%.2e and dimless_wavenumber=%.2e",
264
- dimensionless_omega, dimensionless_wavenumber)
283
+ "for dimensionless_wavenumber=%.2e", dimensionless_wavenumber)
284
+
285
+ if method.lower() == 'python':
286
+ if kh <= 0.1:
287
+ raise NotImplementedError(
288
+ f"{self} cannot evaluate finite depth Green function "
289
+ f"for kh<0.1 (kh={kh})"
290
+ )
291
+ elif kh < 1e5:
292
+ # The function that will be approximated.
293
+ sing_coef = (1 + np.tanh(kh))**2/(1 - np.tanh(kh)**2 + np.tanh(kh)/kh)
294
+ def ref_function(x):
295
+ """The function that should be approximated by a sum of exponentials."""
296
+ return ((x + kh*np.tanh(kh)) * np.exp(x))/(x*np.sinh(x) - kh*np.tanh(kh)*np.cosh(x)) - sing_coef/(x - kh) - 2
297
+ else:
298
+ # Asymptotic approximation of the function for large kh, including infinite frequency
299
+ def ref_function(x):
300
+ return -2/(1 + np.exp(-2*x)) + 2
301
+
302
+ try:
303
+ a, lamda = find_best_exponential_decomposition(ref_function, x_min=-0.1, x_max=20.0, n_exp_range=range(4, 31, 2), tol=1e-4)
304
+ return np.stack([lamda, a])
305
+ except PronyDecompositionFailure as e:
306
+ raise GreenFunctionEvaluationError(
307
+ f"{self} cannot evaluate finite depth Green function "
308
+ f"for kh={dimensionless_wavenumber}"
309
+ ) from e
310
+
311
+ elif method.lower() == 'fortran':
312
+ if kh > 1e5:
313
+ raise NotImplementedError("Fortran implementation of the Prony decomposition does not support infinite frequency")
314
+ omega2_h_over_g = kh*np.tanh(kh)
315
+ nexp, pr_d = self.fortran_core.old_prony_decomposition.lisc(omega2_h_over_g, kh)
316
+ return pr_d[0:2, :nexp]
265
317
 
266
- if self.finite_depth_prony_decomposition_method.lower() == 'python':
267
- # The function that will be approximated.
268
- @np.vectorize
269
- def f(x):
270
- return self.fortran_core.initialize_green_wave.ff(x, dimensionless_omega, dimensionless_wavenumber)
318
+ else:
319
+ raise ValueError(f"Unrecognized name for the Prony decomposition method: {repr(method)}. Expected 'python' or 'fortran'.")
271
320
 
272
- # Try different increasing number of exponentials
273
- for n_exp in range(4, 31, 2):
321
+ def evaluate_rankine_only(self,
322
+ mesh1, mesh2,
323
+ adjoint_double_layer=True, early_dot_product=True
324
+ ):
325
+ r"""Construct the matrices between mesh1 (that can also be a list of points)
326
+ and mesh2 for a Rankine kernel.
274
327
 
275
- # The coefficients are computed on a resolution of 4*n_exp+1 ...
276
- X = np.linspace(-0.1, 20.0, 4*n_exp+1)
277
- a, lamda = exponential_decomposition(X, f(X), n_exp)
328
+ Parameters
329
+ ----------
330
+ mesh1: Mesh or CollectionOfMeshes or list of points
331
+ mesh of the receiving body (where the potential is measured)
332
+ if only S is wanted or early_dot_product is False, then only a list
333
+ of points as an array of shape (n, 3) can be passed.
334
+ mesh2: Mesh or CollectionOfMeshes
335
+ mesh of the source body (over which the source distribution is integrated)
336
+ adjoint_double_layer: bool, optional
337
+ compute double layer for direct method (F) or adjoint double layer
338
+ for indirect method (T) matrices (default: True)
339
+ early_dot_product: boolean, optional
340
+ if False, return K as a (n, m, 3) array storing ∫∇G
341
+ if True, return K as a (n, m) array storing ∫∇G·n
278
342
 
279
- # ... and they are evaluated on a finer discretization.
280
- X = np.linspace(-0.1, 20.0, 8*n_exp+1)
281
- if error_exponential_decomposition(X, f(X), a, lamda) < 1e-4:
282
- break
343
+ Returns
344
+ -------
345
+ tuple of real-valued numpy arrays
346
+ the matrices :math:`S` and :math:`K`
347
+ """
348
+ collocation_points, early_dot_product_normals = \
349
+ self._get_colocation_points_and_normals(mesh1, mesh2, adjoint_double_layer)
283
350
 
284
- else:
285
- LOG.warning("No suitable exponential decomposition has been found"
286
- "for dimless_omega=%.2e and dimless_wavenumber=%.2e",
287
- dimensionless_omega, dimensionless_wavenumber)
351
+ S, K = self._init_matrices(
352
+ (collocation_points.shape[0], mesh2.nb_faces), early_dot_product
353
+ )
288
354
 
289
- elif self.finite_depth_prony_decomposition_method.lower() == 'fortran':
290
- lamda, a, nexp = self.fortran_core.old_prony_decomposition.lisc(dimensionless_omega, dimensionless_wavenumber)
291
- lamda = lamda[:nexp]
292
- a = a[:nexp]
355
+ self.fortran_core.matrices.add_rankine_term_only(
356
+ collocation_points, early_dot_product_normals,
357
+ mesh2.vertices, mesh2.faces + 1,
358
+ mesh2.faces_centers, mesh2.faces_normals,
359
+ mesh2.faces_areas, mesh2.faces_radiuses,
360
+ *mesh2.quadrature_points,
361
+ adjoint_double_layer,
362
+ S, K)
293
363
 
294
- else:
295
- raise ValueError("Unrecognized method name for the Prony decomposition.")
364
+ if mesh1 is mesh2:
365
+ self.fortran_core.matrices.add_diagonal_term(
366
+ mesh2.faces_centers, early_dot_product_normals, np.inf, K,
367
+ )
296
368
 
297
- # Add one more exponential function (actually a constant).
298
- # It is not clear where it comes from exactly in the theory...
299
- a = np.concatenate([a, np.array([2])])
300
- lamda = np.concatenate([lamda, np.array([0.0])])
369
+ S, K = np.real(S), np.real(K)
370
+
371
+ if np.any(np.isnan(S)) or np.any(np.isnan(K)):
372
+ raise GreenFunctionEvaluationError(
373
+ "Green function returned a NaN in the interaction matrix.\n"
374
+ "It could be due to overlapping panels.")
375
+
376
+ if early_dot_product:
377
+ K = K.reshape((collocation_points.shape[0], mesh2.nb_faces))
378
+
379
+ return S, K
301
380
 
302
- return a, lamda
303
381
 
304
- def evaluate(self, mesh1, mesh2, free_surface=0.0, water_depth=np.inf, wavenumber=1.0, adjoint_double_layer=True, early_dot_product=True):
382
+ def evaluate(self,
383
+ mesh1, mesh2,
384
+ free_surface=0.0, water_depth=np.inf, wavenumber=1.0,
385
+ adjoint_double_layer=True, early_dot_product=True
386
+ ):
305
387
  r"""The main method of the class, called by the engine to assemble the influence matrices.
306
388
 
307
389
  Parameters
308
390
  ----------
309
- mesh1: Mesh or CollectionOfMeshes or list of points
391
+ mesh1: MeshLike or list of points
310
392
  mesh of the receiving body (where the potential is measured)
311
393
  if only S is wanted or early_dot_product is False, then only a list of points as an array of shape (n, 3) can be passed.
312
- mesh2: Mesh or CollectionOfMeshes
394
+ mesh2: MeshLike
313
395
  mesh of the source body (over which the source distribution is integrated)
314
396
  free_surface: float, optional
315
397
  position of the free surface (default: :math:`z = 0`)
@@ -327,77 +409,61 @@ class Delhommeau(AbstractGreenFunction):
327
409
  -------
328
410
  tuple of numpy arrays
329
411
  the matrices :math:`S` and :math:`K`
412
+ the dtype of the matrix can be real or complex and depends on self.floating_point_precision
330
413
  """
331
414
 
332
- wavenumber = float(wavenumber)
333
-
334
- if free_surface == np.inf: # No free surface, only a single Rankine source term
335
-
336
- a_exp, lamda_exp = np.empty(1), np.empty(1) # Dummy arrays that won't actually be used by the fortran code.
415
+ if free_surface == np.inf: # No free surface, only a single Rankine source term
416
+ if water_depth != np.inf:
417
+ raise ValueError("When setting free_surface=inf, "
418
+ "the water depth should also be infinite "
419
+ f"(got water_depth={water_depth})")
337
420
 
338
- coeffs = np.array((1.0, 0.0, 0.0))
421
+ return self.evaluate_rankine_only(
422
+ mesh1, mesh2,
423
+ adjoint_double_layer=adjoint_double_layer,
424
+ early_dot_product=early_dot_product,
425
+ )
339
426
 
340
- elif water_depth == np.inf:
427
+ # Main case:
428
+ collocation_points, early_dot_product_normals = \
429
+ self._get_colocation_points_and_normals(mesh1, mesh2, adjoint_double_layer)
341
430
 
342
- a_exp, lamda_exp = np.empty(1), np.empty(1) # Idem
431
+ S, K = self._init_matrices(
432
+ (collocation_points.shape[0], mesh2.nb_faces), early_dot_product
433
+ )
343
434
 
344
- if wavenumber == 0.0:
345
- coeffs = np.array((1.0, 1.0, 0.0))
346
- elif wavenumber == np.inf:
347
- coeffs = np.array((1.0, -1.0, 0.0))
348
- else:
349
- if self.gf_singularities == "high_freq":
350
- coeffs = np.array((1.0, -1.0, 1.0))
351
- else: # low_freq or low_freq_with_rankine_part
352
- coeffs = np.array((1.0, 1.0, 1.0))
353
-
354
- else: # Finite water_depth
355
- if wavenumber == 0.0 or wavenumber == np.inf:
356
- raise NotImplementedError("Zero or infinite frequencies not implemented for finite depth.")
357
- else:
358
- a_exp, lamda_exp = self.find_best_exponential_decomposition(
359
- wavenumber*water_depth*np.tanh(wavenumber*water_depth),
360
- wavenumber*water_depth,
361
- )
362
- coeffs = np.array((1.0, 1.0, 1.0))
363
-
364
- if isinstance(mesh1, Mesh) or isinstance(mesh1, CollectionOfMeshes):
365
- collocation_points = mesh1.faces_centers
366
- nb_collocation_points = mesh1.nb_faces
367
- if not adjoint_double_layer: # Computing the D matrix
368
- early_dot_product_normals = mesh2.faces_normals
369
- else: # Computing the K matrix
370
- early_dot_product_normals = mesh1.faces_normals
371
-
372
- elif isinstance(mesh1, np.ndarray) and mesh1.ndim == 2 and mesh1.shape[1] == 3:
373
- # This is used when computing potential or velocity at given points in postprocessing
374
- collocation_points = mesh1
375
- nb_collocation_points = mesh1.shape[0]
376
- if not adjoint_double_layer: # Computing the D matrix
377
- early_dot_product_normals = mesh2.faces_normals
378
- else: # Computing the K matrix
379
- early_dot_product_normals = np.zeros((nb_collocation_points, 3))
380
- # Dummy argument since this method is meant to be used either
381
- # - to compute potential, then only S is needed and early_dot_product_normals is irrelevant,
382
- # - to compute velocity, then the adjoint full gradient is needed and early_dot_product is False and this value is unused.
383
- # TODO: add an only_S argument and return an error here if (early_dot_product and not only_S)
435
+ wavenumber = float(wavenumber)
384
436
 
437
+ # Overrides gf_singularities setting in some specific cases, else use the class one.
438
+ if water_depth < np.inf and self.finite_depth_method == 'legacy' and not self.gf_singularities == 'low_freq':
439
+ gf_singularities = "low_freq" # Reproduce legacy method behavior
440
+ LOG.debug(
441
+ f"Overriding gf_singularities='{self.gf_singularities}' because of finite_depth_method=='legacy'"
442
+ )
443
+ elif wavenumber == 0.0 and not self.gf_singularities == 'low_freq':
444
+ gf_singularities = "low_freq"
445
+ LOG.debug(
446
+ f"Overriding gf_singularities='{self.gf_singularities}' because of wavenumber==0.0"
447
+ )
448
+ elif wavenumber == np.inf and not self.gf_singularities == 'high_freq':
449
+ gf_singularities = "high_freq"
450
+ LOG.debug(
451
+ f"Overriding gf_singularities='{self.gf_singularities}' because of wavenumber==np.inf"
452
+ )
453
+ elif np.any(abs(mesh2.faces_centers[:, 2]) < 1e-6) and not self.gf_singularities == 'low_freq':
454
+ gf_singularities = "low_freq"
455
+ LOG.warning(
456
+ f"Overriding gf_singularities='{self.gf_singularities}' because of free surface panels, "
457
+ "which are currently only supported by gf_singularities='low_freq'"
458
+ )
385
459
  else:
386
- raise ValueError(f"Unrecognized first input for {self.__class__.__name__}.evaluate:\n{mesh1}")
460
+ gf_singularities = self.gf_singularities
461
+ gf_singularities_index = self.gf_singularities_fortran_enum[gf_singularities]
387
462
 
388
- if (np.any(abs(mesh2.faces_centers[:, 2]) < 1e-6) # free surface panel
389
- and self.gf_singularities != "low_freq"):
390
- raise NotImplementedError("Free surface panels are only supported for cpt.Delhommeau(..., gf_singularities='low_freq').")
391
-
392
- if self.floating_point_precision == "float32":
393
- dtype = "complex64"
394
- elif self.floating_point_precision == "float64":
395
- dtype = "complex128"
463
+ if water_depth == np.inf:
464
+ prony_decomposition = np.zeros((1, 1)) # Dummy array that won't actually be used by the fortran code.
396
465
  else:
397
- raise NotImplementedError(f"Unsupported floating point precision: {self.floating_point_precision}")
398
-
399
- S = np.empty((nb_collocation_points, mesh2.nb_faces), order="F", dtype=dtype)
400
- K = np.empty((nb_collocation_points, mesh2.nb_faces, 1 if early_dot_product else 3), order="F", dtype=dtype)
466
+ prony_decomposition = self.find_best_exponential_decomposition(wavenumber*water_depth)
401
467
 
402
468
  # Main call to Fortran code
403
469
  self.fortran_core.matrices.build_matrices(
@@ -407,19 +473,24 @@ class Delhommeau(AbstractGreenFunction):
407
473
  mesh2.faces_areas, mesh2.faces_radiuses,
408
474
  *mesh2.quadrature_points,
409
475
  wavenumber, water_depth,
410
- coeffs,
411
- self.tabulation_nb_integration_points, self.tabulation_grid_shape_index,
412
- self.tabulated_r_range, self.tabulated_z_range, self.tabulated_integrals,
413
- lamda_exp, a_exp,
414
- mesh1 is mesh2, self.gf_singularities_index, adjoint_double_layer,
476
+ *self.all_tabulation_parameters,
477
+ self.finite_depth_method_index, prony_decomposition, self.dispersion_relation_roots,
478
+ gf_singularities_index, adjoint_double_layer,
415
479
  S, K
416
480
  )
417
481
 
482
+ if mesh1 is mesh2:
483
+ self.fortran_core.matrices.add_diagonal_term(
484
+ mesh2.faces_centers, early_dot_product_normals, free_surface, K,
485
+ )
486
+
418
487
  if np.any(np.isnan(S)) or np.any(np.isnan(K)):
419
- raise RuntimeError("Green function returned a NaN in the interaction matrix.\n"
488
+ raise GreenFunctionEvaluationError(
489
+ "Green function returned a NaN in the interaction matrix.\n"
420
490
  "It could be due to overlapping panels.")
421
491
 
422
- if early_dot_product: K = K.reshape((nb_collocation_points, mesh2.nb_faces))
492
+ if early_dot_product:
493
+ K = K.reshape((collocation_points.shape[0], mesh2.nb_faces))
423
494
 
424
495
  return S, K
425
496