capytaine 2.3.1__cp314-cp314-macosx_14_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. capytaine/.dylibs/libgcc_s.1.1.dylib +0 -0
  2. capytaine/.dylibs/libgfortran.5.dylib +0 -0
  3. capytaine/.dylibs/libquadmath.0.dylib +0 -0
  4. capytaine/__about__.py +16 -0
  5. capytaine/__init__.py +36 -0
  6. capytaine/bem/__init__.py +0 -0
  7. capytaine/bem/airy_waves.py +111 -0
  8. capytaine/bem/engines.py +441 -0
  9. capytaine/bem/problems_and_results.py +600 -0
  10. capytaine/bem/solver.py +594 -0
  11. capytaine/bodies/__init__.py +4 -0
  12. capytaine/bodies/bodies.py +1221 -0
  13. capytaine/bodies/dofs.py +19 -0
  14. capytaine/bodies/predefined/__init__.py +6 -0
  15. capytaine/bodies/predefined/cylinders.py +151 -0
  16. capytaine/bodies/predefined/rectangles.py +111 -0
  17. capytaine/bodies/predefined/spheres.py +70 -0
  18. capytaine/green_functions/FinGreen3D/.gitignore +1 -0
  19. capytaine/green_functions/FinGreen3D/FinGreen3D.f90 +3589 -0
  20. capytaine/green_functions/FinGreen3D/LICENSE +165 -0
  21. capytaine/green_functions/FinGreen3D/Makefile +16 -0
  22. capytaine/green_functions/FinGreen3D/README.md +24 -0
  23. capytaine/green_functions/FinGreen3D/test_program.f90 +39 -0
  24. capytaine/green_functions/LiangWuNoblesse/.gitignore +1 -0
  25. capytaine/green_functions/LiangWuNoblesse/LICENSE +504 -0
  26. capytaine/green_functions/LiangWuNoblesse/LiangWuNoblesseWaveTerm.f90 +751 -0
  27. capytaine/green_functions/LiangWuNoblesse/Makefile +16 -0
  28. capytaine/green_functions/LiangWuNoblesse/README.md +2 -0
  29. capytaine/green_functions/LiangWuNoblesse/test_program.f90 +28 -0
  30. capytaine/green_functions/__init__.py +2 -0
  31. capytaine/green_functions/abstract_green_function.py +64 -0
  32. capytaine/green_functions/delhommeau.py +507 -0
  33. capytaine/green_functions/hams.py +204 -0
  34. capytaine/green_functions/libs/Delhommeau_float32.cpython-314-darwin.so +0 -0
  35. capytaine/green_functions/libs/Delhommeau_float64.cpython-314-darwin.so +0 -0
  36. capytaine/green_functions/libs/__init__.py +0 -0
  37. capytaine/io/__init__.py +0 -0
  38. capytaine/io/bemio.py +153 -0
  39. capytaine/io/legacy.py +328 -0
  40. capytaine/io/mesh_loaders.py +1086 -0
  41. capytaine/io/mesh_writers.py +692 -0
  42. capytaine/io/meshio.py +38 -0
  43. capytaine/io/wamit.py +479 -0
  44. capytaine/io/xarray.py +668 -0
  45. capytaine/matrices/__init__.py +16 -0
  46. capytaine/matrices/block.py +592 -0
  47. capytaine/matrices/block_toeplitz.py +325 -0
  48. capytaine/matrices/builders.py +89 -0
  49. capytaine/matrices/linear_solvers.py +232 -0
  50. capytaine/matrices/low_rank.py +395 -0
  51. capytaine/meshes/__init__.py +6 -0
  52. capytaine/meshes/clipper.py +465 -0
  53. capytaine/meshes/collections.py +342 -0
  54. capytaine/meshes/geometry.py +409 -0
  55. capytaine/meshes/mesh_like_protocol.py +37 -0
  56. capytaine/meshes/meshes.py +890 -0
  57. capytaine/meshes/predefined/__init__.py +6 -0
  58. capytaine/meshes/predefined/cylinders.py +314 -0
  59. capytaine/meshes/predefined/rectangles.py +261 -0
  60. capytaine/meshes/predefined/spheres.py +62 -0
  61. capytaine/meshes/properties.py +276 -0
  62. capytaine/meshes/quadratures.py +80 -0
  63. capytaine/meshes/quality.py +448 -0
  64. capytaine/meshes/surface_integrals.py +63 -0
  65. capytaine/meshes/symmetric.py +462 -0
  66. capytaine/post_pro/__init__.py +6 -0
  67. capytaine/post_pro/free_surfaces.py +88 -0
  68. capytaine/post_pro/impedance.py +92 -0
  69. capytaine/post_pro/kochin.py +54 -0
  70. capytaine/post_pro/rao.py +60 -0
  71. capytaine/tools/__init__.py +0 -0
  72. capytaine/tools/cache_on_disk.py +26 -0
  73. capytaine/tools/deprecation_handling.py +18 -0
  74. capytaine/tools/lists_of_points.py +52 -0
  75. capytaine/tools/lru_cache.py +49 -0
  76. capytaine/tools/optional_imports.py +27 -0
  77. capytaine/tools/prony_decomposition.py +150 -0
  78. capytaine/tools/symbolic_multiplication.py +149 -0
  79. capytaine/tools/timer.py +66 -0
  80. capytaine/ui/__init__.py +0 -0
  81. capytaine/ui/cli.py +28 -0
  82. capytaine/ui/rich.py +5 -0
  83. capytaine/ui/vtk/__init__.py +3 -0
  84. capytaine/ui/vtk/animation.py +329 -0
  85. capytaine/ui/vtk/body_viewer.py +28 -0
  86. capytaine/ui/vtk/helpers.py +82 -0
  87. capytaine/ui/vtk/mesh_viewer.py +461 -0
  88. capytaine-2.3.1.dist-info/LICENSE +674 -0
  89. capytaine-2.3.1.dist-info/METADATA +750 -0
  90. capytaine-2.3.1.dist-info/RECORD +92 -0
  91. capytaine-2.3.1.dist-info/WHEEL +6 -0
  92. capytaine-2.3.1.dist-info/entry_points.txt +3 -0
capytaine/post_pro/kochin.py ADDED
@@ -0,0 +1,54 @@
+ """Computation of the Kochin function."""
+ # Copyright (C) 2017-2019 Matthieu Ancellin
+ # See LICENSE file at <https://github.com/capytaine/capytaine>
+
+ import logging
+ import numpy as np
+
+ LOG = logging.getLogger(__name__)
+
+ def compute_kochin(result, theta, ref_point=(0.0, 0.0)):
+     """Compute the far-field coefficient.
+
+     Parameters
+     ----------
+     result: LinearPotentialFlowResult
+         solved potential flow problem
+     theta: float or 1-dim array of floats
+         angles at which the coefficient is computed
+     ref_point: pair of floats, optional
+         point of reference around which the far-field coefficient is computed
+
+     Returns
+     -------
+     H: same type as theta
+         values of the Kochin function
+     """
+
+     if result.forward_speed != 0.0:
+         LOG.warning("Kochin functions with forward speed have never been validated.")
+
+     if result.sources is None:
+         raise ValueError(f"""The values of the sources of {result} cannot be found.
+         They have not been stored by the solver, either because the direct method was used or because the option keep_details=True was not set.
+         Please re-run the resolution with `method='indirect'` and `keep_details=True`.""")
+
+     k = result.wavenumber
+     h = result.water_depth
+
+     # omega_bar.shape = (nb_faces, 2) @ (2, nb_theta)
+     omega_bar = (result.body.mesh.faces_centers[:, 0:2] - ref_point) @ (np.cos(theta), np.sin(theta))
+
+     if 0 <= k*h < 20:
+         cih = np.cosh(k*(result.body.mesh.faces_centers[:, 2]+h))/np.cosh(k*h)
+     else:
+         cih = np.exp(k*result.body.mesh.faces_centers[:, 2])
+
+     # cih.shape = (nb_faces,)
+     # omega_bar.T.shape = (nb_theta, nb_faces)
+     # result.body.mesh.faces_areas.shape = (nb_faces,)
+     zs = cih * np.exp(-1j * k * omega_bar.T) * result.body.mesh.faces_areas
+
+     # zs.shape = (nb_theta, nb_faces)
+     # result.sources.shape = (nb_faces,)
+     return zs @ result.sources/(4*np.pi)
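For context, a minimal usage sketch of this new function (not part of the diff; the body and solver setup assume the public Capytaine 2.x API)::

    import numpy as np
    import capytaine as cpt
    from capytaine.post_pro.kochin import compute_kochin

    # Hypothetical minimal radiation problem: a heaving sphere.
    body = cpt.FloatingBody(mesh=cpt.mesh_sphere(),
                            dofs=cpt.rigid_body_dofs(rotation_center=(0, 0, 0)))
    body = body.immersed_part()
    pb = cpt.RadiationProblem(body=body, omega=1.0, radiating_dof="Heave")
    res = cpt.BEMSolver().solve(pb, keep_details=True)  # indirect method is the default

    theta = np.linspace(0.0, 2*np.pi, 73)
    H = compute_kochin(res, theta)  # complex array with the same shape as theta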
capytaine/post_pro/rao.py ADDED
@@ -0,0 +1,60 @@
+ """Experimental function to compute the Response Amplitude Operator."""
+ # Copyright (C) 2017-2019 Matthieu Ancellin
+ # See LICENSE file at <https://github.com/mancellin/capytaine>
+
+ import logging
+
+ import numpy as np
+ import xarray as xr
+ from capytaine.post_pro.impedance import rao_transfer_function
+
+ LOG = logging.getLogger(__name__)
+
+
+ def rao(dataset, wave_direction=None, dissipation=None, stiffness=None):
+     """Response Amplitude Operator.
+
+     Parameters
+     ----------
+     dataset: xarray Dataset
+         The hydrodynamical dataset.
+         This function assumes that variables named 'inertia_matrix' and 'hydrostatic_stiffness' are in the dataset.
+         Other variables can be computed by Capytaine, but those two should be added manually to the dataset.
+     wave_direction: float, optional
+         Select a single wave direction for the computation. (Not recommended; kept for legacy.)
+         Default: all wave directions in the dataset.
+     dissipation: array, optional
+         An optional dissipation matrix (e.g. Power Take Off) to be included in the RAO.
+         Default: none.
+     stiffness: array, optional
+         An optional stiffness matrix (e.g. mooring stiffness) to be included in the RAO.
+         Default: none.
+
+     Returns
+     -------
+     xarray DataArray
+         The RAO as an array depending on omega and the degrees of freedom.
+     """
+
+     # ASSEMBLE MATRICES
+     H = rao_transfer_function(dataset, dissipation, stiffness)
+     fex = dataset.excitation_force
+
+     LOG.info("Compute RAO.")
+
+     # SOLVE LINEAR SYSTEMS
+     # Match dimensions of the arrays to be sure to solve the right systems.
+     H, fex = xr.broadcast(H, fex, exclude=["radiating_dof", "influenced_dof"])
+     H = H.transpose(..., 'radiating_dof', 'influenced_dof')
+     fex = fex.transpose(..., 'influenced_dof')
+
+     if wave_direction is not None:  # Legacy behavior for backward compatibility
+         H = H.sel(wave_direction=wave_direction)
+         fex = fex.sel(wave_direction=wave_direction)
+
+     # Solve and add coordinates
+     rao_dims = [d for d in H.dims if d != 'influenced_dof']
+     rao_coords = {c: H.coords[c] for c in H.coords if c != 'influenced_dof'}
+     rao = xr.DataArray(np.linalg.solve(H.values, fex.values[..., np.newaxis])[..., 0], coords=rao_coords, dims=rao_dims)
+
+     return rao
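An end-to-end sketch of how this function is typically fed (the body setup and `fill_dataset` call assume the public Capytaine 2.x API; `fill_dataset` is expected to copy the two required matrices into the dataset when they are set on the body first)::

    import numpy as np
    import xarray as xr
    import capytaine as cpt
    from capytaine.post_pro.rao import rao

    body = cpt.FloatingBody(mesh=cpt.mesh_sphere(),
                            dofs=cpt.rigid_body_dofs(rotation_center=(0, 0, 0)),
                            center_of_mass=(0.0, 0.0, 0.0))
    body = body.immersed_part()
    # The docstring above requires these two variables in the dataset:
    body.inertia_matrix = body.compute_rigid_body_inertia()
    body.hydrostatic_stiffness = body.compute_hydrostatic_stiffness()

    test_matrix = xr.Dataset(coords={"omega": np.linspace(0.5, 2.0, 5),
                                     "wave_direction": [0.0],
                                     "radiating_dof": list(body.dofs)})
    dataset = cpt.BEMSolver().fill_dataset(test_matrix, body)
    rao_da = rao(dataset)  # DataArray over omega, wave_direction and radiating_dof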
capytaine/tools/__init__.py
File without changes
capytaine/tools/cache_on_disk.py ADDED
@@ -0,0 +1,26 @@
+ """
+ Adapted from https://github.com/platformdirs/platformdirs (MIT Licensed)
+ """
+ import os
+ import sys
+ from pathlib import Path
+
+ from capytaine import __version__
+
+
+ def cache_directory():
+     if "CAPYTAINE_CACHE_DIR" in os.environ:
+         path = os.path.join(os.environ["CAPYTAINE_CACHE_DIR"], __version__)
+     elif sys.platform == "win32":  # Windows
+         path = os.path.normpath(os.environ.get("LOCALAPPDATA"))
+         path = os.path.join(path, "capytaine", "Cache", __version__)
+     elif sys.platform == "darwin":  # macOS
+         path = os.path.expanduser("~/Library/Caches")
+         path = os.path.join(path, "capytaine", __version__)
+     else:
+         path = os.environ.get("XDG_CACHE_HOME", "")
+         if path.strip() == "":
+             path = os.path.expanduser("~/.cache")
+         path = os.path.join(path, "capytaine", __version__)
+     Path(path).mkdir(parents=True, exist_ok=True)
+     return path
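The resolution order above gives, concretely (paths are illustrative and derived from the code)::

    from capytaine.tools.cache_on_disk import cache_directory

    path = cache_directory()
    # CAPYTAINE_CACHE_DIR=/tmp/capy  ->  /tmp/capy/2.3.1
    # macOS default                  ->  ~/Library/Caches/capytaine/2.3.1
    # Linux default                  ->  $XDG_CACHE_HOME/capytaine/2.3.1, else ~/.cache/capytaine/2.3.1
    # The directory is created on the first call.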
capytaine/tools/deprecation_handling.py ADDED
@@ -0,0 +1,18 @@
+ import logging
+
+ import numpy as np
+
+ LOG = logging.getLogger(__name__)
+
+ def _get_water_depth(free_surface, water_depth, sea_bottom, default_water_depth=np.inf):
+     if water_depth is None and sea_bottom is None:
+         return default_water_depth
+     elif water_depth is not None and sea_bottom is None:
+         if water_depth <= 0.0:
+             raise ValueError(f"`water_depth` should be strictly positive. Received value: {water_depth}")
+         return float(water_depth)
+     elif water_depth is None and sea_bottom is not None:
+         LOG.warning("To uniformize notations throughout Capytaine, setting `water_depth` is preferred to `sea_bottom` since version 2.0.")
+         return float(free_surface - sea_bottom)
+     else:
+         raise ValueError("Cannot give both a `water_depth` and a `sea_bottom`.")
capytaine/tools/lists_of_points.py ADDED
@@ -0,0 +1,52 @@
+ import numpy as np
+ from capytaine.bodies import FloatingBody
+ from capytaine.post_pro.free_surfaces import FreeSurface
+ from capytaine.meshes.mesh_like_protocol import MeshLike
+
+
+ def _normalize_points(points, keep_mesh=False):
+     if isinstance(points, (FloatingBody, FreeSurface)):
+         if keep_mesh:
+             return points.mesh, (points.mesh.nb_faces,)
+         else:
+             return points.mesh.faces_centers, (points.mesh.nb_faces,)
+
+     if isinstance(points, MeshLike):
+         if keep_mesh:
+             return points, (points.nb_faces,)
+         else:
+             return points.faces_centers, (points.nb_faces,)
+
+     points = np.asarray(points)
+
+     if points.ndim == 1:  # A single point has been provided
+         output_shape = (1,)
+         points = points.reshape((1, points.shape[0]))
+
+     elif points.ndim == 2:
+         output_shape = (points.shape[0],)
+
+     elif points.ndim > 2:
+         # `points` is expected to be the result of a meshgrid, with shape (d, nx, ny, ...)
+         output_shape = points.shape[1:]
+         points = points.reshape(points.shape[0], -1).transpose()
+         # points is now a (nx*ny*..., d) array
+
+     else:
+         raise ValueError(f"Expected a list of points or a mesh, but got instead: {points}")
+
+     return points, output_shape
+
+ def _normalize_free_surface_points(points, keep_mesh=False):
+     if keep_mesh and isinstance(points, (FloatingBody, FreeSurface)):
+         return points.mesh, (points.mesh.nb_faces,)
+
+     if keep_mesh and isinstance(points, MeshLike):
+         return points, (points.nb_faces,)
+
+     points, output_shape = _normalize_points(points, keep_mesh)
+
+     if points.ndim == 2 and points.shape[1] == 2:  # Only x and y have been provided
+         points = np.concatenate([points, np.zeros((points.shape[0], 1))], axis=1)
+
+     return points, output_shape
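A short sketch of the meshgrid branch (the shapes in the comments follow from the code above)::

    import numpy as np
    from capytaine.tools.lists_of_points import _normalize_free_surface_points

    X, Y = np.meshgrid(np.linspace(-1, 1, 10), np.linspace(-1, 1, 10))
    points = np.stack([X, Y])                          # shape (2, 10, 10)
    pts, output_shape = _normalize_free_surface_points(points)
    # pts.shape == (100, 3): z=0 has been appended to every (x, y) point
    # output_shape == (10, 10): lets callers reshape results back onto the grid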
capytaine/tools/lru_cache.py ADDED
@@ -0,0 +1,49 @@
+ # Copyright (C) 2017-2024 Matthieu Ancellin
+ # See LICENSE file at <https://github.com/capytaine/capytaine>
+ """Tools for memoization of functions."""
+ from collections import OrderedDict
+ from functools import wraps
+
+ import logging
+
+ LOG = logging.getLogger(__name__)
+
+
+ def lru_cache_with_strict_maxsize(maxsize=1):
+     """Behaves mostly like functools.lru_cache(), except that the oldest item in the
+     cache is deleted *before* computing a new one, in order to *never* have more than
+     `maxsize` items in memory.
+     This is useful to limit RAM usage when the stored objects are big, like the
+     interaction matrices of Capytaine."""
+
+     def decorator(f):
+         cache = OrderedDict()
+
+         @wraps(f)
+         def decorated_f(*args, **kwargs):
+             hashable_kwargs = tuple((k, v) for (k, v) in kwargs.items())
+             # Might miss a cache hit if the order of kwargs is changed.
+             # But at least, unlike a previous version, it should not return a wrong value.
+
+             if (args, hashable_kwargs) in cache:
+                 # Get item in cache
+                 LOG.debug("Get cached version of %s(%s, %s)", f.__name__, args, hashable_kwargs)
+                 return cache[(args, hashable_kwargs)]
+
+             if len(cache) + 1 > maxsize:
+                 # Drop oldest item in cache.
+                 cache.popitem(last=False)
+
+             # Compute and store
+             LOG.debug("Computing %s(%s, %s)", f.__name__, args, hashable_kwargs)
+             result = f(*args, **kwargs)
+             cache[(args, hashable_kwargs)] = result
+
+             return result
+
+         return decorated_f
+
+     return decorator
+
+
+ delete_first_lru_cache = lru_cache_with_strict_maxsize  # For backward compatibility...
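Usage mirrors `functools.lru_cache`; a minimal sketch based on the code above::

    from capytaine.tools.lru_cache import lru_cache_with_strict_maxsize

    @lru_cache_with_strict_maxsize(maxsize=2)
    def build_matrix(n):
        print(f"computing {n}")
        return [[0.0] * n for _ in range(n)]

    build_matrix(3)  # "computing 3"
    build_matrix(3)  # cache hit: nothing printed
    build_matrix(4)  # "computing 4"
    build_matrix(5)  # "computing 5"; the entry for 3 is evicted *before* this is computed
    build_matrix(3)  # "computing 3" again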
capytaine/tools/optional_imports.py ADDED
@@ -0,0 +1,27 @@
+ """Tool to import optional dependencies. Inspired by similar code in pandas."""
+
+ import importlib
+
+ def import_optional_dependency(module_name: str, package_name: str = None):
+     try:
+         module = importlib.import_module(module_name)
+     except ImportError:
+         if package_name is None:
+             package_name = module_name
+
+         message = (
+             f"Missing optional dependency '{module_name}'. "
+             f"Use pip or conda to install {package_name}."
+         )
+         raise ImportError(message) from None
+
+     return module
+
+ def silently_import_optional_dependency(module_name: str):
+     # Same as above, except that it does not raise an exception when the module is not found.
+     # Instead, it simply returns None.
+     try:
+         module = importlib.import_module(module_name)
+     except ImportError:
+         module = None
+     return module
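Typical call sites look like this (module names are illustrative)::

    from capytaine.tools.optional_imports import (
        import_optional_dependency,
        silently_import_optional_dependency,
    )

    # Hard requirement at the point of use: a helpful ImportError if absent.
    vtk = import_optional_dependency("vtk")

    # Soft requirement: returns None if absent, so the caller can degrade gracefully.
    matplotlib = silently_import_optional_dependency("matplotlib")
    if matplotlib is not None:
        print("plotting enabled")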
capytaine/tools/prony_decomposition.py ADDED
@@ -0,0 +1,150 @@
+ """Prony decomposition: tool to approximate a function as a sum of exponentials.
+ Used in particular in the finite-depth Green function.
+ """
+ # Copyright (C) 2017-2024 Matthieu Ancellin
+ # See LICENSE file at <https://github.com/capytaine/capytaine>
+
+ import logging
+
+ import numpy as np
+ from numpy.polynomial import polynomial
+ from scipy.optimize import curve_fit
+ from scipy.linalg import toeplitz
+
+ LOG = logging.getLogger(__name__)
+ RNG = np.random.default_rng()
+
+
+ def exponential_decomposition(X, F, m):
+     """Use Prony's method to approximate the sampled real function F=f(X) as a sum of m
+     exponential functions x → Σ a_i exp(lamda_i x).
+
+     Parameters
+     ----------
+     X: 1D array
+         sampling points.
+     F: 1D array (same size as X)
+         values of the function to approximate at the points of X.
+     m: integer
+         number of exponential functions
+
+     Returns
+     -------
+     a: 1D array (size m)
+         coefficients of the exponentials
+     lamda: 1D array (size m)
+         growth rates of the exponentials
+     """
+     assert X.shape == F.shape
+
+     # Compute the coefficients of the polynomials of Prony's method
+     A = toeplitz(c=F[m-1:-1], r=F[:m][::-1])
+     P, *_ = np.linalg.lstsq(A, F[m:], rcond=None)
+
+     # Build and solve polynomial function
+     coeffs = np.ones(m+1)
+     # coeffs[:m] = -P[::-1]
+     for i in range(m):
+         coeffs[m-i-1] = -P[i]
+     roots = polynomial.polyroots(coeffs)
+
+     # Discard values where log is undefined
+     roots = roots[np.logical_or(np.imag(roots) != 0.0, np.real(roots) >= 0.0)]
+
+     # Deduce lamda and keep only interesting values
+     lamda = np.real(np.log(roots)/(X[1] - X[0]))
+     lamda = np.unique(lamda)
+     lamda = lamda[np.logical_and(-20.0 < lamda, lamda < 0.0)]
+
+     # Fit the values of 'a' on the curve
+     def f(x, *ar):
+         ar = np.asarray(ar)[:, np.newaxis]
+         la = lamda[:, np.newaxis]
+         return np.sum(ar * np.exp(la * x), axis=0)
+     a, *_ = curve_fit(f, X, F, p0=np.zeros(lamda.shape))
+
+     return a, lamda
+
+
+ def error_exponential_decomposition(X, F, a, lamda):
+     """Mean square error of the exponential decomposition defined by the
+     coefficients a and lamda with respect to the reference values in F.
+
+     Parameters
+     ----------
+     X: 1D array
+         sampling points
+     F: 1D array (same size as X)
+         reference values
+     a: 1D array
+         coefficients of the exponentials
+     lamda: 1D array (same size as a)
+         growth rates of the exponentials
+
+     Returns
+     -------
+     error: float
+         mean square error of the decomposition
+     """
+     a = np.asarray(a)[:, np.newaxis]
+     lamda = np.asarray(lamda)[:, np.newaxis]
+
+     def f(x):
+         return np.sum(a * np.exp(lamda*x), axis=0)
+
+     return np.square(f(X) - F).mean()
+
+
+ class PronyDecompositionFailure(Exception):
+     pass
+
+
+ def find_best_exponential_decomposition(f, x_min, x_max, n_exp_range, *, tol=1e-4, noise_on_domain_points_std=0.01):
+     """Tries to construct an exponential decomposition of the function f on the
+     domain [x_min, x_max] by testing the numbers of exponentials in n_exp_range.
+
+     Parameters
+     ----------
+     f: callable
+         The function ℝ→ℝ to be approximated.
+         Should support vectorized calls (that is, passing a vector of inputs
+         returns the vector of corresponding outputs)
+     x_min, x_max: floats
+         The bounds of the input domain on which f should be approximated
+     n_exp_range: iterable of ints
+         The decomposition sizes that will be tested
+     tol: float, optional
+         The target mean square error.
+     noise_on_domain_points_std: float, optional
+         Introduces some random variability on the points where the function is evaluated.
+         Set this parameter to zero to disable randomness.
+
+     """
+     # Try each candidate number of exponentials in turn to construct the decomposition.
+     for n_exp in n_exp_range:
+
+         # f might be ill-defined at some single specific values
+         # (for the use-case of delhommeau.py, it is when x = kh exactly).
+         # Thus we slightly randomize the range of evaluation points for the Prony decomposition.
+         # This way, if one of the evaluation points hits the singular point, it will most likely not hit it again at the next iteration.
+         x_max_iter = (1 + noise_on_domain_points_std*RNG.uniform())*x_max
+
+         try:
+             # The coefficients are computed on a resolution of 4*n_exp+1 ...
+             X = np.linspace(x_min, x_max_iter, 4*n_exp+1)
+             a, lamda = exponential_decomposition(X, f(X), n_exp)
+
+             # ... and they are evaluated on a finer discretization.
+             X = np.linspace(x_min, x_max_iter, 8*n_exp+1)
+             if error_exponential_decomposition(X, f(X), a, lamda) < tol:
+                 return a, lamda
+         except Exception:
+             # If something bad happened while computing the decomposition, try
+             # the next one.
+             continue
+
+     raise PronyDecompositionFailure(
+         "No suitable Prony decomposition has been found in "
+         f"[{x_min}, {x_max}] for tol={tol} "
+         f"using a number of terms in {n_exp_range}."
+     )
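A small self-check of the public entry point (the target function is illustrative; its true decay rates, -1 and -2, lie inside the accepted range (-20, 0))::

    import numpy as np
    from capytaine.tools.prony_decomposition import (
        error_exponential_decomposition,
        find_best_exponential_decomposition,
    )

    def f(x):
        return np.exp(-x) + 0.5 * np.exp(-2 * x)  # already a sum of two exponentials

    a, lamda = find_best_exponential_decomposition(f, 0.1, 10.0, range(2, 10))
    X = np.linspace(0.1, 10.0, 200)
    print(error_exponential_decomposition(X, f(X), a, lamda))  # below the default tol of 1e-4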
capytaine/tools/symbolic_multiplication.py ADDED
@@ -0,0 +1,149 @@
+ """This module is used for the handling of zero and infinite frequencies.
+ In these cases, the quantities that the solver has to manipulate have the form of ω times a non-zero term.
+ Instead of evaluating this multiplication as zero or infinity, we keep it symbolic using the class defined here.
+
+ The frequency can be provided to the solver as something like
+ `SymbolicMultiplication("0", 1.0)` (that is, zero) and the solver will return an
+ output of the form `SymbolicMultiplication("0", np.array(...))`
+ (which is also actually zero, except that we may be interested in the non-zero array).
+ """
+
+ import numpy as np
+ from functools import wraps, total_ordering
+
+ class SymbolicMultiplication:
+     def __init__(self, symbol, value=1.0):
+         self.symbol = symbol
+         self.value = value
+         if hasattr(value, "shape"):
+             self.shape = value.shape  # When wrapping Numpy arrays
+
+     def __format__(self, format_spec):
+         return f"{self.symbol}×{self.value.__format__(format_spec)}"
+
+     __array_priority__ = 1.0
+
+     def __array_function__(self, func, types, *args, **kwargs):
+         if func in {np.real, np.imag, np.sum}:
+             return SymbolicMultiplication(self.symbol, func(self.value))
+         else:
+             return NotImplemented
+
+     def __str__(self):
+         return f"{self.symbol}×{self.value}"
+
+     def __repr__(self):
+         return f"SymbolicMultiplication(\"{self.symbol}\", {repr(self.value)})"
+
+     def __add__(self, x):
+         return self._concretize() + x
+
+     def __radd__(self, x):
+         return x + self._concretize()
+
+     def __neg__(self):
+         return SymbolicMultiplication(self.symbol, -self.value)
+
+     def __mul__(self, x):
+         return SymbolicMultiplication(self.symbol, self.value * x)
+
+     def __rmul__(self, x):
+         return SymbolicMultiplication(self.symbol, x * self.value)
+
+     def __pow__(self, n):
+         if n == 2:
+             return self * self
+         else:
+             raise NotImplementedError
+
+     def __truediv__(self, x):
+         if hasattr(x, 'symbol') and self.symbol == x.symbol:
+             return self.value / x.value
+         else:
+             return SymbolicMultiplication(self.symbol, self.value / x)
+
+     def __rtruediv__(self, x):
+         if hasattr(x, 'symbol') and self.symbol == x.symbol:
+             return x.value / self.value
+         elif self.symbol == "0":
+             return SymbolicMultiplication("∞", x/self.value)
+         elif self.symbol == "∞":
+             return SymbolicMultiplication("0", x/self.value)
+         else:
+             raise NotImplementedError
+
+     def __matmul__(self, x):
+         return SymbolicMultiplication(self.symbol, self.value @ x)
+
+     def __rmatmul__(self, x):
+         return SymbolicMultiplication(self.symbol, x @ self.value)
+
+     def __getitem__(self, item):
+         return SymbolicMultiplication(self.symbol, self.value[item])
+
+     def __setitem__(self, item, val):
+         if isinstance(val, SymbolicMultiplication) and self.symbol == val.symbol:
+             self.value.__setitem__(item, val.value)
+         else:
+             raise NotImplementedError
+
+     def __lt__(self, x):
+         return self._concretize() < x
+
+     def __le__(self, x):
+         return self._concretize() <= x
+
+     def __eq__(self, x):
+         return self._concretize() == x
+
+     def __ge__(self, x):
+         return self._concretize() >= x
+
+     def __gt__(self, x):
+         return self._concretize() > x
+
+     def __hash__(self):
+         return hash((self.symbol, self.value))
+
+     def _concretize(self):
+         if isinstance(self.value, np.ndarray):
+             if self.symbol == "0":
+                 return np.zeros_like(self.value)
+             elif self.symbol == "∞":
+                 return np.full_like(self.value, np.inf)
+         else:
+             return float(self)
+
+     def __float__(self):
+         if self.symbol == "0":
+             return 0.0 * float(self.value)
+         elif self.symbol == "∞":
+             return np.inf * float(self.value)
+         else:
+             raise NotImplementedError
+
+     def reshape(self, *args):
+         return SymbolicMultiplication(self.symbol, self.value.reshape(*args))
+
+     def sum(self, *args, **kwargs):
+         return SymbolicMultiplication(self.symbol, self.value.sum(*args, **kwargs))
+
+     @property
+     def T(self):
+         return SymbolicMultiplication(self.symbol, self.value.T)
+
+
+ def supporting_symbolic_multiplication(f):
+     """
+     When this decorator is applied to a function, the function can then take
+     as input a `SymbolicMultiplication` object. The function is applied to the
+     `value` part of the `SymbolicMultiplication` without modifying the
+     `symbol`.
+     """
+     @wraps(f)
+     def wrapped_f(a, x):
+         if hasattr(x, 'symbol'):
+             return SymbolicMultiplication(x.symbol, f(a, x.value))
+         else:
+             return f(a, x)
+     return wrapped_f
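To illustrate the intended algebra (each line follows from the operators defined above)::

    from capytaine.tools.symbolic_multiplication import SymbolicMultiplication

    zero = SymbolicMultiplication("0", 1.0)   # a symbolic "zero frequency"
    v = 2.0 * zero                            # stays symbolic: 0×2.0
    print(float(v))                           # 0.0
    print(v / zero)                           # 2.0: the shared "0" symbols cancel out
    inv = 1.0 / zero                          # dividing by "zero" flips the symbol
    print(inv)                                # ∞×1.0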
capytaine/tools/timer.py ADDED
@@ -0,0 +1,66 @@
+ """A simple timer class used to measure the time spent in various parts of the BEM solver."""
+
+ from functools import wraps
+ import time
+
+ class Timer:
+     """A simple timer that can be used as a context manager or as a decorator via its `wraps_function` method.
+
+     Example
+     -------
+     ::
+
+         timer = Timer()
+         with timer:
+             sleep(1.0)
+
+         print(timer.total)  # 1.0...
+
+         @timer.wraps_function
+         def my_function():
+             sleep(0.5)
+
+         my_function()
+         print(timer.total)  # 1.5...
+         my_function()
+         print(timer.total)  # 2.0...
+
+         print(timer.timings)  # [1.0, 0.5, 0.5]
+     """
+
+     def __init__(self, timings=None):
+         if timings is None:
+             self.timings = []
+         else:
+             self.timings = timings
+
+     def __repr__(self):
+         return f"Timer({self.timings})"
+
+     @property
+     def nb_timings(self):
+         return len(self.timings)
+
+     @property
+     def total(self):
+         return sum(self.timings)
+
+     @property
+     def mean(self):
+         if self.nb_timings == 0:
+             return float('nan')
+         else:
+             return self.total/self.nb_timings
+
+     def __enter__(self):
+         self.start_time = time.perf_counter()
+
+     def __exit__(self, *exc):
+         self.timings.append(time.perf_counter() - self.start_time)
+
+     def wraps_function(self, f):
+         @wraps(f)
+         def wrapped_f(*args, **kwargs):
+             with self:
+                 return f(*args, **kwargs)
+         return wrapped_f
capytaine/ui/__init__.py
File without changes
capytaine/ui/cli.py ADDED
@@ -0,0 +1,28 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+ """Experimental command-line interface for Capytaine."""
+ # Copyright (C) 2017-2023 Matthieu Ancellin
+ # See LICENSE file at <https://github.com/capytaine/capytaine>
+
+ import argparse
+
+ import capytaine as cpt
+ from capytaine.io.legacy import run_cal_file
+
+ cpt.set_logging()
+
+ parser = argparse.ArgumentParser(description="Command-line interface for Capytaine taking Nemoh.cal files as input and returning Tecplot files.")
+ parser.add_argument('paramfiles',
+                     default=['./Nemoh.cal'],
+                     nargs='*',
+                     help='path of parameter files (default: ./Nemoh.cal)')
+
+
+ def main():
+     args = parser.parse_args()
+     for paramfile in args.paramfiles:
+         run_cal_file(paramfile)
+
+
+ if __name__ == '__main__':
+     main()