capytaine 3.0.0a1__cp310-cp310-macosx_15_0_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- capytaine/.dylibs/libgcc_s.1.1.dylib +0 -0
- capytaine/.dylibs/libgfortran.5.dylib +0 -0
- capytaine/.dylibs/libquadmath.0.dylib +0 -0
- capytaine/__about__.py +21 -0
- capytaine/__init__.py +32 -0
- capytaine/bem/__init__.py +0 -0
- capytaine/bem/airy_waves.py +111 -0
- capytaine/bem/engines.py +321 -0
- capytaine/bem/problems_and_results.py +601 -0
- capytaine/bem/solver.py +718 -0
- capytaine/bodies/__init__.py +4 -0
- capytaine/bodies/bodies.py +630 -0
- capytaine/bodies/dofs.py +146 -0
- capytaine/bodies/hydrostatics.py +540 -0
- capytaine/bodies/multibodies.py +216 -0
- capytaine/green_functions/Delhommeau_float32.cpython-310-darwin.so +0 -0
- capytaine/green_functions/Delhommeau_float64.cpython-310-darwin.so +0 -0
- capytaine/green_functions/__init__.py +2 -0
- capytaine/green_functions/abstract_green_function.py +64 -0
- capytaine/green_functions/delhommeau.py +522 -0
- capytaine/green_functions/hams.py +210 -0
- capytaine/io/__init__.py +0 -0
- capytaine/io/bemio.py +153 -0
- capytaine/io/legacy.py +228 -0
- capytaine/io/wamit.py +479 -0
- capytaine/io/xarray.py +673 -0
- capytaine/meshes/__init__.py +2 -0
- capytaine/meshes/abstract_meshes.py +375 -0
- capytaine/meshes/clean.py +302 -0
- capytaine/meshes/clip.py +347 -0
- capytaine/meshes/export.py +89 -0
- capytaine/meshes/geometry.py +259 -0
- capytaine/meshes/io.py +433 -0
- capytaine/meshes/meshes.py +826 -0
- capytaine/meshes/predefined/__init__.py +6 -0
- capytaine/meshes/predefined/cylinders.py +280 -0
- capytaine/meshes/predefined/rectangles.py +202 -0
- capytaine/meshes/predefined/spheres.py +55 -0
- capytaine/meshes/quality.py +159 -0
- capytaine/meshes/surface_integrals.py +82 -0
- capytaine/meshes/symmetric_meshes.py +641 -0
- capytaine/meshes/visualization.py +353 -0
- capytaine/post_pro/__init__.py +6 -0
- capytaine/post_pro/free_surfaces.py +85 -0
- capytaine/post_pro/impedance.py +92 -0
- capytaine/post_pro/kochin.py +54 -0
- capytaine/post_pro/rao.py +60 -0
- capytaine/tools/__init__.py +0 -0
- capytaine/tools/block_circulant_matrices.py +275 -0
- capytaine/tools/cache_on_disk.py +26 -0
- capytaine/tools/deprecation_handling.py +18 -0
- capytaine/tools/lists_of_points.py +52 -0
- capytaine/tools/memory_monitor.py +45 -0
- capytaine/tools/optional_imports.py +27 -0
- capytaine/tools/prony_decomposition.py +150 -0
- capytaine/tools/symbolic_multiplication.py +161 -0
- capytaine/tools/timer.py +90 -0
- capytaine/ui/__init__.py +0 -0
- capytaine/ui/cli.py +28 -0
- capytaine/ui/rich.py +5 -0
- capytaine-3.0.0a1.dist-info/LICENSE +674 -0
- capytaine-3.0.0a1.dist-info/METADATA +755 -0
- capytaine-3.0.0a1.dist-info/RECORD +65 -0
- capytaine-3.0.0a1.dist-info/WHEEL +6 -0
- capytaine-3.0.0a1.dist-info/entry_points.txt +3 -0
capytaine/tools/block_circulant_matrices.py
@@ -0,0 +1,275 @@
+"""Implementation of block circulant matrices to be used for optimizing resolution with symmetries."""
+# Copyright (C) 2025 Capytaine developers
+# See LICENSE file at <https://github.com/capytaine/capytaine>
+
+import logging
+import numpy as np
+from typing import List, Union, Sequence
+from numpy.typing import NDArray, ArrayLike
+import scipy.linalg as sl
+
+LOG = logging.getLogger(__name__)
+
+
+def circular_permutation(l: List, i: int) -> List:
+    return l[-i:] + l[:-i]
+
+
+def leading_dimensions_at_the_end(a):
+    """Transform an array of shape (n, m, ...) into (..., n, m).
+    Inverse of `ending_dimensions_at_the_beginning`."""
+    return np.moveaxis(a, [0, 1], [-2, -1])
+
+
+def ending_dimensions_at_the_beginning(a):
+    """Transform an array of shape (..., n, m) into (n, m, ...).
+    Inverse of `leading_dimensions_at_the_end`."""
+    return np.moveaxis(a, [-2, -1], [0, 1])
+
+
+class BlockCirculantMatrix:
+    """Data-sparse representation of a block matrix of the following form
+
+    ( a d c b )
+    ( b a d c )
+    ( c b a d )
+    ( d c b a )
+
+    where a, b, c and d are matrices of the same shape.
+
+    Parameters
+    ----------
+    blocks: Sequence of matrix-like, can also be an ndarray of shape (nb_blocks, n, n, ...)
+        The **first column** of blocks [a, b, c, d, ...].
+        Each block should have the same shape.
+    """
+    def __init__(self, blocks: Sequence[ArrayLike]):
+        self.blocks = blocks
+        self.nb_blocks = len(blocks)
+        assert all(self.blocks[0].shape == b.shape for b in self.blocks[1:])
+        assert all(self.blocks[0].dtype == b.dtype for b in self.blocks[1:])
+        self.shape = (
+            self.nb_blocks*self.blocks[0].shape[0],
+            self.nb_blocks*self.blocks[0].shape[1],
+            *self.blocks[0].shape[2:]
+        )
+        self.ndim = len(self.shape)
+        self.dtype = self.blocks[0].dtype
+
+    def __array__(self, dtype=None, copy=True):
+        if not copy:
+            raise NotImplementedError
+        if dtype is None:
+            dtype = self.dtype
+        full_blocks = [np.asarray(b) for b in self.blocks]  # Transform all blocks to numpy arrays
+        first_row = [full_blocks[0], *(full_blocks[1:][::-1])]
+        if self.ndim >= 3:
+            # Need to permute_dims to conform to `block` usage when the array is more than 2D
+            first_row = [leading_dimensions_at_the_end(b) for b in first_row]
+        full_matrix = np.block([[b for b in circular_permutation(first_row, i)]
+                                for i in range(self.nb_blocks)]).astype(dtype)
+        if self.ndim >= 3:
+            full_matrix = ending_dimensions_at_the_beginning(full_matrix)
+        return full_matrix
+
+    def __add__(self, other):
+        if isinstance(other, BlockCirculantMatrix) and self.shape == other.shape:
+            return BlockCirculantMatrix([a + b for (a, b) in zip(self.blocks, other.blocks)])
+        else:
+            return NotImplemented
+
+    def __sub__(self, other):
+        if isinstance(other, BlockCirculantMatrix) and self.shape == other.shape:
+            return BlockCirculantMatrix([a - b for (a, b) in zip(self.blocks, other.blocks)])
+        else:
+            return NotImplemented
+
+    def __matmul__(self, other):
+        if self.nb_blocks == 2 and isinstance(other, np.ndarray) and other.ndim == 1:
+            a, b = self.blocks
+            x1, x2 = other[:len(other)//2], other[len(other)//2:]
+            y = np.concatenate([a @ x1 + b @ x2, b @ x1 + a @ x2], axis=0)
+            return y
+        elif self.nb_blocks == 3 and isinstance(other, np.ndarray) and other.ndim == 1:
+            a, b, c = self.blocks
+            n = len(other)
+            x1, x2, x3 = other[:n//3], other[n//3:2*n//3], other[2*n//3:]
+            y = np.concatenate([
+                a @ x1 + c @ x2 + b @ x3,
+                b @ x1 + a @ x2 + c @ x3,
+                c @ x1 + b @ x2 + a @ x3,
+            ], axis=0)
+            return y
+        elif isinstance(other, np.ndarray) and other.ndim == 1:
+            y = np.zeros(other.shape, dtype=np.result_type(self.dtype, other.dtype))
+            blocks_indices = list(range(self.nb_blocks))
+            for i, x_i in enumerate(np.split(other, self.nb_blocks)):
+                y += np.concatenate([self.blocks[j] @ x_i for j in circular_permutation(blocks_indices, i)])
+            return y
+        else:
+            return NotImplemented
+
+    def matvec(self, other):
+        return self.__matmul__(other)
+
+    def block_diagonalize(self) -> "BlockDiagonalMatrix":
+        if self.ndim == 2 and self.nb_blocks == 2:
+            a, b = self.blocks
+            return BlockDiagonalMatrix([a + b, a - b])
+        elif self.ndim == 2 and self.nb_blocks == 3:
+            a, b, c = self.blocks
+            return BlockDiagonalMatrix([
+                a + b + c,
+                a + np.exp(-2j*np.pi/3, dtype=self.dtype) * b + np.exp(2j*np.pi/3, dtype=self.dtype) * c,
+                a + np.exp(2j*np.pi/3, dtype=self.dtype) * b + np.exp(-2j*np.pi/3, dtype=self.dtype) * c,
+            ])
+        elif self.ndim == 2 and self.nb_blocks == 4:
+            a, b, c, d = self.blocks
+            return BlockDiagonalMatrix([
+                a + b + c + d,
+                a - 1j*b - c + 1j*d,
+                a - b + c - d,
+                a + 1j*b - c - 1j*d,
+            ])
+        elif self.ndim == 2 and all(isinstance(b, np.ndarray) for b in self.blocks):
+            return BlockDiagonalMatrix(np.fft.fft(np.asarray(self.blocks), axis=0))
+        else:
+            raise NotImplementedError()
+
+    def solve(self, b: np.ndarray) -> np.ndarray:
+        LOG.debug("Called solve on %s of shape %s",
+                  self.__class__.__name__, self.shape)
+        n = self.nb_blocks
+        b_fft = np.fft.fft(b.reshape((n, b.shape[0]//n)), axis=0).reshape(b.shape)
+        res_fft = self.block_diagonalize().solve(b_fft)
+        res = np.fft.ifft(res_fft.reshape((n, b.shape[0]//n)), axis=0).reshape(b.shape)
+        LOG.debug("Done")
+        return res
+
+
+class BlockDiagonalMatrix:
+    """Data-sparse representation of a block matrix of the following form
+
+    ( a 0 0 0 )
+    ( 0 b 0 0 )
+    ( 0 0 c 0 )
+    ( 0 0 0 d )
+
+    where a, b, c and d are matrices of the same shape.
+
+    Parameters
+    ----------
+    blocks: iterable of matrix-like
+        The blocks [a, b, c, d, ...]
+    """
+    def __init__(self, blocks: Sequence[ArrayLike]):
+        self.blocks = blocks
+        self.nb_blocks = len(blocks)
+        assert all(blocks[0].shape == b.shape for b in blocks[1:])
+        self.shape = (
+            sum(bl.shape[0] for bl in blocks),
+            sum(bl.shape[1] for bl in blocks)
+        )
+        self.ndim = blocks[0].ndim  # used by __array__ when the blocks have extra trailing dimensions
+        assert all(blocks[0].dtype == b.dtype for b in blocks[1:])
+        self.dtype = blocks[0].dtype
+
+    def __array__(self, dtype=None, copy=True):
+        if not copy:
+            raise NotImplementedError
+        if dtype is None:
+            dtype = self.dtype
+        full_blocks = [np.asarray(b) for b in self.blocks]  # Transform all blocks to numpy arrays
+        if self.ndim >= 3:
+            full_blocks = [leading_dimensions_at_the_end(b) for b in full_blocks]
+        full_matrix = np.block([
+            [full_blocks[i] if i == j else np.zeros(full_blocks[i].shape)
+             for j in range(self.nb_blocks)]
+            for i in range(self.nb_blocks)]).astype(dtype)
+        if self.ndim >= 3:
+            full_matrix = ending_dimensions_at_the_beginning(full_matrix)
+        return full_matrix
+
+    def solve(self, b: np.ndarray) -> np.ndarray:
+        LOG.debug("Called solve on %s of shape %s",
+                  self.__class__.__name__, self.shape)
+        n = self.nb_blocks
+        rhs = np.split(b, n)
+        res = [np.linalg.solve(Ai, bi) if isinstance(Ai, np.ndarray) else Ai.solve(bi)
+               for (Ai, bi) in zip(self.blocks, rhs)]
+        LOG.debug("Done")
+        return np.hstack(res)
+
+
+MatrixLike = Union[np.ndarray, BlockDiagonalMatrix, BlockCirculantMatrix]
+
+
+def lu_decompose(A: MatrixLike, *, overwrite_a: bool = False):
+    if isinstance(A, np.ndarray):
+        return LUDecomposedMatrix(A, overwrite_a=overwrite_a)
+    elif isinstance(A, BlockDiagonalMatrix):
+        return LUDecomposedBlockDiagonalMatrix(A, overwrite_a=overwrite_a)
+    elif isinstance(A, BlockCirculantMatrix):
+        return LUDecomposedBlockCirculantMatrix(A, overwrite_a=overwrite_a)
+    else:
+        raise NotImplementedError()
+
+
+class LUDecomposedMatrix:
+    def __init__(self, A: NDArray, *, overwrite_a: bool = False):
+        LOG.debug("LU decomp of %s of shape %s",
+                  A.__class__.__name__, A.shape)
+        self._lu_decomp = sl.lu_factor(A, overwrite_a=overwrite_a)
+        self.shape = A.shape
+        self.dtype = A.dtype
+
+    def solve(self, b: np.ndarray) -> np.ndarray:
+        LOG.debug("Called solve on %s of shape %s",
+                  self.__class__.__name__, self.shape)
+        return sl.lu_solve(self._lu_decomp, b)
+
+
+class LUDecomposedBlockDiagonalMatrix:
+    """LU decomposition of a BlockDiagonalMatrix,
+    stored as the LU decomposition of each block."""
+    def __init__(self, bdm: BlockDiagonalMatrix, *, overwrite_a: bool = False):
+        LOG.debug("LU decomp of %s of shape %s",
+                  bdm.__class__.__name__, bdm.shape)
+        self._lu_decomp = [lu_decompose(bl, overwrite_a=overwrite_a) for bl in bdm.blocks]
+        self.shape = bdm.shape
+        self.nb_blocks = bdm.nb_blocks
+        self.dtype = bdm.dtype
+
+    def solve(self, b: np.ndarray) -> np.ndarray:
+        LOG.debug("Called solve on %s of shape %s",
+                  self.__class__.__name__, self.shape)
+        rhs = np.split(b, self.nb_blocks)
+        res = [Ai.solve(bi) for (Ai, bi) in zip(self._lu_decomp, rhs)]
+        return np.hstack(res)
+
+
+class LUDecomposedBlockCirculantMatrix:
+    def __init__(self, bcm: BlockCirculantMatrix, *, overwrite_a: bool = False):
+        LOG.debug("LU decomp of %s of shape %s",
+                  bcm.__class__.__name__, bcm.shape)
+        self._lu_decomp = lu_decompose(bcm.block_diagonalize(), overwrite_a=overwrite_a)
+        self.shape = bcm.shape
+        self.nb_blocks = bcm.nb_blocks
+        self.dtype = bcm.dtype
+
+    def solve(self, b: np.ndarray) -> np.ndarray:
+        LOG.debug("Called solve on %s of shape %s",
+                  self.__class__.__name__, self.shape)
+        n = self.nb_blocks
+        b_fft = np.fft.fft(b.reshape((n, b.shape[0]//n)), axis=0).reshape(b.shape)
+        res_fft = self._lu_decomp.solve(b_fft)
+        res = np.fft.ifft(res_fft.reshape((n, b.shape[0]//n)), axis=0).reshape(b.shape)
+        return res
+
+
+LUDecomposedMatrixLike = Union[LUDecomposedMatrix, LUDecomposedBlockDiagonalMatrix, LUDecomposedBlockCirculantMatrix]
+
+
+def has_been_lu_decomposed(A):
+    # Python 3.8 does not support isinstance(A, LUDecomposedMatrixLike)
+    return isinstance(A, (LUDecomposedMatrix, LUDecomposedBlockDiagonalMatrix, LUDecomposedBlockCirculantMatrix))
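The `solve` methods above rely on the fact that a block circulant matrix is block-diagonalized by the discrete Fourier transform across the block index: an FFT turns the system into `nb_blocks` independent dense systems, one per Fourier mode, which is what `block_diagonalize` builds (with hard-coded diagonalizations for 2, 3 and 4 blocks). For illustration, here is a minimal usage sketch; it is not part of the package. The blocks are taken complex, as in Capytaine's BEM systems, since the hard-coded three-block diagonalization casts its Fourier phase factors to the matrix dtype.

import numpy as np
from capytaine.tools.block_circulant_matrices import BlockCirculantMatrix, lu_decompose

rng = np.random.default_rng(0)
# First column of blocks (a, b, c): the full matrix is 12x12, but only three 4x4 blocks are stored.
blocks = [rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4)) for _ in range(3)]
A = BlockCirculantMatrix(blocks)
A_full = np.asarray(A)  # densified reference
x = rng.normal(size=12) + 1j * rng.normal(size=12)

# Matrix-vector product without densifying
assert np.allclose(A @ x, A_full @ x)

# Solve through the FFT-based block diagonalization
assert np.allclose(A.solve(A_full @ x), x)

# LU-decompose once (block by block, in Fourier space), then reuse for several right-hand sides
lu_A = lu_decompose(A)
assert np.allclose(lu_A.solve(A_full @ x), x)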
capytaine/tools/cache_on_disk.py
@@ -0,0 +1,26 @@
+"""
+Adapted from https://github.com/platformdirs/platformdirs (MIT Licensed)
+"""
+import os
+import sys
+from pathlib import Path
+
+from capytaine import __version__
+
+
+def cache_directory():
+    if "CAPYTAINE_CACHE_DIR" in os.environ:
+        path = os.path.join(os.environ["CAPYTAINE_CACHE_DIR"], __version__)
+    elif sys.platform == "win32":  # Windows
+        path = os.path.normpath(os.environ.get("LOCALAPPDATA"))
+        path = os.path.join(path, "capytaine", "Cache", __version__)
+    elif sys.platform == "darwin":  # MacOS
+        path = os.path.expanduser("~/Library/Caches")
+        path = os.path.join(path, "capytaine", __version__)
+    else:
+        path = os.environ.get("XDG_CACHE_HOME", "")
+        if path.strip() == "":
+            path = os.path.expanduser("~/.cache")
+        path = os.path.join(path, "capytaine", __version__)
+    Path(path).mkdir(parents=True, exist_ok=True)
+    return path
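`cache_directory()` resolves, in order: an explicit `CAPYTAINE_CACHE_DIR` override, then the platform convention (`%LOCALAPPDATA%` on Windows, `~/Library/Caches` on macOS, `$XDG_CACHE_HOME` or `~/.cache` elsewhere). The path is always suffixed with the Capytaine version so caches from different releases do not collide, and the directory is created if needed. A short usage sketch; the override path below is only an example:

import os
from capytaine.tools.cache_on_disk import cache_directory

print(cache_directory())  # e.g. ~/.cache/capytaine/3.0.0a1 on Linux

# Redirect the cache, e.g. on a cluster where the home directory is not writable.
os.environ["CAPYTAINE_CACHE_DIR"] = "/tmp/capytaine-cache"  # example path
print(cache_directory())  # /tmp/capytaine-cache/3.0.0a1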
capytaine/tools/deprecation_handling.py
@@ -0,0 +1,18 @@
+import logging
+
+import numpy as np
+
+LOG = logging.getLogger(__name__)
+
+def _get_water_depth(free_surface, water_depth, sea_bottom, default_water_depth=np.inf):
+    if water_depth is None and sea_bottom is None:
+        return default_water_depth
+    elif water_depth is not None and sea_bottom is None:
+        if water_depth <= 0.0:
+            raise ValueError(f"`water_depth` should be strictly positive. Received value: {water_depth}")
+        return float(water_depth)
+    elif water_depth is None and sea_bottom is not None:
+        LOG.warning("To uniformize notations throughout Capytaine, setting `water_depth` is preferred to `sea_bottom` since version 2.0.")
+        return float(free_surface - sea_bottom)
+    else:
+        raise ValueError("Cannot give both a `water_depth` and a `sea_bottom`.")
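`_get_water_depth` is the shim that lets problem definitions accept both the current `water_depth` argument and the legacy `sea_bottom` one. The three accepted call patterns, as a quick sketch:

import numpy as np
from capytaine.tools.deprecation_handling import _get_water_depth

# Neither given: fall back to the default (infinite depth).
assert _get_water_depth(0.0, None, None) == np.inf

# Current style: the depth is passed directly (and must be strictly positive).
assert _get_water_depth(0.0, 10.0, None) == 10.0

# Legacy style: the depth is recovered from the free surface and sea bottom
# elevations, and a deprecation warning is logged.
assert _get_water_depth(0.0, None, -10.0) == 10.0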
capytaine/tools/lists_of_points.py
@@ -0,0 +1,52 @@
+import numpy as np
+from capytaine.bodies import FloatingBody
+from capytaine.post_pro.free_surfaces import FreeSurface
+from capytaine.meshes.abstract_meshes import AbstractMesh
+
+
+def _normalize_points(points, keep_mesh=False):
+    if isinstance(points, (FloatingBody, FreeSurface)):
+        if keep_mesh:
+            return points.mesh, (points.mesh.nb_faces,)
+        else:
+            return points.mesh.faces_centers, (points.mesh.nb_faces,)
+
+    if isinstance(points, AbstractMesh):
+        if keep_mesh:
+            return points, (points.nb_faces,)
+        else:
+            return points.faces_centers, (points.nb_faces,)
+
+    points = np.asarray(points)
+
+    if points.ndim == 1:  # A single point has been provided
+        output_shape = (1,)
+        points = points.reshape((1, points.shape[0]))
+
+    elif points.ndim == 2:
+        output_shape = (points.shape[0],)
+
+    elif points.ndim > 2:
+        # `points` is expected to be the result of a meshgrid. Points has shape (d, nx, ny, ...)
+        output_shape = points.shape[1:]
+        points = points.reshape(points.shape[0], -1).transpose()
+        # points is now a (nx*ny*..., d) array
+
+    else:
+        raise ValueError(f"Expected a list of points or a mesh, but got instead: {points}")
+
+    return points, output_shape
+
+def _normalize_free_surface_points(points, keep_mesh=False):
+    if keep_mesh and isinstance(points, (FloatingBody, FreeSurface)):
+        return points.mesh, (points.mesh.nb_faces,)
+
+    if keep_mesh and isinstance(points, AbstractMesh):
+        return points, (points.nb_faces,)
+
+    points, output_shape = _normalize_points(points, keep_mesh)
+
+    if points.ndim == 2 and points.shape[1] == 2:  # Only x and y have been provided
+        points = np.concatenate([points, np.zeros((points.shape[0], 1))], axis=1)
+
+    return points, output_shape
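These internal helpers let post-processing functions accept a body, a free surface, a mesh, a single point, a list of points, or a stacked meshgrid, and always return a flat list of points together with the shape that the computed field should be reshaped to. A small sketch of that shape contract:

import numpy as np
from capytaine.tools.lists_of_points import _normalize_points

# A single point becomes a (1, 3) array with output shape (1,).
pts, shape = _normalize_points([1.0, 2.0, 3.0])
assert pts.shape == (1, 3) and shape == (1,)

# A meshgrid stacked as a (3, nx, ny, nz) array is flattened into a flat list
# of points; the grid shape is returned so the field can be reshaped back.
X, Y, Z = np.meshgrid(np.linspace(0, 1, 4), np.linspace(0, 1, 5), [0.0])  # each of shape (5, 4, 1)
pts, shape = _normalize_points(np.array([X, Y, Z]))
assert pts.shape == (20, 3) and shape == (5, 4, 1)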
capytaine/tools/memory_monitor.py
@@ -0,0 +1,45 @@
+import logging
+import time
+from threading import Thread
+
+from capytaine.tools.optional_imports import silently_import_optional_dependency
+
+LOG = logging.getLogger(__name__)
+
+class MemoryMonitor(Thread):
+    """Monitor the memory usage in a separate thread.
+    From: https://joblib.readthedocs.io/en/stable/auto_examples/parallel_generator.html#sphx-glr-auto-examples-parallel-generator-py
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.stop = False
+        self.memory_buffer = [0]
+        self.psutil = silently_import_optional_dependency("psutil")
+        self.start()
+
+    def get_memory(self):
+        "Get memory of a process and its children."
+        p = self.psutil.Process()
+        memory = p.memory_info().rss
+        for c in p.children():
+            try:
+                memory += c.memory_info().rss
+            except self.psutil.NoSuchProcess:
+                pass
+        return memory
+
+    def run(self):
+        if self.psutil is not None:
+            memory_start = self.get_memory()
+            while not self.stop:
+                self.memory_buffer.append(self.get_memory() - memory_start)
+                time.sleep(0.2)
+
+    def get_memory_peak(self):
+        self.stop = True
+        super().join()
+        if self.psutil is None:
+            return None
+        else:
+            return round(max(self.memory_buffer) / 1e9, 2)
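`MemoryMonitor` samples the resident memory of the process (and its children) every 0.2 s in a background thread, relative to the level at the time the monitor was started; if `psutil` is not installed it degrades to returning `None`. Typical usage, as a sketch:

from capytaine.tools.memory_monitor import MemoryMonitor

monitor = MemoryMonitor()  # sampling starts immediately in a background thread

# ... run the computation to profile here ...

peak = monitor.get_memory_peak()  # stops the thread; peak extra memory in GB
if peak is None:
    print("psutil is not installed: no measurement available")
else:
    print(f"Peak memory usage: {peak} GB")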
capytaine/tools/optional_imports.py
@@ -0,0 +1,27 @@
+"""Tool to import optional dependencies. Inspired by similar code in pandas."""
+
+import importlib
+
+def import_optional_dependency(module_name: str, package_name: str = None):
+    try:
+        module = importlib.import_module(module_name)
+    except ImportError:
+        if package_name is None:
+            package_name = module_name
+
+        message = (
+            f"Missing optional dependency '{module_name}'. "
+            f"Use pip or conda to install {package_name}."
+        )
+        raise ImportError(message) from None
+
+    return module
+
+def silently_import_optional_dependency(module_name: str):
+    # Same as above, except it does not raise an exception when the module is not found.
+    # Instead, it simply returns None.
+    try:
+        module = importlib.import_module(module_name)
+    except ImportError:
+        module = None
+    return module
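The two helpers cover the two failure policies needed in the code base: fail loudly with an installation hint when a feature cannot work without the dependency, or degrade silently (as `MemoryMonitor` above does with `psutil`). For instance:

from capytaine.tools.optional_imports import (
    import_optional_dependency,
    silently_import_optional_dependency,
)

# Raises "Missing optional dependency 'matplotlib.pyplot'. Use pip or conda
# to install matplotlib." if matplotlib is absent.
plt = import_optional_dependency("matplotlib.pyplot", package_name="matplotlib")

# Returns None instead of raising, for features that are merely nice to have.
psutil = silently_import_optional_dependency("psutil")
if psutil is not None:
    print(psutil.virtual_memory().total)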
capytaine/tools/prony_decomposition.py
@@ -0,0 +1,150 @@
+"""Prony decomposition: tool to approximate a function as a sum of exponentials.
+Used in particular in the finite depth Green function.
+"""
+# Copyright (C) 2017-2024 Matthieu Ancellin
+# See LICENSE file at <https://github.com/capytaine/capytaine>
+
+import logging
+
+import numpy as np
+from numpy.polynomial import polynomial
+from scipy.optimize import curve_fit
+from scipy.linalg import toeplitz
+
+LOG = logging.getLogger(__name__)
+RNG = np.random.default_rng()
+
+
+def exponential_decomposition(X, F, m):
+    """Use Prony's method to approximate the sampled real function F=f(X) as a sum of m
+    exponential functions x → Σ a_i exp(lamda_i x).
+
+    Parameters
+    ----------
+    X: 1D array
+        sampling points.
+    F: 1D array (same size as X)
+        values of the function to approximate at the points of X.
+    m: integer
+        number of exponential functions
+
+    Returns
+    -------
+    a: 1D array (size m)
+        coefficients of the exponentials
+    lamda: 1D array (size m)
+        growth rate of the exponentials
+    """
+    assert X.shape == F.shape
+
+    # Compute the coefficients of the polynomials of Prony's method
+    A = toeplitz(c=F[m-1:-1], r=F[:m][::-1])
+    P, *_ = np.linalg.lstsq(A, F[m:], rcond=None)
+
+    # Build and solve polynomial function
+    coeffs = np.ones(m+1)
+    # coeffs[:m] = -P[::-1]
+    for i in range(m):
+        coeffs[m-i-1] = -P[i]
+    roots = polynomial.polyroots(coeffs)
+
+    # Discard values where log is undefined
+    roots = roots[np.logical_or(np.imag(roots) != 0.0, np.real(roots) >= 0.0)]
+
+    # Deduce lamda and keep only interesting values
+    lamda = np.real(np.log(roots)/(X[1] - X[0]))
+    lamda = np.unique(lamda)
+    lamda = lamda[np.logical_and(-20.0 < lamda, lamda < 0.0)]
+
+    # Fit the values of 'a' on the curve
+    def f(x, *ar):
+        ar = np.asarray(ar)[:, np.newaxis]
+        la = lamda[:, np.newaxis]
+        return np.sum(ar * np.exp(la * x), axis=0)
+    a, *_ = curve_fit(f, X, F, p0=np.zeros(lamda.shape))
+
+    return a, lamda
+
+
+def error_exponential_decomposition(X, F, a, lamda):
+    """Mean square error of the exponential decomposition defined by the
+    coefficients a and lamda with respect to the reference values in F.
+
+    Parameters
+    ----------
+    X: 1D array
+        sampling points
+    F: 1D array (same size as X)
+        reference values
+    a: 1D array
+        coefficients of the exponentials
+    lamda: 1D array (same size as a)
+        growth rate of the exponentials
+
+    Returns
+    -------
+    error: float
+        mean square error of the decomposition
+    """
+    a = np.asarray(a)[:, np.newaxis]
+    lamda = np.asarray(lamda)[:, np.newaxis]
+
+    def f(x):
+        return np.sum(a * np.exp(lamda*x), axis=0)
+
+    return np.square(f(X) - F).mean()
+
+
+class PronyDecompositionFailure(Exception):
+    pass
+
+
+def find_best_exponential_decomposition(f, x_min, x_max, n_exp_range, *, tol=1e-4, noise_on_domain_points_std=0.01):
+    """Tries to construct an exponential decomposition of the function f on the
+    domain [x_min, x_max] by testing the number of exponentials in n_exp_range.
+
+    Parameters
+    ----------
+    f: callable
+        The function ℝ→ℝ to be approximated.
+        Should support vectorized calls (that is, passing a vector of inputs
+        and getting the vector of corresponding outputs).
+    x_min, x_max: floats
+        The bounds of the domain of input in which f should be approximated
+    n_exp_range: iterable of ints
+        The decomposition sizes that will be tested
+    tol: float, optional
+        The target mean square error.
+    noise_on_domain_points_std: float, optional
+        Introduces some random variability on the points where the function is evaluated.
+        Set this parameter to zero to disable randomness.
+    """
+    # Try different ranges of evaluation points to construct the decomposition.
+    for n_exp in n_exp_range:
+
+        # f might be ill-defined at some single specific values
+        # (for the use-case of delhommeau.py, it is when x = kh exactly).
+        # Thus we slightly randomize the range of evaluation points for the Prony decomposition.
+        # This way, if one of the evaluation points hits the singular point, it will most likely not hit it again at the next iteration.
+        x_max_iter = (1 + noise_on_domain_points_std*RNG.uniform())*x_max
+
+        try:
+            # The coefficients are computed on a resolution of 4*n_exp+1 ...
+            X = np.linspace(x_min, x_max_iter, 4*n_exp+1)
+            a, lamda = exponential_decomposition(X, f(X), n_exp)
+
+            # ... and they are evaluated on a finer discretization.
+            X = np.linspace(x_min, x_max_iter, 8*n_exp+1)
+            if error_exponential_decomposition(X, f(X), a, lamda) < tol:
+                return a, lamda
+        except Exception:
+            # If something bad happened while computing the decomposition, try
+            # the next one.
+            continue
+
+    raise PronyDecompositionFailure(
+        "No suitable Prony decomposition has been found in "
+        f"[{x_min}, {x_max}] for tol={tol} "
+        f"using a number of terms in {n_exp_range}."
+    )
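`find_best_exponential_decomposition` wraps Prony's method in a retry loop: it tries each decomposition size in turn until the mean square error on a finer grid drops below `tol`, slightly jittering the sampling interval at each attempt to dodge isolated singular points of `f`. A sketch on a function that is itself a sum of two decaying exponentials, so the first tested size should already meet the tolerance:

import numpy as np
from capytaine.tools.prony_decomposition import find_best_exponential_decomposition

def f(x):
    return np.exp(-x) + 0.5 * np.exp(-2.0 * x)

a, lamda = find_best_exponential_decomposition(f, 0.0, 5.0, range(2, 10), tol=1e-6)
# Expect a ≈ [0.5, 1.0] and lamda ≈ [-2.0, -1.0] (lamda is sorted by np.unique).

X = np.linspace(0.0, 5.0, 100)
approx = np.sum(a[:, None] * np.exp(lamda[:, None] * X), axis=0)
assert np.allclose(approx, f(X), atol=1e-4)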