capytaine 2.3.1__cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. capytaine/__about__.py +16 -0
  2. capytaine/__init__.py +36 -0
  3. capytaine/bem/__init__.py +0 -0
  4. capytaine/bem/airy_waves.py +111 -0
  5. capytaine/bem/engines.py +441 -0
  6. capytaine/bem/problems_and_results.py +600 -0
  7. capytaine/bem/solver.py +594 -0
  8. capytaine/bodies/__init__.py +4 -0
  9. capytaine/bodies/bodies.py +1221 -0
  10. capytaine/bodies/dofs.py +19 -0
  11. capytaine/bodies/predefined/__init__.py +6 -0
  12. capytaine/bodies/predefined/cylinders.py +151 -0
  13. capytaine/bodies/predefined/rectangles.py +111 -0
  14. capytaine/bodies/predefined/spheres.py +70 -0
  15. capytaine/green_functions/FinGreen3D/.gitignore +1 -0
  16. capytaine/green_functions/FinGreen3D/FinGreen3D.f90 +3589 -0
  17. capytaine/green_functions/FinGreen3D/LICENSE +165 -0
  18. capytaine/green_functions/FinGreen3D/Makefile +16 -0
  19. capytaine/green_functions/FinGreen3D/README.md +24 -0
  20. capytaine/green_functions/FinGreen3D/test_program.f90 +39 -0
  21. capytaine/green_functions/LiangWuNoblesse/.gitignore +1 -0
  22. capytaine/green_functions/LiangWuNoblesse/LICENSE +504 -0
  23. capytaine/green_functions/LiangWuNoblesse/LiangWuNoblesseWaveTerm.f90 +751 -0
  24. capytaine/green_functions/LiangWuNoblesse/Makefile +16 -0
  25. capytaine/green_functions/LiangWuNoblesse/README.md +2 -0
  26. capytaine/green_functions/LiangWuNoblesse/test_program.f90 +28 -0
  27. capytaine/green_functions/__init__.py +2 -0
  28. capytaine/green_functions/abstract_green_function.py +64 -0
  29. capytaine/green_functions/delhommeau.py +507 -0
  30. capytaine/green_functions/hams.py +204 -0
  31. capytaine/green_functions/libs/Delhommeau_float32.cpython-312-x86_64-linux-gnu.so +0 -0
  32. capytaine/green_functions/libs/Delhommeau_float64.cpython-312-x86_64-linux-gnu.so +0 -0
  33. capytaine/green_functions/libs/__init__.py +0 -0
  34. capytaine/io/__init__.py +0 -0
  35. capytaine/io/bemio.py +153 -0
  36. capytaine/io/legacy.py +328 -0
  37. capytaine/io/mesh_loaders.py +1086 -0
  38. capytaine/io/mesh_writers.py +692 -0
  39. capytaine/io/meshio.py +38 -0
  40. capytaine/io/wamit.py +479 -0
  41. capytaine/io/xarray.py +668 -0
  42. capytaine/matrices/__init__.py +16 -0
  43. capytaine/matrices/block.py +592 -0
  44. capytaine/matrices/block_toeplitz.py +325 -0
  45. capytaine/matrices/builders.py +89 -0
  46. capytaine/matrices/linear_solvers.py +232 -0
  47. capytaine/matrices/low_rank.py +395 -0
  48. capytaine/meshes/__init__.py +6 -0
  49. capytaine/meshes/clipper.py +465 -0
  50. capytaine/meshes/collections.py +342 -0
  51. capytaine/meshes/geometry.py +409 -0
  52. capytaine/meshes/mesh_like_protocol.py +37 -0
  53. capytaine/meshes/meshes.py +890 -0
  54. capytaine/meshes/predefined/__init__.py +6 -0
  55. capytaine/meshes/predefined/cylinders.py +314 -0
  56. capytaine/meshes/predefined/rectangles.py +261 -0
  57. capytaine/meshes/predefined/spheres.py +62 -0
  58. capytaine/meshes/properties.py +276 -0
  59. capytaine/meshes/quadratures.py +80 -0
  60. capytaine/meshes/quality.py +448 -0
  61. capytaine/meshes/surface_integrals.py +63 -0
  62. capytaine/meshes/symmetric.py +462 -0
  63. capytaine/post_pro/__init__.py +6 -0
  64. capytaine/post_pro/free_surfaces.py +88 -0
  65. capytaine/post_pro/impedance.py +92 -0
  66. capytaine/post_pro/kochin.py +54 -0
  67. capytaine/post_pro/rao.py +60 -0
  68. capytaine/tools/__init__.py +0 -0
  69. capytaine/tools/cache_on_disk.py +26 -0
  70. capytaine/tools/deprecation_handling.py +18 -0
  71. capytaine/tools/lists_of_points.py +52 -0
  72. capytaine/tools/lru_cache.py +49 -0
  73. capytaine/tools/optional_imports.py +27 -0
  74. capytaine/tools/prony_decomposition.py +150 -0
  75. capytaine/tools/symbolic_multiplication.py +149 -0
  76. capytaine/tools/timer.py +66 -0
  77. capytaine/ui/__init__.py +0 -0
  78. capytaine/ui/cli.py +28 -0
  79. capytaine/ui/rich.py +5 -0
  80. capytaine/ui/vtk/__init__.py +3 -0
  81. capytaine/ui/vtk/animation.py +329 -0
  82. capytaine/ui/vtk/body_viewer.py +28 -0
  83. capytaine/ui/vtk/helpers.py +82 -0
  84. capytaine/ui/vtk/mesh_viewer.py +461 -0
  85. capytaine-2.3.1.dist-info/LICENSE +674 -0
  86. capytaine-2.3.1.dist-info/METADATA +750 -0
  87. capytaine-2.3.1.dist-info/RECORD +93 -0
  88. capytaine-2.3.1.dist-info/WHEEL +6 -0
  89. capytaine-2.3.1.dist-info/entry_points.txt +3 -0
  90. capytaine.libs/libgfortran-83c28eba.so.5.0.0 +0 -0
  91. capytaine.libs/libgomp-e985bcbb.so.1.0.0 +0 -0
  92. capytaine.libs/libmvec-2-583a17db.28.so +0 -0
  93. capytaine.libs/libquadmath-2284e583.so.0.0.0 +0 -0
capytaine/matrices/block_toeplitz.py
@@ -0,0 +1,325 @@
+ """This module implements block Toeplitz matrices to be used in Hierarchical Toeplitz matrices.
+
+ The module also contains several special cases such as block symmetric Toeplitz matrices and block circulant matrices.
+ All classes inherit from the BlockMatrix class.
+ """
+ # Copyright (C) 2017-2019 Matthieu Ancellin
+ # See LICENSE file at <https://github.com/mancellin/capytaine>
+
+ import logging
+ from typing import Tuple, List, Set, Iterable
+
+ import numpy as np
+
+ from capytaine.matrices.block import BlockMatrix
+
+ LOG = logging.getLogger(__name__)
+
+
+ ################################################################################
+ #                            Block Toeplitz matrix                             #
+ ################################################################################
+
+ class BlockToeplitzMatrix(BlockMatrix):
+     """A (2D) block Toeplitz matrix, stored as a list of blocks.
+     All blocks should have the same shape.
+
+     Stored in the backend as a 1×(2N-1) array of arrays."""
+
+     # INITIALIZATION
+
+     def _compute_shape(self) -> Tuple[int, int]:
+         # The full shape is found by multiplying the shape of the blocks. All of them have the same shape.
+         return (self._stored_block_shapes[0][0]*self.nb_blocks[0],
+                 self._stored_block_shapes[1][0]*self.nb_blocks[1])
+
+     def _compute_nb_blocks(self) -> Tuple[int, int]:
+         """Will be overridden by subclasses."""
+         assert self._stored_nb_blocks[1] % 2 == 1, "Expecting an odd number of blocks to build a Toeplitz matrix"
+         n = (self._stored_nb_blocks[1]+1)//2
+         return n, n
+
+     def _check_dimensions_of_blocks(self) -> bool:
+         for block in self._stored_blocks[0, :]:
+             if not block.shape == self.block_shape:  # All blocks have same shape
+                 return False
+         return True
+
+     # ACCESSING DATA
+
+     @property
+     def block_shapes(self) -> Tuple[List[int], List[int]]:
+         """The shapes of the blocks composing the block matrix.
+         Actually, they should all be the same."""
+         return ([self._stored_block_shapes[0][0]]*self.nb_blocks[0],
+                 [self._stored_block_shapes[1][0]]*self.nb_blocks[1])
+
+     @property
+     def block_shape(self) -> Tuple[int, int]:
+         """The shape of any of the blocks."""
+         return self._stored_block_shapes[0][0], self._stored_block_shapes[1][0]
+
+     def _block_indices_of(self, k: int) -> Set[Tuple[int, int]]:
+         """The block indices at which the stored block k can be found in the full matrix of n×n blocks.
+         Will be overridden by subclasses."""
+         n = self.nb_blocks[0]
+
+         if k < n:
+             i, j = 0, k  # Upper triangle
+         elif n <= k < 2*n:
+             i, j = 2*n-1-k, 0  # Lower triangle
+         else:
+             raise AttributeError
+
+         indices = set()
+         while i < n and j < n:
+             indices.add((i, j))
+             i, j = i+1, j+1  # Going along the diagonal
+
+         return indices
+
+     @property
+     def all_blocks(self):
+         all_blocks = np.empty(self.nb_blocks, dtype=object)
+         for k in range(self._stored_nb_blocks[1]):
+             for i, j in self._block_indices_of(k):
+                 all_blocks[i, j] = self._stored_blocks[0, k]
+         return all_blocks
+
+     def _positions_of(self, k: int, global_frame=(0, 0)) -> List[Tuple[int, int]]:
+         """The positions in the full matrix at which the block k from the first line can also be found."""
+         shape = self.block_shape
+         return sorted([(global_frame[0] + i*shape[0], global_frame[1] + j*shape[1])
+                        for i, j in self._block_indices_of(k)])
+
+     def _stored_block_positions(self, global_frame=(0, 0)) -> Iterable[List[Tuple[int, int]]]:
+         """The position of each block in the matrix.
+
+         Example::
+
+             AABB
+             AABB  ->  list(matrix._stored_block_positions()) = [[(0,0), (2, 2)], [(0, 2), (2, 0)]]
+             BBAA
+             BBAA
+         """
+         return (self._positions_of(k, global_frame=global_frame) for k in range(self._stored_nb_blocks[1]))
+
+     # TRANSFORMING DATA
+
+     @property
+     def circulant_super_matrix(self):
+         if not hasattr(self, '_circulant_super_matrix'):
+             self._circulant_super_matrix = BlockCirculantMatrix(
+                 self._stored_blocks,
+                 _stored_block_shapes=self._stored_block_shapes,
+                 check=False)
+         return self._circulant_super_matrix
+
+     def matvec(self, other):
+         """Matrix-vector product.
+         Named as such to be used as a scipy LinearOperator."""
+         LOG.debug(f"Product of {self} with vector of shape {other.shape}")
+         A = self.circulant_super_matrix
+         b = np.concatenate([other, np.zeros(A.shape[1] - self.shape[1])])
+         return (A @ b)[:self.shape[0]]
+
+     def rmatvec(self, other):
+         """Vector-matrix product.
+         Named as such to be used as a scipy LinearOperator."""
+         LOG.debug(f"Product of vector of shape {other.shape} with {self}")
+         if other.ndim == 2 and other.shape[0] == 1:  # Actually a 1×N matrix
+             other = other[0, :]
+         A = self.circulant_super_matrix
+         b = np.concatenate([other, np.zeros(A.shape[0] - self.shape[0])])
+         return (A.rmatvec(b))[:self.shape[1]]
+
+
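The 1×(2N-1) storage convention of BlockToeplitzMatrix can be reproduced with plain NumPy: the first N stored blocks give the first block row, and the remaining N-1 fill the first block column below the diagonal, from bottom to top, exactly as _block_indices_of distributes them. A minimal sketch, independent of the classes above::

    import numpy as np

    n, m = 3, 2                                  # 3×3 grid of 2×2 blocks
    rng = np.random.default_rng(0)
    stored = rng.normal(size=(2*n - 1, m, m))    # the 1×(2N-1) backend storage

    def block(i, j):
        # Same mapping as BlockToeplitzMatrix._block_indices_of: stored[:n] is the
        # first block row, stored[n:] fills the first block column from bottom to top.
        return stored[j - i] if j >= i else stored[2*n - 1 - (i - j)]

    dense = np.block([[block(i, j) for j in range(n)] for i in range(n)])
    assert dense.shape == (n*m, n*m)
    # Every block diagonal is constant, as expected for a block Toeplitz matrix.
    assert all(np.allclose(block(i, j), block(i + 1, j + 1))
               for i in range(n - 1) for j in range(n - 1))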
+ ################################################################################
+ #                       Block symmetric Toeplitz matrix                        #
+ ################################################################################
+
+ class BlockSymmetricToeplitzMatrix(BlockToeplitzMatrix):
+     """A (2D) block symmetric Toeplitz matrix, stored as a list of blocks.
+     All blocks should have the same shape.
+
+     Stored in the backend as a 1×N array of arrays."""
+
+     def _compute_nb_blocks(self) -> Tuple[int, int]:
+         n = self._stored_nb_blocks[1]
+         return n, n
+
+     def _block_indices_of(self, k: int) -> Set[Tuple[int, int]]:
+         """The block indices at which the stored block k can be found in the full matrix."""
+         n = self.nb_blocks[0]
+         assert k < n
+
+         if k == 0:
+             return BlockToeplitzMatrix._block_indices_of(self, 0)
+         else:
+             return BlockToeplitzMatrix._block_indices_of(self, k).union(
+                 BlockToeplitzMatrix._block_indices_of(self, 2*n-k-1))
+
+     @property
+     def circulant_super_matrix(self):
+         if not hasattr(self, '_circulant_super_matrix'):
+             self._circulant_super_matrix = EvenBlockSymmetricCirculantMatrix(
+                 self._stored_blocks,
+                 _stored_block_shapes=self._stored_block_shapes,
+                 check=False)
+         return self._circulant_super_matrix
+
+
+ ################################################################################
+ #                            Block circulant matrix                            #
+ ################################################################################
+
+ class BlockCirculantMatrix(BlockToeplitzMatrix):
+     """A (2D) block circulant matrix, stored as a list of blocks.
+     All blocks should have the same shape.
+
+     Stored in the backend as a 1×N array of arrays."""
+
+     def _compute_nb_blocks(self) -> Tuple[int, int]:
+         n = self._stored_nb_blocks[1]
+         return n, n
+
+     def _block_indices_of(self, k: int) -> Set[Tuple[int, int]]:
+         """The block indices at which the stored block k can be found in the full matrix."""
+         n = self.nb_blocks[0]
+         assert k < n
+
+         if k == 0:
+             return BlockToeplitzMatrix._block_indices_of(self, 0)
+         else:
+             return BlockToeplitzMatrix._block_indices_of(self, k).union(
+                 BlockToeplitzMatrix._block_indices_of(self, n+k-1))
+
+     # LINEAR SYSTEMS
+
+     def block_diagonalize(self):
+         """Returns a vector of matrices."""
+         if not hasattr(self, 'block_diagonalization'):
+             if all(isinstance(matrix, BlockMatrix) for matrix in self._stored_blocks[0, :]):
+                 self.block_diagonalization = BlockMatrix.fft_of_list(*self.all_blocks[:, 0])
+             else:
+                 stacked_blocks = np.empty((self.nb_blocks[1],) + self.block_shape, dtype=self.dtype)
+                 for i, block in enumerate(self.all_blocks[:, 0]):
+                     stacked_blocks[i] = block.full_matrix() if not isinstance(block, np.ndarray) else block
+                 self.block_diagonalization = np.fft.fft(stacked_blocks, axis=0)
+         return self.block_diagonalization
+
+     def matvec(self, other):
+         """Matrix-vector product.
+         Named as such to be used as a scipy LinearOperator."""
+         LOG.debug(f"Product of {self} with vector of shape {other.shape}")
+         fft_of_vector = np.fft.fft(np.reshape(other, (self.nb_blocks[0], self.block_shape[1], 1)), axis=0)
+         blocks_of_diagonalization = self.block_diagonalize()
+         try:  # Try to run it as vectorized numpy arrays.
+             fft_of_result = blocks_of_diagonalization @ fft_of_vector
+         # When the above fails, numpy 1.15 returns a TypeError, whereas numpy 1.16 returns a ValueError.
+         except (TypeError, ValueError):  # Or do the same thing with list comprehension.
+             fft_of_result = np.array([block @ vec for block, vec in zip(blocks_of_diagonalization, fft_of_vector)])
+         result = np.fft.ifft(fft_of_result, axis=0).reshape(self.shape[0])
+         if self.dtype == complex or other.dtype == complex:
+             return np.asarray(result)
+         else:
+             return np.asarray(np.real(result))
+
+     def rmatvec(self, other):
+         """Vector-matrix product.
+         Named as such to be used as a scipy LinearOperator."""
+         other = np.conjugate(other)
+         fft_of_vector = np.fft.ifft(np.reshape(other, (self.nb_blocks[0], 1, self.block_shape[0])), axis=0)
+         blocks_of_diagonalization = self.block_diagonalize()
+         try:  # Try to run it as vectorized numpy arrays.
+             fft_of_result = fft_of_vector @ blocks_of_diagonalization
+         # When the above fails, numpy 1.15 returns a TypeError, whereas numpy 1.16 returns a ValueError.
+         except (TypeError, ValueError):
+             # Instead we do the same thing with list comprehension.
+             fft_of_result = np.array(
+                 [block.rmatvec(vec.flatten()) for block, vec in zip(blocks_of_diagonalization, fft_of_vector)]
+             )
+         result = np.fft.fft(fft_of_result, axis=0).reshape(self.shape[1])
+         if self.dtype == complex or other.dtype == complex:
+             return np.asarray(result)
+         else:
+             return np.asarray(np.real(result))
+
+
+ ###########################################################################
+ #                    Block symmetric circulant matrix                     #
+ ###########################################################################
+
+ class EvenBlockSymmetricCirculantMatrix(BlockCirculantMatrix, BlockSymmetricToeplitzMatrix):
+     """A block symmetric circulant matrix, with an even number of blocks.
+
+     Examples::
+
+         ABCB
+         BABC
+         CBAB
+         BCBA
+
+         ABCDCB
+         BABCDC
+         CBABCD
+         DCBABC
+         CDCBAB
+         BCDCBA
+
+     Stored in the backend as a 1×(N/2+1) array of arrays."""
+
+     def _compute_nb_blocks(self) -> Tuple[int, int]:
+         """The number of blocks in the full matrix."""
+         n = (self._stored_nb_blocks[1] - 1)*2
+         return n, n
+
+     def _block_indices_of(self, k: int) -> List[Tuple[int, int]]:
+         n = self.nb_blocks[0]
+         assert k < n/2 + 1
+         if k == 0:
+             return BlockToeplitzMatrix._block_indices_of(self, 0)
+         else:
+             return (BlockToeplitzMatrix._block_indices_of(self, k) |
+                     BlockToeplitzMatrix._block_indices_of(self, n+k-1) |
+                     BlockToeplitzMatrix._block_indices_of(self, n-k) |
+                     BlockToeplitzMatrix._block_indices_of(self, 2*n-k-1)
+                     )
+
+
+ class OddBlockSymmetricCirculantMatrix(BlockCirculantMatrix, BlockSymmetricToeplitzMatrix):
+     """A block symmetric circulant matrix, with an odd number of blocks.
+
+     Examples::
+
+         ABCCB
+         BABCC
+         CBABC
+         CCBAB
+         BCCBA
+
+         ABCDDCB
+         BABCDDC
+         CBABCDD
+         DCBABCD
+         DDCBABC
+         CDDCBAB
+         BCDDCBA
+
+     Stored in the backend as a 1×(N+1)/2 array of arrays."""
+
+     def _compute_nb_blocks(self) -> Tuple[int, int]:
+         n = self._stored_nb_blocks[1]*2 - 1
+         return n, n
+
+     def _block_indices_of(self, k: int) -> List[Tuple[int, int]]:
+         n = self.nb_blocks[0]
+         assert k < (n+1)/2
+         if k == 0:
+             return BlockToeplitzMatrix._block_indices_of(self, 0)
+         else:
+             return (BlockToeplitzMatrix._block_indices_of(self, k) |
+                     BlockToeplitzMatrix._block_indices_of(self, n+k-1) |
+                     BlockToeplitzMatrix._block_indices_of(self, n-k) |
+                     BlockToeplitzMatrix._block_indices_of(self, 2*n-k-1)
+                     )
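The FFT trick behind BlockCirculantMatrix.block_diagonalize and matvec can be checked with plain NumPy: build a dense block circulant matrix from its first block column (which is what all_blocks[:, 0] provides), take the FFT of that column along the block axis, multiply block-wise in Fourier space, and transform back. A minimal sketch, not using the capytaine classes themselves::

    import numpy as np

    rng = np.random.default_rng(0)
    n, m = 4, 3                                   # 4×4 grid of 3×3 blocks
    col = rng.normal(size=(n, m, m))              # first block column
    dense = np.block([[col[(i - j) % n] for j in range(n)] for i in range(n)])
    x = rng.normal(size=n * m)

    diag_blocks = np.fft.fft(col, axis=0)                  # block diagonalization, shape (n, m, m)
    x_hat = np.fft.fft(x.reshape(n, m, 1), axis=0)         # FFT of the blocks of x
    y = np.fft.ifft(diag_blocks @ x_hat, axis=0).reshape(n * m)

    assert np.allclose(y.real, dense @ x)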
capytaine/matrices/builders.py
@@ -0,0 +1,89 @@
+ """This module contains some helpful functions to create block matrices."""
+ # Copyright (C) 2017-2019 Matthieu Ancellin
+ # See LICENSE file at <https://github.com/mancellin/capytaine>
+
+ import logging
+ from itertools import accumulate
+
+ import numpy as np
+
+ from capytaine.matrices.block import BlockMatrix
+ from capytaine.matrices.low_rank import LowRankMatrix
+
+ LOG = logging.getLogger(__name__)
+
+
+ def cut_matrix(full_matrix, x_shapes, y_shapes, check=False):
+     """Transform a numpy array into a block matrix of numpy arrays.
+
+     Parameters
+     ----------
+     full_matrix: numpy array
+         The matrix to split into blocks.
+     x_shapes: sequence of int
+         The sizes of the blocks along the first dimension (rows).
+     y_shapes: sequence of int
+         The sizes of the blocks along the second dimension (columns).
+     check: bool, optional
+         Check the dimensions and type of the matrix after creation (default: False).
+
+     Return
+     ------
+     BlockMatrix
+         The same matrix as the input one but in block form.
+     """
+     new_block_matrix = []
+     for i, di in zip(accumulate([0] + x_shapes[:-1]), x_shapes):
+         line = []
+         for j, dj in zip(accumulate([0] + y_shapes[:-1]), y_shapes):
+             line.append(full_matrix[i:i+di, j:j+dj])
+         new_block_matrix.append(line)
+     return BlockMatrix(new_block_matrix, check=check)
+
+
+ def random_block_matrix(x_shapes, y_shapes, rng=np.random.default_rng()):
+     """A random block matrix."""
+     return cut_matrix(rng.uniform(size=(sum(x_shapes), sum(y_shapes))), x_shapes, y_shapes)
+
+
+ def full_like(A, value, dtype=np.float64):
+     """A matrix of the same kind and shape as A but filled with a single value."""
+     if isinstance(A, BlockMatrix):
+         new_matrix = []
+         for i in range(A._stored_nb_blocks[0]):
+             line = []
+             for j in range(A._stored_nb_blocks[1]):
+                 line.append(full_like(A._stored_blocks[i, j], value, dtype=dtype))
+             new_matrix.append(line)
+         return A.__class__(new_matrix)
+     elif isinstance(A, LowRankMatrix):
+         return LowRankMatrix(np.ones((A.shape[0], 1)), np.full((1, A.shape[1]), value))
+     elif isinstance(A, np.ndarray):
+         return np.full_like(A, value, dtype=dtype)
+
+
+ def zeros_like(A, dtype=np.float64):
+     """A matrix of the same kind and shape as A but filled with zeros."""
+     return full_like(A, 0.0, dtype=dtype)
+
+
+ def ones_like(A, dtype=np.float64):
+     """A matrix of the same kind and shape as A but filled with ones."""
+     return full_like(A, 1.0, dtype=dtype)
+
+
+ def identity_like(A, dtype=np.float64):
+     """An identity matrix of the same kind and shape as A."""
+     if isinstance(A, BlockMatrix):
+         I = []
+         for i in range(A._stored_nb_blocks[0]):
+             line = []
+             for j in range(A._stored_nb_blocks[1]):
+                 if i == j:
+                     line.append(identity_like(A._stored_blocks[i, j], dtype=dtype))
+                 else:
+                     line.append(zeros_like(A._stored_blocks[i, j], dtype=dtype))
+             I.append(line)
+         return A.__class__(I)
+     elif isinstance(A, np.ndarray):
+         return np.eye(A.shape[0], A.shape[1], dtype=dtype)
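A quick round-trip check of the builders above; this is a sketch that assumes the module is importable as capytaine.matrices.builders, as the file path in the listing suggests::

    import numpy as np
    from capytaine.matrices.builders import cut_matrix, identity_like

    M = np.arange(25.0).reshape(5, 5)
    B = cut_matrix(M, x_shapes=[2, 3], y_shapes=[2, 3], check=True)
    assert np.allclose(B.full_matrix(), M)          # the block form reproduces the dense array

    I = identity_like(B)                            # block identity with the same block structure
    assert np.allclose(I.full_matrix(), np.eye(5))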
capytaine/matrices/linear_solvers.py
@@ -0,0 +1,232 @@
+ """The linear solvers used in Capytaine.
+
+ They are based on numpy solvers with a thin layer for the handling of Hierarchical Toeplitz matrices.
+ """
+ # Copyright (C) 2017-2019 Matthieu Ancellin
+ # See LICENSE file at <https://github.com/capytaine/capytaine>
+
+ import logging
+
+ import numpy as np
+ from scipy import linalg as sl
+ from scipy.sparse import linalg as ssl
+
+ from capytaine.matrices.block import BlockMatrix
+ from capytaine.matrices.block_toeplitz import BlockSymmetricToeplitzMatrix, BlockCirculantMatrix
+
+ LOG = logging.getLogger(__name__)
+
+
+ # DIRECT SOLVER
+
+ def solve_directly(A, b):
+     assert isinstance(b, np.ndarray) and A.ndim == b.ndim+1 and A.shape[-2] == b.shape[-1]
+     if isinstance(A, BlockCirculantMatrix):
+         LOG.debug("\tSolve linear system %s", A)
+         blocks_of_diagonalization = A.block_diagonalize()
+         fft_of_rhs = np.fft.fft(np.reshape(b, (A.nb_blocks[0], A.block_shape[0])), axis=0)
+         try:  # Try to run it as vectorized numpy arrays.
+             fft_of_result = np.linalg.solve(blocks_of_diagonalization, fft_of_rhs[..., np.newaxis])[..., 0]
+         except np.linalg.LinAlgError:  # Or do the same thing with list comprehension.
+             fft_of_result = np.array([solve_directly(block, vec) for block, vec in zip(blocks_of_diagonalization, fft_of_rhs)])
+         result = np.fft.ifft(fft_of_result, axis=0).reshape((A.shape[1],))
+         return result
+
+     elif isinstance(A, BlockSymmetricToeplitzMatrix):
+         if A.nb_blocks == (2, 2):
+             LOG.debug("\tSolve linear system %s", A)
+             A1, A2 = A._stored_blocks[0, :]
+             b1, b2 = b[:len(b)//2], b[len(b)//2:]
+             x_plus = solve_directly(A1 + A2, b1 + b2)
+             x_minus = solve_directly(A1 - A2, b1 - b2)
+             return np.concatenate([x_plus + x_minus, x_plus - x_minus])/2
+         else:
+             # Not implemented
+             LOG.debug("\tSolve linear system %s", A)
+             return solve_directly(A.full_matrix(), b)
+
+     elif isinstance(A, BlockMatrix):
+         LOG.debug("\tSolve linear system %s", A)
+         return solve_directly(A.full_matrix(), b)
+
+     elif isinstance(A, np.ndarray):
+         LOG.debug(f"\tSolve linear system (size: {A.shape}) with numpy direct solver.")
+         return np.linalg.solve(A, b)
+
+     else:
+         raise ValueError(f"Unrecognized type of matrix to solve: {A}")
+
+
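The half-size trick that solve_directly applies to a 2×2 BlockSymmetricToeplitzMatrix can be verified with plain NumPy: for M = [[A1, A2], [A2, A1]], adding and subtracting the two block rows decouples the system into (A1 + A2)(x1 + x2) = b1 + b2 and (A1 - A2)(x1 - x2) = b1 - b2. A minimal sketch::

    import numpy as np

    rng = np.random.default_rng(0)
    m = 4
    A1 = rng.normal(size=(m, m)) + 10 * np.eye(m)   # keep A1 + A2 and A1 - A2 invertible
    A2 = rng.normal(size=(m, m))
    M = np.block([[A1, A2], [A2, A1]])
    b = rng.normal(size=2 * m)
    b1, b2 = b[:m], b[m:]

    x_plus = np.linalg.solve(A1 + A2, b1 + b2)      # = x1 + x2
    x_minus = np.linalg.solve(A1 - A2, b1 - b2)     # = x1 - x2
    x = np.concatenate([x_plus + x_minus, x_plus - x_minus]) / 2

    assert np.allclose(M @ x, b)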
+ # CACHED LU DECOMPOSITION
+ class LUSolverWithCache:
+     """Solve a linear system with the LU decomposition.
+
+     The latest LU decomposition is kept in memory; if a system with the same matrix needs to be solved again, the decomposition is reused.
+
+     Most of the complexity of this class comes from:
+     1. @lru_cache does not work because numpy arrays are not hashable. So a basic cache system has been recoded from scratch.
+     2. To be the default solver for the BasicMatrixEngine, the solver needs to support matrices for problems with one or two reflection symmetries.
+        Hence, a custom way to cache the LU decompositions of the matrices involved in the direct linear resolution of the symmetric problem.
+     """
+     def __init__(self):
+         self.cached_matrix = None
+         self.cached_decomp = None
+
+     def solve(self, A, b):
+         return self.solve_with_decomp(self.cached_lu_decomp(A), b)
+
+     def lu_decomp(self, A):
+         """Return the LU decomposition of A.
+         If A is a BlockSymmetricToeplitzMatrix, return a list of LU decompositions, one for each block of the block diagonalisation of the matrix.
+         """
+         if isinstance(A, BlockSymmetricToeplitzMatrix) and A.nb_blocks == (2, 2):
+             A1, A2 = A._stored_blocks[0, :]
+             return [self.lu_decomp(A1 + A2), self.lu_decomp(A1 - A2)]
+         elif isinstance(A, np.ndarray):
+             return sl.lu_factor(A)
+         else:
+             raise NotImplementedError("Cached LU solver is only implemented for dense matrices and 2×2 BlockSymmetricToeplitzMatrix.")
+
+     def cached_lu_decomp(self, A):
+         if A is not self.cached_matrix:
+             self.cached_matrix = A
+             LOG.debug("Computing and caching LU decomposition")
+             self.cached_decomp = self.lu_decomp(A)
+         else:
+             LOG.debug("Using cached LU decomposition")
+         return self.cached_decomp
+
+     def solve_with_decomp(self, decomp, b):
+         """Solve the system using the precomputed LU decomposition.
+         TODO: find a better way to differentiate an LU decomposition (returned as a tuple by sl.lu_factor)
+         and a set of LU decompositions (stored as a list by self.lu_decomp).
+         """
+         if isinstance(decomp, list):  # The matrix was a BlockSymmetricToeplitzMatrix
+             b1, b2 = b[:len(b)//2], b[len(b)//2:]
+             x_plus = self.solve_with_decomp(decomp[0], b1 + b2)
+             x_minus = self.solve_with_decomp(decomp[1], b1 - b2)
+             return np.concatenate([x_plus + x_minus, x_plus - x_minus])/2
+         elif isinstance(decomp, tuple):  # The matrix was a np.ndarray
+             return sl.lu_solve(decomp, b)
+         else:
+             raise NotImplementedError("Cached LU solver is only implemented for dense matrices and 2×2 BlockSymmetricToeplitzMatrix.")
+
+
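Typical use of the cached LU solver; a sketch assuming the module is importable as capytaine.matrices.linear_solvers. Note that the cache is keyed on object identity (A is cached_matrix), not on matrix equality::

    import numpy as np
    from capytaine.matrices.linear_solvers import LUSolverWithCache

    rng = np.random.default_rng(0)
    A = rng.normal(size=(50, 50)) + 50 * np.eye(50)   # well-conditioned test matrix
    solver = LUSolverWithCache()

    b1, b2 = rng.normal(size=50), rng.normal(size=50)
    x1 = solver.solve(A, b1)   # factorizes A and caches the LU decomposition
    x2 = solver.solve(A, b2)   # same object passed again: the cached factors are reused

    assert np.allclose(A @ x1, b1) and np.allclose(A @ x2, b2)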
+
+ # ITERATIVE SOLVER
+
+ class Counter:
+     def __init__(self):
+         self.nb_iter = 0
+
+     def __call__(self, *args, **kwargs):
+         self.nb_iter += 1
+
+
+ def solve_gmres(A, b):
+     LOG.debug(f"Solve with GMRES for {A}.")
+
+     if LOG.isEnabledFor(logging.INFO):
+         counter = Counter()
+         x, info = ssl.gmres(A, b, atol=1e-6, callback=counter, callback_type="pr_norm")
+         LOG.info(f"End of GMRES after {counter.nb_iter} iterations.")
+
+     else:
+         x, info = ssl.gmres(A, b, atol=1e-6)
+
+     if info > 0:
+         raise RuntimeError(f"No convergence of the GMRES after {info} iterations.\n"
+                            "This can be due to overlapping panels or irregular frequencies.\n"
+                            "In the latter case, using a direct solver can help (https://github.com/mancellin/capytaine/issues/30).")
+
+     return x
+
+ def gmres_no_fft(A, b):
+     LOG.debug(f"Solve with GMRES for {A} without using FFT.")
+
+     x, info = ssl.gmres(A.no_toeplitz() if isinstance(A, BlockMatrix) else A, b, atol=1e-6)
+
+     if info != 0:
+         LOG.warning(f"No convergence of the GMRES. Error code: {info}")
+
+     return x
+
+
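Since solve_gmres passes A straight to scipy.sparse.linalg.gmres, it accepts a dense array, a sparse matrix or a LinearOperator (which is what the matvec/rmatvec methods above are designed for). A minimal sketch on a dense, well-conditioned system::

    import numpy as np
    from capytaine.matrices.linear_solvers import solve_gmres

    rng = np.random.default_rng(0)
    A = np.eye(100) + 0.01 * rng.normal(size=(100, 100))
    b = rng.normal(size=100)

    x = solve_gmres(A, b)
    assert np.linalg.norm(A @ x - b) <= 1e-4 * np.linalg.norm(b)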
155
+
156
+ def _block_Jacobi_coarse_corr(A, b, x0, R, RA, AcLU, DLU, diag_shapes, n):
157
+ """
158
+ Performs a single step of the block-Jacobi method with coarse correction.
159
+ Can be used as a preconditioner for matrix A.
160
+
161
+ Parameters
162
+ ----------
163
+ A: BlockMatrix
164
+ System matrix
165
+ b: array
166
+ System right hand side vector
167
+ x0: array
168
+ Initial guess of the solution
169
+ R: array
170
+ Coarse space restriction matrix
171
+ RA: array
172
+ Precomputed product of R and A
173
+ AcLU: list
174
+ LU decomposition data of the coarse system matrix Ac (output of
175
+ scipy.linalg.lu_factor)
176
+ DLU: list
177
+ List of LU decomposition data for the diagonal blocks of A
178
+ diag_shapes: list
179
+ List of shapes of diagonal blocks
180
+ n: integer
181
+ Size of the coarse problem (e.g. number of bodies simulated)
182
+
183
+ Returns
184
+ -------
185
+ array
186
+ Action of a step of the method on vector x0
187
+ """
188
+ # x_ps = x after the pre-smoothing step (block-Jacobi)
189
+ x_ps = np.zeros(A.shape[0], dtype=complex)
190
+
191
+ # the diagonal blocks of A have already been put to zero in build_matrices
192
+ # they are not needed anymore
193
+ q = b - A@x0
194
+ # loop over diagonal blocks
195
+ for kk in range(n):
196
+ local_slice = slice(sum(diag_shapes[:kk]), sum(diag_shapes[:kk+1]))
197
+ local_rhs = q[local_slice]
198
+ local_sol = sl.lu_solve(DLU[kk], local_rhs, check_finite=False)
199
+
200
+ x_ps[local_slice] = local_sol
201
+
202
+ r_c = R@b - RA@x_ps #restricted residual
203
+ e_c = sl.lu_solve(AcLU, r_c, check_finite=False)
204
+ # update
205
+ return x_ps + R.T@e_c
206
+
207
+ def solve_precond_gmres(A_and_precond_data, b):
208
+ """
209
+ Implementation of the preconditioner presented in
210
+ `<https://doi.org/10.1007/978-3-031-50769-4_14>`.
211
+ """
212
+ A, R, RA, AcLU, DLU, diag_shapes, n, PinvA = A_and_precond_data
213
+ N = A.shape[0]
214
+
215
+ Pinvb = _block_Jacobi_coarse_corr(A, b, np.zeros(N, dtype=complex), R, RA, AcLU, DLU, diag_shapes, n)
216
+
217
+ LOG.debug(f"Solve with GMRES for {A}.")
218
+
219
+ if LOG.isEnabledFor(logging.INFO):
220
+ counter = Counter()
221
+ x, info = ssl.gmres(PinvA, Pinvb, atol=1e-6, callback=counter)
222
+ LOG.info(f"End of GMRES after {counter.nb_iter} iterations.")
223
+
224
+ else:
225
+ x, info = ssl.gmres(PinvA, Pinvb, atol=1e-6)
226
+
227
+ if info > 0:
228
+ raise RuntimeError(f"No convergence of the GMRES after {info} iterations.\n"
229
+ "This can be due to overlapping panels or irregular frequencies.\n"
230
+ "In the latter case, using a direct solver can help (https://github.com/mancellin/capytaine/issues/30).")
231
+
232
+ return x