capytaine-2.2-cp39-cp39-win_amd64.whl → capytaine-2.3-cp39-cp39-win_amd64.whl

Files changed (55)
  1. capytaine/__about__.py +1 -1
  2. capytaine/__init__.py +12 -8
  3. capytaine/bem/airy_waves.py +7 -2
  4. capytaine/bem/engines.py +2 -2
  5. capytaine/bem/problems_and_results.py +82 -35
  6. capytaine/bem/solver.py +138 -41
  7. capytaine/bodies/bodies.py +40 -12
  8. capytaine/bodies/predefined/rectangles.py +2 -0
  9. capytaine/green_functions/FinGreen3D/.gitignore +1 -0
  10. capytaine/green_functions/FinGreen3D/FinGreen3D.f90 +3589 -0
  11. capytaine/green_functions/FinGreen3D/LICENSE +165 -0
  12. capytaine/green_functions/FinGreen3D/Makefile +16 -0
  13. capytaine/green_functions/FinGreen3D/README.md +24 -0
  14. capytaine/green_functions/FinGreen3D/test_program.f90 +39 -0
  15. capytaine/green_functions/LiangWuNoblesse/.gitignore +1 -0
  16. capytaine/green_functions/LiangWuNoblesse/LICENSE +504 -0
  17. capytaine/green_functions/LiangWuNoblesse/LiangWuNoblesseWaveTerm.f90 +751 -0
  18. capytaine/green_functions/LiangWuNoblesse/Makefile +18 -0
  19. capytaine/green_functions/LiangWuNoblesse/README.md +2 -0
  20. capytaine/green_functions/LiangWuNoblesse/test_program.f90 +28 -0
  21. capytaine/green_functions/abstract_green_function.py +55 -3
  22. capytaine/green_functions/delhommeau.py +186 -115
  23. capytaine/green_functions/hams.py +204 -0
  24. capytaine/green_functions/libs/Delhommeau_float32.cp39-win_amd64.dll.a +0 -0
  25. capytaine/green_functions/libs/Delhommeau_float32.cp39-win_amd64.pyd +0 -0
  26. capytaine/green_functions/libs/Delhommeau_float64.cp39-win_amd64.dll.a +0 -0
  27. capytaine/green_functions/libs/Delhommeau_float64.cp39-win_amd64.pyd +0 -0
  28. capytaine/io/bemio.py +14 -2
  29. capytaine/io/mesh_loaders.py +2 -1
  30. capytaine/io/wamit.py +479 -0
  31. capytaine/io/xarray.py +252 -100
  32. capytaine/matrices/block.py +4 -2
  33. capytaine/matrices/linear_solvers.py +1 -1
  34. capytaine/matrices/low_rank.py +3 -1
  35. capytaine/meshes/clipper.py +4 -3
  36. capytaine/meshes/collections.py +11 -1
  37. capytaine/meshes/mesh_like_protocol.py +37 -0
  38. capytaine/meshes/meshes.py +22 -9
  39. capytaine/meshes/properties.py +58 -24
  40. capytaine/meshes/symmetric.py +11 -2
  41. capytaine/post_pro/kochin.py +4 -4
  42. capytaine/tools/lists_of_points.py +3 -3
  43. capytaine/tools/prony_decomposition.py +60 -4
  44. capytaine/tools/symbolic_multiplication.py +30 -2
  45. capytaine/tools/timer.py +64 -0
  46. capytaine-2.3.dist-info/DELVEWHEEL +2 -0
  47. capytaine-2.3.dist-info/METADATA +761 -0
  48. capytaine-2.3.dist-info/RECORD +98 -0
  49. capytaine-2.2.dist-info/DELVEWHEEL +0 -2
  50. capytaine-2.2.dist-info/METADATA +0 -751
  51. capytaine-2.2.dist-info/RECORD +0 -82
  52. {capytaine-2.2.dist-info → capytaine-2.3.dist-info}/LICENSE +0 -0
  53. {capytaine-2.2.dist-info → capytaine-2.3.dist-info}/WHEEL +0 -0
  54. {capytaine-2.2.dist-info → capytaine-2.3.dist-info}/entry_points.txt +0 -0
  55. capytaine.libs/{.load-order-capytaine-2.2 → .load-order-capytaine-2.3} +2 -2
@@ -470,8 +470,9 @@ class Mesh(ClippableMixin, SurfaceIntegralsMixin, Abstract3DObject):
         Other parameters are passed to Poly3DCollection.
         """
         matplotlib = import_optional_dependency("matplotlib")
-        plt = matplotlib.pyplot
-        cm = matplotlib.cm
+        import importlib
+        plt = importlib.import_module("matplotlib.pyplot")
+        cm = importlib.import_module("matplotlib.cm")

         mpl_toolkits = import_optional_dependency("mpl_toolkits", package_name="matplotlib")
         Poly3DCollection = mpl_toolkits.mplot3d.art3d.Poly3DCollection
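
A likely motivation for this change (an inference, not stated in the diff): `import_optional_dependency("matplotlib")` returns the top-level package, and a submodule such as `matplotlib.pyplot` is only reachable as an attribute once it has been imported somewhere; `importlib.import_module` loads it explicitly. A minimal sketch:

    import importlib
    import matplotlib  # importing the top-level package does not auto-import pyplot
    plt = importlib.import_module("matplotlib.pyplot")  # always loads and returns the submodule
    print(plt.__name__)  # "matplotlib.pyplot"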
@@ -760,17 +761,19 @@ class Mesh(ClippableMixin, SurfaceIntegralsMixin, Abstract3DObject):
             z_lid = min(z_lid, z_lid_comp)
         return 0.9*z_lid  # Add a small safety margin

-    def generate_lid(self, z=0.0, faces_max_radius=None):
+    def generate_lid(self, z=0.0, faces_max_radius=None, name=None):
         """
         Return a mesh of the internal free surface of the body.

         Parameters
         ----------
-        z: float
+        z: float, optional
             Vertical position of the lid. Default: 0.0
-        faces_max_radius: float
+        faces_max_radius: float, optional
             resolution of the mesh of the lid.
             Default: mean of hull mesh resolution.
+        name: str, optional
+            A name for the new mesh

         Returns
         -------
@@ -794,8 +797,14 @@ class Mesh(ClippableMixin, SurfaceIntegralsMixin, Abstract3DObject):
         if faces_max_radius is None:
             faces_max_radius = np.mean(clipped_hull_mesh.faces_radiuses)

+        candidate_lid_size = (
+            max(faces_max_radius/2, 1.1*x_span),
+            max(faces_max_radius/2, 1.1*y_span),
+        )
+        # The size of the lid is at least the characteristic length of a face
+
         candidate_lid_mesh = mesh_rectangle(
-            size=(1.1*y_span, 1.1*x_span),  # TODO Fix mesh_rectangle
+            size=(candidate_lid_size[1], candidate_lid_size[0]),  # TODO Fix: Exchange x and y in mesh_rectangle
             faces_max_radius=faces_max_radius,
             center=(x_mean, y_mean, z),
             normal=(0.0, 0.0, -1.0),
@@ -808,7 +817,8 @@ class Mesh(ClippableMixin, SurfaceIntegralsMixin, Abstract3DObject):
         # edges_of_hull_faces.shape = (nb_full_faces, 4, 2)
         lid_points_in_local_coords = candidate_lid_points[:, np.newaxis, np.newaxis, :] - hull_faces[:, :, :]
         # lid_points_in_local_coords.shape = (nb_candidate_lid_points, nb_full_faces, 4, 2)
-        side_of_hull_edges = np.cross(lid_points_in_local_coords, edges_of_hull_faces)
+        side_of_hull_edges = (lid_points_in_local_coords[..., 0] * edges_of_hull_faces[..., 1]
+                              - lid_points_in_local_coords[..., 1] * edges_of_hull_faces[..., 0])
         # side_of_hull_edges.shape = (nb_candidate_lid_points, nb_full_faces, 4)
         point_is_above_panel = np.all(side_of_hull_edges <= 0, axis=-1) | np.all(side_of_hull_edges >= 0, axis=-1)
         # point_is_above_panel.shape = (nb_candidate_lid_points, nb_full_faces)
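
A plausible reason for dropping `np.cross` here (an assumption, the diff gives none): the trailing operands are 2-vectors, and NumPy 2.0 deprecated `np.cross` on pairs of 2D vectors. The explicit formula above returns the same scalar z-component:

    import numpy as np
    u = np.array([1.0, 0.0])
    v = np.array([0.0, 1.0])
    # Explicit 2D cross product (z-component), as in the new code:
    z = u[..., 0]*v[..., 1] - u[..., 1]*v[..., 0]
    print(z)  # 1.0, same value np.cross(u, v) used to return for 2D inputs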
@@ -823,10 +833,13 @@ class Mesh(ClippableMixin, SurfaceIntegralsMixin, Abstract3DObject):

         lid_faces = candidate_lid_mesh.faces[np.all(np.isin(candidate_lid_mesh.faces, needs_lid), axis=-1), :]

+        if name is None:
+            name = "lid for {}".format(self.name)
+
         if len(lid_faces) == 0:
-            return Mesh(None, None, name="lid for {}".format(self.name))
+            return Mesh(None, None, name=name)

-        lid_mesh = Mesh(candidate_lid_mesh.vertices, lid_faces, name="lid for {}".format(self.name))
+        lid_mesh = Mesh(candidate_lid_mesh.vertices, lid_faces, name=name)
         lid_mesh.heal_mesh()

         return lid_mesh
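
A short sketch of the new `name` parameter in use (the mesh and the names below are illustrative, not from the diff):

    import capytaine as cpt

    hull = cpt.mesh_sphere(radius=1.0)
    lid = hull.generate_lid(z=-0.1, name="sphere_lid")
    print(lid.name)  # "sphere_lid" instead of the default "lid for ..."
    body = cpt.FloatingBody(mesh=hull, lid_mesh=lid)  # lid used for irregular frequency removal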
@@ -4,7 +4,11 @@ Based on meshmagick <https://github.com/LHEEA/meshmagick> by François Rongère.
 # Copyright (C) 2017-2019 Matthieu Ancellin, based on the work of François Rongère
 # See LICENSE file at <https://github.com/mancellin/capytaine>

+from functools import reduce
+from itertools import chain
 import numpy as np
+from typing import List
+from numpy.typing import NDArray


 def compute_faces_properties(mesh):
@@ -198,37 +202,67 @@ def compute_connectivity(mesh):
             'f_f': f_f,
             'boundaries': boundaries}

+def faces_in_group(faces: NDArray[np.integer], group: NDArray[np.integer]) -> NDArray[np.bool_]:
+    """Identification of faces with vertices within group.

-def connected_components(mesh):
-    """Returns a list of meshes that each corresponds to a connected component in the original mesh.
-    Assumes the mesh is mostly conformal without duplicate vertices.
+    Parameters
+    ----------
+    faces : NDArray[np.integer]
+        Mesh faces. Expecting a numpy array of shape N_faces x N_vertices_per_face.
+    group : NDArray[np.integer]
+        Group of connected vertices
+
+    Returns
+    -------
+    NDArray[np.bool_]
+        Mask of faces containing vertices from the group
     """
-    from typing import Set, FrozenSet, List
+    return np.any(np.isin(faces, group), axis=1)

-    vertices_components: Set[FrozenSet[int]] = set()
-    for set_of_v_in_face in map(frozenset, mesh.faces):
-        intersecting_components = [c for c in vertices_components if len(c.intersection(set_of_v_in_face)) > 0]
-        if len(intersecting_components) == 0:
-            vertices_components.add(set_of_v_in_face)
+def clustering(faces: NDArray[np.integer]) -> List[NDArray[np.integer]]:
+    """Clustering of vertices per connected faces.
+
+    Parameters
+    ----------
+    faces : NDArray[np.integer]
+        Mesh faces. Expecting a numpy array of shape N_faces x N_vertices_per_face.
+
+    Returns
+    -------
+    list[NDArray[np.integer]]
+        Groups of connected vertices.
+    """
+    vert_groups: list[NDArray[np.integer]] = []
+    mask = np.ones(faces.shape[0], dtype=bool)
+    while np.any(mask):
+        # Consider faces whose vertices are not already identified in a group.
+        # Start new group by considering first face
+        remaining_faces = faces[mask]
+        group = remaining_faces[0]
+        rem_mask = np.ones(remaining_faces.shape[0], dtype=bool)
+        # Iterative update of the vertices group
+        while not np.allclose(new := faces_in_group(remaining_faces, group), rem_mask):
+            group = np.unique(remaining_faces[new])
+            rem_mask = new
         else:
-            for c in intersecting_components:
-                vertices_components.remove(c)
-            vertices_components.add(frozenset.union(set_of_v_in_face, *intersecting_components))
+            group = np.unique(remaining_faces[new])
+        vert_groups.append(group)
+        # Identify faces that have no vertices in current groups
+        mask = ~reduce(np.logical_or, [faces_in_group(faces, group) for group in vert_groups])
+    return vert_groups

-    # Verification
-    for component in vertices_components:
-        assert all(len(component.intersection(c)) == 0 for c in vertices_components if c != component)

+def connected_components(mesh):
+    """Returns a list of meshes that each corresponds to a connected component in the original mesh.
+    Assumes the mesh is mostly conformal without duplicate vertices.
+    """
+    # Get connected vertices
+    vertices_components = clustering(mesh.faces)
+    # Verification
+    if sum(len(group) for group in vertices_components) != len(set(chain.from_iterable(vertices_components))):
+        raise ValueError("Error in connected components clustering. Some elements are duplicated")
     # The components are found. The rest is just about retrieving the faces in each component.
-    vertices_components: List[FrozenSet[int]] = list(vertices_components)
-    faces_components: List[List[int]] = [[] for _ in vertices_components]
-    for i_face, v_in_face in enumerate(mesh.faces):
-        for i_component, v_c in enumerate(vertices_components):
-            if any(v in v_c for v in v_in_face):
-                assert all(v in v_c for v in v_in_face)
-                faces_components[i_component].append(i_face)
-                break
-
+    faces_components = [np.argwhere(faces_in_group(mesh.faces, group)) for group in vertices_components]
     components = [mesh.extract_faces(f) for f in faces_components]
     return components

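A quick way to exercise the rewritten function (a sketch; the two-sphere setup is illustrative):

    import capytaine as cpt
    from capytaine.meshes.properties import connected_components

    # Two disjoint spheres merged into a single mesh
    two_spheres = (cpt.mesh_sphere(center=(0, 0, 0)) + cpt.mesh_sphere(center=(5, 0, 0))).merged()
    components = connected_components(two_spheres)
    print(len(components))  # 2: one mesh per disjoint sphere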
@@ -10,7 +10,7 @@ import numpy as np

 from capytaine.meshes.meshes import Mesh
 from capytaine.meshes.collections import CollectionOfMeshes
-from capytaine.meshes.geometry import Axis, Plane, Oz_axis, inplace_transformation
+from capytaine.meshes.geometry import Axis, Plane, xOy_Plane, Oz_axis, inplace_transformation

 LOG = logging.getLogger(__name__)

@@ -89,7 +89,7 @@ class ReflectionSymmetricMesh(SymmetricMesh):
             "Only meshes with the same symmetry can be joined together."
         assert all(meshes[0].plane == mesh.plane for mesh in meshes), \
             "Only reflection symmetric meshes with the same reflection plane can be joined together."
-        half_mesh = CollectionOfMeshes([mesh.half for mesh in meshes], name=f"half_of_{name}" if name is not None else None)
+        half_mesh = meshes[0].half.join_meshes(*(mesh.half for mesh in meshes[1:]), name=f"half_of_{name}" if name is not None else None)
         return ReflectionSymmetricMesh(half_mesh, plane=meshes[0].plane, name=name)

     @inplace_transformation
@@ -110,6 +110,15 @@ class ReflectionSymmetricMesh(SymmetricMesh):
         CollectionOfMeshes.mirror(self, plane)
         return self

+    def generate_lid(self, z=0.0, faces_max_radius=None, name=None):
+        if name is None:
+            name = "lid for {}".format(self.name)
+        return ReflectionSymmetricMesh(self.half.generate_lid(z, faces_max_radius), self.plane, name=name)
+
+    def extract_lid(self, plane=xOy_Plane):
+        hull, lid = self.half.extract_lid(plane)
+        return ReflectionSymmetricMesh(hull, self.plane), ReflectionSymmetricMesh(lid, self.plane)
+

 class TranslationalSymmetricMesh(SymmetricMesh):
     """A mesh with a repeating pattern by translation.
@@ -1,6 +1,6 @@
 """Computation of the Kochin function."""
 # Copyright (C) 2017-2019 Matthieu Ancellin
-# See LICENSE file at <https://github.com/mancellin/capytaine>
+# See LICENSE file at <https://github.com/capytaine/capytaine>

 import logging
 import numpy as np
@@ -29,9 +29,9 @@ def compute_kochin(result, theta, ref_point=(0.0, 0.0)):
         LOG.warning("Kochin functions with forward speed have never been validated.")

     if result.sources is None:
-        raise Exception(f"""The values of the sources of {result} cannot be found.
-        They probably have not been stored by the solver because the option keep_details=True has not been set.
-        Please re-run the resolution with this option.""")
+        raise ValueError(f"""The values of the sources of {result} cannot be found.
+        They have not been stored by the solver, either because the direct method has been used or because the option keep_details=True has not been set.
+        Please re-run the resolution with `method='indirect'` and `keep_details=True`.""")

     k = result.wavenumber
     h = result.water_depth
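
A sketch of a workflow that satisfies the new error message (standard capytaine calls; the option names follow the message above):

    import numpy as np
    import capytaine as cpt
    from capytaine.post_pro.kochin import compute_kochin

    body = cpt.FloatingBody(mesh=cpt.mesh_sphere(), dofs=cpt.rigid_body_dofs()).immersed_part()
    problem = cpt.RadiationProblem(body=body, omega=1.0, radiating_dof="Heave")
    result = cpt.BEMSolver().solve(problem, keep_details=True, method="indirect")
    theta = np.linspace(0.0, 2*np.pi, 36)
    print(compute_kochin(result, theta))  # sources are available, so no ValueError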
@@ -1,7 +1,7 @@
 import numpy as np
 from capytaine.bodies import FloatingBody
 from capytaine.post_pro.free_surfaces import FreeSurface
-from capytaine.meshes import Mesh, CollectionOfMeshes
+from capytaine.meshes.mesh_like_protocol import MeshLike


 def _normalize_points(points, keep_mesh=False):
@@ -11,7 +11,7 @@ def _normalize_points(points, keep_mesh=False):
     else:
         return points.mesh.faces_centers, (points.mesh.nb_faces,)

-    if isinstance(points, (Mesh, CollectionOfMeshes)):
+    if isinstance(points, MeshLike):
         if keep_mesh:
             return points, (points.nb_faces,)
         else:
@@ -41,7 +41,7 @@ def _normalize_free_surface_points(points, keep_mesh=False):
     if keep_mesh and isinstance(points, (FloatingBody, FreeSurface)):
         return points.mesh, (points.mesh.nb_faces,)

-    if keep_mesh and isinstance(points, (Mesh, CollectionOfMeshes)):
+    if keep_mesh and isinstance(points, MeshLike):
         return points, (points.nb_faces,)

     points, output_shape = _normalize_points(points, keep_mesh)
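
`MeshLike` comes from the new capytaine/meshes/mesh_like_protocol.py listed above. For `isinstance` checks like these to work, the protocol is presumably declared runtime-checkable; a minimal sketch of the idea, with a hypothetical stand-in protocol:

    from typing import Protocol, runtime_checkable

    @runtime_checkable
    class MeshLikeSketch(Protocol):  # stand-in, not capytaine's actual definition
        @property
        def nb_faces(self) -> int: ...
        @property
        def faces_centers(self): ...

    # Any object exposing these attributes now passes the check,
    # not only Mesh and CollectionOfMeshes instances.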
@@ -1,8 +1,8 @@
 """Prony decomposition: tool to approximate a function as a sum of exponentials.
 Used in particular in the finite depth Green function.
 """
-# Copyright (C) 2017-2019 Matthieu Ancellin
-# See LICENSE file at <https://github.com/mancellin/capytaine>
+# Copyright (C) 2017-2024 Matthieu Ancellin
+# See LICENSE file at <https://github.com/capytaine/capytaine>

 import logging

@@ -12,6 +12,7 @@ from scipy.optimize import curve_fit
 from scipy.linalg import toeplitz

 LOG = logging.getLogger(__name__)
+RNG = np.random.default_rng()


 def exponential_decomposition(X, F, m):
@@ -66,8 +67,8 @@ def exponential_decomposition(X, F, m):


 def error_exponential_decomposition(X, F, a, lamda):
-    """Compare exponential decomposition defined by the coefficients a and lamda to the reference
-    values in F.
+    """Mean square error of the exponential decomposition defined by the
+    coefficients a and lamda with respect to the reference values in F.

     Parameters
     ----------
@@ -92,3 +93,58 @@ def error_exponential_decomposition(X, F, a, lamda):
         return np.sum(a * np.exp(lamda*x), axis=0)

     return np.square(f(X) - F).mean()
+
+
+class PronyDecompositionFailure(Exception):
+    pass
+
+
+def find_best_exponential_decomposition(f, x_min, x_max, n_exp_range, *, tol=1e-4, noise_on_domain_points_std=0.01):
+    """Tries to construct an exponential decomposition of the function f on the
+    domain [x_min, x_max] by testing the numbers of exponentials in n_exp_range.
+
+    Parameters
+    ----------
+    f: callable
+        The function ℝ→ℝ to be approximated.
+        Should support vectorized calls (that is, passing a vector of inputs
+        and getting the vector of corresponding outputs).
+    x_min, x_max: floats
+        The bounds of the input domain in which f should be approximated
+    n_exp_range: iterable of ints
+        The decomposition sizes that will be tested
+    tol: float, optional
+        The target mean square error.
+    noise_on_domain_points_std: float, optional
+        Introduces some random variability on the points where the function is evaluated.
+        Set this parameter to zero to disable randomness.
+
+    """
+    # Try different numbers of exponentials to construct the decomposition.
+    for n_exp in n_exp_range:
+
+        # f might be ill-defined at some single specific values
+        # (for the use-case of delhommeau.py, it is when x = kh exactly).
+        # Thus we slightly randomize the range of evaluation points for the Prony decomposition.
+        # This way, if one of the evaluation points hits the singular point, it will most likely not hit it again at the next iteration.
+        x_max_iter = (1 + noise_on_domain_points_std*RNG.uniform())*x_max
+
+        try:
+            # The coefficients are computed on a resolution of 4*n_exp+1 ...
+            X = np.linspace(x_min, x_max_iter, 4*n_exp+1)
+            a, lamda = exponential_decomposition(X, f(X), n_exp)
+
+            # ... and they are evaluated on a finer discretization.
+            X = np.linspace(x_min, x_max_iter, 8*n_exp+1)
+            if error_exponential_decomposition(X, f(X), a, lamda) < tol:
+                return a, lamda
+        except Exception:
+            # If something bad happened while computing the decomposition,
+            # try the next one.
+            continue
+
+    raise PronyDecompositionFailure(
+        "No suitable Prony decomposition has been found in "
+        f"[{x_min}, {x_max}] for tol={tol} "
+        f"using a number of terms in {n_exp_range}."
+    )
@@ -1,3 +1,13 @@
+"""This module is used for the handling of zero and infinite frequencies.
+In these cases, the magnitudes that the solver has to manipulate are in the form of ω times a non-zero term.
+Instead of evaluating this multiplication as zero or infinity, we keep it symbolic using the class defined here.
+
+The frequency can be provided to the solver as something like
+`SymbolicMultiplication("0", 1.0)` (that is, zero) and the solver will return an
+output of the form `SymbolicMultiplication("0", np.array(...))`
+(which is also actually zero, except we may be interested in the non-zero array).
+"""
+
 import numpy as np
 from functools import wraps, total_ordering

@@ -6,6 +16,8 @@ class SymbolicMultiplication:
     def __init__(self, symbol, value=1.0):
         self.symbol = symbol
         self.value = value
+        if hasattr(value, "shape"):
+            self.shape = value.shape  # When wrapping Numpy arrays

     def __format__(self, format_spec):
         return f"{self.symbol}×{self.value.__format__(format_spec)}"
@@ -30,6 +42,9 @@ class SymbolicMultiplication:
     def __radd__(self, x):
         return x + self._concretize()

+    def __neg__(self):
+        return SymbolicMultiplication(self.symbol, -self.value)
+
     def __mul__(self, x):
         return SymbolicMultiplication(self.symbol, self.value * x)

@@ -87,17 +102,30 @@ class SymbolicMultiplication:

     def __float__(self):
         if self.symbol == "0":
-            return 0.0
+            return 0.0 * float(self.value)
         elif self.symbol == "∞":
-            return np.inf
+            return np.inf * float(self.value)
         else:
             raise NotImplementedError

     def reshape(self, *args):
         return SymbolicMultiplication(self.symbol, self.value.reshape(*args))

+    def sum(self, *args, **kwargs):
+        return SymbolicMultiplication(self.symbol, self.value.sum(*args, **kwargs))
+
+    @property
+    def T(self):
+        return SymbolicMultiplication(self.symbol, self.value.T)
+

 def supporting_symbolic_multiplication(f):
+    """
+    When this decorator is applied to a function, the function can take a
+    `SymbolicMultiplication` object as input. The function is applied to the
+    `value` part of the `SymbolicMultiplication` without modifying the
+    `symbol`.
+    """
     @wraps(f)
     def wrapped_f(a, x):
         if hasattr(x, 'symbol'):
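
The behaviour described in the new module docstring can be seen in a small sketch:

    import numpy as np
    from capytaine.tools.symbolic_multiplication import SymbolicMultiplication

    omega = SymbolicMultiplication("0", 1.0)   # a symbolic zero frequency
    out = omega * np.array([1.0, 2.0, 3.0])    # still symbolic: "0" × array
    print(out.shape)       # (3,), thanks to the new shape attribute
    print(float(omega))    # 0.0, via the updated __float__
    print((-omega).value)  # -1.0, via the new __neg__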
@@ -0,0 +1,64 @@
+"""A simple timer class used to measure the time spent in various parts of the BEM solver."""
+
+from functools import wraps
+import time
+
+class Timer:
+    """A simple timer class that can be used as a context manager or as a decorator using the `wraps_function` method.
+
+    Example
+    -------
+    timer = Timer()
+    with timer:
+        sleep(1.0)
+
+    print(timer.total)  # 1.0...
+
+    @timer.wraps_function
+    def my_function():
+        sleep(0.5)
+
+    my_function()
+    print(timer.total)  # 1.5...
+    my_function()
+    print(timer.total)  # 2.0...
+
+    print(timer.timings)  # [1.0, 0.5, 0.5]
+    """
+
+    def __init__(self, timings=None):
+        if timings is None:
+            self.timings = []
+        else:
+            self.timings = timings
+
+    def __repr__(self):
+        return f"Timer({self.timings})"
+
+    @property
+    def nb_timings(self):
+        return len(self.timings)
+
+    @property
+    def total(self):
+        return sum(self.timings)
+
+    @property
+    def mean(self):
+        if self.nb_timings == 0:
+            return float('nan')
+        else:
+            return self.total/self.nb_timings
+
+    def __enter__(self):
+        self.start_time = time.perf_counter()
+
+    def __exit__(self, *exc):
+        self.timings.append(time.perf_counter() - self.start_time)
+
+    def wraps_function(self, f):
+        @wraps(f)
+        def wrapped_f(*args, **kwargs):
+            with self:
+                return f(*args, **kwargs)
+        return wrapped_f
@@ -0,0 +1,2 @@
+Version: 1.10.1
+Arguments: ['C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-9rerwt60\\cp39-win_amd64\\build\\venv\\Scripts\\delvewheel', 'repair', '-w', 'C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-9rerwt60\\cp39-win_amd64\\repaired_wheel', 'C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-9rerwt60\\cp39-win_amd64\\built_wheel\\capytaine-2.3-cp39-cp39-win_amd64.whl', '--no-mangle-all']