capytaine 2.2.1__cp39-cp39-win_amd64.whl → 2.3.1__cp39-cp39-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. capytaine/__about__.py +1 -1
  2. capytaine/__init__.py +7 -6
  3. capytaine/bem/airy_waves.py +7 -2
  4. capytaine/bem/problems_and_results.py +91 -39
  5. capytaine/bem/solver.py +128 -40
  6. capytaine/bodies/bodies.py +46 -18
  7. capytaine/bodies/predefined/rectangles.py +2 -0
  8. capytaine/green_functions/FinGreen3D/.gitignore +1 -0
  9. capytaine/green_functions/FinGreen3D/FinGreen3D.f90 +3589 -0
  10. capytaine/green_functions/FinGreen3D/LICENSE +165 -0
  11. capytaine/green_functions/FinGreen3D/Makefile +16 -0
  12. capytaine/green_functions/FinGreen3D/README.md +24 -0
  13. capytaine/green_functions/FinGreen3D/test_program.f90 +39 -0
  14. capytaine/green_functions/LiangWuNoblesse/.gitignore +1 -0
  15. capytaine/green_functions/LiangWuNoblesse/LICENSE +504 -0
  16. capytaine/green_functions/LiangWuNoblesse/LiangWuNoblesseWaveTerm.f90 +751 -0
  17. capytaine/green_functions/LiangWuNoblesse/Makefile +16 -0
  18. capytaine/green_functions/LiangWuNoblesse/README.md +2 -0
  19. capytaine/green_functions/LiangWuNoblesse/test_program.f90 +28 -0
  20. capytaine/green_functions/abstract_green_function.py +55 -3
  21. capytaine/green_functions/delhommeau.py +205 -130
  22. capytaine/green_functions/hams.py +204 -0
  23. capytaine/green_functions/libs/Delhommeau_float32.cp39-win_amd64.dll.a +0 -0
  24. capytaine/green_functions/libs/Delhommeau_float32.cp39-win_amd64.pyd +0 -0
  25. capytaine/green_functions/libs/Delhommeau_float64.cp39-win_amd64.dll.a +0 -0
  26. capytaine/green_functions/libs/Delhommeau_float64.cp39-win_amd64.pyd +0 -0
  27. capytaine/io/bemio.py +14 -2
  28. capytaine/io/mesh_loaders.py +1 -1
  29. capytaine/io/wamit.py +479 -0
  30. capytaine/io/xarray.py +261 -117
  31. capytaine/matrices/linear_solvers.py +1 -1
  32. capytaine/meshes/clipper.py +1 -0
  33. capytaine/meshes/collections.py +19 -1
  34. capytaine/meshes/mesh_like_protocol.py +37 -0
  35. capytaine/meshes/meshes.py +28 -8
  36. capytaine/meshes/symmetric.py +89 -10
  37. capytaine/post_pro/kochin.py +4 -4
  38. capytaine/tools/lists_of_points.py +3 -3
  39. capytaine/tools/prony_decomposition.py +60 -4
  40. capytaine/tools/symbolic_multiplication.py +30 -4
  41. capytaine/tools/timer.py +66 -0
  42. capytaine-2.3.1.dist-info/DELVEWHEEL +2 -0
  43. {capytaine-2.2.1.dist-info → capytaine-2.3.1.dist-info}/METADATA +6 -10
  44. {capytaine-2.2.1.dist-info → capytaine-2.3.1.dist-info}/RECORD +48 -32
  45. capytaine-2.2.1.dist-info/DELVEWHEEL +0 -2
  46. {capytaine-2.2.1.dist-info → capytaine-2.3.1.dist-info}/LICENSE +0 -0
  47. {capytaine-2.2.1.dist-info → capytaine-2.3.1.dist-info}/WHEEL +0 -0
  48. {capytaine-2.2.1.dist-info → capytaine-2.3.1.dist-info}/entry_points.txt +0 -0
  49. capytaine.libs/{.load-order-capytaine-2.2.1 → .load-order-capytaine-2.3.1} +2 -2
@@ -644,9 +644,18 @@ class Mesh(ClippableMixin, SurfaceIntegralsMixin, Abstract3DObject):
644
644
  # Combine meshes #
645
645
  ####################
646
646
 
647
- def join_meshes(*meshes, name=None):
647
+ def join_meshes(*meshes, name=None, return_masks=False):
648
648
  from capytaine.meshes.collections import CollectionOfMeshes
649
- return CollectionOfMeshes(meshes, name=name).merged()
649
+ coll = CollectionOfMeshes(meshes, name=name)
650
+ if return_masks:
651
+ masks = []
652
+ for i_mesh in range(len(meshes)):
653
+ mask = np.full((coll.nb_faces,), False)
654
+ mask[coll.indices_of_mesh(i_mesh)] = True
655
+ masks.append(mask)
656
+ return coll.merged(), masks
657
+ else:
658
+ return coll.merged()
650
659
 
651
660
  def __add__(self, mesh_to_add) -> 'Mesh':
652
661
  return self.join_meshes(mesh_to_add)
@@ -761,17 +770,19 @@ class Mesh(ClippableMixin, SurfaceIntegralsMixin, Abstract3DObject):
761
770
  z_lid = min(z_lid, z_lid_comp)
762
771
  return 0.9*z_lid # Add a small safety margin
763
772
 
764
- def generate_lid(self, z=0.0, faces_max_radius=None):
773
+ def generate_lid(self, z=0.0, faces_max_radius=None, name=None):
765
774
  """
766
775
  Return a mesh of the internal free surface of the body.
767
776
 
768
777
  Parameters
769
778
  ----------
770
- z: float
779
+ z: float, optional
771
780
  Vertical position of the lid. Default: 0.0
772
- faces_max_radius: float
781
+ faces_max_radius: float, optional
773
782
  resolution of the mesh of the lid.
774
783
  Default: mean of hull mesh resolution.
784
+ name: str, optional
785
+ A name for the new mesh
775
786
 
776
787
  Returns
777
788
  -------
@@ -795,8 +806,14 @@ class Mesh(ClippableMixin, SurfaceIntegralsMixin, Abstract3DObject):
795
806
  if faces_max_radius is None:
796
807
  faces_max_radius = np.mean(clipped_hull_mesh.faces_radiuses)
797
808
 
809
+ candidate_lid_size = (
810
+ max(faces_max_radius/2, 1.1*x_span),
811
+ max(faces_max_radius/2, 1.1*y_span),
812
+ )
813
+ # The size of the lid is at least the characteristic length of a face
814
+
798
815
  candidate_lid_mesh = mesh_rectangle(
799
- size=(1.1*y_span, 1.1*x_span), # TODO Fix mesh_rectangle
816
+ size=(candidate_lid_size[1], candidate_lid_size[0]), # TODO Fix: Exchange x and y in mesh_rectangle
800
817
  faces_max_radius=faces_max_radius,
801
818
  center=(x_mean, y_mean, z),
802
819
  normal=(0.0, 0.0, -1.0),
@@ -825,10 +842,13 @@ class Mesh(ClippableMixin, SurfaceIntegralsMixin, Abstract3DObject):
825
842
 
826
843
  lid_faces = candidate_lid_mesh.faces[np.all(np.isin(candidate_lid_mesh.faces, needs_lid), axis=-1), :]
827
844
 
845
+ if name is None:
846
+ name = "lid for {}".format(self.name)
847
+
828
848
  if len(lid_faces) == 0:
829
- return Mesh(None, None, name="lid for {}".format(self.name))
849
+ return Mesh(None, None, name=name)
830
850
 
831
- lid_mesh = Mesh(candidate_lid_mesh.vertices, lid_faces, name="lid for {}".format(self.name))
851
+ lid_mesh = Mesh(candidate_lid_mesh.vertices, lid_faces, name=name)
832
852
  lid_mesh.heal_mesh()
833
853
 
834
854
  return lid_mesh
@@ -10,7 +10,7 @@ import numpy as np
10
10
 
11
11
  from capytaine.meshes.meshes import Mesh
12
12
  from capytaine.meshes.collections import CollectionOfMeshes
13
- from capytaine.meshes.geometry import Axis, Plane, Oz_axis, inplace_transformation
13
+ from capytaine.meshes.geometry import Axis, Plane, xOy_Plane, Oz_axis, inplace_transformation
14
14
 
15
15
  LOG = logging.getLogger(__name__)
16
16
 
@@ -84,13 +84,27 @@ class ReflectionSymmetricMesh(SymmetricMesh):
84
84
  def __deepcopy__(self, *args):
85
85
  return ReflectionSymmetricMesh(self.half.copy(), self.plane, name=self.name)
86
86
 
87
- def join_meshes(*meshes, name=None):
87
+ def join_meshes(*meshes, name=None, return_masks=False):
88
88
  assert all(isinstance(mesh, ReflectionSymmetricMesh) for mesh in meshes), \
89
89
  "Only meshes with the same symmetry can be joined together."
90
90
  assert all(meshes[0].plane == mesh.plane for mesh in meshes), \
91
91
  "Only reflection symmetric meshes with the same reflection plane can be joined together."
92
- half_mesh = CollectionOfMeshes([mesh.half for mesh in meshes], name=f"half_of_{name}" if name is not None else None)
93
- return ReflectionSymmetricMesh(half_mesh, plane=meshes[0].plane, name=name)
92
+ if not return_masks:
93
+ name = name=f"half_of_{name}" if name is not None else None
94
+ half_mesh = meshes[0].half.join_meshes(
95
+ *(mesh.half for mesh in meshes[1:]),
96
+ name=name, return_masks=False
97
+ )
98
+ return ReflectionSymmetricMesh(half_mesh, plane=meshes[0].plane, name=name)
99
+ else:
100
+ name = name=f"half_of_{name}" if name is not None else None
101
+ half_mesh, half_masks = meshes[0].half.join_meshes(
102
+ *(mesh.half for mesh in meshes[1:]),
103
+ name=name, return_masks=True
104
+ )
105
+ masks = [np.concatenate([half_mask, half_mask]) for half_mask in half_masks]
106
+ joined = ReflectionSymmetricMesh(half_mesh, plane=meshes[0].plane, name=name)
107
+ return joined, masks
94
108
 
95
109
  @inplace_transformation
96
110
  def translate(self, vector):
@@ -110,6 +124,15 @@ class ReflectionSymmetricMesh(SymmetricMesh):
110
124
  CollectionOfMeshes.mirror(self, plane)
111
125
  return self
112
126
 
127
+ def generate_lid(self, z=0.0, faces_max_radius=None, name=None):
128
+ if name is None:
129
+ name = "lid for {}".format(self.name)
130
+ return ReflectionSymmetricMesh(self.half.generate_lid(z, faces_max_radius), self.plane, name=name)
131
+
132
+ def extract_lid(self, plane=xOy_Plane):
133
+ hull, lid = self.half.extract_lid(plane)
134
+ return ReflectionSymmetricMesh(hull, self.plane), ReflectionSymmetricMesh(lid, self.plane)
135
+
113
136
 
114
137
  class TranslationalSymmetricMesh(SymmetricMesh):
115
138
  """A mesh with a repeating pattern by translation.
@@ -194,15 +217,43 @@ class TranslationalSymmetricMesh(SymmetricMesh):
194
217
  CollectionOfMeshes.mirror(self, plane)
195
218
  return self
196
219
 
197
- def join_meshes(*meshes, name=None):
220
+ def join_meshes(*meshes, name=None, return_masks=False):
198
221
  assert all(isinstance(mesh, TranslationalSymmetricMesh) for mesh in meshes), \
199
222
  "Only meshes with the same symmetry can be joined together."
200
223
  assert all(np.allclose(meshes[0].translation, mesh.translation) for mesh in meshes), \
201
224
  "Only translation symmetric meshes with the same translation vector can be joined together."
202
225
  assert all(len(meshes[0]) == len(mesh) for mesh in meshes), \
203
226
  "Only symmetric meshes with the same number of elements can be joined together."
204
- mesh_strip = CollectionOfMeshes([mesh.first_slice for mesh in meshes], name=f"strip_of_{name}" if name is not None else None)
205
- return TranslationalSymmetricMesh(mesh_strip, translation=meshes[0].translation, nb_repetitions=len(meshes[0]) - 1, name=name)
227
+ if not return_masks:
228
+ strip_name = f"strip_of_{name}" if name is not None else None
229
+ mesh_strip = meshes[0].first_slice.join_meshes(
230
+ *(mesh.first_slice for mesh in meshes[1:]),
231
+ name=strip_name,
232
+ return_masks=False
233
+ )
234
+ return TranslationalSymmetricMesh(
235
+ mesh_strip,
236
+ translation=meshes[0].translation,
237
+ nb_repetitions=len(meshes[0]) - 1,
238
+ name=name
239
+ )
240
+ else:
241
+ strip_name = f"strip_of_{name}" if name is not None else None
242
+ mesh_strip, strip_masks = meshes[0].first_slice.join_meshes(
243
+ *(mesh.first_slice for mesh in meshes[1:]),
244
+ name=strip_name,
245
+ return_masks=True
246
+ )
247
+ joined = TranslationalSymmetricMesh(
248
+ mesh_strip,
249
+ translation=meshes[0].translation,
250
+ nb_repetitions=len(meshes[0]) - 1,
251
+ name=name
252
+ )
253
+ masks = [np.concatenate([
254
+ strip_mask for _ in range(len(meshes[0]))
255
+ ]) for strip_mask in strip_masks]
256
+ return joined, masks
206
257
 
207
258
 
208
259
  def build_regular_array_of_meshes(base_mesh, distance, nb_bodies):
@@ -354,15 +405,43 @@ class AxialSymmetricMesh(SymmetricMesh):
354
405
  def __deepcopy__(self, *args):
355
406
  return AxialSymmetricMesh(self.first_slice.copy(), axis=self.axis.copy(), nb_repetitions=len(self) - 1, name=self.name)
356
407
 
357
- def join_meshes(*meshes, name=None):
408
+ def join_meshes(*meshes, name=None, return_masks=False):
358
409
  assert all(isinstance(mesh, AxialSymmetricMesh) for mesh in meshes), \
359
410
  "Only meshes with the same symmetry can be joined together."
360
411
  assert all(meshes[0].axis == mesh.axis for mesh in meshes), \
361
412
  "Only axisymmetric meshes with the same symmetry axis can be joined together."
362
413
  assert all(len(meshes[0]) == len(mesh) for mesh in meshes), \
363
414
  "Only axisymmetric meshes with the same number of elements can be joined together."
364
- mesh_slice = CollectionOfMeshes([mesh.first_slice for mesh in meshes], name=f"slice_of_{name}" if name is not None else None)
365
- return AxialSymmetricMesh(mesh_slice, axis=meshes[0].axis, nb_repetitions=len(meshes[0]) - 1, name=name)
415
+ if not return_masks:
416
+ slice_name = f"slice_of_{name}" if name is not None else None
417
+ mesh_slice = meshes[0].first_slice.join_meshes(
418
+ *(mesh.first_slice for mesh in meshes[1:]),
419
+ name=slice_name,
420
+ return_masks=False
421
+ )
422
+ return AxialSymmetricMesh(
423
+ mesh_slice,
424
+ axis=meshes[0].axis,
425
+ nb_repetitions=len(meshes[0]) - 1,
426
+ name=name
427
+ )
428
+ else:
429
+ slice_name = f"slice_of_{name}" if name is not None else None
430
+ mesh_slice, slice_masks = meshes[0].first_slice.join_meshes(
431
+ *(mesh.first_slice for mesh in meshes[1:]),
432
+ name=slice_name,
433
+ return_masks=True
434
+ )
435
+ joined = AxialSymmetricMesh(
436
+ mesh_slice,
437
+ axis=meshes[0].axis,
438
+ nb_repetitions=len(meshes[0]) - 1,
439
+ name=name
440
+ )
441
+ masks = [np.concatenate([
442
+ slice_mask for _ in range(len(meshes[0]))
443
+ ]) for slice_mask in slice_masks]
444
+ return joined, masks
366
445
 
367
446
  @inplace_transformation
368
447
  def translate(self, vector):
@@ -1,6 +1,6 @@
1
1
  """Computation of the Kochin function."""
2
2
  # Copyright (C) 2017-2019 Matthieu Ancellin
3
- # See LICENSE file at <https://github.com/mancellin/capytaine>
3
+ # See LICENSE file at <https://github.com/capytaine/capytaine>
4
4
 
5
5
  import logging
6
6
  import numpy as np
@@ -29,9 +29,9 @@ def compute_kochin(result, theta, ref_point=(0.0, 0.0)):
29
29
  LOG.warning("Kochin functions with forward speed have never been validated.")
30
30
 
31
31
  if result.sources is None:
32
- raise Exception(f"""The values of the sources of {result} cannot been found.
33
- They probably have not been stored by the solver because the option keep_details=True have not been set.
34
- Please re-run the resolution with this option.""")
32
+ raise ValueError(f"""The values of the sources of {result} cannot been found.
33
+ They have not been stored by the solver either because the direct method has been used or the option keep_details=True have not been set.
34
+ Please re-run the resolution with `method='indirect'` and `keep_details=True`.""")
35
35
 
36
36
  k = result.wavenumber
37
37
  h = result.water_depth
@@ -1,7 +1,7 @@
1
1
  import numpy as np
2
2
  from capytaine.bodies import FloatingBody
3
3
  from capytaine.post_pro.free_surfaces import FreeSurface
4
- from capytaine.meshes import Mesh, CollectionOfMeshes
4
+ from capytaine.meshes.mesh_like_protocol import MeshLike
5
5
 
6
6
 
7
7
  def _normalize_points(points, keep_mesh=False):
@@ -11,7 +11,7 @@ def _normalize_points(points, keep_mesh=False):
11
11
  else:
12
12
  return points.mesh.faces_centers, (points.mesh.nb_faces,)
13
13
 
14
- if isinstance(points, (Mesh, CollectionOfMeshes)):
14
+ if isinstance(points, MeshLike):
15
15
  if keep_mesh:
16
16
  return points, (points.nb_faces,)
17
17
  else:
@@ -41,7 +41,7 @@ def _normalize_free_surface_points(points, keep_mesh=False):
41
41
  if keep_mesh and isinstance(points, (FloatingBody, FreeSurface)):
42
42
  return points.mesh, (points.mesh.nb_faces,)
43
43
 
44
- if keep_mesh and isinstance(points, (Mesh, CollectionOfMeshes)):
44
+ if keep_mesh and isinstance(points, MeshLike):
45
45
  return points, (points.nb_faces,)
46
46
 
47
47
  points, output_shape = _normalize_points(points, keep_mesh)
@@ -1,8 +1,8 @@
1
1
  """Prony decomposition: tool to approximate a function as a sum of exponentials.
2
2
  Used in particular in the finite depth Green function.
3
3
  """
4
- # Copyright (C) 2017-2019 Matthieu Ancellin
5
- # See LICENSE file at <https://github.com/mancellin/capytaine>
4
+ # Copyright (C) 2017-2024 Matthieu Ancellin
5
+ # See LICENSE file at <https://github.com/capytaine/capytaine>
6
6
 
7
7
  import logging
8
8
 
@@ -12,6 +12,7 @@ from scipy.optimize import curve_fit
12
12
  from scipy.linalg import toeplitz
13
13
 
14
14
  LOG = logging.getLogger(__name__)
15
+ RNG = np.random.default_rng()
15
16
 
16
17
 
17
18
  def exponential_decomposition(X, F, m):
@@ -66,8 +67,8 @@ def exponential_decomposition(X, F, m):
66
67
 
67
68
 
68
69
  def error_exponential_decomposition(X, F, a, lamda):
69
- """Compare exponential decomposition defined by the coefficients a and lamda to the reference
70
- values in F.
70
+ """Mean square error of the exponential decomposition defined by the
71
+ coefficients a and lamda with respect to the reference values in F.
71
72
 
72
73
  Parameters
73
74
  ----------
@@ -92,3 +93,58 @@ def error_exponential_decomposition(X, F, a, lamda):
92
93
  return np.sum(a * np.exp(lamda*x), axis=0)
93
94
 
94
95
  return np.square(f(X) - F).mean()
96
+
97
+
98
+ class PronyDecompositionFailure(Exception):
99
+ pass
100
+
101
+
102
+ def find_best_exponential_decomposition(f, x_min, x_max, n_exp_range, *, tol=1e-4, noise_on_domain_points_std=0.01):
103
+ """Tries to construct an exponential decompositoin of the function f on the
104
+ domain [x_min, x_max] by testing the number of exponentials in n_exp_range.
105
+
106
+ Parameters
107
+ ----------
108
+ f: callable
109
+ The function ℝ→ℝ to be approximated.
110
+ Should support vectorized calls (that is passing a vector of inputs
111
+ and get the vector of corresponding outputs)
112
+ x_min, x_max: floats
113
+ The bounds of the domain of input in which f should be approximated
114
+ n_exp_range: iterable of ints
115
+ The decomposition sizes that will be tested
116
+ tol: float, optional
117
+ The target mean square error.
118
+ noise_on_domain_points_std: float, optional
119
+ Introduces some random variability on the points where the function is evaluated.
120
+ Set this parameter to zero to disable randomness.
121
+
122
+ """
123
+ # Try different range of evaluation points to construct the decomposition.
124
+ for n_exp in n_exp_range:
125
+
126
+ # f might be ill-defined at some single specific values
127
+ # (for the use-case of delhommeau.py, it is when x = kh exactly).
128
+ # Thus we slightly randomize the range of evaluation points for the Prony decomposition.
129
+ # This way, if one of the evaluation points hits the singular point, it will most likely not hit it again at the next iteration.
130
+ x_max_iter = (1 + noise_on_domain_points_std*RNG.uniform())*x_max
131
+
132
+ try:
133
+ # The coefficients are computed on a resolution of 4*n_exp+1 ...
134
+ X = np.linspace(x_min, x_max_iter, 4*n_exp+1)
135
+ a, lamda = exponential_decomposition(X, f(X), n_exp)
136
+
137
+ # ... and they are evaluated on a finer discretization.
138
+ X = np.linspace(x_min, x_max_iter, 8*n_exp+1)
139
+ if error_exponential_decomposition(X, f(X), a, lamda) < tol:
140
+ return a, lamda
141
+ except Exception:
142
+ # If something bad happened while computing the decomposition, try
143
+ # the next one.
144
+ continue
145
+
146
+ raise PronyDecompositionFailure(
147
+ "No suitable Prony decomposition has been found in "
148
+ f"[{x_min}, {x_max}] for tol={tol} "
149
+ f"using a number of terms in {n_exp_range}."
150
+ )
@@ -11,11 +11,12 @@ output of the form `SymbolicMultiplication("0", np.array(...))`
11
11
  import numpy as np
12
12
  from functools import wraps, total_ordering
13
13
 
14
- @total_ordering
15
14
  class SymbolicMultiplication:
16
15
  def __init__(self, symbol, value=1.0):
17
16
  self.symbol = symbol
18
17
  self.value = value
18
+ if hasattr(value, "shape"):
19
+ self.shape = value.shape # When wrapping Numpy arrays
19
20
 
20
21
  def __format__(self, format_spec):
21
22
  return f"{self.symbol}×{self.value.__format__(format_spec)}"
@@ -40,6 +41,9 @@ class SymbolicMultiplication:
40
41
  def __radd__(self, x):
41
42
  return x + self._concretize()
42
43
 
44
+ def __neg__(self):
45
+ return SymbolicMultiplication(self.symbol, -self.value)
46
+
43
47
  def __mul__(self, x):
44
48
  return SymbolicMultiplication(self.symbol, self.value * x)
45
49
 
@@ -77,11 +81,26 @@ class SymbolicMultiplication:
77
81
  def __getitem__(self, item):
78
82
  return SymbolicMultiplication(self.symbol, self.value[item])
79
83
 
80
- def __eq__(self, x):
81
- return float(self) == x
84
+ def __setitem__(self, item, val):
85
+ if isinstance(val, SymbolicMultiplication) and self.symbol == val.symbol:
86
+ self.value.__setitem__(item, val.value)
87
+ else:
88
+ raise NotImplementedError
82
89
 
83
90
  def __lt__(self, x):
84
- return float(self) < x
91
+ return self._concretize() < x
92
+
93
+ def __le__(self, x):
94
+ return self._concretize() <= x
95
+
96
+ def __eq__(self, x):
97
+ return self._concretize() == x
98
+
99
+ def __ge__(self, x):
100
+ return self._concretize() >= x
101
+
102
+ def __gt__(self, x):
103
+ return self._concretize() > x
85
104
 
86
105
  def __hash__(self):
87
106
  return hash((self.symbol, self.value))
@@ -106,6 +125,13 @@ class SymbolicMultiplication:
106
125
  def reshape(self, *args):
107
126
  return SymbolicMultiplication(self.symbol, self.value.reshape(*args))
108
127
 
128
+ def sum(self, *args, **kwargs):
129
+ return SymbolicMultiplication(self.symbol, self.value.sum(*args, **kwargs))
130
+
131
+ @property
132
+ def T(self):
133
+ return SymbolicMultiplication(self.symbol, self.value.T)
134
+
109
135
 
110
136
  def supporting_symbolic_multiplication(f):
111
137
  """
@@ -0,0 +1,66 @@
1
+ """A simple timer class used to measure the time spent in various parts of the BEM solver."""
2
+
3
+ from functools import wraps
4
+ import time
5
+
6
+ class Timer:
7
+ """A simple timer class that can be used as context manager or as decorator using `wraps_function` method
8
+
9
+ Example
10
+ -------
11
+ ::
12
+
13
+ timer = Timer()
14
+ with timer:
15
+ sleep(1.0)
16
+
17
+ print(timer.total) # 1.0...
18
+
19
+ @timer.wraps_function
20
+ def my_function():
21
+ sleep(0.5)
22
+
23
+ my_function()
24
+ print(timer.total) # 1.5...
25
+ my_function()
26
+ print(timer.total) # 2.0...
27
+
28
+ print(timer.timings) # [1.0, 0.5, 0.5]
29
+ """
30
+
31
+ def __init__(self, timings=None):
32
+ if timings is None:
33
+ self.timings = []
34
+ else:
35
+ self.timings = timings
36
+
37
+ def __repr__(self):
38
+ return f"Timer({self.timings})"
39
+
40
+ @property
41
+ def nb_timings(self):
42
+ return len(self.timings)
43
+
44
+ @property
45
+ def total(self):
46
+ return sum(self.timings)
47
+
48
+ @property
49
+ def mean(self):
50
+ if self.nb_timings == 0:
51
+ return float('nan')
52
+ else:
53
+ return self.total/self.nb_timings
54
+
55
+ def __enter__(self):
56
+ self.start_time = time.perf_counter()
57
+
58
+ def __exit__(self, *exc):
59
+ self.timings.append(time.perf_counter() - self.start_time)
60
+
61
+ def wraps_function(self, f):
62
+ @wraps(f)
63
+ def wrapped_f(*args, **kwargs):
64
+ with self:
65
+ return f(*args, **kwargs)
66
+ return wrapped_f
@@ -0,0 +1,2 @@
1
+ Version: 1.11.2
2
+ Arguments: ['C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-f4_ko6cf\\cp39-win_amd64\\build\\venv\\Scripts\\delvewheel', 'repair', '-w', 'C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-f4_ko6cf\\cp39-win_amd64\\repaired_wheel', 'C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-f4_ko6cf\\cp39-win_amd64\\built_wheel\\capytaine-2.3.1-cp39-cp39-win_amd64.whl', '--no-mangle-all']
@@ -1,8 +1,8 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: capytaine
3
- Version: 2.2.1
3
+ Version: 2.3.1
4
4
  Summary: Python BEM solver for linear potential flow, based on Nemoh
5
- Author-Email: Matthieu Ancellin <matthieu.ancellin@eurobios.com>
5
+ Author-Email: Matthieu Ancellin <matthieu.ancellin@mews-labs.com>
6
6
  License: GNU GENERAL PUBLIC LICENSE
7
7
  Version 3, 29 June 2007
8
8
 
@@ -683,25 +683,21 @@ Classifier: Programming Language :: Fortran
683
683
  Classifier: Intended Audience :: Science/Research
684
684
  Classifier: License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)
685
685
  Classifier: Topic :: Scientific/Engineering
686
+ Project-URL: homepage, https://capytaine.org
686
687
  Project-URL: repository, https://github.com/capytaine/capytaine
687
688
  Requires-Python: >=3.8
688
689
  Requires-Dist: numpy>=1.20; python_version >= "3.9"
689
690
  Requires-Dist: numpy>=1.24; python_version == "3.8"
690
691
  Requires-Dist: scipy
691
- Requires-Dist: pandas>=1.3
692
+ Requires-Dist: pandas<3,>=1.3
692
693
  Requires-Dist: xarray
693
694
  Requires-Dist: rich
694
695
  Provides-Extra: optional
695
696
  Requires-Dist: matplotlib; extra == "optional"
696
697
  Requires-Dist: joblib>=1.3; extra == "optional"
697
698
  Requires-Dist: meshio; extra == "optional"
698
- Provides-Extra: test
699
- Requires-Dist: pytest; extra == "test"
700
- Provides-Extra: docs
701
- Requires-Dist: sphinx; extra == "docs"
702
- Requires-Dist: sphinx-toolbox; extra == "docs"
703
- Requires-Dist: sphinxcontrib-proof; extra == "docs"
704
- Requires-Dist: sphinxcontrib-mermaid; extra == "docs"
699
+ Requires-Dist: netcdf4; extra == "optional"
700
+ Requires-Dist: vtk; extra == "optional"
705
701
  Description-Content-Type: text/markdown
706
702
 
707
703
  # Capytaine: a linear potential flow BEM solver with Python.