jinns 1.3.0-py3-none-any.whl → 1.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. jinns/__init__.py +17 -7
  2. jinns/data/_AbstractDataGenerator.py +19 -0
  3. jinns/data/_Batchs.py +31 -12
  4. jinns/data/_CubicMeshPDENonStatio.py +431 -0
  5. jinns/data/_CubicMeshPDEStatio.py +464 -0
  6. jinns/data/_DataGeneratorODE.py +187 -0
  7. jinns/data/_DataGeneratorObservations.py +189 -0
  8. jinns/data/_DataGeneratorParameter.py +206 -0
  9. jinns/data/__init__.py +19 -9
  10. jinns/data/_utils.py +149 -0
  11. jinns/experimental/__init__.py +9 -0
  12. jinns/loss/_DynamicLoss.py +114 -187
  13. jinns/loss/_DynamicLossAbstract.py +45 -68
  14. jinns/loss/_LossODE.py +71 -336
  15. jinns/loss/_LossPDE.py +146 -520
  16. jinns/loss/__init__.py +28 -6
  17. jinns/loss/_abstract_loss.py +15 -0
  18. jinns/loss/_boundary_conditions.py +20 -19
  19. jinns/loss/_loss_utils.py +78 -159
  20. jinns/loss/_loss_weights.py +12 -44
  21. jinns/loss/_operators.py +84 -74
  22. jinns/nn/__init__.py +15 -0
  23. jinns/nn/_abstract_pinn.py +22 -0
  24. jinns/nn/_hyperpinn.py +94 -57
  25. jinns/nn/_mlp.py +50 -25
  26. jinns/nn/_pinn.py +33 -19
  27. jinns/nn/_ppinn.py +70 -34
  28. jinns/nn/_save_load.py +21 -51
  29. jinns/nn/_spinn.py +33 -16
  30. jinns/nn/_spinn_mlp.py +28 -22
  31. jinns/nn/_utils.py +38 -0
  32. jinns/parameters/__init__.py +8 -1
  33. jinns/parameters/_derivative_keys.py +116 -177
  34. jinns/parameters/_params.py +18 -46
  35. jinns/plot/__init__.py +2 -0
  36. jinns/plot/_plot.py +35 -34
  37. jinns/solver/_rar.py +80 -63
  38. jinns/solver/_solve.py +89 -63
  39. jinns/solver/_utils.py +4 -6
  40. jinns/utils/__init__.py +2 -0
  41. jinns/utils/_containers.py +12 -9
  42. jinns/utils/_types.py +11 -57
  43. jinns/utils/_utils.py +4 -11
  44. jinns/validation/__init__.py +2 -0
  45. jinns/validation/_validation.py +20 -19
  46. {jinns-1.3.0.dist-info → jinns-1.4.0.dist-info}/METADATA +4 -3
  47. jinns-1.4.0.dist-info/RECORD +53 -0
  48. {jinns-1.3.0.dist-info → jinns-1.4.0.dist-info}/WHEEL +1 -1
  49. jinns/data/_DataGenerators.py +0 -1634
  50. jinns-1.3.0.dist-info/RECORD +0 -44
  51. {jinns-1.3.0.dist-info → jinns-1.4.0.dist-info/licenses}/AUTHORS +0 -0
  52. {jinns-1.3.0.dist-info → jinns-1.4.0.dist-info/licenses}/LICENSE +0 -0
  53. {jinns-1.3.0.dist-info → jinns-1.4.0.dist-info}/top_level.txt +0 -0
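
The most visible structural change in this list is the split of the monolithic jinns/data/_DataGenerators.py (item 49, 1634 lines removed) into one module per data generator (items 2 and 4-8), together with a new jinns/nn/ package for the network classes (items 22-31). A minimal import sketch, assuming the class names match the new module file names and are still re-exported from jinns.data (not verified against the wheel contents):

    # Hedged sketch: names inferred from the new per-class module files; assumed
    # to still be re-exported by jinns/data/__init__.py after the split.
    from jinns.data import (
        DataGeneratorODE,           # jinns/data/_DataGeneratorODE.py
        CubicMeshPDEStatio,         # jinns/data/_CubicMeshPDEStatio.py
        CubicMeshPDENonStatio,      # jinns/data/_CubicMeshPDENonStatio.py
        DataGeneratorObservations,  # jinns/data/_DataGeneratorObservations.py
        DataGeneratorParameter,     # jinns/data/_DataGeneratorParameter.py
    )

The detailed diff below covers jinns/loss/_DynamicLossAbstract.py (item 13).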
jinns/loss/_DynamicLossAbstract.py

@@ -6,23 +6,25 @@ from __future__ import (
     annotations,
 )  # https://docs.python.org/3/library/typing.html#constant
 
+import abc
+from functools import partial
+from typing import Callable, TYPE_CHECKING, ClassVar, Generic, TypeVar
 import equinox as eqx
-from typing import Callable, Dict, TYPE_CHECKING, ClassVar
 from jaxtyping import Float, Array
-from functools import partial
-import abc
 
 
 # See : https://docs.kidger.site/equinox/api/module/advanced_fields/#equinox.AbstractClassVar--known-issues
 if TYPE_CHECKING:
     from typing import ClassVar as AbstractClassVar
-    from jinns.parameters import Params, ParamsDict
+    from jinns.parameters import Params
+    from jinns.nn._abstract_pinn import AbstractPINN
 else:
     from equinox import AbstractClassVar
 
+InputDim = TypeVar("InputDim")
 
-def _decorator_heteregeneous_params(evaluate):
 
+def _decorator_heteregeneous_params(evaluate):
     def wrapper(*args):
         self, inputs, u, params = args
         _params = eqx.tree_at(
@@ -39,7 +41,7 @@ def _decorator_heteregeneous_params(evaluate):
     return wrapper
 
 
-class DynamicLoss(eqx.Module):
+class DynamicLoss(eqx.Module, Generic[InputDim]):
     r"""
     Abstract base class for dynamic losses. Implements the physical term:
 
@@ -55,7 +57,7 @@ class DynamicLoss(eqx.Module):
        Tmax needs to be given when the PINN time input is normalized in
        [0, 1], ie. we have performed renormalization of the differential
        equation
-    eq_params_heterogeneity : Dict[str, Callable | None], default=None
+    eq_params_heterogeneity : dict[str, Callable | None], default=None
        A dict with the same keys as eq_params and the value being either None
        (no heterogeneity) or a function which encodes for the spatio-temporal
        heterogeneity of the parameter.
@@ -73,37 +75,40 @@ class DynamicLoss(eqx.Module):
     _eq_type = AbstractClassVar[str]  # class variable denoting the type of
     # differential equation
     Tmax: Float = eqx.field(kw_only=True, default=1)
-    eq_params_heterogeneity: Dict[str, Callable | None] = eqx.field(
+    eq_params_heterogeneity: dict[str, Callable | None] = eqx.field(
         kw_only=True, default=None, static=True
     )
 
     def _eval_heterogeneous_parameters(
         self,
-        inputs: Float[Array, "1"] | Float[Array, "dim"] | Float[Array, "1+dim"],
-        u: eqx.Module,
-        params: Params | ParamsDict,
-        eq_params_heterogeneity: Dict[str, Callable | None] = None,
-    ) -> Dict[str, float | Float[Array, "parameter_dimension"]]:
+        inputs: InputDim,
+        u: AbstractPINN,
+        params: Params[Array],
+        eq_params_heterogeneity: dict[str, Callable | None] | None = None,
+    ) -> dict[str, Array]:
         eq_params_ = {}
         if eq_params_heterogeneity is None:
             return params.eq_params
+
         for k, p in params.eq_params.items():
             try:
-                if eq_params_heterogeneity[k] is None:
-                    eq_params_[k] = p
+                if eq_params_heterogeneity[k] is not None:
+                    eq_params_[k] = eq_params_heterogeneity[k](inputs, u, params)  # type: ignore don't know why pyright says
+                    # eq_params_heterogeneity[k] can be None here
                 else:
-                    eq_params_[k] = eq_params_heterogeneity[k](inputs, u, params)
+                    eq_params_[k] = p
             except KeyError:
                 # we authorize missing eq_params_heterogeneity key
                 # if its heterogeneity is None anyway
                 eq_params_[k] = p
         return eq_params_
 
-    def _evaluate(
+    @partial(_decorator_heteregeneous_params)
+    def evaluate(
         self,
-        inputs: Float[Array, "1"] | Float[Array, "dim"] | Float[Array, "1+dim"],
-        u: eqx.Module,
-        params: Params | ParamsDict,
+        inputs: InputDim,
+        u: AbstractPINN,
+        params: Params[Array],
     ) -> float:
         evaluation = self.equation(inputs, u, params)
         if len(evaluation.shape) == 0:
@@ -120,7 +125,7 @@ class DynamicLoss(eqx.Module):
         raise NotImplementedError("You should implement your equation.")
 
 
-class ODE(DynamicLoss):
+class ODE(DynamicLoss[Float[Array, " 1"]]):
     r"""
     Abstract base class for ODE dynamic losses. All dynamic loss must subclass
     this class and override the abstract method `equation`.
@@ -131,7 +136,7 @@ class ODE(DynamicLoss):
        Tmax needs to be given when the PINN time input is normalized in
        [0, 1], ie. we have performed renormalization of the differential
        equation
-    eq_params_heterogeneity : Dict[str, Callable | None], default=None
+    eq_params_heterogeneity : dict[str, Callable | None], default=None
        Default None. A dict with the keys being the same as in eq_params
        and the value being either None (no heterogeneity) or a function
        which encodes for the spatio-temporal heterogeneity of the parameter.
@@ -147,19 +152,9 @@
 
     _eq_type: ClassVar[str] = "ODE"
 
-    @partial(_decorator_heteregeneous_params)
-    def evaluate(
-        self,
-        t: Float[Array, "1"],
-        u: eqx.Module | Dict[str, eqx.Module],
-        params: Params | ParamsDict,
-    ) -> float:
-        """Here we call DynamicLoss._evaluate with x=None"""
-        return self._evaluate(t, u, params)
-
     @abc.abstractmethod
     def equation(
-        self, t: Float[Array, "1"], u: eqx.Module, params: Params | ParamsDict
+        self, t: Float[Array, " 1"], u: AbstractPINN, params: Params[Array]
     ) -> float:
         r"""
         The differential operator defining the ODE.
@@ -170,11 +165,11 @@ class ODE(DynamicLoss):
 
         Parameters
         ----------
-        t : Float[Array, "1"]
+        t : Float[Array, " 1"]
            A 1-dimensional jnp.array representing the time point.
-        u : eqx.Module
+        u : AbstractPINN
            The network with a call signature `u(t, params)`.
-        params : Params | ParamsDict
+        params : Params[Array]
            The equation and neural network parameters $\theta$ and $\nu$.
 
        Returns
@@ -190,7 +185,7 @@ class ODE(DynamicLoss):
         raise NotImplementedError
 
 
-class PDEStatio(DynamicLoss):
+class PDEStatio(DynamicLoss[Float[Array, " dim"]]):
     r"""
     Abstract base class for stationnary PDE dynamic losses. All dynamic loss must subclass this class and override the abstract method `equation`.
 
@@ -200,7 +195,7 @@ class PDEStatio(DynamicLoss):
        Tmax needs to be given when the PINN time input is normalized in
        [0, 1], ie. we have performed renormalization of the differential
        equation
-    eq_params_heterogeneity : Dict[str, Callable | None], default=None
+    eq_params_heterogeneity : dict[str, Callable | None], default=None
        Default None. A dict with the keys being the same as in eq_params
        and the value being either None (no heterogeneity) or a function
        which encodes for the spatio-temporal heterogeneity of the parameter.
@@ -216,16 +211,9 @@
 
     _eq_type: ClassVar[str] = "Statio PDE"
 
-    @partial(_decorator_heteregeneous_params)
-    def evaluate(
-        self, x: Float[Array, "dimension"], u: eqx.Module, params: Params | ParamsDict
-    ) -> float:
-        """Here we call the DynamicLoss._evaluate with t=None"""
-        return self._evaluate(x, u, params)
-
     @abc.abstractmethod
     def equation(
-        self, x: Float[Array, "d"], u: eqx.Module, params: Params | ParamsDict
+        self, x: Float[Array, " dim"], u: AbstractPINN, params: Params[Array]
     ) -> float:
         r"""The differential operator defining the stationnary PDE.
 
@@ -235,11 +223,11 @@
 
         Parameters
         ----------
-        x : Float[Array, "d"]
+        x : Float[Array, " dim"]
            A `d` dimensional jnp.array representing a point in the spatial domain $\Omega$.
-        u : eqx.Module
+        u : AbstractPINN
            The neural network.
-        params : Params | ParamsDict
+        params : Params[Array]
            The parameters of the equation and the networks, $\theta$ and $\nu$ respectively.
 
        Returns
@@ -255,7 +243,7 @@
         raise NotImplementedError
 
 
-class PDENonStatio(DynamicLoss):
+class PDENonStatio(DynamicLoss[Float[Array, " 1 + dim"]]):
     """
     Abstract base class for non-stationnary PDE dynamic losses. All dynamic loss must subclass this class and override the abstract method `equation`.
 
@@ -265,7 +253,7 @@ class PDENonStatio(DynamicLoss):
        Tmax needs to be given when the PINN time input is normalized in
       [0, 1], ie. we have performed renormalization of the differential
       equation
-    eq_params_heterogeneity : Dict[str, Callable | None], default=None
+    eq_params_heterogeneity : dict[str, Callable | None], default=None
       Default None. A dict with the keys being the same as in eq_params
       and the value being either None (no heterogeneity) or a function
       which encodes for the spatio-temporal heterogeneity of the parameter.
@@ -281,23 +269,12 @@
 
     _eq_type: ClassVar[str] = "Non-statio PDE"
 
-    @partial(_decorator_heteregeneous_params)
-    def evaluate(
-        self,
-        t_x: Float[Array, "1 + dim"],
-        u: eqx.Module,
-        params: Params | ParamsDict,
-    ) -> float:
-        """Here we call the DynamicLoss._evaluate with full arguments"""
-        ans = self._evaluate(t_x, u, params)
-        return ans
-
     @abc.abstractmethod
     def equation(
         self,
-        t_x: Float[Array, "1 + dim"],
-        u: eqx.Module,
-        params: Params | ParamsDict,
+        t_x: Float[Array, " 1 + dim"],
+        u: AbstractPINN,
+        params: Params[Array],
     ) -> float:
         r"""The differential operator defining the non-stationnary PDE.
 
@@ -307,11 +284,11 @@
 
         Parameters
         ----------
-        t_x : Float[Array, "1 + dim"]
+        t_x : Float[Array, " 1 + dim"]
            A jnp array containing the concatenation of a time point and a point in $\Omega$
-        u : eqx.Module
+        u : AbstractPINN
            The neural network.
-        params : Params | ParamsDict
+        params : Params[Array]
            The parameters of the equation and the networks, $\theta$ and $\nu$ respectively.
        Returns
        -------
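
After this refactor, subclasses no longer define their own evaluate: the decorated DynamicLoss.evaluate resolves heterogeneous parameters once, generically over InputDim, and each subclass only overrides equation. A minimal sketch of a user-defined dynamic loss against the new 1.4.0 signatures; it assumes ODE is re-exported from jinns.loss, and the decay equation, the "theta" parameter key, and the heterogeneity callable are illustrative, not taken from the diff:

    import jax
    import jax.numpy as jnp
    from jaxtyping import Array, Float

    from jinns.loss import ODE  # assumed public re-export of the ODE base class


    class ExponentialDecay(ODE):
        # Residual of du/dt = -theta * u; "theta" is a hypothetical eq_params key.

        def equation(self, t: Float[Array, " 1"], u, params) -> float:
            # Time derivative of the scalar network output at t
            du_dt = jax.grad(lambda t_: u(t_, params).squeeze())(t)
            return du_dt + params.eq_params["theta"] * u(t, params)


    # Heterogeneity hooks keep the (inputs, u, params) signature seen in
    # _eval_heterogeneous_parameters; here "theta" decays in time (illustrative).
    dyn_loss = ExponentialDecay(
        Tmax=1.0,
        eq_params_heterogeneity={"theta": lambda t, u, params: jnp.exp(-t)},
    )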